From e7efd93a85e306b34ad215325e07633b43f3b33b Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 7 Apr 2023 11:12:26 -0400 Subject: [PATCH 001/762] Add BLS signature utils. --- go.mod | 17 +++++---- go.sum | 24 ++++++++++++ lib/types_bls_signature.go | 67 +++++++++++++++++++++++++++++++++ lib/types_bls_signature_test.go | 32 ++++++++++++++++ 4 files changed, 133 insertions(+), 7 deletions(-) create mode 100644 lib/types_bls_signature.go create mode 100644 lib/types_bls_signature_test.go diff --git a/go.mod b/go.mod index 1cbd55b30..beb35bb7c 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/decred/dcrd/lru v1.1.1 github.com/deso-protocol/go-deadlock v1.0.0 github.com/deso-protocol/go-merkle-tree v1.0.0 - github.com/dgraph-io/badger/v3 v3.2103.0 + github.com/dgraph-io/badger/v3 v3.2103.5 github.com/ethereum/go-ethereum v1.9.25 github.com/fatih/color v1.13.0 github.com/gernest/mention v2.0.0+incompatible @@ -31,10 +31,10 @@ require ( github.com/spf13/cobra v1.1.3 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.7.1 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.8.0 github.com/tyler-smith/go-bip39 v1.0.2 github.com/unrolled/secure v1.0.8 - golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 + golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d golang.org/x/sync v0.0.0-20210220032951-036812b2e83c gopkg.in/DataDog/dd-trace-go.v1 v1.29.0 ) @@ -49,12 +49,13 @@ require ( github.com/DataDog/zstd v1.4.8 // indirect github.com/Microsoft/go-winio v0.4.16 // indirect github.com/NVIDIA/cstruct v0.0.0-20210817223100-441a06a021c8 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect github.com/bwesterb/go-ristretto v1.2.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect - github.com/dgraph-io/ristretto v0.1.0 // indirect + 
github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/git-chglog/git-chglog v0.0.0-20200414013904-db796966b373 // indirect @@ -71,6 +72,7 @@ require ( github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect + github.com/klauspost/compress v1.12.3 // indirect github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.1 // indirect github.com/mattn/go-colorable v0.1.9 // indirect @@ -78,6 +80,7 @@ require ( github.com/mattn/goveralls v0.0.6 // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/onflow/flow-go/crypto v0.24.7 // indirect github.com/onsi/ginkgo v1.15.0 // indirect github.com/onsi/gomega v1.10.5 // indirect github.com/pelletier/go-toml v1.7.0 // indirect @@ -99,8 +102,8 @@ require ( github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect go.opencensus.io v0.23.0 // indirect golang.org/x/mod v0.4.2 // indirect - golang.org/x/net v0.0.0-20210614182718-04defd469f4e // indirect - golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac // indirect + golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect + golang.org/x/sys v0.0.0-20221010170243-090e33056c14 // indirect golang.org/x/text v0.3.6 // indirect golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect golang.org/x/tools v0.1.5 // indirect @@ -111,6 +114,6 @@ require ( gopkg.in/ini.v1 v1.51.0 // indirect gopkg.in/kyokomi/emoji.v1 v1.5.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect mellium.im/sasl v0.2.1 // indirect ) diff --git a/go.sum b/go.sum index 0ae955cd1..96544d908 100644 --- a/go.sum +++ b/go.sum @@ -65,6 +65,8 @@ github.com/btcsuite/btcd 
v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnC github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= +github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E= +github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -128,9 +130,13 @@ github.com/deso-protocol/go-merkle-tree v1.0.0 h1:9zkI5dQsITYy77s4kbTGPQmZnhQ+Ls github.com/deso-protocol/go-merkle-tree v1.0.0/go.mod h1:V/vbg/maaNv6G7zf9VVs645nLFx/jsO2L/awFB/S/ZU= github.com/dgraph-io/badger/v3 v3.2103.0 h1:abkD2EnP3+6Tj8h5LI1y00dJ9ICKTIAzvG9WmZ8S2c4= github.com/dgraph-io/badger/v3 v3.2103.0/go.mod h1:GHMCYxuDWyzbHkh4k3yyg4PM61tJPFfEGSMbE3Vd5QE= +github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= +github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= github.com/dgraph-io/ristretto v0.0.4-0.20210309073149-3836124cdc5a/go.mod h1:MIonLggsKgZLUSt414ExgwNtlOL5MuEoAJP514mwGe8= github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -213,6 +219,7 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -303,6 +310,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -363,6 +372,8 @@ github.com/oleiade/lane v1.0.1 
h1:hXofkn7GEOubzTwNpeL9MaNy8WxolCYb9cInAIeqShU= github.com/oleiade/lane v1.0.1/go.mod h1:IyTkraa4maLfjq/GmHR+Dxb4kCMtEGeb+qmhlrQ5Mk4= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onflow/flow-go/crypto v0.24.7 h1:RCLuB83At4z5wkAyUCF7MYEnPoIIOHghJaODuJyEoW0= +github.com/onflow/flow-go/crypto v0.24.7/go.mod h1:fqCzkIBBMRRkciVrvW21rECKq1oD7Q6u+bCI78lfNX0= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= @@ -456,6 +467,8 @@ github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUW github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -464,6 +477,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= @@ -534,6 +550,8 @@ golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -592,6 +610,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= 
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -646,6 +666,8 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14 h1:k5II8e6QD8mITdi+okbbmR/cIyEbeXLBhy5Ha4nevyc= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -763,6 +785,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod 
h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/lib/types_bls_signature.go b/lib/types_bls_signature.go new file mode 100644 index 000000000..961ba9250 --- /dev/null +++ b/lib/types_bls_signature.go @@ -0,0 +1,67 @@ +package lib + +import ( + flowCrypto "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/crypto/hash" + "io" +) + +// +// TYPES: BLSPublicKey +// + +const BLSSigningAlgorithm = flowCrypto.BLSBLS12381 + +var BLSHashingAlgorithm = hash.NewSHA2_256() + +type BLSPublicKey struct { + PublicKey flowCrypto.PublicKey +} + +func NewBLSPublicKey(publicKeyBytes []byte) (*BLSPublicKey, error) { + publicKey, err := flowCrypto.DecodePublicKey(BLSSigningAlgorithm, publicKeyBytes) + return &BLSPublicKey{PublicKey: publicKey}, err +} + +func (blsPublicKey *BLSPublicKey) ToBytes() []byte { + return EncodeByteArray(blsPublicKey.PublicKey.Encode()) +} + +func (blsPublicKey *BLSPublicKey) FromBytes(rr io.Reader) error { + publicKeyBytes, err := DecodeByteArray(rr) + if err != nil { + return err + } + blsPublicKey.PublicKey, err = flowCrypto.DecodePublicKey(BLSSigningAlgorithm, publicKeyBytes) + return err +} + +func (blsPublicKey *BLSPublicKey) Verify(blsSignature *BLSSignature, input []byte) (bool, error) { + return blsPublicKey.PublicKey.Verify(blsSignature.Signature, input, BLSHashingAlgorithm) +} + +func (blsPublicKey *BLSPublicKey) Eq(other *BLSPublicKey) bool { + return blsPublicKey.PublicKey.Equals(other.PublicKey) +} + +// +// TYPES: BLSSignature +// + +type BLSSignature struct { + Signature flowCrypto.Signature +} + +func NewBLSSignature(signature []byte) *BLSSignature { + return &BLSSignature{Signature: signature} +} + +func (blsSignature *BLSSignature) ToBytes() []byte { + return EncodeByteArray(blsSignature.Signature) +} + 
+func (blsSignature *BLSSignature) FromBytes(rr io.Reader) error { + var err error + blsSignature.Signature, err = DecodeByteArray(rr) + return err +} diff --git a/lib/types_bls_signature_test.go b/lib/types_bls_signature_test.go new file mode 100644 index 000000000..d50e39cf0 --- /dev/null +++ b/lib/types_bls_signature_test.go @@ -0,0 +1,32 @@ +package lib + +import ( + flowCrypto "github.com/onflow/flow-go/crypto" + "github.com/stretchr/testify/require" + "testing" +) + +func TestVerifyingBLSSignatures(t *testing.T) { + // Generate two BLS public/private key pairs. + privateKey1, err := flowCrypto.GeneratePrivateKey(BLSSigningAlgorithm, RandomBytes(64)) + require.NoError(t, err) + publicKey1 := privateKey1.PublicKey() + blsPublicKey1, err := NewBLSPublicKey(publicKey1.Encode()) + require.NoError(t, err) + + privateKey2, err := flowCrypto.GeneratePrivateKey(BLSSigningAlgorithm, RandomBytes(64)) + require.NoError(t, err) + publicKey2 := privateKey2.PublicKey() + blsPublicKey2, err := NewBLSPublicKey(publicKey2.Encode()) + require.NoError(t, err) + + // Test Eq(). + require.True(t, blsPublicKey1.Eq(blsPublicKey1)) + require.True(t, blsPublicKey2.Eq(blsPublicKey2)) + require.False(t, blsPublicKey1.Eq(blsPublicKey2)) + + // Test Verify(). + + // Test BLSPublicKey.ToBytes() and BLSPublicKey.FromBytes(). + // Test BLSSignature.ToBytes() and BLSSignature.FromBytes(). +} From 5d9abb139df1187842c9231e838c193c0cf58a22 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 7 Apr 2023 11:54:43 -0400 Subject: [PATCH 002/762] Get tests passing. 
--- lib/types_bls_signature.go | 10 +++++-- lib/types_bls_signature_test.go | 51 +++++++++++++++++++++++++++++++-- 2 files changed, 57 insertions(+), 4 deletions(-) diff --git a/lib/types_bls_signature.go b/lib/types_bls_signature.go index 961ba9250..b6725cf13 100644 --- a/lib/types_bls_signature.go +++ b/lib/types_bls_signature.go @@ -1,8 +1,10 @@ +//go:build relic + package lib import ( + "bytes" flowCrypto "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "io" ) @@ -12,7 +14,7 @@ import ( const BLSSigningAlgorithm = flowCrypto.BLSBLS12381 -var BLSHashingAlgorithm = hash.NewSHA2_256() +var BLSHashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-protocol") type BLSPublicKey struct { PublicKey flowCrypto.PublicKey @@ -65,3 +67,7 @@ func (blsSignature *BLSSignature) FromBytes(rr io.Reader) error { blsSignature.Signature, err = DecodeByteArray(rr) return err } + +func (blsSignature *BLSSignature) Eq(other *BLSSignature) bool { + return bytes.Equal(blsSignature.ToBytes(), other.ToBytes()) +} diff --git a/lib/types_bls_signature_test.go b/lib/types_bls_signature_test.go index d50e39cf0..01d4656dc 100644 --- a/lib/types_bls_signature_test.go +++ b/lib/types_bls_signature_test.go @@ -1,6 +1,7 @@ package lib import ( + "bytes" flowCrypto "github.com/onflow/flow-go/crypto" "github.com/stretchr/testify/require" "testing" @@ -20,13 +21,59 @@ func TestVerifyingBLSSignatures(t *testing.T) { blsPublicKey2, err := NewBLSPublicKey(publicKey2.Encode()) require.NoError(t, err) - // Test Eq(). + // Test BLSPublicKey.Eq(). require.True(t, blsPublicKey1.Eq(blsPublicKey1)) require.True(t, blsPublicKey2.Eq(blsPublicKey2)) require.False(t, blsPublicKey1.Eq(blsPublicKey2)) - // Test Verify(). + // Test BLSPublicKey.Verify(). + // 1. PrivateKey1 signs a random payload. + // 2. Verify BLSPublicKey1 is the signer. + // 3. Verify BLSPublicKey2 is not the signer. + // 4. PrivateKey2 signs a random payload. + // 5. Verify BLSPublicKey1 is not the signer. + // 6. 
Verify BLSPublicKey2 is the signer. + randomPayload1 := RandomBytes(256) + signature1, err := privateKey1.Sign(randomPayload1, BLSHashingAlgorithm) + require.NoError(t, err) + blsSignature1 := NewBLSSignature(signature1) + isVerified, err := blsPublicKey1.Verify(blsSignature1, randomPayload1) + require.NoError(t, err) + require.True(t, isVerified) + isVerified, err = blsPublicKey2.Verify(blsSignature1, randomPayload1) + require.NoError(t, err) + require.False(t, isVerified) + + randomPayload2 := RandomBytes(256) + signature2, err := privateKey2.Sign(randomPayload2, BLSHashingAlgorithm) + require.NoError(t, err) + blsSignature2 := NewBLSSignature(signature2) + isVerified, err = blsPublicKey1.Verify(blsSignature2, randomPayload2) + require.NoError(t, err) + require.False(t, isVerified) + isVerified, err = blsPublicKey2.Verify(blsSignature2, randomPayload2) + require.NoError(t, err) + require.True(t, isVerified) + + // Test BLSPublicKey.Eq(). + require.True(t, blsPublicKey1.Eq(blsPublicKey1)) + require.True(t, blsPublicKey2.Eq(blsPublicKey2)) + require.False(t, blsPublicKey1.Eq(blsPublicKey2)) + + // Test BLSSignature.Eq(). + require.True(t, blsSignature1.Eq(blsSignature1)) + require.True(t, blsSignature2.Eq(blsSignature2)) + require.False(t, blsSignature1.Eq(blsSignature2)) // Test BLSPublicKey.ToBytes() and BLSPublicKey.FromBytes(). + blsPublicKeyBytes := blsPublicKey1.ToBytes() + copyBLSPublicKey1 := &BLSPublicKey{} + require.NoError(t, copyBLSPublicKey1.FromBytes(bytes.NewBuffer(blsPublicKeyBytes))) + require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) + // Test BLSSignature.ToBytes() and BLSSignature.FromBytes(). 
+ blsSignatureBytes := blsSignature1.ToBytes() + copyBLSSignature := &BLSSignature{} + require.NoError(t, copyBLSSignature.FromBytes(bytes.NewBuffer(blsSignatureBytes))) + require.True(t, blsSignature1.Eq(copyBLSSignature)) } From fdc2ff7c732b09de921194f1935fb380151016f4 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 7 Apr 2023 12:45:29 -0400 Subject: [PATCH 003/762] Update test dockerfile to install relic. --- .gitignore | 1 + lib/types_bls_signature.go | 2 -- scripts/install-relic.sh | 35 +++++++++++++++++++++++++++++++++++ test.Dockerfile | 8 +++++--- 4 files changed, 41 insertions(+), 5 deletions(-) create mode 100755 scripts/install-relic.sh diff --git a/.gitignore b/.gitignore index c7b1bb1aa..dc0267338 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ quick_testing dump_txns compare_blocks.go backend +core .idea diff --git a/lib/types_bls_signature.go b/lib/types_bls_signature.go index b6725cf13..2a44e7f9f 100644 --- a/lib/types_bls_signature.go +++ b/lib/types_bls_signature.go @@ -1,5 +1,3 @@ -//go:build relic - package lib import ( diff --git a/scripts/install-relic.sh b/scripts/install-relic.sh new file mode 100755 index 000000000..f19971afb --- /dev/null +++ b/scripts/install-relic.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# This script installs Relic, a C library dependency for the BLS signature implementation +# provided by Flow. This file was copy-pasted as-is from Flow's installation README found +# here: https://github.com/onflow/flow-go/tree/master/crypto. 
+ +# crypto package +PKG_NAME="github.com/onflow/flow-go/crypto" + +# go get the package +go get ${PKG_NAME} + +# go.mod +MOD_FILE="./go.mod" + +# the version of onflow/flow-go/crypto used in the project is read from the go.mod file +if [ -f "${MOD_FILE}" ] +then + # extract the version from the go.mod file + VERSION="$(grep ${PKG_NAME} < ${MOD_FILE} | cut -d' ' -f 2)" + # using the right version, get the package directory path + PKG_DIR="$(go env GOPATH)/pkg/mod/${PKG_NAME}@${VERSION}" +else + { echo "couldn't find go.mod file - make sure the script is in the project root directory"; exit 1; } +fi + +# grant permissions if not existant +if [[ ! -r ${PKG_DIR} || ! -w ${PKG_DIR} || ! -x ${PKG_DIR} ]]; then + sudo chmod -R 755 "${PKG_DIR}" +fi + +# get into the package directory and set up the external dependencies +( + cd "${PKG_DIR}" || { echo "cd into the GOPATH package folder failed"; exit 1; } + go generate +) diff --git a/test.Dockerfile b/test.Dockerfile index a31beba78..e7ce306c0 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -2,15 +2,17 @@ FROM alpine:latest AS core RUN apk update RUN apk upgrade -RUN apk add --update go gcc g++ vips vips-dev +RUN apk add --update bash cmake git go gcc g++ make vips vips-dev WORKDIR /deso/src/core COPY go.mod . COPY go.sum . - RUN go mod download +COPY scripts/install-relic.sh . +RUN ./install-relic.sh + COPY desohash desohash COPY cmd cmd COPY lib lib @@ -21,4 +23,4 @@ COPY main.go . # build backend RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core main.go -ENTRYPOINT ["go", "test", "-v", "github.com/deso-protocol/core/lib"] +ENTRYPOINT ["go", "test", "-tags", "relic", "-v", "github.com/deso-protocol/core/lib"] From 1123964e1096f019b65bce6374478485b45e88df Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 7 Apr 2023 14:11:26 -0400 Subject: [PATCH 004/762] Add relic + no relic files. 
--- lib/types_bls_signature.go | 2 + lib/types_bls_signature_no_relic.go | 62 ++++++++++++++++++++++++ lib/types_bls_signature_no_relic_test.go | 13 +++++ lib/types_bls_signature_test.go | 2 + 4 files changed, 79 insertions(+) create mode 100644 lib/types_bls_signature_no_relic.go create mode 100644 lib/types_bls_signature_no_relic_test.go diff --git a/lib/types_bls_signature.go b/lib/types_bls_signature.go index 2a44e7f9f..b6725cf13 100644 --- a/lib/types_bls_signature.go +++ b/lib/types_bls_signature.go @@ -1,3 +1,5 @@ +//go:build relic + package lib import ( diff --git a/lib/types_bls_signature_no_relic.go b/lib/types_bls_signature_no_relic.go new file mode 100644 index 000000000..2445f05bc --- /dev/null +++ b/lib/types_bls_signature_no_relic.go @@ -0,0 +1,62 @@ +//go:build !relic + +package lib + +import ( + flowCrypto "github.com/onflow/flow-go/crypto" + "io" +) + +// +// TYPES: BLSPublicKey +// + +const BLSNoRelicError = "BLS keys can't be used without Relic installed" + +type BLSPublicKey struct { + PublicKey flowCrypto.PublicKey +} + +func NewBLSPublicKey(publicKeyBytes []byte) (*BLSPublicKey, error) { + panic(BLSNoRelicError) +} + +func (blsPublicKey *BLSPublicKey) ToBytes() []byte { + panic(BLSNoRelicError) +} + +func (blsPublicKey *BLSPublicKey) FromBytes(rr io.Reader) error { + panic(BLSNoRelicError) +} + +func (blsPublicKey *BLSPublicKey) Verify(blsSignature *BLSSignature, input []byte) (bool, error) { + panic(BLSNoRelicError) +} + +func (blsPublicKey *BLSPublicKey) Eq(other *BLSPublicKey) bool { + panic(BLSNoRelicError) +} + +// +// TYPES: BLSSignature +// + +type BLSSignature struct { + Signature flowCrypto.Signature +} + +func NewBLSSignature(signature []byte) *BLSSignature { + panic(BLSNoRelicError) +} + +func (blsSignature *BLSSignature) ToBytes() []byte { + panic(BLSNoRelicError) +} + +func (blsSignature *BLSSignature) FromBytes(rr io.Reader) error { + panic(BLSNoRelicError) +} + +func (blsSignature *BLSSignature) Eq(other *BLSSignature) bool { + 
panic(BLSNoRelicError) +} diff --git a/lib/types_bls_signature_no_relic_test.go b/lib/types_bls_signature_no_relic_test.go new file mode 100644 index 000000000..e4be0292e --- /dev/null +++ b/lib/types_bls_signature_no_relic_test.go @@ -0,0 +1,13 @@ +//go:build !relic + +package lib + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestVerifyingBLSSignatures(t *testing.T) { + assert.Panics(t, func() { NewBLSPublicKey(nil) }) + assert.Panics(t, func() { NewBLSSignature(nil) }) +} diff --git a/lib/types_bls_signature_test.go b/lib/types_bls_signature_test.go index 01d4656dc..d410ba871 100644 --- a/lib/types_bls_signature_test.go +++ b/lib/types_bls_signature_test.go @@ -1,3 +1,5 @@ +//go:build relic + package lib import ( From 470dff830a8880cc16ef044bdfb526fac8b76432 Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Fri, 21 Apr 2023 14:18:53 -0400 Subject: [PATCH 005/762] RegisterAsValidator + UnregisterAsValidator TxnTypes (#495) * Add block view validator. * Add unregister as validator types. * Add validator entry db utils. * Add blockchain utils. * Add utxo view getters. * Delete then set instead of updating validator entry. * Add connect register as validator logic. * Add disconnect register as validator. * Add connect and disconnect unregister as validator. * Add unregister as validator blockchain and mempool utils. * Only overwrite validatorID if new validator. * Use url parse request URI to validate domains. * Tie in register/unregister txn types. * Finish blockchain.go construct validator txns. * Add test file template. * Add tests for validations. * Add tests for registering, querying, unregistering w/o flushing. * Find top validators by stake across the db and UtxoView. * Fix tests when flushing to db. * Test updating a validator. * Store blockheights as uint64. * Fix disconnect logic for register as validator txn. * Fix disconnect tests for unregister txn. 
* Add stubbed voting public key fields. * Change weird UtxoOperation version to see if fixes CI. * Fix block heights in tests. * Add passing test for creating validator derived key. * Fix merge conflicts more. * Test registering + unregistering validators using derived keys. * Update TODO comments. * Clean up comments. * Tie in txindex metadata creation. * Split out setting/flushing global stake amount nanos. * include extra data, txn version, txn nonce, and txn fee nanos when unmarshaling in go (#496) * Address PR feedback. * make Read transaction functions public (#498) Co-authored-by: Lazy Nina <> * Ln/fix peer panic issue (#499) * only expect v2 bundle if peer has a srv and a blockchain * use .Params instead of .srv.blockchain.Params * add recover from panic in _handleOutExpectedResponse * add comment * convert _handleOutExpectedResponse to switch case * Address PR feedback. * Update the ValidatorMapKey fields. * Make copy of value when merging extradata. * Make a copy of the ValidatorEntries on a new view. * Add comment re: validators by stake index value. 
--------- Co-authored-by: Lazy Nina <81658138+lazynina@users.noreply.github.com> --- lib/block_view.go | 45 +- lib/block_view_flush.go | 6 + lib/block_view_types.go | 47 +- lib/block_view_validator.go | 1493 ++++++++++++++++++++++++++++++ lib/block_view_validator_test.go | 619 +++++++++++++ lib/constants.go | 24 +- lib/db_utils.go | 57 +- lib/mempool.go | 8 + lib/network.go | 138 +-- lib/peer.go | 66 +- 10 files changed, 2409 insertions(+), 94 deletions(-) create mode 100644 lib/block_view_validator.go create mode 100644 lib/block_view_validator_test.go diff --git a/lib/block_view.go b/lib/block_view.go index 3e8293681..9a616b90d 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "fmt" "github.com/btcsuite/btcd/wire" + "github.com/holiman/uint256" "math" "math/big" "reflect" @@ -113,6 +114,12 @@ type UtxoView struct { // Map of DeSoNonce and PKID to TransactorNonceEntry TransactorNonceMapKeyToTransactorNonceEntry map[TransactorNonceMapKey]*TransactorNonceEntry + // Validator mappings + ValidatorMapKeyToValidatorEntry map[ValidatorMapKey]*ValidatorEntry + + // Global stake across validators + GlobalStakeAmountNanos *uint256.Int + // The hash of the tip the view is currently referencing. Mainly used // for error-checking when doing a bulk operation on the view. 
TipHash *BlockHash @@ -201,6 +208,12 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // Transaction nonce map bav.TransactorNonceMapKeyToTransactorNonceEntry = make(map[TransactorNonceMapKey]*TransactorNonceEntry) + + // ValidatorEntries + bav.ValidatorMapKeyToValidatorEntry = make(map[ValidatorMapKey]*ValidatorEntry) + + // Global stake across validators + bav.GlobalStakeAmountNanos = uint256.NewInt() } func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { @@ -453,6 +466,15 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.TransactorNonceMapKeyToTransactorNonceEntry[entryKey] = &newEntry } + // Copy the ValidatorEntries + newView.ValidatorMapKeyToValidatorEntry = make(map[ValidatorMapKey]*ValidatorEntry, len(bav.ValidatorMapKeyToValidatorEntry)) + for entryKey, entry := range bav.ValidatorMapKeyToValidatorEntry { + newView.ValidatorMapKeyToValidatorEntry[entryKey] = entry.Copy() + } + + // Copy the GlobalStakeAmountNanos. + newView.GlobalStakeAmountNanos = bav.GlobalStakeAmountNanos.Clone() + return newView, nil } @@ -1290,6 +1312,14 @@ func (bav *UtxoView) DisconnectTransaction(currentTxn *MsgDeSoTxn, txnHash *Bloc case TxnTypeNewMessage: return bav._disconnectNewMessage( OperationTypeNewMessage, currentTxn, txnHash, utxoOpsForTxn, blockHeight) + + case TxnTypeRegisterAsValidator: + return bav._disconnectRegisterAsValidator( + OperationTypeRegisterAsValidator, currentTxn, txnHash, utxoOpsForTxn, blockHeight) + + case TxnTypeUnregisterAsValidator: + return bav._disconnectUnregisterAsValidator( + OperationTypeUnregisterAsValidator, currentTxn, txnHash, utxoOpsForTxn, blockHeight) } return fmt.Errorf("DisconnectBlock: Unimplemented txn type %v", currentTxn.TxnMeta.GetTxnType().String()) @@ -3192,6 +3222,13 @@ func (bav *UtxoView) _connectTransaction(txn *MsgDeSoTxn, txHash *BlockHash, totalInput, totalOutput, utxoOpsForTxn, err = bav._connectNewMessage( txn, txHash, blockHeight, verifySignatures) + + case TxnTypeRegisterAsValidator: + 
totalInput, totalOutput, utxoOpsForTxn, err = bav._connectRegisterAsValidator(txn, txHash, blockHeight, verifySignatures) + + case TxnTypeUnregisterAsValidator: + totalInput, totalOutput, utxoOpsForTxn, err = bav._connectUnregisterAsValidator(txn, txHash, blockHeight, verifySignatures) + default: err = fmt.Errorf("ConnectTransaction: Unimplemented txn type %v", txn.TxnMeta.GetTxnType().String()) } @@ -3955,12 +3992,16 @@ func mergeExtraData(oldMap map[string][]byte, newMap map[string][]byte) map[stri // Add the values from the oldMap for kk, vv := range oldMap { - retMap[kk] = vv + vvCopy := make([]byte, len(vv)) + copy(vvCopy, vv) + retMap[kk] = vvCopy } // Add the values from the newMap. Allow the newMap values to overwrite the // oldMap values during the merge. for kk, vv := range newMap { - retMap[kk] = vv + vvCopy := make([]byte, len(vv)) + copy(vvCopy, vv) + retMap[kk] = vvCopy } return retMap diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index fc3c257ae..5ee8ab26a 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -140,6 +140,12 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushNonceEntriesToDbWithTxn(txn); err != nil { return err } + if err := bav._flushValidatorEntriesToDbWithTxn(txn, blockHeight); err != nil { + return err + } + if err := bav._flushGlobalStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { + return err + } return nil } diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 3ee7b6f10..3aca53fff 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -110,9 +110,10 @@ const ( EncoderTypeDmThreadEntry EncoderType = 37 EncoderTypeDeSoNonce EncoderType = 38 EncoderTypeTransactorNonceEntry EncoderType = 39 + EncoderTypeValidatorEntry EncoderType = 40 // EncoderTypeEndBlockView encoder type should be at the end and is used for automated tests. 
- EncoderTypeEndBlockView EncoderType = 40 + EncoderTypeEndBlockView EncoderType = 41 ) // Txindex encoder types. @@ -147,9 +148,11 @@ const ( EncoderTypeAccessGroupTxindexMetadata EncoderType = 1000027 EncoderTypeAccessGroupMembersTxindexMetadata EncoderType = 1000028 EncoderTypeNewMessageTxindexMetadata EncoderType = 1000029 + EncoderTypeRegisterAsValidatorTxindexMetadata EncoderType = 1000030 + EncoderTypeUnregisterAsValidatorTxindexMetadata EncoderType = 1000031 // EncoderTypeEndTxIndex encoder type should be at the end and is used for automated tests. - EncoderTypeEndTxIndex EncoderType = 1000030 + EncoderTypeEndTxIndex EncoderType = 1000032 ) // This function translates the EncoderType into an empty DeSoEncoder struct. @@ -236,6 +239,8 @@ func (encoderType EncoderType) New() DeSoEncoder { return &DeSoNonce{} case EncoderTypeTransactorNonceEntry: return &TransactorNonceEntry{} + case EncoderTypeValidatorEntry: + return &ValidatorEntry{} } // Txindex encoder types @@ -300,6 +305,10 @@ func (encoderType EncoderType) New() DeSoEncoder { return &AccessGroupMembersTxindexMetadata{} case EncoderTypeNewMessageTxindexMetadata: return &NewMessageTxindexMetadata{} + case EncoderTypeRegisterAsValidatorTxindexMetadata: + return &RegisterAsValidatorTxindexMetadata{} + case EncoderTypeUnregisterAsValidatorTxindexMetadata: + return &UnregisterAsValidatorTxindexMetadata{} default: return nil } @@ -594,7 +603,10 @@ const ( OperationTypeAddBalance OperationType = 36 OperationTypeSpendBalance OperationType = 37 OperationTypeDeleteExpiredNonces OperationType = 38 - // NEXT_TAG = 39 + OperationTypeRegisterAsValidator OperationType = 39 + OperationTypeUnregisterAsValidator OperationType = 40 + + // NEXT_TAG = 41 ) func (op OperationType) String() string { @@ -675,6 +687,10 @@ func (op OperationType) String() string { return "OperationTypeSpendBalance" case OperationTypeDeleteExpiredNonces: return "OperationTypeDeleteExpiredNonces" + case OperationTypeRegisterAsValidator: + return 
"OperationTypeRegisterAsValidator" + case OperationTypeUnregisterAsValidator: + return "OperationTypeUnregisterAsValidator" } return "OperationTypeUNKNOWN" } @@ -859,6 +875,9 @@ type UtxoOperation struct { // When we connect a block, we delete expired nonce entries. PrevNonceEntries []*TransactorNonceEntry + + // PrevValidatorEntry is the previous ValidatorEntry prior to a register or unregister txn. + PrevValidatorEntry *ValidatorEntry } func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -1176,6 +1195,11 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada } } + if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + // PrevValidatorEntry + data = append(data, EncodeToBytes(blockHeight, op.PrevValidatorEntry, skipMetadata...)...) + } + return data } @@ -1791,11 +1815,26 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. } } + if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + // PrevValidatorEntry + prevValidatorEntry := &ValidatorEntry{} + if exist, err := DecodeFromBytes(prevValidatorEntry, rr); exist && err == nil { + op.PrevValidatorEntry = prevValidatorEntry + } else if err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevValidatorEntry") + } + } + return nil } func (op *UtxoOperation) GetVersionByte(blockHeight uint64) byte { - return GetMigrationVersion(blockHeight, AssociationsAndAccessGroupsMigration, BalanceModelMigration) + return GetMigrationVersion( + blockHeight, + AssociationsAndAccessGroupsMigration, + BalanceModelMigration, + ProofOfStakeNewTxnTypesMigration, + ) } func (op *UtxoOperation) GetEncoderType() EncoderType { diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go new file mode 100644 index 000000000..7dd592069 --- /dev/null +++ b/lib/block_view_validator.go @@ -0,0 +1,1493 @@ +package lib + +import ( + "bytes" + "fmt" + 
"github.com/dgraph-io/badger/v3" + "github.com/golang/glog" + "github.com/holiman/uint256" + "github.com/pkg/errors" + "math" + "net/url" + "sort" +) + +// +// TYPES: ValidatorEntry +// + +type ValidatorEntry struct { + ValidatorID *BlockHash + ValidatorPKID *PKID + // Note: if someone is updating their ValidatorEntry, they need to include + // all domains. The Domains field is not appended to. It is overwritten. + Domains [][]byte + DisableDelegatedStake bool + // TODO: We will implement BLS public keys and signatures in a subsequent PR. + // For now, we include them just as a placeholder byte slice. + VotingPublicKey []byte + VotingPublicKeySignature []byte + VotingSignatureBlockHeight uint64 + TotalStakeAmountNanos *uint256.Int + RegisteredAtBlockHeight uint64 + ExtraData map[string][]byte + isDeleted bool +} + +type ValidatorMapKey struct { + // The MapKey has to contain all fields that are used in Badger keys. + // Otherwise, an update to the UtxoView will not be able to update or + // delete all relevant Badger rows. + ValidatorPKID PKID + TotalStakeAmountNanos uint256.Int + RegisteredAtBlockHeight uint64 +} + +func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { + // Copy domains. + var domainsCopy [][]byte + for _, domain := range validatorEntry.Domains { + domainsCopy = append(domainsCopy, append([]byte{}, domain...)) // Makes a copy. + } + + // Copy ExtraData. + extraDataCopy := make(map[string][]byte) + for key, value := range validatorEntry.ExtraData { + valueCopy := make([]byte, len(value)) + copy(valueCopy, value) + extraDataCopy[key] = valueCopy + } + + // Return new ValidatorEntry. 
+ return &ValidatorEntry{ + ValidatorID: validatorEntry.ValidatorID.NewBlockHash(), + ValidatorPKID: validatorEntry.ValidatorPKID.NewPKID(), + Domains: domainsCopy, + DisableDelegatedStake: validatorEntry.DisableDelegatedStake, + VotingPublicKey: append([]byte{}, validatorEntry.VotingPublicKey...), + VotingPublicKeySignature: append([]byte{}, validatorEntry.VotingPublicKeySignature...), + VotingSignatureBlockHeight: validatorEntry.VotingSignatureBlockHeight, + TotalStakeAmountNanos: validatorEntry.TotalStakeAmountNanos.Clone(), + RegisteredAtBlockHeight: validatorEntry.RegisteredAtBlockHeight, + ExtraData: extraDataCopy, + isDeleted: validatorEntry.isDeleted, + } +} + +func (validatorEntry *ValidatorEntry) ToMapKey() ValidatorMapKey { + return ValidatorMapKey{ + ValidatorPKID: *validatorEntry.ValidatorPKID, + TotalStakeAmountNanos: *validatorEntry.TotalStakeAmountNanos, + RegisteredAtBlockHeight: validatorEntry.RegisteredAtBlockHeight, + } +} + +func (validatorEntry *ValidatorEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, EncodeToBytes(blockHeight, validatorEntry.ValidatorID, skipMetadata...)...) + data = append(data, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID, skipMetadata...)...) + + // Domains + data = append(data, UintToBuf(uint64(len(validatorEntry.Domains)))...) + for _, domain := range validatorEntry.Domains { + data = append(data, EncodeByteArray(domain)...) + } + + data = append(data, BoolToByte(validatorEntry.DisableDelegatedStake)) + data = append(data, EncodeByteArray(validatorEntry.VotingPublicKey)...) + data = append(data, EncodeByteArray(validatorEntry.VotingPublicKeySignature)...) + data = append(data, UintToBuf(validatorEntry.VotingSignatureBlockHeight)...) + data = append(data, EncodeUint256(validatorEntry.TotalStakeAmountNanos)...) + data = append(data, UintToBuf(validatorEntry.RegisteredAtBlockHeight)...) 
+ data = append(data, EncodeExtraData(validatorEntry.ExtraData)...) + return data +} + +func (validatorEntry *ValidatorEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + + // ValidatorID + validatorID := &BlockHash{} + if exist, err := DecodeFromBytes(validatorID, rr); exist && err == nil { + validatorEntry.ValidatorID = validatorID + } else if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading ValidatorID: ") + } + + // ValidatorPKID + validatorPKID := &PKID{} + if exist, err := DecodeFromBytes(validatorPKID, rr); exist && err == nil { + validatorEntry.ValidatorPKID = validatorPKID + } else if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading ValidatorPKID: ") + } + + // Domains + numDomains, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading Domains: ") + } + for ii := 0; ii < int(numDomains); ii++ { + domain, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading Domains: ") + } + validatorEntry.Domains = append(validatorEntry.Domains, domain) + } + + // DisableDelegatedStake + validatorEntry.DisableDelegatedStake, err = ReadBoolByte(rr) + if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading DisableDelegatedStake: ") + } + + // VotingPublicKey + validatorEntry.VotingPublicKey, err = DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingPublicKey: ") + } + + // VotingPublicKeySignature + validatorEntry.VotingPublicKeySignature, err = DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingPublicKeySignature: ") + } + + // VotingSignatureBlockHeight + validatorEntry.VotingSignatureBlockHeight, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading 
VotingSignatureBlockHeight: ") + } + + // TotalStakeAmountNanos + validatorEntry.TotalStakeAmountNanos, err = DecodeUint256(rr) + if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading TotalStakeAmountNanos: ") + } + + // RegisteredAtBlockHeight + validatorEntry.RegisteredAtBlockHeight, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading RegisteredAtBlockHeight: ") + } + + // ExtraData + validatorEntry.ExtraData, err = DecodeExtraData(rr) + if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading ExtraData: ") + } + + return nil +} + +func (validatorEntry *ValidatorEntry) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (validatorEntry *ValidatorEntry) GetEncoderType() EncoderType { + return EncoderTypeValidatorEntry +} + +// +// TYPES: RegisterAsValidatorMetadata +// + +type RegisterAsValidatorMetadata struct { + Domains [][]byte + DisableDelegatedStake bool + VotingPublicKey []byte + VotingPublicKeySignature []byte + VotingSignatureBlockHeight uint64 +} + +func (txnData *RegisterAsValidatorMetadata) GetTxnType() TxnType { + return TxnTypeRegisterAsValidator +} + +func (txnData *RegisterAsValidatorMetadata) ToBytes(preSignature bool) ([]byte, error) { + var data []byte + + // Domains + data = append(data, UintToBuf(uint64(len(txnData.Domains)))...) + for _, domain := range txnData.Domains { + data = append(data, EncodeByteArray(domain)...) + } + + data = append(data, BoolToByte(txnData.DisableDelegatedStake)) + data = append(data, EncodeByteArray(txnData.VotingPublicKey)...) + data = append(data, EncodeByteArray(txnData.VotingPublicKeySignature)...) + data = append(data, UintToBuf(txnData.VotingSignatureBlockHeight)...) 
+ return data, nil +} + +func (txnData *RegisterAsValidatorMetadata) FromBytes(data []byte) error { + rr := bytes.NewReader(data) + + // Domains + numDomains, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading Domains: ") + } + for ii := 0; ii < int(numDomains); ii++ { + domain, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading Domains: ") + } + txnData.Domains = append(txnData.Domains, domain) + } + + // DisableDelegatedStake + txnData.DisableDelegatedStake, err = ReadBoolByte(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading DisableDelegatedStake: ") + } + + // VotingPublicKey + txnData.VotingPublicKey, err = DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingPublicKey: ") + } + + // VotingPublicKeySignature + txnData.VotingPublicKeySignature, err = DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingPublicKeySignature: ") + } + + // VotingSignatureBlockHeight + txnData.VotingSignatureBlockHeight, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingSignatureBlockHeight: ") + } + + return nil +} + +func (txnData *RegisterAsValidatorMetadata) New() DeSoTxnMetadata { + return &RegisterAsValidatorMetadata{} +} + +// +// TYPES: UnregisterAsValidatorMetadata +// + +type UnregisterAsValidatorMetadata struct{} + +func (txnData *UnregisterAsValidatorMetadata) GetTxnType() TxnType { + return TxnTypeUnregisterAsValidator +} + +func (txnData *UnregisterAsValidatorMetadata) ToBytes(preSignature bool) ([]byte, error) { + return []byte{}, nil +} + +func (txnData *UnregisterAsValidatorMetadata) FromBytes(data []byte) error { + return nil +} + +func 
(txnData *UnregisterAsValidatorMetadata) New() DeSoTxnMetadata { + return &UnregisterAsValidatorMetadata{} +} + +// +// TYPES: RegisterAsValidatorTxindexMetadata +// + +type RegisterAsValidatorTxindexMetadata struct { + ValidatorPublicKeyBase58Check string + Domains []string + DisableDelegatedStake bool + VotingPublicKey string + VotingPublicKeySignature string + VotingSignatureBlockHeight uint64 + UnstakedStakers []*UnstakedStakerTxindexMetadata +} + +func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, EncodeByteArray([]byte(txindexMetadata.ValidatorPublicKeyBase58Check))...) + + // Domains + data = append(data, UintToBuf(uint64(len(txindexMetadata.Domains)))...) + for _, domain := range txindexMetadata.Domains { + data = append(data, EncodeByteArray([]byte(domain))...) + } + + data = append(data, BoolToByte(txindexMetadata.DisableDelegatedStake)) + data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingPublicKey))...) + data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingPublicKeySignature))...) + data = append(data, UintToBuf(txindexMetadata.VotingSignatureBlockHeight)...) + + // UnstakedStakers + data = append(data, UintToBuf(uint64(len(txindexMetadata.UnstakedStakers)))...) + for _, unstakedStaker := range txindexMetadata.UnstakedStakers { + data = append(data, unstakedStaker.RawEncodeWithoutMetadata(blockHeight, skipMetadata...)...) 
+ } + + return data +} + +func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + + // ValidatorPublicKeyBase58Check + validatorPublicKeyBase58CheckBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading ValidatorPublicKeyBase58Check: ") + } + txindexMetadata.ValidatorPublicKeyBase58Check = string(validatorPublicKeyBase58CheckBytes) + + // Domains + numDomains, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading Domains: ") + } + for ii := 0; ii < int(numDomains); ii++ { + domain, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading Domains: ") + } + txindexMetadata.Domains = append(txindexMetadata.Domains, string(domain)) + } + + // DisableDelegatedStake + txindexMetadata.DisableDelegatedStake, err = ReadBoolByte(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading DisableDelegatedStake: ") + } + + // VotingPublicKey + votingPublicKeyBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading VotingPublicKey: ") + } + txindexMetadata.VotingPublicKey = string(votingPublicKeyBytes) + + // VotingPublicKeySignature + votingPublicKeySignatureBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading VotingPublicKeySignature: ") + } + txindexMetadata.VotingPublicKeySignature = string(votingPublicKeySignatureBytes) + + // VotingSignatureBlockHeight + txindexMetadata.VotingSignatureBlockHeight, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading 
VotingSignatureBlockHeight: ") + } + + // UnstakedStakers + numUnstakedStakers, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading UnstakedStakers: ") + } + for ii := 0; ii < int(numUnstakedStakers); ii++ { + unstakedStaker := &UnstakedStakerTxindexMetadata{} + err = unstakedStaker.RawDecodeWithoutMetadata(blockHeight, rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading UnstakedStakers: ") + } + txindexMetadata.UnstakedStakers = append(txindexMetadata.UnstakedStakers, unstakedStaker) + } + + return nil +} + +func (txindexMetadata *RegisterAsValidatorTxindexMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (txindexMetadata *RegisterAsValidatorTxindexMetadata) GetEncoderType() EncoderType { + return EncoderTypeRegisterAsValidatorTxindexMetadata +} + +// +// TYPES: UnstakedStakerTxindexMetadata +// + +type UnstakedStakerTxindexMetadata struct { + StakerPublicKeyBase58Check string + UnstakeAmountNanos *uint256.Int +} + +func (txindexMetadata *UnstakedStakerTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, EncodeByteArray([]byte(txindexMetadata.StakerPublicKeyBase58Check))...) + data = append(data, EncodeUint256(txindexMetadata.UnstakeAmountNanos)...) 
+ return data +} + +func (txindexMetadata *UnstakedStakerTxindexMetadata) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + + // StakerPublicKeyBase58Check + stakerPublicKeyBase58CheckBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "UnstakedStakerTxindexMetadata.Decode: Problem reading StakerPublicKeyBase58Check: ") + } + txindexMetadata.StakerPublicKeyBase58Check = string(stakerPublicKeyBase58CheckBytes) + + // UnstakeAmountNanos + txindexMetadata.UnstakeAmountNanos, err = DecodeUint256(rr) + if err != nil { + return errors.Wrapf(err, "UnstakedStakerTxindexMetadata.Decode: Problem reading UnstakeAmountNanos: ") + } + + return nil +} + +// +// TYPES: UnregisterAsValidatorTxindexMetadata +// + +type UnregisterAsValidatorTxindexMetadata struct { + ValidatorPublicKeyBase58Check string + UnstakedStakers []*UnstakedStakerTxindexMetadata +} + +func (txindexMetadata *UnregisterAsValidatorTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, EncodeByteArray([]byte(txindexMetadata.ValidatorPublicKeyBase58Check))...) + + // UnstakedStakers + data = append(data, UintToBuf(uint64(len(txindexMetadata.UnstakedStakers)))...) + for _, unstakedStaker := range txindexMetadata.UnstakedStakers { + data = append(data, unstakedStaker.RawEncodeWithoutMetadata(blockHeight, skipMetadata...)...) 
+ } + + return data +} + +func (txindexMetadata *UnregisterAsValidatorTxindexMetadata) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + + // ValidatorPublicKeyBase58Check + validatorPublicKeyBase58CheckBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "UnregisterAsValidatorTxindexMetadata.Decode: Problem reading ValidatorPublicKeyBase58Check: ") + } + txindexMetadata.ValidatorPublicKeyBase58Check = string(validatorPublicKeyBase58CheckBytes) + + // UnstakedStakers + numUnstakedStakers, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "UnregisterAsValidatorTxindexMetadata.Decode: Problem reading UnstakedStakers: ") + } + for ii := 0; ii < int(numUnstakedStakers); ii++ { + unstakedStaker := &UnstakedStakerTxindexMetadata{} + err = unstakedStaker.RawDecodeWithoutMetadata(blockHeight, rr) + if err != nil { + return errors.Wrapf(err, "UnregisterAsValidatorTxindexMetadata.Decode: Problem reading UnstakedStakers: ") + } + txindexMetadata.UnstakedStakers = append(txindexMetadata.UnstakedStakers, unstakedStaker) + } + + return nil +} + +func (txindexMetadata *UnregisterAsValidatorTxindexMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (txindexMetadata *UnregisterAsValidatorTxindexMetadata) GetEncoderType() EncoderType { + return EncoderTypeUnregisterAsValidatorTxindexMetadata +} + +// +// DB UTILS +// + +func DBKeyForValidatorByPKID(validatorEntry *ValidatorEntry) []byte { + key := append([]byte{}, Prefixes.PrefixValidatorByPKID...) + key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) + return key +} + +func DBKeyForValidatorByStake(validatorEntry *ValidatorEntry) []byte { + key := append([]byte{}, Prefixes.PrefixValidatorByStake...) + // FIXME: ensure that this left-pads the uint256 to be equal width + key = append(key, EncodeUint256(validatorEntry.TotalStakeAmountNanos)...) 
// Highest stake first + key = append(key, EncodeUint64(math.MaxUint64-validatorEntry.RegisteredAtBlockHeight)...) // Oldest first + key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) + return key +} + +func DBKeyForGlobalStakeAmountNanos() []byte { + return append([]byte{}, Prefixes.PrefixGlobalStakeAmountNanos...) +} + +func DBGetValidatorByPKID(handle *badger.DB, snap *Snapshot, pkid *PKID) (*ValidatorEntry, error) { + var ret *ValidatorEntry + err := handle.View(func(txn *badger.Txn) error { + var innerErr error + ret, innerErr = DBGetValidatorByPKIDWithTxn(txn, snap, pkid) + return innerErr + }) + return ret, err +} + +func DBGetValidatorByPKIDWithTxn(txn *badger.Txn, snap *Snapshot, pkid *PKID) (*ValidatorEntry, error) { + // Retrieve ValidatorEntry from db. + key := DBKeyForValidatorByPKID(&ValidatorEntry{ValidatorPKID: pkid}) + validatorBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + // We don't want to error if the key isn't found. Instead, return nil. + if err == badger.ErrKeyNotFound { + return nil, nil + } + return nil, errors.Wrapf(err, "DBGetValidatorByPKID: problem retrieving ValidatorEntry") + } + + // Decode ValidatorEntry from bytes. + validatorEntry := &ValidatorEntry{} + rr := bytes.NewReader(validatorBytes) + if exist, err := DecodeFromBytes(validatorEntry, rr); !exist || err != nil { + return nil, errors.Wrapf(err, "DBGetValidatorByPKID: problem decoding ValidatorEntry") + } + return validatorEntry, nil +} + +func DBGetTopValidatorsByStake( + handle *badger.DB, + snap *Snapshot, + limit int, + validatorEntriesToSkip []*ValidatorEntry, +) ([]*ValidatorEntry, error) { + var validatorEntries []*ValidatorEntry + + // Convert ValidatorEntriesToSkip to ValidatorEntryKeysToSkip. + validatorKeysToSkip := NewSet([]string{}) + for _, validatorEntryToSkip := range validatorEntriesToSkip { + validatorKeysToSkip.Add(string(DBKeyForValidatorByStake(validatorEntryToSkip))) + } + + // Retrieve top N ValidatorEntry PKIDs by stake. 
+ key := append([]byte{}, Prefixes.PrefixValidatorByStake...) + _, validatorPKIDsBytes, err := EnumerateKeysForPrefixWithLimitOffsetOrder( + handle, key, limit, nil, true, validatorKeysToSkip, + ) + if err != nil { + return nil, errors.Wrapf(err, "DBGetTopValidatorsByStake: problem retrieving top validators: ") + } + + // For each PKID, retrieve the ValidatorEntry by PKID. + for _, validatorPKIDBytes := range validatorPKIDsBytes { + // Convert PKIDBytes to PKID. + validatorPKID := &PKID{} + exists, err := DecodeFromBytes(validatorPKID, bytes.NewReader(validatorPKIDBytes)) + if !exists || err != nil { + return nil, errors.Wrapf(err, "DBGetTopValidatorsByStake: problem reading ValidatorPKID: ") + } + // Retrieve ValidatorEntry by PKID. + validatorEntry, err := DBGetValidatorByPKID(handle, snap, validatorPKID) + if err != nil { + return nil, errors.Wrapf(err, "DBGetTopValidatorsByStake: problem retrieving validator by PKID: ") + } + validatorEntries = append(validatorEntries, validatorEntry) + } + + return validatorEntries, nil +} + +func DBGetGlobalStakeAmountNanos(handle *badger.DB, snap *Snapshot) (*uint256.Int, error) { + var ret *uint256.Int + err := handle.View(func(txn *badger.Txn) error { + var innerErr error + ret, innerErr = DBGetGlobalStakeAmountNanosWithTxn(txn, snap) + return innerErr + }) + return ret, err +} + +func DBGetGlobalStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot) (*uint256.Int, error) { + // Retrieve from db. + key := DBKeyForGlobalStakeAmountNanos() + globalStakeAmountNanosBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + // We don't want to error if the key isn't found. Instead, return 0. + if err == badger.ErrKeyNotFound { + return uint256.NewInt(), nil + } + return nil, errors.Wrapf(err, "DBGetGlobalStakeAmountNanosWithTxn: problem retrieving value") + } + + // Decode from bytes. 
+ var globalStakeAmountNanos *uint256.Int + rr := bytes.NewReader(globalStakeAmountNanosBytes) + globalStakeAmountNanos, err = DecodeUint256(rr) + if err != nil { + return nil, errors.Wrapf(err, "DBGetGlobalStakeAmountNanosWithTxn: problem decoding value") + } + return globalStakeAmountNanos, nil +} + +func DBPutValidatorWithTxn( + txn *badger.Txn, + snap *Snapshot, + validatorEntry *ValidatorEntry, + blockHeight uint64, +) error { + if validatorEntry == nil { + // This should never happen but is a sanity check. + glog.Errorf("DBPutValidatorWithTxn: called with nil ValidatorEntry") + return nil + } + + // Set ValidatorEntry in PrefixValidatorByPKID. + key := DBKeyForValidatorByPKID(validatorEntry) + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry)); err != nil { + return errors.Wrapf( + err, "DBPutValidatorWithTxn: problem storing ValidatorEntry in index PrefixValidatorByPKID", + ) + } + + // Set ValidatorEntry.PKID in PrefixValidatorByStake. + key = DBKeyForValidatorByStake(validatorEntry) + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID)); err != nil { + return errors.Wrapf( + err, "DBPutValidatorWithTxn: problem storing ValidatorEntry in index PrefixValidatorByStake", + ) + } + + return nil +} + +func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorEntry *ValidatorEntry) error { + if validatorEntry == nil { + // This should never happen but is a sanity check. + glog.Errorf("DBDeleteValidatorWithTxn: called with nil ValidatorEntry") + return nil + } + + // Delete ValidatorEntry from PrefixValidatorByPKID. + key := DBKeyForValidatorByPKID(validatorEntry) + if err := DBDeleteWithTxn(txn, snap, key); err != nil { + return errors.Wrapf( + err, "DBDeleteValidatorWithTxn: problem deleting ValidatorEntry from index PrefixValidatorByPKID", + ) + } + + // Delete ValidatorEntry.PKID from PrefixValidatorByStake. 
+ key = DBKeyForValidatorByStake(validatorEntry) + if err := DBDeleteWithTxn(txn, snap, key); err != nil { + return errors.Wrapf( + err, "DBDeleteValidatorWithTxn: problem deleting ValidatorEntry from index PrefixValidatorByStake", + ) + } + + return nil +} + +func DBPutGlobalStakeAmountNanosWithTxn( + txn *badger.Txn, + snap *Snapshot, + globalStakeAmountNanos *uint256.Int, + blockHeight uint64, +) error { + if globalStakeAmountNanos == nil { + // This should never happen but is a sanity check. + glog.Errorf("DBPutGlobalStakeAmountNanosWithTxn: called with nil GlobalStakeAmountNanos") + return nil + } + + key := DBKeyForGlobalStakeAmountNanos() + return DBSetWithTxn(txn, snap, key, EncodeUint256(globalStakeAmountNanos)) +} + +// +// BLOCKCHAIN UTILS +// + +func (bc *Blockchain) CreateRegisterAsValidatorTxn( + transactorPublicKey []byte, + metadata *RegisterAsValidatorMetadata, + extraData map[string][]byte, + minFeeRateNanosPerKB uint64, + mempool *DeSoMempool, + additionalOutputs []*DeSoOutput, +) ( + _txn *MsgDeSoTxn, + _totalInput uint64, + _changeAmount uint64, + _fees uint64, + _err error, +) { + // Create a txn containing the RegisterAsValidator fields. + txn := &MsgDeSoTxn{ + PublicKey: transactorPublicKey, + TxnMeta: metadata, + TxOutputs: additionalOutputs, + ExtraData: extraData, + // We wait to compute the signature until + // we've added all the inputs and change. + } + + // Create a new UtxoView. If we have access to a mempool object, use + // it to get an augmented view that factors in pending transactions. 
+ utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) + if err != nil { + return nil, 0, 0, 0, errors.Wrap( + err, "Blockchain.CreateRegisterAsValidatorTxn: problem creating new utxo view: ", + ) + } + if mempool != nil { + utxoView, err = mempool.GetAugmentedUniversalView() + if err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateRegisterAsValidatorTxn: problem getting augmented utxo view from mempool: ", + ) + } + } + + // Validate txn metadata. + if err = utxoView.IsValidRegisterAsValidatorMetadata(transactorPublicKey, metadata); err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateRegisterAsValidatorTxn: invalid txn metadata: ", + ) + } + + // We don't need to make any tweaks to the amount because + // it's basically a standard "pay per kilobyte" transaction. + totalInput, spendAmount, changeAmount, fees, err := bc.AddInputsAndChangeToTransaction( + txn, minFeeRateNanosPerKB, mempool, + ) + if err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateRegisterAsValidatorTxn: problem adding inputs: ", + ) + } + + // Validate that the transaction has at least one input, even if it all goes + // to change. This ensures that the transaction will not be "replayable." + if len(txn.TxInputs) == 0 && bc.blockTip().Height+1 < bc.params.ForkHeights.BalanceModelBlockHeight { + return nil, 0, 0, 0, errors.New( + "Blockchain.CreateRegisterAsValidatorTxn: txn has zero inputs, try increasing the fee rate", + ) + } + + // Sanity-check that the spendAmount is zero. 
+ if spendAmount != 0 { + return nil, 0, 0, 0, fmt.Errorf( + "Blockchain.CreateRegisterAsValidatorTxn: spend amount is non-zero: %d", spendAmount, + ) + } + return txn, totalInput, changeAmount, fees, nil +} + +func (bc *Blockchain) CreateUnregisterAsValidatorTxn( + transactorPublicKey []byte, + metadata *UnregisterAsValidatorMetadata, + extraData map[string][]byte, + minFeeRateNanosPerKB uint64, + mempool *DeSoMempool, + additionalOutputs []*DeSoOutput, +) ( + _txn *MsgDeSoTxn, + _totalInput uint64, + _changeAmount uint64, + _fees uint64, + _err error, +) { + // Create a txn containing the UnregisterAsValidator fields. + txn := &MsgDeSoTxn{ + PublicKey: transactorPublicKey, + TxnMeta: metadata, + TxOutputs: additionalOutputs, + ExtraData: extraData, + // We wait to compute the signature until + // we've added all the inputs and change. + } + + // Create a new UtxoView. If we have access to a mempool object, use + // it to get an augmented view that factors in pending transactions. + utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) + if err != nil { + return nil, 0, 0, 0, errors.Wrap( + err, "Blockchain.CreateUnregisterAsValidatorTxn: problem creating new utxo view: ", + ) + } + if mempool != nil { + utxoView, err = mempool.GetAugmentedUniversalView() + if err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateUnregisterAsValidatorTxn: problem getting augmented utxo view from mempool: ", + ) + } + } + + // Validate txn metadata. + if err = utxoView.IsValidUnregisterAsValidatorMetadata(transactorPublicKey, metadata); err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateUnregisterAsValidatorTxn: invalid txn metadata: ", + ) + } + + // We don't need to make any tweaks to the amount because + // it's basically a standard "pay per kilobyte" transaction. 
// _connectRegisterAsValidator applies a RegisterAsValidator txn to the view.
// It connects the underlying basic transfer, validates the metadata, replaces
// any existing ValidatorEntry for the transactor's PKID (preserving its
// ValidatorID, TotalStakeAmountNanos, and RegisteredAtBlockHeight), and
// records the previous entry in a UtxoOperation so the txn can be
// disconnected. Returns total input, total output, and the utxo ops.
func (bav *UtxoView) _connectRegisterAsValidator(
	txn *MsgDeSoTxn,
	txHash *BlockHash,
	blockHeight uint32,
	verifySignatures bool,
) (
	_totalInput uint64,
	_totalOutput uint64,
	_utxoOps []*UtxoOperation,
	_err error,
) {
	// Validate the starting block height: this txn type only exists after the
	// ProofOfStakeNewTxnTypes fork.
	if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight {
		return 0, 0, nil, RuleErrorProofofStakeTxnBeforeBlockHeight
	}

	// Validate the txn TxnType.
	if txn.TxnMeta.GetTxnType() != TxnTypeRegisterAsValidator {
		return 0, 0, nil, fmt.Errorf(
			"_connectRegisterAsValidator: called with bad TxnType %s", txn.TxnMeta.GetTxnType().String(),
		)
	}

	// Connect a basic transfer to get the total input and the
	// total output without considering the txn metadata.
	totalInput, totalOutput, utxoOpsForTxn, err := bav._connectBasicTransfer(
		txn, txHash, blockHeight, verifySignatures,
	)
	if err != nil {
		return 0, 0, nil, errors.Wrapf(err, "_connectRegisterAsValidator: ")
	}
	if verifySignatures {
		// _connectBasicTransfer has already checked that the txn is signed
		// by the top-level public key, which we take to be the sender's
		// public key so there is no need to verify anything further.
	}

	// Grab the txn metadata.
	txMeta := txn.TxnMeta.(*RegisterAsValidatorMetadata)

	// Validate the txn metadata (domains, PKID, etc.).
	if err = bav.IsValidRegisterAsValidatorMetadata(txn.PublicKey, txMeta); err != nil {
		return 0, 0, nil, errors.Wrapf(err, "_connectRegisterAsValidator: ")
	}

	// Convert TransactorPublicKey to TransactorPKID.
	transactorPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey)
	if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted {
		return 0, 0, nil, RuleErrorInvalidValidatorPKID
	}

	// Check if there is an existing ValidatorEntry that will be overwritten.
	// The existing ValidatorEntry will be restored if we disconnect this transaction.
	prevValidatorEntry, err := bav.GetValidatorByPKID(transactorPKIDEntry.PKID)
	if err != nil {
		return 0, 0, nil, errors.Wrapf(err, "_connectRegisterAsValidator: ")
	}
	// Delete the existing ValidatorEntry, if exists. There will be an existing ValidatorEntry
	// if the transactor is updating their ValidatorEntry. There will not be, if the transactor
	// is registering a ValidatorEntry for the first time (or it was previously unregistered).
	// Note that we don't need to check isDeleted because the Get returns nil if isDeleted=true.
	if prevValidatorEntry != nil {
		bav._deleteValidatorEntryMappings(prevValidatorEntry)
	}

	// Set ValidatorID only if this is a new ValidatorEntry; an update keeps
	// the previous entry's ValidatorID (copied so we don't alias the old hash).
	validatorID := txHash.NewBlockHash()
	if prevValidatorEntry != nil {
		validatorID = prevValidatorEntry.ValidatorID.NewBlockHash()
	}

	// Calculate TotalStakeAmountNanos: zero for a brand-new validator, carried
	// over (cloned) from the previous entry on update.
	totalStakeAmountNanos := uint256.NewInt()
	if prevValidatorEntry != nil {
		totalStakeAmountNanos = prevValidatorEntry.TotalStakeAmountNanos.Clone()
	}

	// TODO: In subsequent PR, unstake delegated stakers if updating DisableDelegatedStake=true.
	// We will also need to update the TotalStakeAmountNanos and the GlobalStakeAmountNanos.
	// (Intentionally empty for now; the condition documents the transition that
	// will require unstaking.)
	if prevValidatorEntry != nil &&
		!prevValidatorEntry.DisableDelegatedStake && // Validator previously allowed delegated stake.
		txMeta.DisableDelegatedStake { // Validator no longer allows delegated stake.
	}

	// Set RegisteredAtBlockHeight only if this is a new ValidatorEntry; an
	// update preserves the original registration height.
	registeredAtBlockHeight := uint64(blockHeight)
	if prevValidatorEntry != nil {
		registeredAtBlockHeight = prevValidatorEntry.RegisteredAtBlockHeight
	}

	// Retrieve existing ExtraData to merge with any new ExtraData.
	var prevExtraData map[string][]byte
	if prevValidatorEntry != nil {
		prevExtraData = prevValidatorEntry.ExtraData
	}

	// Construct new ValidatorEntry from metadata.
	currentValidatorEntry := &ValidatorEntry{
		ValidatorID:   validatorID,
		ValidatorPKID: transactorPKIDEntry.PKID,
		// Note: if someone is updating their ValidatorEntry, they need to include
		// all domains. The Domains field is not appended to. It is overwritten.
		Domains:                    txMeta.Domains,
		DisableDelegatedStake:      txMeta.DisableDelegatedStake,
		VotingPublicKey:            txMeta.VotingPublicKey,
		VotingPublicKeySignature:   txMeta.VotingPublicKeySignature,
		VotingSignatureBlockHeight: txMeta.VotingSignatureBlockHeight,
		TotalStakeAmountNanos:      totalStakeAmountNanos,
		RegisteredAtBlockHeight:    registeredAtBlockHeight,
		ExtraData:                  mergeExtraData(prevExtraData, txn.ExtraData),
	}
	// Set the ValidatorEntry.
	bav._setValidatorEntryMappings(currentValidatorEntry)

	// Add a UTXO operation recording the previous entry for disconnects.
	utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{
		Type:               OperationTypeRegisterAsValidator,
		PrevValidatorEntry: prevValidatorEntry,
		// PrevStakeEntries: prevStakeEntries, // TODO: in subsequent PR
	})
	return totalInput, totalOutput, utxoOpsForTxn, nil
}

// _disconnectRegisterAsValidator reverts a previously-connected
// RegisterAsValidator txn: it deletes the current ValidatorEntry, restores the
// PrevValidatorEntry saved in the UtxoOperation (if any), and then disconnects
// the underlying basic transfer.
func (bav *UtxoView) _disconnectRegisterAsValidator(
	operationType OperationType,
	currentTxn *MsgDeSoTxn,
	txHash *BlockHash,
	utxoOpsForTxn []*UtxoOperation,
	blockHeight uint32,
) error {
	// Validate the starting block height.
	if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight {
		return errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_disconnectRegisterAsValidator: ")
	}

	// Validate the last operation is a RegisterAsValidator operation.
	if len(utxoOpsForTxn) == 0 {
		return fmt.Errorf("_disconnectRegisterAsValidator: utxoOperations are missing")
	}
	operationIndex := len(utxoOpsForTxn) - 1
	operationData := utxoOpsForTxn[operationIndex]
	if operationData.Type != OperationTypeRegisterAsValidator {
		return fmt.Errorf(
			"_disconnectRegisterAsValidator: trying to revert %v but found %v",
			OperationTypeRegisterAsValidator,
			operationData.Type,
		)
	}

	// Convert TransactorPublicKey to TransactorPKID.
	transactorPKIDEntry := bav.GetPKIDForPublicKey(currentTxn.PublicKey)
	if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted {
		return RuleErrorInvalidValidatorPKID
	}

	// Delete the current ValidatorEntry (the one the connect created/updated).
	currentValidatorEntry, err := bav.GetValidatorByPKID(transactorPKIDEntry.PKID)
	if err != nil {
		return errors.Wrapf(err, "_disconnectRegisterAsValidator: ")
	}
	// Note that we don't need to check isDeleted because the Get returns nil if isDeleted=true.
	if currentValidatorEntry == nil {
		return fmt.Errorf(
			"_disconnectRegisterAsValidator: no ValidatorEntry found for %v", transactorPKIDEntry.PKID,
		)
	}
	bav._deleteValidatorEntryMappings(currentValidatorEntry)

	// Restore the PrevValidatorEntry, if exists. The PrevValidatorEntry won't exist if this was
	// the first time this ValidatorEntry was created. The PrevValidatorEntry will exist if this
	// was an update operation on an existing ValidatorEntry.
	prevValidatorEntry := operationData.PrevValidatorEntry
	if prevValidatorEntry != nil {
		bav._setValidatorEntryMappings(prevValidatorEntry)
	}

	// TODO: In subsequent PR, if PrevStakeEntries, delete the
	// current StakeEntries and restore the prev StakeEntries.
	// This should also update GlobalStakeAmountNanos.

	// Disconnect the BasicTransfer, excluding the op we just reverted.
	return bav._disconnectBasicTransfer(
		currentTxn, txHash, utxoOpsForTxn[:operationIndex], blockHeight,
	)
}
// _connectUnregisterAsValidator applies an UnregisterAsValidator txn to the
// view: it connects the underlying basic transfer, validates the metadata,
// deletes the transactor's existing ValidatorEntry, and records that entry in
// a UtxoOperation so the txn can be disconnected.
func (bav *UtxoView) _connectUnregisterAsValidator(
	txn *MsgDeSoTxn,
	txHash *BlockHash,
	blockHeight uint32,
	verifySignatures bool,
) (
	_totalInput uint64,
	_totalOutput uint64,
	_utxoOps []*UtxoOperation,
	_err error,
) {
	// Validate the starting block height: this txn type only exists after the
	// ProofOfStakeNewTxnTypes fork.
	if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight {
		return 0, 0, nil, RuleErrorProofofStakeTxnBeforeBlockHeight
	}

	// Validate the txn TxnType.
	if txn.TxnMeta.GetTxnType() != TxnTypeUnregisterAsValidator {
		return 0, 0, nil, fmt.Errorf(
			"_connectUnregisterAsValidator: called with bad TxnType %s", txn.TxnMeta.GetTxnType().String(),
		)
	}

	// Connect a basic transfer to get the total input and the
	// total output without considering the txn metadata.
	totalInput, totalOutput, utxoOpsForTxn, err := bav._connectBasicTransfer(
		txn, txHash, blockHeight, verifySignatures,
	)
	if err != nil {
		return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: ")
	}
	if verifySignatures {
		// _connectBasicTransfer has already checked that the txn is signed
		// by the top-level public key, which we take to be the sender's
		// public key so there is no need to verify anything further.
	}

	// Grab the txn metadata.
	txMeta := txn.TxnMeta.(*UnregisterAsValidatorMetadata)

	// Validate the txn metadata (transactor must have an existing entry).
	if err = bav.IsValidUnregisterAsValidatorMetadata(txn.PublicKey, txMeta); err != nil {
		return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: ")
	}

	// Convert TransactorPublicKey to TransactorPKID.
	transactorPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey)
	if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted {
		return 0, 0, nil, RuleErrorInvalidValidatorPKID
	}

	// TODO: In subsequent PR, unstake all StakeEntries for this validator.
	// This should also update GlobalStakeAmountNanos.

	// Delete the existing ValidatorEntry; it is saved below so a disconnect
	// can restore it.
	prevValidatorEntry, err := bav.GetValidatorByPKID(transactorPKIDEntry.PKID)
	if err != nil {
		return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: ")
	}
	// Note that we don't need to check isDeleted because the Get returns nil if isDeleted=true.
	if prevValidatorEntry == nil {
		return 0, 0, nil, RuleErrorValidatorNotFound
	}
	bav._deleteValidatorEntryMappings(prevValidatorEntry)

	// Add a UTXO operation recording the deleted entry for disconnects.
	utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{
		Type:               OperationTypeUnregisterAsValidator,
		PrevValidatorEntry: prevValidatorEntry,
		// PrevStakeEntries: prevStakeEntries, // TODO: in subsequent PR
	})
	return totalInput, totalOutput, utxoOpsForTxn, nil
}

// _disconnectUnregisterAsValidator reverts a previously-connected
// UnregisterAsValidator txn by restoring the deleted ValidatorEntry saved in
// the UtxoOperation and disconnecting the underlying basic transfer.
func (bav *UtxoView) _disconnectUnregisterAsValidator(
	operationType OperationType,
	currentTxn *MsgDeSoTxn,
	txHash *BlockHash,
	utxoOpsForTxn []*UtxoOperation,
	blockHeight uint32,
) error {
	// Validate the starting block height.
	if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight {
		return errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_disconnectUnregisterAsValidator: ")
	}

	// Validate the last operation is an UnregisterAsValidator operation.
	if len(utxoOpsForTxn) == 0 {
		return fmt.Errorf("_disconnectUnregisterAsValidator: utxoOperations are missing")
	}
	operationIndex := len(utxoOpsForTxn) - 1
	operationData := utxoOpsForTxn[operationIndex]
	if operationData.Type != OperationTypeUnregisterAsValidator {
		return fmt.Errorf(
			"_disconnectUnregisterAsValidator: trying to revert %v but found %v",
			OperationTypeUnregisterAsValidator,
			operationData.Type,
		)
	}

	// Restore the PrevValidatorEntry.
	prevValidatorEntry := operationData.PrevValidatorEntry
	if prevValidatorEntry == nil {
		// This should never happen as you can only unregister an existing ValidatorEntry
		// when connecting. So disconnecting should always have a PrevValidatorEntry.
		return fmt.Errorf(
			"_disconnectUnregisterAsValidator: no deleted ValidatorEntry found for %v", currentTxn.PublicKey,
		)
	}
	bav._setValidatorEntryMappings(prevValidatorEntry)

	// TODO: In subsequent PR, restore the prev StakeEntries, if any.

	// Disconnect the BasicTransfer, excluding the op we just reverted.
	return bav._disconnectBasicTransfer(
		currentTxn, txHash, utxoOpsForTxn[:operationIndex], blockHeight,
	)
}
+ return bav._disconnectBasicTransfer( + currentTxn, txHash, utxoOpsForTxn[:operationIndex], blockHeight, + ) +} + +func (bav *UtxoView) IsValidRegisterAsValidatorMetadata(transactorPublicKey []byte, metadata *RegisterAsValidatorMetadata) error { + // Validate ValidatorPKID. + transactorPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKey) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return RuleErrorInvalidValidatorPKID + } + + // Validate Domains. + if len(metadata.Domains) < 1 { + return RuleErrorValidatorNoDomains + } + if len(metadata.Domains) > MaxValidatorNumDomains { + return RuleErrorValidatorTooManyDomains + } + var domainStrings []string + for _, domain := range metadata.Domains { + _, err := url.ParseRequestURI(string(domain)) + if err != nil { + return fmt.Errorf("%s: %v", RuleErrorValidatorInvalidDomain, domain) + } + domainStrings = append(domainStrings, string(domain)) + } + if len(NewSet(domainStrings).ToSlice()) != len(domainStrings) { + return RuleErrorValidatorDuplicateDomains + } + + // TODO: In subsequent PR, validate VotingPublicKey, VotingPublicKeySignature, and VotingSignatureBlockHeight. + return nil +} + +func (bav *UtxoView) IsValidUnregisterAsValidatorMetadata(transactorPublicKey []byte, metadata *UnregisterAsValidatorMetadata) error { + // Validate ValidatorPKID. + transactorPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKey) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return RuleErrorInvalidValidatorPKID + } + + // Validate ValidatorEntry exists. + validatorEntry, err := bav.GetValidatorByPKID(transactorPKIDEntry.PKID) + if err != nil { + return errors.Wrapf(err, "IsValidUnregisterAsValidatorMetadata: ") + } + if validatorEntry == nil { + return RuleErrorValidatorNotFound + } + + return nil +} + +func (bav *UtxoView) GetValidatorByPKID(pkid *PKID) (*ValidatorEntry, error) { + // First check the UtxoView. 
+ + // There can be multiple ValidatorEntries for a given PKID in the UtxoView since the ValidatorMapKey + // contains ValidatorPKID, TotalStakeAmountNanos, and RegisteredAtBlockHeight. We need to loop through + // all the ValidatorEntries and find the one matching the given PKID that is !isDeleted. There should + // ever only be zero or one such matching ValidatorEntries. If the only matching ValidatorEntries are + // all isDeleted then we shouldn't check the database as the corresponding rows in the database will + // be deleted once the UtxoView is flushed. + isDeleted := false + + for _, validatorEntry := range bav.ValidatorMapKeyToValidatorEntry { + if validatorEntry == nil { + // This should never happen but is a sanity check. + continue + } + if !validatorEntry.ValidatorPKID.Eq(pkid) { + continue + } + if validatorEntry.isDeleted { + isDeleted = true + continue + } + // If we get to this point, we found a matching + // !isDeleted ValidatorEntry for the given PKID. + return validatorEntry, nil + } + + if isDeleted { + // If we get to this point, we found one or more matching ValidatorEntries + // for the given PKID, but they were all isDeleted. We do not want to check + // the database but instead just return nil, no ValidatorEntry found. + return nil, nil + } + + // If no ValidatorEntry (either isDeleted or !isDeleted) was found + // in the UtxoView for the given PKID, check the database. + dbValidatorEntry, err := DBGetValidatorByPKID(bav.Handle, bav.Snapshot, pkid) + if err != nil { + return nil, err + } + // Cache the ValidatorEntry from the db in the UtxoView. + bav._setValidatorEntryMappings(dbValidatorEntry) + return dbValidatorEntry, nil +} + +func (bav *UtxoView) GetTopValidatorsByStake(limit int) ([]*ValidatorEntry, error) { + // Validate limit param. + if limit <= 0 { + return []*ValidatorEntry{}, nil + } + // Create a slice of UtxoViewValidatorEntries. 
We want to skip pulling these from the database in + // case they have been updated in the UtxoView and the changes have not yet flushed to the database. + // Updates to a ValidatorEntry could include adding/removing stake or being deleted which would + // impact our ordering. We pull N ValidatorEntries not present in the UtxoView from the database + // then sort the UtxoViewValidatorEntries and DatabaseValidatorEntries together to find the top N + // ValidatorEntries by stake across both the UtxoView and database. + var utxoViewValidatorEntries []*ValidatorEntry + for _, validatorEntry := range bav.ValidatorMapKeyToValidatorEntry { + utxoViewValidatorEntries = append(utxoViewValidatorEntries, validatorEntry) + } + // Pull top N ValidatorEntries from the database (not present in the UtxoView). + validatorEntries, err := DBGetTopValidatorsByStake(bav.Handle, bav.Snapshot, limit, utxoViewValidatorEntries) + if err != nil { + return nil, errors.Wrapf(err, "GetTopValidatorsByStake: error retrieving entries from db: ") + } + // Add !isDeleted ValidatorEntries from the UtxoView to the ValidatorEntries from the db. + for _, validatorEntry := range utxoViewValidatorEntries { + if !validatorEntry.isDeleted { + validatorEntries = append(validatorEntries, validatorEntry) + } + } + // Sort the ValidatorEntries DESC by TotalStakeAmountNanos. + sort.Slice(validatorEntries, func(ii, jj int) bool { + return validatorEntries[ii].TotalStakeAmountNanos.Cmp(validatorEntries[jj].TotalStakeAmountNanos) > 0 + }) + // Return top N. + upperBound := int(math.Min(float64(limit), float64(len(validatorEntries)))) + return validatorEntries[0:upperBound], nil +} + +func (bav *UtxoView) GetGlobalStakeAmountNanos() (*uint256.Int, error) { + var globalStakeAmountNanos *uint256.Int + var err error + // Read the GlobalStakeAmountNanos from the UtxoView. 
// GetGlobalStakeAmountNanos returns the total stake across all validators,
// preferring the UtxoView's cached value and falling back to the db. The
// returned value is a clone/fresh read, never the cached pointer itself.
func (bav *UtxoView) GetGlobalStakeAmountNanos() (*uint256.Int, error) {
	var globalStakeAmountNanos *uint256.Int
	var err error
	// Read the GlobalStakeAmountNanos from the UtxoView.
	if bav.GlobalStakeAmountNanos != nil {
		globalStakeAmountNanos = bav.GlobalStakeAmountNanos.Clone()
	}
	// If not set, read the GlobalStakeAmountNanos from the db.
	// NOTE(review): a cached value of exactly zero also falls through to the
	// db here — presumably because the view cannot distinguish "unset" from a
	// true zero. If the view could ever legitimately hold zero while the db
	// still has a stale non-zero value, this would return the stale value;
	// confirm this is impossible before relying on it.
	if globalStakeAmountNanos == nil || globalStakeAmountNanos.IsZero() {
		globalStakeAmountNanos, err = DBGetGlobalStakeAmountNanos(bav.Handle, bav.Snapshot)
		if err != nil {
			return nil, err
		}
		if globalStakeAmountNanos == nil {
			globalStakeAmountNanos = uint256.NewInt()
		}
		// Cache the GlobalStakeAmountNanos from the db in the UtxoView.
		bav._setGlobalStakeAmountNanos(globalStakeAmountNanos)
	}
	return globalStakeAmountNanos, nil
}

// _setValidatorEntryMappings stores validatorEntry in the view's
// ValidatorMapKeyToValidatorEntry map, keyed by its ToMapKey().
func (bav *UtxoView) _setValidatorEntryMappings(validatorEntry *ValidatorEntry) {
	// This function shouldn't be called with nil.
	if validatorEntry == nil {
		glog.Errorf("_setValidatorEntryMappings: called with nil entry, this should never happen")
		return
	}
	bav.ValidatorMapKeyToValidatorEntry[validatorEntry.ToMapKey()] = validatorEntry
}

// _deleteValidatorEntryMappings marks validatorEntry deleted in the view by
// overwriting its map slot with an isDeleted tombstone copy. The flush will
// later remove the corresponding db rows.
func (bav *UtxoView) _deleteValidatorEntryMappings(validatorEntry *ValidatorEntry) {
	// This function shouldn't be called with nil.
	if validatorEntry == nil {
		glog.Errorf("_deleteValidatorEntryMappings: called with nil entry, this should never happen")
		return
	}
	// Create a tombstone entry (shallow copy with isDeleted=true; the caller's
	// entry is left untouched).
	tombstoneEntry := *validatorEntry
	tombstoneEntry.isDeleted = true
	// Set the mappings to the point to the tombstone entry.
	bav._setValidatorEntryMappings(&tombstoneEntry)
}

// _setGlobalStakeAmountNanos caches a clone of globalStakeAmountNanos on the
// view, so later mutations of the caller's value cannot corrupt the cache.
func (bav *UtxoView) _setGlobalStakeAmountNanos(globalStakeAmountNanos *uint256.Int) {
	// This function shouldn't be called with nil.
	if globalStakeAmountNanos == nil {
		glog.Errorf("_setGlobalStakeAmountNanos: called with nil entry, this should never happen")
		return
	}
	bav.GlobalStakeAmountNanos = globalStakeAmountNanos.Clone()
}
+ for validatorMapKeyIter, validatorEntryIter := range bav.ValidatorMapKeyToValidatorEntry { + // Make a copy of the iterators since we make references to them below. + validatorMapKey := validatorMapKeyIter + validatorEntry := *validatorEntryIter + + // Sanity-check that the entry matches the map key. + validatorMapKeyInEntry := validatorEntry.ToMapKey() + if validatorMapKeyInEntry != validatorMapKey { + return fmt.Errorf( + "_flushValidatorEntriesToDbWithTxn: ValidatorEnry key %v doesn't match MapKey %v", + &validatorMapKeyInEntry, + &validatorMapKey, + ) + } + + // Delete the existing mappings in the db for this ValidatorMapKey. They + // will be re-added if the corresponding entry in memory has isDeleted=false. + if err := DBDeleteValidatorWithTxn(txn, bav.Snapshot, &validatorEntry); err != nil { + return errors.Wrapf(err, "_flushValidatorEntriesToDbWithTxn: ") + } + } + + // Set any !isDeleted ValidatorEntries in the ValidatorMapKeyToValidatorEntry UtxoView map. + for _, validatorEntryIter := range bav.ValidatorMapKeyToValidatorEntry { + validatorEntry := *validatorEntryIter + if validatorEntry.isDeleted { + // If ValidatorEntry.isDeleted then there's nothing to + // do because we already deleted the entry above. + } else { + // If !ValidatorEntry.isDeleted then we put the + // corresponding mappings for it into the db. 
+ if err := DBPutValidatorWithTxn(txn, bav.Snapshot, &validatorEntry, blockHeight); err != nil { + return errors.Wrapf(err, "_flushValidatorEntriesToDbWithTxn: ") + } + } + } + + return nil +} + +func (bav *UtxoView) _flushGlobalStakeAmountNanosToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + return DBPutGlobalStakeAmountNanosWithTxn(txn, bav.Snapshot, bav.GlobalStakeAmountNanos, blockHeight) +} + +// +// MEMPOOL UTILS +// + +func (bav *UtxoView) CreateRegisterAsValidatorTxindexMetadata( + utxoOp *UtxoOperation, + txn *MsgDeSoTxn, +) ( + *RegisterAsValidatorTxindexMetadata, + []*AffectedPublicKey, +) { + metadata := txn.TxnMeta.(*RegisterAsValidatorMetadata) + + // Cast ValidatorPublicKey to ValidatorPublicKeyBase58Check. + validatorPublicKeyBase58Check := PkToString(txn.PublicKey, bav.Params) + + // Cast domains from []byte to string. + var domains []string + for _, domain := range metadata.Domains { + domains = append(domains, string(domain)) + } + + // TODO: In subsequent PR, pull UnstakedStakers from PrevStakeEntries on UtxoOperation. + var unstakedStakers []*UnstakedStakerTxindexMetadata + + // Construct TxindexMetadata. + txindexMetadata := &RegisterAsValidatorTxindexMetadata{ + ValidatorPublicKeyBase58Check: validatorPublicKeyBase58Check, + Domains: domains, + DisableDelegatedStake: metadata.DisableDelegatedStake, + // TODO: In a subsequent PR, update to convert BLS public keys and signatures to strings. + VotingPublicKey: string(metadata.VotingPublicKey), + VotingPublicKeySignature: string(metadata.VotingPublicKeySignature), + VotingSignatureBlockHeight: metadata.VotingSignatureBlockHeight, + UnstakedStakers: unstakedStakers, + } + + // Construct AffectedPublicKeys. 
+ affectedPublicKeys := []*AffectedPublicKey{ + { + PublicKeyBase58Check: validatorPublicKeyBase58Check, + Metadata: "RegisteredValidatorPublicKeyBase58Check", + }, + } + for _, unstakedStaker := range unstakedStakers { + affectedPublicKeys = append(affectedPublicKeys, &AffectedPublicKey{ + PublicKeyBase58Check: unstakedStaker.StakerPublicKeyBase58Check, + Metadata: "UnstakedStakerPublicKeyBase58Check", + }) + } + + return txindexMetadata, affectedPublicKeys +} + +func (bav *UtxoView) CreateUnregisterAsValidatorTxindexMetadata( + utxoOp *UtxoOperation, + txn *MsgDeSoTxn, +) ( + *UnregisterAsValidatorTxindexMetadata, + []*AffectedPublicKey, +) { + // Cast ValidatorPublicKey to ValidatorPublicKeyBase58Check. + validatorPublicKeyBase58Check := PkToString(txn.PublicKey, bav.Params) + + // TODO: In subsequent PR, pull UnstakedStakers from PrevStakeEntries on UtxoOperation. + var unstakedStakers []*UnstakedStakerTxindexMetadata + + // Construct TxindexMetadata. + txindexMetadata := &UnregisterAsValidatorTxindexMetadata{ + ValidatorPublicKeyBase58Check: validatorPublicKeyBase58Check, + UnstakedStakers: unstakedStakers, + } + + // Construct AffectedPublicKeys. 
//
// CONSTANTS
//

// Rule errors surfaced when connecting or validating validator txns.
// NOTE(review): the first constant's *name* uses "Proofof" while its string
// value uses "ProofOf"; renaming would break existing callers (including the
// tests), so only the inconsistency is flagged here.
const RuleErrorProofofStakeTxnBeforeBlockHeight RuleError = "RuleErrorProofOfStakeTxnBeforeBlockHeight"
const RuleErrorInvalidValidatorPKID RuleError = "RuleErrorInvalidValidatorPKID"
const RuleErrorValidatorNoDomains RuleError = "RuleErrorValidatorNoDomains"
const RuleErrorValidatorTooManyDomains RuleError = "RuleErrorValidatorTooManyDomains"
const RuleErrorValidatorInvalidDomain RuleError = "RuleErrorValidatorInvalidDomain"
const RuleErrorValidatorDuplicateDomains RuleError = "RuleErrorValidatorDuplicateDomains"
const RuleErrorValidatorNotFound RuleError = "RuleErrorValidatorNotFound"

// MaxValidatorNumDomains caps the number of domains a validator may register.
const MaxValidatorNumDomains int = 12
miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize fork heights. + params.ForkHeights.BalanceModelBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + utxoView := func() *UtxoView { + newUtxoView, err := mempool.GetAugmentedUniversalView() + require.NoError(t, err) + return newUtxoView + } + _ = utxoView + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. + blockHeight := uint64(chain.blockTip().Height + 1) + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID + m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID + m3PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m3PkBytes).PKID + m4PKID := DBGetPKIDEntryForPublicKey(db, 
chain.snapshot, m4PkBytes).PKID + _, _, _, _, _ = m0PKID, m1PKID, m2PKID, m3PKID, m4PKID + + { + // Param Updater set min fee rate to 101 nanos per KB + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } + { + // RuleErrorProofOfStakeTxnBeforeBlockHeight + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = math.MaxUint32 + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + DisableDelegatedStake: false, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorProofofStakeTxnBeforeBlockHeight) + + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + } + { + // RuleErrorValidatorNoDomains + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{}, + DisableDelegatedStake: false, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorNoDomains) + } + { + // RuleErrorValidatorTooManyDomains + var domains [][]byte + for ii := 0; ii <= MaxValidatorNumDomains+1; ii++ { + domains = append(domains, []byte(fmt.Sprintf("https://example.com/%d", ii))) + } + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: domains, + DisableDelegatedStake: 
false, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorTooManyDomains) + } + { + // RuleErrorValidatorInvalidDomain + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("InvalidURL")}, + DisableDelegatedStake: false, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorInvalidDomain) + } + { + // RuleErrorValidatorDuplicateDomains + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com"), []byte("https://example.com")}, + DisableDelegatedStake: false, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorDuplicateDomains) + } + { + // Happy path: register a validator + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + DisableDelegatedStake: false, + } + extraData := map[string][]byte{"TestKey": []byte("TestValue1")} + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB, + ) + require.NoError(t, err) + } + { + // Query: retrieve ValidatorEntry by PKID + validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.ValidatorPKID, m0PKID) + require.Len(t, validatorEntry.Domains, 1) + require.Equal(t, string(validatorEntry.Domains[0]), "https://example.com") + require.False(t, validatorEntry.DisableDelegatedStake) + require.Equal(t, string(validatorEntry.ExtraData["TestKey"]), "TestValue1") + } + { + // Query: retrieve top ValidatorEntries by stake + validatorEntries, err = utxoView().GetTopValidatorsByStake(0) + 
require.NoError(t, err) + require.Empty(t, validatorEntries) + + validatorEntries, err = utxoView().GetTopValidatorsByStake(2) + require.NoError(t, err) + require.Len(t, validatorEntries, 1) + require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) + } + { + // Query: retrieve GlobalStakeAmountNanos + globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + } + { + // Happy path: update a validator + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example1.com"), []byte("https://example2.com")}, + DisableDelegatedStake: false, + } + extraData := map[string][]byte{"TestKey": []byte("TestValue2")} + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB, + ) + require.NoError(t, err) + } + { + // Query: retrieve ValidatorEntry by PKID, make sure it has been updated + validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.ValidatorPKID, m0PKID) + require.Len(t, validatorEntry.Domains, 2) + require.Equal(t, string(validatorEntry.Domains[0]), "https://example1.com") + require.Equal(t, string(validatorEntry.Domains[1]), "https://example2.com") + require.False(t, validatorEntry.DisableDelegatedStake) + require.Equal(t, string(validatorEntry.ExtraData["TestKey"]), "TestValue2") + } + { + // Sad path: unregister validator that doesn't exist + _, _, _, err = _submitUnregisterAsValidatorTxn(testMeta, m1Pub, m1Priv, flushToDB) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorNotFound) + } + { + // Happy path: unregister validator + _, _, _, err = _submitUnregisterAsValidatorTxn(testMeta, m0Pub, m0Priv, flushToDB) + require.NoError(t, err) + } + { + // Sad path: unregister validator that doesn't exist + _, _, _, err = _submitUnregisterAsValidatorTxn(testMeta, m0Pub, m0Priv, flushToDB) + 
require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorNotFound) + } + { + // Query: retrieve ValidatorEntry by PKID + validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Nil(t, validatorEntry) + } + { + // Query: retrieve top ValidatorEntries by stake + validatorEntries, err = utxoView().GetTopValidatorsByStake(1) + require.NoError(t, err) + require.Empty(t, validatorEntries) + } + { + // Query: retrieve GlobalStakeAmountNanos + globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + } + + // Flush mempool to the db and test rollbacks. + require.NoError(t, mempool.universalUtxoView.FlushToDb(blockHeight)) + _executeAllTestRollbackAndFlush(testMeta) +} + +func _submitRegisterAsValidatorTxn( + testMeta *TestMeta, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + metadata *RegisterAsValidatorMetadata, + extraData map[string][]byte, + flushToDB bool, +) (_utxoOps []*UtxoOperation, _txn *MsgDeSoTxn, _height uint32, _err error) { + // Record transactor's prevBalance. + prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) + + // Convert PublicKeyBase58Check to PkBytes. + updaterPkBytes, _, err := Base58CheckDecode(transactorPublicKeyBase58Check) + require.NoError(testMeta.t, err) + + // Create the transaction. + txn, totalInputMake, changeAmountMake, feesMake, err := testMeta.chain.CreateRegisterAsValidatorTxn( + updaterPkBytes, + metadata, + extraData, + testMeta.feeRateNanosPerKb, + testMeta.mempool, + []*DeSoOutput{}, + ) + if err != nil { + return nil, nil, 0, err + } + require.Equal(testMeta.t, totalInputMake, changeAmountMake+feesMake) + + // Sign the transaction now that its inputs are set up. + _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) + + // Connect the transaction. 
+ utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( + txn, + txn.Hash(), + getTxnSize(*txn), + testMeta.savedHeight, + true, + false, + ) + if err != nil { + return nil, nil, 0, err + } + require.Equal(testMeta.t, totalInput, totalOutput+fees) + require.Equal(testMeta.t, totalInput, totalInputMake) + require.Equal(testMeta.t, OperationTypeRegisterAsValidator, utxoOps[len(utxoOps)-1].Type) + if flushToDB { + require.NoError(testMeta.t, testMeta.mempool.universalUtxoView.FlushToDb(uint64(testMeta.savedHeight))) + } + require.NoError(testMeta.t, testMeta.mempool.RegenerateReadOnlyView()) + + // Record the txn. + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + return utxoOps, txn, testMeta.savedHeight, nil +} + +func _submitUnregisterAsValidatorTxn( + testMeta *TestMeta, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + flushToDB bool, +) (_utxoOps []*UtxoOperation, _txn *MsgDeSoTxn, _height uint32, _err error) { + // Record transactor's prevBalance. + prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) + + // Convert PublicKeyBase58Check to PkBytes. + updaterPkBytes, _, err := Base58CheckDecode(transactorPublicKeyBase58Check) + require.NoError(testMeta.t, err) + + // Create the transaction. + txn, totalInputMake, changeAmountMake, feesMake, err := testMeta.chain.CreateUnregisterAsValidatorTxn( + updaterPkBytes, + &UnregisterAsValidatorMetadata{}, + nil, + testMeta.feeRateNanosPerKb, + testMeta.mempool, + []*DeSoOutput{}, + ) + if err != nil { + return nil, nil, 0, err + } + require.Equal(testMeta.t, totalInputMake, changeAmountMake+feesMake) + + // Sign the transaction now that its inputs are set up. + _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) + + // Connect the transaction. 
+ utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( + txn, + txn.Hash(), + getTxnSize(*txn), + testMeta.savedHeight, + true, + false, + ) + if err != nil { + return nil, nil, 0, err + } + require.Equal(testMeta.t, totalInput, totalOutput+fees) + require.Equal(testMeta.t, totalInput, totalInputMake) + require.Equal(testMeta.t, OperationTypeUnregisterAsValidator, utxoOps[len(utxoOps)-1].Type) + if flushToDB { + require.NoError(testMeta.t, testMeta.mempool.universalUtxoView.FlushToDb(uint64(testMeta.savedHeight))) + } + require.NoError(testMeta.t, testMeta.mempool.RegenerateReadOnlyView()) + + // Record the txn. + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + return utxoOps, txn, testMeta.savedHeight, nil +} + +func _testValidatorRegistrationWithDerivedKey(t *testing.T) { + var err error + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize fork heights. + params.ForkHeights.NFTTransferOrBurnAndDerivedKeysBlockHeight = uint32(0) + params.ForkHeights.DerivedKeySetSpendingLimitsBlockHeight = uint32(0) + params.ForkHeights.DerivedKeyTrackSpendingLimitsBlockHeight = uint32(0) + params.ForkHeights.DerivedKeyEthSignatureCompatibilityBlockHeight = uint32(0) + params.ForkHeights.ExtraDataOnEntriesBlockHeight = uint32(0) + params.ForkHeights.AssociationsAndAccessGroupsBlockHeight = uint32(0) + params.ForkHeights.BalanceModelBlockHeight = uint32(1) + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + // Mine a few blocks to give the senderPkString some money. 
+ for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. + blockHeight := uint64(chain.blockTip().Height) + 1 + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + senderPkBytes, _, err := Base58CheckDecode(senderPkString) + require.NoError(t, err) + senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) + require.NoError(t, err) + senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID + + _submitAuthorizeDerivedKeyTxn := func(txnType TxnType, count uint64) (string, error) { + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + txnType: count, + }, + } + + derivedKeyMetadata, derivedKeyAuthPriv := _getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit( + t, senderPrivKey, blockHeight+5, txnSpendingLimit, false, blockHeight, + ) + derivedKeyAuthPrivBase58Check := Base58CheckEncode(derivedKeyAuthPriv.Serialize(), true, params) + + prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, senderPkString) + + utxoOps, txn, _, err := _doAuthorizeTxnWithExtraDataAndSpendingLimits( + testMeta, + utxoView, + testMeta.feeRateNanosPerKb, + senderPkBytes, + derivedKeyMetadata.DerivedPublicKey, + derivedKeyAuthPrivBase58Check, + derivedKeyMetadata.ExpirationBlock, + derivedKeyMetadata.AccessSignature, + false, + nil, + nil, 
+ txnSpendingLimit, + ) + if err != nil { + return "", err + } + require.NoError(t, utxoView.FlushToDb(blockHeight)) + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + + err = utxoView.ValidateDerivedKey( + senderPkBytes, derivedKeyMetadata.DerivedPublicKey, blockHeight, + ) + require.NoError(t, err) + return derivedKeyAuthPrivBase58Check, nil + } + + _submitValidatorTxnWithDerivedKey := func( + transactorPkBytes []byte, derivedKeyPrivBase58Check string, inputTxn MsgDeSoTxn, + ) error { + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + var txn *MsgDeSoTxn + + switch inputTxn.TxnMeta.GetTxnType() { + // Construct txn. + case TxnTypeRegisterAsValidator: + txn, _, _, _, err = testMeta.chain.CreateRegisterAsValidatorTxn( + transactorPkBytes, + inputTxn.TxnMeta.(*RegisterAsValidatorMetadata), + make(map[string][]byte), + testMeta.feeRateNanosPerKb, + mempool, + []*DeSoOutput{}, + ) + case TxnTypeUnregisterAsValidator: + txn, _, _, _, err = testMeta.chain.CreateUnregisterAsValidatorTxn( + transactorPkBytes, + inputTxn.TxnMeta.(*UnregisterAsValidatorMetadata), + make(map[string][]byte), + testMeta.feeRateNanosPerKb, + mempool, + []*DeSoOutput{}, + ) + default: + return errors.New("invalid txn type") + } + if err != nil { + return err + } + // Sign txn. + _signTxnWithDerivedKey(t, txn, derivedKeyPrivBase58Check) + // Store the original transactor balance. + transactorPublicKeyBase58Check := Base58CheckEncode(transactorPkBytes, false, params) + prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) + // Connect txn. + utxoOps, _, _, _, err := utxoView.ConnectTransaction( + txn, + txn.Hash(), + getTxnSize(*txn), + testMeta.savedHeight, + true, + false, + ) + if err != nil { + return err + } + // Flush UTXO view to the db. 
+ require.NoError(t, utxoView.FlushToDb(blockHeight)) + // Track txn for rolling back. + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + return nil + } + + { + // Submit a RegisterAsValidator txn using a DerivedKey. + + // Create a DerivedKey that can perform one RegisterAsValidator txn. + derivedKeyPriv, err := _submitAuthorizeDerivedKeyTxn(TxnTypeRegisterAsValidator, 1) + require.NoError(t, err) + + // Perform a RegisterAsValidator txn. No error expected. + registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + } + err = _submitValidatorTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: registerAsValidatorMetadata}, + ) + require.NoError(t, err) + + // Validate the ValidatorEntry exists. + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + validatorEntry, err := utxoView.GetValidatorByPKID(senderPKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Len(t, validatorEntry.Domains, 1) + require.Equal(t, validatorEntry.Domains[0], []byte("https://example.com")) + + // Perform a second RegisterAsValidator txn. Error expected. + err = _submitValidatorTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: registerAsValidatorMetadata}, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "No more transactions of type REGISTER_AS_VALIDATOR are allowed on this Derived Key") + + // Perform an UnregisterAsValidator txn. Error expected. 
+ unregisterAsValidatorMetadata := &UnregisterAsValidatorMetadata{} + err = _submitValidatorTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unregisterAsValidatorMetadata}, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "No more transactions of type UNREGISTER_AS_VALIDATOR are allowed on this Derived Key") + } + { + // Submit an UnregisterAsValidator txn using a DerivedKey. + + // Create a DerivedKey that can perform one UnregisterAsValidator txn. + derivedKeyPriv, err := _submitAuthorizeDerivedKeyTxn(TxnTypeUnregisterAsValidator, 1) + require.NoError(t, err) + + // Perform an UnregisterAsValidator txn. No error expected. + unregisterAsValidatorMetadata := &UnregisterAsValidatorMetadata{} + err = _submitValidatorTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unregisterAsValidatorMetadata}, + ) + require.NoError(t, err) + + // Validate the ValidatorEntry no longer exists. + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + validatorEntry, err := utxoView.GetValidatorByPKID(senderPKID) + require.NoError(t, err) + require.Nil(t, validatorEntry) + + // Perform a second UnregisterAsValidator txn. Error expected. Validator not found. + err = _submitValidatorTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unregisterAsValidatorMetadata}, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorNotFound) + + // Perform a RegisterAsValidator txn. Error expected. + registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + } + err = _submitValidatorTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: registerAsValidatorMetadata}, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "No more transactions of type REGISTER_AS_VALIDATOR are allowed on this Derived Key") + + } + + // Flush mempool to the db and test rollbacks. 
+ require.NoError(t, mempool.universalUtxoView.FlushToDb(blockHeight)) + _executeAllTestRollbackAndFlush(testMeta) +} diff --git a/lib/constants.go b/lib/constants.go index 03c41099f..53f1c34c6 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/pkg/errors" "log" + "math" "math/big" "os" "path/filepath" @@ -268,6 +269,11 @@ type ForkHeights struct { // to an account balance model for accounting. BalanceModelBlockHeight uint32 + // ProofOfStakeNewTxnTypesBlockHeight defines the height at which we introduced the + // new txn types to support Proof of Stake. These txns include: RegisterAsValidator, + // UnregisterAsValidator, Stake, Unstake, and UnlockStake. + ProofOfStakeNewTxnTypesBlockHeight uint32 + // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. } @@ -332,6 +338,7 @@ const ( UnlimitedDerivedKeysMigration MigrationName = "UnlimitedDerivedKeysMigration" AssociationsAndAccessGroupsMigration MigrationName = "AssociationsAndAccessGroupsMigration" BalanceModelMigration MigrationName = "BalanceModelMigration" + ProofOfStakeNewTxnTypesMigration MigrationName = "ProofOfStakeNewTxnTypesMigration" ) type EncoderMigrationHeights struct { @@ -345,6 +352,9 @@ type EncoderMigrationHeights struct { // This coincides with the BalanceModel block BalanceModel MigrationHeight + + // This coincides with the ProofOfStakeNewTxnTypesBlockHeight + ProofOfStakeNewTxnTypesMigration MigrationHeight } func GetEncoderMigrationHeights(forkHeights *ForkHeights) *EncoderMigrationHeights { @@ -369,6 +379,11 @@ func GetEncoderMigrationHeights(forkHeights *ForkHeights) *EncoderMigrationHeigh Height: uint64(forkHeights.BalanceModelBlockHeight), Name: BalanceModelMigration, }, + ProofOfStakeNewTxnTypesMigration: MigrationHeight{ + Version: 4, + Height: uint64(forkHeights.ProofOfStakeNewTxnTypesBlockHeight), + Name: ProofOfStakeNewTxnTypesMigration, + }, } } func 
GetEncoderMigrationHeightsList(forkHeights *ForkHeights) ( @@ -613,7 +628,8 @@ var RegtestForkHeights = ForkHeights{ AssociationsDerivedKeySpendingLimitBlockHeight: uint32(0), // For convenience, we set the block height to 1 since the // genesis block was created using the utxo model. - BalanceModelBlockHeight: uint32(1), + BalanceModelBlockHeight: uint32(1), + ProofOfStakeNewTxnTypesBlockHeight: uint32(1), // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. @@ -766,6 +782,9 @@ var MainnetForkHeights = ForkHeights{ // Mon Apr 24 2023 @ 9am PST BalanceModelBlockHeight: uint32(226839), + // FIXME: set to real block height when ready + ProofOfStakeNewTxnTypesBlockHeight: uint32(math.MaxUint32), + // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. } @@ -1029,6 +1048,9 @@ var TestnetForkHeights = ForkHeights{ // Tues Apr 11 2023 @ 5pm PT BalanceModelBlockHeight: uint32(683058), + // FIXME: set to real block height when ready + ProofOfStakeNewTxnTypesBlockHeight: uint32(math.MaxUint32), + // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. } diff --git a/lib/db_utils.go b/lib/db_utils.go index a8104da51..be208b4ee 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -476,8 +476,25 @@ type DBPrefixes struct { // -> <> PrefixNoncePKIDIndex []byte `prefix_id:"[77]" is_state:"true"` - // NEXT_TAG: 78 + // PrefixValidatorByPKID: Retrieve a validator by PKID. + // Prefix, ValidatorPKID -> ValidatorEntry + PrefixValidatorByPKID []byte `prefix_id:"[78]" is_state:"true"` + // PrefixValidatorByStake: Retrieve the top N validators by stake. + // Prefix, TotalStakeAmountNanos, MaxUint64 - RegisteredAtBlockHeight, ValidatorPKID -> ValidatorPKID + // FIXME: @DH, should we duplicate the ValidatorPKID in the key and the value? 
+ // Alternatively, we could just store and parse the ValidatorPKID from the key + // and store a struct{} as the value. That saves on space, but makes retrieving + // the ValidatorPKID from the key bytes more complex than just reading the value + // bytes directly since the key includes other preceding fields. Interesting + // trade-off. Curious your opinion. + PrefixValidatorByStake []byte `prefix_id:"[79]" is_state:"true"` + + // PrefixGlobalStakeAmountNanos: Retrieve the cumulative stake across all validators. + // Prefix -> *uint256.Int + PrefixGlobalStakeAmountNanos []byte `prefix_id:"[80]" is_state:"true"` + + // NEXT_TAG: 81 } // StatePrefixToDeSoEncoder maps each state prefix to a DeSoEncoder type that is stored under that prefix. @@ -679,6 +696,15 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixNoncePKIDIndex) { // prefix_id:"[77]" return false, nil + } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByPKID) { + // prefix_id:"[78]" + return true, &ValidatorEntry{} + } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByStake) { + // prefix_id:"[79]" + return true, &PKID{} + } else if bytes.Equal(prefix, Prefixes.PrefixGlobalStakeAmountNanos) { + // prefix_id:"[80]" + return false, nil } return true, nil @@ -6735,6 +6761,8 @@ type TransactionMetadata struct { AccessGroupTxindexMetadata *AccessGroupTxindexMetadata `json:",omitempty"` AccessGroupMembersTxindexMetadata *AccessGroupMembersTxindexMetadata `json:",omitempty"` NewMessageTxindexMetadata *NewMessageTxindexMetadata `json:",omitempty"` + RegisterAsValidatorTxindexMetadata *RegisterAsValidatorTxindexMetadata `json:",omitempty"` + UnregisterAsValidatorTxindexMetadata *UnregisterAsValidatorTxindexMetadata `json:",omitempty"` } func (txnMeta *TransactionMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -6816,6 +6844,13 @@ func (txnMeta *TransactionMetadata) 
RawEncodeWithoutMetadata(blockHeight uint64, data = append(data, EncodeToBytes(blockHeight, txnMeta.NewMessageTxindexMetadata, skipMetadata...)...) } + if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + // encoding RegisterAsValidatorTxindexMetadata + data = append(data, EncodeToBytes(blockHeight, txnMeta.RegisterAsValidatorTxindexMetadata, skipMetadata...)...) + // encoding UnregisterAsValidatorTxindexMetadata + data = append(data, EncodeToBytes(blockHeight, txnMeta.UnregisterAsValidatorTxindexMetadata, skipMetadata...)...) + } + return data } @@ -7064,11 +7099,29 @@ func (txnMeta *TransactionMetadata) RawDecodeWithoutMetadata(blockHeight uint64, return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading NewMessageTxindexMetadata") } } + + if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + // decoding RegisterAsValidatorTxindexMetadata + CopyRegisterAsValidatorTxindexMetadata := &RegisterAsValidatorTxindexMetadata{} + if exist, err := DecodeFromBytes(CopyRegisterAsValidatorTxindexMetadata, rr); exist && err == nil { + txnMeta.RegisterAsValidatorTxindexMetadata = CopyRegisterAsValidatorTxindexMetadata + } else { + return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading RegisterAsValidatorTxindexMetadata") + } + // decoding UnregisterAsValidatorTxindexMetadata + CopyUnregisterAsValidatorTxindexMetadata := &UnregisterAsValidatorTxindexMetadata{} + if exist, err := DecodeFromBytes(CopyUnregisterAsValidatorTxindexMetadata, rr); exist && err == nil { + txnMeta.UnregisterAsValidatorTxindexMetadata = CopyUnregisterAsValidatorTxindexMetadata + } else { + return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading UnregisterAsValidatorTxindexMetadata") + } + } + return nil } func (txnMeta *TransactionMetadata) GetVersionByte(blockHeight uint64) byte { - return GetMigrationVersion(blockHeight, AssociationsAndAccessGroupsMigration) + return GetMigrationVersion(blockHeight, 
AssociationsAndAccessGroupsMigration, ProofOfStakeNewTxnTypesMigration) } func (txnMeta *TransactionMetadata) GetEncoderType() EncoderType { diff --git a/lib/mempool.go b/lib/mempool.go index 2ee8c2270..7f5c07f4e 100644 --- a/lib/mempool.go +++ b/lib/mempool.go @@ -1938,6 +1938,14 @@ func ComputeTransactionMetadata(txn *MsgDeSoTxn, utxoView *UtxoView, blockHash * PublicKeyBase58Check: PkToString(realTxMeta.RecipientAccessGroupOwnerPublicKey.ToBytes(), utxoView.Params), Metadata: "NewMessageRecipientAccessGroupOwnerPublicKe", }) + case TxnTypeRegisterAsValidator: + txindexMetadata, affectedPublicKeys := utxoView.CreateRegisterAsValidatorTxindexMetadata(utxoOps[len(utxoOps)-1], txn) + txnMeta.RegisterAsValidatorTxindexMetadata = txindexMetadata + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, affectedPublicKeys...) + case TxnTypeUnregisterAsValidator: + txindexMetadata, affectedPublicKeys := utxoView.CreateUnregisterAsValidatorTxindexMetadata(utxoOps[len(utxoOps)-1], txn) + txnMeta.UnregisterAsValidatorTxindexMetadata = txindexMetadata + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, affectedPublicKeys...) 
} return txnMeta } diff --git a/lib/network.go b/lib/network.go index e6686a8e0..4a81b112b 100644 --- a/lib/network.go +++ b/lib/network.go @@ -239,8 +239,10 @@ const ( TxnTypeAccessGroup TxnType = 31 TxnTypeAccessGroupMembers TxnType = 32 TxnTypeNewMessage TxnType = 33 + TxnTypeRegisterAsValidator TxnType = 34 + TxnTypeUnregisterAsValidator TxnType = 35 - // NEXT_ID = 34 + // NEXT_ID = 36 ) type TxnString string @@ -280,6 +282,8 @@ const ( TxnStringAccessGroup TxnString = "ACCESS_GROUP" TxnStringAccessGroupMembers TxnString = "ACCESS_GROUP_MEMBERS" TxnStringNewMessage TxnString = "NEW_MESSAGE" + TxnStringRegisterAsValidator TxnString = "REGISTER_AS_VALIDATOR" + TxnStringUnregisterAsValidator TxnString = "UNREGISTER_AS_VALIDATOR" ) var ( @@ -291,7 +295,8 @@ var ( TxnTypeAcceptNFTTransfer, TxnTypeBurnNFT, TxnTypeAuthorizeDerivedKey, TxnTypeMessagingGroup, TxnTypeDAOCoin, TxnTypeDAOCoinTransfer, TxnTypeDAOCoinLimitOrder, TxnTypeCreateUserAssociation, TxnTypeDeleteUserAssociation, TxnTypeCreatePostAssociation, TxnTypeDeletePostAssociation, - TxnTypeAccessGroup, TxnTypeAccessGroupMembers, TxnTypeNewMessage, + TxnTypeAccessGroup, TxnTypeAccessGroupMembers, TxnTypeNewMessage, TxnTypeRegisterAsValidator, + TxnTypeUnregisterAsValidator, } AllTxnString = []TxnString{ TxnStringUnset, TxnStringBlockReward, TxnStringBasicTransfer, TxnStringBitcoinExchange, TxnStringPrivateMessage, @@ -301,7 +306,8 @@ var ( TxnStringAcceptNFTTransfer, TxnStringBurnNFT, TxnStringAuthorizeDerivedKey, TxnStringMessagingGroup, TxnStringDAOCoin, TxnStringDAOCoinTransfer, TxnStringDAOCoinLimitOrder, TxnStringCreateUserAssociation, TxnStringDeleteUserAssociation, TxnStringCreatePostAssociation, TxnStringDeletePostAssociation, - TxnStringAccessGroup, TxnStringAccessGroupMembers, TxnStringNewMessage, + TxnStringAccessGroup, TxnStringAccessGroupMembers, TxnStringNewMessage, TxnStringRegisterAsValidator, + TxnStringUnregisterAsValidator, } ) @@ -381,6 +387,10 @@ func (txnType TxnType) GetTxnString() 
TxnString { return TxnStringAccessGroupMembers case TxnTypeNewMessage: return TxnStringNewMessage + case TxnTypeRegisterAsValidator: + return TxnStringRegisterAsValidator + case TxnTypeUnregisterAsValidator: + return TxnStringUnregisterAsValidator default: return TxnStringUndefined } @@ -454,6 +464,10 @@ func GetTxnTypeFromString(txnString TxnString) TxnType { return TxnTypeAccessGroupMembers case TxnStringNewMessage: return TxnTypeNewMessage + case TxnStringRegisterAsValidator: + return TxnTypeRegisterAsValidator + case TxnStringUnregisterAsValidator: + return TxnTypeUnregisterAsValidator default: // TxnTypeUnset means we couldn't find a matching txn type return TxnTypeUnset @@ -535,6 +549,10 @@ func NewTxnMetadata(txType TxnType) (DeSoTxnMetadata, error) { return (&AccessGroupMembersMetadata{}).New(), nil case TxnTypeNewMessage: return (&NewMessageMetadata{}).New(), nil + case TxnTypeRegisterAsValidator: + return (&RegisterAsValidatorMetadata{}).New(), nil + case TxnTypeUnregisterAsValidator: + return (&UnregisterAsValidatorMetadata{}).New(), nil default: return nil, fmt.Errorf("NewTxnMetadata: Unrecognized TxnType: %v; make sure you add the new type of transaction to NewTxnMetadata", txType) } @@ -1087,7 +1105,7 @@ func (msg *MsgDeSoTransactionBundle) FromBytes(data []byte) error { for ii := uint64(0); ii < numTransactions; ii++ { retTransaction := NewMessage(MsgTypeTxn).(*MsgDeSoTxn) - if err := _readTransactionV0Fields(rr, retTransaction); err != nil { + if err := ReadTransactionV0Fields(rr, retTransaction); err != nil { return errors.Wrapf(err, "MsgDeSoTransaction.FromBytes: ") } @@ -3099,7 +3117,7 @@ func (msg *MsgDeSoTxn) ToBytes(preSignature bool) ([]byte, error) { return data, nil } -func _readTransaction(rr io.Reader) (*MsgDeSoTxn, error) { +func ReadTransaction(rr io.Reader) (*MsgDeSoTxn, error) { ret := NewMessage(MsgTypeTxn).(*MsgDeSoTxn) // When the DeSo blockchain switched from UTXOs to a balance model, new fields had to be // added to the 
transaction struct (ie. TxnFeeNanos and TxnNonce). In order to maintain @@ -3107,11 +3125,11 @@ func _readTransaction(rr io.Reader) (*MsgDeSoTxn, error) { // transaction and we only attempt to read them if we have not reached EOF after reading // the original "basic" transaction fields. Thus, we split the _readTransaction // deserialization process into these two steps below. - if err := _readTransactionV0Fields(rr, ret); err != nil { - return nil, errors.Wrapf(err, "_readTransaction: Problem reading basic transaction fields") + if err := ReadTransactionV0Fields(rr, ret); err != nil { + return nil, errors.Wrapf(err, "ReadTransaction: Problem reading basic transaction fields") } - if err := _readTransactionV1Fields(rr, ret); err != nil { - return nil, errors.Wrapf(err, "_readTransaction: Problem reading extra transaction fields") + if err := ReadTransactionV1Fields(rr, ret); err != nil { + return nil, errors.Wrapf(err, "ReadTransaction: Problem reading extra transaction fields") } return ret, nil } @@ -3121,24 +3139,24 @@ func _readTransaction(rr io.Reader) (*MsgDeSoTxn, error) { // for TransactionBundles, which expect transactions to only include these fields. After // the balance model block height nodes will rely on the new TransactionBundleV2 struct, // which will allow transactions to contain arbitrary fields. 
-func _readTransactionV0Fields(rr io.Reader, ret *MsgDeSoTxn) error { +func ReadTransactionV0Fields(rr io.Reader, ret *MsgDeSoTxn) error { // De-serialize the inputs numInputs, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem converting len(msg.TxInputs)") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem converting len(msg.TxInputs)") } for ii := uint64(0); ii < numInputs; ii++ { currentInput := NewDeSoInput() _, err = io.ReadFull(rr, currentInput.TxID[:]) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem converting input txid") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem converting input txid") } inputIndex, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem converting input index") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem converting input index") } if inputIndex > uint64(^uint32(0)) { - return fmt.Errorf("_readTransactionV0Fields: Input index (%d) must not exceed (%d)", inputIndex, ^uint32(0)) + return fmt.Errorf("ReadTransactionV0Fields: Input index (%d) must not exceed (%d)", inputIndex, ^uint32(0)) } currentInput.Index = uint32(inputIndex) @@ -3148,19 +3166,19 @@ func _readTransactionV0Fields(rr io.Reader, ret *MsgDeSoTxn) error { // De-serialize the outputs numOutputs, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem converting len(msg.TxOutputs)") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem converting len(msg.TxOutputs)") } for ii := uint64(0); ii < numOutputs; ii++ { currentOutput := &DeSoOutput{} currentOutput.PublicKey = make([]byte, btcec.PubKeyBytesLenCompressed) _, err = io.ReadFull(rr, currentOutput.PublicKey) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem reading DeSoOutput.PublicKey") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem reading DeSoOutput.PublicKey") 
} amountNanos, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem reading DeSoOutput.AmountNanos") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem reading DeSoOutput.AmountNanos") } currentOutput.AmountNanos = amountNanos @@ -3172,86 +3190,86 @@ func _readTransactionV0Fields(rr io.Reader, ret *MsgDeSoTxn) error { // Encode the type as a uvarint. txnMetaType, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem reading MsgDeSoTxn.TxnType") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem reading MsgDeSoTxn.TxnType") } ret.TxnMeta, err = NewTxnMetadata(TxnType(txnMetaType)) if err != nil { - return fmt.Errorf("_readTransactionV0Fields: Problem initializing metadata: %v", err) + return fmt.Errorf("ReadTransactionV0Fields: Problem initializing metadata: %v", err) } if ret.TxnMeta == nil { - return fmt.Errorf("_readTransactionV0Fields: Metadata was nil: %v", ret.TxnMeta) + return fmt.Errorf("ReadTransactionV0Fields: Metadata was nil: %v", ret.TxnMeta) } metaLen, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem reading len(TxnMeta)") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem reading len(TxnMeta)") } if metaLen > MaxMessagePayload { - return fmt.Errorf("_readTransactionV0Fields.FromBytes: metaLen length %d longer than max %d", metaLen, MaxMessagePayload) + return fmt.Errorf("ReadTransactionV0Fields.FromBytes: metaLen length %d longer than max %d", metaLen, MaxMessagePayload) } metaBuf, err := SafeMakeSliceWithLength[byte](metaLen) if err != nil { - return fmt.Errorf("_readTransactionV0Fields.FromBytes: Problem creating slice for metaBuf") + return fmt.Errorf("ReadTransactionV0Fields.FromBytes: Problem creating slice for metaBuf") } _, err = io.ReadFull(rr, metaBuf) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem reading TxnMeta") + return 
errors.Wrapf(err, "ReadTransactionV0Fields: Problem reading TxnMeta") } err = ret.TxnMeta.FromBytes(metaBuf) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem decoding TxnMeta: ") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem decoding TxnMeta: ") } // De-serialize the public key if there is one pkLen, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem reading len(DeSoTxn.PublicKey)") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem reading len(DeSoTxn.PublicKey)") } if pkLen > MaxMessagePayload { - return fmt.Errorf("_readTransactionV0Fields.FromBytes: pkLen length %d longer than max %d", pkLen, MaxMessagePayload) + return fmt.Errorf("ReadTransactionV0Fields.FromBytes: pkLen length %d longer than max %d", pkLen, MaxMessagePayload) } ret.PublicKey = nil if pkLen != 0 { ret.PublicKey, err = SafeMakeSliceWithLength[byte](pkLen) if err != nil { - return fmt.Errorf("_readTransactionV0Fields.FromBytes: Problem making slice for PublicKey") + return fmt.Errorf("ReadTransactionV0Fields.FromBytes: Problem making slice for PublicKey") } _, err = io.ReadFull(rr, ret.PublicKey) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem reading DeSoTxn.PublicKey") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem reading DeSoTxn.PublicKey") } } // De-serialize the ExtraData extraData, err := DecodeExtraData(rr) if err != nil { - return fmt.Errorf("_readTransactionV0Fields: Error decoding extra data: %v", err) + return fmt.Errorf("ReadTransactionV0Fields: Error decoding extra data: %v", err) } ret.ExtraData = extraData // De-serialize the signature if there is one. 
sigLen, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem reading len(DeSoTxn.Signature)") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem reading len(DeSoTxn.Signature)") } if sigLen > MaxMessagePayload { - return fmt.Errorf("_readTransactionV0Fields.FromBytes: sigLen length %d longer than max %d", sigLen, MaxMessagePayload) + return fmt.Errorf("ReadTransactionV0Fields.FromBytes: sigLen length %d longer than max %d", sigLen, MaxMessagePayload) } ret.Signature.SetSignature(nil) if sigLen != 0 { sigBytes, err := SafeMakeSliceWithLength[byte](sigLen) if err != nil { - return fmt.Errorf("_readTransactionV0Fields.FromBytes: Problem making slice for sigBytes") + return fmt.Errorf("ReadTransactionV0Fields.FromBytes: Problem making slice for sigBytes") } _, err = io.ReadFull(rr, sigBytes) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem reading DeSoTxn.Signature") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem reading DeSoTxn.Signature") } // Verify that the signature is valid. err = ret.Signature.FromBytes(sigBytes) if err != nil { - return errors.Wrapf(err, "_readTransactionV0Fields: Problem parsing DeSoTxn.Signature bytes") + return errors.Wrapf(err, "ReadTransactionV0Fields: Problem parsing DeSoTxn.Signature bytes") } } return nil @@ -3259,21 +3277,21 @@ func _readTransactionV0Fields(rr io.Reader, ret *MsgDeSoTxn) error { // This function takes an io.Reader and attempts to read the transaction fields that were // added after the BalanceModelBlockHeight, if the reader has not reached EOF. See the comments -// in _readTransaction() and above _readTransactionV0Fields() for more info. -func _readTransactionV1Fields(rr io.Reader, ret *MsgDeSoTxn) error { +// in _readTransaction() and above ReadTransactionV0Fields() for more info. 
+func ReadTransactionV1Fields(rr io.Reader, ret *MsgDeSoTxn) error { txnVersion, err := ReadUvarint(rr) if err == io.EOF { return nil } else if err != nil { return errors.Wrapf( - err, "_readTransactionV1Fields: Problem parsing DeSoTxn.TxnVersion bytes") + err, "ReadTransactionV1Fields: Problem parsing DeSoTxn.TxnVersion bytes") } ret.TxnVersion = DeSoTxnVersion(txnVersion) txnFeeNanos, err := ReadUvarint(rr) if err != nil { return errors.Wrapf( - err, "_readTransactionV1Fields: Problem parsing DeSoTxn.TxnFeeNanos bytes") + err, "ReadTransactionV1Fields: Problem parsing DeSoTxn.TxnFeeNanos bytes") } ret.TxnFeeNanos = txnFeeNanos @@ -3281,7 +3299,7 @@ func _readTransactionV1Fields(rr io.Reader, ret *MsgDeSoTxn) error { err = txnNonce.ReadDeSoNonce(rr) if err != nil { return errors.Wrapf( - err, "_readTransactionV1Fields: Problem parsing DeSoTxn.TxnNonce bytes") + err, "ReadTransactionV1Fields: Problem parsing DeSoTxn.TxnNonce bytes") } ret.TxnNonce = txnNonce @@ -3291,7 +3309,7 @@ func _readTransactionV1Fields(rr io.Reader, ret *MsgDeSoTxn) error { func (msg *MsgDeSoTxn) FromBytes(data []byte) error { rr := bytes.NewReader(data) - ret, err := _readTransaction(rr) + ret, err := ReadTransaction(rr) if err != nil { return errors.Wrapf(err, "MsgDeSoTxn.FromBytes: Problem reading txn: ") } @@ -3363,7 +3381,7 @@ func SignTransactionBytes(txnBytes []byte, privateKey *btcec.PrivateKey, isDeriv // As we're signing the transaction using a derived key, we // pass the key to extraData. rr := bytes.NewReader(txnBytes) - txn, err := _readTransaction(rr) + txn, err := ReadTransaction(rr) if err != nil { return nil, nil, errors.Wrapf(err, "SignTransactionBytes: Problem reading txn: ") } @@ -3443,26 +3461,38 @@ func (msg *MsgDeSoTxn) UnmarshalJSON(data []byte) error { // MsgDeSoTxn (which is admittedly very rare and a test can easily catch this // by erroring when the number of fields changes with a helpful message). 
anonymousTxn := struct { - TxInputs []*DeSoInput - TxOutputs []*DeSoOutput - TxnMeta DeSoTxnMetadata - PublicKey []byte - Signature DeSoSignature - TxnType uint64 + TxnVersion DeSoTxnVersion + TxInputs []*DeSoInput + TxOutputs []*DeSoOutput + TxnFeeNanos uint64 + TxnNonce *DeSoNonce + TxnMeta DeSoTxnMetadata + PublicKey []byte + ExtraData map[string][]byte + Signature DeSoSignature + TxnType uint64 }{ - TxInputs: msg.TxInputs, - TxOutputs: msg.TxOutputs, - TxnMeta: msg.TxnMeta, - PublicKey: msg.PublicKey, - Signature: msg.Signature, - TxnType: msg.TxnTypeJSON, + TxnVersion: msg.TxnVersion, + TxInputs: msg.TxInputs, + TxOutputs: msg.TxOutputs, + TxnFeeNanos: msg.TxnFeeNanos, + TxnNonce: msg.TxnNonce, + TxnMeta: msg.TxnMeta, + PublicKey: msg.PublicKey, + ExtraData: msg.ExtraData, + Signature: msg.Signature, + TxnType: msg.TxnTypeJSON, } json.Unmarshal(data, &anonymousTxn) + msg.TxnVersion = anonymousTxn.TxnVersion msg.TxInputs = anonymousTxn.TxInputs msg.TxOutputs = anonymousTxn.TxOutputs + msg.TxnFeeNanos = anonymousTxn.TxnFeeNanos + msg.TxnNonce = anonymousTxn.TxnNonce msg.TxnMeta = anonymousTxn.TxnMeta msg.PublicKey = anonymousTxn.PublicKey + msg.ExtraData = anonymousTxn.ExtraData msg.Signature = anonymousTxn.Signature // Don't set the TxnTypeJSON when unmarshaling. It should never be used in // Go code, only at the interface between Go and non-Go. diff --git a/lib/peer.go b/lib/peer.go index c0ad096e0..95e0144ec 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -812,7 +812,8 @@ func (pp *Peer) _handleOutExpectedResponse(msg DeSoMessage) { // If we're sending the peer a GetBlocks message, we expect to receive the // blocks at minimum within a few seconds of each other. stallTimeout := time.Duration(int64(pp.stallTimeoutSeconds) * int64(time.Second)) - if msg.GetMsgType() == MsgTypeGetBlocks { + switch msg.GetMsgType() { + case MsgTypeGetBlocks: getBlocks := msg.(*MsgDeSoGetBlocks) // We have one block expected for each entry in the message. 
for ii := range getBlocks.HashList { @@ -822,45 +823,48 @@ func (pp *Peer) _handleOutExpectedResponse(msg DeSoMessage) { MessageType: MsgTypeBlock, }) } - } - - // If we're sending a GetHeaders message, the Peer should respond within - // a few seconds with a HeaderBundle. - if msg.GetMsgType() == MsgTypeGetHeaders { + case MsgTypeGetHeaders: + // If we're sending a GetHeaders message, the Peer should respond within + // a few seconds with a HeaderBundle. pp._addExpectedResponse(&ExpectedResponse{ TimeExpected: time.Now().Add(stallTimeout), MessageType: MsgTypeHeaderBundle, }) - } - - // If we're sending a GetTransactions message, the Peer should respond within - // a few seconds with a TransactionBundle. Every GetTransactions message should - // receive a TransactionBundle in response. The - // Server handles situations in which we request certain hashes but only get - // back a subset of them in the response (i.e. a case in which we received a - // timely reply but the reply was incomplete). - // - // NOTE: at the BalanceModelBlockHeight, MsgTypeTransactionBundle is replaced by - // the more capable MsgTypeTransactionBundleV2. - nextBlockHeight := pp.srv.blockchain.blockTip().Height + 1 - expectedMsgType := MsgTypeTransactionBundle - if nextBlockHeight >= pp.srv.blockchain.params.ForkHeights.BalanceModelBlockHeight { - expectedMsgType = MsgTypeTransactionBundleV2 - } - if msg.GetMsgType() == MsgTypeGetTransactions { + case MsgTypeGetSnapshot: + // If we're sending a GetSnapshot message, the peer should respond within a few seconds with a SnapshotData. pp._addExpectedResponse(&ExpectedResponse{ TimeExpected: time.Now().Add(stallTimeout), - MessageType: expectedMsgType, - // The Server handles situations in which the Peer doesn't send us all of - // the hashes we were expecting using timeouts on requested hashes. 
+ MessageType: MsgTypeSnapshotData, }) - } - - // If we're sending a GetSnapshot message, the peer should respond within a few seconds with a SnapshotData. - if msg.GetMsgType() == MsgTypeGetSnapshot { + case MsgTypeGetTransactions: + // If we're sending a GetTransactions message, the Peer should respond within + // a few seconds with a TransactionBundle. Every GetTransactions message should + // receive a TransactionBundle in response. The + // Server handles situations in which we request certain hashes but only get + // back a subset of them in the response (i.e. a case in which we received a + // timely reply but the reply was incomplete). + // + // NOTE: at the BalanceModelBlockHeight, MsgTypeTransactionBundle is replaced by + // the more capable MsgTypeTransactionBundleV2. + // TODO: After fork, remove this recover block and always expect msg type MsgTypeTransactionBundleV2. + defer func() { + if r := recover(); r != nil { + isSrvNil := pp.srv == nil + isBlockchainNil := isSrvNil && pp.srv.blockchain == nil + isBlockTipNil := !isSrvNil && !isBlockchainNil && pp.srv.blockchain.blockTip() == nil + glog.Errorf( + "Peer._handleOutExpectedResponse: Recovered from panic: %v.\nsrv is nil: %t\nsrv.Blockchain is nil: %t\n,srv.Blockchain.BlockTip is nil: %t", r, isSrvNil, isBlockchainNil, isBlockTipNil) + } + }() + expectedMsgType := MsgTypeTransactionBundle + if pp.srv.blockchain.blockTip().Height+1 >= pp.Params.ForkHeights.BalanceModelBlockHeight { + expectedMsgType = MsgTypeTransactionBundleV2 + } pp._addExpectedResponse(&ExpectedResponse{ TimeExpected: time.Now().Add(stallTimeout), - MessageType: MsgTypeSnapshotData, + MessageType: expectedMsgType, + // The Server handles situations in which the Peer doesn't send us all of + // the hashes we were expecting using timeouts on requested hashes. 
}) } } From a6ae43b4f8ff480ec2370c1a74b5e3e7dc6818ee Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Tue, 25 Apr 2023 16:32:00 -0400 Subject: [PATCH 006/762] Mf/add block view stake (#501) * Add block view validator. * Add unregister as validator types. * Add validator entry db utils. * Add blockchain utils. * Add utxo view getters. * Delete then set instead of updating validator entry. * Add connect register as validator logic. * Add disconnect register as validator. * Add connect and disconnect unregister as validator. * Add unregister as validator blockchain and mempool utils. * Only overwrite validatorID if new validator. * Use url parse request URI to validate domains. * Tie in register/unregister txn types. * Finish blockchain.go construct validator txns. * Add test file template. * Add tests for validations. * Add tests for registering, querying, unregistering w/o flushing. * Find top validators by stake across the db and UtxoView. * Fix tests when flushing to db. * Test updating a validator. * Store blockheights as uint64. * Fix disconnect logic for register as validator txn. * Fix disconnect tests for unregister txn. * Add stubbed voting public key fields. * Change weird UtxoOperation version to see if fixes CI. * Fix block heights in tests. * Add passing test for creating validator derived key. * Add block view stake. * Add types and byte encodings. * Fix merge conflicts more. * Add db utils. * Fix failing CI. * Add metadata validations. * Reorder keys in prefix. * Test registering + unregistering validators using derived keys. * Update TODO comments. * Clean up comments. * Add UtxoView utils. * Add mempool utils. * Tie in txindex metadata creation. * Tie in where txindex metadata gets set. * Start on connect stake txn. * Split out setting/flushing global stake amount nanos. * Start in on connect + disconnect stake operation. * Add first stake connect tests. * Add test for staking more. 
* Add connect unstake. * Test happy path unstaking connect logic. * Add connect unstake sad path tests. * Add unlock stake connect + disconnect logic. * Add happy path connect unlock stake test. * Address PR feedback. * Build out db loop for retrieving matching locked stake entries. * Add unlock stake sad path tests. * Add stake, unstake, unlock stake txn spending limits. * Fix failing CI pt 1. * Set test fork heights using helper. * Fix CI pt 2. * Add skeleton for testing staking w/ derived keys. * Start adding tests for staking with derived keys. * Add tests for unlock stake with derived key. * Fix failing stake with derived key tests. * Add tests for validating stake derived keys. * Add tests for derived key that can stake w/ any validator. * Add tests for staking with unlimited derived key. * Start writing to metamask string logic + tests. * Add unlock stake txn spending limits to metamask string logic. * Set min fee on register as validator test. * Fix derived key min fee tests. * Re-enable rollbacks in tests. * Disconnect basic transfer when disconnecting txns. * Use nondeterministic signing alg in tests. * Account for DESO when staking. * Account for DESO moving in unlock stake txn. * Move GetValidatorByPK to block_view_validator. * adding generic encode/decode deso encoder slices and single structs (#503) * check in progress on adding generic encode/decode deso encoder slices * add DecodeDeSoEncoder for decode single structs * trying stuff w/ generics * more messing w/ generics * fix validator entry decoding * revert to old decode deso encoder slice * remove comment * safe make * add test case * Address PR feedback pt1. * Address PR feedback pt1. * Encode validator total stake amount nanos in key. (#502) * Encode validator total stake amount nanos in key. * Add comment for EncodeUint256 vs EncodeOptionalUint256. * Fallback to global stake limit if scoped is exceeded. * Add moving DESO to totalInput, totalOutput. 
* Store stake/unstake spending limits as uint256.Ints. * use deso encoder decode (#505) * use deso encoder decode * don't use decodedesoencoder for public keys * Wrap error messages. * Address more PR feedback. * Add tests for top validators by stake. (#506) * Add tests for top validators by stake. * Split up tests. * Add TestTopValidatorsByStakeMergingDbAndUtxoView. * Fix GetLockedStakeEntriesInRange and add test. * Resolve PR comments. --------- Co-authored-by: Lazy Nina <81658138+lazynina@users.noreply.github.com> --- lib/block_view.go | 108 +- lib/block_view_derived_key.go | 41 + lib/block_view_derived_key_test.go | 2 +- lib/block_view_flush.go | 6 + lib/block_view_stake.go | 2514 ++++++++++++++++++++++++++++ lib/block_view_stake_test.go | 1756 +++++++++++++++++++ lib/block_view_types.go | 83 +- lib/block_view_types_test.go | 58 + lib/block_view_validator.go | 77 +- lib/block_view_validator_test.go | 393 ++++- lib/db_utils.go | 47 +- lib/generics.go | 49 + lib/generics_test.go | 40 + lib/mempool.go | 12 + lib/network.go | 300 +++- lib/network_test.go | 2 +- lib/types.go | 4 + 17 files changed, 5420 insertions(+), 72 deletions(-) create mode 100644 lib/block_view_stake.go create mode 100644 lib/block_view_stake_test.go diff --git a/lib/block_view.go b/lib/block_view.go index 9a616b90d..2e1841849 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -120,6 +120,12 @@ type UtxoView struct { // Global stake across validators GlobalStakeAmountNanos *uint256.Int + // Stake mappings + StakeMapKeyToStakeEntry map[StakeMapKey]*StakeEntry + + // Locked stake mappings + LockedStakeMapKeyToLockedStakeEntry map[LockedStakeMapKey]*LockedStakeEntry + // The hash of the tip the view is currently referencing. Mainly used // for error-checking when doing a bulk operation on the view. 
TipHash *BlockHash @@ -212,8 +218,16 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // ValidatorEntries bav.ValidatorMapKeyToValidatorEntry = make(map[ValidatorMapKey]*ValidatorEntry) - // Global stake across validators - bav.GlobalStakeAmountNanos = uint256.NewInt() + // Global stake across validators. We deliberately want this to initialize to nil and not zero + // since a zero value will overwrite an existing GlobalStakeAmountNanos value in the db, whereas + // a nil GlobalStakeAmountNanos value signifies that this value was never set. + bav.GlobalStakeAmountNanos = nil + + // StakeEntries + bav.StakeMapKeyToStakeEntry = make(map[StakeMapKey]*StakeEntry) + + // LockedStakeEntries + bav.LockedStakeMapKeyToLockedStakeEntry = make(map[LockedStakeMapKey]*LockedStakeEntry) } func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { @@ -473,7 +487,23 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { } // Copy the GlobalStakeAmountNanos. - newView.GlobalStakeAmountNanos = bav.GlobalStakeAmountNanos.Clone() + if bav.GlobalStakeAmountNanos != nil { + newView.GlobalStakeAmountNanos = bav.GlobalStakeAmountNanos.Clone() + } + + // Copy the StakeEntries + newView.StakeMapKeyToStakeEntry = make(map[StakeMapKey]*StakeEntry, len(bav.StakeMapKeyToStakeEntry)) + for entryKey, entry := range bav.StakeMapKeyToStakeEntry { + newView.StakeMapKeyToStakeEntry[entryKey] = entry.Copy() + } + + // Copy the LockedStakeEntries + newView.LockedStakeMapKeyToLockedStakeEntry = make( + map[LockedStakeMapKey]*LockedStakeEntry, len(bav.LockedStakeMapKeyToLockedStakeEntry), + ) + for entryKey, entry := range bav.LockedStakeMapKeyToLockedStakeEntry { + newView.LockedStakeMapKeyToLockedStakeEntry[entryKey] = entry.Copy() + } return newView, nil } @@ -1320,6 +1350,18 @@ func (bav *UtxoView) DisconnectTransaction(currentTxn *MsgDeSoTxn, txnHash *Bloc case TxnTypeUnregisterAsValidator: return bav._disconnectUnregisterAsValidator( OperationTypeUnregisterAsValidator, currentTxn, 
txnHash, utxoOpsForTxn, blockHeight) + + case TxnTypeStake: + return bav._disconnectStake( + OperationTypeStake, currentTxn, txnHash, utxoOpsForTxn, blockHeight) + + case TxnTypeUnstake: + return bav._disconnectUnstake( + OperationTypeUnstake, currentTxn, txnHash, utxoOpsForTxn, blockHeight) + + case TxnTypeUnlockStake: + return bav._disconnectUnlockStake( + OperationTypeUnlockStake, currentTxn, txnHash, utxoOpsForTxn, blockHeight) } return fmt.Errorf("DisconnectBlock: Unimplemented txn type %v", currentTxn.TxnMeta.GetTxnType().String()) @@ -2267,6 +2309,24 @@ func (bav *UtxoView) _checkAndUpdateDerivedKeySpendingLimit( derivedKeyEntry, txnMeta); err != nil { return utxoOpsForTxn, err } + case TxnTypeStake: + txnMeta := txn.TxnMeta.(*StakeMetadata) + if derivedKeyEntry, err = bav._checkStakeTxnSpendingLimitAndUpdateDerivedKey( + derivedKeyEntry, txn.PublicKey, txnMeta); err != nil { + return utxoOpsForTxn, err + } + case TxnTypeUnstake: + txnMeta := txn.TxnMeta.(*UnstakeMetadata) + if derivedKeyEntry, err = bav._checkUnstakeTxnSpendingLimitAndUpdateDerivedKey( + derivedKeyEntry, txn.PublicKey, txnMeta); err != nil { + return utxoOpsForTxn, err + } + case TxnTypeUnlockStake: + txnMeta := txn.TxnMeta.(*UnlockStakeMetadata) + if derivedKeyEntry, err = bav._checkUnlockStakeTxnSpendingLimitAndUpdateDerivedKey( + derivedKeyEntry, txn.PublicKey, txnMeta); err != nil { + return utxoOpsForTxn, err + } default: // If we get here, it means we're dealing with a txn that doesn't have any special // granular limits to deal with. 
This means we just check whether we have @@ -3229,6 +3289,15 @@ func (bav *UtxoView) _connectTransaction(txn *MsgDeSoTxn, txHash *BlockHash, case TxnTypeUnregisterAsValidator: totalInput, totalOutput, utxoOpsForTxn, err = bav._connectUnregisterAsValidator(txn, txHash, blockHeight, verifySignatures) + case TxnTypeStake: + totalInput, totalOutput, utxoOpsForTxn, err = bav._connectStake(txn, txHash, blockHeight, verifySignatures) + + case TxnTypeUnstake: + totalInput, totalOutput, utxoOpsForTxn, err = bav._connectUnstake(txn, txHash, blockHeight, verifySignatures) + + case TxnTypeUnlockStake: + totalInput, totalOutput, utxoOpsForTxn, err = bav._connectUnlockStake(txn, txHash, blockHeight, verifySignatures) + default: err = fmt.Errorf("ConnectTransaction: Unimplemented txn type %v", txn.TxnMeta.GetTxnType().String()) } @@ -3311,6 +3380,29 @@ func (bav *UtxoView) _connectTransaction(txn *MsgDeSoTxn, txHash *BlockHash, ) } } + if txn.TxnMeta.GetTxnType() == TxnTypeUnlockStake { + if len(utxoOpsForTxn) == 0 { + return nil, 0, 0, 0, errors.New( + "ConnectTransaction: TxnTypeUnlockStake must return UtxoOpsForTxn", + ) + } + utxoOp := utxoOpsForTxn[len(utxoOpsForTxn)-1] + if utxoOp == nil || utxoOp.Type != OperationTypeUnlockStake { + return nil, 0, 0, 0, errors.New( + "ConnectTransaction: TxnTypeUnlockStake must correspond to OperationTypeUnlockStake", + ) + } + totalLockedAmountNanos := uint256.NewInt() + for _, prevLockedStakeEntry := range utxoOp.PrevLockedStakeEntries { + totalLockedAmountNanos, err = SafeUint256().Add( + totalLockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos, + ) + if err != nil { + return nil, 0, 0, 0, errors.Wrapf(err, "ConnectTransaction: error computing TotalLockedAmountNanos: ") + } + } + desoLockedDelta = big.NewInt(0).Neg(totalLockedAmountNanos.ToBig()) + } if big.NewInt(0).Add(balanceDelta, desoLockedDelta).Sign() > 0 { return nil, 0, 0, 0, RuleErrorBalanceChangeGreaterThanZero } @@ -3985,6 +4077,16 @@ func (bav *UtxoView) 
GetSpendableDeSoBalanceNanosForPublicKey(pkBytes []byte, return spendableBalanceNanos, nil } +func copyExtraData(extraData map[string][]byte) map[string][]byte { + extraDataCopy := make(map[string][]byte) + for key, value := range extraData { + valueCopy := make([]byte, len(value)) + copy(valueCopy, value) + extraDataCopy[key] = valueCopy + } + return extraDataCopy +} + func mergeExtraData(oldMap map[string][]byte, newMap map[string][]byte) map[string][]byte { // Always create the map from scratch, since modifying the map on // newMap could modify the map on the oldMap otherwise. diff --git a/lib/block_view_derived_key.go b/lib/block_view_derived_key.go index 84ef41540..c39055ab8 100644 --- a/lib/block_view_derived_key.go +++ b/lib/block_view_derived_key.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "github.com/btcsuite/btcd/btcec" + "github.com/holiman/uint256" "github.com/pkg/errors" "reflect" "strconv" @@ -188,6 +189,9 @@ func (bav *UtxoView) _connectAuthorizeDerivedKey( AssociationLimitMap: make(map[AssociationLimitKey]uint64), AccessGroupMap: make(map[AccessGroupLimitKey]uint64), AccessGroupMemberMap: make(map[AccessGroupMemberLimitKey]uint64), + StakeLimitMap: make(map[StakeLimitKey]*uint256.Int), + UnstakeLimitMap: make(map[StakeLimitKey]*uint256.Int), + UnlockStakeLimitMap: make(map[StakeLimitKey]uint64), } if prevDerivedKeyEntry != nil && !prevDerivedKeyEntry.isDeleted { // Copy the existing transaction spending limit. 
@@ -305,6 +309,43 @@ func (bav *UtxoView) _connectAuthorizeDerivedKey( } } } + + // ====== Proof of Stake New Txn Types Fork ====== + if blockHeight >= bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { + // StakeLimitMap + for stakeLimitKey, stakingLimit := range transactionSpendingLimit.StakeLimitMap { + if err = bav.IsValidStakeLimitKey(txn.PublicKey, stakeLimitKey); err != nil { + return 0, 0, nil, err + } + if stakingLimit.IsZero() { + delete(newTransactionSpendingLimit.StakeLimitMap, stakeLimitKey) + } else { + newTransactionSpendingLimit.StakeLimitMap[stakeLimitKey] = stakingLimit + } + } + // UnstakeLimitMap + for unstakeLimitKey, unstakingLimit := range transactionSpendingLimit.UnstakeLimitMap { + if err = bav.IsValidStakeLimitKey(txn.PublicKey, unstakeLimitKey); err != nil { + return 0, 0, nil, err + } + if unstakingLimit.IsZero() { + delete(newTransactionSpendingLimit.UnstakeLimitMap, unstakeLimitKey) + } else { + newTransactionSpendingLimit.UnstakeLimitMap[unstakeLimitKey] = unstakingLimit + } + } + // UnlockStakeLimitMap + for unlockStakeLimitKey, transactionCount := range transactionSpendingLimit.UnlockStakeLimitMap { + if err = bav.IsValidStakeLimitKey(txn.PublicKey, unlockStakeLimitKey); err != nil { + return 0, 0, nil, err + } + if transactionCount == 0 { + delete(newTransactionSpendingLimit.UnlockStakeLimitMap, unlockStakeLimitKey) + } else { + newTransactionSpendingLimit.UnlockStakeLimitMap[unlockStakeLimitKey] = transactionCount + } + } + } } } } diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index dc98430a7..65d39fd29 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -832,7 +832,7 @@ func _doAuthorizeTxnWithExtraDataAndSpendingLimits(testMeta *TestMeta, utxoView // Sign the transaction now that its inputs are set up. // We have to set the solution byte because we're signing // the transaction with derived key on behalf of the owner. 
- _signTxnWithDerivedKey(t, txn, derivedPrivBase58Check) + _signTxnWithDerivedKeyAndType(t, txn, derivedPrivBase58Check, 1) txHash := txn.Hash() utxoOps, totalInput, totalOutput, fees, err := diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index d30bf58c7..4f7ed6bf2 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -146,6 +146,12 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushGlobalStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { return err } + if err := bav._flushStakeEntriesToDbWithTxn(txn, blockHeight); err != nil { + return err + } + if err := bav._flushLockedStakeEntriesToDbWithTxn(txn, blockHeight); err != nil { + return err + } return nil } diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go new file mode 100644 index 000000000..f5a3dd70a --- /dev/null +++ b/lib/block_view_stake.go @@ -0,0 +1,2514 @@ +package lib + +import ( + "bytes" + "fmt" + "github.com/dgraph-io/badger/v3" + "github.com/golang/glog" + "github.com/holiman/uint256" + "github.com/pkg/errors" + "sort" +) + +// +// TYPES: StakeEntry +// + +type StakeEntry struct { + StakeID *BlockHash + StakerPKID *PKID + ValidatorPKID *PKID + StakeAmountNanos *uint256.Int + ExtraData map[string][]byte + isDeleted bool +} + +type StakeMapKey struct { + ValidatorPKID PKID + StakerPKID PKID +} + +func (stakeEntry *StakeEntry) Copy() *StakeEntry { + return &StakeEntry{ + StakeID: stakeEntry.StakeID.NewBlockHash(), + StakerPKID: stakeEntry.StakerPKID.NewPKID(), + ValidatorPKID: stakeEntry.ValidatorPKID.NewPKID(), + StakeAmountNanos: stakeEntry.StakeAmountNanos.Clone(), + ExtraData: copyExtraData(stakeEntry.ExtraData), + isDeleted: stakeEntry.isDeleted, + } +} + +func (stakeEntry *StakeEntry) Eq(other *StakeEntry) bool { + return stakeEntry.StakeID.IsEqual(other.StakeID) +} + +func (stakeEntry *StakeEntry) ToMapKey() StakeMapKey { + return StakeMapKey{ + StakerPKID: *stakeEntry.StakerPKID, + 
ValidatorPKID: *stakeEntry.ValidatorPKID, + } +} + +func (stakeEntry *StakeEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, EncodeToBytes(blockHeight, stakeEntry.StakeID, skipMetadata...)...) + data = append(data, EncodeToBytes(blockHeight, stakeEntry.StakerPKID, skipMetadata...)...) + data = append(data, EncodeToBytes(blockHeight, stakeEntry.ValidatorPKID, skipMetadata...)...) + data = append(data, EncodeUint256(stakeEntry.StakeAmountNanos)...) + data = append(data, EncodeExtraData(stakeEntry.ExtraData)...) + return data +} + +func (stakeEntry *StakeEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + + // StakeID + stakeEntry.StakeID, err = DecodeDeSoEncoder(&BlockHash{}, rr) + if err != nil { + return errors.Wrapf(err, "StakeEntry.Decode: Problem reading StakeID: ") + } + + // StakerPKID + stakeEntry.StakerPKID, err = DecodeDeSoEncoder(&PKID{}, rr) + if err != nil { + return errors.Wrapf(err, "StakeEntry.Decode: Problem reading StakerPKID: ") + } + + // ValidatorPKID + stakeEntry.ValidatorPKID, err = DecodeDeSoEncoder(&PKID{}, rr) + if err != nil { + return errors.Wrapf(err, "StakeEntry.Decode: Problem reading ValidatorPKID: ") + } + + // StakeAmountNanos + stakeEntry.StakeAmountNanos, err = DecodeUint256(rr) + if err != nil { + return errors.Wrapf(err, "StakeEntry.Decode: Problem reading StakeAmountNanos: ") + } + + // ExtraData + stakeEntry.ExtraData, err = DecodeExtraData(rr) + if err != nil { + return errors.Wrapf(err, "StakeEntry.Decode: Problem reading ExtraData: ") + } + + return err +} + +func (stakeEntry *StakeEntry) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (stakeEntry *StakeEntry) GetEncoderType() EncoderType { + return EncoderTypeStakeEntry +} + +// +// TYPES: LockedStakeEntry +// + +type LockedStakeEntry struct { + LockedStakeID *BlockHash + StakerPKID *PKID + ValidatorPKID *PKID + LockedAmountNanos *uint256.Int 
+ LockedAtEpochNumber uint64 + ExtraData map[string][]byte + isDeleted bool +} + +type LockedStakeMapKey struct { + ValidatorPKID PKID + StakerPKID PKID + LockedAtEpochNumber uint64 +} + +func (lockedStakeEntry *LockedStakeEntry) Copy() *LockedStakeEntry { + return &LockedStakeEntry{ + LockedStakeID: lockedStakeEntry.LockedStakeID.NewBlockHash(), + StakerPKID: lockedStakeEntry.StakerPKID.NewPKID(), + ValidatorPKID: lockedStakeEntry.ValidatorPKID.NewPKID(), + LockedAmountNanos: lockedStakeEntry.LockedAmountNanos.Clone(), + LockedAtEpochNumber: lockedStakeEntry.LockedAtEpochNumber, + ExtraData: copyExtraData(lockedStakeEntry.ExtraData), + isDeleted: lockedStakeEntry.isDeleted, + } +} + +func (lockedStakeEntry *LockedStakeEntry) Eq(other *LockedStakeEntry) bool { + return lockedStakeEntry.LockedStakeID.IsEqual(other.LockedStakeID) +} + +func (lockedStakeEntry *LockedStakeEntry) ToMapKey() LockedStakeMapKey { + return LockedStakeMapKey{ + StakerPKID: *lockedStakeEntry.StakerPKID, + ValidatorPKID: *lockedStakeEntry.ValidatorPKID, + LockedAtEpochNumber: lockedStakeEntry.LockedAtEpochNumber, + } +} + +func (lockedStakeEntry *LockedStakeEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, EncodeToBytes(blockHeight, lockedStakeEntry.LockedStakeID, skipMetadata...)...) + data = append(data, EncodeToBytes(blockHeight, lockedStakeEntry.StakerPKID, skipMetadata...)...) + data = append(data, EncodeToBytes(blockHeight, lockedStakeEntry.ValidatorPKID, skipMetadata...)...) + data = append(data, EncodeUint256(lockedStakeEntry.LockedAmountNanos)...) + data = append(data, UintToBuf(lockedStakeEntry.LockedAtEpochNumber)...) + data = append(data, EncodeExtraData(lockedStakeEntry.ExtraData)...) 
+ return data +} + +func (lockedStakeEntry *LockedStakeEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + + // LockedStakeID + lockedStakeEntry.LockedStakeID, err = DecodeDeSoEncoder(&BlockHash{}, rr) + if err != nil { + return errors.Wrapf(err, "LockedStakeEntry.Decode: Problem reading LockedStakeID: ") + } + + // StakerPKID + lockedStakeEntry.StakerPKID, err = DecodeDeSoEncoder(&PKID{}, rr) + if err != nil { + return errors.Wrapf(err, "LockedStakeEntry.Decode: Problem reading StakerPKID: ") + } + + // ValidatorPKID + lockedStakeEntry.ValidatorPKID, err = DecodeDeSoEncoder(&PKID{}, rr) + if err != nil { + return errors.Wrapf(err, "LockedStakeEntry.Decode: Problem reading ValidatorPKID: ") + } + + // LockedAmountNanos + lockedStakeEntry.LockedAmountNanos, err = DecodeUint256(rr) + if err != nil { + return errors.Wrapf(err, "LockedStakeEntry.Decode: Problem reading LockedAmountNanos: ") + } + + // LockedAtEpochNumber + lockedStakeEntry.LockedAtEpochNumber, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "LockedStakeEntry.Decode: Problem reading LockedAtEpochNumber: ") + } + + // ExtraData + lockedStakeEntry.ExtraData, err = DecodeExtraData(rr) + if err != nil { + return errors.Wrapf(err, "LockedStakeEntry.Decode: Problem reading ExtraData: ") + } + + return err +} + +func (lockedStakeEntry *LockedStakeEntry) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (lockedStakeEntry *LockedStakeEntry) GetEncoderType() EncoderType { + return EncoderTypeLockedStakeEntry +} + +// +// TYPES: StakeMetadata +// + +type StakeMetadata struct { + ValidatorPublicKey *PublicKey + StakeAmountNanos *uint256.Int +} + +func (txnData *StakeMetadata) GetTxnType() TxnType { + return TxnTypeStake +} + +func (txnData *StakeMetadata) ToBytes(preSignature bool) ([]byte, error) { + var data []byte + data = append(data, EncodeByteArray(txnData.ValidatorPublicKey.ToBytes())...) 
+ data = append(data, EncodeUint256(txnData.StakeAmountNanos)...) + return data, nil +} + +func (txnData *StakeMetadata) FromBytes(data []byte) error { + rr := bytes.NewReader(data) + + // ValidatorPublicKey + validatorPublicKeyBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "StakeMetadata.FromBytes: Problem reading ValidatorPublicKey: ") + } + txnData.ValidatorPublicKey = NewPublicKey(validatorPublicKeyBytes) + + // StakeAmountNanos + txnData.StakeAmountNanos, err = DecodeUint256(rr) + if err != nil { + return errors.Wrapf(err, "StakeMetadata.FromBytes: Problem reading StakeAmountNanos: ") + } + + return nil +} + +func (txnData *StakeMetadata) New() DeSoTxnMetadata { + return &StakeMetadata{} +} + +// +// TYPES: UnstakeMetadata +// + +type UnstakeMetadata struct { + ValidatorPublicKey *PublicKey + UnstakeAmountNanos *uint256.Int +} + +func (txnData *UnstakeMetadata) GetTxnType() TxnType { + return TxnTypeUnstake +} + +func (txnData *UnstakeMetadata) ToBytes(preSignature bool) ([]byte, error) { + var data []byte + data = append(data, EncodeByteArray(txnData.ValidatorPublicKey.ToBytes())...) + data = append(data, EncodeUint256(txnData.UnstakeAmountNanos)...) 
+ return data, nil +} + +func (txnData *UnstakeMetadata) FromBytes(data []byte) error { + rr := bytes.NewReader(data) + + // ValidatorPublicKey + validatorPublicKeyBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "UnstakeMetadata.FromBytes: Problem reading ValidatorPublicKey: ") + } + txnData.ValidatorPublicKey = NewPublicKey(validatorPublicKeyBytes) + + // UnstakeAmountNanos + txnData.UnstakeAmountNanos, err = DecodeUint256(rr) + if err != nil { + return errors.Wrapf(err, "UnstakeMetadata.FromBytes: Problem reading UnstakeAmountNanos: ") + } + + return nil +} + +func (txnData *UnstakeMetadata) New() DeSoTxnMetadata { + return &UnstakeMetadata{} +} + +// +// TYPES: UnlockStakeMetadata +// + +type UnlockStakeMetadata struct { + ValidatorPublicKey *PublicKey + StartEpochNumber uint64 + EndEpochNumber uint64 +} + +func (txnData *UnlockStakeMetadata) GetTxnType() TxnType { + return TxnTypeUnlockStake +} + +func (txnData *UnlockStakeMetadata) ToBytes(preSignature bool) ([]byte, error) { + var data []byte + data = append(data, EncodeByteArray(txnData.ValidatorPublicKey.ToBytes())...) + data = append(data, UintToBuf(txnData.StartEpochNumber)...) + data = append(data, UintToBuf(txnData.EndEpochNumber)...) 
+ return data, nil +} + +func (txnData *UnlockStakeMetadata) FromBytes(data []byte) error { + rr := bytes.NewReader(data) + + // ValidatorPublicKey + validatorPublicKeyBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "UnlockStakeMetadata.FromBytes: Problem reading ValidatorPublicKey: ") + } + txnData.ValidatorPublicKey = NewPublicKey(validatorPublicKeyBytes) + + // StartEpochNumber + txnData.StartEpochNumber, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "UnlockStakeMetadata.FromBytes: Problem reading StartEpochNumber: ") + } + + // EndEpochNumber + txnData.EndEpochNumber, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "UnlockStakeMetadata.FromBytes: Problem reading EndEpochNumber: ") + } + + return nil +} + +func (txnData *UnlockStakeMetadata) New() DeSoTxnMetadata { + return &UnlockStakeMetadata{} +} + +// +// TYPES: StakeTxindexMetadata +// + +type StakeTxindexMetadata struct { + StakerPublicKeyBase58Check string + ValidatorPublicKeyBase58Check string + StakeAmountNanos *uint256.Int +} + +func (txindexMetadata *StakeTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, EncodeByteArray([]byte(txindexMetadata.StakerPublicKeyBase58Check))...) + data = append(data, EncodeByteArray([]byte(txindexMetadata.ValidatorPublicKeyBase58Check))...) + data = append(data, EncodeUint256(txindexMetadata.StakeAmountNanos)...) 
+ return data +} + +func (txindexMetadata *StakeTxindexMetadata) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + + // StakerPublicKeyBase58Check + stakerPublicKeyBase58CheckBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "StakeTxindexMetadata.Decode: Problem reading StakerPublicKeyBase58Check: ") + } + txindexMetadata.StakerPublicKeyBase58Check = string(stakerPublicKeyBase58CheckBytes) + + // ValidatorPublicKeyBase58Check + validatorPublicKeyBase58CheckBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "StakeTxindexMetadata.Decode: Problem reading ValidatorPublicKeyBase58Check: ") + } + txindexMetadata.ValidatorPublicKeyBase58Check = string(validatorPublicKeyBase58CheckBytes) + + // StakeAmountNanos + txindexMetadata.StakeAmountNanos, err = DecodeUint256(rr) + if err != nil { + return errors.Wrapf(err, "StakeTxindexMetadata.Decode: Problem reading StakeAmountNanos: ") + } + + return nil +} + +func (txindexMetadata *StakeTxindexMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (txindexMetadata *StakeTxindexMetadata) GetEncoderType() EncoderType { + return EncoderTypeStakeTxindexMetadata +} + +// +// TYPES: UnstakeTxindexMetadata +// + +type UnstakeTxindexMetadata struct { + StakerPublicKeyBase58Check string + ValidatorPublicKeyBase58Check string + UnstakeAmountNanos *uint256.Int +} + +func (txindexMetadata *UnstakeTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, EncodeByteArray([]byte(txindexMetadata.StakerPublicKeyBase58Check))...) + data = append(data, EncodeByteArray([]byte(txindexMetadata.ValidatorPublicKeyBase58Check))...) + data = append(data, EncodeUint256(txindexMetadata.UnstakeAmountNanos)...) 
+ return data +} + +func (txindexMetadata *UnstakeTxindexMetadata) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + + // StakerPublicKeyBase58Check + stakerPublicKeyBase58CheckBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "UnstakeTxindexMetadata.Decode: Problem reading StakerPublicKeyBase58Check: ") + } + txindexMetadata.StakerPublicKeyBase58Check = string(stakerPublicKeyBase58CheckBytes) + + // ValidatorPublicKeyBase58Check + validatorPublicKeyBase58CheckBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "UnstakeTxindexMetadata.Decode: Problem reading ValidatorPublicKeyBase58Check: ") + } + txindexMetadata.ValidatorPublicKeyBase58Check = string(validatorPublicKeyBase58CheckBytes) + + // UnstakeAmountNanos + txindexMetadata.UnstakeAmountNanos, err = DecodeUint256(rr) + if err != nil { + return errors.Wrapf(err, "UnstakeTxindexMetadata.Decode: Problem reading UnstakeAmountNanos: ") + } + + return nil +} + +func (txindexMetadata *UnstakeTxindexMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (txindexMetadata *UnstakeTxindexMetadata) GetEncoderType() EncoderType { + return EncoderTypeUnstakeTxindexMetadata +} + +// +// TYPES: UnlockStakeTxindexMetadata +// + +type UnlockStakeTxindexMetadata struct { + StakerPublicKeyBase58Check string + ValidatorPublicKeyBase58Check string + StartEpochNumber uint64 + EndEpochNumber uint64 + TotalUnlockedAmountNanos *uint256.Int +} + +func (txindexMetadata *UnlockStakeTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, EncodeByteArray([]byte(txindexMetadata.StakerPublicKeyBase58Check))...) + data = append(data, EncodeByteArray([]byte(txindexMetadata.ValidatorPublicKeyBase58Check))...) + data = append(data, UintToBuf(txindexMetadata.StartEpochNumber)...) + data = append(data, UintToBuf(txindexMetadata.EndEpochNumber)...) 
+ data = append(data, EncodeUint256(txindexMetadata.TotalUnlockedAmountNanos)...) + return data +} + +func (txindexMetadata *UnlockStakeTxindexMetadata) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + + // StakerPublicKeyBase58Check + stakerPublicKeyBase58CheckBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "UnlockStakeTxindexMetadata.Decode: Problem reading StakerPublicKeyBase58Check: ") + } + txindexMetadata.StakerPublicKeyBase58Check = string(stakerPublicKeyBase58CheckBytes) + + // ValidatorPublicKeyBase58Check + validatorPublicKeyBase58CheckBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "UnlockStakeTxindexMetadata.Decode: Problem reading ValidatorPublicKeyBase58Check: ") + } + txindexMetadata.ValidatorPublicKeyBase58Check = string(validatorPublicKeyBase58CheckBytes) + + // StartEpochNumber + txindexMetadata.StartEpochNumber, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "UnlockStakeTxindexMetadata.Decode: Problem reading StartEpochNumber: ") + } + + // EndEpochNumber + txindexMetadata.EndEpochNumber, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "UnlockStakeTxindexMetadata.Decode: Problem reading EndEpochNumber: ") + } + + // TotalUnlockedAmountNanos + txindexMetadata.TotalUnlockedAmountNanos, err = DecodeUint256(rr) + if err != nil { + return errors.Wrapf(err, "UnlockStakeTxindexMetadata.Decode: Problem reading TotalUnlockedAmountNanos: ") + } + + return nil +} + +func (txindexMetadata *UnlockStakeTxindexMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (txindexMetadata *UnlockStakeTxindexMetadata) GetEncoderType() EncoderType { + return EncoderTypeUnlockStakeTxindexMetadata +} + +// +// DB UTILS +// + +func DBKeyForStakeByValidatorByStaker(stakeEntry *StakeEntry) []byte { + var data []byte + data = append(data, Prefixes.PrefixStakeByValidatorByStaker...) 
+ data = append(data, stakeEntry.ValidatorPKID.ToBytes()...) + data = append(data, stakeEntry.StakerPKID.ToBytes()...) + return data +} + +func DBKeyForLockedStakeByValidatorByStakerByLockedAt(lockedStakeEntry *LockedStakeEntry) []byte { + data := DBPrefixKeyForLockedStakeByValidatorByStaker(lockedStakeEntry) + data = append(data, UintToBuf(lockedStakeEntry.LockedAtEpochNumber)...) + return data +} + +func DBPrefixKeyForLockedStakeByValidatorByStaker(lockedStakeEntry *LockedStakeEntry) []byte { + var data []byte + data = append(data, Prefixes.PrefixLockedStakeByValidatorByStakerByLockedAt...) + data = append(data, lockedStakeEntry.ValidatorPKID.ToBytes()...) + data = append(data, lockedStakeEntry.StakerPKID.ToBytes()...) + return data +} + +func DBGetStakeEntry( + handle *badger.DB, + snap *Snapshot, + validatorPKID *PKID, + stakerPKID *PKID, +) (*StakeEntry, error) { + var ret *StakeEntry + err := handle.View(func(txn *badger.Txn) error { + var innerErr error + ret, innerErr = DBGetStakeEntryWithTxn(txn, snap, validatorPKID, stakerPKID) + return innerErr + }) + return ret, err +} + +func DBGetStakeEntryWithTxn( + txn *badger.Txn, + snap *Snapshot, + validatorPKID *PKID, + stakerPKID *PKID, +) (*StakeEntry, error) { + // Retrieve StakeEntry from db. + key := DBKeyForStakeByValidatorByStaker(&StakeEntry{ValidatorPKID: validatorPKID, StakerPKID: stakerPKID}) + stakeEntryBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + // We don't want to error if the key isn't found. Instead, return nil. + if err == badger.ErrKeyNotFound { + return nil, nil + } + return nil, errors.Wrapf(err, "DBGetStakeByValidatorByStaker: problem retrieving StakeEntry: ") + } + + // Decode StakeEntry from bytes. 
+ rr := bytes.NewReader(stakeEntryBytes) + stakeEntry, err := DecodeDeSoEncoder(&StakeEntry{}, rr) + if err != nil { + return nil, errors.Wrapf(err, "DBGetStakeByValidatorByStaker: problem decoding StakeEntry: ") + } + return stakeEntry, nil +} + +func DBGetLockedStakeEntry( + handle *badger.DB, + snap *Snapshot, + validatorPKID *PKID, + stakerPKID *PKID, + lockedAtEpochNumber uint64, +) (*LockedStakeEntry, error) { + var ret *LockedStakeEntry + err := handle.View(func(txn *badger.Txn) error { + var innerErr error + ret, innerErr = DBGetLockedStakeEntryWithTxn( + txn, snap, validatorPKID, stakerPKID, lockedAtEpochNumber, + ) + return innerErr + }) + return ret, err +} + +func DBGetLockedStakeEntryWithTxn( + txn *badger.Txn, + snap *Snapshot, + validatorPKID *PKID, + stakerPKID *PKID, + lockedAtEpochNumber uint64, +) (*LockedStakeEntry, error) { + // Retrieve LockedStakeEntry from db. + key := DBKeyForLockedStakeByValidatorByStakerByLockedAt(&LockedStakeEntry{ + ValidatorPKID: validatorPKID, + StakerPKID: stakerPKID, + LockedAtEpochNumber: lockedAtEpochNumber, + }) + lockedStakeEntryBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + // We don't want to error if the key isn't found. Instead, return nil. + if err == badger.ErrKeyNotFound { + return nil, nil + } + return nil, errors.Wrapf( + err, "DBGetLockedStakeByValidatorByStakerByLockedAt: problem retrieving LockedStakeEntry: ", + ) + } + + // Decode LockedStakeEntry from bytes. 
+ rr := bytes.NewReader(lockedStakeEntryBytes) + lockedStakeEntry, err := DecodeDeSoEncoder(&LockedStakeEntry{}, rr) + if err != nil { + return nil, errors.Wrapf( + err, "DBGetLockedStakeByValidatorByStakerByLockedAt: problem decoding LockedStakeEntry: ", + ) + } + return lockedStakeEntry, nil +} + +func DBGetLockedStakeEntriesInRange( + handle *badger.DB, + snap *Snapshot, + validatorPKID *PKID, + stakerPKID *PKID, + startEpochNumber uint64, + endEpochNumber uint64, +) ([]*LockedStakeEntry, error) { + var ret []*LockedStakeEntry + var err error + handle.View(func(txn *badger.Txn) error { + ret, err = DBGetLockedStakeEntriesInRangeWithTxn( + txn, snap, validatorPKID, stakerPKID, startEpochNumber, endEpochNumber, + ) + return nil + }) + return ret, err +} + +func DBGetLockedStakeEntriesInRangeWithTxn( + txn *badger.Txn, + snap *Snapshot, + validatorPKID *PKID, + stakerPKID *PKID, + startEpochNumber uint64, + endEpochNumber uint64, +) ([]*LockedStakeEntry, error) { + // Retrieve LockedStakeEntries from db matching ValidatorPKID, StakerPKID, and + // StartEpochNumber <= LockedAtEpochNumber <= EndEpochNumber. + + // Start at the StartEpochNumber. + startKey := DBKeyForLockedStakeByValidatorByStakerByLockedAt(&LockedStakeEntry{ + ValidatorPKID: validatorPKID, + StakerPKID: stakerPKID, + LockedAtEpochNumber: startEpochNumber, + }) + + // Consider only LockedStakeEntries for this ValidatorPKID, StakerPKID. + prefixKey := DBPrefixKeyForLockedStakeByValidatorByStaker(&LockedStakeEntry{ + ValidatorPKID: validatorPKID, + StakerPKID: stakerPKID, + }) + + // Create an iterator. + iterator := txn.NewIterator(badger.DefaultIteratorOptions) + defer iterator.Close() + + // Store matching LockedStakeEntries to return. + var lockedStakeEntries []*LockedStakeEntry + + // Loop. + for iterator.Seek(startKey); iterator.ValidForPrefix(prefixKey); iterator.Next() { + // Retrieve the LockedStakeEntryBytes. 
+ lockedStakeEntryBytes, err := iterator.Item().ValueCopy(nil) + if err != nil { + return nil, errors.Wrapf(err, "DBGetLockedStakeEntriesInRange: error retrieving LockedStakeEntry: ") + } + + // Convert LockedStakeEntryBytes to LockedStakeEntry. + rr := bytes.NewReader(lockedStakeEntryBytes) + lockedStakeEntry, err := DecodeDeSoEncoder(&LockedStakeEntry{}, rr) + if err != nil { + return nil, errors.Wrapf(err, "DBGetLockedStakeEntriesInRange: error decoding LockedStakeEntry: ") + } + + // Break if LockedStakeEntry.LockedAtEpochNumber > EndEpochNumber. + if lockedStakeEntry.LockedAtEpochNumber > endEpochNumber { + break + } + + // Add LockedStakeEntry to return slice. + lockedStakeEntries = append(lockedStakeEntries, lockedStakeEntry) + } + + return lockedStakeEntries, nil +} + +func DBPutStakeEntryWithTxn( + txn *badger.Txn, + snap *Snapshot, + stakeEntry *StakeEntry, + blockHeight uint64, +) error { + if stakeEntry == nil { + return nil + } + + // Set StakeEntry in PrefixStakeByValidatorByStaker. + key := DBKeyForStakeByValidatorByStaker(stakeEntry) + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, stakeEntry)); err != nil { + return errors.Wrapf( + err, "DBPutStakeWithTxn: problem storing StakeEntry in index PrefixStakeByValidatorByStaker", + ) + } + + return nil +} + +func DBPutLockedStakeEntryWithTxn( + txn *badger.Txn, + snap *Snapshot, + lockedStakeEntry *LockedStakeEntry, + blockHeight uint64, +) error { + if lockedStakeEntry == nil { + return nil + } + + // Set LockedStakeEntry in PrefixLockedStakeByValidatorByStakerByLockedAt. 
+ key := DBKeyForLockedStakeByValidatorByStakerByLockedAt(lockedStakeEntry) + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, lockedStakeEntry)); err != nil { + return errors.Wrapf( + err, "DBPutLockedStakeWithTxn: problem storing LockedStakeEntry in index PrefixLockedStakeByValidatorByStakerByLockedAt", + ) + } + + return nil +} + +func DBDeleteStakeEntryWithTxn( + txn *badger.Txn, + snap *Snapshot, + stakeEntry *StakeEntry, + blockHeight uint64, +) error { + if stakeEntry == nil { + return nil + } + + // Delete StakeEntry from PrefixStakeByValidatorByStaker. + key := DBKeyForStakeByValidatorByStaker(stakeEntry) + if err := DBDeleteWithTxn(txn, snap, key); err != nil { + return errors.Wrapf( + err, "DBDeleteStakeWithTxn: problem deleting StakeEntry from index PrefixStakeByValidatorByStaker", + ) + } + + return nil +} + +func DBDeleteLockedStakeEntryWithTxn( + txn *badger.Txn, + snap *Snapshot, + lockedStakeEntry *LockedStakeEntry, + blockHeight uint64, +) error { + if lockedStakeEntry == nil { + return nil + } + + // Delete LockedStakeEntry from PrefixLockedStakeByValidatorByStakerByLockedAt. + key := DBKeyForLockedStakeByValidatorByStakerByLockedAt(lockedStakeEntry) + if err := DBDeleteWithTxn(txn, snap, key); err != nil { + return errors.Wrapf( + err, "DBDeleteLockedStakeWithTxn: problem deleting StakeEntry from index PrefixLockedStakeByValidatorByStakerByLockedAt", + ) + } + + return nil +} + +// +// BLOCKCHAIN UTILS +// + +func (bc *Blockchain) CreateStakeTxn( + transactorPublicKey []byte, + metadata *StakeMetadata, + extraData map[string][]byte, + minFeeRateNanosPerKB uint64, + mempool *DeSoMempool, + additionalOutputs []*DeSoOutput, +) ( + _txn *MsgDeSoTxn, + _totalInput uint64, + _changeAmount uint64, + _fees uint64, + _err error, +) { + // Create a txn containing the metadata fields. 
+ txn := &MsgDeSoTxn{ + PublicKey: transactorPublicKey, + TxnMeta: metadata, + TxOutputs: additionalOutputs, + ExtraData: extraData, + // We wait to compute the signature until + // we've added all the inputs and change. + } + + // Create a new UtxoView. If we have access to a mempool object, use + // it to get an augmented view that factors in pending transactions. + utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) + if err != nil { + return nil, 0, 0, 0, errors.Wrap( + err, "Blockchain.CreateStakeTxn: problem creating new utxo view: ", + ) + } + if mempool != nil { + utxoView, err = mempool.GetAugmentedUniversalView() + if err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateStakeTxn: problem getting augmented utxo view from mempool: ", + ) + } + } + + // Validate txn metadata. + blockHeight := bc.blockTip().Height + 1 + if err = utxoView.IsValidStakeMetadata(transactorPublicKey, metadata, blockHeight); err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateStakeTxn: invalid txn metadata: ", + ) + } + + // We don't need to make any tweaks to the amount because + // it's basically a standard "pay per kilobyte" transaction. + totalInput, spendAmount, changeAmount, fees, err := bc.AddInputsAndChangeToTransaction( + txn, minFeeRateNanosPerKB, mempool, + ) + if err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateStakeTxn: problem adding inputs: ", + ) + } + + // Validate that the transaction has at least one input, even if it all goes + // to change. This ensures that the transaction will not be "replayable." + if len(txn.TxInputs) == 0 && bc.blockTip().Height+1 < bc.params.ForkHeights.BalanceModelBlockHeight { + return nil, 0, 0, 0, errors.New( + "Blockchain.CreateStakeTxn: txn has zero inputs, try increasing the fee rate", + ) + } + + // Sanity-check that the spendAmount is zero. 
+ if spendAmount != 0 { + return nil, 0, 0, 0, fmt.Errorf( + "Blockchain.CreateStakeTxn: spend amount is non-zero: %d", spendAmount, + ) + } + return txn, totalInput, changeAmount, fees, nil +} + +func (bc *Blockchain) CreateUnstakeTxn( + transactorPublicKey []byte, + metadata *UnstakeMetadata, + extraData map[string][]byte, + minFeeRateNanosPerKB uint64, + mempool *DeSoMempool, + additionalOutputs []*DeSoOutput, +) ( + _txn *MsgDeSoTxn, + _totalInput uint64, + _changeAmount uint64, + _fees uint64, + _err error, +) { + // Create a txn containing the metadata fields. + txn := &MsgDeSoTxn{ + PublicKey: transactorPublicKey, + TxnMeta: metadata, + TxOutputs: additionalOutputs, + ExtraData: extraData, + // We wait to compute the signature until + // we've added all the inputs and change. + } + + // Create a new UtxoView. If we have access to a mempool object, use + // it to get an augmented view that factors in pending transactions. + utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) + if err != nil { + return nil, 0, 0, 0, errors.Wrap( + err, "Blockchain.CreateUnstakeTxn: problem creating new utxo view: ", + ) + } + if mempool != nil { + utxoView, err = mempool.GetAugmentedUniversalView() + if err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateUnstakeTxn: problem getting augmented utxo view from mempool: ", + ) + } + } + + // Validate txn metadata. + if err = utxoView.IsValidUnstakeMetadata(transactorPublicKey, metadata); err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateUnstakeTxn: invalid txn metadata: ", + ) + } + + // We don't need to make any tweaks to the amount because + // it's basically a standard "pay per kilobyte" transaction. 
+ totalInput, spendAmount, changeAmount, fees, err := bc.AddInputsAndChangeToTransaction( + txn, minFeeRateNanosPerKB, mempool, + ) + if err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateUnstakeTxn: problem adding inputs: ", + ) + } + + // Validate that the transaction has at least one input, even if it all goes + // to change. This ensures that the transaction will not be "replayable." + if len(txn.TxInputs) == 0 && bc.blockTip().Height+1 < bc.params.ForkHeights.BalanceModelBlockHeight { + return nil, 0, 0, 0, errors.New( + "Blockchain.CreateUnstakeTxn: txn has zero inputs, try increasing the fee rate", + ) + } + + // Sanity-check that the spendAmount is zero. + if spendAmount != 0 { + return nil, 0, 0, 0, fmt.Errorf( + "Blockchain.CreateUnstakeTxn: spend amount is non-zero: %d", spendAmount, + ) + } + return txn, totalInput, changeAmount, fees, nil +} + +func (bc *Blockchain) CreateUnlockStakeTxn( + transactorPublicKey []byte, + metadata *UnlockStakeMetadata, + extraData map[string][]byte, + minFeeRateNanosPerKB uint64, + mempool *DeSoMempool, + additionalOutputs []*DeSoOutput, +) ( + _txn *MsgDeSoTxn, + _totalInput uint64, + _changeAmount uint64, + _fees uint64, + _err error, +) { + // Create a txn containing the metadata fields. + txn := &MsgDeSoTxn{ + PublicKey: transactorPublicKey, + TxnMeta: metadata, + TxOutputs: additionalOutputs, + ExtraData: extraData, + // We wait to compute the signature until + // we've added all the inputs and change. + } + + // Create a new UtxoView. If we have access to a mempool object, use + // it to get an augmented view that factors in pending transactions. 
+ utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) + if err != nil { + return nil, 0, 0, 0, errors.Wrap( + err, "Blockchain.CreateUnlockStakeTxn: problem creating new utxo view: ", + ) + } + if mempool != nil { + utxoView, err = mempool.GetAugmentedUniversalView() + if err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateUnlockStakeTxn: problem getting augmented utxo view from mempool: ", + ) + } + } + + // Validate txn metadata. + if err = utxoView.IsValidUnlockStakeMetadata(transactorPublicKey, metadata); err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateUnlockStakeTxn: invalid txn metadata: ", + ) + } + + // We don't need to make any tweaks to the amount because + // it's basically a standard "pay per kilobyte" transaction. + totalInput, spendAmount, changeAmount, fees, err := bc.AddInputsAndChangeToTransaction( + txn, minFeeRateNanosPerKB, mempool, + ) + if err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateUnlockStakeTxn: problem adding inputs: ", + ) + } + + // Validate that the transaction has at least one input, even if it all goes + // to change. This ensures that the transaction will not be "replayable." + if len(txn.TxInputs) == 0 && bc.blockTip().Height+1 < bc.params.ForkHeights.BalanceModelBlockHeight { + return nil, 0, 0, 0, errors.New( + "Blockchain.CreateUnlockStakeTxn: txn has zero inputs, try increasing the fee rate", + ) + } + + // Sanity-check that the spendAmount is zero. 
+ if spendAmount != 0 { + return nil, 0, 0, 0, fmt.Errorf( + "Blockchain.CreateUnlockStakeTxn: spend amount is non-zero: %d", spendAmount, + ) + } + return txn, totalInput, changeAmount, fees, nil +} + +// +// UTXO VIEW UTILS +// + +func (bav *UtxoView) _connectStake( + txn *MsgDeSoTxn, + txHash *BlockHash, + blockHeight uint32, + verifySignatures bool, +) ( + _totalInput uint64, + _totalOutput uint64, + _utxoOps []*UtxoOperation, + _err error, +) { + // Validate the starting block height. + if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight || + blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { + return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectStake: ") + } + + // Validate the txn TxnType. + if txn.TxnMeta.GetTxnType() != TxnTypeStake { + return 0, 0, nil, fmt.Errorf( + "_connectStake: called with bad TxnType %s", txn.TxnMeta.GetTxnType().String(), + ) + } + + // Grab the txn metadata. + txMeta := txn.TxnMeta.(*StakeMetadata) + + // Validate the txn metadata. + if err := bav.IsValidStakeMetadata(txn.PublicKey, txMeta, blockHeight); err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: ") + } + + // Convert TransactorPublicKey to TransactorPKID. + transactorPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidStakerPKID, "_connectStake: ") + } + + // Retrieve the existing ValidatorEntry. It must exist. The PrevValidatorEntry + // will be restored if we disconnect this transaction. 
+ prevValidatorEntry, err := bav.GetValidatorByPublicKey(txMeta.ValidatorPublicKey) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: ") + } + if prevValidatorEntry == nil || prevValidatorEntry.isDeleted || prevValidatorEntry.DisableDelegatedStake { + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidValidatorPKID, "_connectStake: ") + } + + // Convert StakeAmountNanos *uint256.Int to StakeAmountNanosUint64 uint64. + if txMeta.StakeAmountNanos == nil || !txMeta.StakeAmountNanos.IsUint64() { + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidStakeAmountNanos, "_connectStake: ") + } + stakeAmountNanosUint64 := txMeta.StakeAmountNanos.Uint64() + + // Connect a BasicTransfer to get the total input and the + // total output without considering the txn metadata. This + // BasicTransfer also includes the extra spend associated + // with the amount the transactor is staking. + totalInput, totalOutput, utxoOpsForTxn, err := bav._connectBasicTransferWithExtraSpend( + txn, txHash, blockHeight, stakeAmountNanosUint64, verifySignatures, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: ") + } + if verifySignatures { + // _connectBasicTransfer has already checked that the txn is signed + // by the top-level public key, which we take to be the sender's + // public key so there is no need to verify anything further. + } + + // Check if there is an existing StakeEntry that will be updated. + // The existing StakeEntry will be restored if we disconnect this transaction. + prevStakeEntry, err := bav.GetStakeEntry(prevValidatorEntry.ValidatorPKID, transactorPKIDEntry.PKID) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: ") + } + // Delete the existing StakeEntry, if exists. + if prevStakeEntry != nil { + bav._deleteStakeEntryMappings(prevStakeEntry) + } + + // Set StakeID only if this is a new StakeEntry. 
+ stakeID := txHash + if prevStakeEntry != nil { + stakeID = prevStakeEntry.StakeID + } + + // Calculate StakeAmountNanos. + stakeAmountNanos := txMeta.StakeAmountNanos.Clone() + if prevStakeEntry != nil { + stakeAmountNanos, err = SafeUint256().Add(stakeAmountNanos, prevStakeEntry.StakeAmountNanos) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: error adding StakeAmountNanos to existing StakeAmountNanos: ") + } + } + + // Retrieve existing ExtraData to merge with any new ExtraData. + var prevExtraData map[string][]byte + if prevStakeEntry != nil { + prevExtraData = prevStakeEntry.ExtraData + } + + // Construct new StakeEntry from metadata. + currentStakeEntry := &StakeEntry{ + StakeID: stakeID, + StakerPKID: transactorPKIDEntry.PKID, + ValidatorPKID: prevValidatorEntry.ValidatorPKID, + StakeAmountNanos: stakeAmountNanos, + ExtraData: mergeExtraData(prevExtraData, txn.ExtraData), + } + // Set the new StakeEntry. + bav._setStakeEntryMappings(currentStakeEntry) + + // Update the ValidatorEntry.TotalStakeAmountNanos. + // 1. Copy the existing ValidatorEntry. + currentValidatorEntry := prevValidatorEntry.Copy() + // 2. Delete the existing ValidatorEntry. + bav._deleteValidatorEntryMappings(prevValidatorEntry) + // 3. Update the new ValidatorEntry's TotalStakeAmountNanos. + currentValidatorEntry.TotalStakeAmountNanos, err = SafeUint256().Add( + currentValidatorEntry.TotalStakeAmountNanos, txMeta.StakeAmountNanos, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: error adding StakeAmountNanos to TotalStakeAmountNanos: ") + } + // 4. Set the new ValidatorEntry. + bav._setValidatorEntryMappings(currentValidatorEntry) + + // Increase the GlobalStakeAmountNanos. + // Retrieve the existing GlobalStakeAmountNanos. + // The PrevGlobalStakeAmountNanos will be restored if we disconnect this transaction. 
+ prevGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: error retrieving GlobalStakeAmountNanos: ") + } + globalStakeAmountNanos, err := SafeUint256().Add(prevGlobalStakeAmountNanos, txMeta.StakeAmountNanos) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: error adding StakeAmountNanos to GlobalStakeAmountNanos: ") + } + // Set the new GlobalStakeAmountNanos. + bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) + + // Add the StakeAmountNanos to TotalOutput. The coins being staked are already + // part of the TotalInput. But they are not burned, so they are an implicit + // output even though they do not go to a specific public key's balance. + totalOutput, err = SafeUint64().Add(totalOutput, stakeAmountNanosUint64) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: error adding StakeAmountNanos to TotalOutput: ") + } + + // Add a UTXO operation + utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + Type: OperationTypeStake, + PrevValidatorEntry: prevValidatorEntry, + PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, + PrevStakeEntries: []*StakeEntry{prevStakeEntry}, + }) + return totalInput, totalOutput, utxoOpsForTxn, nil +} + +func (bav *UtxoView) _disconnectStake( + operationType OperationType, + currentTxn *MsgDeSoTxn, + txHash *BlockHash, + utxoOpsForTxn []*UtxoOperation, + blockHeight uint32, +) error { + // Validate the starting block height. + if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight || + blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { + return errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_disconnectStake: ") + } + + // Validate the last operation is a Stake operation. 
+ if len(utxoOpsForTxn) == 0 { + return fmt.Errorf("_disconnectStake: utxoOperations are missing") + } + operationIndex := len(utxoOpsForTxn) - 1 + operationData := utxoOpsForTxn[operationIndex] + if operationData.Type != OperationTypeStake { + return fmt.Errorf( + "_disconnectStake: trying to revert %v but found %v", + OperationTypeStake, + operationData.Type, + ) + } + txMeta := currentTxn.TxnMeta.(*StakeMetadata) + + // Convert TransactorPublicKey to TransactorPKID. + transactorPKIDEntry := bav.GetPKIDForPublicKey(currentTxn.PublicKey) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return errors.Wrapf(RuleErrorInvalidStakerPKID, "_disconnectStake: ") + } + + // Restore the PrevValidatorEntry. + prevValidatorEntry := operationData.PrevValidatorEntry + if prevValidatorEntry == nil { + return fmt.Errorf( + "_disconnectStake: no prev ValidatorEntry found for %v", txMeta.ValidatorPublicKey, + ) + } + // 1. Delete the CurrentValidatorEntry. + currentValidatorEntry, err := bav.GetValidatorByPKID(prevValidatorEntry.ValidatorPKID) + if err != nil { + return errors.Wrapf(err, "_disconnectStake: ") + } + if currentValidatorEntry == nil { + return fmt.Errorf( + "_disconnectStake: no current ValidatorEntry found for %v", txMeta.ValidatorPublicKey, + ) + } + bav._deleteValidatorEntryMappings(currentValidatorEntry) + // 2. Set the PrevValidatorEntry. + bav._setValidatorEntryMappings(prevValidatorEntry) + + // Restore the PrevStakeEntry. + // 1. Delete the CurrentStakeEntry. + currentStakeEntry, err := bav.GetStakeEntry(prevValidatorEntry.ValidatorPKID, transactorPKIDEntry.PKID) + if err != nil { + return errors.Wrapf(err, "_disconnectStake: ") + } + if currentStakeEntry == nil { + return fmt.Errorf("_disconnectStake: no current StakeEntry found for %v", currentTxn.PublicKey) + } + bav._deleteStakeEntryMappings(currentStakeEntry) + // 2. Set the PrevStakeEntry, if exists. 
The PrevStakeEntry will exist if the transactor + // was adding stake to an existing StakeEntry. It will not exist if this is the first + // stake the transactor has staked with this validator. + if len(operationData.PrevStakeEntries) > 1 { + return fmt.Errorf("_disconnectStake: more than one prev StakeEntry found for %v", currentTxn.PublicKey) + } else if len(operationData.PrevStakeEntries) == 1 { + bav._setStakeEntryMappings(operationData.PrevStakeEntries[0]) + } + + // Restore the PrevGlobalStakeAmountNanos. + bav._setGlobalStakeAmountNanos(operationData.PrevGlobalStakeAmountNanos) + + // Disconnect the BasicTransfer. Disconnecting the BasicTransfer also returns + // the extra spend associated with the amount the transactor staked. + return bav._disconnectBasicTransfer( + currentTxn, txHash, utxoOpsForTxn[:operationIndex], blockHeight, + ) +} + +func (bav *UtxoView) _connectUnstake( + txn *MsgDeSoTxn, + txHash *BlockHash, + blockHeight uint32, + verifySignatures bool, +) ( + _totalInput uint64, + _totalOutput uint64, + _utxoOps []*UtxoOperation, + _err error, +) { + // Validate the starting block height. + if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight || + blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { + return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectUnstake: ") + } + + // Validate the txn TxnType. + if txn.TxnMeta.GetTxnType() != TxnTypeUnstake { + return 0, 0, nil, fmt.Errorf( + "_connectUnstake: called with bad TxnType %s", txn.TxnMeta.GetTxnType().String(), + ) + } + + // Connect a basic transfer to get the total input and the + // total output without considering the txn metadata. 
+ totalInput, totalOutput, utxoOpsForTxn, err := bav._connectBasicTransfer( + txn, txHash, blockHeight, verifySignatures, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ") + } + if verifySignatures { + // _connectBasicTransfer has already checked that the txn is signed + // by the top-level public key, which we take to be the sender's + // public key so there is no need to verify anything further. + } + + // Grab the txn metadata. + txMeta := txn.TxnMeta.(*UnstakeMetadata) + + // Validate the txn metadata. + if err = bav.IsValidUnstakeMetadata(txn.PublicKey, txMeta); err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ") + } + + // Convert TransactorPublicKey to TransactorPKID. + transactorPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidStakerPKID, "_connectUnstake: ") + } + + // Retrieve PrevValidatorEntry. This will be restored if we disconnect the txn. + prevValidatorEntry, err := bav.GetValidatorByPublicKey(txMeta.ValidatorPublicKey) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ") + } + if prevValidatorEntry == nil || prevValidatorEntry.isDeleted { + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidValidatorPKID, "_connectUnstake: ") + } + + // Retrieve PrevStakeEntry. This will be restored if we disconnect the txn. 
+ prevStakeEntry, err := bav.GetStakeEntry(prevValidatorEntry.ValidatorPKID, transactorPKIDEntry.PKID) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ") + } + if prevStakeEntry == nil || prevStakeEntry.isDeleted { + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidUnstakeNoStakeFound, "_connectUnstake: ") + } + if prevStakeEntry.StakeAmountNanos.Cmp(txMeta.UnstakeAmountNanos) < 0 { + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidUnstakeInsufficientStakeFound, "_connectUnstake: ") + } + + // Update the StakeEntry, decreasing the StakeAmountNanos. + // 1. Calculate the updated StakeAmountNanos. + stakeAmountNanos, err := SafeUint256().Sub(prevStakeEntry.StakeAmountNanos, txMeta.UnstakeAmountNanos) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error subtracting UnstakeAmountNanos from StakeAmountNanos: ") + } + // 2. Create a CurrentStakeEntry, if updated StakeAmountNanos > 0. + var currentStakeEntry *StakeEntry + if stakeAmountNanos.Cmp(uint256.NewInt()) > 0 { + currentStakeEntry = prevStakeEntry.Copy() + currentStakeEntry.StakeAmountNanos = stakeAmountNanos.Clone() + } + // 3. Delete the PrevStakeEntry. + bav._deleteStakeEntryMappings(prevStakeEntry) + // 4. Set the CurrentStakeEntry, if exists. The CurrentStakeEntry will not exist + // if the transactor has unstaked all stake assigned to this validator. + if currentStakeEntry != nil { + bav._setStakeEntryMappings(currentStakeEntry) + } + + // Update the ValidatorEntry.TotalStakeAmountNanos. + // 1. Copy the existing ValidatorEntry. + currentValidatorEntry := prevValidatorEntry.Copy() + // 2. Delete the existing ValidatorEntry. + bav._deleteValidatorEntryMappings(prevValidatorEntry) + // 3. Update the new ValidatorEntry's TotalStakeAmountNanos. 
+	currentValidatorEntry.TotalStakeAmountNanos, err = SafeUint256().Sub(
+		currentValidatorEntry.TotalStakeAmountNanos, txMeta.UnstakeAmountNanos,
+	)
+	if err != nil {
+		return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error subtracting UnstakeAmountNanos from TotalStakeAmountNanos: ")
+	}
+	// 4. Set the new ValidatorEntry.
+	bav._setValidatorEntryMappings(currentValidatorEntry)
+
+	// Decrease the GlobalStakeAmountNanos.
+	// 1. Retrieve the existing GlobalStakeAmountNanos. This will be restored if we disconnect this txn.
+	prevGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos()
+	if err != nil {
+		return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error retrieving GlobalStakeAmountNanos: ")
+	}
+	globalStakeAmountNanos, err := SafeUint256().Sub(prevGlobalStakeAmountNanos, txMeta.UnstakeAmountNanos)
+	if err != nil {
+		return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error subtracting UnstakeAmountNanos from GlobalStakeAmountNanos: ")
+	}
+	// 2. Set the new GlobalStakeAmountNanos.
+	bav._setGlobalStakeAmountNanos(globalStakeAmountNanos)
+
+	// Update the LockedStakeEntry, if exists. Create if not.
+	currentEpochNumber := uint64(0) // TODO: set this
+	// 1. Retrieve the PrevLockedStakeEntry. This will be restored if we disconnect this txn.
+	prevLockedStakeEntry, err := bav.GetLockedStakeEntry(
+		prevValidatorEntry.ValidatorPKID, transactorPKIDEntry.PKID, currentEpochNumber,
+	)
+	if err != nil {
+		return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ")
+	}
+	// 2. Create a CurrentLockedStakeEntry.
+	var currentLockedStakeEntry *LockedStakeEntry
+	if prevLockedStakeEntry != nil {
+		// Update the existing LockedStakeEntry.
+ currentLockedStakeEntry = prevLockedStakeEntry.Copy() + currentLockedStakeEntry.LockedAmountNanos, err = SafeUint256().Add( + prevLockedStakeEntry.LockedAmountNanos, txMeta.UnstakeAmountNanos, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error adding UnstakeAmountNanos to LockedAmountNanos") + } + currentLockedStakeEntry.ExtraData = mergeExtraData(prevLockedStakeEntry.ExtraData, txn.ExtraData) + } else { + // Create a new LockedStakeEntry. + currentLockedStakeEntry = &LockedStakeEntry{ + LockedStakeID: txn.Hash(), + StakerPKID: transactorPKIDEntry.PKID, + ValidatorPKID: prevValidatorEntry.ValidatorPKID, + LockedAmountNanos: txMeta.UnstakeAmountNanos, + LockedAtEpochNumber: currentEpochNumber, + ExtraData: txn.ExtraData, + } + } + // 3. Delete the PrevLockedStakeEntry, if exists. + if prevLockedStakeEntry != nil { + bav._deleteLockedStakeEntryMappings(prevLockedStakeEntry) + } + // 4. Set the CurrentLockedStakeEntry. + bav._setLockedStakeEntryMappings(currentLockedStakeEntry) + + // Add a UTXO operation + utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + Type: OperationTypeUnstake, + PrevValidatorEntry: prevValidatorEntry, + PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, + PrevStakeEntries: []*StakeEntry{prevStakeEntry}, + PrevLockedStakeEntries: []*LockedStakeEntry{prevLockedStakeEntry}, + }) + return totalInput, totalOutput, utxoOpsForTxn, nil +} + +func (bav *UtxoView) _disconnectUnstake( + operationType OperationType, + currentTxn *MsgDeSoTxn, + txHash *BlockHash, + utxoOpsForTxn []*UtxoOperation, + blockHeight uint32, +) error { + // Validate the starting block height. + if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight || + blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { + return errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_disconnectUnstake: ") + } + + // Validate the last operation is an Unstake operation. 
+ if len(utxoOpsForTxn) == 0 { + return fmt.Errorf("_disconnectUnstake: utxoOperations are missing") + } + operationIndex := len(utxoOpsForTxn) - 1 + operationData := utxoOpsForTxn[operationIndex] + if operationData.Type != OperationTypeUnstake { + return fmt.Errorf( + "_disconnectUnstake: trying to revert %v but found %v", + OperationTypeUnstake, + operationData.Type, + ) + } + txMeta := currentTxn.TxnMeta.(*UnstakeMetadata) + + // Convert TransactorPublicKey to TransactorPKID. + transactorPKIDEntry := bav.GetPKIDForPublicKey(currentTxn.PublicKey) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return errors.Wrapf(RuleErrorInvalidStakerPKID, "_disconnectUnstake: ") + } + + // Restore the PrevValidatorEntry. + prevValidatorEntry := operationData.PrevValidatorEntry + if prevValidatorEntry == nil { + return fmt.Errorf( + "_disconnectUnstake: no prev ValidatorEntry found for %v", txMeta.ValidatorPublicKey, + ) + } + // 1. Delete the CurrentValidatorEntry. + currentValidatorEntry, err := bav.GetValidatorByPKID(prevValidatorEntry.ValidatorPKID) + if err != nil { + return errors.Wrapf(err, "_disconnectUnstake: ") + } + if currentValidatorEntry == nil { + return fmt.Errorf( + "_disconnectUnstake: no current ValidatorEntry found for %v", txMeta.ValidatorPublicKey, + ) + } + bav._deleteValidatorEntryMappings(currentValidatorEntry) + // 2. Set the PrevValidatorEntry. + bav._setValidatorEntryMappings(prevValidatorEntry) + + // Restore the PrevStakeEntry. + // 1. Delete the CurrentStakeEntry, if exists. The CurrentStakeEntry will exist if the transactor + // still has stake assigned to this validator. The CurrentStakeEntry will not exist if the + // transactor unstaked all stake. + currentStakeEntry, err := bav.GetStakeEntry(prevValidatorEntry.ValidatorPKID, transactorPKIDEntry.PKID) + if err != nil { + return errors.Wrapf(err, "_disconnectUnstake: ") + } + if currentStakeEntry != nil { + bav._deleteStakeEntryMappings(currentStakeEntry) + } + // 2. 
Set the PrevStakeEntry. + if len(operationData.PrevStakeEntries) < 1 { + return fmt.Errorf("_disconnectUnstake: no prev StakeEntry found for %v", currentTxn.PublicKey) + } + if len(operationData.PrevStakeEntries) > 1 { + return fmt.Errorf("_disconnectUnstake: more than one prev StakeEntry found for %v", currentTxn.PublicKey) + } + bav._setStakeEntryMappings(operationData.PrevStakeEntries[0]) + + // Restore the PrevGlobalStakeAmountNanos. + bav._setGlobalStakeAmountNanos(operationData.PrevGlobalStakeAmountNanos) + + // Restore the PrevLockedStakeEntry, if exists. The PrevLockedStakeEntry will exist if the + // transactor has previously unstaked stake assigned to this validator within the same epoch. + // The PrevLockedStakeEntry will not exist otherwise. + currentEpochNumber := uint64(0) // TODO: set this + // 1. Retrieve the CurrentLockedStakeEntry. + currentLockedStakeEntry, err := bav.GetLockedStakeEntry( + prevValidatorEntry.ValidatorPKID, transactorPKIDEntry.PKID, currentEpochNumber, + ) + if err != nil { + return errors.Wrapf(err, "_disconnectUnstake: ") + } + // 2. Delete the CurrentLockedStakeEntry. + bav._deleteLockedStakeEntryMappings(currentLockedStakeEntry) + // 3. Set the PrevLockedStakeEntry, if exists. + if len(operationData.PrevLockedStakeEntries) > 1 { + return fmt.Errorf("_disconnectUnstake: more than one prev LockedStakeEntry found for %v", currentTxn.PublicKey) + } + if len(operationData.PrevLockedStakeEntries) == 1 { + bav._setLockedStakeEntryMappings(operationData.PrevLockedStakeEntries[0]) + } + + // Disconnect the basic transfer. + return bav._disconnectBasicTransfer( + currentTxn, txHash, utxoOpsForTxn[:operationIndex], blockHeight, + ) +} + +func (bav *UtxoView) _connectUnlockStake( + txn *MsgDeSoTxn, + txHash *BlockHash, + blockHeight uint32, + verifySignatures bool, +) ( + _totalInput uint64, + _totalOutput uint64, + _utxoOps []*UtxoOperation, + _err error, +) { + // Validate the starting block height. 
+ if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight || + blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { + return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectUnlockStake: ") + } + + // Validate the txn TxnType. + if txn.TxnMeta.GetTxnType() != TxnTypeUnlockStake { + return 0, 0, nil, fmt.Errorf( + "_connectUnlockStake: called with bad TxnType %s", txn.TxnMeta.GetTxnType().String(), + ) + } + + // Grab the txn metadata. + txMeta := txn.TxnMeta.(*UnlockStakeMetadata) + + // Validate the txn metadata. + if err := bav.IsValidUnlockStakeMetadata(txn.PublicKey, txMeta); err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnlockStake: ") + } + + // Convert TransactorPublicKey to TransactorPKID. + transactorPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidStakerPKID, "_connectUnlockStake: ") + } + + // Convert ValidatorPublicKey to ValidatorPKID. + validatorPKIDEntry := bav.GetPKIDForPublicKey(txMeta.ValidatorPublicKey.ToBytes()) + if validatorPKIDEntry == nil || validatorPKIDEntry.isDeleted { + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidValidatorPKID, "_connectUnlockStake: ") + } + + // Retrieve the PrevLockedStakeEntries. These will be restored if we disconnect this txn. + prevLockedStakeEntries, err := bav.GetLockedStakeEntriesInRange( + validatorPKIDEntry.PKID, transactorPKIDEntry.PKID, txMeta.StartEpochNumber, txMeta.EndEpochNumber, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnlockStake: ") + } + if len(prevLockedStakeEntries) == 0 { + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidUnlockStakeNoUnlockableStakeFound, "_connectUnlockStake: ") + } + + // Connect a basic transfer to get the total input and the + // total output without considering the txn metadata. 
+ totalInput, totalOutput, utxoOpsForTxn, err := bav._connectBasicTransfer( + txn, txHash, blockHeight, verifySignatures, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnlockStake: ") + } + if verifySignatures { + // _connectBasicTransfer has already checked that the txn is signed + // by the top-level public key, which we take to be the sender's + // public key so there is no need to verify anything further. + } + + // Calculate the TotalUnlockedAmountNanos and delete the PrevLockedStakeEntries. + totalUnlockedAmountNanos := uint256.NewInt() + for _, prevLockedStakeEntry := range prevLockedStakeEntries { + totalUnlockedAmountNanos, err = SafeUint256().Add( + totalUnlockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnlockStake: ") + } + bav._deleteLockedStakeEntryMappings(prevLockedStakeEntry) + } + if !totalUnlockedAmountNanos.IsUint64() { + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidUnlockStakeUnlockableStakeOverflowsUint64, "_connectUnlockStake: ") + } + totalUnlockedAmountNanosUint64 := totalUnlockedAmountNanos.Uint64() + + // Add TotalUnlockedAmountNanos to TotalInput. The unlocked coins are an + // implicit input even though they do not come from a specific public key. + totalInput, err = SafeUint64().Add(totalInput, totalUnlockedAmountNanosUint64) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnlockStake: error adding TotalUnlockedAmountNanos to TotalInput: ") + } + + // Add TotalUnlockedAmountNanos to TotalOutput. The unlocked + // coins being sent to the transactor are an implicit output. + totalOutput, err = SafeUint64().Add(totalOutput, totalUnlockedAmountNanosUint64) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnlockStake: error adding TotalUnlockedAmountNanos to TotalOutput: ") + } + + // Return TotalUnlockedAmountNanos back to the transactor. 
We can use + // _addBalance here since we validate that connectUnlockStake can only + // occur after the BalanceModelBlockHeight. + utxoOp, err := bav._addBalance(totalUnlockedAmountNanosUint64, txn.PublicKey) + if err != nil { + return 0, 0, nil, errors.Wrapf( + err, "_connectUnlockStake: error adding TotalUnlockedAmountNanos to the transactor balance: ", + ) + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOp) + + // Add a UTXO operation + utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + Type: OperationTypeUnlockStake, + PrevLockedStakeEntries: prevLockedStakeEntries, + }) + return totalInput, totalOutput, utxoOpsForTxn, nil +} + +func (bav *UtxoView) _disconnectUnlockStake( + operationType OperationType, + currentTxn *MsgDeSoTxn, + txHash *BlockHash, + utxoOpsForTxn []*UtxoOperation, + blockHeight uint32, +) error { + // Validate the starting block height. + if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight || + blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { + return errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_disconnectUnlockStake: ") + } + + // Validate the last operation is an UnlockStake operation. + if len(utxoOpsForTxn) == 0 { + return fmt.Errorf("_disconnectUnlockStake: utxoOperations are missing") + } + operationIndex := len(utxoOpsForTxn) - 1 + operationData := utxoOpsForTxn[operationIndex] + if operationData.Type != OperationTypeUnlockStake { + return fmt.Errorf( + "_disconnectUnlockStake: trying to revert %v but found %v", + OperationTypeUnlockStake, + operationData.Type, + ) + } + + // Convert TransactorPublicKey to TransactorPKID. + transactorPKIDEntry := bav.GetPKIDForPublicKey(currentTxn.PublicKey) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return errors.Wrapf(RuleErrorInvalidStakerPKID, "_disconnectUnlockStake: ") + } + + // Calculate the TotalUnlockedAmountNanos. 
+ totalUnlockedAmountNanos := uint256.NewInt() + var err error + for _, prevLockedStakeEntry := range operationData.PrevLockedStakeEntries { + totalUnlockedAmountNanos, err = SafeUint256().Add( + totalUnlockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "_disconnectUnlockStake: ") + } + } + if !totalUnlockedAmountNanos.IsUint64() { + return errors.Wrapf(RuleErrorInvalidUnlockStakeUnlockableStakeOverflowsUint64, "_disconnectUnlockStake: ") + } + + // Unadd TotalUnlockedAmountNanos from the transactor. + err = bav._unAddBalance(totalUnlockedAmountNanos.Uint64(), currentTxn.PublicKey) + if err != nil { + return errors.Wrapf(err, "_disconnectUnlockStake: error unadding TotalUnlockedAmountNanos from the transactor balance: ") + } + + // Restore the PrevLockedStakeEntries. + for _, prevLockedStakeEntry := range operationData.PrevLockedStakeEntries { + bav._setLockedStakeEntryMappings(prevLockedStakeEntry) + } + + // Disconnect the basic transfer. + return bav._disconnectBasicTransfer( + currentTxn, txHash, utxoOpsForTxn[:operationIndex], blockHeight, + ) +} + +func (bav *UtxoView) IsValidStakeMetadata(transactorPkBytes []byte, metadata *StakeMetadata, blockHeight uint32) error { + // Validate TransactorPublicKey. + transactorPKIDEntry := bav.GetPKIDForPublicKey(transactorPkBytes) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return errors.Wrapf(RuleErrorInvalidStakerPKID, "UtxoView.IsValidStakeMetadata: ") + } + + // Validate ValidatorPublicKey. + validatorEntry, err := bav.GetValidatorByPublicKey(metadata.ValidatorPublicKey) + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidStakeMetadata: ") + } + if validatorEntry == nil || validatorEntry.isDeleted || validatorEntry.DisableDelegatedStake { + return errors.Wrapf(RuleErrorInvalidValidatorPKID, "UtxoView.IsValidStakeMetadata: ") + } + + // Validate 0 < StakeAmountNanos <= transactor's DESO Balance. 
We ignore + // the txn fees in this check. The StakeAmountNanos will be validated to + // be less than the transactor's DESO balance net of txn fees in the call + // to connectBasicTransferWithExtraSpend. + if metadata.StakeAmountNanos == nil || + metadata.StakeAmountNanos.IsZero() || + !metadata.StakeAmountNanos.IsUint64() { + return errors.Wrapf(RuleErrorInvalidStakeAmountNanos, "UtxoView.IsValidStakeMetadata: ") + } + transactorDeSoBalanceNanos, err := bav.GetSpendableDeSoBalanceNanosForPublicKey(transactorPkBytes, blockHeight-1) + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidStakeMetadata: ") + } + if uint256.NewInt().SetUint64(transactorDeSoBalanceNanos).Cmp(metadata.StakeAmountNanos) < 0 { + return errors.Wrapf(RuleErrorInvalidStakeInsufficientBalance, "UtxoView.IsValidStakeMetadata: ") + } + + return nil +} + +func (bav *UtxoView) IsValidUnstakeMetadata(transactorPkBytes []byte, metadata *UnstakeMetadata) error { + // Validate TransactorPublicKey. + transactorPKIDEntry := bav.GetPKIDForPublicKey(transactorPkBytes) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return errors.Wrapf(RuleErrorInvalidStakerPKID, "UtxoView.IsValidUnstakeMetadata: ") + } + + // Validate ValidatorPublicKey. + validatorEntry, err := bav.GetValidatorByPublicKey(metadata.ValidatorPublicKey) + if err != nil { + return errors.Wrapf(err, "IsValidUnstakeMetadata: ") + } + if validatorEntry == nil || validatorEntry.isDeleted { + return errors.Wrapf(RuleErrorInvalidValidatorPKID, "UtxoView.IsValidUnstakeMetadata: ") + } + + // Validate StakeEntry exists. + stakeEntry, err := bav.GetStakeEntry(validatorEntry.ValidatorPKID, transactorPKIDEntry.PKID) + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidUnstakeMetadata: ") + } + if stakeEntry == nil || stakeEntry.isDeleted { + return errors.Wrapf(RuleErrorInvalidUnstakeNoStakeFound, "UtxoView.IsValidUnstakeMetadata: ") + } + + // Validate 0 < UnstakeAmountNanos <= StakeEntry.StakeAmountNanos. 
+ if metadata.UnstakeAmountNanos == nil || metadata.UnstakeAmountNanos.IsZero() { + return errors.Wrapf(RuleErrorInvalidUnstakeAmountNanos, "UtxoView.IsValidUnstakeMetadata: ") + } + if stakeEntry.StakeAmountNanos.Cmp(metadata.UnstakeAmountNanos) < 0 { + return errors.Wrapf(RuleErrorInvalidUnstakeInsufficientStakeFound, "UtxoView.IsValidUnstakeMetadata: ") + } + + return nil +} + +func (bav *UtxoView) IsValidUnlockStakeMetadata(transactorPkBytes []byte, metadata *UnlockStakeMetadata) error { + // Validate TransactorPublicKey. + transactorPKIDEntry := bav.GetPKIDForPublicKey(transactorPkBytes) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return errors.Wrapf(RuleErrorInvalidStakerPKID, "UtxoView.IsValidUnlockStakeMetadata: ") + } + + // Validate ValidatorPublicKey. + validatorEntry, err := bav.GetValidatorByPublicKey(metadata.ValidatorPublicKey) + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: ") + } + if validatorEntry == nil || validatorEntry.isDeleted { + return errors.Wrapf(RuleErrorInvalidValidatorPKID, "UtxoView.IsValidUnlockStakeMetadata: ") + } + + // Validate StartEpochNumber and EndEpochNumber. + if metadata.StartEpochNumber > metadata.EndEpochNumber { + return errors.Wrapf(RuleErrorInvalidUnlockStakeEpochRange, "UtxoView.IsValidUnlockStakeMetadata: ") + } + // TODO: validate EndEpochNumber is <= CurrentEpochNumber - 2 + + // Validate LockedStakeEntries exist. 
+	lockedStakeEntries, err := bav.GetLockedStakeEntriesInRange(
+		validatorEntry.ValidatorPKID, transactorPKIDEntry.PKID, metadata.StartEpochNumber, metadata.EndEpochNumber)
+	if err != nil { return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: ") }
+	existsLockedStakeEntries := false
+	for _, lockedStakeEntry := range lockedStakeEntries {
+		if lockedStakeEntry != nil && !lockedStakeEntry.isDeleted {
+			existsLockedStakeEntries = true
+			break
+		}
+	}
+	if !existsLockedStakeEntries {
+		return errors.Wrapf(RuleErrorInvalidUnlockStakeNoUnlockableStakeFound, "UtxoView.IsValidUnlockStakeMetadata: ")
+	}
+
+	return nil
+}
+
+func (bav *UtxoView) GetStakeEntry(validatorPKID *PKID, stakerPKID *PKID) (*StakeEntry, error) {
+	// Error if either input is nil.
+	if validatorPKID == nil {
+		return nil, errors.New("UtxoView.GetStakeEntry: nil ValidatorPKID provided as input")
+	}
+	if stakerPKID == nil {
+		return nil, errors.New("UtxoView.GetStakeEntry: nil StakerPKID provided as input")
+	}
+	// First, check the UtxoView.
+	stakeMapKey := StakeMapKey{ValidatorPKID: *validatorPKID, StakerPKID: *stakerPKID}
+	if stakeEntry, exists := bav.StakeMapKeyToStakeEntry[stakeMapKey]; exists {
+		// If StakeEntry.isDeleted, return nil.
+		if stakeEntry.isDeleted {
+			return nil, nil
+		}
+		return stakeEntry, nil
+	}
+	// Then, check the database.
+	stakeEntry, err := DBGetStakeEntry(bav.Handle, bav.Snapshot, validatorPKID, stakerPKID)
+	if err != nil {
+		return nil, errors.Wrapf(err, "UtxoView.GetStakeEntry: ")
+	}
+	if stakeEntry != nil {
+		// Cache the StakeEntry in the UtxoView if exists.
+		bav._setStakeEntryMappings(stakeEntry)
+	}
+	return stakeEntry, nil
+}
+
+func (bav *UtxoView) GetLockedStakeEntry(
+	validatorPKID *PKID,
+	stakerPKID *PKID,
+	lockedAtEpochNumber uint64,
+) (*LockedStakeEntry, error) {
+	// Error if either input is nil.
+ if validatorPKID == nil { + return nil, errors.New("UtxoView.GetLockedStakeEntry: nil ValidatorPKID provided as input") + } + if stakerPKID == nil { + return nil, errors.New("UtxoView.GetLockedStakeEntry: nil StakerPKID provided as input") + } + // First, check the UtxoView. + lockedStakeMapKey := LockedStakeMapKey{ + ValidatorPKID: *validatorPKID, + StakerPKID: *stakerPKID, + LockedAtEpochNumber: lockedAtEpochNumber, + } + if lockedStakeEntry, exists := bav.LockedStakeMapKeyToLockedStakeEntry[lockedStakeMapKey]; exists { + // If LockedStakeEntry.isDeleted, return nil. + if lockedStakeEntry.isDeleted { + return nil, nil + } + return lockedStakeEntry, nil + } + // Then, check the database. + lockedStakeEntry, err := DBGetLockedStakeEntry(bav.Handle, bav.Snapshot, validatorPKID, stakerPKID, lockedAtEpochNumber) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetLockedStakeEntry: ") + } + if lockedStakeEntry != nil { + // Cache the LockedStakeEntry in the UtxoView if exists. + bav._setLockedStakeEntryMappings(lockedStakeEntry) + } + return lockedStakeEntry, nil +} + +func (bav *UtxoView) GetLockedStakeEntriesInRange( + validatorPKID *PKID, + stakerPKID *PKID, + startEpochNumber uint64, + endEpochNumber uint64, +) ([]*LockedStakeEntry, error) { + // Validate inputs. + if validatorPKID == nil { + return nil, errors.New("UtxoView.GetLockedStakeEntriesInRange: nil ValidatorPKID provided as input") + } + if stakerPKID == nil { + return nil, errors.New("UtxoView.GetLockedStakeEntriesInRange: nil StakerPKID provided as input") + } + if startEpochNumber > endEpochNumber { + return nil, errors.New("UtxoView.GetLockedStakeEntriesInRange: invalid LockedAtEpochNumber range provided as input") + } + + // Store matching LockedStakeEntries in a map to prevent + // returning duplicates between the db and UtxoView. + lockedStakeEntriesMap := make(map[LockedStakeMapKey]*LockedStakeEntry) + + // First, pull matching LockedStakeEntries from the db. 
+ dbLockedStakeEntries, err := DBGetLockedStakeEntriesInRange( + bav.Handle, bav.Snapshot, validatorPKID, stakerPKID, startEpochNumber, endEpochNumber, + ) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetLockedStakeEntriesInRange: ") + } + for _, lockedStakeEntry := range dbLockedStakeEntries { + lockedStakeEntriesMap[lockedStakeEntry.ToMapKey()] = lockedStakeEntry + } + + // Then, pull matching LockedStakeEntries from the UtxoView. + // Loop through all LockedStakeEntries in the UtxoView. + for _, lockedStakeEntry := range bav.LockedStakeMapKeyToLockedStakeEntry { + // Filter to matching LockedStakeEntries. + if !lockedStakeEntry.ValidatorPKID.Eq(validatorPKID) || + !lockedStakeEntry.StakerPKID.Eq(stakerPKID) || + lockedStakeEntry.LockedAtEpochNumber < startEpochNumber || + lockedStakeEntry.LockedAtEpochNumber > endEpochNumber { + continue + } + + if lockedStakeEntry.isDeleted { + // Remove from map if isDeleted. + delete(lockedStakeEntriesMap, lockedStakeEntry.ToMapKey()) + } else { + // Otherwise, add to map. + lockedStakeEntriesMap[lockedStakeEntry.ToMapKey()] = lockedStakeEntry + } + } + + // Convert LockedStakeEntries map to slice, sorted by LockedAtEpochNumber ASC. + var lockedStakeEntries []*LockedStakeEntry + for _, lockedStakeEntry := range lockedStakeEntriesMap { + lockedStakeEntries = append(lockedStakeEntries, lockedStakeEntry) + } + sort.Slice(lockedStakeEntries, func(ii, jj int) bool { + return lockedStakeEntries[ii].LockedAtEpochNumber < lockedStakeEntries[jj].LockedAtEpochNumber + }) + return lockedStakeEntries, nil +} + +func (bav *UtxoView) _setStakeEntryMappings(stakeEntry *StakeEntry) { + // This function shouldn't be called with nil. 
+ if stakeEntry == nil { + glog.Errorf("_setStakeEntryMappings: called with nil entry, this should never happen") + return + } + bav.StakeMapKeyToStakeEntry[stakeEntry.ToMapKey()] = stakeEntry +} + +func (bav *UtxoView) _setLockedStakeEntryMappings(lockedStakeEntry *LockedStakeEntry) { + // This function shouldn't be called with nil. + if lockedStakeEntry == nil { + glog.Errorf("_setLockedStakeEntryMappings: called with nil entry, this should never happen") + return + } + bav.LockedStakeMapKeyToLockedStakeEntry[lockedStakeEntry.ToMapKey()] = lockedStakeEntry +} + +func (bav *UtxoView) _deleteStakeEntryMappings(stakeEntry *StakeEntry) { + // This function shouldn't be called with nil. + if stakeEntry == nil { + glog.Errorf("_deleteStakeEntryMappings: called with nil entry, this should never happen") + return + } + // Create a tombstone entry. + tombstoneEntry := *stakeEntry + tombstoneEntry.isDeleted = true + // Set the mappings to the point to the tombstone entry. + bav._setStakeEntryMappings(&tombstoneEntry) +} + +func (bav *UtxoView) _deleteLockedStakeEntryMappings(lockedStakeEntry *LockedStakeEntry) { + // This function shouldn't be called with nil. + if lockedStakeEntry == nil { + glog.Errorf("_deleteLockedStakeEntryMappings: called with nil entry, this should never happen") + return + } + // Create a tombstone entry. + tombstoneEntry := *lockedStakeEntry + tombstoneEntry.isDeleted = true + // Set the mappings to the point to the tombstone entry. + bav._setLockedStakeEntryMappings(&tombstoneEntry) +} + +func (bav *UtxoView) _flushStakeEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + // Delete all entries in the UtxoView map. + for mapKeyIter, entryIter := range bav.StakeMapKeyToStakeEntry { + // Make a copy of the iterators since we make references to them below. + mapKey := mapKeyIter + entry := *entryIter + + // Sanity-check that the entry matches the map key. 
+ mapKeyInEntry := entry.ToMapKey() + if mapKeyInEntry != mapKey { + return fmt.Errorf( + "_flushStakeEntriesToDbWithTxn: StakeEntry key %v doesn't match MapKey %v", + &mapKeyInEntry, + &mapKey, + ) + } + + // Delete the existing mappings in the db for this MapKey. They will be + // re-added if the corresponding entry in-memory has isDeleted=false. + if err := DBDeleteStakeEntryWithTxn(txn, bav.Snapshot, &entry, blockHeight); err != nil { + return errors.Wrapf(err, "_flushStakeEntriesToDbWithTxn: ") + } + } + + // Set any !isDeleted entries in the UtxoView map. + for _, entryIter := range bav.StakeMapKeyToStakeEntry { + entry := *entryIter + if entry.isDeleted { + // If isDeleted then there's nothing to do because + // we already deleted the entry above. + } else { + // If !isDeleted then we put the corresponding + // mappings for it into the db. + if err := DBPutStakeEntryWithTxn(txn, bav.Snapshot, &entry, blockHeight); err != nil { + return errors.Wrapf(err, "_flushStakeEntriesToDbWithTxn: ") + } + } + } + + return nil +} + +func (bav *UtxoView) _flushLockedStakeEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + // Delete all entries in the UtxoView map. + for mapKeyIter, entryIter := range bav.LockedStakeMapKeyToLockedStakeEntry { + // Make a copy of the iterators since we make references to them below. + mapKey := mapKeyIter + entry := *entryIter + + // Sanity-check that the entry matches the map key. + mapKeyInEntry := entry.ToMapKey() + if mapKeyInEntry != mapKey { + return fmt.Errorf( + "_flushLockedStakeEntriesToDbWithTxn: LockedStakeEntry key %v doesn't match MapKey %v", + &mapKeyInEntry, + &mapKey, + ) + } + + // Delete the existing mappings in the db for this MapKey. They will be + // re-added if the corresponding entry in-memory has isDeleted=false. 
+ if err := DBDeleteLockedStakeEntryWithTxn(txn, bav.Snapshot, &entry, blockHeight); err != nil { + return errors.Wrapf(err, "_flushLockedStakeEntriesToDbWithTxn: ") + } + } + + // Set any !isDeleted entries in the UtxoView map. + for _, entryIter := range bav.LockedStakeMapKeyToLockedStakeEntry { + entry := *entryIter + if entry.isDeleted { + // If isDeleted then there's nothing to do because + // we already deleted the entry above. + } else { + // If !isDeleted then we put the corresponding + // mappings for it into the db. + if err := DBPutLockedStakeEntryWithTxn(txn, bav.Snapshot, &entry, blockHeight); err != nil { + return errors.Wrapf(err, "_flushLockedStakeEntriesToDbWithTxn: ") + } + } + } + + return nil +} + +// +// MEMPOOL UTILS +// + +func (bav *UtxoView) CreateStakeTxindexMetadata(utxoOp *UtxoOperation, txn *MsgDeSoTxn) (*StakeTxindexMetadata, []*AffectedPublicKey) { + metadata := txn.TxnMeta.(*StakeMetadata) + + // Convert TransactorPublicKeyBytes to StakerPublicKeyBase58Check. + stakerPublicKeyBase58Check := PkToString(txn.PublicKey, bav.Params) + + // Convert ValidatorPublicKey to ValidatorPublicKeyBase58Check. + validatorPublicKeyBase58Check := PkToString(metadata.ValidatorPublicKey.ToBytes(), bav.Params) + + // Construct TxindexMetadata. + txindexMetadata := &StakeTxindexMetadata{ + StakerPublicKeyBase58Check: stakerPublicKeyBase58Check, + ValidatorPublicKeyBase58Check: validatorPublicKeyBase58Check, + StakeAmountNanos: metadata.StakeAmountNanos, + } + + // Construct AffectedPublicKeys. 
+ affectedPublicKeys := []*AffectedPublicKey{ + { + PublicKeyBase58Check: stakerPublicKeyBase58Check, + Metadata: "StakerPublicKeyBase58Check", + }, + { + PublicKeyBase58Check: validatorPublicKeyBase58Check, + Metadata: "ValidatorStakedToPublicKeyBase58Check", + }, + } + + return txindexMetadata, affectedPublicKeys +} + +func (bav *UtxoView) CreateUnstakeTxindexMetadata(utxoOp *UtxoOperation, txn *MsgDeSoTxn) (*UnstakeTxindexMetadata, []*AffectedPublicKey) { + metadata := txn.TxnMeta.(*UnstakeMetadata) + + // Convert TransactorPublicKeyBytes to StakerPublicKeyBase58Check. + stakerPublicKeyBase58Check := PkToString(txn.PublicKey, bav.Params) + + // Convert ValidatorPublicKey to ValidatorPublicKeyBase58Check. + validatorPublicKeyBase58Check := PkToString(metadata.ValidatorPublicKey.ToBytes(), bav.Params) + + // Construct TxindexMetadata. + txindexMetadata := &UnstakeTxindexMetadata{ + StakerPublicKeyBase58Check: stakerPublicKeyBase58Check, + ValidatorPublicKeyBase58Check: validatorPublicKeyBase58Check, + UnstakeAmountNanos: metadata.UnstakeAmountNanos, + } + + // Construct AffectedPublicKeys. + affectedPublicKeys := []*AffectedPublicKey{ + { + PublicKeyBase58Check: stakerPublicKeyBase58Check, + Metadata: "UnstakerPublicKeyBase58Check", + }, + { + PublicKeyBase58Check: validatorPublicKeyBase58Check, + Metadata: "ValidatorUnstakedFromPublicKeyBase58Check", + }, + } + + return txindexMetadata, affectedPublicKeys +} + +func (bav *UtxoView) CreateUnlockStakeTxindexMetadata(utxoOp *UtxoOperation, txn *MsgDeSoTxn) (*UnlockStakeTxindexMetadata, []*AffectedPublicKey) { + metadata := txn.TxnMeta.(*UnlockStakeMetadata) + + // Convert TransactorPublicKeyBytes to StakerPublicKeyBase58Check. + stakerPublicKeyBase58Check := PkToString(txn.PublicKey, bav.Params) + + // Convert ValidatorPublicKey to ValidatorPublicKeyBase58Check. + validatorPublicKeyBase58Check := PkToString(metadata.ValidatorPublicKey.ToBytes(), bav.Params) + + // Calculate TotalUnlockedAmountNanos. 
+ totalUnlockedAmountNanos := uint256.NewInt() + var err error + for _, prevLockedStakeEntry := range utxoOp.PrevLockedStakeEntries { + totalUnlockedAmountNanos, err = SafeUint256().Add( + totalUnlockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos, + ) + if err != nil { + glog.Errorf("CreateUnlockStakeTxindexMetadata: error calculating TotalUnlockedAmountNanos: %v", err) + totalUnlockedAmountNanos = uint256.NewInt() + break + } + } + + // Construct TxindexMetadata. + txindexMetadata := &UnlockStakeTxindexMetadata{ + StakerPublicKeyBase58Check: stakerPublicKeyBase58Check, + ValidatorPublicKeyBase58Check: validatorPublicKeyBase58Check, + StartEpochNumber: metadata.StartEpochNumber, + EndEpochNumber: metadata.EndEpochNumber, + TotalUnlockedAmountNanos: totalUnlockedAmountNanos, + } + + // Construct AffectedPublicKeys. + affectedPublicKeys := []*AffectedPublicKey{ + { + PublicKeyBase58Check: stakerPublicKeyBase58Check, + Metadata: "UnlockedStakerPublicKeyBase58Check", + }, + } + + return txindexMetadata, affectedPublicKeys +} + +// +// TRANSACTION SPENDING LIMITS +// + +type StakeLimitKey struct { + ValidatorPKID PKID + StakerPKID PKID +} + +func MakeStakeLimitKey(validatorPKID *PKID, stakerPKID *PKID) StakeLimitKey { + return StakeLimitKey{ + ValidatorPKID: *validatorPKID, + StakerPKID: *stakerPKID, + } +} + +func (stakeLimitKey *StakeLimitKey) Encode() []byte { + var data []byte + data = append(data, stakeLimitKey.ValidatorPKID.ToBytes()...) + data = append(data, stakeLimitKey.StakerPKID.ToBytes()...) 
+ return data +} + +func (stakeLimitKey *StakeLimitKey) Decode(rr *bytes.Reader) error { + var err error + + // ValidatorPKID + validatorPKID := &PKID{} + if err = validatorPKID.FromBytes(rr); err != nil { + return errors.Wrap(err, "StakeLimitKey.Decode: Problem reading ValidatorPKID: ") + } + stakeLimitKey.ValidatorPKID = *validatorPKID + + // StakerPKID + stakerPKID := &PKID{} + if err = stakerPKID.FromBytes(rr); err != nil { + return errors.Wrap(err, "StakeLimitKey.Decode: Problem reading StakerPKID: ") + } + stakeLimitKey.StakerPKID = *stakerPKID + + return nil +} + +func (bav *UtxoView) _checkStakeTxnSpendingLimitAndUpdateDerivedKey( + derivedKeyEntry DerivedKeyEntry, + transactorPublicKeyBytes []byte, + txMeta *StakeMetadata, +) (DerivedKeyEntry, error) { + // The DerivedKeyEntry.TransactionSpendingLimit for staking maps + // ValidatorPKID || StakerPKID to the amount of stake-able DESO + // nanos allowed for this derived key. + + // Convert TransactorPublicKeyBytes to StakerPKID. + stakerPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKeyBytes) + if stakerPKIDEntry == nil || stakerPKIDEntry.isDeleted { + return derivedKeyEntry, errors.Wrapf(RuleErrorInvalidStakerPKID, "UtxoView._checkStakeTxnSpendingLimitAndUpdateDerivedKey: ") + } + + // Convert ValidatorPublicKey to ValidatorPKID. + validatorEntry, err := bav.GetValidatorByPublicKey(txMeta.ValidatorPublicKey) + if err != nil { + return derivedKeyEntry, errors.Wrapf(err, "_checkStakeTxnSpendingLimitAndUpdateDerivedKey: ") + } + if validatorEntry == nil || validatorEntry.isDeleted { + return derivedKeyEntry, errors.Wrapf(RuleErrorInvalidValidatorPKID, "UtxoView._checkStakeTxnSpendingLimitAndUpdateDerivedKey: ") + } + + // Check spending limit for this validator. + // If not found, check spending limit for any validator. + isSpendingLimitExceeded := false + + for _, validatorPKID := range []*PKID{validatorEntry.ValidatorPKID, &ZeroPKID} { + // Retrieve DerivedKeyEntry.TransactionSpendingLimit. 
+ stakeLimitKey := MakeStakeLimitKey(validatorPKID, stakerPKIDEntry.PKID) + spendingLimit, exists := derivedKeyEntry.TransactionSpendingLimitTracker.StakeLimitMap[stakeLimitKey] + if !exists { + continue + } + spendingLimitCmp := spendingLimit.Cmp(txMeta.StakeAmountNanos) + + // If the amount being staked exceeds the spending limit, note it, and skip this spending limit. + // This solves for the case where the amount being staked is greater than the spending limit + // scoped to a specific validator but may be within the limit scoped to any validator. + if spendingLimitCmp < 0 { + isSpendingLimitExceeded = true + continue + } + + // If the spending limit exceeds the amount being staked, update the spending limit. + if spendingLimitCmp > 0 { + updatedSpendingLimit, err := SafeUint256().Sub(spendingLimit, txMeta.StakeAmountNanos) + if err != nil { + return derivedKeyEntry, errors.Wrapf(err, "_checkStakeTxnSpendingLimitAndUpdateDerivedKey: ") + } + if !updatedSpendingLimit.IsUint64() { + // This should never happen, but good to double-check. + return derivedKeyEntry, errors.New( + "_checkStakeTxnSpendingLimitAndUpdateDerivedKey: updated spending limit exceeds uint64", + ) + } + derivedKeyEntry.TransactionSpendingLimitTracker.StakeLimitMap[stakeLimitKey] = updatedSpendingLimit + return derivedKeyEntry, nil + } + + // If we get to this point, the spending limit exactly equals + // the amount being staked. Delete the spending limit. + delete(derivedKeyEntry.TransactionSpendingLimitTracker.StakeLimitMap, stakeLimitKey) + return derivedKeyEntry, nil + } + + // Error if the spending limit was found but the staking limit was exceeded. + if isSpendingLimitExceeded { + return derivedKeyEntry, errors.Wrapf(RuleErrorStakeTransactionSpendingLimitExceeded, "UtxoView._checkStakeTxnSpendingLimitAndUpdateDerivedKey: ") + } + + // If we get to this point, we didn't find a matching spending limit. 
+ return derivedKeyEntry, errors.Wrapf(RuleErrorStakeTransactionSpendingLimitNotFound, "UtxoView._checkStakeTxnSpendingLimitAndUpdateDerivedKey: ") +} + +func (bav *UtxoView) _checkUnstakeTxnSpendingLimitAndUpdateDerivedKey( + derivedKeyEntry DerivedKeyEntry, + transactorPublicKeyBytes []byte, + txMeta *UnstakeMetadata, +) (DerivedKeyEntry, error) { + // The DerivedKeyEntry.TransactionSpendingLimit for unstaking maps + // ValidatorPKID || StakerPKID to the amount of unstake-able DESO + // nanos allowed for this derived key. + + // Convert TransactorPublicKeyBytes to StakerPKID. + stakerPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKeyBytes) + if stakerPKIDEntry == nil || stakerPKIDEntry.isDeleted { + return derivedKeyEntry, errors.Wrapf(RuleErrorInvalidStakerPKID, "UtxoView._checkUnstakeTxnSpendingLimitAndUpdateDerivedKey: ") + } + + // Convert ValidatorPublicKey to ValidatorPKID. + validatorEntry, err := bav.GetValidatorByPublicKey(txMeta.ValidatorPublicKey) + if err != nil { + return derivedKeyEntry, errors.Wrapf(err, "_checkUnstakeTxnSpendingLimitAndUpdateDerivedKey: ") + } + if validatorEntry == nil || validatorEntry.isDeleted { + return derivedKeyEntry, errors.Wrapf(RuleErrorInvalidValidatorPKID, "UtxoView._checkUnstakeTxnSpendingLimitAndUpdateDerivedKey: ") + } + + // Check spending limit for this validator. + // If not found, check spending limit for any validator. + isSpendingLimitExceeded := false + + for _, validatorPKID := range []*PKID{validatorEntry.ValidatorPKID, &ZeroPKID} { + // Retrieve DerivedKeyEntry.TransactionSpendingLimit. + stakeLimitKey := MakeStakeLimitKey(validatorPKID, stakerPKIDEntry.PKID) + spendingLimit, exists := derivedKeyEntry.TransactionSpendingLimitTracker.UnstakeLimitMap[stakeLimitKey] + if !exists { + continue + } + spendingLimitCmp := spendingLimit.Cmp(txMeta.UnstakeAmountNanos) + + // If the amount being unstaked exceeds the spending limit, note it, and skip this spending limit. 
+ // This solves for the case where the amount being unstaked is greater than the spending limit + // scoped to a specific validator but may be within the limit scoped to any validator. + if spendingLimitCmp < 0 { + isSpendingLimitExceeded = true + continue + } + + // If the spending limit exceeds the amount being unstaked, update the spending limit. + if spendingLimitCmp > 0 { + updatedSpendingLimit, err := SafeUint256().Sub(spendingLimit, txMeta.UnstakeAmountNanos) + if err != nil { + return derivedKeyEntry, errors.Wrapf(err, "_checkUnstakeTxnSpendingLimitAndUpdateDerivedKey: ") + } + if !updatedSpendingLimit.IsUint64() { + // This should never happen, but good to double-check. + return derivedKeyEntry, errors.New( + "_checkUnstakeTxnSpendingLimitAndUpdateDerivedKey: updated spending limit exceeds uint64", + ) + } + derivedKeyEntry.TransactionSpendingLimitTracker.UnstakeLimitMap[stakeLimitKey] = updatedSpendingLimit + return derivedKeyEntry, nil + } + + // If we get to this point, the spending limit exactly equals + // the amount being unstaked. Delete the spending limit. + delete(derivedKeyEntry.TransactionSpendingLimitTracker.UnstakeLimitMap, stakeLimitKey) + return derivedKeyEntry, nil + } + + // Error if the spending limit was found but the unstaking limit was exceeded. + if isSpendingLimitExceeded { + return derivedKeyEntry, errors.Wrapf(RuleErrorUnstakeTransactionSpendingLimitExceeded, "UtxoView._checkUnstakeTxnSpendingLimitAndUpdateDerivedKey: ") + } + + // If we get to this point, we didn't find a matching spending limit. 
+ return derivedKeyEntry, errors.Wrapf(RuleErrorUnstakeTransactionSpendingLimitNotFound, "UtxoView._checkUnstakeTxnSpendingLimitAndUpdateDerivedKey: ") +} + +func (bav *UtxoView) _checkUnlockStakeTxnSpendingLimitAndUpdateDerivedKey( + derivedKeyEntry DerivedKeyEntry, + transactorPublicKeyBytes []byte, + txMeta *UnlockStakeMetadata, +) (DerivedKeyEntry, error) { + // The DerivedKeyEntry.TransactionSpendingLimit for unlocking stake maps + // ValidatorPKID || StakerPKID to the number of UnlockStake transactions + // this derived key is allowed to perform. + + // Convert TransactorPublicKeyBytes to StakerPKID. + stakerPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKeyBytes) + if stakerPKIDEntry == nil || stakerPKIDEntry.isDeleted { + return derivedKeyEntry, errors.Wrapf(RuleErrorInvalidStakerPKID, "UtxoView._checkUnlockStakeTxnSpendingLimitAndUpdateDerivedKey: ") + } + + // Convert ValidatorPublicKey to ValidatorPKID. + validatorEntry, err := bav.GetValidatorByPublicKey(txMeta.ValidatorPublicKey) + if err != nil { + return derivedKeyEntry, errors.Wrapf(err, "_checkUnlockStakeTxnSpendingLimitAndUpdateDerivedKey: ") + } + if validatorEntry == nil || validatorEntry.isDeleted { + return derivedKeyEntry, errors.Wrapf(RuleErrorInvalidValidatorPKID, "UtxoView._checkUnlockStakeTxnSpendingLimitAndUpdateDerivedKey: ") + } + + // Check spending limit for this validator. + // If not found, check spending limit for any validator. + for _, validatorPKID := range []*PKID{validatorEntry.ValidatorPKID, &ZeroPKID} { + // Retrieve DerivedKeyEntry.TransactionSpendingLimit. + stakeLimitKey := MakeStakeLimitKey(validatorPKID, stakerPKIDEntry.PKID) + spendingLimit, exists := derivedKeyEntry.TransactionSpendingLimitTracker.UnlockStakeLimitMap[stakeLimitKey] + if !exists || spendingLimit <= 0 { + continue + } + + // Delete the spending limit if we've exhausted the spending limit for this key. 
+ if spendingLimit == 1 { + delete(derivedKeyEntry.TransactionSpendingLimitTracker.UnlockStakeLimitMap, stakeLimitKey) + } else { + // Otherwise decrement it by 1. + derivedKeyEntry.TransactionSpendingLimitTracker.UnlockStakeLimitMap[stakeLimitKey]-- + } + + // If we get to this point, we found a matching spending limit which we either deleted or decremented. + return derivedKeyEntry, nil + } + + // If we get to this point, we didn't find a matching spending limit. + return derivedKeyEntry, errors.Wrapf(RuleErrorUnlockStakeTransactionSpendingLimitNotFound, "UtxoView._checkUnlockStakeTxnSpendingLimitAndUpdateDerivedKey: ") +} + +func (bav *UtxoView) IsValidStakeLimitKey(transactorPublicKeyBytes []byte, stakeLimitKey StakeLimitKey) error { + // Convert TransactorPublicKeyBytes to TransactorPKID. + transactorPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKeyBytes) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return errors.Wrapf(RuleErrorTransactionSpendingLimitInvalidStaker, "UtxoView.IsValidStakeLimitKey: ") + } + + // Verify TransactorPKID == StakerPKID. + if !transactorPKIDEntry.PKID.Eq(&stakeLimitKey.StakerPKID) { + return errors.Wrapf(RuleErrorTransactionSpendingLimitInvalidStaker, "UtxoView.IsValidStakeLimitKey: ") + } + + // Verify ValidatorEntry. + if stakeLimitKey.ValidatorPKID.IsZeroPKID() { + // The ZeroPKID is a special case that indicates that the spending limit + // applies to any validator. In this case, we don't need to check that the + // validator exists, as there is no validator registered for the ZeroPKID. 
+ return nil + } + validatorEntry, err := bav.GetValidatorByPKID(&stakeLimitKey.ValidatorPKID) + if err != nil { + return errors.Wrapf(err, "IsValidStakeLimitKey: ") + } + if validatorEntry == nil || validatorEntry.isDeleted || validatorEntry.DisableDelegatedStake { + return errors.Wrapf(RuleErrorTransactionSpendingLimitInvalidValidator, "UtxoView.IsValidStakeLimitKey: ") + } + + return nil +} + +// +// CONSTANTS +// + +const RuleErrorInvalidStakerPKID RuleError = "RuleErrorInvalidStakerPKID" +const RuleErrorInvalidStakeAmountNanos RuleError = "RuleErrorInvalidStakeAmountNanos" +const RuleErrorInvalidStakeInsufficientBalance RuleError = "RuleErrorInvalidStakeInsufficientBalance" +const RuleErrorInvalidUnstakeNoStakeFound RuleError = "RuleErrorInvalidUnstakeNoStakeFound" +const RuleErrorInvalidUnstakeAmountNanos RuleError = "RuleErrorInvalidUnstakeAmountNanos" +const RuleErrorInvalidUnstakeInsufficientStakeFound RuleError = "RuleErrorInvalidUnstakeInsufficientStakeFound" +const RuleErrorInvalidUnlockStakeEpochRange RuleError = "RuleErrorInvalidUnlockStakeEpochRange" +const RuleErrorInvalidUnlockStakeNoUnlockableStakeFound RuleError = "RuleErrorInvalidUnlockStakeNoUnlockableStakeFound" +const RuleErrorInvalidUnlockStakeUnlockableStakeOverflowsUint64 RuleError = "RuleErrorInvalidUnlockStakeUnlockableStakeOverflowsUint64" +const RuleErrorStakeTransactionSpendingLimitNotFound RuleError = "RuleErrorStakeTransactionSpendingLimitNotFound" +const RuleErrorStakeTransactionSpendingLimitExceeded RuleError = "RuleErrorStakeTransactionSpendingLimitExceeded" +const RuleErrorUnstakeTransactionSpendingLimitNotFound RuleError = "RuleErrorUnstakeTransactionSpendingLimitNotFound" +const RuleErrorUnstakeTransactionSpendingLimitExceeded RuleError = "RuleErrorUnstakeTransactionSpendingLimitExceeded" +const RuleErrorUnlockStakeTransactionSpendingLimitNotFound RuleError = "RuleErrorUnlockStakeTransactionSpendingLimitNotFound" +const RuleErrorTransactionSpendingLimitInvalidStaker RuleError 
= "RuleErrorTransactionSpendingLimitInvalidStaker" +const RuleErrorTransactionSpendingLimitInvalidValidator RuleError = "RuleErrorTransactionSpendingLimitInvalidValidator" diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go new file mode 100644 index 000000000..2f3d0cd98 --- /dev/null +++ b/lib/block_view_stake_test.go @@ -0,0 +1,1756 @@ +package lib + +import ( + "errors" + "github.com/btcsuite/btcd/btcec" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" + "math" + "testing" +) + +func TestStaking(t *testing.T) { + _testStaking(t, false) + _testStaking(t, true) + _testStakingWithDerivedKey(t) +} + +func _testStaking(t *testing.T, flushToDB bool) { + // Local variables + var err error + + // Initialize fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + chain.snapshot = nil + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. 
+ blockHeight := uint64(chain.blockTip().Height + 1) + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID + m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID + m3PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m3PkBytes).PKID + m4PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m4PkBytes).PKID + _, _, _, _, _ = m0PKID, m1PKID, m2PKID, m3PKID, m4PKID + + // Helper utils + utxoView := func() *UtxoView { + newUtxoView, err := mempool.GetAugmentedUniversalView() + require.NoError(t, err) + return newUtxoView + } + + getDESOBalanceNanos := func(publicKeyBytes []byte) uint64 { + desoBalanceNanos, err := utxoView().GetDeSoBalanceNanosForPublicKey(publicKeyBytes) + require.NoError(t, err) + return desoBalanceNanos + } + + { + // Param Updater set min fee rate to 101 nanos per KB + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } + { + // m0 registers as a validator. 
+ params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Len(t, validatorEntry.Domains, 1) + require.Equal(t, validatorEntry.Domains[0], []byte("https://example.com")) + require.True(t, validatorEntry.TotalStakeAmountNanos.IsZero()) + } + // + // STAKING + // + { + // RuleErrorProofOfStakeTxnBeforeBlockHeight + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = math.MaxUint32 + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitStakeTxn( + testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorProofofStakeTxnBeforeBlockHeight) + + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + } + { + // RuleErrorInvalidValidatorPKID + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m2PkBytes), + StakeAmountNanos: uint256.NewInt(), + } + _, err = _submitStakeTxn( + testMeta, m1Pub, m1Priv, 
stakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidValidatorPKID) + } + { + // RuleErrorInvalidStakeAmountNanos + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: nil, + } + _, err = _submitStakeTxn( + testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidStakeAmountNanos) + } + { + // RuleErrorInvalidStakeAmountNanos + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt(), + } + _, err = _submitStakeTxn( + testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidStakeAmountNanos) + } + { + // RuleErrorInvalidStakeAmountNanos + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: MaxUint256, + } + _, err = _submitStakeTxn( + testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidStakeAmountNanos) + } + { + // RuleErrorInvalidStakeInsufficientBalance + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(math.MaxUint64), + } + _, err = _submitStakeTxn( + testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidStakeInsufficientBalance) + } + { + // m1 stakes with m0. 
+ m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(100), + } + extraData := map[string][]byte{"TestKey": []byte("TestValue")} + feeNanos, err := _submitStakeTxn( + testMeta, m1Pub, m1Priv, stakeMetadata, extraData, flushToDB, + ) + require.NoError(t, err) + + // Verify StakeEntry.StakeAmountNanos. + stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) + require.NoError(t, err) + require.NotNil(t, stakeEntry) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue")) + + // Verify ValidatorEntry.TotalStakeAmountNanos. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // Verify GlobalStakeAmountNanos. + globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). + m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + require.Equal(t, m1OldDESOBalanceNanos-feeNanos-stakeMetadata.StakeAmountNanos.Uint64(), m1NewDESOBalanceNanos) + } + { + // m1 stakes more with m0. + m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(50), + } + extraData := map[string][]byte{"TestKey": []byte("TestValue2")} + feeNanos, err := _submitStakeTxn( + testMeta, m1Pub, m1Priv, stakeMetadata, extraData, flushToDB, + ) + require.NoError(t, err) + + // Verify StakeEntry.StakeAmountNanos. 
+ stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) + require.NoError(t, err) + require.NotNil(t, stakeEntry) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue2")) + + // Verify ValidatorEntry.TotalStakeAmountNanos. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + + // Verify GlobalStakeAmountNanos. + globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + + // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). + m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + require.Equal(t, m1OldDESOBalanceNanos-feeNanos-stakeMetadata.StakeAmountNanos.Uint64(), m1NewDESOBalanceNanos) + } + // + // UNSTAKING + // + { + // RuleErrorProofOfStakeTxnBeforeBlockHeight + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = math.MaxUint32 + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(40), + } + _, err = _submitUnstakeTxn( + testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorProofofStakeTxnBeforeBlockHeight) + + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + } + { + // RuleErrorInvalidValidatorPKID + 
unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m2PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(40), + } + _, err = _submitUnstakeTxn( + testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidValidatorPKID) + } + { + // RuleErrorInvalidUnstakeNoStakeFound + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(40), + } + _, err = _submitUnstakeTxn( + testMeta, m2Pub, m2Priv, unstakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidUnstakeNoStakeFound) + } + { + // RuleErrorInvalidUnstakeAmountNanos + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: nil, + } + _, err = _submitUnstakeTxn( + testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidUnstakeAmountNanos) + } + { + // RuleErrorInvalidUnstakeAmountNanos + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt(), + } + _, err = _submitUnstakeTxn( + testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidUnstakeAmountNanos) + } + { + // RuleErrorInvalidUnstakeInsufficientStakeFound + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: MaxUint256, + } + _, err = _submitUnstakeTxn( + testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidUnstakeInsufficientStakeFound) + } + { + // m1 unstakes from m0. 
+ m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(40), + } + extraData := map[string][]byte{"TestKey": []byte("TestValue")} + feeNanos, err := _submitUnstakeTxn( + testMeta, m1Pub, m1Priv, unstakeMetadata, extraData, flushToDB, + ) + require.NoError(t, err) + + // Verify StakeEntry.StakeAmountNanos. + stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) + require.NoError(t, err) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(110)) + + // Verify ValidatorEntry.TotalStakeAmountNanos. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(110)) + + // Verify GlobalStakeAmountNanos. + globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(110)) + + // Verify LockedStakeEntry.UnstakeAmountNanos. + currentEpochNumber := uint64(0) // TODO: get epoch number from db. + lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) + require.NoError(t, err) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(40)) + require.Equal(t, lockedStakeEntry.ExtraData["TestKey"], []byte("TestValue")) + + // Verify m1's balance stays the same (net of fees). + m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + require.Equal(t, m1OldDESOBalanceNanos-feeNanos, m1NewDESOBalanceNanos) + } + { + // m1 unstakes more from m0. 
+ m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(30), + } + extraData := map[string][]byte{"TestKey": []byte("TestValue2")} + feeNanos, err := _submitUnstakeTxn( + testMeta, m1Pub, m1Priv, unstakeMetadata, extraData, flushToDB, + ) + require.NoError(t, err) + + // Verify StakeEntry.StakeAmountNanos. + stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) + require.NoError(t, err) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(80)) + + // Verify ValidatorEntry.TotalStakeAmountNanos. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(80)) + + // Verify GlobalStakeAmountNanos. + globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(80)) + + // Verify LockedStakeEntry.UnstakeAmountNanos. + currentEpochNumber := uint64(0) // TODO: get epoch number from db. + lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) + require.NoError(t, err) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(70)) + require.Equal(t, lockedStakeEntry.ExtraData["TestKey"], []byte("TestValue2")) + + // Verify m1's balance stays the same (net of fees). + m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + require.Equal(t, m1OldDESOBalanceNanos-feeNanos, m1NewDESOBalanceNanos) + } + { + // m1 unstakes the rest of their stake with m0. 
+ m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(80), + } + feeNanos, err := _submitUnstakeTxn( + testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + // Verify StakeEntry.isDeleted. + stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) + require.NoError(t, err) + require.Nil(t, stakeEntry) + + // Verify ValidatorEntry.TotalStakeAmountNanos. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt()) + + // Verify GlobalStakeAmountNanos. + globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + + // Verify LockedStakeEntry.UnstakeAmountNanos. + currentEpochNumber := uint64(0) // TODO: get epoch number from db. + lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) + require.NoError(t, err) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, lockedStakeEntry.ExtraData["TestKey"], []byte("TestValue2")) + + // Verify m1's balance stays the same (net of fees). 
+ m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + require.Equal(t, m1OldDESOBalanceNanos-feeNanos, m1NewDESOBalanceNanos) + } + // + // UNLOCK STAKE + // + { + // RuleErrorProofOfStakeTxnBeforeBlockHeight + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = math.MaxUint32 + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + unlockStakeMetadata := &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StartEpochNumber: 0, + EndEpochNumber: 0, + } + _, err = _submitUnlockStakeTxn( + testMeta, m1Pub, m1Priv, unlockStakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorProofofStakeTxnBeforeBlockHeight) + + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + } + { + // RuleErrorInvalidValidatorPKID + unlockStakeMetadata := &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m2PkBytes), + StartEpochNumber: 0, + EndEpochNumber: 0, + } + _, err = _submitUnlockStakeTxn( + testMeta, m1Pub, m1Priv, unlockStakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidValidatorPKID) + } + { + // RuleErrorInvalidUnlockStakeEpochRange + unlockStakeMetadata := &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StartEpochNumber: 1, + EndEpochNumber: 0, + } + _, err = _submitUnlockStakeTxn( + testMeta, m1Pub, m1Priv, unlockStakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidUnlockStakeEpochRange) + } + { + // m1 unlocks stake that was assigned to m0. 
+ lockedStakeEntries, err := utxoView().GetLockedStakeEntriesInRange(m0PKID, m1PKID, 0, 0) + require.NoError(t, err) + require.Equal(t, len(lockedStakeEntries), 1) + require.Equal(t, lockedStakeEntries[0].LockedAmountNanos, uint256.NewInt().SetUint64(150)) + + m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + unlockStakeMetadata := &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StartEpochNumber: 0, + EndEpochNumber: 0, + } + feeNanos, err := _submitUnlockStakeTxn( + testMeta, m1Pub, m1Priv, unlockStakeMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + // Verify StakeEntry.isDeleted. + stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) + require.NoError(t, err) + require.Nil(t, stakeEntry) + + // Verify ValidatorEntry.TotalStakeAmountNanos. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt()) + + // Verify GlobalStakeAmountNanos. + globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + + // Verify LockedStakeEntry.isDeleted. + currentEpochNumber := uint64(0) // TODO: get epoch number from db. + lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) + require.NoError(t, err) + require.Nil(t, lockedStakeEntry) + + // Verify m1's DESO balance increases by LockedAmountNanos (net of fees). 
+ m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + require.Equal(t, m1OldDESOBalanceNanos-feeNanos+uint64(150), m1NewDESOBalanceNanos) + } + { + // RuleErrorInvalidUnlockStakeNoUnlockableStakeFound + unlockStakeMetadata := &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StartEpochNumber: 0, + EndEpochNumber: 0, + } + _, err = _submitUnlockStakeTxn( + testMeta, m1Pub, m1Priv, unlockStakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidUnlockStakeNoUnlockableStakeFound) + } + + // Flush mempool to the db and test rollbacks. + require.NoError(t, mempool.universalUtxoView.FlushToDb(blockHeight)) + _executeAllTestRollbackAndFlush(testMeta) +} + +func _submitStakeTxn( + testMeta *TestMeta, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + metadata *StakeMetadata, + extraData map[string][]byte, + flushToDB bool, +) (_fees uint64, _err error) { + // Record transactor's prevBalance. + prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) + + // Convert PublicKeyBase58Check to PkBytes. + updaterPkBytes, _, err := Base58CheckDecode(transactorPublicKeyBase58Check) + require.NoError(testMeta.t, err) + + // Create the transaction. + txn, totalInputMake, changeAmountMake, feesMake, err := testMeta.chain.CreateStakeTxn( + updaterPkBytes, + metadata, + extraData, + testMeta.feeRateNanosPerKb, + testMeta.mempool, + []*DeSoOutput{}, + ) + if err != nil { + return 0, err + } + require.Equal(testMeta.t, totalInputMake, changeAmountMake+feesMake) + + // Sign the transaction now that its inputs are set up. + _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) + + // Connect the transaction. 
+ utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( + txn, + txn.Hash(), + getTxnSize(*txn), + testMeta.savedHeight, + true, + false, + ) + if err != nil { + return 0, err + } + require.Equal(testMeta.t, totalInput, totalOutput+fees) + require.Equal(testMeta.t, totalInput, totalInputMake+metadata.StakeAmountNanos.Uint64()) + require.Equal(testMeta.t, OperationTypeStake, utxoOps[len(utxoOps)-1].Type) + if flushToDB { + require.NoError(testMeta.t, testMeta.mempool.universalUtxoView.FlushToDb(uint64(testMeta.savedHeight))) + } + require.NoError(testMeta.t, testMeta.mempool.RegenerateReadOnlyView()) + + // Record the txn. + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + return fees, nil +} + +func _submitUnstakeTxn( + testMeta *TestMeta, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + metadata *UnstakeMetadata, + extraData map[string][]byte, + flushToDB bool, +) (_fees uint64, _err error) { + // Record transactor's prevBalance. + prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) + + // Convert PublicKeyBase58Check to PkBytes. + updaterPkBytes, _, err := Base58CheckDecode(transactorPublicKeyBase58Check) + require.NoError(testMeta.t, err) + + // Create the transaction. + txn, totalInputMake, changeAmountMake, feesMake, err := testMeta.chain.CreateUnstakeTxn( + updaterPkBytes, + metadata, + extraData, + testMeta.feeRateNanosPerKb, + testMeta.mempool, + []*DeSoOutput{}, + ) + if err != nil { + return 0, err + } + require.Equal(testMeta.t, totalInputMake, changeAmountMake+feesMake) + + // Sign the transaction now that its inputs are set up. + _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) + + // Connect the transaction. 
+ utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( + txn, + txn.Hash(), + getTxnSize(*txn), + testMeta.savedHeight, + true, + false, + ) + if err != nil { + return 0, err + } + require.Equal(testMeta.t, totalInput, totalOutput+fees) + require.Equal(testMeta.t, totalInput, totalInputMake) + require.Equal(testMeta.t, OperationTypeUnstake, utxoOps[len(utxoOps)-1].Type) + if flushToDB { + require.NoError(testMeta.t, testMeta.mempool.universalUtxoView.FlushToDb(uint64(testMeta.savedHeight))) + } + require.NoError(testMeta.t, testMeta.mempool.RegenerateReadOnlyView()) + + // Record the txn. + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + return fees, nil +} + +func _submitUnlockStakeTxn( + testMeta *TestMeta, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + metadata *UnlockStakeMetadata, + extraData map[string][]byte, + flushToDB bool, +) (_fees uint64, _err error) { + // Record transactor's prevBalance. + prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) + + // Convert PublicKeyBase58Check to PkBytes. + updaterPkBytes, _, err := Base58CheckDecode(transactorPublicKeyBase58Check) + require.NoError(testMeta.t, err) + + // Create the transaction. + txn, totalInputMake, changeAmountMake, feesMake, err := testMeta.chain.CreateUnlockStakeTxn( + updaterPkBytes, + metadata, + extraData, + testMeta.feeRateNanosPerKb, + testMeta.mempool, + []*DeSoOutput{}, + ) + if err != nil { + return 0, err + } + require.Equal(testMeta.t, totalInputMake, changeAmountMake+feesMake) + + // Sign the transaction now that its inputs are set up. + _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) + + // Connect the transaction. 
+ utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( + txn, + txn.Hash(), + getTxnSize(*txn), + testMeta.savedHeight, + true, + false, + ) + if err != nil { + return 0, err + } + require.Equal(testMeta.t, totalInput, totalOutput+fees) + // TotalInput = TotalInputMake + TotalUnlockedAmountNanos + require.True(testMeta.t, totalInput > totalInputMake) + require.Equal(testMeta.t, OperationTypeUnlockStake, utxoOps[len(utxoOps)-1].Type) + if flushToDB { + require.NoError(testMeta.t, testMeta.mempool.universalUtxoView.FlushToDb(uint64(testMeta.savedHeight))) + } + require.NoError(testMeta.t, testMeta.mempool.RegenerateReadOnlyView()) + + // Record the txn. + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + return fees, nil +} + +func _testStakingWithDerivedKey(t *testing.T) { + var derivedKeyPriv string + var err error + + // Initialize balance model fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize fork heights. + params.ForkHeights.DeSoUnlimitedDerivedKeysBlockHeight = uint32(0) + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + chain.snapshot = nil + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. 
+ blockHeight := uint64(chain.blockTip().Height) + 1 + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID + m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID + + senderPkBytes, _, err := Base58CheckDecode(senderPkString) + require.NoError(t, err) + senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) + require.NoError(t, err) + senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID + + newUtxoView := func() *UtxoView { + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + return utxoView + } + + getDESOBalanceNanos := func(publicKeyBytes []byte) uint64 { + desoBalanceNanos, err := newUtxoView().GetDeSoBalanceNanosForPublicKey(publicKeyBytes) + require.NoError(t, err) + return desoBalanceNanos + } + + _submitAuthorizeDerivedKeyTxn := func(txnSpendingLimit *TransactionSpendingLimit) (string, error) { + utxoView := newUtxoView() + derivedKeyMetadata, derivedKeyAuthPriv := _getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit( + t, senderPrivKey, blockHeight+5, txnSpendingLimit, false, blockHeight, + ) + derivedKeyAuthPrivBase58Check := Base58CheckEncode(derivedKeyAuthPriv.Serialize(), true, params) + + prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, senderPkString) + + utxoOps, txn, 
_, err := _doAuthorizeTxnWithExtraDataAndSpendingLimits( + testMeta, + utxoView, + testMeta.feeRateNanosPerKb, + senderPkBytes, + derivedKeyMetadata.DerivedPublicKey, + derivedKeyAuthPrivBase58Check, + derivedKeyMetadata.ExpirationBlock, + derivedKeyMetadata.AccessSignature, + false, + nil, + nil, + txnSpendingLimit, + ) + if err != nil { + return "", err + } + require.NoError(t, utxoView.FlushToDb(blockHeight)) + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + + err = utxoView.ValidateDerivedKey( + senderPkBytes, derivedKeyMetadata.DerivedPublicKey, blockHeight, + ) + require.NoError(t, err) + return derivedKeyAuthPrivBase58Check, nil + } + + _submitStakeTxnWithDerivedKey := func( + transactorPkBytes []byte, derivedKeyPrivBase58Check string, inputTxn MsgDeSoTxn, + ) (_fees uint64, _err error) { + utxoView := newUtxoView() + var txn *MsgDeSoTxn + + switch inputTxn.TxnMeta.GetTxnType() { + // Construct txn. + case TxnTypeStake: + txn, _, _, _, err = testMeta.chain.CreateStakeTxn( + transactorPkBytes, + inputTxn.TxnMeta.(*StakeMetadata), + make(map[string][]byte), + testMeta.feeRateNanosPerKb, + mempool, + []*DeSoOutput{}, + ) + case TxnTypeUnstake: + txn, _, _, _, err = testMeta.chain.CreateUnstakeTxn( + transactorPkBytes, + inputTxn.TxnMeta.(*UnstakeMetadata), + make(map[string][]byte), + testMeta.feeRateNanosPerKb, + mempool, + []*DeSoOutput{}, + ) + case TxnTypeUnlockStake: + txn, _, _, _, err = testMeta.chain.CreateUnlockStakeTxn( + transactorPkBytes, + inputTxn.TxnMeta.(*UnlockStakeMetadata), + make(map[string][]byte), + testMeta.feeRateNanosPerKb, + mempool, + []*DeSoOutput{}, + ) + default: + return 0, errors.New("invalid txn type") + } + if err != nil { + return 0, err + } + // Sign txn. + _signTxnWithDerivedKeyAndType(t, txn, derivedKeyPrivBase58Check, 1) + // Store the original transactor balance. 
+ transactorPublicKeyBase58Check := Base58CheckEncode(transactorPkBytes, false, params) + prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) + // Connect txn. + utxoOps, _, _, fees, err := utxoView.ConnectTransaction( + txn, + txn.Hash(), + getTxnSize(*txn), + testMeta.savedHeight, + true, + false, + ) + if err != nil { + return 0, err + } + // Flush UTXO view to the db. + require.NoError(t, utxoView.FlushToDb(blockHeight)) + // Track txn for rolling back. + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + return fees, nil + } + + { + // ParamUpdater set min fee rate + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } + { + // m0 registers as a validator. + registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example1.com")}, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, true, + ) + require.NoError(t, err) + } + { + // m1 registers as a validator. + registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example2.com")}, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m1Pub, m1Priv, registerAsValidatorMetadata, nil, true, + ) + require.NoError(t, err) + } + { + // RuleErrorTransactionSpendingLimitInvalidStaker + // sender tries to create a DerivedKey that would allow + // m1 to stake 100 $DESO nanos with m0. Errors. 
+ stakeLimitKey := MakeStakeLimitKey(m0PKID, m1PKID) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + }, + StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(100)}, + } + derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.Error(t, err) + } + { + // RuleErrorTransactionSpendingLimitInvalidValidator + // sender tries to create a DerivedKey to stake with m2. Validator doesn't exist. Errors. + stakeLimitKey := MakeStakeLimitKey(m2PKID, senderPKID) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + }, + StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(100)}, + } + derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.Error(t, err) + } + { + // RuleErrorTransactionSpendingLimitInvalidStaker + // sender tries to create a DerivedKey that would allow + // m1 to unstake 100 $DESO nanos from m0. Errors. + stakeLimitKey := MakeStakeLimitKey(m0PKID, m1PKID) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + }, + UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(100)}, + } + derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.Error(t, err) + } + { + // RuleErrorTransactionSpendingLimitInvalidValidator + // sender tries to create a DerivedKey to unstake from m2. Validator doesn't exist. Errors. 
+ stakeLimitKey := MakeStakeLimitKey(m2PKID, senderPKID) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + }, + UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(100)}, + } + derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.Error(t, err) + } + { + // RuleErrorTransactionSpendingLimitInvalidStaker + // sender tries to create a DerivedKey that would allow + // m1 to unlock stake from m0. Errors. + stakeLimitKey := MakeStakeLimitKey(m0PKID, m1PKID) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + }, + UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey: 100}, + } + derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.Error(t, err) + } + { + // RuleErrorTransactionSpendingLimitInvalidValidator + // sender tries to create a DerivedKey to stake with m2. Validator doesn't exist. Errors. + stakeLimitKey := MakeStakeLimitKey(m2PKID, senderPKID) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + }, + UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey: 100}, + } + derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.Error(t, err) + } + { + // sender stakes with m0 using a DerivedKey. + + // sender creates a DerivedKey to stake up to 100 $DESO nanos with m0. 
+ stakeLimitKey := MakeStakeLimitKey(m0PKID, senderPKID) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + }, + StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(100)}, + } + derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.NoError(t, err) + + // sender tries to stake 100 $DESO nanos with m1 using the DerivedKey. Errors. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorStakeTransactionSpendingLimitNotFound) + + // sender tries to stake 200 $DESO nanos with m0 using the DerivedKey. Errors. + stakeMetadata = &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(200), + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorStakeTransactionSpendingLimitExceeded) + + // sender stakes 100 $DESO nanos with m0 using the DerivedKey. Succeeds. + senderOldDESOBalanceNanos := getDESOBalanceNanos(senderPkBytes) + stakeMetadata = &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(100), + } + feeNanos, err := _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, + ) + require.NoError(t, err) + + // StakeEntry was created. 
+ stakeEntry, err := newUtxoView().GetStakeEntry(m0PKID, senderPKID) + require.NoError(t, err) + require.NotNil(t, stakeEntry) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // Verify sender's DESO balance is reduced by StakeAmountNanos (net of fees). + senderNewDESOBalanceNanos := getDESOBalanceNanos(senderPkBytes) + require.Equal(t, senderOldDESOBalanceNanos-feeNanos-stakeMetadata.StakeAmountNanos.Uint64(), senderNewDESOBalanceNanos) + } + { + // sender unstakes from m0 using a DerivedKey. + + // sender creates a DerivedKey to unstake up to 50 $DESO nanos from m0. + stakeLimitKey := MakeStakeLimitKey(m0PKID, senderPKID) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + }, + UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(50)}, + } + derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.NoError(t, err) + + // sender tries to unstake 50 $DESO nanos from m1 using the DerivedKey. Errors. + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidUnstakeNoStakeFound) + + // sender stakes 50 $DESO nanos with m1. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitStakeTxn( + testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, + ) + require.NoError(t, err) + + // sender tries to unstake 50 $DESO nanos from m1 using the DerivedKey. Errors. 
+ unstakeMetadata = &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorUnstakeTransactionSpendingLimitNotFound) + + // sender tries to unstake 200 $DESO nanos from m0 using the DerivedKey. Errors. + unstakeMetadata = &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(200), + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidUnstakeInsufficientStakeFound) + + // sender tries to unstake 100 $DESO nanos from m0 using the DerivedKey. Errors. + unstakeMetadata = &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorUnstakeTransactionSpendingLimitExceeded) + + // sender unstakes 50 $DESO nanos from m0 using the DerivedKey. Succeeds. + unstakeMetadata = &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, + ) + require.NoError(t, err) + + // StakeEntry was updated. + stakeEntry, err := newUtxoView().GetStakeEntry(m0PKID, senderPKID) + require.NoError(t, err) + require.NotNil(t, stakeEntry) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(50)) + + // LockedStakeEntry was created. + epochNumber := uint64(0) // TODO: get epoch number from db. 
+ lockedStakeEntry, err := newUtxoView().GetLockedStakeEntry(m0PKID, senderPKID, epochNumber) + require.NoError(t, err) + require.NotNil(t, lockedStakeEntry) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(50)) + } + { + // sender unlocks stake using a DerivedKey. + + // sender creates a DerivedKey to perform 1 unlock stake operation with m0. + stakeLimitKey := MakeStakeLimitKey(m0PKID, senderPKID) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + }, + UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey: 1}, + } + derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.NoError(t, err) + + // sender tries to unlock all stake from m1 using the DerivedKey. Errors. + epochNumber := uint64(0) // TODO: get epoch number from db. + unlockStakeMetadata := &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StartEpochNumber: epochNumber, + EndEpochNumber: epochNumber, + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidUnlockStakeNoUnlockableStakeFound) + + // sender unstakes 50 $DESO nanos from m1. + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitUnstakeTxn( + testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, + ) + require.NoError(t, err) + + // sender tries to unlock all stake from m1 using the DerivedKey. Errors. 
+ unlockStakeMetadata = &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StartEpochNumber: epochNumber, + EndEpochNumber: epochNumber, + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorUnlockStakeTransactionSpendingLimitNotFound) + + // sender unlocks all stake from m0 using the DerivedKey. Succeeds. + senderOldDESOBalanceNanos := getDESOBalanceNanos(senderPkBytes) + unlockStakeMetadata = &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StartEpochNumber: epochNumber, + EndEpochNumber: epochNumber, + } + feeNanos, err := _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, + ) + require.NoError(t, err) + + // LockedStakeEntry was deleted. + lockedStakeEntry, err := newUtxoView().GetLockedStakeEntry(m0PKID, senderPKID, epochNumber) + require.NoError(t, err) + require.Nil(t, lockedStakeEntry) + + // Verify sender's DESO balance was increased by 50 DESO nanos (net of fees). + senderNewDESOBalanceNanos := getDESOBalanceNanos(senderPkBytes) + require.Equal(t, senderOldDESOBalanceNanos-feeNanos+uint64(50), senderNewDESOBalanceNanos) + + // sender stakes + unstakes 50 $DESO nanos with m0. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitStakeTxn( + testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, + ) + require.NoError(t, err) + unstakeMetadata = &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitUnstakeTxn( + testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, + ) + require.NoError(t, err) + + // sender tries to unlock all stake from m0 using the DerivedKey. Errors. 
+ unlockStakeMetadata = &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StartEpochNumber: epochNumber, + EndEpochNumber: epochNumber, + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorUnlockStakeTransactionSpendingLimitNotFound) + } + { + // sender stakes, unstakes, and unlocks stake using a DerivedKey scoped to any validator. + + // sender creates a DerivedKey that can stake, unstake, and unlock stake with any validator. + stakeLimitKey := MakeStakeLimitKey(&ZeroPKID, senderPKID) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + }, + StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(50)}, + UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(50)}, + UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey: 2}, + } + derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.NoError(t, err) + + // sender stakes with m0 using the DerivedKey. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(25), + } + _, err = _submitStakeTxn( + testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, + ) + require.NoError(t, err) + + // sender stakes with m1 using the DerivedKey. + stakeMetadata = &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(25), + } + _, err = _submitStakeTxn( + testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, + ) + require.NoError(t, err) + + // sender unstakes from m0 using the DerivedKey. 
+ unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(25), + } + _, err = _submitUnstakeTxn( + testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, + ) + require.NoError(t, err) + + // sender unstakes from m1 using the DerivedKey. + unstakeMetadata = &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(25), + } + _, err = _submitUnstakeTxn( + testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, + ) + require.NoError(t, err) + + // sender unlocks stake from m0 using the DerivedKey. + epochNumber := uint64(0) // TODO: get epoch number from db. + unlockStakeMetadata := &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StartEpochNumber: epochNumber, + EndEpochNumber: epochNumber, + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, + ) + require.NoError(t, err) + + // sender unlocks stake from m1 using the DerivedKey. + unlockStakeMetadata = &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StartEpochNumber: epochNumber, + EndEpochNumber: epochNumber, + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, + ) + require.NoError(t, err) + } + { + // sender stakes, unstakes, and unlocks stake using an IsUnlimited DerivedKey. + + // sender creates an IsUnlimited DerivedKey. + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: 0, + IsUnlimited: true, + } + derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.NoError(t, err) + + // sender stakes with m0 using the DerivedKey. 
+ stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(25), + } + _, err = _submitStakeTxn( + testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, + ) + require.NoError(t, err) + + // sender stakes with m1 using the DerivedKey. + stakeMetadata = &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(25), + } + _, err = _submitStakeTxn( + testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, + ) + require.NoError(t, err) + + // sender unstakes from m0 using the DerivedKey. + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(25), + } + _, err = _submitUnstakeTxn( + testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, + ) + require.NoError(t, err) + + // sender unstakes from m1 using the DerivedKey. + unstakeMetadata = &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(25), + } + _, err = _submitUnstakeTxn( + testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, + ) + require.NoError(t, err) + + // sender unlocks stake from m0 using the DerivedKey. + epochNumber := uint64(0) // TODO: get epoch number from db. + unlockStakeMetadata := &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StartEpochNumber: epochNumber, + EndEpochNumber: epochNumber, + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, + ) + require.NoError(t, err) + + // sender unlocks stake from m1 using the DerivedKey. 
+ unlockStakeMetadata = &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StartEpochNumber: epochNumber, + EndEpochNumber: epochNumber, + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, + ) + require.NoError(t, err) + } + { + // sender exhausts a TransactionSpendingLimit scoped to a single validator. + // We fall back to check if there is a TransactionSpendingLimit scoped to + // any validator to cover their staking + unstaking + unlocking stake txns. + + // sender creates a DerivedKey to stake, unstake, and unlock stake with m1 or any validator. + scopedStakeLimitKey := MakeStakeLimitKey(m1PKID, senderPKID) + globalStakeLimitKey := MakeStakeLimitKey(&ZeroPKID, senderPKID) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + }, + StakeLimitMap: map[StakeLimitKey]*uint256.Int{ + scopedStakeLimitKey: uint256.NewInt().SetUint64(100), + globalStakeLimitKey: uint256.NewInt().SetUint64(200), + }, + UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{ + scopedStakeLimitKey: uint256.NewInt().SetUint64(100), + globalStakeLimitKey: uint256.NewInt().SetUint64(200), + }, + UnlockStakeLimitMap: map[StakeLimitKey]uint64{scopedStakeLimitKey: 1, globalStakeLimitKey: 1}, + } + derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.NoError(t, err) + + // sender stakes with m1 using the global TransactionSpendingLimit. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(200), + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, + ) + require.NoError(t, err) + + // sender unstakes from m1 using the global TransactionSpendingLimit. 
+ unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(200), + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, + ) + require.NoError(t, err) + + // sender unlocks stake from m1 using the scoped TransactionSpendingLimit. + epochNumber := uint64(0) // TODO: get epoch number from db. + unlockStakeMetadata := &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StartEpochNumber: epochNumber, + EndEpochNumber: epochNumber, + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, + ) + require.NoError(t, err) + + // sender stakes with m1 using the scoped TransactionSpendingLimit. + stakeMetadata = &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, + ) + require.NoError(t, err) + + // sender unstakes from m1 using the scoped TransactionSpendingLimit. + unstakeMetadata = &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, + ) + require.NoError(t, err) + + // sender unlocks stake from m1 using the global TransactionSpendingLimit. + epochNumber = uint64(0) // TODO: get epoch number from db. + unlockStakeMetadata = &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StartEpochNumber: epochNumber, + EndEpochNumber: epochNumber, + } + _, err = _submitStakeTxnWithDerivedKey( + senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, + ) + require.NoError(t, err) + } + { + // Test TransactionSpendingLimit.ToMetamaskString() scoped to one validator. 
+ stakeLimitKey1 := MakeStakeLimitKey(m0PKID, senderPKID) + stakeLimitKey2 := MakeStakeLimitKey(m1PKID, senderPKID) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + }, + StakeLimitMap: map[StakeLimitKey]*uint256.Int{ + stakeLimitKey1: uint256.NewInt().SetUint64(uint64(1.5 * float64(NanosPerUnit))), + stakeLimitKey2: uint256.NewInt().SetUint64(uint64(2.0 * float64(NanosPerUnit))), + }, + UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{ + stakeLimitKey1: uint256.NewInt().SetUint64(uint64(3.25 * float64(NanosPerUnit))), + }, + UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey1: 2, stakeLimitKey2: 3}, + } + metamaskStr := txnSpendingLimit.ToMetamaskString(params) + require.Equal(t, metamaskStr, + "Spending limits on the derived key:\n"+ + "\tTotal $DESO Limit: 1.0 $DESO\n"+ + "\tTransaction Count Limit: \n"+ + "\t\tAUTHORIZE_DERIVED_KEY: 1\n"+ + "\tStaking Restrictions:\n"+ + "\t\t[\n"+ + "\t\t\tValidator PKID: "+m0Pub+"\n"+ + "\t\t\tStaker PKID: "+senderPkString+"\n"+ + "\t\t\tStaking Limit: 1.50 $DESO\n"+ + "\t\t]\n"+ + "\t\t[\n"+ + "\t\t\tValidator PKID: "+m1Pub+"\n"+ + "\t\t\tStaker PKID: "+senderPkString+"\n"+ + "\t\t\tStaking Limit: 2.00 $DESO\n"+ + "\t\t]\n"+ + "\tUnstaking Restrictions:\n"+ + "\t\t[\n"+ + "\t\t\tValidator PKID: "+m0Pub+"\n"+ + "\t\t\tStaker PKID: "+senderPkString+"\n"+ + "\t\t\tUnstaking Limit: 3.25 $DESO\n"+ + "\t\t]\n"+ + "\tUnlocking Stake Restrictions:\n"+ + "\t\t[\n"+ + "\t\t\tValidator PKID: "+m0Pub+"\n"+ + "\t\t\tStaker PKID: "+senderPkString+"\n"+ + "\t\t\tTransaction Count: 2\n"+ + "\t\t]\n"+ + "\t\t[\n"+ + "\t\t\tValidator PKID: "+m1Pub+"\n"+ + "\t\t\tStaker PKID: "+senderPkString+"\n"+ + "\t\t\tTransaction Count: 3\n"+ + "\t\t]\n", + ) + } + { + // Test TransactionSpendingLimit.ToMetamaskString() scoped to any validator. 
+ stakeLimitKey := MakeStakeLimitKey(&ZeroPKID, senderPKID) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + }, + StakeLimitMap: map[StakeLimitKey]*uint256.Int{ + stakeLimitKey: uint256.NewInt().SetUint64(uint64(0.65 * float64(NanosPerUnit))), + }, + UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{ + stakeLimitKey: uint256.NewInt().SetUint64(uint64(2.1 * float64(NanosPerUnit))), + }, + UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey: 1}, + } + metamaskStr := txnSpendingLimit.ToMetamaskString(params) + require.Equal(t, metamaskStr, + "Spending limits on the derived key:\n"+ + "\tTotal $DESO Limit: 1.0 $DESO\n"+ + "\tTransaction Count Limit: \n"+ + "\t\tAUTHORIZE_DERIVED_KEY: 1\n"+ + "\tStaking Restrictions:\n"+ + "\t\t[\n"+ + "\t\t\tValidator PKID: Any\n"+ + "\t\t\tStaker PKID: "+senderPkString+"\n"+ + "\t\t\tStaking Limit: 0.65 $DESO\n"+ + "\t\t]\n"+ + "\tUnstaking Restrictions:\n"+ + "\t\t[\n"+ + "\t\t\tValidator PKID: Any\n"+ + "\t\t\tStaker PKID: "+senderPkString+"\n"+ + "\t\t\tUnstaking Limit: 2.10 $DESO\n"+ + "\t\t]\n"+ + "\tUnlocking Stake Restrictions:\n"+ + "\t\t[\n"+ + "\t\t\tValidator PKID: Any\n"+ + "\t\t\tStaker PKID: "+senderPkString+"\n"+ + "\t\t\tTransaction Count: 1\n"+ + "\t\t]\n", + ) + } + + // Flush mempool to the db and test rollbacks. + require.NoError(t, mempool.universalUtxoView.FlushToDb(blockHeight)) + _executeAllTestRollbackAndFlush(testMeta) +} + +func TestGetLockedStakeEntriesInRange(t *testing.T) { + // For this test, we manually place LockedStakeEntries in the database and + // UtxoView to test merging the two to GetLockedStakeEntriesInRange. + + // Initialize test chain and UtxoView. 
+ chain, params, db := NewLowDifficultyBlockchain(t) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + blockHeight := uint64(chain.blockTip().Height + 1) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID + + // Set a LockedStakeEntry in the db. + lockedStakeEntry := &LockedStakeEntry{ + ValidatorPKID: m0PKID, + StakerPKID: m0PKID, + LockedAtEpochNumber: 1, + } + utxoView._setLockedStakeEntryMappings(lockedStakeEntry) + require.NoError(t, utxoView.FlushToDb(blockHeight)) + + // Verify LockedStakeEntry is in the db. + lockedStakeEntry, err = DBGetLockedStakeEntry(db, chain.snapshot, m0PKID, m0PKID, 1) + require.NoError(t, err) + require.NotNil(t, lockedStakeEntry) + + // Verify LockedStakeEntry is not in the UtxoView. + require.Empty(t, utxoView.LockedStakeMapKeyToLockedStakeEntry) + + // Set another LockedStakeEntry in the db. + lockedStakeEntry = &LockedStakeEntry{ + ValidatorPKID: m0PKID, + StakerPKID: m0PKID, + LockedAtEpochNumber: 2, + } + utxoView._setLockedStakeEntryMappings(lockedStakeEntry) + require.NoError(t, utxoView.FlushToDb(blockHeight)) + + // Fetch the LockedStakeEntry so it is also cached in the UtxoView. + lockedStakeEntry, err = utxoView.GetLockedStakeEntry(m0PKID, m0PKID, 2) + require.NoError(t, err) + require.NotNil(t, lockedStakeEntry) + + // Verify the LockedStakeEntry is in the db. + lockedStakeEntry, err = DBGetLockedStakeEntry(db, chain.snapshot, m0PKID, m0PKID, 2) + require.NoError(t, err) + require.NotNil(t, lockedStakeEntry) + + // Verify the LockedStakeEntry is also in the UtxoView. + require.Len(t, utxoView.LockedStakeMapKeyToLockedStakeEntry, 1) + require.NotNil(t, utxoView.LockedStakeMapKeyToLockedStakeEntry[lockedStakeEntry.ToMapKey()]) + + // Set another LockedStakeEntry in the UtxoView. 
+ utxoViewLockedStakeEntry := &LockedStakeEntry{ + ValidatorPKID: m0PKID, + StakerPKID: m0PKID, + LockedAtEpochNumber: 3, + } + utxoView._setLockedStakeEntryMappings(utxoViewLockedStakeEntry) + + // Verify the LockedStakeEntry is not in the db. + lockedStakeEntry, err = DBGetLockedStakeEntry(db, chain.snapshot, m0PKID, m0PKID, 3) + require.NoError(t, err) + require.Nil(t, lockedStakeEntry) + + // Verify the LockedStakeEntry is in the UtxoView. + require.Len(t, utxoView.LockedStakeMapKeyToLockedStakeEntry, 2) + require.NotNil(t, utxoView.LockedStakeMapKeyToLockedStakeEntry[utxoViewLockedStakeEntry.ToMapKey()]) + + // Verify GetLockedStakeEntriesInRange. + lockedStakeEntries, err := utxoView.GetLockedStakeEntriesInRange(m0PKID, m0PKID, 1, 3) + require.NoError(t, err) + require.Len(t, lockedStakeEntries, 3) + require.Equal(t, lockedStakeEntries[0].LockedAtEpochNumber, uint64(1)) + require.Equal(t, lockedStakeEntries[1].LockedAtEpochNumber, uint64(2)) + require.Equal(t, lockedStakeEntries[2].LockedAtEpochNumber, uint64(3)) + + // A few more edge case tests for GetLockedStakeEntriesInRange. + lockedStakeEntries, err = utxoView.GetLockedStakeEntriesInRange(m0PKID, m0PKID, 0, 4) + require.NoError(t, err) + require.Len(t, lockedStakeEntries, 3) + + // Nil ValidatorPKID. + lockedStakeEntries, err = utxoView.GetLockedStakeEntriesInRange(nil, m0PKID, 1, 3) + require.Error(t, err) + require.Contains(t, err.Error(), "nil ValidatorPKID provided as input") + + // Nil StakerPKID. + lockedStakeEntries, err = utxoView.GetLockedStakeEntriesInRange(m0PKID, nil, 1, 3) + require.Error(t, err) + require.Contains(t, err.Error(), "nil StakerPKID provided as input") + + // StartEpochNumber > EndEpochNumber. + lockedStakeEntries, err = utxoView.GetLockedStakeEntriesInRange(m0PKID, m0PKID, 3, 1) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid LockedAtEpochNumber range provided as input") + + // None found for this ValidatorPKID. 
+ lockedStakeEntries, err = utxoView.GetLockedStakeEntriesInRange(m1PKID, m0PKID, 1, 3) + require.NoError(t, err) + require.Empty(t, lockedStakeEntries) + + // None found for this StakerPKID. + lockedStakeEntries, err = utxoView.GetLockedStakeEntriesInRange(m0PKID, m1PKID, 1, 3) + require.NoError(t, err) + require.Empty(t, lockedStakeEntries) + + // None found for this LockedAtEpochNumber range. + lockedStakeEntries, err = utxoView.GetLockedStakeEntriesInRange(m0PKID, m0PKID, 5, 6) + require.NoError(t, err) + require.Empty(t, lockedStakeEntries) +} diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 3aca53fff..4c6e3fd2f 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -111,9 +111,11 @@ const ( EncoderTypeDeSoNonce EncoderType = 38 EncoderTypeTransactorNonceEntry EncoderType = 39 EncoderTypeValidatorEntry EncoderType = 40 + EncoderTypeStakeEntry EncoderType = 41 + EncoderTypeLockedStakeEntry EncoderType = 42 // EncoderTypeEndBlockView encoder type should be at the end and is used for automated tests. - EncoderTypeEndBlockView EncoderType = 41 + EncoderTypeEndBlockView EncoderType = 43 ) // Txindex encoder types. @@ -150,9 +152,12 @@ const ( EncoderTypeNewMessageTxindexMetadata EncoderType = 1000029 EncoderTypeRegisterAsValidatorTxindexMetadata EncoderType = 1000030 EncoderTypeUnregisterAsValidatorTxindexMetadata EncoderType = 1000031 + EncoderTypeStakeTxindexMetadata EncoderType = 1000032 + EncoderTypeUnstakeTxindexMetadata EncoderType = 1000033 + EncoderTypeUnlockStakeTxindexMetadata EncoderType = 1000034 // EncoderTypeEndTxIndex encoder type should be at the end and is used for automated tests. - EncoderTypeEndTxIndex EncoderType = 1000032 + EncoderTypeEndTxIndex EncoderType = 1000035 ) // This function translates the EncoderType into an empty DeSoEncoder struct. 
@@ -241,6 +246,10 @@ func (encoderType EncoderType) New() DeSoEncoder { return &TransactorNonceEntry{} case EncoderTypeValidatorEntry: return &ValidatorEntry{} + case EncoderTypeStakeEntry: + return &StakeEntry{} + case EncoderTypeLockedStakeEntry: + return &LockedStakeEntry{} } // Txindex encoder types @@ -309,6 +318,12 @@ func (encoderType EncoderType) New() DeSoEncoder { return &RegisterAsValidatorTxindexMetadata{} case EncoderTypeUnregisterAsValidatorTxindexMetadata: return &UnregisterAsValidatorTxindexMetadata{} + case EncoderTypeStakeTxindexMetadata: + return &StakeTxindexMetadata{} + case EncoderTypeUnstakeTxindexMetadata: + return &UnstakeTxindexMetadata{} + case EncoderTypeUnlockStakeTxindexMetadata: + return &UnlockStakeTxindexMetadata{} default: return nil } @@ -605,8 +620,11 @@ const ( OperationTypeDeleteExpiredNonces OperationType = 38 OperationTypeRegisterAsValidator OperationType = 39 OperationTypeUnregisterAsValidator OperationType = 40 + OperationTypeStake OperationType = 41 + OperationTypeUnstake OperationType = 42 + OperationTypeUnlockStake OperationType = 43 - // NEXT_TAG = 41 + // NEXT_TAG = 44 ) func (op OperationType) String() string { @@ -691,6 +709,12 @@ func (op OperationType) String() string { return "OperationTypeRegisterAsValidator" case OperationTypeUnregisterAsValidator: return "OperationTypeUnregisterAsValidator" + case OperationTypeStake: + return "OperationTypeStake" + case OperationTypeUnstake: + return "OperationTypeUnstake" + case OperationTypeUnlockStake: + return "OperationTypeUnlockStake" } return "OperationTypeUNKNOWN" } @@ -876,8 +900,21 @@ type UtxoOperation struct { // When we connect a block, we delete expired nonce entries. PrevNonceEntries []*TransactorNonceEntry - // PrevValidatorEntry is the previous ValidatorEntry prior to a register or unregister txn. + // PrevValidatorEntry is the previous ValidatorEntry prior to a + // register, unregister, stake, or unstake txn. 
PrevValidatorEntry *ValidatorEntry + + // PrevGlobalStakeAmountNanos is the previous GlobalStakeAmountNanos + // prior to a stake or unstake operation txn. + PrevGlobalStakeAmountNanos *uint256.Int + + // PrevStakeEntries is a slice of StakeEntries prior to + // a register, unregister, stake, or unstake txn. + PrevStakeEntries []*StakeEntry + + // PrevLockedStakeEntries is a slice of LockedStakeEntries + // prior to a unstake or unlock stake txn. + PrevLockedStakeEntries []*LockedStakeEntry } func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -1198,6 +1235,15 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { // PrevValidatorEntry data = append(data, EncodeToBytes(blockHeight, op.PrevValidatorEntry, skipMetadata...)...) + + // PrevGlobalStakeAmountNanos + data = append(data, EncodeUint256(op.PrevGlobalStakeAmountNanos)...) + + // PrevStakeEntries + data = append(data, EncodeDeSoEncoderSlice(op.PrevStakeEntries, blockHeight, skipMetadata...)...) + + // PrevLockedStakeEntries + data = append(data, EncodeDeSoEncoderSlice(op.PrevLockedStakeEntries, blockHeight, skipMetadata...)...) } return data @@ -1817,11 +1863,25 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. 
if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { // PrevValidatorEntry - prevValidatorEntry := &ValidatorEntry{} - if exist, err := DecodeFromBytes(prevValidatorEntry, rr); exist && err == nil { - op.PrevValidatorEntry = prevValidatorEntry - } else if err != nil { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevValidatorEntry") + if op.PrevValidatorEntry, err = DecodeDeSoEncoder(&ValidatorEntry{}, rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevValidatorEntry: ") + } + + // PrevGlobalStakeAmountNanos + if prevGlobalStakeAmountNanos, err := DecodeUint256(rr); err == nil { + op.PrevGlobalStakeAmountNanos = prevGlobalStakeAmountNanos + } else { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevGlobalStakeAmountNanos: ") + } + + // PrevStakeEntries + if op.PrevStakeEntries, err = DecodeDeSoEncoderSlice[*StakeEntry](rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevStakeEntries: ") + } + + // PrevLockedStakeEntries + if op.PrevLockedStakeEntries, err = DecodeDeSoEncoderSlice[*LockedStakeEntry](rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockedStakeEntries: ") } } @@ -4843,6 +4903,11 @@ func DecodeMapStringUint64(rr *bytes.Reader) (map[string]uint64, error) { return nil, nil } +// EncodeUint256 is useful for space-efficient encoding of uint256s. +// It does not guarantee fixed-width encoding, so should not be used +// in BadgerDB keys. Use EncodeOptionalUint256 instead, which does +// guarantee fixed-width encoding. Both EncodeUint256 and +// EncodeOptionalUint256 can handle nil inputs. 
func EncodeUint256(number *uint256.Int) []byte { var data []byte if number != nil { diff --git a/lib/block_view_types_test.go b/lib/block_view_types_test.go index c52e52a20..7448697d1 100644 --- a/lib/block_view_types_test.go +++ b/lib/block_view_types_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" "github.com/brianvoe/gofakeit" + "github.com/holiman/uint256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "reflect" @@ -390,3 +391,60 @@ func TestUtxoEntryEncodeDecode(t *testing.T) { } }) } + +func TestEncodingUint256s(t *testing.T) { + // Create three uint256.Ints. + num1 := uint256.NewInt() + num2 := uint256.NewInt().SetUint64(598128756) + num3 := MaxUint256 + + // Encode them to bytes using EncodeUint256. + encoded1 := EncodeUint256(num1) + encoded2 := EncodeUint256(num2) + encoded3 := EncodeUint256(num3) + + // Decode them from bytes using DecodeUint256. Verify values. + rr := bytes.NewReader(encoded1) + decoded1, err := DecodeUint256(rr) + require.NoError(t, err) + require.True(t, num1.Eq(decoded1)) + + rr = bytes.NewReader(encoded2) + decoded2, err := DecodeUint256(rr) + require.NoError(t, err) + require.True(t, num2.Eq(decoded2)) + + rr = bytes.NewReader(encoded3) + decoded3, err := DecodeUint256(rr) + require.NoError(t, err) + require.True(t, num3.Eq(decoded3)) + + // Test that EncodeUint256 does not provide a fixed-width byte encoding. + require.NotEqual(t, len(encoded1), len(encoded2)) + require.NotEqual(t, len(encoded1), len(encoded3)) + + // Encode them to bytes using EncodeOptionalUint256. + encoded1 = EncodeOptionalUint256(num1) + encoded2 = EncodeOptionalUint256(num2) + encoded3 = EncodeOptionalUint256(num3) + + // Decode them from bytes using ReadOptionalUint256. Verify values. 
+ rr = bytes.NewReader(encoded1) + decoded1, err = ReadOptionalUint256(rr) + require.NoError(t, err) + require.True(t, num1.Eq(decoded1)) + + rr = bytes.NewReader(encoded2) + decoded2, err = ReadOptionalUint256(rr) + require.NoError(t, err) + require.True(t, num2.Eq(decoded2)) + + rr = bytes.NewReader(encoded3) + decoded3, err = ReadOptionalUint256(rr) + require.NoError(t, err) + require.True(t, num3.Eq(decoded3)) + + // Test that EncodeOptionalUint256 provides a fixed-width byte encoding. + require.Equal(t, len(encoded1), len(encoded2)) + require.Equal(t, len(encoded1), len(encoded3)) +} diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 7dd592069..344b60c50 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -50,14 +50,6 @@ func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { domainsCopy = append(domainsCopy, append([]byte{}, domain...)) // Makes a copy. } - // Copy ExtraData. - extraDataCopy := make(map[string][]byte) - for key, value := range validatorEntry.ExtraData { - valueCopy := make([]byte, len(value)) - copy(valueCopy, value) - extraDataCopy[key] = valueCopy - } - // Return new ValidatorEntry. return &ValidatorEntry{ ValidatorID: validatorEntry.ValidatorID.NewBlockHash(), @@ -69,7 +61,7 @@ func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { VotingSignatureBlockHeight: validatorEntry.VotingSignatureBlockHeight, TotalStakeAmountNanos: validatorEntry.TotalStakeAmountNanos.Clone(), RegisteredAtBlockHeight: validatorEntry.RegisteredAtBlockHeight, - ExtraData: extraDataCopy, + ExtraData: copyExtraData(validatorEntry.ExtraData), isDeleted: validatorEntry.isDeleted, } } @@ -505,8 +497,9 @@ func DBKeyForValidatorByPKID(validatorEntry *ValidatorEntry) []byte { func DBKeyForValidatorByStake(validatorEntry *ValidatorEntry) []byte { key := append([]byte{}, Prefixes.PrefixValidatorByStake...) 
- // FIXME: ensure that this left-pads the uint256 to be equal width - key = append(key, EncodeUint256(validatorEntry.TotalStakeAmountNanos)...) // Highest stake first + // TotalStakeAmountNanos will never be nil here, but EncodeOptionalUint256 + // is used because it provides a fixed-width encoding of uint256.Ints. + key = append(key, EncodeOptionalUint256(validatorEntry.TotalStakeAmountNanos)...) // Highest stake first key = append(key, EncodeUint64(math.MaxUint64-validatorEntry.RegisteredAtBlockHeight)...) // Oldest first key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) return key @@ -867,7 +860,7 @@ func (bav *UtxoView) _connectRegisterAsValidator( ) { // Validate the starting block height. if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { - return 0, 0, nil, RuleErrorProofofStakeTxnBeforeBlockHeight + return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectRegisterAsValidator: ") } // Validate the txn TxnType. @@ -902,7 +895,7 @@ func (bav *UtxoView) _connectRegisterAsValidator( // Convert TransactorPublicKey to TransactorPKID. transactorPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { - return 0, 0, nil, RuleErrorInvalidValidatorPKID + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidValidatorPKID, "_connectRegisterAsValidator: ") } // Check if there is an existing ValidatorEntry that will be overwritten. @@ -1006,7 +999,7 @@ func (bav *UtxoView) _disconnectRegisterAsValidator( // Convert TransactorPublicKey to TransactorPKID. transactorPKIDEntry := bav.GetPKIDForPublicKey(currentTxn.PublicKey) if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { - return RuleErrorInvalidValidatorPKID + return errors.Wrapf(RuleErrorInvalidValidatorPKID, "_disconnectRegisterAsValidator: ") } // Delete the current ValidatorEntry. 
@@ -1053,7 +1046,7 @@ func (bav *UtxoView) _connectUnregisterAsValidator( ) { // Validate the starting block height. if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { - return 0, 0, nil, RuleErrorProofofStakeTxnBeforeBlockHeight + return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectUnregisterAsValidator: ") } // Validate the txn TxnType. @@ -1088,7 +1081,7 @@ func (bav *UtxoView) _connectUnregisterAsValidator( // Convert TransactorPublicKey to TransactorPKID. transactorPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { - return 0, 0, nil, RuleErrorInvalidValidatorPKID + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidValidatorPKID, "_connectUnregisterAsValidator: ") } // TODO: In subsequent PR, unstake all StakeEntries for this validator. @@ -1101,7 +1094,7 @@ func (bav *UtxoView) _connectUnregisterAsValidator( } // Note that we don't need to check isDeleted because the Get returns nil if isDeleted=true. if prevValidatorEntry == nil { - return 0, 0, nil, RuleErrorValidatorNotFound + return 0, 0, nil, errors.Wrapf(RuleErrorValidatorNotFound, "_connectUnregisterAsValidator: ") } bav._deleteValidatorEntryMappings(prevValidatorEntry) @@ -1163,26 +1156,26 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata(transactorPublicKey []by // Validate ValidatorPKID. transactorPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKey) if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { - return RuleErrorInvalidValidatorPKID + return errors.Wrapf(RuleErrorInvalidValidatorPKID, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } // Validate Domains. 
if len(metadata.Domains) < 1 { - return RuleErrorValidatorNoDomains + return errors.Wrapf(RuleErrorValidatorNoDomains, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } if len(metadata.Domains) > MaxValidatorNumDomains { - return RuleErrorValidatorTooManyDomains + return errors.Wrapf(RuleErrorValidatorTooManyDomains, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } var domainStrings []string for _, domain := range metadata.Domains { _, err := url.ParseRequestURI(string(domain)) if err != nil { - return fmt.Errorf("%s: %v", RuleErrorValidatorInvalidDomain, domain) + return fmt.Errorf("UtxoView.IsValidRegisterAsValidatorMetadata: %s: %v", RuleErrorValidatorInvalidDomain, domain) } domainStrings = append(domainStrings, string(domain)) } if len(NewSet(domainStrings).ToSlice()) != len(domainStrings) { - return RuleErrorValidatorDuplicateDomains + return errors.Wrapf(RuleErrorValidatorDuplicateDomains, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } // TODO: In subsequent PR, validate VotingPublicKey, VotingPublicKeySignature, and VotingSignatureBlockHeight. @@ -1193,16 +1186,16 @@ func (bav *UtxoView) IsValidUnregisterAsValidatorMetadata(transactorPublicKey [] // Validate ValidatorPKID. transactorPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKey) if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { - return RuleErrorInvalidValidatorPKID + return errors.Wrapf(RuleErrorInvalidValidatorPKID, "UtxoView.IsValidUnregisterAsValidatorMetadata: ") } // Validate ValidatorEntry exists. 
validatorEntry, err := bav.GetValidatorByPKID(transactorPKIDEntry.PKID) if err != nil { - return errors.Wrapf(err, "IsValidUnregisterAsValidatorMetadata: ") + return errors.Wrapf(err, "UtxoView.IsValidUnregisterAsValidatorMetadata: ") } if validatorEntry == nil { - return RuleErrorValidatorNotFound + return errors.Wrapf(RuleErrorValidatorNotFound, "UtxoView.IsValidUnregisterAsValidatorMetadata: ") } return nil @@ -1247,13 +1240,30 @@ func (bav *UtxoView) GetValidatorByPKID(pkid *PKID) (*ValidatorEntry, error) { // in the UtxoView for the given PKID, check the database. dbValidatorEntry, err := DBGetValidatorByPKID(bav.Handle, bav.Snapshot, pkid) if err != nil { - return nil, err + return nil, errors.Wrapf(err, "UtxoView.GetValidatorByPKID: ") + } + if dbValidatorEntry != nil { + // Cache the ValidatorEntry from the db in the UtxoView. + bav._setValidatorEntryMappings(dbValidatorEntry) } - // Cache the ValidatorEntry from the db in the UtxoView. - bav._setValidatorEntryMappings(dbValidatorEntry) return dbValidatorEntry, nil } +func (bav *UtxoView) GetValidatorByPublicKey(validatorPublicKey *PublicKey) (*ValidatorEntry, error) { + validatorPKIDEntry := bav.GetPKIDForPublicKey(validatorPublicKey.ToBytes()) + if validatorPKIDEntry == nil || validatorPKIDEntry.isDeleted { + return nil, errors.Wrapf(RuleErrorInvalidValidatorPKID, "UtxoView.GetValidatorByPublicKey: ") + } + validatorEntry, err := bav.GetValidatorByPKID(validatorPKIDEntry.PKID) + if err != nil { + return nil, err + } + if validatorEntry == nil || validatorEntry.isDeleted { + return nil, errors.Wrapf(RuleErrorInvalidValidatorPKID, "UtxoView.GetValidatorByPublicKey: ") + } + return validatorEntry, nil +} + func (bav *UtxoView) GetTopValidatorsByStake(limit int) ([]*ValidatorEntry, error) { // Validate limit param. 
if limit <= 0 { @@ -1272,7 +1282,7 @@ func (bav *UtxoView) GetTopValidatorsByStake(limit int) ([]*ValidatorEntry, erro // Pull top N ValidatorEntries from the database (not present in the UtxoView). validatorEntries, err := DBGetTopValidatorsByStake(bav.Handle, bav.Snapshot, limit, utxoViewValidatorEntries) if err != nil { - return nil, errors.Wrapf(err, "GetTopValidatorsByStake: error retrieving entries from db: ") + return nil, errors.Wrapf(err, "UtxoView.GetTopValidatorsByStake: error retrieving entries from db: ") } // Add !isDeleted ValidatorEntries from the UtxoView to the ValidatorEntries from the db. for _, validatorEntry := range utxoViewValidatorEntries { @@ -1297,10 +1307,11 @@ func (bav *UtxoView) GetGlobalStakeAmountNanos() (*uint256.Int, error) { globalStakeAmountNanos = bav.GlobalStakeAmountNanos.Clone() } // If not set, read the GlobalStakeAmountNanos from the db. + // TODO: Confirm if the GlobalStakeAmountNanos.IsZero() that we should look in the db. if globalStakeAmountNanos == nil || globalStakeAmountNanos.IsZero() { globalStakeAmountNanos, err = DBGetGlobalStakeAmountNanos(bav.Handle, bav.Snapshot) if err != nil { - return nil, err + return nil, errors.Wrapf(err, "UtxoView.GetGlobalStakeAmountNanos: ") } if globalStakeAmountNanos == nil { globalStakeAmountNanos = uint256.NewInt() @@ -1385,6 +1396,12 @@ func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHei } func (bav *UtxoView) _flushGlobalStakeAmountNanosToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + // If GlobalStakeAmountNanos is nil, then it was never + // set and shouldn't overwrite the value in the db. 
+ if bav.GlobalStakeAmountNanos == nil { + return nil + } + return DBPutGlobalStakeAmountNanosWithTxn(txn, bav.Snapshot, bav.GlobalStakeAmountNanos, blockHeight) } diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index be1fc3909..5c16ebefc 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -16,6 +16,11 @@ func TestValidatorRegistration(t *testing.T) { _testValidatorRegistrationWithDerivedKey(t) } +func TestGetTopValidatorsByStake(t *testing.T) { + _testGetTopValidatorsByStake(t, false) + _testGetTopValidatorsByStake(t, true) +} + func _testValidatorRegistration(t *testing.T, flushToDB bool) { // Local variables var registerMetadata *RegisterAsValidatorMetadata @@ -24,15 +29,14 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { var globalStakeAmountNanos *uint256.Int var err error + // Initialize fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) - // Initialize fork heights. 
- params.ForkHeights.BalanceModelBlockHeight = uint32(1) - GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) - GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - utxoView := func() *UtxoView { newUtxoView, err := mempool.GetAugmentedUniversalView() require.NoError(t, err) @@ -74,7 +78,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { _, _, _, _, _ = m0PKID, m1PKID, m2PKID, m3PKID, m4PKID { - // Param Updater set min fee rate to 101 nanos per KB + // ParamUpdater set min fee rate params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true _updateGlobalParamsEntryWithTestMeta( testMeta, @@ -382,18 +386,15 @@ func _submitUnregisterAsValidatorTxn( func _testValidatorRegistrationWithDerivedKey(t *testing.T) { var err error + // Initialize balance model fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) // Initialize fork heights. - params.ForkHeights.NFTTransferOrBurnAndDerivedKeysBlockHeight = uint32(0) - params.ForkHeights.DerivedKeySetSpendingLimitsBlockHeight = uint32(0) - params.ForkHeights.DerivedKeyTrackSpendingLimitsBlockHeight = uint32(0) - params.ForkHeights.DerivedKeyEthSignatureCompatibilityBlockHeight = uint32(0) - params.ForkHeights.ExtraDataOnEntriesBlockHeight = uint32(0) - params.ForkHeights.AssociationsAndAccessGroupsBlockHeight = uint32(0) - params.ForkHeights.BalanceModelBlockHeight = uint32(1) params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -508,7 +509,7 @@ func _testValidatorRegistrationWithDerivedKey(t *testing.T) { return err } // Sign txn. 
- _signTxnWithDerivedKey(t, txn, derivedKeyPrivBase58Check) + _signTxnWithDerivedKeyAndType(t, txn, derivedKeyPrivBase58Check, 1) // Store the original transactor balance. transactorPublicKeyBase58Check := Base58CheckEncode(transactorPkBytes, false, params) prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) @@ -533,6 +534,21 @@ func _testValidatorRegistrationWithDerivedKey(t *testing.T) { return nil } + { + // ParamUpdater set min fee rate + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } { // Submit a RegisterAsValidator txn using a DerivedKey. @@ -617,3 +633,352 @@ func _testValidatorRegistrationWithDerivedKey(t *testing.T) { require.NoError(t, mempool.universalUtxoView.FlushToDb(blockHeight)) _executeAllTestRollbackAndFlush(testMeta) } + +func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { + var validatorEntries []*ValidatorEntry + var err error + + // Initialize balance model fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize PoS fork height. + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + utxoView := func() *UtxoView { + newUtxoView, err := mempool.GetAugmentedUniversalView() + require.NoError(t, err) + return newUtxoView + } + + // Mine a few blocks to give the senderPkString some money. 
+ for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. + blockHeight := uint64(chain.blockTip().Height) + 1 + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID + m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID + m3PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m3PkBytes).PKID + m4PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m4PkBytes).PKID + _, _, _, _, _ = m0PKID, m1PKID, m2PKID, m3PKID, m4PKID + + { + // ParamUpdater set min fee rate + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } + { + // m0 registers as a validator. 
+ registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://m0.com")}, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + // Verify top validators. + validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, validatorEntries, 1) + require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt()) + } + { + // m1 registers as a validator. + registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://m1.com")}, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m1Pub, m1Priv, registerMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + // Verify top validators. + validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, validatorEntries, 2) + } + { + // m2 registers as a validator. + registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://m2.com")}, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m2Pub, m2Priv, registerMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + // Verify top validators. + validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, validatorEntries, 3) + } + { + // m3 stakes 100 DESO nanos with m0. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitStakeTxn(testMeta, m3Pub, m3Priv, stakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m3 stakes 200 DESO nanos with m1. 
+ stakeMetadata = &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(200), + } + _, err = _submitStakeTxn(testMeta, m3Pub, m3Priv, stakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m3 stakes 300 DESO nanos with m2. + stakeMetadata = &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m2PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(300), + } + _, err = _submitStakeTxn(testMeta, m3Pub, m3Priv, stakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // Verify top validators. + validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, validatorEntries, 3) + require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) + require.Equal(t, validatorEntries[1].ValidatorPKID, m1PKID) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, validatorEntries[2].ValidatorPKID, m0PKID) + require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + } + { + // m3 unstakes from m1. + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(150), + } + _, err = _submitUnstakeTxn(testMeta, m3Pub, m3Priv, unstakeMetadata, nil, flushToDB) + + // Verify top validators. 
+ validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, validatorEntries, 3) + require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) + require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntries[2].ValidatorPKID, m1PKID) + require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(50)) + } + { + // m3 unstakes more from m1. + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitUnstakeTxn(testMeta, m3Pub, m3Priv, unstakeMetadata, nil, flushToDB) + + // Verify top validators. + validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, validatorEntries, 3) + require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) + require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntries[2].ValidatorPKID, m1PKID) + require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(0)) + } + { + // m2 unregisters as validator. + _, _, _, err = _submitUnregisterAsValidatorTxn(testMeta, m2Pub, m2Priv, flushToDB) + require.NoError(t, err) + + // Verify top validators. 
+ validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, validatorEntries, 2) + require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntries[1].ValidatorPKID, m1PKID) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(0)) + } + { + // m4 stakes with m1. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(150), + } + _, err = _submitStakeTxn(testMeta, m4Pub, m4Priv, stakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // Verify top validators. + validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, validatorEntries, 2) + require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + } + { + // m4 stakes more with m1. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m1PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitStakeTxn(testMeta, m4Pub, m4Priv, stakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // Verify top validators. + validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, validatorEntries, 2) + require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(250)) + require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + } + { + // Verify top validators with LIMIT. 
+ validatorEntries, err = utxoView().GetTopValidatorsByStake(1) + require.NoError(t, err) + require.Len(t, validatorEntries, 1) + require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(250)) + } + + // Flush mempool to the db and test rollbacks. + require.NoError(t, mempool.universalUtxoView.FlushToDb(blockHeight)) + _executeAllTestRollbackAndFlush(testMeta) +} + +func TestGetTopValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { + // For this test, we manually place ValidatorEntries in the database and + // UtxoView to test merging the two to determine the TopValidatorsByStake. + + // Initialize test chain and UtxoView. + chain, params, db := NewLowDifficultyBlockchain(t) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + blockHeight := uint64(chain.blockTip().Height + 1) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID + m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID + + // Store m0's ValidatorEntry in the db with TotalStake = 100 nanos. + validatorEntry := &ValidatorEntry{ + ValidatorPKID: m0PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(100), + } + utxoView._setValidatorEntryMappings(validatorEntry) + require.NoError(t, utxoView.FlushToDb(blockHeight)) + + // Verify m0 is stored in the db. + validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // Verify m0 is not stored in the UtxoView. + require.Empty(t, utxoView.ValidatorMapKeyToValidatorEntry) + + // Store m1's ValidatorEntry in the database with TotalStake = 200 nanos. 
+ validatorEntry = &ValidatorEntry{ + ValidatorPKID: m1PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(200), + } + utxoView._setValidatorEntryMappings(validatorEntry) + require.NoError(t, utxoView.FlushToDb(blockHeight)) + + // Fetch m1 so it is also cached in the UtxoView. + validatorEntry, err = utxoView.GetValidatorByPKID(m1PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + + // Verify m1 is stored in the db. + validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m1PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(200)) + + // Verify m1 is also stored in the UtxoView. + require.Len(t, utxoView.ValidatorMapKeyToValidatorEntry, 1) + require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[validatorEntry.ToMapKey()].ValidatorPKID, m1PKID) + require.Equal( + t, + utxoView.ValidatorMapKeyToValidatorEntry[validatorEntry.ToMapKey()].TotalStakeAmountNanos, + uint256.NewInt().SetUint64(200), + ) + + // Store m2's ValidatorEntry in the UtxoView. + m2ValidatorEntry := &ValidatorEntry{ + ValidatorPKID: m2PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), + } + utxoView._setValidatorEntryMappings(m2ValidatorEntry) + + // Verify m2 is not stored in the db. + validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m2PKID) + require.NoError(t, err) + require.Nil(t, validatorEntry) + + // Verify m2 is stored in the UtxoView. + require.Len(t, utxoView.ValidatorMapKeyToValidatorEntry, 2) + + require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m2ValidatorEntry.ToMapKey()].ValidatorPKID, m2PKID) + require.Equal( + t, + utxoView.ValidatorMapKeyToValidatorEntry[m2ValidatorEntry.ToMapKey()].TotalStakeAmountNanos, + uint256.NewInt().SetUint64(50), + ) + + // Fetch TopValidatorsByStake merging ValidatorEntries from the db and UtxoView. 
+ validatorEntries, err := utxoView.GetTopValidatorsByStake(3) + require.NoError(t, err) + require.Len(t, validatorEntries, 3) + require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntries[2].ValidatorPKID, m2PKID) + require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(50)) +} diff --git a/lib/db_utils.go b/lib/db_utils.go index 8c5373c46..1657ff25a 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -494,7 +494,15 @@ type DBPrefixes struct { // Prefix -> *uint256.Int PrefixGlobalStakeAmountNanos []byte `prefix_id:"[80]" is_state:"true"` - // NEXT_TAG: 81 + // PrefixStakeByValidatorByStaker: Retrieve a StakeEntry. + // Prefix, ValidatorPKID, StakerPKID -> StakeEntry + PrefixStakeByValidatorByStaker []byte `prefix_id:"[81]" is_state:"true"` + + // PrefixLockedStakeByValidatorByStakerByLockedAt: Retrieve a LockedStakeEntry. + // Prefix, ValidatorPKID, StakerPKID, LockedAtEpochNumber -> LockedStakeEntry + PrefixLockedStakeByValidatorByStakerByLockedAt []byte `prefix_id:"[82]" is_state:"true"` + + // NEXT_TAG: 83 } // StatePrefixToDeSoEncoder maps each state prefix to a DeSoEncoder type that is stored under that prefix. 
@@ -705,6 +713,12 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixGlobalStakeAmountNanos) { // prefix_id:"[80]" return false, nil + } else if bytes.Equal(prefix, Prefixes.PrefixStakeByValidatorByStaker) { + // prefix_id:"[81]" + return true, &StakeEntry{} + } else if bytes.Equal(prefix, Prefixes.PrefixLockedStakeByValidatorByStakerByLockedAt) { + // prefix_id:"[82]" + return true, &LockedStakeEntry{} } return true, nil @@ -6763,6 +6777,9 @@ type TransactionMetadata struct { NewMessageTxindexMetadata *NewMessageTxindexMetadata `json:",omitempty"` RegisterAsValidatorTxindexMetadata *RegisterAsValidatorTxindexMetadata `json:",omitempty"` UnregisterAsValidatorTxindexMetadata *UnregisterAsValidatorTxindexMetadata `json:",omitempty"` + StakeTxindexMetadata *StakeTxindexMetadata `json:",omitempty"` + UnstakeTxindexMetadata *UnstakeTxindexMetadata `json:",omitempty"` + UnlockStakeTxindexMetadata *UnlockStakeTxindexMetadata `json:",omitempty"` } func (txnMeta *TransactionMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -6849,6 +6866,12 @@ func (txnMeta *TransactionMetadata) RawEncodeWithoutMetadata(blockHeight uint64, data = append(data, EncodeToBytes(blockHeight, txnMeta.RegisterAsValidatorTxindexMetadata, skipMetadata...)...) // encoding UnregisterAsValidatorTxindexMetadata data = append(data, EncodeToBytes(blockHeight, txnMeta.UnregisterAsValidatorTxindexMetadata, skipMetadata...)...) + // encoding StakeTxindexMetadata + data = append(data, EncodeToBytes(blockHeight, txnMeta.StakeTxindexMetadata, skipMetadata...)...) + // encoding UnstakeTxindexMetadata + data = append(data, EncodeToBytes(blockHeight, txnMeta.UnstakeTxindexMetadata, skipMetadata...)...) + // encoding UnlockStakeTxindexMetadata + data = append(data, EncodeToBytes(blockHeight, txnMeta.UnlockStakeTxindexMetadata, skipMetadata...)...) 
} return data @@ -7102,19 +7125,25 @@ func (txnMeta *TransactionMetadata) RawDecodeWithoutMetadata(blockHeight uint64, if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { // decoding RegisterAsValidatorTxindexMetadata - CopyRegisterAsValidatorTxindexMetadata := &RegisterAsValidatorTxindexMetadata{} - if exist, err := DecodeFromBytes(CopyRegisterAsValidatorTxindexMetadata, rr); exist && err == nil { - txnMeta.RegisterAsValidatorTxindexMetadata = CopyRegisterAsValidatorTxindexMetadata - } else { + if txnMeta.RegisterAsValidatorTxindexMetadata, err = DecodeDeSoEncoder(&RegisterAsValidatorTxindexMetadata{}, rr); err != nil { return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading RegisterAsValidatorTxindexMetadata") } // decoding UnregisterAsValidatorTxindexMetadata - CopyUnregisterAsValidatorTxindexMetadata := &UnregisterAsValidatorTxindexMetadata{} - if exist, err := DecodeFromBytes(CopyUnregisterAsValidatorTxindexMetadata, rr); exist && err == nil { - txnMeta.UnregisterAsValidatorTxindexMetadata = CopyUnregisterAsValidatorTxindexMetadata - } else { + if txnMeta.UnregisterAsValidatorTxindexMetadata, err = DecodeDeSoEncoder(&UnregisterAsValidatorTxindexMetadata{}, rr); err != nil { return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading UnregisterAsValidatorTxindexMetadata") } + // decoding StakeTxindexMetadata + if txnMeta.StakeTxindexMetadata, err = DecodeDeSoEncoder(&StakeTxindexMetadata{}, rr); err != nil { + return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading StakeTxindexMetadata") + } + // decoding UnstakeTxindexMetadata + if txnMeta.UnstakeTxindexMetadata, err = DecodeDeSoEncoder(&UnstakeTxindexMetadata{}, rr); err != nil { + return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading UnstakeTxindexMetadata") + } + // decoding UnlockStakeTxindexMetadata + if txnMeta.UnlockStakeTxindexMetadata, err = DecodeDeSoEncoder(&UnlockStakeTxindexMetadata{}, rr); err != nil { + return 
errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading UnlockStakeTxindexMetadata") + } } return nil diff --git a/lib/generics.go b/lib/generics.go index 4e8d59b7e..3b8dba907 100644 --- a/lib/generics.go +++ b/lib/generics.go @@ -1,5 +1,10 @@ package lib +import ( + "bytes" + "github.com/pkg/errors" +) + // Generic Set object. Retains the order elements are addd to the set. type Set[T comparable] struct { _innerMap map[T]struct{} @@ -71,3 +76,47 @@ func MapSet[T comparable, K any](set *Set[T], mapFunc func(elem T) (K, error)) ( } return results, nil } + +func DecodeDeSoEncoder[T DeSoEncoder](entry T, rr *bytes.Reader) (T, error) { + var emptyEntry T + exist, err := DecodeFromBytes(entry, rr) + if err != nil { + return emptyEntry, errors.Wrapf(err, "DecodeDeSoEncoder: Problem decoding from bytes") + } + if !exist { + return emptyEntry, nil + } + return entry, nil +} + +func EncodeDeSoEncoderSlice[T DeSoEncoder](inputSlice []T, blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + numItems := uint64(len(inputSlice)) + data = append(data, UintToBuf(numItems)...) + for _, item := range inputSlice { + data = append(data, EncodeToBytes(blockHeight, item, skipMetadata...)...) + } + return data +} + +func DecodeDeSoEncoderSlice[T DeSoEncoder](rr *bytes.Reader) ([]T, error) { + numItems, err := ReadUvarint(rr) + if err != nil { + return nil, errors.Wrapf(err, "DecodeDeSoEncoderSlice: Problem decoding numItems") + } + // Note: is it more efficient to do a make with specific length and set at each index? 
+ inputs, err := SafeMakeSliceWithLength[T](numItems) + if err != nil { + return nil, errors.Wrapf(err, "DecodeDeSoEncoderSlice: Problem making slice with length %d", numItems) + } + var results []T + for ii := uint64(0); ii < numItems; ii++ { + prevEntry := inputs[ii].GetEncoderType().New() + if exist, err := DecodeFromBytes(prevEntry, rr); exist && err == nil { + results = append(results, prevEntry.(T)) + } else if err != nil { + return nil, errors.Wrapf(err, "DecodeDeSoEncoderSlice: Problem decoding item %d of %d", ii, numItems) + } + } + return results, nil +} diff --git a/lib/generics_test.go b/lib/generics_test.go index f00f6fafb..67beb30e0 100644 --- a/lib/generics_test.go +++ b/lib/generics_test.go @@ -1,6 +1,7 @@ package lib import ( + "bytes" "github.com/pkg/errors" "github.com/stretchr/testify/require" "testing" @@ -41,3 +42,42 @@ func TestSet(t *testing.T) { require.Equal(t, err.Error(), "TESTERROR") require.Nil(t, nilSet) } + +func TestGenericDeSoEncoderAndDecode(t *testing.T) { + + tne := &TransactorNonceEntry{ + TransactorPKID: &PKID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}, + Nonce: &DeSoNonce{ + ExpirationBlockHeight: 1723, + PartialID: 142, + }, + } + encoded := EncodeToBytes(0, tne, false) + var decoded *TransactorNonceEntry + var err error + decoded, err = DecodeDeSoEncoder(&TransactorNonceEntry{}, bytes.NewReader(encoded)) + + require.NoError(t, err) + require.True(t, decoded.TransactorPKID.Eq(tne.TransactorPKID)) + require.Equal(t, decoded.Nonce.ExpirationBlockHeight, tne.Nonce.ExpirationBlockHeight) + require.Equal(t, decoded.Nonce.PartialID, tne.Nonce.PartialID) + + tneSlice := []*TransactorNonceEntry{tne} + for i := 0; i < 10; i++ { + copiedTNE := tne.Copy() + copiedTNE.Nonce.ExpirationBlockHeight += 10 + copiedTNE.Nonce.PartialID += 10 + tneSlice = append(tneSlice, tne) + } + + encodedSlice := EncodeDeSoEncoderSlice[*TransactorNonceEntry](tneSlice, 0, false) + 
decodedSlice, err := DecodeDeSoEncoderSlice[*TransactorNonceEntry](bytes.NewReader(encodedSlice)) + + require.NoError(t, err) + require.Equal(t, len(decodedSlice), len(tneSlice)) + for i := 0; i < len(decodedSlice); i++ { + require.True(t, decodedSlice[i].TransactorPKID.Eq(tneSlice[i].TransactorPKID)) + require.Equal(t, decodedSlice[i].Nonce.ExpirationBlockHeight, tneSlice[i].Nonce.ExpirationBlockHeight) + require.Equal(t, decodedSlice[i].Nonce.PartialID, tneSlice[i].Nonce.PartialID) + } +} diff --git a/lib/mempool.go b/lib/mempool.go index 7f5c07f4e..548118e73 100644 --- a/lib/mempool.go +++ b/lib/mempool.go @@ -1946,6 +1946,18 @@ func ComputeTransactionMetadata(txn *MsgDeSoTxn, utxoView *UtxoView, blockHash * txindexMetadata, affectedPublicKeys := utxoView.CreateUnregisterAsValidatorTxindexMetadata(utxoOps[len(utxoOps)-1], txn) txnMeta.UnregisterAsValidatorTxindexMetadata = txindexMetadata txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, affectedPublicKeys...) + case TxnTypeStake: + txindexMetadata, affectedPublicKeys := utxoView.CreateStakeTxindexMetadata(utxoOps[len(utxoOps)-1], txn) + txnMeta.StakeTxindexMetadata = txindexMetadata + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, affectedPublicKeys...) + case TxnTypeUnstake: + txindexMetadata, affectedPublicKeys := utxoView.CreateUnstakeTxindexMetadata(utxoOps[len(utxoOps)-1], txn) + txnMeta.UnstakeTxindexMetadata = txindexMetadata + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, affectedPublicKeys...) + case TxnTypeUnlockStake: + txindexMetadata, affectedPublicKeys := utxoView.CreateUnlockStakeTxindexMetadata(utxoOps[len(utxoOps)-1], txn) + txnMeta.UnlockStakeTxindexMetadata = txindexMetadata + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, affectedPublicKeys...) 
} return txnMeta } diff --git a/lib/network.go b/lib/network.go index 4a81b112b..ddaa1e342 100644 --- a/lib/network.go +++ b/lib/network.go @@ -241,8 +241,11 @@ const ( TxnTypeNewMessage TxnType = 33 TxnTypeRegisterAsValidator TxnType = 34 TxnTypeUnregisterAsValidator TxnType = 35 + TxnTypeStake TxnType = 36 + TxnTypeUnstake TxnType = 37 + TxnTypeUnlockStake TxnType = 38 - // NEXT_ID = 36 + // NEXT_ID = 39 ) type TxnString string @@ -284,6 +287,9 @@ const ( TxnStringNewMessage TxnString = "NEW_MESSAGE" TxnStringRegisterAsValidator TxnString = "REGISTER_AS_VALIDATOR" TxnStringUnregisterAsValidator TxnString = "UNREGISTER_AS_VALIDATOR" + TxnStringStake TxnString = "STAKE" + TxnStringUnstake TxnString = "UNSTAKE" + TxnStringUnlockStake TxnString = "UNLOCK_STAKE" ) var ( @@ -296,7 +302,7 @@ var ( TxnTypeDAOCoin, TxnTypeDAOCoinTransfer, TxnTypeDAOCoinLimitOrder, TxnTypeCreateUserAssociation, TxnTypeDeleteUserAssociation, TxnTypeCreatePostAssociation, TxnTypeDeletePostAssociation, TxnTypeAccessGroup, TxnTypeAccessGroupMembers, TxnTypeNewMessage, TxnTypeRegisterAsValidator, - TxnTypeUnregisterAsValidator, + TxnTypeUnregisterAsValidator, TxnTypeStake, TxnTypeUnstake, TxnTypeUnlockStake, } AllTxnString = []TxnString{ TxnStringUnset, TxnStringBlockReward, TxnStringBasicTransfer, TxnStringBitcoinExchange, TxnStringPrivateMessage, @@ -307,7 +313,7 @@ var ( TxnStringDAOCoin, TxnStringDAOCoinTransfer, TxnStringDAOCoinLimitOrder, TxnStringCreateUserAssociation, TxnStringDeleteUserAssociation, TxnStringCreatePostAssociation, TxnStringDeletePostAssociation, TxnStringAccessGroup, TxnStringAccessGroupMembers, TxnStringNewMessage, TxnStringRegisterAsValidator, - TxnStringUnregisterAsValidator, + TxnStringUnregisterAsValidator, TxnStringStake, TxnStringUnstake, TxnStringUnlockStake, } ) @@ -391,6 +397,12 @@ func (txnType TxnType) GetTxnString() TxnString { return TxnStringRegisterAsValidator case TxnTypeUnregisterAsValidator: return TxnStringUnregisterAsValidator + case TxnTypeStake: + 
return TxnStringStake + case TxnTypeUnstake: + return TxnStringUnstake + case TxnTypeUnlockStake: + return TxnStringUnlockStake default: return TxnStringUndefined } @@ -468,6 +480,12 @@ func GetTxnTypeFromString(txnString TxnString) TxnType { return TxnTypeRegisterAsValidator case TxnStringUnregisterAsValidator: return TxnTypeUnregisterAsValidator + case TxnStringStake: + return TxnTypeStake + case TxnStringUnstake: + return TxnTypeUnstake + case TxnStringUnlockStake: + return TxnTypeUnlockStake default: // TxnTypeUnset means we couldn't find a matching txn type return TxnTypeUnset @@ -553,6 +571,12 @@ func NewTxnMetadata(txType TxnType) (DeSoTxnMetadata, error) { return (&RegisterAsValidatorMetadata{}).New(), nil case TxnTypeUnregisterAsValidator: return (&UnregisterAsValidatorMetadata{}).New(), nil + case TxnTypeStake: + return (&StakeMetadata{}).New(), nil + case TxnTypeUnstake: + return (&UnstakeMetadata{}).New(), nil + case TxnTypeUnlockStake: + return (&UnlockStakeMetadata{}).New(), nil default: return nil, fmt.Errorf("NewTxnMetadata: Unrecognized TxnType: %v; make sure you add the new type of transaction to NewTxnMetadata", txType) } @@ -5355,6 +5379,20 @@ type TransactionSpendingLimit struct { // - AppScopeType: one of { Any, Scoped } // - AssociationOperation: one of { Any, Create, Delete } AssociationLimitMap map[AssociationLimitKey]uint64 + + // ===== ENCODER MIGRATION ProofOfStakeNewTxnTypesMigration ===== + // ValidatorPKID || StakerPKID to amount of stake-able $DESO. + // Note that this is not a limit on the number of Stake txns that + // this derived key can perform but instead a limit on the amount + // of $DESO this derived key can stake. + StakeLimitMap map[StakeLimitKey]*uint256.Int + // ValidatorPKID || StakerPKID to amount of unstake-able DESO. + // Note that this is not a limit on the number of Unstake txns that + // this derived key can perform but instead a limit on the amount + // of $DESO this derived key can unstake. 
+ UnstakeLimitMap map[StakeLimitKey]*uint256.Int + // ValidatorPKID || StakerPKID to number of UnlockStake transactions. + UnlockStakeLimitMap map[StakeLimitKey]uint64 } // ToMetamaskString encodes the TransactionSpendingLimit into a Metamask-compatible string. The encoded string will @@ -5599,6 +5637,102 @@ func (tsl *TransactionSpendingLimit) ToMetamaskString(params *DeSoParams) string indentationCounter-- } + // StakeLimitMap + if len(tsl.StakeLimitMap) > 0 { + var stakeLimitStr []string + str += _indt(indentationCounter) + "Staking Restrictions:\n" + indentationCounter++ + for limitKey, limit := range tsl.StakeLimitMap { + opString := _indt(indentationCounter) + "[\n" + + indentationCounter++ + // ValidatorPKID + validatorPublicKeyBase58Check := "Any" + if !limitKey.ValidatorPKID.Eq(&ZeroPKID) { + validatorPublicKeyBase58Check = Base58CheckEncode(limitKey.ValidatorPKID.ToBytes(), false, params) + } + opString += _indt(indentationCounter) + "Validator PKID: " + validatorPublicKeyBase58Check + "\n" + // StakerPKID + stakerPublicKeyBase58Check := Base58CheckEncode(limitKey.StakerPKID.ToBytes(), false, params) + opString += _indt(indentationCounter) + "Staker PKID: " + stakerPublicKeyBase58Check + "\n" + // StakeLimit + stakeLimitDESO := NewFloat().Quo( + NewFloat().SetInt(limit.ToBig()), NewFloat().SetUint64(NanosPerUnit), + ) + opString += _indt(indentationCounter) + fmt.Sprintf("Staking Limit: %.2f $DESO\n", stakeLimitDESO) + + indentationCounter-- + opString += _indt(indentationCounter) + "]\n" + stakeLimitStr = append(stakeLimitStr, opString) + } + // Ensure deterministic ordering of the transaction count limit strings by doing a lexicographical sort. 
+ sortStringsAndAddToLimitStr(stakeLimitStr) + indentationCounter-- + } + + // UnstakeLimitMap + if len(tsl.UnstakeLimitMap) > 0 { + var unstakeLimitStr []string + str += _indt(indentationCounter) + "Unstaking Restrictions:\n" + indentationCounter++ + for limitKey, limit := range tsl.UnstakeLimitMap { + opString := _indt(indentationCounter) + "[\n" + + indentationCounter++ + // ValidatorPKID + validatorPublicKeyBase58Check := "Any" + if !limitKey.ValidatorPKID.Eq(&ZeroPKID) { + validatorPublicKeyBase58Check = Base58CheckEncode(limitKey.ValidatorPKID.ToBytes(), false, params) + } + opString += _indt(indentationCounter) + "Validator PKID: " + validatorPublicKeyBase58Check + "\n" + // StakerPKID + stakerPublicKeyBase58Check := Base58CheckEncode(limitKey.StakerPKID.ToBytes(), false, params) + opString += _indt(indentationCounter) + "Staker PKID: " + stakerPublicKeyBase58Check + "\n" + // UnstakeLimit + unstakeLimitDESO := NewFloat().Quo( + NewFloat().SetInt(limit.ToBig()), NewFloat().SetUint64(NanosPerUnit), + ) + opString += _indt(indentationCounter) + fmt.Sprintf("Unstaking Limit: %.2f $DESO\n", unstakeLimitDESO) + + indentationCounter-- + opString += _indt(indentationCounter) + "]\n" + unstakeLimitStr = append(unstakeLimitStr, opString) + } + // Ensure deterministic ordering of the transaction count limit strings by doing a lexicographical sort. 
+ sortStringsAndAddToLimitStr(unstakeLimitStr) + indentationCounter-- + } + + // UnlockStakeLimitMap + if len(tsl.UnlockStakeLimitMap) > 0 { + var unlockStakeLimitStr []string + str += _indt(indentationCounter) + "Unlocking Stake Restrictions:\n" + indentationCounter++ + for limitKey, limit := range tsl.UnlockStakeLimitMap { + opString := _indt(indentationCounter) + "[\n" + + indentationCounter++ + // ValidatorPKID + validatorPublicKeyBase58Check := "Any" + if !limitKey.ValidatorPKID.Eq(&ZeroPKID) { + validatorPublicKeyBase58Check = Base58CheckEncode(limitKey.ValidatorPKID.ToBytes(), false, params) + } + opString += _indt(indentationCounter) + "Validator PKID: " + validatorPublicKeyBase58Check + "\n" + // StakerPKID + stakerPublicKeyBase58Check := Base58CheckEncode(limitKey.StakerPKID.ToBytes(), false, params) + opString += _indt(indentationCounter) + "Staker PKID: " + stakerPublicKeyBase58Check + "\n" + // UnlockStakeLimit + opString += _indt(indentationCounter) + "Transaction Count: " + strconv.FormatUint(limit, 10) + "\n" + + indentationCounter-- + opString += _indt(indentationCounter) + "]\n" + unlockStakeLimitStr = append(unlockStakeLimitStr, opString) + } + // Ensure deterministic ordering of the transaction count limit strings by doing a lexicographical sort. + sortStringsAndAddToLimitStr(unlockStakeLimitStr) + indentationCounter-- + } + // IsUnlimited if tsl.IsUnlimited { str += "Unlimited" @@ -5812,6 +5946,69 @@ func (tsl *TransactionSpendingLimit) ToBytes(blockHeight uint64) ([]byte, error) data = append(data, accessGroupsBytes...) } + // StakeLimitMap, UnstakeLimitMap, and UnlockStakeLimitMap, gated by the encoder migration. + if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + // StakeLimitMap + stakeLimitMapLength := uint64(len(tsl.StakeLimitMap)) + data = append(data, UintToBuf(stakeLimitMapLength)...) 
+ if stakeLimitMapLength > 0 { + keys, err := SafeMakeSliceWithLengthAndCapacity[StakeLimitKey](0, stakeLimitMapLength) + if err != nil { + return nil, err + } + for key := range tsl.StakeLimitMap { + keys = append(keys, key) + } + sort.Slice(keys, func(ii, jj int) bool { + return hex.EncodeToString(keys[ii].Encode()) < hex.EncodeToString(keys[jj].Encode()) + }) + for _, key := range keys { + data = append(data, key.Encode()...) + data = append(data, EncodeUint256(tsl.StakeLimitMap[key])...) + } + } + + // UnstakeLimitMap + unstakeLimitMapLength := uint64(len(tsl.UnstakeLimitMap)) + data = append(data, UintToBuf(unstakeLimitMapLength)...) + if unstakeLimitMapLength > 0 { + keys, err := SafeMakeSliceWithLengthAndCapacity[StakeLimitKey](0, unstakeLimitMapLength) + if err != nil { + return nil, err + } + for key := range tsl.UnstakeLimitMap { + keys = append(keys, key) + } + sort.Slice(keys, func(ii, jj int) bool { + return hex.EncodeToString(keys[ii].Encode()) < hex.EncodeToString(keys[jj].Encode()) + }) + for _, key := range keys { + data = append(data, key.Encode()...) + data = append(data, EncodeUint256(tsl.UnstakeLimitMap[key])...) + } + } + + // UnlockStakeLimitMap + unlockStakeLimitMapLength := uint64(len(tsl.UnlockStakeLimitMap)) + data = append(data, UintToBuf(unlockStakeLimitMapLength)...) + if unlockStakeLimitMapLength > 0 { + keys, err := SafeMakeSliceWithLengthAndCapacity[StakeLimitKey](0, unlockStakeLimitMapLength) + if err != nil { + return nil, err + } + for key := range tsl.UnlockStakeLimitMap { + keys = append(keys, key) + } + sort.Slice(keys, func(ii, jj int) bool { + return hex.EncodeToString(keys[ii].Encode()) < hex.EncodeToString(keys[jj].Encode()) + }) + for _, key := range keys { + data = append(data, key.Encode()...) + data = append(data, UintToBuf(tsl.UnlockStakeLimitMap[key])...) 
+ } + } + } + return data, nil } @@ -6021,6 +6218,81 @@ func (tsl *TransactionSpendingLimit) FromBytes(blockHeight uint64, rr *bytes.Rea } } + // StakeLimitMap, UnstakeLimitMap, and UnlockStakeLimitMap, gated by the encoder migration. + if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + // StakeLimitMap + stakeLimitMapLen, err := ReadUvarint(rr) + if err != nil { + return err + } + tsl.StakeLimitMap = make(map[StakeLimitKey]*uint256.Int) + if stakeLimitMapLen > 0 { + for ii := uint64(0); ii < stakeLimitMapLen; ii++ { + stakeLimitKey := &StakeLimitKey{} + if err = stakeLimitKey.Decode(rr); err != nil { + return errors.Wrap(err, "Error decoding StakeLimitKey: ") + } + var stakeLimitDESONanos *uint256.Int + stakeLimitDESONanos, err = DecodeUint256(rr) + if err != nil { + return err + } + if _, exists := tsl.StakeLimitMap[*stakeLimitKey]; exists { + return errors.New("StakeLimitKey already exists in StakeLimitMap") + } + tsl.StakeLimitMap[*stakeLimitKey] = stakeLimitDESONanos + } + } + + // UnstakeLimitMap + unstakeLimitMapLen, err := ReadUvarint(rr) + if err != nil { + return err + } + tsl.UnstakeLimitMap = make(map[StakeLimitKey]*uint256.Int) + if unstakeLimitMapLen > 0 { + for ii := uint64(0); ii < unstakeLimitMapLen; ii++ { + stakeLimitKey := &StakeLimitKey{} + if err = stakeLimitKey.Decode(rr); err != nil { + return errors.Wrap(err, "Error decoding StakeLimitKey: ") + } + var unstakeLimitDESONanos *uint256.Int + unstakeLimitDESONanos, err = DecodeUint256(rr) + if err != nil { + return err + } + if _, exists := tsl.UnstakeLimitMap[*stakeLimitKey]; exists { + return errors.New("StakeLimitKey already exists in UnstakeLimitMap") + } + tsl.UnstakeLimitMap[*stakeLimitKey] = unstakeLimitDESONanos + } + } + + // UnlockStakeLimitMap + unlockStakeLimitMapLen, err := ReadUvarint(rr) + if err != nil { + return err + } + tsl.UnlockStakeLimitMap = make(map[StakeLimitKey]uint64) + if unlockStakeLimitMapLen > 0 { + for ii := uint64(0); ii < 
unlockStakeLimitMapLen; ii++ { + stakeLimitKey := &StakeLimitKey{} + if err = stakeLimitKey.Decode(rr); err != nil { + return errors.Wrap(err, "Error decoding StakeLimitKey: ") + } + var operationCount uint64 + operationCount, err = ReadUvarint(rr) + if err != nil { + return err + } + if _, exists := tsl.UnlockStakeLimitMap[*stakeLimitKey]; exists { + return errors.New("StakeLimitKey already exists in UnlockStakeLimitMap") + } + tsl.UnlockStakeLimitMap[*stakeLimitKey] = operationCount + } + } + } + return nil } @@ -6077,6 +6349,9 @@ func (tsl *TransactionSpendingLimit) Copy() *TransactionSpendingLimit { DAOCoinLimitOrderLimitMap: make(map[DAOCoinLimitOrderLimitKey]uint64), AccessGroupMap: make(map[AccessGroupLimitKey]uint64), AccessGroupMemberMap: make(map[AccessGroupMemberLimitKey]uint64), + StakeLimitMap: make(map[StakeLimitKey]*uint256.Int), + UnstakeLimitMap: make(map[StakeLimitKey]*uint256.Int), + UnlockStakeLimitMap: make(map[StakeLimitKey]uint64), IsUnlimited: tsl.IsUnlimited, } @@ -6117,11 +6392,23 @@ func (tsl *TransactionSpendingLimit) Copy() *TransactionSpendingLimit { copyTSL.AccessGroupMemberMap[accessGroupMemberLimitKey] = accessGroupMemberCount } + for stakeLimitKey, stakeLimitDESONanos := range tsl.StakeLimitMap { + copyTSL.StakeLimitMap[stakeLimitKey] = stakeLimitDESONanos + } + + for stakeLimitKey, unstakeLimitDESONanos := range tsl.UnstakeLimitMap { + copyTSL.UnstakeLimitMap[stakeLimitKey] = unstakeLimitDESONanos + } + + for stakeLimitKey, unlockStakeOperationCount := range tsl.UnlockStakeLimitMap { + copyTSL.UnlockStakeLimitMap[stakeLimitKey] = unlockStakeOperationCount + } + return copyTSL } func (bav *UtxoView) CheckIfValidUnlimitedSpendingLimit(tsl *TransactionSpendingLimit, blockHeight uint32) (_isUnlimited bool, _err error) { - AssertDependencyStructFieldNumbers(&TransactionSpendingLimit{}, 10) + AssertDependencyStructFieldNumbers(&TransactionSpendingLimit{}, 13) if tsl.IsUnlimited && blockHeight < 
bav.Params.ForkHeights.DeSoUnlimitedDerivedKeysBlockHeight { return false, RuleErrorUnlimitedDerivedKeyBeforeBlockHeight @@ -6137,7 +6424,10 @@ func (bav *UtxoView) CheckIfValidUnlimitedSpendingLimit(tsl *TransactionSpending len(tsl.DAOCoinLimitOrderLimitMap) > 0 || len(tsl.AssociationLimitMap) > 0 || len(tsl.AccessGroupMap) > 0 || - len(tsl.AccessGroupMemberMap) > 0) { + len(tsl.AccessGroupMemberMap) > 0 || + len(tsl.StakeLimitMap) > 0 || + len(tsl.UnstakeLimitMap) > 0 || + len(tsl.UnlockStakeLimitMap) > 0) { return tsl.IsUnlimited, RuleErrorUnlimitedDerivedKeyNonEmptySpendingLimits } diff --git a/lib/network_test.go b/lib/network_test.go index f83d99d89..b99595c94 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -1555,7 +1555,7 @@ func TestUnlimitedSpendingLimitMetamaskEncoding(t *testing.T) { // Test the spending limit encoding using the standard scheme. spendingLimitBytes, err := spendingLimit.ToBytes(1) require.NoError(err) - require.Equal(true, reflect.DeepEqual(spendingLimitBytes, []byte{0, 0, 0, 0, 0, 0, 1, 0, 0, 0})) + require.Equal(true, reflect.DeepEqual(spendingLimitBytes, []byte{0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0})) // Test the spending limit encoding using the metamask scheme. require.Equal(true, reflect.DeepEqual( diff --git a/lib/types.go b/lib/types.go index cb5a9f654..71bb6630f 100644 --- a/lib/types.go +++ b/lib/types.go @@ -273,6 +273,10 @@ func ReadOptionalBlockHash(rr *bytes.Reader) (*BlockHash, error) { return nil, nil } +// EncodeOptionalUint256 guarantees fixed-width encoding which is useful +// in BadgerDB keys. It is less space-efficient than EncodeUint256, +// which should be used elsewhere. Both EncodeUint256 and +// EncodeOptionalUint256 can handle nil inputs. 
func EncodeOptionalUint256(val *uint256.Int) []byte { if val == nil { return UintToBuf(uint64(0)) From 7ae84b500774e06ea0400312a70e4c05d940dd2f Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Fri, 28 Apr 2023 10:04:47 -0400 Subject: [PATCH 007/762] Mf/unstake stakers for unregistering validator (#513) * Add block view validator. * Add unregister as validator types. * Add validator entry db utils. * Add blockchain utils. * Add utxo view getters. * Delete then set instead of updating validator entry. * Add connect register as validator logic. * Add disconnect register as validator. * Add connect and disconnect unregister as validator. * Add unregister as validator blockchain and mempool utils. * Only overwrite validatorID if new validator. * Use url parse request URI to validate domains. * Tie in register/unregister txn types. * Finish blockchain.go construct validator txns. * Add test file template. * Add tests for validations. * Add tests for registering, querying, unregistering w/o flushing. * Find top validators by stake across the db and UtxoView. * Fix tests when flushing to db. * Test updating a validator. * Store blockheights as uint64. * Fix disconnect logic for register as validator txn. * Fix disconnect tests for unregister txn. * Add stubbed voting public key fields. * Change weird UtxoOperation version to see if fixes CI. * Fix block heights in tests. * Add passing test for creating validator derived key. * Add block view stake. * Add types and byte encodings. * Fix merge conflicts more. * Add db utils. * Fix failing CI. * Add metadata validations. * Reorder keys in prefix. * Test registering + unregistering validators using derived keys. * Update TODO comments. * Clean up comments. * Add UtxoView utils. * Add mempool utils. * Tie in txindex metadata creation. * Tie in where txindex metadata gets set. * Start on connect stake txn. * Split out setting/flushing global stake amount nanos. 
* Start in on connect + disconnect stake operation. * Add first stake connect tests. * Add test for staking more. * Add connect unstake. * Test happy path unstaking connect logic. * Add connect unstake sad path tests. * Add unlock stake connect + disconnect logic. * Add happy path connect unlock stake test. * Address PR feedback. * Build out db loop for retrieving matching locked stake entries. * Add unlock stake sad path tests. * Add stake, unstake, unlock stake txn spending limits. * Fix failing CI pt 1. * Set test fork heights using helper. * Fix CI pt 2. * Add skeleton for testing staking w/ derived keys. * Start adding tests for staking with derived keys. * Add tests for unlock stake with derived key. * Fix failing stake with derived key tests. * Add tests for validating stake derived keys. * Add tests for derived key that can stake w/ any validator. * Add tests for staking with unlimited derived key. * Start writing to metamask string logic + tests. * Add unlock stake txn spending limits to metamask string logic. * Set min fee on register as validator test. * Fix derived key min fee tests. * Re-enable rollbacks in tests. * Disconnect basic transfer when disconnecting txns. * Use nondeterministic signing alg in tests. * Account for DESO when staking. * Account for DESO moving in unlock stake txn. * Move GetValidatorByPK to block_view_validator. * adding generic encode/decode deso encoder slices and single structs (#503) * check in progress on adding generic encode/decode deso encoder slices * add DecodeDeSoEncoder for decode single structs * trying stuff w/ generics * more messing w/ generics * fix validator entry decoding * revert to old decode deso encoder slice * remove comment * safe make * add test case * Address PR feedback pt1. * Address PR feedback pt1. * Encode validator total stake amount nanos in key. (#502) * Encode validator total stake amount nanos in key. * Add comment for EncodeUint256 vs EncodeOptionalUint256. 
* Fallback to global stake limit if scoped is exceeded. * Add moving DESO to totalInput, totalOutput. * Store stake/unstake spending limits as uint256.Ints. * use deso encoder decode (#505) * use deso encoder decode * don't use decodedesoencoder for public keys * Wrap error messages. * Address more PR feedback. * Add tests for top validators by stake. (#506) * Add tests for top validators by stake. * Split up tests. * Add TestTopValidatorsByStakeMergingDbAndUtxoView. * Fix GetLockedStakeEntriesInRange and add test. * Resolve PR comments. * Unstake stakers for unregistering validator. * Dont allow updating DisableDelegatedStakers with existing DelegatedStakers. * Move DisableDelegatedStake validation to metadata validation util. * Add test for unregistering validator unstaking stakers. * Resolve PR feedback. --------- Co-authored-by: Lazy Nina <81658138+lazynina@users.noreply.github.com> --- lib/block_view_stake.go | 118 ++++++--- lib/block_view_stake_test.go | 8 +- lib/block_view_validator.go | 229 +++++++++++++---- lib/block_view_validator_test.go | 405 +++++++++++++++++++++++++++++-- 4 files changed, 654 insertions(+), 106 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index f5a3dd70a..737df7bfa 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -523,13 +523,17 @@ func (txindexMetadata *UnlockStakeTxindexMetadata) GetEncoderType() EncoderType // func DBKeyForStakeByValidatorByStaker(stakeEntry *StakeEntry) []byte { - var data []byte - data = append(data, Prefixes.PrefixStakeByValidatorByStaker...) - data = append(data, stakeEntry.ValidatorPKID.ToBytes()...) + data := DBKeyForStakeByValidator(stakeEntry) data = append(data, stakeEntry.StakerPKID.ToBytes()...) return data } +func DBKeyForStakeByValidator(stakeEntry *StakeEntry) []byte { + data := append([]byte{}, Prefixes.PrefixStakeByValidatorByStaker...) + data = append(data, stakeEntry.ValidatorPKID.ToBytes()...) 
+ return data +} + func DBKeyForLockedStakeByValidatorByStakerByLockedAt(lockedStakeEntry *LockedStakeEntry) []byte { data := DBPrefixKeyForLockedStakeByValidatorByStaker(lockedStakeEntry) data = append(data, UintToBuf(lockedStakeEntry.LockedAtEpochNumber)...) @@ -537,8 +541,7 @@ func DBKeyForLockedStakeByValidatorByStakerByLockedAt(lockedStakeEntry *LockedSt } func DBPrefixKeyForLockedStakeByValidatorByStaker(lockedStakeEntry *LockedStakeEntry) []byte { - var data []byte - data = append(data, Prefixes.PrefixLockedStakeByValidatorByStakerByLockedAt...) + data := append([]byte{}, Prefixes.PrefixLockedStakeByValidatorByStakerByLockedAt...) data = append(data, lockedStakeEntry.ValidatorPKID.ToBytes()...) data = append(data, lockedStakeEntry.StakerPKID.ToBytes()...) return data @@ -585,6 +588,29 @@ func DBGetStakeEntryWithTxn( return stakeEntry, nil } +func DBGetStakeEntriesForValidatorPKID(handle *badger.DB, snap *Snapshot, validatorPKID *PKID) ([]*StakeEntry, error) { + // Retrieve StakeEntries from db. + prefix := DBKeyForStakeByValidator(&StakeEntry{ValidatorPKID: validatorPKID}) + _, valsFound, err := EnumerateKeysForPrefixWithLimitOffsetOrder( + handle, prefix, 0, nil, false, NewSet([]string{}), + ) + if err != nil { + return nil, errors.Wrapf(err, "DBGetStakeEntriesForValidatorPKID: problem retrieving StakeEntries: ") + } + + // Decode StakeEntries from bytes. 
+ var stakeEntries []*StakeEntry + for _, stakeEntryBytes := range valsFound { + rr := bytes.NewReader(stakeEntryBytes) + stakeEntry, err := DecodeDeSoEncoder(&StakeEntry{}, rr) + if err != nil { + return nil, errors.Wrapf(err, "DBGetStakeEntriesForValidatorPKID: problem decoding StakeEntry: ") + } + stakeEntries = append(stakeEntries, stakeEntry) + } + return stakeEntries, nil +} + func DBGetLockedStakeEntry( handle *badger.DB, snap *Snapshot, @@ -1080,7 +1106,7 @@ func (bav *UtxoView) _connectStake( if err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectStake: ") } - if prevValidatorEntry == nil || prevValidatorEntry.isDeleted || prevValidatorEntry.DisableDelegatedStake { + if prevValidatorEntry == nil || prevValidatorEntry.isDeleted { return 0, 0, nil, errors.Wrapf(RuleErrorInvalidValidatorPKID, "_connectStake: ") } @@ -1751,9 +1777,12 @@ func (bav *UtxoView) IsValidStakeMetadata(transactorPkBytes []byte, metadata *St if err != nil { return errors.Wrapf(err, "UtxoView.IsValidStakeMetadata: ") } - if validatorEntry == nil || validatorEntry.isDeleted || validatorEntry.DisableDelegatedStake { + if validatorEntry == nil || validatorEntry.isDeleted { return errors.Wrapf(RuleErrorInvalidValidatorPKID, "UtxoView.IsValidStakeMetadata: ") } + if !transactorPKIDEntry.PKID.Eq(validatorEntry.ValidatorPKID) && validatorEntry.DisableDelegatedStake { + return errors.Wrapf(RuleErrorInvalidStakeValidatorDisabledDelegatedStake, "UtxoView.IsValidStakeMetadata: ") + } // Validate 0 < StakeAmountNanos <= transactor's DESO Balance. We ignore // the txn fees in this check. The StakeAmountNanos will be validated to @@ -1880,6 +1909,43 @@ func (bav *UtxoView) GetStakeEntry(validatorPKID *PKID, stakerPKID *PKID) (*Stak return stakeEntry, nil } +func (bav *UtxoView) GetStakeEntriesForValidatorPKID(validatorPKID *PKID) ([]*StakeEntry, error) { + // Validate inputs. 
+ if validatorPKID == nil { + return nil, errors.New("UtxoView.GetStakeEntriesForValidatorPKID: nil ValidatorPKID provided as input") + } + + // First, pull matching StakeEntries from the database and cache them in the UtxoView. + dbStakeEntries, err := DBGetStakeEntriesForValidatorPKID(bav.Handle, bav.Snapshot, validatorPKID) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetStakeEntriesForValidatorPKID: error retrieving StakeEntries from the db: ") + } + for _, stakeEntry := range dbStakeEntries { + // Cache results in the UtxoView. + if _, exists := bav.StakeMapKeyToStakeEntry[stakeEntry.ToMapKey()]; !exists { + bav._setStakeEntryMappings(stakeEntry) + } + } + + // Then, pull matching StakeEntries from the UtxoView. + var stakeEntries []*StakeEntry + for _, stakeEntry := range bav.StakeMapKeyToStakeEntry { + if !stakeEntry.ValidatorPKID.Eq(validatorPKID) || stakeEntry.isDeleted { + continue + } + stakeEntries = append(stakeEntries, stakeEntry) + } + + // Sort by StakerPKID so that the ordering is deterministic. + sort.Slice(stakeEntries, func(ii, jj int) bool { + return bytes.Compare( + stakeEntries[ii].StakerPKID.ToBytes(), + stakeEntries[jj].StakerPKID.ToBytes(), + ) < 0 + }) + return stakeEntries, nil +} + func (bav *UtxoView) GetLockedStakeEntry( validatorPKID *PKID, stakerPKID *PKID, @@ -1934,11 +2000,7 @@ func (bav *UtxoView) GetLockedStakeEntriesInRange( return nil, errors.New("UtxoView.GetLockedStakeEntriesInRange: invalid LockedAtEpochNumber range provided as input") } - // Store matching LockedStakeEntries in a map to prevent - // returning duplicates between the db and UtxoView. - lockedStakeEntriesMap := make(map[LockedStakeMapKey]*LockedStakeEntry) - - // First, pull matching LockedStakeEntries from the db. + // First, pull matching LockedStakeEntries from the db and cache them in the UtxoView. 
dbLockedStakeEntries, err := DBGetLockedStakeEntriesInRange( bav.Handle, bav.Snapshot, validatorPKID, stakerPKID, startEpochNumber, endEpochNumber, ) @@ -1946,34 +2008,27 @@ func (bav *UtxoView) GetLockedStakeEntriesInRange( return nil, errors.Wrapf(err, "UtxoView.GetLockedStakeEntriesInRange: ") } for _, lockedStakeEntry := range dbLockedStakeEntries { - lockedStakeEntriesMap[lockedStakeEntry.ToMapKey()] = lockedStakeEntry + // Cache results in the UtxoView. + if _, exists := bav.LockedStakeMapKeyToLockedStakeEntry[lockedStakeEntry.ToMapKey()]; !exists { + bav._setLockedStakeEntryMappings(lockedStakeEntry) + } } // Then, pull matching LockedStakeEntries from the UtxoView. - // Loop through all LockedStakeEntries in the UtxoView. + var lockedStakeEntries []*LockedStakeEntry for _, lockedStakeEntry := range bav.LockedStakeMapKeyToLockedStakeEntry { // Filter to matching LockedStakeEntries. if !lockedStakeEntry.ValidatorPKID.Eq(validatorPKID) || !lockedStakeEntry.StakerPKID.Eq(stakerPKID) || lockedStakeEntry.LockedAtEpochNumber < startEpochNumber || - lockedStakeEntry.LockedAtEpochNumber > endEpochNumber { + lockedStakeEntry.LockedAtEpochNumber > endEpochNumber || + lockedStakeEntry.isDeleted { continue } - - if lockedStakeEntry.isDeleted { - // Remove from map if isDeleted. - delete(lockedStakeEntriesMap, lockedStakeEntry.ToMapKey()) - } else { - // Otherwise, add to map. - lockedStakeEntriesMap[lockedStakeEntry.ToMapKey()] = lockedStakeEntry - } - } - - // Convert LockedStakeEntries map to slice, sorted by LockedAtEpochNumber ASC. - var lockedStakeEntries []*LockedStakeEntry - for _, lockedStakeEntry := range lockedStakeEntriesMap { lockedStakeEntries = append(lockedStakeEntries, lockedStakeEntry) } + + // Sort LockedStakeEntries by LockedAtEpochNumber ASC. 
sort.Slice(lockedStakeEntries, func(ii, jj int) bool { return lockedStakeEntries[ii].LockedAtEpochNumber < lockedStakeEntries[jj].LockedAtEpochNumber }) @@ -2485,9 +2540,12 @@ func (bav *UtxoView) IsValidStakeLimitKey(transactorPublicKeyBytes []byte, stake if err != nil { return errors.Wrapf(err, "IsValidStakeLimitKey: ") } - if validatorEntry == nil || validatorEntry.isDeleted || validatorEntry.DisableDelegatedStake { + if validatorEntry == nil || validatorEntry.isDeleted { return errors.Wrapf(RuleErrorTransactionSpendingLimitInvalidValidator, "UtxoView.IsValidStakeLimitKey: ") } + if !transactorPKIDEntry.PKID.Eq(&stakeLimitKey.ValidatorPKID) && validatorEntry.DisableDelegatedStake { + return errors.Wrapf(RuleErrorTransactionSpendingLimitValidatorDisabledDelegatedStake, "UtxoView.IsValidStakeLimitKey: ") + } return nil } @@ -2499,6 +2557,7 @@ func (bav *UtxoView) IsValidStakeLimitKey(transactorPublicKeyBytes []byte, stake const RuleErrorInvalidStakerPKID RuleError = "RuleErrorInvalidStakerPKID" const RuleErrorInvalidStakeAmountNanos RuleError = "RuleErrorInvalidStakeAmountNanos" const RuleErrorInvalidStakeInsufficientBalance RuleError = "RuleErrorInvalidStakeInsufficientBalance" +const RuleErrorInvalidStakeValidatorDisabledDelegatedStake RuleError = "RuleErrorInvalidStakeValidatorDisabledDelegatedStake" const RuleErrorInvalidUnstakeNoStakeFound RuleError = "RuleErrorInvalidUnstakeNoStakeFound" const RuleErrorInvalidUnstakeAmountNanos RuleError = "RuleErrorInvalidUnstakeAmountNanos" const RuleErrorInvalidUnstakeInsufficientStakeFound RuleError = "RuleErrorInvalidUnstakeInsufficientStakeFound" @@ -2512,3 +2571,4 @@ const RuleErrorUnstakeTransactionSpendingLimitExceeded RuleError = "RuleErrorUns const RuleErrorUnlockStakeTransactionSpendingLimitNotFound RuleError = "RuleErrorUnlockStakeTransactionSpendingLimitNotFound" const RuleErrorTransactionSpendingLimitInvalidStaker RuleError = "RuleErrorTransactionSpendingLimitInvalidStaker" const 
RuleErrorTransactionSpendingLimitInvalidValidator RuleError = "RuleErrorTransactionSpendingLimitInvalidValidator" +const RuleErrorTransactionSpendingLimitValidatorDisabledDelegatedStake RuleError = "RuleErrorTransactionSpendingLimitValidatorDisabledDelegatedStake" diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 2f3d0cd98..fedf50442 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -49,17 +49,10 @@ func _testStaking(t *testing.T, flushToDB bool) { _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) - _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e3) - _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e3) - _registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 1e3) _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID - m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID - m3PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m3PkBytes).PKID - m4PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m4PkBytes).PKID - _, _, _, _, _ = m0PKID, m1PKID, m2PKID, m3PKID, m4PKID // Helper utils utxoView := func() *UtxoView { @@ -792,6 +785,7 @@ func _testStakingWithDerivedKey(t *testing.T) { _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e3) _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, 
senderPrivString, 1e3) m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 344b60c50..fb9ecbdc5 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -292,7 +292,6 @@ type RegisterAsValidatorTxindexMetadata struct { VotingPublicKey string VotingPublicKeySignature string VotingSignatureBlockHeight uint64 - UnstakedStakers []*UnstakedStakerTxindexMetadata } func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -309,13 +308,6 @@ func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawEncodeWithoutMetad data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingPublicKey))...) data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingPublicKeySignature))...) data = append(data, UintToBuf(txindexMetadata.VotingSignatureBlockHeight)...) - - // UnstakedStakers - data = append(data, UintToBuf(uint64(len(txindexMetadata.UnstakedStakers)))...) - for _, unstakedStaker := range txindexMetadata.UnstakedStakers { - data = append(data, unstakedStaker.RawEncodeWithoutMetadata(blockHeight, skipMetadata...)...) 
- } - return data } @@ -368,20 +360,6 @@ func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawDecodeWithoutMetad return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading VotingSignatureBlockHeight: ") } - // UnstakedStakers - numUnstakedStakers, err := ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading UnstakedStakers: ") - } - for ii := 0; ii < int(numUnstakedStakers); ii++ { - unstakedStaker := &UnstakedStakerTxindexMetadata{} - err = unstakedStaker.RawDecodeWithoutMetadata(blockHeight, rr) - if err != nil { - return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading UnstakedStakers: ") - } - txindexMetadata.UnstakedStakers = append(txindexMetadata.UnstakedStakers, unstakedStaker) - } - return nil } @@ -924,13 +902,6 @@ func (bav *UtxoView) _connectRegisterAsValidator( totalStakeAmountNanos = prevValidatorEntry.TotalStakeAmountNanos.Clone() } - // TODO: In subsequent PR, unstake delegated stakers if updating DisableDelegatedStake=true. - // We will also need to update the TotalStakeAmountNanos and the GlobalStakeAmountNanos. - if prevValidatorEntry != nil && - !prevValidatorEntry.DisableDelegatedStake && // Validator previously allowed delegated stake. - txMeta.DisableDelegatedStake { // Validator no longer allows delegated stake. - } - // Set RegisteredAtBlockHeight only if this is a new ValidatorEntry. 
registeredAtBlockHeight := uint64(blockHeight) if prevValidatorEntry != nil { @@ -965,7 +936,6 @@ func (bav *UtxoView) _connectRegisterAsValidator( utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ Type: OperationTypeRegisterAsValidator, PrevValidatorEntry: prevValidatorEntry, - // PrevStakeEntries: prevStakeEntries, // TODO: in subsequent PR }) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1023,10 +993,6 @@ func (bav *UtxoView) _disconnectRegisterAsValidator( bav._setValidatorEntryMappings(prevValidatorEntry) } - // TODO: In subsequent PR, if PrevStakeEntries, delete the - // current StakeEntries and restore the prev StakeEntries. - // This should also update GlobalStakeAmountNanos. - // Disconnect the BasicTransfer. return bav._disconnectBasicTransfer( currentTxn, txHash, utxoOpsForTxn[:operationIndex], blockHeight, @@ -1084,8 +1050,81 @@ func (bav *UtxoView) _connectUnregisterAsValidator( return 0, 0, nil, errors.Wrapf(RuleErrorInvalidValidatorPKID, "_connectUnregisterAsValidator: ") } - // TODO: In subsequent PR, unstake all StakeEntries for this validator. - // This should also update GlobalStakeAmountNanos. + // Retrieve PrevStakeEntries for this ValidatorPKID. + prevStakeEntries, err := bav.GetStakeEntriesForValidatorPKID(transactorPKIDEntry.PKID) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: error retrieving StakeEntries: ") + } + + // Delete each StakeEntry and create or update the corresponding LockedStakeEntry. + // Track TotalUnstakedAmountNanos and PrevLockedStakeEntries. + totalUnstakedAmountNanos := uint256.NewInt() + var prevLockedStakeEntries []*LockedStakeEntry + + currentEpochNumber := uint64(0) // TODO: Retrieve this from the db. + + for _, prevStakeEntry := range prevStakeEntries { + // Add the UnstakedAmountNanos to the TotalUnstakedAmountNanos. 
+ totalUnstakedAmountNanos, err = SafeUint256().Add( + totalUnstakedAmountNanos, prevStakeEntry.StakeAmountNanos, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf( + err, "_connectUnregisterAsValidator: error adding UnstakedAmountNanos to TotalUnstakedAmountNanos: ", + ) + } + + // Retrieve the existing LockedStakeEntry, if exists. + prevLockedStakeEntry, err := bav.GetLockedStakeEntry( + prevStakeEntry.ValidatorPKID, prevStakeEntry.StakerPKID, currentEpochNumber, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf( + err, "_connectUnregisterAsValidator: error retrieving LockedStakeEntry: ", + ) + } + + // Copy the existing LockedStakeEntry and update the LockedAmountNanos, if exists. + // Create a new LockedStakeEntry with the unstaked LockedAmountNanos, otherwise. + var lockedStakeEntry *LockedStakeEntry + + if prevLockedStakeEntry != nil { + prevLockedStakeEntries = append(prevLockedStakeEntries, prevLockedStakeEntry) + lockedStakeEntry = prevLockedStakeEntry.Copy() + lockedStakeEntry.LockedAmountNanos, err = SafeUint256().Add( + lockedStakeEntry.LockedAmountNanos, prevStakeEntry.StakeAmountNanos, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf( + err, "_connectUnregisterAsValidator: error adding LockedStakeEntry.LockedAmountNanos: ", + ) + } + } else { + lockedStakeEntry = &LockedStakeEntry{ + // What to set as the LockedStakeID here is not obvious. Typically, we use the + // TxHash, but since there may be multiple LockedStakeEntries created here, one + // for each unstaked staker, that would result in multiple LockedStakeEntries + // with the same LockedStakeID. For that reason, we opted to use the + // PrevStakeEntry's StakeID instead. + LockedStakeID: prevStakeEntry.StakeID.NewBlockHash(), + StakerPKID: prevStakeEntry.StakerPKID.NewPKID(), + ValidatorPKID: prevStakeEntry.ValidatorPKID.NewPKID(), + LockedAmountNanos: prevStakeEntry.StakeAmountNanos.Clone(), + LockedAtEpochNumber: currentEpochNumber, + } + } + + // Delete the PrevStakeEntry. 
+ bav._deleteStakeEntryMappings(prevStakeEntry) + + // Delete the PrevLockedStakeEntry, if exists. + if prevLockedStakeEntry != nil { + bav._deleteLockedStakeEntryMappings(prevLockedStakeEntry) + } + + // Set the new LockedStakeEntry. + bav._setLockedStakeEntryMappings(lockedStakeEntry) + } // Delete the existing ValidatorEntry. prevValidatorEntry, err := bav.GetValidatorByPKID(transactorPKIDEntry.PKID) @@ -1098,11 +1137,38 @@ func (bav *UtxoView) _connectUnregisterAsValidator( } bav._deleteValidatorEntryMappings(prevValidatorEntry) + // Sanity check that TotalUnstakedAmountNanos == PrevValidatorEntry.TotalStakedAmountNanos. + if !totalUnstakedAmountNanos.Eq(prevValidatorEntry.TotalStakeAmountNanos) { + return 0, 0, nil, errors.New( + "_connectUnregisterAsValidator: TotalUnstakedAmountNanos does not match ValidatorEntry.TotalStakedAmountNanos: ", + ) + } + + // Decrease the GlobalStakeAmountNanos by the amount that was unstaked. + // Fetch the existing GlobalStakeAmountNanos. + prevGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: error fetching GlobalStakeAmountNanos: ") + } + // Subtract the amount that was unstaked. + globalStakeAmountNanos, err := SafeUint256().Sub( + prevGlobalStakeAmountNanos, totalUnstakedAmountNanos, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf( + err, "_connectUnregisterAsValidator: error subtracting TotalUnstakedAmountNanos from GlobalStakeAmountNanos: ", + ) + } + // Set the new GlobalStakeAmountNanos. + bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) + // Add a UTXO operation. 
utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ - Type: OperationTypeUnregisterAsValidator, - PrevValidatorEntry: prevValidatorEntry, - // PrevStakeEntries: prevStakeEntries, // TODO: in subsequent PR + Type: OperationTypeUnregisterAsValidator, + PrevValidatorEntry: prevValidatorEntry, + PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, + PrevStakeEntries: prevStakeEntries, + PrevLockedStakeEntries: prevLockedStakeEntries, }) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1133,7 +1199,7 @@ func (bav *UtxoView) _disconnectUnregisterAsValidator( ) } - // Restore the PrevValidatorEntry. + // Restore the PrevValidatorEntry. This must always exist. prevValidatorEntry := operationData.PrevValidatorEntry if prevValidatorEntry == nil { // This should never happen as you can only unregister an existing ValidatorEntry @@ -1144,7 +1210,38 @@ func (bav *UtxoView) _disconnectUnregisterAsValidator( } bav._setValidatorEntryMappings(prevValidatorEntry) - // TODO: In subsequent PR, restore the prev StakeEntries, if any. + // Restore the PrevStakeEntries, if any. + for _, prevStakeEntry := range operationData.PrevStakeEntries { + // Delete the CurrentStakeEntry. + currentStakeEntry, err := bav.GetStakeEntry(prevStakeEntry.ValidatorPKID, prevStakeEntry.StakerPKID) + if err != nil { + return errors.Wrapf(err, "_disconnectUnregisterAsValidator: error retrieving CurrentStakeEntry: ") + } + bav._deleteStakeEntryMappings(currentStakeEntry) + + // Set the PrevStakeEntry. + bav._setStakeEntryMappings(prevStakeEntry) + } + + // Restore the PrevLockedStakeEntries, if any. + currentEpochNumber := uint64(0) // TODO: Retrieve this from the db. + + for _, prevLockedStakeEntry := range operationData.PrevLockedStakeEntries { + // Delete the CurrentLockedStakeEntry. 
+ currentLockedStakeEntry, err := bav.GetLockedStakeEntry( + prevLockedStakeEntry.ValidatorPKID, prevLockedStakeEntry.StakerPKID, currentEpochNumber, + ) + if err != nil { + return errors.Wrapf(err, "_disconnectUnregisterAsValidator: error retrieving CurrentLockedStakeEntry: ") + } + bav._deleteLockedStakeEntryMappings(currentLockedStakeEntry) + + // Set the PrevLockedStakeEntry. + bav._setLockedStakeEntryMappings(prevLockedStakeEntry) + } + + // Restore the PrevGlobalStakeAmountNanos. + bav._setGlobalStakeAmountNanos(operationData.PrevGlobalStakeAmountNanos) // Disconnect the BasicTransfer. return bav._disconnectBasicTransfer( @@ -1179,6 +1276,33 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata(transactorPublicKey []by } // TODO: In subsequent PR, validate VotingPublicKey, VotingPublicKeySignature, and VotingSignatureBlockHeight. + + // Error if updating DisableDelegatedStake from false to + // true and there are existing delegated StakeEntries. + validatorEntry, err := bav.GetValidatorByPKID(transactorPKIDEntry.PKID) + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error retrieving existing ValidatorEntry: ") + } + if validatorEntry != nil && // ValidatorEntry exists + !validatorEntry.DisableDelegatedStake && // Existing ValidatorEntry.DisableDelegatedStake = false + metadata.DisableDelegatedStake { // Updating DisableDelegatedStake = true + + // Retrieve existing StakeEntries. + prevStakeEntries, err := bav.GetStakeEntriesForValidatorPKID(transactorPKIDEntry.PKID) + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error retrieving existing StakeEntries: ") + } + + // Error if any belong to someone other than the validator. 
+ for _, stakeEntry := range prevStakeEntries { + if !stakeEntry.StakerPKID.Eq(transactorPKIDEntry.PKID) { + return errors.Wrapf( + RuleErrorValidatorDisablingExistingDelegatedStakers, "UtxoView.IsValidRegisterAsValidatorMetadata: ", + ) + } + } + } + return nil } @@ -1427,9 +1551,6 @@ func (bav *UtxoView) CreateRegisterAsValidatorTxindexMetadata( domains = append(domains, string(domain)) } - // TODO: In subsequent PR, pull UnstakedStakers from PrevStakeEntries on UtxoOperation. - var unstakedStakers []*UnstakedStakerTxindexMetadata - // Construct TxindexMetadata. txindexMetadata := &RegisterAsValidatorTxindexMetadata{ ValidatorPublicKeyBase58Check: validatorPublicKeyBase58Check, @@ -1439,7 +1560,6 @@ func (bav *UtxoView) CreateRegisterAsValidatorTxindexMetadata( VotingPublicKey: string(metadata.VotingPublicKey), VotingPublicKeySignature: string(metadata.VotingPublicKeySignature), VotingSignatureBlockHeight: metadata.VotingSignatureBlockHeight, - UnstakedStakers: unstakedStakers, } // Construct AffectedPublicKeys. @@ -1449,12 +1569,6 @@ func (bav *UtxoView) CreateRegisterAsValidatorTxindexMetadata( Metadata: "RegisteredValidatorPublicKeyBase58Check", }, } - for _, unstakedStaker := range unstakedStakers { - affectedPublicKeys = append(affectedPublicKeys, &AffectedPublicKey{ - PublicKeyBase58Check: unstakedStaker.StakerPublicKeyBase58Check, - Metadata: "UnstakedStakerPublicKeyBase58Check", - }) - } return txindexMetadata, affectedPublicKeys } @@ -1469,9 +1583,19 @@ func (bav *UtxoView) CreateUnregisterAsValidatorTxindexMetadata( // Cast ValidatorPublicKey to ValidatorPublicKeyBase58Check. validatorPublicKeyBase58Check := PkToString(txn.PublicKey, bav.Params) - // TODO: In subsequent PR, pull UnstakedStakers from PrevStakeEntries on UtxoOperation. + // Pull UnstakedStakers from PrevStakeEntries on UtxoOperation. 
var unstakedStakers []*UnstakedStakerTxindexMetadata + for _, stakeEntry := range utxoOp.PrevStakeEntries { + stakerPublicKeyBytes := bav.GetPublicKeyForPKID(stakeEntry.StakerPKID) + stakerPublicKeyBase58Check := PkToString(stakerPublicKeyBytes, bav.Params) + + unstakedStakers = append(unstakedStakers, &UnstakedStakerTxindexMetadata{ + StakerPublicKeyBase58Check: stakerPublicKeyBase58Check, + UnstakeAmountNanos: stakeEntry.StakeAmountNanos, + }) + } + // Construct TxindexMetadata. txindexMetadata := &UnregisterAsValidatorTxindexMetadata{ ValidatorPublicKeyBase58Check: validatorPublicKeyBase58Check, @@ -1506,5 +1630,6 @@ const RuleErrorValidatorTooManyDomains RuleError = "RuleErrorValidatorTooManyDom const RuleErrorValidatorInvalidDomain RuleError = "RuleErrorValidatorInvalidDomain" const RuleErrorValidatorDuplicateDomains RuleError = "RuleErrorValidatorDuplicateDomains" const RuleErrorValidatorNotFound RuleError = "RuleErrorValidatorNotFound" +const RuleErrorValidatorDisablingExistingDelegatedStakers RuleError = "RuleErrorValidatorDisablingExistingDelegatedStakers" const MaxValidatorNumDomains int = 12 diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 5c16ebefc..889b80c64 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -16,11 +16,6 @@ func TestValidatorRegistration(t *testing.T) { _testValidatorRegistrationWithDerivedKey(t) } -func TestGetTopValidatorsByStake(t *testing.T) { - _testGetTopValidatorsByStake(t, false) - _testGetTopValidatorsByStake(t, true) -} - func _testValidatorRegistration(t *testing.T, flushToDB bool) { // Local variables var registerMetadata *RegisterAsValidatorMetadata @@ -42,7 +37,6 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.NoError(t, err) return newUtxoView } - _ = utxoView // Mine a few blocks to give the senderPkString some money. 
for ii := 0; ii < 10; ii++ { @@ -64,18 +58,9 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { } _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) - _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) - _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e3) - _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e3) - _registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 1e3) _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID - m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID - m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID - m3PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m3PkBytes).PKID - m4PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m4PkBytes).PKID - _, _, _, _, _ = m0PKID, m1PKID, m2PKID, m3PKID, m4PKID { // ParamUpdater set min fee rate @@ -634,6 +619,11 @@ func _testValidatorRegistrationWithDerivedKey(t *testing.T) { _executeAllTestRollbackAndFlush(testMeta) } +func TestGetTopValidatorsByStake(t *testing.T) { + _testGetTopValidatorsByStake(t, false) + _testGetTopValidatorsByStake(t, true) +} + func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { var validatorEntries []*ValidatorEntry var err error @@ -686,9 +676,6 @@ func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID - m3PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m3PkBytes).PKID - m4PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m4PkBytes).PKID - 
_, _, _, _, _ = m0PKID, m1PKID, m2PKID, m3PKID, m4PKID { // ParamUpdater set min fee rate @@ -982,3 +969,385 @@ func TestGetTopValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal(t, validatorEntries[2].ValidatorPKID, m2PKID) require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(50)) } + +func TestUpdatingValidatorDisableDelegatedStake(t *testing.T) { + _testUpdatingValidatorDisableDelegatedStake(t, false) + _testUpdatingValidatorDisableDelegatedStake(t, true) +} + +func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { + var validatorEntry *ValidatorEntry + var stakeEntries []*StakeEntry + var err error + + // Initialize balance model fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize PoS fork height. + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + utxoView := func() *UtxoView { + newUtxoView, err := mempool.GetAugmentedUniversalView() + require.NoError(t, err) + return newUtxoView + } + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. 
+ blockHeight := uint64(chain.blockTip().Height) + 1 + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + + { + // ParamUpdater set min fee rate + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } + { + // m0 registers as a validator with DisableDelegatedStake = FALSE. + registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: false, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.False(t, validatorEntry.DisableDelegatedStake) + + stakeEntries, err = utxoView().GetStakeEntriesForValidatorPKID(m0PKID) + require.NoError(t, err) + require.Empty(t, stakeEntries) + } + { + // m0 updates DisableDelegatedStake = TRUE. 
+ registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: true, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.True(t, validatorEntry.DisableDelegatedStake) + } + { + // m0 stakes with himself. This is allowed even though DisableDelegatedStake = TRUE. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitStakeTxn( + testMeta, m0Pub, m0Priv, stakeMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + stakeEntries, err = utxoView().GetStakeEntriesForValidatorPKID(m0PKID) + require.NoError(t, err) + require.Len(t, stakeEntries, 1) + require.Equal(t, stakeEntries[0].StakerPKID, m0PKID) + } + { + // m1 tries to stake with m0. Errors. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitStakeTxn( + testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidStakeValidatorDisabledDelegatedStake) + } + { + // m0 updates DisableDelegatedStake = FALSE. + registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: false, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.False(t, validatorEntry.DisableDelegatedStake) + } + { + // m1 stakes with m0. Succeeds. 
+ stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitStakeTxn( + testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + stakeEntries, err = utxoView().GetStakeEntriesForValidatorPKID(m0PKID) + require.NoError(t, err) + require.Len(t, stakeEntries, 2) + } + { + // m0 tries to update DisableDelegateStake = TRUE. Errors. + registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: true, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorDisablingExistingDelegatedStakers) + } + + // Flush mempool to the db and test rollbacks. + require.NoError(t, mempool.universalUtxoView.FlushToDb(blockHeight)) + _executeAllTestRollbackAndFlush(testMeta) +} + +func TestUnregisterAsValidator(t *testing.T) { + _testUnregisterAsValidator(t, false) + _testUnregisterAsValidator(t, true) +} + +func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { + var validatorEntry *ValidatorEntry + var stakeEntry *StakeEntry + var lockedStakeEntry *LockedStakeEntry + _ = lockedStakeEntry + var globalStakeAmountNanos *uint256.Int + var err error + + // Initialize balance model fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize PoS fork height. 
+ params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + utxoView := func() *UtxoView { + newUtxoView, err := mempool.GetAugmentedUniversalView() + require.NoError(t, err) + return newUtxoView + } + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. + blockHeight := uint64(chain.blockTip().Height) + 1 + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID + + currentEpochNumber := uint64(0) // TODO: Retrieve this from the db. + _ = currentEpochNumber + + { + // ParamUpdater set min fee rate + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } + { + // m0 registers as a validator. 
+ registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://m0.com")}, + } + _, _, _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + } + { + // m0 stakes with himself. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(600), + } + _, err = _submitStakeTxn( + testMeta, m0Pub, m0Priv, stakeMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + stakeEntry, err = utxoView().GetStakeEntry(m0PKID, m0PKID) + require.NoError(t, err) + require.NotNil(t, stakeEntry) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(600)) + + globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(600)) + } + { + // m1 stakes with m0. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(400), + } + _, err = _submitStakeTxn( + testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, + ) + require.NoError(t, err) + + stakeEntry, err = utxoView().GetStakeEntry(m0PKID, m1PKID) + require.NoError(t, err) + require.NotNil(t, stakeEntry) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(400)) + + globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(1000)) + } + { + // m1 partially unstakes with m0. 
+ unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitUnstakeTxn( + testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, + ) + + // m1's StakeEntry is updated. + stakeEntry, err = utxoView().GetStakeEntry(m0PKID, m1PKID) + require.NoError(t, err) + require.NotNil(t, stakeEntry) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(300)) + + // m1 has a LockedStakeEntry created. + lockedStakeEntry, err = utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) + require.NoError(t, err) + require.NotNil(t, lockedStakeEntry) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(100)) + + // GlobalStakeAmountNanos is updated. + globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(900)) + } + { + // m0 unregisters as a validator. + _, _, _, err = _submitUnregisterAsValidatorTxn(testMeta, m0Pub, m0Priv, flushToDB) + require.NoError(t, err) + + // m0's ValidatorEntry is deleted. + validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Nil(t, validatorEntry) + + // m0 is unstaked. + // m0's StakeEntry is deleted. + stakeEntry, err = utxoView().GetStakeEntry(m0PKID, m0PKID) + require.NoError(t, err) + require.Nil(t, stakeEntry) + // m0's has a LockedStakeEntry created. + lockedStakeEntry, err = utxoView().GetLockedStakeEntry(m0PKID, m0PKID, currentEpochNumber) + require.NoError(t, err) + require.NotNil(t, lockedStakeEntry) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(600)) + + // m1 is unstaked. + // m1's StakeEntry is deleted. + stakeEntry, err = utxoView().GetStakeEntry(m0PKID, m1PKID) + require.NoError(t, err) + require.Nil(t, stakeEntry) + // m1's LockedStakeEntry is updated. 
+ lockedStakeEntry, err = utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) + require.NoError(t, err) + require.NotNil(t, lockedStakeEntry) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(400)) + + // GlobalStakeAmountNanos is updated. + globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + } + + // Flush mempool to the db and test rollbacks. + require.NoError(t, mempool.universalUtxoView.FlushToDb(blockHeight)) + _executeAllTestRollbackAndFlush(testMeta) +} From 4c8c8150c8c891d3e04abbdb3414b51ef32c8a85 Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Thu, 4 May 2023 10:29:26 -0400 Subject: [PATCH 008/762] Add epoch data type. (#514) * Add epoch data type. * Update func naming. * Fix nil error. * Add prefix encoder db util. * Rename field to FinalBlockHeight. * Avoid snapshotting issue for now. * Update ancestral record before setting in db. * Flush the CurrentEpochEntry to the db like we do for other entries. * Return error instead of logging. --- lib/block_view.go | 11 +++ lib/block_view_flush.go | 6 ++ lib/block_view_stake.go | 16 ++-- lib/block_view_types.go | 5 +- lib/db_utils.go | 9 ++- lib/pos_epoch.go | 170 ++++++++++++++++++++++++++++++++++++++++ lib/pos_epoch_test.go | 81 +++++++++++++++++++ 7 files changed, 288 insertions(+), 10 deletions(-) create mode 100644 lib/pos_epoch.go create mode 100644 lib/pos_epoch_test.go diff --git a/lib/block_view.go b/lib/block_view.go index 2e1841849..597f0605b 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -126,6 +126,9 @@ type UtxoView struct { // Locked stake mappings LockedStakeMapKeyToLockedStakeEntry map[LockedStakeMapKey]*LockedStakeEntry + // Current EpochEntry + CurrentEpochEntry *EpochEntry + // The hash of the tip the view is currently referencing. 
Mainly used // for error-checking when doing a bulk operation on the view. TipHash *BlockHash @@ -228,6 +231,9 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // LockedStakeEntries bav.LockedStakeMapKeyToLockedStakeEntry = make(map[LockedStakeMapKey]*LockedStakeEntry) + + // CurrentEpochEntry + bav.CurrentEpochEntry = nil } func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { @@ -505,6 +511,11 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.LockedStakeMapKeyToLockedStakeEntry[entryKey] = entry.Copy() } + // Copy the CurrentEpochEntry + if bav.CurrentEpochEntry != nil { + newView.CurrentEpochEntry = bav.CurrentEpochEntry.Copy() + } + return newView, nil } diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 4f7ed6bf2..6ae70f214 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -152,6 +152,12 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushLockedStakeEntriesToDbWithTxn(txn, blockHeight); err != nil { return err } + // TODO: We may want to move this into a new FlushToDb function that only flushes + // entries set in the OnEpochEndHook. No sense in wasting a bunch of cycles flushing + // all the other entries which will always be nil/empty in the OnEpochEndHook. + if err := bav._flushCurrentEpochEntryToDbWithTxn(txn, blockHeight); err != nil { + return err + } return nil } diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 737df7bfa..245debc2e 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -576,14 +576,14 @@ func DBGetStakeEntryWithTxn( if err == badger.ErrKeyNotFound { return nil, nil } - return nil, errors.Wrapf(err, "DBGetStakeByValidatorByStaker: problem retrieving StakeEntry: ") + return nil, errors.Wrapf(err, "DBGetStakeEntry: problem retrieving StakeEntry: ") } // Decode StakeEntry from bytes. 
rr := bytes.NewReader(stakeEntryBytes) stakeEntry, err := DecodeDeSoEncoder(&StakeEntry{}, rr) if err != nil { - return nil, errors.Wrapf(err, "DBGetStakeByValidatorByStaker: problem decoding StakeEntry: ") + return nil, errors.Wrapf(err, "DBGetStakeEntry: problem decoding StakeEntry: ") } return stakeEntry, nil } @@ -649,7 +649,7 @@ func DBGetLockedStakeEntryWithTxn( return nil, nil } return nil, errors.Wrapf( - err, "DBGetLockedStakeByValidatorByStakerByLockedAt: problem retrieving LockedStakeEntry: ", + err, "DBGetLockedStakeEntry: problem retrieving LockedStakeEntry: ", ) } @@ -658,7 +658,7 @@ func DBGetLockedStakeEntryWithTxn( lockedStakeEntry, err := DecodeDeSoEncoder(&LockedStakeEntry{}, rr) if err != nil { return nil, errors.Wrapf( - err, "DBGetLockedStakeByValidatorByStakerByLockedAt: problem decoding LockedStakeEntry: ", + err, "DBGetLockedStakeEntry: problem decoding LockedStakeEntry: ", ) } return lockedStakeEntry, nil @@ -755,7 +755,7 @@ func DBPutStakeEntryWithTxn( key := DBKeyForStakeByValidatorByStaker(stakeEntry) if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, stakeEntry)); err != nil { return errors.Wrapf( - err, "DBPutStakeWithTxn: problem storing StakeEntry in index PrefixStakeByValidatorByStaker", + err, "DBPutStakeEntryWithTxn: problem storing StakeEntry in index PrefixStakeByValidatorByStaker: ", ) } @@ -776,7 +776,7 @@ func DBPutLockedStakeEntryWithTxn( key := DBKeyForLockedStakeByValidatorByStakerByLockedAt(lockedStakeEntry) if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, lockedStakeEntry)); err != nil { return errors.Wrapf( - err, "DBPutLockedStakeWithTxn: problem storing LockedStakeEntry in index PrefixLockedStakeByValidatorByStakerByLockedAt", + err, "DBPutLockedStakeEntryWithTxn: problem storing LockedStakeEntry in index PrefixLockedStakeByValidatorByStakerByLockedAt: ", ) } @@ -797,7 +797,7 @@ func DBDeleteStakeEntryWithTxn( key := DBKeyForStakeByValidatorByStaker(stakeEntry) if err := 
DBDeleteWithTxn(txn, snap, key); err != nil { return errors.Wrapf( - err, "DBDeleteStakeWithTxn: problem deleting StakeEntry from index PrefixStakeByValidatorByStaker", + err, "DBDeleteStakeEntryWithTxn: problem deleting StakeEntry from index PrefixStakeByValidatorByStaker: ", ) } @@ -818,7 +818,7 @@ func DBDeleteLockedStakeEntryWithTxn( key := DBKeyForLockedStakeByValidatorByStakerByLockedAt(lockedStakeEntry) if err := DBDeleteWithTxn(txn, snap, key); err != nil { return errors.Wrapf( - err, "DBDeleteLockedStakeWithTxn: problem deleting StakeEntry from index PrefixLockedStakeByValidatorByStakerByLockedAt", + err, "DBDeleteLockedStakeEntryWithTxn: problem deleting StakeEntry from index PrefixLockedStakeByValidatorByStakerByLockedAt: ", ) } diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 4c6e3fd2f..679d737ec 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -113,9 +113,10 @@ const ( EncoderTypeValidatorEntry EncoderType = 40 EncoderTypeStakeEntry EncoderType = 41 EncoderTypeLockedStakeEntry EncoderType = 42 + EncoderTypeEpochEntry EncoderType = 43 // EncoderTypeEndBlockView encoder type should be at the end and is used for automated tests. - EncoderTypeEndBlockView EncoderType = 43 + EncoderTypeEndBlockView EncoderType = 44 ) // Txindex encoder types. @@ -250,6 +251,8 @@ func (encoderType EncoderType) New() DeSoEncoder { return &StakeEntry{} case EncoderTypeLockedStakeEntry: return &LockedStakeEntry{} + case EncoderTypeEpochEntry: + return &EpochEntry{} } // Txindex encoder types diff --git a/lib/db_utils.go b/lib/db_utils.go index 1657ff25a..4ca3b736c 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -502,7 +502,11 @@ type DBPrefixes struct { // Prefix, ValidatorPKID, StakerPKID, LockedAtEpochNumber -> LockedStakeEntry PrefixLockedStakeByValidatorByStakerByLockedAt []byte `prefix_id:"[82]" is_state:"true"` - // NEXT_TAG: 83 + // PrefixCurrentEpoch: Retrieve the current EpochEntry. 
+ // Prefix -> EpochEntry + PrefixCurrentEpoch []byte `prefix_id:"[83]" is_state:"true"` + + // NEXT_TAG: 84 } // StatePrefixToDeSoEncoder maps each state prefix to a DeSoEncoder type that is stored under that prefix. @@ -719,6 +723,9 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixLockedStakeByValidatorByStakerByLockedAt) { // prefix_id:"[82]" return true, &LockedStakeEntry{} + } else if bytes.Equal(prefix, Prefixes.PrefixCurrentEpoch) { + // prefix_id:"[83]" + return true, &EpochEntry{} } return true, nil diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go new file mode 100644 index 000000000..c4685fea3 --- /dev/null +++ b/lib/pos_epoch.go @@ -0,0 +1,170 @@ +package lib + +import ( + "bytes" + "github.com/dgraph-io/badger/v3" + "github.com/golang/glog" + "github.com/pkg/errors" +) + +// +// TYPE +// + +type EpochEntry struct { + EpochNumber uint64 + FinalBlockHeight uint64 +} + +func (epochEntry *EpochEntry) Copy() *EpochEntry { + return &EpochEntry{ + EpochNumber: epochEntry.EpochNumber, + FinalBlockHeight: epochEntry.FinalBlockHeight, + } +} + +func (epochEntry *EpochEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, UintToBuf(epochEntry.EpochNumber)...) + data = append(data, UintToBuf(epochEntry.FinalBlockHeight)...) 
+ return data +} + +func (epochEntry *EpochEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + + // EpochNumber + epochEntry.EpochNumber, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "EpochEntry.Decode: Problem reading EpochNumber: ") + } + + // FinalBlockHeight + epochEntry.FinalBlockHeight, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "EpochEntry.Decode: Problem reading FinalBlockHeight: ") + } + + return err +} + +func (epochEntry *EpochEntry) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (epochEntry *EpochEntry) GetEncoderType() EncoderType { + return EncoderTypeEpochEntry +} + +// +// UTXO VIEW UTILS +// + +func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { + var epochEntry *EpochEntry + var err error + + // First, check the UtxoView. + epochEntry = bav.CurrentEpochEntry + if epochEntry != nil { + return epochEntry.Copy(), nil + } + + // If not found, check the database. + epochEntry, err = DBGetCurrentEpochEntry(bav.Handle, bav.Snapshot) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetCurrentEpoch: problem retrieving EpochEntry from db: ") + } + if epochEntry != nil { + // Cache in the UtxoView. 
+ bav.CurrentEpochEntry = epochEntry.Copy() + } + return epochEntry, nil +} + +func (bav *UtxoView) GetCurrentEpochNumber() (uint64, error) { + epochEntry, err := bav.GetCurrentEpochEntry() + if err != nil { + return 0, errors.Wrapf(err, "UtxoView.GetCurrentEpochNumber: ") + } + if epochEntry == nil { + return 0, errors.New("UtxoView.GetCurrentEpochNumber: no CurrentEpochEntry found") + } + return epochEntry.EpochNumber, nil +} + +func (bav *UtxoView) _setCurrentEpochEntry(epochEntry *EpochEntry) { + if epochEntry == nil { + glog.Errorf("UtxoView._setCurrentEpochEntry: called with nil EpochEntry") + return + } + bav.CurrentEpochEntry = epochEntry.Copy() +} + +func (bav *UtxoView) _flushCurrentEpochEntryToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + if bav.CurrentEpochEntry == nil { + // It is possible that the current UtxoView never interacted with the CurrentEpochEntry + // in which case the CurrentEpochEntry in the UtxoView will be nil. In that case, we + // don't want to overwrite what is in the database. Just no-op. + return nil + } + if err := DBPutCurrentEpochEntryWithTxn(txn, bav.Snapshot, bav.CurrentEpochEntry, blockHeight); err != nil { + return errors.Wrapf(err, "_flushCurrentEpochEntryToDbWithTxn: ") + } + return nil +} + +// +// DB UTILS +// + +func DBKeyForCurrentEpoch() []byte { + return append([]byte{}, Prefixes.PrefixCurrentEpoch...) +} + +func DBGetCurrentEpochEntry(handle *badger.DB, snap *Snapshot) (*EpochEntry, error) { + var ret *EpochEntry + err := handle.View(func(txn *badger.Txn) error { + var innerErr error + ret, innerErr = DBGetCurrentEpochEntryWithTxn(txn, snap) + return innerErr + }) + return ret, err +} + +func DBGetCurrentEpochEntryWithTxn(txn *badger.Txn, snap *Snapshot) (*EpochEntry, error) { + // Retrieve StakeEntry from db. + key := DBKeyForCurrentEpoch() + epochEntryBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + // We don't want to error if the key isn't found. Instead, return nil. 
+ if err == badger.ErrKeyNotFound { + return nil, nil + } + return nil, errors.Wrapf(err, "DBGetCurrentEpochEntry: problem retrieving EpochEntry: ") + } + + // Decode EpochEntry from bytes. + rr := bytes.NewReader(epochEntryBytes) + epochEntry, err := DecodeDeSoEncoder(&EpochEntry{}, rr) + if err != nil { + return nil, errors.Wrapf(err, "DBGetCurrentEpochEntry: problem decoding EpochEntry: ") + } + return epochEntry, nil +} + +func DBPutCurrentEpochEntryWithTxn(txn *badger.Txn, snap *Snapshot, epochEntry *EpochEntry, blockHeight uint64) error { + // Set EpochEntry in PrefixCurrentEpoch. + if epochEntry == nil { + // This is just a safety check that we are not accidentally overwriting an + // existing EpochEntry with a nil EpochEntry. This should never happen. + return errors.New("DBPutCurrentEpochEntryWithTxn: called with nil EpochEntry") + } + key := DBKeyForCurrentEpoch() + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, epochEntry)); err != nil { + return errors.Wrapf( + err, "DBPutCurrentEpochEntryWithTxn: problem storing EpochEntry in index PrefixCurrentEpoch: ", + ) + } + return nil +} diff --git a/lib/pos_epoch_test.go b/lib/pos_epoch_test.go new file mode 100644 index 000000000..3d84f1ec9 --- /dev/null +++ b/lib/pos_epoch_test.go @@ -0,0 +1,81 @@ +package lib + +import ( + "github.com/stretchr/testify/require" + "testing" +) + +func TestCurrentEpoch(t *testing.T) { + var epochEntry *EpochEntry + var err error + + // Initialize blockchain. + chain, params, db := NewLowDifficultyBlockchain(t) + blockHeight := uint64(chain.blockTip().Height) + 1 + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + + // Test that the CurrentEpoch is nil in the db. + epochEntry, err = DBGetCurrentEpochEntry(db, utxoView.Snapshot) + require.NoError(t, err) + require.Nil(t, epochEntry) + + // Test that the CurrentEpoch is nil in the UtxoView. 
+ require.Nil(t, utxoView.CurrentEpochEntry) + + // Test GetCurrentEpoch(). + epochEntry, err = utxoView.GetCurrentEpochEntry() + require.NoError(t, err) + require.Nil(t, epochEntry) + + // Set the CurrentEpoch. + epochEntry = &EpochEntry{ + EpochNumber: 1, + FinalBlockHeight: blockHeight + 5, + } + utxoView._setCurrentEpochEntry(epochEntry) + require.NoError(t, utxoView.FlushToDb(blockHeight)) + + // Test that the CurrentEpoch is set in the db. + epochEntry, err = DBGetCurrentEpochEntry(db, utxoView.Snapshot) + require.NoError(t, err) + require.NotNil(t, epochEntry) + require.Equal(t, epochEntry.EpochNumber, uint64(1)) + require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) + + // Test that the CurrentEpoch is flushed from the UtxoView. + require.Nil(t, utxoView.CurrentEpochEntry) + + // Test GetCurrentEpoch(). + epochEntry, err = utxoView.GetCurrentEpochEntry() + require.NoError(t, err) + require.NotNil(t, epochEntry) + require.Equal(t, epochEntry.EpochNumber, uint64(1)) + require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) + + // Test that the CurrentEpoch is set in the UtxoView. + epochEntry = utxoView.CurrentEpochEntry + require.NotNil(t, epochEntry) + require.Equal(t, epochEntry.EpochNumber, uint64(1)) + require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) + + // Delete CurrentEpoch from the UtxoView. + utxoView.CurrentEpochEntry = nil + require.Nil(t, utxoView.CurrentEpochEntry) + + // CurrentEpoch still exists in the db. + epochEntry, err = DBGetCurrentEpochEntry(db, utxoView.Snapshot) + require.NoError(t, err) + require.NotNil(t, epochEntry) + + // GetCurrentEpoch() should return the CurrentEpoch from the db. + epochEntry, err = utxoView.GetCurrentEpochEntry() + require.NoError(t, err) + require.NotNil(t, epochEntry) + + // CurrentEpoch gets cached in the UtxoView. + require.NotNil(t, utxoView.CurrentEpochEntry) + + // Test GetCurrentEpochNumber(). 
+ require.Equal(t, utxoView.CurrentEpochEntry.EpochNumber, uint64(1)) +} From 8ce1f3fee3c6689ef050184289e939a18f3a6227 Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Thu, 4 May 2023 10:48:35 -0400 Subject: [PATCH 009/762] Incorporate epoch number into PoS txns. (#515) * make sure donePeerChan is not nil (#512) Co-authored-by: Lazy Nina <> * Add epoch data type. * Update func naming. * Fix nil error. * Add prefix encoder db util. * Rename field to FinalBlockHeight. * Avoid snapshotting issue for now. * Update ancestral record before setting in db. * Incorporate epoch number into PoS txns. * block height gate balance snapshot comparison (#516) * [stable] Release 3.4.1 * Re-enable force-checksum (#519) --------- Co-authored-by: Lazy Nina <81658138+lazynina@users.noreply.github.com> Co-authored-by: lazynina Co-authored-by: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> --- cmd/run.go | 2 +- lib/block_view.go | 96 ++++++++++++++++---------------- lib/block_view_stake.go | 14 ++++- lib/block_view_stake_test.go | 86 +++++++++++++++------------- lib/block_view_validator.go | 16 ++++-- lib/block_view_validator_test.go | 37 ++++++++++-- lib/peer.go | 2 +- 7 files changed, 154 insertions(+), 99 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 4f4d400f4..57a03c8a3 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -61,7 +61,7 @@ func SetupRunFlags(cmd *cobra.Command) { "Max sync block height") // Hyper Sync cmd.PersistentFlags().Bool("hypersync", true, "Use hyper sync protocol for faster block syncing") - cmd.PersistentFlags().Bool("force-checksum", false, "When true, the node will panic if the "+ + cmd.PersistentFlags().Bool("force-checksum", true, "When true, the node will panic if the "+ "local state checksum differs from the network checksum reported by its peers.") // Snapshot cmd.PersistentFlags().Uint64("snapshot-block-height-period", 1000, "Set the snapshot epoch period. 
Snapshots are taken at block heights divisible by the period.") diff --git a/lib/block_view.go b/lib/block_view.go index 597f0605b..e9f55bde5 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3093,56 +3093,58 @@ func (bav *UtxoView) _connectTransaction(txn *MsgDeSoTxn, txHash *BlockHash, // Take snapshot of balance balanceSnapshot := make(map[PublicKey]uint64) - for publicKey, balance := range bav.PublicKeyToDeSoBalanceNanos { - balanceSnapshot[publicKey] = balance - } - // Special case: take snapshot of the creator coin entry. var creatorCoinSnapshot *CoinEntry - if txn.TxnMeta.GetTxnType() == TxnTypeCreatorCoin { - // Get the creator coin entry. - creatorCoinTxnMeta := txn.TxnMeta.(*CreatorCoinMetadataa) - creatorProfile := bav.GetProfileEntryForPublicKey(creatorCoinTxnMeta.ProfilePublicKey) - if creatorProfile == nil || creatorProfile.IsDeleted() { - return nil, 0, 0, 0, fmt.Errorf("_connectTransaction: Profile not found for "+ - "public key: %v", PkToString(creatorCoinTxnMeta.ProfilePublicKey, bav.Params)) - } - creatorCoinSnapshot = creatorProfile.CreatorCoinEntry.Copy() - } - // When an NFT is sold, we may need to account for royalties that end up getting - // generated and paid to a user's creator coin directly. nftCreatorCoinRoyaltyEntriesSnapshot := make(map[PKID]*CoinEntry) - if txn.TxnMeta.GetTxnType() == TxnTypeAcceptNFTBid || txn.TxnMeta.GetTxnType() == TxnTypeNFTBid { - // We don't really care if it's an NFT buy now bid or not. We just want to - // capture the royalties that occur to account for ALL DESO. 
- var nftPostHash *BlockHash - if txn.TxnMeta.GetTxnType() == TxnTypeAcceptNFTBid { - nftPostHash = txn.TxnMeta.(*AcceptNFTBidMetadata).NFTPostHash - } else { - nftPostHash = txn.TxnMeta.(*NFTBidMetadata).NFTPostHash - } - postEntry := bav.GetPostEntryForPostHash(nftPostHash) - if postEntry == nil || postEntry.IsDeleted() { - return nil, 0, 0, 0, errors.Wrapf(RuleErrorNFTBidOnNonExistentPost, "_connectTransaction: PostEntry not found for "+ - "post hash: %v", nftPostHash.String()) - } - nftCreatorProfileEntry := bav.GetProfileEntryForPublicKey(postEntry.PosterPublicKey) - if nftCreatorProfileEntry == nil || nftCreatorProfileEntry.IsDeleted() { - return nil, 0, 0, 0, fmt.Errorf("_connectTransaction: Profile not found for "+ - "public key: %v", PkToString(postEntry.PosterPublicKey, bav.Params)) - } - pkidEntry := bav.GetPKIDForPublicKey(postEntry.PosterPublicKey) - if pkidEntry == nil || pkidEntry.isDeleted { - return nil, 0, 0, 0, fmt.Errorf("_connectTransaction: PKID not found for "+ - "public key: %v", PkToString(postEntry.PosterPublicKey, bav.Params)) + if blockHeight >= bav.Params.ForkHeights.BalanceModelBlockHeight { + for publicKey, balance := range bav.PublicKeyToDeSoBalanceNanos { + balanceSnapshot[publicKey] = balance } - nftCreatorCoinRoyaltyEntriesSnapshot[*(pkidEntry.PKID)] = nftCreatorProfileEntry.CreatorCoinEntry.Copy() - for pkid := range postEntry.AdditionalNFTRoyaltiesToCoinsBasisPoints { - profileEntry := bav.GetProfileEntryForPKID(&pkid) - if profileEntry == nil || profileEntry.IsDeleted() { + // Special case: take snapshot of the creator coin entry. + if txn.TxnMeta.GetTxnType() == TxnTypeCreatorCoin { + // Get the creator coin entry. 
+ creatorCoinTxnMeta := txn.TxnMeta.(*CreatorCoinMetadataa) + creatorProfile := bav.GetProfileEntryForPublicKey(creatorCoinTxnMeta.ProfilePublicKey) + if creatorProfile == nil || creatorProfile.IsDeleted() { + return nil, 0, 0, 0, fmt.Errorf("_connectTransaction: Profile not found for "+ + "public key: %v", PkToString(creatorCoinTxnMeta.ProfilePublicKey, bav.Params)) + } + creatorCoinSnapshot = creatorProfile.CreatorCoinEntry.Copy() + } + // When an NFT is sold, we may need to account for royalties that end up getting + // generated and paid to a user's creator coin directly. + if txn.TxnMeta.GetTxnType() == TxnTypeAcceptNFTBid || txn.TxnMeta.GetTxnType() == TxnTypeNFTBid { + // We don't really care if it's an NFT buy now bid or not. We just want to + // capture the royalties that occur to account for ALL DESO. + var nftPostHash *BlockHash + if txn.TxnMeta.GetTxnType() == TxnTypeAcceptNFTBid { + nftPostHash = txn.TxnMeta.(*AcceptNFTBidMetadata).NFTPostHash + } else { + nftPostHash = txn.TxnMeta.(*NFTBidMetadata).NFTPostHash + } + postEntry := bav.GetPostEntryForPostHash(nftPostHash) + if postEntry == nil || postEntry.IsDeleted() { + return nil, 0, 0, 0, errors.Wrapf(RuleErrorNFTBidOnNonExistentPost, "_connectTransaction: PostEntry not found for "+ + "post hash: %v", nftPostHash.String()) + } + nftCreatorProfileEntry := bav.GetProfileEntryForPublicKey(postEntry.PosterPublicKey) + if nftCreatorProfileEntry == nil || nftCreatorProfileEntry.IsDeleted() { return nil, 0, 0, 0, fmt.Errorf("_connectTransaction: Profile not found for "+ - "pkid: %v", PkToString(pkid.ToBytes(), bav.Params)) + "public key: %v", PkToString(postEntry.PosterPublicKey, bav.Params)) + } + pkidEntry := bav.GetPKIDForPublicKey(postEntry.PosterPublicKey) + if pkidEntry == nil || pkidEntry.isDeleted { + return nil, 0, 0, 0, fmt.Errorf("_connectTransaction: PKID not found for "+ + "public key: %v", PkToString(postEntry.PosterPublicKey, bav.Params)) + } + 
nftCreatorCoinRoyaltyEntriesSnapshot[*(pkidEntry.PKID)] = nftCreatorProfileEntry.CreatorCoinEntry.Copy() + for pkid := range postEntry.AdditionalNFTRoyaltiesToCoinsBasisPoints { + profileEntry := bav.GetProfileEntryForPKID(&pkid) + if profileEntry == nil || profileEntry.IsDeleted() { + return nil, 0, 0, 0, fmt.Errorf("_connectTransaction: Profile not found for "+ + "pkid: %v", PkToString(pkid.ToBytes(), bav.Params)) + } + nftCreatorCoinRoyaltyEntriesSnapshot[pkid] = profileEntry.CreatorCoinEntry.Copy() } - nftCreatorCoinRoyaltyEntriesSnapshot[pkid] = profileEntry.CreatorCoinEntry.Copy() } } @@ -3360,8 +3362,8 @@ func (bav *UtxoView) _connectTransaction(txn *MsgDeSoTxn, txHash *BlockHash, // Validate that we aren't printing any DESO if txn.TxnMeta.GetTxnType() != TxnTypeBlockReward && - txn.TxnMeta.GetTxnType() != TxnTypeBitcoinExchange { - + txn.TxnMeta.GetTxnType() != TxnTypeBitcoinExchange && + blockHeight >= bav.Params.ForkHeights.BalanceModelBlockHeight { balanceDelta, _, err := bav._compareBalancesToSnapshot(balanceSnapshot) if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, "ConnectTransaction: error comparing current balances to snapshot") diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 245debc2e..eeb4bfcf0 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1427,8 +1427,13 @@ func (bav *UtxoView) _connectUnstake( // 2. Set the new GlobalStakeAmountNanos. bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) + // Retrieve the CurrentEpochNumber. + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error retrieving CurrentEpochNumber: ") + } + // Update the LockedStakeEntry, if exists. Create if not. - currentEpochNumber := uint64(0) // TODO: set this // 1. Retrieve the PrevLockedStakeEntry. This will be restored if we disconnect this txn. 
prevLockedStakeEntry, err := bav.GetLockedStakeEntry( prevValidatorEntry.ValidatorPKID, transactorPKIDEntry.PKID, currentEpochNumber, @@ -1555,10 +1560,15 @@ func (bav *UtxoView) _disconnectUnstake( // Restore the PrevGlobalStakeAmountNanos. bav._setGlobalStakeAmountNanos(operationData.PrevGlobalStakeAmountNanos) + // Retrieve the CurrentEpochNumber. + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return errors.Wrapf(err, "_disconnectUnstake: error retrieving CurrentEpochNumber: ") + } + // Restore the PrevLockedStakeEntry, if exists. The PrevLockedStakeEntry will exist if the // transactor has previously unstaked stake assigned to this validator within the same epoch. // The PrevLockedStakeEntry will not exist otherwise. - currentEpochNumber := uint64(0) // TODO: set this // 1. Retrieve the CurrentLockedStakeEntry. currentLockedStakeEntry, err := bav.GetLockedStakeEntry( prevValidatorEntry.ValidatorPKID, transactorPKIDEntry.PKID, currentEpochNumber, diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index fedf50442..f3e06fefc 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -67,6 +67,15 @@ func _testStaking(t *testing.T, flushToDB bool) { return desoBalanceNanos } + // Seed a CurrentEpochEntry. 
+ epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + require.NoError(t, err) + currentEpochNumber, err := utxoView().GetCurrentEpochNumber() + require.NoError(t, err) + { // Param Updater set min fee rate to 101 nanos per KB params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true @@ -368,7 +377,6 @@ func _testStaking(t *testing.T, flushToDB bool) { require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(110)) // Verify LockedStakeEntry.UnstakeAmountNanos. - currentEpochNumber := uint64(0) // TODO: get epoch number from db. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(40)) @@ -407,7 +415,6 @@ func _testStaking(t *testing.T, flushToDB bool) { require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(80)) // Verify LockedStakeEntry.UnstakeAmountNanos. - currentEpochNumber := uint64(0) // TODO: get epoch number from db. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(70)) @@ -445,7 +452,6 @@ func _testStaking(t *testing.T, flushToDB bool) { require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) // Verify LockedStakeEntry.UnstakeAmountNanos. - currentEpochNumber := uint64(0) // TODO: get epoch number from db. 
lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(150)) @@ -466,8 +472,8 @@ func _testStaking(t *testing.T, flushToDB bool) { unlockStakeMetadata := &UnlockStakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StartEpochNumber: 0, - EndEpochNumber: 0, + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, } _, err = _submitUnlockStakeTxn( testMeta, m1Pub, m1Priv, unlockStakeMetadata, nil, flushToDB, @@ -483,8 +489,8 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidValidatorPKID unlockStakeMetadata := &UnlockStakeMetadata{ ValidatorPublicKey: NewPublicKey(m2PkBytes), - StartEpochNumber: 0, - EndEpochNumber: 0, + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, } _, err = _submitUnlockStakeTxn( testMeta, m1Pub, m1Priv, unlockStakeMetadata, nil, flushToDB, @@ -507,7 +513,9 @@ func _testStaking(t *testing.T, flushToDB bool) { } { // m1 unlocks stake that was assigned to m0. 
- lockedStakeEntries, err := utxoView().GetLockedStakeEntriesInRange(m0PKID, m1PKID, 0, 0) + lockedStakeEntries, err := utxoView().GetLockedStakeEntriesInRange( + m0PKID, m1PKID, currentEpochNumber, currentEpochNumber, + ) require.NoError(t, err) require.Equal(t, len(lockedStakeEntries), 1) require.Equal(t, lockedStakeEntries[0].LockedAmountNanos, uint256.NewInt().SetUint64(150)) @@ -515,8 +523,8 @@ func _testStaking(t *testing.T, flushToDB bool) { m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) unlockStakeMetadata := &UnlockStakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StartEpochNumber: 0, - EndEpochNumber: 0, + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, } feeNanos, err := _submitUnlockStakeTxn( testMeta, m1Pub, m1Priv, unlockStakeMetadata, nil, flushToDB, @@ -539,7 +547,6 @@ func _testStaking(t *testing.T, flushToDB bool) { require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) // Verify LockedStakeEntry.isDeleted. - currentEpochNumber := uint64(0) // TODO: get epoch number from db. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) require.Nil(t, lockedStakeEntry) @@ -916,6 +923,13 @@ func _testStakingWithDerivedKey(t *testing.T) { return fees, nil } + // Seed a CurrentEpochEntry. + epochUtxoView := newUtxoView() + epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + currentEpochNumber, err := newUtxoView().GetCurrentEpochNumber() + require.NoError(t, err) + { // ParamUpdater set min fee rate params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true @@ -1182,8 +1196,7 @@ func _testStakingWithDerivedKey(t *testing.T) { require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(50)) // LockedStakeEntry was created. - epochNumber := uint64(0) // TODO: get epoch number from db. 
- lockedStakeEntry, err := newUtxoView().GetLockedStakeEntry(m0PKID, senderPKID, epochNumber) + lockedStakeEntry, err := newUtxoView().GetLockedStakeEntry(m0PKID, senderPKID, currentEpochNumber) require.NoError(t, err) require.NotNil(t, lockedStakeEntry) require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(50)) @@ -1204,11 +1217,10 @@ func _testStakingWithDerivedKey(t *testing.T) { require.NoError(t, err) // sender tries to unlock all stake from m1 using the DerivedKey. Errors. - epochNumber := uint64(0) // TODO: get epoch number from db. unlockStakeMetadata := &UnlockStakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StartEpochNumber: epochNumber, - EndEpochNumber: epochNumber, + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, @@ -1229,8 +1241,8 @@ func _testStakingWithDerivedKey(t *testing.T) { // sender tries to unlock all stake from m1 using the DerivedKey. Errors. 
unlockStakeMetadata = &UnlockStakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StartEpochNumber: epochNumber, - EndEpochNumber: epochNumber, + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, @@ -1242,8 +1254,8 @@ func _testStakingWithDerivedKey(t *testing.T) { senderOldDESOBalanceNanos := getDESOBalanceNanos(senderPkBytes) unlockStakeMetadata = &UnlockStakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StartEpochNumber: epochNumber, - EndEpochNumber: epochNumber, + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, } feeNanos, err := _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, @@ -1251,7 +1263,7 @@ func _testStakingWithDerivedKey(t *testing.T) { require.NoError(t, err) // LockedStakeEntry was deleted. - lockedStakeEntry, err := newUtxoView().GetLockedStakeEntry(m0PKID, senderPKID, epochNumber) + lockedStakeEntry, err := newUtxoView().GetLockedStakeEntry(m0PKID, senderPKID, currentEpochNumber) require.NoError(t, err) require.Nil(t, lockedStakeEntry) @@ -1280,8 +1292,8 @@ func _testStakingWithDerivedKey(t *testing.T) { // sender tries to unlock all stake from m0 using the DerivedKey. Errors. unlockStakeMetadata = &UnlockStakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StartEpochNumber: epochNumber, - EndEpochNumber: epochNumber, + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, @@ -1347,11 +1359,10 @@ func _testStakingWithDerivedKey(t *testing.T) { require.NoError(t, err) // sender unlocks stake from m0 using the DerivedKey. - epochNumber := uint64(0) // TODO: get epoch number from db. 
unlockStakeMetadata := &UnlockStakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StartEpochNumber: epochNumber, - EndEpochNumber: epochNumber, + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, @@ -1361,8 +1372,8 @@ func _testStakingWithDerivedKey(t *testing.T) { // sender unlocks stake from m1 using the DerivedKey. unlockStakeMetadata = &UnlockStakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StartEpochNumber: epochNumber, - EndEpochNumber: epochNumber, + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, @@ -1421,11 +1432,10 @@ func _testStakingWithDerivedKey(t *testing.T) { require.NoError(t, err) // sender unlocks stake from m0 using the DerivedKey. - epochNumber := uint64(0) // TODO: get epoch number from db. unlockStakeMetadata := &UnlockStakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StartEpochNumber: epochNumber, - EndEpochNumber: epochNumber, + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, @@ -1435,8 +1445,8 @@ func _testStakingWithDerivedKey(t *testing.T) { // sender unlocks stake from m1 using the DerivedKey. 
unlockStakeMetadata = &UnlockStakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StartEpochNumber: epochNumber, - EndEpochNumber: epochNumber, + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, @@ -1490,11 +1500,10 @@ func _testStakingWithDerivedKey(t *testing.T) { require.NoError(t, err) // sender unlocks stake from m1 using the scoped TransactionSpendingLimit. - epochNumber := uint64(0) // TODO: get epoch number from db. unlockStakeMetadata := &UnlockStakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StartEpochNumber: epochNumber, - EndEpochNumber: epochNumber, + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, @@ -1522,11 +1531,10 @@ func _testStakingWithDerivedKey(t *testing.T) { require.NoError(t, err) // sender unlocks stake from m1 using the global TransactionSpendingLimit. - epochNumber = uint64(0) // TODO: get epoch number from db. unlockStakeMetadata = &UnlockStakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StartEpochNumber: epochNumber, - EndEpochNumber: epochNumber, + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unlockStakeMetadata}, diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index fb9ecbdc5..163f12e7a 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1056,13 +1056,17 @@ func (bav *UtxoView) _connectUnregisterAsValidator( return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: error retrieving StakeEntries: ") } + // Retrieve the CurrentEpochNumber. 
+ currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: error retrieving CurrentEpochNumber: ") + } + // Delete each StakeEntry and create or update the corresponding LockedStakeEntry. // Track TotalUnstakedAmountNanos and PrevLockedStakeEntries. totalUnstakedAmountNanos := uint256.NewInt() var prevLockedStakeEntries []*LockedStakeEntry - currentEpochNumber := uint64(0) // TODO: Retrieve this from the db. - for _, prevStakeEntry := range prevStakeEntries { // Add the UnstakedAmountNanos to the TotalUnstakedAmountNanos. totalUnstakedAmountNanos, err = SafeUint256().Add( @@ -1223,9 +1227,13 @@ func (bav *UtxoView) _disconnectUnregisterAsValidator( bav._setStakeEntryMappings(prevStakeEntry) } - // Restore the PrevLockedStakeEntries, if any. - currentEpochNumber := uint64(0) // TODO: Retrieve this from the db. + // Retrieve the CurrentEpochNumber. + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return errors.Wrapf(err, "_disconnectUnregisterAsValidator: error retrieving CurrentEpochNumber: ") + } + // Restore the PrevLockedStakeEntries, if any. for _, prevLockedStakeEntry := range operationData.PrevLockedStakeEntries { // Delete the CurrentLockedStakeEntry. currentLockedStakeEntry, err := bav.GetLockedStakeEntry( diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 889b80c64..5a3e2853b 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -62,6 +62,12 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + // Seed a CurrentEpochEntry. 
+ epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + { // ParamUpdater set min fee rate params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true @@ -412,9 +418,14 @@ func _testValidatorRegistrationWithDerivedKey(t *testing.T) { senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID - _submitAuthorizeDerivedKeyTxn := func(txnType TxnType, count uint64) (string, error) { + newUtxoView := func() *UtxoView { utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) + return utxoView + } + + _submitAuthorizeDerivedKeyTxn := func(txnType TxnType, count uint64) (string, error) { + utxoView := newUtxoView() txnSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit @@ -463,8 +474,7 @@ func _testValidatorRegistrationWithDerivedKey(t *testing.T) { _submitValidatorTxnWithDerivedKey := func( transactorPkBytes []byte, derivedKeyPrivBase58Check string, inputTxn MsgDeSoTxn, ) error { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) - require.NoError(t, err) + utxoView := newUtxoView() var txn *MsgDeSoTxn switch inputTxn.TxnMeta.GetTxnType() { @@ -519,6 +529,12 @@ func _testValidatorRegistrationWithDerivedKey(t *testing.T) { return nil } + // Seed a CurrentEpochEntry. 
+ epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + { // ParamUpdater set min fee rate params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true @@ -677,6 +693,12 @@ func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID + // Seed a CurrentEpochEntry. + epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + { // ParamUpdater set min fee rate params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true @@ -1210,8 +1232,13 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID - currentEpochNumber := uint64(0) // TODO: Retrieve this from the db. - _ = currentEpochNumber + // Seed a CurrentEpochEntry. 
+ epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + currentEpochNumber, err := utxoView().GetCurrentEpochNumber() + require.NoError(t, err) { // ParamUpdater set min fee rate diff --git a/lib/peer.go b/lib/peer.go index 95e0144ec..654a62ada 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -1510,7 +1510,7 @@ func (pp *Peer) Disconnect() { // Add the Peer to donePeers so that the ConnectionManager and Server can do any // cleanup they need to do. - if pp.cmgr != nil && atomic.LoadInt32(&pp.cmgr.shutdown) == 0 { + if pp.cmgr != nil && atomic.LoadInt32(&pp.cmgr.shutdown) == 0 && pp.cmgr.donePeerChan != nil { pp.cmgr.donePeerChan <- pp } } From 5c37ce05e667bf3d5ba4e4b524d935a41e12da22 Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Thu, 4 May 2023 11:06:33 -0400 Subject: [PATCH 010/762] Mf/validate stake lockup period (#517) * Add epoch data type. * Update func naming. * Fix nil error. * Add prefix encoder db util. * Rename field to FinalBlockHeight. * Avoid snapshotting issue for now. * Update ancestral record before setting in db. * Incorporate epoch number into PoS txns. * Validate stake lockup period. * Validate stake lockup period. * Clean up test file changes. * Set stake lockup period to 3 epochs. * Resolve PR feedback. * Remove accidental newline. 
--- lib/block_view_stake.go | 24 ++++- lib/block_view_stake_test.go | 199 ++++++++++++++++++++++++++++++++--- lib/constants.go | 11 ++ 3 files changed, 216 insertions(+), 18 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index eeb4bfcf0..c22434e64 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1134,12 +1134,14 @@ func (bav *UtxoView) _connectStake( // Check if there is an existing StakeEntry that will be updated. // The existing StakeEntry will be restored if we disconnect this transaction. + var prevStakeEntries []*StakeEntry prevStakeEntry, err := bav.GetStakeEntry(prevValidatorEntry.ValidatorPKID, transactorPKIDEntry.PKID) if err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectStake: ") } // Delete the existing StakeEntry, if exists. if prevStakeEntry != nil { + prevStakeEntries = append(prevStakeEntries, prevStakeEntry) bav._deleteStakeEntryMappings(prevStakeEntry) } @@ -1217,7 +1219,7 @@ func (bav *UtxoView) _connectStake( Type: OperationTypeStake, PrevValidatorEntry: prevValidatorEntry, PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, - PrevStakeEntries: []*StakeEntry{prevStakeEntry}, + PrevStakeEntries: prevStakeEntries, }) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1378,6 +1380,7 @@ func (bav *UtxoView) _connectUnstake( if prevStakeEntry.StakeAmountNanos.Cmp(txMeta.UnstakeAmountNanos) < 0 { return 0, 0, nil, errors.Wrapf(RuleErrorInvalidUnstakeInsufficientStakeFound, "_connectUnstake: ") } + prevStakeEntries := []*StakeEntry{prevStakeEntry} // Update the StakeEntry, decreasing the StakeAmountNanos. // 1. Calculate the updated StakeAmountNanos. @@ -1442,6 +1445,7 @@ func (bav *UtxoView) _connectUnstake( return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ") } // 2. Create a CurrrentLockedStakeEntry. + var prevLockedStakeEntries []*LockedStakeEntry var currentLockedStakeEntry *LockedStakeEntry if prevLockedStakeEntry != nil { // Update the existing LockedStakeEntry. 
@@ -1466,6 +1470,7 @@ func (bav *UtxoView) _connectUnstake( } // 3. Delete the PrevLockedStakeEntry, if exists. if prevLockedStakeEntry != nil { + prevLockedStakeEntries = append(prevLockedStakeEntries, prevLockedStakeEntry) bav._deleteLockedStakeEntryMappings(prevLockedStakeEntry) } // 4. Set the CurrentLockedStakeEntry. @@ -1476,8 +1481,8 @@ func (bav *UtxoView) _connectUnstake( Type: OperationTypeUnstake, PrevValidatorEntry: prevValidatorEntry, PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, - PrevStakeEntries: []*StakeEntry{prevStakeEntry}, - PrevLockedStakeEntries: []*LockedStakeEntry{prevLockedStakeEntry}, + PrevStakeEntries: prevStakeEntries, + PrevLockedStakeEntries: prevLockedStakeEntries, }) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1870,7 +1875,17 @@ func (bav *UtxoView) IsValidUnlockStakeMetadata(transactorPkBytes []byte, metada if metadata.StartEpochNumber > metadata.EndEpochNumber { return errors.Wrapf(RuleErrorInvalidUnlockStakeEpochRange, "UtxoView.IsValidUnlockStakeMetadata: ") } - // TODO: validate EndEpochNumber is <= CurrentEpochNumber - 2 + + // Retrieve CurrentEpochNumber. + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error retrieving CurrentEpochNumber: ") + } + + // Validate EndEpochNumber + StakeLockupEpochDuration <= CurrentEpochNumber. + if metadata.EndEpochNumber+bav.Params.StakeLockupEpochDuration > currentEpochNumber { + return errors.Wrapf(RuleErrorInvalidUnlockStakeMustWaitLockupDuration, "UtxoView.IsValidUnlockStakeMetadata: ") + } // Validate LockedStakeEntries exist. 
lockedStakeEntries, err := bav.GetLockedStakeEntriesInRange( @@ -2572,6 +2587,7 @@ const RuleErrorInvalidUnstakeNoStakeFound RuleError = "RuleErrorInvalidUnstakeNo const RuleErrorInvalidUnstakeAmountNanos RuleError = "RuleErrorInvalidUnstakeAmountNanos" const RuleErrorInvalidUnstakeInsufficientStakeFound RuleError = "RuleErrorInvalidUnstakeInsufficientStakeFound" const RuleErrorInvalidUnlockStakeEpochRange RuleError = "RuleErrorInvalidUnlockStakeEpochRange" +const RuleErrorInvalidUnlockStakeMustWaitLockupDuration RuleError = "RuleErrorInvalidUnlockStakeMustWaitLockupDuration" const RuleErrorInvalidUnlockStakeNoUnlockableStakeFound RuleError = "RuleErrorInvalidUnlockStakeNoUnlockableStakeFound" const RuleErrorInvalidUnlockStakeUnlockableStakeOverflowsUint64 RuleError = "RuleErrorInvalidUnlockStakeUnlockableStakeOverflowsUint64" const RuleErrorStakeTransactionSpendingLimitNotFound RuleError = "RuleErrorStakeTransactionSpendingLimitNotFound" diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index f3e06fefc..8cb71fbc8 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -28,6 +28,10 @@ func _testStaking(t *testing.T, flushToDB bool) { mempool, miner := NewTestMiner(t, chain, params, true) chain.snapshot = nil + // For these tests, we set StakeLockupEpochDuration to zero. + // We test the lockup logic in a separate test. + params.StakeLockupEpochDuration = 0 + // Mine a few blocks to give the senderPkString some money. 
for ii := 0; ii < 10; ii++ { _, err = miner.MineAndProcessSingleBlock(0, mempool) @@ -72,7 +76,6 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) - require.NoError(t, err) currentEpochNumber, err := utxoView().GetCurrentEpochNumber() require.NoError(t, err) @@ -511,6 +514,19 @@ func _testStaking(t *testing.T, flushToDB bool) { require.Error(t, err) require.Contains(t, err.Error(), RuleErrorInvalidUnlockStakeEpochRange) } + { + // RuleErrorInvalidUnlockStakeNoUnlockableStakeFound + unlockStakeMetadata := &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StartEpochNumber: 0, + EndEpochNumber: 0, + } + _, err = _submitUnlockStakeTxn( + testMeta, m1Pub, m1Priv, unlockStakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidUnlockStakeNoUnlockableStakeFound) + } { // m1 unlocks stake that was assigned to m0. lockedStakeEntries, err := utxoView().GetLockedStakeEntriesInRange( @@ -555,19 +571,6 @@ func _testStaking(t *testing.T, flushToDB bool) { m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) require.Equal(t, m1OldDESOBalanceNanos-feeNanos+uint64(150), m1NewDESOBalanceNanos) } - { - // RuleErrorInvalidUnlockStakeNoUnlockableStakeFound - unlockStakeMetadata := &UnlockStakeMetadata{ - ValidatorPublicKey: NewPublicKey(m0PkBytes), - StartEpochNumber: 0, - EndEpochNumber: 0, - } - _, err = _submitUnlockStakeTxn( - testMeta, m1Pub, m1Priv, unlockStakeMetadata, nil, flushToDB, - ) - require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorInvalidUnlockStakeNoUnlockableStakeFound) - } // Flush mempool to the db and test rollbacks. 
require.NoError(t, mempool.universalUtxoView.FlushToDb(blockHeight)) @@ -771,6 +774,10 @@ func _testStakingWithDerivedKey(t *testing.T) { GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) chain.snapshot = nil + // For these tests, we set StakeLockupEpochDuration to zero. + // We test the lockup logic in a separate test. + params.StakeLockupEpochDuration = 0 + // Mine a few blocks to give the senderPkString some money. for ii := 0; ii < 10; ii++ { _, err = miner.MineAndProcessSingleBlock(0, mempool) @@ -1756,3 +1763,167 @@ func TestGetLockedStakeEntriesInRange(t *testing.T) { require.NoError(t, err) require.Empty(t, lockedStakeEntries) } + +func TestStakeLockupEpochDuration(t *testing.T) { + var err error + + // Initialize balance model fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize fork heights. + params.ForkHeights.DeSoUnlimitedDerivedKeysBlockHeight = uint32(0) + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + chain.snapshot = nil + + // For these tests, we set StakeLockupEpochDuration to 3. + // We test the lockup logic in a separate test. + params.StakeLockupEpochDuration = 3 + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. 
+ blockHeight := uint64(chain.blockTip().Height) + 1 + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + + newUtxoView := func() *UtxoView { + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + return utxoView + } + + // Seed a CurrentEpochEntry. + epochUtxoView := newUtxoView() + epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 5, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + currentEpochNumber, err := newUtxoView().GetCurrentEpochNumber() + require.NoError(t, err) + + { + // ParamUpdater set min fee rate + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } + { + // m0 registers as a validator. + registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://m1.com")}, + } + _, _, _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, true) + require.NoError(t, err) + + validatorEntry, err := newUtxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + } + { + // m0 stakes with himself. 
+ stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitStakeTxn(testMeta, m0Pub, m0Priv, stakeMetadata, nil, true) + require.NoError(t, err) + + stakeEntry, err := newUtxoView().GetStakeEntry(m0PKID, m0PKID) + require.NoError(t, err) + require.NotNil(t, stakeEntry) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(100)) + } + { + // m0 unstakes from himself. + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitUnstakeTxn(testMeta, m0Pub, m0Priv, unstakeMetadata, nil, true) + require.NoError(t, err) + + stakeEntry, err := newUtxoView().GetStakeEntry(m0PKID, m0PKID) + require.NoError(t, err) + require.Nil(t, stakeEntry) + + lockedStakeEntry, err := newUtxoView().GetLockedStakeEntry(m0PKID, m0PKID, currentEpochNumber) + require.NoError(t, err) + require.NotNil(t, lockedStakeEntry) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(100)) + } + { + // RuleErrorInvalidUnlockStakeMustWaitLockupDuration + unlockStakeMetadata := &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StartEpochNumber: currentEpochNumber, + EndEpochNumber: currentEpochNumber, + } + _, err = _submitUnlockStakeTxn(testMeta, m0Pub, m0Priv, unlockStakeMetadata, nil, true) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorInvalidUnlockStakeMustWaitLockupDuration) + } + { + // Simulate three epochs passing by seeding a new CurrentEpochEntry. + // Note that we can't test the disconnect logic after these tests + // since we have updated the CurrentEpochNumber. 
+ epochUtxoView = newUtxoView() + epochUtxoView._setCurrentEpochEntry( + &EpochEntry{EpochNumber: currentEpochNumber + 3, FinalBlockHeight: blockHeight + 10}, + ) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + currentEpochNumber, err = newUtxoView().GetCurrentEpochNumber() + require.NoError(t, err) + } + { + // m0 unlocks his stake. + oldDesoBalanceNanos, err := newUtxoView().GetDeSoBalanceNanosForPublicKey(m0PkBytes) + require.NoError(t, err) + + unlockStakeMetadata := &UnlockStakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StartEpochNumber: currentEpochNumber - 3, + EndEpochNumber: currentEpochNumber - 3, + } + feeNanos, err := _submitUnlockStakeTxn(testMeta, m0Pub, m0Priv, unlockStakeMetadata, nil, true) + require.NoError(t, err) + + lockedStakeEntry, err := newUtxoView().GetLockedStakeEntry(m0PKID, m0PKID, currentEpochNumber-2) + require.NoError(t, err) + require.Nil(t, lockedStakeEntry) + + newDesoBalanceNanos, err := newUtxoView().GetDeSoBalanceNanosForPublicKey(m0PkBytes) + require.NoError(t, err) + require.Equal(t, oldDesoBalanceNanos-feeNanos+uint64(100), newDesoBalanceNanos) + } +} diff --git a/lib/constants.go b/lib/constants.go index 53f1c34c6..e8fd7360d 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -596,6 +596,11 @@ type DeSoParams struct { // attack the bancor curve to any meaningful measure. CreatorCoinAutoSellThresholdNanos uint64 + // StakeLockupEpochDuration is the number of epochs that a + // user must wait before unlocking their unstaked stake. + // TODO: Move this to GlobalParamsEntry. + StakeLockupEpochDuration uint64 + ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -968,6 +973,9 @@ var DeSoMainnetParams = DeSoParams{ // reserve ratios. CreatorCoinAutoSellThresholdNanos: uint64(10), + // Unstaked stake can be unlocked after a minimum of N elapsed epochs. 
+ StakeLockupEpochDuration: uint64(3), + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1196,6 +1204,9 @@ var DeSoTestnetParams = DeSoParams{ // reserve ratios. CreatorCoinAutoSellThresholdNanos: uint64(10), + // Unstaked stake can be unlocked after a minimum of N elapsed epochs. + StakeLockupEpochDuration: uint64(3), + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), From df5ddd805d8eba90f3c37f830841891da03ec6b6 Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Thu, 4 May 2023 11:32:06 -0400 Subject: [PATCH 011/762] Add jailed status to validator entries. (#518) * Add epoch data type. * Update func naming. * Fix nil error. * Add prefix encoder db util. * Rename field to FinalBlockHeight. * Avoid snapshotting issue for now. * Update ancestral record before setting in db. * Incorporate epoch number into PoS txns. * Validate stake lockup period. * Validate stake lockup period. * Clean up test file changes. * Add jailed status to validator entries. * Set stake lockup period to 3 epochs. * Set LastActiveEpochNumber to CurrentEpochNumber. * Add ValidatorEntry.JailedAtEpochNumber field. * Resolve PR feedback. 
--- lib/block_view_validator.go | 110 +++++++++++++++++--- lib/block_view_validator_test.go | 170 ++++++++++++++++++++++--------- lib/db_utils.go | 17 ++++ lib/db_utils_test.go | 20 ++++ 4 files changed, 255 insertions(+), 62 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 163f12e7a..14aaa2c28 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -30,10 +30,35 @@ type ValidatorEntry struct { VotingSignatureBlockHeight uint64 TotalStakeAmountNanos *uint256.Int RegisteredAtBlockHeight uint64 + LastActiveAtEpochNumber uint64 + JailedAtEpochNumber uint64 ExtraData map[string][]byte isDeleted bool } +func (validatorEntry *ValidatorEntry) Status() ValidatorStatus { + // ValidatorEntry.Status() is a virtual/derived field that is not stored in + // the database, but instead constructed from other ValidatorEntry fields. + // No sense in storing duplicative data twice. This saves memory and ensures + // that e.g. the ValidatorEntry.JailedAtEpochNumber field and the + // ValidatorEntry.Status() return value will never get out of sync. + // + // Make sure that any fields referenced here are included in the ValidatorMapKey + // since the ValidatorEntry.Status() value is used as a field in a Badger index. + if validatorEntry.JailedAtEpochNumber > uint64(0) { + return ValidatorStatusJailed + } + return ValidatorStatusActive +} + +type ValidatorStatus uint8 + +const ( + ValidatorStatusInvalid ValidatorStatus = 0 + ValidatorStatusActive ValidatorStatus = 1 + ValidatorStatusJailed ValidatorStatus = 2 +) + type ValidatorMapKey struct { // The MapKey has to contain all fields that are used in Badger keys. 
// Otherwise, an update to the UtxoView will not be able to update or @@ -41,6 +66,7 @@ type ValidatorMapKey struct { ValidatorPKID PKID TotalStakeAmountNanos uint256.Int RegisteredAtBlockHeight uint64 + JailedAtEpochNumber uint64 } func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { @@ -61,6 +87,8 @@ func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { VotingSignatureBlockHeight: validatorEntry.VotingSignatureBlockHeight, TotalStakeAmountNanos: validatorEntry.TotalStakeAmountNanos.Clone(), RegisteredAtBlockHeight: validatorEntry.RegisteredAtBlockHeight, + LastActiveAtEpochNumber: validatorEntry.LastActiveAtEpochNumber, + JailedAtEpochNumber: validatorEntry.JailedAtEpochNumber, ExtraData: copyExtraData(validatorEntry.ExtraData), isDeleted: validatorEntry.isDeleted, } @@ -71,6 +99,7 @@ func (validatorEntry *ValidatorEntry) ToMapKey() ValidatorMapKey { ValidatorPKID: *validatorEntry.ValidatorPKID, TotalStakeAmountNanos: *validatorEntry.TotalStakeAmountNanos, RegisteredAtBlockHeight: validatorEntry.RegisteredAtBlockHeight, + JailedAtEpochNumber: validatorEntry.JailedAtEpochNumber, } } @@ -91,6 +120,8 @@ func (validatorEntry *ValidatorEntry) RawEncodeWithoutMetadata(blockHeight uint6 data = append(data, UintToBuf(validatorEntry.VotingSignatureBlockHeight)...) data = append(data, EncodeUint256(validatorEntry.TotalStakeAmountNanos)...) data = append(data, UintToBuf(validatorEntry.RegisteredAtBlockHeight)...) + data = append(data, UintToBuf(validatorEntry.LastActiveAtEpochNumber)...) + data = append(data, UintToBuf(validatorEntry.JailedAtEpochNumber)...) data = append(data, EncodeExtraData(validatorEntry.ExtraData)...) 
return data } @@ -163,6 +194,18 @@ func (validatorEntry *ValidatorEntry) RawDecodeWithoutMetadata(blockHeight uint6 return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading RegisteredAtBlockHeight: ") } + // LastActiveAtEpochNumber + validatorEntry.LastActiveAtEpochNumber, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading LastActiveAtEpochNumber: ") + } + + // JailedAtEpochNumber + validatorEntry.JailedAtEpochNumber, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading JailedAtEpochNumber: ") + } + // ExtraData validatorEntry.ExtraData, err = DecodeExtraData(rr) if err != nil { @@ -475,6 +518,7 @@ func DBKeyForValidatorByPKID(validatorEntry *ValidatorEntry) []byte { func DBKeyForValidatorByStake(validatorEntry *ValidatorEntry) []byte { key := append([]byte{}, Prefixes.PrefixValidatorByStake...) + key = append(key, EncodeUint8(uint8(validatorEntry.Status()))...) // TotalStakeAmountNanos will never be nil here, but EncodeOptionalUint256 // is used because it provides a fixed-width encoding of uint256.Ints. key = append(key, EncodeOptionalUint256(validatorEntry.TotalStakeAmountNanos)...) // Highest stake first @@ -518,7 +562,7 @@ func DBGetValidatorByPKIDWithTxn(txn *badger.Txn, snap *Snapshot, pkid *PKID) (* return validatorEntry, nil } -func DBGetTopValidatorsByStake( +func DBGetTopActiveValidatorsByStake( handle *badger.DB, snap *Snapshot, limit int, @@ -532,13 +576,14 @@ func DBGetTopValidatorsByStake( validatorKeysToSkip.Add(string(DBKeyForValidatorByStake(validatorEntryToSkip))) } - // Retrieve top N ValidatorEntry PKIDs by stake. + // Retrieve top N active ValidatorEntry PKIDs by stake. key := append([]byte{}, Prefixes.PrefixValidatorByStake...) + key = append(key, EncodeUint8(uint8(ValidatorStatusActive))...) 
_, validatorPKIDsBytes, err := EnumerateKeysForPrefixWithLimitOffsetOrder( handle, key, limit, nil, true, validatorKeysToSkip, ) if err != nil { - return nil, errors.Wrapf(err, "DBGetTopValidatorsByStake: problem retrieving top validators: ") + return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem retrieving top validators: ") } // For each PKID, retrieve the ValidatorEntry by PKID. @@ -547,12 +592,12 @@ func DBGetTopValidatorsByStake( validatorPKID := &PKID{} exists, err := DecodeFromBytes(validatorPKID, bytes.NewReader(validatorPKIDBytes)) if !exists || err != nil { - return nil, errors.Wrapf(err, "DBGetTopValidatorsByStake: problem reading ValidatorPKID: ") + return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem reading ValidatorPKID: ") } // Retrieve ValidatorEntry by PKID. validatorEntry, err := DBGetValidatorByPKID(handle, snap, validatorPKID) if err != nil { - return nil, errors.Wrapf(err, "DBGetTopValidatorsByStake: problem retrieving validator by PKID: ") + return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem retrieving validator by PKID: ") } validatorEntries = append(validatorEntries, validatorEntry) } @@ -902,12 +947,36 @@ func (bav *UtxoView) _connectRegisterAsValidator( totalStakeAmountNanos = prevValidatorEntry.TotalStakeAmountNanos.Clone() } - // Set RegisteredAtBlockHeight only if this is a new ValidatorEntry. + // Set RegisteredAtBlockHeight to CurrentBlockHeight if this is a new ValidatorEntry. + // Otherwise, retain the existing RegisteredAtBlockHeight. registeredAtBlockHeight := uint64(blockHeight) if prevValidatorEntry != nil { registeredAtBlockHeight = prevValidatorEntry.RegisteredAtBlockHeight } + // Set LastActiveAtEpochNumber to CurrentEpochNumber if this is a new ValidatorEntry. + // Otherwise, retain the existing LastActiveAtEpochNumber. + var lastActiveAtEpochNumber uint64 + if prevValidatorEntry != nil { + // Retain the existing LastActiveAtEpochNumber. 
+ lastActiveAtEpochNumber = prevValidatorEntry.LastActiveAtEpochNumber + } else { + // Retrieve the CurrentEpochNumber. + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectRegisterAsValidator: error retrieving CurrentEpochNumber: ") + } + // Set LastActiveAtEpochNumber to CurrentEpochNumber. + lastActiveAtEpochNumber = currentEpochNumber + } + + // Set JailedAtEpochNumber to zero if this is a new ValidatorEntry. + // Otherwise, retain the existing JailedAtEpochNumber. + jailedAtEpochNumber := uint64(0) + if prevValidatorEntry != nil { + jailedAtEpochNumber = prevValidatorEntry.JailedAtEpochNumber + } + // Retrieve existing ExtraData to merge with any new ExtraData. var prevExtraData map[string][]byte if prevValidatorEntry != nil { @@ -927,6 +996,8 @@ func (bav *UtxoView) _connectRegisterAsValidator( VotingSignatureBlockHeight: txMeta.VotingSignatureBlockHeight, TotalStakeAmountNanos: totalStakeAmountNanos, RegisteredAtBlockHeight: registeredAtBlockHeight, + LastActiveAtEpochNumber: lastActiveAtEpochNumber, + JailedAtEpochNumber: jailedAtEpochNumber, ExtraData: mergeExtraData(prevExtraData, txn.ExtraData), } // Set the ValidatorEntry. @@ -1135,8 +1206,7 @@ func (bav *UtxoView) _connectUnregisterAsValidator( if err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: ") } - // Note that we don't need to check isDeleted because the Get returns nil if isDeleted=true. 
- if prevValidatorEntry == nil { + if prevValidatorEntry == nil || prevValidatorEntry.isDeleted { return 0, 0, nil, errors.Wrapf(RuleErrorValidatorNotFound, "_connectUnregisterAsValidator: ") } bav._deleteValidatorEntryMappings(prevValidatorEntry) @@ -1396,7 +1466,7 @@ func (bav *UtxoView) GetValidatorByPublicKey(validatorPublicKey *PublicKey) (*Va return validatorEntry, nil } -func (bav *UtxoView) GetTopValidatorsByStake(limit int) ([]*ValidatorEntry, error) { +func (bav *UtxoView) GetTopActiveValidatorsByStake(limit int) ([]*ValidatorEntry, error) { // Validate limit param. if limit <= 0 { return []*ValidatorEntry{}, nil @@ -1411,14 +1481,24 @@ func (bav *UtxoView) GetTopValidatorsByStake(limit int) ([]*ValidatorEntry, erro for _, validatorEntry := range bav.ValidatorMapKeyToValidatorEntry { utxoViewValidatorEntries = append(utxoViewValidatorEntries, validatorEntry) } - // Pull top N ValidatorEntries from the database (not present in the UtxoView). - validatorEntries, err := DBGetTopValidatorsByStake(bav.Handle, bav.Snapshot, limit, utxoViewValidatorEntries) + // Pull top N active ValidatorEntries from the database (not present in the UtxoView). + dbValidatorEntries, err := DBGetTopActiveValidatorsByStake(bav.Handle, bav.Snapshot, limit, utxoViewValidatorEntries) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetTopValidatorsByStake: error retrieving entries from db: ") + return nil, errors.Wrapf(err, "UtxoView.GetTopActiveValidatorsByStake: error retrieving entries from db: ") + } + // Cache top N active ValidatorEntries from the db in the UtxoView. + for _, validatorEntry := range dbValidatorEntries { + // We only pull ValidatorEntries from the db that are not present in the + // UtxoView. As a sanity check, we double-check that the ValidatorEntry + // is not already in the UtxoView here. 
+ if _, exists := bav.ValidatorMapKeyToValidatorEntry[validatorEntry.ToMapKey()]; !exists { + bav._setValidatorEntryMappings(validatorEntry) + } } - // Add !isDeleted ValidatorEntries from the UtxoView to the ValidatorEntries from the db. - for _, validatorEntry := range utxoViewValidatorEntries { - if !validatorEntry.isDeleted { + // Pull !isDeleted, active ValidatorEntries from the UtxoView. + var validatorEntries []*ValidatorEntry + for _, validatorEntry := range bav.ValidatorMapKeyToValidatorEntry { + if !validatorEntry.isDeleted && validatorEntry.Status() == ValidatorStatusActive { validatorEntries = append(validatorEntries, validatorEntry) } } diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 5a3e2853b..1ce1e2c9c 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -178,12 +178,12 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Equal(t, string(validatorEntry.ExtraData["TestKey"]), "TestValue1") } { - // Query: retrieve top ValidatorEntries by stake - validatorEntries, err = utxoView().GetTopValidatorsByStake(0) + // Query: retrieve top active ValidatorEntries by stake + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(0) require.NoError(t, err) require.Empty(t, validatorEntries) - validatorEntries, err = utxoView().GetTopValidatorsByStake(2) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(2) require.NoError(t, err) require.Len(t, validatorEntries, 1) require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) @@ -241,8 +241,8 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Nil(t, validatorEntry) } { - // Query: retrieve top ValidatorEntries by stake - validatorEntries, err = utxoView().GetTopValidatorsByStake(1) + // Query: retrieve top active ValidatorEntries by stake + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(1) require.NoError(t, err) require.Empty(t, validatorEntries) } @@ 
-635,12 +635,12 @@ func _testValidatorRegistrationWithDerivedKey(t *testing.T) { _executeAllTestRollbackAndFlush(testMeta) } -func TestGetTopValidatorsByStake(t *testing.T) { - _testGetTopValidatorsByStake(t, false) - _testGetTopValidatorsByStake(t, true) +func TestGetTopActiveValidatorsByStake(t *testing.T) { + _testGetTopActiveValidatorsByStake(t, false) + _testGetTopActiveValidatorsByStake(t, true) } -func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { +func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { var validatorEntries []*ValidatorEntry var err error @@ -725,7 +725,7 @@ func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators. - validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 1) require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) @@ -742,7 +742,7 @@ func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators. - validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 2) } @@ -757,7 +757,7 @@ func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators. - validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 3) } @@ -787,7 +787,7 @@ func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators. 
- validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 3) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) @@ -806,7 +806,7 @@ func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { _, err = _submitUnstakeTxn(testMeta, m3Pub, m3Priv, unstakeMetadata, nil, flushToDB) // Verify top validators. - validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 3) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) @@ -825,7 +825,7 @@ func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { _, err = _submitUnstakeTxn(testMeta, m3Pub, m3Priv, unstakeMetadata, nil, flushToDB) // Verify top validators. - validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 3) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) @@ -841,7 +841,7 @@ func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators. - validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) @@ -859,7 +859,7 @@ func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators. 
- validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) @@ -877,7 +877,7 @@ func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators. - validatorEntries, err = utxoView().GetTopValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) @@ -887,7 +887,7 @@ func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { } { // Verify top validators with LIMIT. - validatorEntries, err = utxoView().GetTopValidatorsByStake(1) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(1) require.NoError(t, err) require.Len(t, validatorEntries, 1) require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) @@ -899,7 +899,7 @@ func _testGetTopValidatorsByStake(t *testing.T, flushToDB bool) { _executeAllTestRollbackAndFlush(testMeta) } -func TestGetTopValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { +func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // For this test, we manually place ValidatorEntries in the database and // UtxoView to test merging the two to determine the TopValidatorsByStake. @@ -909,9 +909,18 @@ func TestGetTopValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.NoError(t, err) blockHeight := uint64(chain.blockTip().Height + 1) + // m0 will be stored in the db with Stake=100. m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + // m1 will be stored in the db with Stake=400 and Status=Jailed. m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID + // m2 will be stored in the db and UtxoView with Stake=300. 
m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID + // m3 will be stored in the db and UtxoView with Stake=600 and isDeleted=true. + m3PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m3PkBytes).PKID + // m4 will be stored in the UtxoView only with Stake=50. + m4PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m4PkBytes).PKID + // m5 will be stored in the UtxoView only with Stake=500 and Status=Jailed. + m5PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m5PkBytes).PKID // Store m0's ValidatorEntry in the db with TotalStake = 100 nanos. validatorEntry := &ValidatorEntry{ @@ -930,65 +939,126 @@ func TestGetTopValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Verify m0 is not stored in the UtxoView. require.Empty(t, utxoView.ValidatorMapKeyToValidatorEntry) - // Store m1's ValidatorEntry in the database with TotalStake = 200 nanos. + // Store m1's jailed ValidatorEntry in the db with TotalStake = 400 nanos. validatorEntry = &ValidatorEntry{ ValidatorPKID: m1PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(200), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(400), + JailedAtEpochNumber: 1, } utxoView._setValidatorEntryMappings(validatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) - // Fetch m1 so it is also cached in the UtxoView. - validatorEntry, err = utxoView.GetValidatorByPKID(m1PKID) + // Verify m1 is stored in the db. + validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m1PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(400)) + require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) - // Verify m1 is stored in the db. - validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m1PKID) + // Store m2's ValidatorEntry in the db with TotalStake = 300 nanos. 
+ m2ValidatorEntry := &ValidatorEntry{ + ValidatorPKID: m2PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(300), + } + utxoView._setValidatorEntryMappings(m2ValidatorEntry) + require.NoError(t, utxoView.FlushToDb(blockHeight)) + + // Verify m2 is stored in the db. + validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m2PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) + + // Store m3's ValidatorEntry in the db with TotalStake = 600 nanos. + m3ValidatorEntry := &ValidatorEntry{ + ValidatorPKID: m3PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(600), + } + utxoView._setValidatorEntryMappings(m3ValidatorEntry) + require.NoError(t, utxoView.FlushToDb(blockHeight)) + + // Verify m3 is stored in the db. + validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m3PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(600)) + + // Fetch m2 so it is also cached in the UtxoView. + validatorEntry, err = utxoView.GetValidatorByPKID(m2PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(200)) - // Verify m1 is also stored in the UtxoView. + // Verify m2 is also stored in the UtxoView. 
require.Len(t, utxoView.ValidatorMapKeyToValidatorEntry, 1) - require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[validatorEntry.ToMapKey()].ValidatorPKID, m1PKID) + require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m2ValidatorEntry.ToMapKey()].ValidatorPKID, m2PKID) require.Equal( t, - utxoView.ValidatorMapKeyToValidatorEntry[validatorEntry.ToMapKey()].TotalStakeAmountNanos, - uint256.NewInt().SetUint64(200), + utxoView.ValidatorMapKeyToValidatorEntry[m2ValidatorEntry.ToMapKey()].TotalStakeAmountNanos, + uint256.NewInt().SetUint64(300), ) - // Store m2's ValidatorEntry in the UtxoView. - m2ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m2PKID, + // Store m3's ValidatorEntry in the UtxoView with isDeleted=true. + utxoView._deleteValidatorEntryMappings(m3ValidatorEntry) + + // Verify m3 is stored in the UtxoView with isDeleted=true. + require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m3ValidatorEntry.ToMapKey()].ValidatorPKID, m3PKID) + require.True(t, utxoView.ValidatorMapKeyToValidatorEntry[m3ValidatorEntry.ToMapKey()].isDeleted) + + // Store m4's ValidatorEntry in the UtxoView with TotalStake = 50 nanos. + m4ValidatorEntry := &ValidatorEntry{ + ValidatorPKID: m4PKID, TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), } - utxoView._setValidatorEntryMappings(m2ValidatorEntry) + utxoView._setValidatorEntryMappings(m4ValidatorEntry) - // Verify m2 is not stored in the db. - validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m2PKID) + // Verify m4 is not stored in the db. + validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m4PKID) require.NoError(t, err) require.Nil(t, validatorEntry) - // Verify m2 is stored in the UtxoView. - require.Len(t, utxoView.ValidatorMapKeyToValidatorEntry, 2) - - require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m2ValidatorEntry.ToMapKey()].ValidatorPKID, m2PKID) + // Verify m4 is stored in the UtxoView. 
+ require.Len(t, utxoView.ValidatorMapKeyToValidatorEntry, 3) + require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m4ValidatorEntry.ToMapKey()].ValidatorPKID, m4PKID) require.Equal( t, - utxoView.ValidatorMapKeyToValidatorEntry[m2ValidatorEntry.ToMapKey()].TotalStakeAmountNanos, + utxoView.ValidatorMapKeyToValidatorEntry[m4ValidatorEntry.ToMapKey()].TotalStakeAmountNanos, uint256.NewInt().SetUint64(50), ) - // Fetch TopValidatorsByStake merging ValidatorEntries from the db and UtxoView. - validatorEntries, err := utxoView.GetTopValidatorsByStake(3) + // Store m5's jailed ValidatorEntry in the UtxoView with TotalStake = 500 nanos. + m5ValidatorEntry := &ValidatorEntry{ + ValidatorPKID: m5PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(500), + JailedAtEpochNumber: 1, + } + utxoView._setValidatorEntryMappings(m5ValidatorEntry) + + // Verify m5 is not stored in the db. + validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m5PKID) + require.NoError(t, err) + require.Nil(t, validatorEntry) + + // Verify m5 is stored in the UtxoView. + require.Len(t, utxoView.ValidatorMapKeyToValidatorEntry, 4) + require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m5ValidatorEntry.ToMapKey()].ValidatorPKID, m5PKID) + require.Equal( + t, + utxoView.ValidatorMapKeyToValidatorEntry[m5ValidatorEntry.ToMapKey()].TotalStakeAmountNanos, + uint256.NewInt().SetUint64(500), + ) + require.Equal( + t, utxoView.ValidatorMapKeyToValidatorEntry[m5ValidatorEntry.ToMapKey()].Status(), ValidatorStatusJailed, + ) + + // Fetch TopActiveValidatorsByStake merging ValidatorEntries from the db and UtxoView. 
+ validatorEntries, err := utxoView.GetTopActiveValidatorsByStake(6) require.NoError(t, err) require.Len(t, validatorEntries, 3) - require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - require.Equal(t, validatorEntries[2].ValidatorPKID, m2PKID) + require.Equal(t, validatorEntries[2].ValidatorPKID, m4PKID) require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(50)) } @@ -1046,6 +1116,12 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + // Seed a CurrentEpochEntry. 
+ epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + { // ParamUpdater set min fee rate params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true diff --git a/lib/db_utils.go b/lib/db_utils.go index 4ca3b736c..f0f21f79e 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -4223,6 +4223,23 @@ func DecodeUint64(scoreBytes []byte) uint64 { return binary.BigEndian.Uint64(scoreBytes) } +func EncodeUint8(num uint8) []byte { + return []byte{num} +} + +func DecodeUint8(numBytes []byte) uint8 { + return numBytes[0] +} + +func ReadUint8(rr *bytes.Reader) (uint8, error) { + var numBytes [1]byte + _, err := io.ReadFull(rr, numBytes[:]) + if err != nil { + return 0, err + } + return DecodeUint8(numBytes[:]), nil +} + func DbPutNanosPurchasedWithTxn(txn *badger.Txn, snap *Snapshot, nanosPurchased uint64) error { return DBSetWithTxn(txn, snap, Prefixes.PrefixNanosPurchased, EncodeUint64(nanosPurchased)) } diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index b9e9acef8..d20d17490 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -1,8 +1,10 @@ package lib import ( + "bytes" "io/ioutil" "log" + "math" "math/big" "testing" "time" @@ -665,3 +667,21 @@ func TestDeleteExpiredTransactorNonceEntries(t *testing.T) { } } + +func TestEncodeUint8(t *testing.T) { + for _, num := range []uint8{0, 95, math.MaxUint8} { + // Encode to bytes. + encoded := EncodeUint8(num) + require.Len(t, encoded, 1) + + // Decode from bytes. + decoded := DecodeUint8(encoded) + require.Equal(t, num, decoded) + + // Read from bytes. 
+ rr := bytes.NewReader(encoded) + decoded2, err := ReadUint8(rr) + require.NoError(t, err) + require.Equal(t, num, decoded2) + } +} From 9f14c21dd5b2269d6965eeec963b999dfbb9a2b7 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 5 May 2023 11:18:40 -0400 Subject: [PATCH 012/762] Add more private key, public key, signature utils. --- lib/types_bls_signature.go | 123 +++++++++++++++++++---- lib/types_bls_signature_no_relic.go | 64 ++++++++++-- lib/types_bls_signature_no_relic_test.go | 6 +- lib/types_bls_signature_test.go | 81 +++++++++------ 4 files changed, 215 insertions(+), 59 deletions(-) diff --git a/lib/types_bls_signature.go b/lib/types_bls_signature.go index b6725cf13..635ea9f4f 100644 --- a/lib/types_bls_signature.go +++ b/lib/types_bls_signature.go @@ -4,32 +4,88 @@ package lib import ( "bytes" + "encoding/hex" flowCrypto "github.com/onflow/flow-go/crypto" "io" + "strings" ) +const BLSSigningAlgorithm = flowCrypto.BLSBLS12381 + +// TODO: what should the domainTag param be? +var BLSHashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-protocol") + // -// TYPES: BLSPublicKey +// TYPES: BLSPrivateKey // -const BLSSigningAlgorithm = flowCrypto.BLSBLS12381 +type BLSPrivateKey struct { + PrivateKey flowCrypto.PrivateKey +} -var BLSHashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-protocol") +func NewBLSPrivateKey() (*BLSPrivateKey, error) { + // This is a helper util for generating a random BLSPrivateKey. 
+ privateKey, err := flowCrypto.GeneratePrivateKey(BLSSigningAlgorithm, RandomBytes(64)) + if err != nil { + return nil, err + } + return &BLSPrivateKey{PrivateKey: privateKey}, nil +} + +func (blsPrivateKey *BLSPrivateKey) Sign(payloadBytes []byte) (*BLSSignature, error) { + signature, err := blsPrivateKey.PrivateKey.Sign(payloadBytes, BLSHashingAlgorithm) + if err != nil { + return nil, err + } + return &BLSSignature{Signature: signature}, nil +} + +func (blsPrivateKey *BLSPrivateKey) PublicKey() *BLSPublicKey { + return &BLSPublicKey{PublicKey: blsPrivateKey.PrivateKey.PublicKey()} +} + +func (blsPrivateKey *BLSPrivateKey) ToString() string { + return blsPrivateKey.PrivateKey.String() +} + +func (blsPrivateKey *BLSPrivateKey) FromString(privateKeyString string) error { + // Chop off leading 0x, if exists. Otherwise, does nothing. + privateKeyStringCopy, _ := strings.CutPrefix(privateKeyString, "0x") + // Convert from hex string to byte slice. + privateKeyBytes, err := hex.DecodeString(privateKeyStringCopy) + if err != nil { + return err + } + // Convert from byte slice to BLSPrivateKey. 
+ blsPrivateKey.PrivateKey, err = flowCrypto.DecodePrivateKey(BLSSigningAlgorithm, privateKeyBytes) + return err +} + +func (blsPrivateKey *BLSPrivateKey) Eq(other *BLSPrivateKey) bool { + return blsPrivateKey.PrivateKey.Equals(other.PrivateKey) +} + +// +// TYPES: BLSPublicKey +// type BLSPublicKey struct { PublicKey flowCrypto.PublicKey } -func NewBLSPublicKey(publicKeyBytes []byte) (*BLSPublicKey, error) { - publicKey, err := flowCrypto.DecodePublicKey(BLSSigningAlgorithm, publicKeyBytes) - return &BLSPublicKey{PublicKey: publicKey}, err +func (blsPublicKey *BLSPublicKey) Verify(blsSignature *BLSSignature, input []byte) (bool, error) { + return blsPublicKey.PublicKey.Verify(blsSignature.Signature, input, BLSHashingAlgorithm) } func (blsPublicKey *BLSPublicKey) ToBytes() []byte { return EncodeByteArray(blsPublicKey.PublicKey.Encode()) } -func (blsPublicKey *BLSPublicKey) FromBytes(rr io.Reader) error { +func (blsPublicKey *BLSPublicKey) FromBytes(publicKeyBytes []byte) error { + return blsPublicKey.ReadBytes(bytes.NewReader(publicKeyBytes)) +} + +func (blsPublicKey *BLSPublicKey) ReadBytes(rr io.Reader) error { publicKeyBytes, err := DecodeByteArray(rr) if err != nil { return err @@ -38,8 +94,21 @@ func (blsPublicKey *BLSPublicKey) FromBytes(rr io.Reader) error { return err } -func (blsPublicKey *BLSPublicKey) Verify(blsSignature *BLSSignature, input []byte) (bool, error) { - return blsPublicKey.PublicKey.Verify(blsSignature.Signature, input, BLSHashingAlgorithm) +func (blsPublicKey *BLSPublicKey) ToString() string { + return blsPublicKey.PublicKey.String() +} + +func (blsPublicKey *BLSPublicKey) FromString(publicKeyString string) error { + // Chop off leading 0x, if exists. Otherwise, does nothing. + publicKeyStringCopy, _ := strings.CutPrefix(publicKeyString, "0x") + // Convert from hex string to byte slice. + publicKeyBytes, err := hex.DecodeString(publicKeyStringCopy) + if err != nil { + return err + } + // Convert from byte slice to BLSPublicKey. 
+ blsPublicKey.PublicKey, err = flowCrypto.DecodePublicKey(BLSSigningAlgorithm, publicKeyBytes) + return err } func (blsPublicKey *BLSPublicKey) Eq(other *BLSPublicKey) bool { @@ -54,18 +123,38 @@ type BLSSignature struct { Signature flowCrypto.Signature } -func NewBLSSignature(signature []byte) *BLSSignature { - return &BLSSignature{Signature: signature} +func (blsSignature *BLSSignature) ToBytes() []byte { + return EncodeByteArray(blsSignature.Signature.Bytes()) } -func (blsSignature *BLSSignature) ToBytes() []byte { - return EncodeByteArray(blsSignature.Signature) +func (blsSignature *BLSSignature) FromBytes(signatureBytes []byte) error { + return blsSignature.ReadBytes(bytes.NewReader(signatureBytes)) } -func (blsSignature *BLSSignature) FromBytes(rr io.Reader) error { - var err error - blsSignature.Signature, err = DecodeByteArray(rr) - return err +func (blsSignature *BLSSignature) ReadBytes(rr io.Reader) error { + signatureBytes, err := DecodeByteArray(rr) + if err != nil { + return err + } + blsSignature.Signature = signatureBytes + return nil +} + +func (blsSignature *BLSSignature) ToString() string { + return blsSignature.Signature.String() +} + +func (blsSignature *BLSSignature) FromString(signatureString string) error { + // Chop off leading 0x, if exists. Otherwise, does nothing. + signatureStringCopy, _ := strings.CutPrefix(signatureString, "0x") + // Convert from hex string to byte slice. + signatureBytes, err := hex.DecodeString(signatureStringCopy) + if err != nil { + return err + } + // Convert from byte slice to BLSSignature. 
+ blsSignature.Signature = signatureBytes + return nil } func (blsSignature *BLSSignature) Eq(other *BLSSignature) bool { diff --git a/lib/types_bls_signature_no_relic.go b/lib/types_bls_signature_no_relic.go index 2445f05bc..86b39d76a 100644 --- a/lib/types_bls_signature_no_relic.go +++ b/lib/types_bls_signature_no_relic.go @@ -7,17 +7,49 @@ import ( "io" ) +const BLSNoRelicError = "BLS keys can't be used without Relic installed" + // -// TYPES: BLSPublicKey +// TYPES: BLSPrivateKey // -const BLSNoRelicError = "BLS keys can't be used without Relic installed" +type BLSPrivateKey struct { + PrivateKey flowCrypto.PrivateKey +} + +func NewBLSPrivateKey() (*BLSPrivateKey, error) { + panic(BLSNoRelicError) +} + +func (blsPrivateKey *BLSPrivateKey) Sign(payloadBytes []byte) (*BLSSignature, error) { + panic(BLSNoRelicError) +} + +func (blsPrivateKey *BLSPrivateKey) PublicKey() *BLSPublicKey { + panic(BLSNoRelicError) +} + +func (blsPrivateKey *BLSPrivateKey) ToString() string { + panic(BLSNoRelicError) +} + +func (blsPrivateKey *BLSPrivateKey) FromString(privateKeyString string) error { + panic(BLSNoRelicError) +} + +func (blsPrivateKey *BLSPrivateKey) Eq(other *BLSPrivateKey) bool { + panic(BLSNoRelicError) +} + +// +// TYPES: BLSPublicKey +// type BLSPublicKey struct { PublicKey flowCrypto.PublicKey } -func NewBLSPublicKey(publicKeyBytes []byte) (*BLSPublicKey, error) { +func (blsPublicKey *BLSPublicKey) Verify(blsSignature *BLSSignature, input []byte) (bool, error) { panic(BLSNoRelicError) } @@ -25,11 +57,19 @@ func (blsPublicKey *BLSPublicKey) ToBytes() []byte { panic(BLSNoRelicError) } -func (blsPublicKey *BLSPublicKey) FromBytes(rr io.Reader) error { +func (blsPublicKey *BLSPublicKey) FromBytes(publicKeyBytes []byte) error { panic(BLSNoRelicError) } -func (blsPublicKey *BLSPublicKey) Verify(blsSignature *BLSSignature, input []byte) (bool, error) { +func (blsPublicKey *BLSPublicKey) ReadBytes(rr io.Reader) error { + panic(BLSNoRelicError) +} + +func (blsPublicKey 
*BLSPublicKey) ToString() string { + panic(BLSNoRelicError) +} + +func (blsPublicKey *BLSPublicKey) FromString(publicKeyString string) error { panic(BLSNoRelicError) } @@ -45,15 +85,23 @@ type BLSSignature struct { Signature flowCrypto.Signature } -func NewBLSSignature(signature []byte) *BLSSignature { +func (blsSignature *BLSSignature) ToBytes() []byte { panic(BLSNoRelicError) } -func (blsSignature *BLSSignature) ToBytes() []byte { +func (blsSignature *BLSSignature) FromBytes(signatureBytes []byte) error { + panic(BLSNoRelicError) +} + +func (blsSignature *BLSSignature) ReadBytes(rr io.Reader) error { + panic(BLSNoRelicError) +} + +func (blsSignature *BLSSignature) ToString() string { panic(BLSNoRelicError) } -func (blsSignature *BLSSignature) FromBytes(rr io.Reader) error { +func (blsSignature *BLSSignature) FromString(signatureString string) error { panic(BLSNoRelicError) } diff --git a/lib/types_bls_signature_no_relic_test.go b/lib/types_bls_signature_no_relic_test.go index e4be0292e..585c4a548 100644 --- a/lib/types_bls_signature_no_relic_test.go +++ b/lib/types_bls_signature_no_relic_test.go @@ -8,6 +8,8 @@ import ( ) func TestVerifyingBLSSignatures(t *testing.T) { - assert.Panics(t, func() { NewBLSPublicKey(nil) }) - assert.Panics(t, func() { NewBLSSignature(nil) }) + assert.Panics(t, func() { NewBLSPrivateKey() }) + assert.Panics(t, func() { (&BLSPrivateKey{}).FromString("") }) + assert.Panics(t, func() { (&BLSPublicKey{}).FromString("") }) + assert.Panics(t, func() { (&BLSSignature{}).FromString("") }) } diff --git a/lib/types_bls_signature_test.go b/lib/types_bls_signature_test.go index d410ba871..c43028623 100644 --- a/lib/types_bls_signature_test.go +++ b/lib/types_bls_signature_test.go @@ -4,78 +4,95 @@ package lib import ( "bytes" - flowCrypto "github.com/onflow/flow-go/crypto" "github.com/stretchr/testify/require" "testing" ) func TestVerifyingBLSSignatures(t *testing.T) { // Generate two BLS public/private key pairs. 
- privateKey1, err := flowCrypto.GeneratePrivateKey(BLSSigningAlgorithm, RandomBytes(64)) - require.NoError(t, err) - publicKey1 := privateKey1.PublicKey() - blsPublicKey1, err := NewBLSPublicKey(publicKey1.Encode()) + blsPrivateKey1, err := NewBLSPrivateKey() require.NoError(t, err) + blsPublicKey1 := blsPrivateKey1.PublicKey() - privateKey2, err := flowCrypto.GeneratePrivateKey(BLSSigningAlgorithm, RandomBytes(64)) - require.NoError(t, err) - publicKey2 := privateKey2.PublicKey() - blsPublicKey2, err := NewBLSPublicKey(publicKey2.Encode()) + blsPrivateKey2, err := NewBLSPrivateKey() require.NoError(t, err) - - // Test BLSPublicKey.Eq(). - require.True(t, blsPublicKey1.Eq(blsPublicKey1)) - require.True(t, blsPublicKey2.Eq(blsPublicKey2)) - require.False(t, blsPublicKey1.Eq(blsPublicKey2)) + blsPublicKey2 := blsPrivateKey2.PublicKey() // Test BLSPublicKey.Verify(). - // 1. PrivateKey1 signs a random payload. - // 2. Verify BLSPublicKey1 is the signer. - // 3. Verify BLSPublicKey2 is not the signer. - // 4. PrivateKey2 signs a random payload. - // 5. Verify BLSPublicKey1 is not the signer. - // 6. Verify BLSPublicKey2 is the signer. + // 1. PrivateKey1 signs a random payload. randomPayload1 := RandomBytes(256) - signature1, err := privateKey1.Sign(randomPayload1, BLSHashingAlgorithm) + blsSignature1, err := blsPrivateKey1.Sign(randomPayload1) require.NoError(t, err) - blsSignature1 := NewBLSSignature(signature1) + // 2. Verify BLSPublicKey1 is the signer. isVerified, err := blsPublicKey1.Verify(blsSignature1, randomPayload1) require.NoError(t, err) require.True(t, isVerified) + // 3. Verify BLSPublicKey2 is not the signer. isVerified, err = blsPublicKey2.Verify(blsSignature1, randomPayload1) require.NoError(t, err) require.False(t, isVerified) + // 4. PrivateKey2 signs a different random payload. 
randomPayload2 := RandomBytes(256) - signature2, err := privateKey2.Sign(randomPayload2, BLSHashingAlgorithm) + blsSignature2, err := blsPrivateKey2.Sign(randomPayload2) require.NoError(t, err) - blsSignature2 := NewBLSSignature(signature2) + // 5. Verify BLSPublicKey1 is not the signer. isVerified, err = blsPublicKey1.Verify(blsSignature2, randomPayload2) require.NoError(t, err) require.False(t, isVerified) + // 6. Verify BLSPublicKey2 is the signer. isVerified, err = blsPublicKey2.Verify(blsSignature2, randomPayload2) require.NoError(t, err) require.True(t, isVerified) + // Test BLSPrivateKey.Eq(). + require.True(t, blsPrivateKey1.Eq(blsPrivateKey1)) + require.True(t, blsPrivateKey2.Eq(blsPrivateKey2)) + require.False(t, blsPrivateKey1.Eq(blsPrivateKey2)) + + // Test BLSPrivateKey.ToString() and BLSPrivateKey.FromString(). + blsPrivateKeyString := blsPrivateKey1.ToString() + copyBLSPrivateKey1 := &BLSPrivateKey{} + require.NoError(t, copyBLSPrivateKey1.FromString(blsPrivateKeyString)) + require.True(t, blsPrivateKey1.Eq(copyBLSPrivateKey1)) + // Test BLSPublicKey.Eq(). require.True(t, blsPublicKey1.Eq(blsPublicKey1)) require.True(t, blsPublicKey2.Eq(blsPublicKey2)) require.False(t, blsPublicKey1.Eq(blsPublicKey2)) + // Test BLSPublicKey.ToBytes(), BLSPublicKey.FromBytes(), and BLSPublicKey.ReadBytes(). + blsPublicKeyBytes := blsPublicKey1.ToBytes() + copyBLSPublicKey1 := &BLSPublicKey{} + require.NoError(t, copyBLSPublicKey1.FromBytes(blsPublicKeyBytes)) + require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) + copyBLSPublicKey1 = &BLSPublicKey{} + require.NoError(t, copyBLSPublicKey1.ReadBytes(bytes.NewBuffer(blsPublicKeyBytes))) + require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) + + // Test BLSPublicKey.ToString() and BLSPublicKey.FromString(). 
+ blsPublicKeyString := blsPublicKey1.ToString() + copyBLSPublicKey1 = &BLSPublicKey{} + require.NoError(t, copyBLSPublicKey1.FromString(blsPublicKeyString)) + require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) + // Test BLSSignature.Eq(). require.True(t, blsSignature1.Eq(blsSignature1)) require.True(t, blsSignature2.Eq(blsSignature2)) require.False(t, blsSignature1.Eq(blsSignature2)) - // Test BLSPublicKey.ToBytes() and BLSPublicKey.FromBytes(). - blsPublicKeyBytes := blsPublicKey1.ToBytes() - copyBLSPublicKey1 := &BLSPublicKey{} - require.NoError(t, copyBLSPublicKey1.FromBytes(bytes.NewBuffer(blsPublicKeyBytes))) - require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) - - // Test BLSSignature.ToBytes() and BLSSignature.FromBytes(). + // Test BLSSignature.ToBytes(), BLSSignature.FromBytes(), and BLSSignature.ReadBytes(). blsSignatureBytes := blsSignature1.ToBytes() copyBLSSignature := &BLSSignature{} - require.NoError(t, copyBLSSignature.FromBytes(bytes.NewBuffer(blsSignatureBytes))) + require.NoError(t, copyBLSSignature.FromBytes(blsSignatureBytes)) + require.True(t, blsSignature1.Eq(copyBLSSignature)) + copyBLSSignature = &BLSSignature{} + require.NoError(t, copyBLSSignature.ReadBytes(bytes.NewBuffer(blsSignatureBytes))) + require.True(t, blsSignature1.Eq(copyBLSSignature)) + + // Test BLSSignature.ToString() and BLSSignature.FromString(). + blsSignatureString := blsSignature1.ToString() + copyBLSSignature = &BLSSignature{} + require.NoError(t, copyBLSSignature.FromString(blsSignatureString)) require.True(t, blsSignature1.Eq(copyBLSSignature)) } From f1baa27990c26f04feca5fe3d9549ee125344a6a Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 5 May 2023 11:19:55 -0400 Subject: [PATCH 013/762] Remove types_ filename prefix. 
--- lib/{types_bls_signature.go => bls_signature.go} | 0 ...{types_bls_signature_no_relic.go => bls_signature_no_relic.go} | 0 ..._signature_no_relic_test.go => bls_signature_no_relic_test.go} | 0 lib/{types_bls_signature_test.go => bls_signature_test.go} | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename lib/{types_bls_signature.go => bls_signature.go} (100%) rename lib/{types_bls_signature_no_relic.go => bls_signature_no_relic.go} (100%) rename lib/{types_bls_signature_no_relic_test.go => bls_signature_no_relic_test.go} (100%) rename lib/{types_bls_signature_test.go => bls_signature_test.go} (100%) diff --git a/lib/types_bls_signature.go b/lib/bls_signature.go similarity index 100% rename from lib/types_bls_signature.go rename to lib/bls_signature.go diff --git a/lib/types_bls_signature_no_relic.go b/lib/bls_signature_no_relic.go similarity index 100% rename from lib/types_bls_signature_no_relic.go rename to lib/bls_signature_no_relic.go diff --git a/lib/types_bls_signature_no_relic_test.go b/lib/bls_signature_no_relic_test.go similarity index 100% rename from lib/types_bls_signature_no_relic_test.go rename to lib/bls_signature_no_relic_test.go diff --git a/lib/types_bls_signature_test.go b/lib/bls_signature_test.go similarity index 100% rename from lib/types_bls_signature_test.go rename to lib/bls_signature_test.go From b6da4c030d3032ebace922a5f4490469d42e077b Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 5 May 2023 11:44:52 -0400 Subject: [PATCH 014/762] Install go v1.20 in the test.Dockerfile. --- test.Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test.Dockerfile b/test.Dockerfile index e7ce306c0..319e5f989 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -4,6 +4,9 @@ RUN apk update RUN apk upgrade RUN apk add --update bash cmake git go gcc g++ make vips vips-dev +COPY --from=golang:1.20-alpine /usr/local/go/ /usr/local/go/ +ENV PATH="/usr/local/go/bin:${PATH}" + WORKDIR /deso/src/core COPY go.mod . 
From 96f4ef0394e3f01602238a38ea9c91042ab30aff Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 5 May 2023 11:45:47 -0400 Subject: [PATCH 015/762] Dont apk add install go. --- test.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test.Dockerfile b/test.Dockerfile index 319e5f989..417eeb737 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -2,7 +2,7 @@ FROM alpine:latest AS core RUN apk update RUN apk upgrade -RUN apk add --update bash cmake git go gcc g++ make vips vips-dev +RUN apk add --update bash cmake git gcc g++ make vips vips-dev COPY --from=golang:1.20-alpine /usr/local/go/ /usr/local/go/ ENV PATH="/usr/local/go/bin:${PATH}" From 5b476ad1ec79c8eff72c76e444e996e62518f143 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 5 May 2023 11:48:15 -0400 Subject: [PATCH 016/762] Dont re-install flow-go. --- scripts/install-relic.sh | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/scripts/install-relic.sh b/scripts/install-relic.sh index f19971afb..cd2dcb9eb 100755 --- a/scripts/install-relic.sh +++ b/scripts/install-relic.sh @@ -1,14 +1,11 @@ #!/bin/bash # This script installs Relic, a C library dependency for the BLS signature implementation -# provided by Flow. This file was copy-pasted as-is from Flow's installation README found -# here: https://github.com/onflow/flow-go/tree/master/crypto. +# provided by Flow. This file was copy-pasted from Flow's installation README found here: +# https://github.com/onflow/flow-go/tree/master/crypto. # crypto package PKG_NAME="github.com/onflow/flow-go/crypto" -# go get the package -go get ${PKG_NAME} - # go.mod MOD_FILE="./go.mod" @@ -23,7 +20,7 @@ else { echo "couldn't find go.mod file - make sure the script is in the project root directory"; exit 1; } fi -# grant permissions if not existant +# grant permissions if not existent if [[ ! -r ${PKG_DIR} || ! -w ${PKG_DIR} || ! 
-x ${PKG_DIR} ]]; then sudo chmod -R 755 "${PKG_DIR}" fi From bb9ecc909bd37541912d739ae6a2cbf581b835e4 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 5 May 2023 12:59:00 -0400 Subject: [PATCH 017/762] Add utils to aggregate + verify aggregate signatures. --- lib/bls_signature.go | 20 ++++++++++++++++++++ lib/bls_signature_test.go | 34 +++++++++++++++++++++++++++++++++- test.Dockerfile | 1 + 3 files changed, 54 insertions(+), 1 deletion(-) diff --git a/lib/bls_signature.go b/lib/bls_signature.go index 635ea9f4f..2cef00c17 100644 --- a/lib/bls_signature.go +++ b/lib/bls_signature.go @@ -15,6 +15,26 @@ const BLSSigningAlgorithm = flowCrypto.BLSBLS12381 // TODO: what should the domainTag param be? var BLSHashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-protocol") +func AggregateBLSSignatures(blsSignatures []*BLSSignature) (*BLSSignature, error) { + var signatures []flowCrypto.Signature + for _, blsSignature := range blsSignatures { + signatures = append(signatures, blsSignature.Signature) + } + aggregateSignature, err := flowCrypto.AggregateBLSSignatures(signatures) + if err != nil { + return nil, err + } + return &BLSSignature{Signature: aggregateSignature}, nil +} + +func VerifyAggregateBLSSignature(blsPublicKeys []*BLSPublicKey, blsSignature *BLSSignature, payloadBytes []byte) (bool, error) { + var publicKeys []flowCrypto.PublicKey + for _, blsPublicKey := range blsPublicKeys { + publicKeys = append(publicKeys, blsPublicKey.PublicKey) + } + return flowCrypto.VerifyBLSSignatureOneMessage(publicKeys, blsSignature.Signature, payloadBytes, BLSHashingAlgorithm) +} + // // TYPES: BLSPrivateKey // diff --git a/lib/bls_signature_test.go b/lib/bls_signature_test.go index c43028623..fabb81045 100644 --- a/lib/bls_signature_test.go +++ b/lib/bls_signature_test.go @@ -18,7 +18,7 @@ func TestVerifyingBLSSignatures(t *testing.T) { require.NoError(t, err) blsPublicKey2 := blsPrivateKey2.PublicKey() - // Test BLSPublicKey.Verify(). 
+ // Test BLSPrivateKey.Sign() and BLSPublicKey.Verify(). // 1. PrivateKey1 signs a random payload. randomPayload1 := RandomBytes(256) blsSignature1, err := blsPrivateKey1.Sign(randomPayload1) @@ -45,6 +45,38 @@ func TestVerifyingBLSSignatures(t *testing.T) { require.NoError(t, err) require.True(t, isVerified) + // Test AggregateBLSSignatures() and VerifyAggregateBLSSignature(). + // 1. PrivateKey1 signs a random payload. + randomPayload3 := RandomBytes(256) + blsSignature1, err = blsPrivateKey1.Sign(randomPayload3) + require.NoError(t, err) + // 2. PrivateKey2 signs the same random payload. + blsSignature2, err = blsPrivateKey2.Sign(randomPayload3) + require.NoError(t, err) + // 3. Aggregate their signatures. + aggregateSignature, err := AggregateBLSSignatures([]*BLSSignature{blsSignature1, blsSignature2}) + require.NoError(t, err) + // 4. Verify the AggregateSignature. + isVerified, err = VerifyAggregateBLSSignature( + []*BLSPublicKey{blsPublicKey1, blsPublicKey2}, aggregateSignature, randomPayload3, + ) + require.NoError(t, err) + require.True(t, isVerified) + // 5. Verify PrivateKey1's signature doesn't work on its own. + isVerified, err = VerifyAggregateBLSSignature([]*BLSPublicKey{blsPublicKey1}, aggregateSignature, randomPayload3) + require.NoError(t, err) + require.False(t, isVerified) + // 6. Verify PrivateKey2's signature doesn't work on its own. + isVerified, err = VerifyAggregateBLSSignature([]*BLSPublicKey{blsPublicKey2}, aggregateSignature, randomPayload3) + require.NoError(t, err) + require.False(t, isVerified) + // 7. Verify the AggregateSignature doesn't work on a different payload. + isVerified, err = VerifyAggregateBLSSignature( + []*BLSPublicKey{blsPublicKey1, blsPublicKey2}, aggregateSignature, randomPayload1, + ) + require.NoError(t, err) + require.False(t, isVerified) + // Test BLSPrivateKey.Eq(). 
require.True(t, blsPrivateKey1.Eq(blsPrivateKey1)) require.True(t, blsPrivateKey2.Eq(blsPrivateKey2)) diff --git a/test.Dockerfile b/test.Dockerfile index 417eeb737..427daf2b8 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -11,6 +11,7 @@ WORKDIR /deso/src/core COPY go.mod . COPY go.sum . + RUN go mod download COPY scripts/install-relic.sh . From ce4076bdc50286460e1aaf017e817884eeff72ac Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 5 May 2023 13:01:19 -0400 Subject: [PATCH 018/762] Add no relic version of aggregate BLS sig funcs. --- lib/bls_signature_no_relic.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/bls_signature_no_relic.go b/lib/bls_signature_no_relic.go index 86b39d76a..7edc25790 100644 --- a/lib/bls_signature_no_relic.go +++ b/lib/bls_signature_no_relic.go @@ -9,6 +9,14 @@ import ( const BLSNoRelicError = "BLS keys can't be used without Relic installed" +func AggregateBLSSignatures(blsSignatures []*BLSSignature) (*BLSSignature, error) { + panic(BLSNoRelicError) +} + +func VerifyAggregateBLSSignature(blsPublicKeys []*BLSPublicKey, blsSignature *BLSSignature, payloadBytes []byte) (bool, error) { + panic(BLSNoRelicError) +} + // // TYPES: BLSPrivateKey // From 3ca713e25c81836237ed779d908b7b9b1a1a1980 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 8 May 2023 11:31:19 -0400 Subject: [PATCH 019/762] Add copy funcs. --- lib/bls_signature.go | 55 ++++++++++++++++-------------- lib/bls_signature_no_relic.go | 12 ++++--- lib/bls_signature_no_relic_test.go | 1 - lib/bls_signature_test.go | 46 +++++++++++++++++-------- 4 files changed, 69 insertions(+), 45 deletions(-) diff --git a/lib/bls_signature.go b/lib/bls_signature.go index 2cef00c17..ad275e94e 100644 --- a/lib/bls_signature.go +++ b/lib/bls_signature.go @@ -43,15 +43,6 @@ type BLSPrivateKey struct { PrivateKey flowCrypto.PrivateKey } -func NewBLSPrivateKey() (*BLSPrivateKey, error) { - // This is a helper util for generating a random BLSPrivateKey. 
- privateKey, err := flowCrypto.GeneratePrivateKey(BLSSigningAlgorithm, RandomBytes(64)) - if err != nil { - return nil, err - } - return &BLSPrivateKey{PrivateKey: privateKey}, nil -} - func (blsPrivateKey *BLSPrivateKey) Sign(payloadBytes []byte) (*BLSSignature, error) { signature, err := blsPrivateKey.PrivateKey.Sign(payloadBytes, BLSHashingAlgorithm) if err != nil { @@ -68,17 +59,17 @@ func (blsPrivateKey *BLSPrivateKey) ToString() string { return blsPrivateKey.PrivateKey.String() } -func (blsPrivateKey *BLSPrivateKey) FromString(privateKeyString string) error { +func (blsPrivateKey *BLSPrivateKey) FromString(privateKeyString string) (*BLSPrivateKey, error) { // Chop off leading 0x, if exists. Otherwise, does nothing. privateKeyStringCopy, _ := strings.CutPrefix(privateKeyString, "0x") // Convert from hex string to byte slice. privateKeyBytes, err := hex.DecodeString(privateKeyStringCopy) if err != nil { - return err + return nil, err } // Convert from byte slice to BLSPrivateKey. blsPrivateKey.PrivateKey, err = flowCrypto.DecodePrivateKey(BLSSigningAlgorithm, privateKeyBytes) - return err + return blsPrivateKey, err } func (blsPrivateKey *BLSPrivateKey) Eq(other *BLSPrivateKey) bool { @@ -101,40 +92,46 @@ func (blsPublicKey *BLSPublicKey) ToBytes() []byte { return EncodeByteArray(blsPublicKey.PublicKey.Encode()) } -func (blsPublicKey *BLSPublicKey) FromBytes(publicKeyBytes []byte) error { +func (blsPublicKey *BLSPublicKey) FromBytes(publicKeyBytes []byte) (*BLSPublicKey, error) { return blsPublicKey.ReadBytes(bytes.NewReader(publicKeyBytes)) } -func (blsPublicKey *BLSPublicKey) ReadBytes(rr io.Reader) error { +func (blsPublicKey *BLSPublicKey) ReadBytes(rr io.Reader) (*BLSPublicKey, error) { publicKeyBytes, err := DecodeByteArray(rr) if err != nil { - return err + return nil, err } blsPublicKey.PublicKey, err = flowCrypto.DecodePublicKey(BLSSigningAlgorithm, publicKeyBytes) - return err + return blsPublicKey, err } func (blsPublicKey *BLSPublicKey) ToString() 
string { return blsPublicKey.PublicKey.String() } -func (blsPublicKey *BLSPublicKey) FromString(publicKeyString string) error { +func (blsPublicKey *BLSPublicKey) FromString(publicKeyString string) (*BLSPublicKey, error) { // Chop off leading 0x, if exists. Otherwise, does nothing. publicKeyStringCopy, _ := strings.CutPrefix(publicKeyString, "0x") // Convert from hex string to byte slice. publicKeyBytes, err := hex.DecodeString(publicKeyStringCopy) if err != nil { - return err + return nil, err } // Convert from byte slice to BLSPublicKey. blsPublicKey.PublicKey, err = flowCrypto.DecodePublicKey(BLSSigningAlgorithm, publicKeyBytes) - return err + return blsPublicKey, err } func (blsPublicKey *BLSPublicKey) Eq(other *BLSPublicKey) bool { return blsPublicKey.PublicKey.Equals(other.PublicKey) } +func (blsPublicKey *BLSPublicKey) Copy() *BLSPublicKey { + return &BLSPublicKey{ + PublicKey: blsPublicKey.PublicKey, + } +} + // // TYPES: BLSSignature // @@ -147,36 +144,42 @@ func (blsSignature *BLSSignature) ToBytes() []byte { return EncodeByteArray(blsSignature.Signature.Bytes()) } -func (blsSignature *BLSSignature) FromBytes(signatureBytes []byte) error { +func (blsSignature *BLSSignature) FromBytes(signatureBytes []byte) (*BLSSignature, error) { return blsSignature.ReadBytes(bytes.NewReader(signatureBytes)) } -func (blsSignature *BLSSignature) ReadBytes(rr io.Reader) error { +func (blsSignature *BLSSignature) ReadBytes(rr io.Reader) (*BLSSignature, error) { signatureBytes, err := DecodeByteArray(rr) if err != nil { - return err + return nil, err } blsSignature.Signature = signatureBytes - return nil + return blsSignature, nil } func (blsSignature *BLSSignature) ToString() string { return blsSignature.Signature.String() } -func (blsSignature *BLSSignature) FromString(signatureString string) error { +func (blsSignature *BLSSignature) FromString(signatureString string) (*BLSSignature, error) { // Chop off leading 0x, if exists. Otherwise, does nothing. 
signatureStringCopy, _ := strings.CutPrefix(signatureString, "0x") // Convert from hex string to byte slice. signatureBytes, err := hex.DecodeString(signatureStringCopy) if err != nil { - return err + return nil, err } // Convert from byte slice to BLSSignature. blsSignature.Signature = signatureBytes - return nil + return blsSignature, nil } func (blsSignature *BLSSignature) Eq(other *BLSSignature) bool { return bytes.Equal(blsSignature.ToBytes(), other.ToBytes()) } + +func (blsSignature *BLSSignature) Copy() *BLSSignature { + return &BLSSignature{ + Signature: append([]byte{}, blsSignature.Signature.Bytes()...), + } +} diff --git a/lib/bls_signature_no_relic.go b/lib/bls_signature_no_relic.go index 7edc25790..4d72b5006 100644 --- a/lib/bls_signature_no_relic.go +++ b/lib/bls_signature_no_relic.go @@ -25,10 +25,6 @@ type BLSPrivateKey struct { PrivateKey flowCrypto.PrivateKey } -func NewBLSPrivateKey() (*BLSPrivateKey, error) { - panic(BLSNoRelicError) -} - func (blsPrivateKey *BLSPrivateKey) Sign(payloadBytes []byte) (*BLSSignature, error) { panic(BLSNoRelicError) } @@ -85,6 +81,10 @@ func (blsPublicKey *BLSPublicKey) Eq(other *BLSPublicKey) bool { panic(BLSNoRelicError) } +func (blsPublicKey *BLSPublicKey) Copy() *BLSPublicKey { + panic(BLSNoRelicError) +} + // // TYPES: BLSSignature // @@ -116,3 +116,7 @@ func (blsSignature *BLSSignature) FromString(signatureString string) error { func (blsSignature *BLSSignature) Eq(other *BLSSignature) bool { panic(BLSNoRelicError) } + +func (blsSignature *BLSSignature) Copy() *BLSSignature { + panic(BLSNoRelicError) +} diff --git a/lib/bls_signature_no_relic_test.go b/lib/bls_signature_no_relic_test.go index 585c4a548..882496ecc 100644 --- a/lib/bls_signature_no_relic_test.go +++ b/lib/bls_signature_no_relic_test.go @@ -8,7 +8,6 @@ import ( ) func TestVerifyingBLSSignatures(t *testing.T) { - assert.Panics(t, func() { NewBLSPrivateKey() }) assert.Panics(t, func() { (&BLSPrivateKey{}).FromString("") }) assert.Panics(t, func() 
{ (&BLSPublicKey{}).FromString("") }) assert.Panics(t, func() { (&BLSSignature{}).FromString("") }) diff --git a/lib/bls_signature_test.go b/lib/bls_signature_test.go index fabb81045..9a980ad78 100644 --- a/lib/bls_signature_test.go +++ b/lib/bls_signature_test.go @@ -4,10 +4,20 @@ package lib import ( "bytes" + flowCrypto "github.com/onflow/flow-go/crypto" "github.com/stretchr/testify/require" "testing" ) +func NewBLSPrivateKey() (*BLSPrivateKey, error) { + // This is a helper util for generating a random BLSPrivateKey. + privateKey, err := flowCrypto.GeneratePrivateKey(BLSSigningAlgorithm, RandomBytes(64)) + if err != nil { + return nil, err + } + return &BLSPrivateKey{PrivateKey: privateKey}, nil +} + func TestVerifyingBLSSignatures(t *testing.T) { // Generate two BLS public/private key pairs. blsPrivateKey1, err := NewBLSPrivateKey() @@ -84,8 +94,8 @@ func TestVerifyingBLSSignatures(t *testing.T) { // Test BLSPrivateKey.ToString() and BLSPrivateKey.FromString(). blsPrivateKeyString := blsPrivateKey1.ToString() - copyBLSPrivateKey1 := &BLSPrivateKey{} - require.NoError(t, copyBLSPrivateKey1.FromString(blsPrivateKeyString)) + copyBLSPrivateKey1, err := (&BLSPrivateKey{}).FromString(blsPrivateKeyString) + require.NoError(t, err) require.True(t, blsPrivateKey1.Eq(copyBLSPrivateKey1)) // Test BLSPublicKey.Eq(). @@ -95,17 +105,17 @@ func TestVerifyingBLSSignatures(t *testing.T) { // Test BLSPublicKey.ToBytes(), BLSPublicKey.FromBytes(), and BLSPublicKey.ReadBytes(). 
blsPublicKeyBytes := blsPublicKey1.ToBytes() - copyBLSPublicKey1 := &BLSPublicKey{} - require.NoError(t, copyBLSPublicKey1.FromBytes(blsPublicKeyBytes)) + copyBLSPublicKey1, err := (&BLSPublicKey{}).FromBytes(blsPublicKeyBytes) + require.NoError(t, err) require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) - copyBLSPublicKey1 = &BLSPublicKey{} - require.NoError(t, copyBLSPublicKey1.ReadBytes(bytes.NewBuffer(blsPublicKeyBytes))) + copyBLSPublicKey1, err = (&BLSPublicKey{}).ReadBytes(bytes.NewBuffer(blsPublicKeyBytes)) + require.NoError(t, err) require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) // Test BLSPublicKey.ToString() and BLSPublicKey.FromString(). blsPublicKeyString := blsPublicKey1.ToString() - copyBLSPublicKey1 = &BLSPublicKey{} - require.NoError(t, copyBLSPublicKey1.FromString(blsPublicKeyString)) + copyBLSPublicKey1, err = (&BLSPublicKey{}).FromString(blsPublicKeyString) + require.NoError(t, err) require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) // Test BLSSignature.Eq(). @@ -115,16 +125,24 @@ func TestVerifyingBLSSignatures(t *testing.T) { // Test BLSSignature.ToBytes(), BLSSignature.FromBytes(), and BLSSignature.ReadBytes(). blsSignatureBytes := blsSignature1.ToBytes() - copyBLSSignature := &BLSSignature{} - require.NoError(t, copyBLSSignature.FromBytes(blsSignatureBytes)) + copyBLSSignature, err := (&BLSSignature{}).FromBytes(blsSignatureBytes) + require.NoError(t, err) require.True(t, blsSignature1.Eq(copyBLSSignature)) - copyBLSSignature = &BLSSignature{} - require.NoError(t, copyBLSSignature.ReadBytes(bytes.NewBuffer(blsSignatureBytes))) + copyBLSSignature, err = (&BLSSignature{}).ReadBytes(bytes.NewBuffer(blsSignatureBytes)) + require.NoError(t, err) require.True(t, blsSignature1.Eq(copyBLSSignature)) // Test BLSSignature.ToString() and BLSSignature.FromString(). 
blsSignatureString := blsSignature1.ToString() - copyBLSSignature = &BLSSignature{} - require.NoError(t, copyBLSSignature.FromString(blsSignatureString)) + copyBLSSignature, err = (&BLSSignature{}).FromString(blsSignatureString) + require.NoError(t, err) require.True(t, blsSignature1.Eq(copyBLSSignature)) + + // Test BLSPublicKey.Copy(). + blsPublicKey1Copy := blsPublicKey1.Copy() + require.True(t, blsPublicKey1.Eq(blsPublicKey1Copy)) + + // Test BLSSignature.Copy(). + blsSignature1Copy := blsSignature1.Copy() + require.True(t, blsSignature1.Eq(blsSignature1Copy)) } From 3f9c74358bfb171959c69fd790f7c885a985029e Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 8 May 2023 13:11:02 -0400 Subject: [PATCH 020/762] Update no relic func signatures. --- lib/bls_signature_no_relic.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/bls_signature_no_relic.go b/lib/bls_signature_no_relic.go index 4d72b5006..1340e1ab1 100644 --- a/lib/bls_signature_no_relic.go +++ b/lib/bls_signature_no_relic.go @@ -37,7 +37,7 @@ func (blsPrivateKey *BLSPrivateKey) ToString() string { panic(BLSNoRelicError) } -func (blsPrivateKey *BLSPrivateKey) FromString(privateKeyString string) error { +func (blsPrivateKey *BLSPrivateKey) FromString(privateKeyString string) (*BLSPrivateKey, error) { panic(BLSNoRelicError) } @@ -61,11 +61,11 @@ func (blsPublicKey *BLSPublicKey) ToBytes() []byte { panic(BLSNoRelicError) } -func (blsPublicKey *BLSPublicKey) FromBytes(publicKeyBytes []byte) error { +func (blsPublicKey *BLSPublicKey) FromBytes(publicKeyBytes []byte) (*BLSPublicKey, error) { panic(BLSNoRelicError) } -func (blsPublicKey *BLSPublicKey) ReadBytes(rr io.Reader) error { +func (blsPublicKey *BLSPublicKey) ReadBytes(rr io.Reader) (*BLSPublicKey, error) { panic(BLSNoRelicError) } @@ -73,7 +73,7 @@ func (blsPublicKey *BLSPublicKey) ToString() string { panic(BLSNoRelicError) } -func (blsPublicKey *BLSPublicKey) FromString(publicKeyString string) error { +func 
(blsPublicKey *BLSPublicKey) FromString(publicKeyString string) (*BLSPublicKey, error) { panic(BLSNoRelicError) } @@ -97,11 +97,11 @@ func (blsSignature *BLSSignature) ToBytes() []byte { panic(BLSNoRelicError) } -func (blsSignature *BLSSignature) FromBytes(signatureBytes []byte) error { +func (blsSignature *BLSSignature) FromBytes(signatureBytes []byte) (*BLSSignature, error) { panic(BLSNoRelicError) } -func (blsSignature *BLSSignature) ReadBytes(rr io.Reader) error { +func (blsSignature *BLSSignature) ReadBytes(rr io.Reader) (*BLSSignature, error) { panic(BLSNoRelicError) } @@ -109,7 +109,7 @@ func (blsSignature *BLSSignature) ToString() string { panic(BLSNoRelicError) } -func (blsSignature *BLSSignature) FromString(signatureString string) error { +func (blsSignature *BLSSignature) FromString(signatureString string) (*BLSSignature, error) { panic(BLSNoRelicError) } From 37edf0c783fb70da7e24fe9ca2d54e41af8be7b5 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 8 May 2023 14:03:42 -0400 Subject: [PATCH 021/762] Add better edge-case handling for BLS types. 
--- lib/bls_signature.go | 52 ++++++++++++++++++++++- lib/bls_signature_test.go | 89 +++++++++++++++++++++++++++++++++------ 2 files changed, 126 insertions(+), 15 deletions(-) diff --git a/lib/bls_signature.go b/lib/bls_signature.go index ad275e94e..4e364e10f 100644 --- a/lib/bls_signature.go +++ b/lib/bls_signature.go @@ -5,6 +5,7 @@ package lib import ( "bytes" "encoding/hex" + "errors" flowCrypto "github.com/onflow/flow-go/crypto" "io" "strings" @@ -44,6 +45,9 @@ type BLSPrivateKey struct { } func (blsPrivateKey *BLSPrivateKey) Sign(payloadBytes []byte) (*BLSSignature, error) { + if blsPrivateKey.PrivateKey == nil { + return nil, errors.New("BLSPrivateKey is nil") + } signature, err := blsPrivateKey.PrivateKey.Sign(payloadBytes, BLSHashingAlgorithm) if err != nil { return nil, err @@ -52,14 +56,23 @@ func (blsPrivateKey *BLSPrivateKey) Sign(payloadBytes []byte) (*BLSSignature, er } func (blsPrivateKey *BLSPrivateKey) PublicKey() *BLSPublicKey { + if blsPrivateKey.PrivateKey == nil { + return nil + } return &BLSPublicKey{PublicKey: blsPrivateKey.PrivateKey.PublicKey()} } func (blsPrivateKey *BLSPrivateKey) ToString() string { + if blsPrivateKey.PrivateKey == nil { + return "" + } return blsPrivateKey.PrivateKey.String() } func (blsPrivateKey *BLSPrivateKey) FromString(privateKeyString string) (*BLSPrivateKey, error) { + if privateKeyString == "" { + return nil, errors.New("empty BLSPrivateKey string provided") + } // Chop off leading 0x, if exists. Otherwise, does nothing. privateKeyStringCopy, _ := strings.CutPrefix(privateKeyString, "0x") // Convert from hex string to byte slice. 
@@ -73,6 +86,9 @@ func (blsPrivateKey *BLSPrivateKey) FromString(privateKeyString string) (*BLSPri } func (blsPrivateKey *BLSPrivateKey) Eq(other *BLSPrivateKey) bool { + if blsPrivateKey.PrivateKey == nil || other == nil { + return false + } return blsPrivateKey.PrivateKey.Equals(other.PrivateKey) } @@ -85,11 +101,18 @@ type BLSPublicKey struct { } func (blsPublicKey *BLSPublicKey) Verify(blsSignature *BLSSignature, input []byte) (bool, error) { + if blsPublicKey.PublicKey == nil { + return false, errors.New("BLSPublicKey is nil") + } return blsPublicKey.PublicKey.Verify(blsSignature.Signature, input, BLSHashingAlgorithm) } func (blsPublicKey *BLSPublicKey) ToBytes() []byte { - return EncodeByteArray(blsPublicKey.PublicKey.Encode()) + var publicKeyBytes []byte + if blsPublicKey.PublicKey != nil { + publicKeyBytes = blsPublicKey.PublicKey.Encode() + } + return EncodeByteArray(publicKeyBytes) } func (blsPublicKey *BLSPublicKey) FromBytes(publicKeyBytes []byte) (*BLSPublicKey, error) { @@ -106,10 +129,16 @@ func (blsPublicKey *BLSPublicKey) ReadBytes(rr io.Reader) (*BLSPublicKey, error) } func (blsPublicKey *BLSPublicKey) ToString() string { + if blsPublicKey.PublicKey == nil { + return "" + } return blsPublicKey.PublicKey.String() } func (blsPublicKey *BLSPublicKey) FromString(publicKeyString string) (*BLSPublicKey, error) { + if publicKeyString == "" { + return nil, errors.New("empty BLSPublicKey string provided") + } // Chop off leading 0x, if exists. Otherwise, does nothing. publicKeyStringCopy, _ := strings.CutPrefix(publicKeyString, "0x") // Convert from hex string to byte slice. 
@@ -123,6 +152,9 @@ func (blsPublicKey *BLSPublicKey) FromString(publicKeyString string) (*BLSPublic } func (blsPublicKey *BLSPublicKey) Eq(other *BLSPublicKey) bool { + if blsPublicKey.PublicKey == nil || other == nil { + return false + } return blsPublicKey.PublicKey.Equals(other.PublicKey) } @@ -141,7 +173,11 @@ type BLSSignature struct { } func (blsSignature *BLSSignature) ToBytes() []byte { - return EncodeByteArray(blsSignature.Signature.Bytes()) + var signatureBytes []byte + if blsSignature.Signature != nil { + signatureBytes = blsSignature.Signature.Bytes() + } + return EncodeByteArray(signatureBytes) } func (blsSignature *BLSSignature) FromBytes(signatureBytes []byte) (*BLSSignature, error) { @@ -158,10 +194,16 @@ func (blsSignature *BLSSignature) ReadBytes(rr io.Reader) (*BLSSignature, error) } func (blsSignature *BLSSignature) ToString() string { + if blsSignature.Signature == nil { + return "" + } return blsSignature.Signature.String() } func (blsSignature *BLSSignature) FromString(signatureString string) (*BLSSignature, error) { + if signatureString == "" { + return nil, errors.New("empty BLSSignature string provided") + } // Chop off leading 0x, if exists. Otherwise, does nothing. signatureStringCopy, _ := strings.CutPrefix(signatureString, "0x") // Convert from hex string to byte slice. 
@@ -175,10 +217,16 @@ func (blsSignature *BLSSignature) FromString(signatureString string) (*BLSSignat } func (blsSignature *BLSSignature) Eq(other *BLSSignature) bool { + if blsSignature.Signature == nil || other == nil { + return false + } return bytes.Equal(blsSignature.ToBytes(), other.ToBytes()) } func (blsSignature *BLSSignature) Copy() *BLSSignature { + if blsSignature.Signature == nil { + return &BLSSignature{} + } return &BLSSignature{ Signature: append([]byte{}, blsSignature.Signature.Bytes()...), } diff --git a/lib/bls_signature_test.go b/lib/bls_signature_test.go index 9a980ad78..6db5d1443 100644 --- a/lib/bls_signature_test.go +++ b/lib/bls_signature_test.go @@ -9,23 +9,12 @@ import ( "testing" ) -func NewBLSPrivateKey() (*BLSPrivateKey, error) { - // This is a helper util for generating a random BLSPrivateKey. - privateKey, err := flowCrypto.GeneratePrivateKey(BLSSigningAlgorithm, RandomBytes(64)) - if err != nil { - return nil, err - } - return &BLSPrivateKey{PrivateKey: privateKey}, nil -} - func TestVerifyingBLSSignatures(t *testing.T) { // Generate two BLS public/private key pairs. - blsPrivateKey1, err := NewBLSPrivateKey() - require.NoError(t, err) + blsPrivateKey1 := _generateRandomBLSPrivateKey(t) blsPublicKey1 := blsPrivateKey1.PublicKey() - blsPrivateKey2, err := NewBLSPrivateKey() - require.NoError(t, err) + blsPrivateKey2 := _generateRandomBLSPrivateKey(t) blsPublicKey2 := blsPrivateKey2.PublicKey() // Test BLSPrivateKey.Sign() and BLSPublicKey.Verify(). @@ -145,4 +134,78 @@ func TestVerifyingBLSSignatures(t *testing.T) { // Test BLSSignature.Copy(). blsSignature1Copy := blsSignature1.Copy() require.True(t, blsSignature1.Eq(blsSignature1Copy)) + + // Test nil BLSPrivateKey edge cases. 
+ // Sign() + _, err = (&BLSPrivateKey{}).Sign(randomPayload1) + require.Error(t, err) + require.Contains(t, err.Error(), "BLSPrivateKey is nil") + // PublicKey() + require.Nil(t, (&BLSPrivateKey{}).PublicKey()) + // ToString() + require.Equal(t, (&BLSPrivateKey{}).ToString(), "") + // FromString() + _, err = (&BLSPrivateKey{}).FromString("") + require.Error(t, err) + require.Contains(t, err.Error(), "empty BLSPrivateKey string provided") + // Eq() + require.False(t, (&BLSPrivateKey{}).Eq(nil)) + require.False(t, (&BLSPrivateKey{}).Eq(&BLSPrivateKey{})) + require.False(t, (&BLSPrivateKey{}).Eq(_generateRandomBLSPrivateKey(t))) + require.False(t, _generateRandomBLSPrivateKey(t).Eq(nil)) + require.False(t, _generateRandomBLSPrivateKey(t).Eq(&BLSPrivateKey{})) + require.False(t, _generateRandomBLSPrivateKey(t).Eq(_generateRandomBLSPrivateKey(t))) + + // Test nil BLSPublicKey edge cases. + // Verify() + _, err = (&BLSPublicKey{}).Verify(blsSignature1, randomPayload1) + require.Error(t, err) + require.Contains(t, err.Error(), "BLSPublicKey is nil") + // ToBytes() + require.True(t, bytes.Equal((&BLSPublicKey{}).ToBytes(), EncodeByteArray([]byte{}))) + // FromBytes() + _, err = (&BLSPublicKey{}).FromBytes([]byte{}) + require.Error(t, err) + // ToString() + require.Equal(t, (&BLSPublicKey{}).ToString(), "") + // FromString() + _, err = (&BLSPublicKey{}).FromString("") + require.Error(t, err) + require.Contains(t, err.Error(), "empty BLSPublicKey string provided") + // Eq() + require.False(t, (&BLSPublicKey{}).Eq(nil)) + require.False(t, (&BLSPublicKey{}).Eq(&BLSPublicKey{})) + require.False(t, (&BLSPublicKey{}).Eq(_generateRandomBLSPrivateKey(t).PublicKey())) + require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq(nil)) + require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq((&BLSPrivateKey{}).PublicKey())) + require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq(_generateRandomBLSPrivateKey(t).PublicKey())) + // Copy() + require.Nil(t, 
(&BLSPublicKey{}).Copy().PublicKey) + + // Test nil BLSSignature edge cases. + // ToBytes() + require.True(t, bytes.Equal((&BLSSignature{}).ToBytes(), EncodeByteArray([]byte{}))) + // FromBytes() + _, err = (&BLSSignature{}).FromBytes([]byte{}) + require.Error(t, err) + // ToString() + require.Equal(t, (&BLSSignature{}).ToString(), "") + // FromString() + _, err = (&BLSSignature{}).FromString("") + require.Error(t, err) + require.Contains(t, err.Error(), "empty BLSSignature string provided") + // Eq() + require.False(t, (&BLSSignature{}).Eq(nil)) + require.False(t, (&BLSSignature{}).Eq(&BLSSignature{})) + require.False(t, (&BLSSignature{}).Eq(blsSignature1)) + require.False(t, blsSignature1.Eq(nil)) + require.False(t, blsSignature1.Eq(&BLSSignature{})) + // Copy() + require.Nil(t, (&BLSSignature{}).Copy().Signature) +} + +func _generateRandomBLSPrivateKey(t *testing.T) *BLSPrivateKey { + privateKey, err := flowCrypto.GeneratePrivateKey(BLSSigningAlgorithm, RandomBytes(64)) + require.NoError(t, err) + return &BLSPrivateKey{PrivateKey: privateKey} } From 17ef0ec114f6e472d092443612868758e6c9ca40 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 8 May 2023 14:38:47 -0400 Subject: [PATCH 022/762] Move BLS utils to own package. 
--- bls/signature.go | 223 +++++++++++++++++++++++++++ bls/signature_no_relic.go | 113 ++++++++++++++ bls/signature_no_relic_test.go | 14 ++ bls/signature_test.go | 221 +++++++++++++++++++++++++++ lib/bls_signature.go | 233 ----------------------------- lib/bls_signature_no_relic.go | 122 --------------- lib/bls_signature_no_relic_test.go | 14 -- lib/bls_signature_test.go | 211 -------------------------- test.Dockerfile | 7 +- 9 files changed, 575 insertions(+), 583 deletions(-) create mode 100644 bls/signature.go create mode 100644 bls/signature_no_relic.go create mode 100644 bls/signature_no_relic_test.go create mode 100644 bls/signature_test.go delete mode 100644 lib/bls_signature.go delete mode 100644 lib/bls_signature_no_relic.go delete mode 100644 lib/bls_signature_no_relic_test.go delete mode 100644 lib/bls_signature_test.go diff --git a/bls/signature.go b/bls/signature.go new file mode 100644 index 000000000..9a7038cd0 --- /dev/null +++ b/bls/signature.go @@ -0,0 +1,223 @@ +//go:build relic + +package bls + +import ( + "bytes" + "encoding/hex" + "errors" + flowCrypto "github.com/onflow/flow-go/crypto" + "strings" +) + +const SigningAlgorithm = flowCrypto.BLSBLS12381 + +// TODO: what should the domainTag param be? 
+var HashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-protocol") + +func AggregateSignatures(blsSignatures []*Signature) (*Signature, error) { + var signatures []flowCrypto.Signature + for _, blsSignature := range blsSignatures { + signatures = append(signatures, blsSignature.Signature) + } + aggregateSignature, err := flowCrypto.AggregateBLSSignatures(signatures) + if err != nil { + return nil, err + } + return &Signature{Signature: aggregateSignature}, nil +} + +func VerifyAggregateSignature(blsPublicKeys []*PublicKey, blsSignature *Signature, payloadBytes []byte) (bool, error) { + var publicKeys []flowCrypto.PublicKey + for _, blsPublicKey := range blsPublicKeys { + publicKeys = append(publicKeys, blsPublicKey.PublicKey) + } + return flowCrypto.VerifyBLSSignatureOneMessage(publicKeys, blsSignature.Signature, payloadBytes, HashingAlgorithm) +} + +// +// TYPES: PrivateKey +// + +type PrivateKey struct { + PrivateKey flowCrypto.PrivateKey +} + +func (blsPrivateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { + if blsPrivateKey.PrivateKey == nil { + return nil, errors.New("bls.PrivateKey is nil") + } + signature, err := blsPrivateKey.PrivateKey.Sign(payloadBytes, HashingAlgorithm) + if err != nil { + return nil, err + } + return &Signature{Signature: signature}, nil +} + +func (blsPrivateKey *PrivateKey) PublicKey() *PublicKey { + if blsPrivateKey.PrivateKey == nil { + return nil + } + return &PublicKey{PublicKey: blsPrivateKey.PrivateKey.PublicKey()} +} + +func (blsPrivateKey *PrivateKey) ToString() string { + if blsPrivateKey.PrivateKey == nil { + return "" + } + return blsPrivateKey.PrivateKey.String() +} + +func (blsPrivateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { + if privateKeyString == "" { + return nil, errors.New("empty bls.PrivateKey string provided") + } + // Chop off leading 0x, if exists. Otherwise, does nothing. 
+ privateKeyStringCopy, _ := strings.CutPrefix(privateKeyString, "0x") + // Convert from hex string to byte slice. + privateKeyBytes, err := hex.DecodeString(privateKeyStringCopy) + if err != nil { + return nil, err + } + // Convert from byte slice to bls.PrivateKey. + blsPrivateKey.PrivateKey, err = flowCrypto.DecodePrivateKey(SigningAlgorithm, privateKeyBytes) + return blsPrivateKey, err +} + +func (blsPrivateKey *PrivateKey) Eq(other *PrivateKey) bool { + if blsPrivateKey.PrivateKey == nil || other == nil { + return false + } + return blsPrivateKey.PrivateKey.Equals(other.PrivateKey) +} + +// +// TYPES: PublicKey +// + +type PublicKey struct { + PublicKey flowCrypto.PublicKey +} + +func (blsPublicKey *PublicKey) Verify(blsSignature *Signature, input []byte) (bool, error) { + if blsPublicKey.PublicKey == nil { + return false, errors.New("bls.PublicKey is nil") + } + return blsPublicKey.PublicKey.Verify(blsSignature.Signature, input, HashingAlgorithm) +} + +func (blsPublicKey *PublicKey) ToBytes() []byte { + var publicKeyBytes []byte + if blsPublicKey.PublicKey != nil { + publicKeyBytes = blsPublicKey.PublicKey.Encode() + } + return publicKeyBytes +} + +func (blsPublicKey *PublicKey) FromBytes(publicKeyBytes []byte) (*PublicKey, error) { + if len(publicKeyBytes) == 0 { + return nil, errors.New("empty bls.PublicKey bytes provided") + } + var err error + blsPublicKey.PublicKey, err = flowCrypto.DecodePublicKey(SigningAlgorithm, publicKeyBytes) + return blsPublicKey, err +} + +func (blsPublicKey *PublicKey) ToString() string { + if blsPublicKey.PublicKey == nil { + return "" + } + return blsPublicKey.PublicKey.String() +} + +func (blsPublicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, error) { + if publicKeyString == "" { + return nil, errors.New("empty bls.PublicKey string provided") + } + // Chop off leading 0x, if exists. Otherwise, does nothing. 
+ publicKeyStringCopy, _ := strings.CutPrefix(publicKeyString, "0x") + // Convert from hex string to byte slice. + publicKeyBytes, err := hex.DecodeString(publicKeyStringCopy) + if err != nil { + return nil, err + } + // Convert from byte slice to bls.PublicKey. + blsPublicKey.PublicKey, err = flowCrypto.DecodePublicKey(SigningAlgorithm, publicKeyBytes) + return blsPublicKey, err +} + +func (blsPublicKey *PublicKey) Eq(other *PublicKey) bool { + if blsPublicKey.PublicKey == nil || other == nil { + return false + } + return blsPublicKey.PublicKey.Equals(other.PublicKey) +} + +func (blsPublicKey *PublicKey) Copy() *PublicKey { + return &PublicKey{ + PublicKey: blsPublicKey.PublicKey, + } +} + +// +// TYPES: Signature +// + +type Signature struct { + Signature flowCrypto.Signature +} + +func (blsSignature *Signature) ToBytes() []byte { + var signatureBytes []byte + if blsSignature.Signature != nil { + signatureBytes = blsSignature.Signature.Bytes() + } + return signatureBytes +} + +func (blsSignature *Signature) FromBytes(signatureBytes []byte) (*Signature, error) { + if len(signatureBytes) == 0 { + return nil, errors.New("empty bls.Signature bytes provided") + } + blsSignature.Signature = signatureBytes + return blsSignature, nil +} + +func (blsSignature *Signature) ToString() string { + if blsSignature.Signature == nil { + return "" + } + return blsSignature.Signature.String() +} + +func (blsSignature *Signature) FromString(signatureString string) (*Signature, error) { + if signatureString == "" { + return nil, errors.New("empty bls.Signature string provided") + } + // Chop off leading 0x, if exists. Otherwise, does nothing. + signatureStringCopy, _ := strings.CutPrefix(signatureString, "0x") + // Convert from hex string to byte slice. + signatureBytes, err := hex.DecodeString(signatureStringCopy) + if err != nil { + return nil, err + } + // Convert from byte slice to bls.Signature. 
+ blsSignature.Signature = signatureBytes + return blsSignature, nil +} + +func (blsSignature *Signature) Eq(other *Signature) bool { + if blsSignature.Signature == nil || other == nil { + return false + } + return bytes.Equal(blsSignature.ToBytes(), other.ToBytes()) +} + +func (blsSignature *Signature) Copy() *Signature { + if blsSignature.Signature == nil { + return &Signature{} + } + return &Signature{ + Signature: append([]byte{}, blsSignature.Signature.Bytes()...), + } +} diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go new file mode 100644 index 000000000..47f89ab43 --- /dev/null +++ b/bls/signature_no_relic.go @@ -0,0 +1,113 @@ +//go:build !relic + +package bls + +import ( + flowCrypto "github.com/onflow/flow-go/crypto" +) + +const BLSNoRelicError = "BLS keys can't be used without Relic installed" + +func AggregateSignatures(blsSignatures []*Signature) (*Signature, error) { + panic(BLSNoRelicError) +} + +func VerifyAggregateSignature(blsPublicKeys []*PublicKey, blsSignature *Signature, payloadBytes []byte) (bool, error) { + panic(BLSNoRelicError) +} + +// +// TYPES: PrivateKey +// + +type PrivateKey struct { + PrivateKey flowCrypto.PrivateKey +} + +func (blsPrivateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { + panic(BLSNoRelicError) +} + +func (blsPrivateKey *PrivateKey) PublicKey() *PublicKey { + panic(BLSNoRelicError) +} + +func (blsPrivateKey *PrivateKey) ToString() string { + panic(BLSNoRelicError) +} + +func (blsPrivateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { + panic(BLSNoRelicError) +} + +func (blsPrivateKey *PrivateKey) Eq(other *PrivateKey) bool { + panic(BLSNoRelicError) +} + +// +// TYPES: PublicKey +// + +type PublicKey struct { + PublicKey flowCrypto.PublicKey +} + +func (blsPublicKey *PublicKey) Verify(blsSignature *Signature, input []byte) (bool, error) { + panic(BLSNoRelicError) +} + +func (blsPublicKey *PublicKey) ToBytes() []byte { + panic(BLSNoRelicError) +} + +func 
(blsPublicKey *PublicKey) FromBytes(publicKeyBytes []byte) (*PublicKey, error) { + panic(BLSNoRelicError) +} + +func (blsPublicKey *PublicKey) ToString() string { + panic(BLSNoRelicError) +} + +func (blsPublicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, error) { + panic(BLSNoRelicError) +} + +func (blsPublicKey *PublicKey) Eq(other *PublicKey) bool { + panic(BLSNoRelicError) +} + +func (blsPublicKey *PublicKey) Copy() *PublicKey { + panic(BLSNoRelicError) +} + +// +// TYPES: Signature +// + +type Signature struct { + Signature flowCrypto.Signature +} + +func (blsSignature *Signature) ToBytes() []byte { + panic(BLSNoRelicError) +} + +func (blsSignature *Signature) FromBytes(signatureBytes []byte) (*Signature, error) { + panic(BLSNoRelicError) +} + +func (blsSignature *Signature) ToString() string { + panic(BLSNoRelicError) +} + +func (blsSignature *Signature) FromString(signatureString string) (*Signature, error) { + panic(BLSNoRelicError) +} + +func (blsSignature *Signature) Eq(other *Signature) bool { + panic(BLSNoRelicError) +} + +func (blsSignature *Signature) Copy() *Signature { + panic(BLSNoRelicError) +} diff --git a/bls/signature_no_relic_test.go b/bls/signature_no_relic_test.go new file mode 100644 index 000000000..85faa70a4 --- /dev/null +++ b/bls/signature_no_relic_test.go @@ -0,0 +1,14 @@ +//go:build !relic + +package bls + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestVerifyingBLSSignatures(t *testing.T) { + assert.Panics(t, func() { (&PrivateKey{}).FromString("") }) + assert.Panics(t, func() { (&PublicKey{}).FromString("") }) + assert.Panics(t, func() { (&Signature{}).FromString("") }) +} diff --git a/bls/signature_test.go b/bls/signature_test.go new file mode 100644 index 000000000..7b605babe --- /dev/null +++ b/bls/signature_test.go @@ -0,0 +1,221 @@ +//go:build relic + +package bls + +import ( + "bytes" + "crypto/rand" + flowCrypto "github.com/onflow/flow-go/crypto" + 
"github.com/stretchr/testify/require" + "testing" +) + +func TestVerifyingBLSSignatures(t *testing.T) { + // Generate two BLS public/private key pairs. + blsPrivateKey1 := _generateRandomBLSPrivateKey(t) + blsPublicKey1 := blsPrivateKey1.PublicKey() + + blsPrivateKey2 := _generateRandomBLSPrivateKey(t) + blsPublicKey2 := blsPrivateKey2.PublicKey() + + // Test bls.PrivateKey.Sign() and bls.PublicKey.Verify(). + // 1. PrivateKey1 signs a random payload. + randomPayload1 := _generateRandomBytes(t, 256) + blsSignature1, err := blsPrivateKey1.Sign(randomPayload1) + require.NoError(t, err) + // 2. Verify bls.PublicKey1 is the signer. + isVerified, err := blsPublicKey1.Verify(blsSignature1, randomPayload1) + require.NoError(t, err) + require.True(t, isVerified) + // 3. Verify bls.PublicKey2 is not the signer. + isVerified, err = blsPublicKey2.Verify(blsSignature1, randomPayload1) + require.NoError(t, err) + require.False(t, isVerified) + + // 4. PrivateKey2 signs a different random payload. + randomPayload2 := _generateRandomBytes(t, 256) + blsSignature2, err := blsPrivateKey2.Sign(randomPayload2) + require.NoError(t, err) + // 5. Verify bls.PublicKey1 is not the signer. + isVerified, err = blsPublicKey1.Verify(blsSignature2, randomPayload2) + require.NoError(t, err) + require.False(t, isVerified) + // 6. Verify bls.PublicKey2 is the signer. + isVerified, err = blsPublicKey2.Verify(blsSignature2, randomPayload2) + require.NoError(t, err) + require.True(t, isVerified) + + // Test AggregateSignatures() and VerifyAggregateSignature(). + // 1. PrivateKey1 signs a random payload. + randomPayload3 := _generateRandomBytes(t, 256) + blsSignature1, err = blsPrivateKey1.Sign(randomPayload3) + require.NoError(t, err) + // 2. PrivateKey2 signs the same random payload. + blsSignature2, err = blsPrivateKey2.Sign(randomPayload3) + require.NoError(t, err) + // 3. Aggregate their signatures. 
+ aggregateSignature, err := AggregateSignatures([]*Signature{blsSignature1, blsSignature2}) + require.NoError(t, err) + // 4. Verify the AggregateSignature. + isVerified, err = VerifyAggregateSignature( + []*PublicKey{blsPublicKey1, blsPublicKey2}, aggregateSignature, randomPayload3, + ) + require.NoError(t, err) + require.True(t, isVerified) + // 5. Verify PrivateKey1's signature doesn't work on its own. + isVerified, err = VerifyAggregateSignature([]*PublicKey{blsPublicKey1}, aggregateSignature, randomPayload3) + require.NoError(t, err) + require.False(t, isVerified) + // 6. Verify PrivateKey2's signature doesn't work on its own. + isVerified, err = VerifyAggregateSignature([]*PublicKey{blsPublicKey2}, aggregateSignature, randomPayload3) + require.NoError(t, err) + require.False(t, isVerified) + // 7. Verify the AggregateSignature doesn't work on a different payload. + isVerified, err = VerifyAggregateSignature( + []*PublicKey{blsPublicKey1, blsPublicKey2}, aggregateSignature, randomPayload1, + ) + require.NoError(t, err) + require.False(t, isVerified) + + // Test bls.PrivateKey.Eq(). + require.True(t, blsPrivateKey1.Eq(blsPrivateKey1)) + require.True(t, blsPrivateKey2.Eq(blsPrivateKey2)) + require.False(t, blsPrivateKey1.Eq(blsPrivateKey2)) + + // Test bls.PrivateKey.ToString() and bls.PrivateKey.FromString(). + blsPrivateKeyString := blsPrivateKey1.ToString() + copyBLSPrivateKey1, err := (&PrivateKey{}).FromString(blsPrivateKeyString) + require.NoError(t, err) + require.True(t, blsPrivateKey1.Eq(copyBLSPrivateKey1)) + + // Test bls.PublicKey.Eq(). + require.True(t, blsPublicKey1.Eq(blsPublicKey1)) + require.True(t, blsPublicKey2.Eq(blsPublicKey2)) + require.False(t, blsPublicKey1.Eq(blsPublicKey2)) + + // Test bls.PublicKey.ToBytes() and bls.PublicKey.FromBytes(). 
+ blsPublicKeyBytes := blsPublicKey1.ToBytes() + copyBLSPublicKey1, err := (&PublicKey{}).FromBytes(blsPublicKeyBytes) + require.NoError(t, err) + require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) + + // Test bls.PublicKey.ToString() and bls.PublicKey.FromString(). + blsPublicKeyString := blsPublicKey1.ToString() + copyBLSPublicKey1, err = (&PublicKey{}).FromString(blsPublicKeyString) + require.NoError(t, err) + require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) + + // Test bls.Signature.Eq(). + require.True(t, blsSignature1.Eq(blsSignature1)) + require.True(t, blsSignature2.Eq(blsSignature2)) + require.False(t, blsSignature1.Eq(blsSignature2)) + + // Test bls.Signature.ToBytes() and bls.Signature.FromBytes(). + blsSignatureBytes := blsSignature1.ToBytes() + copyBLSSignature, err := (&Signature{}).FromBytes(blsSignatureBytes) + require.NoError(t, err) + require.True(t, blsSignature1.Eq(copyBLSSignature)) + + // Test bls.Signature.ToString() and bls.Signature.FromString(). + blsSignatureString := blsSignature1.ToString() + copyBLSSignature, err = (&Signature{}).FromString(blsSignatureString) + require.NoError(t, err) + require.True(t, blsSignature1.Eq(copyBLSSignature)) + + // Test bls.PublicKey.Copy(). + blsPublicKey1Copy := blsPublicKey1.Copy() + require.True(t, blsPublicKey1.Eq(blsPublicKey1Copy)) + + // Test bls.Signature.Copy(). + blsSignature1Copy := blsSignature1.Copy() + require.True(t, blsSignature1.Eq(blsSignature1Copy)) + + // Test nil bls.PrivateKey edge cases. 
+ // Sign() + _, err = (&PrivateKey{}).Sign(randomPayload1) + require.Error(t, err) + require.Contains(t, err.Error(), "bls.PrivateKey is nil") + // PublicKey() + require.Nil(t, (&PrivateKey{}).PublicKey()) + // ToString() + require.Equal(t, (&PrivateKey{}).ToString(), "") + // FromString() + _, err = (&PrivateKey{}).FromString("") + require.Error(t, err) + require.Contains(t, err.Error(), "empty bls.PrivateKey string provided") + // Eq() + require.False(t, (&PrivateKey{}).Eq(nil)) + require.False(t, (&PrivateKey{}).Eq(&PrivateKey{})) + require.False(t, (&PrivateKey{}).Eq(_generateRandomBLSPrivateKey(t))) + require.False(t, _generateRandomBLSPrivateKey(t).Eq(nil)) + require.False(t, _generateRandomBLSPrivateKey(t).Eq(&PrivateKey{})) + require.False(t, _generateRandomBLSPrivateKey(t).Eq(_generateRandomBLSPrivateKey(t))) + + // Test nil bls.PublicKey edge cases. + // Verify() + _, err = (&PublicKey{}).Verify(blsSignature1, randomPayload1) + require.Error(t, err) + require.Contains(t, err.Error(), "bls.PublicKey is nil") + // ToBytes() + require.True(t, bytes.Equal((&PublicKey{}).ToBytes(), []byte{})) + // FromBytes() + _, err = (&PublicKey{}).FromBytes(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "empty bls.PublicKey bytes provided") + _, err = (&PublicKey{}).FromBytes([]byte{}) + require.Error(t, err) + require.Contains(t, err.Error(), "empty bls.PublicKey bytes provided") + // ToString() + require.Equal(t, (&PublicKey{}).ToString(), "") + // FromString() + _, err = (&PublicKey{}).FromString("") + require.Error(t, err) + require.Contains(t, err.Error(), "empty bls.PublicKey string provided") + // Eq() + require.False(t, (&PublicKey{}).Eq(nil)) + require.False(t, (&PublicKey{}).Eq(&PublicKey{})) + require.False(t, (&PublicKey{}).Eq(_generateRandomBLSPrivateKey(t).PublicKey())) + require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq(nil)) + require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq((&PrivateKey{}).PublicKey())) + 
require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq(_generateRandomBLSPrivateKey(t).PublicKey())) + // Copy() + require.Nil(t, (&PublicKey{}).Copy().PublicKey) + + // Test nil bls.Signature edge cases. + // ToBytes() + require.True(t, bytes.Equal((&Signature{}).ToBytes(), []byte{})) + // FromBytes() + _, err = (&Signature{}).FromBytes(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "empty bls.Signature bytes provided") + _, err = (&Signature{}).FromBytes([]byte{}) + require.Error(t, err) + require.Contains(t, err.Error(), "empty bls.Signature bytes provided") + // ToString() + require.Equal(t, (&Signature{}).ToString(), "") + // FromString() + _, err = (&Signature{}).FromString("") + require.Error(t, err) + require.Contains(t, err.Error(), "empty bls.Signature string provided") + // Eq() + require.False(t, (&Signature{}).Eq(nil)) + require.False(t, (&Signature{}).Eq(&Signature{})) + require.False(t, (&Signature{}).Eq(blsSignature1)) + require.False(t, blsSignature1.Eq(nil)) + require.False(t, blsSignature1.Eq(&Signature{})) + // Copy() + require.Nil(t, (&Signature{}).Copy().Signature) +} + +func _generateRandomBLSPrivateKey(t *testing.T) *PrivateKey { + privateKey, err := flowCrypto.GeneratePrivateKey(SigningAlgorithm, _generateRandomBytes(t, 64)) + require.NoError(t, err) + return &PrivateKey{PrivateKey: privateKey} +} + +func _generateRandomBytes(t *testing.T, numBytes int) []byte { + randomBytes := make([]byte, 64) + _, err := rand.Read(randomBytes) + require.NoError(t, err) + return randomBytes +} diff --git a/lib/bls_signature.go b/lib/bls_signature.go deleted file mode 100644 index 4e364e10f..000000000 --- a/lib/bls_signature.go +++ /dev/null @@ -1,233 +0,0 @@ -//go:build relic - -package lib - -import ( - "bytes" - "encoding/hex" - "errors" - flowCrypto "github.com/onflow/flow-go/crypto" - "io" - "strings" -) - -const BLSSigningAlgorithm = flowCrypto.BLSBLS12381 - -// TODO: what should the domainTag param be? 
-var BLSHashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-protocol") - -func AggregateBLSSignatures(blsSignatures []*BLSSignature) (*BLSSignature, error) { - var signatures []flowCrypto.Signature - for _, blsSignature := range blsSignatures { - signatures = append(signatures, blsSignature.Signature) - } - aggregateSignature, err := flowCrypto.AggregateBLSSignatures(signatures) - if err != nil { - return nil, err - } - return &BLSSignature{Signature: aggregateSignature}, nil -} - -func VerifyAggregateBLSSignature(blsPublicKeys []*BLSPublicKey, blsSignature *BLSSignature, payloadBytes []byte) (bool, error) { - var publicKeys []flowCrypto.PublicKey - for _, blsPublicKey := range blsPublicKeys { - publicKeys = append(publicKeys, blsPublicKey.PublicKey) - } - return flowCrypto.VerifyBLSSignatureOneMessage(publicKeys, blsSignature.Signature, payloadBytes, BLSHashingAlgorithm) -} - -// -// TYPES: BLSPrivateKey -// - -type BLSPrivateKey struct { - PrivateKey flowCrypto.PrivateKey -} - -func (blsPrivateKey *BLSPrivateKey) Sign(payloadBytes []byte) (*BLSSignature, error) { - if blsPrivateKey.PrivateKey == nil { - return nil, errors.New("BLSPrivateKey is nil") - } - signature, err := blsPrivateKey.PrivateKey.Sign(payloadBytes, BLSHashingAlgorithm) - if err != nil { - return nil, err - } - return &BLSSignature{Signature: signature}, nil -} - -func (blsPrivateKey *BLSPrivateKey) PublicKey() *BLSPublicKey { - if blsPrivateKey.PrivateKey == nil { - return nil - } - return &BLSPublicKey{PublicKey: blsPrivateKey.PrivateKey.PublicKey()} -} - -func (blsPrivateKey *BLSPrivateKey) ToString() string { - if blsPrivateKey.PrivateKey == nil { - return "" - } - return blsPrivateKey.PrivateKey.String() -} - -func (blsPrivateKey *BLSPrivateKey) FromString(privateKeyString string) (*BLSPrivateKey, error) { - if privateKeyString == "" { - return nil, errors.New("empty BLSPrivateKey string provided") - } - // Chop off leading 0x, if exists. Otherwise, does nothing. 
- privateKeyStringCopy, _ := strings.CutPrefix(privateKeyString, "0x") - // Convert from hex string to byte slice. - privateKeyBytes, err := hex.DecodeString(privateKeyStringCopy) - if err != nil { - return nil, err - } - // Convert from byte slice to BLSPrivateKey. - blsPrivateKey.PrivateKey, err = flowCrypto.DecodePrivateKey(BLSSigningAlgorithm, privateKeyBytes) - return blsPrivateKey, err -} - -func (blsPrivateKey *BLSPrivateKey) Eq(other *BLSPrivateKey) bool { - if blsPrivateKey.PrivateKey == nil || other == nil { - return false - } - return blsPrivateKey.PrivateKey.Equals(other.PrivateKey) -} - -// -// TYPES: BLSPublicKey -// - -type BLSPublicKey struct { - PublicKey flowCrypto.PublicKey -} - -func (blsPublicKey *BLSPublicKey) Verify(blsSignature *BLSSignature, input []byte) (bool, error) { - if blsPublicKey.PublicKey == nil { - return false, errors.New("BLSPublicKey is nil") - } - return blsPublicKey.PublicKey.Verify(blsSignature.Signature, input, BLSHashingAlgorithm) -} - -func (blsPublicKey *BLSPublicKey) ToBytes() []byte { - var publicKeyBytes []byte - if blsPublicKey.PublicKey != nil { - publicKeyBytes = blsPublicKey.PublicKey.Encode() - } - return EncodeByteArray(publicKeyBytes) -} - -func (blsPublicKey *BLSPublicKey) FromBytes(publicKeyBytes []byte) (*BLSPublicKey, error) { - return blsPublicKey.ReadBytes(bytes.NewReader(publicKeyBytes)) -} - -func (blsPublicKey *BLSPublicKey) ReadBytes(rr io.Reader) (*BLSPublicKey, error) { - publicKeyBytes, err := DecodeByteArray(rr) - if err != nil { - return nil, err - } - blsPublicKey.PublicKey, err = flowCrypto.DecodePublicKey(BLSSigningAlgorithm, publicKeyBytes) - return blsPublicKey, err -} - -func (blsPublicKey *BLSPublicKey) ToString() string { - if blsPublicKey.PublicKey == nil { - return "" - } - return blsPublicKey.PublicKey.String() -} - -func (blsPublicKey *BLSPublicKey) FromString(publicKeyString string) (*BLSPublicKey, error) { - if publicKeyString == "" { - return nil, errors.New("empty BLSPublicKey 
string provided") - } - // Chop off leading 0x, if exists. Otherwise, does nothing. - publicKeyStringCopy, _ := strings.CutPrefix(publicKeyString, "0x") - // Convert from hex string to byte slice. - publicKeyBytes, err := hex.DecodeString(publicKeyStringCopy) - if err != nil { - return nil, err - } - // Convert from byte slice to BLSPublicKey. - blsPublicKey.PublicKey, err = flowCrypto.DecodePublicKey(BLSSigningAlgorithm, publicKeyBytes) - return blsPublicKey, err -} - -func (blsPublicKey *BLSPublicKey) Eq(other *BLSPublicKey) bool { - if blsPublicKey.PublicKey == nil || other == nil { - return false - } - return blsPublicKey.PublicKey.Equals(other.PublicKey) -} - -func (blsPublicKey *BLSPublicKey) Copy() *BLSPublicKey { - return &BLSPublicKey{ - PublicKey: blsPublicKey.PublicKey, - } -} - -// -// TYPES: BLSSignature -// - -type BLSSignature struct { - Signature flowCrypto.Signature -} - -func (blsSignature *BLSSignature) ToBytes() []byte { - var signatureBytes []byte - if blsSignature.Signature != nil { - signatureBytes = blsSignature.Signature.Bytes() - } - return EncodeByteArray(signatureBytes) -} - -func (blsSignature *BLSSignature) FromBytes(signatureBytes []byte) (*BLSSignature, error) { - return blsSignature.ReadBytes(bytes.NewReader(signatureBytes)) -} - -func (blsSignature *BLSSignature) ReadBytes(rr io.Reader) (*BLSSignature, error) { - signatureBytes, err := DecodeByteArray(rr) - if err != nil { - return nil, err - } - blsSignature.Signature = signatureBytes - return blsSignature, nil -} - -func (blsSignature *BLSSignature) ToString() string { - if blsSignature.Signature == nil { - return "" - } - return blsSignature.Signature.String() -} - -func (blsSignature *BLSSignature) FromString(signatureString string) (*BLSSignature, error) { - if signatureString == "" { - return nil, errors.New("empty BLSSignature string provided") - } - // Chop off leading 0x, if exists. Otherwise, does nothing. 
- signatureStringCopy, _ := strings.CutPrefix(signatureString, "0x") - // Convert from hex string to byte slice. - signatureBytes, err := hex.DecodeString(signatureStringCopy) - if err != nil { - return nil, err - } - // Convert from byte slice to BLSSignature. - blsSignature.Signature = signatureBytes - return blsSignature, nil -} - -func (blsSignature *BLSSignature) Eq(other *BLSSignature) bool { - if blsSignature.Signature == nil || other == nil { - return false - } - return bytes.Equal(blsSignature.ToBytes(), other.ToBytes()) -} - -func (blsSignature *BLSSignature) Copy() *BLSSignature { - if blsSignature.Signature == nil { - return &BLSSignature{} - } - return &BLSSignature{ - Signature: append([]byte{}, blsSignature.Signature.Bytes()...), - } -} diff --git a/lib/bls_signature_no_relic.go b/lib/bls_signature_no_relic.go deleted file mode 100644 index 1340e1ab1..000000000 --- a/lib/bls_signature_no_relic.go +++ /dev/null @@ -1,122 +0,0 @@ -//go:build !relic - -package lib - -import ( - flowCrypto "github.com/onflow/flow-go/crypto" - "io" -) - -const BLSNoRelicError = "BLS keys can't be used without Relic installed" - -func AggregateBLSSignatures(blsSignatures []*BLSSignature) (*BLSSignature, error) { - panic(BLSNoRelicError) -} - -func VerifyAggregateBLSSignature(blsPublicKeys []*BLSPublicKey, blsSignature *BLSSignature, payloadBytes []byte) (bool, error) { - panic(BLSNoRelicError) -} - -// -// TYPES: BLSPrivateKey -// - -type BLSPrivateKey struct { - PrivateKey flowCrypto.PrivateKey -} - -func (blsPrivateKey *BLSPrivateKey) Sign(payloadBytes []byte) (*BLSSignature, error) { - panic(BLSNoRelicError) -} - -func (blsPrivateKey *BLSPrivateKey) PublicKey() *BLSPublicKey { - panic(BLSNoRelicError) -} - -func (blsPrivateKey *BLSPrivateKey) ToString() string { - panic(BLSNoRelicError) -} - -func (blsPrivateKey *BLSPrivateKey) FromString(privateKeyString string) (*BLSPrivateKey, error) { - panic(BLSNoRelicError) -} - -func (blsPrivateKey *BLSPrivateKey) Eq(other 
*BLSPrivateKey) bool { - panic(BLSNoRelicError) -} - -// -// TYPES: BLSPublicKey -// - -type BLSPublicKey struct { - PublicKey flowCrypto.PublicKey -} - -func (blsPublicKey *BLSPublicKey) Verify(blsSignature *BLSSignature, input []byte) (bool, error) { - panic(BLSNoRelicError) -} - -func (blsPublicKey *BLSPublicKey) ToBytes() []byte { - panic(BLSNoRelicError) -} - -func (blsPublicKey *BLSPublicKey) FromBytes(publicKeyBytes []byte) (*BLSPublicKey, error) { - panic(BLSNoRelicError) -} - -func (blsPublicKey *BLSPublicKey) ReadBytes(rr io.Reader) (*BLSPublicKey, error) { - panic(BLSNoRelicError) -} - -func (blsPublicKey *BLSPublicKey) ToString() string { - panic(BLSNoRelicError) -} - -func (blsPublicKey *BLSPublicKey) FromString(publicKeyString string) (*BLSPublicKey, error) { - panic(BLSNoRelicError) -} - -func (blsPublicKey *BLSPublicKey) Eq(other *BLSPublicKey) bool { - panic(BLSNoRelicError) -} - -func (blsPublicKey *BLSPublicKey) Copy() *BLSPublicKey { - panic(BLSNoRelicError) -} - -// -// TYPES: BLSSignature -// - -type BLSSignature struct { - Signature flowCrypto.Signature -} - -func (blsSignature *BLSSignature) ToBytes() []byte { - panic(BLSNoRelicError) -} - -func (blsSignature *BLSSignature) FromBytes(signatureBytes []byte) (*BLSSignature, error) { - panic(BLSNoRelicError) -} - -func (blsSignature *BLSSignature) ReadBytes(rr io.Reader) (*BLSSignature, error) { - panic(BLSNoRelicError) -} - -func (blsSignature *BLSSignature) ToString() string { - panic(BLSNoRelicError) -} - -func (blsSignature *BLSSignature) FromString(signatureString string) (*BLSSignature, error) { - panic(BLSNoRelicError) -} - -func (blsSignature *BLSSignature) Eq(other *BLSSignature) bool { - panic(BLSNoRelicError) -} - -func (blsSignature *BLSSignature) Copy() *BLSSignature { - panic(BLSNoRelicError) -} diff --git a/lib/bls_signature_no_relic_test.go b/lib/bls_signature_no_relic_test.go deleted file mode 100644 index 882496ecc..000000000 --- a/lib/bls_signature_no_relic_test.go +++ 
/dev/null @@ -1,14 +0,0 @@ -//go:build !relic - -package lib - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestVerifyingBLSSignatures(t *testing.T) { - assert.Panics(t, func() { (&BLSPrivateKey{}).FromString("") }) - assert.Panics(t, func() { (&BLSPublicKey{}).FromString("") }) - assert.Panics(t, func() { (&BLSSignature{}).FromString("") }) -} diff --git a/lib/bls_signature_test.go b/lib/bls_signature_test.go deleted file mode 100644 index 6db5d1443..000000000 --- a/lib/bls_signature_test.go +++ /dev/null @@ -1,211 +0,0 @@ -//go:build relic - -package lib - -import ( - "bytes" - flowCrypto "github.com/onflow/flow-go/crypto" - "github.com/stretchr/testify/require" - "testing" -) - -func TestVerifyingBLSSignatures(t *testing.T) { - // Generate two BLS public/private key pairs. - blsPrivateKey1 := _generateRandomBLSPrivateKey(t) - blsPublicKey1 := blsPrivateKey1.PublicKey() - - blsPrivateKey2 := _generateRandomBLSPrivateKey(t) - blsPublicKey2 := blsPrivateKey2.PublicKey() - - // Test BLSPrivateKey.Sign() and BLSPublicKey.Verify(). - // 1. PrivateKey1 signs a random payload. - randomPayload1 := RandomBytes(256) - blsSignature1, err := blsPrivateKey1.Sign(randomPayload1) - require.NoError(t, err) - // 2. Verify BLSPublicKey1 is the signer. - isVerified, err := blsPublicKey1.Verify(blsSignature1, randomPayload1) - require.NoError(t, err) - require.True(t, isVerified) - // 3. Verify BLSPublicKey2 is not the signer. - isVerified, err = blsPublicKey2.Verify(blsSignature1, randomPayload1) - require.NoError(t, err) - require.False(t, isVerified) - - // 4. PrivateKey2 signs a different random payload. - randomPayload2 := RandomBytes(256) - blsSignature2, err := blsPrivateKey2.Sign(randomPayload2) - require.NoError(t, err) - // 5. Verify BLSPublicKey1 is not the signer. - isVerified, err = blsPublicKey1.Verify(blsSignature2, randomPayload2) - require.NoError(t, err) - require.False(t, isVerified) - // 6. Verify BLSPublicKey2 is the signer. 
- isVerified, err = blsPublicKey2.Verify(blsSignature2, randomPayload2) - require.NoError(t, err) - require.True(t, isVerified) - - // Test AggregateBLSSignatures() and VerifyAggregateBLSSignature(). - // 1. PrivateKey1 signs a random payload. - randomPayload3 := RandomBytes(256) - blsSignature1, err = blsPrivateKey1.Sign(randomPayload3) - require.NoError(t, err) - // 2. PrivateKey2 signs the same random payload. - blsSignature2, err = blsPrivateKey2.Sign(randomPayload3) - require.NoError(t, err) - // 3. Aggregate their signatures. - aggregateSignature, err := AggregateBLSSignatures([]*BLSSignature{blsSignature1, blsSignature2}) - require.NoError(t, err) - // 4. Verify the AggregateSignature. - isVerified, err = VerifyAggregateBLSSignature( - []*BLSPublicKey{blsPublicKey1, blsPublicKey2}, aggregateSignature, randomPayload3, - ) - require.NoError(t, err) - require.True(t, isVerified) - // 5. Verify PrivateKey1's signature doesn't work on its own. - isVerified, err = VerifyAggregateBLSSignature([]*BLSPublicKey{blsPublicKey1}, aggregateSignature, randomPayload3) - require.NoError(t, err) - require.False(t, isVerified) - // 6. Verify PrivateKey2's signature doesn't work on its own. - isVerified, err = VerifyAggregateBLSSignature([]*BLSPublicKey{blsPublicKey2}, aggregateSignature, randomPayload3) - require.NoError(t, err) - require.False(t, isVerified) - // 7. Verify the AggregateSignature doesn't work on a different payload. - isVerified, err = VerifyAggregateBLSSignature( - []*BLSPublicKey{blsPublicKey1, blsPublicKey2}, aggregateSignature, randomPayload1, - ) - require.NoError(t, err) - require.False(t, isVerified) - - // Test BLSPrivateKey.Eq(). - require.True(t, blsPrivateKey1.Eq(blsPrivateKey1)) - require.True(t, blsPrivateKey2.Eq(blsPrivateKey2)) - require.False(t, blsPrivateKey1.Eq(blsPrivateKey2)) - - // Test BLSPrivateKey.ToString() and BLSPrivateKey.FromString(). 
- blsPrivateKeyString := blsPrivateKey1.ToString() - copyBLSPrivateKey1, err := (&BLSPrivateKey{}).FromString(blsPrivateKeyString) - require.NoError(t, err) - require.True(t, blsPrivateKey1.Eq(copyBLSPrivateKey1)) - - // Test BLSPublicKey.Eq(). - require.True(t, blsPublicKey1.Eq(blsPublicKey1)) - require.True(t, blsPublicKey2.Eq(blsPublicKey2)) - require.False(t, blsPublicKey1.Eq(blsPublicKey2)) - - // Test BLSPublicKey.ToBytes(), BLSPublicKey.FromBytes(), and BLSPublicKey.ReadBytes(). - blsPublicKeyBytes := blsPublicKey1.ToBytes() - copyBLSPublicKey1, err := (&BLSPublicKey{}).FromBytes(blsPublicKeyBytes) - require.NoError(t, err) - require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) - copyBLSPublicKey1, err = (&BLSPublicKey{}).ReadBytes(bytes.NewBuffer(blsPublicKeyBytes)) - require.NoError(t, err) - require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) - - // Test BLSPublicKey.ToString() and BLSPublicKey.FromString(). - blsPublicKeyString := blsPublicKey1.ToString() - copyBLSPublicKey1, err = (&BLSPublicKey{}).FromString(blsPublicKeyString) - require.NoError(t, err) - require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) - - // Test BLSSignature.Eq(). - require.True(t, blsSignature1.Eq(blsSignature1)) - require.True(t, blsSignature2.Eq(blsSignature2)) - require.False(t, blsSignature1.Eq(blsSignature2)) - - // Test BLSSignature.ToBytes(), BLSSignature.FromBytes(), and BLSSignature.ReadBytes(). - blsSignatureBytes := blsSignature1.ToBytes() - copyBLSSignature, err := (&BLSSignature{}).FromBytes(blsSignatureBytes) - require.NoError(t, err) - require.True(t, blsSignature1.Eq(copyBLSSignature)) - copyBLSSignature, err = (&BLSSignature{}).ReadBytes(bytes.NewBuffer(blsSignatureBytes)) - require.NoError(t, err) - require.True(t, blsSignature1.Eq(copyBLSSignature)) - - // Test BLSSignature.ToString() and BLSSignature.FromString(). 
- blsSignatureString := blsSignature1.ToString() - copyBLSSignature, err = (&BLSSignature{}).FromString(blsSignatureString) - require.NoError(t, err) - require.True(t, blsSignature1.Eq(copyBLSSignature)) - - // Test BLSPublicKey.Copy(). - blsPublicKey1Copy := blsPublicKey1.Copy() - require.True(t, blsPublicKey1.Eq(blsPublicKey1Copy)) - - // Test BLSSignature.Copy(). - blsSignature1Copy := blsSignature1.Copy() - require.True(t, blsSignature1.Eq(blsSignature1Copy)) - - // Test nil BLSPrivateKey edge cases. - // Sign() - _, err = (&BLSPrivateKey{}).Sign(randomPayload1) - require.Error(t, err) - require.Contains(t, err.Error(), "BLSPrivateKey is nil") - // PublicKey() - require.Nil(t, (&BLSPrivateKey{}).PublicKey()) - // ToString() - require.Equal(t, (&BLSPrivateKey{}).ToString(), "") - // FromString() - _, err = (&BLSPrivateKey{}).FromString("") - require.Error(t, err) - require.Contains(t, err.Error(), "empty BLSPrivateKey string provided") - // Eq() - require.False(t, (&BLSPrivateKey{}).Eq(nil)) - require.False(t, (&BLSPrivateKey{}).Eq(&BLSPrivateKey{})) - require.False(t, (&BLSPrivateKey{}).Eq(_generateRandomBLSPrivateKey(t))) - require.False(t, _generateRandomBLSPrivateKey(t).Eq(nil)) - require.False(t, _generateRandomBLSPrivateKey(t).Eq(&BLSPrivateKey{})) - require.False(t, _generateRandomBLSPrivateKey(t).Eq(_generateRandomBLSPrivateKey(t))) - - // Test nil BLSPublicKey edge cases. 
- // Verify() - _, err = (&BLSPublicKey{}).Verify(blsSignature1, randomPayload1) - require.Error(t, err) - require.Contains(t, err.Error(), "BLSPublicKey is nil") - // ToBytes() - require.True(t, bytes.Equal((&BLSPublicKey{}).ToBytes(), EncodeByteArray([]byte{}))) - // FromBytes() - _, err = (&BLSPublicKey{}).FromBytes([]byte{}) - require.Error(t, err) - // ToString() - require.Equal(t, (&BLSPublicKey{}).ToString(), "") - // FromString() - _, err = (&BLSPublicKey{}).FromString("") - require.Error(t, err) - require.Contains(t, err.Error(), "empty BLSPublicKey string provided") - // Eq() - require.False(t, (&BLSPublicKey{}).Eq(nil)) - require.False(t, (&BLSPublicKey{}).Eq(&BLSPublicKey{})) - require.False(t, (&BLSPublicKey{}).Eq(_generateRandomBLSPrivateKey(t).PublicKey())) - require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq(nil)) - require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq((&BLSPrivateKey{}).PublicKey())) - require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq(_generateRandomBLSPrivateKey(t).PublicKey())) - // Copy() - require.Nil(t, (&BLSPublicKey{}).Copy().PublicKey) - - // Test nil BLSSignature edge cases. 
- // ToBytes() - require.True(t, bytes.Equal((&BLSSignature{}).ToBytes(), EncodeByteArray([]byte{}))) - // FromBytes() - _, err = (&BLSSignature{}).FromBytes([]byte{}) - require.Error(t, err) - // ToString() - require.Equal(t, (&BLSSignature{}).ToString(), "") - // FromString() - _, err = (&BLSSignature{}).FromString("") - require.Error(t, err) - require.Contains(t, err.Error(), "empty BLSSignature string provided") - // Eq() - require.False(t, (&BLSSignature{}).Eq(nil)) - require.False(t, (&BLSSignature{}).Eq(&BLSSignature{})) - require.False(t, (&BLSSignature{}).Eq(blsSignature1)) - require.False(t, blsSignature1.Eq(nil)) - require.False(t, blsSignature1.Eq(&BLSSignature{})) - // Copy() - require.Nil(t, (&BLSSignature{}).Copy().Signature) -} - -func _generateRandomBLSPrivateKey(t *testing.T) *BLSPrivateKey { - privateKey, err := flowCrypto.GeneratePrivateKey(BLSSigningAlgorithm, RandomBytes(64)) - require.NoError(t, err) - return &BLSPrivateKey{PrivateKey: privateKey} -} diff --git a/test.Dockerfile b/test.Dockerfile index 427daf2b8..c2b6d318c 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -17,14 +17,15 @@ RUN go mod download COPY scripts/install-relic.sh . RUN ./install-relic.sh -COPY desohash desohash +COPY bls bls COPY cmd cmd +COPY desohash desohash COPY lib lib -COPY test_data test_data COPY migrate migrate +COPY test_data test_data COPY main.go . # build backend RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core main.go -ENTRYPOINT ["go", "test", "-tags", "relic", "-v", "github.com/deso-protocol/core/lib"] +ENTRYPOINT ["go", "test", "-tags", "relic", "-v", "./..."] From a61d8b46fac4611ddbb6b05f3eabd572d88f34de Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 8 May 2023 14:55:23 -0400 Subject: [PATCH 023/762] Clean up naming conventions. 
--- bls/signature.go | 136 +++++++++++++++++++------------------- bls/signature_no_relic.go | 56 +++++++--------- bls/signature_test.go | 8 +-- 3 files changed, 95 insertions(+), 105 deletions(-) diff --git a/bls/signature.go b/bls/signature.go index 9a7038cd0..171744237 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -15,24 +15,24 @@ const SigningAlgorithm = flowCrypto.BLSBLS12381 // TODO: what should the domainTag param be? var HashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-protocol") -func AggregateSignatures(blsSignatures []*Signature) (*Signature, error) { - var signatures []flowCrypto.Signature - for _, blsSignature := range blsSignatures { - signatures = append(signatures, blsSignature.Signature) +func AggregateSignatures(signatures []*Signature) (*Signature, error) { + var flowSignatures []flowCrypto.Signature + for _, signature := range signatures { + flowSignatures = append(flowSignatures, signature.flowSignature) } - aggregateSignature, err := flowCrypto.AggregateBLSSignatures(signatures) + aggregateFlowSignature, err := flowCrypto.AggregateBLSSignatures(flowSignatures) if err != nil { return nil, err } - return &Signature{Signature: aggregateSignature}, nil + return &Signature{flowSignature: aggregateFlowSignature}, nil } -func VerifyAggregateSignature(blsPublicKeys []*PublicKey, blsSignature *Signature, payloadBytes []byte) (bool, error) { - var publicKeys []flowCrypto.PublicKey - for _, blsPublicKey := range blsPublicKeys { - publicKeys = append(publicKeys, blsPublicKey.PublicKey) +func VerifyAggregateSignature(publicKeys []*PublicKey, signature *Signature, payloadBytes []byte) (bool, error) { + var flowPublicKeys []flowCrypto.PublicKey + for _, publicKey := range publicKeys { + flowPublicKeys = append(flowPublicKeys, publicKey.flowPublicKey) } - return flowCrypto.VerifyBLSSignatureOneMessage(publicKeys, blsSignature.Signature, payloadBytes, HashingAlgorithm) + return flowCrypto.VerifyBLSSignatureOneMessage(flowPublicKeys, 
signature.flowSignature, payloadBytes, HashingAlgorithm) } // @@ -40,35 +40,35 @@ func VerifyAggregateSignature(blsPublicKeys []*PublicKey, blsSignature *Signatur // type PrivateKey struct { - PrivateKey flowCrypto.PrivateKey + flowPrivateKey flowCrypto.PrivateKey } -func (blsPrivateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { - if blsPrivateKey.PrivateKey == nil { +func (privateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { + if privateKey.flowPrivateKey == nil { return nil, errors.New("bls.PrivateKey is nil") } - signature, err := blsPrivateKey.PrivateKey.Sign(payloadBytes, HashingAlgorithm) + flowSignature, err := privateKey.flowPrivateKey.Sign(payloadBytes, HashingAlgorithm) if err != nil { return nil, err } - return &Signature{Signature: signature}, nil + return &Signature{flowSignature: flowSignature}, nil } -func (blsPrivateKey *PrivateKey) PublicKey() *PublicKey { - if blsPrivateKey.PrivateKey == nil { +func (privateKey *PrivateKey) PublicKey() *PublicKey { + if privateKey.flowPrivateKey == nil { return nil } - return &PublicKey{PublicKey: blsPrivateKey.PrivateKey.PublicKey()} + return &PublicKey{flowPublicKey: privateKey.flowPrivateKey.PublicKey()} } -func (blsPrivateKey *PrivateKey) ToString() string { - if blsPrivateKey.PrivateKey == nil { +func (privateKey *PrivateKey) ToString() string { + if privateKey.flowPrivateKey == nil { return "" } - return blsPrivateKey.PrivateKey.String() + return privateKey.flowPrivateKey.String() } -func (blsPrivateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { +func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { if privateKeyString == "" { return nil, errors.New("empty bls.PrivateKey string provided") } @@ -80,15 +80,15 @@ func (blsPrivateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKe return nil, err } // Convert from byte slice to bls.PrivateKey. 
- blsPrivateKey.PrivateKey, err = flowCrypto.DecodePrivateKey(SigningAlgorithm, privateKeyBytes) - return blsPrivateKey, err + privateKey.flowPrivateKey, err = flowCrypto.DecodePrivateKey(SigningAlgorithm, privateKeyBytes) + return privateKey, err } -func (blsPrivateKey *PrivateKey) Eq(other *PrivateKey) bool { - if blsPrivateKey.PrivateKey == nil || other == nil { +func (privateKey *PrivateKey) Eq(other *PrivateKey) bool { + if privateKey.flowPrivateKey == nil || other == nil { return false } - return blsPrivateKey.PrivateKey.Equals(other.PrivateKey) + return privateKey.flowPrivateKey.Equals(other.flowPrivateKey) } // @@ -96,41 +96,41 @@ func (blsPrivateKey *PrivateKey) Eq(other *PrivateKey) bool { // type PublicKey struct { - PublicKey flowCrypto.PublicKey + flowPublicKey flowCrypto.PublicKey } -func (blsPublicKey *PublicKey) Verify(blsSignature *Signature, input []byte) (bool, error) { - if blsPublicKey.PublicKey == nil { +func (publicKey *PublicKey) Verify(signature *Signature, input []byte) (bool, error) { + if publicKey.flowPublicKey == nil { return false, errors.New("bls.PublicKey is nil") } - return blsPublicKey.PublicKey.Verify(blsSignature.Signature, input, HashingAlgorithm) + return publicKey.flowPublicKey.Verify(signature.flowSignature, input, HashingAlgorithm) } -func (blsPublicKey *PublicKey) ToBytes() []byte { +func (publicKey *PublicKey) ToBytes() []byte { var publicKeyBytes []byte - if blsPublicKey.PublicKey != nil { - publicKeyBytes = blsPublicKey.PublicKey.Encode() + if publicKey.flowPublicKey != nil { + publicKeyBytes = publicKey.flowPublicKey.Encode() } return publicKeyBytes } -func (blsPublicKey *PublicKey) FromBytes(publicKeyBytes []byte) (*PublicKey, error) { +func (publicKey *PublicKey) FromBytes(publicKeyBytes []byte) (*PublicKey, error) { if len(publicKeyBytes) == 0 { return nil, errors.New("empty bls.PublicKey bytes provided") } var err error - blsPublicKey.PublicKey, err = flowCrypto.DecodePublicKey(SigningAlgorithm, publicKeyBytes) - 
return blsPublicKey, err + publicKey.flowPublicKey, err = flowCrypto.DecodePublicKey(SigningAlgorithm, publicKeyBytes) + return publicKey, err } -func (blsPublicKey *PublicKey) ToString() string { - if blsPublicKey.PublicKey == nil { +func (publicKey *PublicKey) ToString() string { + if publicKey.flowPublicKey == nil { return "" } - return blsPublicKey.PublicKey.String() + return publicKey.flowPublicKey.String() } -func (blsPublicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, error) { +func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, error) { if publicKeyString == "" { return nil, errors.New("empty bls.PublicKey string provided") } @@ -142,20 +142,20 @@ func (blsPublicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, e return nil, err } // Convert from byte slice to bls.PublicKey. - blsPublicKey.PublicKey, err = flowCrypto.DecodePublicKey(SigningAlgorithm, publicKeyBytes) - return blsPublicKey, err + publicKey.flowPublicKey, err = flowCrypto.DecodePublicKey(SigningAlgorithm, publicKeyBytes) + return publicKey, err } -func (blsPublicKey *PublicKey) Eq(other *PublicKey) bool { - if blsPublicKey.PublicKey == nil || other == nil { +func (publicKey *PublicKey) Eq(other *PublicKey) bool { + if publicKey.flowPublicKey == nil || other == nil { return false } - return blsPublicKey.PublicKey.Equals(other.PublicKey) + return publicKey.flowPublicKey.Equals(other.flowPublicKey) } -func (blsPublicKey *PublicKey) Copy() *PublicKey { +func (publicKey *PublicKey) Copy() *PublicKey { return &PublicKey{ - PublicKey: blsPublicKey.PublicKey, + flowPublicKey: publicKey.flowPublicKey, } } @@ -164,33 +164,33 @@ func (blsPublicKey *PublicKey) Copy() *PublicKey { // type Signature struct { - Signature flowCrypto.Signature + flowSignature flowCrypto.Signature } -func (blsSignature *Signature) ToBytes() []byte { +func (signature *Signature) ToBytes() []byte { var signatureBytes []byte - if blsSignature.Signature != nil { - 
signatureBytes = blsSignature.Signature.Bytes() + if signature.flowSignature != nil { + signatureBytes = signature.flowSignature.Bytes() } return signatureBytes } -func (blsSignature *Signature) FromBytes(signatureBytes []byte) (*Signature, error) { +func (signature *Signature) FromBytes(signatureBytes []byte) (*Signature, error) { if len(signatureBytes) == 0 { return nil, errors.New("empty bls.Signature bytes provided") } - blsSignature.Signature = signatureBytes - return blsSignature, nil + signature.flowSignature = signatureBytes + return signature, nil } -func (blsSignature *Signature) ToString() string { - if blsSignature.Signature == nil { +func (signature *Signature) ToString() string { + if signature.flowSignature == nil { return "" } - return blsSignature.Signature.String() + return signature.flowSignature.String() } -func (blsSignature *Signature) FromString(signatureString string) (*Signature, error) { +func (signature *Signature) FromString(signatureString string) (*Signature, error) { if signatureString == "" { return nil, errors.New("empty bls.Signature string provided") } @@ -202,22 +202,22 @@ func (blsSignature *Signature) FromString(signatureString string) (*Signature, e return nil, err } // Convert from byte slice to bls.Signature. 
- blsSignature.Signature = signatureBytes - return blsSignature, nil + signature.flowSignature = signatureBytes + return signature, nil } -func (blsSignature *Signature) Eq(other *Signature) bool { - if blsSignature.Signature == nil || other == nil { +func (signature *Signature) Eq(other *Signature) bool { + if signature.flowSignature == nil || other == nil { return false } - return bytes.Equal(blsSignature.ToBytes(), other.ToBytes()) + return bytes.Equal(signature.ToBytes(), other.ToBytes()) } -func (blsSignature *Signature) Copy() *Signature { - if blsSignature.Signature == nil { +func (signature *Signature) Copy() *Signature { + if signature.flowSignature == nil { return &Signature{} } return &Signature{ - Signature: append([]byte{}, blsSignature.Signature.Bytes()...), + flowSignature: append([]byte{}, signature.flowSignature.Bytes()...), } } diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go index 47f89ab43..17c48867e 100644 --- a/bls/signature_no_relic.go +++ b/bls/signature_no_relic.go @@ -2,17 +2,13 @@ package bls -import ( - flowCrypto "github.com/onflow/flow-go/crypto" -) - const BLSNoRelicError = "BLS keys can't be used without Relic installed" -func AggregateSignatures(blsSignatures []*Signature) (*Signature, error) { +func AggregateSignatures(signatures []*Signature) (*Signature, error) { panic(BLSNoRelicError) } -func VerifyAggregateSignature(blsPublicKeys []*PublicKey, blsSignature *Signature, payloadBytes []byte) (bool, error) { +func VerifyAggregateSignature(publicKeys []*PublicKey, signature *Signature, payloadBytes []byte) (bool, error) { panic(BLSNoRelicError) } @@ -20,27 +16,25 @@ func VerifyAggregateSignature(blsPublicKeys []*PublicKey, blsSignature *Signatur // TYPES: PrivateKey // -type PrivateKey struct { - PrivateKey flowCrypto.PrivateKey -} +type PrivateKey struct{} -func (blsPrivateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { +func (privateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { 
panic(BLSNoRelicError) } -func (blsPrivateKey *PrivateKey) PublicKey() *PublicKey { +func (privateKey *PrivateKey) PublicKey() *PublicKey { panic(BLSNoRelicError) } -func (blsPrivateKey *PrivateKey) ToString() string { +func (privateKey *PrivateKey) ToString() string { panic(BLSNoRelicError) } -func (blsPrivateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { +func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { panic(BLSNoRelicError) } -func (blsPrivateKey *PrivateKey) Eq(other *PrivateKey) bool { +func (privateKey *PrivateKey) Eq(other *PrivateKey) bool { panic(BLSNoRelicError) } @@ -48,35 +42,33 @@ func (blsPrivateKey *PrivateKey) Eq(other *PrivateKey) bool { // TYPES: PublicKey // -type PublicKey struct { - PublicKey flowCrypto.PublicKey -} +type PublicKey struct{} -func (blsPublicKey *PublicKey) Verify(blsSignature *Signature, input []byte) (bool, error) { +func (publicKey *PublicKey) Verify(signature *Signature, input []byte) (bool, error) { panic(BLSNoRelicError) } -func (blsPublicKey *PublicKey) ToBytes() []byte { +func (publicKey *PublicKey) ToBytes() []byte { panic(BLSNoRelicError) } -func (blsPublicKey *PublicKey) FromBytes(publicKeyBytes []byte) (*PublicKey, error) { +func (publicKey *PublicKey) FromBytes(publicKeyBytes []byte) (*PublicKey, error) { panic(BLSNoRelicError) } -func (blsPublicKey *PublicKey) ToString() string { +func (publicKey *PublicKey) ToString() string { panic(BLSNoRelicError) } -func (blsPublicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, error) { +func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, error) { panic(BLSNoRelicError) } -func (blsPublicKey *PublicKey) Eq(other *PublicKey) bool { +func (publicKey *PublicKey) Eq(other *PublicKey) bool { panic(BLSNoRelicError) } -func (blsPublicKey *PublicKey) Copy() *PublicKey { +func (publicKey *PublicKey) Copy() *PublicKey { panic(BLSNoRelicError) } @@ -84,30 +76,28 @@ func 
(blsPublicKey *PublicKey) Copy() *PublicKey { // TYPES: Signature // -type Signature struct { - Signature flowCrypto.Signature -} +type Signature struct{} -func (blsSignature *Signature) ToBytes() []byte { +func (signature *Signature) ToBytes() []byte { panic(BLSNoRelicError) } -func (blsSignature *Signature) FromBytes(signatureBytes []byte) (*Signature, error) { +func (signature *Signature) FromBytes(signatureBytes []byte) (*Signature, error) { panic(BLSNoRelicError) } -func (blsSignature *Signature) ToString() string { +func (signature *Signature) ToString() string { panic(BLSNoRelicError) } -func (blsSignature *Signature) FromString(signatureString string) (*Signature, error) { +func (signature *Signature) FromString(signatureString string) (*Signature, error) { panic(BLSNoRelicError) } -func (blsSignature *Signature) Eq(other *Signature) bool { +func (signature *Signature) Eq(other *Signature) bool { panic(BLSNoRelicError) } -func (blsSignature *Signature) Copy() *Signature { +func (signature *Signature) Copy() *Signature { panic(BLSNoRelicError) } diff --git a/bls/signature_test.go b/bls/signature_test.go index 7b605babe..919ea28c0 100644 --- a/bls/signature_test.go +++ b/bls/signature_test.go @@ -179,7 +179,7 @@ func TestVerifyingBLSSignatures(t *testing.T) { require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq((&PrivateKey{}).PublicKey())) require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq(_generateRandomBLSPrivateKey(t).PublicKey())) // Copy() - require.Nil(t, (&PublicKey{}).Copy().PublicKey) + require.Nil(t, (&PublicKey{}).Copy().flowPublicKey) // Test nil bls.Signature edge cases. 
// ToBytes() @@ -204,13 +204,13 @@ func TestVerifyingBLSSignatures(t *testing.T) { require.False(t, blsSignature1.Eq(nil)) require.False(t, blsSignature1.Eq(&Signature{})) // Copy() - require.Nil(t, (&Signature{}).Copy().Signature) + require.Nil(t, (&Signature{}).Copy().flowSignature) } func _generateRandomBLSPrivateKey(t *testing.T) *PrivateKey { - privateKey, err := flowCrypto.GeneratePrivateKey(SigningAlgorithm, _generateRandomBytes(t, 64)) + flowPrivateKey, err := flowCrypto.GeneratePrivateKey(SigningAlgorithm, _generateRandomBytes(t, 64)) require.NoError(t, err) - return &PrivateKey{PrivateKey: privateKey} + return &PrivateKey{flowPrivateKey: flowPrivateKey} } func _generateRandomBytes(t *testing.T, numBytes int) []byte { From 54d46bc50480b6b8c7be91e77b0a92ec7d999af3 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 8 May 2023 15:18:00 -0400 Subject: [PATCH 024/762] Expose NewPrivateKey(). --- bls/signature.go | 14 ++++++++++++++ bls/signature_no_relic.go | 4 ++++ bls/signature_no_relic_test.go | 1 + bls/signature_test.go | 5 ++--- 4 files changed, 21 insertions(+), 3 deletions(-) diff --git a/bls/signature.go b/bls/signature.go index 171744237..35f3e6f85 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -4,6 +4,7 @@ package bls import ( "bytes" + "crypto/rand" "encoding/hex" "errors" flowCrypto "github.com/onflow/flow-go/crypto" @@ -43,6 +44,19 @@ type PrivateKey struct { flowPrivateKey flowCrypto.PrivateKey } +func NewPrivateKey() (*PrivateKey, error) { + randomBytes := make([]byte, 64) + _, err := rand.Read(randomBytes) + if err != nil { + return nil, err + } + flowPrivateKey, err := flowCrypto.GeneratePrivateKey(SigningAlgorithm, randomBytes) + if err != nil { + return nil, err + } + return &PrivateKey{flowPrivateKey: flowPrivateKey}, nil +} + func (privateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { if privateKey.flowPrivateKey == nil { return nil, errors.New("bls.PrivateKey is nil") diff --git a/bls/signature_no_relic.go 
b/bls/signature_no_relic.go index 17c48867e..71564d6c0 100644 --- a/bls/signature_no_relic.go +++ b/bls/signature_no_relic.go @@ -18,6 +18,10 @@ func VerifyAggregateSignature(publicKeys []*PublicKey, signature *Signature, pay type PrivateKey struct{} +func NewPrivateKey() (*PrivateKey, error) { + panic(BLSNoRelicError) +} + func (privateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { panic(BLSNoRelicError) } diff --git a/bls/signature_no_relic_test.go b/bls/signature_no_relic_test.go index 85faa70a4..3b4456b14 100644 --- a/bls/signature_no_relic_test.go +++ b/bls/signature_no_relic_test.go @@ -8,6 +8,7 @@ import ( ) func TestVerifyingBLSSignatures(t *testing.T) { + assert.Panics(t, func() { NewPrivateKey() }) assert.Panics(t, func() { (&PrivateKey{}).FromString("") }) assert.Panics(t, func() { (&PublicKey{}).FromString("") }) assert.Panics(t, func() { (&Signature{}).FromString("") }) diff --git a/bls/signature_test.go b/bls/signature_test.go index 919ea28c0..0a2925f37 100644 --- a/bls/signature_test.go +++ b/bls/signature_test.go @@ -5,7 +5,6 @@ package bls import ( "bytes" "crypto/rand" - flowCrypto "github.com/onflow/flow-go/crypto" "github.com/stretchr/testify/require" "testing" ) @@ -208,9 +207,9 @@ func TestVerifyingBLSSignatures(t *testing.T) { } func _generateRandomBLSPrivateKey(t *testing.T) *PrivateKey { - flowPrivateKey, err := flowCrypto.GeneratePrivateKey(SigningAlgorithm, _generateRandomBytes(t, 64)) + privateKey, err := NewPrivateKey() require.NoError(t, err) - return &PrivateKey{flowPrivateKey: flowPrivateKey} + return privateKey } func _generateRandomBytes(t *testing.T, numBytes int) []byte { From c7da6573a1f11880f694c9dc382697874f5197c7 Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Mon, 8 May 2023 15:28:05 -0400 Subject: [PATCH 025/762] Mf/add unjail validator txn type (#520) * Add epoch data type. * Update func naming. * Fix nil error. * Add prefix encoder db util. 
* Rename field to FinalBlockHeight. * Avoid snapshotting issue for now. * Update ancestral record before setting in db. * Incorporate epoch number into PoS txns. * Validate stake lockup period. * Validate stake lockup period. * Clean up test file changes. * Add jailed status to validator entries. * Set stake lockup period to 3 epochs. * Set LastActiveEpochNumber to CurrentEpochNumber. * Add unjail validator txn type. * Add ValidatorEntry.JailedAtEpochNumber field. * Validate sufficient num epochs have passed to unjail. * Resolve merge conflicts more. * Start adding tests for unjail connect logic. * Add unjail validator tests. * Retrigger buildkite ci. * Address PR feedback. --- lib/block_view.go | 7 + lib/block_view_stake_test.go | 18 +- lib/block_view_types.go | 10 +- lib/block_view_validator.go | 342 ++++++++++++++++- lib/block_view_validator_test.go | 616 ++++++++++++++++++++++++++++--- lib/constants.go | 11 + lib/db_utils.go | 17 +- lib/mempool.go | 4 + lib/network.go | 14 +- 9 files changed, 948 insertions(+), 91 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index e9f55bde5..0ce1a150e 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -1373,6 +1373,10 @@ func (bav *UtxoView) DisconnectTransaction(currentTxn *MsgDeSoTxn, txnHash *Bloc case TxnTypeUnlockStake: return bav._disconnectUnlockStake( OperationTypeUnlockStake, currentTxn, txnHash, utxoOpsForTxn, blockHeight) + + case TxnTypeUnjailValidator: + return bav._disconnectUnjailValidator( + OperationTypeUnjailValidator, currentTxn, txnHash, utxoOpsForTxn, blockHeight) } return fmt.Errorf("DisconnectBlock: Unimplemented txn type %v", currentTxn.TxnMeta.GetTxnType().String()) @@ -3311,6 +3315,9 @@ func (bav *UtxoView) _connectTransaction(txn *MsgDeSoTxn, txHash *BlockHash, case TxnTypeUnlockStake: totalInput, totalOutput, utxoOpsForTxn, err = bav._connectUnlockStake(txn, txHash, blockHeight, verifySignatures) + case TxnTypeUnjailValidator: + totalInput, totalOutput, utxoOpsForTxn, err = 
bav._connectUnjailValidator(txn, txHash, blockHeight, verifySignatures) + default: err = fmt.Errorf("ConnectTransaction: Unimplemented txn type %v", txn.TxnMeta.GetTxnType().String()) } diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 8cb71fbc8..fc8360997 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -12,7 +12,6 @@ import ( func TestStaking(t *testing.T) { _testStaking(t, false) _testStaking(t, true) - _testStakingWithDerivedKey(t) } func _testStaking(t *testing.T, flushToDB bool) { @@ -103,9 +102,7 @@ func _testStaking(t *testing.T, flushToDB bool) { registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example.com")}, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, flushToDB) require.NoError(t, err) validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) @@ -755,7 +752,7 @@ func _submitUnlockStakeTxn( return fees, nil } -func _testStakingWithDerivedKey(t *testing.T) { +func TestStakingWithDerivedKey(t *testing.T) { var derivedKeyPriv string var err error @@ -957,9 +954,7 @@ func _testStakingWithDerivedKey(t *testing.T) { registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example1.com")}, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, true, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) } { @@ -967,9 +962,7 @@ func _testStakingWithDerivedKey(t *testing.T) { registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example2.com")}, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m1Pub, m1Priv, registerAsValidatorMetadata, nil, true, - ) + _, 
err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) } { @@ -1783,7 +1776,6 @@ func TestStakeLockupEpochDuration(t *testing.T) { chain.snapshot = nil // For these tests, we set StakeLockupEpochDuration to 3. - // We test the lockup logic in a separate test. params.StakeLockupEpochDuration = 3 // Mine a few blocks to give the senderPkString some money. @@ -1843,7 +1835,7 @@ func TestStakeLockupEpochDuration(t *testing.T) { registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m1.com")}, } - _, _, _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, true) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, true) require.NoError(t, err) validatorEntry, err := newUtxoView().GetValidatorByPKID(m0PKID) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 679d737ec..9aa041678 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -156,9 +156,10 @@ const ( EncoderTypeStakeTxindexMetadata EncoderType = 1000032 EncoderTypeUnstakeTxindexMetadata EncoderType = 1000033 EncoderTypeUnlockStakeTxindexMetadata EncoderType = 1000034 + EncoderTypeUnjailValidatorTxindexMetadata EncoderType = 1000035 // EncoderTypeEndTxIndex encoder type should be at the end and is used for automated tests. - EncoderTypeEndTxIndex EncoderType = 1000035 + EncoderTypeEndTxIndex EncoderType = 1000036 ) // This function translates the EncoderType into an empty DeSoEncoder struct. 
@@ -327,6 +328,8 @@ func (encoderType EncoderType) New() DeSoEncoder { return &UnstakeTxindexMetadata{} case EncoderTypeUnlockStakeTxindexMetadata: return &UnlockStakeTxindexMetadata{} + case EncoderTypeUnjailValidatorTxindexMetadata: + return &UnjailValidatorTxindexMetadata{} default: return nil } @@ -626,8 +629,9 @@ const ( OperationTypeStake OperationType = 41 OperationTypeUnstake OperationType = 42 OperationTypeUnlockStake OperationType = 43 + OperationTypeUnjailValidator OperationType = 44 - // NEXT_TAG = 44 + // NEXT_TAG = 45 ) func (op OperationType) String() string { @@ -718,6 +722,8 @@ func (op OperationType) String() string { return "OperationTypeUnstake" case OperationTypeUnlockStake: return "OperationTypeUnlockStake" + case OperationTypeUnjailValidator: + return "OperationTypeUnjailValidator" } return "OperationTypeUNKNOWN" } diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 14aaa2c28..8993dbdc7 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -324,6 +324,28 @@ func (txnData *UnregisterAsValidatorMetadata) New() DeSoTxnMetadata { return &UnregisterAsValidatorMetadata{} } +// +// TYPES: UnjailValidatorMetadata +// + +type UnjailValidatorMetadata struct{} + +func (txnData *UnjailValidatorMetadata) GetTxnType() TxnType { + return TxnTypeUnjailValidator +} + +func (txnData *UnjailValidatorMetadata) ToBytes(preSignature bool) ([]byte, error) { + return []byte{}, nil +} + +func (txnData *UnjailValidatorMetadata) FromBytes(data []byte) error { + return nil +} + +func (txnData *UnjailValidatorMetadata) New() DeSoTxnMetadata { + return &UnjailValidatorMetadata{} +} + // // TYPES: RegisterAsValidatorTxindexMetadata // @@ -506,6 +528,29 @@ func (txindexMetadata *UnregisterAsValidatorTxindexMetadata) GetEncoderType() En return EncoderTypeUnregisterAsValidatorTxindexMetadata } +// +// TYPES: UnjailValidatorTxindexMetadata +// + +type UnjailValidatorTxindexMetadata struct { +} + +func (txindexMetadata 
*UnjailValidatorTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + return []byte{} +} + +func (txindexMetadata *UnjailValidatorTxindexMetadata) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + return nil +} + +func (txindexMetadata *UnjailValidatorTxindexMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (txindexMetadata *UnjailValidatorTxindexMetadata) GetEncoderType() EncoderType { + return EncoderTypeUnjailValidatorTxindexMetadata +} + // // DB UTILS // @@ -832,7 +877,7 @@ func (bc *Blockchain) CreateUnregisterAsValidatorTxn( } // Validate txn metadata. - if err = utxoView.IsValidUnregisterAsValidatorMetadata(transactorPublicKey, metadata); err != nil { + if err = utxoView.IsValidUnregisterAsValidatorMetadata(transactorPublicKey); err != nil { return nil, 0, 0, 0, errors.Wrapf( err, "Blockchain.CreateUnregisterAsValidatorTxn: invalid txn metadata: ", ) @@ -866,6 +911,82 @@ func (bc *Blockchain) CreateUnregisterAsValidatorTxn( return txn, totalInput, changeAmount, fees, nil } +func (bc *Blockchain) CreateUnjailValidatorTxn( + transactorPublicKey []byte, + metadata *UnjailValidatorMetadata, + extraData map[string][]byte, + minFeeRateNanosPerKB uint64, + mempool *DeSoMempool, + additionalOutputs []*DeSoOutput, +) ( + _txn *MsgDeSoTxn, + _totalInput uint64, + _changeAmount uint64, + _fees uint64, + _err error, +) { + // Create a txn containing the UnjailValidator fields. + txn := &MsgDeSoTxn{ + PublicKey: transactorPublicKey, + TxnMeta: metadata, + TxOutputs: additionalOutputs, + ExtraData: extraData, + // We wait to compute the signature until + // we've added all the inputs and change. + } + + // Create a new UtxoView. If we have access to a mempool object, use + // it to get an augmented view that factors in pending transactions. 
+ utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) + if err != nil { + return nil, 0, 0, 0, errors.Wrap( + err, "Blockchain.CreateUnjailValidatorTxn: problem creating new utxo view: ", + ) + } + if mempool != nil { + utxoView, err = mempool.GetAugmentedUniversalView() + if err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateUnjailValidatorTxn: problem getting augmented utxo view from mempool: ", + ) + } + } + + // Validate txn metadata. + if err = utxoView.IsValidUnjailValidatorMetadata(transactorPublicKey); err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateUnjailValidatorTxn: invalid txn metadata: ", + ) + } + + // We don't need to make any tweaks to the amount because + // it's basically a standard "pay per kilobyte" transaction. + totalInput, spendAmount, changeAmount, fees, err := bc.AddInputsAndChangeToTransaction( + txn, minFeeRateNanosPerKB, mempool, + ) + if err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "Blockchain.CreateUnjailValidatorTxn: problem adding inputs: ", + ) + } + + // Validate that the transaction has at least one input, even if it all goes + // to change. This ensures that the transaction will not be "replayable." + if len(txn.TxInputs) == 0 && bc.blockTip().Height+1 < bc.params.ForkHeights.BalanceModelBlockHeight { + return nil, 0, 0, 0, errors.New( + "Blockchain.CreateUnjailValidatorTxn: txn has zero inputs, try increasing the fee rate", + ) + } + + // Sanity-check that the spendAmount is zero. + if spendAmount != 0 { + return nil, 0, 0, 0, fmt.Errorf( + "Blockchain.CreateUnjailValidatorTxn: spend amount is non-zero: %d", spendAmount, + ) + } + return txn, totalInput, changeAmount, fees, nil +} + // // UTXO VIEW UTILS // @@ -1107,11 +1228,8 @@ func (bav *UtxoView) _connectUnregisterAsValidator( // public key so there is no need to verify anything further. } - // Grab the txn metadata. 
- txMeta := txn.TxnMeta.(*UnregisterAsValidatorMetadata) - - // Validate the txn metadata. - if err = bav.IsValidUnregisterAsValidatorMetadata(txn.PublicKey, txMeta); err != nil { + // Validate the transactor. + if err = bav.IsValidUnregisterAsValidatorMetadata(txn.PublicKey); err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: ") } @@ -1327,6 +1445,156 @@ func (bav *UtxoView) _disconnectUnregisterAsValidator( ) } +func (bav *UtxoView) _connectUnjailValidator( + txn *MsgDeSoTxn, + txHash *BlockHash, + blockHeight uint32, + verifySignatures bool, +) ( + _totalInput uint64, + _totalOutput uint64, + _utxoOps []*UtxoOperation, + _err error, +) { + // Validate the starting block height. + if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { + return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectUnjailValidator: ") + } + + // Validate the txn TxnType. + if txn.TxnMeta.GetTxnType() != TxnTypeUnjailValidator { + return 0, 0, nil, fmt.Errorf( + "_connectUnjailValidator: called with bad TxnType %s", txn.TxnMeta.GetTxnType().String(), + ) + } + + // Connect a basic transfer to get the total input and the + // total output without considering the txn metadata. + totalInput, totalOutput, utxoOpsForTxn, err := bav._connectBasicTransfer( + txn, txHash, blockHeight, verifySignatures, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnjailValidator: ") + } + if verifySignatures { + // _connectBasicTransfer has already checked that the txn is signed + // by the top-level public key, which we take to be the sender's + // public key so there is no need to verify anything further. + } + + // Validate the transactor. 
+ if err = bav.IsValidUnjailValidatorMetadata(txn.PublicKey); err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnjailValidator: ") + } + + // At this point, we have validated in IsValidUnjailValidatorMetadata() + // that the ValidatorEntry exists, belongs to the transactor, is jailed, + // and a sufficient number of epochs have elapsed for this validator to + // be unjailed. + + // Convert TransactorPublicKey to TransactorPKID. + transactorPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return 0, 0, nil, errors.Wrapf(RuleErrorInvalidValidatorPKID, "_connectUnjailValidator: ") + } + + // Retrieve the existing ValidatorEntry that will be overwritten. + // This ValidatorEntry will be restored if we disconnect this txn. + prevValidatorEntry, err := bav.GetValidatorByPKID(transactorPKIDEntry.PKID) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnjailValidator: ") + } + if prevValidatorEntry == nil || prevValidatorEntry.isDeleted { + return 0, 0, nil, errors.Wrapf(RuleErrorValidatorNotFound, "_connectUnjailValidator: ") + } + + // Copy the existing ValidatorEntry. + currentValidatorEntry := prevValidatorEntry.Copy() + + // Retrieve the CurrentEpochNumber. + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnjailValidator: error retrieving CurrentEpochNumber: ") + } + + // Update LastActiveAtEpochNumber to CurrentEpochNumber. + currentValidatorEntry.LastActiveAtEpochNumber = currentEpochNumber + + // Reset JailedAtEpochNumber to zero. + currentValidatorEntry.JailedAtEpochNumber = 0 + + // Merge ExtraData with existing ExtraData. + currentValidatorEntry.ExtraData = mergeExtraData(prevValidatorEntry.ExtraData, txn.ExtraData) + + // Delete the PrevValidatorEntry. + bav._deleteValidatorEntryMappings(prevValidatorEntry) + + // Set the CurrentValidatorEntry. 
+ bav._setValidatorEntryMappings(currentValidatorEntry) + + // Add a UTXO operation + utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + Type: OperationTypeUnjailValidator, + PrevValidatorEntry: prevValidatorEntry, + }) + return totalInput, totalOutput, utxoOpsForTxn, nil +} + +func (bav *UtxoView) _disconnectUnjailValidator( + operationType OperationType, + currentTxn *MsgDeSoTxn, + txHash *BlockHash, + utxoOpsForTxn []*UtxoOperation, + blockHeight uint32, +) error { + // Validate the starting block height. + if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { + return errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_disconnectUnjailValidator: ") + } + + // Validate the last operation is an UnjailValidator operation. + if len(utxoOpsForTxn) == 0 { + return fmt.Errorf("_disconnectUnjailValidator: utxoOperations are missing") + } + operationIndex := len(utxoOpsForTxn) - 1 + operationData := utxoOpsForTxn[operationIndex] + if operationData.Type != OperationTypeUnjailValidator { + return fmt.Errorf( + "_disconnectUnjailValidator: trying to revert %v but found %v", + OperationTypeUnjailValidator, + operationData.Type, + ) + } + + // Convert TransactorPublicKey to TransactorPKID. + transactorPKIDEntry := bav.GetPKIDForPublicKey(currentTxn.PublicKey) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return errors.Wrapf(RuleErrorInvalidValidatorPKID, "_disconnectUnjailValidator: ") + } + + // Delete the current ValidatorEntry. + currentValidatorEntry, err := bav.GetValidatorByPKID(transactorPKIDEntry.PKID) + if err != nil { + return errors.Wrapf(err, "_disconnectUnjailValidator: ") + } + if currentValidatorEntry == nil || currentValidatorEntry.isDeleted { + return errors.Wrapf(RuleErrorValidatorNotFound, "_disconnectUnjailValidator: ") + } + bav._deleteValidatorEntryMappings(currentValidatorEntry) + + // Restore the PrevValidatorEntry. 
+ prevValidatorEntry := operationData.PrevValidatorEntry + if prevValidatorEntry == nil { + return errors.New("_disconnectUnjailValidator: PrevValidatorEntry is nil") + } + bav._setValidatorEntryMappings(prevValidatorEntry) + + // Disconnect the BasicTransfer. + return bav._disconnectBasicTransfer( + currentTxn, txHash, utxoOpsForTxn[:operationIndex], blockHeight, + ) +} + func (bav *UtxoView) IsValidRegisterAsValidatorMetadata(transactorPublicKey []byte, metadata *RegisterAsValidatorMetadata) error { // Validate ValidatorPKID. transactorPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKey) @@ -1384,7 +1652,7 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata(transactorPublicKey []by return nil } -func (bav *UtxoView) IsValidUnregisterAsValidatorMetadata(transactorPublicKey []byte, metadata *UnregisterAsValidatorMetadata) error { +func (bav *UtxoView) IsValidUnregisterAsValidatorMetadata(transactorPublicKey []byte) error { // Validate ValidatorPKID. transactorPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKey) if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { @@ -1396,13 +1664,48 @@ func (bav *UtxoView) IsValidUnregisterAsValidatorMetadata(transactorPublicKey [] if err != nil { return errors.Wrapf(err, "UtxoView.IsValidUnregisterAsValidatorMetadata: ") } - if validatorEntry == nil { + if validatorEntry == nil || validatorEntry.isDeleted { return errors.Wrapf(RuleErrorValidatorNotFound, "UtxoView.IsValidUnregisterAsValidatorMetadata: ") } return nil } +func (bav *UtxoView) IsValidUnjailValidatorMetadata(transactorPublicKey []byte) error { + // Validate ValidatorPKID. + transactorPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKey) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return errors.Wrapf(RuleErrorInvalidValidatorPKID, "UtxoView.IsValidUnjailValidatorMetadata: ") + } + + // Validate ValidatorEntry exists. 
+ validatorEntry, err := bav.GetValidatorByPKID(transactorPKIDEntry.PKID) + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: ") + } + if validatorEntry == nil || validatorEntry.isDeleted { + return errors.Wrapf(RuleErrorValidatorNotFound, "UtxoView.IsValidUnjailValidatorMetadata: ") + } + + // Validate ValidatorEntry is jailed. + if validatorEntry.Status() != ValidatorStatusJailed { + return errors.Wrapf(RuleErrorUnjailingNonjailedValidator, "UtxoView.IsValidUnjailValidatorMetadata: ") + } + + // Retrieve CurrentEpochNumber. + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error retrieving CurrentEpochNumber: ") + } + + // Validate sufficient epochs have elapsed for validator to be unjailed. + if validatorEntry.JailedAtEpochNumber+bav.Params.ValidatorJailEpochDuration > currentEpochNumber { + return errors.Wrapf(RuleErrorUnjailingValidatorTooEarly, "UtxoView.IsValidUnjailValidatorMetadata: ") + } + + return nil +} + func (bav *UtxoView) GetValidatorByPKID(pkid *PKID) (*ValidatorEntry, error) { // First check the UtxoView. @@ -1707,6 +2010,27 @@ func (bav *UtxoView) CreateUnregisterAsValidatorTxindexMetadata( return txindexMetadata, affectedPublicKeys } +func (bav *UtxoView) CreateUnjailValidatorTxindexMetadata( + utxoOp *UtxoOperation, + txn *MsgDeSoTxn, +) ( + *UnjailValidatorTxindexMetadata, + []*AffectedPublicKey, +) { + // Cast ValidatorPublicKey to ValidatorPublicKeyBase58Check. + validatorPublicKeyBase58Check := PkToString(txn.PublicKey, bav.Params) + + // Construct AffectedPublicKeys. 
+ affectedPublicKeys := []*AffectedPublicKey{ + { + PublicKeyBase58Check: validatorPublicKeyBase58Check, + Metadata: "UnjailedValidatorPublicKeyBase58Check", + }, + } + + return &UnjailValidatorTxindexMetadata{}, affectedPublicKeys +} + // // CONSTANTS // @@ -1719,5 +2043,7 @@ const RuleErrorValidatorInvalidDomain RuleError = "RuleErrorValidatorInvalidDoma const RuleErrorValidatorDuplicateDomains RuleError = "RuleErrorValidatorDuplicateDomains" const RuleErrorValidatorNotFound RuleError = "RuleErrorValidatorNotFound" const RuleErrorValidatorDisablingExistingDelegatedStakers RuleError = "RuleErrorValidatorDisablingExistingDelegatedStakers" +const RuleErrorUnjailingNonjailedValidator RuleError = "RuleErrorUnjailingNonjailedValidator" +const RuleErrorUnjailingValidatorTooEarly RuleError = "RuleErrorUnjailingValidatorTooEarly" const MaxValidatorNumDomains int = 12 diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 1ce1e2c9c..0848f404f 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -13,7 +13,6 @@ import ( func TestValidatorRegistration(t *testing.T) { _testValidatorRegistration(t, false) _testValidatorRegistration(t, true) - _testValidatorRegistrationWithDerivedKey(t) } func _testValidatorRegistration(t *testing.T, flushToDB bool) { @@ -93,9 +92,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { Domains: [][]byte{[]byte("https://example.com")}, DisableDelegatedStake: false, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) require.Contains(t, err.Error(), RuleErrorProofofStakeTxnBeforeBlockHeight) @@ -109,9 +106,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { Domains: [][]byte{}, DisableDelegatedStake: false, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, 
m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) require.Contains(t, err.Error(), RuleErrorValidatorNoDomains) } @@ -125,9 +120,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { Domains: domains, DisableDelegatedStake: false, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) require.Contains(t, err.Error(), RuleErrorValidatorTooManyDomains) } @@ -137,9 +130,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { Domains: [][]byte{[]byte("InvalidURL")}, DisableDelegatedStake: false, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) require.Contains(t, err.Error(), RuleErrorValidatorInvalidDomain) } @@ -149,9 +140,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { Domains: [][]byte{[]byte("https://example.com"), []byte("https://example.com")}, DisableDelegatedStake: false, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) require.Contains(t, err.Error(), RuleErrorValidatorDuplicateDomains) } @@ -162,9 +151,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { DisableDelegatedStake: false, } extraData := map[string][]byte{"TestKey": []byte("TestValue1")} - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, 
m0Priv, registerMetadata, extraData, flushToDB) require.NoError(t, err) } { @@ -201,9 +188,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { DisableDelegatedStake: false, } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) require.NoError(t, err) } { @@ -219,18 +204,18 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { } { // Sad path: unregister validator that doesn't exist - _, _, _, err = _submitUnregisterAsValidatorTxn(testMeta, m1Pub, m1Priv, flushToDB) + _, err = _submitUnregisterAsValidatorTxn(testMeta, m1Pub, m1Priv, flushToDB) require.Error(t, err) require.Contains(t, err.Error(), RuleErrorValidatorNotFound) } { // Happy path: unregister validator - _, _, _, err = _submitUnregisterAsValidatorTxn(testMeta, m0Pub, m0Priv, flushToDB) + _, err = _submitUnregisterAsValidatorTxn(testMeta, m0Pub, m0Priv, flushToDB) require.NoError(t, err) } { // Sad path: unregister validator that doesn't exist - _, _, _, err = _submitUnregisterAsValidatorTxn(testMeta, m0Pub, m0Priv, flushToDB) + _, err = _submitUnregisterAsValidatorTxn(testMeta, m0Pub, m0Priv, flushToDB) require.Error(t, err) require.Contains(t, err.Error(), RuleErrorValidatorNotFound) } @@ -265,7 +250,7 @@ func _submitRegisterAsValidatorTxn( metadata *RegisterAsValidatorMetadata, extraData map[string][]byte, flushToDB bool, -) (_utxoOps []*UtxoOperation, _txn *MsgDeSoTxn, _height uint32, _err error) { +) (_fees uint64, _err error) { // Record transactor's prevBalance. 
prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) @@ -283,7 +268,7 @@ func _submitRegisterAsValidatorTxn( []*DeSoOutput{}, ) if err != nil { - return nil, nil, 0, err + return 0, err } require.Equal(testMeta.t, totalInputMake, changeAmountMake+feesMake) @@ -300,7 +285,7 @@ func _submitRegisterAsValidatorTxn( false, ) if err != nil { - return nil, nil, 0, err + return 0, err } require.Equal(testMeta.t, totalInput, totalOutput+fees) require.Equal(testMeta.t, totalInput, totalInputMake) @@ -314,7 +299,7 @@ func _submitRegisterAsValidatorTxn( testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) testMeta.txnOps = append(testMeta.txnOps, utxoOps) testMeta.txns = append(testMeta.txns, txn) - return utxoOps, txn, testMeta.savedHeight, nil + return fees, nil } func _submitUnregisterAsValidatorTxn( @@ -322,7 +307,7 @@ func _submitUnregisterAsValidatorTxn( transactorPublicKeyBase58Check string, transactorPrivateKeyBase58Check string, flushToDB bool, -) (_utxoOps []*UtxoOperation, _txn *MsgDeSoTxn, _height uint32, _err error) { +) (_fees uint64, _err error) { // Record transactor's prevBalance. 
prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) @@ -340,7 +325,7 @@ func _submitUnregisterAsValidatorTxn( []*DeSoOutput{}, ) if err != nil { - return nil, nil, 0, err + return 0, err } require.Equal(testMeta.t, totalInputMake, changeAmountMake+feesMake) @@ -357,7 +342,7 @@ func _submitUnregisterAsValidatorTxn( false, ) if err != nil { - return nil, nil, 0, err + return 0, err } require.Equal(testMeta.t, totalInput, totalOutput+fees) require.Equal(testMeta.t, totalInput, totalInputMake) @@ -371,10 +356,10 @@ func _submitUnregisterAsValidatorTxn( testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) testMeta.txnOps = append(testMeta.txnOps, utxoOps) testMeta.txns = append(testMeta.txns, txn) - return utxoOps, txn, testMeta.savedHeight, nil + return fees, nil } -func _testValidatorRegistrationWithDerivedKey(t *testing.T) { +func TestValidatorRegistrationWithDerivedKey(t *testing.T) { var err error // Initialize balance model fork heights. @@ -719,9 +704,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m0.com")}, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) // Verify top validators. @@ -736,9 +719,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m1.com")}, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m1Pub, m1Priv, registerMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) // Verify top validators. 
@@ -751,9 +732,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m2.com")}, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m2Pub, m2Priv, registerMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m2Pub, m2Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) // Verify top validators. @@ -837,7 +816,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { } { // m2 unregisters as validator. - _, _, _, err = _submitUnregisterAsValidatorTxn(testMeta, m2Pub, m2Priv, flushToDB) + _, err = _submitUnregisterAsValidatorTxn(testMeta, m2Pub, m2Priv, flushToDB) require.NoError(t, err) // Verify top validators. @@ -1143,9 +1122,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { Domains: [][]byte{[]byte("https://m0.com")}, DisableDelegatedStake: false, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) @@ -1163,9 +1140,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { Domains: [][]byte{[]byte("https://m0.com")}, DisableDelegatedStake: true, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) @@ -1207,9 +1182,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { Domains: [][]byte{[]byte("https://m0.com")}, DisableDelegatedStake: false, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, 
m0Priv, registerMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) @@ -1238,9 +1211,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { Domains: [][]byte{[]byte("https://m0.com")}, DisableDelegatedStake: true, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) require.Contains(t, err.Error(), RuleErrorValidatorDisablingExistingDelegatedStakers) } @@ -1336,9 +1307,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m0.com")}, } - _, _, _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) @@ -1414,7 +1383,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { } { // m0 unregisters as a validator. - _, _, _, err = _submitUnregisterAsValidatorTxn(testMeta, m0Pub, m0Priv, flushToDB) + _, err = _submitUnregisterAsValidatorTxn(testMeta, m0Pub, m0Priv, flushToDB) require.NoError(t, err) // m0's ValidatorEntry is deleted. 
@@ -1454,3 +1423,530 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NoError(t, mempool.universalUtxoView.FlushToDb(blockHeight)) _executeAllTestRollbackAndFlush(testMeta) } + +func TestUnjailValidator(t *testing.T) { + _testUnjailValidator(t, false) + _testUnjailValidator(t, true) +} + +func _testUnjailValidator(t *testing.T, flushToDB bool) { + var validatorEntry *ValidatorEntry + var err error + + // Initialize balance model fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize PoS fork height. + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + chain.snapshot = nil + + // For these tests, we set ValidatorJailEpochDuration to 3. + params.ValidatorJailEpochDuration = 3 + + utxoView := func() *UtxoView { + newUtxoView, err := mempool.GetAugmentedUniversalView() + require.NoError(t, err) + return newUtxoView + } + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. 
+ blockHeight := uint64(chain.blockTip().Height + 1) + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID + + // Seed a CurrentEpochEntry. + epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + currentEpochNumber, err := utxoView().GetCurrentEpochNumber() + require.NoError(t, err) + + { + // ParamUpdater set min fee rate + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } + { + // m0 registers as a validator. 
+ registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + } + extraData := map[string][]byte{"TestKey": []byte("TestValue1")} + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) + require.NoError(t, err) + + validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.ExtraData["TestKey"], []byte("TestValue1")) + } + { + // RuleErrorUnjailingNonjailedValidator + _, err = _submitUnjailValidatorTxn(testMeta, m0Pub, m0Priv, nil, flushToDB) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorUnjailingNonjailedValidator) + } + { + // m0 is jailed. Since this update takes place outside a transaction, + // we cannot test rollbacks. We will run into an error where m0 is + // trying to unjail himself, but he was never jailed. + + // Delete m0's ValidatorEntry from the UtxoView. + delete(mempool.universalUtxoView.ValidatorMapKeyToValidatorEntry, validatorEntry.ToMapKey()) + delete(mempool.readOnlyUtxoView.ValidatorMapKeyToValidatorEntry, validatorEntry.ToMapKey()) + + // Set JailedAtEpochNumber. + validatorEntry.JailedAtEpochNumber = currentEpochNumber + + // Store m0's ValidatorEntry in the db. + tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + tmpUtxoView._setValidatorEntryMappings(validatorEntry) + require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) + + // Verify m0 is jailed. + validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) + } + { + // m1 stakes with m0. Succeeds. You can stake to a jailed validator. 
+ stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) + require.NoError(t, err) + + stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) + require.NoError(t, err) + require.NotNil(t, stakeEntry) + } + { + // m1 unstakes from m0. Succeeds. You can unstake from a jailed validator. + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(100), + } + _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) + require.NoError(t, err) + + stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) + require.NoError(t, err) + require.Nil(t, stakeEntry) + + lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) + require.NoError(t, err) + require.NotNil(t, lockedStakeEntry) + } + { + // RuleErrorValidatorNotFound + _, err = _submitUnjailValidatorTxn(testMeta, m1Pub, m1Priv, nil, flushToDB) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorNotFound) + } + { + // RuleErrorUnjailingValidatorTooEarly + _, err = _submitUnjailValidatorTxn(testMeta, m0Pub, m0Priv, nil, flushToDB) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorUnjailingValidatorTooEarly) + } + { + // Simulate three epochs passing by seeding a new CurrentEpochEntry. + + // Delete the CurrentEpochEntry from the UtxoView. + mempool.universalUtxoView.CurrentEpochEntry = nil + mempool.readOnlyUtxoView.CurrentEpochEntry = nil + + // Store a new CurrentEpochEntry in the db. 
+ epochUtxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + epochUtxoView._setCurrentEpochEntry( + &EpochEntry{EpochNumber: currentEpochNumber + 3, FinalBlockHeight: blockHeight + 10}, + ) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + + // Verify CurrentEpochNumber. + currentEpochNumber, err = utxoView().GetCurrentEpochNumber() + require.NoError(t, err) + require.Equal(t, currentEpochNumber, uint64(4)) + } + { + // RuleErrorProofofStakeTxnBeforeBlockHeight + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = math.MaxUint32 + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + _, err = _submitUnjailValidatorTxn(testMeta, m0Pub, m0Priv, nil, flushToDB) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorProofofStakeTxnBeforeBlockHeight) + + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + } + { + // m0 unjails himself. 
+ validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) + require.Equal(t, validatorEntry.LastActiveAtEpochNumber, uint64(1)) + + extraData := map[string][]byte{"TestKey": []byte("TestValue2")} + _, err = _submitUnjailValidatorTxn(testMeta, m0Pub, m0Priv, extraData, flushToDB) + require.NoError(t, err) + + validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.Status(), ValidatorStatusActive) + require.Equal(t, validatorEntry.LastActiveAtEpochNumber, uint64(4)) + require.Equal(t, validatorEntry.ExtraData["TestKey"], []byte("TestValue2")) + } +} + +func TestUnjailValidatorWithDerivedKey(t *testing.T) { + var validatorEntry *ValidatorEntry + var derivedKeyPriv string + var err error + + // Initialize balance model fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize fork heights. + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. 
+ blockHeight := uint64(chain.blockTip().Height) + 1 + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + senderPkBytes, _, err := Base58CheckDecode(senderPkString) + require.NoError(t, err) + senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) + require.NoError(t, err) + senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID + + newUtxoView := func() *UtxoView { + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + return utxoView + } + + _submitAuthorizeDerivedKeyUnjailValidatorTxn := func(count uint64) (string, error) { + utxoView := newUtxoView() + + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + TxnTypeAuthorizeDerivedKey: 1, + TxnTypeUnjailValidator: count, + }, + } + + derivedKeyMetadata, derivedKeyAuthPriv := _getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit( + t, senderPrivKey, blockHeight+5, txnSpendingLimit, false, blockHeight, + ) + derivedKeyAuthPrivBase58Check := Base58CheckEncode(derivedKeyAuthPriv.Serialize(), true, params) + + prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, senderPkString) + + utxoOps, txn, _, err := _doAuthorizeTxnWithExtraDataAndSpendingLimits( + testMeta, + utxoView, + testMeta.feeRateNanosPerKb, + senderPkBytes, + derivedKeyMetadata.DerivedPublicKey, + derivedKeyAuthPrivBase58Check, + derivedKeyMetadata.ExpirationBlock, + derivedKeyMetadata.AccessSignature, + false, + nil, + nil, + txnSpendingLimit, + ) + if err != nil { + return "", err + } + require.NoError(t, 
utxoView.FlushToDb(blockHeight)) + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + + err = utxoView.ValidateDerivedKey( + senderPkBytes, derivedKeyMetadata.DerivedPublicKey, blockHeight, + ) + require.NoError(t, err) + return derivedKeyAuthPrivBase58Check, nil + } + + _submitUnjailValidatorTxnWithDerivedKey := func(transactorPkBytes []byte, derivedKeyPrivBase58Check string) error { + utxoView := newUtxoView() + // Construct txn. + txn, _, _, _, err := testMeta.chain.CreateUnjailValidatorTxn( + transactorPkBytes, + &UnjailValidatorMetadata{}, + make(map[string][]byte), + testMeta.feeRateNanosPerKb, + mempool, + []*DeSoOutput{}, + ) + if err != nil { + return err + } + // Sign txn. + _signTxnWithDerivedKeyAndType(t, txn, derivedKeyPrivBase58Check, 1) + // Store the original transactor balance. + transactorPublicKeyBase58Check := Base58CheckEncode(transactorPkBytes, false, params) + prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) + // Connect txn. + utxoOps, _, _, _, err := utxoView.ConnectTransaction( + txn, + txn.Hash(), + getTxnSize(*txn), + testMeta.savedHeight, + true, + false, + ) + if err != nil { + return err + } + // Flush UTXO view to the db. + require.NoError(t, utxoView.FlushToDb(blockHeight)) + // Track txn for rolling back. + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + return nil + } + + // Seed a CurrentEpochEntry. 
+ epochUtxoView := newUtxoView() + epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + currentEpochNumber, err := newUtxoView().GetCurrentEpochNumber() + require.NoError(t, err) + + { + // ParamUpdater set min fee rate + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } + { + // sender registers as a validator. + registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + } + _, err = _submitRegisterAsValidatorTxn(testMeta, senderPkString, senderPrivString, registerMetadata, nil, true) + require.NoError(t, err) + + validatorEntry, err = newUtxoView().GetValidatorByPKID(senderPKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + } + { + // sender is jailed. Since this update takes place outside a transaction, + // we cannot test rollbacks. We will run into an error where sender is + // trying to unjail himself, but he was never jailed. + + // Delete sender's ValidatorEntry from the UtxoView. + delete(mempool.universalUtxoView.ValidatorMapKeyToValidatorEntry, validatorEntry.ToMapKey()) + delete(mempool.readOnlyUtxoView.ValidatorMapKeyToValidatorEntry, validatorEntry.ToMapKey()) + + // Set JailedAtEpochNumber. + validatorEntry.JailedAtEpochNumber = currentEpochNumber + + // Store sender's ValidatorEntry in the db. + tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + tmpUtxoView._setValidatorEntryMappings(validatorEntry) + require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) + + // Verify sender is jailed. 
+ validatorEntry, err = newUtxoView().GetValidatorByPKID(senderPKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) + } + { + // sender creates a DerivedKey that can perform one UnjailValidator txn. + derivedKeyPriv, err = _submitAuthorizeDerivedKeyUnjailValidatorTxn(1) + require.NoError(t, err) + } + { + // RuleErrorUnjailingValidatorTooEarly + err = _submitUnjailValidatorTxnWithDerivedKey(senderPkBytes, derivedKeyPriv) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorUnjailingValidatorTooEarly) + } + { + // Simulate three epochs passing by seeding a new CurrentEpochEntry. + + // Delete the CurrentEpochEntry from the UtxoView. + mempool.universalUtxoView.CurrentEpochEntry = nil + mempool.readOnlyUtxoView.CurrentEpochEntry = nil + + // Store a new CurrentEpochEntry in the db. + epochUtxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + epochUtxoView._setCurrentEpochEntry( + &EpochEntry{EpochNumber: currentEpochNumber + 3, FinalBlockHeight: blockHeight + 10}, + ) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + + // Verify CurrentEpochNumber. + currentEpochNumber, err = newUtxoView().GetCurrentEpochNumber() + require.NoError(t, err) + require.Equal(t, currentEpochNumber, uint64(4)) + } + { + // sender unjails himself using a DerivedKey. 
+ validatorEntry, err = newUtxoView().GetValidatorByPKID(senderPKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) + require.Equal(t, validatorEntry.LastActiveAtEpochNumber, uint64(1)) + + err = _submitUnjailValidatorTxnWithDerivedKey(senderPkBytes, derivedKeyPriv) + require.NoError(t, err) + + validatorEntry, err = newUtxoView().GetValidatorByPKID(senderPKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.Status(), ValidatorStatusActive) + require.Equal(t, validatorEntry.LastActiveAtEpochNumber, uint64(4)) + } +} + +func _submitUnjailValidatorTxn( + testMeta *TestMeta, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + extraData map[string][]byte, + flushToDB bool, +) (_fees uint64, _err error) { + // Record transactor's prevBalance. + prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) + + // Convert PublicKeyBase58Check to PkBytes. + updaterPkBytes, _, err := Base58CheckDecode(transactorPublicKeyBase58Check) + require.NoError(testMeta.t, err) + + // Create the transaction. + txn, totalInputMake, changeAmountMake, feesMake, err := testMeta.chain.CreateUnjailValidatorTxn( + updaterPkBytes, + &UnjailValidatorMetadata{}, + extraData, + testMeta.feeRateNanosPerKb, + testMeta.mempool, + []*DeSoOutput{}, + ) + if err != nil { + return 0, err + } + require.Equal(testMeta.t, totalInputMake, changeAmountMake+feesMake) + + // Sign the transaction now that its inputs are set up. + _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) + + // Connect the transaction. 
+ utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( + txn, + txn.Hash(), + getTxnSize(*txn), + testMeta.savedHeight, + true, + false, + ) + if err != nil { + return 0, err + } + require.Equal(testMeta.t, totalInput, totalOutput+fees) + require.Equal(testMeta.t, totalInput, totalInputMake) + require.Equal(testMeta.t, OperationTypeUnjailValidator, utxoOps[len(utxoOps)-1].Type) + if flushToDB { + require.NoError(testMeta.t, testMeta.mempool.universalUtxoView.FlushToDb(uint64(testMeta.savedHeight))) + } + require.NoError(testMeta.t, testMeta.mempool.RegenerateReadOnlyView()) + + // Record the txn. + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + return fees, nil +} diff --git a/lib/constants.go b/lib/constants.go index e8fd7360d..48e745653 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -601,6 +601,11 @@ type DeSoParams struct { // TODO: Move this to GlobalParamsEntry. StakeLockupEpochDuration uint64 + // ValidatorJailEpochDuration is the number of epochs that a validator must + // wait after being jailed before submitting an UnjailValidator txn. + // TODO: Move this to GlobalParamsEntry. + ValidatorJailEpochDuration uint64 + ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -976,6 +981,9 @@ var DeSoMainnetParams = DeSoParams{ // Unstaked stake can be unlocked after a minimum of N elapsed epochs. StakeLockupEpochDuration: uint64(3), + // Jailed validators can be unjailed after a minimum of N elapsed epochs. 
+ ValidatorJailEpochDuration: uint64(3), + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1207,6 +1215,9 @@ var DeSoTestnetParams = DeSoParams{ // Unstaked stake can be unlocked after a minimum of N elapsed epochs. StakeLockupEpochDuration: uint64(3), + // Jailed validators can be unjailed after a minimum of N elapsed epochs. + ValidatorJailEpochDuration: uint64(3), + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), diff --git a/lib/db_utils.go b/lib/db_utils.go index f0f21f79e..cb3892d59 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -6804,6 +6804,7 @@ type TransactionMetadata struct { StakeTxindexMetadata *StakeTxindexMetadata `json:",omitempty"` UnstakeTxindexMetadata *UnstakeTxindexMetadata `json:",omitempty"` UnlockStakeTxindexMetadata *UnlockStakeTxindexMetadata `json:",omitempty"` + UnjailValidatorTxindexMetadata *UnjailValidatorTxindexMetadata `json:",omitempty"` } func (txnMeta *TransactionMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -6896,6 +6897,8 @@ func (txnMeta *TransactionMetadata) RawEncodeWithoutMetadata(blockHeight uint64, data = append(data, EncodeToBytes(blockHeight, txnMeta.UnstakeTxindexMetadata, skipMetadata...)...) // encoding UnlockStakeTxindexMetadata data = append(data, EncodeToBytes(blockHeight, txnMeta.UnlockStakeTxindexMetadata, skipMetadata...)...) + // encoding UnjailValidatorTxindexMetadata + data = append(data, EncodeToBytes(blockHeight, txnMeta.UnjailValidatorTxindexMetadata, skipMetadata...)...) 
} return data @@ -7150,23 +7153,27 @@ func (txnMeta *TransactionMetadata) RawDecodeWithoutMetadata(blockHeight uint64, if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { // decoding RegisterAsValidatorTxindexMetadata if txnMeta.RegisterAsValidatorTxindexMetadata, err = DecodeDeSoEncoder(&RegisterAsValidatorTxindexMetadata{}, rr); err != nil { - return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading RegisterAsValidatorTxindexMetadata") + return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading RegisterAsValidatorTxindexMetadata: ") } // decoding UnregisterAsValidatorTxindexMetadata if txnMeta.UnregisterAsValidatorTxindexMetadata, err = DecodeDeSoEncoder(&UnregisterAsValidatorTxindexMetadata{}, rr); err != nil { - return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading UnregisterAsValidatorTxindexMetadata") + return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading UnregisterAsValidatorTxindexMetadata: ") } // decoding StakeTxindexMetadata if txnMeta.StakeTxindexMetadata, err = DecodeDeSoEncoder(&StakeTxindexMetadata{}, rr); err != nil { - return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading StakeTxindexMetadata") + return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading StakeTxindexMetadata: ") } // decoding UnstakeTxindexMetadata if txnMeta.UnstakeTxindexMetadata, err = DecodeDeSoEncoder(&UnstakeTxindexMetadata{}, rr); err != nil { - return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading UnstakeTxindexMetadata") + return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading UnstakeTxindexMetadata: ") } // decoding UnlockStakeTxindexMetadata if txnMeta.UnlockStakeTxindexMetadata, err = DecodeDeSoEncoder(&UnlockStakeTxindexMetadata{}, rr); err != nil { - return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading UnlockStakeTxindexMetadata") + return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading 
UnlockStakeTxindexMetadata: ") + } + // decoding UnjailValidatorTxindexMetadata + if txnMeta.UnjailValidatorTxindexMetadata, err = DecodeDeSoEncoder(&UnjailValidatorTxindexMetadata{}, rr); err != nil { + return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading UnjailValidatorTxindexMetadata: ") } } diff --git a/lib/mempool.go b/lib/mempool.go index c5d3626d4..6e7373986 100644 --- a/lib/mempool.go +++ b/lib/mempool.go @@ -1961,6 +1961,10 @@ func ComputeTransactionMetadata(txn *MsgDeSoTxn, utxoView *UtxoView, blockHash * txindexMetadata, affectedPublicKeys := utxoView.CreateUnlockStakeTxindexMetadata(utxoOps[len(utxoOps)-1], txn) txnMeta.UnlockStakeTxindexMetadata = txindexMetadata txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, affectedPublicKeys...) + case TxnTypeUnjailValidator: + txindexMetadata, affectedPublicKeys := utxoView.CreateUnjailValidatorTxindexMetadata(utxoOps[len(utxoOps)-1], txn) + txnMeta.UnjailValidatorTxindexMetadata = txindexMetadata + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, affectedPublicKeys...) 
} return txnMeta } diff --git a/lib/network.go b/lib/network.go index ddaa1e342..c096f701f 100644 --- a/lib/network.go +++ b/lib/network.go @@ -244,8 +244,9 @@ const ( TxnTypeStake TxnType = 36 TxnTypeUnstake TxnType = 37 TxnTypeUnlockStake TxnType = 38 + TxnTypeUnjailValidator TxnType = 39 - // NEXT_ID = 39 + // NEXT_ID = 40 ) type TxnString string @@ -290,6 +291,7 @@ const ( TxnStringStake TxnString = "STAKE" TxnStringUnstake TxnString = "UNSTAKE" TxnStringUnlockStake TxnString = "UNLOCK_STAKE" + TxnStringUnjailValidator TxnString = "UNJAIL_VALIDATOR" ) var ( @@ -302,7 +304,7 @@ var ( TxnTypeDAOCoin, TxnTypeDAOCoinTransfer, TxnTypeDAOCoinLimitOrder, TxnTypeCreateUserAssociation, TxnTypeDeleteUserAssociation, TxnTypeCreatePostAssociation, TxnTypeDeletePostAssociation, TxnTypeAccessGroup, TxnTypeAccessGroupMembers, TxnTypeNewMessage, TxnTypeRegisterAsValidator, - TxnTypeUnregisterAsValidator, TxnTypeStake, TxnTypeUnstake, TxnTypeUnlockStake, + TxnTypeUnregisterAsValidator, TxnTypeStake, TxnTypeUnstake, TxnTypeUnlockStake, TxnTypeUnjailValidator, } AllTxnString = []TxnString{ TxnStringUnset, TxnStringBlockReward, TxnStringBasicTransfer, TxnStringBitcoinExchange, TxnStringPrivateMessage, @@ -313,7 +315,7 @@ var ( TxnStringDAOCoin, TxnStringDAOCoinTransfer, TxnStringDAOCoinLimitOrder, TxnStringCreateUserAssociation, TxnStringDeleteUserAssociation, TxnStringCreatePostAssociation, TxnStringDeletePostAssociation, TxnStringAccessGroup, TxnStringAccessGroupMembers, TxnStringNewMessage, TxnStringRegisterAsValidator, - TxnStringUnregisterAsValidator, TxnStringStake, TxnStringUnstake, TxnStringUnlockStake, + TxnStringUnregisterAsValidator, TxnStringStake, TxnStringUnstake, TxnStringUnlockStake, TxnStringUnjailValidator, } ) @@ -403,6 +405,8 @@ func (txnType TxnType) GetTxnString() TxnString { return TxnStringUnstake case TxnTypeUnlockStake: return TxnStringUnlockStake + case TxnTypeUnjailValidator: + return TxnStringUnjailValidator default: return TxnStringUndefined } @@ 
-486,6 +490,8 @@ func GetTxnTypeFromString(txnString TxnString) TxnType { return TxnTypeUnstake case TxnStringUnlockStake: return TxnTypeUnlockStake + case TxnStringUnjailValidator: + return TxnTypeUnjailValidator default: // TxnTypeUnset means we couldn't find a matching txn type return TxnTypeUnset @@ -577,6 +583,8 @@ func NewTxnMetadata(txType TxnType) (DeSoTxnMetadata, error) { return (&UnstakeMetadata{}).New(), nil case TxnTypeUnlockStake: return (&UnlockStakeMetadata{}).New(), nil + case TxnTypeUnjailValidator: + return (&UnjailValidatorMetadata{}).New(), nil default: return nil, fmt.Errorf("NewTxnMetadata: Unrecognized TxnType: %v; make sure you add the new type of transaction to NewTxnMetadata", txType) } From 5d8abfc68de67e9d845a29bedd8b0c861c7db9fb Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 8 May 2023 16:10:10 -0400 Subject: [PATCH 026/762] Dont error if parsing empty string or bytes. --- bls/signature.go | 10 +++++----- bls/signature_test.go | 42 +++++++++++++++++++++--------------------- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/bls/signature.go b/bls/signature.go index 35f3e6f85..1b7258939 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -84,7 +84,7 @@ func (privateKey *PrivateKey) ToString() string { func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { if privateKeyString == "" { - return nil, errors.New("empty bls.PrivateKey string provided") + return nil, nil } // Chop off leading 0x, if exists. Otherwise, does nothing. 
privateKeyStringCopy, _ := strings.CutPrefix(privateKeyString, "0x") @@ -130,7 +130,7 @@ func (publicKey *PublicKey) ToBytes() []byte { func (publicKey *PublicKey) FromBytes(publicKeyBytes []byte) (*PublicKey, error) { if len(publicKeyBytes) == 0 { - return nil, errors.New("empty bls.PublicKey bytes provided") + return nil, nil } var err error publicKey.flowPublicKey, err = flowCrypto.DecodePublicKey(SigningAlgorithm, publicKeyBytes) @@ -146,7 +146,7 @@ func (publicKey *PublicKey) ToString() string { func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, error) { if publicKeyString == "" { - return nil, errors.New("empty bls.PublicKey string provided") + return nil, nil } // Chop off leading 0x, if exists. Otherwise, does nothing. publicKeyStringCopy, _ := strings.CutPrefix(publicKeyString, "0x") @@ -191,7 +191,7 @@ func (signature *Signature) ToBytes() []byte { func (signature *Signature) FromBytes(signatureBytes []byte) (*Signature, error) { if len(signatureBytes) == 0 { - return nil, errors.New("empty bls.Signature bytes provided") + return nil, nil } signature.flowSignature = signatureBytes return signature, nil @@ -206,7 +206,7 @@ func (signature *Signature) ToString() string { func (signature *Signature) FromString(signatureString string) (*Signature, error) { if signatureString == "" { - return nil, errors.New("empty bls.Signature string provided") + return nil, nil } // Chop off leading 0x, if exists. Otherwise, does nothing. 
signatureStringCopy, _ := strings.CutPrefix(signatureString, "0x") diff --git a/bls/signature_test.go b/bls/signature_test.go index 0a2925f37..075162bde 100644 --- a/bls/signature_test.go +++ b/bls/signature_test.go @@ -139,9 +139,9 @@ func TestVerifyingBLSSignatures(t *testing.T) { // ToString() require.Equal(t, (&PrivateKey{}).ToString(), "") // FromString() - _, err = (&PrivateKey{}).FromString("") - require.Error(t, err) - require.Contains(t, err.Error(), "empty bls.PrivateKey string provided") + blsPrivateKey, err := (&PrivateKey{}).FromString("") + require.NoError(t, err) + require.Nil(t, blsPrivateKey) // Eq() require.False(t, (&PrivateKey{}).Eq(nil)) require.False(t, (&PrivateKey{}).Eq(&PrivateKey{})) @@ -158,18 +158,18 @@ func TestVerifyingBLSSignatures(t *testing.T) { // ToBytes() require.True(t, bytes.Equal((&PublicKey{}).ToBytes(), []byte{})) // FromBytes() - _, err = (&PublicKey{}).FromBytes(nil) - require.Error(t, err) - require.Contains(t, err.Error(), "empty bls.PublicKey bytes provided") - _, err = (&PublicKey{}).FromBytes([]byte{}) - require.Error(t, err) - require.Contains(t, err.Error(), "empty bls.PublicKey bytes provided") + blsPublicKey, err := (&PublicKey{}).FromBytes(nil) + require.NoError(t, err) + require.Nil(t, blsPublicKey) + blsPublicKey, err = (&PublicKey{}).FromBytes([]byte{}) + require.NoError(t, err) + require.Nil(t, blsPublicKey) // ToString() require.Equal(t, (&PublicKey{}).ToString(), "") // FromString() - _, err = (&PublicKey{}).FromString("") - require.Error(t, err) - require.Contains(t, err.Error(), "empty bls.PublicKey string provided") + blsPublicKey, err = (&PublicKey{}).FromString("") + require.NoError(t, err) + require.Nil(t, blsPublicKey) // Eq() require.False(t, (&PublicKey{}).Eq(nil)) require.False(t, (&PublicKey{}).Eq(&PublicKey{})) @@ -184,18 +184,18 @@ func TestVerifyingBLSSignatures(t *testing.T) { // ToBytes() require.True(t, bytes.Equal((&Signature{}).ToBytes(), []byte{})) // FromBytes() - _, err = 
(&Signature{}).FromBytes(nil) - require.Error(t, err) - require.Contains(t, err.Error(), "empty bls.Signature bytes provided") - _, err = (&Signature{}).FromBytes([]byte{}) - require.Error(t, err) - require.Contains(t, err.Error(), "empty bls.Signature bytes provided") + blsSignature, err := (&Signature{}).FromBytes(nil) + require.NoError(t, err) + require.Nil(t, blsSignature) + blsSignature, err = (&Signature{}).FromBytes([]byte{}) + require.NoError(t, err) + require.Nil(t, blsSignature) // ToString() require.Equal(t, (&Signature{}).ToString(), "") // FromString() - _, err = (&Signature{}).FromString("") - require.Error(t, err) - require.Contains(t, err.Error(), "empty bls.Signature string provided") + blsSignature, err = (&Signature{}).FromString("") + require.NoError(t, err) + require.Nil(t, blsSignature) // Eq() require.False(t, (&Signature{}).Eq(nil)) require.False(t, (&Signature{}).Eq(&Signature{})) From aca9f662e1f2df2110711cab46cc7a4045b181b6 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 8 May 2023 16:34:01 -0400 Subject: [PATCH 027/762] Try non-recursive go test syntax. --- test.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test.Dockerfile b/test.Dockerfile index c2b6d318c..6eb62c4f9 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -28,4 +28,4 @@ COPY main.go . # build backend RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core main.go -ENTRYPOINT ["go", "test", "-tags", "relic", "-v", "./..."] +ENTRYPOINT ["go", "test", "-tags", "relic", "-v", "github.com/deso-protocol/core/bls", "github.com/deso-protocol/core/lib"] From a09b65328673435cc5422c1b4f5db7aa927171ba Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 9 May 2023 11:15:42 -0400 Subject: [PATCH 028/762] Add bls package to Dockerfile. --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 97e2af165..04a49132f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,8 +10,9 @@ COPY go.sum . 
RUN go mod download -COPY desohash desohash +COPY bls bls COPY cmd cmd +COPY desohash desohash COPY lib lib COPY migrate migrate COPY test_data test_data From f3aaff6deea4aa9962a8e97407e4c6440e8a5538 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 9 May 2023 14:01:34 -0400 Subject: [PATCH 029/762] Test Copy() func. --- bls/signature_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/bls/signature_test.go b/bls/signature_test.go index 075162bde..12eeb627e 100644 --- a/bls/signature_test.go +++ b/bls/signature_test.go @@ -124,10 +124,16 @@ func TestVerifyingBLSSignatures(t *testing.T) { // Test bls.PublicKey.Copy(). blsPublicKey1Copy := blsPublicKey1.Copy() require.True(t, blsPublicKey1.Eq(blsPublicKey1Copy)) + blsPublicKey1Copy.flowPublicKey = _generateRandomBLSPrivateKey(t).PublicKey().flowPublicKey + require.False(t, blsPublicKey1.Eq(blsPublicKey1Copy)) // Test bls.Signature.Copy(). blsSignature1Copy := blsSignature1.Copy() require.True(t, blsSignature1.Eq(blsSignature1Copy)) + blsRandomSignature, err := _generateRandomBLSPrivateKey(t).Sign(randomPayload1) + require.NoError(t, err) + blsSignature1Copy.flowSignature = blsRandomSignature.flowSignature + require.False(t, blsSignature1.Eq(blsSignature1Copy)) // Test nil bls.PrivateKey edge cases. // Sign() From c8ae873f31b5b88d46feb7b39fbb8de64605c51c Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Wed, 10 May 2023 16:00:37 -0400 Subject: [PATCH 030/762] Mf/convert validator voting keys to bls keys (#522) * Add BLS signature utils. * Get tests passing. * Update test dockerfile to install relic. * Add relic + no relic files. * Add more private key, public key, signature utils. * Remove types_ filename prefix. * Install go v1.20 in the test.Dockerfile. * Dont apk add install go. * Dont re-install flow-go. * Add utils to aggregate + verify aggregate signatures. * Add no relic version of aggregate BLS sig funcs. * Conver validator voting keys to BLS keys. 
* Add copy funcs. * Fix failing stake tests. * Validate validator voting signature block height. * Retrigger CI. * Update no relic func signatures. * Add better edge-case handling for BLS types. * Merge upstream branch. Resolve conflicts. * Move BLS utils to own package. * Clean up naming conventions. * Reference BLS utils from package. * Expose NewPrivateKey(). * Incorporate new BLS package in tests. * Dont error if parsing empty string or bytes. * Try non-recursive go test syntax. * Retrigger CI. * Add bls package to Dockerfile. * Test Copy() func. * Add encoder/decoder wrappers. * Increase voting signature window from 60 to 300. --- .gitignore | 1 + Dockerfile | 3 +- bls/signature.go | 237 +++++++++++++++++++++++++ bls/signature_no_relic.go | 107 ++++++++++++ bls/signature_no_relic_test.go | 15 ++ bls/signature_test.go | 226 ++++++++++++++++++++++++ go.mod | 19 +- go.sum | 25 +++ lib/block_view_stake_test.go | 26 ++- lib/block_view_types_test.go | 2 + lib/block_view_validator.go | 140 ++++++++++++--- lib/block_view_validator_test.go | 288 +++++++++++++++++++++++++++---- lib/constants.go | 16 ++ scripts/install-relic.sh | 32 ++++ test.Dockerfile | 15 +- 15 files changed, 1074 insertions(+), 78 deletions(-) create mode 100644 bls/signature.go create mode 100644 bls/signature_no_relic.go create mode 100644 bls/signature_no_relic_test.go create mode 100644 bls/signature_test.go create mode 100755 scripts/install-relic.sh diff --git a/.gitignore b/.gitignore index c7b1bb1aa..dc0267338 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ quick_testing dump_txns compare_blocks.go backend +core .idea diff --git a/Dockerfile b/Dockerfile index 97e2af165..04a49132f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,8 +10,9 @@ COPY go.sum . 
RUN go mod download -COPY desohash desohash +COPY bls bls COPY cmd cmd +COPY desohash desohash COPY lib lib COPY migrate migrate COPY test_data test_data diff --git a/bls/signature.go b/bls/signature.go new file mode 100644 index 000000000..1b7258939 --- /dev/null +++ b/bls/signature.go @@ -0,0 +1,237 @@ +//go:build relic + +package bls + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + flowCrypto "github.com/onflow/flow-go/crypto" + "strings" +) + +const SigningAlgorithm = flowCrypto.BLSBLS12381 + +// TODO: what should the domainTag param be? +var HashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-protocol") + +func AggregateSignatures(signatures []*Signature) (*Signature, error) { + var flowSignatures []flowCrypto.Signature + for _, signature := range signatures { + flowSignatures = append(flowSignatures, signature.flowSignature) + } + aggregateFlowSignature, err := flowCrypto.AggregateBLSSignatures(flowSignatures) + if err != nil { + return nil, err + } + return &Signature{flowSignature: aggregateFlowSignature}, nil +} + +func VerifyAggregateSignature(publicKeys []*PublicKey, signature *Signature, payloadBytes []byte) (bool, error) { + var flowPublicKeys []flowCrypto.PublicKey + for _, publicKey := range publicKeys { + flowPublicKeys = append(flowPublicKeys, publicKey.flowPublicKey) + } + return flowCrypto.VerifyBLSSignatureOneMessage(flowPublicKeys, signature.flowSignature, payloadBytes, HashingAlgorithm) +} + +// +// TYPES: PrivateKey +// + +type PrivateKey struct { + flowPrivateKey flowCrypto.PrivateKey +} + +func NewPrivateKey() (*PrivateKey, error) { + randomBytes := make([]byte, 64) + _, err := rand.Read(randomBytes) + if err != nil { + return nil, err + } + flowPrivateKey, err := flowCrypto.GeneratePrivateKey(SigningAlgorithm, randomBytes) + if err != nil { + return nil, err + } + return &PrivateKey{flowPrivateKey: flowPrivateKey}, nil +} + +func (privateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { + if 
privateKey.flowPrivateKey == nil { + return nil, errors.New("bls.PrivateKey is nil") + } + flowSignature, err := privateKey.flowPrivateKey.Sign(payloadBytes, HashingAlgorithm) + if err != nil { + return nil, err + } + return &Signature{flowSignature: flowSignature}, nil +} + +func (privateKey *PrivateKey) PublicKey() *PublicKey { + if privateKey.flowPrivateKey == nil { + return nil + } + return &PublicKey{flowPublicKey: privateKey.flowPrivateKey.PublicKey()} +} + +func (privateKey *PrivateKey) ToString() string { + if privateKey.flowPrivateKey == nil { + return "" + } + return privateKey.flowPrivateKey.String() +} + +func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { + if privateKeyString == "" { + return nil, nil + } + // Chop off leading 0x, if exists. Otherwise, does nothing. + privateKeyStringCopy, _ := strings.CutPrefix(privateKeyString, "0x") + // Convert from hex string to byte slice. + privateKeyBytes, err := hex.DecodeString(privateKeyStringCopy) + if err != nil { + return nil, err + } + // Convert from byte slice to bls.PrivateKey. 
+ privateKey.flowPrivateKey, err = flowCrypto.DecodePrivateKey(SigningAlgorithm, privateKeyBytes) + return privateKey, err +} + +func (privateKey *PrivateKey) Eq(other *PrivateKey) bool { + if privateKey.flowPrivateKey == nil || other == nil { + return false + } + return privateKey.flowPrivateKey.Equals(other.flowPrivateKey) +} + +// +// TYPES: PublicKey +// + +type PublicKey struct { + flowPublicKey flowCrypto.PublicKey +} + +func (publicKey *PublicKey) Verify(signature *Signature, input []byte) (bool, error) { + if publicKey.flowPublicKey == nil { + return false, errors.New("bls.PublicKey is nil") + } + return publicKey.flowPublicKey.Verify(signature.flowSignature, input, HashingAlgorithm) +} + +func (publicKey *PublicKey) ToBytes() []byte { + var publicKeyBytes []byte + if publicKey.flowPublicKey != nil { + publicKeyBytes = publicKey.flowPublicKey.Encode() + } + return publicKeyBytes +} + +func (publicKey *PublicKey) FromBytes(publicKeyBytes []byte) (*PublicKey, error) { + if len(publicKeyBytes) == 0 { + return nil, nil + } + var err error + publicKey.flowPublicKey, err = flowCrypto.DecodePublicKey(SigningAlgorithm, publicKeyBytes) + return publicKey, err +} + +func (publicKey *PublicKey) ToString() string { + if publicKey.flowPublicKey == nil { + return "" + } + return publicKey.flowPublicKey.String() +} + +func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, error) { + if publicKeyString == "" { + return nil, nil + } + // Chop off leading 0x, if exists. Otherwise, does nothing. + publicKeyStringCopy, _ := strings.CutPrefix(publicKeyString, "0x") + // Convert from hex string to byte slice. + publicKeyBytes, err := hex.DecodeString(publicKeyStringCopy) + if err != nil { + return nil, err + } + // Convert from byte slice to bls.PublicKey. 
+ publicKey.flowPublicKey, err = flowCrypto.DecodePublicKey(SigningAlgorithm, publicKeyBytes) + return publicKey, err +} + +func (publicKey *PublicKey) Eq(other *PublicKey) bool { + if publicKey.flowPublicKey == nil || other == nil { + return false + } + return publicKey.flowPublicKey.Equals(other.flowPublicKey) +} + +func (publicKey *PublicKey) Copy() *PublicKey { + return &PublicKey{ + flowPublicKey: publicKey.flowPublicKey, + } +} + +// +// TYPES: Signature +// + +type Signature struct { + flowSignature flowCrypto.Signature +} + +func (signature *Signature) ToBytes() []byte { + var signatureBytes []byte + if signature.flowSignature != nil { + signatureBytes = signature.flowSignature.Bytes() + } + return signatureBytes +} + +func (signature *Signature) FromBytes(signatureBytes []byte) (*Signature, error) { + if len(signatureBytes) == 0 { + return nil, nil + } + signature.flowSignature = signatureBytes + return signature, nil +} + +func (signature *Signature) ToString() string { + if signature.flowSignature == nil { + return "" + } + return signature.flowSignature.String() +} + +func (signature *Signature) FromString(signatureString string) (*Signature, error) { + if signatureString == "" { + return nil, nil + } + // Chop off leading 0x, if exists. Otherwise, does nothing. + signatureStringCopy, _ := strings.CutPrefix(signatureString, "0x") + // Convert from hex string to byte slice. + signatureBytes, err := hex.DecodeString(signatureStringCopy) + if err != nil { + return nil, err + } + // Convert from byte slice to bls.Signature. 
+ signature.flowSignature = signatureBytes + return signature, nil +} + +func (signature *Signature) Eq(other *Signature) bool { + if signature.flowSignature == nil || other == nil { + return false + } + return bytes.Equal(signature.ToBytes(), other.ToBytes()) +} + +func (signature *Signature) Copy() *Signature { + if signature.flowSignature == nil { + return &Signature{} + } + return &Signature{ + flowSignature: append([]byte{}, signature.flowSignature.Bytes()...), + } +} diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go new file mode 100644 index 000000000..71564d6c0 --- /dev/null +++ b/bls/signature_no_relic.go @@ -0,0 +1,107 @@ +//go:build !relic + +package bls + +const BLSNoRelicError = "BLS keys can't be used without Relic installed" + +func AggregateSignatures(signatures []*Signature) (*Signature, error) { + panic(BLSNoRelicError) +} + +func VerifyAggregateSignature(publicKeys []*PublicKey, signature *Signature, payloadBytes []byte) (bool, error) { + panic(BLSNoRelicError) +} + +// +// TYPES: PrivateKey +// + +type PrivateKey struct{} + +func NewPrivateKey() (*PrivateKey, error) { + panic(BLSNoRelicError) +} + +func (privateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { + panic(BLSNoRelicError) +} + +func (privateKey *PrivateKey) PublicKey() *PublicKey { + panic(BLSNoRelicError) +} + +func (privateKey *PrivateKey) ToString() string { + panic(BLSNoRelicError) +} + +func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { + panic(BLSNoRelicError) +} + +func (privateKey *PrivateKey) Eq(other *PrivateKey) bool { + panic(BLSNoRelicError) +} + +// +// TYPES: PublicKey +// + +type PublicKey struct{} + +func (publicKey *PublicKey) Verify(signature *Signature, input []byte) (bool, error) { + panic(BLSNoRelicError) +} + +func (publicKey *PublicKey) ToBytes() []byte { + panic(BLSNoRelicError) +} + +func (publicKey *PublicKey) FromBytes(publicKeyBytes []byte) (*PublicKey, error) { + 
panic(BLSNoRelicError) +} + +func (publicKey *PublicKey) ToString() string { + panic(BLSNoRelicError) +} + +func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, error) { + panic(BLSNoRelicError) +} + +func (publicKey *PublicKey) Eq(other *PublicKey) bool { + panic(BLSNoRelicError) +} + +func (publicKey *PublicKey) Copy() *PublicKey { + panic(BLSNoRelicError) +} + +// +// TYPES: Signature +// + +type Signature struct{} + +func (signature *Signature) ToBytes() []byte { + panic(BLSNoRelicError) +} + +func (signature *Signature) FromBytes(signatureBytes []byte) (*Signature, error) { + panic(BLSNoRelicError) +} + +func (signature *Signature) ToString() string { + panic(BLSNoRelicError) +} + +func (signature *Signature) FromString(signatureString string) (*Signature, error) { + panic(BLSNoRelicError) +} + +func (signature *Signature) Eq(other *Signature) bool { + panic(BLSNoRelicError) +} + +func (signature *Signature) Copy() *Signature { + panic(BLSNoRelicError) +} diff --git a/bls/signature_no_relic_test.go b/bls/signature_no_relic_test.go new file mode 100644 index 000000000..3b4456b14 --- /dev/null +++ b/bls/signature_no_relic_test.go @@ -0,0 +1,15 @@ +//go:build !relic + +package bls + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestVerifyingBLSSignatures(t *testing.T) { + assert.Panics(t, func() { NewPrivateKey() }) + assert.Panics(t, func() { (&PrivateKey{}).FromString("") }) + assert.Panics(t, func() { (&PublicKey{}).FromString("") }) + assert.Panics(t, func() { (&Signature{}).FromString("") }) +} diff --git a/bls/signature_test.go b/bls/signature_test.go new file mode 100644 index 000000000..12eeb627e --- /dev/null +++ b/bls/signature_test.go @@ -0,0 +1,226 @@ +//go:build relic + +package bls + +import ( + "bytes" + "crypto/rand" + "github.com/stretchr/testify/require" + "testing" +) + +func TestVerifyingBLSSignatures(t *testing.T) { + // Generate two BLS public/private key pairs. 
+ blsPrivateKey1 := _generateRandomBLSPrivateKey(t) + blsPublicKey1 := blsPrivateKey1.PublicKey() + + blsPrivateKey2 := _generateRandomBLSPrivateKey(t) + blsPublicKey2 := blsPrivateKey2.PublicKey() + + // Test bls.PrivateKey.Sign() and bls.PublicKey.Verify(). + // 1. PrivateKey1 signs a random payload. + randomPayload1 := _generateRandomBytes(t, 256) + blsSignature1, err := blsPrivateKey1.Sign(randomPayload1) + require.NoError(t, err) + // 2. Verify bls.PublicKey1 is the signer. + isVerified, err := blsPublicKey1.Verify(blsSignature1, randomPayload1) + require.NoError(t, err) + require.True(t, isVerified) + // 3. Verify bls.PublicKey2 is not the signer. + isVerified, err = blsPublicKey2.Verify(blsSignature1, randomPayload1) + require.NoError(t, err) + require.False(t, isVerified) + + // 4. PrivateKey2 signs a different random payload. + randomPayload2 := _generateRandomBytes(t, 256) + blsSignature2, err := blsPrivateKey2.Sign(randomPayload2) + require.NoError(t, err) + // 5. Verify bls.PublicKey1 is not the signer. + isVerified, err = blsPublicKey1.Verify(blsSignature2, randomPayload2) + require.NoError(t, err) + require.False(t, isVerified) + // 6. Verify bls.PublicKey2 is the signer. + isVerified, err = blsPublicKey2.Verify(blsSignature2, randomPayload2) + require.NoError(t, err) + require.True(t, isVerified) + + // Test AggregateSignatures() and VerifyAggregateSignature(). + // 1. PrivateKey1 signs a random payload. + randomPayload3 := _generateRandomBytes(t, 256) + blsSignature1, err = blsPrivateKey1.Sign(randomPayload3) + require.NoError(t, err) + // 2. PrivateKey2 signs the same random payload. + blsSignature2, err = blsPrivateKey2.Sign(randomPayload3) + require.NoError(t, err) + // 3. Aggregate their signatures. + aggregateSignature, err := AggregateSignatures([]*Signature{blsSignature1, blsSignature2}) + require.NoError(t, err) + // 4. Verify the AggregateSignature. 
+ isVerified, err = VerifyAggregateSignature( + []*PublicKey{blsPublicKey1, blsPublicKey2}, aggregateSignature, randomPayload3, + ) + require.NoError(t, err) + require.True(t, isVerified) + // 5. Verify PrivateKey1's signature doesn't work on its own. + isVerified, err = VerifyAggregateSignature([]*PublicKey{blsPublicKey1}, aggregateSignature, randomPayload3) + require.NoError(t, err) + require.False(t, isVerified) + // 6. Verify PrivateKey2's signature doesn't work on its own. + isVerified, err = VerifyAggregateSignature([]*PublicKey{blsPublicKey2}, aggregateSignature, randomPayload3) + require.NoError(t, err) + require.False(t, isVerified) + // 7. Verify the AggregateSignature doesn't work on a different payload. + isVerified, err = VerifyAggregateSignature( + []*PublicKey{blsPublicKey1, blsPublicKey2}, aggregateSignature, randomPayload1, + ) + require.NoError(t, err) + require.False(t, isVerified) + + // Test bls.PrivateKey.Eq(). + require.True(t, blsPrivateKey1.Eq(blsPrivateKey1)) + require.True(t, blsPrivateKey2.Eq(blsPrivateKey2)) + require.False(t, blsPrivateKey1.Eq(blsPrivateKey2)) + + // Test bls.PrivateKey.ToString() and bls.PrivateKey.FromString(). + blsPrivateKeyString := blsPrivateKey1.ToString() + copyBLSPrivateKey1, err := (&PrivateKey{}).FromString(blsPrivateKeyString) + require.NoError(t, err) + require.True(t, blsPrivateKey1.Eq(copyBLSPrivateKey1)) + + // Test bls.PublicKey.Eq(). + require.True(t, blsPublicKey1.Eq(blsPublicKey1)) + require.True(t, blsPublicKey2.Eq(blsPublicKey2)) + require.False(t, blsPublicKey1.Eq(blsPublicKey2)) + + // Test bls.PublicKey.ToBytes() and bls.PublicKey.FromBytes(). + blsPublicKeyBytes := blsPublicKey1.ToBytes() + copyBLSPublicKey1, err := (&PublicKey{}).FromBytes(blsPublicKeyBytes) + require.NoError(t, err) + require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) + + // Test bls.PublicKey.ToString() and bls.PublicKey.FromString(). 
+ blsPublicKeyString := blsPublicKey1.ToString() + copyBLSPublicKey1, err = (&PublicKey{}).FromString(blsPublicKeyString) + require.NoError(t, err) + require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) + + // Test bls.Signature.Eq(). + require.True(t, blsSignature1.Eq(blsSignature1)) + require.True(t, blsSignature2.Eq(blsSignature2)) + require.False(t, blsSignature1.Eq(blsSignature2)) + + // Test bls.Signature.ToBytes() and bls.Signature.FromBytes(). + blsSignatureBytes := blsSignature1.ToBytes() + copyBLSSignature, err := (&Signature{}).FromBytes(blsSignatureBytes) + require.NoError(t, err) + require.True(t, blsSignature1.Eq(copyBLSSignature)) + + // Test bls.Signature.ToString() and bls.Signature.FromString(). + blsSignatureString := blsSignature1.ToString() + copyBLSSignature, err = (&Signature{}).FromString(blsSignatureString) + require.NoError(t, err) + require.True(t, blsSignature1.Eq(copyBLSSignature)) + + // Test bls.PublicKey.Copy(). + blsPublicKey1Copy := blsPublicKey1.Copy() + require.True(t, blsPublicKey1.Eq(blsPublicKey1Copy)) + blsPublicKey1Copy.flowPublicKey = _generateRandomBLSPrivateKey(t).PublicKey().flowPublicKey + require.False(t, blsPublicKey1.Eq(blsPublicKey1Copy)) + + // Test bls.Signature.Copy(). + blsSignature1Copy := blsSignature1.Copy() + require.True(t, blsSignature1.Eq(blsSignature1Copy)) + blsRandomSignature, err := _generateRandomBLSPrivateKey(t).Sign(randomPayload1) + require.NoError(t, err) + blsSignature1Copy.flowSignature = blsRandomSignature.flowSignature + require.False(t, blsSignature1.Eq(blsSignature1Copy)) + + // Test nil bls.PrivateKey edge cases. 
+ // Sign() + _, err = (&PrivateKey{}).Sign(randomPayload1) + require.Error(t, err) + require.Contains(t, err.Error(), "bls.PrivateKey is nil") + // PublicKey() + require.Nil(t, (&PrivateKey{}).PublicKey()) + // ToString() + require.Equal(t, (&PrivateKey{}).ToString(), "") + // FromString() + blsPrivateKey, err := (&PrivateKey{}).FromString("") + require.NoError(t, err) + require.Nil(t, blsPrivateKey) + // Eq() + require.False(t, (&PrivateKey{}).Eq(nil)) + require.False(t, (&PrivateKey{}).Eq(&PrivateKey{})) + require.False(t, (&PrivateKey{}).Eq(_generateRandomBLSPrivateKey(t))) + require.False(t, _generateRandomBLSPrivateKey(t).Eq(nil)) + require.False(t, _generateRandomBLSPrivateKey(t).Eq(&PrivateKey{})) + require.False(t, _generateRandomBLSPrivateKey(t).Eq(_generateRandomBLSPrivateKey(t))) + + // Test nil bls.PublicKey edge cases. + // Verify() + _, err = (&PublicKey{}).Verify(blsSignature1, randomPayload1) + require.Error(t, err) + require.Contains(t, err.Error(), "bls.PublicKey is nil") + // ToBytes() + require.True(t, bytes.Equal((&PublicKey{}).ToBytes(), []byte{})) + // FromBytes() + blsPublicKey, err := (&PublicKey{}).FromBytes(nil) + require.NoError(t, err) + require.Nil(t, blsPublicKey) + blsPublicKey, err = (&PublicKey{}).FromBytes([]byte{}) + require.NoError(t, err) + require.Nil(t, blsPublicKey) + // ToString() + require.Equal(t, (&PublicKey{}).ToString(), "") + // FromString() + blsPublicKey, err = (&PublicKey{}).FromString("") + require.NoError(t, err) + require.Nil(t, blsPublicKey) + // Eq() + require.False(t, (&PublicKey{}).Eq(nil)) + require.False(t, (&PublicKey{}).Eq(&PublicKey{})) + require.False(t, (&PublicKey{}).Eq(_generateRandomBLSPrivateKey(t).PublicKey())) + require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq(nil)) + require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq((&PrivateKey{}).PublicKey())) + require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq(_generateRandomBLSPrivateKey(t).PublicKey())) + // 
Copy() + require.Nil(t, (&PublicKey{}).Copy().flowPublicKey) + + // Test nil bls.Signature edge cases. + // ToBytes() + require.True(t, bytes.Equal((&Signature{}).ToBytes(), []byte{})) + // FromBytes() + blsSignature, err := (&Signature{}).FromBytes(nil) + require.NoError(t, err) + require.Nil(t, blsSignature) + blsSignature, err = (&Signature{}).FromBytes([]byte{}) + require.NoError(t, err) + require.Nil(t, blsSignature) + // ToString() + require.Equal(t, (&Signature{}).ToString(), "") + // FromString() + blsSignature, err = (&Signature{}).FromString("") + require.NoError(t, err) + require.Nil(t, blsSignature) + // Eq() + require.False(t, (&Signature{}).Eq(nil)) + require.False(t, (&Signature{}).Eq(&Signature{})) + require.False(t, (&Signature{}).Eq(blsSignature1)) + require.False(t, blsSignature1.Eq(nil)) + require.False(t, blsSignature1.Eq(&Signature{})) + // Copy() + require.Nil(t, (&Signature{}).Copy().flowSignature) +} + +func _generateRandomBLSPrivateKey(t *testing.T) *PrivateKey { + privateKey, err := NewPrivateKey() + require.NoError(t, err) + return privateKey +} + +func _generateRandomBytes(t *testing.T, numBytes int) []byte { + randomBytes := make([]byte, 64) + _, err := rand.Read(randomBytes) + require.NoError(t, err) + return randomBytes +} diff --git a/go.mod b/go.mod index 2d2bca018..1e34c1732 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/decred/dcrd/lru v1.1.1 github.com/deso-protocol/go-deadlock v1.0.0 github.com/deso-protocol/go-merkle-tree v1.0.0 - github.com/dgraph-io/badger/v3 v3.2103.0 + github.com/dgraph-io/badger/v3 v3.2103.5 github.com/ethereum/go-ethereum v1.9.25 github.com/fatih/color v1.13.0 github.com/gernest/mention v2.0.0+incompatible @@ -30,10 +30,10 @@ require ( github.com/spf13/cobra v1.1.3 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.7.1 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.8.0 github.com/tyler-smith/go-bip39 v1.0.2 github.com/unrolled/secure v1.0.8 - 
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 + golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d golang.org/x/sync v0.0.0-20210220032951-036812b2e83c gopkg.in/DataDog/dd-trace-go.v1 v1.29.0 ) @@ -47,12 +47,14 @@ require ( require ( github.com/DataDog/zstd v1.4.8 // indirect github.com/Microsoft/go-winio v0.4.16 // indirect + github.com/NVIDIA/cstruct v0.0.0-20210817223100-441a06a021c8 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect github.com/bwesterb/go-ristretto v1.2.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect - github.com/dgraph-io/ristretto v0.1.0 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/git-chglog/git-chglog v0.0.0-20200414013904-db796966b373 // indirect @@ -69,6 +71,7 @@ require ( github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect + github.com/klauspost/compress v1.12.3 // indirect github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.1 // indirect github.com/mattn/go-colorable v0.1.9 // indirect @@ -76,6 +79,7 @@ require ( github.com/mattn/goveralls v0.0.6 // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/onflow/flow-go/crypto v0.24.7 // indirect github.com/onsi/ginkgo v1.15.0 // indirect github.com/onsi/gomega v1.10.5 // indirect github.com/pelletier/go-toml v1.7.0 // indirect @@ -96,8 +100,9 @@ require ( github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect go.opencensus.io v0.23.0 // indirect - 
golang.org/x/net v0.0.0-20210614182718-04defd469f4e // indirect - golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac // indirect + golang.org/x/mod v0.4.2 // indirect + golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect + golang.org/x/sys v0.0.0-20221010170243-090e33056c14 // indirect golang.org/x/text v0.3.6 // indirect golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect golang.org/x/tools v0.1.5 // indirect @@ -108,6 +113,6 @@ require ( gopkg.in/ini.v1 v1.51.0 // indirect gopkg.in/kyokomi/emoji.v1 v1.5.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect mellium.im/sasl v0.2.1 // indirect ) diff --git a/go.sum b/go.sum index 65fb736ec..c2e35d0c6 100644 --- a/go.sum +++ b/go.sum @@ -35,6 +35,7 @@ github.com/DataDog/zstd v1.4.8 h1:Rpmta4xZ/MgZnriKNd24iZMhGpP5dvUcs/uqfBapKZY= github.com/DataDog/zstd v1.4.8/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/NVIDIA/cstruct v0.0.0-20210817223100-441a06a021c8/go.mod h1:GPbuJvLD4QWiHPS6vivLzh+XMAx6va0Aucm6ipa5S0I= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= @@ -61,6 +62,8 @@ github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnC github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= 
+github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E= +github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -124,9 +127,13 @@ github.com/deso-protocol/go-merkle-tree v1.0.0 h1:9zkI5dQsITYy77s4kbTGPQmZnhQ+Ls github.com/deso-protocol/go-merkle-tree v1.0.0/go.mod h1:V/vbg/maaNv6G7zf9VVs645nLFx/jsO2L/awFB/S/ZU= github.com/dgraph-io/badger/v3 v3.2103.0 h1:abkD2EnP3+6Tj8h5LI1y00dJ9ICKTIAzvG9WmZ8S2c4= github.com/dgraph-io/badger/v3 v3.2103.0/go.mod h1:GHMCYxuDWyzbHkh4k3yyg4PM61tJPFfEGSMbE3Vd5QE= +github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= +github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= github.com/dgraph-io/ristretto v0.0.4-0.20210309073149-3836124cdc5a/go.mod h1:MIonLggsKgZLUSt414ExgwNtlOL5MuEoAJP514mwGe8= github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -209,6 +216,7 @@ github.com/golang/snappy v0.0.3/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -299,6 +307,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -359,6 +369,8 @@ github.com/oleiade/lane v1.0.1 h1:hXofkn7GEOubzTwNpeL9MaNy8WxolCYb9cInAIeqShU= github.com/oleiade/lane v1.0.1/go.mod h1:IyTkraa4maLfjq/GmHR+Dxb4kCMtEGeb+qmhlrQ5Mk4= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod 
h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onflow/flow-go/crypto v0.24.7 h1:RCLuB83At4z5wkAyUCF7MYEnPoIIOHghJaODuJyEoW0= +github.com/onflow/flow-go/crypto v0.24.7/go.mod h1:fqCzkIBBMRRkciVrvW21rECKq1oD7Q6u+bCI78lfNX0= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= @@ -452,6 +464,8 @@ github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUW github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -460,6 +474,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= 
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= @@ -531,6 +548,8 @@ golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -589,6 +608,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -642,6 +663,8 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14 h1:k5II8e6QD8mITdi+okbbmR/cIyEbeXLBhy5Ha4nevyc= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -758,6 +781,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/lib/block_view_stake_test.go 
b/lib/block_view_stake_test.go index fc8360997..a75c06cdf 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -1,3 +1,5 @@ +//go:build relic + package lib import ( @@ -99,8 +101,12 @@ func _testStaking(t *testing.T, flushToDB bool) { GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, flushToDB) require.NoError(t, err) @@ -951,16 +957,24 @@ func TestStakingWithDerivedKey(t *testing.T) { } { // m0 registers as a validator. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example1.com")}, + Domains: [][]byte{[]byte("https://example1.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) } { // m1 registers as a validator. 
+ votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example2.com")}, + Domains: [][]byte{[]byte("https://example2.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) @@ -1832,8 +1846,12 @@ func TestStakeLockupEpochDuration(t *testing.T) { } { // m0 registers as a validator. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m1.com")}, + Domains: [][]byte{[]byte("https://m1.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, true) require.NoError(t, err) diff --git a/lib/block_view_types_test.go b/lib/block_view_types_test.go index 7448697d1..0a81bcae4 100644 --- a/lib/block_view_types_test.go +++ b/lib/block_view_types_test.go @@ -1,3 +1,5 @@ +//go:build relic + package lib import ( diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 8993dbdc7..d2c282e8d 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -2,11 +2,14 @@ package lib import ( "bytes" + "crypto/sha256" "fmt" + "github.com/deso-protocol/core/bls" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" + "io" "math" "net/url" "sort" @@ -21,12 +24,10 @@ type ValidatorEntry struct { ValidatorPKID *PKID // Note: if someone is updating their ValidatorEntry, they need to include // all domains. The Domains field is not appended to. It is overwritten. 
- Domains [][]byte - DisableDelegatedStake bool - // TODO: We will implement BLS public keys and signatures in a subsequent PR. - // For now, we include them just as a placeholder byte slice. - VotingPublicKey []byte - VotingPublicKeySignature []byte + Domains [][]byte + DisableDelegatedStake bool + VotingPublicKey *bls.PublicKey + VotingPublicKeySignature *bls.Signature VotingSignatureBlockHeight uint64 TotalStakeAmountNanos *uint256.Int RegisteredAtBlockHeight uint64 @@ -82,8 +83,8 @@ func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { ValidatorPKID: validatorEntry.ValidatorPKID.NewPKID(), Domains: domainsCopy, DisableDelegatedStake: validatorEntry.DisableDelegatedStake, - VotingPublicKey: append([]byte{}, validatorEntry.VotingPublicKey...), - VotingPublicKeySignature: append([]byte{}, validatorEntry.VotingPublicKeySignature...), + VotingPublicKey: validatorEntry.VotingPublicKey.Copy(), + VotingPublicKeySignature: validatorEntry.VotingPublicKeySignature.Copy(), VotingSignatureBlockHeight: validatorEntry.VotingSignatureBlockHeight, TotalStakeAmountNanos: validatorEntry.TotalStakeAmountNanos.Clone(), RegisteredAtBlockHeight: validatorEntry.RegisteredAtBlockHeight, @@ -115,8 +116,8 @@ func (validatorEntry *ValidatorEntry) RawEncodeWithoutMetadata(blockHeight uint6 } data = append(data, BoolToByte(validatorEntry.DisableDelegatedStake)) - data = append(data, EncodeByteArray(validatorEntry.VotingPublicKey)...) - data = append(data, EncodeByteArray(validatorEntry.VotingPublicKeySignature)...) + data = append(data, EncodeBLSPublicKey(validatorEntry.VotingPublicKey)...) + data = append(data, EncodeBLSSignature(validatorEntry.VotingPublicKeySignature)...) data = append(data, UintToBuf(validatorEntry.VotingSignatureBlockHeight)...) data = append(data, EncodeUint256(validatorEntry.TotalStakeAmountNanos)...) data = append(data, UintToBuf(validatorEntry.RegisteredAtBlockHeight)...) 
@@ -165,13 +166,13 @@ func (validatorEntry *ValidatorEntry) RawDecodeWithoutMetadata(blockHeight uint6 } // VotingPublicKey - validatorEntry.VotingPublicKey, err = DecodeByteArray(rr) + validatorEntry.VotingPublicKey, err = DecodeBLSPublicKey(rr) if err != nil { return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingPublicKey: ") } // VotingPublicKeySignature - validatorEntry.VotingPublicKeySignature, err = DecodeByteArray(rr) + validatorEntry.VotingPublicKeySignature, err = DecodeBLSSignature(rr) if err != nil { return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingPublicKeySignature: ") } @@ -230,8 +231,8 @@ func (validatorEntry *ValidatorEntry) GetEncoderType() EncoderType { type RegisterAsValidatorMetadata struct { Domains [][]byte DisableDelegatedStake bool - VotingPublicKey []byte - VotingPublicKeySignature []byte + VotingPublicKey *bls.PublicKey + VotingPublicKeySignature *bls.Signature VotingSignatureBlockHeight uint64 } @@ -249,8 +250,8 @@ func (txnData *RegisterAsValidatorMetadata) ToBytes(preSignature bool) ([]byte, } data = append(data, BoolToByte(txnData.DisableDelegatedStake)) - data = append(data, EncodeByteArray(txnData.VotingPublicKey)...) - data = append(data, EncodeByteArray(txnData.VotingPublicKeySignature)...) + data = append(data, EncodeBLSPublicKey(txnData.VotingPublicKey)...) + data = append(data, EncodeBLSSignature(txnData.VotingPublicKeySignature)...) data = append(data, UintToBuf(txnData.VotingSignatureBlockHeight)...) 
return data, nil } @@ -278,13 +279,13 @@ func (txnData *RegisterAsValidatorMetadata) FromBytes(data []byte) error { } // VotingPublicKey - txnData.VotingPublicKey, err = DecodeByteArray(rr) + txnData.VotingPublicKey, err = DecodeBLSPublicKey(rr) if err != nil { return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingPublicKey: ") } // VotingPublicKeySignature - txnData.VotingPublicKeySignature, err = DecodeByteArray(rr) + txnData.VotingPublicKeySignature, err = DecodeBLSSignature(rr) if err != nil { return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingPublicKeySignature: ") } @@ -801,7 +802,8 @@ func (bc *Blockchain) CreateRegisterAsValidatorTxn( } // Validate txn metadata. - if err = utxoView.IsValidRegisterAsValidatorMetadata(transactorPublicKey, metadata); err != nil { + blockHeight := uint64(bc.blockTip().Height) + 1 + if err = utxoView.IsValidRegisterAsValidatorMetadata(transactorPublicKey, metadata, blockHeight); err != nil { return nil, 0, 0, 0, errors.Wrapf( err, "Blockchain.CreateRegisterAsValidatorTxn: invalid txn metadata: ", ) @@ -1032,7 +1034,7 @@ func (bav *UtxoView) _connectRegisterAsValidator( txMeta := txn.TxnMeta.(*RegisterAsValidatorMetadata) // Validate the txn metadata. - if err = bav.IsValidRegisterAsValidatorMetadata(txn.PublicKey, txMeta); err != nil { + if err = bav.IsValidRegisterAsValidatorMetadata(txn.PublicKey, txMeta, uint64(blockHeight)); err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectRegisterAsValidator: ") } @@ -1595,7 +1597,11 @@ func (bav *UtxoView) _disconnectUnjailValidator( ) } -func (bav *UtxoView) IsValidRegisterAsValidatorMetadata(transactorPublicKey []byte, metadata *RegisterAsValidatorMetadata) error { +func (bav *UtxoView) IsValidRegisterAsValidatorMetadata( + transactorPublicKey []byte, + metadata *RegisterAsValidatorMetadata, + blockHeight uint64, +) error { // Validate ValidatorPKID. 
transactorPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKey) if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { @@ -1621,7 +1627,31 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata(transactorPublicKey []by return errors.Wrapf(RuleErrorValidatorDuplicateDomains, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } - // TODO: In subsequent PR, validate VotingPublicKey, VotingPublicKeySignature, and VotingSignatureBlockHeight. + // Validate VotingPublicKey. + if metadata.VotingPublicKey == nil { + return errors.Wrapf(RuleErrorValidatorMissingVotingPublicKey, "UtxoView.IsValidRegisterAsValidatorMetadata: ") + } + + // Validate VotingSignatureBlockHeight. + if metadata.VotingSignatureBlockHeight < blockHeight || + metadata.VotingSignatureBlockHeight > blockHeight+bav.Params.ValidatorVotingSignatureBlockHeightWindow { + return errors.Wrapf(RuleErrorValidatorInvalidVotingSignatureBlockHeight, "UtxoView.IsValidRegisterAsValidatorMetadata: ") + } + + // Validate VotingPublicKeySignature. + if metadata.VotingPublicKeySignature == nil { + return errors.Wrapf(RuleErrorValidatorMissingVotingPublicKeySignature, "UtxoView.IsValidRegisterAsValidatorMetadata: ") + } + votingSignaturePayload := CreateValidatorVotingSignaturePayload( + transactorPublicKey, metadata.VotingPublicKey, metadata.VotingSignatureBlockHeight, + ) + isValidBLSSignature, err := metadata.VotingPublicKey.Verify(metadata.VotingPublicKeySignature, votingSignaturePayload) + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error verifying VotingPublicKeySignature: ") + } + if !isValidBLSSignature { + return errors.Wrapf(RuleErrorValidatorInvalidVotingPublicKeySignature, "UtxoView.IsValidRegisterAsValidatorMetadata: ") + } // Error if updating DisableDelegatedStake from false to // true and there are existing delegated StakeEntries. 
@@ -1947,10 +1977,9 @@ func (bav *UtxoView) CreateRegisterAsValidatorTxindexMetadata( ValidatorPublicKeyBase58Check: validatorPublicKeyBase58Check, Domains: domains, DisableDelegatedStake: metadata.DisableDelegatedStake, - // TODO: In a subsequent PR, update to convert BLS public keys and signatures to strings. - VotingPublicKey: string(metadata.VotingPublicKey), - VotingPublicKeySignature: string(metadata.VotingPublicKeySignature), - VotingSignatureBlockHeight: metadata.VotingSignatureBlockHeight, + VotingPublicKey: metadata.VotingPublicKey.ToString(), + VotingPublicKeySignature: metadata.VotingPublicKeySignature.ToString(), + VotingSignatureBlockHeight: metadata.VotingSignatureBlockHeight, } // Construct AffectedPublicKeys. @@ -2031,6 +2060,61 @@ func (bav *UtxoView) CreateUnjailValidatorTxindexMetadata( return &UnjailValidatorTxindexMetadata{}, affectedPublicKeys } +// +// BLS UTILS +// + +func EncodeBLSPublicKey(blsPublicKey *bls.PublicKey) []byte { + var blsPublicKeyBytes []byte + if blsPublicKey != nil { + blsPublicKeyBytes = blsPublicKey.ToBytes() + } + return EncodeByteArray(blsPublicKeyBytes) +} + +func DecodeBLSPublicKey(rr io.Reader) (*bls.PublicKey, error) { + publicKeyBytes, err := DecodeByteArray(rr) + if err != nil { + return nil, err + } + return (&bls.PublicKey{}).FromBytes(publicKeyBytes) +} + +func EncodeBLSSignature(blsSignature *bls.Signature) []byte { + var blsSignatureBytes []byte + if blsSignature != nil { + blsSignatureBytes = blsSignature.ToBytes() + } + return EncodeByteArray(blsSignatureBytes) +} + +func DecodeBLSSignature(rr io.Reader) (*bls.Signature, error) { + signatureBytes, err := DecodeByteArray(rr) + if err != nil { + return nil, err + } + return (&bls.Signature{}).FromBytes(signatureBytes) +} + +func CreateValidatorVotingSignaturePayload( + transactorPublicKeyBytes []byte, + votingPublicKey *bls.PublicKey, + votingSignatureBlockHeight uint64, +) []byte { + var data []byte + // HASH(TransactorPublicKey) + 
hashedTransactorPublicKey := sha256.Sum256(transactorPublicKeyBytes) + data = append(data, hashedTransactorPublicKey[:]...) + // HASH(VotingPublicKey) + hashedVotingPublicKey := sha256.Sum256(votingPublicKey.ToBytes()) + data = append(data, hashedVotingPublicKey[:]...) + // VotingSignatureBlockHeight + data = append(data, UintToBuf(votingSignatureBlockHeight)...) + // HASH( HASH(TransactorPublicKey) || HASH(VotingPublicKey) || VotingSignatureBlockHeight ) + hashedData := sha256.Sum256(data) + return hashedData[:] +} + // // CONSTANTS // @@ -2042,6 +2126,10 @@ const RuleErrorValidatorTooManyDomains RuleError = "RuleErrorValidatorTooManyDom const RuleErrorValidatorInvalidDomain RuleError = "RuleErrorValidatorInvalidDomain" const RuleErrorValidatorDuplicateDomains RuleError = "RuleErrorValidatorDuplicateDomains" const RuleErrorValidatorNotFound RuleError = "RuleErrorValidatorNotFound" +const RuleErrorValidatorMissingVotingPublicKey RuleError = "RuleErrorValidatorMissingVotingPublicKey" +const RuleErrorValidatorMissingVotingPublicKeySignature RuleError = "RuleErrorValidatorMissingVotingPublicKeySignature" +const RuleErrorValidatorInvalidVotingPublicKeySignature RuleError = "RuleErrorValidatorInvalidVotingPublicKeySignature" +const RuleErrorValidatorInvalidVotingSignatureBlockHeight RuleError = "RuleErrorValidatorInvalidVotingSignatureBlockHeight" const RuleErrorValidatorDisablingExistingDelegatedStakers RuleError = "RuleErrorValidatorDisablingExistingDelegatedStakers" const RuleErrorUnjailingNonjailedValidator RuleError = "RuleErrorUnjailingNonjailedValidator" const RuleErrorUnjailingValidatorTooEarly RuleError = "RuleErrorUnjailingValidatorTooEarly" diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 0848f404f..6cfe055a3 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -1,9 +1,12 @@ +//go:build relic + package lib import ( "errors" "fmt" "github.com/btcsuite/btcd/btcec" + 
"github.com/deso-protocol/core/bls" "github.com/holiman/uint256" "github.com/stretchr/testify/require" "math" @@ -88,9 +91,13 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - DisableDelegatedStake: false, + Domains: [][]byte{[]byte("https://example.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) @@ -144,11 +151,130 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Error(t, err) require.Contains(t, err.Error(), RuleErrorValidatorDuplicateDomains) } + { + // RuleErrorValidatorMissingVotingPublicKey + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + } + _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorMissingVotingPublicKey) + } + { + // RuleErrorValidatorInvalidVotingSignatureBlockHeight: missing + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + } + _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + 
require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingSignatureBlockHeight) + } + { + // RuleErrorValidatorInvalidVotingSignatureBlockHeight: too low + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight - 1, + } + _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingSignatureBlockHeight) + } + { + // RuleErrorValidatorInvalidVotingSignatureBlockHeight: too high + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight + params.ValidatorVotingSignatureBlockHeightWindow + 1, + } + _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingSignatureBlockHeight) + } + { + // RuleErrorValidatorMissingVotingPublicKeySignature + votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingSignatureBlockHeight: blockHeight, + } + _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorMissingVotingPublicKeySignature) + } + { + // 
RuleErrorValidatorInvalidVotingPublicKeySignature: invalid TransactorPkBytes + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, + } + _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingPublicKeySignature) + } + { + // RuleErrorValidatorInvalidVotingPublicKeySignature: invalid VotingPublicKey + votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + _, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, + } + _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingPublicKeySignature) + } + { + // RuleErrorValidatorInvalidVotingPublicKeySignature: invalid VotingSignatureBlockHeight + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight + 1, + } + _, err = _submitRegisterAsValidatorTxn( + testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingPublicKeySignature) + } { 
// Happy path: register a validator + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - DisableDelegatedStake: false, + Domains: [][]byte{[]byte("https://example.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } extraData := map[string][]byte{"TestKey": []byte("TestValue1")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -183,9 +309,13 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { } { // Happy path: update a validator + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example1.com"), []byte("https://example2.com")}, - DisableDelegatedStake: false, + Domains: [][]byte{[]byte("https://example1.com"), []byte("https://example2.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -542,9 +672,15 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { derivedKeyPriv, err := _submitAuthorizeDerivedKeyTxn(TxnTypeRegisterAsValidator, 1) require.NoError(t, err) + // Create a VotingPublicKey and VotingSignature. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes, blockHeight) + // Perform a RegisterAsValidator txn. No error expected. 
registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } err = _submitValidatorTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: registerAsValidatorMetadata}, @@ -603,9 +739,15 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), RuleErrorValidatorNotFound) + // Create a VotingPublicKey and VotingSignature. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes, blockHeight) + // Perform a RegisterAsValidator txn. Error expected. registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } err = _submitValidatorTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: registerAsValidatorMetadata}, @@ -701,8 +843,12 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { } { // m0 registers as a validator. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, + Domains: [][]byte{[]byte("https://m0.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -716,8 +862,12 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { } { // m1 registers as a validator. 
+ votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m1.com")}, + Domains: [][]byte{[]byte("https://m1.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -729,8 +879,12 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { } { // m2 registers as a validator. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m2PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m2.com")}, + Domains: [][]byte{[]byte("https://m2.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, m2Pub, m2Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -902,9 +1056,13 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { m5PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m5PkBytes).PKID // Store m0's ValidatorEntry in the db with TotalStake = 100 nanos. 
+ votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) validatorEntry := &ValidatorEntry{ - ValidatorPKID: m0PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(100), + ValidatorPKID: m0PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(100), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } utxoView._setValidatorEntryMappings(validatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -919,10 +1077,14 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Empty(t, utxoView.ValidatorMapKeyToValidatorEntry) // Store m1's jailed ValidatorEntry in the db with TotalStake = 400 nanos. + votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) validatorEntry = &ValidatorEntry{ - ValidatorPKID: m1PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(400), - JailedAtEpochNumber: 1, + ValidatorPKID: m1PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(400), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, + JailedAtEpochNumber: 1, } utxoView._setValidatorEntryMappings(validatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -935,9 +1097,13 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) // Store m2's ValidatorEntry in the db with TotalStake = 300 nanos. 
+ votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m2PkBytes, blockHeight) m2ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m2PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(300), + ValidatorPKID: m2PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(300), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } utxoView._setValidatorEntryMappings(m2ValidatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -949,9 +1115,13 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) // Store m3's ValidatorEntry in the db with TotalStake = 600 nanos. + votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m3PkBytes, blockHeight) m3ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m3PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(600), + ValidatorPKID: m3PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(600), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } utxoView._setValidatorEntryMappings(m3ValidatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -984,9 +1154,13 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.True(t, utxoView.ValidatorMapKeyToValidatorEntry[m3ValidatorEntry.ToMapKey()].isDeleted) // Store m4's ValidatorEntry in the UtxoView with TotalStake = 50 nanos. 
+ votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m4PkBytes, blockHeight) m4ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m4PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), + ValidatorPKID: m4PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } utxoView._setValidatorEntryMappings(m4ValidatorEntry) @@ -1005,10 +1179,14 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { ) // Store m5's jailed ValidatorEntry in the UtxoView with TotalStake = 500 nanos. + votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m5PkBytes, blockHeight) m5ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m5PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(500), - JailedAtEpochNumber: 1, + ValidatorPKID: m5PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(500), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, + JailedAtEpochNumber: 1, } utxoView._setValidatorEntryMappings(m5ValidatorEntry) @@ -1118,9 +1296,13 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { } { // m0 registers as a validator with DisableDelegatedStake = FALSE. 
+ votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: false, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1136,9 +1318,13 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { } { // m0 updates DisableDelegatedStake = TRUE. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: true, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: true, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1178,9 +1364,13 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { } { // m0 updates DisableDelegatedStake = FALSE. 
+ votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: false, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1207,9 +1397,13 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { } { // m0 tries to update DisableDelegateStake = TRUE. Errors. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: true, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: true, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) @@ -1304,8 +1498,12 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { } { // m0 registers as a validator. 
+ votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, + Domains: [][]byte{[]byte("https://m0.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1507,8 +1705,12 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { } { // m0 registers as a validator. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } extraData := map[string][]byte{"TestKey": []byte("TestValue1")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -1809,8 +2011,12 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { } { // sender registers as a validator. 
+ votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + VotingSignatureBlockHeight: blockHeight, } _, err = _submitRegisterAsValidatorTxn(testMeta, senderPkString, senderPrivString, registerMetadata, nil, true) require.NoError(t, err) @@ -1950,3 +2156,13 @@ func _submitUnjailValidatorTxn( testMeta.txns = append(testMeta.txns, txn) return fees, nil } + +func _generateVotingPublicKeyAndSignature(t *testing.T, transactorPkBytes []byte, blockHeight uint64) (*bls.PublicKey, *bls.Signature) { + blsPrivateKey, err := bls.NewPrivateKey() + require.NoError(t, err) + votingPublicKey := blsPrivateKey.PublicKey() + signaturePayload := CreateValidatorVotingSignaturePayload(transactorPkBytes, votingPublicKey, blockHeight) + votingSignature, err := blsPrivateKey.Sign(signaturePayload) + require.NoError(t, err) + return votingPublicKey, votingSignature +} diff --git a/lib/constants.go b/lib/constants.go index 48e745653..4530999cd 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -606,6 +606,12 @@ type DeSoParams struct { // TODO: Move this to GlobalParamsEntry. ValidatorJailEpochDuration uint64 + // When registering, a validator must include a VotingSignatureBlockHeight within CurrentBlockHeight + // to CurrentBlockHeight + ValidatorVotingSignatureBlockHeightWindow blocks. This is to prevent + // validator registration replay attacks. + // TODO: Move this to GlobalParamsEntry. + ValidatorVotingSignatureBlockHeightWindow uint64 + ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -984,6 +990,11 @@ var DeSoMainnetParams = DeSoParams{ // Jailed validators can be unjailed after a minimum of N elapsed epochs. 
ValidatorJailEpochDuration: uint64(3), + // When registering, a validator must include a VotingSignatureBlockHeight within CurrentBlockHeight + // to CurrentBlockHeight + ValidatorVotingSignatureBlockHeightWindow blocks. This is to prevent + // validator registration replay attacks. + ValidatorVotingSignatureBlockHeightWindow: uint64(300), + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1218,6 +1229,11 @@ var DeSoTestnetParams = DeSoParams{ // Jailed validators can be unjailed after a minimum of N elapsed epochs. ValidatorJailEpochDuration: uint64(3), + // When registering, a validator must include a VotingSignatureBlockHeight within CurrentBlockHeight + // to CurrentBlockHeight + ValidatorVotingSignatureBlockHeightWindow blocks. This is to prevent + // validator registration replay attacks. + ValidatorVotingSignatureBlockHeightWindow: uint64(300), + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), diff --git a/scripts/install-relic.sh b/scripts/install-relic.sh new file mode 100755 index 000000000..cd2dcb9eb --- /dev/null +++ b/scripts/install-relic.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# This script installs Relic, a C library dependency for the BLS signature implementation +# provided by Flow. This file was copy-pasted from Flow's installation README found here: +# https://github.com/onflow/flow-go/tree/master/crypto. 
+ +# crypto package +PKG_NAME="github.com/onflow/flow-go/crypto" + +# go.mod +MOD_FILE="./go.mod" + +# the version of onflow/flow-go/crypto used in the project is read from the go.mod file +if [ -f "${MOD_FILE}" ] +then + # extract the version from the go.mod file + VERSION="$(grep ${PKG_NAME} < ${MOD_FILE} | cut -d' ' -f 2)" + # using the right version, get the package directory path + PKG_DIR="$(go env GOPATH)/pkg/mod/${PKG_NAME}@${VERSION}" +else + { echo "couldn't find go.mod file - make sure the script is in the project root directory"; exit 1; } +fi + +# grant permissions if not existent +if [[ ! -r ${PKG_DIR} || ! -w ${PKG_DIR} || ! -x ${PKG_DIR} ]]; then + sudo chmod -R 755 "${PKG_DIR}" +fi + +# get into the package directory and set up the external dependencies +( + cd "${PKG_DIR}" || { echo "cd into the GOPATH package folder failed"; exit 1; } + go generate +) diff --git a/test.Dockerfile b/test.Dockerfile index a31beba78..6eb62c4f9 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -2,7 +2,10 @@ FROM alpine:latest AS core RUN apk update RUN apk upgrade -RUN apk add --update go gcc g++ vips vips-dev +RUN apk add --update bash cmake git gcc g++ make vips vips-dev + +COPY --from=golang:1.20-alpine /usr/local/go/ /usr/local/go/ +ENV PATH="/usr/local/go/bin:${PATH}" WORKDIR /deso/src/core @@ -11,14 +14,18 @@ COPY go.sum . RUN go mod download -COPY desohash desohash +COPY scripts/install-relic.sh . +RUN ./install-relic.sh + +COPY bls bls COPY cmd cmd +COPY desohash desohash COPY lib lib -COPY test_data test_data COPY migrate migrate +COPY test_data test_data COPY main.go . 
# build backend RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core main.go -ENTRYPOINT ["go", "test", "-v", "github.com/deso-protocol/core/lib"] +ENTRYPOINT ["go", "test", "-tags", "relic", "-v", "github.com/deso-protocol/core/bls", "github.com/deso-protocol/core/lib"] From 5c41878bb8aa3f689b4e752d05427d513522eabf Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 12 May 2023 14:10:56 -0400 Subject: [PATCH 031/762] Add more nil checks. --- bls/signature.go | 40 +++++++++++++++++++++++----------------- bls/signature_test.go | 2 +- 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/bls/signature.go b/bls/signature.go index 1b7258939..93d6f9363 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -58,8 +58,8 @@ func NewPrivateKey() (*PrivateKey, error) { } func (privateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { - if privateKey.flowPrivateKey == nil { - return nil, errors.New("bls.PrivateKey is nil") + if privateKey == nil || privateKey.flowPrivateKey == nil { + return nil, errors.New("PrivateKey is nil") } flowSignature, err := privateKey.flowPrivateKey.Sign(payloadBytes, HashingAlgorithm) if err != nil { @@ -69,21 +69,21 @@ func (privateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { } func (privateKey *PrivateKey) PublicKey() *PublicKey { - if privateKey.flowPrivateKey == nil { + if privateKey == nil || privateKey.flowPrivateKey == nil { return nil } return &PublicKey{flowPublicKey: privateKey.flowPrivateKey.PublicKey()} } func (privateKey *PrivateKey) ToString() string { - if privateKey.flowPrivateKey == nil { + if privateKey == nil || privateKey.flowPrivateKey == nil { return "" } return privateKey.flowPrivateKey.String() } func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { - if privateKeyString == "" { + if privateKey == nil || privateKeyString == "" { return nil, nil } // Chop off leading 0x, if exists. Otherwise, does nothing. 
@@ -99,7 +99,7 @@ func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, } func (privateKey *PrivateKey) Eq(other *PrivateKey) bool { - if privateKey.flowPrivateKey == nil || other == nil { + if privateKey == nil || privateKey.flowPrivateKey == nil || other == nil { return false } return privateKey.flowPrivateKey.Equals(other.flowPrivateKey) @@ -114,7 +114,7 @@ type PublicKey struct { } func (publicKey *PublicKey) Verify(signature *Signature, input []byte) (bool, error) { - if publicKey.flowPublicKey == nil { + if publicKey == nil || publicKey.flowPublicKey == nil { return false, errors.New("bls.PublicKey is nil") } return publicKey.flowPublicKey.Verify(signature.flowSignature, input, HashingAlgorithm) @@ -122,14 +122,14 @@ func (publicKey *PublicKey) Verify(signature *Signature, input []byte) (bool, er func (publicKey *PublicKey) ToBytes() []byte { var publicKeyBytes []byte - if publicKey.flowPublicKey != nil { + if publicKey != nil && publicKey.flowPublicKey != nil { publicKeyBytes = publicKey.flowPublicKey.Encode() } return publicKeyBytes } func (publicKey *PublicKey) FromBytes(publicKeyBytes []byte) (*PublicKey, error) { - if len(publicKeyBytes) == 0 { + if publicKey == nil || len(publicKeyBytes) == 0 { return nil, nil } var err error @@ -138,14 +138,14 @@ func (publicKey *PublicKey) FromBytes(publicKeyBytes []byte) (*PublicKey, error) } func (publicKey *PublicKey) ToString() string { - if publicKey.flowPublicKey == nil { + if publicKey == nil || publicKey.flowPublicKey == nil { return "" } return publicKey.flowPublicKey.String() } func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, error) { - if publicKeyString == "" { + if publicKey == nil || publicKeyString == "" { return nil, nil } // Chop off leading 0x, if exists. Otherwise, does nothing. 
@@ -161,13 +161,16 @@ func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, erro } func (publicKey *PublicKey) Eq(other *PublicKey) bool { - if publicKey.flowPublicKey == nil || other == nil { + if publicKey == nil || publicKey.flowPublicKey == nil || other == nil { return false } return publicKey.flowPublicKey.Equals(other.flowPublicKey) } func (publicKey *PublicKey) Copy() *PublicKey { + if publicKey == nil { + return nil + } return &PublicKey{ flowPublicKey: publicKey.flowPublicKey, } @@ -183,14 +186,14 @@ type Signature struct { func (signature *Signature) ToBytes() []byte { var signatureBytes []byte - if signature.flowSignature != nil { + if signature != nil && signature.flowSignature != nil { signatureBytes = signature.flowSignature.Bytes() } return signatureBytes } func (signature *Signature) FromBytes(signatureBytes []byte) (*Signature, error) { - if len(signatureBytes) == 0 { + if signature == nil || len(signatureBytes) == 0 { return nil, nil } signature.flowSignature = signatureBytes @@ -198,14 +201,14 @@ func (signature *Signature) FromBytes(signatureBytes []byte) (*Signature, error) } func (signature *Signature) ToString() string { - if signature.flowSignature == nil { + if signature == nil || signature.flowSignature == nil { return "" } return signature.flowSignature.String() } func (signature *Signature) FromString(signatureString string) (*Signature, error) { - if signatureString == "" { + if signature == nil || signatureString == "" { return nil, nil } // Chop off leading 0x, if exists. Otherwise, does nothing. 
@@ -221,13 +224,16 @@ func (signature *Signature) FromString(signatureString string) (*Signature, erro } func (signature *Signature) Eq(other *Signature) bool { - if signature.flowSignature == nil || other == nil { + if signature == nil || signature.flowSignature == nil || other == nil { return false } return bytes.Equal(signature.ToBytes(), other.ToBytes()) } func (signature *Signature) Copy() *Signature { + if signature == nil { + return nil + } if signature.flowSignature == nil { return &Signature{} } diff --git a/bls/signature_test.go b/bls/signature_test.go index 12eeb627e..b734db286 100644 --- a/bls/signature_test.go +++ b/bls/signature_test.go @@ -139,7 +139,7 @@ func TestVerifyingBLSSignatures(t *testing.T) { // Sign() _, err = (&PrivateKey{}).Sign(randomPayload1) require.Error(t, err) - require.Contains(t, err.Error(), "bls.PrivateKey is nil") + require.Contains(t, err.Error(), "PrivateKey is nil") // PublicKey() require.Nil(t, (&PrivateKey{}).PublicKey()) // ToString() From 2d99f7eae03b1155505a65ec3f9c9a17173ab254 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 12 May 2023 14:18:07 -0400 Subject: [PATCH 032/762] Address PR feedback. 
--- lib/block_view_validator.go | 12 ++++-------- lib/network.go | 4 ++-- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index d2c282e8d..083b9bd30 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -131,18 +131,14 @@ func (validatorEntry *ValidatorEntry) RawDecodeWithoutMetadata(blockHeight uint6 var err error // ValidatorID - validatorID := &BlockHash{} - if exist, err := DecodeFromBytes(validatorID, rr); exist && err == nil { - validatorEntry.ValidatorID = validatorID - } else if err != nil { + validatorEntry.ValidatorID, err = DecodeDeSoEncoder(&BlockHash{}, rr) + if err != nil { return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading ValidatorID: ") } // ValidatorPKID - validatorPKID := &PKID{} - if exist, err := DecodeFromBytes(validatorPKID, rr); exist && err == nil { - validatorEntry.ValidatorPKID = validatorPKID - } else if err != nil { + validatorEntry.ValidatorPKID, err = DecodeDeSoEncoder(&PKID{}, rr) + if err != nil { return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading ValidatorPKID: ") } diff --git a/lib/network.go b/lib/network.go index c096f701f..b740c0faa 100644 --- a/lib/network.go +++ b/lib/network.go @@ -6401,11 +6401,11 @@ func (tsl *TransactionSpendingLimit) Copy() *TransactionSpendingLimit { } for stakeLimitKey, stakeLimitDESONanos := range tsl.StakeLimitMap { - copyTSL.StakeLimitMap[stakeLimitKey] = stakeLimitDESONanos + copyTSL.StakeLimitMap[stakeLimitKey] = stakeLimitDESONanos.Clone() } for stakeLimitKey, unstakeLimitDESONanos := range tsl.UnstakeLimitMap { - copyTSL.UnstakeLimitMap[stakeLimitKey] = unstakeLimitDESONanos + copyTSL.UnstakeLimitMap[stakeLimitKey] = unstakeLimitDESONanos.Clone() } for stakeLimitKey, unlockStakeOperationCount := range tsl.UnlockStakeLimitMap { From fd29ce36f73ae690af36403dbfea4b540da6e8c0 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 12 May 2023 14:59:29 -0400 Subject: [PATCH 
033/762] Add Solorand random seed utils. --- lib/block_view.go | 8 ++ lib/block_view_flush.go | 3 + lib/db_utils.go | 9 +- lib/pos_random_seed.go | 172 ++++++++++++++++++++++++++++++++++++ lib/pos_random_seed_test.go | 42 +++++++++ 5 files changed, 233 insertions(+), 1 deletion(-) create mode 100644 lib/pos_random_seed.go create mode 100644 lib/pos_random_seed_test.go diff --git a/lib/block_view.go b/lib/block_view.go index 0ce1a150e..63145ce42 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -129,6 +129,9 @@ type UtxoView struct { // Current EpochEntry CurrentEpochEntry *EpochEntry + // Current RandomSeedHash + CurrentRandomSeedHash *RandomSeedHash + // The hash of the tip the view is currently referencing. Mainly used // for error-checking when doing a bulk operation on the view. TipHash *BlockHash @@ -516,6 +519,11 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.CurrentEpochEntry = bav.CurrentEpochEntry.Copy() } + // Copy the CurrentRandomSeedHash + if bav.CurrentRandomSeedHash != nil { + newView.CurrentRandomSeedHash = bav.CurrentRandomSeedHash.Copy() + } + return newView, nil } diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 6ae70f214..6057a11b8 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -158,6 +158,9 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushCurrentEpochEntryToDbWithTxn(txn, blockHeight); err != nil { return err } + if err := bav._flushCurrentRandomSeedHashToDbWithTxn(txn, blockHeight); err != nil { + return err + } return nil } diff --git a/lib/db_utils.go b/lib/db_utils.go index cb3892d59..c05e9e939 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -506,7 +506,11 @@ type DBPrefixes struct { // Prefix -> EpochEntry PrefixCurrentEpoch []byte `prefix_id:"[83]" is_state:"true"` - // NEXT_TAG: 84 + // PrefixCurrentRandomSeedHash: Retrieve the current RandomSeedHash. + // Prefix -> RandomSeedHash. 
+ PrefixCurrentRandomSeedHash []byte `prefix_id:"[84]" is_state:"true"` + + // NEXT_TAG: 85 } // StatePrefixToDeSoEncoder maps each state prefix to a DeSoEncoder type that is stored under that prefix. @@ -726,6 +730,9 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixCurrentEpoch) { // prefix_id:"[83]" return true, &EpochEntry{} + } else if bytes.Equal(prefix, Prefixes.PrefixCurrentRandomSeedHash) { + // prefix_id:"[84]" + return false, nil } return true, nil diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go new file mode 100644 index 000000000..2d13974d7 --- /dev/null +++ b/lib/pos_random_seed.go @@ -0,0 +1,172 @@ +package lib + +import ( + "crypto/sha256" + "github.com/deso-protocol/core/bls" + "github.com/dgraph-io/badger/v3" + "github.com/golang/glog" + "github.com/pkg/errors" + "io" +) + +const GenesisRandomSeedHash = "0x00" // TODO: what should we put here? + +type RandomSeedHash [32]byte + +func (randomSeedHash *RandomSeedHash) Copy() *RandomSeedHash { + var randomSeedHashCopy *RandomSeedHash + copy(randomSeedHashCopy[:], randomSeedHash[:]) + return randomSeedHashCopy +} + +func (randomSeedHash *RandomSeedHash) ToBytes() []byte { + return randomSeedHash[:] +} + +func (randomSeedHash *RandomSeedHash) FromBytes(randomSeedHashBytes []byte) (*RandomSeedHash, error) { + if len(randomSeedHashBytes) != 32 { + return nil, errors.Errorf("RandomSeedHash.FromBytes: input has length %d but should have length 32", len(randomSeedHashBytes)) + } + copy(randomSeedHash[:], randomSeedHashBytes) + return randomSeedHash, nil +} + +func EncodeRandomSeedHash(randomSeedHash *RandomSeedHash) []byte { + return EncodeByteArray(randomSeedHash.ToBytes()) +} + +func DecodeRandomSeedHash(rr io.Reader) (*RandomSeedHash, error) { + randomSeedHashBytes, err := DecodeByteArray(rr) + if err != nil { + return nil, errors.Wrapf(err, "DecodeRandomSeedHash: problem reading RandomSeedHash from bytes: ") + } + 
return (&RandomSeedHash{}).FromBytes(randomSeedHashBytes) +} + +func (bav *UtxoView) GenerateRandomSeedSignature(signerPrivateKey *bls.PrivateKey) (*bls.Signature, error) { + currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash() + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GenerateRandomSeedSignature: problem retrieving CurrentRandomSeedHash: ") + } + randomSeedSignature, err := signerPrivateKey.Sign(currentRandomSeedHash[:]) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GenerateRandomSeedSignature: problem signing CurrentRandomSeedHash: ") + } + return randomSeedSignature, nil +} + +func (bav *UtxoView) ProcessNewRandomSeedSignature( + signerPublicKey *bls.PublicKey, + randomSeedSignature *bls.Signature, +) (*RandomSeedHash, error) { + // Verify the RandomSeedSignature. + isVerified, err := bav.VerifyRandomSeedSignature(signerPublicKey, randomSeedSignature) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.ProcessNewRandomSeedSignature: problem verifying RandomSeedSignature: ") + } + if !isVerified { + return nil, errors.New("UtxoView.ProcessNewRandomSeedSignature: invalid RandomSeedSignature provided") + } + // Convert the RandomSeedSignature to a RandomSeedHash. + randomSeedSHA256 := sha256.Sum256(randomSeedSignature.ToBytes()) + randomSeedHash, err := (&RandomSeedHash{}).FromBytes(randomSeedSHA256[:]) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.ProcessNewRandomSeedSignature: problem hashing RandomSeedSignature: ") + } + // Store the RandomSeedHash in the UtxoView. + bav._setCurrentRandomSeedHash(randomSeedHash) + // Return the RandomSeedHash. 
+ return randomSeedHash, nil +} + +func (bav *UtxoView) VerifyRandomSeedSignature(signerPublicKey *bls.PublicKey, randomSeedSignature *bls.Signature) (bool, error) { + currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash() + if err != nil { + return false, errors.Wrapf(err, "UtxoView.VerifyRandomSeedSignature: problem retrieving CurrentRandomSeedHash: ") + } + return signerPublicKey.Verify(randomSeedSignature, currentRandomSeedHash[:]) +} + +func (bav *UtxoView) GetCurrentRandomSeedHash() (*RandomSeedHash, error) { + // First, check the UtxoView. + if bav.CurrentRandomSeedHash != nil { + return bav.CurrentRandomSeedHash, nil + } + // Then, check the db. + currentRandomSeedHash, err := DBGetCurrentRandomSeedHash(bav.Handle, bav.Snapshot) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetCurrentRandomSeedHash: problem retrieving CurrentRandomSeedHash from the db: ") + } + if currentRandomSeedHash != nil { + // If a RandomSeedHash is found in the db, cache in the UtxoView and return. + bav.CurrentRandomSeedHash = currentRandomSeedHash.Copy() + return currentRandomSeedHash, nil + } + // If no RandomSeedHash is found in the UtxoView or db, return the GenesisRandomSeedHash. + var genesisRandomSeedHashBytes [32]byte + return (&RandomSeedHash{}).FromBytes(genesisRandomSeedHashBytes[:]) +} + +func (bav *UtxoView) _setCurrentRandomSeedHash(randomSeedHash *RandomSeedHash) { + if randomSeedHash == nil { + glog.Errorf("UtxoView._setCurrentRandomSeedHash: called with nil entry, this should never happen") + return + } + bav.CurrentRandomSeedHash = randomSeedHash.Copy() +} + +func (bav *UtxoView) _flushCurrentRandomSeedHashToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + // If CurrentRandomSeedHash is nil, then it was never + // set and shouldn't overwrite the value in the db. 
+ if bav.CurrentRandomSeedHash == nil { + return nil + } + + return DBPutCurrentRandomSeedHashWithTxn(txn, bav.Snapshot, bav.CurrentRandomSeedHash, blockHeight) +} + +func DBKeyForCurrentRandomSeedHash() []byte { + return append([]byte{}, Prefixes.PrefixCurrentRandomSeedHash...) +} + +func DBGetCurrentRandomSeedHash(handle *badger.DB, snap *Snapshot) (*RandomSeedHash, error) { + var ret *RandomSeedHash + err := handle.View(func(txn *badger.Txn) error { + var innerErr error + ret, innerErr = DBGetCurrentRandomSeedHashWithTxn(txn, snap) + return innerErr + }) + return ret, err +} + +func DBGetCurrentRandomSeedHashWithTxn(txn *badger.Txn, snap *Snapshot) (*RandomSeedHash, error) { + // Retrieve from db. + key := DBKeyForCurrentRandomSeedHash() + currentRandomSeedHashBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + // We don't want to error if the key isn't found. Instead, return nil. + if err == badger.ErrKeyNotFound { + return nil, nil + } + return nil, errors.Wrapf(err, "DBGetCurrentRandomSeedHashWithTxn: problem retrieving value") + } + + // Decode from bytes. + return (&RandomSeedHash{}).FromBytes(currentRandomSeedHashBytes) +} + +func DBPutCurrentRandomSeedHashWithTxn( + txn *badger.Txn, + snap *Snapshot, + currentRandomSeedHash *RandomSeedHash, + blockHeight uint64, +) error { + if currentRandomSeedHash == nil { + // This should never happen but is a sanity check. 
+ glog.Errorf("DBPutCurrentRandomSeedHashWithTxn: called with nil CurrentRandomSeedHash") + return nil + } + + key := DBKeyForCurrentRandomSeedHash() + return DBSetWithTxn(txn, snap, key, currentRandomSeedHash.ToBytes()) +} diff --git a/lib/pos_random_seed_test.go b/lib/pos_random_seed_test.go new file mode 100644 index 000000000..7195c05c3 --- /dev/null +++ b/lib/pos_random_seed_test.go @@ -0,0 +1,42 @@ +//go:build relic + +package lib + +import ( + "github.com/deso-protocol/core/bls" + "github.com/stretchr/testify/require" + "testing" +) + +func TestCurrentRandomSeedHash(t *testing.T) { + chain, params, db := NewLowDifficultyBlockchain(t) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + + // Generate two BLS public + private key pairs. + privateKey1, err := bls.NewPrivateKey() + require.NoError(t, err) + publicKey1 := privateKey1.PublicKey() + privateKey2, err := bls.NewPrivateKey() + require.NoError(t, err) + publicKey2 := privateKey2.PublicKey() + + // Test generating + verifying RandomSeedSignatures. 
+ randomSeedSignature1, err := utxoView.GenerateRandomSeedSignature(privateKey1) + require.NoError(t, err) + isVerified, err := utxoView.VerifyRandomSeedSignature(publicKey1, randomSeedSignature1) + require.NoError(t, err) + require.True(t, isVerified) + isVerified, err = utxoView.VerifyRandomSeedSignature(publicKey2, randomSeedSignature1) + require.NoError(t, err) + require.False(t, isVerified) + + randomSeedSignature2, err := utxoView.GenerateRandomSeedSignature(privateKey2) + require.NoError(t, err) + isVerified, err = utxoView.VerifyRandomSeedSignature(publicKey1, randomSeedSignature2) + require.NoError(t, err) + require.False(t, isVerified) + isVerified, err = utxoView.VerifyRandomSeedSignature(publicKey2, randomSeedSignature2) + require.NoError(t, err) + require.True(t, isVerified) +} From 136554ad74748905bd18139b5ae5fbb2a138b5e8 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 15 May 2023 12:18:09 -0400 Subject: [PATCH 034/762] Increase testing coverage. --- lib/pos_random_seed.go | 38 +++++++++++------------ lib/pos_random_seed_test.go | 60 +++++++++++++++++++++++++++++++------ 2 files changed, 69 insertions(+), 29 deletions(-) diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index 2d13974d7..2c83d1116 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -1,6 +1,7 @@ package lib import ( + "bytes" "crypto/sha256" "github.com/deso-protocol/core/bls" "github.com/dgraph-io/badger/v3" @@ -9,16 +10,18 @@ import ( "io" ) -const GenesisRandomSeedHash = "0x00" // TODO: what should we put here? 
- type RandomSeedHash [32]byte func (randomSeedHash *RandomSeedHash) Copy() *RandomSeedHash { - var randomSeedHashCopy *RandomSeedHash + randomSeedHashCopy := &RandomSeedHash{} copy(randomSeedHashCopy[:], randomSeedHash[:]) return randomSeedHashCopy } +func (randomSeedHash *RandomSeedHash) Eq(other *RandomSeedHash) bool { + return bytes.Equal(randomSeedHash.ToBytes(), other.ToBytes()) +} + func (randomSeedHash *RandomSeedHash) ToBytes() []byte { return randomSeedHash[:] } @@ -55,38 +58,31 @@ func (bav *UtxoView) GenerateRandomSeedSignature(signerPrivateKey *bls.PrivateKe return randomSeedSignature, nil } -func (bav *UtxoView) ProcessNewRandomSeedSignature( +func (bav *UtxoView) VerifyRandomSeedSignature( signerPublicKey *bls.PublicKey, randomSeedSignature *bls.Signature, ) (*RandomSeedHash, error) { // Verify the RandomSeedSignature. - isVerified, err := bav.VerifyRandomSeedSignature(signerPublicKey, randomSeedSignature) + currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash() + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.VerifyRandomSeedSignature: problem retrieving CurrentRandomSeedHash: ") + } + isVerified, err := signerPublicKey.Verify(randomSeedSignature, currentRandomSeedHash[:]) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.ProcessNewRandomSeedSignature: problem verifying RandomSeedSignature: ") + return nil, errors.Wrapf(err, "UtxoView.VerifyRandomSeedSignature: problem verifying RandomSeedSignature: ") } if !isVerified { - return nil, errors.New("UtxoView.ProcessNewRandomSeedSignature: invalid RandomSeedSignature provided") + return nil, errors.New("UtxoView.VerifyRandomSeedSignature: invalid RandomSeedSignature provided") } // Convert the RandomSeedSignature to a RandomSeedHash. 
randomSeedSHA256 := sha256.Sum256(randomSeedSignature.ToBytes()) randomSeedHash, err := (&RandomSeedHash{}).FromBytes(randomSeedSHA256[:]) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.ProcessNewRandomSeedSignature: problem hashing RandomSeedSignature: ") + return nil, errors.Wrapf(err, "UtxoView.VerifyRandomSeedSignature: problem hashing RandomSeedSignature: ") } - // Store the RandomSeedHash in the UtxoView. - bav._setCurrentRandomSeedHash(randomSeedHash) - // Return the RandomSeedHash. return randomSeedHash, nil } -func (bav *UtxoView) VerifyRandomSeedSignature(signerPublicKey *bls.PublicKey, randomSeedSignature *bls.Signature) (bool, error) { - currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash() - if err != nil { - return false, errors.Wrapf(err, "UtxoView.VerifyRandomSeedSignature: problem retrieving CurrentRandomSeedHash: ") - } - return signerPublicKey.Verify(randomSeedSignature, currentRandomSeedHash[:]) -} - func (bav *UtxoView) GetCurrentRandomSeedHash() (*RandomSeedHash, error) { // First, check the UtxoView. if bav.CurrentRandomSeedHash != nil { @@ -102,7 +98,9 @@ func (bav *UtxoView) GetCurrentRandomSeedHash() (*RandomSeedHash, error) { bav.CurrentRandomSeedHash = currentRandomSeedHash.Copy() return currentRandomSeedHash, nil } - // If no RandomSeedHash is found in the UtxoView or db, return the GenesisRandomSeedHash. + // If no RandomSeedHash is found in the UtxoView or db, return the + // GenesisRandomSeedHash which is 32 bytes of zeroes. + // TODO: should we change this? should we store it as a constant hex and parse into a byte slice? 
var genesisRandomSeedHashBytes [32]byte return (&RandomSeedHash{}).FromBytes(genesisRandomSeedHashBytes[:]) } diff --git a/lib/pos_random_seed_test.go b/lib/pos_random_seed_test.go index 7195c05c3..fd84ffce9 100644 --- a/lib/pos_random_seed_test.go +++ b/lib/pos_random_seed_test.go @@ -12,6 +12,7 @@ func TestCurrentRandomSeedHash(t *testing.T) { chain, params, db := NewLowDifficultyBlockchain(t) utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) + blockHeight := uint64(0) // Generate two BLS public + private key pairs. privateKey1, err := bls.NewPrivateKey() @@ -22,21 +23,62 @@ func TestCurrentRandomSeedHash(t *testing.T) { publicKey2 := privateKey2.PublicKey() // Test generating + verifying RandomSeedSignatures. + + // PrivateKey1 creates a new RandomSeedSignature. randomSeedSignature1, err := utxoView.GenerateRandomSeedSignature(privateKey1) require.NoError(t, err) - isVerified, err := utxoView.VerifyRandomSeedSignature(publicKey1, randomSeedSignature1) - require.NoError(t, err) - require.True(t, isVerified) - isVerified, err = utxoView.VerifyRandomSeedSignature(publicKey2, randomSeedSignature1) + // PublicKey1 is verified to correspond to PrivateKey that signed the RandomSeedSignature. + randomSeedHash1, err := utxoView.VerifyRandomSeedSignature(publicKey1, randomSeedSignature1) require.NoError(t, err) - require.False(t, isVerified) + require.NotNil(t, randomSeedHash1) + // PublicKey2 is not verified to correspond to the PrivateKey that signed the RandomSeedSignature. + randomSeedHash1, err = utxoView.VerifyRandomSeedSignature(publicKey2, randomSeedSignature1) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid RandomSeedSignature provided") + require.Nil(t, randomSeedHash1) + // PrivateKey2 creates a new RandomSeedSignature. 
randomSeedSignature2, err := utxoView.GenerateRandomSeedSignature(privateKey2) require.NoError(t, err) - isVerified, err = utxoView.VerifyRandomSeedSignature(publicKey1, randomSeedSignature2) + // PublicKey1 is not verified to correspond to the PrivateKey that signed the RandomSeedSignature. + randomSeedHash2, err := utxoView.VerifyRandomSeedSignature(publicKey1, randomSeedSignature2) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid RandomSeedSignature provided") + require.Nil(t, randomSeedHash2) + // PublicKey2 is verified to correspond to the PrivateKey that signed the RandomSeedSignature. + randomSeedHash2, err = utxoView.VerifyRandomSeedSignature(publicKey2, randomSeedSignature2) + require.NoError(t, err) + require.NotNil(t, randomSeedHash2) + + // Test updating CurrentRandomSeedHash. + + // The initial CurrentRandomSeedHash is the GenesisRandomSeedHash. + randomSeedHash1, err = utxoView.GetCurrentRandomSeedHash() + require.NoError(t, err) + require.True(t, randomSeedHash1.Eq(&RandomSeedHash{})) + + // PrivateKey1 generates a new RandomSeedSignature. + randomSeedSignature1, err = utxoView.GenerateRandomSeedSignature(privateKey1) + require.NoError(t, err) + // PublicKey1 is verified to correspond to the PrivateKey that signed the RandomSeedSignature. + randomSeedHash1, err = utxoView.VerifyRandomSeedSignature(publicKey1, randomSeedSignature1) + require.NoError(t, err) + require.NotNil(t, randomSeedHash1) + // The new RandomSeedHash is not the GenesisRandomSeedHash. + require.False(t, randomSeedHash1.Eq(&RandomSeedHash{})) + // We set the new CurrentRandomSeedHash. + utxoView._setCurrentRandomSeedHash(randomSeedHash1) + require.NoError(t, utxoView.FlushToDb(blockHeight)) + + // PrivateKey2 generates a new RandomSeedSignature. 
+ randomSeedSignature2, err = utxoView.GenerateRandomSeedSignature(privateKey2) require.NoError(t, err) - require.False(t, isVerified) - isVerified, err = utxoView.VerifyRandomSeedSignature(publicKey2, randomSeedSignature2) + // PublicKey2 is verified to correspond to the PrivateKey that signed the RandomSeedSignature. + randomSeedHash2, err = utxoView.VerifyRandomSeedSignature(publicKey2, randomSeedSignature2) require.NoError(t, err) - require.True(t, isVerified) + require.NotNil(t, randomSeedHash2) + // The new RandomSeedHash is not the GenesisRandomSeedHash. + require.False(t, randomSeedHash2.Eq(&RandomSeedHash{})) + // The new RandomSeedHash is not the previous CurrentRandomSeedHash. + require.False(t, randomSeedHash2.Eq(randomSeedHash1)) } From 8072fcee13bbe88c7e67a728ad8c8a4f126311d9 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 16 May 2023 10:01:06 -0400 Subject: [PATCH 035/762] Generate PoS leader schedules. --- lib/pos_leader_schedule.go | 77 +++++++++++++++++++++++++++++++++ lib/pos_leader_schedule_test.go | 1 + 2 files changed, 78 insertions(+) create mode 100644 lib/pos_leader_schedule.go create mode 100644 lib/pos_leader_schedule_test.go diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go new file mode 100644 index 000000000..813f14ee7 --- /dev/null +++ b/lib/pos_leader_schedule.go @@ -0,0 +1,77 @@ +package lib + +import ( + "github.com/holiman/uint256" + "github.com/pkg/errors" + "math/big" + "math/rand" +) + +func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { + numValidators := 100 // bav.Params.PoSLeaderScheduleNumValidators + + // Retrieve CurrentRandomSeedHash. + currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash() + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving CurrentRandomSeedHash: ") + } + + // Retrieve top, active validators ordered by stake. 
+ validatorEntries, err := bav.GetTopActiveValidatorsByStake(int(numValidators)) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving top ValidatorEntries: ") + } + if len(validatorEntries) == 0 { + return nil, nil + } + + // Sum TotalStakeAmountNanos. + totalStakeAmountNanos := uint256.NewInt() + for _, validatorEntry := range validatorEntries { + totalStakeAmountNanos, err = SafeUint256().Add(totalStakeAmountNanos, validatorEntry.TotalStakeAmountNanos) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error summing TotalStakeAmountNanos: ") + } + } + + var leaderSchedule []*ValidatorEntry + + r := rand.New(rand.NewSource(int64(currentRandomSeedHash.ToUint64()))) + + // In a loop... + + for _, validatorEntry := range validatorEntries { + // Pick a random uin256.Int between 0 and TotalStakeAmountNanos. + randomUint256, err := RandomUint256(r) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error generating random uint256: ") + } + + // Iterate through ValidatorEntries until ValidatorEntry.TotalStakeAmountNanos >= randomIter. + if validatorEntry.TotalStakeAmountNanos.Gt(randomUint256) { + leaderSchedule = append(leaderSchedule, validatorEntry) + } + + // Add that ValidatorEntry to the leaderSchedule. Remove that ValidatorEntry from the validatorEntries slice. + // Subtract the ValidatorEntry.TotalStakeAmountNanos from the TotalStakeAmountNanos. 
+ } + + return leaderSchedule, nil +} + +func RandomUint256(r *rand.Rand) (*uint256.Int, error) { + digits := []byte("0123456789abcdef") + uint256ByteSlice := make([]byte, 256) + for i := 0; i < 256; i++ { + uint256ByteSlice[i] = digits[r.Intn(16)] + } + uint256String := string(uint256ByteSlice) + + uint256BigInt, success := big.NewInt(0).SetString(uint256String, 16) + if !success { + return nil, errors.New("RandomUint256: problem converting string to big.Int") + } + randUint256 := uint256.NewInt() + randUint256.SetFromBig(uint256BigInt) + return randUint256, nil +} diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go new file mode 100644 index 000000000..55c21f80a --- /dev/null +++ b/lib/pos_leader_schedule_test.go @@ -0,0 +1 @@ +package lib From bb3e89fa18ba6dbac887346bf15a78d80014825b Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 16 May 2023 14:03:29 -0400 Subject: [PATCH 036/762] Add comments. --- bls/signature.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/bls/signature.go b/bls/signature.go index 93d6f9363..7f9e3d9b6 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -11,11 +11,22 @@ import ( "strings" ) +// The SigningAlgorithm for BLS keys is BLSBLS12381 which is BLS on the BLS 12-381 curve. +// This is the only supported BLS signing algorithm in the flowCrypto package. +// BLS is used such that we can aggregate signatures into one signature. const SigningAlgorithm = flowCrypto.BLSBLS12381 -// TODO: what should the domainTag param be? -var HashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-protocol") +// The HashingAlgorithm for BLS keys is the following. This algorithm is used to hash input data onto the +// BLS 12-381 curve for generating signatures. The returned instance is a Hasher and can be used to +// generate BLS signatures with the Sign() method. This is the only supported BLS Hasher in the flowCrypto +// package. 
The input domainTag is a separation tag that defines the protocol and its subdomain. Such tag +// should be of the format: -V-CS-with- where is the name of the protocol, +// the protocol version number, and the index of the ciphersuite in the protocol. +var HashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-V1-CS01-with-") +// AggregateSignatures takes in an input slice of bls.Signatures and aggregates them +// into a single bls.Signature. The assumption is that each of the input bls.Signatures +// were generated by different bls.PrivateKeys signing the same payload. func AggregateSignatures(signatures []*Signature) (*Signature, error) { var flowSignatures []flowCrypto.Signature for _, signature := range signatures { @@ -28,6 +39,9 @@ func AggregateSignatures(signatures []*Signature) (*Signature, error) { return &Signature{flowSignature: aggregateFlowSignature}, nil } +// VerifyAggregateSignature takes in a slice of bls.PublicKeys, a bls.Signature, and a payload and returns +// true if every bls.PublicKey in the slice signed the payload. The input bls.Signature is the aggregate +// signature of each of their respective bls.Signatures for that payload. func VerifyAggregateSignature(publicKeys []*PublicKey, signature *Signature, payloadBytes []byte) (bool, error) { var flowPublicKeys []flowCrypto.PublicKey for _, publicKey := range publicKeys { From aa63c68730630cdcb5f24712235f7b5915b48e84 Mon Sep 17 00:00:00 2001 From: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> Date: Thu, 18 May 2023 09:14:43 -0700 Subject: [PATCH 037/762] Diamondhands/pos txn types review (#530) * POS fixes and comments from diamondhands * Mf/pos txn types review (#531) * Add comments. * Addres review comments. * Add first batch of sanity check utils. * Add second batch of txn sanity checks. * Rename encode uint256 funcs in comments. * Nuke RegisteredAtBlockHeight. * Address review feedback. * Fix a few more typos. 
* Update some comments --------- Co-authored-by: diamondhands --------- Co-authored-by: Matt Foley <100429827+mattfoley8@users.noreply.github.com> --- bls/signature.go | 21 +- lib/block_view.go | 17 +- lib/block_view_profile_test.go | 7 +- lib/block_view_stake.go | 470 ++++++++++++++++++++++++----- lib/block_view_stake_test.go | 33 +-- lib/block_view_types.go | 40 +-- lib/block_view_types_test.go | 43 +-- lib/block_view_validator.go | 493 +++++++++++++++++-------------- lib/block_view_validator_test.go | 296 +++++++------------ lib/constants.go | 19 +- lib/db_utils.go | 79 +++-- lib/network.go | 22 +- lib/pos_epoch.go | 4 +- lib/types.go | 17 +- 14 files changed, 935 insertions(+), 626 deletions(-) diff --git a/bls/signature.go b/bls/signature.go index 93d6f9363..8f6cfb08e 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -7,15 +7,27 @@ import ( "crypto/rand" "encoding/hex" "errors" - flowCrypto "github.com/onflow/flow-go/crypto" "strings" + + flowCrypto "github.com/onflow/flow-go/crypto" ) +// The SigningAlgorithm for BLS keys is BLSBLS12381 which is BLS on the BLS 12-381 curve. +// This is the only supported BLS signing algorithm in the flowCrypto package. +// BLS is used such that we can aggregate signatures into one signature. const SigningAlgorithm = flowCrypto.BLSBLS12381 -// TODO: what should the domainTag param be? -var HashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-protocol") +// The HashingAlgorithm for BLS keys is the following. This algorithm is used to hash input data onto the +// BLS 12-381 curve for generating signatures. The returned instance is a Hasher and can be used to +// generate BLS signatures with the Sign() method. This is the only supported BLS Hasher in the flowCrypto +// package. The input domainTag is a separation tag that defines the protocol and its subdomain. 
Such tag +// should be of the format: -V-CS-with- where is the name of the protocol, +// the protocol version number, and the index of the ciphersuite in the protocol. +var HashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-V1-CS01-with-") +// AggregateSignatures takes in an input slice of bls.Signatures and aggregates them +// into a single bls.Signature. The assumption is that each of the input bls.Signatures +// were generated by different bls.PrivateKeys signing the same payload. func AggregateSignatures(signatures []*Signature) (*Signature, error) { var flowSignatures []flowCrypto.Signature for _, signature := range signatures { @@ -28,6 +40,9 @@ func AggregateSignatures(signatures []*Signature) (*Signature, error) { return &Signature{flowSignature: aggregateFlowSignature}, nil } +// VerifyAggregateSignature takes in a slice of bls.PublicKeys, a bls.Signature, and a payload and returns +// true if every bls.PublicKey in the slice signed the payload. The input bls.Signature is the aggregate +// signature of each of their respective bls.Signatures for that payload. 
func VerifyAggregateSignature(publicKeys []*PublicKey, signature *Signature, payloadBytes []byte) (bool, error) { var flowPublicKeys []flowCrypto.PublicKey for _, publicKey := range publicKeys { diff --git a/lib/block_view.go b/lib/block_view.go index 0ce1a150e..83fbb069e 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4,14 +4,15 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/btcsuite/btcd/wire" - "github.com/holiman/uint256" "math" "math/big" "reflect" "strings" "time" + "github.com/btcsuite/btcd/wire" + "github.com/holiman/uint256" + "github.com/davecgh/go-spew/spew" "github.com/btcsuite/btcd/btcec" @@ -115,9 +116,9 @@ type UtxoView struct { TransactorNonceMapKeyToTransactorNonceEntry map[TransactorNonceMapKey]*TransactorNonceEntry // Validator mappings - ValidatorMapKeyToValidatorEntry map[ValidatorMapKey]*ValidatorEntry + ValidatorPKIDToValidatorEntry map[PKID]*ValidatorEntry - // Global stake across validators + // Global stake across all validators GlobalStakeAmountNanos *uint256.Int // Stake mappings @@ -219,7 +220,7 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { bav.TransactorNonceMapKeyToTransactorNonceEntry = make(map[TransactorNonceMapKey]*TransactorNonceEntry) // ValidatorEntries - bav.ValidatorMapKeyToValidatorEntry = make(map[ValidatorMapKey]*ValidatorEntry) + bav.ValidatorPKIDToValidatorEntry = make(map[PKID]*ValidatorEntry) // Global stake across validators. 
We deliberately want this to initialize to nil and not zero // since a zero value will overwrite an existing GlobalStakeAmountNanos value in the db, whereas @@ -487,9 +488,9 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { } // Copy the ValidatorEntries - newView.ValidatorMapKeyToValidatorEntry = make(map[ValidatorMapKey]*ValidatorEntry, len(bav.ValidatorMapKeyToValidatorEntry)) - for entryKey, entry := range bav.ValidatorMapKeyToValidatorEntry { - newView.ValidatorMapKeyToValidatorEntry[entryKey] = entry.Copy() + newView.ValidatorPKIDToValidatorEntry = make(map[PKID]*ValidatorEntry, len(bav.ValidatorPKIDToValidatorEntry)) + for entryKey, entry := range bav.ValidatorPKIDToValidatorEntry { + newView.ValidatorPKIDToValidatorEntry[entryKey] = entry.Copy() } // Copy the GlobalStakeAmountNanos. diff --git a/lib/block_view_profile_test.go b/lib/block_view_profile_test.go index 532520c5d..6883c23ef 100644 --- a/lib/block_view_profile_test.go +++ b/lib/block_view_profile_test.go @@ -4,14 +4,15 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/dgraph-io/badger/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "log" "os" "runtime/pprof" "testing" "time" + + "github.com/dgraph-io/badger/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func _swapIdentityWithTestMeta( diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index c22434e64..8c81416d3 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -3,19 +3,36 @@ package lib import ( "bytes" "fmt" + "sort" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" - "sort" ) +// Stake: Any user can assign stake to a registered validator who allows delegated stake. +// When a user stakes with a validator, they lock up $DESO from their account balance +// into a StakeEntry. 
As reward for staking, a user is eligible to receive a percentage +// of the block rewards attributed to the validator. Any staked $DESO is unspendable +// until the user unstakes and unlocks their stake. See below. +// +// Unstake: If a user wants to retrieve their funds from being staked with a validator, +// they must submit an Unstake transaction. This deletes or updates their existing +// StakeEntry and creates or updates a LockedStakeEntry. Unstaked stake is not immediately +// withdrawalable and usable. It is locked for a period of time as determined by a consensus +// parameter. This is to prevent byzantine users from trying to game block rewards or +// leader schedules. +// +// UnlockStake: Once sufficient time has elapsed since unstaking their funds, a user can +// submit an UnlockStake transaction to retrieve their funds. Any eligible funds are +// unlocked and returned to the user's account balance. + // // TYPES: StakeEntry // type StakeEntry struct { - StakeID *BlockHash StakerPKID *PKID ValidatorPKID *PKID StakeAmountNanos *uint256.Int @@ -30,7 +47,6 @@ type StakeMapKey struct { func (stakeEntry *StakeEntry) Copy() *StakeEntry { return &StakeEntry{ - StakeID: stakeEntry.StakeID.NewBlockHash(), StakerPKID: stakeEntry.StakerPKID.NewPKID(), ValidatorPKID: stakeEntry.ValidatorPKID.NewPKID(), StakeAmountNanos: stakeEntry.StakeAmountNanos.Clone(), @@ -40,7 +56,7 @@ func (stakeEntry *StakeEntry) Copy() *StakeEntry { } func (stakeEntry *StakeEntry) Eq(other *StakeEntry) bool { - return stakeEntry.StakeID.IsEqual(other.StakeID) + return stakeEntry.ToMapKey() == other.ToMapKey() } func (stakeEntry *StakeEntry) ToMapKey() StakeMapKey { @@ -52,10 +68,9 @@ func (stakeEntry *StakeEntry) ToMapKey() StakeMapKey { func (stakeEntry *StakeEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte - data = append(data, EncodeToBytes(blockHeight, stakeEntry.StakeID, skipMetadata...)...) 
data = append(data, EncodeToBytes(blockHeight, stakeEntry.StakerPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, stakeEntry.ValidatorPKID, skipMetadata...)...) - data = append(data, EncodeUint256(stakeEntry.StakeAmountNanos)...) + data = append(data, VariableEncodeUint256(stakeEntry.StakeAmountNanos)...) data = append(data, EncodeExtraData(stakeEntry.ExtraData)...) return data } @@ -63,12 +78,6 @@ func (stakeEntry *StakeEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipM func (stakeEntry *StakeEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { var err error - // StakeID - stakeEntry.StakeID, err = DecodeDeSoEncoder(&BlockHash{}, rr) - if err != nil { - return errors.Wrapf(err, "StakeEntry.Decode: Problem reading StakeID: ") - } - // StakerPKID stakeEntry.StakerPKID, err = DecodeDeSoEncoder(&PKID{}, rr) if err != nil { @@ -82,7 +91,7 @@ func (stakeEntry *StakeEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *b } // StakeAmountNanos - stakeEntry.StakeAmountNanos, err = DecodeUint256(rr) + stakeEntry.StakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "StakeEntry.Decode: Problem reading StakeAmountNanos: ") } @@ -109,7 +118,6 @@ func (stakeEntry *StakeEntry) GetEncoderType() EncoderType { // type LockedStakeEntry struct { - LockedStakeID *BlockHash StakerPKID *PKID ValidatorPKID *PKID LockedAmountNanos *uint256.Int @@ -126,7 +134,6 @@ type LockedStakeMapKey struct { func (lockedStakeEntry *LockedStakeEntry) Copy() *LockedStakeEntry { return &LockedStakeEntry{ - LockedStakeID: lockedStakeEntry.LockedStakeID.NewBlockHash(), StakerPKID: lockedStakeEntry.StakerPKID.NewPKID(), ValidatorPKID: lockedStakeEntry.ValidatorPKID.NewPKID(), LockedAmountNanos: lockedStakeEntry.LockedAmountNanos.Clone(), @@ -137,7 +144,7 @@ func (lockedStakeEntry *LockedStakeEntry) Copy() *LockedStakeEntry { } func (lockedStakeEntry *LockedStakeEntry) Eq(other *LockedStakeEntry) bool { - return 
lockedStakeEntry.LockedStakeID.IsEqual(other.LockedStakeID) + return lockedStakeEntry.ToMapKey() == other.ToMapKey() } func (lockedStakeEntry *LockedStakeEntry) ToMapKey() LockedStakeMapKey { @@ -150,10 +157,9 @@ func (lockedStakeEntry *LockedStakeEntry) ToMapKey() LockedStakeMapKey { func (lockedStakeEntry *LockedStakeEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte - data = append(data, EncodeToBytes(blockHeight, lockedStakeEntry.LockedStakeID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, lockedStakeEntry.StakerPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, lockedStakeEntry.ValidatorPKID, skipMetadata...)...) - data = append(data, EncodeUint256(lockedStakeEntry.LockedAmountNanos)...) + data = append(data, VariableEncodeUint256(lockedStakeEntry.LockedAmountNanos)...) data = append(data, UintToBuf(lockedStakeEntry.LockedAtEpochNumber)...) data = append(data, EncodeExtraData(lockedStakeEntry.ExtraData)...) 
return data @@ -162,12 +168,6 @@ func (lockedStakeEntry *LockedStakeEntry) RawEncodeWithoutMetadata(blockHeight u func (lockedStakeEntry *LockedStakeEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { var err error - // LockedStakeID - lockedStakeEntry.LockedStakeID, err = DecodeDeSoEncoder(&BlockHash{}, rr) - if err != nil { - return errors.Wrapf(err, "LockedStakeEntry.Decode: Problem reading LockedStakeID: ") - } - // StakerPKID lockedStakeEntry.StakerPKID, err = DecodeDeSoEncoder(&PKID{}, rr) if err != nil { @@ -181,7 +181,7 @@ func (lockedStakeEntry *LockedStakeEntry) RawDecodeWithoutMetadata(blockHeight u } // LockedAmountNanos - lockedStakeEntry.LockedAmountNanos, err = DecodeUint256(rr) + lockedStakeEntry.LockedAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "LockedStakeEntry.Decode: Problem reading LockedAmountNanos: ") } @@ -225,7 +225,7 @@ func (txnData *StakeMetadata) GetTxnType() TxnType { func (txnData *StakeMetadata) ToBytes(preSignature bool) ([]byte, error) { var data []byte data = append(data, EncodeByteArray(txnData.ValidatorPublicKey.ToBytes())...) - data = append(data, EncodeUint256(txnData.StakeAmountNanos)...) + data = append(data, VariableEncodeUint256(txnData.StakeAmountNanos)...) return data, nil } @@ -240,7 +240,7 @@ func (txnData *StakeMetadata) FromBytes(data []byte) error { txnData.ValidatorPublicKey = NewPublicKey(validatorPublicKeyBytes) // StakeAmountNanos - txnData.StakeAmountNanos, err = DecodeUint256(rr) + txnData.StakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "StakeMetadata.FromBytes: Problem reading StakeAmountNanos: ") } @@ -268,7 +268,7 @@ func (txnData *UnstakeMetadata) GetTxnType() TxnType { func (txnData *UnstakeMetadata) ToBytes(preSignature bool) ([]byte, error) { var data []byte data = append(data, EncodeByteArray(txnData.ValidatorPublicKey.ToBytes())...) 
- data = append(data, EncodeUint256(txnData.UnstakeAmountNanos)...) + data = append(data, VariableEncodeUint256(txnData.UnstakeAmountNanos)...) return data, nil } @@ -283,7 +283,7 @@ func (txnData *UnstakeMetadata) FromBytes(data []byte) error { txnData.ValidatorPublicKey = NewPublicKey(validatorPublicKeyBytes) // UnstakeAmountNanos - txnData.UnstakeAmountNanos, err = DecodeUint256(rr) + txnData.UnstakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "UnstakeMetadata.FromBytes: Problem reading UnstakeAmountNanos: ") } @@ -360,7 +360,7 @@ func (txindexMetadata *StakeTxindexMetadata) RawEncodeWithoutMetadata(blockHeigh var data []byte data = append(data, EncodeByteArray([]byte(txindexMetadata.StakerPublicKeyBase58Check))...) data = append(data, EncodeByteArray([]byte(txindexMetadata.ValidatorPublicKeyBase58Check))...) - data = append(data, EncodeUint256(txindexMetadata.StakeAmountNanos)...) + data = append(data, VariableEncodeUint256(txindexMetadata.StakeAmountNanos)...) return data } @@ -382,7 +382,7 @@ func (txindexMetadata *StakeTxindexMetadata) RawDecodeWithoutMetadata(blockHeigh txindexMetadata.ValidatorPublicKeyBase58Check = string(validatorPublicKeyBase58CheckBytes) // StakeAmountNanos - txindexMetadata.StakeAmountNanos, err = DecodeUint256(rr) + txindexMetadata.StakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "StakeTxindexMetadata.Decode: Problem reading StakeAmountNanos: ") } @@ -412,7 +412,7 @@ func (txindexMetadata *UnstakeTxindexMetadata) RawEncodeWithoutMetadata(blockHei var data []byte data = append(data, EncodeByteArray([]byte(txindexMetadata.StakerPublicKeyBase58Check))...) data = append(data, EncodeByteArray([]byte(txindexMetadata.ValidatorPublicKeyBase58Check))...) - data = append(data, EncodeUint256(txindexMetadata.UnstakeAmountNanos)...) + data = append(data, VariableEncodeUint256(txindexMetadata.UnstakeAmountNanos)...) 
return data } @@ -434,7 +434,7 @@ func (txindexMetadata *UnstakeTxindexMetadata) RawDecodeWithoutMetadata(blockHei txindexMetadata.ValidatorPublicKeyBase58Check = string(validatorPublicKeyBase58CheckBytes) // UnstakeAmountNanos - txindexMetadata.UnstakeAmountNanos, err = DecodeUint256(rr) + txindexMetadata.UnstakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "UnstakeTxindexMetadata.Decode: Problem reading UnstakeAmountNanos: ") } @@ -468,7 +468,7 @@ func (txindexMetadata *UnlockStakeTxindexMetadata) RawEncodeWithoutMetadata(bloc data = append(data, EncodeByteArray([]byte(txindexMetadata.ValidatorPublicKeyBase58Check))...) data = append(data, UintToBuf(txindexMetadata.StartEpochNumber)...) data = append(data, UintToBuf(txindexMetadata.EndEpochNumber)...) - data = append(data, EncodeUint256(txindexMetadata.TotalUnlockedAmountNanos)...) + data = append(data, VariableEncodeUint256(txindexMetadata.TotalUnlockedAmountNanos)...) return data } @@ -502,7 +502,7 @@ func (txindexMetadata *UnlockStakeTxindexMetadata) RawDecodeWithoutMetadata(bloc } // TotalUnlockedAmountNanos - txindexMetadata.TotalUnlockedAmountNanos, err = DecodeUint256(rr) + txindexMetadata.TotalUnlockedAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "UnlockStakeTxindexMetadata.Decode: Problem reading TotalUnlockedAmountNanos: ") } @@ -522,26 +522,26 @@ func (txindexMetadata *UnlockStakeTxindexMetadata) GetEncoderType() EncoderType // DB UTILS // -func DBKeyForStakeByValidatorByStaker(stakeEntry *StakeEntry) []byte { +func DBKeyForStakeByValidatorAndStaker(stakeEntry *StakeEntry) []byte { data := DBKeyForStakeByValidator(stakeEntry) data = append(data, stakeEntry.StakerPKID.ToBytes()...) return data } func DBKeyForStakeByValidator(stakeEntry *StakeEntry) []byte { - data := append([]byte{}, Prefixes.PrefixStakeByValidatorByStaker...) + data := append([]byte{}, Prefixes.PrefixStakeByValidatorAndStaker...) 
data = append(data, stakeEntry.ValidatorPKID.ToBytes()...) return data } -func DBKeyForLockedStakeByValidatorByStakerByLockedAt(lockedStakeEntry *LockedStakeEntry) []byte { - data := DBPrefixKeyForLockedStakeByValidatorByStaker(lockedStakeEntry) +func DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(lockedStakeEntry *LockedStakeEntry) []byte { + data := DBPrefixKeyForLockedStakeByValidatorAndStaker(lockedStakeEntry) data = append(data, UintToBuf(lockedStakeEntry.LockedAtEpochNumber)...) return data } -func DBPrefixKeyForLockedStakeByValidatorByStaker(lockedStakeEntry *LockedStakeEntry) []byte { - data := append([]byte{}, Prefixes.PrefixLockedStakeByValidatorByStakerByLockedAt...) +func DBPrefixKeyForLockedStakeByValidatorAndStaker(lockedStakeEntry *LockedStakeEntry) []byte { + data := append([]byte{}, Prefixes.PrefixLockedStakeByValidatorAndStakerAndLockedAt...) data = append(data, lockedStakeEntry.ValidatorPKID.ToBytes()...) data = append(data, lockedStakeEntry.StakerPKID.ToBytes()...) return data @@ -569,7 +569,7 @@ func DBGetStakeEntryWithTxn( stakerPKID *PKID, ) (*StakeEntry, error) { // Retrieve StakeEntry from db. - key := DBKeyForStakeByValidatorByStaker(&StakeEntry{ValidatorPKID: validatorPKID, StakerPKID: stakerPKID}) + key := DBKeyForStakeByValidatorAndStaker(&StakeEntry{ValidatorPKID: validatorPKID, StakerPKID: stakerPKID}) stakeEntryBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { // We don't want to error if the key isn't found. Instead, return nil. @@ -611,6 +611,35 @@ func DBGetStakeEntriesForValidatorPKID(handle *badger.DB, snap *Snapshot, valida return stakeEntries, nil } +func DBValidatorHasDelegatedStake( + handle *badger.DB, + snap *Snapshot, + validatorPKID *PKID, + utxoDeletedStakeEntries []*StakeEntry, +) (bool, error) { + // Skip any stake the validator has assigned to himself (if exists). 
+ skipKeys := NewSet([]string{ + string(DBKeyForStakeByValidatorAndStaker(&StakeEntry{ValidatorPKID: validatorPKID, StakerPKID: validatorPKID})), + }) + + // Skip any StakeEntries deleted in the UtxoView. + for _, utxoDeletedStakeEntry := range utxoDeletedStakeEntries { + skipKeys.Add(string(DBKeyForStakeByValidatorAndStaker(utxoDeletedStakeEntry))) + } + + // Scan for any delegated StakeEntries (limiting to at most one row). + prefix := DBKeyForStakeByValidator(&StakeEntry{ValidatorPKID: validatorPKID}) + keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( + handle, prefix, 1, nil, false, skipKeys, + ) + if err != nil { + return false, errors.Wrapf(err, "DBValidatorHasDelegatedStake: problem retrieving StakeEntries: ") + } + + // Return true if any delegated StakeEntries were found. + return len(keysFound) > 0, nil +} + func DBGetLockedStakeEntry( handle *badger.DB, snap *Snapshot, @@ -637,7 +666,7 @@ func DBGetLockedStakeEntryWithTxn( lockedAtEpochNumber uint64, ) (*LockedStakeEntry, error) { // Retrieve LockedStakeEntry from db. - key := DBKeyForLockedStakeByValidatorByStakerByLockedAt(&LockedStakeEntry{ + key := DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(&LockedStakeEntry{ ValidatorPKID: validatorPKID, StakerPKID: stakerPKID, LockedAtEpochNumber: lockedAtEpochNumber, @@ -695,14 +724,14 @@ func DBGetLockedStakeEntriesInRangeWithTxn( // StartEpochNumber <= LockedAtEpochNumber <= EndEpochNumber. // Start at the StartEpochNumber. - startKey := DBKeyForLockedStakeByValidatorByStakerByLockedAt(&LockedStakeEntry{ + startKey := DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(&LockedStakeEntry{ ValidatorPKID: validatorPKID, StakerPKID: stakerPKID, LockedAtEpochNumber: startEpochNumber, }) // Consider only LockedStakeEntries for this ValidatorPKID, StakerPKID. 
- prefixKey := DBPrefixKeyForLockedStakeByValidatorByStaker(&LockedStakeEntry{ + prefixKey := DBPrefixKeyForLockedStakeByValidatorAndStaker(&LockedStakeEntry{ ValidatorPKID: validatorPKID, StakerPKID: stakerPKID, }) @@ -752,7 +781,7 @@ func DBPutStakeEntryWithTxn( } // Set StakeEntry in PrefixStakeByValidatorByStaker. - key := DBKeyForStakeByValidatorByStaker(stakeEntry) + key := DBKeyForStakeByValidatorAndStaker(stakeEntry) if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, stakeEntry)); err != nil { return errors.Wrapf( err, "DBPutStakeEntryWithTxn: problem storing StakeEntry in index PrefixStakeByValidatorByStaker: ", @@ -773,7 +802,7 @@ func DBPutLockedStakeEntryWithTxn( } // Set LockedStakeEntry in PrefixLockedStakeByValidatorByStakerByLockedAt. - key := DBKeyForLockedStakeByValidatorByStakerByLockedAt(lockedStakeEntry) + key := DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(lockedStakeEntry) if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, lockedStakeEntry)); err != nil { return errors.Wrapf( err, "DBPutLockedStakeEntryWithTxn: problem storing LockedStakeEntry in index PrefixLockedStakeByValidatorByStakerByLockedAt: ", @@ -794,7 +823,7 @@ func DBDeleteStakeEntryWithTxn( } // Delete StakeEntry from PrefixStakeByValidatorByStaker. - key := DBKeyForStakeByValidatorByStaker(stakeEntry) + key := DBKeyForStakeByValidatorAndStaker(stakeEntry) if err := DBDeleteWithTxn(txn, snap, key); err != nil { return errors.Wrapf( err, "DBDeleteStakeEntryWithTxn: problem deleting StakeEntry from index PrefixStakeByValidatorByStaker: ", @@ -815,7 +844,7 @@ func DBDeleteLockedStakeEntryWithTxn( } // Delete LockedStakeEntry from PrefixLockedStakeByValidatorByStakerByLockedAt. 
- key := DBKeyForLockedStakeByValidatorByStakerByLockedAt(lockedStakeEntry) + key := DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(lockedStakeEntry) if err := DBDeleteWithTxn(txn, snap, key); err != nil { return errors.Wrapf( err, "DBDeleteLockedStakeEntryWithTxn: problem deleting StakeEntry from index PrefixLockedStakeByValidatorByStakerByLockedAt: ", @@ -1116,6 +1145,12 @@ func (bav *UtxoView) _connectStake( } stakeAmountNanosUint64 := txMeta.StakeAmountNanos.Uint64() + // Retrieve the transactor's current balance to validate later. + prevBalanceNanos, err := bav.GetDeSoBalanceNanosForPublicKey(txn.PublicKey) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: error retrieving PrevBalanceNanos: ") + } + // Connect a BasicTransfer to get the total input and the // total output without considering the txn metadata. This // BasicTransfer also includes the extra spend associated @@ -1140,17 +1175,14 @@ func (bav *UtxoView) _connectStake( return 0, 0, nil, errors.Wrapf(err, "_connectStake: ") } // Delete the existing StakeEntry, if exists. + // + // Note that we don't really need to do this, as setting a new StakeEntry will naturally cause + // the old entry to be deleted in the database. However, we do this here for clarity. if prevStakeEntry != nil { prevStakeEntries = append(prevStakeEntries, prevStakeEntry) bav._deleteStakeEntryMappings(prevStakeEntry) } - // Set StakeID only if this is a new StakeEntry. - stakeID := txHash - if prevStakeEntry != nil { - stakeID = prevStakeEntry.StakeID - } - // Calculate StakeAmountNanos. stakeAmountNanos := txMeta.StakeAmountNanos.Clone() if prevStakeEntry != nil { @@ -1168,7 +1200,6 @@ func (bav *UtxoView) _connectStake( // Construct new StakeEntry from metadata. currentStakeEntry := &StakeEntry{ - StakeID: stakeID, StakerPKID: transactorPKIDEntry.PKID, ValidatorPKID: prevValidatorEntry.ValidatorPKID, StakeAmountNanos: stakeAmountNanos, @@ -1181,6 +1212,9 @@ func (bav *UtxoView) _connectStake( // 1. 
Copy the existing ValidatorEntry. currentValidatorEntry := prevValidatorEntry.Copy() // 2. Delete the existing ValidatorEntry. + // + // Note that we don't really need to do this, as setting a new ValidatorEntry will naturally cause + // the old entry to be deleted in the database. However, we do this here for clarity. bav._deleteValidatorEntryMappings(prevValidatorEntry) // 3. Update the new ValidatorEntry's TotalStakeAmountNanos. currentValidatorEntry.TotalStakeAmountNanos, err = SafeUint256().Add( @@ -1214,13 +1248,19 @@ func (bav *UtxoView) _connectStake( return 0, 0, nil, errors.Wrapf(err, "_connectStake: error adding StakeAmountNanos to TotalOutput: ") } - // Add a UTXO operation - utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + // Create a UTXO operation + utxoOpForTxn := &UtxoOperation{ Type: OperationTypeStake, PrevValidatorEntry: prevValidatorEntry, PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, PrevStakeEntries: prevStakeEntries, - }) + } + if err = bav.SanityCheckStakeTxn( + transactorPKIDEntry.PKID, utxoOpForTxn, txMeta.StakeAmountNanos, txn.TxnFeeNanos, prevBalanceNanos, + ); err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: ") + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOpForTxn) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1369,7 +1409,7 @@ func (bav *UtxoView) _connectUnstake( return 0, 0, nil, errors.Wrapf(RuleErrorInvalidValidatorPKID, "_connectUnstake: ") } - // Retrieve PrevStakeEntry. This will be restored if we disconnect the txn. + // Retrieve prevStakeEntry. This will be restored if we disconnect the txn. prevStakeEntry, err := bav.GetStakeEntry(prevValidatorEntry.ValidatorPKID, transactorPKIDEntry.PKID) if err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ") @@ -1388,15 +1428,15 @@ func (bav *UtxoView) _connectUnstake( if err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error subtracting UnstakeAmountNanos from StakeAmountNanos: ") } - // 2. 
Create a CurrentStakeEntry, if updated StakeAmountNanos > 0. + // 2. Create a currentStakeEntry, if updated StakeAmountNanos > 0. var currentStakeEntry *StakeEntry if stakeAmountNanos.Cmp(uint256.NewInt()) > 0 { currentStakeEntry = prevStakeEntry.Copy() currentStakeEntry.StakeAmountNanos = stakeAmountNanos.Clone() } - // 3. Delete the PrevStakeEntry. + // 3. Delete the prevStakeEntry. bav._deleteStakeEntryMappings(prevStakeEntry) - // 4. Set the CurrentStakeEntry, if exists. The CurrentStakeEntry will not exist + // 4. Set the currentStakeEntry, if exists. The currentStakeEntry will not exist // if the transactor has unstaked all stake assigned to this validator. if currentStakeEntry != nil { bav._setStakeEntryMappings(currentStakeEntry) @@ -1406,6 +1446,10 @@ func (bav *UtxoView) _connectUnstake( // 1. Copy the existing ValidatorEntry. currentValidatorEntry := prevValidatorEntry.Copy() // 2. Delete the existing ValidatorEntry. + // + // Note that we don't technically need to delete the ValidatorEntry here since + // the old ValidatorEntry will automatically be deleted in favor of the new one, + // but we do this here for clarity. bav._deleteValidatorEntryMappings(prevValidatorEntry) // 3. Update the new ValidatorEntry's TotalStakeAmountNanos. currentValidatorEntry.TotalStakeAmountNanos, err = SafeUint256().Sub( @@ -1460,7 +1504,6 @@ func (bav *UtxoView) _connectUnstake( } else { // Create a new LockedStakeEntry. currentLockedStakeEntry = &LockedStakeEntry{ - LockedStakeID: txn.Hash(), StakerPKID: transactorPKIDEntry.PKID, ValidatorPKID: prevValidatorEntry.ValidatorPKID, LockedAmountNanos: txMeta.UnstakeAmountNanos, @@ -1469,6 +1512,9 @@ func (bav *UtxoView) _connectUnstake( } } // 3. Delete the PrevLockedStakeEntry, if exists. + // + // Note that we don't technically need to do this since the flush will naturally delete + // the old value from the db before setting the new one, but we do it here for clarity. 
if prevLockedStakeEntry != nil { prevLockedStakeEntries = append(prevLockedStakeEntries, prevLockedStakeEntry) bav._deleteLockedStakeEntryMappings(prevLockedStakeEntry) @@ -1476,14 +1522,18 @@ func (bav *UtxoView) _connectUnstake( // 4. Set the CurrentLockedStakeEntry. bav._setLockedStakeEntryMappings(currentLockedStakeEntry) - // Add a UTXO operation - utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + // Create a UTXO operation. + utxoOpForTxn := &UtxoOperation{ Type: OperationTypeUnstake, PrevValidatorEntry: prevValidatorEntry, PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, PrevStakeEntries: prevStakeEntries, PrevLockedStakeEntries: prevLockedStakeEntries, - }) + } + if err = bav.SanityCheckUnstakeTxn(transactorPKIDEntry.PKID, utxoOpForTxn, txMeta.UnstakeAmountNanos); err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ") + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOpForTxn) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1652,6 +1702,12 @@ func (bav *UtxoView) _connectUnlockStake( return 0, 0, nil, errors.Wrapf(RuleErrorInvalidUnlockStakeNoUnlockableStakeFound, "_connectUnlockStake: ") } + // Retrieve the transactor's current balance to validate later. + prevBalanceNanos, err := bav.GetDeSoBalanceNanosForPublicKey(txn.PublicKey) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnlockStake: error retrieving PrevBalanceNanos: ") + } + // Connect a basic transfer to get the total input and the // total output without considering the txn metadata. totalInput, totalOutput, utxoOpsForTxn, err := bav._connectBasicTransfer( @@ -1707,11 +1763,17 @@ func (bav *UtxoView) _connectUnlockStake( } utxoOpsForTxn = append(utxoOpsForTxn, utxoOp) - // Add a UTXO operation - utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + // Create a UTXO operation. 
+ utxoOpForTxn := &UtxoOperation{ Type: OperationTypeUnlockStake, PrevLockedStakeEntries: prevLockedStakeEntries, - }) + } + if err = bav.SanityCheckUnlockStakeTxn( + transactorPKIDEntry.PKID, utxoOpForTxn, totalUnlockedAmountNanos, txn.TxnFeeNanos, prevBalanceNanos, + ); err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnlockStake: ") + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOpForTxn) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1905,6 +1967,244 @@ func (bav *UtxoView) IsValidUnlockStakeMetadata(transactorPkBytes []byte, metada return nil } +func (bav *UtxoView) SanityCheckStakeTxn( + transactorPKID *PKID, + utxoOp *UtxoOperation, + amountNanos *uint256.Int, + feeNanos uint64, + prevBalanceNanos uint64, +) error { + if utxoOp.Type != OperationTypeStake { + return fmt.Errorf("SanityCheckStakeTxn: called with %v", utxoOp.Type) + } + + // Sanity check ValidatorEntry.TotalStakeAmountNanos increase. + if utxoOp.PrevValidatorEntry == nil { + return errors.New("SanityCheckStakeTxn: nil PrevValidatorEntry provided") + } + currentValidatorEntry, err := bav.GetValidatorByPKID(utxoOp.PrevValidatorEntry.ValidatorPKID) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error retrieving ValidatorEntry: ") + } + if currentValidatorEntry == nil { + return errors.New("SanityCheckStakeTxn: no CurrentValidatorEntry found") + } + validatorEntryTotalStakeAmountNanosIncrease, err := SafeUint256().Sub( + currentValidatorEntry.TotalStakeAmountNanos, utxoOp.PrevValidatorEntry.TotalStakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error calculating TotalStakeAmountNanos increase: ") + } + if !validatorEntryTotalStakeAmountNanosIncrease.Eq(amountNanos) { + return errors.New("SanityCheckStakeTxn: TotalStakeAmountNanos increase does not match") + } + + // Validate StakeEntry.StakeAmountNanos increase. 
+ prevStakeEntry := &StakeEntry{StakeAmountNanos: uint256.NewInt()} + if len(utxoOp.PrevStakeEntries) == 1 { + prevStakeEntry = utxoOp.PrevStakeEntries[0] + } + currentStakeEntry, err := bav.GetStakeEntry(currentValidatorEntry.ValidatorPKID, transactorPKID) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error retrieving StakeEntry: ") + } + if currentStakeEntry == nil { + return errors.New("SanityCheckStakeTxn: no CurrentStakeEntry found") + } + stakeEntryStakeAmountNanosIncrease, err := SafeUint256().Sub( + currentStakeEntry.StakeAmountNanos, prevStakeEntry.StakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error calculating StakeAmountNanos increase: ") + } + if !stakeEntryStakeAmountNanosIncrease.Eq(amountNanos) { + return errors.New("SanityCheckStakeTxn: StakeAmountNanos increase does not match") + } + + // Validate GlobalStakeAmountNanos increase. + if utxoOp.PrevGlobalStakeAmountNanos == nil { + return errors.New("SanityCheckStakeTxn: nil PrevGlobalStakeAmountNanos provided") + } + currentGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error retrieving CurrentGlobalStakeAmountNanos: ") + } + globalStakeAmountNanosIncrease, err := SafeUint256().Sub( + currentGlobalStakeAmountNanos, utxoOp.PrevGlobalStakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error calculating GlobalStakeAmountNanos increase: ") + } + if !globalStakeAmountNanosIncrease.Eq(amountNanos) { + return errors.New("SanityCheckStakeTxn: GlobalStakeAmountNanos increase does not match") + } + + // Validate TransactorBalance decrease. 
+ // PrevTransactorBalanceNanos = CurrentTransactorBalanceNanos + AmountNanos + FeeNanos + // PrevTransactorBalanceNanos - CurrentTransactorBalanceNanos - FeeNanos = AmountNanos + currentBalanceNanos, err := bav.GetDeSoBalanceNanosForPublicKey(bav.GetPublicKeyForPKID(transactorPKID)) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error retrieving TransactorBalance: ") + } + transactorBalanceNanosDecrease, err := SafeUint64().Sub(prevBalanceNanos, currentBalanceNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error calculating TransactorBalance decrease: ") + } + transactorBalanceNanosDecrease, err = SafeUint64().Sub(transactorBalanceNanosDecrease, feeNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error including fees in TransactorBalance decrease: ") + } + if !uint256.NewInt().SetUint64(transactorBalanceNanosDecrease).Eq(amountNanos) { + return errors.New("SanityCheckStakeTxn: TransactorBalance decrease does not match") + } + + return nil +} + +func (bav *UtxoView) SanityCheckUnstakeTxn(transactorPKID *PKID, utxoOp *UtxoOperation, amountNanos *uint256.Int) error { + if utxoOp.Type != OperationTypeUnstake { + return fmt.Errorf("SanityCheckUnstakeTxn: called with %v", utxoOp.Type) + } + + // Validate ValidatorEntry.TotalStakeAmountNanos decrease. 
+ if utxoOp.PrevValidatorEntry == nil { + return errors.New("SanityCheckUnstakeTxn: nil PrevValidatorEntry provided") + } + currentValidatorEntry, err := bav.GetValidatorByPKID(utxoOp.PrevValidatorEntry.ValidatorPKID) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving ValidatorEntry: ") + } + if currentValidatorEntry == nil { + return errors.New("SanityCheckUnstakeTxn: no CurrentValidatorEntry found") + } + validatorEntryTotalStakeAmountNanosDecrease, err := SafeUint256().Sub( + utxoOp.PrevValidatorEntry.TotalStakeAmountNanos, currentValidatorEntry.TotalStakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error calculating TotalStakeAmountNanos decrease: ") + } + if !validatorEntryTotalStakeAmountNanosDecrease.Eq(amountNanos) { + return errors.New("SanityCheckUnstakeTxn: TotalStakeAmountNanos decrease does not match") + } + + // Validate PrevStakeEntry.StakeAmountNanos decrease. + if len(utxoOp.PrevStakeEntries) != 1 { + return errors.New("SanityCheckUnstakeTxn: PrevStakeEntries should have exactly one entry") + } + prevStakeEntry := utxoOp.PrevStakeEntries[0] + currentStakeEntry, err := bav.GetStakeEntry(prevStakeEntry.ValidatorPKID, transactorPKID) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving StakeEntry: ") + } + if currentStakeEntry == nil { + currentStakeEntry = &StakeEntry{StakeAmountNanos: uint256.NewInt()} + } + stakeEntryStakeAmountNanosDecrease, err := SafeUint256().Sub( + prevStakeEntry.StakeAmountNanos, currentStakeEntry.StakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error calculating StakeAmountNanos decrease: ") + } + if !stakeEntryStakeAmountNanosDecrease.Eq(amountNanos) { + return errors.New("SanityCheckUnstakeTxn: StakeAmountNanos decrease does not match") + } + + // Validate LockedStakeEntry.LockedAmountNanos increase. 
+ prevLockedStakeEntry := &LockedStakeEntry{LockedAmountNanos: uint256.NewInt()} + if len(utxoOp.PrevLockedStakeEntries) == 1 { + prevLockedStakeEntry = utxoOp.PrevLockedStakeEntries[0] + } + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving CurrentEpochNumber: ") + } + currentLockedStakeEntry, err := bav.GetLockedStakeEntry( + currentValidatorEntry.ValidatorPKID, transactorPKID, currentEpochNumber, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving LockedStakeEntry: ") + } + lockedStakeEntryLockedAmountNanosIncrease, err := SafeUint256().Sub( + currentLockedStakeEntry.LockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error calculating LockedAmountNanos increase: ") + } + if !lockedStakeEntryLockedAmountNanosIncrease.Eq(amountNanos) { + return errors.New("SanityCheckUnstakeTxn: LockedAmountNanos increase does not match") + } + + // Validate GlobalStakeAmountNanos decrease. 
+ if utxoOp.PrevGlobalStakeAmountNanos == nil { + return errors.New("SanityCheckUnstakeTxn: nil PrevGlobalStakeAmountNanos provided") + } + currentGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving CurrentGlobalStakeAmountNanos: ") + } + if currentGlobalStakeAmountNanos == nil { + return errors.New("SanityCheckUnstakeTxn: no CurrentGlobalStakeAmountNanos found") + } + globalStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalStakeAmountNanos, currentGlobalStakeAmountNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error calculating GlobalStakeAmountNanos decrease: ") + } + if !globalStakeAmountNanosDecrease.Eq(amountNanos) { + return errors.New("SanityCheckUnstakeTxn: GlobalStakeAmountNanos decrease does not match") + } + + return nil +} + +func (bav *UtxoView) SanityCheckUnlockStakeTxn( + transactorPKID *PKID, + utxoOp *UtxoOperation, + amountNanos *uint256.Int, + feeNanos uint64, + prevBalanceNanos uint64, +) error { + if utxoOp.Type != OperationTypeUnlockStake { + return fmt.Errorf("SanityCheckUnlockStakeTxn: called with %v", utxoOp.Type) + } + + // Validate PrevLockedStakeEntry.LockedAmountNanos. 
+ if utxoOp.PrevLockedStakeEntries == nil || len(utxoOp.PrevLockedStakeEntries) == 0 { + return errors.New("SanityCheckUnlockStakeTxn: PrevLockedStakeEntries is empty") + } + totalUnlockedAmountNanos := uint256.NewInt() + var err error + for _, prevLockedStakeEntry := range utxoOp.PrevLockedStakeEntries { + totalUnlockedAmountNanos, err = SafeUint256().Add(totalUnlockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnlockStakeTxn: error calculating TotalUnlockedAmountNanos: ") + } + } + if !totalUnlockedAmountNanos.Eq(amountNanos) { + return errors.New("SanityCheckUnlockStakeTxn: TotalUnlockedAmountNanos does not match") + } + + // Validate TransactorBalanceNanos increase. + // CurrentTransactorBalanceNanos = PrevTransactorBalanceNanos + AmountNanos - FeeNanos + // CurrentTransactorBalanceNanos - PrevTransactorBalanceNanos + FeeNanos = AmountNanos + currentBalanceNanos, err := bav.GetDeSoBalanceNanosForPublicKey(bav.GetPublicKeyForPKID(transactorPKID)) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnlockStakeTxn: error retrieving TransactorBalance: ") + } + transactorBalanceNanosIncrease, err := SafeUint64().Sub(currentBalanceNanos, prevBalanceNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnlockStakeTxn: error calculating TransactorBalance increase: ") + } + transactorBalanceNanosIncrease, err = SafeUint64().Add(transactorBalanceNanosIncrease, feeNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnlockStakeTxn: error including fees in TransactorBalance increase: ") + } + if !uint256.NewInt().SetUint64(transactorBalanceNanosIncrease).Eq(amountNanos) { + return errors.New("SanityCheckUnlockStakeTxn: TransactorBalance increase does not match") + } + + return nil +} + func (bav *UtxoView) GetStakeEntry(validatorPKID *PKID, stakerPKID *PKID) (*StakeEntry, error) { // Error if either input is nil. 
if validatorPKID == nil { @@ -1971,6 +2271,32 @@ func (bav *UtxoView) GetStakeEntriesForValidatorPKID(validatorPKID *PKID) ([]*St return stakeEntries, nil } +func (bav *UtxoView) ValidatorHasDelegatedStake(validatorPKID *PKID) (bool, error) { + // True if the validator has any delegated stake assigned to them. + + // First check the UtxoView. + var utxoDeletedStakeEntries []*StakeEntry + for _, stakeEntry := range bav.StakeMapKeyToStakeEntry { + if !stakeEntry.ValidatorPKID.Eq(validatorPKID) { + // Skip any stake assigned to other validators. + continue + } + if stakeEntry.StakerPKID.Eq(validatorPKID) { + // Skip any stake the validator assigned to themselves. + continue + } + if !stakeEntry.isDeleted { + // A non-deleted delegated StakeEntry for this validator was found in the UtxoView. + return true, nil + } + // A deleted delegated StakeEntry for this validator was found in the UtxoView. + utxoDeletedStakeEntries = append(utxoDeletedStakeEntries, stakeEntry) + } + + // Next, check the database skipping any deleted StakeEntries for this validator. + return DBValidatorHasDelegatedStake(bav.Handle, bav.Snapshot, validatorPKID, utxoDeletedStakeEntries) +} + func (bav *UtxoView) GetLockedStakeEntry( validatorPKID *PKID, stakerPKID *PKID, @@ -2406,6 +2732,8 @@ func (bav *UtxoView) _checkStakeTxnSpendingLimitAndUpdateDerivedKey( delete(derivedKeyEntry.TransactionSpendingLimitTracker.StakeLimitMap, stakeLimitKey) return derivedKeyEntry, nil } + // If we get here, it means that we did not find a valid spendingLimit with enough stake + // to cover the transaction's required stake amount. // Error if the spending limit was found but the staking limit was exceeded. 
if isSpendingLimitExceeded { @@ -2416,6 +2744,8 @@ func (bav *UtxoView) _checkStakeTxnSpendingLimitAndUpdateDerivedKey( return derivedKeyEntry, errors.Wrapf(RuleErrorStakeTransactionSpendingLimitNotFound, "UtxoView._checkStakeTxnSpendingLimitAndUpdateDerivedKey: ") } +// TODO: This function is highly-redundant with the previous function. Probably makes sense +// to consolidate in the future. func (bav *UtxoView) _checkUnstakeTxnSpendingLimitAndUpdateDerivedKey( derivedKeyEntry DerivedKeyEntry, transactorPublicKeyBytes []byte, diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index a75c06cdf..f8aefb9d8 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -4,11 +4,12 @@ package lib import ( "errors" + "math" + "testing" + "github.com/btcsuite/btcd/btcec" "github.com/holiman/uint256" "github.com/stretchr/testify/require" - "math" - "testing" ) func TestStaking(t *testing.T) { @@ -103,10 +104,9 @@ func _testStaking(t *testing.T, flushToDB bool) { votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, flushToDB) require.NoError(t, err) @@ -959,10 +959,9 @@ func TestStakingWithDerivedKey(t *testing.T) { // m0 registers as a validator. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example1.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example1.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) @@ -971,10 +970,9 @@ func TestStakingWithDerivedKey(t *testing.T) { // m1 registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example2.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example2.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) @@ -1848,10 +1846,9 @@ func TestStakeLockupEpochDuration(t *testing.T) { // m0 registers as a validator. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m1.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m1.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, true) require.NoError(t, err) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 9aa041678..0e0af7f1c 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -1246,7 +1246,7 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada data = append(data, EncodeToBytes(blockHeight, op.PrevValidatorEntry, skipMetadata...)...) // PrevGlobalStakeAmountNanos - data = append(data, EncodeUint256(op.PrevGlobalStakeAmountNanos)...) + data = append(data, VariableEncodeUint256(op.PrevGlobalStakeAmountNanos)...) // PrevStakeEntries data = append(data, EncodeDeSoEncoderSlice(op.PrevStakeEntries, blockHeight, skipMetadata...)...) @@ -1877,7 +1877,7 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. } // PrevGlobalStakeAmountNanos - if prevGlobalStakeAmountNanos, err := DecodeUint256(rr); err == nil { + if prevGlobalStakeAmountNanos, err := VariableDecodeUint256(rr); err == nil { op.PrevGlobalStakeAmountNanos = prevGlobalStakeAmountNanos } else { return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevGlobalStakeAmountNanos: ") @@ -4207,7 +4207,7 @@ func (be *BalanceEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadat data = append(data, EncodeToBytes(blockHeight, be.HODLerPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, be.CreatorPKID, skipMetadata...)...) - data = append(data, EncodeUint256(&be.BalanceNanos)...) 
+ data = append(data, VariableEncodeUint256(&be.BalanceNanos)...) data = append(data, BoolToByte(be.HasPurchased)) return data @@ -4229,7 +4229,7 @@ func (be *BalanceEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.R return errors.Wrapf(err, "BalanceEntry.Decode: Problem decoding CreatorPKID") } - balanceNanos, err := DecodeUint256(rr) + balanceNanos, err := VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "BalanceEntry.Decode: Problem reading BalanceNanos") } @@ -4353,7 +4353,7 @@ func (ce *CoinEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata . data = append(data, UintToBuf(ce.CreatorBasisPoints)...) data = append(data, UintToBuf(ce.DeSoLockedNanos)...) data = append(data, UintToBuf(ce.NumberOfHolders)...) - data = append(data, EncodeUint256(&ce.CoinsInCirculationNanos)...) + data = append(data, VariableEncodeUint256(&ce.CoinsInCirculationNanos)...) data = append(data, UintToBuf(ce.CoinWatermarkNanos)...) data = append(data, BoolToByte(ce.MintingDisabled)) data = append(data, byte(ce.TransferRestrictionStatus)) @@ -4377,7 +4377,7 @@ func (ce *CoinEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Read if err != nil { return errors.Wrapf(err, "CoinEntry.Decode: Problem reading NumberOfHolders") } - coinsInCirculationNanos, err := DecodeUint256(rr) + coinsInCirculationNanos, err := VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "CoinEntry.Decode: Problem reading NumberOfHolders") } @@ -4912,12 +4912,12 @@ func DecodeMapStringUint64(rr *bytes.Reader) (map[string]uint64, error) { return nil, nil } -// EncodeUint256 is useful for space-efficient encoding of uint256s. +// VariableEncodeUint256 is useful for space-efficient encoding of uint256s. // It does not guarantee fixed-width encoding, so should not be used -// in BadgerDB keys. Use EncodeOptionalUint256 instead, which does -// guarantee fixed-width encoding. Both EncodeUint256 and -// EncodeOptionalUint256 can handle nil inputs. 
-func EncodeUint256(number *uint256.Int) []byte { +// in BadgerDB keys. Use FixedWidthEncodeUint256 instead, which does +// guarantee fixed-width encoding. Both VariableEncodeUint256 and +// FixedWidthEncodeUint256 can handle nil inputs. +func VariableEncodeUint256(number *uint256.Int) []byte { var data []byte if number != nil { data = append(data, BoolToByte(true)) @@ -4929,7 +4929,7 @@ func EncodeUint256(number *uint256.Int) []byte { return data } -func DecodeUint256(rr *bytes.Reader) (*uint256.Int, error) { +func VariableDecodeUint256(rr *bytes.Reader) (*uint256.Int, error) { if existenceByte, err := ReadBoolByte(rr); existenceByte && err == nil { maxUint256BytesLen := len(MaxUint256.Bytes()) intLen, err := ReadUvarint(rr) @@ -5089,8 +5089,8 @@ func (order *DAOCoinLimitOrderEntry) RawEncodeWithoutMetadata(blockHeight uint64 data = append(data, EncodeToBytes(blockHeight, order.TransactorPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, order.BuyingDAOCoinCreatorPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, order.SellingDAOCoinCreatorPKID, skipMetadata...)...) - data = append(data, EncodeUint256(order.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) - data = append(data, EncodeUint256(order.QuantityToFillInBaseUnits)...) + data = append(data, VariableEncodeUint256(order.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) + data = append(data, VariableEncodeUint256(order.QuantityToFillInBaseUnits)...) data = append(data, UintToBuf(uint64(order.OperationType))...) data = append(data, UintToBuf(uint64(order.FillType))...) data = append(data, UintToBuf(uint64(order.BlockHeight))...) 
@@ -5133,12 +5133,12 @@ func (order *DAOCoinLimitOrderEntry) RawDecodeWithoutMetadata(blockHeight uint64 } // ScaledExchangeRateCoinsToSellPerCoinToBuy - if order.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = DecodeUint256(rr); err != nil { + if order.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = VariableDecodeUint256(rr); err != nil { return errors.Wrapf(err, "DAOCoinLimitOrderEntry.Decode: Problem reading ScaledExchangeRateCoinsToSellPerCoinToBuy") } // QuantityToFillInBaseUnits - if order.QuantityToFillInBaseUnits, err = DecodeUint256(rr); err != nil { + if order.QuantityToFillInBaseUnits, err = VariableDecodeUint256(rr); err != nil { return errors.Wrapf(err, "DAOCoinLimitOrderEntry.Decode: Problem reading QuantityToFillInBaseUnits") } @@ -5383,8 +5383,8 @@ func (order *FilledDAOCoinLimitOrder) RawEncodeWithoutMetadata(blockHeight uint6 data = append(data, EncodeToBytes(blockHeight, order.TransactorPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, order.BuyingDAOCoinCreatorPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, order.SellingDAOCoinCreatorPKID, skipMetadata...)...) - data = append(data, EncodeUint256(order.CoinQuantityInBaseUnitsBought)...) - data = append(data, EncodeUint256(order.CoinQuantityInBaseUnitsSold)...) + data = append(data, VariableEncodeUint256(order.CoinQuantityInBaseUnitsBought)...) + data = append(data, VariableEncodeUint256(order.CoinQuantityInBaseUnitsSold)...) 
data = append(data, BoolToByte(order.IsFulfilled)) return data @@ -5426,12 +5426,12 @@ func (order *FilledDAOCoinLimitOrder) RawDecodeWithoutMetadata(blockHeight uint6 } // CoinQuantityInBaseUnitsBought - if order.CoinQuantityInBaseUnitsBought, err = DecodeUint256(rr); err != nil { + if order.CoinQuantityInBaseUnitsBought, err = VariableDecodeUint256(rr); err != nil { return errors.Wrapf(err, "FilledDAOCoinLimiteOrder.Decode: Problem reading CoinQuantityInBaseUnitsBought") } // CoinQuantityInBaseUnitsSold - if order.CoinQuantityInBaseUnitsSold, err = DecodeUint256(rr); err != nil { + if order.CoinQuantityInBaseUnitsSold, err = VariableDecodeUint256(rr); err != nil { return errors.Wrapf(err, "FilledDAOCoinLimiteOrder.Decode: Problem reading CoinQuantityInBaseUnitsSold") } diff --git a/lib/block_view_types_test.go b/lib/block_view_types_test.go index 0a81bcae4..db66e746e 100644 --- a/lib/block_view_types_test.go +++ b/lib/block_view_types_test.go @@ -5,13 +5,14 @@ package lib import ( "bytes" "encoding/hex" + "reflect" + "testing" + "time" + "github.com/brianvoe/gofakeit" "github.com/holiman/uint256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "reflect" - "testing" - "time" ) // Initialize empty DeSoEncoders and check if they are encoded properly. @@ -400,53 +401,53 @@ func TestEncodingUint256s(t *testing.T) { num2 := uint256.NewInt().SetUint64(598128756) num3 := MaxUint256 - // Encode them to bytes using EncodeUint256. - encoded1 := EncodeUint256(num1) - encoded2 := EncodeUint256(num2) - encoded3 := EncodeUint256(num3) + // Encode them to bytes using VariableEncodeUint256. + encoded1 := VariableEncodeUint256(num1) + encoded2 := VariableEncodeUint256(num2) + encoded3 := VariableEncodeUint256(num3) - // Decode them from bytes using DecodeUint256. Verify values. + // Decode them from bytes using VariableDecodeUint256. Verify values. 
rr := bytes.NewReader(encoded1) - decoded1, err := DecodeUint256(rr) + decoded1, err := VariableDecodeUint256(rr) require.NoError(t, err) require.True(t, num1.Eq(decoded1)) rr = bytes.NewReader(encoded2) - decoded2, err := DecodeUint256(rr) + decoded2, err := VariableDecodeUint256(rr) require.NoError(t, err) require.True(t, num2.Eq(decoded2)) rr = bytes.NewReader(encoded3) - decoded3, err := DecodeUint256(rr) + decoded3, err := VariableDecodeUint256(rr) require.NoError(t, err) require.True(t, num3.Eq(decoded3)) - // Test that EncodeUint256 does not provide a fixed-width byte encoding. + // Test that VariableEncodeUint256 does not provide a fixed-width byte encoding. require.NotEqual(t, len(encoded1), len(encoded2)) require.NotEqual(t, len(encoded1), len(encoded3)) - // Encode them to bytes using EncodeOptionalUint256. - encoded1 = EncodeOptionalUint256(num1) - encoded2 = EncodeOptionalUint256(num2) - encoded3 = EncodeOptionalUint256(num3) + // Encode them to bytes using FixedWidthEncodeUint256. + encoded1 = FixedWidthEncodeUint256(num1) + encoded2 = FixedWidthEncodeUint256(num2) + encoded3 = FixedWidthEncodeUint256(num3) - // Decode them from bytes using ReadOptionalUint256. Verify values. + // Decode them from bytes using FixedWidthDecodeUint256. Verify values. rr = bytes.NewReader(encoded1) - decoded1, err = ReadOptionalUint256(rr) + decoded1, err = FixedWidthDecodeUint256(rr) require.NoError(t, err) require.True(t, num1.Eq(decoded1)) rr = bytes.NewReader(encoded2) - decoded2, err = ReadOptionalUint256(rr) + decoded2, err = FixedWidthDecodeUint256(rr) require.NoError(t, err) require.True(t, num2.Eq(decoded2)) rr = bytes.NewReader(encoded3) - decoded3, err = ReadOptionalUint256(rr) + decoded3, err = FixedWidthDecodeUint256(rr) require.NoError(t, err) require.True(t, num3.Eq(decoded3)) - // Test that EncodeOptionalUint256 provides a fixed-width byte encoding. + // Test that FixedWidthEncodeUint256 provides a fixed-width byte encoding. 
require.Equal(t, len(encoded1), len(encoded2)) require.Equal(t, len(encoded1), len(encoded3)) } diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 083b9bd30..647044cd6 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -4,37 +4,80 @@ import ( "bytes" "crypto/sha256" "fmt" + "io" + "math" + "net/url" + "sort" + "github.com/deso-protocol/core/bls" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" - "io" - "math" - "net/url" - "sort" ) +// RegisterAsValidator: Registers a new validator. This transaction can be called multiple times +// if a validator needs to update any of their registration info such as their domains. Once +// a validator is registered, stake can be assigned to that validator, the validator is eligible +// to participate in consensus by voting, and may be selected as leader to propose new blocks. +// +// UnregisterAsValidator: Unregisters an existing validator. This unstakes all stake assigned to this +// validator and removes this validator from the set of eligible validators. A user would have to +// re-register by submitting a subsequent RegisterAsValidator transaction to be re-included. +// +// UnjailValidator: Unjails a jailed validator if sufficient time (epochs) have elapsed since the +// validator was first jailed. A validator is jailed if they fail to participate in consensus by +// either voting or proposing blocks for too long. A jailed validator is ineligible to receive +// any block rewards and ineligible to elected leader. +// +// FIXME: In the future, when we flesh out the jail/unjail functionality, we will want to make it +// so that the validator's stake is removed/added to GlobalStakeAmountNanos. See FIXME on +// _connectUnjailValidator for more details. + // // TYPES: ValidatorEntry // type ValidatorEntry struct { - ValidatorID *BlockHash + // The ValidatorPKID is the primary key for a ValidatorEntry. 
It is the PKID + // for the transactor who registered the validator. A user's PKID can only + // be associated with one validator. ValidatorPKID *PKID + // Domains is a slice of web domains where the validator can be reached. // Note: if someone is updating their ValidatorEntry, they need to include // all domains. The Domains field is not appended to. It is overwritten. - Domains [][]byte - DisableDelegatedStake bool - VotingPublicKey *bls.PublicKey - VotingPublicKeySignature *bls.Signature - VotingSignatureBlockHeight uint64 - TotalStakeAmountNanos *uint256.Int - RegisteredAtBlockHeight uint64 - LastActiveAtEpochNumber uint64 - JailedAtEpochNumber uint64 - ExtraData map[string][]byte - isDeleted bool + Domains [][]byte + // DisableDelegatedStake is a boolean that indicates whether the validator + // disallows delegated / 3rd party stake being assigned to themselves. If + // a validator sets DisableDelegatedStake to true, then they can still + // stake with themselves, but all other users will receive an error if they + // try to stake with this validator. + DisableDelegatedStake bool + // The VotingPublicKey is a BLS PublicKey that is used in consensus messages. + // A validator signs consensus messages with their VotingPrivateKey and then + // other validators can reliably prove the message came from this validator + // by verifying against their VotingPublicKey. + VotingPublicKey *bls.PublicKey + // The VotingPublicKeySignature is the signature of the SHA256(TransactorPublicKey) + // by the VotingPrivateKey. + // This proves that this validator is indeed the proper owner of the corresponding + // VotingPrivateKey. See comment on CreateValidatorVotingSignaturePayload for more details. + VotingPublicKeySignature *bls.Signature + // TotalStakeAmountNanos is a cached value of this validator's total stake, calculated + // by summing all the corresponding StakeEntries assigned to this validator. 
We cache + // the value here to avoid the O(N) operation of recomputing when determining a + // validator's total stake. This way it is an O(1) operation instead. + TotalStakeAmountNanos *uint256.Int + // LastActiveAtEpochNumber is the last epoch in which this validator either 1) participated in + // consensus by voting or proposing blocks, or 2) unjailed themselves. If a validator is + // inactive for too long, then they are jailed. + LastActiveAtEpochNumber uint64 + // JailedAtEpochNumber tracks when a validator was first jailed. This helps to verify + // that enough time (epochs) have passed before the validator is able to unjail themselves. + JailedAtEpochNumber uint64 + + ExtraData map[string][]byte + isDeleted bool } func (validatorEntry *ValidatorEntry) Status() ValidatorStatus { @@ -60,16 +103,6 @@ const ( ValidatorStatusJailed ValidatorStatus = 2 ) -type ValidatorMapKey struct { - // The MapKey has to contain all fields that are used in Badger keys. - // Otherwise, an update to the UtxoView will not be able to update or - // delete all relevant Badger rows. - ValidatorPKID PKID - TotalStakeAmountNanos uint256.Int - RegisteredAtBlockHeight uint64 - JailedAtEpochNumber uint64 -} - func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { // Copy domains. var domainsCopy [][]byte @@ -79,34 +112,21 @@ func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { // Return new ValidatorEntry. 
return &ValidatorEntry{ - ValidatorID: validatorEntry.ValidatorID.NewBlockHash(), - ValidatorPKID: validatorEntry.ValidatorPKID.NewPKID(), - Domains: domainsCopy, - DisableDelegatedStake: validatorEntry.DisableDelegatedStake, - VotingPublicKey: validatorEntry.VotingPublicKey.Copy(), - VotingPublicKeySignature: validatorEntry.VotingPublicKeySignature.Copy(), - VotingSignatureBlockHeight: validatorEntry.VotingSignatureBlockHeight, - TotalStakeAmountNanos: validatorEntry.TotalStakeAmountNanos.Clone(), - RegisteredAtBlockHeight: validatorEntry.RegisteredAtBlockHeight, - LastActiveAtEpochNumber: validatorEntry.LastActiveAtEpochNumber, - JailedAtEpochNumber: validatorEntry.JailedAtEpochNumber, - ExtraData: copyExtraData(validatorEntry.ExtraData), - isDeleted: validatorEntry.isDeleted, - } -} - -func (validatorEntry *ValidatorEntry) ToMapKey() ValidatorMapKey { - return ValidatorMapKey{ - ValidatorPKID: *validatorEntry.ValidatorPKID, - TotalStakeAmountNanos: *validatorEntry.TotalStakeAmountNanos, - RegisteredAtBlockHeight: validatorEntry.RegisteredAtBlockHeight, - JailedAtEpochNumber: validatorEntry.JailedAtEpochNumber, + ValidatorPKID: validatorEntry.ValidatorPKID.NewPKID(), + Domains: domainsCopy, + DisableDelegatedStake: validatorEntry.DisableDelegatedStake, + VotingPublicKey: validatorEntry.VotingPublicKey.Copy(), + VotingPublicKeySignature: validatorEntry.VotingPublicKeySignature.Copy(), + TotalStakeAmountNanos: validatorEntry.TotalStakeAmountNanos.Clone(), + LastActiveAtEpochNumber: validatorEntry.LastActiveAtEpochNumber, + JailedAtEpochNumber: validatorEntry.JailedAtEpochNumber, + ExtraData: copyExtraData(validatorEntry.ExtraData), + isDeleted: validatorEntry.isDeleted, } } func (validatorEntry *ValidatorEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte - data = append(data, EncodeToBytes(blockHeight, validatorEntry.ValidatorID, skipMetadata...)...) 
data = append(data, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID, skipMetadata...)...) // Domains @@ -118,9 +138,7 @@ func (validatorEntry *ValidatorEntry) RawEncodeWithoutMetadata(blockHeight uint6 data = append(data, BoolToByte(validatorEntry.DisableDelegatedStake)) data = append(data, EncodeBLSPublicKey(validatorEntry.VotingPublicKey)...) data = append(data, EncodeBLSSignature(validatorEntry.VotingPublicKeySignature)...) - data = append(data, UintToBuf(validatorEntry.VotingSignatureBlockHeight)...) - data = append(data, EncodeUint256(validatorEntry.TotalStakeAmountNanos)...) - data = append(data, UintToBuf(validatorEntry.RegisteredAtBlockHeight)...) + data = append(data, VariableEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) data = append(data, UintToBuf(validatorEntry.LastActiveAtEpochNumber)...) data = append(data, UintToBuf(validatorEntry.JailedAtEpochNumber)...) data = append(data, EncodeExtraData(validatorEntry.ExtraData)...) @@ -130,12 +148,6 @@ func (validatorEntry *ValidatorEntry) RawEncodeWithoutMetadata(blockHeight uint6 func (validatorEntry *ValidatorEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { var err error - // ValidatorID - validatorEntry.ValidatorID, err = DecodeDeSoEncoder(&BlockHash{}, rr) - if err != nil { - return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading ValidatorID: ") - } - // ValidatorPKID validatorEntry.ValidatorPKID, err = DecodeDeSoEncoder(&PKID{}, rr) if err != nil { @@ -173,24 +185,12 @@ func (validatorEntry *ValidatorEntry) RawDecodeWithoutMetadata(blockHeight uint6 return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingPublicKeySignature: ") } - // VotingSignatureBlockHeight - validatorEntry.VotingSignatureBlockHeight, err = ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingSignatureBlockHeight: ") - } - // TotalStakeAmountNanos - validatorEntry.TotalStakeAmountNanos, err = DecodeUint256(rr) + 
validatorEntry.TotalStakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading TotalStakeAmountNanos: ") } - // RegisteredAtBlockHeight - validatorEntry.RegisteredAtBlockHeight, err = ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading RegisteredAtBlockHeight: ") - } - // LastActiveAtEpochNumber validatorEntry.LastActiveAtEpochNumber, err = ReadUvarint(rr) if err != nil { @@ -225,11 +225,10 @@ func (validatorEntry *ValidatorEntry) GetEncoderType() EncoderType { // type RegisterAsValidatorMetadata struct { - Domains [][]byte - DisableDelegatedStake bool - VotingPublicKey *bls.PublicKey - VotingPublicKeySignature *bls.Signature - VotingSignatureBlockHeight uint64 + Domains [][]byte + DisableDelegatedStake bool + VotingPublicKey *bls.PublicKey + VotingPublicKeySignature *bls.Signature } func (txnData *RegisterAsValidatorMetadata) GetTxnType() TxnType { @@ -248,7 +247,6 @@ func (txnData *RegisterAsValidatorMetadata) ToBytes(preSignature bool) ([]byte, data = append(data, BoolToByte(txnData.DisableDelegatedStake)) data = append(data, EncodeBLSPublicKey(txnData.VotingPublicKey)...) data = append(data, EncodeBLSSignature(txnData.VotingPublicKeySignature)...) - data = append(data, UintToBuf(txnData.VotingSignatureBlockHeight)...) 
return data, nil } @@ -286,12 +284,6 @@ func (txnData *RegisterAsValidatorMetadata) FromBytes(data []byte) error { return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingPublicKeySignature: ") } - // VotingSignatureBlockHeight - txnData.VotingSignatureBlockHeight, err = ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingSignatureBlockHeight: ") - } - return nil } @@ -353,7 +345,6 @@ type RegisterAsValidatorTxindexMetadata struct { DisableDelegatedStake bool VotingPublicKey string VotingPublicKeySignature string - VotingSignatureBlockHeight uint64 } func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -369,7 +360,6 @@ func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawEncodeWithoutMetad data = append(data, BoolToByte(txindexMetadata.DisableDelegatedStake)) data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingPublicKey))...) data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingPublicKeySignature))...) - data = append(data, UintToBuf(txindexMetadata.VotingSignatureBlockHeight)...) return data } @@ -416,12 +406,6 @@ func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawDecodeWithoutMetad } txindexMetadata.VotingPublicKeySignature = string(votingPublicKeySignatureBytes) - // VotingSignatureBlockHeight - txindexMetadata.VotingSignatureBlockHeight, err = ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading VotingSignatureBlockHeight: ") - } - return nil } @@ -445,7 +429,7 @@ type UnstakedStakerTxindexMetadata struct { func (txindexMetadata *UnstakedStakerTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte data = append(data, EncodeByteArray([]byte(txindexMetadata.StakerPublicKeyBase58Check))...) 
- data = append(data, EncodeUint256(txindexMetadata.UnstakeAmountNanos)...) + data = append(data, VariableEncodeUint256(txindexMetadata.UnstakeAmountNanos)...) return data } @@ -460,7 +444,7 @@ func (txindexMetadata *UnstakedStakerTxindexMetadata) RawDecodeWithoutMetadata(b txindexMetadata.StakerPublicKeyBase58Check = string(stakerPublicKeyBase58CheckBytes) // UnstakeAmountNanos - txindexMetadata.UnstakeAmountNanos, err = DecodeUint256(rr) + txindexMetadata.UnstakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "UnstakedStakerTxindexMetadata.Decode: Problem reading UnstakeAmountNanos: ") } @@ -561,10 +545,7 @@ func DBKeyForValidatorByPKID(validatorEntry *ValidatorEntry) []byte { func DBKeyForValidatorByStake(validatorEntry *ValidatorEntry) []byte { key := append([]byte{}, Prefixes.PrefixValidatorByStake...) key = append(key, EncodeUint8(uint8(validatorEntry.Status()))...) - // TotalStakeAmountNanos will never be nil here, but EncodeOptionalUint256 - // is used because it provides a fixed-width encoding of uint256.Ints. - key = append(key, EncodeOptionalUint256(validatorEntry.TotalStakeAmountNanos)...) // Highest stake first - key = append(key, EncodeUint64(math.MaxUint64-validatorEntry.RegisteredAtBlockHeight)...) // Oldest first + key = append(key, FixedWidthEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) return key } @@ -618,22 +599,24 @@ func DBGetTopActiveValidatorsByStake( validatorKeysToSkip.Add(string(DBKeyForValidatorByStake(validatorEntryToSkip))) } - // Retrieve top N active ValidatorEntry PKIDs by stake. + // Retrieve top N active ValidatorEntry keys by stake. key := append([]byte{}, Prefixes.PrefixValidatorByStake...) key = append(key, EncodeUint8(uint8(ValidatorStatusActive))...) 
- _, validatorPKIDsBytes, err := EnumerateKeysForPrefixWithLimitOffsetOrder( + keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( handle, key, limit, nil, true, validatorKeysToSkip, ) if err != nil { return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem retrieving top validators: ") } - // For each PKID, retrieve the ValidatorEntry by PKID. - for _, validatorPKIDBytes := range validatorPKIDsBytes { + // For each key found, parse the ValidatorPKID from the key, + // then retrieve the ValidatorEntry by the ValidatorPKID. + for _, keyFound := range keysFound { + // Parse the PKIDBytes from the key. The ValidatorPKID is the last component of the key. + validatorPKIDBytes := keyFound[len(keyFound)-PublicKeyLenCompressed:] // Convert PKIDBytes to PKID. validatorPKID := &PKID{} - exists, err := DecodeFromBytes(validatorPKID, bytes.NewReader(validatorPKIDBytes)) - if !exists || err != nil { + if err = validatorPKID.FromBytes(bytes.NewReader(validatorPKIDBytes)); err != nil { return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem reading ValidatorPKID: ") } // Retrieve ValidatorEntry by PKID. @@ -672,7 +655,7 @@ func DBGetGlobalStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot) (*uint2 // Decode from bytes. var globalStakeAmountNanos *uint256.Int rr := bytes.NewReader(globalStakeAmountNanosBytes) - globalStakeAmountNanos, err = DecodeUint256(rr) + globalStakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return nil, errors.Wrapf(err, "DBGetGlobalStakeAmountNanosWithTxn: problem decoding value") } @@ -699,9 +682,10 @@ func DBPutValidatorWithTxn( ) } - // Set ValidatorEntry.PKID in PrefixValidatorByStake. + // Set ValidatorEntry key in PrefixValidatorByStake. The value should be nil. + // We parse the ValidatorPKID from the key for this index. 
key = DBKeyForValidatorByStake(validatorEntry) - if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID)); err != nil { + if err := DBSetWithTxn(txn, snap, key, nil); err != nil { return errors.Wrapf( err, "DBPutValidatorWithTxn: problem storing ValidatorEntry in index PrefixValidatorByStake", ) @@ -710,10 +694,24 @@ func DBPutValidatorWithTxn( return nil } -func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorEntry *ValidatorEntry) error { - if validatorEntry == nil { +func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorPKID *PKID) error { + if validatorPKID == nil { // This should never happen but is a sanity check. - glog.Errorf("DBDeleteValidatorWithTxn: called with nil ValidatorEntry") + glog.Errorf("DBDeleteValidatorWithTxn: called with nil ValidatorPKID") + return nil + } + + // Look up the existing ValidatorEntry in the db using the PKID. We need to use this + // validator's values to delete the corresponding indexes. + validatorEntry, err := DBGetValidatorByPKIDWithTxn(txn, snap, validatorPKID) + if err != nil { + return errors.Wrapf(err, "DBDeleteValidatorWithTxn: problem retrieving "+ + "ValidatorEntry for PKID %v: ", validatorPKID) + } + + // If there is no ValidatorEntry in the DB for this PKID, then there is nothing to + // delete. + if validatorEntry == nil { return nil } @@ -749,7 +747,7 @@ func DBPutGlobalStakeAmountNanosWithTxn( } key := DBKeyForGlobalStakeAmountNanos() - return DBSetWithTxn(txn, snap, key, EncodeUint256(globalStakeAmountNanos)) + return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalStakeAmountNanos)) } // @@ -1047,32 +1045,19 @@ func (bav *UtxoView) _connectRegisterAsValidator( return 0, 0, nil, errors.Wrapf(err, "_connectRegisterAsValidator: ") } // Delete the existing ValidatorEntry, if exists. There will be an existing ValidatorEntry - // if the transactor is updating their ValidatorEntry. 
There will not be, if the transactor + // if the transactor is updating their ValidatorEntry. There will not be one if the transactor // is registering a ValidatorEntry for the first time (or it was previously unregistered). // Note that we don't need to check isDeleted because the Get returns nil if isDeleted=true. if prevValidatorEntry != nil { bav._deleteValidatorEntryMappings(prevValidatorEntry) } - // Set ValidatorID only if this is a new ValidatorEntry. - validatorID := txHash.NewBlockHash() - if prevValidatorEntry != nil { - validatorID = prevValidatorEntry.ValidatorID.NewBlockHash() - } - // Calculate TotalStakeAmountNanos. totalStakeAmountNanos := uint256.NewInt() if prevValidatorEntry != nil { totalStakeAmountNanos = prevValidatorEntry.TotalStakeAmountNanos.Clone() } - // Set RegisteredAtBlockHeight to CurrentBlockHeight if this is a new ValidatorEntry. - // Otherwise, retain the existing RegisteredAtBlockHeight. - registeredAtBlockHeight := uint64(blockHeight) - if prevValidatorEntry != nil { - registeredAtBlockHeight = prevValidatorEntry.RegisteredAtBlockHeight - } - // Set LastActiveAtEpochNumber to CurrentEpochNumber if this is a new ValidatorEntry. // Otherwise, retain the existing LastActiveAtEpochNumber. var lastActiveAtEpochNumber uint64 @@ -1104,20 +1089,17 @@ func (bav *UtxoView) _connectRegisterAsValidator( // Construct new ValidatorEntry from metadata. currentValidatorEntry := &ValidatorEntry{ - ValidatorID: validatorID, ValidatorPKID: transactorPKIDEntry.PKID, // Note: if someone is updating their ValidatorEntry, they need to include // all domains. The Domains field is not appended to. It is overwritten. 
- Domains: txMeta.Domains, - DisableDelegatedStake: txMeta.DisableDelegatedStake, - VotingPublicKey: txMeta.VotingPublicKey, - VotingPublicKeySignature: txMeta.VotingPublicKeySignature, - VotingSignatureBlockHeight: txMeta.VotingSignatureBlockHeight, - TotalStakeAmountNanos: totalStakeAmountNanos, - RegisteredAtBlockHeight: registeredAtBlockHeight, - LastActiveAtEpochNumber: lastActiveAtEpochNumber, - JailedAtEpochNumber: jailedAtEpochNumber, - ExtraData: mergeExtraData(prevExtraData, txn.ExtraData), + Domains: txMeta.Domains, + DisableDelegatedStake: txMeta.DisableDelegatedStake, + VotingPublicKey: txMeta.VotingPublicKey, + VotingPublicKeySignature: txMeta.VotingPublicKeySignature, + TotalStakeAmountNanos: totalStakeAmountNanos, + LastActiveAtEpochNumber: lastActiveAtEpochNumber, + JailedAtEpochNumber: jailedAtEpochNumber, + ExtraData: mergeExtraData(prevExtraData, txn.ExtraData), } // Set the ValidatorEntry. bav._setValidatorEntryMappings(currentValidatorEntry) @@ -1292,12 +1274,6 @@ func (bav *UtxoView) _connectUnregisterAsValidator( } } else { lockedStakeEntry = &LockedStakeEntry{ - // What to set as the LockedStakeID here is not obvious. Typically, we use the - // TxHash, but since there may be multiple LockedStakeEntries created here, one - // for each unstaked staker, that would result in multiple LockedStakeEntries - // with the same LockedStakeID. For that reason, we opted to use the - // PrevStakeEntry's StakeID instead. - LockedStakeID: prevStakeEntry.StakeID.NewBlockHash(), StakerPKID: prevStakeEntry.StakerPKID.NewPKID(), ValidatorPKID: prevStakeEntry.ValidatorPKID.NewPKID(), LockedAmountNanos: prevStakeEntry.StakeAmountNanos.Clone(), @@ -1352,14 +1328,18 @@ func (bav *UtxoView) _connectUnregisterAsValidator( // Set the new GlobalStakeAmountNanos. bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) - // Add a UTXO operation. - utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + // Create a UTXO operation. 
+ utxoOpForTxn := &UtxoOperation{ Type: OperationTypeUnregisterAsValidator, PrevValidatorEntry: prevValidatorEntry, PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, PrevStakeEntries: prevStakeEntries, PrevLockedStakeEntries: prevLockedStakeEntries, - }) + } + if err = bav.SanityCheckUnregisterAsValidatorTxn(transactorPKIDEntry.PKID, utxoOpForTxn, totalUnstakedAmountNanos); err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: ") + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOpForTxn) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1443,6 +1423,10 @@ func (bav *UtxoView) _disconnectUnregisterAsValidator( ) } +// FIXME: Currently, unjail does not re-add a validator's stake back to the GlobalStakeAmountNanos. +// When we flesh out the logic for jail/unjail, we will want to make it so that the process that +// jails a validator *removes* their stake from GlobalStakeAmountNanos, and the process that unjails, +// i.e. this function, *re-adds* their stake back to GlobalStakeAmountNanos. func (bav *UtxoView) _connectUnjailValidator( txn *MsgDeSoTxn, txHash *BlockHash, @@ -1628,19 +1612,11 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata( return errors.Wrapf(RuleErrorValidatorMissingVotingPublicKey, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } - // Validate VotingSignatureBlockHeight. - if metadata.VotingSignatureBlockHeight < blockHeight || - metadata.VotingSignatureBlockHeight > blockHeight+bav.Params.ValidatorVotingSignatureBlockHeightWindow { - return errors.Wrapf(RuleErrorValidatorInvalidVotingSignatureBlockHeight, "UtxoView.IsValidRegisterAsValidatorMetadata: ") - } - // Validate VotingPublicKeySignature. 
if metadata.VotingPublicKeySignature == nil { return errors.Wrapf(RuleErrorValidatorMissingVotingPublicKeySignature, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } - votingSignaturePayload := CreateValidatorVotingSignaturePayload( - transactorPublicKey, metadata.VotingPublicKey, metadata.VotingSignatureBlockHeight, - ) + votingSignaturePayload := CreateValidatorVotingSignaturePayload(transactorPublicKey) isValidBLSSignature, err := metadata.VotingPublicKey.Verify(metadata.VotingPublicKeySignature, votingSignaturePayload) if err != nil { return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error verifying VotingPublicKeySignature: ") @@ -1659,19 +1635,14 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata( !validatorEntry.DisableDelegatedStake && // Existing ValidatorEntry.DisableDelegatedStake = false metadata.DisableDelegatedStake { // Updating DisableDelegatedStake = true - // Retrieve existing StakeEntries. - prevStakeEntries, err := bav.GetStakeEntriesForValidatorPKID(transactorPKIDEntry.PKID) + hasDelegatedStake, err := bav.ValidatorHasDelegatedStake(transactorPKIDEntry.PKID) if err != nil { - return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error retrieving existing StakeEntries: ") + return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error checking for existing delegated StakeEntries: ") } - - // Error if any belong to someone other than the validator. 
- for _, stakeEntry := range prevStakeEntries { - if !stakeEntry.StakerPKID.Eq(transactorPKIDEntry.PKID) { - return errors.Wrapf( - RuleErrorValidatorDisablingExistingDelegatedStakers, "UtxoView.IsValidRegisterAsValidatorMetadata: ", - ) - } + if hasDelegatedStake { + return errors.Wrapf( + RuleErrorValidatorDisablingExistingDelegatedStakers, "UtxoView.IsValidRegisterAsValidatorMetadata: ", + ) } } @@ -1732,40 +1703,87 @@ func (bav *UtxoView) IsValidUnjailValidatorMetadata(transactorPublicKey []byte) return nil } -func (bav *UtxoView) GetValidatorByPKID(pkid *PKID) (*ValidatorEntry, error) { - // First check the UtxoView. +func (bav *UtxoView) SanityCheckUnregisterAsValidatorTxn( + transactorPKID *PKID, + utxoOp *UtxoOperation, + amountNanos *uint256.Int, +) error { + if utxoOp.Type != OperationTypeUnregisterAsValidator { + return fmt.Errorf("SanityCheckUnregisterAsValidatorTxn: called with %v", utxoOp.Type) + } - // There can be multiple ValidatorEntries for a given PKID in the UtxoView since the ValidatorMapKey - // contains ValidatorPKID, TotalStakeAmountNanos, and RegisteredAtBlockHeight. We need to loop through - // all the ValidatorEntries and find the one matching the given PKID that is !isDeleted. There should - // ever only be zero or one such matching ValidatorEntries. If the only matching ValidatorEntries are - // all isDeleted then we shouldn't check the database as the corresponding rows in the database will - // be deleted once the UtxoView is flushed. - isDeleted := false - - for _, validatorEntry := range bav.ValidatorMapKeyToValidatorEntry { - if validatorEntry == nil { - // This should never happen but is a sanity check. - continue - } - if !validatorEntry.ValidatorPKID.Eq(pkid) { - continue + // Sanity check the deleted ValidatorEntry. 
+ if utxoOp.PrevValidatorEntry == nil { + return errors.New("SanityCheckUnregisterAsValidatorTxn: nil PrevValidatorEntry provided") + } + if !utxoOp.PrevValidatorEntry.ValidatorPKID.Eq(transactorPKID) { + return errors.New("SanityCheckUnregisterAsValidatorTxn: ValidatorPKID doesn't match TransactorPKID") + } + if !utxoOp.PrevValidatorEntry.TotalStakeAmountNanos.Eq(amountNanos) { + return errors.New("SanityCheckUnregisterAsValidatorTxn: TotalStakeAmountNanos doesn't match") + } + currentValidatorEntry, err := bav.GetValidatorByPKID(utxoOp.PrevValidatorEntry.ValidatorPKID) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error retrieving ValidatorEntry: ") + } + if currentValidatorEntry != nil { + return errors.New("SanityCheckUnregisterAsValidatorTxn: ValidatorEntry was not deleted") + } + + // Sanity check that there are no existing StakeEntries for the validator. + stakeEntries, err := bav.GetStakeEntriesForValidatorPKID(utxoOp.PrevValidatorEntry.ValidatorPKID) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error retrieving StakeEntries: ") + } + if len(stakeEntries) != 0 { + return errors.New("SanityCheckUnregisterAsValidatorTxn: StakeEntries for ValidatorEntry still exist") + } + + // Sanity check the deleted StakeEntries. + totalUnstakedAmountNanos := uint256.NewInt() + for _, stakeEntry := range utxoOp.PrevStakeEntries { + totalUnstakedAmountNanos, err = SafeUint256().Add(totalUnstakedAmountNanos, stakeEntry.StakeAmountNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error calculating TotalUnstakedAmountNanos: ") } + } + if !totalUnstakedAmountNanos.Eq(amountNanos) { + return errors.New("SanityCheckUnregisterAsValidatorTxn: TotalUnstakedAmountNanos doesn't match") + } + + // Sanity check that the GlobalStakeAmountNanos was decreased by amountNanos. 
+ if utxoOp.PrevGlobalStakeAmountNanos == nil { + return errors.New("SanityCheckUnregisterAsValidatorTxn: nil PrevGlobalStakeAmountNanos provided") + } + currentGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error retrieving GlobalStakeAmountNanos: ") + } + globalStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalStakeAmountNanos, currentGlobalStakeAmountNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error calculating GlobalStakeAmountNanos decrease: ") + } + if !globalStakeAmountNanosDecrease.Eq(amountNanos) { + return errors.New("SanityCheckUnregisterAsValidatorTxn: GlobalStakeAmountNanos decrease doesn't match") + } + return nil +} + +func (bav *UtxoView) GetValidatorByPKID(pkid *PKID) (*ValidatorEntry, error) { + // First check the UtxoView. + validatorEntry, exists := bav.ValidatorPKIDToValidatorEntry[*pkid] + if exists { if validatorEntry.isDeleted { - isDeleted = true - continue + // If we get to this point, we found a ValidatorEntry for the given PKID + // but it was marked as isDeleted. In this case, we do not want to check + // the database but instead just return nil, no ValidatorEntry found. + return nil, nil } // If we get to this point, we found a matching // !isDeleted ValidatorEntry for the given PKID. return validatorEntry, nil } - - if isDeleted { - // If we get to this point, we found one or more matching ValidatorEntries - // for the given PKID, but they were all isDeleted. We do not want to check - // the database but instead just return nil, no ValidatorEntry found. - return nil, nil - } + // At this point, we know there was no matching ValidatorEntry in the view. // If no ValidatorEntry (either isDeleted or !isDeleted) was found // in the UtxoView for the given PKID, check the database. 
@@ -1807,10 +1825,12 @@ func (bav *UtxoView) GetTopActiveValidatorsByStake(limit int) ([]*ValidatorEntry // then sort the UtxoViewValidatorEntries and DatabaseValidatorEntries together to find the top N // ValidatorEntries by stake across both the UtxoView and database. var utxoViewValidatorEntries []*ValidatorEntry - for _, validatorEntry := range bav.ValidatorMapKeyToValidatorEntry { + for _, validatorEntry := range bav.ValidatorPKIDToValidatorEntry { utxoViewValidatorEntries = append(utxoViewValidatorEntries, validatorEntry) } // Pull top N active ValidatorEntries from the database (not present in the UtxoView). + // Note that we will skip validators that are present in the view because we pass + // utxoViewValidatorEntries to the function. dbValidatorEntries, err := DBGetTopActiveValidatorsByStake(bav.Handle, bav.Snapshot, limit, utxoViewValidatorEntries) if err != nil { return nil, errors.Wrapf(err, "UtxoView.GetTopActiveValidatorsByStake: error retrieving entries from db: ") @@ -1820,13 +1840,13 @@ func (bav *UtxoView) GetTopActiveValidatorsByStake(limit int) ([]*ValidatorEntry // We only pull ValidatorEntries from the db that are not present in the // UtxoView. As a sanity check, we double-check that the ValidatorEntry // is not already in the UtxoView here. - if _, exists := bav.ValidatorMapKeyToValidatorEntry[validatorEntry.ToMapKey()]; !exists { + if _, exists := bav.ValidatorPKIDToValidatorEntry[*validatorEntry.ValidatorPKID]; !exists { bav._setValidatorEntryMappings(validatorEntry) } } // Pull !isDeleted, active ValidatorEntries from the UtxoView. 
var validatorEntries []*ValidatorEntry - for _, validatorEntry := range bav.ValidatorMapKeyToValidatorEntry { + for _, validatorEntry := range bav.ValidatorPKIDToValidatorEntry { if !validatorEntry.isDeleted && validatorEntry.Status() == ValidatorStatusActive { validatorEntries = append(validatorEntries, validatorEntry) } @@ -1841,25 +1861,20 @@ func (bav *UtxoView) GetTopActiveValidatorsByStake(limit int) ([]*ValidatorEntry } func (bav *UtxoView) GetGlobalStakeAmountNanos() (*uint256.Int, error) { - var globalStakeAmountNanos *uint256.Int - var err error // Read the GlobalStakeAmountNanos from the UtxoView. if bav.GlobalStakeAmountNanos != nil { - globalStakeAmountNanos = bav.GlobalStakeAmountNanos.Clone() + return bav.GlobalStakeAmountNanos.Clone(), nil } // If not set, read the GlobalStakeAmountNanos from the db. - // TODO: Confirm if the GlobalStakeAmountNanos.IsZero() that we should look in the db. - if globalStakeAmountNanos == nil || globalStakeAmountNanos.IsZero() { - globalStakeAmountNanos, err = DBGetGlobalStakeAmountNanos(bav.Handle, bav.Snapshot) - if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetGlobalStakeAmountNanos: ") - } - if globalStakeAmountNanos == nil { - globalStakeAmountNanos = uint256.NewInt() - } - // Cache the GlobaleStakeAmountNanos from the db in the UtxoView. - bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) + globalStakeAmountNanos, err := DBGetGlobalStakeAmountNanos(bav.Handle, bav.Snapshot) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetGlobalStakeAmountNanos: ") } + if globalStakeAmountNanos == nil { + globalStakeAmountNanos = uint256.NewInt() + } + // Cache the GlobalStakeAmountNanos from the db in the UtxoView. 
+ bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) return globalStakeAmountNanos, nil } @@ -1869,7 +1884,7 @@ func (bav *UtxoView) _setValidatorEntryMappings(validatorEntry *ValidatorEntry) glog.Errorf("_setValidatorEntryMappings: called with nil entry, this should never happen") return } - bav.ValidatorMapKeyToValidatorEntry[validatorEntry.ToMapKey()] = validatorEntry + bav.ValidatorPKIDToValidatorEntry[*validatorEntry.ValidatorPKID] = validatorEntry } func (bav *UtxoView) _deleteValidatorEntryMappings(validatorEntry *ValidatorEntry) { @@ -1896,14 +1911,14 @@ func (bav *UtxoView) _setGlobalStakeAmountNanos(globalStakeAmountNanos *uint256. func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { // Delete all entries in the ValidatorMapKeyToValidatorEntry UtxoView map. - for validatorMapKeyIter, validatorEntryIter := range bav.ValidatorMapKeyToValidatorEntry { + for validatorMapKeyIter, validatorEntryIter := range bav.ValidatorPKIDToValidatorEntry { // Make a copy of the iterators since we make references to them below. validatorMapKey := validatorMapKeyIter validatorEntry := *validatorEntryIter // Sanity-check that the entry matches the map key. - validatorMapKeyInEntry := validatorEntry.ToMapKey() - if validatorMapKeyInEntry != validatorMapKey { + validatorMapKeyInEntry := *validatorEntry.ValidatorPKID + if !validatorMapKeyInEntry.Eq(&validatorMapKey) { return fmt.Errorf( "_flushValidatorEntriesToDbWithTxn: ValidatorEnry key %v doesn't match MapKey %v", &validatorMapKeyInEntry, @@ -1913,13 +1928,13 @@ func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHei // Delete the existing mappings in the db for this ValidatorMapKey. They // will be re-added if the corresponding entry in memory has isDeleted=false. 
- if err := DBDeleteValidatorWithTxn(txn, bav.Snapshot, &validatorEntry); err != nil { + if err := DBDeleteValidatorWithTxn(txn, bav.Snapshot, &validatorMapKey); err != nil { return errors.Wrapf(err, "_flushValidatorEntriesToDbWithTxn: ") } } // Set any !isDeleted ValidatorEntries in the ValidatorMapKeyToValidatorEntry UtxoView map. - for _, validatorEntryIter := range bav.ValidatorMapKeyToValidatorEntry { + for _, validatorEntryIter := range bav.ValidatorPKIDToValidatorEntry { validatorEntry := *validatorEntryIter if validatorEntry.isDeleted { // If ValidatorEntry.isDeleted then there's nothing to @@ -1975,7 +1990,6 @@ func (bav *UtxoView) CreateRegisterAsValidatorTxindexMetadata( DisableDelegatedStake: metadata.DisableDelegatedStake, VotingPublicKey: metadata.VotingPublicKey.ToString(), VotingPublicKeySignature: metadata.VotingPublicKeySignature.ToString(), - VotingSignatureBlockHeight: metadata.VotingSignatureBlockHeight, } // Construct AffectedPublicKeys. @@ -2092,23 +2106,47 @@ func DecodeBLSSignature(rr io.Reader) (*bls.Signature, error) { return (&bls.Signature{}).FromBytes(signatureBytes) } +// When registering as a validator, there are two keys that are involved: +// +// - transactorPublicKey: This is the key that is used to sign transactions on the +// network. +// +// - votingPublicKey: This is the key that is used as a part of consensus to sign +// vote and timeout messages. It is distinct from the transactorPublicKey because +// it is a BLS key rather than a standard ECDSA key, which means we can *aggregate* +// signatures generated by these keys, which is needed for our Fast-HotStuff +// consensus to be efficient. It is also useful from an operational standpoint +// to separate the key used to perform transactions on the network from the key +// used to vote on blocks (the former can remail "cold" while the latter needs +// to remain "hot"). 
+// +// Given that there are two keys involved, the validation of a RegisterAsValidator +// transaction needs to check a few things: +// +// 1. That the user owns the transactorPublicKey. This is proven by checking the signature +// at the transaction level. +// +// 2. That the user owns the votingPublicKey. This is proven by checking a *second* +// signature embedded in the RegisterAsValidatorMetadata, which we define below. +// +// To prove #2, it is sufficient to have the votingPublicKey sign the +// transactorPublicKey. Doing this makes it so that the signature can only ever be +// used to associate this specific votingPublicKey with this specific transactorPublicKey. +// In addition, the fact that the transactorPublicKey is required to sign the +// *entire transaction payload*, including this votingPublicKey signature, +// ensures that no *other* transactor can ever associate +// this votingPublicKey with another key. Finally, replay attacks are prevented by +// the fact that the transaction includes a nonce in its payload, signed by the +// transactorPublicKey, that only allows the transaction as a whole to be run once. +// This means that nobody can construct a transaction to re-register this validator +// without constructing a new transaction with a fresh nonce, thus requiring a new +// signature from the same transactorPublicKey, which they wouldn't have access to. func CreateValidatorVotingSignaturePayload( transactorPublicKeyBytes []byte, - votingPublicKey *bls.PublicKey, - votingSignatureBlockHeight uint64, ) []byte { - var data []byte // HASH(TransactorPublicKey) hashedTransactorPublicKey := sha256.Sum256(transactorPublicKeyBytes) - data = append(data, hashedTransactorPublicKey[:]...) - // HASH(VotingPublicKey) - hashedVotingPublicKey := sha256.Sum256(votingPublicKey.ToBytes()) - data = append(data, hashedVotingPublicKey[:]...) - // VotingSignatureBlockHeight - data = append(data, UintToBuf(votingSignatureBlockHeight)...) 
- // HASH( HASH(TransactorPublicKey) || HASH(VotingPublicKey) || VotingSignatureBlockHeight ) - hashedData := sha256.Sum256(data) - return hashedData[:] + return hashedTransactorPublicKey[:] } // @@ -2125,9 +2163,8 @@ const RuleErrorValidatorNotFound RuleError = "RuleErrorValidatorNotFound" const RuleErrorValidatorMissingVotingPublicKey RuleError = "RuleErrorValidatorMissingVotingPublicKey" const RuleErrorValidatorMissingVotingPublicKeySignature RuleError = "RuleErrorValidatorMissingVotingPublicKeySignature" const RuleErrorValidatorInvalidVotingPublicKeySignature RuleError = "RuleErrorValidatorInvalidVotingPublicKeySignature" -const RuleErrorValidatorInvalidVotingSignatureBlockHeight RuleError = "RuleErrorValidatorInvalidVotingSignatureBlockHeight" const RuleErrorValidatorDisablingExistingDelegatedStakers RuleError = "RuleErrorValidatorDisablingExistingDelegatedStakers" const RuleErrorUnjailingNonjailedValidator RuleError = "RuleErrorUnjailingNonjailedValidator" const RuleErrorUnjailingValidatorTooEarly RuleError = "RuleErrorUnjailingValidatorTooEarly" -const MaxValidatorNumDomains int = 12 +const MaxValidatorNumDomains int = 100 diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 6cfe055a3..96126d100 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -5,12 +5,13 @@ package lib import ( "errors" "fmt" + "math" + "testing" + "github.com/btcsuite/btcd/btcec" "github.com/deso-protocol/core/bls" "github.com/holiman/uint256" "github.com/stretchr/testify/require" - "math" - "testing" ) func TestValidatorRegistration(t *testing.T) { @@ -93,11 +94,10 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - 
VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) @@ -162,57 +162,12 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Error(t, err) require.Contains(t, err.Error(), RuleErrorValidatorMissingVotingPublicKey) } - { - // RuleErrorValidatorInvalidVotingSignatureBlockHeight: missing - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) - registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - } - _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) - require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingSignatureBlockHeight) - } - { - // RuleErrorValidatorInvalidVotingSignatureBlockHeight: too low - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) - registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight - 1, - } - _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) - require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingSignatureBlockHeight) - } - { - // RuleErrorValidatorInvalidVotingSignatureBlockHeight: too high - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) - registerMetadata = &RegisterAsValidatorMetadata{ - 
Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight + params.ValidatorVotingSignatureBlockHeightWindow + 1, - } - _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) - require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingSignatureBlockHeight) - } { // RuleErrorValidatorMissingVotingPublicKeySignature votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, } _, err = _submitRegisterAsValidatorTxn( testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, @@ -224,10 +179,9 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // RuleErrorValidatorInvalidVotingPublicKeySignature: invalid TransactorPkBytes votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn( testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, @@ -240,25 +194,9 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) _, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - 
Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, - } - _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) - require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingPublicKeySignature) - } - { - // RuleErrorValidatorInvalidVotingPublicKeySignature: invalid VotingSignatureBlockHeight - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) - registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight + 1, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn( testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, @@ -270,11 +208,10 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // Happy path: register a validator votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } extraData := map[string][]byte{"TestKey": []byte("TestValue1")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -311,11 +248,10 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // Happy path: update a validator 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example1.com"), []byte("https://example2.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example1.com"), []byte("https://example2.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -677,10 +613,9 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { // Perform a RegisterAsValidator txn. No error expected. registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } err = _submitValidatorTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: registerAsValidatorMetadata}, @@ -744,10 +679,9 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { // Perform a RegisterAsValidator txn. Error expected. 
registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } err = _submitValidatorTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: registerAsValidatorMetadata}, @@ -845,10 +779,9 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { // m0 registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m0.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -864,10 +797,9 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { // m1 registers as a validator. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m1.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m1.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -881,10 +813,9 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { // m2 registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m2PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m2.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m2.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m2Pub, m2Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1058,11 +989,10 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Store m0's ValidatorEntry in the db with TotalStake = 100 nanos. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) validatorEntry := &ValidatorEntry{ - ValidatorPKID: m0PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(100), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + ValidatorPKID: m0PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(100), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } utxoView._setValidatorEntryMappings(validatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -1074,17 +1004,16 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) // Verify m0 is not stored in the UtxoView. - require.Empty(t, utxoView.ValidatorMapKeyToValidatorEntry) + require.Empty(t, utxoView.ValidatorPKIDToValidatorEntry) // Store m1's jailed ValidatorEntry in the db with TotalStake = 400 nanos. votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) validatorEntry = &ValidatorEntry{ - ValidatorPKID: m1PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(400), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, - JailedAtEpochNumber: 1, + ValidatorPKID: m1PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(400), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + JailedAtEpochNumber: 1, } utxoView._setValidatorEntryMappings(validatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -1099,11 +1028,10 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Store m2's ValidatorEntry in the db with TotalStake = 300 nanos. 
votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m2PkBytes, blockHeight) m2ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m2PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(300), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + ValidatorPKID: m2PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(300), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } utxoView._setValidatorEntryMappings(m2ValidatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -1117,11 +1045,10 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Store m3's ValidatorEntry in the db with TotalStake = 600 nanos. votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m3PkBytes, blockHeight) m3ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m3PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(600), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + ValidatorPKID: m3PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(600), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } utxoView._setValidatorEntryMappings(m3ValidatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -1138,11 +1065,11 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.NotNil(t, validatorEntry) // Verify m2 is also stored in the UtxoView. 
- require.Len(t, utxoView.ValidatorMapKeyToValidatorEntry, 1) - require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m2ValidatorEntry.ToMapKey()].ValidatorPKID, m2PKID) + require.Len(t, utxoView.ValidatorPKIDToValidatorEntry, 1) + require.Equal(t, utxoView.ValidatorPKIDToValidatorEntry[*m2ValidatorEntry.ValidatorPKID].ValidatorPKID, m2PKID) require.Equal( t, - utxoView.ValidatorMapKeyToValidatorEntry[m2ValidatorEntry.ToMapKey()].TotalStakeAmountNanos, + utxoView.ValidatorPKIDToValidatorEntry[*m2ValidatorEntry.ValidatorPKID].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300), ) @@ -1150,17 +1077,16 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { utxoView._deleteValidatorEntryMappings(m3ValidatorEntry) // Verify m3 is stored in the UtxoView with isDeleted=true. - require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m3ValidatorEntry.ToMapKey()].ValidatorPKID, m3PKID) - require.True(t, utxoView.ValidatorMapKeyToValidatorEntry[m3ValidatorEntry.ToMapKey()].isDeleted) + require.Equal(t, utxoView.ValidatorPKIDToValidatorEntry[*m3ValidatorEntry.ValidatorPKID].ValidatorPKID, m3PKID) + require.True(t, utxoView.ValidatorPKIDToValidatorEntry[*m3ValidatorEntry.ValidatorPKID].isDeleted) // Store m4's ValidatorEntry in the UtxoView with TotalStake = 50 nanos. 
votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m4PkBytes, blockHeight) m4ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m4PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + ValidatorPKID: m4PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } utxoView._setValidatorEntryMappings(m4ValidatorEntry) @@ -1170,23 +1096,22 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Nil(t, validatorEntry) // Verify m4 is stored in the UtxoView. - require.Len(t, utxoView.ValidatorMapKeyToValidatorEntry, 3) - require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m4ValidatorEntry.ToMapKey()].ValidatorPKID, m4PKID) + require.Len(t, utxoView.ValidatorPKIDToValidatorEntry, 3) + require.Equal(t, utxoView.ValidatorPKIDToValidatorEntry[*m4ValidatorEntry.ValidatorPKID].ValidatorPKID, m4PKID) require.Equal( t, - utxoView.ValidatorMapKeyToValidatorEntry[m4ValidatorEntry.ToMapKey()].TotalStakeAmountNanos, + utxoView.ValidatorPKIDToValidatorEntry[*m4ValidatorEntry.ValidatorPKID].TotalStakeAmountNanos, uint256.NewInt().SetUint64(50), ) // Store m5's jailed ValidatorEntry in the UtxoView with TotalStake = 500 nanos. 
votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m5PkBytes, blockHeight) m5ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m5PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(500), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, - JailedAtEpochNumber: 1, + ValidatorPKID: m5PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(500), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + JailedAtEpochNumber: 1, } utxoView._setValidatorEntryMappings(m5ValidatorEntry) @@ -1196,15 +1121,15 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Nil(t, validatorEntry) // Verify m5 is stored in the UtxoView. - require.Len(t, utxoView.ValidatorMapKeyToValidatorEntry, 4) - require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m5ValidatorEntry.ToMapKey()].ValidatorPKID, m5PKID) + require.Len(t, utxoView.ValidatorPKIDToValidatorEntry, 4) + require.Equal(t, utxoView.ValidatorPKIDToValidatorEntry[*m5ValidatorEntry.ValidatorPKID].ValidatorPKID, m5PKID) require.Equal( t, - utxoView.ValidatorMapKeyToValidatorEntry[m5ValidatorEntry.ToMapKey()].TotalStakeAmountNanos, + utxoView.ValidatorPKIDToValidatorEntry[*m5ValidatorEntry.ValidatorPKID].TotalStakeAmountNanos, uint256.NewInt().SetUint64(500), ) require.Equal( - t, utxoView.ValidatorMapKeyToValidatorEntry[m5ValidatorEntry.ToMapKey()].Status(), ValidatorStatusJailed, + t, utxoView.ValidatorPKIDToValidatorEntry[*m5ValidatorEntry.ValidatorPKID].Status(), ValidatorStatusJailed, ) // Fetch TopActiveValidatorsByStake merging ValidatorEntries from the db and UtxoView. @@ -1298,11 +1223,10 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 registers as a validator with DisableDelegatedStake = FALSE. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1320,11 +1244,10 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 updates DisableDelegatedStake = TRUE. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: true, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: true, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1366,11 +1289,10 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 updates DisableDelegatedStake = FALSE. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1399,11 +1321,10 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 tries to update DisableDelegateStake = TRUE. Errors. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: true, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: true, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) @@ -1500,10 +1421,9 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { // m0 registers as a validator. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m0.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1707,10 +1627,9 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // m0 registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } extraData := map[string][]byte{"TestKey": []byte("TestValue1")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -1733,8 +1652,8 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // trying to unjail himself, but he was never jailed. // Delete m0's ValidatorEntry from the UtxoView. - delete(mempool.universalUtxoView.ValidatorMapKeyToValidatorEntry, validatorEntry.ToMapKey()) - delete(mempool.readOnlyUtxoView.ValidatorMapKeyToValidatorEntry, validatorEntry.ToMapKey()) + delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) + delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) // Set JailedAtEpochNumber. 
validatorEntry.JailedAtEpochNumber = currentEpochNumber @@ -2013,10 +1932,9 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { // sender registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, senderPkString, senderPrivString, registerMetadata, nil, true) require.NoError(t, err) @@ -2031,8 +1949,8 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { // trying to unjail himself, but he was never jailed. // Delete sender's ValidatorEntry from the UtxoView. - delete(mempool.universalUtxoView.ValidatorMapKeyToValidatorEntry, validatorEntry.ToMapKey()) - delete(mempool.readOnlyUtxoView.ValidatorMapKeyToValidatorEntry, validatorEntry.ToMapKey()) + delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) + delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) // Set JailedAtEpochNumber. 
validatorEntry.JailedAtEpochNumber = currentEpochNumber @@ -2161,7 +2079,7 @@ func _generateVotingPublicKeyAndSignature(t *testing.T, transactorPkBytes []byte blsPrivateKey, err := bls.NewPrivateKey() require.NoError(t, err) votingPublicKey := blsPrivateKey.PublicKey() - signaturePayload := CreateValidatorVotingSignaturePayload(transactorPkBytes, votingPublicKey, blockHeight) + signaturePayload := CreateValidatorVotingSignaturePayload(transactorPkBytes) votingSignature, err := blsPrivateKey.Sign(signaturePayload) require.NoError(t, err) return votingPublicKey, votingSignature diff --git a/lib/constants.go b/lib/constants.go index 4530999cd..3ebccda33 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -3,7 +3,6 @@ package lib import ( "encoding/hex" "fmt" - "github.com/pkg/errors" "log" "math" "math/big" @@ -14,6 +13,8 @@ import ( "sort" "time" + "github.com/pkg/errors" + "github.com/holiman/uint256" "github.com/btcsuite/btcd/chaincfg" @@ -606,12 +607,6 @@ type DeSoParams struct { // TODO: Move this to GlobalParamsEntry. ValidatorJailEpochDuration uint64 - // When registering, a validator must include a VotingSignatureBlockHeight within CurrentBlockHeight - // to CurrentBlockHeight + ValidatorVotingSignatureBlockHeightWindow blocks. This is to prevent - // validator registration replay attacks. - // TODO: Move this to GlobalParamsEntry. - ValidatorVotingSignatureBlockHeightWindow uint64 - ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -990,11 +985,6 @@ var DeSoMainnetParams = DeSoParams{ // Jailed validators can be unjailed after a minimum of N elapsed epochs. ValidatorJailEpochDuration: uint64(3), - // When registering, a validator must include a VotingSignatureBlockHeight within CurrentBlockHeight - // to CurrentBlockHeight + ValidatorVotingSignatureBlockHeightWindow blocks. This is to prevent - // validator registration replay attacks. 
- ValidatorVotingSignatureBlockHeightWindow: uint64(300), - ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1229,11 +1219,6 @@ var DeSoTestnetParams = DeSoParams{ // Jailed validators can be unjailed after a minimum of N elapsed epochs. ValidatorJailEpochDuration: uint64(3), - // When registering, a validator must include a VotingSignatureBlockHeight within CurrentBlockHeight - // to CurrentBlockHeight + ValidatorVotingSignatureBlockHeightWindow blocks. This is to prevent - // validator registration replay attacks. - ValidatorVotingSignatureBlockHeightWindow: uint64(300), - ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), diff --git a/lib/db_utils.go b/lib/db_utils.go index cb3892d59..2272501ca 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -7,7 +7,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/holiman/uint256" "io" "log" "math" @@ -18,6 +17,8 @@ import ( "strings" "time" + "github.com/holiman/uint256" + "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" "github.com/dgraph-io/badger/v3" @@ -481,26 +482,44 @@ type DBPrefixes struct { PrefixValidatorByPKID []byte `prefix_id:"[78]" is_state:"true"` // PrefixValidatorByStake: Retrieve the top N validators by stake. - // Prefix, TotalStakeAmountNanos, MaxUint64 - RegisteredAtBlockHeight, ValidatorPKID -> ValidatorPKID - // FIXME: @DH, should we duplicate the ValidatorPKID in the key and the value? - // Alternatively, we could just store and parse the ValidatorPKID from the key - // and store a struct{} as the value. 
That saves on space, but makes retrieving - // the ValidatorPKID from the key bytes more complex than just reading the value - // bytes directly since the key includes other preceding fields. Interesting - // trade-off. Curious your opinion. + // Prefix, TotalStakeAmountNanos, ValidatorPKID -> nil + // Note that we save space by storing a nil value and parsing the ValidatorPKID from the key. PrefixValidatorByStake []byte `prefix_id:"[79]" is_state:"true"` // PrefixGlobalStakeAmountNanos: Retrieve the cumulative stake across all validators. // Prefix -> *uint256.Int PrefixGlobalStakeAmountNanos []byte `prefix_id:"[80]" is_state:"true"` - // PrefixStakeByValidatorByStaker: Retrieve a StakeEntry. + // PrefixStakeByValidatorAndStaker: Retrieve a StakeEntry. // Prefix, ValidatorPKID, StakerPKID -> StakeEntry - PrefixStakeByValidatorByStaker []byte `prefix_id:"[81]" is_state:"true"` + PrefixStakeByValidatorAndStaker []byte `prefix_id:"[81]" is_state:"true"` - // PrefixLockedStakeByValidatorByStakerByLockedAt: Retrieve a LockedStakeEntry. + // PrefixLockedStakeByValidatorAndStakerAndLockedAt: Retrieve a LockedStakeEntry. // Prefix, ValidatorPKID, StakerPKID, LockedAtEpochNumber -> LockedStakeEntry - PrefixLockedStakeByValidatorByStakerByLockedAt []byte `prefix_id:"[82]" is_state:"true"` + // + // The way staking works is that staking to a validator is instant and creates a StakeEntry + // immediately, but UNstaking from a validator has a "cooldown" period before the funds + // are returned to the user. This cooldown period is implemented in Unstake by decrementing + // from the StakeEntry and creating a new LockedStakeEntry with the amount being unstaked. + // the LockedStakeEntry has a LockedAtEpochNumber indicating when the Unstake occurred. This + // allows the user to then call a *second* Unlock txn to pull the LockedStake into their + // wallet balance after enough epochs have passed since LockedAtEpochNumber. 
+ // + // Below is an example: + // - User stakes 100 DESO to a validator. A StakeEntry is created containing 100 DESO. + // - User unstakes 25 DESO at epoch 123. The StakeEntry is decremented to 75 DESO and a + // LockedStakeEntry is created containing: + // * + // - Suppose the cooldown period is 3 epochs. If the user tries to call UnlockStake at + // epoch 124, for example, which is one epoch after they called Unstake, the call will + // fail because (CurrentEpoch - LockedAtEpockNumber) = 124 - 123 = 1, which is less + // than cooldown = 3. + // - After 3 epochs have passed, however, the UnlockStake transaction will work. For + // example, suppose the user calls UnlockStake at spoch 133. Now, we have + // (CurrentEpoch - LockedAtEpochNumber) = 133 - 123 = 10, which is greater than + // cooldown=3. Thus the UnlockStake will succeed, which will result in the + // LockedStakeEntry being deleted and 25 DESO being added to the user's balance. + PrefixLockedStakeByValidatorAndStakerAndLockedAt []byte `prefix_id:"[82]" is_state:"true"` // PrefixCurrentEpoch: Retrieve the current EpochEntry. 
// Prefix -> EpochEntry @@ -713,14 +732,14 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco return true, &ValidatorEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByStake) { // prefix_id:"[79]" - return true, &PKID{} + return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixGlobalStakeAmountNanos) { // prefix_id:"[80]" return false, nil - } else if bytes.Equal(prefix, Prefixes.PrefixStakeByValidatorByStaker) { + } else if bytes.Equal(prefix, Prefixes.PrefixStakeByValidatorAndStaker) { // prefix_id:"[81]" return true, &StakeEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixLockedStakeByValidatorByStakerByLockedAt) { + } else if bytes.Equal(prefix, Prefixes.PrefixLockedStakeByValidatorAndStakerAndLockedAt) { // prefix_id:"[82]" return true, &LockedStakeEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixCurrentEpoch) { @@ -5873,7 +5892,7 @@ func (txnMeta *DAOCoinTransferTxindexMetadata) RawEncodeWithoutMetadata(blockHei var data []byte data = append(data, EncodeByteArray([]byte(txnMeta.CreatorUsername))...) - data = append(data, EncodeUint256(&txnMeta.DAOCoinToTransferNanos)...) + data = append(data, VariableEncodeUint256(&txnMeta.DAOCoinToTransferNanos)...) return data } @@ -5886,7 +5905,7 @@ func (txnMeta *DAOCoinTransferTxindexMetadata) RawDecodeWithoutMetadata(blockHei } txnMeta.CreatorUsername = string(creatorUsernameBytes) - DAOCoinToTransferNanos, err := DecodeUint256(rr) + DAOCoinToTransferNanos, err := VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "DAOCoinTransferTxindexMetadata.Decode: Problem reading DAOCoinToTransferNanos") } @@ -5916,8 +5935,8 @@ func (txnMeta *DAOCoinTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint data = append(data, EncodeByteArray([]byte(txnMeta.CreatorUsername))...) data = append(data, EncodeByteArray([]byte(txnMeta.OperationType))...) - data = append(data, EncodeUint256(txnMeta.CoinsToMintNanos)...) 
- data = append(data, EncodeUint256(txnMeta.CoinsToBurnNanos)...) + data = append(data, VariableEncodeUint256(txnMeta.CoinsToMintNanos)...) + data = append(data, VariableEncodeUint256(txnMeta.CoinsToBurnNanos)...) data = append(data, EncodeByteArray([]byte(txnMeta.TransferRestrictionStatus))...) return data @@ -5938,12 +5957,12 @@ func (txnMeta *DAOCoinTxindexMetadata) RawDecodeWithoutMetadata(blockHeight uint } txnMeta.OperationType = string(operationTypeBytes) - txnMeta.CoinsToMintNanos, err = DecodeUint256(rr) + txnMeta.CoinsToMintNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "DAOCoinTxindexMetadata.Decode: problem reading CoinsToMintNanos") } - txnMeta.CoinsToBurnNanos, err = DecodeUint256(rr) + txnMeta.CoinsToBurnNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "DAOCoinTxindexMetadata.Decode: problem reading CoinsToBurnNanos") } @@ -5980,8 +5999,8 @@ func (orderMeta *FilledDAOCoinLimitOrderMetadata) RawEncodeWithoutMetadata(block data = append(data, EncodeByteArray([]byte(orderMeta.TransactorPublicKeyBase58Check))...) data = append(data, EncodeByteArray([]byte(orderMeta.BuyingDAOCoinCreatorPublicKey))...) data = append(data, EncodeByteArray([]byte(orderMeta.SellingDAOCoinCreatorPublicKey))...) - data = append(data, EncodeUint256(orderMeta.CoinQuantityInBaseUnitsBought)...) - data = append(data, EncodeUint256(orderMeta.CoinQuantityInBaseUnitsSold)...) + data = append(data, VariableEncodeUint256(orderMeta.CoinQuantityInBaseUnitsBought)...) + data = append(data, VariableEncodeUint256(orderMeta.CoinQuantityInBaseUnitsSold)...) 
data = append(data, BoolToByte(orderMeta.IsFulfilled)) return data @@ -6011,13 +6030,13 @@ func (orderMeta *FilledDAOCoinLimitOrderMetadata) RawDecodeWithoutMetadata(block orderMeta.SellingDAOCoinCreatorPublicKey = string(sellingDAOCoinCreatorPublicKey) // CoinQuantityInBaseUnitsBought - orderMeta.CoinQuantityInBaseUnitsBought, err = DecodeUint256(rr) + orderMeta.CoinQuantityInBaseUnitsBought, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "FilledDAOCoinLimitOrderMetadata.Decode: Problem reading CoinQuantityInBaseUnitsBought") } // CoinQuantityInBaseUnitsSold - orderMeta.CoinQuantityInBaseUnitsSold, err = DecodeUint256(rr) + orderMeta.CoinQuantityInBaseUnitsSold, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "FilledDAOCoinLimitOrderMetadata.Decode: Problem reading CoinQuantityInBaseUnitsSold") } @@ -6050,8 +6069,8 @@ func (daoMeta *DAOCoinLimitOrderTxindexMetadata) RawEncodeWithoutMetadata(blockH data = append(data, EncodeByteArray([]byte(daoMeta.BuyingDAOCoinCreatorPublicKey))...) data = append(data, EncodeByteArray([]byte(daoMeta.SellingDAOCoinCreatorPublicKey))...) - data = append(data, EncodeUint256(daoMeta.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) - data = append(data, EncodeUint256(daoMeta.QuantityToFillInBaseUnits)...) + data = append(data, VariableEncodeUint256(daoMeta.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) + data = append(data, VariableEncodeUint256(daoMeta.QuantityToFillInBaseUnits)...) data = append(data, UintToBuf(uint64(len(daoMeta.FilledDAOCoinLimitOrdersMetadata)))...) 
for _, order := range daoMeta.FilledDAOCoinLimitOrdersMetadata { @@ -6074,12 +6093,12 @@ func (daoMeta *DAOCoinLimitOrderTxindexMetadata) RawDecodeWithoutMetadata(blockH } daoMeta.SellingDAOCoinCreatorPublicKey = string(sellingDAOCoinCreatorPublicKey) - daoMeta.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = DecodeUint256(rr) + daoMeta.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "DAOCoinLimitOrderTxindexMetadata.Decode: Problem reading ScaledExchangeRateCoinsToSellPerCoinToBuy") } - daoMeta.QuantityToFillInBaseUnits, err = DecodeUint256(rr) + daoMeta.QuantityToFillInBaseUnits, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "DAOCoinLimitOrderTxindexMetadata.Decode: Problem reading QuantityToFillInBaseUnits") } @@ -9323,7 +9342,7 @@ func DBGetPaginatedProfilesByDeSoLocked( func DBKeyForDAOCoinLimitOrder(order *DAOCoinLimitOrderEntry) []byte { key := DBPrefixKeyForDAOCoinLimitOrder(order) - key = append(key, EncodeUint256(order.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) + key = append(key, VariableEncodeUint256(order.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) // Store MaxUint32 - block height to guarantee FIFO // orders as we seek in reverse order. key = append(key, _EncodeUint32(math.MaxUint32-order.BlockHeight)...) 
diff --git a/lib/network.go b/lib/network.go index b740c0faa..0ce83d3e4 100644 --- a/lib/network.go +++ b/lib/network.go @@ -8,7 +8,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/decred/dcrd/dcrec/secp256k1/v4" "io" "math" "math/big" @@ -18,6 +17,8 @@ import ( "strings" "time" + "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" @@ -5967,12 +5968,13 @@ func (tsl *TransactionSpendingLimit) ToBytes(blockHeight uint64) ([]byte, error) for key := range tsl.StakeLimitMap { keys = append(keys, key) } + // Sort the keys to ensure deterministic ordering. sort.Slice(keys, func(ii, jj int) bool { return hex.EncodeToString(keys[ii].Encode()) < hex.EncodeToString(keys[jj].Encode()) }) for _, key := range keys { data = append(data, key.Encode()...) - data = append(data, EncodeUint256(tsl.StakeLimitMap[key])...) + data = append(data, VariableEncodeUint256(tsl.StakeLimitMap[key])...) } } @@ -5987,12 +5989,13 @@ func (tsl *TransactionSpendingLimit) ToBytes(blockHeight uint64) ([]byte, error) for key := range tsl.UnstakeLimitMap { keys = append(keys, key) } + // Sort the keys to ensure deterministic ordering. sort.Slice(keys, func(ii, jj int) bool { return hex.EncodeToString(keys[ii].Encode()) < hex.EncodeToString(keys[jj].Encode()) }) for _, key := range keys { data = append(data, key.Encode()...) - data = append(data, EncodeUint256(tsl.UnstakeLimitMap[key])...) + data = append(data, VariableEncodeUint256(tsl.UnstakeLimitMap[key])...) } } @@ -6007,6 +6010,7 @@ func (tsl *TransactionSpendingLimit) ToBytes(blockHeight uint64) ([]byte, error) for key := range tsl.UnlockStakeLimitMap { keys = append(keys, key) } + // Sort the keys to ensure deterministic ordering. 
sort.Slice(keys, func(ii, jj int) bool { return hex.EncodeToString(keys[ii].Encode()) < hex.EncodeToString(keys[jj].Encode()) }) @@ -6241,7 +6245,7 @@ func (tsl *TransactionSpendingLimit) FromBytes(blockHeight uint64, rr *bytes.Rea return errors.Wrap(err, "Error decoding StakeLimitKey: ") } var stakeLimitDESONanos *uint256.Int - stakeLimitDESONanos, err = DecodeUint256(rr) + stakeLimitDESONanos, err = VariableDecodeUint256(rr) if err != nil { return err } @@ -6265,7 +6269,7 @@ func (tsl *TransactionSpendingLimit) FromBytes(blockHeight uint64, rr *bytes.Rea return errors.Wrap(err, "Error decoding StakeLimitKey: ") } var unstakeLimitDESONanos *uint256.Int - unstakeLimitDESONanos, err = DecodeUint256(rr) + unstakeLimitDESONanos, err = VariableDecodeUint256(rr) if err != nil { return err } @@ -7501,8 +7505,8 @@ func (txnData *DAOCoinLimitOrderMetadata) GetTxnType() TxnType { func (txnData *DAOCoinLimitOrderMetadata) ToBytes(preSignature bool) ([]byte, error) { data := append([]byte{}, EncodeOptionalPublicKey(txnData.BuyingDAOCoinCreatorPublicKey)...) data = append(data, EncodeOptionalPublicKey(txnData.SellingDAOCoinCreatorPublicKey)...) - data = append(data, EncodeOptionalUint256(txnData.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) - data = append(data, EncodeOptionalUint256(txnData.QuantityToFillInBaseUnits)...) + data = append(data, FixedWidthEncodeUint256(txnData.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) + data = append(data, FixedWidthEncodeUint256(txnData.QuantityToFillInBaseUnits)...) data = append(data, UintToBuf(uint64(txnData.OperationType))...) data = append(data, UintToBuf(uint64(txnData.FillType))...) data = append(data, EncodeOptionalBlockHash(txnData.CancelOrderID)...) 
@@ -7549,13 +7553,13 @@ func (txnData *DAOCoinLimitOrderMetadata) FromBytes(data []byte) error { } // Parse ScaledExchangeRateCoinsToSellPerCoinToBuy - ret.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = ReadOptionalUint256(rr) + ret.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = FixedWidthDecodeUint256(rr) if err != nil { return fmt.Errorf("DAOCoinLimitOrderMetadata.FromBytes: Error reading ScaledPrice: %v", err) } // Parse QuantityToFillInBaseUnits - ret.QuantityToFillInBaseUnits, err = ReadOptionalUint256(rr) + ret.QuantityToFillInBaseUnits, err = FixedWidthDecodeUint256(rr) if err != nil { return fmt.Errorf("DAOCoinLimitOrderMetadata.FromBytes: Error reading QuantityToFillInBaseUnits: %v", err) } diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index c4685fea3..cb4e2c16e 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -67,7 +67,7 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { // First, check the UtxoView. epochEntry = bav.CurrentEpochEntry if epochEntry != nil { - return epochEntry.Copy(), nil + return epochEntry, nil } // If not found, check the database. @@ -77,7 +77,7 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { } if epochEntry != nil { // Cache in the UtxoView. - bav.CurrentEpochEntry = epochEntry.Copy() + bav._setCurrentEpochEntry(epochEntry) } return epochEntry, nil } diff --git a/lib/types.go b/lib/types.go index 71bb6630f..3e531f2ca 100644 --- a/lib/types.go +++ b/lib/types.go @@ -3,11 +3,12 @@ package lib import ( "bytes" "fmt" - "github.com/holiman/uint256" - "github.com/pkg/errors" "io" "reflect" "sort" + + "github.com/holiman/uint256" + "github.com/pkg/errors" ) // A PKID is an ID associated with a public key. In the DB, various fields are @@ -273,11 +274,11 @@ func ReadOptionalBlockHash(rr *bytes.Reader) (*BlockHash, error) { return nil, nil } -// EncodeOptionalUint256 guarantees fixed-width encoding which is useful -// in BadgerDB keys. 
It is less space-efficient than EncodeUint256, -// which should be used elsewhere. Both EncodeUint256 and -// EncodeOptionalUint256 can handle nil inputs. -func EncodeOptionalUint256(val *uint256.Int) []byte { +// FixedWidthEncodeUint256 guarantees fixed-width encoding which is useful +// in BadgerDB keys. It is less space-efficient than VariableEncodeUint256, +// which should be used elsewhere. Both VariableEncodeUint256 and +// FixedWidthEncodeUint256 can handle nil inputs. +func FixedWidthEncodeUint256(val *uint256.Int) []byte { if val == nil { return UintToBuf(uint64(0)) } @@ -287,7 +288,7 @@ func EncodeOptionalUint256(val *uint256.Int) []byte { return append(UintToBuf(uint64(len(encodedVal))), encodedVal...) } -func ReadOptionalUint256(rr *bytes.Reader) (*uint256.Int, error) { +func FixedWidthDecodeUint256(rr *bytes.Reader) (*uint256.Int, error) { byteCount, err := ReadUvarint(rr) if err != nil { return nil, err From 0cc0694c03a93340442dfe57dc4dd9e930705bef Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Fri, 19 May 2023 11:26:36 -0400 Subject: [PATCH 038/762] Merge feature/pos-txn-types into feature/proof-of-stake (#534) * [stable] Release 3.4.2 * Diamondhands/pos txn types review (#530) * POS fixes and comments from diamondhands * Mf/pos txn types review (#531) * Add comments. * Addres review comments. * Add first batch of sanity check utils. * Add second batch of txn sanity checks. * Rename encode uint256 funcs in comments. * Nuke RegisteredAtBlockHeight. * Address review feedback. * Fix a few more typos. 
* Update some comments --------- Co-authored-by: diamondhands --------- Co-authored-by: Matt Foley <100429827+mattfoley8@users.noreply.github.com> --------- Co-authored-by: lazynina Co-authored-by: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> --- bls/signature.go | 21 +- lib/block_view.go | 17 +- lib/block_view_profile_test.go | 7 +- lib/block_view_stake.go | 470 ++++++++++++++++++++++++----- lib/block_view_stake_test.go | 33 +-- lib/block_view_types.go | 40 +-- lib/block_view_types_test.go | 43 +-- lib/block_view_validator.go | 493 +++++++++++++++++-------------- lib/block_view_validator_test.go | 296 +++++++------------ lib/constants.go | 19 +- lib/db_utils.go | 79 +++-- lib/network.go | 22 +- lib/pos_epoch.go | 4 +- lib/types.go | 17 +- 14 files changed, 935 insertions(+), 626 deletions(-) diff --git a/bls/signature.go b/bls/signature.go index 93d6f9363..8f6cfb08e 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -7,15 +7,27 @@ import ( "crypto/rand" "encoding/hex" "errors" - flowCrypto "github.com/onflow/flow-go/crypto" "strings" + + flowCrypto "github.com/onflow/flow-go/crypto" ) +// The SigningAlgorithm for BLS keys is BLSBLS12381 which is BLS on the BLS 12-381 curve. +// This is the only supported BLS signing algorithm in the flowCrypto package. +// BLS is used such that we can aggregate signatures into one signature. const SigningAlgorithm = flowCrypto.BLSBLS12381 -// TODO: what should the domainTag param be? -var HashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-protocol") +// The HashingAlgorithm for BLS keys is the following. This algorithm is used to hash input data onto the +// BLS 12-381 curve for generating signatures. The returned instance is a Hasher and can be used to +// generate BLS signatures with the Sign() method. This is the only supported BLS Hasher in the flowCrypto +// package. The input domainTag is a separation tag that defines the protocol and its subdomain. 
Such tag +// should be of the format: -V-CS-with- where is the name of the protocol, +// the protocol version number, and the index of the ciphersuite in the protocol. +var HashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-V1-CS01-with-") +// AggregateSignatures takes in an input slice of bls.Signatures and aggregates them +// into a single bls.Signature. The assumption is that each of the input bls.Signatures +// were generated by different bls.PrivateKeys signing the same payload. func AggregateSignatures(signatures []*Signature) (*Signature, error) { var flowSignatures []flowCrypto.Signature for _, signature := range signatures { @@ -28,6 +40,9 @@ func AggregateSignatures(signatures []*Signature) (*Signature, error) { return &Signature{flowSignature: aggregateFlowSignature}, nil } +// VerifyAggregateSignature takes in a slice of bls.PublicKeys, a bls.Signature, and a payload and returns +// true if every bls.PublicKey in the slice signed the payload. The input bls.Signature is the aggregate +// signature of each of their respective bls.Signatures for that payload. 
func VerifyAggregateSignature(publicKeys []*PublicKey, signature *Signature, payloadBytes []byte) (bool, error) { var flowPublicKeys []flowCrypto.PublicKey for _, publicKey := range publicKeys { diff --git a/lib/block_view.go b/lib/block_view.go index 0ce1a150e..83fbb069e 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4,14 +4,15 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/btcsuite/btcd/wire" - "github.com/holiman/uint256" "math" "math/big" "reflect" "strings" "time" + "github.com/btcsuite/btcd/wire" + "github.com/holiman/uint256" + "github.com/davecgh/go-spew/spew" "github.com/btcsuite/btcd/btcec" @@ -115,9 +116,9 @@ type UtxoView struct { TransactorNonceMapKeyToTransactorNonceEntry map[TransactorNonceMapKey]*TransactorNonceEntry // Validator mappings - ValidatorMapKeyToValidatorEntry map[ValidatorMapKey]*ValidatorEntry + ValidatorPKIDToValidatorEntry map[PKID]*ValidatorEntry - // Global stake across validators + // Global stake across all validators GlobalStakeAmountNanos *uint256.Int // Stake mappings @@ -219,7 +220,7 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { bav.TransactorNonceMapKeyToTransactorNonceEntry = make(map[TransactorNonceMapKey]*TransactorNonceEntry) // ValidatorEntries - bav.ValidatorMapKeyToValidatorEntry = make(map[ValidatorMapKey]*ValidatorEntry) + bav.ValidatorPKIDToValidatorEntry = make(map[PKID]*ValidatorEntry) // Global stake across validators. 
We deliberately want this to initialize to nil and not zero // since a zero value will overwrite an existing GlobalStakeAmountNanos value in the db, whereas @@ -487,9 +488,9 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { } // Copy the ValidatorEntries - newView.ValidatorMapKeyToValidatorEntry = make(map[ValidatorMapKey]*ValidatorEntry, len(bav.ValidatorMapKeyToValidatorEntry)) - for entryKey, entry := range bav.ValidatorMapKeyToValidatorEntry { - newView.ValidatorMapKeyToValidatorEntry[entryKey] = entry.Copy() + newView.ValidatorPKIDToValidatorEntry = make(map[PKID]*ValidatorEntry, len(bav.ValidatorPKIDToValidatorEntry)) + for entryKey, entry := range bav.ValidatorPKIDToValidatorEntry { + newView.ValidatorPKIDToValidatorEntry[entryKey] = entry.Copy() } // Copy the GlobalStakeAmountNanos. diff --git a/lib/block_view_profile_test.go b/lib/block_view_profile_test.go index 532520c5d..6883c23ef 100644 --- a/lib/block_view_profile_test.go +++ b/lib/block_view_profile_test.go @@ -4,14 +4,15 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/dgraph-io/badger/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "log" "os" "runtime/pprof" "testing" "time" + + "github.com/dgraph-io/badger/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func _swapIdentityWithTestMeta( diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index c22434e64..8c81416d3 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -3,19 +3,36 @@ package lib import ( "bytes" "fmt" + "sort" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" - "sort" ) +// Stake: Any user can assign stake to a registered validator who allows delegated stake. +// When a user stakes with a validator, they lock up $DESO from their account balance +// into a StakeEntry. 
As reward for staking, a user is eligible to receive a percentage +// of the block rewards attributed to the validator. Any staked $DESO is unspendable +// until the user unstakes and unlocks their stake. See below. +// +// Unstake: If a user wants to retrieve their funds from being staked with a validator, +// they must submit an Unstake transaction. This deletes or updates their existing +// StakeEntry and creates or updates a LockedStakeEntry. Unstaked stake is not immediately +// withdrawalable and usable. It is locked for a period of time as determined by a consensus +// parameter. This is to prevent byzantine users from trying to game block rewards or +// leader schedules. +// +// UnlockStake: Once sufficient time has elapsed since unstaking their funds, a user can +// submit an UnlockStake transaction to retrieve their funds. Any eligible funds are +// unlocked and returned to the user's account balance. + // // TYPES: StakeEntry // type StakeEntry struct { - StakeID *BlockHash StakerPKID *PKID ValidatorPKID *PKID StakeAmountNanos *uint256.Int @@ -30,7 +47,6 @@ type StakeMapKey struct { func (stakeEntry *StakeEntry) Copy() *StakeEntry { return &StakeEntry{ - StakeID: stakeEntry.StakeID.NewBlockHash(), StakerPKID: stakeEntry.StakerPKID.NewPKID(), ValidatorPKID: stakeEntry.ValidatorPKID.NewPKID(), StakeAmountNanos: stakeEntry.StakeAmountNanos.Clone(), @@ -40,7 +56,7 @@ func (stakeEntry *StakeEntry) Copy() *StakeEntry { } func (stakeEntry *StakeEntry) Eq(other *StakeEntry) bool { - return stakeEntry.StakeID.IsEqual(other.StakeID) + return stakeEntry.ToMapKey() == other.ToMapKey() } func (stakeEntry *StakeEntry) ToMapKey() StakeMapKey { @@ -52,10 +68,9 @@ func (stakeEntry *StakeEntry) ToMapKey() StakeMapKey { func (stakeEntry *StakeEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte - data = append(data, EncodeToBytes(blockHeight, stakeEntry.StakeID, skipMetadata...)...) 
data = append(data, EncodeToBytes(blockHeight, stakeEntry.StakerPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, stakeEntry.ValidatorPKID, skipMetadata...)...) - data = append(data, EncodeUint256(stakeEntry.StakeAmountNanos)...) + data = append(data, VariableEncodeUint256(stakeEntry.StakeAmountNanos)...) data = append(data, EncodeExtraData(stakeEntry.ExtraData)...) return data } @@ -63,12 +78,6 @@ func (stakeEntry *StakeEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipM func (stakeEntry *StakeEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { var err error - // StakeID - stakeEntry.StakeID, err = DecodeDeSoEncoder(&BlockHash{}, rr) - if err != nil { - return errors.Wrapf(err, "StakeEntry.Decode: Problem reading StakeID: ") - } - // StakerPKID stakeEntry.StakerPKID, err = DecodeDeSoEncoder(&PKID{}, rr) if err != nil { @@ -82,7 +91,7 @@ func (stakeEntry *StakeEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *b } // StakeAmountNanos - stakeEntry.StakeAmountNanos, err = DecodeUint256(rr) + stakeEntry.StakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "StakeEntry.Decode: Problem reading StakeAmountNanos: ") } @@ -109,7 +118,6 @@ func (stakeEntry *StakeEntry) GetEncoderType() EncoderType { // type LockedStakeEntry struct { - LockedStakeID *BlockHash StakerPKID *PKID ValidatorPKID *PKID LockedAmountNanos *uint256.Int @@ -126,7 +134,6 @@ type LockedStakeMapKey struct { func (lockedStakeEntry *LockedStakeEntry) Copy() *LockedStakeEntry { return &LockedStakeEntry{ - LockedStakeID: lockedStakeEntry.LockedStakeID.NewBlockHash(), StakerPKID: lockedStakeEntry.StakerPKID.NewPKID(), ValidatorPKID: lockedStakeEntry.ValidatorPKID.NewPKID(), LockedAmountNanos: lockedStakeEntry.LockedAmountNanos.Clone(), @@ -137,7 +144,7 @@ func (lockedStakeEntry *LockedStakeEntry) Copy() *LockedStakeEntry { } func (lockedStakeEntry *LockedStakeEntry) Eq(other *LockedStakeEntry) bool { - return 
lockedStakeEntry.LockedStakeID.IsEqual(other.LockedStakeID) + return lockedStakeEntry.ToMapKey() == other.ToMapKey() } func (lockedStakeEntry *LockedStakeEntry) ToMapKey() LockedStakeMapKey { @@ -150,10 +157,9 @@ func (lockedStakeEntry *LockedStakeEntry) ToMapKey() LockedStakeMapKey { func (lockedStakeEntry *LockedStakeEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte - data = append(data, EncodeToBytes(blockHeight, lockedStakeEntry.LockedStakeID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, lockedStakeEntry.StakerPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, lockedStakeEntry.ValidatorPKID, skipMetadata...)...) - data = append(data, EncodeUint256(lockedStakeEntry.LockedAmountNanos)...) + data = append(data, VariableEncodeUint256(lockedStakeEntry.LockedAmountNanos)...) data = append(data, UintToBuf(lockedStakeEntry.LockedAtEpochNumber)...) data = append(data, EncodeExtraData(lockedStakeEntry.ExtraData)...) 
return data @@ -162,12 +168,6 @@ func (lockedStakeEntry *LockedStakeEntry) RawEncodeWithoutMetadata(blockHeight u func (lockedStakeEntry *LockedStakeEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { var err error - // LockedStakeID - lockedStakeEntry.LockedStakeID, err = DecodeDeSoEncoder(&BlockHash{}, rr) - if err != nil { - return errors.Wrapf(err, "LockedStakeEntry.Decode: Problem reading LockedStakeID: ") - } - // StakerPKID lockedStakeEntry.StakerPKID, err = DecodeDeSoEncoder(&PKID{}, rr) if err != nil { @@ -181,7 +181,7 @@ func (lockedStakeEntry *LockedStakeEntry) RawDecodeWithoutMetadata(blockHeight u } // LockedAmountNanos - lockedStakeEntry.LockedAmountNanos, err = DecodeUint256(rr) + lockedStakeEntry.LockedAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "LockedStakeEntry.Decode: Problem reading LockedAmountNanos: ") } @@ -225,7 +225,7 @@ func (txnData *StakeMetadata) GetTxnType() TxnType { func (txnData *StakeMetadata) ToBytes(preSignature bool) ([]byte, error) { var data []byte data = append(data, EncodeByteArray(txnData.ValidatorPublicKey.ToBytes())...) - data = append(data, EncodeUint256(txnData.StakeAmountNanos)...) + data = append(data, VariableEncodeUint256(txnData.StakeAmountNanos)...) return data, nil } @@ -240,7 +240,7 @@ func (txnData *StakeMetadata) FromBytes(data []byte) error { txnData.ValidatorPublicKey = NewPublicKey(validatorPublicKeyBytes) // StakeAmountNanos - txnData.StakeAmountNanos, err = DecodeUint256(rr) + txnData.StakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "StakeMetadata.FromBytes: Problem reading StakeAmountNanos: ") } @@ -268,7 +268,7 @@ func (txnData *UnstakeMetadata) GetTxnType() TxnType { func (txnData *UnstakeMetadata) ToBytes(preSignature bool) ([]byte, error) { var data []byte data = append(data, EncodeByteArray(txnData.ValidatorPublicKey.ToBytes())...) 
- data = append(data, EncodeUint256(txnData.UnstakeAmountNanos)...) + data = append(data, VariableEncodeUint256(txnData.UnstakeAmountNanos)...) return data, nil } @@ -283,7 +283,7 @@ func (txnData *UnstakeMetadata) FromBytes(data []byte) error { txnData.ValidatorPublicKey = NewPublicKey(validatorPublicKeyBytes) // UnstakeAmountNanos - txnData.UnstakeAmountNanos, err = DecodeUint256(rr) + txnData.UnstakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "UnstakeMetadata.FromBytes: Problem reading UnstakeAmountNanos: ") } @@ -360,7 +360,7 @@ func (txindexMetadata *StakeTxindexMetadata) RawEncodeWithoutMetadata(blockHeigh var data []byte data = append(data, EncodeByteArray([]byte(txindexMetadata.StakerPublicKeyBase58Check))...) data = append(data, EncodeByteArray([]byte(txindexMetadata.ValidatorPublicKeyBase58Check))...) - data = append(data, EncodeUint256(txindexMetadata.StakeAmountNanos)...) + data = append(data, VariableEncodeUint256(txindexMetadata.StakeAmountNanos)...) return data } @@ -382,7 +382,7 @@ func (txindexMetadata *StakeTxindexMetadata) RawDecodeWithoutMetadata(blockHeigh txindexMetadata.ValidatorPublicKeyBase58Check = string(validatorPublicKeyBase58CheckBytes) // StakeAmountNanos - txindexMetadata.StakeAmountNanos, err = DecodeUint256(rr) + txindexMetadata.StakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "StakeTxindexMetadata.Decode: Problem reading StakeAmountNanos: ") } @@ -412,7 +412,7 @@ func (txindexMetadata *UnstakeTxindexMetadata) RawEncodeWithoutMetadata(blockHei var data []byte data = append(data, EncodeByteArray([]byte(txindexMetadata.StakerPublicKeyBase58Check))...) data = append(data, EncodeByteArray([]byte(txindexMetadata.ValidatorPublicKeyBase58Check))...) - data = append(data, EncodeUint256(txindexMetadata.UnstakeAmountNanos)...) + data = append(data, VariableEncodeUint256(txindexMetadata.UnstakeAmountNanos)...) 
return data } @@ -434,7 +434,7 @@ func (txindexMetadata *UnstakeTxindexMetadata) RawDecodeWithoutMetadata(blockHei txindexMetadata.ValidatorPublicKeyBase58Check = string(validatorPublicKeyBase58CheckBytes) // UnstakeAmountNanos - txindexMetadata.UnstakeAmountNanos, err = DecodeUint256(rr) + txindexMetadata.UnstakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "UnstakeTxindexMetadata.Decode: Problem reading UnstakeAmountNanos: ") } @@ -468,7 +468,7 @@ func (txindexMetadata *UnlockStakeTxindexMetadata) RawEncodeWithoutMetadata(bloc data = append(data, EncodeByteArray([]byte(txindexMetadata.ValidatorPublicKeyBase58Check))...) data = append(data, UintToBuf(txindexMetadata.StartEpochNumber)...) data = append(data, UintToBuf(txindexMetadata.EndEpochNumber)...) - data = append(data, EncodeUint256(txindexMetadata.TotalUnlockedAmountNanos)...) + data = append(data, VariableEncodeUint256(txindexMetadata.TotalUnlockedAmountNanos)...) return data } @@ -502,7 +502,7 @@ func (txindexMetadata *UnlockStakeTxindexMetadata) RawDecodeWithoutMetadata(bloc } // TotalUnlockedAmountNanos - txindexMetadata.TotalUnlockedAmountNanos, err = DecodeUint256(rr) + txindexMetadata.TotalUnlockedAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "UnlockStakeTxindexMetadata.Decode: Problem reading TotalUnlockedAmountNanos: ") } @@ -522,26 +522,26 @@ func (txindexMetadata *UnlockStakeTxindexMetadata) GetEncoderType() EncoderType // DB UTILS // -func DBKeyForStakeByValidatorByStaker(stakeEntry *StakeEntry) []byte { +func DBKeyForStakeByValidatorAndStaker(stakeEntry *StakeEntry) []byte { data := DBKeyForStakeByValidator(stakeEntry) data = append(data, stakeEntry.StakerPKID.ToBytes()...) return data } func DBKeyForStakeByValidator(stakeEntry *StakeEntry) []byte { - data := append([]byte{}, Prefixes.PrefixStakeByValidatorByStaker...) + data := append([]byte{}, Prefixes.PrefixStakeByValidatorAndStaker...) 
data = append(data, stakeEntry.ValidatorPKID.ToBytes()...) return data } -func DBKeyForLockedStakeByValidatorByStakerByLockedAt(lockedStakeEntry *LockedStakeEntry) []byte { - data := DBPrefixKeyForLockedStakeByValidatorByStaker(lockedStakeEntry) +func DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(lockedStakeEntry *LockedStakeEntry) []byte { + data := DBPrefixKeyForLockedStakeByValidatorAndStaker(lockedStakeEntry) data = append(data, UintToBuf(lockedStakeEntry.LockedAtEpochNumber)...) return data } -func DBPrefixKeyForLockedStakeByValidatorByStaker(lockedStakeEntry *LockedStakeEntry) []byte { - data := append([]byte{}, Prefixes.PrefixLockedStakeByValidatorByStakerByLockedAt...) +func DBPrefixKeyForLockedStakeByValidatorAndStaker(lockedStakeEntry *LockedStakeEntry) []byte { + data := append([]byte{}, Prefixes.PrefixLockedStakeByValidatorAndStakerAndLockedAt...) data = append(data, lockedStakeEntry.ValidatorPKID.ToBytes()...) data = append(data, lockedStakeEntry.StakerPKID.ToBytes()...) return data @@ -569,7 +569,7 @@ func DBGetStakeEntryWithTxn( stakerPKID *PKID, ) (*StakeEntry, error) { // Retrieve StakeEntry from db. - key := DBKeyForStakeByValidatorByStaker(&StakeEntry{ValidatorPKID: validatorPKID, StakerPKID: stakerPKID}) + key := DBKeyForStakeByValidatorAndStaker(&StakeEntry{ValidatorPKID: validatorPKID, StakerPKID: stakerPKID}) stakeEntryBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { // We don't want to error if the key isn't found. Instead, return nil. @@ -611,6 +611,35 @@ func DBGetStakeEntriesForValidatorPKID(handle *badger.DB, snap *Snapshot, valida return stakeEntries, nil } +func DBValidatorHasDelegatedStake( + handle *badger.DB, + snap *Snapshot, + validatorPKID *PKID, + utxoDeletedStakeEntries []*StakeEntry, +) (bool, error) { + // Skip any stake the validator has assigned to himself (if exists). 
+ skipKeys := NewSet([]string{ + string(DBKeyForStakeByValidatorAndStaker(&StakeEntry{ValidatorPKID: validatorPKID, StakerPKID: validatorPKID})), + }) + + // Skip any StakeEntries deleted in the UtxoView. + for _, utxoDeletedStakeEntry := range utxoDeletedStakeEntries { + skipKeys.Add(string(DBKeyForStakeByValidatorAndStaker(utxoDeletedStakeEntry))) + } + + // Scan for any delegated StakeEntries (limiting to at most one row). + prefix := DBKeyForStakeByValidator(&StakeEntry{ValidatorPKID: validatorPKID}) + keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( + handle, prefix, 1, nil, false, skipKeys, + ) + if err != nil { + return false, errors.Wrapf(err, "DBValidatorHasDelegatedStake: problem retrieving StakeEntries: ") + } + + // Return true if any delegated StakeEntries were found. + return len(keysFound) > 0, nil +} + func DBGetLockedStakeEntry( handle *badger.DB, snap *Snapshot, @@ -637,7 +666,7 @@ func DBGetLockedStakeEntryWithTxn( lockedAtEpochNumber uint64, ) (*LockedStakeEntry, error) { // Retrieve LockedStakeEntry from db. - key := DBKeyForLockedStakeByValidatorByStakerByLockedAt(&LockedStakeEntry{ + key := DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(&LockedStakeEntry{ ValidatorPKID: validatorPKID, StakerPKID: stakerPKID, LockedAtEpochNumber: lockedAtEpochNumber, @@ -695,14 +724,14 @@ func DBGetLockedStakeEntriesInRangeWithTxn( // StartEpochNumber <= LockedAtEpochNumber <= EndEpochNumber. // Start at the StartEpochNumber. - startKey := DBKeyForLockedStakeByValidatorByStakerByLockedAt(&LockedStakeEntry{ + startKey := DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(&LockedStakeEntry{ ValidatorPKID: validatorPKID, StakerPKID: stakerPKID, LockedAtEpochNumber: startEpochNumber, }) // Consider only LockedStakeEntries for this ValidatorPKID, StakerPKID. 
- prefixKey := DBPrefixKeyForLockedStakeByValidatorByStaker(&LockedStakeEntry{ + prefixKey := DBPrefixKeyForLockedStakeByValidatorAndStaker(&LockedStakeEntry{ ValidatorPKID: validatorPKID, StakerPKID: stakerPKID, }) @@ -752,7 +781,7 @@ func DBPutStakeEntryWithTxn( } // Set StakeEntry in PrefixStakeByValidatorByStaker. - key := DBKeyForStakeByValidatorByStaker(stakeEntry) + key := DBKeyForStakeByValidatorAndStaker(stakeEntry) if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, stakeEntry)); err != nil { return errors.Wrapf( err, "DBPutStakeEntryWithTxn: problem storing StakeEntry in index PrefixStakeByValidatorByStaker: ", @@ -773,7 +802,7 @@ func DBPutLockedStakeEntryWithTxn( } // Set LockedStakeEntry in PrefixLockedStakeByValidatorByStakerByLockedAt. - key := DBKeyForLockedStakeByValidatorByStakerByLockedAt(lockedStakeEntry) + key := DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(lockedStakeEntry) if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, lockedStakeEntry)); err != nil { return errors.Wrapf( err, "DBPutLockedStakeEntryWithTxn: problem storing LockedStakeEntry in index PrefixLockedStakeByValidatorByStakerByLockedAt: ", @@ -794,7 +823,7 @@ func DBDeleteStakeEntryWithTxn( } // Delete StakeEntry from PrefixStakeByValidatorByStaker. - key := DBKeyForStakeByValidatorByStaker(stakeEntry) + key := DBKeyForStakeByValidatorAndStaker(stakeEntry) if err := DBDeleteWithTxn(txn, snap, key); err != nil { return errors.Wrapf( err, "DBDeleteStakeEntryWithTxn: problem deleting StakeEntry from index PrefixStakeByValidatorByStaker: ", @@ -815,7 +844,7 @@ func DBDeleteLockedStakeEntryWithTxn( } // Delete LockedStakeEntry from PrefixLockedStakeByValidatorByStakerByLockedAt. 
- key := DBKeyForLockedStakeByValidatorByStakerByLockedAt(lockedStakeEntry) + key := DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(lockedStakeEntry) if err := DBDeleteWithTxn(txn, snap, key); err != nil { return errors.Wrapf( err, "DBDeleteLockedStakeEntryWithTxn: problem deleting StakeEntry from index PrefixLockedStakeByValidatorByStakerByLockedAt: ", @@ -1116,6 +1145,12 @@ func (bav *UtxoView) _connectStake( } stakeAmountNanosUint64 := txMeta.StakeAmountNanos.Uint64() + // Retrieve the transactor's current balance to validate later. + prevBalanceNanos, err := bav.GetDeSoBalanceNanosForPublicKey(txn.PublicKey) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: error retrieving PrevBalanceNanos: ") + } + // Connect a BasicTransfer to get the total input and the // total output without considering the txn metadata. This // BasicTransfer also includes the extra spend associated @@ -1140,17 +1175,14 @@ func (bav *UtxoView) _connectStake( return 0, 0, nil, errors.Wrapf(err, "_connectStake: ") } // Delete the existing StakeEntry, if exists. + // + // Note that we don't really need to do this, as setting a new StakeEntry will naturally cause + // the old entry to be deleted in the database. However, we do this here for clarity. if prevStakeEntry != nil { prevStakeEntries = append(prevStakeEntries, prevStakeEntry) bav._deleteStakeEntryMappings(prevStakeEntry) } - // Set StakeID only if this is a new StakeEntry. - stakeID := txHash - if prevStakeEntry != nil { - stakeID = prevStakeEntry.StakeID - } - // Calculate StakeAmountNanos. stakeAmountNanos := txMeta.StakeAmountNanos.Clone() if prevStakeEntry != nil { @@ -1168,7 +1200,6 @@ func (bav *UtxoView) _connectStake( // Construct new StakeEntry from metadata. currentStakeEntry := &StakeEntry{ - StakeID: stakeID, StakerPKID: transactorPKIDEntry.PKID, ValidatorPKID: prevValidatorEntry.ValidatorPKID, StakeAmountNanos: stakeAmountNanos, @@ -1181,6 +1212,9 @@ func (bav *UtxoView) _connectStake( // 1. 
Copy the existing ValidatorEntry. currentValidatorEntry := prevValidatorEntry.Copy() // 2. Delete the existing ValidatorEntry. + // + // Note that we don't really need to do this, as setting a new ValidatorEntry will naturally cause + // the old entry to be deleted in the database. However, we do this here for clarity. bav._deleteValidatorEntryMappings(prevValidatorEntry) // 3. Update the new ValidatorEntry's TotalStakeAmountNanos. currentValidatorEntry.TotalStakeAmountNanos, err = SafeUint256().Add( @@ -1214,13 +1248,19 @@ func (bav *UtxoView) _connectStake( return 0, 0, nil, errors.Wrapf(err, "_connectStake: error adding StakeAmountNanos to TotalOutput: ") } - // Add a UTXO operation - utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + // Create a UTXO operation + utxoOpForTxn := &UtxoOperation{ Type: OperationTypeStake, PrevValidatorEntry: prevValidatorEntry, PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, PrevStakeEntries: prevStakeEntries, - }) + } + if err = bav.SanityCheckStakeTxn( + transactorPKIDEntry.PKID, utxoOpForTxn, txMeta.StakeAmountNanos, txn.TxnFeeNanos, prevBalanceNanos, + ); err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: ") + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOpForTxn) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1369,7 +1409,7 @@ func (bav *UtxoView) _connectUnstake( return 0, 0, nil, errors.Wrapf(RuleErrorInvalidValidatorPKID, "_connectUnstake: ") } - // Retrieve PrevStakeEntry. This will be restored if we disconnect the txn. + // Retrieve prevStakeEntry. This will be restored if we disconnect the txn. prevStakeEntry, err := bav.GetStakeEntry(prevValidatorEntry.ValidatorPKID, transactorPKIDEntry.PKID) if err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ") @@ -1388,15 +1428,15 @@ func (bav *UtxoView) _connectUnstake( if err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error subtracting UnstakeAmountNanos from StakeAmountNanos: ") } - // 2. 
Create a CurrentStakeEntry, if updated StakeAmountNanos > 0. + // 2. Create a currentStakeEntry, if updated StakeAmountNanos > 0. var currentStakeEntry *StakeEntry if stakeAmountNanos.Cmp(uint256.NewInt()) > 0 { currentStakeEntry = prevStakeEntry.Copy() currentStakeEntry.StakeAmountNanos = stakeAmountNanos.Clone() } - // 3. Delete the PrevStakeEntry. + // 3. Delete the prevStakeEntry. bav._deleteStakeEntryMappings(prevStakeEntry) - // 4. Set the CurrentStakeEntry, if exists. The CurrentStakeEntry will not exist + // 4. Set the currentStakeEntry, if exists. The currentStakeEntry will not exist // if the transactor has unstaked all stake assigned to this validator. if currentStakeEntry != nil { bav._setStakeEntryMappings(currentStakeEntry) @@ -1406,6 +1446,10 @@ func (bav *UtxoView) _connectUnstake( // 1. Copy the existing ValidatorEntry. currentValidatorEntry := prevValidatorEntry.Copy() // 2. Delete the existing ValidatorEntry. + // + // Note that we don't technically need to delete the ValidatorEntry here since + // the old ValidatorEntry will automatically be deleted in favor of the new one, + // but we do this here for clarity. bav._deleteValidatorEntryMappings(prevValidatorEntry) // 3. Update the new ValidatorEntry's TotalStakeAmountNanos. currentValidatorEntry.TotalStakeAmountNanos, err = SafeUint256().Sub( @@ -1460,7 +1504,6 @@ func (bav *UtxoView) _connectUnstake( } else { // Create a new LockedStakeEntry. currentLockedStakeEntry = &LockedStakeEntry{ - LockedStakeID: txn.Hash(), StakerPKID: transactorPKIDEntry.PKID, ValidatorPKID: prevValidatorEntry.ValidatorPKID, LockedAmountNanos: txMeta.UnstakeAmountNanos, @@ -1469,6 +1512,9 @@ func (bav *UtxoView) _connectUnstake( } } // 3. Delete the PrevLockedStakeEntry, if exists. + // + // Note that we don't technically need to do this since the flush will naturally delete + // the old value from the db before setting the new one, but we do it here for clarity. 
if prevLockedStakeEntry != nil { prevLockedStakeEntries = append(prevLockedStakeEntries, prevLockedStakeEntry) bav._deleteLockedStakeEntryMappings(prevLockedStakeEntry) @@ -1476,14 +1522,18 @@ func (bav *UtxoView) _connectUnstake( // 4. Set the CurrentLockedStakeEntry. bav._setLockedStakeEntryMappings(currentLockedStakeEntry) - // Add a UTXO operation - utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + // Create a UTXO operation. + utxoOpForTxn := &UtxoOperation{ Type: OperationTypeUnstake, PrevValidatorEntry: prevValidatorEntry, PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, PrevStakeEntries: prevStakeEntries, PrevLockedStakeEntries: prevLockedStakeEntries, - }) + } + if err = bav.SanityCheckUnstakeTxn(transactorPKIDEntry.PKID, utxoOpForTxn, txMeta.UnstakeAmountNanos); err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ") + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOpForTxn) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1652,6 +1702,12 @@ func (bav *UtxoView) _connectUnlockStake( return 0, 0, nil, errors.Wrapf(RuleErrorInvalidUnlockStakeNoUnlockableStakeFound, "_connectUnlockStake: ") } + // Retrieve the transactor's current balance to validate later. + prevBalanceNanos, err := bav.GetDeSoBalanceNanosForPublicKey(txn.PublicKey) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnlockStake: error retrieving PrevBalanceNanos: ") + } + // Connect a basic transfer to get the total input and the // total output without considering the txn metadata. totalInput, totalOutput, utxoOpsForTxn, err := bav._connectBasicTransfer( @@ -1707,11 +1763,17 @@ func (bav *UtxoView) _connectUnlockStake( } utxoOpsForTxn = append(utxoOpsForTxn, utxoOp) - // Add a UTXO operation - utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + // Create a UTXO operation. 
+ utxoOpForTxn := &UtxoOperation{ Type: OperationTypeUnlockStake, PrevLockedStakeEntries: prevLockedStakeEntries, - }) + } + if err = bav.SanityCheckUnlockStakeTxn( + transactorPKIDEntry.PKID, utxoOpForTxn, totalUnlockedAmountNanos, txn.TxnFeeNanos, prevBalanceNanos, + ); err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnlockStake: ") + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOpForTxn) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1905,6 +1967,244 @@ func (bav *UtxoView) IsValidUnlockStakeMetadata(transactorPkBytes []byte, metada return nil } +func (bav *UtxoView) SanityCheckStakeTxn( + transactorPKID *PKID, + utxoOp *UtxoOperation, + amountNanos *uint256.Int, + feeNanos uint64, + prevBalanceNanos uint64, +) error { + if utxoOp.Type != OperationTypeStake { + return fmt.Errorf("SanityCheckStakeTxn: called with %v", utxoOp.Type) + } + + // Sanity check ValidatorEntry.TotalStakeAmountNanos increase. + if utxoOp.PrevValidatorEntry == nil { + return errors.New("SanityCheckStakeTxn: nil PrevValidatorEntry provided") + } + currentValidatorEntry, err := bav.GetValidatorByPKID(utxoOp.PrevValidatorEntry.ValidatorPKID) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error retrieving ValidatorEntry: ") + } + if currentValidatorEntry == nil { + return errors.New("SanityCheckStakeTxn: no CurrentValidatorEntry found") + } + validatorEntryTotalStakeAmountNanosIncrease, err := SafeUint256().Sub( + currentValidatorEntry.TotalStakeAmountNanos, utxoOp.PrevValidatorEntry.TotalStakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error calculating TotalStakeAmountNanos increase: ") + } + if !validatorEntryTotalStakeAmountNanosIncrease.Eq(amountNanos) { + return errors.New("SanityCheckStakeTxn: TotalStakeAmountNanos increase does not match") + } + + // Validate StakeEntry.StakeAmountNanos increase. 
+ prevStakeEntry := &StakeEntry{StakeAmountNanos: uint256.NewInt()} + if len(utxoOp.PrevStakeEntries) == 1 { + prevStakeEntry = utxoOp.PrevStakeEntries[0] + } + currentStakeEntry, err := bav.GetStakeEntry(currentValidatorEntry.ValidatorPKID, transactorPKID) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error retrieving StakeEntry: ") + } + if currentStakeEntry == nil { + return errors.New("SanityCheckStakeTxn: no CurrentStakeEntry found") + } + stakeEntryStakeAmountNanosIncrease, err := SafeUint256().Sub( + currentStakeEntry.StakeAmountNanos, prevStakeEntry.StakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error calculating StakeAmountNanos increase: ") + } + if !stakeEntryStakeAmountNanosIncrease.Eq(amountNanos) { + return errors.New("SanityCheckStakeTxn: StakeAmountNanos increase does not match") + } + + // Validate GlobalStakeAmountNanos increase. + if utxoOp.PrevGlobalStakeAmountNanos == nil { + return errors.New("SanityCheckStakeTxn: nil PrevGlobalStakeAmountNanos provided") + } + currentGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error retrieving CurrentGlobalStakeAmountNanos: ") + } + globalStakeAmountNanosIncrease, err := SafeUint256().Sub( + currentGlobalStakeAmountNanos, utxoOp.PrevGlobalStakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error calculating GlobalStakeAmountNanos increase: ") + } + if !globalStakeAmountNanosIncrease.Eq(amountNanos) { + return errors.New("SanityCheckStakeTxn: GlobalStakeAmountNanos increase does not match") + } + + // Validate TransactorBalance decrease. 
+ // PrevTransactorBalanceNanos = CurrentTransactorBalanceNanos + AmountNanos + FeeNanos + // PrevTransactorBalanceNanos - CurrentTransactorBalanceNanos - FeeNanos = AmountNanos + currentBalanceNanos, err := bav.GetDeSoBalanceNanosForPublicKey(bav.GetPublicKeyForPKID(transactorPKID)) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error retrieving TransactorBalance: ") + } + transactorBalanceNanosDecrease, err := SafeUint64().Sub(prevBalanceNanos, currentBalanceNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error calculating TransactorBalance decrease: ") + } + transactorBalanceNanosDecrease, err = SafeUint64().Sub(transactorBalanceNanosDecrease, feeNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error including fees in TransactorBalance decrease: ") + } + if !uint256.NewInt().SetUint64(transactorBalanceNanosDecrease).Eq(amountNanos) { + return errors.New("SanityCheckStakeTxn: TransactorBalance decrease does not match") + } + + return nil +} + +func (bav *UtxoView) SanityCheckUnstakeTxn(transactorPKID *PKID, utxoOp *UtxoOperation, amountNanos *uint256.Int) error { + if utxoOp.Type != OperationTypeUnstake { + return fmt.Errorf("SanityCheckUnstakeTxn: called with %v", utxoOp.Type) + } + + // Validate ValidatorEntry.TotalStakeAmountNanos decrease. 
+ if utxoOp.PrevValidatorEntry == nil { + return errors.New("SanityCheckUnstakeTxn: nil PrevValidatorEntry provided") + } + currentValidatorEntry, err := bav.GetValidatorByPKID(utxoOp.PrevValidatorEntry.ValidatorPKID) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving ValidatorEntry: ") + } + if currentValidatorEntry == nil { + return errors.New("SanityCheckUnstakeTxn: no CurrentValidatorEntry found") + } + validatorEntryTotalStakeAmountNanosDecrease, err := SafeUint256().Sub( + utxoOp.PrevValidatorEntry.TotalStakeAmountNanos, currentValidatorEntry.TotalStakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error calculating TotalStakeAmountNanos decrease: ") + } + if !validatorEntryTotalStakeAmountNanosDecrease.Eq(amountNanos) { + return errors.New("SanityCheckUnstakeTxn: TotalStakeAmountNanos decrease does not match") + } + + // Validate PrevStakeEntry.StakeAmountNanos decrease. + if len(utxoOp.PrevStakeEntries) != 1 { + return errors.New("SanityCheckUnstakeTxn: PrevStakeEntries should have exactly one entry") + } + prevStakeEntry := utxoOp.PrevStakeEntries[0] + currentStakeEntry, err := bav.GetStakeEntry(prevStakeEntry.ValidatorPKID, transactorPKID) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving StakeEntry: ") + } + if currentStakeEntry == nil { + currentStakeEntry = &StakeEntry{StakeAmountNanos: uint256.NewInt()} + } + stakeEntryStakeAmountNanosDecrease, err := SafeUint256().Sub( + prevStakeEntry.StakeAmountNanos, currentStakeEntry.StakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error calculating StakeAmountNanos decrease: ") + } + if !stakeEntryStakeAmountNanosDecrease.Eq(amountNanos) { + return errors.New("SanityCheckUnstakeTxn: StakeAmountNanos decrease does not match") + } + + // Validate LockedStakeEntry.LockedAmountNanos increase. 
+ prevLockedStakeEntry := &LockedStakeEntry{LockedAmountNanos: uint256.NewInt()} + if len(utxoOp.PrevLockedStakeEntries) == 1 { + prevLockedStakeEntry = utxoOp.PrevLockedStakeEntries[0] + } + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving CurrentEpochNumber: ") + } + currentLockedStakeEntry, err := bav.GetLockedStakeEntry( + currentValidatorEntry.ValidatorPKID, transactorPKID, currentEpochNumber, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving LockedStakeEntry: ") + } + lockedStakeEntryLockedAmountNanosIncrease, err := SafeUint256().Sub( + currentLockedStakeEntry.LockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error calculating LockedAmountNanos increase: ") + } + if !lockedStakeEntryLockedAmountNanosIncrease.Eq(amountNanos) { + return errors.New("SanityCheckUnstakeTxn: LockedAmountNanos increase does not match") + } + + // Validate GlobalStakeAmountNanos decrease. 
+ if utxoOp.PrevGlobalStakeAmountNanos == nil { + return errors.New("SanityCheckUnstakeTxn: nil PrevGlobalStakeAmountNanos provided") + } + currentGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving CurrentGlobalStakeAmountNanos: ") + } + if currentGlobalStakeAmountNanos == nil { + return errors.New("SanityCheckUnstakeTxn: no CurrentGlobalStakeAmountNanos found") + } + globalStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalStakeAmountNanos, currentGlobalStakeAmountNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error calculating GlobalStakeAmountNanos decrease: ") + } + if !globalStakeAmountNanosDecrease.Eq(amountNanos) { + return errors.New("SanityCheckUnstakeTxn: GlobalStakeAmountNanos decrease does not match") + } + + return nil +} + +func (bav *UtxoView) SanityCheckUnlockStakeTxn( + transactorPKID *PKID, + utxoOp *UtxoOperation, + amountNanos *uint256.Int, + feeNanos uint64, + prevBalanceNanos uint64, +) error { + if utxoOp.Type != OperationTypeUnlockStake { + return fmt.Errorf("SanityCheckUnlockStakeTxn: called with %v", utxoOp.Type) + } + + // Validate PrevLockedStakeEntry.LockedAmountNanos. 
+	if utxoOp.PrevLockedStakeEntries == nil || len(utxoOp.PrevLockedStakeEntries) == 0 {
+		return errors.New("SanityCheckUnlockStakeTxn: PrevLockedStakeEntries is empty")
+	}
+	totalUnlockedAmountNanos := uint256.NewInt()
+	var err error
+	for _, prevLockedStakeEntry := range utxoOp.PrevLockedStakeEntries {
+		totalUnlockedAmountNanos, err = SafeUint256().Add(totalUnlockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos)
+		if err != nil {
+			return errors.Wrapf(err, "SanityCheckUnlockStakeTxn: error calculating TotalUnlockedAmountNanos: ")
+		}
+	}
+	if !totalUnlockedAmountNanos.Eq(amountNanos) {
+		return errors.New("SanityCheckUnlockStakeTxn: TotalUnlockedAmountNanos does not match")
+	}
+
+	// Validate TransactorBalanceNanos increase.
+	// CurrentTransactorBalanceNanos = PrevTransactorBalanceNanos + AmountNanos - FeeNanos
+	// CurrentTransactorBalanceNanos - PrevTransactorBalanceNanos + FeeNanos = AmountNanos
+	currentBalanceNanos, err := bav.GetDeSoBalanceNanosForPublicKey(bav.GetPublicKeyForPKID(transactorPKID))
+	if err != nil {
+		return errors.Wrapf(err, "SanityCheckUnlockStakeTxn: error retrieving TransactorBalance: ")
+	}
+	transactorBalanceNanosIncrease, err := SafeUint64().Sub(currentBalanceNanos, prevBalanceNanos)
+	if err != nil {
+		return errors.Wrapf(err, "SanityCheckUnlockStakeTxn: error calculating TransactorBalance increase: ")
+	}
+	transactorBalanceNanosIncrease, err = SafeUint64().Add(transactorBalanceNanosIncrease, feeNanos)
+	if err != nil {
+		return errors.Wrapf(err, "SanityCheckUnlockStakeTxn: error including fees in TransactorBalance increase: ")
+	}
+	if !uint256.NewInt().SetUint64(transactorBalanceNanosIncrease).Eq(amountNanos) {
+		return errors.New("SanityCheckUnlockStakeTxn: TransactorBalance increase does not match")
+	}
+
+	return nil
+}
+
 func (bav *UtxoView) GetStakeEntry(validatorPKID *PKID, stakerPKID *PKID) (*StakeEntry, error) {
 	// Error if either input is nil.
if validatorPKID == nil { @@ -1971,6 +2271,32 @@ func (bav *UtxoView) GetStakeEntriesForValidatorPKID(validatorPKID *PKID) ([]*St return stakeEntries, nil } +func (bav *UtxoView) ValidatorHasDelegatedStake(validatorPKID *PKID) (bool, error) { + // True if the validator has any delegated stake assigned to them. + + // First check the UtxoView. + var utxoDeletedStakeEntries []*StakeEntry + for _, stakeEntry := range bav.StakeMapKeyToStakeEntry { + if !stakeEntry.ValidatorPKID.Eq(validatorPKID) { + // Skip any stake assigned to other validators. + continue + } + if stakeEntry.StakerPKID.Eq(validatorPKID) { + // Skip any stake the validator assigned to themselves. + continue + } + if !stakeEntry.isDeleted { + // A non-deleted delegated StakeEntry for this validator was found in the UtxoView. + return true, nil + } + // A deleted delegated StakeEntry for this validator was found in the UtxoView. + utxoDeletedStakeEntries = append(utxoDeletedStakeEntries, stakeEntry) + } + + // Next, check the database skipping any deleted StakeEntries for this validator. + return DBValidatorHasDelegatedStake(bav.Handle, bav.Snapshot, validatorPKID, utxoDeletedStakeEntries) +} + func (bav *UtxoView) GetLockedStakeEntry( validatorPKID *PKID, stakerPKID *PKID, @@ -2406,6 +2732,8 @@ func (bav *UtxoView) _checkStakeTxnSpendingLimitAndUpdateDerivedKey( delete(derivedKeyEntry.TransactionSpendingLimitTracker.StakeLimitMap, stakeLimitKey) return derivedKeyEntry, nil } + // If we get here, it means that we did not find a valid spendingLimit with enough stake + // to cover the transaction's required stake amount. // Error if the spending limit was found but the staking limit was exceeded. 
if isSpendingLimitExceeded { @@ -2416,6 +2744,8 @@ func (bav *UtxoView) _checkStakeTxnSpendingLimitAndUpdateDerivedKey( return derivedKeyEntry, errors.Wrapf(RuleErrorStakeTransactionSpendingLimitNotFound, "UtxoView._checkStakeTxnSpendingLimitAndUpdateDerivedKey: ") } +// TODO: This function is highly-redundant with the previous function. Probably makes sense +// to consolidate in the future. func (bav *UtxoView) _checkUnstakeTxnSpendingLimitAndUpdateDerivedKey( derivedKeyEntry DerivedKeyEntry, transactorPublicKeyBytes []byte, diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index a75c06cdf..f8aefb9d8 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -4,11 +4,12 @@ package lib import ( "errors" + "math" + "testing" + "github.com/btcsuite/btcd/btcec" "github.com/holiman/uint256" "github.com/stretchr/testify/require" - "math" - "testing" ) func TestStaking(t *testing.T) { @@ -103,10 +104,9 @@ func _testStaking(t *testing.T, flushToDB bool) { votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, flushToDB) require.NoError(t, err) @@ -959,10 +959,9 @@ func TestStakingWithDerivedKey(t *testing.T) { // m0 registers as a validator. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example1.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example1.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) @@ -971,10 +970,9 @@ func TestStakingWithDerivedKey(t *testing.T) { // m1 registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example2.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example2.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) @@ -1848,10 +1846,9 @@ func TestStakeLockupEpochDuration(t *testing.T) { // m0 registers as a validator. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m1.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m1.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, true) require.NoError(t, err) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 9aa041678..0e0af7f1c 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -1246,7 +1246,7 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada data = append(data, EncodeToBytes(blockHeight, op.PrevValidatorEntry, skipMetadata...)...) // PrevGlobalStakeAmountNanos - data = append(data, EncodeUint256(op.PrevGlobalStakeAmountNanos)...) + data = append(data, VariableEncodeUint256(op.PrevGlobalStakeAmountNanos)...) // PrevStakeEntries data = append(data, EncodeDeSoEncoderSlice(op.PrevStakeEntries, blockHeight, skipMetadata...)...) @@ -1877,7 +1877,7 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. } // PrevGlobalStakeAmountNanos - if prevGlobalStakeAmountNanos, err := DecodeUint256(rr); err == nil { + if prevGlobalStakeAmountNanos, err := VariableDecodeUint256(rr); err == nil { op.PrevGlobalStakeAmountNanos = prevGlobalStakeAmountNanos } else { return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevGlobalStakeAmountNanos: ") @@ -4207,7 +4207,7 @@ func (be *BalanceEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadat data = append(data, EncodeToBytes(blockHeight, be.HODLerPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, be.CreatorPKID, skipMetadata...)...) - data = append(data, EncodeUint256(&be.BalanceNanos)...) 
+ data = append(data, VariableEncodeUint256(&be.BalanceNanos)...) data = append(data, BoolToByte(be.HasPurchased)) return data @@ -4229,7 +4229,7 @@ func (be *BalanceEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.R return errors.Wrapf(err, "BalanceEntry.Decode: Problem decoding CreatorPKID") } - balanceNanos, err := DecodeUint256(rr) + balanceNanos, err := VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "BalanceEntry.Decode: Problem reading BalanceNanos") } @@ -4353,7 +4353,7 @@ func (ce *CoinEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata . data = append(data, UintToBuf(ce.CreatorBasisPoints)...) data = append(data, UintToBuf(ce.DeSoLockedNanos)...) data = append(data, UintToBuf(ce.NumberOfHolders)...) - data = append(data, EncodeUint256(&ce.CoinsInCirculationNanos)...) + data = append(data, VariableEncodeUint256(&ce.CoinsInCirculationNanos)...) data = append(data, UintToBuf(ce.CoinWatermarkNanos)...) data = append(data, BoolToByte(ce.MintingDisabled)) data = append(data, byte(ce.TransferRestrictionStatus)) @@ -4377,7 +4377,7 @@ func (ce *CoinEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Read if err != nil { return errors.Wrapf(err, "CoinEntry.Decode: Problem reading NumberOfHolders") } - coinsInCirculationNanos, err := DecodeUint256(rr) + coinsInCirculationNanos, err := VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "CoinEntry.Decode: Problem reading NumberOfHolders") } @@ -4912,12 +4912,12 @@ func DecodeMapStringUint64(rr *bytes.Reader) (map[string]uint64, error) { return nil, nil } -// EncodeUint256 is useful for space-efficient encoding of uint256s. +// VariableEncodeUint256 is useful for space-efficient encoding of uint256s. // It does not guarantee fixed-width encoding, so should not be used -// in BadgerDB keys. Use EncodeOptionalUint256 instead, which does -// guarantee fixed-width encoding. Both EncodeUint256 and -// EncodeOptionalUint256 can handle nil inputs. 
-func EncodeUint256(number *uint256.Int) []byte { +// in BadgerDB keys. Use FixedWidthEncodeUint256 instead, which does +// guarantee fixed-width encoding. Both VariableEncodeUint256 and +// FixedWidthEncodeUint256 can handle nil inputs. +func VariableEncodeUint256(number *uint256.Int) []byte { var data []byte if number != nil { data = append(data, BoolToByte(true)) @@ -4929,7 +4929,7 @@ func EncodeUint256(number *uint256.Int) []byte { return data } -func DecodeUint256(rr *bytes.Reader) (*uint256.Int, error) { +func VariableDecodeUint256(rr *bytes.Reader) (*uint256.Int, error) { if existenceByte, err := ReadBoolByte(rr); existenceByte && err == nil { maxUint256BytesLen := len(MaxUint256.Bytes()) intLen, err := ReadUvarint(rr) @@ -5089,8 +5089,8 @@ func (order *DAOCoinLimitOrderEntry) RawEncodeWithoutMetadata(blockHeight uint64 data = append(data, EncodeToBytes(blockHeight, order.TransactorPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, order.BuyingDAOCoinCreatorPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, order.SellingDAOCoinCreatorPKID, skipMetadata...)...) - data = append(data, EncodeUint256(order.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) - data = append(data, EncodeUint256(order.QuantityToFillInBaseUnits)...) + data = append(data, VariableEncodeUint256(order.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) + data = append(data, VariableEncodeUint256(order.QuantityToFillInBaseUnits)...) data = append(data, UintToBuf(uint64(order.OperationType))...) data = append(data, UintToBuf(uint64(order.FillType))...) data = append(data, UintToBuf(uint64(order.BlockHeight))...) 
@@ -5133,12 +5133,12 @@ func (order *DAOCoinLimitOrderEntry) RawDecodeWithoutMetadata(blockHeight uint64 } // ScaledExchangeRateCoinsToSellPerCoinToBuy - if order.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = DecodeUint256(rr); err != nil { + if order.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = VariableDecodeUint256(rr); err != nil { return errors.Wrapf(err, "DAOCoinLimitOrderEntry.Decode: Problem reading ScaledExchangeRateCoinsToSellPerCoinToBuy") } // QuantityToFillInBaseUnits - if order.QuantityToFillInBaseUnits, err = DecodeUint256(rr); err != nil { + if order.QuantityToFillInBaseUnits, err = VariableDecodeUint256(rr); err != nil { return errors.Wrapf(err, "DAOCoinLimitOrderEntry.Decode: Problem reading QuantityToFillInBaseUnits") } @@ -5383,8 +5383,8 @@ func (order *FilledDAOCoinLimitOrder) RawEncodeWithoutMetadata(blockHeight uint6 data = append(data, EncodeToBytes(blockHeight, order.TransactorPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, order.BuyingDAOCoinCreatorPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, order.SellingDAOCoinCreatorPKID, skipMetadata...)...) - data = append(data, EncodeUint256(order.CoinQuantityInBaseUnitsBought)...) - data = append(data, EncodeUint256(order.CoinQuantityInBaseUnitsSold)...) + data = append(data, VariableEncodeUint256(order.CoinQuantityInBaseUnitsBought)...) + data = append(data, VariableEncodeUint256(order.CoinQuantityInBaseUnitsSold)...) 
data = append(data, BoolToByte(order.IsFulfilled)) return data @@ -5426,12 +5426,12 @@ func (order *FilledDAOCoinLimitOrder) RawDecodeWithoutMetadata(blockHeight uint6 } // CoinQuantityInBaseUnitsBought - if order.CoinQuantityInBaseUnitsBought, err = DecodeUint256(rr); err != nil { + if order.CoinQuantityInBaseUnitsBought, err = VariableDecodeUint256(rr); err != nil { return errors.Wrapf(err, "FilledDAOCoinLimiteOrder.Decode: Problem reading CoinQuantityInBaseUnitsBought") } // CoinQuantityInBaseUnitsSold - if order.CoinQuantityInBaseUnitsSold, err = DecodeUint256(rr); err != nil { + if order.CoinQuantityInBaseUnitsSold, err = VariableDecodeUint256(rr); err != nil { return errors.Wrapf(err, "FilledDAOCoinLimiteOrder.Decode: Problem reading CoinQuantityInBaseUnitsSold") } diff --git a/lib/block_view_types_test.go b/lib/block_view_types_test.go index 0a81bcae4..db66e746e 100644 --- a/lib/block_view_types_test.go +++ b/lib/block_view_types_test.go @@ -5,13 +5,14 @@ package lib import ( "bytes" "encoding/hex" + "reflect" + "testing" + "time" + "github.com/brianvoe/gofakeit" "github.com/holiman/uint256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "reflect" - "testing" - "time" ) // Initialize empty DeSoEncoders and check if they are encoded properly. @@ -400,53 +401,53 @@ func TestEncodingUint256s(t *testing.T) { num2 := uint256.NewInt().SetUint64(598128756) num3 := MaxUint256 - // Encode them to bytes using EncodeUint256. - encoded1 := EncodeUint256(num1) - encoded2 := EncodeUint256(num2) - encoded3 := EncodeUint256(num3) + // Encode them to bytes using VariableEncodeUint256. + encoded1 := VariableEncodeUint256(num1) + encoded2 := VariableEncodeUint256(num2) + encoded3 := VariableEncodeUint256(num3) - // Decode them from bytes using DecodeUint256. Verify values. + // Decode them from bytes using VariableDecodeUint256. Verify values. 
rr := bytes.NewReader(encoded1) - decoded1, err := DecodeUint256(rr) + decoded1, err := VariableDecodeUint256(rr) require.NoError(t, err) require.True(t, num1.Eq(decoded1)) rr = bytes.NewReader(encoded2) - decoded2, err := DecodeUint256(rr) + decoded2, err := VariableDecodeUint256(rr) require.NoError(t, err) require.True(t, num2.Eq(decoded2)) rr = bytes.NewReader(encoded3) - decoded3, err := DecodeUint256(rr) + decoded3, err := VariableDecodeUint256(rr) require.NoError(t, err) require.True(t, num3.Eq(decoded3)) - // Test that EncodeUint256 does not provide a fixed-width byte encoding. + // Test that VariableEncodeUint256 does not provide a fixed-width byte encoding. require.NotEqual(t, len(encoded1), len(encoded2)) require.NotEqual(t, len(encoded1), len(encoded3)) - // Encode them to bytes using EncodeOptionalUint256. - encoded1 = EncodeOptionalUint256(num1) - encoded2 = EncodeOptionalUint256(num2) - encoded3 = EncodeOptionalUint256(num3) + // Encode them to bytes using FixedWidthEncodeUint256. + encoded1 = FixedWidthEncodeUint256(num1) + encoded2 = FixedWidthEncodeUint256(num2) + encoded3 = FixedWidthEncodeUint256(num3) - // Decode them from bytes using ReadOptionalUint256. Verify values. + // Decode them from bytes using FixedWidthDecodeUint256. Verify values. rr = bytes.NewReader(encoded1) - decoded1, err = ReadOptionalUint256(rr) + decoded1, err = FixedWidthDecodeUint256(rr) require.NoError(t, err) require.True(t, num1.Eq(decoded1)) rr = bytes.NewReader(encoded2) - decoded2, err = ReadOptionalUint256(rr) + decoded2, err = FixedWidthDecodeUint256(rr) require.NoError(t, err) require.True(t, num2.Eq(decoded2)) rr = bytes.NewReader(encoded3) - decoded3, err = ReadOptionalUint256(rr) + decoded3, err = FixedWidthDecodeUint256(rr) require.NoError(t, err) require.True(t, num3.Eq(decoded3)) - // Test that EncodeOptionalUint256 provides a fixed-width byte encoding. + // Test that FixedWidthEncodeUint256 provides a fixed-width byte encoding. 
require.Equal(t, len(encoded1), len(encoded2)) require.Equal(t, len(encoded1), len(encoded3)) } diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 083b9bd30..647044cd6 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -4,37 +4,80 @@ import ( "bytes" "crypto/sha256" "fmt" + "io" + "math" + "net/url" + "sort" + "github.com/deso-protocol/core/bls" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" - "io" - "math" - "net/url" - "sort" ) +// RegisterAsValidator: Registers a new validator. This transaction can be called multiple times +// if a validator needs to update any of their registration info such as their domains. Once +// a validator is registered, stake can be assigned to that validator, the validator is eligible +// to participate in consensus by voting, and may be selected as leader to propose new blocks. +// +// UnregisterAsValidator: Unregisters an existing validator. This unstakes all stake assigned to this +// validator and removes this validator from the set of eligible validators. A user would have to +// re-register by submitting a subsequent RegisterAsValidator transaction to be re-included. +// +// UnjailValidator: Unjails a jailed validator if sufficient time (epochs) have elapsed since the +// validator was first jailed. A validator is jailed if they fail to participate in consensus by +// either voting or proposing blocks for too long. A jailed validator is ineligible to receive +// any block rewards and ineligible to elected leader. +// +// FIXME: In the future, when we flesh out the jail/unjail functionality, we will want to make it +// so that the validator's stake is removed/added to GlobalStakeAmountNanos. See FIXME on +// _connectUnjailValidator for more details. + // // TYPES: ValidatorEntry // type ValidatorEntry struct { - ValidatorID *BlockHash + // The ValidatorPKID is the primary key for a ValidatorEntry. 
It is the PKID + // for the transactor who registered the validator. A user's PKID can only + // be associated with one validator. ValidatorPKID *PKID + // Domains is a slice of web domains where the validator can be reached. // Note: if someone is updating their ValidatorEntry, they need to include // all domains. The Domains field is not appended to. It is overwritten. - Domains [][]byte - DisableDelegatedStake bool - VotingPublicKey *bls.PublicKey - VotingPublicKeySignature *bls.Signature - VotingSignatureBlockHeight uint64 - TotalStakeAmountNanos *uint256.Int - RegisteredAtBlockHeight uint64 - LastActiveAtEpochNumber uint64 - JailedAtEpochNumber uint64 - ExtraData map[string][]byte - isDeleted bool + Domains [][]byte + // DisableDelegatedStake is a boolean that indicates whether the validator + // disallows delegated / 3rd party stake being assigned to themselves. If + // a validator sets DisableDelegatedStake to true, then they can still + // stake with themselves, but all other users will receive an error if they + // try to stake with this validator. + DisableDelegatedStake bool + // The VotingPublicKey is a BLS PublicKey that is used in consensus messages. + // A validator signs consensus messages with their VotingPrivateKey and then + // other validators can reliably prove the message came from this validator + // by verifying against their VotingPublicKey. + VotingPublicKey *bls.PublicKey + // The VotingPublicKeySignature is the signature of the SHA256(TransactorPublicKey) + // by the VotingPrivateKey. + // This proves that this validator is indeed the proper owner of the corresponding + // VotingPrivateKey. See comment on CreateValidatorVotingSignaturePayload for more details. + VotingPublicKeySignature *bls.Signature + // TotalStakeAmountNanos is a cached value of this validator's total stake, calculated + // by summing all the corresponding StakeEntries assigned to this validator. 
We cache + // the value here to avoid the O(N) operation of recomputing when determining a + // validator's total stake. This way it is an O(1) operation instead. + TotalStakeAmountNanos *uint256.Int + // LastActiveAtEpochNumber is the last epoch in which this validator either 1) participated in + // consensus by voting or proposing blocks, or 2) unjailed themselves. If a validator is + // inactive for too long, then they are jailed. + LastActiveAtEpochNumber uint64 + // JailedAtEpochNumber tracks when a validator was first jailed. This helps to verify + // that enough time (epochs) have passed before the validator is able to unjail themselves. + JailedAtEpochNumber uint64 + + ExtraData map[string][]byte + isDeleted bool } func (validatorEntry *ValidatorEntry) Status() ValidatorStatus { @@ -60,16 +103,6 @@ const ( ValidatorStatusJailed ValidatorStatus = 2 ) -type ValidatorMapKey struct { - // The MapKey has to contain all fields that are used in Badger keys. - // Otherwise, an update to the UtxoView will not be able to update or - // delete all relevant Badger rows. - ValidatorPKID PKID - TotalStakeAmountNanos uint256.Int - RegisteredAtBlockHeight uint64 - JailedAtEpochNumber uint64 -} - func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { // Copy domains. var domainsCopy [][]byte @@ -79,34 +112,21 @@ func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { // Return new ValidatorEntry. 
return &ValidatorEntry{ - ValidatorID: validatorEntry.ValidatorID.NewBlockHash(), - ValidatorPKID: validatorEntry.ValidatorPKID.NewPKID(), - Domains: domainsCopy, - DisableDelegatedStake: validatorEntry.DisableDelegatedStake, - VotingPublicKey: validatorEntry.VotingPublicKey.Copy(), - VotingPublicKeySignature: validatorEntry.VotingPublicKeySignature.Copy(), - VotingSignatureBlockHeight: validatorEntry.VotingSignatureBlockHeight, - TotalStakeAmountNanos: validatorEntry.TotalStakeAmountNanos.Clone(), - RegisteredAtBlockHeight: validatorEntry.RegisteredAtBlockHeight, - LastActiveAtEpochNumber: validatorEntry.LastActiveAtEpochNumber, - JailedAtEpochNumber: validatorEntry.JailedAtEpochNumber, - ExtraData: copyExtraData(validatorEntry.ExtraData), - isDeleted: validatorEntry.isDeleted, - } -} - -func (validatorEntry *ValidatorEntry) ToMapKey() ValidatorMapKey { - return ValidatorMapKey{ - ValidatorPKID: *validatorEntry.ValidatorPKID, - TotalStakeAmountNanos: *validatorEntry.TotalStakeAmountNanos, - RegisteredAtBlockHeight: validatorEntry.RegisteredAtBlockHeight, - JailedAtEpochNumber: validatorEntry.JailedAtEpochNumber, + ValidatorPKID: validatorEntry.ValidatorPKID.NewPKID(), + Domains: domainsCopy, + DisableDelegatedStake: validatorEntry.DisableDelegatedStake, + VotingPublicKey: validatorEntry.VotingPublicKey.Copy(), + VotingPublicKeySignature: validatorEntry.VotingPublicKeySignature.Copy(), + TotalStakeAmountNanos: validatorEntry.TotalStakeAmountNanos.Clone(), + LastActiveAtEpochNumber: validatorEntry.LastActiveAtEpochNumber, + JailedAtEpochNumber: validatorEntry.JailedAtEpochNumber, + ExtraData: copyExtraData(validatorEntry.ExtraData), + isDeleted: validatorEntry.isDeleted, } } func (validatorEntry *ValidatorEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte - data = append(data, EncodeToBytes(blockHeight, validatorEntry.ValidatorID, skipMetadata...)...) 
data = append(data, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID, skipMetadata...)...) // Domains @@ -118,9 +138,7 @@ func (validatorEntry *ValidatorEntry) RawEncodeWithoutMetadata(blockHeight uint6 data = append(data, BoolToByte(validatorEntry.DisableDelegatedStake)) data = append(data, EncodeBLSPublicKey(validatorEntry.VotingPublicKey)...) data = append(data, EncodeBLSSignature(validatorEntry.VotingPublicKeySignature)...) - data = append(data, UintToBuf(validatorEntry.VotingSignatureBlockHeight)...) - data = append(data, EncodeUint256(validatorEntry.TotalStakeAmountNanos)...) - data = append(data, UintToBuf(validatorEntry.RegisteredAtBlockHeight)...) + data = append(data, VariableEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) data = append(data, UintToBuf(validatorEntry.LastActiveAtEpochNumber)...) data = append(data, UintToBuf(validatorEntry.JailedAtEpochNumber)...) data = append(data, EncodeExtraData(validatorEntry.ExtraData)...) @@ -130,12 +148,6 @@ func (validatorEntry *ValidatorEntry) RawEncodeWithoutMetadata(blockHeight uint6 func (validatorEntry *ValidatorEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { var err error - // ValidatorID - validatorEntry.ValidatorID, err = DecodeDeSoEncoder(&BlockHash{}, rr) - if err != nil { - return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading ValidatorID: ") - } - // ValidatorPKID validatorEntry.ValidatorPKID, err = DecodeDeSoEncoder(&PKID{}, rr) if err != nil { @@ -173,24 +185,12 @@ func (validatorEntry *ValidatorEntry) RawDecodeWithoutMetadata(blockHeight uint6 return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingPublicKeySignature: ") } - // VotingSignatureBlockHeight - validatorEntry.VotingSignatureBlockHeight, err = ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingSignatureBlockHeight: ") - } - // TotalStakeAmountNanos - validatorEntry.TotalStakeAmountNanos, err = DecodeUint256(rr) + 
validatorEntry.TotalStakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading TotalStakeAmountNanos: ") } - // RegisteredAtBlockHeight - validatorEntry.RegisteredAtBlockHeight, err = ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading RegisteredAtBlockHeight: ") - } - // LastActiveAtEpochNumber validatorEntry.LastActiveAtEpochNumber, err = ReadUvarint(rr) if err != nil { @@ -225,11 +225,10 @@ func (validatorEntry *ValidatorEntry) GetEncoderType() EncoderType { // type RegisterAsValidatorMetadata struct { - Domains [][]byte - DisableDelegatedStake bool - VotingPublicKey *bls.PublicKey - VotingPublicKeySignature *bls.Signature - VotingSignatureBlockHeight uint64 + Domains [][]byte + DisableDelegatedStake bool + VotingPublicKey *bls.PublicKey + VotingPublicKeySignature *bls.Signature } func (txnData *RegisterAsValidatorMetadata) GetTxnType() TxnType { @@ -248,7 +247,6 @@ func (txnData *RegisterAsValidatorMetadata) ToBytes(preSignature bool) ([]byte, data = append(data, BoolToByte(txnData.DisableDelegatedStake)) data = append(data, EncodeBLSPublicKey(txnData.VotingPublicKey)...) data = append(data, EncodeBLSSignature(txnData.VotingPublicKeySignature)...) - data = append(data, UintToBuf(txnData.VotingSignatureBlockHeight)...) 
return data, nil } @@ -286,12 +284,6 @@ func (txnData *RegisterAsValidatorMetadata) FromBytes(data []byte) error { return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingPublicKeySignature: ") } - // VotingSignatureBlockHeight - txnData.VotingSignatureBlockHeight, err = ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingSignatureBlockHeight: ") - } - return nil } @@ -353,7 +345,6 @@ type RegisterAsValidatorTxindexMetadata struct { DisableDelegatedStake bool VotingPublicKey string VotingPublicKeySignature string - VotingSignatureBlockHeight uint64 } func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -369,7 +360,6 @@ func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawEncodeWithoutMetad data = append(data, BoolToByte(txindexMetadata.DisableDelegatedStake)) data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingPublicKey))...) data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingPublicKeySignature))...) - data = append(data, UintToBuf(txindexMetadata.VotingSignatureBlockHeight)...) return data } @@ -416,12 +406,6 @@ func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawDecodeWithoutMetad } txindexMetadata.VotingPublicKeySignature = string(votingPublicKeySignatureBytes) - // VotingSignatureBlockHeight - txindexMetadata.VotingSignatureBlockHeight, err = ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading VotingSignatureBlockHeight: ") - } - return nil } @@ -445,7 +429,7 @@ type UnstakedStakerTxindexMetadata struct { func (txindexMetadata *UnstakedStakerTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte data = append(data, EncodeByteArray([]byte(txindexMetadata.StakerPublicKeyBase58Check))...) 
- data = append(data, EncodeUint256(txindexMetadata.UnstakeAmountNanos)...) + data = append(data, VariableEncodeUint256(txindexMetadata.UnstakeAmountNanos)...) return data } @@ -460,7 +444,7 @@ func (txindexMetadata *UnstakedStakerTxindexMetadata) RawDecodeWithoutMetadata(b txindexMetadata.StakerPublicKeyBase58Check = string(stakerPublicKeyBase58CheckBytes) // UnstakeAmountNanos - txindexMetadata.UnstakeAmountNanos, err = DecodeUint256(rr) + txindexMetadata.UnstakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "UnstakedStakerTxindexMetadata.Decode: Problem reading UnstakeAmountNanos: ") } @@ -561,10 +545,7 @@ func DBKeyForValidatorByPKID(validatorEntry *ValidatorEntry) []byte { func DBKeyForValidatorByStake(validatorEntry *ValidatorEntry) []byte { key := append([]byte{}, Prefixes.PrefixValidatorByStake...) key = append(key, EncodeUint8(uint8(validatorEntry.Status()))...) - // TotalStakeAmountNanos will never be nil here, but EncodeOptionalUint256 - // is used because it provides a fixed-width encoding of uint256.Ints. - key = append(key, EncodeOptionalUint256(validatorEntry.TotalStakeAmountNanos)...) // Highest stake first - key = append(key, EncodeUint64(math.MaxUint64-validatorEntry.RegisteredAtBlockHeight)...) // Oldest first + key = append(key, FixedWidthEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) return key } @@ -618,22 +599,24 @@ func DBGetTopActiveValidatorsByStake( validatorKeysToSkip.Add(string(DBKeyForValidatorByStake(validatorEntryToSkip))) } - // Retrieve top N active ValidatorEntry PKIDs by stake. + // Retrieve top N active ValidatorEntry keys by stake. key := append([]byte{}, Prefixes.PrefixValidatorByStake...) key = append(key, EncodeUint8(uint8(ValidatorStatusActive))...) 
- _, validatorPKIDsBytes, err := EnumerateKeysForPrefixWithLimitOffsetOrder( + keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( handle, key, limit, nil, true, validatorKeysToSkip, ) if err != nil { return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem retrieving top validators: ") } - // For each PKID, retrieve the ValidatorEntry by PKID. - for _, validatorPKIDBytes := range validatorPKIDsBytes { + // For each key found, parse the ValidatorPKID from the key, + // then retrieve the ValidatorEntry by the ValidatorPKID. + for _, keyFound := range keysFound { + // Parse the PKIDBytes from the key. The ValidatorPKID is the last component of the key. + validatorPKIDBytes := keyFound[len(keyFound)-PublicKeyLenCompressed:] // Convert PKIDBytes to PKID. validatorPKID := &PKID{} - exists, err := DecodeFromBytes(validatorPKID, bytes.NewReader(validatorPKIDBytes)) - if !exists || err != nil { + if err = validatorPKID.FromBytes(bytes.NewReader(validatorPKIDBytes)); err != nil { return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem reading ValidatorPKID: ") } // Retrieve ValidatorEntry by PKID. @@ -672,7 +655,7 @@ func DBGetGlobalStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot) (*uint2 // Decode from bytes. var globalStakeAmountNanos *uint256.Int rr := bytes.NewReader(globalStakeAmountNanosBytes) - globalStakeAmountNanos, err = DecodeUint256(rr) + globalStakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { return nil, errors.Wrapf(err, "DBGetGlobalStakeAmountNanosWithTxn: problem decoding value") } @@ -699,9 +682,10 @@ func DBPutValidatorWithTxn( ) } - // Set ValidatorEntry.PKID in PrefixValidatorByStake. + // Set ValidatorEntry key in PrefixValidatorByStake. The value should be nil. + // We parse the ValidatorPKID from the key for this index. 
key = DBKeyForValidatorByStake(validatorEntry) - if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID)); err != nil { + if err := DBSetWithTxn(txn, snap, key, nil); err != nil { return errors.Wrapf( err, "DBPutValidatorWithTxn: problem storing ValidatorEntry in index PrefixValidatorByStake", ) @@ -710,10 +694,24 @@ func DBPutValidatorWithTxn( return nil } -func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorEntry *ValidatorEntry) error { - if validatorEntry == nil { +func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorPKID *PKID) error { + if validatorPKID == nil { // This should never happen but is a sanity check. - glog.Errorf("DBDeleteValidatorWithTxn: called with nil ValidatorEntry") + glog.Errorf("DBDeleteValidatorWithTxn: called with nil ValidatorPKID") + return nil + } + + // Look up the existing ValidatorEntry in the db using the PKID. We need to use this + // validator's values to delete the corresponding indexes. + validatorEntry, err := DBGetValidatorByPKIDWithTxn(txn, snap, validatorPKID) + if err != nil { + return errors.Wrapf(err, "DBDeleteValidatorWithTxn: problem retrieving "+ + "ValidatorEntry for PKID %v: ", validatorPKID) + } + + // If there is no ValidatorEntry in the DB for this PKID, then there is nothing to + // delete. + if validatorEntry == nil { return nil } @@ -749,7 +747,7 @@ func DBPutGlobalStakeAmountNanosWithTxn( } key := DBKeyForGlobalStakeAmountNanos() - return DBSetWithTxn(txn, snap, key, EncodeUint256(globalStakeAmountNanos)) + return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalStakeAmountNanos)) } // @@ -1047,32 +1045,19 @@ func (bav *UtxoView) _connectRegisterAsValidator( return 0, 0, nil, errors.Wrapf(err, "_connectRegisterAsValidator: ") } // Delete the existing ValidatorEntry, if exists. There will be an existing ValidatorEntry - // if the transactor is updating their ValidatorEntry. 
There will not be, if the transactor + // if the transactor is updating their ValidatorEntry. There will not be one if the transactor // is registering a ValidatorEntry for the first time (or it was previously unregistered). // Note that we don't need to check isDeleted because the Get returns nil if isDeleted=true. if prevValidatorEntry != nil { bav._deleteValidatorEntryMappings(prevValidatorEntry) } - // Set ValidatorID only if this is a new ValidatorEntry. - validatorID := txHash.NewBlockHash() - if prevValidatorEntry != nil { - validatorID = prevValidatorEntry.ValidatorID.NewBlockHash() - } - // Calculate TotalStakeAmountNanos. totalStakeAmountNanos := uint256.NewInt() if prevValidatorEntry != nil { totalStakeAmountNanos = prevValidatorEntry.TotalStakeAmountNanos.Clone() } - // Set RegisteredAtBlockHeight to CurrentBlockHeight if this is a new ValidatorEntry. - // Otherwise, retain the existing RegisteredAtBlockHeight. - registeredAtBlockHeight := uint64(blockHeight) - if prevValidatorEntry != nil { - registeredAtBlockHeight = prevValidatorEntry.RegisteredAtBlockHeight - } - // Set LastActiveAtEpochNumber to CurrentEpochNumber if this is a new ValidatorEntry. // Otherwise, retain the existing LastActiveAtEpochNumber. var lastActiveAtEpochNumber uint64 @@ -1104,20 +1089,17 @@ func (bav *UtxoView) _connectRegisterAsValidator( // Construct new ValidatorEntry from metadata. currentValidatorEntry := &ValidatorEntry{ - ValidatorID: validatorID, ValidatorPKID: transactorPKIDEntry.PKID, // Note: if someone is updating their ValidatorEntry, they need to include // all domains. The Domains field is not appended to. It is overwritten. 
- Domains: txMeta.Domains, - DisableDelegatedStake: txMeta.DisableDelegatedStake, - VotingPublicKey: txMeta.VotingPublicKey, - VotingPublicKeySignature: txMeta.VotingPublicKeySignature, - VotingSignatureBlockHeight: txMeta.VotingSignatureBlockHeight, - TotalStakeAmountNanos: totalStakeAmountNanos, - RegisteredAtBlockHeight: registeredAtBlockHeight, - LastActiveAtEpochNumber: lastActiveAtEpochNumber, - JailedAtEpochNumber: jailedAtEpochNumber, - ExtraData: mergeExtraData(prevExtraData, txn.ExtraData), + Domains: txMeta.Domains, + DisableDelegatedStake: txMeta.DisableDelegatedStake, + VotingPublicKey: txMeta.VotingPublicKey, + VotingPublicKeySignature: txMeta.VotingPublicKeySignature, + TotalStakeAmountNanos: totalStakeAmountNanos, + LastActiveAtEpochNumber: lastActiveAtEpochNumber, + JailedAtEpochNumber: jailedAtEpochNumber, + ExtraData: mergeExtraData(prevExtraData, txn.ExtraData), } // Set the ValidatorEntry. bav._setValidatorEntryMappings(currentValidatorEntry) @@ -1292,12 +1274,6 @@ func (bav *UtxoView) _connectUnregisterAsValidator( } } else { lockedStakeEntry = &LockedStakeEntry{ - // What to set as the LockedStakeID here is not obvious. Typically, we use the - // TxHash, but since there may be multiple LockedStakeEntries created here, one - // for each unstaked staker, that would result in multiple LockedStakeEntries - // with the same LockedStakeID. For that reason, we opted to use the - // PrevStakeEntry's StakeID instead. - LockedStakeID: prevStakeEntry.StakeID.NewBlockHash(), StakerPKID: prevStakeEntry.StakerPKID.NewPKID(), ValidatorPKID: prevStakeEntry.ValidatorPKID.NewPKID(), LockedAmountNanos: prevStakeEntry.StakeAmountNanos.Clone(), @@ -1352,14 +1328,18 @@ func (bav *UtxoView) _connectUnregisterAsValidator( // Set the new GlobalStakeAmountNanos. bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) - // Add a UTXO operation. - utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + // Create a UTXO operation. 
+ utxoOpForTxn := &UtxoOperation{ Type: OperationTypeUnregisterAsValidator, PrevValidatorEntry: prevValidatorEntry, PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, PrevStakeEntries: prevStakeEntries, PrevLockedStakeEntries: prevLockedStakeEntries, - }) + } + if err = bav.SanityCheckUnregisterAsValidatorTxn(transactorPKIDEntry.PKID, utxoOpForTxn, totalUnstakedAmountNanos); err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: ") + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOpForTxn) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1443,6 +1423,10 @@ func (bav *UtxoView) _disconnectUnregisterAsValidator( ) } +// FIXME: Currently, unjail does not re-add a validator's stake back to the GlobalStakeAmountNanos. +// When we flesh out the logic for jail/unjail, we will want to make it so that the process that +// jails a validator *removes* their stake from GlobalStakeAmountNanos, and the process that unjails, +// i.e. this function, *re-adds* their stake back to GlobalStakeAmountNanos. func (bav *UtxoView) _connectUnjailValidator( txn *MsgDeSoTxn, txHash *BlockHash, @@ -1628,19 +1612,11 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata( return errors.Wrapf(RuleErrorValidatorMissingVotingPublicKey, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } - // Validate VotingSignatureBlockHeight. - if metadata.VotingSignatureBlockHeight < blockHeight || - metadata.VotingSignatureBlockHeight > blockHeight+bav.Params.ValidatorVotingSignatureBlockHeightWindow { - return errors.Wrapf(RuleErrorValidatorInvalidVotingSignatureBlockHeight, "UtxoView.IsValidRegisterAsValidatorMetadata: ") - } - // Validate VotingPublicKeySignature. 
if metadata.VotingPublicKeySignature == nil { return errors.Wrapf(RuleErrorValidatorMissingVotingPublicKeySignature, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } - votingSignaturePayload := CreateValidatorVotingSignaturePayload( - transactorPublicKey, metadata.VotingPublicKey, metadata.VotingSignatureBlockHeight, - ) + votingSignaturePayload := CreateValidatorVotingSignaturePayload(transactorPublicKey) isValidBLSSignature, err := metadata.VotingPublicKey.Verify(metadata.VotingPublicKeySignature, votingSignaturePayload) if err != nil { return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error verifying VotingPublicKeySignature: ") @@ -1659,19 +1635,14 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata( !validatorEntry.DisableDelegatedStake && // Existing ValidatorEntry.DisableDelegatedStake = false metadata.DisableDelegatedStake { // Updating DisableDelegatedStake = true - // Retrieve existing StakeEntries. - prevStakeEntries, err := bav.GetStakeEntriesForValidatorPKID(transactorPKIDEntry.PKID) + hasDelegatedStake, err := bav.ValidatorHasDelegatedStake(transactorPKIDEntry.PKID) if err != nil { - return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error retrieving existing StakeEntries: ") + return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error checking for existing delegated StakeEntries: ") } - - // Error if any belong to someone other than the validator. 
- for _, stakeEntry := range prevStakeEntries { - if !stakeEntry.StakerPKID.Eq(transactorPKIDEntry.PKID) { - return errors.Wrapf( - RuleErrorValidatorDisablingExistingDelegatedStakers, "UtxoView.IsValidRegisterAsValidatorMetadata: ", - ) - } + if hasDelegatedStake { + return errors.Wrapf( + RuleErrorValidatorDisablingExistingDelegatedStakers, "UtxoView.IsValidRegisterAsValidatorMetadata: ", + ) } } @@ -1732,40 +1703,87 @@ func (bav *UtxoView) IsValidUnjailValidatorMetadata(transactorPublicKey []byte) return nil } -func (bav *UtxoView) GetValidatorByPKID(pkid *PKID) (*ValidatorEntry, error) { - // First check the UtxoView. +func (bav *UtxoView) SanityCheckUnregisterAsValidatorTxn( + transactorPKID *PKID, + utxoOp *UtxoOperation, + amountNanos *uint256.Int, +) error { + if utxoOp.Type != OperationTypeUnregisterAsValidator { + return fmt.Errorf("SanityCheckUnregisterAsValidatorTxn: called with %v", utxoOp.Type) + } - // There can be multiple ValidatorEntries for a given PKID in the UtxoView since the ValidatorMapKey - // contains ValidatorPKID, TotalStakeAmountNanos, and RegisteredAtBlockHeight. We need to loop through - // all the ValidatorEntries and find the one matching the given PKID that is !isDeleted. There should - // ever only be zero or one such matching ValidatorEntries. If the only matching ValidatorEntries are - // all isDeleted then we shouldn't check the database as the corresponding rows in the database will - // be deleted once the UtxoView is flushed. - isDeleted := false - - for _, validatorEntry := range bav.ValidatorMapKeyToValidatorEntry { - if validatorEntry == nil { - // This should never happen but is a sanity check. - continue - } - if !validatorEntry.ValidatorPKID.Eq(pkid) { - continue + // Sanity check the deleted ValidatorEntry. 
+ if utxoOp.PrevValidatorEntry == nil { + return errors.New("SanityCheckUnregisterAsValidatorTxn: nil PrevValidatorEntry provided") + } + if !utxoOp.PrevValidatorEntry.ValidatorPKID.Eq(transactorPKID) { + return errors.New("SanityCheckUnregisterAsValidatorTxn: ValidatorPKID doesn't match TransactorPKID") + } + if !utxoOp.PrevValidatorEntry.TotalStakeAmountNanos.Eq(amountNanos) { + return errors.New("SanityCheckUnregisterAsValidatorTxn: TotalStakeAmountNanos doesn't match") + } + currentValidatorEntry, err := bav.GetValidatorByPKID(utxoOp.PrevValidatorEntry.ValidatorPKID) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error retrieving ValidatorEntry: ") + } + if currentValidatorEntry != nil { + return errors.New("SanityCheckUnregisterAsValidatorTxn: ValidatorEntry was not deleted") + } + + // Sanity check that there are no existing StakeEntries for the validator. + stakeEntries, err := bav.GetStakeEntriesForValidatorPKID(utxoOp.PrevValidatorEntry.ValidatorPKID) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error retrieving StakeEntries: ") + } + if len(stakeEntries) != 0 { + return errors.New("SanityCheckUnregisterAsValidatorTxn: StakeEntries for ValidatorEntry still exist") + } + + // Sanity check the deleted StakeEntries. + totalUnstakedAmountNanos := uint256.NewInt() + for _, stakeEntry := range utxoOp.PrevStakeEntries { + totalUnstakedAmountNanos, err = SafeUint256().Add(totalUnstakedAmountNanos, stakeEntry.StakeAmountNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error calculating TotalUnstakedAmountNanos: ") } + } + if !totalUnstakedAmountNanos.Eq(amountNanos) { + return errors.New("SanityCheckUnregisterAsValidatorTxn: TotalUnstakedAmountNanos doesn't match") + } + + // Sanity check that the GlobalStakeAmountNanos was decreased by amountNanos. 
+ if utxoOp.PrevGlobalStakeAmountNanos == nil { + return errors.New("SanityCheckUnregisterAsValidatorTxn: nil PrevGlobalStakeAmountNanos provided") + } + currentGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error retrieving GlobalStakeAmountNanos: ") + } + globalStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalStakeAmountNanos, currentGlobalStakeAmountNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error calculating GlobalStakeAmountNanos decrease: ") + } + if !globalStakeAmountNanosDecrease.Eq(amountNanos) { + return errors.New("SanityCheckUnregisterAsValidatorTxn: GlobalStakeAmountNanos decrease doesn't match") + } + return nil +} + +func (bav *UtxoView) GetValidatorByPKID(pkid *PKID) (*ValidatorEntry, error) { + // First check the UtxoView. + validatorEntry, exists := bav.ValidatorPKIDToValidatorEntry[*pkid] + if exists { if validatorEntry.isDeleted { - isDeleted = true - continue + // If we get to this point, we found a ValidatorEntry for the given PKID + // but it was marked as isDeleted. In this case, we do not want to check + // the database but instead just return nil, no ValidatorEntry found. + return nil, nil } // If we get to this point, we found a matching // !isDeleted ValidatorEntry for the given PKID. return validatorEntry, nil } - - if isDeleted { - // If we get to this point, we found one or more matching ValidatorEntries - // for the given PKID, but they were all isDeleted. We do not want to check - // the database but instead just return nil, no ValidatorEntry found. - return nil, nil - } + // At this point, we know there was no matching ValidatorEntry in the view. // If no ValidatorEntry (either isDeleted or !isDeleted) was found // in the UtxoView for the given PKID, check the database. 
@@ -1807,10 +1825,12 @@ func (bav *UtxoView) GetTopActiveValidatorsByStake(limit int) ([]*ValidatorEntry // then sort the UtxoViewValidatorEntries and DatabaseValidatorEntries together to find the top N // ValidatorEntries by stake across both the UtxoView and database. var utxoViewValidatorEntries []*ValidatorEntry - for _, validatorEntry := range bav.ValidatorMapKeyToValidatorEntry { + for _, validatorEntry := range bav.ValidatorPKIDToValidatorEntry { utxoViewValidatorEntries = append(utxoViewValidatorEntries, validatorEntry) } // Pull top N active ValidatorEntries from the database (not present in the UtxoView). + // Note that we will skip validators that are present in the view because we pass + // utxoViewValidatorEntries to the function. dbValidatorEntries, err := DBGetTopActiveValidatorsByStake(bav.Handle, bav.Snapshot, limit, utxoViewValidatorEntries) if err != nil { return nil, errors.Wrapf(err, "UtxoView.GetTopActiveValidatorsByStake: error retrieving entries from db: ") @@ -1820,13 +1840,13 @@ func (bav *UtxoView) GetTopActiveValidatorsByStake(limit int) ([]*ValidatorEntry // We only pull ValidatorEntries from the db that are not present in the // UtxoView. As a sanity check, we double-check that the ValidatorEntry // is not already in the UtxoView here. - if _, exists := bav.ValidatorMapKeyToValidatorEntry[validatorEntry.ToMapKey()]; !exists { + if _, exists := bav.ValidatorPKIDToValidatorEntry[*validatorEntry.ValidatorPKID]; !exists { bav._setValidatorEntryMappings(validatorEntry) } } // Pull !isDeleted, active ValidatorEntries from the UtxoView. 
var validatorEntries []*ValidatorEntry - for _, validatorEntry := range bav.ValidatorMapKeyToValidatorEntry { + for _, validatorEntry := range bav.ValidatorPKIDToValidatorEntry { if !validatorEntry.isDeleted && validatorEntry.Status() == ValidatorStatusActive { validatorEntries = append(validatorEntries, validatorEntry) } @@ -1841,25 +1861,20 @@ func (bav *UtxoView) GetTopActiveValidatorsByStake(limit int) ([]*ValidatorEntry } func (bav *UtxoView) GetGlobalStakeAmountNanos() (*uint256.Int, error) { - var globalStakeAmountNanos *uint256.Int - var err error // Read the GlobalStakeAmountNanos from the UtxoView. if bav.GlobalStakeAmountNanos != nil { - globalStakeAmountNanos = bav.GlobalStakeAmountNanos.Clone() + return bav.GlobalStakeAmountNanos.Clone(), nil } // If not set, read the GlobalStakeAmountNanos from the db. - // TODO: Confirm if the GlobalStakeAmountNanos.IsZero() that we should look in the db. - if globalStakeAmountNanos == nil || globalStakeAmountNanos.IsZero() { - globalStakeAmountNanos, err = DBGetGlobalStakeAmountNanos(bav.Handle, bav.Snapshot) - if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetGlobalStakeAmountNanos: ") - } - if globalStakeAmountNanos == nil { - globalStakeAmountNanos = uint256.NewInt() - } - // Cache the GlobaleStakeAmountNanos from the db in the UtxoView. - bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) + globalStakeAmountNanos, err := DBGetGlobalStakeAmountNanos(bav.Handle, bav.Snapshot) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetGlobalStakeAmountNanos: ") } + if globalStakeAmountNanos == nil { + globalStakeAmountNanos = uint256.NewInt() + } + // Cache the GlobalStakeAmountNanos from the db in the UtxoView. 
+ bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) return globalStakeAmountNanos, nil } @@ -1869,7 +1884,7 @@ func (bav *UtxoView) _setValidatorEntryMappings(validatorEntry *ValidatorEntry) glog.Errorf("_setValidatorEntryMappings: called with nil entry, this should never happen") return } - bav.ValidatorMapKeyToValidatorEntry[validatorEntry.ToMapKey()] = validatorEntry + bav.ValidatorPKIDToValidatorEntry[*validatorEntry.ValidatorPKID] = validatorEntry } func (bav *UtxoView) _deleteValidatorEntryMappings(validatorEntry *ValidatorEntry) { @@ -1896,14 +1911,14 @@ func (bav *UtxoView) _setGlobalStakeAmountNanos(globalStakeAmountNanos *uint256. func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { // Delete all entries in the ValidatorMapKeyToValidatorEntry UtxoView map. - for validatorMapKeyIter, validatorEntryIter := range bav.ValidatorMapKeyToValidatorEntry { + for validatorMapKeyIter, validatorEntryIter := range bav.ValidatorPKIDToValidatorEntry { // Make a copy of the iterators since we make references to them below. validatorMapKey := validatorMapKeyIter validatorEntry := *validatorEntryIter // Sanity-check that the entry matches the map key. - validatorMapKeyInEntry := validatorEntry.ToMapKey() - if validatorMapKeyInEntry != validatorMapKey { + validatorMapKeyInEntry := *validatorEntry.ValidatorPKID + if !validatorMapKeyInEntry.Eq(&validatorMapKey) { return fmt.Errorf( "_flushValidatorEntriesToDbWithTxn: ValidatorEnry key %v doesn't match MapKey %v", &validatorMapKeyInEntry, @@ -1913,13 +1928,13 @@ func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHei // Delete the existing mappings in the db for this ValidatorMapKey. They // will be re-added if the corresponding entry in memory has isDeleted=false. 
- if err := DBDeleteValidatorWithTxn(txn, bav.Snapshot, &validatorEntry); err != nil { + if err := DBDeleteValidatorWithTxn(txn, bav.Snapshot, &validatorMapKey); err != nil { return errors.Wrapf(err, "_flushValidatorEntriesToDbWithTxn: ") } } // Set any !isDeleted ValidatorEntries in the ValidatorMapKeyToValidatorEntry UtxoView map. - for _, validatorEntryIter := range bav.ValidatorMapKeyToValidatorEntry { + for _, validatorEntryIter := range bav.ValidatorPKIDToValidatorEntry { validatorEntry := *validatorEntryIter if validatorEntry.isDeleted { // If ValidatorEntry.isDeleted then there's nothing to @@ -1975,7 +1990,6 @@ func (bav *UtxoView) CreateRegisterAsValidatorTxindexMetadata( DisableDelegatedStake: metadata.DisableDelegatedStake, VotingPublicKey: metadata.VotingPublicKey.ToString(), VotingPublicKeySignature: metadata.VotingPublicKeySignature.ToString(), - VotingSignatureBlockHeight: metadata.VotingSignatureBlockHeight, } // Construct AffectedPublicKeys. @@ -2092,23 +2106,47 @@ func DecodeBLSSignature(rr io.Reader) (*bls.Signature, error) { return (&bls.Signature{}).FromBytes(signatureBytes) } +// When registering as a validator, there are two keys that are involved: +// +// - transactorPublicKey: This is the key that is used to sign transactions on the +// network. +// +// - votingPublicKey: This is the key that is used as a part of consensus to sign +// vote and timeout messages. It is distinct from the transactorPublicKey because +// it is a BLS key rather than a standard ECDSA key, which means we can *aggregate* +// signatures generated by these keys, which is needed for our Fast-HotStuff +// consensus to be efficient. It is also useful from an operational standpoint +// to separate the key used to perform transactions on the network from the key +// used to vote on blocks (the former can remain "cold" while the latter needs +// to remain "hot"). 
+// +// Given that there are two keys involved, the validation of a RegisterAsValidator +// transaction needs to check a few things: +// +// 1. That the user owns the transactorPublicKey. This is proven by checking the signature +// at the transaction level. +// +// 2. That the user owns the votingPublicKey. This is proven by checking a *second* +// signature embedded in the RegisterAsValidatorMetadata, which we define below. +// +// To prove #2, it is sufficient to have the votingPublicKey sign the +// transactorPublicKey. Doing this makes it so that the signature can only ever be +// used to associate this specific votingPublicKey with this specific transactorPublicKey. +// In addition, the fact that the transactorPublicKey is required to sign the +// *entire transaction payload*, including this votingPublicKey signature, +// ensures that no *other* transactor can ever associate +// this votingPublicKey with another key. Finally, replay attacks are prevented by +// the fact that the transaction includes a nonce in its payload, signed by the +// transactorPublicKey, that only allows the transaction as a whole to be run once. +// This means that nobody can construct a transaction to re-register this validator +// without constructing a new transaction with a fresh nonce, thus requiring a new +// signature from the same transactorPublicKey, which they wouldn't have access to. func CreateValidatorVotingSignaturePayload( transactorPublicKeyBytes []byte, - votingPublicKey *bls.PublicKey, - votingSignatureBlockHeight uint64, ) []byte { - var data []byte // HASH(TransactorPublicKey) hashedTransactorPublicKey := sha256.Sum256(transactorPublicKeyBytes) - data = append(data, hashedTransactorPublicKey[:]...) - // HASH(VotingPublicKey) - hashedVotingPublicKey := sha256.Sum256(votingPublicKey.ToBytes()) - data = append(data, hashedVotingPublicKey[:]...) - // VotingSignatureBlockHeight - data = append(data, UintToBuf(votingSignatureBlockHeight)...) 
- // HASH( HASH(TransactorPublicKey) || HASH(VotingPublicKey) || VotingSignatureBlockHeight ) - hashedData := sha256.Sum256(data) - return hashedData[:] + return hashedTransactorPublicKey[:] } // @@ -2125,9 +2163,8 @@ const RuleErrorValidatorNotFound RuleError = "RuleErrorValidatorNotFound" const RuleErrorValidatorMissingVotingPublicKey RuleError = "RuleErrorValidatorMissingVotingPublicKey" const RuleErrorValidatorMissingVotingPublicKeySignature RuleError = "RuleErrorValidatorMissingVotingPublicKeySignature" const RuleErrorValidatorInvalidVotingPublicKeySignature RuleError = "RuleErrorValidatorInvalidVotingPublicKeySignature" -const RuleErrorValidatorInvalidVotingSignatureBlockHeight RuleError = "RuleErrorValidatorInvalidVotingSignatureBlockHeight" const RuleErrorValidatorDisablingExistingDelegatedStakers RuleError = "RuleErrorValidatorDisablingExistingDelegatedStakers" const RuleErrorUnjailingNonjailedValidator RuleError = "RuleErrorUnjailingNonjailedValidator" const RuleErrorUnjailingValidatorTooEarly RuleError = "RuleErrorUnjailingValidatorTooEarly" -const MaxValidatorNumDomains int = 12 +const MaxValidatorNumDomains int = 100 diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 6cfe055a3..96126d100 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -5,12 +5,13 @@ package lib import ( "errors" "fmt" + "math" + "testing" + "github.com/btcsuite/btcd/btcec" "github.com/deso-protocol/core/bls" "github.com/holiman/uint256" "github.com/stretchr/testify/require" - "math" - "testing" ) func TestValidatorRegistration(t *testing.T) { @@ -93,11 +94,10 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - 
VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) @@ -162,57 +162,12 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Error(t, err) require.Contains(t, err.Error(), RuleErrorValidatorMissingVotingPublicKey) } - { - // RuleErrorValidatorInvalidVotingSignatureBlockHeight: missing - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) - registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - } - _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) - require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingSignatureBlockHeight) - } - { - // RuleErrorValidatorInvalidVotingSignatureBlockHeight: too low - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) - registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight - 1, - } - _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) - require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingSignatureBlockHeight) - } - { - // RuleErrorValidatorInvalidVotingSignatureBlockHeight: too high - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) - registerMetadata = &RegisterAsValidatorMetadata{ - 
Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight + params.ValidatorVotingSignatureBlockHeightWindow + 1, - } - _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) - require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingSignatureBlockHeight) - } { // RuleErrorValidatorMissingVotingPublicKeySignature votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, } _, err = _submitRegisterAsValidatorTxn( testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, @@ -224,10 +179,9 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // RuleErrorValidatorInvalidVotingPublicKeySignature: invalid TransactorPkBytes votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn( testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, @@ -240,25 +194,9 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) _, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - 
Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, - } - _, err = _submitRegisterAsValidatorTxn( - testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, - ) - require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingPublicKeySignature) - } - { - // RuleErrorValidatorInvalidVotingPublicKeySignature: invalid VotingSignatureBlockHeight - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) - registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight + 1, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn( testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, @@ -270,11 +208,10 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // Happy path: register a validator votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } extraData := map[string][]byte{"TestKey": []byte("TestValue1")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -311,11 +248,10 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // Happy path: update a validator 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example1.com"), []byte("https://example2.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example1.com"), []byte("https://example2.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -677,10 +613,9 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { // Perform a RegisterAsValidator txn. No error expected. registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } err = _submitValidatorTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: registerAsValidatorMetadata}, @@ -744,10 +679,9 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { // Perform a RegisterAsValidator txn. Error expected. 
registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } err = _submitValidatorTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: registerAsValidatorMetadata}, @@ -845,10 +779,9 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { // m0 registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m0.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -864,10 +797,9 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { // m1 registers as a validator. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m1.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m1.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -881,10 +813,9 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { // m2 registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m2PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m2.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m2.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m2Pub, m2Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1058,11 +989,10 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Store m0's ValidatorEntry in the db with TotalStake = 100 nanos. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) validatorEntry := &ValidatorEntry{ - ValidatorPKID: m0PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(100), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + ValidatorPKID: m0PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(100), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } utxoView._setValidatorEntryMappings(validatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -1074,17 +1004,16 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) // Verify m0 is not stored in the UtxoView. - require.Empty(t, utxoView.ValidatorMapKeyToValidatorEntry) + require.Empty(t, utxoView.ValidatorPKIDToValidatorEntry) // Store m1's jailed ValidatorEntry in the db with TotalStake = 400 nanos. votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) validatorEntry = &ValidatorEntry{ - ValidatorPKID: m1PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(400), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, - JailedAtEpochNumber: 1, + ValidatorPKID: m1PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(400), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + JailedAtEpochNumber: 1, } utxoView._setValidatorEntryMappings(validatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -1099,11 +1028,10 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Store m2's ValidatorEntry in the db with TotalStake = 300 nanos. 
votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m2PkBytes, blockHeight) m2ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m2PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(300), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + ValidatorPKID: m2PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(300), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } utxoView._setValidatorEntryMappings(m2ValidatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -1117,11 +1045,10 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Store m3's ValidatorEntry in the db with TotalStake = 600 nanos. votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m3PkBytes, blockHeight) m3ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m3PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(600), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + ValidatorPKID: m3PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(600), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } utxoView._setValidatorEntryMappings(m3ValidatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -1138,11 +1065,11 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.NotNil(t, validatorEntry) // Verify m2 is also stored in the UtxoView. 
- require.Len(t, utxoView.ValidatorMapKeyToValidatorEntry, 1) - require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m2ValidatorEntry.ToMapKey()].ValidatorPKID, m2PKID) + require.Len(t, utxoView.ValidatorPKIDToValidatorEntry, 1) + require.Equal(t, utxoView.ValidatorPKIDToValidatorEntry[*m2ValidatorEntry.ValidatorPKID].ValidatorPKID, m2PKID) require.Equal( t, - utxoView.ValidatorMapKeyToValidatorEntry[m2ValidatorEntry.ToMapKey()].TotalStakeAmountNanos, + utxoView.ValidatorPKIDToValidatorEntry[*m2ValidatorEntry.ValidatorPKID].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300), ) @@ -1150,17 +1077,16 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { utxoView._deleteValidatorEntryMappings(m3ValidatorEntry) // Verify m3 is stored in the UtxoView with isDeleted=true. - require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m3ValidatorEntry.ToMapKey()].ValidatorPKID, m3PKID) - require.True(t, utxoView.ValidatorMapKeyToValidatorEntry[m3ValidatorEntry.ToMapKey()].isDeleted) + require.Equal(t, utxoView.ValidatorPKIDToValidatorEntry[*m3ValidatorEntry.ValidatorPKID].ValidatorPKID, m3PKID) + require.True(t, utxoView.ValidatorPKIDToValidatorEntry[*m3ValidatorEntry.ValidatorPKID].isDeleted) // Store m4's ValidatorEntry in the UtxoView with TotalStake = 50 nanos. 
votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m4PkBytes, blockHeight) m4ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m4PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + ValidatorPKID: m4PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } utxoView._setValidatorEntryMappings(m4ValidatorEntry) @@ -1170,23 +1096,22 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Nil(t, validatorEntry) // Verify m4 is stored in the UtxoView. - require.Len(t, utxoView.ValidatorMapKeyToValidatorEntry, 3) - require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m4ValidatorEntry.ToMapKey()].ValidatorPKID, m4PKID) + require.Len(t, utxoView.ValidatorPKIDToValidatorEntry, 3) + require.Equal(t, utxoView.ValidatorPKIDToValidatorEntry[*m4ValidatorEntry.ValidatorPKID].ValidatorPKID, m4PKID) require.Equal( t, - utxoView.ValidatorMapKeyToValidatorEntry[m4ValidatorEntry.ToMapKey()].TotalStakeAmountNanos, + utxoView.ValidatorPKIDToValidatorEntry[*m4ValidatorEntry.ValidatorPKID].TotalStakeAmountNanos, uint256.NewInt().SetUint64(50), ) // Store m5's jailed ValidatorEntry in the UtxoView with TotalStake = 500 nanos. 
votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m5PkBytes, blockHeight) m5ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m5PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(500), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, - JailedAtEpochNumber: 1, + ValidatorPKID: m5PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(500), + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + JailedAtEpochNumber: 1, } utxoView._setValidatorEntryMappings(m5ValidatorEntry) @@ -1196,15 +1121,15 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Nil(t, validatorEntry) // Verify m5 is stored in the UtxoView. - require.Len(t, utxoView.ValidatorMapKeyToValidatorEntry, 4) - require.Equal(t, utxoView.ValidatorMapKeyToValidatorEntry[m5ValidatorEntry.ToMapKey()].ValidatorPKID, m5PKID) + require.Len(t, utxoView.ValidatorPKIDToValidatorEntry, 4) + require.Equal(t, utxoView.ValidatorPKIDToValidatorEntry[*m5ValidatorEntry.ValidatorPKID].ValidatorPKID, m5PKID) require.Equal( t, - utxoView.ValidatorMapKeyToValidatorEntry[m5ValidatorEntry.ToMapKey()].TotalStakeAmountNanos, + utxoView.ValidatorPKIDToValidatorEntry[*m5ValidatorEntry.ValidatorPKID].TotalStakeAmountNanos, uint256.NewInt().SetUint64(500), ) require.Equal( - t, utxoView.ValidatorMapKeyToValidatorEntry[m5ValidatorEntry.ToMapKey()].Status(), ValidatorStatusJailed, + t, utxoView.ValidatorPKIDToValidatorEntry[*m5ValidatorEntry.ValidatorPKID].Status(), ValidatorStatusJailed, ) // Fetch TopActiveValidatorsByStake merging ValidatorEntries from the db and UtxoView. @@ -1298,11 +1223,10 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 registers as a validator with DisableDelegatedStake = FALSE. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1320,11 +1244,10 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 updates DisableDelegatedStake = TRUE. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: true, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: true, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1366,11 +1289,10 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 updates DisableDelegatedStake = FALSE. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1399,11 +1321,10 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 tries to update DisableDelegateStake = TRUE. Errors. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: true, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: true, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) @@ -1500,10 +1421,9 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { // m0 registers as a validator. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://m0.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1707,10 +1627,9 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // m0 registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } extraData := map[string][]byte{"TestKey": []byte("TestValue1")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -1733,8 +1652,8 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // trying to unjail himself, but he was never jailed. // Delete m0's ValidatorEntry from the UtxoView. - delete(mempool.universalUtxoView.ValidatorMapKeyToValidatorEntry, validatorEntry.ToMapKey()) - delete(mempool.readOnlyUtxoView.ValidatorMapKeyToValidatorEntry, validatorEntry.ToMapKey()) + delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) + delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) // Set JailedAtEpochNumber. 
validatorEntry.JailedAtEpochNumber = currentEpochNumber @@ -2013,10 +1932,9 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { // sender registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - VotingSignatureBlockHeight: blockHeight, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, senderPkString, senderPrivString, registerMetadata, nil, true) require.NoError(t, err) @@ -2031,8 +1949,8 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { // trying to unjail himself, but he was never jailed. // Delete sender's ValidatorEntry from the UtxoView. - delete(mempool.universalUtxoView.ValidatorMapKeyToValidatorEntry, validatorEntry.ToMapKey()) - delete(mempool.readOnlyUtxoView.ValidatorMapKeyToValidatorEntry, validatorEntry.ToMapKey()) + delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) + delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) // Set JailedAtEpochNumber. 
validatorEntry.JailedAtEpochNumber = currentEpochNumber @@ -2161,7 +2079,7 @@ func _generateVotingPublicKeyAndSignature(t *testing.T, transactorPkBytes []byte blsPrivateKey, err := bls.NewPrivateKey() require.NoError(t, err) votingPublicKey := blsPrivateKey.PublicKey() - signaturePayload := CreateValidatorVotingSignaturePayload(transactorPkBytes, votingPublicKey, blockHeight) + signaturePayload := CreateValidatorVotingSignaturePayload(transactorPkBytes) votingSignature, err := blsPrivateKey.Sign(signaturePayload) require.NoError(t, err) return votingPublicKey, votingSignature diff --git a/lib/constants.go b/lib/constants.go index 4530999cd..3ebccda33 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -3,7 +3,6 @@ package lib import ( "encoding/hex" "fmt" - "github.com/pkg/errors" "log" "math" "math/big" @@ -14,6 +13,8 @@ import ( "sort" "time" + "github.com/pkg/errors" + "github.com/holiman/uint256" "github.com/btcsuite/btcd/chaincfg" @@ -606,12 +607,6 @@ type DeSoParams struct { // TODO: Move this to GlobalParamsEntry. ValidatorJailEpochDuration uint64 - // When registering, a validator must include a VotingSignatureBlockHeight within CurrentBlockHeight - // to CurrentBlockHeight + ValidatorVotingSignatureBlockHeightWindow blocks. This is to prevent - // validator registration replay attacks. - // TODO: Move this to GlobalParamsEntry. - ValidatorVotingSignatureBlockHeightWindow uint64 - ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -990,11 +985,6 @@ var DeSoMainnetParams = DeSoParams{ // Jailed validators can be unjailed after a minimum of N elapsed epochs. ValidatorJailEpochDuration: uint64(3), - // When registering, a validator must include a VotingSignatureBlockHeight within CurrentBlockHeight - // to CurrentBlockHeight + ValidatorVotingSignatureBlockHeightWindow blocks. This is to prevent - // validator registration replay attacks. 
- ValidatorVotingSignatureBlockHeightWindow: uint64(300), - ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1229,11 +1219,6 @@ var DeSoTestnetParams = DeSoParams{ // Jailed validators can be unjailed after a minimum of N elapsed epochs. ValidatorJailEpochDuration: uint64(3), - // When registering, a validator must include a VotingSignatureBlockHeight within CurrentBlockHeight - // to CurrentBlockHeight + ValidatorVotingSignatureBlockHeightWindow blocks. This is to prevent - // validator registration replay attacks. - ValidatorVotingSignatureBlockHeightWindow: uint64(300), - ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), diff --git a/lib/db_utils.go b/lib/db_utils.go index cb3892d59..2272501ca 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -7,7 +7,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/holiman/uint256" "io" "log" "math" @@ -18,6 +17,8 @@ import ( "strings" "time" + "github.com/holiman/uint256" + "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" "github.com/dgraph-io/badger/v3" @@ -481,26 +482,44 @@ type DBPrefixes struct { PrefixValidatorByPKID []byte `prefix_id:"[78]" is_state:"true"` // PrefixValidatorByStake: Retrieve the top N validators by stake. - // Prefix, TotalStakeAmountNanos, MaxUint64 - RegisteredAtBlockHeight, ValidatorPKID -> ValidatorPKID - // FIXME: @DH, should we duplicate the ValidatorPKID in the key and the value? - // Alternatively, we could just store and parse the ValidatorPKID from the key - // and store a struct{} as the value. 
That saves on space, but makes retrieving - // the ValidatorPKID from the key bytes more complex than just reading the value - // bytes directly since the key includes other preceding fields. Interesting - // trade-off. Curious your opinion. + // Prefix, TotalStakeAmountNanos, ValidatorPKID -> nil + // Note that we save space by storing a nil value and parsing the ValidatorPKID from the key. PrefixValidatorByStake []byte `prefix_id:"[79]" is_state:"true"` // PrefixGlobalStakeAmountNanos: Retrieve the cumulative stake across all validators. // Prefix -> *uint256.Int PrefixGlobalStakeAmountNanos []byte `prefix_id:"[80]" is_state:"true"` - // PrefixStakeByValidatorByStaker: Retrieve a StakeEntry. + // PrefixStakeByValidatorAndStaker: Retrieve a StakeEntry. // Prefix, ValidatorPKID, StakerPKID -> StakeEntry - PrefixStakeByValidatorByStaker []byte `prefix_id:"[81]" is_state:"true"` + PrefixStakeByValidatorAndStaker []byte `prefix_id:"[81]" is_state:"true"` - // PrefixLockedStakeByValidatorByStakerByLockedAt: Retrieve a LockedStakeEntry. + // PrefixLockedStakeByValidatorAndStakerAndLockedAt: Retrieve a LockedStakeEntry. // Prefix, ValidatorPKID, StakerPKID, LockedAtEpochNumber -> LockedStakeEntry - PrefixLockedStakeByValidatorByStakerByLockedAt []byte `prefix_id:"[82]" is_state:"true"` + // + // The way staking works is that staking to a validator is instant and creates a StakeEntry + // immediately, but UNstaking from a validator has a "cooldown" period before the funds + // are returned to the user. This cooldown period is implemented in Unstake by decrementing + // from the StakeEntry and creating a new LockedStakeEntry with the amount being unstaked. + // the LockedStakeEntry has a LockedAtEpochNumber indicating when the Unstake occurred. This + // allows the user to then call a *second* Unlock txn to pull the LockedStake into their + // wallet balance after enough epochs have passed since LockedAtEpochNumber. 
+ //
+ // Below is an example:
+ // - User stakes 100 DESO to a validator. A StakeEntry is created containing 100 DESO.
+ // - User unstakes 25 DESO at epoch 123. The StakeEntry is decremented to 75 DESO and a
+ // LockedStakeEntry is created containing:
+ // * LockedAmountNanos = 25 DESO, LockedAtEpochNumber = 123
+ // - Suppose the cooldown period is 3 epochs. If the user tries to call UnlockStake at
+ // epoch 124, for example, which is one epoch after they called Unstake, the call will
+ // fail because (CurrentEpoch - LockedAtEpochNumber) = 124 - 123 = 1, which is less
+ // than cooldown = 3.
+ // - After 3 epochs have passed, however, the UnlockStake transaction will work. For
+ // example, suppose the user calls UnlockStake at epoch 133. Now, we have
+ // (CurrentEpoch - LockedAtEpochNumber) = 133 - 123 = 10, which is greater than
+ // cooldown=3. Thus the UnlockStake will succeed, which will result in the
+ // LockedStakeEntry being deleted and 25 DESO being added to the user's balance.
+ PrefixLockedStakeByValidatorAndStakerAndLockedAt []byte `prefix_id:"[82]" is_state:"true"`

 // PrefixCurrentEpoch: Retrieve the current EpochEntry.
// Prefix -> EpochEntry @@ -713,14 +732,14 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco return true, &ValidatorEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByStake) { // prefix_id:"[79]" - return true, &PKID{} + return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixGlobalStakeAmountNanos) { // prefix_id:"[80]" return false, nil - } else if bytes.Equal(prefix, Prefixes.PrefixStakeByValidatorByStaker) { + } else if bytes.Equal(prefix, Prefixes.PrefixStakeByValidatorAndStaker) { // prefix_id:"[81]" return true, &StakeEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixLockedStakeByValidatorByStakerByLockedAt) { + } else if bytes.Equal(prefix, Prefixes.PrefixLockedStakeByValidatorAndStakerAndLockedAt) { // prefix_id:"[82]" return true, &LockedStakeEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixCurrentEpoch) { @@ -5873,7 +5892,7 @@ func (txnMeta *DAOCoinTransferTxindexMetadata) RawEncodeWithoutMetadata(blockHei var data []byte data = append(data, EncodeByteArray([]byte(txnMeta.CreatorUsername))...) - data = append(data, EncodeUint256(&txnMeta.DAOCoinToTransferNanos)...) + data = append(data, VariableEncodeUint256(&txnMeta.DAOCoinToTransferNanos)...) return data } @@ -5886,7 +5905,7 @@ func (txnMeta *DAOCoinTransferTxindexMetadata) RawDecodeWithoutMetadata(blockHei } txnMeta.CreatorUsername = string(creatorUsernameBytes) - DAOCoinToTransferNanos, err := DecodeUint256(rr) + DAOCoinToTransferNanos, err := VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "DAOCoinTransferTxindexMetadata.Decode: Problem reading DAOCoinToTransferNanos") } @@ -5916,8 +5935,8 @@ func (txnMeta *DAOCoinTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint data = append(data, EncodeByteArray([]byte(txnMeta.CreatorUsername))...) data = append(data, EncodeByteArray([]byte(txnMeta.OperationType))...) - data = append(data, EncodeUint256(txnMeta.CoinsToMintNanos)...) 
- data = append(data, EncodeUint256(txnMeta.CoinsToBurnNanos)...) + data = append(data, VariableEncodeUint256(txnMeta.CoinsToMintNanos)...) + data = append(data, VariableEncodeUint256(txnMeta.CoinsToBurnNanos)...) data = append(data, EncodeByteArray([]byte(txnMeta.TransferRestrictionStatus))...) return data @@ -5938,12 +5957,12 @@ func (txnMeta *DAOCoinTxindexMetadata) RawDecodeWithoutMetadata(blockHeight uint } txnMeta.OperationType = string(operationTypeBytes) - txnMeta.CoinsToMintNanos, err = DecodeUint256(rr) + txnMeta.CoinsToMintNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "DAOCoinTxindexMetadata.Decode: problem reading CoinsToMintNanos") } - txnMeta.CoinsToBurnNanos, err = DecodeUint256(rr) + txnMeta.CoinsToBurnNanos, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "DAOCoinTxindexMetadata.Decode: problem reading CoinsToBurnNanos") } @@ -5980,8 +5999,8 @@ func (orderMeta *FilledDAOCoinLimitOrderMetadata) RawEncodeWithoutMetadata(block data = append(data, EncodeByteArray([]byte(orderMeta.TransactorPublicKeyBase58Check))...) data = append(data, EncodeByteArray([]byte(orderMeta.BuyingDAOCoinCreatorPublicKey))...) data = append(data, EncodeByteArray([]byte(orderMeta.SellingDAOCoinCreatorPublicKey))...) - data = append(data, EncodeUint256(orderMeta.CoinQuantityInBaseUnitsBought)...) - data = append(data, EncodeUint256(orderMeta.CoinQuantityInBaseUnitsSold)...) + data = append(data, VariableEncodeUint256(orderMeta.CoinQuantityInBaseUnitsBought)...) + data = append(data, VariableEncodeUint256(orderMeta.CoinQuantityInBaseUnitsSold)...) 
data = append(data, BoolToByte(orderMeta.IsFulfilled)) return data @@ -6011,13 +6030,13 @@ func (orderMeta *FilledDAOCoinLimitOrderMetadata) RawDecodeWithoutMetadata(block orderMeta.SellingDAOCoinCreatorPublicKey = string(sellingDAOCoinCreatorPublicKey) // CoinQuantityInBaseUnitsBought - orderMeta.CoinQuantityInBaseUnitsBought, err = DecodeUint256(rr) + orderMeta.CoinQuantityInBaseUnitsBought, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "FilledDAOCoinLimitOrderMetadata.Decode: Problem reading CoinQuantityInBaseUnitsBought") } // CoinQuantityInBaseUnitsSold - orderMeta.CoinQuantityInBaseUnitsSold, err = DecodeUint256(rr) + orderMeta.CoinQuantityInBaseUnitsSold, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "FilledDAOCoinLimitOrderMetadata.Decode: Problem reading CoinQuantityInBaseUnitsSold") } @@ -6050,8 +6069,8 @@ func (daoMeta *DAOCoinLimitOrderTxindexMetadata) RawEncodeWithoutMetadata(blockH data = append(data, EncodeByteArray([]byte(daoMeta.BuyingDAOCoinCreatorPublicKey))...) data = append(data, EncodeByteArray([]byte(daoMeta.SellingDAOCoinCreatorPublicKey))...) - data = append(data, EncodeUint256(daoMeta.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) - data = append(data, EncodeUint256(daoMeta.QuantityToFillInBaseUnits)...) + data = append(data, VariableEncodeUint256(daoMeta.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) + data = append(data, VariableEncodeUint256(daoMeta.QuantityToFillInBaseUnits)...) data = append(data, UintToBuf(uint64(len(daoMeta.FilledDAOCoinLimitOrdersMetadata)))...) 
for _, order := range daoMeta.FilledDAOCoinLimitOrdersMetadata { @@ -6074,12 +6093,12 @@ func (daoMeta *DAOCoinLimitOrderTxindexMetadata) RawDecodeWithoutMetadata(blockH } daoMeta.SellingDAOCoinCreatorPublicKey = string(sellingDAOCoinCreatorPublicKey) - daoMeta.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = DecodeUint256(rr) + daoMeta.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "DAOCoinLimitOrderTxindexMetadata.Decode: Problem reading ScaledExchangeRateCoinsToSellPerCoinToBuy") } - daoMeta.QuantityToFillInBaseUnits, err = DecodeUint256(rr) + daoMeta.QuantityToFillInBaseUnits, err = VariableDecodeUint256(rr) if err != nil { return errors.Wrapf(err, "DAOCoinLimitOrderTxindexMetadata.Decode: Problem reading QuantityToFillInBaseUnits") } @@ -9323,7 +9342,7 @@ func DBGetPaginatedProfilesByDeSoLocked( func DBKeyForDAOCoinLimitOrder(order *DAOCoinLimitOrderEntry) []byte { key := DBPrefixKeyForDAOCoinLimitOrder(order) - key = append(key, EncodeUint256(order.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) + key = append(key, VariableEncodeUint256(order.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) // Store MaxUint32 - block height to guarantee FIFO // orders as we seek in reverse order. key = append(key, _EncodeUint32(math.MaxUint32-order.BlockHeight)...) 
diff --git a/lib/network.go b/lib/network.go index b740c0faa..0ce83d3e4 100644 --- a/lib/network.go +++ b/lib/network.go @@ -8,7 +8,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/decred/dcrd/dcrec/secp256k1/v4" "io" "math" "math/big" @@ -18,6 +17,8 @@ import ( "strings" "time" + "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" @@ -5967,12 +5968,13 @@ func (tsl *TransactionSpendingLimit) ToBytes(blockHeight uint64) ([]byte, error) for key := range tsl.StakeLimitMap { keys = append(keys, key) } + // Sort the keys to ensure deterministic ordering. sort.Slice(keys, func(ii, jj int) bool { return hex.EncodeToString(keys[ii].Encode()) < hex.EncodeToString(keys[jj].Encode()) }) for _, key := range keys { data = append(data, key.Encode()...) - data = append(data, EncodeUint256(tsl.StakeLimitMap[key])...) + data = append(data, VariableEncodeUint256(tsl.StakeLimitMap[key])...) } } @@ -5987,12 +5989,13 @@ func (tsl *TransactionSpendingLimit) ToBytes(blockHeight uint64) ([]byte, error) for key := range tsl.UnstakeLimitMap { keys = append(keys, key) } + // Sort the keys to ensure deterministic ordering. sort.Slice(keys, func(ii, jj int) bool { return hex.EncodeToString(keys[ii].Encode()) < hex.EncodeToString(keys[jj].Encode()) }) for _, key := range keys { data = append(data, key.Encode()...) - data = append(data, EncodeUint256(tsl.UnstakeLimitMap[key])...) + data = append(data, VariableEncodeUint256(tsl.UnstakeLimitMap[key])...) } } @@ -6007,6 +6010,7 @@ func (tsl *TransactionSpendingLimit) ToBytes(blockHeight uint64) ([]byte, error) for key := range tsl.UnlockStakeLimitMap { keys = append(keys, key) } + // Sort the keys to ensure deterministic ordering. 
sort.Slice(keys, func(ii, jj int) bool { return hex.EncodeToString(keys[ii].Encode()) < hex.EncodeToString(keys[jj].Encode()) }) @@ -6241,7 +6245,7 @@ func (tsl *TransactionSpendingLimit) FromBytes(blockHeight uint64, rr *bytes.Rea return errors.Wrap(err, "Error decoding StakeLimitKey: ") } var stakeLimitDESONanos *uint256.Int - stakeLimitDESONanos, err = DecodeUint256(rr) + stakeLimitDESONanos, err = VariableDecodeUint256(rr) if err != nil { return err } @@ -6265,7 +6269,7 @@ func (tsl *TransactionSpendingLimit) FromBytes(blockHeight uint64, rr *bytes.Rea return errors.Wrap(err, "Error decoding StakeLimitKey: ") } var unstakeLimitDESONanos *uint256.Int - unstakeLimitDESONanos, err = DecodeUint256(rr) + unstakeLimitDESONanos, err = VariableDecodeUint256(rr) if err != nil { return err } @@ -7501,8 +7505,8 @@ func (txnData *DAOCoinLimitOrderMetadata) GetTxnType() TxnType { func (txnData *DAOCoinLimitOrderMetadata) ToBytes(preSignature bool) ([]byte, error) { data := append([]byte{}, EncodeOptionalPublicKey(txnData.BuyingDAOCoinCreatorPublicKey)...) data = append(data, EncodeOptionalPublicKey(txnData.SellingDAOCoinCreatorPublicKey)...) - data = append(data, EncodeOptionalUint256(txnData.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) - data = append(data, EncodeOptionalUint256(txnData.QuantityToFillInBaseUnits)...) + data = append(data, FixedWidthEncodeUint256(txnData.ScaledExchangeRateCoinsToSellPerCoinToBuy)...) + data = append(data, FixedWidthEncodeUint256(txnData.QuantityToFillInBaseUnits)...) data = append(data, UintToBuf(uint64(txnData.OperationType))...) data = append(data, UintToBuf(uint64(txnData.FillType))...) data = append(data, EncodeOptionalBlockHash(txnData.CancelOrderID)...) 
@@ -7549,13 +7553,13 @@ func (txnData *DAOCoinLimitOrderMetadata) FromBytes(data []byte) error { } // Parse ScaledExchangeRateCoinsToSellPerCoinToBuy - ret.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = ReadOptionalUint256(rr) + ret.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = FixedWidthDecodeUint256(rr) if err != nil { return fmt.Errorf("DAOCoinLimitOrderMetadata.FromBytes: Error reading ScaledPrice: %v", err) } // Parse QuantityToFillInBaseUnits - ret.QuantityToFillInBaseUnits, err = ReadOptionalUint256(rr) + ret.QuantityToFillInBaseUnits, err = FixedWidthDecodeUint256(rr) if err != nil { return fmt.Errorf("DAOCoinLimitOrderMetadata.FromBytes: Error reading QuantityToFillInBaseUnits: %v", err) } diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index c4685fea3..cb4e2c16e 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -67,7 +67,7 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { // First, check the UtxoView. epochEntry = bav.CurrentEpochEntry if epochEntry != nil { - return epochEntry.Copy(), nil + return epochEntry, nil } // If not found, check the database. @@ -77,7 +77,7 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { } if epochEntry != nil { // Cache in the UtxoView. - bav.CurrentEpochEntry = epochEntry.Copy() + bav._setCurrentEpochEntry(epochEntry) } return epochEntry, nil } diff --git a/lib/types.go b/lib/types.go index 71bb6630f..3e531f2ca 100644 --- a/lib/types.go +++ b/lib/types.go @@ -3,11 +3,12 @@ package lib import ( "bytes" "fmt" - "github.com/holiman/uint256" - "github.com/pkg/errors" "io" "reflect" "sort" + + "github.com/holiman/uint256" + "github.com/pkg/errors" ) // A PKID is an ID associated with a public key. In the DB, various fields are @@ -273,11 +274,11 @@ func ReadOptionalBlockHash(rr *bytes.Reader) (*BlockHash, error) { return nil, nil } -// EncodeOptionalUint256 guarantees fixed-width encoding which is useful -// in BadgerDB keys. 
It is less space-efficient than EncodeUint256, -// which should be used elsewhere. Both EncodeUint256 and -// EncodeOptionalUint256 can handle nil inputs. -func EncodeOptionalUint256(val *uint256.Int) []byte { +// FixedWidthEncodeUint256 guarantees fixed-width encoding which is useful +// in BadgerDB keys. It is less space-efficient than VariableEncodeUint256, +// which should be used elsewhere. Both VariableEncodeUint256 and +// FixedWidthEncodeUint256 can handle nil inputs. +func FixedWidthEncodeUint256(val *uint256.Int) []byte { if val == nil { return UintToBuf(uint64(0)) } @@ -287,7 +288,7 @@ func EncodeOptionalUint256(val *uint256.Int) []byte { return append(UintToBuf(uint64(len(encodedVal))), encodedVal...) } -func ReadOptionalUint256(rr *bytes.Reader) (*uint256.Int, error) { +func FixedWidthDecodeUint256(rr *bytes.Reader) (*uint256.Int, error) { byteCount, err := ReadUvarint(rr) if err != nil { return nil, err From 5b4fc6392955f45a6d0fa09006cdd88113153c89 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 19 May 2023 12:05:58 -0400 Subject: [PATCH 039/762] Start building out tests. --- lib/pos_leader_schedule.go | 71 +++++++++--------- lib/pos_leader_schedule_test.go | 125 ++++++++++++++++++++++++++++++++ 2 files changed, 163 insertions(+), 33 deletions(-) diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 813f14ee7..42248d9c5 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -3,8 +3,6 @@ package lib import ( "github.com/holiman/uint256" "github.com/pkg/errors" - "math/big" - "math/rand" ) func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { @@ -22,7 +20,7 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving top ValidatorEntries: ") } if len(validatorEntries) == 0 { - return nil, nil + return []*ValidatorEntry{}, nil } // Sum TotalStakeAmountNanos. 
@@ -34,44 +32,51 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { } } + // Pseudocode for algorithm: + // While len(LeaderSchedule) < len(ValidatorEntries) + // RandomUint256 %= TotalStakeAmountNanos. + // For each ValidatorEntry... + // If ValidatorEntry.TotalStakeAmountNanos >= RandomUint256: + // Add ValidatorEntry to LeaderSchedule. + // Remove ValidatorEntry from ValidatorEntries. + // TotalStakeAmountNanos -= ValidatorEntry.TotalStakeAmountNanos. var leaderSchedule []*ValidatorEntry - r := rand.New(rand.NewSource(int64(currentRandomSeedHash.ToUint64()))) + for len(leaderSchedule) < len(validatorEntries) { + // Take RandomUint256 % TotalStakeAmountNanos. + randomUint256 := uint256.NewInt().Mod(currentRandomSeedHash.ToUint256(), totalStakeAmountNanos) - // In a loop... + // Keep track of the stake seen so far in this loop. + sumStakeAmountNanos := uint256.NewInt() - for _, validatorEntry := range validatorEntries { - // Pick a random uin256.Int between 0 and TotalStakeAmountNanos. - randomUint256, err := RandomUint256(r) - if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error generating random uint256: ") - } + for validatorIndex, validatorEntry := range validatorEntries { + // Sum the ValidatorEntry.TotalStakeAmountNanos to the stake seen so far. + sumStakeAmountNanos, err = SafeUint256().Add(sumStakeAmountNanos, validatorEntry.TotalStakeAmountNanos) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error summing TotalStakeAmountNanos: ") + } - // Iterate through ValidatorEntries until ValidatorEntry.TotalStakeAmountNanos >= randomIter. - if validatorEntry.TotalStakeAmountNanos.Gt(randomUint256) { - leaderSchedule = append(leaderSchedule, validatorEntry) - } + // If the sum of the stake seen so far is less than the RandomUint256, skip this validator. + if sumStakeAmountNanos.Lt(randomUint256) { + continue + } - // Add that ValidatorEntry to the leaderSchedule. 
Remove that ValidatorEntry from the validatorEntries slice. - // Subtract the ValidatorEntry.TotalStakeAmountNanos from the TotalStakeAmountNanos. - } + // If we get to this point, the current validator is the + // one we should add to the leader schedule next. - return leaderSchedule, nil -} + // Add the current ValidatorEntry to the leaderSchedule. + leaderSchedule = append(leaderSchedule, validatorEntry) -func RandomUint256(r *rand.Rand) (*uint256.Int, error) { - digits := []byte("0123456789abcdef") - uint256ByteSlice := make([]byte, 256) - for i := 0; i < 256; i++ { - uint256ByteSlice[i] = digits[r.Intn(16)] - } - uint256String := string(uint256ByteSlice) + // Remove the current ValidatorEntry from the ValidatorEntries slice. + validatorEntries = append(validatorEntries[:validatorIndex], validatorEntries[validatorIndex+1:]...) - uint256BigInt, success := big.NewInt(0).SetString(uint256String, 16) - if !success { - return nil, errors.New("RandomUint256: problem converting string to big.Int") + // Subtract the ValidatorEntry.TotalStakeAmountNanos from the TotalStakeAmountNanos. + totalStakeAmountNanos, err = SafeUint256().Sub(totalStakeAmountNanos, validatorEntry.TotalStakeAmountNanos) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error subtracting TotalStakeAmountNanos: ") + } + } } - randUint256 := uint256.NewInt() - randUint256.SetFromBig(uint256BigInt) - return randUint256, nil + + return leaderSchedule, nil } diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 55c21f80a..2ee28ad9c 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -1 +1,126 @@ package lib + +import ( + "fmt" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" + "testing" +) + +func TestGenerateLeaderSchedule(t *testing.T) { + // Initialize fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + + // Initialize test chain and miner. 
+ chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize PoS txn types block height. + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err := miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. + blockHeight := uint64(chain.blockTip().Height + 1) + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m5", senderPkString, m5Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m6", senderPkString, m6Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID + m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID + m3PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, 
m3PkBytes).PKID + m4PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m4PkBytes).PKID + m5PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m5PkBytes).PKID + m6PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m6PkBytes).PKID + + type TestValidator struct { + PublicKey string + PrivateKey string + PKID *PKID + } + + testValidators := []*TestValidator{ + {PublicKey: m0Pub, PrivateKey: m0Priv, PKID: m0PKID}, // Stake = 100 + {PublicKey: m1Pub, PrivateKey: m1Priv, PKID: m1PKID}, // Stake = 200 + {PublicKey: m2Pub, PrivateKey: m2Priv, PKID: m2PKID}, // Stake = 300 + {PublicKey: m3Pub, PrivateKey: m3Priv, PKID: m3PKID}, // Stake = 400 + {PublicKey: m4Pub, PrivateKey: m4Priv, PKID: m4PKID}, // Stake = 500 + {PublicKey: m5Pub, PrivateKey: m5Priv, PKID: m5PKID}, // Stake = 600 + {PublicKey: m6Pub, PrivateKey: m6Priv, PKID: m6PKID}, // Stake = 700 + } + + { + // ParamUpdater set min fee rate + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } + { + // All validators register and stake to themselves. + for index, testValidator := range testValidators { + validatorPkBytes, _, err := Base58CheckDecode(testValidator.PublicKey) + require.NoError(t, err) + + // Validator registers. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, validatorPkBytes, blockHeight) + registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte(fmt.Sprintf("https://%d.example.com", index))}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + } + _, err = _submitRegisterAsValidatorTxn( + testMeta, testValidator.PublicKey, testValidator.PrivateKey, registerMetadata, nil, true, + ) + require.NoError(t, err) + + // Validator stakes to himself. 
+ stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(validatorPkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64((uint64(index) + 1) * 100), + } + _, err = _submitStakeTxn(testMeta, testValidator.PublicKey, testValidator.PrivateKey, stakeMetadata, nil, true) + require.NoError(t, err) + } + } + { + // Verify GetTopActiveValidatorsByStake. + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + validatorEntries, err := utxoView.GetTopActiveValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, validatorEntries, 7) + } +} From fcdd168d377574645b88174312befe3ab514abc9 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 19 May 2023 12:06:19 -0400 Subject: [PATCH 040/762] Add ToUint256() method. --- lib/pos_random_seed.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index 2c83d1116..6e7d3163d 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -6,12 +6,17 @@ import ( "github.com/deso-protocol/core/bls" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" + "github.com/holiman/uint256" "github.com/pkg/errors" "io" ) type RandomSeedHash [32]byte +func (randomSeedHash *RandomSeedHash) ToUint256() *uint256.Int { + return uint256.NewInt().SetBytes(randomSeedHash.ToBytes()) +} + func (randomSeedHash *RandomSeedHash) Copy() *RandomSeedHash { randomSeedHashCopy := &RandomSeedHash{} copy(randomSeedHashCopy[:], randomSeedHash[:]) From 29ebd42cda92ca0f36b630f9899efddd061a93d0 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 19 May 2023 13:23:56 -0400 Subject: [PATCH 041/762] Add more to test. 
--- lib/pos_leader_schedule_test.go | 37 +++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 2ee28ad9c..74e4e19e5 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -1,3 +1,5 @@ +//go:build relic + package lib import ( @@ -8,6 +10,9 @@ import ( ) func TestGenerateLeaderSchedule(t *testing.T) { + var utxoView *UtxoView + var err error + // Initialize fork heights. setBalanceModelBlockHeights() defer resetBalanceModelBlockHeights() @@ -23,7 +28,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { // Mine a few blocks to give the senderPkString some money. for ii := 0; ii < 10; ii++ { - _, err := miner.MineAndProcessSingleBlock(0, mempool) + _, err = miner.MineAndProcessSingleBlock(0, mempool) require.NoError(t, err) } @@ -73,6 +78,12 @@ func TestGenerateLeaderSchedule(t *testing.T) { {PublicKey: m6Pub, PrivateKey: m6Priv, PKID: m6PKID}, // Stake = 700 } + // Seed a CurrentEpochEntry. + utxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + utxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, utxoView.FlushToDb(blockHeight)) + { // ParamUpdater set min fee rate params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true @@ -117,10 +128,32 @@ func TestGenerateLeaderSchedule(t *testing.T) { } { // Verify GetTopActiveValidatorsByStake. 
- utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + utxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) validatorEntries, err := utxoView.GetTopActiveValidatorsByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 7) + require.True(t, validatorEntries[0].ValidatorPKID.Eq(m6PKID)) + require.True(t, validatorEntries[1].ValidatorPKID.Eq(m5PKID)) + require.True(t, validatorEntries[2].ValidatorPKID.Eq(m4PKID)) + require.True(t, validatorEntries[3].ValidatorPKID.Eq(m3PKID)) + require.True(t, validatorEntries[4].ValidatorPKID.Eq(m2PKID)) + require.True(t, validatorEntries[5].ValidatorPKID.Eq(m1PKID)) + require.True(t, validatorEntries[6].ValidatorPKID.Eq(m0PKID)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos.Uint64(), uint64(700)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos.Uint64(), uint64(600)) + require.Equal(t, validatorEntries[2].TotalStakeAmountNanos.Uint64(), uint64(500)) + require.Equal(t, validatorEntries[3].TotalStakeAmountNanos.Uint64(), uint64(400)) + require.Equal(t, validatorEntries[4].TotalStakeAmountNanos.Uint64(), uint64(300)) + require.Equal(t, validatorEntries[5].TotalStakeAmountNanos.Uint64(), uint64(200)) + require.Equal(t, validatorEntries[6].TotalStakeAmountNanos.Uint64(), uint64(100)) + } + { + // Test GenerateLeaderSchedule(). + utxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + leaderSchedule, err := utxoView.GenerateLeaderSchedule() + require.NoError(t, err) + require.Len(t, leaderSchedule, 7) } } From 6144324c67f40264d441dc96f1205045ceb5385a Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 22 May 2023 10:41:02 -0400 Subject: [PATCH 042/762] Update tests for pos leader schedule. 
--- lib/pos_leader_schedule.go | 21 +++-- lib/pos_leader_schedule_test.go | 156 +++++++++++++++++++++++++------- 2 files changed, 136 insertions(+), 41 deletions(-) diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 42248d9c5..a87d62ae6 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -32,16 +32,22 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { } } - // Pseudocode for algorithm: + // Pseudocode for leader-selection algorithm: // While len(LeaderSchedule) < len(ValidatorEntries) // RandomUint256 %= TotalStakeAmountNanos. // For each ValidatorEntry... + // Skip if ValidatorEntry.TotalStakeAmountNanos is zero. + // Skip if ValidatorEntry has already been added to the leader schedule. // If ValidatorEntry.TotalStakeAmountNanos >= RandomUint256: // Add ValidatorEntry to LeaderSchedule. - // Remove ValidatorEntry from ValidatorEntries. // TotalStakeAmountNanos -= ValidatorEntry.TotalStakeAmountNanos. var leaderSchedule []*ValidatorEntry + // We also track a set of ValidatorPKIDs that have already been + // added to the LeaderSchedule so that we can skip them when + // iterating over ValidatorEntries in O(1) time. + leaderSchedulePKIDs := NewSet([]*PKID{}) + for len(leaderSchedule) < len(validatorEntries) { // Take RandomUint256 % TotalStakeAmountNanos. randomUint256 := uint256.NewInt().Mod(currentRandomSeedHash.ToUint256(), totalStakeAmountNanos) @@ -49,7 +55,12 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { // Keep track of the stake seen so far in this loop. sumStakeAmountNanos := uint256.NewInt() - for validatorIndex, validatorEntry := range validatorEntries { + for _, validatorEntry := range validatorEntries { + // Skip if ValidatorEntry has already been added to the leader schedule. + if leaderSchedulePKIDs.Includes(validatorEntry.ValidatorPKID) { + continue + } + // Sum the ValidatorEntry.TotalStakeAmountNanos to the stake seen so far. 
sumStakeAmountNanos, err = SafeUint256().Add(sumStakeAmountNanos, validatorEntry.TotalStakeAmountNanos) if err != nil { @@ -66,9 +77,7 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { // Add the current ValidatorEntry to the leaderSchedule. leaderSchedule = append(leaderSchedule, validatorEntry) - - // Remove the current ValidatorEntry from the ValidatorEntries slice. - validatorEntries = append(validatorEntries[:validatorIndex], validatorEntries[validatorIndex+1:]...) + leaderSchedulePKIDs.Add(validatorEntry.ValidatorPKID) // Subtract the ValidatorEntry.TotalStakeAmountNanos from the TotalStakeAmountNanos. totalStakeAmountNanos, err = SafeUint256().Sub(totalStakeAmountNanos, validatorEntry.TotalStakeAmountNanos) diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 74e4e19e5..bf253fcc6 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -3,6 +3,7 @@ package lib import ( + "crypto/sha256" "fmt" "github.com/holiman/uint256" "github.com/stretchr/testify/require" @@ -10,9 +11,6 @@ import ( ) func TestGenerateLeaderSchedule(t *testing.T) { - var utxoView *UtxoView - var err error - // Initialize fork heights. setBalanceModelBlockHeights() defer resetBalanceModelBlockHeights() @@ -28,7 +26,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { // Mine a few blocks to give the senderPkString some money. 
for ii := 0; ii < 10; ii++ { - _, err = miner.MineAndProcessSingleBlock(0, mempool) + _, err := miner.MineAndProcessSingleBlock(0, mempool) require.NoError(t, err) } @@ -62,6 +60,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { m5PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m5PkBytes).PKID m6PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m6PkBytes).PKID + // Helper utils type TestValidator struct { PublicKey string PrivateKey string @@ -78,11 +77,53 @@ func TestGenerateLeaderSchedule(t *testing.T) { {PublicKey: m6Pub, PrivateKey: m6Priv, PKID: m6PKID}, // Stake = 700 } + newUtxoView := func() *UtxoView { + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + return utxoView + } + + registerValidator := func(testValidator *TestValidator) { + validatorPkBytes, _, err := Base58CheckDecode(testValidator.PublicKey) + require.NoError(t, err) + + // Validator registers. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, validatorPkBytes, blockHeight) + registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", testValidator.PublicKey))}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + } + _, err = _submitRegisterAsValidatorTxn( + testMeta, testValidator.PublicKey, testValidator.PrivateKey, registerMetadata, nil, true, + ) + require.NoError(t, err) + } + + stakeValidator := func(testValidator *TestValidator, stakeAmountNanos uint64) { + validatorPkBytes, _, err := Base58CheckDecode(testValidator.PublicKey) + require.NoError(t, err) + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(validatorPkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(stakeAmountNanos), + } + _, err = _submitStakeTxn(testMeta, testValidator.PublicKey, testValidator.PrivateKey, stakeMetadata, nil, true) + require.NoError(t, err) + } + + setCurrentRandomSeedHash := func(seed string) { + randomSHA256 
:= sha256.Sum256([]byte(seed)) + randomSeedHash, err := (&RandomSeedHash{}).FromBytes(randomSHA256[:]) + require.NoError(t, err) + tmpUtxoView := newUtxoView() + tmpUtxoView._setCurrentRandomSeedHash(randomSeedHash) + require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) + } + // Seed a CurrentEpochEntry. - utxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot) - require.NoError(t, err) - utxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) - require.NoError(t, utxoView.FlushToDb(blockHeight)) + tmpUtxoView := newUtxoView() + tmpUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) { // ParamUpdater set min fee rate @@ -100,37 +141,57 @@ func TestGenerateLeaderSchedule(t *testing.T) { ) } { - // All validators register and stake to themselves. + // Test GenerateLeaderSchedule() edge case: no registered validators. + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + require.NoError(t, err) + require.Empty(t, leaderSchedule) + } + { + // m0 registers as validator. + registerValidator(testValidators[0]) + } + { + // Test GenerateLeaderSchedule() edge case: one registered validator with zero stake. + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + require.NoError(t, err) + require.Empty(t, leaderSchedule) + } + { + // m0 stakes to himself. + stakeValidator(testValidators[0], 100) + } + { + // Test GenerateLeaderSchedule() edge case: one registered validator with non-zero stake. + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + require.NoError(t, err) + require.Len(t, leaderSchedule, 1) + require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[0].PKID) + } + { + // m1 registers and stakes to himself. 
+ registerValidator(testValidators[1]) + stakeValidator(testValidators[1], 200) + } + { + // Test GenerateLeaderSchedule() edge case: two registered validators with non-zero stake. + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + require.NoError(t, err) + require.Len(t, leaderSchedule, 2) + require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[1].PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[0].PKID) + } + { + // All remaining validators register and stake to themselves. for index, testValidator := range testValidators { - validatorPkBytes, _, err := Base58CheckDecode(testValidator.PublicKey) - require.NoError(t, err) - - // Validator registers. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, validatorPkBytes, blockHeight) - registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte(fmt.Sprintf("https://%d.example.com", index))}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - } - _, err = _submitRegisterAsValidatorTxn( - testMeta, testValidator.PublicKey, testValidator.PrivateKey, registerMetadata, nil, true, - ) - require.NoError(t, err) - - // Validator stakes to himself. - stakeMetadata := &StakeMetadata{ - ValidatorPublicKey: NewPublicKey(validatorPkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64((uint64(index) + 1) * 100), + registerValidator(testValidator) + if index > 1 { + stakeValidator(testValidator, uint64((index+1)*100)) } - _, err = _submitStakeTxn(testMeta, testValidator.PublicKey, testValidator.PrivateKey, stakeMetadata, nil, true) - require.NoError(t, err) } } { // Verify GetTopActiveValidatorsByStake. 
- utxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot) - require.NoError(t, err) - validatorEntries, err := utxoView.GetTopActiveValidatorsByStake(10) + validatorEntries, err := newUtxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 7) require.True(t, validatorEntries[0].ValidatorPKID.Eq(m6PKID)) @@ -150,10 +211,35 @@ func TestGenerateLeaderSchedule(t *testing.T) { } { // Test GenerateLeaderSchedule(). - utxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot) + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) - leaderSchedule, err := utxoView.GenerateLeaderSchedule() + require.Len(t, leaderSchedule, 7) + require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[6].PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[5].PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, testValidators[4].PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, testValidators[3].PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, testValidators[2].PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, testValidators[1].PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, testValidators[0].PKID) + } + { + // Seed a new CurrentRandomSeedHash. + setCurrentRandomSeedHash("3b4b028b-6a7c-4b38-bea3-a5f59b34e02d") + } + { + // Test GenerateLeaderSchedule(). 
+ leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 7) + require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[0].PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[1].PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, testValidators[2].PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, testValidators[3].PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, testValidators[4].PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, testValidators[5].PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, testValidators[6].PKID) } + + // Test rollbacks. + _executeAllTestRollbackAndFlush(testMeta) } From a31181ed16cb84df360352fad016fb16c6b0e1e1 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 22 May 2023 11:10:24 -0400 Subject: [PATCH 043/762] Add more tests. --- lib/pos_leader_schedule.go | 10 ++++++ lib/pos_leader_schedule_test.go | 63 ++++++++++++++++++++++++++++----- 2 files changed, 65 insertions(+), 8 deletions(-) diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index a87d62ae6..a7d3851f9 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -1,6 +1,7 @@ package lib import ( + "crypto/sha256" "github.com/holiman/uint256" "github.com/pkg/errors" ) @@ -34,6 +35,7 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { // Pseudocode for leader-selection algorithm: // While len(LeaderSchedule) < len(ValidatorEntries) + // Hash the CurrentRandomSeedHash to generate a new RandomUint256. // RandomUint256 %= TotalStakeAmountNanos. // For each ValidatorEntry... // Skip if ValidatorEntry.TotalStakeAmountNanos is zero. @@ -49,6 +51,14 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { leaderSchedulePKIDs := NewSet([]*PKID{}) for len(leaderSchedule) < len(validatorEntries) { + // Hash the CurrentRandomSeedHash each iteration. 
This generates + // multiple predictable pseudorandom values from the same seed. + currentRandomSHA256 := sha256.Sum256(currentRandomSeedHash.ToBytes()) + currentRandomSeedHash, err = (&RandomSeedHash{}).FromBytes(currentRandomSHA256[:]) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error hashing CurrentRandomSeedHash: ") + } + // Take RandomUint256 % TotalStakeAmountNanos. randomUint256 := uint256.NewInt().Mod(currentRandomSeedHash.ToUint256(), totalStakeAmountNanos) diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index bf253fcc6..0fe3dd2c9 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -177,8 +177,8 @@ func TestGenerateLeaderSchedule(t *testing.T) { leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 2) - require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[1].PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[0].PKID) + require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[0].PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[1].PKID) } { // All remaining validators register and stake to themselves. @@ -210,6 +210,22 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.Equal(t, validatorEntries[6].TotalStakeAmountNanos.Uint64(), uint64(100)) } { + // Test GenerateLeaderSchedule(). 
+ leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + require.NoError(t, err) + require.Len(t, leaderSchedule, 7) + require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[3].PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[2].PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, testValidators[1].PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, testValidators[0].PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, testValidators[6].PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, testValidators[5].PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, testValidators[4].PKID) + } + { + // Seed a new CurrentRandomSeedHash. + setCurrentRandomSeedHash("3b4b028b-6a7c-4b38-bea3-a5f59b34e02d") + // Test GenerateLeaderSchedule(). leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) @@ -224,19 +240,50 @@ func TestGenerateLeaderSchedule(t *testing.T) { } { // Seed a new CurrentRandomSeedHash. - setCurrentRandomSeedHash("3b4b028b-6a7c-4b38-bea3-a5f59b34e02d") + setCurrentRandomSeedHash("b4b38eaf-216d-4132-8725-a481baaf87cc") + + // Test GenerateLeaderSchedule(). + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + require.NoError(t, err) + require.Len(t, leaderSchedule, 7) + require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[6].PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[5].PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, testValidators[4].PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, testValidators[3].PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, testValidators[2].PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, testValidators[1].PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, testValidators[0].PKID) } { + // Seed a new CurrentRandomSeedHash. + setCurrentRandomSeedHash("7c87f290-d9ec-4cb4-ad47-c64c8ca46f0e") + // Test GenerateLeaderSchedule(). 
leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[0].PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[1].PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, testValidators[2].PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, testValidators[3].PKID) + require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[3].PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[2].PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, testValidators[1].PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, testValidators[0].PKID) require.Equal(t, leaderSchedule[4].ValidatorPKID, testValidators[4].PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, testValidators[5].PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, testValidators[6].PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, testValidators[5].PKID) + } + { + // Seed a new CurrentRandomSeedHash. + setCurrentRandomSeedHash("0999a3ce-15e4-455a-b061-6081b88b237d") + + // Test GenerateLeaderSchedule(). 
+ leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + require.NoError(t, err) + require.Len(t, leaderSchedule, 7) + require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[5].PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[4].PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, testValidators[3].PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, testValidators[2].PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, testValidators[1].PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, testValidators[0].PKID) require.Equal(t, leaderSchedule[6].ValidatorPKID, testValidators[6].PKID) } From 136e3883bc07c0038f12aa4e7f85db1f676c8a78 Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Mon, 22 May 2023 11:18:18 -0400 Subject: [PATCH 044/762] Exclude zero-stake validators from TopValidatorsByStake. (#535) --- lib/block_view_validator.go | 6 ++++-- lib/block_view_validator_test.go | 32 +++++++++++--------------------- 2 files changed, 15 insertions(+), 23 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 647044cd6..b274a9b76 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1844,10 +1844,12 @@ func (bav *UtxoView) GetTopActiveValidatorsByStake(limit int) ([]*ValidatorEntry bav._setValidatorEntryMappings(validatorEntry) } } - // Pull !isDeleted, active ValidatorEntries from the UtxoView. + // Pull !isDeleted, active ValidatorEntries from the UtxoView with stake > 0. 
var validatorEntries []*ValidatorEntry for _, validatorEntry := range bav.ValidatorPKIDToValidatorEntry { - if !validatorEntry.isDeleted && validatorEntry.Status() == ValidatorStatusActive { + if !validatorEntry.isDeleted && + validatorEntry.Status() == ValidatorStatusActive && + !validatorEntry.TotalStakeAmountNanos.IsZero() { validatorEntries = append(validatorEntries, validatorEntry) } } diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 96126d100..1eb411df3 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -228,15 +228,11 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Equal(t, string(validatorEntry.ExtraData["TestKey"]), "TestValue1") } { - // Query: retrieve top active ValidatorEntries by stake - validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(0) + // Query: retrieve top active ValidatorEntries by stake. + // Should be empty since m0's TotalStakeAmountNanos is zero. + validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(1) require.NoError(t, err) require.Empty(t, validatorEntries) - - validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(2) - require.NoError(t, err) - require.Len(t, validatorEntries, 1) - require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) } { // Query: retrieve GlobalStakeAmountNanos @@ -786,12 +782,10 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) - // Verify top validators. + // Verify top validators is empty since m0's TotalStakeAmountNanos is zero. 
validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) - require.Len(t, validatorEntries, 1) - require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt()) + require.Empty(t, validatorEntries) } { // m1 registers as a validator. @@ -804,10 +798,10 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) - // Verify top validators. + // Verify top validators is empty since both validators' TotalStakeAmountNanos are zero. validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) - require.Len(t, validatorEntries, 2) + require.Empty(t, validatorEntries) } { // m2 registers as a validator. @@ -820,10 +814,10 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { _, err = _submitRegisterAsValidatorTxn(testMeta, m2Pub, m2Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) - // Verify top validators. + // Verify top validators is empty since all three validators' TotalStakeAmountNanos are zero. validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) - require.Len(t, validatorEntries, 3) + require.Empty(t, validatorEntries) } { // m3 stakes 100 DESO nanos with m0. @@ -891,13 +885,11 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { // Verify top validators. 
validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) - require.Len(t, validatorEntries, 3) + require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - require.Equal(t, validatorEntries[2].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(0)) } { // m2 unregisters as validator. @@ -907,11 +899,9 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { // Verify top validators. validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) - require.Len(t, validatorEntries, 2) + require.Len(t, validatorEntries, 1) require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - require.Equal(t, validatorEntries[1].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(0)) } { // m4 stakes with m1. From ae9032c7529e66ee4554687dc8b4f1d7bf0821db Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Mon, 22 May 2023 13:02:31 -0400 Subject: [PATCH 045/762] Mf/remove jailed stake from global active stake (#533) * Remove jailed stake from global active stake. * Write to the UtxoView if no errors in jailing. * Add additional sanity checks. * PR review feedback. 
--- lib/block_view.go | 18 +- lib/block_view_flush.go | 2 +- lib/block_view_stake.go | 156 ++++++++------- lib/block_view_stake_test.go | 331 +++++++++++++++++++++++++++++-- lib/block_view_types.go | 16 +- lib/block_view_validator.go | 226 +++++++++++++-------- lib/block_view_validator_test.go | 67 +++---- lib/db_utils.go | 6 +- 8 files changed, 592 insertions(+), 230 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 83fbb069e..3b3a0c497 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -118,8 +118,8 @@ type UtxoView struct { // Validator mappings ValidatorPKIDToValidatorEntry map[PKID]*ValidatorEntry - // Global stake across all validators - GlobalStakeAmountNanos *uint256.Int + // The global active stake is the sum of all stake across validators who have Status = Active. + GlobalActiveStakeAmountNanos *uint256.Int // Stake mappings StakeMapKeyToStakeEntry map[StakeMapKey]*StakeEntry @@ -222,10 +222,10 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // ValidatorEntries bav.ValidatorPKIDToValidatorEntry = make(map[PKID]*ValidatorEntry) - // Global stake across validators. We deliberately want this to initialize to nil and not zero - // since a zero value will overwrite an existing GlobalStakeAmountNanos value in the db, whereas - // a nil GlobalStakeAmountNanos value signifies that this value was never set. - bav.GlobalStakeAmountNanos = nil + // Global active stake across validators. We deliberately want this to initialize to nil and not zero + // since a zero value will overwrite an existing GlobalActiveStakeAmountNanos value in the db, whereas + // a nil GlobalActiveStakeAmountNanos value signifies that this value was never set. + bav.GlobalActiveStakeAmountNanos = nil // StakeEntries bav.StakeMapKeyToStakeEntry = make(map[StakeMapKey]*StakeEntry) @@ -493,9 +493,9 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.ValidatorPKIDToValidatorEntry[entryKey] = entry.Copy() } - // Copy the GlobalStakeAmountNanos. 
- if bav.GlobalStakeAmountNanos != nil { - newView.GlobalStakeAmountNanos = bav.GlobalStakeAmountNanos.Clone() + // Copy the GlobalActiveStakeAmountNanos. + if bav.GlobalActiveStakeAmountNanos != nil { + newView.GlobalActiveStakeAmountNanos = bav.GlobalActiveStakeAmountNanos.Clone() } // Copy the StakeEntries diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 6ae70f214..684e79efe 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -143,7 +143,7 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushValidatorEntriesToDbWithTxn(txn, blockHeight); err != nil { return err } - if err := bav._flushGlobalStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { + if err := bav._flushGlobalActiveStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { return err } if err := bav._flushStakeEntriesToDbWithTxn(txn, blockHeight); err != nil { diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 8c81416d3..66df9a90b 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1226,19 +1226,22 @@ func (bav *UtxoView) _connectStake( // 4. Set the new ValidatorEntry. bav._setValidatorEntryMappings(currentValidatorEntry) - // Increase the GlobalStakeAmountNanos. - // Retrieve the existing GlobalStakeAmountNanos. - // The PrevGlobalStakeAmountNanos will be restored if we disconnect this transaction. - prevGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectStake: error retrieving GlobalStakeAmountNanos: ") - } - globalStakeAmountNanos, err := SafeUint256().Add(prevGlobalStakeAmountNanos, txMeta.StakeAmountNanos) - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectStake: error adding StakeAmountNanos to GlobalStakeAmountNanos: ") + // Increase the GlobalActiveStakeAmountNanos if the validator is active. 
+ var prevGlobalActiveStakeAmountNanos *uint256.Int + if currentValidatorEntry.Status() == ValidatorStatusActive { + // Retrieve the existing GlobalActiveStakeAmountNanos. + // The PrevGlobalActiveStakeAmountNanos will be restored if we disconnect this transaction. + prevGlobalActiveStakeAmountNanos, err = bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: error retrieving GlobalActiveStakeAmountNanos: ") + } + globalActiveStakeAmountNanos, err := SafeUint256().Add(prevGlobalActiveStakeAmountNanos, txMeta.StakeAmountNanos) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: error adding StakeAmountNanos to GlobalActiveStakeAmountNanos: ") + } + // Set the new GlobalActiveStakeAmountNanos. + bav._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) } - // Set the new GlobalStakeAmountNanos. - bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) // Add the StakeAmountNanos to TotalOutput. The coins being staked are already // part of the TotalInput. But they are not burned, so they are an implicit @@ -1250,10 +1253,10 @@ func (bav *UtxoView) _connectStake( // Create a UTXO operation utxoOpForTxn := &UtxoOperation{ - Type: OperationTypeStake, - PrevValidatorEntry: prevValidatorEntry, - PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, - PrevStakeEntries: prevStakeEntries, + Type: OperationTypeStake, + PrevValidatorEntry: prevValidatorEntry, + PrevGlobalActiveStakeAmountNanos: prevGlobalActiveStakeAmountNanos, + PrevStakeEntries: prevStakeEntries, } if err = bav.SanityCheckStakeTxn( transactorPKIDEntry.PKID, utxoOpForTxn, txMeta.StakeAmountNanos, txn.TxnFeeNanos, prevBalanceNanos, @@ -1338,8 +1341,10 @@ func (bav *UtxoView) _disconnectStake( bav._setStakeEntryMappings(operationData.PrevStakeEntries[0]) } - // Restore the PrevGlobalStakeAmountNanos. 
- bav._setGlobalStakeAmountNanos(operationData.PrevGlobalStakeAmountNanos) + // Restore the PrevGlobalActiveStakeAmountNanos, if exists. + if operationData.PrevGlobalActiveStakeAmountNanos != nil { + bav._setGlobalActiveStakeAmountNanos(operationData.PrevGlobalActiveStakeAmountNanos) + } // Disconnect the BasicTransfer. Disconnecting the BasicTransfer also returns // the extra spend associated with the amount the transactor staked. @@ -1461,18 +1466,21 @@ func (bav *UtxoView) _connectUnstake( // 4. Set the new ValidatorEntry. bav._setValidatorEntryMappings(currentValidatorEntry) - // Decrease the GlobalStakeAmountNanos. - // 1. Retrieve the existing GlobalStakeAmountNanos. This will be restored if we disconnect this txn. - prevGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error retrieving GlobalStakeAmountNanos: ") - } - globalStakeAmountNanos, err := SafeUint256().Sub(prevGlobalStakeAmountNanos, txMeta.UnstakeAmountNanos) - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error subtracting UnstakeAmountNanos from GlobalStakeAmountNanos: ") + // Decrease the GlobalActiveStakeAmountNanos if the validator is active. + var prevGlobalActiveStakeAmountNanos *uint256.Int + if currentValidatorEntry.Status() == ValidatorStatusActive { + // 1. Retrieve the existing GlobalActiveStakeAmountNanos. This will be restored if we disconnect this txn. + prevGlobalActiveStakeAmountNanos, err = bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error retrieving GlobalActiveStakeAmountNanos: ") + } + globalActiveStakeAmountNanos, err := SafeUint256().Sub(prevGlobalActiveStakeAmountNanos, txMeta.UnstakeAmountNanos) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error subtracting UnstakeAmountNanos from GlobalActiveStakeAmountNanos: ") + } + // 2. Set the new GlobalActiveStakeAmountNanos. 
+ bav._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) } - // 2. Set the new GlobalStakeAmountNanos. - bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) // Retrieve the CurrentEpochNumber. currentEpochNumber, err := bav.GetCurrentEpochNumber() @@ -1524,11 +1532,11 @@ func (bav *UtxoView) _connectUnstake( // Create a UTXO operation. utxoOpForTxn := &UtxoOperation{ - Type: OperationTypeUnstake, - PrevValidatorEntry: prevValidatorEntry, - PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, - PrevStakeEntries: prevStakeEntries, - PrevLockedStakeEntries: prevLockedStakeEntries, + Type: OperationTypeUnstake, + PrevValidatorEntry: prevValidatorEntry, + PrevGlobalActiveStakeAmountNanos: prevGlobalActiveStakeAmountNanos, + PrevStakeEntries: prevStakeEntries, + PrevLockedStakeEntries: prevLockedStakeEntries, } if err = bav.SanityCheckUnstakeTxn(transactorPKIDEntry.PKID, utxoOpForTxn, txMeta.UnstakeAmountNanos); err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ") @@ -1612,8 +1620,10 @@ func (bav *UtxoView) _disconnectUnstake( } bav._setStakeEntryMappings(operationData.PrevStakeEntries[0]) - // Restore the PrevGlobalStakeAmountNanos. - bav._setGlobalStakeAmountNanos(operationData.PrevGlobalStakeAmountNanos) + // Restore the PrevGlobalActiveStakeAmountNanos, if exists. + if operationData.PrevGlobalActiveStakeAmountNanos != nil { + bav._setGlobalActiveStakeAmountNanos(operationData.PrevGlobalActiveStakeAmountNanos) + } // Retrieve the CurrentEpochNumber. currentEpochNumber, err := bav.GetCurrentEpochNumber() @@ -2021,22 +2031,26 @@ func (bav *UtxoView) SanityCheckStakeTxn( return errors.New("SanityCheckStakeTxn: StakeAmountNanos increase does not match") } - // Validate GlobalStakeAmountNanos increase. 
- if utxoOp.PrevGlobalStakeAmountNanos == nil { - return errors.New("SanityCheckStakeTxn: nil PrevGlobalStakeAmountNanos provided") - } - currentGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() - if err != nil { - return errors.Wrapf(err, "SanityCheckStakeTxn: error retrieving CurrentGlobalStakeAmountNanos: ") - } - globalStakeAmountNanosIncrease, err := SafeUint256().Sub( - currentGlobalStakeAmountNanos, utxoOp.PrevGlobalStakeAmountNanos, - ) - if err != nil { - return errors.Wrapf(err, "SanityCheckStakeTxn: error calculating GlobalStakeAmountNanos increase: ") - } - if !globalStakeAmountNanosIncrease.Eq(amountNanos) { - return errors.New("SanityCheckStakeTxn: GlobalStakeAmountNanos increase does not match") + // Validate GlobalActiveStakeAmountNanos increase if validator is active. + if currentValidatorEntry.Status() == ValidatorStatusActive { + if utxoOp.PrevGlobalActiveStakeAmountNanos == nil { + return errors.New("SanityCheckStakeTxn: nil PrevGlobalActiveStakeAmountNanos provided") + } + currentGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error retrieving CurrentGlobalActiveStakeAmountNanos: ") + } + globalActiveStakeAmountNanosIncrease, err := SafeUint256().Sub( + currentGlobalActiveStakeAmountNanos, utxoOp.PrevGlobalActiveStakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error calculating GlobalActiveStakeAmountNanos increase: ") + } + if !globalActiveStakeAmountNanosIncrease.Eq(amountNanos) { + return errors.New("SanityCheckStakeTxn: GlobalActiveStakeAmountNanos increase does not match") + } + } else if utxoOp.PrevGlobalActiveStakeAmountNanos != nil { + return errors.New("SanityCheckStakeTxn: non-nil PrevGlobalActiveStakeAmountNanos provided for inactive validator, this should never happen") } // Validate TransactorBalance decrease. 
@@ -2134,23 +2148,27 @@ func (bav *UtxoView) SanityCheckUnstakeTxn(transactorPKID *PKID, utxoOp *UtxoOpe return errors.New("SanityCheckUnstakeTxn: LockedAmountNanos increase does not match") } - // Validate GlobalStakeAmountNanos decrease. - if utxoOp.PrevGlobalStakeAmountNanos == nil { - return errors.New("SanityCheckUnstakeTxn: nil PrevGlobalStakeAmountNanos provided") - } - currentGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() - if err != nil { - return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving CurrentGlobalStakeAmountNanos: ") - } - if currentGlobalStakeAmountNanos == nil { - return errors.New("SanityCheckUnstakeTxn: no CurrentGlobalStakeAmountNanos found") - } - globalStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalStakeAmountNanos, currentGlobalStakeAmountNanos) - if err != nil { - return errors.Wrapf(err, "SanityCheckUnstakeTxn: error calculating GlobalStakeAmountNanos decrease: ") - } - if !globalStakeAmountNanosDecrease.Eq(amountNanos) { - return errors.New("SanityCheckUnstakeTxn: GlobalStakeAmountNanos decrease does not match") + // Validate GlobalActiveStakeAmountNanos decrease if validator is active. 
+ if currentValidatorEntry.Status() == ValidatorStatusActive { + if utxoOp.PrevGlobalActiveStakeAmountNanos == nil { + return errors.New("SanityCheckUnstakeTxn: nil PrevGlobalActiveStakeAmountNanos provided") + } + currentGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving CurrentGlobalActiveStakeAmountNanos: ") + } + if currentGlobalActiveStakeAmountNanos == nil { + return errors.New("SanityCheckUnstakeTxn: no CurrentGlobalActiveStakeAmountNanos found") + } + globalActiveStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalActiveStakeAmountNanos, currentGlobalActiveStakeAmountNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error calculating GlobalActiveStakeAmountNanos decrease: ") + } + if !globalActiveStakeAmountNanosDecrease.Eq(amountNanos) { + return errors.New("SanityCheckUnstakeTxn: GlobalActiveStakeAmountNanos decrease does not match") + } + } else if utxoOp.PrevGlobalActiveStakeAmountNanos != nil { + return errors.New("SanityCheckUnstakeTxn: non-nil PrevGlobalActiveStakeAmountNanos provided for inactive validator, this should never happen") } return nil diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index f8aefb9d8..f1ff8576f 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -227,10 +227,10 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - // Verify GlobalStakeAmountNanos. - globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + // Verify GlobalActiveStakeAmountNanos. 
+ globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(100)) // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) @@ -262,10 +262,10 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) - // Verify GlobalStakeAmountNanos. - globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + // Verify GlobalActiveStakeAmountNanos. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(150)) // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) @@ -377,10 +377,10 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(110)) - // Verify GlobalStakeAmountNanos. - globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + // Verify GlobalActiveStakeAmountNanos. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(110)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(110)) // Verify LockedStakeEntry.UnstakeAmountNanos. 
lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) @@ -415,10 +415,10 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(80)) - // Verify GlobalStakeAmountNanos. - globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + // Verify GlobalActiveStakeAmountNanos. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(80)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(80)) // Verify LockedStakeEntry.UnstakeAmountNanos. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) @@ -452,10 +452,10 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt()) - // Verify GlobalStakeAmountNanos. - globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + // Verify GlobalActiveStakeAmountNanos. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) // Verify LockedStakeEntry.UnstakeAmountNanos. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) @@ -560,10 +560,10 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt()) - // Verify GlobalStakeAmountNanos. - globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + // Verify GlobalActiveStakeAmountNanos. 
+ globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) // Verify LockedStakeEntry.isDeleted. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) @@ -1934,3 +1934,298 @@ func TestStakeLockupEpochDuration(t *testing.T) { require.Equal(t, oldDesoBalanceNanos-feeNanos+uint64(100), newDesoBalanceNanos) } } + +func TestStakingToJailedValidator(t *testing.T) { + testStakingToJailedValidator(t, false) + testStakingToJailedValidator(t, true) +} + +func testStakingToJailedValidator(t *testing.T, flushToDB bool) { + var err error + + // Initialize balance model fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize fork heights. + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + chain.snapshot = nil + + // For these tests, we set ValidatorJailEpochDuration to 0. + params.ValidatorJailEpochDuration = 0 + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. 
+ blockHeight := uint64(chain.blockTip().Height + 1) + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + + // Helper utils + utxoView := func() *UtxoView { + newUtxoView, err := mempool.GetAugmentedUniversalView() + require.NoError(t, err) + return newUtxoView + } + + jailValidator := func(validatorPKID *PKID) { + // Retrieve current ValidatorEntry. + validatorEntry, err := utxoView().GetValidatorByPKID(validatorPKID) + require.NoError(t, err) + + // Retrieve current GlobalActiveStakeAmountNanos. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + + // Jail the validator. + tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + tmpUtxoView._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) + require.NoError(t, tmpUtxoView.JailValidator(validatorEntry)) + require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) + + // Delete the ValidatorEntry and GlobalActiveStakeAmountNanos + // from the UtxoView so that they are next read from the db. + delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorPKID) + delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorPKID) + mempool.universalUtxoView.GlobalActiveStakeAmountNanos = nil + mempool.readOnlyUtxoView.GlobalActiveStakeAmountNanos = nil + + } + + // Seed a CurrentEpochEntry. 
+ epochUtxoView := utxoView() + epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + + { + // ParamUpdater set min fee rate + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } + { + // m0 registers as a validator. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://m0.example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + } + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) + require.NoError(t, err) + } + { + // m1 stakes with m0. m0 is active. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(150), + } + _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos increases. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + + // GlobalActiveStakeAmountNanos increases. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(150)) + } + { + // m1 unstakes some from m0. m0 is active. 
+ unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos decreases. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // GlobalActiveStakeAmountNanos decreases. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(100)) + } + { + // Jail m0. Since this update takes place outside a transaction, + // we cannot test rollbacks. We will run into an error where m0 + // is trying to unjail himself, but he was never jailed. + jailValidator(m0PKID) + + // Verify m0 is jailed. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) + + // m0 TotalStakeAmountNanos stays the same. + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // GlobalActiveStakeAmountNanos decreases. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) + } + { + // m1 stakes more with m0. m0 is jailed. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos increases. 
+ validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + + // GlobalActiveStakeAmountNanos stays the same. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) + } + { + // m1 unstakes some from m0. m0 is jailed. + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos decreases. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // GlobalActiveStakeAmountNanos stays the same. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) + } + { + // m0 unjails himself. + _, err = _submitUnjailValidatorTxn(testMeta, m0Pub, m0Priv, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos stays the same. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // GlobalActiveStakeAmountNanos increases. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(100)) + } + { + // m1 stakes more with m0. m0 is active. 
+ stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos increases. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + + // GlobalActiveStakeAmountNanos increases. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(150)) + } + { + // m1 unstakes some from m0. m0 is active. + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos decreases. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // GlobalActiveStakeAmountNanos decreases. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(100)) + } + { + // Jail m0 again. Since this update takes place outside a transaction, + // we cannot test rollbacks. We will run into an error where m0 is + // trying to unjail himself, but he was never jailed. + jailValidator(m0PKID) + + // m0 TotalStakeAmountNanos stays the same. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // GlobalActiveStakeAmountNanos decreases. 
+ globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) + } + { + // m0 unregisters as a validator. + _, err = _submitUnregisterAsValidatorTxn(testMeta, m0Pub, m0Priv, flushToDB) + require.NoError(t, err) + + // m0's ValidatorEntry is deleted. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Nil(t, validatorEntry) + + // GlobalActiveStakeAmountNanos stays the same. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) + } +} diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 0e0af7f1c..7641a34df 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -913,9 +913,9 @@ type UtxoOperation struct { // register, unregister, stake, or unstake txn. PrevValidatorEntry *ValidatorEntry - // PrevGlobalStakeAmountNanos is the previous GlobalStakeAmountNanos + // PrevGlobalActiveStakeAmountNanos is the previous GlobalActiveStakeAmountNanos // prior to a stake or unstake operation txn. - PrevGlobalStakeAmountNanos *uint256.Int + PrevGlobalActiveStakeAmountNanos *uint256.Int // PrevStakeEntries is a slice of StakeEntries prior to // a register, unregister, stake, or unstake txn. @@ -1245,8 +1245,8 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada // PrevValidatorEntry data = append(data, EncodeToBytes(blockHeight, op.PrevValidatorEntry, skipMetadata...)...) - // PrevGlobalStakeAmountNanos - data = append(data, VariableEncodeUint256(op.PrevGlobalStakeAmountNanos)...) + // PrevGlobalActiveStakeAmountNanos + data = append(data, VariableEncodeUint256(op.PrevGlobalActiveStakeAmountNanos)...) // PrevStakeEntries data = append(data, EncodeDeSoEncoderSlice(op.PrevStakeEntries, blockHeight, skipMetadata...)...) 
@@ -1876,11 +1876,9 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevValidatorEntry: ") } - // PrevGlobalStakeAmountNanos - if prevGlobalStakeAmountNanos, err := VariableDecodeUint256(rr); err == nil { - op.PrevGlobalStakeAmountNanos = prevGlobalStakeAmountNanos - } else { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevGlobalStakeAmountNanos: ") + // PrevGlobalActiveStakeAmountNanos + if op.PrevGlobalActiveStakeAmountNanos, err = VariableDecodeUint256(rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevGlobalActiveStakeAmountNanos: ") } // PrevStakeEntries diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index b274a9b76..4bcddcbf2 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -29,10 +29,6 @@ import ( // validator was first jailed. A validator is jailed if they fail to participate in consensus by // either voting or proposing blocks for too long. A jailed validator is ineligible to receive // any block rewards and ineligible to elected leader. -// -// FIXME: In the future, when we flesh out the jail/unjail functionality, we will want to make it -// so that the validator's stake is removed/added to GlobalStakeAmountNanos. See FIXME on -// _connectUnjailValidator for more details. // // TYPES: ValidatorEntry @@ -550,8 +546,8 @@ func DBKeyForValidatorByStake(validatorEntry *ValidatorEntry) []byte { return key } -func DBKeyForGlobalStakeAmountNanos() []byte { - return append([]byte{}, Prefixes.PrefixGlobalStakeAmountNanos...) +func DBKeyForGlobalActiveStakeAmountNanos() []byte { + return append([]byte{}, Prefixes.PrefixGlobalActiveStakeAmountNanos...) 
} func DBGetValidatorByPKID(handle *badger.DB, snap *Snapshot, pkid *PKID) (*ValidatorEntry, error) { @@ -630,36 +626,36 @@ func DBGetTopActiveValidatorsByStake( return validatorEntries, nil } -func DBGetGlobalStakeAmountNanos(handle *badger.DB, snap *Snapshot) (*uint256.Int, error) { +func DBGetGlobalActiveStakeAmountNanos(handle *badger.DB, snap *Snapshot) (*uint256.Int, error) { var ret *uint256.Int err := handle.View(func(txn *badger.Txn) error { var innerErr error - ret, innerErr = DBGetGlobalStakeAmountNanosWithTxn(txn, snap) + ret, innerErr = DBGetGlobalActiveStakeAmountNanosWithTxn(txn, snap) return innerErr }) return ret, err } -func DBGetGlobalStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot) (*uint256.Int, error) { +func DBGetGlobalActiveStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot) (*uint256.Int, error) { // Retrieve from db. - key := DBKeyForGlobalStakeAmountNanos() - globalStakeAmountNanosBytes, err := DBGetWithTxn(txn, snap, key) + key := DBKeyForGlobalActiveStakeAmountNanos() + globalActiveStakeAmountNanosBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { // We don't want to error if the key isn't found. Instead, return 0. if err == badger.ErrKeyNotFound { return uint256.NewInt(), nil } - return nil, errors.Wrapf(err, "DBGetGlobalStakeAmountNanosWithTxn: problem retrieving value") + return nil, errors.Wrapf(err, "DBGetGlobalActiveStakeAmountNanosWithTxn: problem retrieving value") } // Decode from bytes. 
- var globalStakeAmountNanos *uint256.Int - rr := bytes.NewReader(globalStakeAmountNanosBytes) - globalStakeAmountNanos, err = VariableDecodeUint256(rr) + var globalActiveStakeAmountNanos *uint256.Int + rr := bytes.NewReader(globalActiveStakeAmountNanosBytes) + globalActiveStakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { - return nil, errors.Wrapf(err, "DBGetGlobalStakeAmountNanosWithTxn: problem decoding value") + return nil, errors.Wrapf(err, "DBGetGlobalActiveStakeAmountNanosWithTxn: problem decoding value") } - return globalStakeAmountNanos, nil + return globalActiveStakeAmountNanos, nil } func DBPutValidatorWithTxn( @@ -734,20 +730,20 @@ func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorPKID *PK return nil } -func DBPutGlobalStakeAmountNanosWithTxn( +func DBPutGlobalActiveStakeAmountNanosWithTxn( txn *badger.Txn, snap *Snapshot, - globalStakeAmountNanos *uint256.Int, + globalActiveStakeAmountNanos *uint256.Int, blockHeight uint64, ) error { - if globalStakeAmountNanos == nil { + if globalActiveStakeAmountNanos == nil { // This should never happen but is a sanity check. - glog.Errorf("DBPutGlobalStakeAmountNanosWithTxn: called with nil GlobalStakeAmountNanos") + glog.Errorf("DBPutGlobalActiveStakeAmountNanosWithTxn: called with nil GlobalActiveStakeAmountNanos") return nil } - key := DBKeyForGlobalStakeAmountNanos() - return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalStakeAmountNanos)) + key := DBKeyForGlobalActiveStakeAmountNanos() + return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalActiveStakeAmountNanos)) } // @@ -1310,31 +1306,35 @@ func (bav *UtxoView) _connectUnregisterAsValidator( ) } - // Decrease the GlobalStakeAmountNanos by the amount that was unstaked. - // Fetch the existing GlobalStakeAmountNanos. 
- prevGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: error fetching GlobalStakeAmountNanos: ") - } - // Subtract the amount that was unstaked. - globalStakeAmountNanos, err := SafeUint256().Sub( - prevGlobalStakeAmountNanos, totalUnstakedAmountNanos, - ) - if err != nil { - return 0, 0, nil, errors.Wrapf( - err, "_connectUnregisterAsValidator: error subtracting TotalUnstakedAmountNanos from GlobalStakeAmountNanos: ", + // If the validator was active, decrease the GlobalActiveStakeAmountNanos + // by the amount that was unstaked. Do nothing if the validator was jailed. + var prevGlobalActiveStakeAmountNanos *uint256.Int + if prevValidatorEntry.Status() == ValidatorStatusActive { + // Fetch the existing GlobalActiveStakeAmountNanos. + prevGlobalActiveStakeAmountNanos, err = bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: error fetching GlobalActiveStakeAmountNanos: ") + } + // Subtract the amount that was unstaked. + globalActiveStakeAmountNanos, err := SafeUint256().Sub( + prevGlobalActiveStakeAmountNanos, totalUnstakedAmountNanos, ) + if err != nil { + return 0, 0, nil, errors.Wrapf( + err, "_connectUnregisterAsValidator: error subtracting TotalUnstakedAmountNanos from GlobalActiveStakeAmountNanos: ", + ) + } + // Set the new GlobalActiveStakeAmountNanos. + bav._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) } - // Set the new GlobalStakeAmountNanos. - bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) // Create a UTXO operation. 
utxoOpForTxn := &UtxoOperation{ - Type: OperationTypeUnregisterAsValidator, - PrevValidatorEntry: prevValidatorEntry, - PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, - PrevStakeEntries: prevStakeEntries, - PrevLockedStakeEntries: prevLockedStakeEntries, + Type: OperationTypeUnregisterAsValidator, + PrevValidatorEntry: prevValidatorEntry, + PrevGlobalActiveStakeAmountNanos: prevGlobalActiveStakeAmountNanos, + PrevStakeEntries: prevStakeEntries, + PrevLockedStakeEntries: prevLockedStakeEntries, } if err = bav.SanityCheckUnregisterAsValidatorTxn(transactorPKIDEntry.PKID, utxoOpForTxn, totalUnstakedAmountNanos); err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: ") @@ -1414,8 +1414,10 @@ func (bav *UtxoView) _disconnectUnregisterAsValidator( bav._setLockedStakeEntryMappings(prevLockedStakeEntry) } - // Restore the PrevGlobalStakeAmountNanos. - bav._setGlobalStakeAmountNanos(operationData.PrevGlobalStakeAmountNanos) + // Restore the PrevGlobalActiveStakeAmountNanos, if exists. + if operationData.PrevGlobalActiveStakeAmountNanos != nil { + bav._setGlobalActiveStakeAmountNanos(operationData.PrevGlobalActiveStakeAmountNanos) + } // Disconnect the BasicTransfer. return bav._disconnectBasicTransfer( @@ -1423,10 +1425,6 @@ func (bav *UtxoView) _disconnectUnregisterAsValidator( ) } -// FIXME: Currently, unjail does not re-add a validator's stake back to the GlobalStakeAmountNanos. -// When we flesh out the logic for jail/unjail, we will want to make it so that the process that -// jails a validator *removes* their stake from GlobalStakeAmountNanos, and the process that unjails, -// i.e. this function, *re-adds* their stake back to GlobalStakeAmountNanos. func (bav *UtxoView) _connectUnjailValidator( txn *MsgDeSoTxn, txHash *BlockHash, @@ -1514,10 +1512,24 @@ func (bav *UtxoView) _connectUnjailValidator( // Set the CurrentValidatorEntry. bav._setValidatorEntryMappings(currentValidatorEntry) + // Increase the GlobalActiveStakeAmountNanos. 
+ prevGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnjailValidator: error retrieving existing GlobalActiveStakeAmountNanos: ") + } + currentGlobalActiveStakeAmountNanos, err := SafeUint256().Add( + prevGlobalActiveStakeAmountNanos, currentValidatorEntry.TotalStakeAmountNanos, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnjailValidator: error calculating updated GlobalActiveStakeAmountNanos ") + } + bav._setGlobalActiveStakeAmountNanos(currentGlobalActiveStakeAmountNanos) + // Add a UTXO operation utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ - Type: OperationTypeUnjailValidator, - PrevValidatorEntry: prevValidatorEntry, + Type: OperationTypeUnjailValidator, + PrevValidatorEntry: prevValidatorEntry, + PrevGlobalActiveStakeAmountNanos: prevGlobalActiveStakeAmountNanos, }) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1571,6 +1583,13 @@ func (bav *UtxoView) _disconnectUnjailValidator( } bav._setValidatorEntryMappings(prevValidatorEntry) + // Restore the PrevGlobalActiveStakeAmountNanos. + prevGlobalActiveStakeAmountNanos := operationData.PrevGlobalActiveStakeAmountNanos + if prevGlobalActiveStakeAmountNanos == nil { + return errors.New("_disconnectUnjailValidator: PrevGlobalActiveStakeAmountNanos is nil, this should never happen") + } + bav._setGlobalActiveStakeAmountNanos(prevGlobalActiveStakeAmountNanos) + // Disconnect the BasicTransfer. return bav._disconnectBasicTransfer( currentTxn, txHash, utxoOpsForTxn[:operationIndex], blockHeight, @@ -1751,21 +1770,27 @@ func (bav *UtxoView) SanityCheckUnregisterAsValidatorTxn( return errors.New("SanityCheckUnregisterAsValidatorTxn: TotalUnstakedAmountNanos doesn't match") } - // Sanity check that the GlobalStakeAmountNanos was decreased by amountNanos. 
- if utxoOp.PrevGlobalStakeAmountNanos == nil { - return errors.New("SanityCheckUnregisterAsValidatorTxn: nil PrevGlobalStakeAmountNanos provided") - } - currentGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() - if err != nil { - return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error retrieving GlobalStakeAmountNanos: ") - } - globalStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalStakeAmountNanos, currentGlobalStakeAmountNanos) - if err != nil { - return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error calculating GlobalStakeAmountNanos decrease: ") - } - if !globalStakeAmountNanosDecrease.Eq(amountNanos) { - return errors.New("SanityCheckUnregisterAsValidatorTxn: GlobalStakeAmountNanos decrease doesn't match") + // Sanity check that the GlobalActiveStakeAmountNanos was decreased + // by amountNanos if the PrevValidatorEntry was active. + if utxoOp.PrevValidatorEntry.Status() == ValidatorStatusActive { + if utxoOp.PrevGlobalActiveStakeAmountNanos == nil { + return errors.New("SanityCheckUnregisterAsValidatorTxn: nil PrevGlobalActiveStakeAmountNanos provided") + } + currentGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error retrieving GlobalActiveStakeAmountNanos: ") + } + globalActiveStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalActiveStakeAmountNanos, currentGlobalActiveStakeAmountNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error calculating GlobalActiveStakeAmountNanos decrease: ") + } + if !globalActiveStakeAmountNanosDecrease.Eq(amountNanos) { + return errors.New("SanityCheckUnregisterAsValidatorTxn: GlobalActiveStakeAmountNanos decrease doesn't match") + } + } else if utxoOp.PrevGlobalActiveStakeAmountNanos != nil { + return errors.New("SanityCheckUnregisterAsValidatorTxn: non-nil PrevGlobalActiveStakeAmountNanos provided 
for inactive validator") } + return nil } @@ -1862,22 +1887,53 @@ func (bav *UtxoView) GetTopActiveValidatorsByStake(limit int) ([]*ValidatorEntry return validatorEntries[0:upperBound], nil } -func (bav *UtxoView) GetGlobalStakeAmountNanos() (*uint256.Int, error) { - // Read the GlobalStakeAmountNanos from the UtxoView. - if bav.GlobalStakeAmountNanos != nil { - return bav.GlobalStakeAmountNanos.Clone(), nil +func (bav *UtxoView) GetGlobalActiveStakeAmountNanos() (*uint256.Int, error) { + // Read the GlobalActiveStakeAmountNanos from the UtxoView. + if bav.GlobalActiveStakeAmountNanos != nil { + return bav.GlobalActiveStakeAmountNanos.Clone(), nil } - // If not set, read the GlobalStakeAmountNanos from the db. - globalStakeAmountNanos, err := DBGetGlobalStakeAmountNanos(bav.Handle, bav.Snapshot) + // If not set, read the GlobalActiveStakeAmountNanos from the db. + globalActiveStakeAmountNanos, err := DBGetGlobalActiveStakeAmountNanos(bav.Handle, bav.Snapshot) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetGlobalStakeAmountNanos: ") + return nil, errors.Wrapf(err, "UtxoView.GetGlobalActiveStakeAmountNanos: ") } - if globalStakeAmountNanos == nil { - globalStakeAmountNanos = uint256.NewInt() + if globalActiveStakeAmountNanos == nil { + globalActiveStakeAmountNanos = uint256.NewInt() } - // Cache the GlobalStakeAmountNanos from the db in the UtxoView. - bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) - return globalStakeAmountNanos, nil + // Cache the GlobalActiveStakeAmountNanos from the db in the UtxoView. + bav._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) + return globalActiveStakeAmountNanos, nil +} + +func (bav *UtxoView) JailValidator(validatorEntry *ValidatorEntry) error { + // Retrieve the CurrentEpochNumber. 
+ currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return errors.Wrapf(err, "UtxoView.JailValidator: error retrieving CurrentEpochNumber: ") + } + + // Set ValidatorEntry.JailedAtEpochNumber to the CurrentEpochNumber. + validatorEntry.JailedAtEpochNumber = currentEpochNumber + + // Remove the validator's stake from the GlobalActiveStakeAmountNanos. + prevGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "UtxoView.JailValidator: error retrieving GlobalActiveStakeAmountNanos: ") + } + currentGlobalActiveStakeAmountNanos, err := SafeUint256().Sub( + prevGlobalActiveStakeAmountNanos, validatorEntry.TotalStakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "UtxoView.JailValidator: error calculating updated GlobalActiveStakeAmountNanos: ") + } + + // Store the updated ValidatorEntry. + bav._setValidatorEntryMappings(validatorEntry) + + // Store the updated GlobalActiveStakeAmountNanos. + bav._setGlobalActiveStakeAmountNanos(currentGlobalActiveStakeAmountNanos) + + return nil } func (bav *UtxoView) _setValidatorEntryMappings(validatorEntry *ValidatorEntry) { @@ -1902,13 +1958,13 @@ func (bav *UtxoView) _deleteValidatorEntryMappings(validatorEntry *ValidatorEntr bav._setValidatorEntryMappings(&tombstoneEntry) } -func (bav *UtxoView) _setGlobalStakeAmountNanos(globalStakeAmountNanos *uint256.Int) { +func (bav *UtxoView) _setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos *uint256.Int) { // This function shouldn't be called with nil. 
- if globalStakeAmountNanos == nil { - glog.Errorf("_setGlobalStakeAmountNanos: called with nil entry, this should never happen") + if globalActiveStakeAmountNanos == nil { + glog.Errorf("_setGlobalActiveStakeAmountNanos: called with nil entry, this should never happen") return } - bav.GlobalStakeAmountNanos = globalStakeAmountNanos.Clone() + bav.GlobalActiveStakeAmountNanos = globalActiveStakeAmountNanos.Clone() } func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { @@ -1953,14 +2009,14 @@ func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHei return nil } -func (bav *UtxoView) _flushGlobalStakeAmountNanosToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { - // If GlobalStakeAmountNanos is nil, then it was never +func (bav *UtxoView) _flushGlobalActiveStakeAmountNanosToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + // If GlobalActiveStakeAmountNanos is nil, then it was never // set and shouldn't overwrite the value in the db. - if bav.GlobalStakeAmountNanos == nil { + if bav.GlobalActiveStakeAmountNanos == nil { return nil } - return DBPutGlobalStakeAmountNanosWithTxn(txn, bav.Snapshot, bav.GlobalStakeAmountNanos, blockHeight) + return DBPutGlobalActiveStakeAmountNanosWithTxn(txn, bav.Snapshot, bav.GlobalActiveStakeAmountNanos, blockHeight) } // diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 1eb411df3..33a4d210d 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -24,7 +24,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { var registerMetadata *RegisterAsValidatorMetadata var validatorEntry *ValidatorEntry var validatorEntries []*ValidatorEntry - var globalStakeAmountNanos *uint256.Int + var globalActiveStakeAmountNanos *uint256.Int var err error // Initialize fork heights. 
@@ -235,10 +235,10 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Empty(t, validatorEntries) } { - // Query: retrieve GlobalStakeAmountNanos - globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + // Query: retrieve GlobalActiveStakeAmountNanos + globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) } { // Happy path: update a validator @@ -294,10 +294,10 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Empty(t, validatorEntries) } { - // Query: retrieve GlobalStakeAmountNanos - globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + // Query: retrieve GlobalActiveStakeAmountNanos + globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) } // Flush mempool to the db and test rollbacks. @@ -1336,7 +1336,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { var stakeEntry *StakeEntry var lockedStakeEntry *LockedStakeEntry _ = lockedStakeEntry - var globalStakeAmountNanos *uint256.Int + var globalActiveStakeAmountNanos *uint256.Int var err error // Initialize balance model fork heights. 
@@ -1438,9 +1438,9 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NotNil(t, stakeEntry) require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(600)) - globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(600)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(600)) } { // m1 stakes with m0. @@ -1458,9 +1458,9 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NotNil(t, stakeEntry) require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(400)) - globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(1000)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(1000)) } { // m1 partially unstakes with m0. @@ -1484,10 +1484,10 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NotNil(t, lockedStakeEntry) require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(100)) - // GlobalStakeAmountNanos is updated. - globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + // GlobalActiveStakeAmountNanos is updated. + globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(900)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(900)) } { // m0 unregisters as a validator. 
@@ -1521,10 +1521,10 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NotNil(t, lockedStakeEntry) require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(400)) - // GlobalStakeAmountNanos is updated. - globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + // GlobalActiveStakeAmountNanos is updated. + globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) } // Flush mempool to the db and test rollbacks. @@ -1641,19 +1641,16 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // we cannot test rollbacks. We will run into an error where m0 is // trying to unjail himself, but he was never jailed. - // Delete m0's ValidatorEntry from the UtxoView. - delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) - delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) - - // Set JailedAtEpochNumber. - validatorEntry.JailedAtEpochNumber = currentEpochNumber - - // Store m0's ValidatorEntry in the db. + // Jail m0. tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) - tmpUtxoView._setValidatorEntryMappings(validatorEntry) + require.NoError(t, tmpUtxoView.JailValidator(validatorEntry)) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) + // Delete m0's ValidatorEntry from the UtxoView so that it is read from the db. + delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) + delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) + // Verify m0 is jailed. 
validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) @@ -1938,19 +1935,17 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { // we cannot test rollbacks. We will run into an error where sender is // trying to unjail himself, but he was never jailed. - // Delete sender's ValidatorEntry from the UtxoView. - delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) - delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) - - // Set JailedAtEpochNumber. - validatorEntry.JailedAtEpochNumber = currentEpochNumber - - // Store sender's ValidatorEntry in the db. + // Jail the sender. tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) - tmpUtxoView._setValidatorEntryMappings(validatorEntry) + err = tmpUtxoView.JailValidator(validatorEntry) + require.NoError(t, err) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) + // Delete sender's ValidatorEntry from the UtxoView so that it is read from the db. + delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) + delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) + // Verify sender is jailed. validatorEntry, err = newUtxoView().GetValidatorByPKID(senderPKID) require.NoError(t, err) diff --git a/lib/db_utils.go b/lib/db_utils.go index 2272501ca..d34842f82 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -486,9 +486,9 @@ type DBPrefixes struct { // Note that we save space by storing a nil value and parsing the ValidatorPKID from the key. PrefixValidatorByStake []byte `prefix_id:"[79]" is_state:"true"` - // PrefixGlobalStakeAmountNanos: Retrieve the cumulative stake across all validators. + // PrefixGlobalActiveStakeAmountNanos: Retrieve the cumulative stake across all validators. 
// Prefix -> *uint256.Int - PrefixGlobalStakeAmountNanos []byte `prefix_id:"[80]" is_state:"true"` + PrefixGlobalActiveStakeAmountNanos []byte `prefix_id:"[80]" is_state:"true"` // PrefixStakeByValidatorAndStaker: Retrieve a StakeEntry. // Prefix, ValidatorPKID, StakerPKID -> StakeEntry @@ -733,7 +733,7 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByStake) { // prefix_id:"[79]" return false, nil - } else if bytes.Equal(prefix, Prefixes.PrefixGlobalStakeAmountNanos) { + } else if bytes.Equal(prefix, Prefixes.PrefixGlobalActiveStakeAmountNanos) { // prefix_id:"[80]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixStakeByValidatorAndStaker) { From dbe9b40851beb0619fd32ad3a997b32affaeed98 Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Mon, 22 May 2023 13:23:42 -0400 Subject: [PATCH 046/762] Mf/merge pos txn types 20230522 (#536) * Diamondhands/pos txn types review (#530) * POS fixes and comments from diamondhands * Mf/pos txn types review (#531) * Add comments. * Addres review comments. * Add first batch of sanity check utils. * Add second batch of txn sanity checks. * Rename encode uint256 funcs in comments. * Nuke RegisteredAtBlockHeight. * Address review feedback. * Fix a few more typos. * Update some comments --------- Co-authored-by: diamondhands --------- Co-authored-by: Matt Foley <100429827+mattfoley8@users.noreply.github.com> * Exclude zero-stake validators from TopValidatorsByStake. (#535) * Mf/remove jailed stake from global active stake (#533) * Remove jailed stake from global active stake. * Write to the UtxoView if no errors in jailing. * Add additional sanity checks. * PR review feedback. * Remove FIXME comment. 
--------- Co-authored-by: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> --- lib/block_view.go | 18 +- lib/block_view_flush.go | 2 +- lib/block_view_stake.go | 156 ++++++++------- lib/block_view_stake_test.go | 331 +++++++++++++++++++++++++++++-- lib/block_view_types.go | 16 +- lib/block_view_validator.go | 233 ++++++++++++++-------- lib/block_view_validator_test.go | 99 ++++----- lib/db_utils.go | 6 +- 8 files changed, 607 insertions(+), 254 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 83fbb069e..3b3a0c497 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -118,8 +118,8 @@ type UtxoView struct { // Validator mappings ValidatorPKIDToValidatorEntry map[PKID]*ValidatorEntry - // Global stake across all validators - GlobalStakeAmountNanos *uint256.Int + // The global active stake is the sum of all stake across validators who have Status = Active. + GlobalActiveStakeAmountNanos *uint256.Int // Stake mappings StakeMapKeyToStakeEntry map[StakeMapKey]*StakeEntry @@ -222,10 +222,10 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // ValidatorEntries bav.ValidatorPKIDToValidatorEntry = make(map[PKID]*ValidatorEntry) - // Global stake across validators. We deliberately want this to initialize to nil and not zero - // since a zero value will overwrite an existing GlobalStakeAmountNanos value in the db, whereas - // a nil GlobalStakeAmountNanos value signifies that this value was never set. - bav.GlobalStakeAmountNanos = nil + // Global active stake across validators. We deliberately want this to initialize to nil and not zero + // since a zero value will overwrite an existing GlobalActiveStakeAmountNanos value in the db, whereas + // a nil GlobalActiveStakeAmountNanos value signifies that this value was never set. 
+ bav.GlobalActiveStakeAmountNanos = nil // StakeEntries bav.StakeMapKeyToStakeEntry = make(map[StakeMapKey]*StakeEntry) @@ -493,9 +493,9 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.ValidatorPKIDToValidatorEntry[entryKey] = entry.Copy() } - // Copy the GlobalStakeAmountNanos. - if bav.GlobalStakeAmountNanos != nil { - newView.GlobalStakeAmountNanos = bav.GlobalStakeAmountNanos.Clone() + // Copy the GlobalActiveStakeAmountNanos. + if bav.GlobalActiveStakeAmountNanos != nil { + newView.GlobalActiveStakeAmountNanos = bav.GlobalActiveStakeAmountNanos.Clone() } // Copy the StakeEntries diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 6ae70f214..684e79efe 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -143,7 +143,7 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushValidatorEntriesToDbWithTxn(txn, blockHeight); err != nil { return err } - if err := bav._flushGlobalStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { + if err := bav._flushGlobalActiveStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { return err } if err := bav._flushStakeEntriesToDbWithTxn(txn, blockHeight); err != nil { diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 8c81416d3..66df9a90b 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1226,19 +1226,22 @@ func (bav *UtxoView) _connectStake( // 4. Set the new ValidatorEntry. bav._setValidatorEntryMappings(currentValidatorEntry) - // Increase the GlobalStakeAmountNanos. - // Retrieve the existing GlobalStakeAmountNanos. - // The PrevGlobalStakeAmountNanos will be restored if we disconnect this transaction. 
- prevGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectStake: error retrieving GlobalStakeAmountNanos: ") - } - globalStakeAmountNanos, err := SafeUint256().Add(prevGlobalStakeAmountNanos, txMeta.StakeAmountNanos) - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectStake: error adding StakeAmountNanos to GlobalStakeAmountNanos: ") + // Increase the GlobalActiveStakeAmountNanos if the validator is active. + var prevGlobalActiveStakeAmountNanos *uint256.Int + if currentValidatorEntry.Status() == ValidatorStatusActive { + // Retrieve the existing GlobalActiveStakeAmountNanos. + // The PrevGlobalActiveStakeAmountNanos will be restored if we disconnect this transaction. + prevGlobalActiveStakeAmountNanos, err = bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: error retrieving GlobalActiveStakeAmountNanos: ") + } + globalActiveStakeAmountNanos, err := SafeUint256().Add(prevGlobalActiveStakeAmountNanos, txMeta.StakeAmountNanos) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectStake: error adding StakeAmountNanos to GlobalActiveStakeAmountNanos: ") + } + // Set the new GlobalActiveStakeAmountNanos. + bav._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) } - // Set the new GlobalStakeAmountNanos. - bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) // Add the StakeAmountNanos to TotalOutput. The coins being staked are already // part of the TotalInput. 
But they are not burned, so they are an implicit @@ -1250,10 +1253,10 @@ func (bav *UtxoView) _connectStake( // Create a UTXO operation utxoOpForTxn := &UtxoOperation{ - Type: OperationTypeStake, - PrevValidatorEntry: prevValidatorEntry, - PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, - PrevStakeEntries: prevStakeEntries, + Type: OperationTypeStake, + PrevValidatorEntry: prevValidatorEntry, + PrevGlobalActiveStakeAmountNanos: prevGlobalActiveStakeAmountNanos, + PrevStakeEntries: prevStakeEntries, } if err = bav.SanityCheckStakeTxn( transactorPKIDEntry.PKID, utxoOpForTxn, txMeta.StakeAmountNanos, txn.TxnFeeNanos, prevBalanceNanos, @@ -1338,8 +1341,10 @@ func (bav *UtxoView) _disconnectStake( bav._setStakeEntryMappings(operationData.PrevStakeEntries[0]) } - // Restore the PrevGlobalStakeAmountNanos. - bav._setGlobalStakeAmountNanos(operationData.PrevGlobalStakeAmountNanos) + // Restore the PrevGlobalActiveStakeAmountNanos, if exists. + if operationData.PrevGlobalActiveStakeAmountNanos != nil { + bav._setGlobalActiveStakeAmountNanos(operationData.PrevGlobalActiveStakeAmountNanos) + } // Disconnect the BasicTransfer. Disconnecting the BasicTransfer also returns // the extra spend associated with the amount the transactor staked. @@ -1461,18 +1466,21 @@ func (bav *UtxoView) _connectUnstake( // 4. Set the new ValidatorEntry. bav._setValidatorEntryMappings(currentValidatorEntry) - // Decrease the GlobalStakeAmountNanos. - // 1. Retrieve the existing GlobalStakeAmountNanos. This will be restored if we disconnect this txn. 
- prevGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error retrieving GlobalStakeAmountNanos: ") - } - globalStakeAmountNanos, err := SafeUint256().Sub(prevGlobalStakeAmountNanos, txMeta.UnstakeAmountNanos) - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error subtracting UnstakeAmountNanos from GlobalStakeAmountNanos: ") + // Decrease the GlobalActiveStakeAmountNanos if the validator is active. + var prevGlobalActiveStakeAmountNanos *uint256.Int + if currentValidatorEntry.Status() == ValidatorStatusActive { + // 1. Retrieve the existing GlobalActiveStakeAmountNanos. This will be restored if we disconnect this txn. + prevGlobalActiveStakeAmountNanos, err = bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error retrieving GlobalActiveStakeAmountNanos: ") + } + globalActiveStakeAmountNanos, err := SafeUint256().Sub(prevGlobalActiveStakeAmountNanos, txMeta.UnstakeAmountNanos) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error subtracting UnstakeAmountNanos from GlobalActiveStakeAmountNanos: ") + } + // 2. Set the new GlobalActiveStakeAmountNanos. + bav._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) } - // 2. Set the new GlobalStakeAmountNanos. - bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) // Retrieve the CurrentEpochNumber. currentEpochNumber, err := bav.GetCurrentEpochNumber() @@ -1524,11 +1532,11 @@ func (bav *UtxoView) _connectUnstake( // Create a UTXO operation. 
utxoOpForTxn := &UtxoOperation{ - Type: OperationTypeUnstake, - PrevValidatorEntry: prevValidatorEntry, - PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, - PrevStakeEntries: prevStakeEntries, - PrevLockedStakeEntries: prevLockedStakeEntries, + Type: OperationTypeUnstake, + PrevValidatorEntry: prevValidatorEntry, + PrevGlobalActiveStakeAmountNanos: prevGlobalActiveStakeAmountNanos, + PrevStakeEntries: prevStakeEntries, + PrevLockedStakeEntries: prevLockedStakeEntries, } if err = bav.SanityCheckUnstakeTxn(transactorPKIDEntry.PKID, utxoOpForTxn, txMeta.UnstakeAmountNanos); err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ") @@ -1612,8 +1620,10 @@ func (bav *UtxoView) _disconnectUnstake( } bav._setStakeEntryMappings(operationData.PrevStakeEntries[0]) - // Restore the PrevGlobalStakeAmountNanos. - bav._setGlobalStakeAmountNanos(operationData.PrevGlobalStakeAmountNanos) + // Restore the PrevGlobalActiveStakeAmountNanos, if exists. + if operationData.PrevGlobalActiveStakeAmountNanos != nil { + bav._setGlobalActiveStakeAmountNanos(operationData.PrevGlobalActiveStakeAmountNanos) + } // Retrieve the CurrentEpochNumber. currentEpochNumber, err := bav.GetCurrentEpochNumber() @@ -2021,22 +2031,26 @@ func (bav *UtxoView) SanityCheckStakeTxn( return errors.New("SanityCheckStakeTxn: StakeAmountNanos increase does not match") } - // Validate GlobalStakeAmountNanos increase. 
- if utxoOp.PrevGlobalStakeAmountNanos == nil { - return errors.New("SanityCheckStakeTxn: nil PrevGlobalStakeAmountNanos provided") - } - currentGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() - if err != nil { - return errors.Wrapf(err, "SanityCheckStakeTxn: error retrieving CurrentGlobalStakeAmountNanos: ") - } - globalStakeAmountNanosIncrease, err := SafeUint256().Sub( - currentGlobalStakeAmountNanos, utxoOp.PrevGlobalStakeAmountNanos, - ) - if err != nil { - return errors.Wrapf(err, "SanityCheckStakeTxn: error calculating GlobalStakeAmountNanos increase: ") - } - if !globalStakeAmountNanosIncrease.Eq(amountNanos) { - return errors.New("SanityCheckStakeTxn: GlobalStakeAmountNanos increase does not match") + // Validate GlobalActiveStakeAmountNanos increase if validator is active. + if currentValidatorEntry.Status() == ValidatorStatusActive { + if utxoOp.PrevGlobalActiveStakeAmountNanos == nil { + return errors.New("SanityCheckStakeTxn: nil PrevGlobalActiveStakeAmountNanos provided") + } + currentGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error retrieving CurrentGlobalActiveStakeAmountNanos: ") + } + globalActiveStakeAmountNanosIncrease, err := SafeUint256().Sub( + currentGlobalActiveStakeAmountNanos, utxoOp.PrevGlobalActiveStakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error calculating GlobalActiveStakeAmountNanos increase: ") + } + if !globalActiveStakeAmountNanosIncrease.Eq(amountNanos) { + return errors.New("SanityCheckStakeTxn: GlobalActiveStakeAmountNanos increase does not match") + } + } else if utxoOp.PrevGlobalActiveStakeAmountNanos != nil { + return errors.New("SanityCheckStakeTxn: non-nil PrevGlobalActiveStakeAmountNanos provided for inactive validator, this should never happen") } // Validate TransactorBalance decrease. 
@@ -2134,23 +2148,27 @@ func (bav *UtxoView) SanityCheckUnstakeTxn(transactorPKID *PKID, utxoOp *UtxoOpe return errors.New("SanityCheckUnstakeTxn: LockedAmountNanos increase does not match") } - // Validate GlobalStakeAmountNanos decrease. - if utxoOp.PrevGlobalStakeAmountNanos == nil { - return errors.New("SanityCheckUnstakeTxn: nil PrevGlobalStakeAmountNanos provided") - } - currentGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() - if err != nil { - return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving CurrentGlobalStakeAmountNanos: ") - } - if currentGlobalStakeAmountNanos == nil { - return errors.New("SanityCheckUnstakeTxn: no CurrentGlobalStakeAmountNanos found") - } - globalStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalStakeAmountNanos, currentGlobalStakeAmountNanos) - if err != nil { - return errors.Wrapf(err, "SanityCheckUnstakeTxn: error calculating GlobalStakeAmountNanos decrease: ") - } - if !globalStakeAmountNanosDecrease.Eq(amountNanos) { - return errors.New("SanityCheckUnstakeTxn: GlobalStakeAmountNanos decrease does not match") + // Validate GlobalActiveStakeAmountNanos decrease if validator is active. 
+ if currentValidatorEntry.Status() == ValidatorStatusActive { + if utxoOp.PrevGlobalActiveStakeAmountNanos == nil { + return errors.New("SanityCheckUnstakeTxn: nil PrevGlobalActiveStakeAmountNanos provided") + } + currentGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving CurrentGlobalActiveStakeAmountNanos: ") + } + if currentGlobalActiveStakeAmountNanos == nil { + return errors.New("SanityCheckUnstakeTxn: no CurrentGlobalActiveStakeAmountNanos found") + } + globalActiveStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalActiveStakeAmountNanos, currentGlobalActiveStakeAmountNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error calculating GlobalActiveStakeAmountNanos decrease: ") + } + if !globalActiveStakeAmountNanosDecrease.Eq(amountNanos) { + return errors.New("SanityCheckUnstakeTxn: GlobalActiveStakeAmountNanos decrease does not match") + } + } else if utxoOp.PrevGlobalActiveStakeAmountNanos != nil { + return errors.New("SanityCheckUnstakeTxn: non-nil PrevGlobalActiveStakeAmountNanos provided for inactive validator, this should never happen") } return nil diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index f8aefb9d8..f1ff8576f 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -227,10 +227,10 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - // Verify GlobalStakeAmountNanos. - globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + // Verify GlobalActiveStakeAmountNanos. 
+ globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(100)) // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) @@ -262,10 +262,10 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) - // Verify GlobalStakeAmountNanos. - globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + // Verify GlobalActiveStakeAmountNanos. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(150)) // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) @@ -377,10 +377,10 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(110)) - // Verify GlobalStakeAmountNanos. - globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + // Verify GlobalActiveStakeAmountNanos. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(110)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(110)) // Verify LockedStakeEntry.UnstakeAmountNanos. 
lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) @@ -415,10 +415,10 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(80)) - // Verify GlobalStakeAmountNanos. - globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + // Verify GlobalActiveStakeAmountNanos. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(80)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(80)) // Verify LockedStakeEntry.UnstakeAmountNanos. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) @@ -452,10 +452,10 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt()) - // Verify GlobalStakeAmountNanos. - globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + // Verify GlobalActiveStakeAmountNanos. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) // Verify LockedStakeEntry.UnstakeAmountNanos. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) @@ -560,10 +560,10 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt()) - // Verify GlobalStakeAmountNanos. - globalStakeAmountNanos, err := utxoView().GetGlobalStakeAmountNanos() + // Verify GlobalActiveStakeAmountNanos. 
+ globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) // Verify LockedStakeEntry.isDeleted. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) @@ -1934,3 +1934,298 @@ func TestStakeLockupEpochDuration(t *testing.T) { require.Equal(t, oldDesoBalanceNanos-feeNanos+uint64(100), newDesoBalanceNanos) } } + +func TestStakingToJailedValidator(t *testing.T) { + testStakingToJailedValidator(t, false) + testStakingToJailedValidator(t, true) +} + +func testStakingToJailedValidator(t *testing.T, flushToDB bool) { + var err error + + // Initialize balance model fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize fork heights. + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + chain.snapshot = nil + + // For these tests, we set ValidatorJailEpochDuration to 0. + params.ValidatorJailEpochDuration = 0 + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. 
+ blockHeight := uint64(chain.blockTip().Height + 1) + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + + // Helper utils + utxoView := func() *UtxoView { + newUtxoView, err := mempool.GetAugmentedUniversalView() + require.NoError(t, err) + return newUtxoView + } + + jailValidator := func(validatorPKID *PKID) { + // Retrieve current ValidatorEntry. + validatorEntry, err := utxoView().GetValidatorByPKID(validatorPKID) + require.NoError(t, err) + + // Retrieve current GlobalActiveStakeAmountNanos. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + + // Jail the validator. + tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + tmpUtxoView._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) + require.NoError(t, tmpUtxoView.JailValidator(validatorEntry)) + require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) + + // Delete the ValidatorEntry and GlobalActiveStakeAmountNanos + // from the UtxoView so that they are next read from the db. + delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorPKID) + delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorPKID) + mempool.universalUtxoView.GlobalActiveStakeAmountNanos = nil + mempool.readOnlyUtxoView.GlobalActiveStakeAmountNanos = nil + + } + + // Seed a CurrentEpochEntry. 
+ epochUtxoView := utxoView() + epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) + require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) + + { + // ParamUpdater set min fee rate + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } + { + // m0 registers as a validator. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://m0.example.com")}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + } + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) + require.NoError(t, err) + } + { + // m1 stakes with m0. m0 is active. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(150), + } + _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos increases. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + + // GlobalActiveStakeAmountNanos increases. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(150)) + } + { + // m1 unstakes some from m0. m0 is active. 
+ unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos decreases. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // GlobalActiveStakeAmountNanos decreases. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(100)) + } + { + // Jail m0. Since this update takes place outside a transaction, + // we cannot test rollbacks. We will run into an error where m0 + // is trying to unjail himself, but he was never jailed. + jailValidator(m0PKID) + + // Verify m0 is jailed. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) + + // m0 TotalStakeAmountNanos stays the same. + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // GlobalActiveStakeAmountNanos decreases. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) + } + { + // m1 stakes more with m0. m0 is jailed. + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos increases. 
+ validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + + // GlobalActiveStakeAmountNanos stays the same. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) + } + { + // m1 unstakes some from m0. m0 is jailed. + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos decreases. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // GlobalActiveStakeAmountNanos stays the same. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) + } + { + // m0 unjails himself. + _, err = _submitUnjailValidatorTxn(testMeta, m0Pub, m0Priv, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos stays the same. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // GlobalActiveStakeAmountNanos increases. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(100)) + } + { + // m1 stakes more with m0. m0 is active. 
+ stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos increases. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + + // GlobalActiveStakeAmountNanos increases. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(150)) + } + { + // m1 unstakes some from m0. m0 is active. + unstakeMetadata := &UnstakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + } + _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) + require.NoError(t, err) + + // m0 TotalStakeAmountNanos decreases. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // GlobalActiveStakeAmountNanos decreases. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(100)) + } + { + // Jail m0 again. Since this update takes place outside a transaction, + // we cannot test rollbacks. We will run into an error where m0 is + // trying to unjail himself, but he was never jailed. + jailValidator(m0PKID) + + // m0 TotalStakeAmountNanos stays the same. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + + // GlobalActiveStakeAmountNanos decreases. 
+ globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) + } + { + // m0 unregisters as a validator. + _, err = _submitUnregisterAsValidatorTxn(testMeta, m0Pub, m0Priv, flushToDB) + require.NoError(t, err) + + // m0's ValidatorEntry is deleted. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Nil(t, validatorEntry) + + // GlobalActiveStakeAmountNanos stays the same. + globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() + require.NoError(t, err) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) + } +} diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 0e0af7f1c..7641a34df 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -913,9 +913,9 @@ type UtxoOperation struct { // register, unregister, stake, or unstake txn. PrevValidatorEntry *ValidatorEntry - // PrevGlobalStakeAmountNanos is the previous GlobalStakeAmountNanos + // PrevGlobalActiveStakeAmountNanos is the previous GlobalActiveStakeAmountNanos // prior to a stake or unstake operation txn. - PrevGlobalStakeAmountNanos *uint256.Int + PrevGlobalActiveStakeAmountNanos *uint256.Int // PrevStakeEntries is a slice of StakeEntries prior to // a register, unregister, stake, or unstake txn. @@ -1245,8 +1245,8 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada // PrevValidatorEntry data = append(data, EncodeToBytes(blockHeight, op.PrevValidatorEntry, skipMetadata...)...) - // PrevGlobalStakeAmountNanos - data = append(data, VariableEncodeUint256(op.PrevGlobalStakeAmountNanos)...) + // PrevGlobalActiveStakeAmountNanos + data = append(data, VariableEncodeUint256(op.PrevGlobalActiveStakeAmountNanos)...) // PrevStakeEntries data = append(data, EncodeDeSoEncoderSlice(op.PrevStakeEntries, blockHeight, skipMetadata...)...) 
@@ -1876,11 +1876,9 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevValidatorEntry: ") } - // PrevGlobalStakeAmountNanos - if prevGlobalStakeAmountNanos, err := VariableDecodeUint256(rr); err == nil { - op.PrevGlobalStakeAmountNanos = prevGlobalStakeAmountNanos - } else { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevGlobalStakeAmountNanos: ") + // PrevGlobalActiveStakeAmountNanos + if op.PrevGlobalActiveStakeAmountNanos, err = VariableDecodeUint256(rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevGlobalActiveStakeAmountNanos: ") } // PrevStakeEntries diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 647044cd6..6e358b562 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -29,10 +29,6 @@ import ( // validator was first jailed. A validator is jailed if they fail to participate in consensus by // either voting or proposing blocks for too long. A jailed validator is ineligible to receive // any block rewards and ineligible to elected leader. -// -// FIXME: In the future, when we flesh out the jail/unjail functionality, we will want to make it -// so that the validator's stake is removed/added to GlobalStakeAmountNanos. See FIXME on -// _connectUnjailValidator for more details. // // TYPES: ValidatorEntry @@ -550,8 +546,8 @@ func DBKeyForValidatorByStake(validatorEntry *ValidatorEntry) []byte { return key } -func DBKeyForGlobalStakeAmountNanos() []byte { - return append([]byte{}, Prefixes.PrefixGlobalStakeAmountNanos...) +func DBKeyForGlobalActiveStakeAmountNanos() []byte { + return append([]byte{}, Prefixes.PrefixGlobalActiveStakeAmountNanos...) 
} func DBGetValidatorByPKID(handle *badger.DB, snap *Snapshot, pkid *PKID) (*ValidatorEntry, error) { @@ -630,36 +626,36 @@ func DBGetTopActiveValidatorsByStake( return validatorEntries, nil } -func DBGetGlobalStakeAmountNanos(handle *badger.DB, snap *Snapshot) (*uint256.Int, error) { +func DBGetGlobalActiveStakeAmountNanos(handle *badger.DB, snap *Snapshot) (*uint256.Int, error) { var ret *uint256.Int err := handle.View(func(txn *badger.Txn) error { var innerErr error - ret, innerErr = DBGetGlobalStakeAmountNanosWithTxn(txn, snap) + ret, innerErr = DBGetGlobalActiveStakeAmountNanosWithTxn(txn, snap) return innerErr }) return ret, err } -func DBGetGlobalStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot) (*uint256.Int, error) { +func DBGetGlobalActiveStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot) (*uint256.Int, error) { // Retrieve from db. - key := DBKeyForGlobalStakeAmountNanos() - globalStakeAmountNanosBytes, err := DBGetWithTxn(txn, snap, key) + key := DBKeyForGlobalActiveStakeAmountNanos() + globalActiveStakeAmountNanosBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { // We don't want to error if the key isn't found. Instead, return 0. if err == badger.ErrKeyNotFound { return uint256.NewInt(), nil } - return nil, errors.Wrapf(err, "DBGetGlobalStakeAmountNanosWithTxn: problem retrieving value") + return nil, errors.Wrapf(err, "DBGetGlobalActiveStakeAmountNanosWithTxn: problem retrieving value") } // Decode from bytes. 
- var globalStakeAmountNanos *uint256.Int - rr := bytes.NewReader(globalStakeAmountNanosBytes) - globalStakeAmountNanos, err = VariableDecodeUint256(rr) + var globalActiveStakeAmountNanos *uint256.Int + rr := bytes.NewReader(globalActiveStakeAmountNanosBytes) + globalActiveStakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { - return nil, errors.Wrapf(err, "DBGetGlobalStakeAmountNanosWithTxn: problem decoding value") + return nil, errors.Wrapf(err, "DBGetGlobalActiveStakeAmountNanosWithTxn: problem decoding value") } - return globalStakeAmountNanos, nil + return globalActiveStakeAmountNanos, nil } func DBPutValidatorWithTxn( @@ -734,20 +730,19 @@ func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorPKID *PK return nil } -func DBPutGlobalStakeAmountNanosWithTxn( +func DBPutGlobalActiveStakeAmountNanosWithTxn( txn *badger.Txn, snap *Snapshot, - globalStakeAmountNanos *uint256.Int, + globalActiveStakeAmountNanos *uint256.Int, blockHeight uint64, ) error { - if globalStakeAmountNanos == nil { + if globalActiveStakeAmountNanos == nil { // This should never happen but is a sanity check. - glog.Errorf("DBPutGlobalStakeAmountNanosWithTxn: called with nil GlobalStakeAmountNanos") + glog.Errorf("DBPutGlobalActiveStakeAmountNanosWithTxn: called with nil GlobalActiveStakeAmountNanos") return nil } - - key := DBKeyForGlobalStakeAmountNanos() - return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalStakeAmountNanos)) + key := DBKeyForGlobalActiveStakeAmountNanos() + return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalActiveStakeAmountNanos)) } // @@ -1310,31 +1305,35 @@ func (bav *UtxoView) _connectUnregisterAsValidator( ) } - // Decrease the GlobalStakeAmountNanos by the amount that was unstaked. - // Fetch the existing GlobalStakeAmountNanos. 
- prevGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: error fetching GlobalStakeAmountNanos: ") - } - // Subtract the amount that was unstaked. - globalStakeAmountNanos, err := SafeUint256().Sub( - prevGlobalStakeAmountNanos, totalUnstakedAmountNanos, - ) - if err != nil { - return 0, 0, nil, errors.Wrapf( - err, "_connectUnregisterAsValidator: error subtracting TotalUnstakedAmountNanos from GlobalStakeAmountNanos: ", + // If the validator was active, decrease the GlobalActiveStakeAmountNanos + // by the amount that was unstaked. Do nothing if the validator was jailed. + var prevGlobalActiveStakeAmountNanos *uint256.Int + if prevValidatorEntry.Status() == ValidatorStatusActive { + // Fetch the existing GlobalActiveStakeAmountNanos. + prevGlobalActiveStakeAmountNanos, err = bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: error fetching GlobalActiveStakeAmountNanos: ") + } + // Subtract the amount that was unstaked. + globalActiveStakeAmountNanos, err := SafeUint256().Sub( + prevGlobalActiveStakeAmountNanos, totalUnstakedAmountNanos, ) + if err != nil { + return 0, 0, nil, errors.Wrapf( + err, "_connectUnregisterAsValidator: error subtracting TotalUnstakedAmountNanos from GlobalActiveStakeAmountNanos: ", + ) + } + // Set the new GlobalActiveStakeAmountNanos. + bav._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) } - // Set the new GlobalStakeAmountNanos. - bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) // Create a UTXO operation. 
utxoOpForTxn := &UtxoOperation{ - Type: OperationTypeUnregisterAsValidator, - PrevValidatorEntry: prevValidatorEntry, - PrevGlobalStakeAmountNanos: prevGlobalStakeAmountNanos, - PrevStakeEntries: prevStakeEntries, - PrevLockedStakeEntries: prevLockedStakeEntries, + Type: OperationTypeUnregisterAsValidator, + PrevValidatorEntry: prevValidatorEntry, + PrevGlobalActiveStakeAmountNanos: prevGlobalActiveStakeAmountNanos, + PrevStakeEntries: prevStakeEntries, + PrevLockedStakeEntries: prevLockedStakeEntries, } if err = bav.SanityCheckUnregisterAsValidatorTxn(transactorPKIDEntry.PKID, utxoOpForTxn, totalUnstakedAmountNanos); err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: ") @@ -1414,8 +1413,10 @@ func (bav *UtxoView) _disconnectUnregisterAsValidator( bav._setLockedStakeEntryMappings(prevLockedStakeEntry) } - // Restore the PrevGlobalStakeAmountNanos. - bav._setGlobalStakeAmountNanos(operationData.PrevGlobalStakeAmountNanos) + // Restore the PrevGlobalActiveStakeAmountNanos, if exists. + if operationData.PrevGlobalActiveStakeAmountNanos != nil { + bav._setGlobalActiveStakeAmountNanos(operationData.PrevGlobalActiveStakeAmountNanos) + } // Disconnect the BasicTransfer. return bav._disconnectBasicTransfer( @@ -1423,10 +1424,6 @@ func (bav *UtxoView) _disconnectUnregisterAsValidator( ) } -// FIXME: Currently, unjail does not re-add a validator's stake back to the GlobalStakeAmountNanos. -// When we flesh out the logic for jail/unjail, we will want to make it so that the process that -// jails a validator *removes* their stake from GlobalStakeAmountNanos, and the process that unjails, -// i.e. this function, *re-adds* their stake back to GlobalStakeAmountNanos. func (bav *UtxoView) _connectUnjailValidator( txn *MsgDeSoTxn, txHash *BlockHash, @@ -1514,10 +1511,24 @@ func (bav *UtxoView) _connectUnjailValidator( // Set the CurrentValidatorEntry. bav._setValidatorEntryMappings(currentValidatorEntry) + // Increase the GlobalActiveStakeAmountNanos. 
+ prevGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnjailValidator: error retrieving existing GlobalActiveStakeAmountNanos: ") + } + currentGlobalActiveStakeAmountNanos, err := SafeUint256().Add( + prevGlobalActiveStakeAmountNanos, currentValidatorEntry.TotalStakeAmountNanos, + ) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUnjailValidator: error calculating updated GlobalActiveStakeAmountNanos ") + } + bav._setGlobalActiveStakeAmountNanos(currentGlobalActiveStakeAmountNanos) + // Add a UTXO operation utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ - Type: OperationTypeUnjailValidator, - PrevValidatorEntry: prevValidatorEntry, + Type: OperationTypeUnjailValidator, + PrevValidatorEntry: prevValidatorEntry, + PrevGlobalActiveStakeAmountNanos: prevGlobalActiveStakeAmountNanos, }) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1571,6 +1582,13 @@ func (bav *UtxoView) _disconnectUnjailValidator( } bav._setValidatorEntryMappings(prevValidatorEntry) + // Restore the PrevGlobalActiveStakeAmountNanos. + prevGlobalActiveStakeAmountNanos := operationData.PrevGlobalActiveStakeAmountNanos + if prevGlobalActiveStakeAmountNanos == nil { + return errors.New("_disconnectUnjailValidator: PrevGlobalActiveStakeAmountNanos is nil, this should never happen") + } + bav._setGlobalActiveStakeAmountNanos(prevGlobalActiveStakeAmountNanos) + // Disconnect the BasicTransfer. return bav._disconnectBasicTransfer( currentTxn, txHash, utxoOpsForTxn[:operationIndex], blockHeight, @@ -1751,21 +1769,27 @@ func (bav *UtxoView) SanityCheckUnregisterAsValidatorTxn( return errors.New("SanityCheckUnregisterAsValidatorTxn: TotalUnstakedAmountNanos doesn't match") } - // Sanity check that the GlobalStakeAmountNanos was decreased by amountNanos. 
- if utxoOp.PrevGlobalStakeAmountNanos == nil { - return errors.New("SanityCheckUnregisterAsValidatorTxn: nil PrevGlobalStakeAmountNanos provided") - } - currentGlobalStakeAmountNanos, err := bav.GetGlobalStakeAmountNanos() - if err != nil { - return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error retrieving GlobalStakeAmountNanos: ") - } - globalStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalStakeAmountNanos, currentGlobalStakeAmountNanos) - if err != nil { - return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error calculating GlobalStakeAmountNanos decrease: ") - } - if !globalStakeAmountNanosDecrease.Eq(amountNanos) { - return errors.New("SanityCheckUnregisterAsValidatorTxn: GlobalStakeAmountNanos decrease doesn't match") + // Sanity check that the GlobalActiveStakeAmountNanos was decreased + // by amountNanos if the PrevValidatorEntry was active. + if utxoOp.PrevValidatorEntry.Status() == ValidatorStatusActive { + if utxoOp.PrevGlobalActiveStakeAmountNanos == nil { + return errors.New("SanityCheckUnregisterAsValidatorTxn: nil PrevGlobalActiveStakeAmountNanos provided") + } + currentGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error retrieving GlobalActiveStakeAmountNanos: ") + } + globalActiveStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalActiveStakeAmountNanos, currentGlobalActiveStakeAmountNanos) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error calculating GlobalActiveStakeAmountNanos decrease: ") + } + if !globalActiveStakeAmountNanosDecrease.Eq(amountNanos) { + return errors.New("SanityCheckUnregisterAsValidatorTxn: GlobalActiveStakeAmountNanos decrease doesn't match") + } + } else if utxoOp.PrevGlobalActiveStakeAmountNanos != nil { + return errors.New("SanityCheckUnregisterAsValidatorTxn: non-nil PrevGlobalActiveStakeAmountNanos provided 
for inactive validator") } + return nil } @@ -1844,10 +1868,12 @@ func (bav *UtxoView) GetTopActiveValidatorsByStake(limit int) ([]*ValidatorEntry bav._setValidatorEntryMappings(validatorEntry) } } - // Pull !isDeleted, active ValidatorEntries from the UtxoView. + // Pull !isDeleted, active ValidatorEntries from the UtxoView with stake > 0. var validatorEntries []*ValidatorEntry for _, validatorEntry := range bav.ValidatorPKIDToValidatorEntry { - if !validatorEntry.isDeleted && validatorEntry.Status() == ValidatorStatusActive { + if !validatorEntry.isDeleted && + validatorEntry.Status() == ValidatorStatusActive && + !validatorEntry.TotalStakeAmountNanos.IsZero() { validatorEntries = append(validatorEntries, validatorEntry) } } @@ -1860,22 +1886,53 @@ func (bav *UtxoView) GetTopActiveValidatorsByStake(limit int) ([]*ValidatorEntry return validatorEntries[0:upperBound], nil } -func (bav *UtxoView) GetGlobalStakeAmountNanos() (*uint256.Int, error) { - // Read the GlobalStakeAmountNanos from the UtxoView. - if bav.GlobalStakeAmountNanos != nil { - return bav.GlobalStakeAmountNanos.Clone(), nil +func (bav *UtxoView) GetGlobalActiveStakeAmountNanos() (*uint256.Int, error) { + // Read the GlobalActiveStakeAmountNanos from the UtxoView. + if bav.GlobalActiveStakeAmountNanos != nil { + return bav.GlobalActiveStakeAmountNanos.Clone(), nil } - // If not set, read the GlobalStakeAmountNanos from the db. - globalStakeAmountNanos, err := DBGetGlobalStakeAmountNanos(bav.Handle, bav.Snapshot) + // If not set, read the GlobalActiveStakeAmountNanos from the db. 
+ globalActiveStakeAmountNanos, err := DBGetGlobalActiveStakeAmountNanos(bav.Handle, bav.Snapshot) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetGlobalStakeAmountNanos: ") + return nil, errors.Wrapf(err, "UtxoView.GetGlobalActiveStakeAmountNanos: ") } - if globalStakeAmountNanos == nil { - globalStakeAmountNanos = uint256.NewInt() + if globalActiveStakeAmountNanos == nil { + globalActiveStakeAmountNanos = uint256.NewInt() } - // Cache the GlobalStakeAmountNanos from the db in the UtxoView. - bav._setGlobalStakeAmountNanos(globalStakeAmountNanos) - return globalStakeAmountNanos, nil + // Cache the GlobalActiveStakeAmountNanos from the db in the UtxoView. + bav._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) + return globalActiveStakeAmountNanos, nil +} + +func (bav *UtxoView) JailValidator(validatorEntry *ValidatorEntry) error { + // Retrieve the CurrentEpochNumber. + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return errors.Wrapf(err, "UtxoView.JailValidator: error retrieving CurrentEpochNumber: ") + } + + // Set ValidatorEntry.JailedAtEpochNumber to the CurrentEpochNumber. + validatorEntry.JailedAtEpochNumber = currentEpochNumber + + // Remove the validator's stake from the GlobalActiveStakeAmountNanos. + prevGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "UtxoView.JailValidator: error retrieving GlobalActiveStakeAmountNanos: ") + } + currentGlobalActiveStakeAmountNanos, err := SafeUint256().Sub( + prevGlobalActiveStakeAmountNanos, validatorEntry.TotalStakeAmountNanos, + ) + if err != nil { + return errors.Wrapf(err, "UtxoView.JailValidator: error calculating updated GlobalActiveStakeAmountNanos: ") + } + + // Store the updated ValidatorEntry. + bav._setValidatorEntryMappings(validatorEntry) + + // Store the updated GlobalActiveStakeAmountNanos. 
+ bav._setGlobalActiveStakeAmountNanos(currentGlobalActiveStakeAmountNanos) + + return nil } func (bav *UtxoView) _setValidatorEntryMappings(validatorEntry *ValidatorEntry) { @@ -1900,13 +1957,13 @@ func (bav *UtxoView) _deleteValidatorEntryMappings(validatorEntry *ValidatorEntr bav._setValidatorEntryMappings(&tombstoneEntry) } -func (bav *UtxoView) _setGlobalStakeAmountNanos(globalStakeAmountNanos *uint256.Int) { +func (bav *UtxoView) _setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos *uint256.Int) { // This function shouldn't be called with nil. - if globalStakeAmountNanos == nil { - glog.Errorf("_setGlobalStakeAmountNanos: called with nil entry, this should never happen") + if globalActiveStakeAmountNanos == nil { + glog.Errorf("_setGlobalActiveStakeAmountNanos: called with nil entry, this should never happen") return } - bav.GlobalStakeAmountNanos = globalStakeAmountNanos.Clone() + bav.GlobalActiveStakeAmountNanos = globalActiveStakeAmountNanos.Clone() } func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { @@ -1951,14 +2008,14 @@ func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHei return nil } -func (bav *UtxoView) _flushGlobalStakeAmountNanosToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { - // If GlobalStakeAmountNanos is nil, then it was never +func (bav *UtxoView) _flushGlobalActiveStakeAmountNanosToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + // If GlobalActiveStakeAmountNanos is nil, then it was never // set and shouldn't overwrite the value in the db. 
- if bav.GlobalStakeAmountNanos == nil { + if bav.GlobalActiveStakeAmountNanos == nil { return nil } - return DBPutGlobalStakeAmountNanosWithTxn(txn, bav.Snapshot, bav.GlobalStakeAmountNanos, blockHeight) + return DBPutGlobalActiveStakeAmountNanosWithTxn(txn, bav.Snapshot, bav.GlobalActiveStakeAmountNanos, blockHeight) } // diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 96126d100..33a4d210d 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -24,7 +24,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { var registerMetadata *RegisterAsValidatorMetadata var validatorEntry *ValidatorEntry var validatorEntries []*ValidatorEntry - var globalStakeAmountNanos *uint256.Int + var globalActiveStakeAmountNanos *uint256.Int var err error // Initialize fork heights. @@ -228,21 +228,17 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Equal(t, string(validatorEntry.ExtraData["TestKey"]), "TestValue1") } { - // Query: retrieve top active ValidatorEntries by stake - validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(0) + // Query: retrieve top active ValidatorEntries by stake. + // Should be empty since m0's TotalStakeAmountNanos is zero. 
+ validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(1) require.NoError(t, err) require.Empty(t, validatorEntries) - - validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(2) - require.NoError(t, err) - require.Len(t, validatorEntries, 1) - require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) } { - // Query: retrieve GlobalStakeAmountNanos - globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + // Query: retrieve GlobalActiveStakeAmountNanos + globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) } { // Happy path: update a validator @@ -298,10 +294,10 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Empty(t, validatorEntries) } { - // Query: retrieve GlobalStakeAmountNanos - globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + // Query: retrieve GlobalActiveStakeAmountNanos + globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) } // Flush mempool to the db and test rollbacks. @@ -786,12 +782,10 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) - // Verify top validators. + // Verify top validators is empty since m0's TotalStakeAmountNanos is zero. 
validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) - require.Len(t, validatorEntries, 1) - require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt()) + require.Empty(t, validatorEntries) } { // m1 registers as a validator. @@ -804,10 +798,10 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) - // Verify top validators. + // Verify top validators is empty since both validators' TotalStakeAmountNanos are zero. validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) - require.Len(t, validatorEntries, 2) + require.Empty(t, validatorEntries) } { // m2 registers as a validator. @@ -820,10 +814,10 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { _, err = _submitRegisterAsValidatorTxn(testMeta, m2Pub, m2Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) - // Verify top validators. + // Verify top validators is empty since all three validators' TotalStakeAmountNanos are zero. validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) - require.Len(t, validatorEntries, 3) + require.Empty(t, validatorEntries) } { // m3 stakes 100 DESO nanos with m0. @@ -891,13 +885,11 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { // Verify top validators. 
validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) - require.Len(t, validatorEntries, 3) + require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - require.Equal(t, validatorEntries[2].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(0)) } { // m2 unregisters as validator. @@ -907,11 +899,9 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { // Verify top validators. validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) - require.Len(t, validatorEntries, 2) + require.Len(t, validatorEntries, 1) require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - require.Equal(t, validatorEntries[1].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(0)) } { // m4 stakes with m1. @@ -1346,7 +1336,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { var stakeEntry *StakeEntry var lockedStakeEntry *LockedStakeEntry _ = lockedStakeEntry - var globalStakeAmountNanos *uint256.Int + var globalActiveStakeAmountNanos *uint256.Int var err error // Initialize balance model fork heights. 
@@ -1448,9 +1438,9 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NotNil(t, stakeEntry) require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(600)) - globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(600)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(600)) } { // m1 stakes with m0. @@ -1468,9 +1458,9 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NotNil(t, stakeEntry) require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(400)) - globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(1000)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(1000)) } { // m1 partially unstakes with m0. @@ -1494,10 +1484,10 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NotNil(t, lockedStakeEntry) require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(100)) - // GlobalStakeAmountNanos is updated. - globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + // GlobalActiveStakeAmountNanos is updated. + globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt().SetUint64(900)) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(900)) } { // m0 unregisters as a validator. 
@@ -1531,10 +1521,10 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NotNil(t, lockedStakeEntry) require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(400)) - // GlobalStakeAmountNanos is updated. - globalStakeAmountNanos, err = utxoView().GetGlobalStakeAmountNanos() + // GlobalActiveStakeAmountNanos is updated. + globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Equal(t, globalStakeAmountNanos, uint256.NewInt()) + require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) } // Flush mempool to the db and test rollbacks. @@ -1651,19 +1641,16 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // we cannot test rollbacks. We will run into an error where m0 is // trying to unjail himself, but he was never jailed. - // Delete m0's ValidatorEntry from the UtxoView. - delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) - delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) - - // Set JailedAtEpochNumber. - validatorEntry.JailedAtEpochNumber = currentEpochNumber - - // Store m0's ValidatorEntry in the db. + // Jail m0. tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) - tmpUtxoView._setValidatorEntryMappings(validatorEntry) + require.NoError(t, tmpUtxoView.JailValidator(validatorEntry)) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) + // Delete m0's ValidatorEntry from the UtxoView so that it is read from the db. + delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) + delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) + // Verify m0 is jailed. 
validatorEntry, err = utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) @@ -1948,19 +1935,17 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { // we cannot test rollbacks. We will run into an error where sender is // trying to unjail himself, but he was never jailed. - // Delete sender's ValidatorEntry from the UtxoView. - delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) - delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) - - // Set JailedAtEpochNumber. - validatorEntry.JailedAtEpochNumber = currentEpochNumber - - // Store sender's ValidatorEntry in the db. + // Jail the sender. tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) - tmpUtxoView._setValidatorEntryMappings(validatorEntry) + err = tmpUtxoView.JailValidator(validatorEntry) + require.NoError(t, err) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) + // Delete sender's ValidatorEntry from the UtxoView so that it is read from the db. + delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) + delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorEntry.ValidatorPKID) + // Verify sender is jailed. validatorEntry, err = newUtxoView().GetValidatorByPKID(senderPKID) require.NoError(t, err) diff --git a/lib/db_utils.go b/lib/db_utils.go index 2272501ca..d34842f82 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -486,9 +486,9 @@ type DBPrefixes struct { // Note that we save space by storing a nil value and parsing the ValidatorPKID from the key. PrefixValidatorByStake []byte `prefix_id:"[79]" is_state:"true"` - // PrefixGlobalStakeAmountNanos: Retrieve the cumulative stake across all validators. + // PrefixGlobalActiveStakeAmountNanos: Retrieve the cumulative stake across all validators. 
// Prefix -> *uint256.Int - PrefixGlobalStakeAmountNanos []byte `prefix_id:"[80]" is_state:"true"` + PrefixGlobalActiveStakeAmountNanos []byte `prefix_id:"[80]" is_state:"true"` // PrefixStakeByValidatorAndStaker: Retrieve a StakeEntry. // Prefix, ValidatorPKID, StakerPKID -> StakeEntry @@ -733,7 +733,7 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByStake) { // prefix_id:"[79]" return false, nil - } else if bytes.Equal(prefix, Prefixes.PrefixGlobalStakeAmountNanos) { + } else if bytes.Equal(prefix, Prefixes.PrefixGlobalActiveStakeAmountNanos) { // prefix_id:"[80]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixStakeByValidatorAndStaker) { From 99f8b2f31e192b8eacf4b89a45c21a905d3cf338 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 22 May 2023 13:33:14 -0400 Subject: [PATCH 047/762] Add tests for ToUint256. --- lib/pos_random_seed_test.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/pos_random_seed_test.go b/lib/pos_random_seed_test.go index fd84ffce9..04506d01a 100644 --- a/lib/pos_random_seed_test.go +++ b/lib/pos_random_seed_test.go @@ -4,6 +4,7 @@ package lib import ( "github.com/deso-protocol/core/bls" + "github.com/holiman/uint256" "github.com/stretchr/testify/require" "testing" ) @@ -81,4 +82,14 @@ func TestCurrentRandomSeedHash(t *testing.T) { require.False(t, randomSeedHash2.Eq(&RandomSeedHash{})) // The new RandomSeedHash is not the previous CurrentRandomSeedHash. require.False(t, randomSeedHash2.Eq(randomSeedHash1)) + + // Test RandomSeedHash.ToUint256(). Generates a valid uint256. + // Idempotent: generates the same uint256 each time. 
+ require.True(t, randomSeedHash1.ToUint256().Cmp(uint256.NewInt()) > 0) + require.True(t, randomSeedHash1.ToUint256().Cmp(MaxUint256) < 0) + require.True(t, randomSeedHash1.ToUint256().Eq(randomSeedHash1.ToUint256())) + require.True(t, randomSeedHash2.ToUint256().Cmp(uint256.NewInt()) > 0) + require.True(t, randomSeedHash2.ToUint256().Cmp(MaxUint256) < 0) + require.True(t, randomSeedHash2.ToUint256().Eq(randomSeedHash2.ToUint256())) + require.False(t, randomSeedHash1.ToUint256().Eq(randomSeedHash2.ToUint256())) } From 619613060c87c763cb47225b489bf84e4780fba8 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 22 May 2023 13:41:14 -0400 Subject: [PATCH 048/762] Remove TODO. --- lib/pos_random_seed.go | 19 +++++++++++++------ lib/pos_random_seed_test.go | 1 + 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index 6e7d3163d..f412a3cf4 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -11,6 +11,10 @@ import ( "io" ) +// +// TYPES: RandomSeedHash +// + type RandomSeedHash [32]byte func (randomSeedHash *RandomSeedHash) ToUint256() *uint256.Int { @@ -51,6 +55,10 @@ func DecodeRandomSeedHash(rr io.Reader) (*RandomSeedHash, error) { return (&RandomSeedHash{}).FromBytes(randomSeedHashBytes) } +// +// UTXO VIEW UTILS +// + func (bav *UtxoView) GenerateRandomSeedSignature(signerPrivateKey *bls.PrivateKey) (*bls.Signature, error) { currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash() if err != nil { @@ -105,9 +113,7 @@ func (bav *UtxoView) GetCurrentRandomSeedHash() (*RandomSeedHash, error) { } // If no RandomSeedHash is found in the UtxoView or db, return the // GenesisRandomSeedHash which is 32 bytes of zeroes. - // TODO: should we change this? should we store it as a constant hex and parse into a byte slice? 
- var genesisRandomSeedHashBytes [32]byte - return (&RandomSeedHash{}).FromBytes(genesisRandomSeedHashBytes[:]) + return &RandomSeedHash{}, nil } func (bav *UtxoView) _setCurrentRandomSeedHash(randomSeedHash *RandomSeedHash) { @@ -124,10 +130,13 @@ func (bav *UtxoView) _flushCurrentRandomSeedHashToDbWithTxn(txn *badger.Txn, blo if bav.CurrentRandomSeedHash == nil { return nil } - return DBPutCurrentRandomSeedHashWithTxn(txn, bav.Snapshot, bav.CurrentRandomSeedHash, blockHeight) } +// +// DB UTILS +// + func DBKeyForCurrentRandomSeedHash() []byte { return append([]byte{}, Prefixes.PrefixCurrentRandomSeedHash...) } @@ -153,7 +162,6 @@ func DBGetCurrentRandomSeedHashWithTxn(txn *badger.Txn, snap *Snapshot) (*Random } return nil, errors.Wrapf(err, "DBGetCurrentRandomSeedHashWithTxn: problem retrieving value") } - // Decode from bytes. return (&RandomSeedHash{}).FromBytes(currentRandomSeedHashBytes) } @@ -169,7 +177,6 @@ func DBPutCurrentRandomSeedHashWithTxn( glog.Errorf("DBPutCurrentRandomSeedHashWithTxn: called with nil CurrentRandomSeedHash") return nil } - key := DBKeyForCurrentRandomSeedHash() return DBSetWithTxn(txn, snap, key, currentRandomSeedHash.ToBytes()) } diff --git a/lib/pos_random_seed_test.go b/lib/pos_random_seed_test.go index 04506d01a..9fc4e3822 100644 --- a/lib/pos_random_seed_test.go +++ b/lib/pos_random_seed_test.go @@ -57,6 +57,7 @@ func TestCurrentRandomSeedHash(t *testing.T) { randomSeedHash1, err = utxoView.GetCurrentRandomSeedHash() require.NoError(t, err) require.True(t, randomSeedHash1.Eq(&RandomSeedHash{})) + require.True(t, randomSeedHash1.ToUint256().Eq(uint256.NewInt())) // PrivateKey1 generates a new RandomSeedSignature. randomSeedSignature1, err = utxoView.GenerateRandomSeedSignature(privateKey1) From d8874c65291cb483ddb6f4e86f60e9f3fcdc9053 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 22 May 2023 14:28:19 -0400 Subject: [PATCH 049/762] Store max num validators in leader schedule as param. 
--- lib/constants.go | 11 +++++++++++ lib/pos_leader_schedule.go | 22 ++++++++++------------ lib/pos_leader_schedule_test.go | 7 +++++++ 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/lib/constants.go b/lib/constants.go index 3ebccda33..52c651669 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -607,6 +607,11 @@ type DeSoParams struct { // TODO: Move this to GlobalParamsEntry. ValidatorJailEpochDuration uint64 + // LeaderScheduleMaxNumValidators is the maximum number of validators that + // are included when generating a new Proof-of-Stake leader schedule. + // TODO: Move this to GlobalParamsEntry. + LeaderScheduleMaxNumValidators uint64 + ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -985,6 +990,9 @@ var DeSoMainnetParams = DeSoParams{ // Jailed validators can be unjailed after a minimum of N elapsed epochs. ValidatorJailEpochDuration: uint64(3), + // The max number of validators included in a leader schedule. + LeaderScheduleMaxNumValidators: uint64(100), + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1219,6 +1227,9 @@ var DeSoTestnetParams = DeSoParams{ // Jailed validators can be unjailed after a minimum of N elapsed epochs. ValidatorJailEpochDuration: uint64(3), + // The max number of validators included in a leader schedule. 
+ LeaderScheduleMaxNumValidators: uint64(100), + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index a7d3851f9..5fe9d59f0 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -7,8 +7,6 @@ import ( ) func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { - numValidators := 100 // bav.Params.PoSLeaderScheduleNumValidators - // Retrieve CurrentRandomSeedHash. currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash() if err != nil { @@ -16,7 +14,7 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { } // Retrieve top, active validators ordered by stake. - validatorEntries, err := bav.GetTopActiveValidatorsByStake(int(numValidators)) + validatorEntries, err := bav.GetTopActiveValidatorsByStake(int(bav.Params.LeaderScheduleMaxNumValidators)) if err != nil { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving top ValidatorEntries: ") } @@ -34,15 +32,15 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { } // Pseudocode for leader-selection algorithm: - // While len(LeaderSchedule) < len(ValidatorEntries) - // Hash the CurrentRandomSeedHash to generate a new RandomUint256. - // RandomUint256 %= TotalStakeAmountNanos. - // For each ValidatorEntry... - // Skip if ValidatorEntry.TotalStakeAmountNanos is zero. - // Skip if ValidatorEntry has already been added to the leader schedule. - // If ValidatorEntry.TotalStakeAmountNanos >= RandomUint256: - // Add ValidatorEntry to LeaderSchedule. - // TotalStakeAmountNanos -= ValidatorEntry.TotalStakeAmountNanos. + // Note this is an O(N^2) algorithm where N is the number of validators we include. 
+ // While len(LeaderSchedule) < len(ValidatorEntries): + // Hash the CurrentRandomSeedHash and generate a new RandomUint256. + // Take RandomUint256 modulo TotalStakeAmountNanos. + // For each ValidatorEntry: + // Skip if ValidatorEntry has already been added to the leader schedule. + // If ValidatorEntry.TotalStakeAmountNanos >= RandomUint256: + // Add ValidatorEntry to LeaderSchedule. + // TotalStakeAmountNanos -= ValidatorEntry.TotalStakeAmountNanos. var leaderSchedule []*ValidatorEntry // We also track a set of ValidatorPKIDs that have already been diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 0fe3dd2c9..c83f43e45 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -286,6 +286,13 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.Equal(t, leaderSchedule[5].ValidatorPKID, testValidators[0].PKID) require.Equal(t, leaderSchedule[6].ValidatorPKID, testValidators[6].PKID) } + { + // Test changing params.LeaderScheduleMaxNumValidators. + params.LeaderScheduleMaxNumValidators = 5 + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + require.NoError(t, err) + require.Len(t, leaderSchedule, 5) + } // Test rollbacks. _executeAllTestRollbackAndFlush(testMeta) From f8c638befb15ed15eeb860ec76e5ec9e403e09a8 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 22 May 2023 14:52:56 -0400 Subject: [PATCH 050/762] Update tests. 
--- lib/pos_leader_schedule_test.go | 143 ++++++++++++++------------------ 1 file changed, 62 insertions(+), 81 deletions(-) diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index c83f43e45..c81843d0c 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -61,53 +61,36 @@ func TestGenerateLeaderSchedule(t *testing.T) { m6PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m6PkBytes).PKID // Helper utils - type TestValidator struct { - PublicKey string - PrivateKey string - PKID *PKID - } - - testValidators := []*TestValidator{ - {PublicKey: m0Pub, PrivateKey: m0Priv, PKID: m0PKID}, // Stake = 100 - {PublicKey: m1Pub, PrivateKey: m1Priv, PKID: m1PKID}, // Stake = 200 - {PublicKey: m2Pub, PrivateKey: m2Priv, PKID: m2PKID}, // Stake = 300 - {PublicKey: m3Pub, PrivateKey: m3Priv, PKID: m3PKID}, // Stake = 400 - {PublicKey: m4Pub, PrivateKey: m4Priv, PKID: m4PKID}, // Stake = 500 - {PublicKey: m5Pub, PrivateKey: m5Priv, PKID: m5PKID}, // Stake = 600 - {PublicKey: m6Pub, PrivateKey: m6Priv, PKID: m6PKID}, // Stake = 700 - } - newUtxoView := func() *UtxoView { utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) return utxoView } - registerValidator := func(testValidator *TestValidator) { - validatorPkBytes, _, err := Base58CheckDecode(testValidator.PublicKey) + registerValidator := func(publicKey string, privateKey string, stakeAmountNanos uint64) { + // Convert PublicKeyBase58Check to PublicKeyBytes. + pkBytes, _, err := Base58CheckDecode(publicKey) require.NoError(t, err) // Validator registers. 
- votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, validatorPkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, pkBytes, blockHeight) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", testValidator.PublicKey))}, + Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, VotingPublicKey: votingPublicKey, VotingPublicKeySignature: votingSignature, } - _, err = _submitRegisterAsValidatorTxn( - testMeta, testValidator.PublicKey, testValidator.PrivateKey, registerMetadata, nil, true, - ) + _, err = _submitRegisterAsValidatorTxn(testMeta, publicKey, privateKey, registerMetadata, nil, true) require.NoError(t, err) - } - stakeValidator := func(testValidator *TestValidator, stakeAmountNanos uint64) { - validatorPkBytes, _, err := Base58CheckDecode(testValidator.PublicKey) - require.NoError(t, err) + // Validator stakes to himself. + if stakeAmountNanos == 0 { + return + } stakeMetadata := &StakeMetadata{ - ValidatorPublicKey: NewPublicKey(validatorPkBytes), + ValidatorPublicKey: NewPublicKey(pkBytes), StakeAmountNanos: uint256.NewInt().SetUint64(stakeAmountNanos), } - _, err = _submitStakeTxn(testMeta, testValidator.PublicKey, testValidator.PrivateKey, stakeMetadata, nil, true) + _, err = _submitStakeTxn(testMeta, publicKey, privateKey, stakeMetadata, nil, true) require.NoError(t, err) } @@ -148,7 +131,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { } { // m0 registers as validator. - registerValidator(testValidators[0]) + registerValidator(m0Pub, m0Priv, 0) } { // Test GenerateLeaderSchedule() edge case: one registered validator with zero stake. @@ -158,36 +141,34 @@ func TestGenerateLeaderSchedule(t *testing.T) { } { // m0 stakes to himself. - stakeValidator(testValidators[0], 100) + registerValidator(m0Pub, m0Priv, 10) } { // Test GenerateLeaderSchedule() edge case: one registered validator with non-zero stake. 
leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 1) - require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[0].PKID) + require.Equal(t, leaderSchedule[0].ValidatorPKID, m0PKID) } { // m1 registers and stakes to himself. - registerValidator(testValidators[1]) - stakeValidator(testValidators[1], 200) + registerValidator(m1Pub, m1Priv, 20) } { // Test GenerateLeaderSchedule() edge case: two registered validators with non-zero stake. leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 2) - require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[0].PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[1].PKID) + require.Equal(t, leaderSchedule[0].ValidatorPKID, m1PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, m0PKID) } { // All remaining validators register and stake to themselves. - for index, testValidator := range testValidators { - registerValidator(testValidator) - if index > 1 { - stakeValidator(testValidator, uint64((index+1)*100)) - } - } + registerValidator(m2Pub, m2Priv, 30) + registerValidator(m3Pub, m3Priv, 40) + registerValidator(m4Pub, m4Priv, 500) + registerValidator(m5Pub, m5Priv, 600) + registerValidator(m6Pub, m6Priv, 700) } { // Verify GetTopActiveValidatorsByStake. 
@@ -204,23 +185,23 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.Equal(t, validatorEntries[0].TotalStakeAmountNanos.Uint64(), uint64(700)) require.Equal(t, validatorEntries[1].TotalStakeAmountNanos.Uint64(), uint64(600)) require.Equal(t, validatorEntries[2].TotalStakeAmountNanos.Uint64(), uint64(500)) - require.Equal(t, validatorEntries[3].TotalStakeAmountNanos.Uint64(), uint64(400)) - require.Equal(t, validatorEntries[4].TotalStakeAmountNanos.Uint64(), uint64(300)) - require.Equal(t, validatorEntries[5].TotalStakeAmountNanos.Uint64(), uint64(200)) - require.Equal(t, validatorEntries[6].TotalStakeAmountNanos.Uint64(), uint64(100)) + require.Equal(t, validatorEntries[3].TotalStakeAmountNanos.Uint64(), uint64(40)) + require.Equal(t, validatorEntries[4].TotalStakeAmountNanos.Uint64(), uint64(30)) + require.Equal(t, validatorEntries[5].TotalStakeAmountNanos.Uint64(), uint64(20)) + require.Equal(t, validatorEntries[6].TotalStakeAmountNanos.Uint64(), uint64(10)) } { // Test GenerateLeaderSchedule(). 
leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[3].PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[2].PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, testValidators[1].PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, testValidators[0].PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, testValidators[6].PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, testValidators[5].PKID) - require.Equal(t, leaderSchedule[6].ValidatorPKID, testValidators[4].PKID) + require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, m4PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, m3PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, m2PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, m1PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, m0PKID) } { // Seed a new CurrentRandomSeedHash. 
@@ -230,13 +211,13 @@ func TestGenerateLeaderSchedule(t *testing.T) { leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[6].PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[5].PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, testValidators[4].PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, testValidators[3].PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, testValidators[2].PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, testValidators[1].PKID) - require.Equal(t, leaderSchedule[6].ValidatorPKID, testValidators[0].PKID) + require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, m4PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, m3PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, m2PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, m1PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, m0PKID) } { // Seed a new CurrentRandomSeedHash. 
@@ -246,13 +227,13 @@ func TestGenerateLeaderSchedule(t *testing.T) { leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[6].PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[5].PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, testValidators[4].PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, testValidators[3].PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, testValidators[2].PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, testValidators[1].PKID) - require.Equal(t, leaderSchedule[6].ValidatorPKID, testValidators[0].PKID) + require.Equal(t, leaderSchedule[0].ValidatorPKID, m4PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, m3PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, m2PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, m1PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, m0PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, m6PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, m5PKID) } { // Seed a new CurrentRandomSeedHash. 
@@ -262,13 +243,13 @@ func TestGenerateLeaderSchedule(t *testing.T) { leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[3].PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[2].PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, testValidators[1].PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, testValidators[0].PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, testValidators[4].PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, testValidators[6].PKID) - require.Equal(t, leaderSchedule[6].ValidatorPKID, testValidators[5].PKID) + require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, m4PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, m3PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, m2PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, m1PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, m0PKID) } { // Seed a new CurrentRandomSeedHash. 
@@ -278,13 +259,13 @@ func TestGenerateLeaderSchedule(t *testing.T) { leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0].ValidatorPKID, testValidators[5].PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, testValidators[4].PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, testValidators[3].PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, testValidators[2].PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, testValidators[1].PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, testValidators[0].PKID) - require.Equal(t, leaderSchedule[6].ValidatorPKID, testValidators[6].PKID) + require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, m4PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, m3PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, m2PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, m1PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, m0PKID) } { // Test changing params.LeaderScheduleMaxNumValidators. From 3363dbc67f0a6deb20eb9bbf1f167b1a460c1567 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 22 May 2023 15:41:49 -0400 Subject: [PATCH 051/762] Fix bug. Break out of inner loop when leader added. --- lib/pos_leader_schedule.go | 7 ++++++ lib/pos_leader_schedule_test.go | 38 ++++++++++++++++----------------- 2 files changed, 26 insertions(+), 19 deletions(-) diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 5fe9d59f0..0ff6aee9f 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -41,6 +41,7 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { // If ValidatorEntry.TotalStakeAmountNanos >= RandomUint256: // Add ValidatorEntry to LeaderSchedule. // TotalStakeAmountNanos -= ValidatorEntry.TotalStakeAmountNanos. 
+ // Break out of the inner loop. var leaderSchedule []*ValidatorEntry // We also track a set of ValidatorPKIDs that have already been @@ -92,6 +93,12 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { if err != nil { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error subtracting TotalStakeAmountNanos: ") } + + // The current validator has been added to the leader schedule. + // Break out of this inner loop, generate a new RandomUint256, + // and find the next stake-weighted validator to add to the + // leader schedule. + break } } diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index c81843d0c..28ff4926a 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -198,8 +198,8 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) require.Equal(t, leaderSchedule[2].ValidatorPKID, m4PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, m3PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, m2PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, m2PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, m3PKID) require.Equal(t, leaderSchedule[5].ValidatorPKID, m1PKID) require.Equal(t, leaderSchedule[6].ValidatorPKID, m0PKID) } @@ -213,11 +213,11 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.Len(t, leaderSchedule, 7) require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, m4PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, m3PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, m3PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, m4PKID) require.Equal(t, leaderSchedule[4].ValidatorPKID, m2PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, m1PKID) - require.Equal(t, 
leaderSchedule[6].ValidatorPKID, m0PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, m0PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, m1PKID) } { // Seed a new CurrentRandomSeedHash. @@ -228,12 +228,12 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.NoError(t, err) require.Len(t, leaderSchedule, 7) require.Equal(t, leaderSchedule[0].ValidatorPKID, m4PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, m3PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, m2PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, m1PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, m0PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, m6PKID) - require.Equal(t, leaderSchedule[6].ValidatorPKID, m5PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, m6PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, m3PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, m1PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, m2PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, m0PKID) } { // Seed a new CurrentRandomSeedHash. 
@@ -244,10 +244,10 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.NoError(t, err) require.Len(t, leaderSchedule, 7) require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, m2PKID) require.Equal(t, leaderSchedule[2].ValidatorPKID, m4PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, m3PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, m2PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, m5PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, m3PKID) require.Equal(t, leaderSchedule[5].ValidatorPKID, m1PKID) require.Equal(t, leaderSchedule[6].ValidatorPKID, m0PKID) } @@ -262,10 +262,10 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) require.Equal(t, leaderSchedule[2].ValidatorPKID, m4PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, m3PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, m2PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, m1PKID) - require.Equal(t, leaderSchedule[6].ValidatorPKID, m0PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, m2PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, m1PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, m0PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, m3PKID) } { // Test changing params.LeaderScheduleMaxNumValidators. From 76cec560d434712e2973b0f170c31dbfd3ea78fb Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 22 May 2023 15:58:06 -0400 Subject: [PATCH 052/762] Test GenerateLeaderSchedule is idempotent. 
--- lib/pos_leader_schedule_test.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 28ff4926a..4daee9a58 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -266,6 +266,19 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.Equal(t, leaderSchedule[4].ValidatorPKID, m1PKID) require.Equal(t, leaderSchedule[5].ValidatorPKID, m0PKID) require.Equal(t, leaderSchedule[6].ValidatorPKID, m3PKID) + + // Test GenerateLeaderSchedule() is idempotent. Given the same CurrentRandomSeedHash + // and the same stake-weighted validators, we generate the same leader schedule. + leaderSchedule, err = newUtxoView().GenerateLeaderSchedule() + require.NoError(t, err) + require.Len(t, leaderSchedule, 7) + require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) + require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) + require.Equal(t, leaderSchedule[2].ValidatorPKID, m4PKID) + require.Equal(t, leaderSchedule[3].ValidatorPKID, m2PKID) + require.Equal(t, leaderSchedule[4].ValidatorPKID, m1PKID) + require.Equal(t, leaderSchedule[5].ValidatorPKID, m0PKID) + require.Equal(t, leaderSchedule[6].ValidatorPKID, m3PKID) } { // Test changing params.LeaderScheduleMaxNumValidators. From ed4f6d9c5bc0441e6039a8abc27058a221539035 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 23 May 2023 10:28:35 -0400 Subject: [PATCH 053/762] Use set of PKIDs not *PKIDs. 
--- lib/pos_leader_schedule.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 0ff6aee9f..45022d4d6 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -47,7 +47,7 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { // We also track a set of ValidatorPKIDs that have already been // added to the LeaderSchedule so that we can skip them when // iterating over ValidatorEntries in O(1) time. - leaderSchedulePKIDs := NewSet([]*PKID{}) + leaderSchedulePKIDs := NewSet([]PKID{}) for len(leaderSchedule) < len(validatorEntries) { // Hash the CurrentRandomSeedHash each iteration. This generates @@ -66,7 +66,7 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { for _, validatorEntry := range validatorEntries { // Skip if ValidatorEntry has already been added to the leader schedule. - if leaderSchedulePKIDs.Includes(validatorEntry.ValidatorPKID) { + if leaderSchedulePKIDs.Includes(*validatorEntry.ValidatorPKID) { continue } @@ -86,7 +86,7 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { // Add the current ValidatorEntry to the leaderSchedule. leaderSchedule = append(leaderSchedule, validatorEntry) - leaderSchedulePKIDs.Add(validatorEntry.ValidatorPKID) + leaderSchedulePKIDs.Add(*validatorEntry.ValidatorPKID) // Subtract the ValidatorEntry.TotalStakeAmountNanos from the TotalStakeAmountNanos. totalStakeAmountNanos, err = SafeUint256().Sub(totalStakeAmountNanos, validatorEntry.TotalStakeAmountNanos) From d759d0cade6b13789e9fb6036c1087cea191d75b Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 23 May 2023 13:37:07 -0400 Subject: [PATCH 054/762] Stub out EpochCompleteHook. 
--- lib/pos_epoch_complete_hook.go | 53 ++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 lib/pos_epoch_complete_hook.go diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go new file mode 100644 index 000000000..bb6568d5c --- /dev/null +++ b/lib/pos_epoch_complete_hook.go @@ -0,0 +1,53 @@ +package lib + +import "github.com/pkg/errors" + +func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) { + // Returns true if this is the last block in the current epoch. + currentEpochEntry, err := bav.GetCurrentEpochEntry() + if err != nil { + return false, errors.Wrapf(err, "UtxoView.IsEpochComplete: problem retrieving CurrentEpochEntry: ") + } + if currentEpochEntry == nil { + return false, errors.New("UtxoView.IsEpochComplete: CurrentEpochEntry is nil, this should never happen") + } + return currentEpochEntry.FinalBlockHeight == blockHeight, nil +} + +func (bav *UtxoView) RolloverEpochs(blockHeight uint64) error { + // Rolls-over the current epoch into a new one. Takes care of the associated snapshotting + accounting. + + // Sanity-check that the current block is the last block in the current epoch. + isLastBlockInCurrentEpoch, err := bav.IsLastBlockInCurrentEpoch(blockHeight) + if err != nil { + return errors.Wrapf(err, "UtxoView.RolloverEpochs: ") + } + if !isLastBlockInCurrentEpoch { + return errors.New("UtxoView.RolloverEpochs: called before current epoch is complete, this should never happen") + } + + // Snapshot the current GlobalParamsEntry. + // TODO + + // Snapshot the current validator set. + // TODO + + // Generate + store a leader schedule. + // TODO + + // Roll-over a new epoch by setting a new CurrentEpochEntry. 
+ currentEpochEntry, err := bav.GetCurrentEpochEntry() + if err != nil { + return errors.Wrapf(err, "UtxoView.RolloverEpochs: problem retrieving CurrentEpochEntry: ") + } + if currentEpochEntry == nil { + return errors.New("UtxoView.RolloverEpochs: CurrentEpochEntry is nil, this should never happen") + } + newEpochEntry := &EpochEntry{ + EpochNumber: currentEpochEntry.EpochNumber + 1, + FinalBlockHeight: blockHeight + 100, // TODO: read this duration from the GlobalParamsEntry. + } + bav._setCurrentEpochEntry(newEpochEntry) + + return nil +} From 2f90b2cdadecd45a7098c2a8796efc02482455fe Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 24 May 2023 09:18:57 -0400 Subject: [PATCH 055/762] Add fork height for starting to check epoch hook. --- lib/constants.go | 20 ++++++++++++++++++++ lib/pos_epoch_complete_hook.go | 4 ++++ 2 files changed, 24 insertions(+) diff --git a/lib/constants.go b/lib/constants.go index 3ebccda33..f3cb29da4 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -275,6 +275,9 @@ type ForkHeights struct { // UnregisterAsValidator, Stake, Unstake, and UnlockStake. ProofOfStakeNewTxnTypesBlockHeight uint32 + // ProofOfStakeBlockHeight defines the height at which we cut over from PoW to PoS. + ProofOfStakeBlockHeight uint32 + // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. 
} @@ -340,6 +343,7 @@ const ( AssociationsAndAccessGroupsMigration MigrationName = "AssociationsAndAccessGroupsMigration" BalanceModelMigration MigrationName = "BalanceModelMigration" ProofOfStakeNewTxnTypesMigration MigrationName = "ProofOfStakeNewTxnTypesMigration" + ProofOfStakeMigration MigrationName = "ProofOfStakeMigration" ) type EncoderMigrationHeights struct { @@ -356,6 +360,9 @@ type EncoderMigrationHeights struct { // This coincides with the ProofOfStakeNewTxnTypesBlockHeight ProofOfStakeNewTxnTypesMigration MigrationHeight + + // This coincides with the ProofOfStakeBlockHeight + ProofOfStakeMigration MigrationHeight } func GetEncoderMigrationHeights(forkHeights *ForkHeights) *EncoderMigrationHeights { @@ -385,8 +392,14 @@ func GetEncoderMigrationHeights(forkHeights *ForkHeights) *EncoderMigrationHeigh Height: uint64(forkHeights.ProofOfStakeNewTxnTypesBlockHeight), Name: ProofOfStakeNewTxnTypesMigration, }, + ProofOfStakeMigration: MigrationHeight{ + Version: 5, + Height: uint64(forkHeights.ProofOfStakeBlockHeight), + Name: ProofOfStakeMigration, + }, } } + func GetEncoderMigrationHeightsList(forkHeights *ForkHeights) ( _migrationHeightsList []*MigrationHeight) { @@ -641,6 +654,7 @@ var RegtestForkHeights = ForkHeights{ // genesis block was created using the utxo model. BalanceModelBlockHeight: uint32(1), ProofOfStakeNewTxnTypesBlockHeight: uint32(1), + ProofOfStakeBlockHeight: uint32(1), // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. @@ -796,6 +810,9 @@ var MainnetForkHeights = ForkHeights{ // FIXME: set to real block height when ready ProofOfStakeNewTxnTypesBlockHeight: uint32(math.MaxUint32), + // FIXME: set to real block height when ready + ProofOfStakeBlockHeight: uint32(math.MaxUint32), + // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. 
} @@ -1068,6 +1085,9 @@ var TestnetForkHeights = ForkHeights{ // FIXME: set to real block height when ready ProofOfStakeNewTxnTypesBlockHeight: uint32(math.MaxUint32), + // FIXME: set to real block height when ready + ProofOfStakeBlockHeight: uint32(math.MaxUint32), + // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index bb6568d5c..c21839c93 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -4,6 +4,10 @@ import "github.com/pkg/errors" func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) { // Returns true if this is the last block in the current epoch. + if blockHeight < uint64(bav.Params.ForkHeights.ProofOfStakeBlockHeight) { + // Return false if we are still using PoW and haven't cut over to PoS yet. + return false, nil + } currentEpochEntry, err := bav.GetCurrentEpochEntry() if err != nil { return false, errors.Wrapf(err, "UtxoView.IsEpochComplete: problem retrieving CurrentEpochEntry: ") From 5efd45ef4b654b6e29dbc91114b4cba745ef79dd Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 24 May 2023 09:52:50 -0400 Subject: [PATCH 056/762] Add snapshot GlobalActiveStakeAmountNanos logic. --- lib/block_view.go | 12 ++++ lib/block_view_flush.go | 3 + lib/db_utils.go | 9 ++- lib/pos_epoch_complete_hook.go | 31 +++++++--- lib/pos_snapshot_entries.go | 108 +++++++++++++++++++++++++++++++++ 5 files changed, 152 insertions(+), 11 deletions(-) create mode 100644 lib/pos_snapshot_entries.go diff --git a/lib/block_view.go b/lib/block_view.go index 3b3a0c497..e4d54ab9e 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -130,6 +130,10 @@ type UtxoView struct { // Current EpochEntry CurrentEpochEntry *EpochEntry + // SnapshotGlobalActiveStakeAmountNanos is a map of EpochNumber to GlobalActiveStakeAmountNanos. 
+ // It contains the snapshot value of the GlobalActiveStakeAmountNanos at the given EpochNumber. + SnapshotGlobalActiveStakeAmountNanos map[uint64]*uint256.Int + // The hash of the tip the view is currently referencing. Mainly used // for error-checking when doing a bulk operation on the view. TipHash *BlockHash @@ -235,6 +239,9 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // CurrentEpochEntry bav.CurrentEpochEntry = nil + + // SnapshotGlobalActiveStakeAmountNanos + bav.SnapshotGlobalActiveStakeAmountNanos = make(map[uint64]*uint256.Int) } func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { @@ -517,6 +524,11 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.CurrentEpochEntry = bav.CurrentEpochEntry.Copy() } + // Copy the SnapshotGlobalActiveStakeAmountNanos + for epochNumber, globalActiveStakeAmountNanos := range bav.SnapshotGlobalActiveStakeAmountNanos { + newView.SnapshotGlobalActiveStakeAmountNanos[epochNumber] = globalActiveStakeAmountNanos.Clone() + } + return newView, nil } diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 684e79efe..4fefc43a4 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -158,6 +158,9 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushCurrentEpochEntryToDbWithTxn(txn, blockHeight); err != nil { return err } + if err := bav._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { + return err + } return nil } diff --git a/lib/db_utils.go b/lib/db_utils.go index d34842f82..c22be56ff 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -525,7 +525,11 @@ type DBPrefixes struct { // Prefix -> EpochEntry PrefixCurrentEpoch []byte `prefix_id:"[83]" is_state:"true"` - // NEXT_TAG: 84 + // PrefixSnapshotGlobalActiveStakeAmountNanos: Retrieve the GlobalActiveStakeAmountNanos by EpochNumber. 
+ // Prefix, EpochNumber -> *uint256.Int + PrefixSnapshotGlobalActiveStakeAmountNanos []byte `prefix_id:"[84]" is_state:"true"` + + // NEXT_TAG: 85 } // StatePrefixToDeSoEncoder maps each state prefix to a DeSoEncoder type that is stored under that prefix. @@ -745,6 +749,9 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixCurrentEpoch) { // prefix_id:"[83]" return true, &EpochEntry{} + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanos) { + // prefix_id:"[84]" + return false, nil } return true, nil diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index c21839c93..49f2ff62a 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -1,6 +1,8 @@ package lib -import "github.com/pkg/errors" +import ( + "github.com/pkg/errors" +) func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) { // Returns true if this is the last block in the current epoch. @@ -30,28 +32,37 @@ func (bav *UtxoView) RolloverEpochs(blockHeight uint64) error { return errors.New("UtxoView.RolloverEpochs: called before current epoch is complete, this should never happen") } + // Retrieve the CurrentEpochEntry. + currentEpochEntry, err := bav.GetCurrentEpochEntry() + if err != nil { + return errors.Wrapf(err, "UtxoView.RolloverEpochs: problem retrieving CurrentEpochEntry: ") + } + if currentEpochEntry == nil { + return errors.New("UtxoView.RolloverEpochs: CurrentEpochEntry is nil, this should never happen") + } + // Snapshot the current GlobalParamsEntry. // TODO // Snapshot the current validator set. // TODO + // Snapshot the current GlobalActiveStakeAmountNanos. 
+ globalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() + if err != nil { + return errors.Wrapf(err, "UtxoView.RolloverEpochs: problem retrieving GlobalActiveStakeAmountNanos: ") + } + bav._setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos, currentEpochEntry.EpochNumber) + // Generate + store a leader schedule. // TODO // Roll-over a new epoch by setting a new CurrentEpochEntry. - currentEpochEntry, err := bav.GetCurrentEpochEntry() - if err != nil { - return errors.Wrapf(err, "UtxoView.RolloverEpochs: problem retrieving CurrentEpochEntry: ") - } - if currentEpochEntry == nil { - return errors.New("UtxoView.RolloverEpochs: CurrentEpochEntry is nil, this should never happen") - } - newEpochEntry := &EpochEntry{ + nextEpochEntry := &EpochEntry{ EpochNumber: currentEpochEntry.EpochNumber + 1, FinalBlockHeight: blockHeight + 100, // TODO: read this duration from the GlobalParamsEntry. } - bav._setCurrentEpochEntry(newEpochEntry) + bav._setCurrentEpochEntry(nextEpochEntry) return nil } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go new file mode 100644 index 000000000..90e9327ef --- /dev/null +++ b/lib/pos_snapshot_entries.go @@ -0,0 +1,108 @@ +package lib + +import ( + "bytes" + "fmt" + "github.com/dgraph-io/badger/v3" + "github.com/golang/glog" + "github.com/holiman/uint256" + "github.com/pkg/errors" +) + +// +// SnapshotGlobalActiveStakeAmountNanos: UTXO VIEW UTILS +// + +func (bav *UtxoView) GetSnapshotGlobalActiveStakeAmountNanos(epochNumber uint64) (*uint256.Int, error) { + // Check the UtxoView first. + if globalActiveStakeAmountNanos, exists := bav.SnapshotGlobalActiveStakeAmountNanos[epochNumber]; exists { + return globalActiveStakeAmountNanos, nil + } + // If we don't have it in the UtxoView, check the db. 
+ globalActiveStakeAmountNanos, err := DBGetSnapshotGlobalActiveStakeAmountNanos(bav.Handle, bav.Snapshot, epochNumber) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetSnapshotGlobalActiveStakeAmountNanos: problem retrieving SnapshotGlobalActiveStakeAmountNanos from db: ") + } + if globalActiveStakeAmountNanos != nil { + // Cache the result in the UtxoView. + bav.SnapshotGlobalActiveStakeAmountNanos[epochNumber] = globalActiveStakeAmountNanos + } + return globalActiveStakeAmountNanos, nil +} + +func (bav *UtxoView) _setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos *uint256.Int, epochNumber uint64) { + if globalActiveStakeAmountNanos == nil { + glog.Errorf("UtxoView._setSnapshotGlobalActiveStakeAmountNanos: called with nil entry, this should never happen") + } + bav.SnapshotGlobalActiveStakeAmountNanos[epochNumber] = globalActiveStakeAmountNanos.Clone() +} + +func (bav *UtxoView) _flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + for epochNumber, globalActiveStakeAmountNanos := range bav.SnapshotGlobalActiveStakeAmountNanos { + if globalActiveStakeAmountNanos == nil { + return fmt.Errorf("UtxoView._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn: found nil entry for epochNumber %d, this should never happen", epochNumber) + } + if err := DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn(txn, bav.Snapshot, globalActiveStakeAmountNanos, epochNumber, blockHeight); err != nil { + return errors.Wrapf(err, "UtxoView._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn: problem setting SnapshotGlobalActiveStakeAmountNanos for epochNumber %d: ", epochNumber) + } + } + return nil +} + +// +// SnapshotGlobalActiveStakeAmountNanos: DB UTILS +// + +func DBKeyForSnapshotGlobalActiveStakeAmountNanos(epochNumber uint64) []byte { + data := append([]byte{}, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanos...) + data = append(data, UintToBuf(epochNumber)...) 
+ return data +} + +func DBGetSnapshotGlobalActiveStakeAmountNanos(handle *badger.DB, snap *Snapshot, epochNumber uint64) (*uint256.Int, error) { + var ret *uint256.Int + err := handle.View(func(txn *badger.Txn) error { + var innerErr error + ret, innerErr = DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn(txn, snap, epochNumber) + return innerErr + }) + return ret, err +} + +func DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot, epochNumber uint64) (*uint256.Int, error) { + // Retrieve from db. + key := DBKeyForSnapshotGlobalActiveStakeAmountNanos(epochNumber) + globalActiveStakeAmountNanosBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + // We don't want to error if the key isn't found. Instead, return 0. + if err == badger.ErrKeyNotFound { + return uint256.NewInt(), nil + } + return nil, errors.Wrapf(err, "DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn: problem retrieving value") + } + + // Decode from bytes. + var globalActiveStakeAmountNanos *uint256.Int + rr := bytes.NewReader(globalActiveStakeAmountNanosBytes) + globalActiveStakeAmountNanos, err = VariableDecodeUint256(rr) + if err != nil { + return nil, errors.Wrapf(err, "DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn: problem decoding value") + } + return globalActiveStakeAmountNanos, nil +} + +func DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn( + txn *badger.Txn, + snap *Snapshot, + globalActiveStakeAmountNanos *uint256.Int, + epochNumber uint64, + blockHeight uint64, +) error { + if globalActiveStakeAmountNanos == nil { + // This should never happen but is a sanity check. 
+ glog.Errorf("DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn: called with nil GlobalActiveStakeAmountNanos") + return nil + } + key := DBKeyForSnapshotGlobalActiveStakeAmountNanos(epochNumber) + return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalActiveStakeAmountNanos)) +} From b06c7cf53e1561f74e3390cf26ac695905e385b1 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Thu, 25 May 2023 11:48:08 -0400 Subject: [PATCH 057/762] Add snapshot GlobalParamsEntry. --- lib/block_view.go | 12 ++++ lib/block_view_flush.go | 3 + lib/block_view_types.go | 11 ++++ lib/db_utils.go | 15 +++-- lib/pos_epoch_complete_hook.go | 2 +- lib/pos_snapshot_entries.go | 105 ++++++++++++++++++++++++++++++--- 6 files changed, 135 insertions(+), 13 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index e4d54ab9e..cbf7d80d3 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -130,6 +130,10 @@ type UtxoView struct { // Current EpochEntry CurrentEpochEntry *EpochEntry + // SnapshotGlobalParamsEntries is a map of EpochNumber to GlobalParamsEntry. + // It contains the snapshot value of the GlobalParamsEntry at the given EpochNumber. + SnapshotGlobalParamsEntries map[uint64]*GlobalParamsEntry + // SnapshotGlobalActiveStakeAmountNanos is a map of EpochNumber to GlobalActiveStakeAmountNanos. // It contains the snapshot value of the GlobalActiveStakeAmountNanos at the given EpochNumber. 
SnapshotGlobalActiveStakeAmountNanos map[uint64]*uint256.Int @@ -240,6 +244,9 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // CurrentEpochEntry bav.CurrentEpochEntry = nil + // SnapshotGlobalParamsEntries + bav.SnapshotGlobalParamsEntries = make(map[uint64]*GlobalParamsEntry) + // SnapshotGlobalActiveStakeAmountNanos bav.SnapshotGlobalActiveStakeAmountNanos = make(map[uint64]*uint256.Int) } @@ -524,6 +531,11 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.CurrentEpochEntry = bav.CurrentEpochEntry.Copy() } + // Copy the SnapshotGlobalParamsEntries + for epochNumber, globalParamsEntry := range bav.SnapshotGlobalParamsEntries { + newView.SnapshotGlobalParamsEntries[epochNumber] = globalParamsEntry.Copy() + } + // Copy the SnapshotGlobalActiveStakeAmountNanos for epochNumber, globalActiveStakeAmountNanos := range bav.SnapshotGlobalActiveStakeAmountNanos { newView.SnapshotGlobalActiveStakeAmountNanos[epochNumber] = globalActiveStakeAmountNanos.Clone() diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 4fefc43a4..4a256962c 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -158,6 +158,9 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushCurrentEpochEntryToDbWithTxn(txn, blockHeight); err != nil { return err } + if err := bav._flushSnapshotGlobalParamsEntryToDbWithTxn(txn, blockHeight); err != nil { + return err + } if err := bav._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { return err } diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 7641a34df..ccbab023a 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3774,6 +3774,17 @@ type GlobalParamsEntry struct { MaxNonceExpirationBlockHeightOffset uint64 } +func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { + return &GlobalParamsEntry{ + USDCentsPerBitcoin: gp.USDCentsPerBitcoin, + CreateProfileFeeNanos: 
gp.CreateProfileFeeNanos, + CreateNFTFeeNanos: gp.CreateNFTFeeNanos, + MaxCopiesPerNFT: gp.MaxCopiesPerNFT, + MinimumNetworkFeeNanosPerKB: gp.MinimumNetworkFeeNanosPerKB, + MaxNonceExpirationBlockHeightOffset: gp.MaxNonceExpirationBlockHeightOffset, + } +} + func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte diff --git a/lib/db_utils.go b/lib/db_utils.go index c22be56ff..e14cf783d 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -525,11 +525,15 @@ type DBPrefixes struct { // Prefix -> EpochEntry PrefixCurrentEpoch []byte `prefix_id:"[83]" is_state:"true"` - // PrefixSnapshotGlobalActiveStakeAmountNanos: Retrieve the GlobalActiveStakeAmountNanos by EpochNumber. + // PrefixSnapshotGlobalParamsEntryByEpochNumber: Retrieve a snapshot GlobalParamsEntry by EpochNumber. + // Prefix, EpochNumber -> *GlobalParamsEntry + PrefixSnapshotGlobalParamsEntryByEpochNumber []byte `prefix_id:"[84]" is_state:"true"` + + // PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber: Retrieve a snapshot GlobalActiveStakeAmountNanos by EpochNumber. // Prefix, EpochNumber -> *uint256.Int - PrefixSnapshotGlobalActiveStakeAmountNanos []byte `prefix_id:"[84]" is_state:"true"` + PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber []byte `prefix_id:"[85]" is_state:"true"` - // NEXT_TAG: 85 + // NEXT_TAG: 86 } // StatePrefixToDeSoEncoder maps each state prefix to a DeSoEncoder type that is stored under that prefix. 
@@ -749,8 +753,11 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixCurrentEpoch) { // prefix_id:"[83]" return true, &EpochEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanos) { + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalParamsEntryByEpochNumber) { // prefix_id:"[84]" + return true, &GlobalParamsEntry{} + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber) { + // prefix_id:"[85]" return false, nil } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 49f2ff62a..f64867d9e 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -42,7 +42,7 @@ func (bav *UtxoView) RolloverEpochs(blockHeight uint64) error { } // Snapshot the current GlobalParamsEntry. - // TODO + bav._setSnapshotGlobalParamsEntry(bav.GlobalParamsEntry, currentEpochEntry.EpochNumber) // Snapshot the current validator set. // TODO diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 90e9327ef..acabc34fa 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -10,7 +10,100 @@ import ( ) // -// SnapshotGlobalActiveStakeAmountNanos: UTXO VIEW UTILS +// SnapshotGlobalParamsEntry +// + +func (bav *UtxoView) GetSnapshotGlobalParamsEntry(epochNumber uint64) (*GlobalParamsEntry, error) { + // Check the UtxoView first. + if globalParamsEntry, exists := bav.SnapshotGlobalParamsEntries[epochNumber]; exists { + return globalParamsEntry, nil + } + // If we don't have it in the UtxoView, check the db. + globalParamsEntry, err := DBGetSnapshotGlobalParamsEntry(bav.Handle, bav.Snapshot, epochNumber) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetSnapshotGlobalParamsEntry: problem retrieving SnapshotGlobalParamsEntry from db: ") + } + if globalParamsEntry != nil { + // Cache the result in the UtxoView. 
+ bav._setSnapshotGlobalParamsEntry(globalParamsEntry, epochNumber) + } + return globalParamsEntry, nil +} + +func (bav *UtxoView) _setSnapshotGlobalParamsEntry(globalParamsEntry *GlobalParamsEntry, epochNumber uint64) { + if globalParamsEntry == nil { + glog.Errorf("UtxoView._setSnapshotGlobalParamsEntry: called with nil entry, this should never happen") + } + bav.SnapshotGlobalParamsEntries[epochNumber] = globalParamsEntry.Copy() +} + +func (bav *UtxoView) _flushSnapshotGlobalParamsEntryToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + for epochNumber, globalParamsEntry := range bav.SnapshotGlobalParamsEntries { + if globalParamsEntry == nil { + return fmt.Errorf("UtxoView._flushSnapshotGlobalParamsEntryToDbWithTxn: found nil entry for epochNumber %d, this should never happen", epochNumber) + } + if err := DBPutSnapshotGlobalParamsEntryWithTxn(txn, bav.Snapshot, globalParamsEntry, epochNumber, blockHeight); err != nil { + return errors.Wrapf(err, "UtxoView._flushSnapshotGlobalParamsEntryToDbWithTxn: problem setting SnapshotGlobalParamsEntry for epochNumber %d: ", epochNumber) + } + } + return nil +} + +func DBKeyForSnapshotGlobalParamsEntry(epochNumber uint64) []byte { + data := append([]byte{}, Prefixes.PrefixSnapshotGlobalParamsEntryByEpochNumber...) + data = append(data, UintToBuf(epochNumber)...) + return data +} + +func DBGetSnapshotGlobalParamsEntry(handle *badger.DB, snap *Snapshot, epochNumber uint64) (*GlobalParamsEntry, error) { + var ret *GlobalParamsEntry + err := handle.View(func(txn *badger.Txn) error { + var innerErr error + ret, innerErr = DBGetSnapshotGlobalParamsEntryWithTxn(txn, snap, epochNumber) + return innerErr + }) + return ret, err +} + +func DBGetSnapshotGlobalParamsEntryWithTxn(txn *badger.Txn, snap *Snapshot, epochNumber uint64) (*GlobalParamsEntry, error) { + // Retrieve from db. 
+ key := DBKeyForSnapshotGlobalParamsEntry(epochNumber) + globalParamsEntryBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + // We don't want to error if the key isn't found. Instead, return nil. + if err == badger.ErrKeyNotFound { + return nil, nil + } + return nil, errors.Wrapf(err, "DBGetSnapshotGlobalParamsEntryWithTxn: problem retrieving value") + } + + // Decode from bytes. + globalParamsEntry := &GlobalParamsEntry{} + rr := bytes.NewReader(globalParamsEntryBytes) + if exist, err := DecodeFromBytes(globalParamsEntry, rr); !exist || err != nil { + return nil, errors.Wrapf(err, "DBGetSnapshotGlobalParamsEntryWithTxn: problem decoding GlobalParamsEntry: ") + } + return globalParamsEntry, nil +} + +func DBPutSnapshotGlobalParamsEntryWithTxn( + txn *badger.Txn, + snap *Snapshot, + globalParamsEntry *GlobalParamsEntry, + epochNumber uint64, + blockHeight uint64, +) error { + if globalParamsEntry == nil { + // This should never happen but is a sanity check. + glog.Errorf("DBPutSnapshotGlobalParamsEntryWithTxn: called with nil GlobalParamsEntry, this should never happen") + return nil + } + key := DBKeyForSnapshotGlobalParamsEntry(epochNumber) + return DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, globalParamsEntry)) +} + +// +// SnapshotGlobalActiveStakeAmountNanos // func (bav *UtxoView) GetSnapshotGlobalActiveStakeAmountNanos(epochNumber uint64) (*uint256.Int, error) { @@ -25,7 +118,7 @@ func (bav *UtxoView) GetSnapshotGlobalActiveStakeAmountNanos(epochNumber uint64) } if globalActiveStakeAmountNanos != nil { // Cache the result in the UtxoView. 
- bav.SnapshotGlobalActiveStakeAmountNanos[epochNumber] = globalActiveStakeAmountNanos + bav._setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos, epochNumber) } return globalActiveStakeAmountNanos, nil } @@ -49,12 +142,8 @@ func (bav *UtxoView) _flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn * return nil } -// -// SnapshotGlobalActiveStakeAmountNanos: DB UTILS -// - func DBKeyForSnapshotGlobalActiveStakeAmountNanos(epochNumber uint64) []byte { - data := append([]byte{}, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanos...) + data := append([]byte{}, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber...) data = append(data, UintToBuf(epochNumber)...) return data } @@ -100,7 +189,7 @@ func DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn( ) error { if globalActiveStakeAmountNanos == nil { // This should never happen but is a sanity check. - glog.Errorf("DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn: called with nil GlobalActiveStakeAmountNanos") + glog.Errorf("DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn: called with nil GlobalActiveStakeAmountNanos, this should never happen") return nil } key := DBKeyForSnapshotGlobalActiveStakeAmountNanos(epochNumber) From c1224a7bafdc0be63bfd8bd1391e6c94d7caea42 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Thu, 25 May 2023 14:34:59 -0400 Subject: [PATCH 058/762] Start on test for epoch complete hook. 
--- lib/pos_epoch_complete_hook.go | 12 +- lib/pos_epoch_complete_hook_test.go | 225 ++++++++++++++++++++++++++++ 2 files changed, 231 insertions(+), 6 deletions(-) create mode 100644 lib/pos_epoch_complete_hook_test.go diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index f64867d9e..7680fd289 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -20,25 +20,25 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) return currentEpochEntry.FinalBlockHeight == blockHeight, nil } -func (bav *UtxoView) RolloverEpochs(blockHeight uint64) error { +func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Rolls-over the current epoch into a new one. Takes care of the associated snapshotting + accounting. // Sanity-check that the current block is the last block in the current epoch. isLastBlockInCurrentEpoch, err := bav.IsLastBlockInCurrentEpoch(blockHeight) if err != nil { - return errors.Wrapf(err, "UtxoView.RolloverEpochs: ") + return errors.Wrapf(err, "UtxoView.RunEpochCompleteHook: ") } if !isLastBlockInCurrentEpoch { - return errors.New("UtxoView.RolloverEpochs: called before current epoch is complete, this should never happen") + return errors.New("UtxoView.RunEpochCompleteHook: called before current epoch is complete, this should never happen") } // Retrieve the CurrentEpochEntry. currentEpochEntry, err := bav.GetCurrentEpochEntry() if err != nil { - return errors.Wrapf(err, "UtxoView.RolloverEpochs: problem retrieving CurrentEpochEntry: ") + return errors.Wrapf(err, "UtxoView.RunEpochCompleteHook: problem retrieving CurrentEpochEntry: ") } if currentEpochEntry == nil { - return errors.New("UtxoView.RolloverEpochs: CurrentEpochEntry is nil, this should never happen") + return errors.New("UtxoView.RunEpochCompleteHook: CurrentEpochEntry is nil, this should never happen") } // Snapshot the current GlobalParamsEntry. 
@@ -50,7 +50,7 @@ func (bav *UtxoView) RolloverEpochs(blockHeight uint64) error { // Snapshot the current GlobalActiveStakeAmountNanos. globalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() if err != nil { - return errors.Wrapf(err, "UtxoView.RolloverEpochs: problem retrieving GlobalActiveStakeAmountNanos: ") + return errors.Wrapf(err, "UtxoView.RunEpochCompleteHook: problem retrieving GlobalActiveStakeAmountNanos: ") } bav._setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos, currentEpochEntry.EpochNumber) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go new file mode 100644 index 000000000..2d06dfad0 --- /dev/null +++ b/lib/pos_epoch_complete_hook_test.go @@ -0,0 +1,225 @@ +//go:build relic + +package lib + +import ( + "fmt" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" + "testing" +) + +func TestIsLastBlockInCurrentEpoch(t *testing.T) { + var isLastBlockInCurrentEpoch bool + + // Initialize balance model fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + + // Initialize fork heights. + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStakeBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&params.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&params.ForkHeights) + + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + + // The BlockHeight is before the PoS fork height. + isLastBlockInCurrentEpoch, err = utxoView.IsLastBlockInCurrentEpoch(0) + require.NoError(t, err) + require.False(t, isLastBlockInCurrentEpoch) + + // The CurrentEpochEntry is nil. 
+ isLastBlockInCurrentEpoch, err = utxoView.IsLastBlockInCurrentEpoch(1) + require.Error(t, err) + require.Contains(t, err.Error(), "CurrentEpochEntry is nil, this should never happen") + require.False(t, isLastBlockInCurrentEpoch) + + // Seed a CurrentEpochEntry. + utxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: 5}) + require.NoError(t, utxoView.FlushToDb(1)) + + // The CurrentBlockHeight != CurrentEpochEntry.FinalBlockHeight. + isLastBlockInCurrentEpoch, err = utxoView.IsLastBlockInCurrentEpoch(4) + require.NoError(t, err) + require.False(t, isLastBlockInCurrentEpoch) + + // The CurrentBlockHeight == CurrentEpochEntry.FinalBlockHeight. + isLastBlockInCurrentEpoch, err = utxoView.IsLastBlockInCurrentEpoch(5) + require.NoError(t, err) + require.True(t, isLastBlockInCurrentEpoch) +} + +func TestRunEpochCompleteHook(t *testing.T) { + // Initialize balance model fork heights. + setBalanceModelBlockHeights() + defer resetBalanceModelBlockHeights() + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize fork heights. + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStakeBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&params.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&params.ForkHeights) + + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. 
+ blockHeight := uint64(chain.blockTip().Height) + 1 + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m5", senderPkString, m5Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m6", senderPkString, m6Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID + m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID + m3PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m3PkBytes).PKID + m4PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m4PkBytes).PKID + m5PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m5PkBytes).PKID + m6PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m6PkBytes).PKID + _, _, _, _, _, _, _ = m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID + + // Seed a CurrentEpochEntry and CurrentRandomSeedHash. + utxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight}) + require.NoError(t, utxoView.FlushToDb(blockHeight)) + + // Helper utils + _registerAndStake := func(publicKey string, privateKey string, stakeAmountNanos uint64) { + // Convert PublicKeyBase58Check to PublicKeyBytes. 
+ pkBytes, _, err := Base58CheckDecode(publicKey) + require.NoError(t, err) + + // Validator registers. + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, pkBytes, blockHeight) + registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, + VotingPublicKey: votingPublicKey, + VotingPublicKeySignature: votingSignature, + } + _, err = _submitRegisterAsValidatorTxn(testMeta, publicKey, privateKey, registerMetadata, nil, true) + require.NoError(t, err) + + // Validator stakes to himself. + if stakeAmountNanos == 0 { + return + } + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(pkBytes), + StakeAmountNanos: uint256.NewInt().SetUint64(stakeAmountNanos), + } + _, err = _submitStakeTxn(testMeta, publicKey, privateKey, stakeMetadata, nil, true) + require.NoError(t, err) + } + + { + // ParamUpdater set min fee rate + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + -1, + int64(testMeta.feeRateNanosPerKb), + -1, + -1, + -1, + ) + } + { + // All validators register + stake to themselves. + _registerAndStake(m0Pub, m0Priv, 100) + _registerAndStake(m1Pub, m1Priv, 200) + _registerAndStake(m2Pub, m2Priv, 300) + _registerAndStake(m3Pub, m3Priv, 400) + _registerAndStake(m4Pub, m4Priv, 500) + _registerAndStake(m5Pub, m5Priv, 600) + _registerAndStake(m6Pub, m6Priv, 700) + + validatorEntries, err := utxoView.GetTopActiveValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, validatorEntries, 7) + } + { + // Test SnapshotGlobalParamsEntry is nil. + snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(1) + require.NoError(t, err) + require.Nil(t, snapshotGlobalParamsEntry) + + // Test SnapshotValidatorByPKID is nil. 
+ // TODO + //for _, pkid := range []*PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} { + // snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorEntryByPKID(pkid, 1) + // require.NoError(t, err) + // require.Nil(t, snapshotValidatorEntry) + //} + + // Test SnapshotTopActiveValidatorsByStake is empty. + // TODO + + // Test SnapshotLeaderSchedule is nil. + // TODO + + // Test SnapshotGlobalActiveStakeAmountNanos is zero. + snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(1) + require.NoError(t, err) + require.Equal(t, snapshotGlobalActiveStakeAmountNanos, uint256.NewInt()) + } + { + // Test RunOnEpochCompleteHook(). + require.NoError(t, utxoView.FlushToDb(blockHeight)) + require.NoError(t, utxoView.RunEpochCompleteHook(blockHeight)) + require.NoError(t, utxoView.FlushToDb(blockHeight)) + } + { + // Test SnapshotGlobalParamsEntry is populated. + snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(1) + require.NoError(t, err) + require.NotNil(t, snapshotGlobalParamsEntry) + require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) + + // Test SnapshotValidatorByPKID is populated. + // TODO + + // Test SnapshotTopActiveValidatorsByStake is populated. + // TODO + + // Test SnapshotLeaderSchedule is populated. + // TODO + + // Test SnapshotGlobalActiveStakeAmountNanos is populated. + snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(1) + require.NoError(t, err) + require.Equal(t, snapshotGlobalActiveStakeAmountNanos, uint256.NewInt().SetUint64(2800)) + } +} From c486d2a1bcf5fdc69cf161ba7106a433a2457bcd Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 26 May 2023 11:14:02 -0400 Subject: [PATCH 059/762] Start work on snapshotting validator entries. 
--- lib/block_view.go | 12 +++++++ lib/pos_epoch_complete_hook.go | 11 ++++-- lib/pos_epoch_complete_hook_test.go | 8 ++--- lib/pos_snapshot_entries.go | 56 +++++++++++++++++++++++++++-- 4 files changed, 78 insertions(+), 9 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index cbf7d80d3..47f8d8929 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -134,6 +134,10 @@ type UtxoView struct { // It contains the snapshot value of the GlobalParamsEntry at the given EpochNumber. SnapshotGlobalParamsEntries map[uint64]*GlobalParamsEntry + // SnapshotValidatorEntries is a map of to a ValidatorEntry. + // It contains the snapshot value of a ValidatorEntry at the given EpochNumber. + SnapshotValidatorEntries map[SnapshotValidatorMapKey]*ValidatorEntry + // SnapshotGlobalActiveStakeAmountNanos is a map of EpochNumber to GlobalActiveStakeAmountNanos. // It contains the snapshot value of the GlobalActiveStakeAmountNanos at the given EpochNumber. SnapshotGlobalActiveStakeAmountNanos map[uint64]*uint256.Int @@ -247,6 +251,9 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // SnapshotGlobalParamsEntries bav.SnapshotGlobalParamsEntries = make(map[uint64]*GlobalParamsEntry) + // SnapshotValidatorEntries + bav.SnapshotValidatorEntries = make(map[SnapshotValidatorMapKey]*ValidatorEntry) + // SnapshotGlobalActiveStakeAmountNanos bav.SnapshotGlobalActiveStakeAmountNanos = make(map[uint64]*uint256.Int) } @@ -536,6 +543,11 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.SnapshotGlobalParamsEntries[epochNumber] = globalParamsEntry.Copy() } + // Copy the SnapshotValidatorEntries + for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { + newView.SnapshotValidatorEntries[mapKey] = validatorEntry.Copy() + } + // Copy the SnapshotGlobalActiveStakeAmountNanos for epochNumber, globalActiveStakeAmountNanos := range bav.SnapshotGlobalActiveStakeAmountNanos { newView.SnapshotGlobalActiveStakeAmountNanos[epochNumber] = 
globalActiveStakeAmountNanos.Clone() diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 7680fd289..35f968757 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -21,7 +21,7 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) } func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { - // Rolls-over the current epoch into a new one. Takes care of the associated snapshotting + accounting. + // Rolls-over the current epoch into a new one. Handles the associated snapshotting + accounting. // Sanity-check that the current block is the last block in the current epoch. isLastBlockInCurrentEpoch, err := bav.IsLastBlockInCurrentEpoch(blockHeight) @@ -44,7 +44,10 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Snapshot the current GlobalParamsEntry. bav._setSnapshotGlobalParamsEntry(bav.GlobalParamsEntry, currentEpochEntry.EpochNumber) - // Snapshot the current validator set. + // Snapshot the current validators by PKID. + // TODO + + // Snapshot the current validators by stake. // TODO // Snapshot the current GlobalActiveStakeAmountNanos. @@ -54,9 +57,11 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { } bav._setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos, currentEpochEntry.EpochNumber) - // Generate + store a leader schedule. + // Generate + snapshot a leader schedule. // TODO + // TODO: Is there any clean-up we should do here deleting old snapshots that are no longer useful? + // Roll-over a new epoch by setting a new CurrentEpochEntry. 
nextEpochEntry := &EpochEntry{ EpochNumber: currentEpochEntry.EpochNumber + 1, diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 2d06dfad0..a24634e65 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -109,7 +109,7 @@ func TestRunEpochCompleteHook(t *testing.T) { m6PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m6PkBytes).PKID _, _, _, _, _, _, _ = m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID - // Seed a CurrentEpochEntry and CurrentRandomSeedHash. + // Seed a CurrentEpochEntry. utxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight}) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -190,10 +190,10 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test SnapshotLeaderSchedule is nil. // TODO - // Test SnapshotGlobalActiveStakeAmountNanos is zero. + // Test SnapshotGlobalActiveStakeAmountNanos is nil. snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(1) require.NoError(t, err) - require.Equal(t, snapshotGlobalActiveStakeAmountNanos, uint256.NewInt()) + require.Nil(t, snapshotGlobalActiveStakeAmountNanos) } { // Test RunOnEpochCompleteHook(). @@ -206,7 +206,7 @@ func TestRunEpochCompleteHook(t *testing.T) { snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(1) require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) - require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) + //require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) // Test SnapshotValidatorByPKID is populated. 
// TODO diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index acabc34fa..aefc82447 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -102,6 +102,58 @@ func DBPutSnapshotGlobalParamsEntryWithTxn( return DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, globalParamsEntry)) } +// +// SnapshotValidatorEntry +// + +type SnapshotValidatorMapKey struct { + EpochNumber uint64 + ValidatorPKID PKID +} + +func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID, epochNumber uint64) (*ValidatorEntry, error) { + // Check the UtxoView first. + mapKey := SnapshotValidatorMapKey{EpochNumber: epochNumber, ValidatorPKID: *pkid} + if validatorEntry, exists := bav.SnapshotValidatorEntries[mapKey]; exists { + return validatorEntry, nil + } + // If we don't have it in the UtxoView, check the db. + validatorEntry, err := DBGetSnapshotValidatorByPKID(bav.Handle, bav.Snapshot, pkid, epochNumber) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetSnapshotValidatorByPKID: problem retrieving ValidatorEntry from db: ") + } + if validatorEntry != nil { + // Cache the result in the UtxoView. 
+ bav._setSnapshotValidatorEntry(validatorEntry, epochNumber) + } + return validatorEntry, nil +} + +func (bav *UtxoView) _setSnapshotValidatorEntry(validatorEntry *ValidatorEntry, epochNumber uint64) { + if validatorEntry == nil { + glog.Errorf("UtxoView._setSnapshotValidatorEntry: called with nil entry, this should never happen") + return + } + mapKey := SnapshotValidatorMapKey{EpochNumber: epochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID} + bav.SnapshotValidatorEntries[mapKey] = validatorEntry.Copy() +} + +func (bav *UtxoView) _flushSnapshotValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + for epochNumber, validatorEntry := range bav.SnapshotValidatorEntries { + if validatorEntry == nil { + return fmt.Errorf("UtxoView._flushSnapshotValidatorEntriesToDbWithTxn: found nil entry for epochNumber %d, this should never happen", epochNumber) + } + if err := DBPutSnapshotValidatorEntryWithTxn(txn, bav.Snapshot, validatorEntry, epochNumber, blockHeight); err != nil { + return errors.Wrapf(err, "UtxoView._flushSnapshotValidatorEntryToDbWithTxn: problem setting ValidatorEntry for epochNumber %d: ", epochNumber) + } + } + return nil +} + +// +// SnapshotValidatorsByStake +// + // // SnapshotGlobalActiveStakeAmountNanos // @@ -163,9 +215,9 @@ func DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn(txn *badger.Txn, snap *Sna key := DBKeyForSnapshotGlobalActiveStakeAmountNanos(epochNumber) globalActiveStakeAmountNanosBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { - // We don't want to error if the key isn't found. Instead, return 0. + // We don't want to error if the key isn't found. Instead, return nil. 
if err == badger.ErrKeyNotFound { - return uint256.NewInt(), nil + return nil, nil } return nil, errors.Wrapf(err, "DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn: problem retrieving value") } From 8136896456031c8f517d2a6b6ba184453ea4d14b Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 26 May 2023 11:20:24 -0400 Subject: [PATCH 060/762] Convert leader schedule to slice of PKIDs instead of ValidatorEntries. --- lib/pos_leader_schedule.go | 14 ++--- lib/pos_leader_schedule_test.go | 90 ++++++++++++++++----------------- 2 files changed, 52 insertions(+), 52 deletions(-) diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 45022d4d6..7996aaaad 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -6,7 +6,7 @@ import ( "github.com/pkg/errors" ) -func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { +func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) { // Retrieve CurrentRandomSeedHash. currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash() if err != nil { @@ -19,7 +19,7 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving top ValidatorEntries: ") } if len(validatorEntries) == 0 { - return []*ValidatorEntry{}, nil + return []*PKID{}, nil } // Sum TotalStakeAmountNanos. @@ -37,12 +37,12 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { // Hash the CurrentRandomSeedHash and generate a new RandomUint256. // Take RandomUint256 modulo TotalStakeAmountNanos. // For each ValidatorEntry: - // Skip if ValidatorEntry has already been added to the leader schedule. + // Skip if ValidatorPKID has already been added to the leader schedule. // If ValidatorEntry.TotalStakeAmountNanos >= RandomUint256: - // Add ValidatorEntry to LeaderSchedule. + // Add ValidatorPKID to LeaderSchedule. // TotalStakeAmountNanos -= ValidatorEntry.TotalStakeAmountNanos. 
// Break out of the inner loop. - var leaderSchedule []*ValidatorEntry + var leaderSchedule []*PKID // We also track a set of ValidatorPKIDs that have already been // added to the LeaderSchedule so that we can skip them when @@ -84,8 +84,8 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*ValidatorEntry, error) { // If we get to this point, the current validator is the // one we should add to the leader schedule next. - // Add the current ValidatorEntry to the leaderSchedule. - leaderSchedule = append(leaderSchedule, validatorEntry) + // Add the current ValidatorPKID to the leaderSchedule. + leaderSchedule = append(leaderSchedule, validatorEntry.ValidatorPKID) leaderSchedulePKIDs.Add(*validatorEntry.ValidatorPKID) // Subtract the ValidatorEntry.TotalStakeAmountNanos from the TotalStakeAmountNanos. diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 4daee9a58..bb5b9bdeb 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -148,7 +148,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 1) - require.Equal(t, leaderSchedule[0].ValidatorPKID, m0PKID) + require.Equal(t, leaderSchedule[0], m0PKID) } { // m1 registers and stakes to himself. @@ -159,8 +159,8 @@ func TestGenerateLeaderSchedule(t *testing.T) { leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 2) - require.Equal(t, leaderSchedule[0].ValidatorPKID, m1PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, m0PKID) + require.Equal(t, leaderSchedule[0], m1PKID) + require.Equal(t, leaderSchedule[1], m0PKID) } { // All remaining validators register and stake to themselves. 
@@ -195,13 +195,13 @@ func TestGenerateLeaderSchedule(t *testing.T) { leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, m4PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, m2PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, m3PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, m1PKID) - require.Equal(t, leaderSchedule[6].ValidatorPKID, m0PKID) + require.Equal(t, leaderSchedule[0], m6PKID) + require.Equal(t, leaderSchedule[1], m5PKID) + require.Equal(t, leaderSchedule[2], m4PKID) + require.Equal(t, leaderSchedule[3], m2PKID) + require.Equal(t, leaderSchedule[4], m3PKID) + require.Equal(t, leaderSchedule[5], m1PKID) + require.Equal(t, leaderSchedule[6], m0PKID) } { // Seed a new CurrentRandomSeedHash. @@ -211,13 +211,13 @@ func TestGenerateLeaderSchedule(t *testing.T) { leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, m3PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, m4PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, m2PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, m0PKID) - require.Equal(t, leaderSchedule[6].ValidatorPKID, m1PKID) + require.Equal(t, leaderSchedule[0], m6PKID) + require.Equal(t, leaderSchedule[1], m5PKID) + require.Equal(t, leaderSchedule[2], m3PKID) + require.Equal(t, leaderSchedule[3], m4PKID) + require.Equal(t, leaderSchedule[4], m2PKID) + require.Equal(t, leaderSchedule[5], m0PKID) + require.Equal(t, leaderSchedule[6], m1PKID) } { // Seed a new CurrentRandomSeedHash. 
@@ -227,13 +227,13 @@ func TestGenerateLeaderSchedule(t *testing.T) { leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0].ValidatorPKID, m4PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, m6PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, m3PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, m1PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, m2PKID) - require.Equal(t, leaderSchedule[6].ValidatorPKID, m0PKID) + require.Equal(t, leaderSchedule[0], m4PKID) + require.Equal(t, leaderSchedule[1], m5PKID) + require.Equal(t, leaderSchedule[2], m6PKID) + require.Equal(t, leaderSchedule[3], m3PKID) + require.Equal(t, leaderSchedule[4], m1PKID) + require.Equal(t, leaderSchedule[5], m2PKID) + require.Equal(t, leaderSchedule[6], m0PKID) } { // Seed a new CurrentRandomSeedHash. @@ -243,13 +243,13 @@ func TestGenerateLeaderSchedule(t *testing.T) { leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, m2PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, m4PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, m5PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, m3PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, m1PKID) - require.Equal(t, leaderSchedule[6].ValidatorPKID, m0PKID) + require.Equal(t, leaderSchedule[0], m6PKID) + require.Equal(t, leaderSchedule[1], m2PKID) + require.Equal(t, leaderSchedule[2], m4PKID) + require.Equal(t, leaderSchedule[3], m5PKID) + require.Equal(t, leaderSchedule[4], m3PKID) + require.Equal(t, leaderSchedule[5], m1PKID) + require.Equal(t, leaderSchedule[6], m0PKID) } { // Seed a new CurrentRandomSeedHash. 
@@ -259,26 +259,26 @@ func TestGenerateLeaderSchedule(t *testing.T) { leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, m4PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, m2PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, m1PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, m0PKID) - require.Equal(t, leaderSchedule[6].ValidatorPKID, m3PKID) + require.Equal(t, leaderSchedule[0], m6PKID) + require.Equal(t, leaderSchedule[1], m5PKID) + require.Equal(t, leaderSchedule[2], m4PKID) + require.Equal(t, leaderSchedule[3], m2PKID) + require.Equal(t, leaderSchedule[4], m1PKID) + require.Equal(t, leaderSchedule[5], m0PKID) + require.Equal(t, leaderSchedule[6], m3PKID) // Test GenerateLeaderSchedule() is idempotent. Given the same CurrentRandomSeedHash // and the same stake-weighted validators, we generate the same leader schedule. 
leaderSchedule, err = newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0].ValidatorPKID, m6PKID) - require.Equal(t, leaderSchedule[1].ValidatorPKID, m5PKID) - require.Equal(t, leaderSchedule[2].ValidatorPKID, m4PKID) - require.Equal(t, leaderSchedule[3].ValidatorPKID, m2PKID) - require.Equal(t, leaderSchedule[4].ValidatorPKID, m1PKID) - require.Equal(t, leaderSchedule[5].ValidatorPKID, m0PKID) - require.Equal(t, leaderSchedule[6].ValidatorPKID, m3PKID) + require.Equal(t, leaderSchedule[0], m6PKID) + require.Equal(t, leaderSchedule[1], m5PKID) + require.Equal(t, leaderSchedule[2], m4PKID) + require.Equal(t, leaderSchedule[3], m2PKID) + require.Equal(t, leaderSchedule[4], m1PKID) + require.Equal(t, leaderSchedule[5], m0PKID) + require.Equal(t, leaderSchedule[6], m3PKID) } { // Test changing params.LeaderScheduleMaxNumValidators. From 18588ac7f99db6b8605270653271bb1a7ab24b94 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 26 May 2023 14:06:37 -0400 Subject: [PATCH 061/762] Snapshot validator entries by PKID. --- lib/db_utils.go | 22 +++- lib/pos_epoch_complete_hook.go | 9 +- lib/pos_epoch_complete_hook_test.go | 29 +++--- lib/pos_snapshot_entries.go | 152 +++++++++++++++++++++++++--- 4 files changed, 177 insertions(+), 35 deletions(-) diff --git a/lib/db_utils.go b/lib/db_utils.go index e14cf783d..79c408b45 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -482,7 +482,7 @@ type DBPrefixes struct { PrefixValidatorByPKID []byte `prefix_id:"[78]" is_state:"true"` // PrefixValidatorByStake: Retrieve the top N validators by stake. - // Prefix, TotalStakeAmountNanos, ValidatorPKID -> nil + // Prefix, Status, TotalStakeAmountNanos, ValidatorPKID -> nil // Note that we save space by storing a nil value and parsing the ValidatorPKID from the key. 
PrefixValidatorByStake []byte `prefix_id:"[79]" is_state:"true"` @@ -529,11 +529,19 @@ type DBPrefixes struct { // Prefix, EpochNumber -> *GlobalParamsEntry PrefixSnapshotGlobalParamsEntryByEpochNumber []byte `prefix_id:"[84]" is_state:"true"` + // PrefixSnapshotValidatorByEpochNumberAndPKID: Retrieve a snapshot ValidatorEntry by EpochNumber + PKID. + // Prefix, EpochNumber, PKID -> *ValidatorEntry + PrefixSnapshotValidatorByEpochNumberAndPKID []byte `prefix_id:"[85]" is_state:"true"` + + // PrefixSnapshotValidatorByEpochNumberAndStake: Retrieve stake-ordered ValidatorEntries by EpochNumber. + // Prefix, EpochNumber, Status, TotalStakeAmountNanos, PKID -> *ValidatorEntry + PrefixSnapshotValidatorByEpochNumberAndStake []byte `prefix_id:"[86]" is_state:"true"` + // PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber: Retrieve a snapshot GlobalActiveStakeAmountNanos by EpochNumber. // Prefix, EpochNumber -> *uint256.Int - PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber []byte `prefix_id:"[85]" is_state:"true"` + PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber []byte `prefix_id:"[87]" is_state:"true"` - // NEXT_TAG: 86 + // NEXT_TAG: 88 } // StatePrefixToDeSoEncoder maps each state prefix to a DeSoEncoder type that is stored under that prefix. 
@@ -756,8 +764,14 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalParamsEntryByEpochNumber) { // prefix_id:"[84]" return true, &GlobalParamsEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber) { + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByEpochNumberAndPKID) { // prefix_id:"[85]" + return true, &ValidatorEntry{} + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByEpochNumberAndStake) { + // prefix_id:"[86]" + return true, &PKID{} + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber) { + // prefix_id:"[87]" return false, nil } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 35f968757..2a975a8fa 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -44,11 +44,10 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Snapshot the current GlobalParamsEntry. bav._setSnapshotGlobalParamsEntry(bav.GlobalParamsEntry, currentEpochEntry.EpochNumber) - // Snapshot the current validators by PKID. - // TODO - - // Snapshot the current validators by stake. - // TODO + // Snapshot the current ValidatorEntries. + if err = bav.SnapshotCurrentValidators(currentEpochEntry.EpochNumber); err != nil { + return errors.Wrapf(err, "UtxoView.RunEpochCompleteHook: problem snapshotting validators: ") + } // Snapshot the current GlobalActiveStakeAmountNanos. globalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index a24634e65..2a3fd5c68 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -177,23 +177,22 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Nil(t, snapshotGlobalParamsEntry) // Test SnapshotValidatorByPKID is nil. 
- // TODO - //for _, pkid := range []*PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} { - // snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorEntryByPKID(pkid, 1) - // require.NoError(t, err) - // require.Nil(t, snapshotValidatorEntry) - //} + for _, pkid := range []*PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} { + snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorByPKID(pkid, 1) + require.NoError(t, err) + require.Nil(t, snapshotValidatorEntry) + } // Test SnapshotTopActiveValidatorsByStake is empty. // TODO - // Test SnapshotLeaderSchedule is nil. - // TODO - // Test SnapshotGlobalActiveStakeAmountNanos is nil. snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(1) require.NoError(t, err) require.Nil(t, snapshotGlobalActiveStakeAmountNanos) + + // Test SnapshotLeaderSchedule is nil. + // TODO } { // Test RunOnEpochCompleteHook(). @@ -209,17 +208,21 @@ func TestRunEpochCompleteHook(t *testing.T) { //require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) // Test SnapshotValidatorByPKID is populated. - // TODO + for _, pkid := range []*PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} { + snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorByPKID(pkid, 1) + require.NoError(t, err) + require.NotNil(t, snapshotValidatorEntry) + } // Test SnapshotTopActiveValidatorsByStake is populated. // TODO - // Test SnapshotLeaderSchedule is populated. - // TODO - // Test SnapshotGlobalActiveStakeAmountNanos is populated. snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(1) require.NoError(t, err) require.Equal(t, snapshotGlobalActiveStakeAmountNanos, uint256.NewInt().SetUint64(2800)) + + // Test SnapshotLeaderSchedule is populated. 
+ // TODO } } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index aefc82447..b5dbbc7b0 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -50,9 +50,9 @@ func (bav *UtxoView) _flushSnapshotGlobalParamsEntryToDbWithTxn(txn *badger.Txn, } func DBKeyForSnapshotGlobalParamsEntry(epochNumber uint64) []byte { - data := append([]byte{}, Prefixes.PrefixSnapshotGlobalParamsEntryByEpochNumber...) - data = append(data, UintToBuf(epochNumber)...) - return data + key := append([]byte{}, Prefixes.PrefixSnapshotGlobalParamsEntryByEpochNumber...) + key = append(key, UintToBuf(epochNumber)...) + return key } func DBGetSnapshotGlobalParamsEntry(handle *badger.DB, snap *Snapshot, epochNumber uint64) (*GlobalParamsEntry, error) { @@ -111,6 +111,28 @@ type SnapshotValidatorMapKey struct { ValidatorPKID PKID } +func (bav *UtxoView) SnapshotCurrentValidators(epochNumber uint64) error { + // First, snapshot any !isDeleted ValidatorEntries in the UtxoView. + var utxoViewValidatorPKIDs []*PKID + for _, validatorEntry := range bav.ValidatorPKIDToValidatorEntry { + if !validatorEntry.isDeleted { + // We only want to snapshot !isDeleted ValidatorEntries. + bav._setSnapshotValidatorEntry(validatorEntry, epochNumber) + } + // We don't want to retrieve any ValidatorEntries from the db that are present in the UtxoView. + utxoViewValidatorPKIDs = append(utxoViewValidatorPKIDs, validatorEntry.ValidatorPKID) + } + // Second, snapshot the ValidatorEntries in the db (skipping any in the UtxoView). 
+ dbValidatorEntries, err := DBEnumerateAllCurrentValidators(bav.Handle, utxoViewValidatorPKIDs) + if err != nil { + return errors.Wrapf(err, "UtxoView.SnapshotValidators: problem retrieving ValidatorEntries: ") + } + for _, validatorEntry := range dbValidatorEntries { + bav._setSnapshotValidatorEntry(validatorEntry, epochNumber) + } + return nil +} + func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID, epochNumber uint64) (*ValidatorEntry, error) { // Check the UtxoView first. mapKey := SnapshotValidatorMapKey{EpochNumber: epochNumber, ValidatorPKID: *pkid} @@ -139,20 +161,124 @@ func (bav *UtxoView) _setSnapshotValidatorEntry(validatorEntry *ValidatorEntry, } func (bav *UtxoView) _flushSnapshotValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { - for epochNumber, validatorEntry := range bav.SnapshotValidatorEntries { + for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { if validatorEntry == nil { - return fmt.Errorf("UtxoView._flushSnapshotValidatorEntriesToDbWithTxn: found nil entry for epochNumber %d, this should never happen", epochNumber) + return fmt.Errorf( + "UtxoView._flushSnapshotValidatorEntriesToDbWithTxn: found nil entry for epochNumber %d, this should never happen", + mapKey.EpochNumber, + ) } - if err := DBPutSnapshotValidatorEntryWithTxn(txn, bav.Snapshot, validatorEntry, epochNumber, blockHeight); err != nil { - return errors.Wrapf(err, "UtxoView._flushSnapshotValidatorEntryToDbWithTxn: problem setting ValidatorEntry for epochNumber %d: ", epochNumber) + if err := DBPutSnapshotValidatorEntryWithTxn(txn, bav.Snapshot, validatorEntry, mapKey.EpochNumber, blockHeight); err != nil { + return errors.Wrapf( + err, + "UtxoView._flushSnapshotValidatorEntryToDbWithTxn: problem setting ValidatorEntry for epochNumber %d: ", + mapKey.EpochNumber, + ) } } return nil } -// -// SnapshotValidatorsByStake -// +func DBKeyForSnapshotValidatorByPKID(validatorEntry *ValidatorEntry, epochNumber uint64) []byte { + key := 
append([]byte{}, Prefixes.PrefixSnapshotValidatorByEpochNumberAndPKID...) + key = append(key, UintToBuf(epochNumber)...) + key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) + return key +} + +func DBKeyForSnapshotValidatorByStake(validatorEntry *ValidatorEntry, epochNumber uint64) []byte { + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByEpochNumberAndStake...) + key = append(key, UintToBuf(epochNumber)...) + key = append(key, EncodeUint8(uint8(validatorEntry.Status()))...) + key = append(key, FixedWidthEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) + key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) + return key +} + +func DBGetSnapshotValidatorByPKID(handle *badger.DB, snap *Snapshot, pkid *PKID, epochNumber uint64) (*ValidatorEntry, error) { + var ret *ValidatorEntry + err := handle.View(func(txn *badger.Txn) error { + var innerErr error + ret, innerErr = DBGetSnapshotValidatorByPKIDWithTxn(txn, snap, pkid, epochNumber) + return innerErr + }) + return ret, err +} + +func DBGetSnapshotValidatorByPKIDWithTxn(txn *badger.Txn, snap *Snapshot, pkid *PKID, epochNumber uint64) (*ValidatorEntry, error) { + // Retrieve ValidatorEntry from db. + key := DBKeyForSnapshotValidatorByPKID(&ValidatorEntry{ValidatorPKID: pkid}, epochNumber) + validatorBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + // We don't want to error if the key isn't found. Instead, return nil. + if err == badger.ErrKeyNotFound { + return nil, nil + } + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorByPKID: problem retrieving ValidatorEntry") + } + + // Decode ValidatorEntry from bytes. 
+ validatorEntry := &ValidatorEntry{} + rr := bytes.NewReader(validatorBytes) + if exist, err := DecodeFromBytes(validatorEntry, rr); !exist || err != nil { + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorByPKID: problem decoding ValidatorEntry") + } + return validatorEntry, nil +} + +func DBPutSnapshotValidatorEntryWithTxn( + txn *badger.Txn, + snap *Snapshot, + validatorEntry *ValidatorEntry, + epochNumber uint64, + blockHeight uint64, +) error { + if validatorEntry == nil { + // This should never happen but is a sanity check. + glog.Errorf("DBPutSnapshotValidatorEntryWithTxn: called with nil ValidatorEntry, this should never happen") + return nil + } + + // Put the ValidatorEntry in the SnapshotValidatorByPKID index. + key := DBKeyForSnapshotValidatorByPKID(validatorEntry, epochNumber) + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry)); err != nil { + return errors.Wrapf(err, "DBPutSnapshotValidatorEntryWithTxn: problem putting ValidatorEntry in the SnapshotValidatorByPKID index: ") + } + + // Put the ValidatorPKID in the SnapshotValidatorByStake index. + key = DBKeyForSnapshotValidatorByStake(validatorEntry, epochNumber) + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID)); err != nil { + return errors.Wrapf(err, "DBPutSnapshotValidatorEntryWithTxn: problem putting ValidatorPKID in the SnapshotValidatorByStake index: ") + } + + return nil +} + +func DBEnumerateAllCurrentValidators(handle *badger.DB, pkidsToSkip []*PKID) ([]*ValidatorEntry, error) { + // Convert []*PKIDs of validators to skip to a Set[string] of db keys to skip. + skipKeys := NewSet([]string{}) + for _, pkid := range pkidsToSkip { + skipKeys.Add(string(DBKeyForValidatorByPKID(&ValidatorEntry{ValidatorPKID: pkid}))) + } + // Retrieve all non-skipped validators. 
+ _, valsFound, err := EnumerateKeysForPrefixWithLimitOffsetOrder( + handle, Prefixes.PrefixValidatorByPKID, 0, nil, false, skipKeys, + ) + if err != nil { + return nil, errors.Wrapf(err, "DBEnumerateValidators: problem retrieving ValidatorEntries") + } + // Convert ValidatorEntryBytes to ValidatorEntries. + var validatorEntries []*ValidatorEntry + for _, validatorEntryBytes := range valsFound { + validatorEntry := &ValidatorEntry{} + rr := bytes.NewReader(validatorEntryBytes) + if exist, err := DecodeFromBytes(validatorEntry, rr); !exist || err != nil { + return nil, errors.Wrapf(err, "DBEnumerateValidators: problem decoding ValidatorEntry") + } + validatorEntries = append(validatorEntries, validatorEntry) + } + return validatorEntries, nil +} // // SnapshotGlobalActiveStakeAmountNanos @@ -195,9 +321,9 @@ func (bav *UtxoView) _flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn * } func DBKeyForSnapshotGlobalActiveStakeAmountNanos(epochNumber uint64) []byte { - data := append([]byte{}, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber...) - data = append(data, UintToBuf(epochNumber)...) - return data + key := append([]byte{}, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber...) + key = append(key, UintToBuf(epochNumber)...) + return key } func DBGetSnapshotGlobalActiveStakeAmountNanos(handle *badger.DB, snap *Snapshot, epochNumber uint64) (*uint256.Int, error) { From 337342f23a777198f81de0778f463908c06bd155 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 26 May 2023 14:12:50 -0400 Subject: [PATCH 062/762] Flush snapshot validator entries to db. 
--- lib/block_view_flush.go | 3 +++ lib/pos_epoch_complete_hook_test.go | 11 ++++++++++- lib/pos_snapshot_entries.go | 5 +++++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 4a256962c..869698fd0 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -161,6 +161,9 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushSnapshotGlobalParamsEntryToDbWithTxn(txn, blockHeight); err != nil { return err } + if err := bav._flushSnapshotValidatorEntriesToDbWithTxn(txn, blockHeight); err != nil { + return err + } if err := bav._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { return err } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 2a3fd5c68..eef08dd7b 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -171,6 +171,11 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Len(t, validatorEntries, 7) } { + // Test CurrentEpochNumber. + currentEpochNumber, err := utxoView.GetCurrentEpochNumber() + require.NoError(t, err) + require.Equal(t, currentEpochNumber, uint64(1)) + // Test SnapshotGlobalParamsEntry is nil. snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(1) require.NoError(t, err) @@ -196,11 +201,15 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook(). - require.NoError(t, utxoView.FlushToDb(blockHeight)) require.NoError(t, utxoView.RunEpochCompleteHook(blockHeight)) require.NoError(t, utxoView.FlushToDb(blockHeight)) } { + // Test CurrentEpochNumber. + currentEpochNumber, err := utxoView.GetCurrentEpochNumber() + require.NoError(t, err) + require.Equal(t, currentEpochNumber, uint64(2)) + // Test SnapshotGlobalParamsEntry is populated. 
snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(1) require.NoError(t, err) diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index b5dbbc7b0..f69fa06e9 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -151,6 +151,11 @@ func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID, epochNumber uint64) return validatorEntry, nil } +func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(epochNumber uint64) (*ValidatorEntry, error) { + // TODO + return nil, nil +} + func (bav *UtxoView) _setSnapshotValidatorEntry(validatorEntry *ValidatorEntry, epochNumber uint64) { if validatorEntry == nil { glog.Errorf("UtxoView._setSnapshotValidatorEntry: called with nil entry, this should never happen") From e6004872395397cef1b747b88e1b1bbb322d2fb7 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 26 May 2023 14:51:46 -0400 Subject: [PATCH 063/762] Snapshot validators by stake. --- lib/db_utils.go | 7 ++- lib/pos_epoch_complete_hook_test.go | 38 +++++++++++- lib/pos_snapshot_entries.go | 96 ++++++++++++++++++++++++++++- 3 files changed, 133 insertions(+), 8 deletions(-) diff --git a/lib/db_utils.go b/lib/db_utils.go index 79c408b45..045a7c2fa 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -530,11 +530,12 @@ type DBPrefixes struct { PrefixSnapshotGlobalParamsEntryByEpochNumber []byte `prefix_id:"[84]" is_state:"true"` // PrefixSnapshotValidatorByEpochNumberAndPKID: Retrieve a snapshot ValidatorEntry by EpochNumber + PKID. - // Prefix, EpochNumber, PKID -> *ValidatorEntry + // Prefix, EpochNumber, ValidatorPKID -> *ValidatorEntry PrefixSnapshotValidatorByEpochNumberAndPKID []byte `prefix_id:"[85]" is_state:"true"` // PrefixSnapshotValidatorByEpochNumberAndStake: Retrieve stake-ordered ValidatorEntries by EpochNumber. 
- // Prefix, EpochNumber, Status, TotalStakeAmountNanos, PKID -> *ValidatorEntry + // Prefix, EpochNumber, Status, TotalStakeAmountNanos, ValidatorPKID -> nil + // Note: we parse the ValidatorPKID from the key and the value is nil to save space. PrefixSnapshotValidatorByEpochNumberAndStake []byte `prefix_id:"[86]" is_state:"true"` // PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber: Retrieve a snapshot GlobalActiveStakeAmountNanos by EpochNumber. @@ -769,7 +770,7 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco return true, &ValidatorEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByEpochNumberAndStake) { // prefix_id:"[86]" - return true, &PKID{} + return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber) { // prefix_id:"[87]" return false, nil diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index eef08dd7b..698cc1775 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -189,7 +189,9 @@ func TestRunEpochCompleteHook(t *testing.T) { } // Test SnapshotTopActiveValidatorsByStake is empty. - // TODO + validatorEntries, err := utxoView.GetSnapshotTopActiveValidatorsByStake(10, 1) + require.NoError(t, err) + require.Empty(t, validatorEntries) // Test SnapshotGlobalActiveStakeAmountNanos is nil. snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(1) @@ -224,7 +226,13 @@ func TestRunEpochCompleteHook(t *testing.T) { } // Test SnapshotTopActiveValidatorsByStake is populated. 
- // TODO + validatorEntries, err := utxoView.GetSnapshotTopActiveValidatorsByStake(10, 1) + require.NoError(t, err) + require.Len(t, validatorEntries, 7) + require.Equal(t, validatorEntries[0].ValidatorPKID, m6PKID) + require.Equal(t, validatorEntries[6].ValidatorPKID, m0PKID) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(700)) + require.Equal(t, validatorEntries[6].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) // Test SnapshotGlobalActiveStakeAmountNanos is populated. snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(1) @@ -234,4 +242,30 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test SnapshotLeaderSchedule is populated. // TODO } + { + // Test SnapshotGlobalParamsEntry for a future epoch is nil. + snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(2) + require.NoError(t, err) + require.Nil(t, snapshotGlobalParamsEntry) + + // Test SnapshotValidatorByPKID for a future epoch is nil. + for _, pkid := range []*PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} { + snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorByPKID(pkid, 2) + require.NoError(t, err) + require.Nil(t, snapshotValidatorEntry) + } + + // Test SnapshotTopActiveValidatorsByStake for a future epoch is empty. + validatorEntries, err := utxoView.GetSnapshotTopActiveValidatorsByStake(10, 2) + require.NoError(t, err) + require.Empty(t, validatorEntries) + + // Test SnapshotGlobalActiveStakeAmountNanos for a future epoch is nil. + snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(2) + require.NoError(t, err) + require.Nil(t, snapshotGlobalActiveStakeAmountNanos) + + // Test SnapshotLeaderSchedule for a future epoch is nil. 
+ // TODO + } } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index f69fa06e9..ad69a9b3b 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -7,6 +7,8 @@ import ( "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" + "math" + "sort" ) // @@ -151,9 +153,50 @@ func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID, epochNumber uint64) return validatorEntry, nil } -func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(epochNumber uint64) (*ValidatorEntry, error) { - // TODO - return nil, nil +func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit int, epochNumber uint64) ([]*ValidatorEntry, error) { + if limit <= 0 { + return []*ValidatorEntry{}, nil + } + var utxoViewValidatorEntries []*ValidatorEntry + for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { + if mapKey.EpochNumber == epochNumber { + utxoViewValidatorEntries = append(utxoViewValidatorEntries, validatorEntry) + } + } + // Pull top N active ValidatorEntries from the database (not present in the UtxoView). + // Note that we will skip validators that are present in the view because we pass + // utxoViewValidatorEntries to the function. + dbValidatorEntries, err := DBGetSnapshotTopActiveValidatorsByStake(bav.Handle, bav.Snapshot, limit, epochNumber, utxoViewValidatorEntries) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetSnapshotTopActiveValidatorsByStake: error retrieving entries from db: ") + } + // Cache top N active ValidatorEntries from the db in the UtxoView. + for _, validatorEntry := range dbValidatorEntries { + // We only pull ValidatorEntries from the db that are not present in the + // UtxoView. As a sanity check, we double-check that the ValidatorEntry + // is not already in the UtxoView here. 
+ mapKey := SnapshotValidatorMapKey{EpochNumber: epochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID} + if _, exists := bav.SnapshotValidatorEntries[mapKey]; !exists { + bav._setValidatorEntryMappings(validatorEntry) + } + } + // Pull !isDeleted, active ValidatorEntries from the UtxoView with stake > 0. + var validatorEntries []*ValidatorEntry + for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { + if mapKey.EpochNumber == epochNumber && + !validatorEntry.isDeleted && + validatorEntry.Status() == ValidatorStatusActive && + !validatorEntry.TotalStakeAmountNanos.IsZero() { + validatorEntries = append(validatorEntries, validatorEntry) + } + } + // Sort the ValidatorEntries DESC by TotalStakeAmountNanos. + sort.Slice(validatorEntries, func(ii, jj int) bool { + return validatorEntries[ii].TotalStakeAmountNanos.Cmp(validatorEntries[jj].TotalStakeAmountNanos) > 0 + }) + // Return top N. + upperBound := int(math.Min(float64(limit), float64(len(validatorEntries)))) + return validatorEntries[0:upperBound], nil } func (bav *UtxoView) _setSnapshotValidatorEntry(validatorEntry *ValidatorEntry, epochNumber uint64) { @@ -231,6 +274,53 @@ func DBGetSnapshotValidatorByPKIDWithTxn(txn *badger.Txn, snap *Snapshot, pkid * return validatorEntry, nil } +func DBGetSnapshotTopActiveValidatorsByStake( + handle *badger.DB, + snap *Snapshot, + limit int, + epochNumber uint64, + validatorEntriesToSkip []*ValidatorEntry, +) ([]*ValidatorEntry, error) { + var validatorEntries []*ValidatorEntry + + // Convert ValidatorEntriesToSkip to ValidatorEntryKeysToSkip. + validatorKeysToSkip := NewSet([]string{}) + for _, validatorEntryToSkip := range validatorEntriesToSkip { + validatorKeysToSkip.Add(string(DBKeyForSnapshotValidatorByStake(validatorEntryToSkip, epochNumber))) + } + + // Retrieve top N active ValidatorEntry keys by stake. + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByEpochNumberAndStake...) + key = append(key, UintToBuf(epochNumber)...) 
+ key = append(key, EncodeUint8(uint8(ValidatorStatusActive))...) + keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( + handle, key, limit, nil, true, validatorKeysToSkip, + ) + if err != nil { + return nil, errors.Wrapf(err, "DBGetSnapshotTopActiveValidatorsByStake: problem retrieving top validators: ") + } + + // For each key found, parse the ValidatorPKID from the key, + // then retrieve the ValidatorEntry by the ValidatorPKID. + for _, keyFound := range keysFound { + // Parse the PKIDBytes from the key. The ValidatorPKID is the last component of the key. + validatorPKIDBytes := keyFound[len(keyFound)-PublicKeyLenCompressed:] + // Convert PKIDBytes to PKID. + validatorPKID := &PKID{} + if err = validatorPKID.FromBytes(bytes.NewReader(validatorPKIDBytes)); err != nil { + return nil, errors.Wrapf(err, "DBGetSnapshotTopActiveValidatorsByStake: problem reading ValidatorPKID: ") + } + // Retrieve ValidatorEntry by PKID. + validatorEntry, err := DBGetSnapshotValidatorByPKID(handle, snap, validatorPKID, epochNumber) + if err != nil { + return nil, errors.Wrapf(err, "DBGetSnapshotTopActiveValidatorsByStake: problem retrieving validator by PKID: ") + } + validatorEntries = append(validatorEntries, validatorEntry) + } + + return validatorEntries, nil +} + func DBPutSnapshotValidatorEntryWithTxn( txn *badger.Txn, snap *Snapshot, From 2d498cc3ad34a8054dc78e6a15f817774821357f Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 30 May 2023 10:40:51 -0400 Subject: [PATCH 064/762] Update comments re: determinism. --- lib/db_utils.go | 2 +- lib/pos_random_seed.go | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/db_utils.go b/lib/db_utils.go index 008ef1c19..409aa03ed 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -526,7 +526,7 @@ type DBPrefixes struct { PrefixCurrentEpoch []byte `prefix_id:"[83]" is_state:"true"` // PrefixCurrentRandomSeedHash: Retrieve the current RandomSeedHash. - // Prefix -> RandomSeedHash. 
+ // Prefix -> <RandomSeedHash>. PrefixCurrentRandomSeedHash []byte `prefix_id:"[84]" is_state:"true"` // NEXT_TAG: 85 diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index f412a3cf4..567025199 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -60,6 +60,9 @@ func DecodeRandomSeedHash(rr io.Reader) (*RandomSeedHash, error) { // func (bav *UtxoView) GenerateRandomSeedSignature(signerPrivateKey *bls.PrivateKey) (*bls.Signature, error) { + // This function generates a RandomSeedSignature by signing the CurrentRandomSeedHash + // with the provided bls.PrivateKey. This signature is deterministic: given the same + // CurrentRandomSeedHash and bls.PrivateKey, the same signature will always be generated. currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash() if err != nil { return nil, errors.Wrapf(err, "UtxoView.GenerateRandomSeedSignature: problem retrieving CurrentRandomSeedHash: ") } @@ -75,6 +78,11 @@ func (bav *UtxoView) VerifyRandomSeedSignature( signerPublicKey *bls.PublicKey, randomSeedSignature *bls.Signature, ) (*RandomSeedHash, error) { + // This function verifies that the provided RandomSeedSignature was signed by the corresponding + // bls.PrivateKey for the provided bls.PublicKey. If the RandomSeedSignature is invalid, we + // return an error. If the RandomSeedSignature is valid, we take the SHA256 of it to produce + // a RandomSeedHash, which is then returned. + // Verify the RandomSeedSignature. currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash() if err != nil { From 6d952ee6c08618f9eb667de86920560846b31701 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 30 May 2023 10:47:21 -0400 Subject: [PATCH 065/762] Use uint64 for get top validators limit. 
--- lib/block_view_validator.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 4bcddcbf2..f43adc5cc 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -584,7 +584,7 @@ func DBGetValidatorByPKIDWithTxn(txn *badger.Txn, snap *Snapshot, pkid *PKID) (* func DBGetTopActiveValidatorsByStake( handle *badger.DB, snap *Snapshot, - limit int, + limit uint64, validatorEntriesToSkip []*ValidatorEntry, ) ([]*ValidatorEntry, error) { var validatorEntries []*ValidatorEntry @@ -599,7 +599,7 @@ func DBGetTopActiveValidatorsByStake( key := append([]byte{}, Prefixes.PrefixValidatorByStake...) key = append(key, EncodeUint8(uint8(ValidatorStatusActive))...) keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( - handle, key, limit, nil, true, validatorKeysToSkip, + handle, key, int(limit), nil, true, validatorKeysToSkip, ) if err != nil { return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem retrieving top validators: ") @@ -1838,9 +1838,9 @@ func (bav *UtxoView) GetValidatorByPublicKey(validatorPublicKey *PublicKey) (*Va return validatorEntry, nil } -func (bav *UtxoView) GetTopActiveValidatorsByStake(limit int) ([]*ValidatorEntry, error) { +func (bav *UtxoView) GetTopActiveValidatorsByStake(limit uint64) ([]*ValidatorEntry, error) { // Validate limit param. - if limit <= 0 { + if limit == 0 { return []*ValidatorEntry{}, nil } // Create a slice of UtxoViewValidatorEntries. We want to skip pulling these from the database in From d10675e872b25bee7e41ac2126fdac88ddfa06aa Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 30 May 2023 10:49:12 -0400 Subject: [PATCH 066/762] Compare uint64 to uint64. 
--- lib/block_view_validator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index f43adc5cc..5d5082e81 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1840,7 +1840,7 @@ func (bav *UtxoView) GetValidatorByPublicKey(validatorPublicKey *PublicKey) (*Va func (bav *UtxoView) GetTopActiveValidatorsByStake(limit uint64) ([]*ValidatorEntry, error) { // Validate limit param. - if limit == 0 { + if limit == uint64(0) { return []*ValidatorEntry{}, nil } // Create a slice of UtxoViewValidatorEntries. We want to skip pulling these from the database in From 293ea24e65b696dfef55772d2dcd463138c5c404 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 30 May 2023 11:21:58 -0400 Subject: [PATCH 067/762] Address review feedback. --- lib/pos_leader_schedule.go | 2 +- lib/pos_leader_schedule_test.go | 22 ++++++++++++---------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 7996aaaad..2329773cc 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -38,7 +38,7 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) { // Take RandomUint256 modulo TotalStakeAmountNanos. // For each ValidatorEntry: // Skip if ValidatorPKID has already been added to the leader schedule. - // If ValidatorEntry.TotalStakeAmountNanos >= RandomUint256: + // If the sum of the ValidatorEntry.TotalStakeAmountNanos seen so far >= RandomUint256: // Add ValidatorPKID to LeaderSchedule. // TotalStakeAmountNanos -= ValidatorEntry.TotalStakeAmountNanos. // Break out of the inner loop. diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index bb5b9bdeb..1a8a3aae2 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -269,16 +269,18 @@ func TestGenerateLeaderSchedule(t *testing.T) { // Test GenerateLeaderSchedule() is idempotent. 
Given the same CurrentRandomSeedHash // and the same stake-weighted validators, we generate the same leader schedule. - leaderSchedule, err = newUtxoView().GenerateLeaderSchedule() - require.NoError(t, err) - require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0], m6PKID) - require.Equal(t, leaderSchedule[1], m5PKID) - require.Equal(t, leaderSchedule[2], m4PKID) - require.Equal(t, leaderSchedule[3], m2PKID) - require.Equal(t, leaderSchedule[4], m1PKID) - require.Equal(t, leaderSchedule[5], m0PKID) - require.Equal(t, leaderSchedule[6], m3PKID) + for ii := 0; ii < 10; ii++ { + leaderSchedule, err = newUtxoView().GenerateLeaderSchedule() + require.NoError(t, err) + require.Len(t, leaderSchedule, 7) + require.Equal(t, leaderSchedule[0], m6PKID) + require.Equal(t, leaderSchedule[1], m5PKID) + require.Equal(t, leaderSchedule[2], m4PKID) + require.Equal(t, leaderSchedule[3], m2PKID) + require.Equal(t, leaderSchedule[4], m1PKID) + require.Equal(t, leaderSchedule[5], m0PKID) + require.Equal(t, leaderSchedule[6], m3PKID) + } } { // Test changing params.LeaderScheduleMaxNumValidators. From a0567a77f8b96410b6d110b64aaf788911115bf1 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 30 May 2023 11:58:25 -0400 Subject: [PATCH 068/762] Test each generate leader schedule 10x. --- lib/pos_leader_schedule.go | 2 +- lib/pos_leader_schedule_test.go | 101 ++++++++++---------------------- 2 files changed, 33 insertions(+), 70 deletions(-) diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 2329773cc..bbd4e72ee 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -14,7 +14,7 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) { } // Retrieve top, active validators ordered by stake. 
- validatorEntries, err := bav.GetTopActiveValidatorsByStake(int(bav.Params.LeaderScheduleMaxNumValidators)) + validatorEntries, err := bav.GetTopActiveValidatorsByStake(bav.Params.LeaderScheduleMaxNumValidators) if err != nil { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving top ValidatorEntries: ") } diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 1a8a3aae2..c4c44ebe9 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -103,6 +103,21 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) } + testGenerateLeaderSchedule := func(expectedOrder []*PKID) { + // We test that GenerateLeaderSchedule() is idempotent by running it 10 times. + // Given the same CurrentRandomSeedHash and the same stake-weighted validators, + // we verify that we generate the same leader schedule each time. + for ii := 0; ii < 10; ii++ { + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + require.NoError(t, err) + require.Len(t, leaderSchedule, len(expectedOrder)) + + for index, pkid := range leaderSchedule { + require.Equal(t, pkid, expectedOrder[index]) + } + } + } + // Seed a CurrentEpochEntry. tmpUtxoView := newUtxoView() tmpUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) @@ -192,95 +207,43 @@ func TestGenerateLeaderSchedule(t *testing.T) { } { // Test GenerateLeaderSchedule(). 
- leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() - require.NoError(t, err) - require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0], m6PKID) - require.Equal(t, leaderSchedule[1], m5PKID) - require.Equal(t, leaderSchedule[2], m4PKID) - require.Equal(t, leaderSchedule[3], m2PKID) - require.Equal(t, leaderSchedule[4], m3PKID) - require.Equal(t, leaderSchedule[5], m1PKID) - require.Equal(t, leaderSchedule[6], m0PKID) + testGenerateLeaderSchedule([]*PKID{m6PKID, m5PKID, m4PKID, m2PKID, m3PKID, m1PKID, m0PKID}) } { // Seed a new CurrentRandomSeedHash. setCurrentRandomSeedHash("3b4b028b-6a7c-4b38-bea3-a5f59b34e02d") - // Test GenerateLeaderSchedule(). - leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() - require.NoError(t, err) - require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0], m6PKID) - require.Equal(t, leaderSchedule[1], m5PKID) - require.Equal(t, leaderSchedule[2], m3PKID) - require.Equal(t, leaderSchedule[3], m4PKID) - require.Equal(t, leaderSchedule[4], m2PKID) - require.Equal(t, leaderSchedule[5], m0PKID) - require.Equal(t, leaderSchedule[6], m1PKID) + testGenerateLeaderSchedule([]*PKID{m6PKID, m5PKID, m3PKID, m4PKID, m2PKID, m0PKID, m1PKID}) } { // Seed a new CurrentRandomSeedHash. setCurrentRandomSeedHash("b4b38eaf-216d-4132-8725-a481baaf87cc") - // Test GenerateLeaderSchedule(). - leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() - require.NoError(t, err) - require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0], m4PKID) - require.Equal(t, leaderSchedule[1], m5PKID) - require.Equal(t, leaderSchedule[2], m6PKID) - require.Equal(t, leaderSchedule[3], m3PKID) - require.Equal(t, leaderSchedule[4], m1PKID) - require.Equal(t, leaderSchedule[5], m2PKID) - require.Equal(t, leaderSchedule[6], m0PKID) + testGenerateLeaderSchedule([]*PKID{m4PKID, m5PKID, m6PKID, m3PKID, m1PKID, m2PKID, m0PKID}) } { // Seed a new CurrentRandomSeedHash. 
setCurrentRandomSeedHash("7c87f290-d9ec-4cb4-ad47-c64c8ca46f0e") - // Test GenerateLeaderSchedule(). - leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() - require.NoError(t, err) - require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0], m6PKID) - require.Equal(t, leaderSchedule[1], m2PKID) - require.Equal(t, leaderSchedule[2], m4PKID) - require.Equal(t, leaderSchedule[3], m5PKID) - require.Equal(t, leaderSchedule[4], m3PKID) - require.Equal(t, leaderSchedule[5], m1PKID) - require.Equal(t, leaderSchedule[6], m0PKID) + testGenerateLeaderSchedule([]*PKID{m6PKID, m2PKID, m4PKID, m5PKID, m3PKID, m1PKID, m0PKID}) } { // Seed a new CurrentRandomSeedHash. setCurrentRandomSeedHash("0999a3ce-15e4-455a-b061-6081b88b237d") - // Test GenerateLeaderSchedule(). - leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() - require.NoError(t, err) - require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0], m6PKID) - require.Equal(t, leaderSchedule[1], m5PKID) - require.Equal(t, leaderSchedule[2], m4PKID) - require.Equal(t, leaderSchedule[3], m2PKID) - require.Equal(t, leaderSchedule[4], m1PKID) - require.Equal(t, leaderSchedule[5], m0PKID) - require.Equal(t, leaderSchedule[6], m3PKID) - - // Test GenerateLeaderSchedule() is idempotent. Given the same CurrentRandomSeedHash - // and the same stake-weighted validators, we generate the same leader schedule. 
- for ii := 0; ii < 10; ii++ { - leaderSchedule, err = newUtxoView().GenerateLeaderSchedule() - require.NoError(t, err) - require.Len(t, leaderSchedule, 7) - require.Equal(t, leaderSchedule[0], m6PKID) - require.Equal(t, leaderSchedule[1], m5PKID) - require.Equal(t, leaderSchedule[2], m4PKID) - require.Equal(t, leaderSchedule[3], m2PKID) - require.Equal(t, leaderSchedule[4], m1PKID) - require.Equal(t, leaderSchedule[5], m0PKID) - require.Equal(t, leaderSchedule[6], m3PKID) - } + testGenerateLeaderSchedule([]*PKID{m6PKID, m5PKID, m4PKID, m2PKID, m1PKID, m0PKID, m3PKID}) + } + { + // Seed a new CurrentRandomSeedHash. + setCurrentRandomSeedHash("dbfffc42-3c40-49c4-a3df-cfbd2606cce2") + // Test GenerateLeaderSchedule(). + testGenerateLeaderSchedule([]*PKID{m6PKID, m5PKID, m4PKID, m3PKID, m0PKID, m2PKID, m1PKID}) + } + { + // Seed a new CurrentRandomSeedHash. + setCurrentRandomSeedHash("ceea0ad8-7277-4468-a0a1-8bacb78b01ca") + // Test GenerateLeaderSchedule(). + testGenerateLeaderSchedule([]*PKID{m3PKID, m5PKID, m6PKID, m4PKID, m2PKID, m1PKID, m0PKID}) } { // Test changing params.LeaderScheduleMaxNumValidators. From 630aa608257f8ab9687d0b4758b080211c7a2392 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 30 May 2023 13:51:32 -0400 Subject: [PATCH 069/762] Generate + snapshot leader schedule. --- lib/block_view.go | 13 ++++ lib/block_view_flush.go | 3 + lib/db_utils.go | 9 ++- lib/pos_epoch_complete_hook.go | 12 +++- lib/pos_epoch_complete_hook_test.go | 26 +++++-- lib/pos_snapshot_entries.go | 108 ++++++++++++++++++++++++++++ 6 files changed, 162 insertions(+), 9 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index a35a246f7..9bbc005ed 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -145,6 +145,11 @@ type UtxoView struct { // It contains the snapshot value of the GlobalActiveStakeAmountNanos at the given EpochNumber. SnapshotGlobalActiveStakeAmountNanos map[uint64]*uint256.Int + // SnapshotLeaderSchedule is a map of to a ValidatorPKID. 
+ // It contains the PKID of the validator at the given index in the leader schedule + // generated at the given EpochNumber. + SnapshotLeaderSchedule map[SnapshotLeaderScheduleMapKey]*PKID + // The hash of the tip the view is currently referencing. Mainly used // for error-checking when doing a bulk operation on the view. TipHash *BlockHash @@ -259,6 +264,9 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // SnapshotGlobalActiveStakeAmountNanos bav.SnapshotGlobalActiveStakeAmountNanos = make(map[uint64]*uint256.Int) + + // SnapshotLeaderSchedule + bav.SnapshotLeaderSchedule = make(map[SnapshotLeaderScheduleMapKey]*PKID) } func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { @@ -561,6 +569,11 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.SnapshotGlobalActiveStakeAmountNanos[epochNumber] = globalActiveStakeAmountNanos.Clone() } + // Copy the SnapshotLeaderSchedule + for mapKey, validatorPKID := range bav.SnapshotLeaderSchedule { + newView.SnapshotLeaderSchedule[mapKey] = validatorPKID.NewPKID() + } + return newView, nil } diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 0e98af659..b414e5fc4 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -170,6 +170,9 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { return err } + if err := bav._flushSnapshotLeaderScheduleToDbWithTxn(txn, blockHeight); err != nil { + return err + } return nil } diff --git a/lib/db_utils.go b/lib/db_utils.go index c4d3b824c..bf95faaab 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -546,7 +546,11 @@ type DBPrefixes struct { // Prefix, EpochNumber -> *uint256.Int PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber []byte `prefix_id:"[88]" is_state:"true"` - // NEXT_TAG: 89 + // PrefixSnapshotLeaderSchedule: Retrieve a ValidatorPKID by . 
+ // Prefix, EpochNumber, LeaderIndex -> ValidatorPKID + PrefixSnapshotLeaderSchedule []byte `prefix_id:"[89]" is_state:"true"` + + // NEXT_TAG: 90 } // StatePrefixToDeSoEncoder maps each state prefix to a DeSoEncoder type that is stored under that prefix. @@ -781,6 +785,9 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber) { // prefix_id:"[88]" return false, nil + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotLeaderSchedule) { + // prefix_id:"[89]" + return true, &PKID{} } return true, nil diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 2a975a8fa..ca5a43152 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -2,6 +2,7 @@ package lib import ( "github.com/pkg/errors" + "math" ) func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) { @@ -57,7 +58,16 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { bav._setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos, currentEpochEntry.EpochNumber) // Generate + snapshot a leader schedule. - // TODO + leaderSchedule, err := bav.GenerateLeaderSchedule() + if err != nil { + return errors.Wrapf(err, "UtxoView.RunEpochCompleteHook: problem generating leader schedule: ") + } + for index, validatorPKID := range leaderSchedule { + if index > math.MaxUint8 { + return errors.Errorf("UtxoView.RunEpochCompleteHook: LeaderIndex %d overflows uint8", index) + } + bav._setSnapshotLeaderScheduleValidator(validatorPKID, uint8(index), currentEpochEntry.EpochNumber) + } // TODO: Is there any clean-up we should do here deleting old snapshots that are no longer useful? 
diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 698cc1775..a48e2ed18 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -107,7 +107,7 @@ func TestRunEpochCompleteHook(t *testing.T) { m4PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m4PkBytes).PKID m5PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m5PkBytes).PKID m6PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m6PkBytes).PKID - _, _, _, _, _, _, _ = m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID + validatorPKIDs := []*PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} // Seed a CurrentEpochEntry. utxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight}) @@ -182,7 +182,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Nil(t, snapshotGlobalParamsEntry) // Test SnapshotValidatorByPKID is nil. - for _, pkid := range []*PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} { + for _, pkid := range validatorPKIDs { snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorByPKID(pkid, 1) require.NoError(t, err) require.Nil(t, snapshotValidatorEntry) @@ -199,7 +199,11 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Nil(t, snapshotGlobalActiveStakeAmountNanos) // Test SnapshotLeaderSchedule is nil. - // TODO + for index, _ := range validatorPKIDs { + snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint8(index), 1) + require.NoError(t, err) + require.Nil(t, snapshotLeaderScheduleValidator) + } } { // Test RunOnEpochCompleteHook(). @@ -219,7 +223,7 @@ func TestRunEpochCompleteHook(t *testing.T) { //require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) // Test SnapshotValidatorByPKID is populated. 
- for _, pkid := range []*PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} { + for _, pkid := range validatorPKIDs { snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorByPKID(pkid, 1) require.NoError(t, err) require.NotNil(t, snapshotValidatorEntry) @@ -240,7 +244,11 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, snapshotGlobalActiveStakeAmountNanos, uint256.NewInt().SetUint64(2800)) // Test SnapshotLeaderSchedule is populated. - // TODO + for index, _ := range validatorPKIDs { + snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint8(index), 1) + require.NoError(t, err) + require.NotNil(t, snapshotLeaderScheduleValidator) + } } { // Test SnapshotGlobalParamsEntry for a future epoch is nil. @@ -249,7 +257,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Nil(t, snapshotGlobalParamsEntry) // Test SnapshotValidatorByPKID for a future epoch is nil. - for _, pkid := range []*PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} { + for _, pkid := range validatorPKIDs { snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorByPKID(pkid, 2) require.NoError(t, err) require.Nil(t, snapshotValidatorEntry) @@ -266,6 +274,10 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Nil(t, snapshotGlobalActiveStakeAmountNanos) // Test SnapshotLeaderSchedule for a future epoch is nil. 
- // TODO + for index, _ := range validatorPKIDs { + snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint8(index), 2) + require.NoError(t, err) + require.Nil(t, snapshotLeaderScheduleValidator) + } } } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index ad69a9b3b..196d539ba 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -468,3 +468,111 @@ func DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn( key := DBKeyForSnapshotGlobalActiveStakeAmountNanos(epochNumber) return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalActiveStakeAmountNanos)) } + +// +// SnapshotLeaderScheduleValidator +// + +type SnapshotLeaderScheduleMapKey struct { + EpochNumber uint64 + LeaderIndex uint8 +} + +func (bav *UtxoView) GetSnapshotLeaderScheduleValidator(leaderIndex uint8, epochNumber uint64) (*ValidatorEntry, error) { + // First, check the UtxoView. + mapKey := SnapshotLeaderScheduleMapKey{EpochNumber: epochNumber, LeaderIndex: leaderIndex} + if validatorPKID, exists := bav.SnapshotLeaderSchedule[mapKey]; exists { + return bav.GetSnapshotValidatorByPKID(validatorPKID, epochNumber) + } + // Next, check the db. + validatorEntry, err := DBGetSnapshotLeaderScheduleValidator(bav.Handle, bav.Snapshot, leaderIndex, epochNumber) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetSnapshotLeaderScheduleValidator: error retrieving ValidatorPKID: ") + } + if validatorEntry != nil { + // Cache the ValidatorPKID in the UtxoView. 
+ bav._setSnapshotLeaderScheduleValidator(validatorEntry.ValidatorPKID, leaderIndex, epochNumber) + } + return validatorEntry, nil +} + +func (bav *UtxoView) _setSnapshotLeaderScheduleValidator(validatorPKID *PKID, index uint8, epochNumber uint64) { + if validatorPKID == nil { + glog.Errorf("UtxoView._setSnapshotLeaderScheduleValidator: called with nil ValidatorPKID, this should never happen") + return + } + mapKey := SnapshotLeaderScheduleMapKey{EpochNumber: epochNumber, LeaderIndex: index} + bav.SnapshotLeaderSchedule[mapKey] = validatorPKID.NewPKID() +} + +func (bav *UtxoView) _flushSnapshotLeaderScheduleToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + for mapKey, validatorPKID := range bav.SnapshotLeaderSchedule { + if validatorPKID == nil { + return fmt.Errorf("UtxoView._flushSnapshotLeaderScheduleToDbWithTxn: found nil PKID for epochNumber %d, this should never happen", mapKey.EpochNumber) + } + if err := DBPutSnapshotLeaderScheduleValidatorWithTxn(txn, bav.Snapshot, validatorPKID, mapKey.LeaderIndex, mapKey.EpochNumber, blockHeight); err != nil { + return errors.Wrapf(err, "UtxoView._flushSnapshotLeaderScheduleToDbWithTxn: problem setting ValidatorPKID for epochNumber %d: ", mapKey.EpochNumber) + } + } + return nil +} + +func DBKeyForSnapshotLeaderScheduleValidator(leaderIndex uint8, epochNumber uint64) []byte { + data := append([]byte{}, Prefixes.PrefixSnapshotLeaderSchedule...) + data = append(data, UintToBuf(epochNumber)...) + data = append(data, EncodeUint8(leaderIndex)...) 
+ return data +} + +func DBGetSnapshotLeaderScheduleValidator(handle *badger.DB, snap *Snapshot, leaderIndex uint8, epochNumber uint64) (*ValidatorEntry, error) { + var ret *ValidatorEntry + err := handle.View(func(txn *badger.Txn) error { + var innerErr error + ret, innerErr = DBGetSnapshotLeaderScheduleValidatorWithTxn(txn, snap, leaderIndex, epochNumber) + return innerErr + }) + return ret, err +} + +func DBGetSnapshotLeaderScheduleValidatorWithTxn(txn *badger.Txn, snap *Snapshot, leaderIndex uint8, epochNumber uint64) (*ValidatorEntry, error) { + // Retrieve ValidatorPKID from db. + key := DBKeyForSnapshotLeaderScheduleValidator(leaderIndex, epochNumber) + validatorPKIDBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + // We don't want to error if the key isn't found. Instead, return nil. + if err == badger.ErrKeyNotFound { + return nil, nil + } + return nil, errors.Wrapf(err, "DBGetSnapshotLeaderScheduleValidator: problem retrieving ValidatorPKID") + } + + // Decode ValidatorPKID from bytes. + validatorPKID := &PKID{} + rr := bytes.NewReader(validatorPKIDBytes) + if exist, err := DecodeFromBytes(validatorPKID, rr); !exist || err != nil { + return nil, errors.Wrapf(err, "DBGetSnapshotLeaderScheduleValidator: problem decoding ValidatorPKID") + } + + // Retrieve ValidatorEntry by PKID from db. + return DBGetSnapshotValidatorByPKIDWithTxn(txn, snap, validatorPKID, epochNumber) +} + +func DBPutSnapshotLeaderScheduleValidatorWithTxn( + txn *badger.Txn, + snap *Snapshot, + validatorPKID *PKID, + leaderIndex uint8, + epochNumber uint64, + blockHeight uint64, +) error { + if validatorPKID == nil { + // This should never happen but is a sanity check. 
+ glog.Errorf("DBPutSnapshotLeaderScheduleValidatorWithTxn: called with nil ValidatorPKID, this should never happen") + return nil + } + key := DBKeyForSnapshotLeaderScheduleValidator(leaderIndex, epochNumber) + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorPKID)); err != nil { + return errors.Wrapf(err, "DBPutSnapshotLeaderScheduleValidatorWithTxn: problem putting ValidatorPKID in the SnapshotLeaderSchedule index: ") + } + return nil +} From 7e49874bdb0404201faba87284a8cdd72ea647b6 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 30 May 2023 14:29:15 -0400 Subject: [PATCH 070/762] Rename to SnapshotAtEpochNumber. --- lib/db_utils.go | 36 ++--- lib/pos_snapshot_entries.go | 258 +++++++++++++++++++++--------------- 2 files changed, 172 insertions(+), 122 deletions(-) diff --git a/lib/db_utils.go b/lib/db_utils.go index bf95faaab..bc06eea0b 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -529,25 +529,25 @@ type DBPrefixes struct { // Prefix -> . PrefixCurrentRandomSeedHash []byte `prefix_id:"[84]" is_state:"true"` - // PrefixSnapshotGlobalParamsEntryByEpochNumber: Retrieve a snapshot GlobalParamsEntry by EpochNumber. - // Prefix, EpochNumber -> *GlobalParamsEntry - PrefixSnapshotGlobalParamsEntryByEpochNumber []byte `prefix_id:"[85]" is_state:"true"` + // PrefixSnapshotGlobalParamsEntry: Retrieve a snapshot GlobalParamsEntry by SnapshotAtEpochNumber. + // Prefix, SnapshotAtEpochNumber -> *GlobalParamsEntry + PrefixSnapshotGlobalParamsEntry []byte `prefix_id:"[85]" is_state:"true"` - // PrefixSnapshotValidatorByEpochNumberAndPKID: Retrieve a snapshot ValidatorEntry by EpochNumber + PKID. - // Prefix, EpochNumber, ValidatorPKID -> *ValidatorEntry - PrefixSnapshotValidatorByEpochNumberAndPKID []byte `prefix_id:"[86]" is_state:"true"` + // PrefixSnapshotValidatorByPKID: Retrieve a snapshot ValidatorEntry by . 
+ // Prefix, SnapshotAtEpochNumber, ValidatorPKID -> *ValidatorEntry + PrefixSnapshotValidatorByPKID []byte `prefix_id:"[86]" is_state:"true"` - // PrefixSnapshotValidatorByEpochNumberAndStake: Retrieve stake-ordered ValidatorEntries by EpochNumber. - // Prefix, EpochNumber, Status, TotalStakeAmountNanos, ValidatorPKID -> nil + // PrefixSnapshotValidatorByStake: Retrieve stake-ordered ValidatorEntries by SnapshotAtEpochNumber. + // Prefix, SnapshotAtEpochNumber, Status, TotalStakeAmountNanos, ValidatorPKID -> nil // Note: we parse the ValidatorPKID from the key and the value is nil to save space. - PrefixSnapshotValidatorByEpochNumberAndStake []byte `prefix_id:"[87]" is_state:"true"` + PrefixSnapshotValidatorByStake []byte `prefix_id:"[87]" is_state:"true"` - // PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber: Retrieve a snapshot GlobalActiveStakeAmountNanos by EpochNumber. - // Prefix, EpochNumber -> *uint256.Int - PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber []byte `prefix_id:"[88]" is_state:"true"` + // PrefixSnapshotGlobalActiveStakeAmountNanos: Retrieve a snapshot GlobalActiveStakeAmountNanos by SnapshotAtEpochNumber. + // Prefix, SnapshotAtEpochNumber -> *uint256.Int + PrefixSnapshotGlobalActiveStakeAmountNanos []byte `prefix_id:"[88]" is_state:"true"` - // PrefixSnapshotLeaderSchedule: Retrieve a ValidatorPKID by . - // Prefix, EpochNumber, LeaderIndex -> ValidatorPKID + // PrefixSnapshotLeaderSchedule: Retrieve a ValidatorPKID by . 
+ // Prefix, SnapshotAtEpochNumber, LeaderIndex -> ValidatorPKID PrefixSnapshotLeaderSchedule []byte `prefix_id:"[89]" is_state:"true"` // NEXT_TAG: 90 @@ -773,16 +773,16 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixCurrentRandomSeedHash) { // prefix_id:"[84]" return false, nil - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalParamsEntryByEpochNumber) { + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalParamsEntry) { // prefix_id:"[85]" return true, &GlobalParamsEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByEpochNumberAndPKID) { + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByPKID) { // prefix_id:"[86]" return true, &ValidatorEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByEpochNumberAndStake) { + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByStake) { // prefix_id:"[87]" return false, nil - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber) { + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanos) { // prefix_id:"[88]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotLeaderSchedule) { diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 196d539ba..17d1a946b 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -15,61 +15,71 @@ import ( // SnapshotGlobalParamsEntry // -func (bav *UtxoView) GetSnapshotGlobalParamsEntry(epochNumber uint64) (*GlobalParamsEntry, error) { +func (bav *UtxoView) GetSnapshotGlobalParamsEntry(snapshotAtEpochNumber uint64) (*GlobalParamsEntry, error) { // Check the UtxoView first. 
- if globalParamsEntry, exists := bav.SnapshotGlobalParamsEntries[epochNumber]; exists { + if globalParamsEntry, exists := bav.SnapshotGlobalParamsEntries[snapshotAtEpochNumber]; exists { return globalParamsEntry, nil } // If we don't have it in the UtxoView, check the db. - globalParamsEntry, err := DBGetSnapshotGlobalParamsEntry(bav.Handle, bav.Snapshot, epochNumber) + globalParamsEntry, err := DBGetSnapshotGlobalParamsEntry(bav.Handle, bav.Snapshot, snapshotAtEpochNumber) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetSnapshotGlobalParamsEntry: problem retrieving SnapshotGlobalParamsEntry from db: ") + return nil, errors.Wrapf( + err, + "UtxoView.GetSnapshotGlobalParamsEntry: problem retrieving SnapshotGlobalParamsEntry from db: ", + ) } if globalParamsEntry != nil { // Cache the result in the UtxoView. - bav._setSnapshotGlobalParamsEntry(globalParamsEntry, epochNumber) + bav._setSnapshotGlobalParamsEntry(globalParamsEntry, snapshotAtEpochNumber) } return globalParamsEntry, nil } -func (bav *UtxoView) _setSnapshotGlobalParamsEntry(globalParamsEntry *GlobalParamsEntry, epochNumber uint64) { +func (bav *UtxoView) _setSnapshotGlobalParamsEntry(globalParamsEntry *GlobalParamsEntry, snapshotAtEpochNumber uint64) { if globalParamsEntry == nil { glog.Errorf("UtxoView._setSnapshotGlobalParamsEntry: called with nil entry, this should never happen") } - bav.SnapshotGlobalParamsEntries[epochNumber] = globalParamsEntry.Copy() + bav.SnapshotGlobalParamsEntries[snapshotAtEpochNumber] = globalParamsEntry.Copy() } func (bav *UtxoView) _flushSnapshotGlobalParamsEntryToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { - for epochNumber, globalParamsEntry := range bav.SnapshotGlobalParamsEntries { + for snapshotAtEpochNumber, globalParamsEntry := range bav.SnapshotGlobalParamsEntries { if globalParamsEntry == nil { - return fmt.Errorf("UtxoView._flushSnapshotGlobalParamsEntryToDbWithTxn: found nil entry for epochNumber %d, this should never happen", epochNumber) 
+ return fmt.Errorf( + "UtxoView._flushSnapshotGlobalParamsEntryToDbWithTxn: found nil entry for SnapshotAtEpochNumber %d, this should never happen", + snapshotAtEpochNumber, + ) } - if err := DBPutSnapshotGlobalParamsEntryWithTxn(txn, bav.Snapshot, globalParamsEntry, epochNumber, blockHeight); err != nil { - return errors.Wrapf(err, "UtxoView._flushSnapshotGlobalParamsEntryToDbWithTxn: problem setting SnapshotGlobalParamsEntry for epochNumber %d: ", epochNumber) + if err := DBPutSnapshotGlobalParamsEntryWithTxn(txn, bav.Snapshot, globalParamsEntry, snapshotAtEpochNumber, blockHeight); err != nil { + return errors.Wrapf( + err, + "UtxoView._flushSnapshotGlobalParamsEntryToDbWithTxn: problem setting SnapshotGlobalParamsEntry for SnapshotAtEpochNumber %d: ", + snapshotAtEpochNumber, + ) } } return nil } -func DBKeyForSnapshotGlobalParamsEntry(epochNumber uint64) []byte { - key := append([]byte{}, Prefixes.PrefixSnapshotGlobalParamsEntryByEpochNumber...) - key = append(key, UintToBuf(epochNumber)...) +func DBKeyForSnapshotGlobalParamsEntry(snapshotEpochNumber uint64) []byte { + key := append([]byte{}, Prefixes.PrefixSnapshotGlobalParamsEntry...) + key = append(key, UintToBuf(snapshotEpochNumber)...) 
return key } -func DBGetSnapshotGlobalParamsEntry(handle *badger.DB, snap *Snapshot, epochNumber uint64) (*GlobalParamsEntry, error) { +func DBGetSnapshotGlobalParamsEntry(handle *badger.DB, snap *Snapshot, snapshotAtEpochNumber uint64) (*GlobalParamsEntry, error) { var ret *GlobalParamsEntry err := handle.View(func(txn *badger.Txn) error { var innerErr error - ret, innerErr = DBGetSnapshotGlobalParamsEntryWithTxn(txn, snap, epochNumber) + ret, innerErr = DBGetSnapshotGlobalParamsEntryWithTxn(txn, snap, snapshotAtEpochNumber) return innerErr }) return ret, err } -func DBGetSnapshotGlobalParamsEntryWithTxn(txn *badger.Txn, snap *Snapshot, epochNumber uint64) (*GlobalParamsEntry, error) { +func DBGetSnapshotGlobalParamsEntryWithTxn(txn *badger.Txn, snap *Snapshot, snapshotAtEpochNumber uint64) (*GlobalParamsEntry, error) { // Retrieve from db. - key := DBKeyForSnapshotGlobalParamsEntry(epochNumber) + key := DBKeyForSnapshotGlobalParamsEntry(snapshotAtEpochNumber) globalParamsEntryBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { // We don't want to error if the key isn't found. Instead, return nil. 
@@ -92,7 +102,7 @@ func DBPutSnapshotGlobalParamsEntryWithTxn( txn *badger.Txn, snap *Snapshot, globalParamsEntry *GlobalParamsEntry, - epochNumber uint64, + snapshotAtEpochNumber uint64, blockHeight uint64, ) error { if globalParamsEntry == nil { @@ -100,7 +110,7 @@ func DBPutSnapshotGlobalParamsEntryWithTxn( glog.Errorf("DBPutSnapshotGlobalParamsEntryWithTxn: called with nil GlobalParamsEntry, this should never happen") return nil } - key := DBKeyForSnapshotGlobalParamsEntry(epochNumber) + key := DBKeyForSnapshotGlobalParamsEntry(snapshotAtEpochNumber) return DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, globalParamsEntry)) } @@ -113,13 +123,13 @@ type SnapshotValidatorMapKey struct { ValidatorPKID PKID } -func (bav *UtxoView) SnapshotCurrentValidators(epochNumber uint64) error { +func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64) error { // First, snapshot any !isDeleted ValidatorEntries in the UtxoView. var utxoViewValidatorPKIDs []*PKID for _, validatorEntry := range bav.ValidatorPKIDToValidatorEntry { if !validatorEntry.isDeleted { // We only want to snapshot !isDeleted ValidatorEntries. - bav._setSnapshotValidatorEntry(validatorEntry, epochNumber) + bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) } // We don't want to retrieve any ValidatorEntries from the db that are present in the UtxoView. 
utxoViewValidatorPKIDs = append(utxoViewValidatorPKIDs, validatorEntry.ValidatorPKID) @@ -130,43 +140,45 @@ func (bav *UtxoView) SnapshotCurrentValidators(epochNumber uint64) error { return errors.Wrapf(err, "UtxoView.SnapshotValidators: problem retrieving ValidatorEntries: ") } for _, validatorEntry := range dbValidatorEntries { - bav._setSnapshotValidatorEntry(validatorEntry, epochNumber) + bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) } return nil } -func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID, epochNumber uint64) (*ValidatorEntry, error) { +func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { // Check the UtxoView first. - mapKey := SnapshotValidatorMapKey{EpochNumber: epochNumber, ValidatorPKID: *pkid} + mapKey := SnapshotValidatorMapKey{EpochNumber: snapshotAtEpochNumber, ValidatorPKID: *pkid} if validatorEntry, exists := bav.SnapshotValidatorEntries[mapKey]; exists { return validatorEntry, nil } // If we don't have it in the UtxoView, check the db. - validatorEntry, err := DBGetSnapshotValidatorByPKID(bav.Handle, bav.Snapshot, pkid, epochNumber) + validatorEntry, err := DBGetSnapshotValidatorByPKID(bav.Handle, bav.Snapshot, pkid, snapshotAtEpochNumber) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetSnapshotValidatorByPKID: problem retrieving ValidatorEntry from db: ") + return nil, errors.Wrapf( + err, + "UtxoView.GetSnapshotValidatorByPKID: problem retrieving ValidatorEntry from db: ", + ) } if validatorEntry != nil { // Cache the result in the UtxoView. 
- bav._setSnapshotValidatorEntry(validatorEntry, epochNumber) + bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) } return validatorEntry, nil } -func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit int, epochNumber uint64) ([]*ValidatorEntry, error) { - if limit <= 0 { - return []*ValidatorEntry{}, nil - } +func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit uint64, snapshotAtEpochNumber uint64) ([]*ValidatorEntry, error) { var utxoViewValidatorEntries []*ValidatorEntry for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { - if mapKey.EpochNumber == epochNumber { + if mapKey.EpochNumber == snapshotAtEpochNumber { utxoViewValidatorEntries = append(utxoViewValidatorEntries, validatorEntry) } } // Pull top N active ValidatorEntries from the database (not present in the UtxoView). // Note that we will skip validators that are present in the view because we pass // utxoViewValidatorEntries to the function. - dbValidatorEntries, err := DBGetSnapshotTopActiveValidatorsByStake(bav.Handle, bav.Snapshot, limit, epochNumber, utxoViewValidatorEntries) + dbValidatorEntries, err := DBGetSnapshotTopActiveValidatorsByStake( + bav.Handle, bav.Snapshot, limit, snapshotAtEpochNumber, utxoViewValidatorEntries, + ) if err != nil { return nil, errors.Wrapf(err, "UtxoView.GetSnapshotTopActiveValidatorsByStake: error retrieving entries from db: ") } @@ -175,7 +187,7 @@ func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit int, epochNumbe // We only pull ValidatorEntries from the db that are not present in the // UtxoView. As a sanity check, we double-check that the ValidatorEntry // is not already in the UtxoView here. 
- mapKey := SnapshotValidatorMapKey{EpochNumber: epochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID} + mapKey := SnapshotValidatorMapKey{EpochNumber: snapshotAtEpochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID} if _, exists := bav.SnapshotValidatorEntries[mapKey]; !exists { bav._setValidatorEntryMappings(validatorEntry) } @@ -183,7 +195,7 @@ func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit int, epochNumbe // Pull !isDeleted, active ValidatorEntries from the UtxoView with stake > 0. var validatorEntries []*ValidatorEntry for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { - if mapKey.EpochNumber == epochNumber && + if mapKey.EpochNumber == snapshotAtEpochNumber && !validatorEntry.isDeleted && validatorEntry.Status() == ValidatorStatusActive && !validatorEntry.TotalStakeAmountNanos.IsZero() { @@ -199,12 +211,12 @@ func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit int, epochNumbe return validatorEntries[0:upperBound], nil } -func (bav *UtxoView) _setSnapshotValidatorEntry(validatorEntry *ValidatorEntry, epochNumber uint64) { +func (bav *UtxoView) _setSnapshotValidatorEntry(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) { if validatorEntry == nil { glog.Errorf("UtxoView._setSnapshotValidatorEntry: called with nil entry, this should never happen") return } - mapKey := SnapshotValidatorMapKey{EpochNumber: epochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID} + mapKey := SnapshotValidatorMapKey{EpochNumber: snapshotAtEpochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID} bav.SnapshotValidatorEntries[mapKey] = validatorEntry.Copy() } @@ -212,14 +224,14 @@ func (bav *UtxoView) _flushSnapshotValidatorEntriesToDbWithTxn(txn *badger.Txn, for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { if validatorEntry == nil { return fmt.Errorf( - "UtxoView._flushSnapshotValidatorEntriesToDbWithTxn: found nil entry for epochNumber %d, this should never happen", + 
"UtxoView._flushSnapshotValidatorEntriesToDbWithTxn: found nil entry for SnapshotAtEpochNumber %d, this should never happen", mapKey.EpochNumber, ) } if err := DBPutSnapshotValidatorEntryWithTxn(txn, bav.Snapshot, validatorEntry, mapKey.EpochNumber, blockHeight); err != nil { return errors.Wrapf( err, - "UtxoView._flushSnapshotValidatorEntryToDbWithTxn: problem setting ValidatorEntry for epochNumber %d: ", + "UtxoView._flushSnapshotValidatorEntryToDbWithTxn: problem setting ValidatorEntry for SnapshotAtEpochNumber %d: ", mapKey.EpochNumber, ) } @@ -227,35 +239,35 @@ func (bav *UtxoView) _flushSnapshotValidatorEntriesToDbWithTxn(txn *badger.Txn, return nil } -func DBKeyForSnapshotValidatorByPKID(validatorEntry *ValidatorEntry, epochNumber uint64) []byte { - key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByEpochNumberAndPKID...) - key = append(key, UintToBuf(epochNumber)...) +func DBKeyForSnapshotValidatorByPKID(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) []byte { + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByPKID...) + key = append(key, UintToBuf(snapshotAtEpochNumber)...) key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) return key } -func DBKeyForSnapshotValidatorByStake(validatorEntry *ValidatorEntry, epochNumber uint64) []byte { - key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByEpochNumberAndStake...) - key = append(key, UintToBuf(epochNumber)...) +func DBKeyForSnapshotValidatorByStake(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) []byte { + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStake...) + key = append(key, UintToBuf(snapshotAtEpochNumber)...) key = append(key, EncodeUint8(uint8(validatorEntry.Status()))...) key = append(key, FixedWidthEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) 
return key } -func DBGetSnapshotValidatorByPKID(handle *badger.DB, snap *Snapshot, pkid *PKID, epochNumber uint64) (*ValidatorEntry, error) { +func DBGetSnapshotValidatorByPKID(handle *badger.DB, snap *Snapshot, pkid *PKID, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { var ret *ValidatorEntry err := handle.View(func(txn *badger.Txn) error { var innerErr error - ret, innerErr = DBGetSnapshotValidatorByPKIDWithTxn(txn, snap, pkid, epochNumber) + ret, innerErr = DBGetSnapshotValidatorByPKIDWithTxn(txn, snap, pkid, snapshotAtEpochNumber) return innerErr }) return ret, err } -func DBGetSnapshotValidatorByPKIDWithTxn(txn *badger.Txn, snap *Snapshot, pkid *PKID, epochNumber uint64) (*ValidatorEntry, error) { +func DBGetSnapshotValidatorByPKIDWithTxn(txn *badger.Txn, snap *Snapshot, pkid *PKID, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { // Retrieve ValidatorEntry from db. - key := DBKeyForSnapshotValidatorByPKID(&ValidatorEntry{ValidatorPKID: pkid}, epochNumber) + key := DBKeyForSnapshotValidatorByPKID(&ValidatorEntry{ValidatorPKID: pkid}, snapshotAtEpochNumber) validatorBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { // We don't want to error if the key isn't found. Instead, return nil. @@ -277,8 +289,8 @@ func DBGetSnapshotValidatorByPKIDWithTxn(txn *badger.Txn, snap *Snapshot, pkid * func DBGetSnapshotTopActiveValidatorsByStake( handle *badger.DB, snap *Snapshot, - limit int, - epochNumber uint64, + limit uint64, + snapshotAtEpochNumber uint64, validatorEntriesToSkip []*ValidatorEntry, ) ([]*ValidatorEntry, error) { var validatorEntries []*ValidatorEntry @@ -286,15 +298,15 @@ func DBGetSnapshotTopActiveValidatorsByStake( // Convert ValidatorEntriesToSkip to ValidatorEntryKeysToSkip. 
validatorKeysToSkip := NewSet([]string{}) for _, validatorEntryToSkip := range validatorEntriesToSkip { - validatorKeysToSkip.Add(string(DBKeyForSnapshotValidatorByStake(validatorEntryToSkip, epochNumber))) + validatorKeysToSkip.Add(string(DBKeyForSnapshotValidatorByStake(validatorEntryToSkip, snapshotAtEpochNumber))) } // Retrieve top N active ValidatorEntry keys by stake. - key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByEpochNumberAndStake...) - key = append(key, UintToBuf(epochNumber)...) + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStake...) + key = append(key, UintToBuf(snapshotAtEpochNumber)...) key = append(key, EncodeUint8(uint8(ValidatorStatusActive))...) keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( - handle, key, limit, nil, true, validatorKeysToSkip, + handle, key, int(limit), nil, true, validatorKeysToSkip, ) if err != nil { return nil, errors.Wrapf(err, "DBGetSnapshotTopActiveValidatorsByStake: problem retrieving top validators: ") @@ -311,7 +323,7 @@ func DBGetSnapshotTopActiveValidatorsByStake( return nil, errors.Wrapf(err, "DBGetSnapshotTopActiveValidatorsByStake: problem reading ValidatorPKID: ") } // Retrieve ValidatorEntry by PKID. - validatorEntry, err := DBGetSnapshotValidatorByPKID(handle, snap, validatorPKID, epochNumber) + validatorEntry, err := DBGetSnapshotValidatorByPKID(handle, snap, validatorPKID, snapshotAtEpochNumber) if err != nil { return nil, errors.Wrapf(err, "DBGetSnapshotTopActiveValidatorsByStake: problem retrieving validator by PKID: ") } @@ -325,7 +337,7 @@ func DBPutSnapshotValidatorEntryWithTxn( txn *badger.Txn, snap *Snapshot, validatorEntry *ValidatorEntry, - epochNumber uint64, + snapshotAtEpochNumber uint64, blockHeight uint64, ) error { if validatorEntry == nil { @@ -335,15 +347,21 @@ func DBPutSnapshotValidatorEntryWithTxn( } // Put the ValidatorEntry in the SnapshotValidatorByPKID index. 
- key := DBKeyForSnapshotValidatorByPKID(validatorEntry, epochNumber) + key := DBKeyForSnapshotValidatorByPKID(validatorEntry, snapshotAtEpochNumber) if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry)); err != nil { - return errors.Wrapf(err, "DBPutSnapshotValidatorEntryWithTxn: problem putting ValidatorEntry in the SnapshotValidatorByPKID index: ") + return errors.Wrapf( + err, + "DBPutSnapshotValidatorEntryWithTxn: problem putting ValidatorEntry in the SnapshotValidatorByPKID index: ", + ) } // Put the ValidatorPKID in the SnapshotValidatorByStake index. - key = DBKeyForSnapshotValidatorByStake(validatorEntry, epochNumber) + key = DBKeyForSnapshotValidatorByStake(validatorEntry, snapshotAtEpochNumber) if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID)); err != nil { - return errors.Wrapf(err, "DBPutSnapshotValidatorEntryWithTxn: problem putting ValidatorPKID in the SnapshotValidatorByStake index: ") + return errors.Wrapf( + err, + "DBPutSnapshotValidatorEntryWithTxn: problem putting ValidatorPKID in the SnapshotValidatorByStake index: ", + ) } return nil @@ -379,61 +397,71 @@ func DBEnumerateAllCurrentValidators(handle *badger.DB, pkidsToSkip []*PKID) ([] // SnapshotGlobalActiveStakeAmountNanos // -func (bav *UtxoView) GetSnapshotGlobalActiveStakeAmountNanos(epochNumber uint64) (*uint256.Int, error) { +func (bav *UtxoView) GetSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumber uint64) (*uint256.Int, error) { // Check the UtxoView first. - if globalActiveStakeAmountNanos, exists := bav.SnapshotGlobalActiveStakeAmountNanos[epochNumber]; exists { + if globalActiveStakeAmountNanos, exists := bav.SnapshotGlobalActiveStakeAmountNanos[snapshotAtEpochNumber]; exists { return globalActiveStakeAmountNanos, nil } // If we don't have it in the UtxoView, check the db. 
- globalActiveStakeAmountNanos, err := DBGetSnapshotGlobalActiveStakeAmountNanos(bav.Handle, bav.Snapshot, epochNumber) + globalActiveStakeAmountNanos, err := DBGetSnapshotGlobalActiveStakeAmountNanos(bav.Handle, bav.Snapshot, snapshotAtEpochNumber) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetSnapshotGlobalActiveStakeAmountNanos: problem retrieving SnapshotGlobalActiveStakeAmountNanos from db: ") + return nil, errors.Wrapf( + err, + "UtxoView.GetSnapshotGlobalActiveStakeAmountNanos: problem retrieving SnapshotGlobalActiveStakeAmountNanos from db: ", + ) } if globalActiveStakeAmountNanos != nil { // Cache the result in the UtxoView. - bav._setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos, epochNumber) + bav._setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos, snapshotAtEpochNumber) } return globalActiveStakeAmountNanos, nil } -func (bav *UtxoView) _setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos *uint256.Int, epochNumber uint64) { +func (bav *UtxoView) _setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos *uint256.Int, snapshotAtEpochNumber uint64) { if globalActiveStakeAmountNanos == nil { glog.Errorf("UtxoView._setSnapshotGlobalActiveStakeAmountNanos: called with nil entry, this should never happen") } - bav.SnapshotGlobalActiveStakeAmountNanos[epochNumber] = globalActiveStakeAmountNanos.Clone() + bav.SnapshotGlobalActiveStakeAmountNanos[snapshotAtEpochNumber] = globalActiveStakeAmountNanos.Clone() } func (bav *UtxoView) _flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { - for epochNumber, globalActiveStakeAmountNanos := range bav.SnapshotGlobalActiveStakeAmountNanos { + for snapshotAtEpochNumber, globalActiveStakeAmountNanos := range bav.SnapshotGlobalActiveStakeAmountNanos { if globalActiveStakeAmountNanos == nil { - return fmt.Errorf("UtxoView._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn: found nil entry for 
epochNumber %d, this should never happen", epochNumber) + return fmt.Errorf( + "UtxoView._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn: found nil entry for SnapshotAtEpochNumber %d, this should never happen", + snapshotAtEpochNumber, + ) } - if err := DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn(txn, bav.Snapshot, globalActiveStakeAmountNanos, epochNumber, blockHeight); err != nil { - return errors.Wrapf(err, "UtxoView._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn: problem setting SnapshotGlobalActiveStakeAmountNanos for epochNumber %d: ", epochNumber) + if err := DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn(txn, bav.Snapshot, globalActiveStakeAmountNanos, snapshotAtEpochNumber, blockHeight); err != nil { + return errors.Wrapf( + err, + "UtxoView._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn: problem setting SnapshotGlobalActiveStakeAmountNanos for SnapshotAtEpochNumber %d: ", + snapshotAtEpochNumber, + ) } } return nil } -func DBKeyForSnapshotGlobalActiveStakeAmountNanos(epochNumber uint64) []byte { - key := append([]byte{}, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanosByEpochNumber...) - key = append(key, UintToBuf(epochNumber)...) +func DBKeyForSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumber uint64) []byte { + key := append([]byte{}, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanos...) + key = append(key, UintToBuf(snapshotAtEpochNumber)...) 
return key } -func DBGetSnapshotGlobalActiveStakeAmountNanos(handle *badger.DB, snap *Snapshot, epochNumber uint64) (*uint256.Int, error) { +func DBGetSnapshotGlobalActiveStakeAmountNanos(handle *badger.DB, snap *Snapshot, snapshotAtEpochNumber uint64) (*uint256.Int, error) { var ret *uint256.Int err := handle.View(func(txn *badger.Txn) error { var innerErr error - ret, innerErr = DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn(txn, snap, epochNumber) + ret, innerErr = DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn(txn, snap, snapshotAtEpochNumber) return innerErr }) return ret, err } -func DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot, epochNumber uint64) (*uint256.Int, error) { +func DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot, snapshotAtEpochNumber uint64) (*uint256.Int, error) { // Retrieve from db. - key := DBKeyForSnapshotGlobalActiveStakeAmountNanos(epochNumber) + key := DBKeyForSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumber) globalActiveStakeAmountNanosBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { // We don't want to error if the key isn't found. Instead, return nil. 
@@ -457,7 +485,7 @@ func DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn( txn *badger.Txn, snap *Snapshot, globalActiveStakeAmountNanos *uint256.Int, - epochNumber uint64, + snapshotAtEpochNumber uint64, blockHeight uint64, ) error { if globalActiveStakeAmountNanos == nil { @@ -465,7 +493,7 @@ func DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn( glog.Errorf("DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn: called with nil GlobalActiveStakeAmountNanos, this should never happen") return nil } - key := DBKeyForSnapshotGlobalActiveStakeAmountNanos(epochNumber) + key := DBKeyForSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumber) return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalActiveStakeAmountNanos)) } @@ -474,69 +502,88 @@ func DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn( // type SnapshotLeaderScheduleMapKey struct { - EpochNumber uint64 - LeaderIndex uint8 + SnapshotAtEpochNumber uint64 + LeaderIndex uint8 } -func (bav *UtxoView) GetSnapshotLeaderScheduleValidator(leaderIndex uint8, epochNumber uint64) (*ValidatorEntry, error) { +func (bav *UtxoView) GetSnapshotLeaderScheduleValidator(leaderIndex uint8, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { // First, check the UtxoView. - mapKey := SnapshotLeaderScheduleMapKey{EpochNumber: epochNumber, LeaderIndex: leaderIndex} + mapKey := SnapshotLeaderScheduleMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, LeaderIndex: leaderIndex} if validatorPKID, exists := bav.SnapshotLeaderSchedule[mapKey]; exists { - return bav.GetSnapshotValidatorByPKID(validatorPKID, epochNumber) + return bav.GetSnapshotValidatorByPKID(validatorPKID, snapshotAtEpochNumber) } // Next, check the db. 
- validatorEntry, err := DBGetSnapshotLeaderScheduleValidator(bav.Handle, bav.Snapshot, leaderIndex, epochNumber) + validatorEntry, err := DBGetSnapshotLeaderScheduleValidator(bav.Handle, bav.Snapshot, leaderIndex, snapshotAtEpochNumber) if err != nil { return nil, errors.Wrapf(err, "UtxoView.GetSnapshotLeaderScheduleValidator: error retrieving ValidatorPKID: ") } if validatorEntry != nil { // Cache the ValidatorPKID in the UtxoView. - bav._setSnapshotLeaderScheduleValidator(validatorEntry.ValidatorPKID, leaderIndex, epochNumber) + bav._setSnapshotLeaderScheduleValidator(validatorEntry.ValidatorPKID, leaderIndex, snapshotAtEpochNumber) } return validatorEntry, nil } -func (bav *UtxoView) _setSnapshotLeaderScheduleValidator(validatorPKID *PKID, index uint8, epochNumber uint64) { +func (bav *UtxoView) _setSnapshotLeaderScheduleValidator(validatorPKID *PKID, index uint8, snapshotAtEpochNumber uint64) { if validatorPKID == nil { glog.Errorf("UtxoView._setSnapshotLeaderScheduleValidator: called with nil ValidatorPKID, this should never happen") return } - mapKey := SnapshotLeaderScheduleMapKey{EpochNumber: epochNumber, LeaderIndex: index} + mapKey := SnapshotLeaderScheduleMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, LeaderIndex: index} bav.SnapshotLeaderSchedule[mapKey] = validatorPKID.NewPKID() } func (bav *UtxoView) _flushSnapshotLeaderScheduleToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { for mapKey, validatorPKID := range bav.SnapshotLeaderSchedule { if validatorPKID == nil { - return fmt.Errorf("UtxoView._flushSnapshotLeaderScheduleToDbWithTxn: found nil PKID for epochNumber %d, this should never happen", mapKey.EpochNumber) + return fmt.Errorf( + "UtxoView._flushSnapshotLeaderScheduleToDbWithTxn: found nil PKID for SnapshotAtEpochNumber %d, this should never happen", + mapKey.SnapshotAtEpochNumber, + ) } - if err := DBPutSnapshotLeaderScheduleValidatorWithTxn(txn, bav.Snapshot, validatorPKID, mapKey.LeaderIndex, mapKey.EpochNumber, blockHeight); 
err != nil { - return errors.Wrapf(err, "UtxoView._flushSnapshotLeaderScheduleToDbWithTxn: problem setting ValidatorPKID for epochNumber %d: ", mapKey.EpochNumber) + if err := DBPutSnapshotLeaderScheduleValidatorWithTxn( + txn, bav.Snapshot, validatorPKID, mapKey.LeaderIndex, mapKey.SnapshotAtEpochNumber, blockHeight, + ); err != nil { + return errors.Wrapf( + err, + "UtxoView._flushSnapshotLeaderScheduleToDbWithTxn: problem setting ValidatorPKID for SnapshotAtEpochNumber %d: ", + mapKey.SnapshotAtEpochNumber, + ) } } return nil } -func DBKeyForSnapshotLeaderScheduleValidator(leaderIndex uint8, epochNumber uint64) []byte { +func DBKeyForSnapshotLeaderScheduleValidator(leaderIndex uint8, snapshotAtEpochNumber uint64) []byte { data := append([]byte{}, Prefixes.PrefixSnapshotLeaderSchedule...) - data = append(data, UintToBuf(epochNumber)...) + data = append(data, UintToBuf(snapshotAtEpochNumber)...) data = append(data, EncodeUint8(leaderIndex)...) return data } -func DBGetSnapshotLeaderScheduleValidator(handle *badger.DB, snap *Snapshot, leaderIndex uint8, epochNumber uint64) (*ValidatorEntry, error) { +func DBGetSnapshotLeaderScheduleValidator( + handle *badger.DB, + snap *Snapshot, + leaderIndex uint8, + snapshotAtEpochNumber uint64, +) (*ValidatorEntry, error) { var ret *ValidatorEntry err := handle.View(func(txn *badger.Txn) error { var innerErr error - ret, innerErr = DBGetSnapshotLeaderScheduleValidatorWithTxn(txn, snap, leaderIndex, epochNumber) + ret, innerErr = DBGetSnapshotLeaderScheduleValidatorWithTxn(txn, snap, leaderIndex, snapshotAtEpochNumber) return innerErr }) return ret, err } -func DBGetSnapshotLeaderScheduleValidatorWithTxn(txn *badger.Txn, snap *Snapshot, leaderIndex uint8, epochNumber uint64) (*ValidatorEntry, error) { +func DBGetSnapshotLeaderScheduleValidatorWithTxn( + txn *badger.Txn, + snap *Snapshot, + leaderIndex uint8, + snapshotAtEpochNumber uint64, +) (*ValidatorEntry, error) { // Retrieve ValidatorPKID from db. 
- key := DBKeyForSnapshotLeaderScheduleValidator(leaderIndex, epochNumber) + key := DBKeyForSnapshotLeaderScheduleValidator(leaderIndex, snapshotAtEpochNumber) validatorPKIDBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { // We don't want to error if the key isn't found. Instead, return nil. @@ -554,7 +601,7 @@ func DBGetSnapshotLeaderScheduleValidatorWithTxn(txn *badger.Txn, snap *Snapshot } // Retrieve ValidatorEntry by PKID from db. - return DBGetSnapshotValidatorByPKIDWithTxn(txn, snap, validatorPKID, epochNumber) + return DBGetSnapshotValidatorByPKIDWithTxn(txn, snap, validatorPKID, snapshotAtEpochNumber) } func DBPutSnapshotLeaderScheduleValidatorWithTxn( @@ -562,7 +609,7 @@ func DBPutSnapshotLeaderScheduleValidatorWithTxn( snap *Snapshot, validatorPKID *PKID, leaderIndex uint8, - epochNumber uint64, + snapshotAtEpochNumber uint64, blockHeight uint64, ) error { if validatorPKID == nil { @@ -570,9 +617,12 @@ func DBPutSnapshotLeaderScheduleValidatorWithTxn( glog.Errorf("DBPutSnapshotLeaderScheduleValidatorWithTxn: called with nil ValidatorPKID, this should never happen") return nil } - key := DBKeyForSnapshotLeaderScheduleValidator(leaderIndex, epochNumber) + key := DBKeyForSnapshotLeaderScheduleValidator(leaderIndex, snapshotAtEpochNumber) if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorPKID)); err != nil { - return errors.Wrapf(err, "DBPutSnapshotLeaderScheduleValidatorWithTxn: problem putting ValidatorPKID in the SnapshotLeaderSchedule index: ") + return errors.Wrapf( + err, + "DBPutSnapshotLeaderScheduleValidatorWithTxn: problem putting ValidatorPKID in the SnapshotLeaderSchedule index: ", + ) } return nil } From 30e853da3993a4da9eb470f2ecf159d0a4c32f54 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 30 May 2023 14:45:34 -0400 Subject: [PATCH 071/762] Fix line break overflow. 
--- lib/constants.go | 23 +++++-------- lib/pos_epoch_complete_hook.go | 24 +++++++------- lib/pos_epoch_complete_hook_test.go | 4 +-- lib/pos_snapshot_entries.go | 51 +++++++++++++++++------------ 4 files changed, 52 insertions(+), 50 deletions(-) diff --git a/lib/constants.go b/lib/constants.go index fcc940214..5db509ebf 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -275,8 +275,9 @@ type ForkHeights struct { // UnregisterAsValidator, Stake, Unstake, and UnlockStake. ProofOfStakeNewTxnTypesBlockHeight uint32 - // ProofOfStakeBlockHeight defines the height at which we cut over from PoW to PoS. - ProofOfStakeBlockHeight uint32 + // ProofOfStakeSnapshottingBlockHeight defines the height at which we start + // snapshotting the relevant Proof of Stake entries. + ProofOfStakeSnapshottingBlockHeight uint32 // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. @@ -360,9 +361,6 @@ type EncoderMigrationHeights struct { // This coincides with the ProofOfStakeNewTxnTypesBlockHeight ProofOfStakeNewTxnTypesMigration MigrationHeight - - // This coincides with the ProofOfStakeBlockHeight - ProofOfStakeMigration MigrationHeight } func GetEncoderMigrationHeights(forkHeights *ForkHeights) *EncoderMigrationHeights { @@ -392,11 +390,6 @@ func GetEncoderMigrationHeights(forkHeights *ForkHeights) *EncoderMigrationHeigh Height: uint64(forkHeights.ProofOfStakeNewTxnTypesBlockHeight), Name: ProofOfStakeNewTxnTypesMigration, }, - ProofOfStakeMigration: MigrationHeight{ - Version: 5, - Height: uint64(forkHeights.ProofOfStakeBlockHeight), - Name: ProofOfStakeMigration, - }, } } @@ -657,9 +650,9 @@ var RegtestForkHeights = ForkHeights{ AssociationsDerivedKeySpendingLimitBlockHeight: uint32(0), // For convenience, we set the block height to 1 since the // genesis block was created using the utxo model. 
- BalanceModelBlockHeight: uint32(1), - ProofOfStakeNewTxnTypesBlockHeight: uint32(1), - ProofOfStakeBlockHeight: uint32(1), + BalanceModelBlockHeight: uint32(1), + ProofOfStakeNewTxnTypesBlockHeight: uint32(1), + ProofOfStakeSnapshottingBlockHeight: uint32(1), // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. @@ -816,7 +809,7 @@ var MainnetForkHeights = ForkHeights{ ProofOfStakeNewTxnTypesBlockHeight: uint32(math.MaxUint32), // FIXME: set to real block height when ready - ProofOfStakeBlockHeight: uint32(math.MaxUint32), + ProofOfStakeSnapshottingBlockHeight: uint32(math.MaxUint32), // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. @@ -1094,7 +1087,7 @@ var TestnetForkHeights = ForkHeights{ ProofOfStakeNewTxnTypesBlockHeight: uint32(math.MaxUint32), // FIXME: set to real block height when ready - ProofOfStakeBlockHeight: uint32(math.MaxUint32), + ProofOfStakeSnapshottingBlockHeight: uint32(math.MaxUint32), // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index ca5a43152..a8ad89772 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -7,16 +7,16 @@ import ( func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) { // Returns true if this is the last block in the current epoch. - if blockHeight < uint64(bav.Params.ForkHeights.ProofOfStakeBlockHeight) { - // Return false if we are still using PoW and haven't cut over to PoS yet. + if blockHeight < uint64(bav.Params.ForkHeights.ProofOfStakeSnapshottingBlockHeight) { + // Return false if we have not started snapshotting the relevant PoS entries yet. 
return false, nil } currentEpochEntry, err := bav.GetCurrentEpochEntry() if err != nil { - return false, errors.Wrapf(err, "UtxoView.IsEpochComplete: problem retrieving CurrentEpochEntry: ") + return false, errors.Wrapf(err, "IsEpochComplete: problem retrieving CurrentEpochEntry: ") } if currentEpochEntry == nil { - return false, errors.New("UtxoView.IsEpochComplete: CurrentEpochEntry is nil, this should never happen") + return false, errors.New("IsEpochComplete: CurrentEpochEntry is nil, this should never happen") } return currentEpochEntry.FinalBlockHeight == blockHeight, nil } @@ -27,19 +27,19 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Sanity-check that the current block is the last block in the current epoch. isLastBlockInCurrentEpoch, err := bav.IsLastBlockInCurrentEpoch(blockHeight) if err != nil { - return errors.Wrapf(err, "UtxoView.RunEpochCompleteHook: ") + return errors.Wrapf(err, "RunEpochCompleteHook: ") } if !isLastBlockInCurrentEpoch { - return errors.New("UtxoView.RunEpochCompleteHook: called before current epoch is complete, this should never happen") + return errors.New("RunEpochCompleteHook: called before current epoch is complete, this should never happen") } // Retrieve the CurrentEpochEntry. currentEpochEntry, err := bav.GetCurrentEpochEntry() if err != nil { - return errors.Wrapf(err, "UtxoView.RunEpochCompleteHook: problem retrieving CurrentEpochEntry: ") + return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving CurrentEpochEntry: ") } if currentEpochEntry == nil { - return errors.New("UtxoView.RunEpochCompleteHook: CurrentEpochEntry is nil, this should never happen") + return errors.New("RunEpochCompleteHook: CurrentEpochEntry is nil, this should never happen") } // Snapshot the current GlobalParamsEntry. @@ -47,24 +47,24 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Snapshot the current ValidatorEntries. 
if err = bav.SnapshotCurrentValidators(currentEpochEntry.EpochNumber); err != nil { - return errors.Wrapf(err, "UtxoView.RunEpochCompleteHook: problem snapshotting validators: ") + return errors.Wrapf(err, "RunEpochCompleteHook: problem snapshotting validators: ") } // Snapshot the current GlobalActiveStakeAmountNanos. globalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() if err != nil { - return errors.Wrapf(err, "UtxoView.RunEpochCompleteHook: problem retrieving GlobalActiveStakeAmountNanos: ") + return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving GlobalActiveStakeAmountNanos: ") } bav._setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos, currentEpochEntry.EpochNumber) // Generate + snapshot a leader schedule. leaderSchedule, err := bav.GenerateLeaderSchedule() if err != nil { - return errors.Wrapf(err, "UtxoView.RunEpochCompleteHook: problem generating leader schedule: ") + return errors.Wrapf(err, "RunEpochCompleteHook: problem generating leader schedule: ") } for index, validatorPKID := range leaderSchedule { if index > math.MaxUint8 { - return errors.Errorf("UtxoView.RunEpochCompleteHook: LeaderIndex %d overflows uint8", index) + return errors.Errorf("RunEpochCompleteHook: LeaderIndex %d overflows uint8", index) } bav._setSnapshotLeaderScheduleValidator(validatorPKID, uint8(index), currentEpochEntry.EpochNumber) } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index a48e2ed18..15c007a22 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -21,7 +21,7 @@ func TestIsLastBlockInCurrentEpoch(t *testing.T) { // Initialize fork heights. 
params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) - params.ForkHeights.ProofOfStakeBlockHeight = uint32(1) + params.ForkHeights.ProofOfStakeSnapshottingBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -65,7 +65,7 @@ func TestRunEpochCompleteHook(t *testing.T) { // Initialize fork heights. params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) - params.ForkHeights.ProofOfStakeBlockHeight = uint32(1) + params.ForkHeights.ProofOfStakeSnapshottingBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 17d1a946b..f2913350c 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -25,7 +25,7 @@ func (bav *UtxoView) GetSnapshotGlobalParamsEntry(snapshotAtEpochNumber uint64) if err != nil { return nil, errors.Wrapf( err, - "UtxoView.GetSnapshotGlobalParamsEntry: problem retrieving SnapshotGlobalParamsEntry from db: ", + "GetSnapshotGlobalParamsEntry: problem retrieving SnapshotGlobalParamsEntry from db: ", ) } if globalParamsEntry != nil { @@ -37,7 +37,7 @@ func (bav *UtxoView) GetSnapshotGlobalParamsEntry(snapshotAtEpochNumber uint64) func (bav *UtxoView) _setSnapshotGlobalParamsEntry(globalParamsEntry *GlobalParamsEntry, snapshotAtEpochNumber uint64) { if globalParamsEntry == nil { - glog.Errorf("UtxoView._setSnapshotGlobalParamsEntry: called with nil entry, this should never happen") + glog.Errorf("_setSnapshotGlobalParamsEntry: called with nil entry, this should never happen") } bav.SnapshotGlobalParamsEntries[snapshotAtEpochNumber] = globalParamsEntry.Copy() } @@ -46,14 +46,16 @@ func (bav *UtxoView) 
_flushSnapshotGlobalParamsEntryToDbWithTxn(txn *badger.Txn, for snapshotAtEpochNumber, globalParamsEntry := range bav.SnapshotGlobalParamsEntries { if globalParamsEntry == nil { return fmt.Errorf( - "UtxoView._flushSnapshotGlobalParamsEntryToDbWithTxn: found nil entry for SnapshotAtEpochNumber %d, this should never happen", + "_flushSnapshotGlobalParamsEntryToDbWithTxn: found nil entry for SnapshotAtEpochNumber %d, this should never happen", snapshotAtEpochNumber, ) } - if err := DBPutSnapshotGlobalParamsEntryWithTxn(txn, bav.Snapshot, globalParamsEntry, snapshotAtEpochNumber, blockHeight); err != nil { + if err := DBPutSnapshotGlobalParamsEntryWithTxn( + txn, bav.Snapshot, globalParamsEntry, snapshotAtEpochNumber, blockHeight, + ); err != nil { return errors.Wrapf( err, - "UtxoView._flushSnapshotGlobalParamsEntryToDbWithTxn: problem setting SnapshotGlobalParamsEntry for SnapshotAtEpochNumber %d: ", + "_flushSnapshotGlobalParamsEntryToDbWithTxn: problem setting SnapshotGlobalParamsEntry for SnapshotAtEpochNumber %d: ", snapshotAtEpochNumber, ) } @@ -137,7 +139,7 @@ func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64) err // Second, snapshot the ValidatorEntries in the db (skipping any in the UtxoView). 
dbValidatorEntries, err := DBEnumerateAllCurrentValidators(bav.Handle, utxoViewValidatorPKIDs) if err != nil { - return errors.Wrapf(err, "UtxoView.SnapshotValidators: problem retrieving ValidatorEntries: ") + return errors.Wrapf(err, "SnapshotValidators: problem retrieving ValidatorEntries: ") } for _, validatorEntry := range dbValidatorEntries { bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) @@ -156,7 +158,7 @@ func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID, snapshotAtEpochNumbe if err != nil { return nil, errors.Wrapf( err, - "UtxoView.GetSnapshotValidatorByPKID: problem retrieving ValidatorEntry from db: ", + "GetSnapshotValidatorByPKID: problem retrieving ValidatorEntry from db: ", ) } if validatorEntry != nil { @@ -180,7 +182,7 @@ func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit uint64, snapsho bav.Handle, bav.Snapshot, limit, snapshotAtEpochNumber, utxoViewValidatorEntries, ) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetSnapshotTopActiveValidatorsByStake: error retrieving entries from db: ") + return nil, errors.Wrapf(err, "GetSnapshotTopActiveValidatorsByStake: error retrieving entries from db: ") } // Cache top N active ValidatorEntries from the db in the UtxoView. 
for _, validatorEntry := range dbValidatorEntries { @@ -213,7 +215,7 @@ func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit uint64, snapsho func (bav *UtxoView) _setSnapshotValidatorEntry(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) { if validatorEntry == nil { - glog.Errorf("UtxoView._setSnapshotValidatorEntry: called with nil entry, this should never happen") + glog.Errorf("_setSnapshotValidatorEntry: called with nil entry, this should never happen") return } mapKey := SnapshotValidatorMapKey{EpochNumber: snapshotAtEpochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID} @@ -224,14 +226,14 @@ func (bav *UtxoView) _flushSnapshotValidatorEntriesToDbWithTxn(txn *badger.Txn, for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { if validatorEntry == nil { return fmt.Errorf( - "UtxoView._flushSnapshotValidatorEntriesToDbWithTxn: found nil entry for SnapshotAtEpochNumber %d, this should never happen", + "_flushSnapshotValidatorEntriesToDbWithTxn: found nil entry for SnapshotAtEpochNumber %d, this should never happen", mapKey.EpochNumber, ) } if err := DBPutSnapshotValidatorEntryWithTxn(txn, bav.Snapshot, validatorEntry, mapKey.EpochNumber, blockHeight); err != nil { return errors.Wrapf( err, - "UtxoView._flushSnapshotValidatorEntryToDbWithTxn: problem setting ValidatorEntry for SnapshotAtEpochNumber %d: ", + "_flushSnapshotValidatorEntryToDbWithTxn: problem setting ValidatorEntry for SnapshotAtEpochNumber %d: ", mapKey.EpochNumber, ) } @@ -265,7 +267,12 @@ func DBGetSnapshotValidatorByPKID(handle *badger.DB, snap *Snapshot, pkid *PKID, return ret, err } -func DBGetSnapshotValidatorByPKIDWithTxn(txn *badger.Txn, snap *Snapshot, pkid *PKID, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { +func DBGetSnapshotValidatorByPKIDWithTxn( + txn *badger.Txn, + snap *Snapshot, + pkid *PKID, + snapshotAtEpochNumber uint64, +) (*ValidatorEntry, error) { // Retrieve ValidatorEntry from db. 
key := DBKeyForSnapshotValidatorByPKID(&ValidatorEntry{ValidatorPKID: pkid}, snapshotAtEpochNumber) validatorBytes, err := DBGetWithTxn(txn, snap, key) @@ -407,7 +414,7 @@ func (bav *UtxoView) GetSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumb if err != nil { return nil, errors.Wrapf( err, - "UtxoView.GetSnapshotGlobalActiveStakeAmountNanos: problem retrieving SnapshotGlobalActiveStakeAmountNanos from db: ", + "GetSnapshotGlobalActiveStakeAmountNanos: problem retrieving SnapshotGlobalActiveStakeAmountNanos from db: ", ) } if globalActiveStakeAmountNanos != nil { @@ -419,7 +426,7 @@ func (bav *UtxoView) GetSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumb func (bav *UtxoView) _setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos *uint256.Int, snapshotAtEpochNumber uint64) { if globalActiveStakeAmountNanos == nil { - glog.Errorf("UtxoView._setSnapshotGlobalActiveStakeAmountNanos: called with nil entry, this should never happen") + glog.Errorf("_setSnapshotGlobalActiveStakeAmountNanos: called with nil entry, this should never happen") } bav.SnapshotGlobalActiveStakeAmountNanos[snapshotAtEpochNumber] = globalActiveStakeAmountNanos.Clone() } @@ -428,14 +435,16 @@ func (bav *UtxoView) _flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn * for snapshotAtEpochNumber, globalActiveStakeAmountNanos := range bav.SnapshotGlobalActiveStakeAmountNanos { if globalActiveStakeAmountNanos == nil { return fmt.Errorf( - "UtxoView._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn: found nil entry for SnapshotAtEpochNumber %d, this should never happen", + "_flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn: found nil entry for SnapshotAtEpochNumber %d, this should never happen", snapshotAtEpochNumber, ) } - if err := DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn(txn, bav.Snapshot, globalActiveStakeAmountNanos, snapshotAtEpochNumber, blockHeight); err != nil { + if err := DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn( + txn, bav.Snapshot, 
globalActiveStakeAmountNanos, snapshotAtEpochNumber, blockHeight, + ); err != nil { return errors.Wrapf( err, - "UtxoView._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn: problem setting SnapshotGlobalActiveStakeAmountNanos for SnapshotAtEpochNumber %d: ", + "_flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn: problem setting SnapshotGlobalActiveStakeAmountNanos for SnapshotAtEpochNumber %d: ", snapshotAtEpochNumber, ) } @@ -515,7 +524,7 @@ func (bav *UtxoView) GetSnapshotLeaderScheduleValidator(leaderIndex uint8, snaps // Next, check the db. validatorEntry, err := DBGetSnapshotLeaderScheduleValidator(bav.Handle, bav.Snapshot, leaderIndex, snapshotAtEpochNumber) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetSnapshotLeaderScheduleValidator: error retrieving ValidatorPKID: ") + return nil, errors.Wrapf(err, "GetSnapshotLeaderScheduleValidator: error retrieving ValidatorPKID: ") } if validatorEntry != nil { // Cache the ValidatorPKID in the UtxoView. @@ -526,7 +535,7 @@ func (bav *UtxoView) GetSnapshotLeaderScheduleValidator(leaderIndex uint8, snaps func (bav *UtxoView) _setSnapshotLeaderScheduleValidator(validatorPKID *PKID, index uint8, snapshotAtEpochNumber uint64) { if validatorPKID == nil { - glog.Errorf("UtxoView._setSnapshotLeaderScheduleValidator: called with nil ValidatorPKID, this should never happen") + glog.Errorf("_setSnapshotLeaderScheduleValidator: called with nil ValidatorPKID, this should never happen") return } mapKey := SnapshotLeaderScheduleMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, LeaderIndex: index} @@ -537,7 +546,7 @@ func (bav *UtxoView) _flushSnapshotLeaderScheduleToDbWithTxn(txn *badger.Txn, bl for mapKey, validatorPKID := range bav.SnapshotLeaderSchedule { if validatorPKID == nil { return fmt.Errorf( - "UtxoView._flushSnapshotLeaderScheduleToDbWithTxn: found nil PKID for SnapshotAtEpochNumber %d, this should never happen", + "_flushSnapshotLeaderScheduleToDbWithTxn: found nil PKID for SnapshotAtEpochNumber 
%d, this should never happen", mapKey.SnapshotAtEpochNumber, ) } @@ -546,7 +555,7 @@ func (bav *UtxoView) _flushSnapshotLeaderScheduleToDbWithTxn(txn *badger.Txn, bl ); err != nil { return errors.Wrapf( err, - "UtxoView._flushSnapshotLeaderScheduleToDbWithTxn: problem setting ValidatorPKID for SnapshotAtEpochNumber %d: ", + "_flushSnapshotLeaderScheduleToDbWithTxn: problem setting ValidatorPKID for SnapshotAtEpochNumber %d: ", mapKey.SnapshotAtEpochNumber, ) } From d014e528900156bc3aafd59075d034218199be36 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 30 May 2023 14:58:24 -0400 Subject: [PATCH 072/762] Fix newline breaking pt 2. --- lib/constants.go | 1 - lib/pos_snapshot_entries.go | 22 ++++++++++++---------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/lib/constants.go b/lib/constants.go index 5db509ebf..bf07ee151 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -344,7 +344,6 @@ const ( AssociationsAndAccessGroupsMigration MigrationName = "AssociationsAndAccessGroupsMigration" BalanceModelMigration MigrationName = "BalanceModelMigration" ProofOfStakeNewTxnTypesMigration MigrationName = "ProofOfStakeNewTxnTypesMigration" - ProofOfStakeMigration MigrationName = "ProofOfStakeMigration" ) type EncoderMigrationHeights struct { diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index f2913350c..155692909 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -46,7 +46,7 @@ func (bav *UtxoView) _flushSnapshotGlobalParamsEntryToDbWithTxn(txn *badger.Txn, for snapshotAtEpochNumber, globalParamsEntry := range bav.SnapshotGlobalParamsEntries { if globalParamsEntry == nil { return fmt.Errorf( - "_flushSnapshotGlobalParamsEntryToDbWithTxn: found nil entry for SnapshotAtEpochNumber %d, this should never happen", + "_flushSnapshotGlobalParamsEntryToDb: found nil entry for EpochNumber %d, this should never happen", snapshotAtEpochNumber, ) } @@ -55,7 +55,7 @@ func (bav *UtxoView) 
_flushSnapshotGlobalParamsEntryToDbWithTxn(txn *badger.Txn, ); err != nil { return errors.Wrapf( err, - "_flushSnapshotGlobalParamsEntryToDbWithTxn: problem setting SnapshotGlobalParamsEntry for SnapshotAtEpochNumber %d: ", + "_flushSnapshotGlobalParamsEntryToDb: problem setting SnapshotGlobalParamsEntry for EpochNumber %d: ", snapshotAtEpochNumber, ) } @@ -226,14 +226,16 @@ func (bav *UtxoView) _flushSnapshotValidatorEntriesToDbWithTxn(txn *badger.Txn, for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { if validatorEntry == nil { return fmt.Errorf( - "_flushSnapshotValidatorEntriesToDbWithTxn: found nil entry for SnapshotAtEpochNumber %d, this should never happen", + "_flushSnapshotValidatorEntriesToDb: found nil entry for EpochNumber %d, this should never happen", mapKey.EpochNumber, ) } - if err := DBPutSnapshotValidatorEntryWithTxn(txn, bav.Snapshot, validatorEntry, mapKey.EpochNumber, blockHeight); err != nil { + if err := DBPutSnapshotValidatorEntryWithTxn( + txn, bav.Snapshot, validatorEntry, mapKey.EpochNumber, blockHeight, + ); err != nil { return errors.Wrapf( err, - "_flushSnapshotValidatorEntryToDbWithTxn: problem setting ValidatorEntry for SnapshotAtEpochNumber %d: ", + "_flushSnapshotValidatorEntriesToDb: problem setting ValidatorEntry for EpochNumber %d: ", mapKey.EpochNumber, ) } @@ -435,7 +437,7 @@ func (bav *UtxoView) _flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn * for snapshotAtEpochNumber, globalActiveStakeAmountNanos := range bav.SnapshotGlobalActiveStakeAmountNanos { if globalActiveStakeAmountNanos == nil { return fmt.Errorf( - "_flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn: found nil entry for SnapshotAtEpochNumber %d, this should never happen", + "_flushSnapshotGlobalActiveStakeAmountNanosToDb: found nil entry for EpochNumber %d, this should never happen", snapshotAtEpochNumber, ) } @@ -444,7 +446,7 @@ func (bav *UtxoView) _flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn * ); err != nil { return 
errors.Wrapf( err, - "_flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn: problem setting SnapshotGlobalActiveStakeAmountNanos for SnapshotAtEpochNumber %d: ", + "_flushSnapshotGlobalActiveStakeAmountNanosToDb: problem setting SnapshotGlobalActiveStake for EpochNumber %d: ", snapshotAtEpochNumber, ) } @@ -499,7 +501,7 @@ func DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn( ) error { if globalActiveStakeAmountNanos == nil { // This should never happen but is a sanity check. - glog.Errorf("DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn: called with nil GlobalActiveStakeAmountNanos, this should never happen") + glog.Errorf("DBPutSnapshotGlobalActiveStakeAmountNanos: called with nil GlobalActiveStake, this should never happen") return nil } key := DBKeyForSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumber) @@ -546,7 +548,7 @@ func (bav *UtxoView) _flushSnapshotLeaderScheduleToDbWithTxn(txn *badger.Txn, bl for mapKey, validatorPKID := range bav.SnapshotLeaderSchedule { if validatorPKID == nil { return fmt.Errorf( - "_flushSnapshotLeaderScheduleToDbWithTxn: found nil PKID for SnapshotAtEpochNumber %d, this should never happen", + "_flushSnapshotLeaderScheduleToDb: found nil PKID for EpochNumber %d, this should never happen", mapKey.SnapshotAtEpochNumber, ) } @@ -555,7 +557,7 @@ func (bav *UtxoView) _flushSnapshotLeaderScheduleToDbWithTxn(txn *badger.Txn, bl ); err != nil { return errors.Wrapf( err, - "_flushSnapshotLeaderScheduleToDbWithTxn: problem setting ValidatorPKID for SnapshotAtEpochNumber %d: ", + "_flushSnapshotLeaderScheduleToDb: problem setting ValidatorPKID for SnapshotAtEpochNumber %d: ", mapKey.SnapshotAtEpochNumber, ) } From 8ab7510fdd175b7c1a542585afd9715f4f645986 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 30 May 2023 15:58:07 -0400 Subject: [PATCH 073/762] Fix snapshot global params entry test error. 
--- lib/pos_epoch_complete_hook_test.go | 5 ++++- lib/pos_snapshot_entries.go | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 15c007a22..41db6fd2d 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -155,6 +155,9 @@ func TestRunEpochCompleteHook(t *testing.T) { -1, -1, ) + // Reconstruct the UtxoView to pull in the updated GlobalParamsEntry. + utxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) } { // All validators register + stake to themselves. @@ -220,7 +223,7 @@ func TestRunEpochCompleteHook(t *testing.T) { snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(1) require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) - //require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) + require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) // Test SnapshotValidatorByPKID is populated. 
for _, pkid := range validatorPKIDs { diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 155692909..11ba6c523 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -437,7 +437,7 @@ func (bav *UtxoView) _flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn * for snapshotAtEpochNumber, globalActiveStakeAmountNanos := range bav.SnapshotGlobalActiveStakeAmountNanos { if globalActiveStakeAmountNanos == nil { return fmt.Errorf( - "_flushSnapshotGlobalActiveStakeAmountNanosToDb: found nil entry for EpochNumber %d, this should never happen", + "_flushSnapshotGlobalActiveStakeToDb: found nil entry for EpochNumber %d, this should never happen", snapshotAtEpochNumber, ) } @@ -446,7 +446,7 @@ func (bav *UtxoView) _flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn * ); err != nil { return errors.Wrapf( err, - "_flushSnapshotGlobalActiveStakeAmountNanosToDb: problem setting SnapshotGlobalActiveStake for EpochNumber %d: ", + "_flushSnapshotGlobalActiveStakeToDb: problem setting SnapshotGlobalActiveStake for EpochNumber %d: ", snapshotAtEpochNumber, ) } From e12c0e26f98e7f21c98fc3f3b7b334f995f5e71f Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 30 May 2023 16:19:28 -0400 Subject: [PATCH 074/762] Add block height migration. --- lib/block_view.go | 14 +++++++------- lib/constants.go | 9 +++++++++ lib/pos_epoch_complete_hook.go | 4 +++- lib/pos_snapshot_entries.go | 6 +++--- 4 files changed, 22 insertions(+), 11 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 9bbc005ed..5cd96d3d4 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -133,9 +133,9 @@ type UtxoView struct { // Current RandomSeedHash CurrentRandomSeedHash *RandomSeedHash - // SnapshotGlobalParamsEntries is a map of EpochNumber to GlobalParamsEntry. + // SnapshotGlobalParamEntries is a map of EpochNumber to GlobalParamsEntry. // It contains the snapshot value of the GlobalParamsEntry at the given EpochNumber. 
- SnapshotGlobalParamsEntries map[uint64]*GlobalParamsEntry + SnapshotGlobalParamEntries map[uint64]*GlobalParamsEntry // SnapshotValidatorEntries is a map of to a ValidatorEntry. // It contains the snapshot value of a ValidatorEntry at the given EpochNumber. @@ -256,8 +256,8 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // CurrentEpochEntry bav.CurrentEpochEntry = nil - // SnapshotGlobalParamsEntries - bav.SnapshotGlobalParamsEntries = make(map[uint64]*GlobalParamsEntry) + // SnapshotGlobalParamEntries + bav.SnapshotGlobalParamEntries = make(map[uint64]*GlobalParamsEntry) // SnapshotValidatorEntries bav.SnapshotValidatorEntries = make(map[SnapshotValidatorMapKey]*ValidatorEntry) @@ -554,9 +554,9 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.CurrentRandomSeedHash = bav.CurrentRandomSeedHash.Copy() } - // Copy the SnapshotGlobalParamsEntries - for epochNumber, globalParamsEntry := range bav.SnapshotGlobalParamsEntries { - newView.SnapshotGlobalParamsEntries[epochNumber] = globalParamsEntry.Copy() + // Copy the SnapshotGlobalParamEntries + for epochNumber, globalParamsEntry := range bav.SnapshotGlobalParamEntries { + newView.SnapshotGlobalParamEntries[epochNumber] = globalParamsEntry.Copy() } // Copy the SnapshotValidatorEntries diff --git a/lib/constants.go b/lib/constants.go index bf07ee151..640d8a828 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -344,6 +344,7 @@ const ( AssociationsAndAccessGroupsMigration MigrationName = "AssociationsAndAccessGroupsMigration" BalanceModelMigration MigrationName = "BalanceModelMigration" ProofOfStakeNewTxnTypesMigration MigrationName = "ProofOfStakeNewTxnTypesMigration" + ProofOfStakeSnapshottingMigration MigrationName = "ProofOfStakeSnapshottingMigration" ) type EncoderMigrationHeights struct { @@ -360,6 +361,9 @@ type EncoderMigrationHeights struct { // This coincides with the ProofOfStakeNewTxnTypesBlockHeight ProofOfStakeNewTxnTypesMigration MigrationHeight + + // This coincides with 
the ProofOfStakeSnapshottingBlockHeight + ProofOfStakeSnapshottingMigration MigrationHeight } func GetEncoderMigrationHeights(forkHeights *ForkHeights) *EncoderMigrationHeights { @@ -389,6 +393,11 @@ func GetEncoderMigrationHeights(forkHeights *ForkHeights) *EncoderMigrationHeigh Height: uint64(forkHeights.ProofOfStakeNewTxnTypesBlockHeight), Name: ProofOfStakeNewTxnTypesMigration, }, + ProofOfStakeSnapshottingMigration: MigrationHeight{ + Version: 5, + Height: uint64(forkHeights.ProofOfStakeSnapshottingBlockHeight), + Name: ProofOfStakeSnapshottingMigration, + }, } } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index a8ad89772..668479edb 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -7,6 +7,7 @@ import ( func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) { // Returns true if this is the last block in the current epoch. + if blockHeight < uint64(bav.Params.ForkHeights.ProofOfStakeSnapshottingBlockHeight) { // Return false if we have not started snapshotting the relevant PoS entries yet. return false, nil @@ -69,7 +70,8 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { bav._setSnapshotLeaderScheduleValidator(validatorPKID, uint8(index), currentEpochEntry.EpochNumber) } - // TODO: Is there any clean-up we should do here deleting old snapshots that are no longer useful? + // TODO: Jail inactive validators. + // TODO: Delete old snapshots that are no longer used. // Roll-over a new epoch by setting a new CurrentEpochEntry. nextEpochEntry := &EpochEntry{ diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 11ba6c523..60afb3e7a 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -17,7 +17,7 @@ import ( func (bav *UtxoView) GetSnapshotGlobalParamsEntry(snapshotAtEpochNumber uint64) (*GlobalParamsEntry, error) { // Check the UtxoView first. 
- if globalParamsEntry, exists := bav.SnapshotGlobalParamsEntries[snapshotAtEpochNumber]; exists { + if globalParamsEntry, exists := bav.SnapshotGlobalParamEntries[snapshotAtEpochNumber]; exists { return globalParamsEntry, nil } // If we don't have it in the UtxoView, check the db. @@ -39,11 +39,11 @@ func (bav *UtxoView) _setSnapshotGlobalParamsEntry(globalParamsEntry *GlobalPara if globalParamsEntry == nil { glog.Errorf("_setSnapshotGlobalParamsEntry: called with nil entry, this should never happen") } - bav.SnapshotGlobalParamsEntries[snapshotAtEpochNumber] = globalParamsEntry.Copy() + bav.SnapshotGlobalParamEntries[snapshotAtEpochNumber] = globalParamsEntry.Copy() } func (bav *UtxoView) _flushSnapshotGlobalParamsEntryToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { - for snapshotAtEpochNumber, globalParamsEntry := range bav.SnapshotGlobalParamsEntries { + for snapshotAtEpochNumber, globalParamsEntry := range bav.SnapshotGlobalParamEntries { if globalParamsEntry == nil { return fmt.Errorf( "_flushSnapshotGlobalParamsEntryToDb: found nil entry for EpochNumber %d, this should never happen", From b537a0d53cc0966628406ad94e10edc1f840fe8a Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 30 May 2023 16:37:41 -0400 Subject: [PATCH 075/762] Move epoch duration to utxo view params. --- lib/constants.go | 10 ++++++++++ lib/pos_epoch_complete_hook.go | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/constants.go b/lib/constants.go index 640d8a828..b87fc544f 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -626,6 +626,10 @@ type DeSoParams struct { // TODO: Move this to GlobalParamsEntry. LeaderScheduleMaxNumValidators uint64 + // EpochDurationNumBlocks is the number of blocks included in one epoch. + // TODO: Move this to GlobalParamsEntry. 
+ EpochDurationNumBlocks uint64 + ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -1011,6 +1015,9 @@ var DeSoMainnetParams = DeSoParams{ // The max number of validators included in a leader schedule. LeaderScheduleMaxNumValidators: uint64(100), + // The number of blocks in one epoch + EpochDurationNumBlocks: uint64(3600), + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1251,6 +1258,9 @@ var DeSoTestnetParams = DeSoParams{ // The max number of validators included in a leader schedule. LeaderScheduleMaxNumValidators: uint64(100), + // The number of blocks in one epoch + EpochDurationNumBlocks: uint64(3600), + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 668479edb..5ec303ec5 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -76,7 +76,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Roll-over a new epoch by setting a new CurrentEpochEntry. nextEpochEntry := &EpochEntry{ EpochNumber: currentEpochEntry.EpochNumber + 1, - FinalBlockHeight: blockHeight + 100, // TODO: read this duration from the GlobalParamsEntry. + FinalBlockHeight: blockHeight + bav.Params.EpochDurationNumBlocks, } bav._setCurrentEpochEntry(nextEpochEntry) From ed44960e8fd3f59ced09988250d2501f0467b1fa Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 31 May 2023 13:00:38 -0400 Subject: [PATCH 076/762] Rename to SnapshotAtEpochNumber more. 
--- lib/pos_snapshot_entries.go | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 60afb3e7a..0a1451c27 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -121,8 +121,8 @@ func DBPutSnapshotGlobalParamsEntryWithTxn( // type SnapshotValidatorMapKey struct { - EpochNumber uint64 - ValidatorPKID PKID + SnapshotAtEpochNumber uint64 + ValidatorPKID PKID } func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64) error { @@ -149,7 +149,7 @@ func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64) err func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { // Check the UtxoView first. - mapKey := SnapshotValidatorMapKey{EpochNumber: snapshotAtEpochNumber, ValidatorPKID: *pkid} + mapKey := SnapshotValidatorMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *pkid} if validatorEntry, exists := bav.SnapshotValidatorEntries[mapKey]; exists { return validatorEntry, nil } @@ -171,7 +171,7 @@ func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID, snapshotAtEpochNumbe func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit uint64, snapshotAtEpochNumber uint64) ([]*ValidatorEntry, error) { var utxoViewValidatorEntries []*ValidatorEntry for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { - if mapKey.EpochNumber == snapshotAtEpochNumber { + if mapKey.SnapshotAtEpochNumber == snapshotAtEpochNumber { utxoViewValidatorEntries = append(utxoViewValidatorEntries, validatorEntry) } } @@ -189,7 +189,9 @@ func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit uint64, snapsho // We only pull ValidatorEntries from the db that are not present in the // UtxoView. As a sanity check, we double-check that the ValidatorEntry // is not already in the UtxoView here. 
- mapKey := SnapshotValidatorMapKey{EpochNumber: snapshotAtEpochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID} + mapKey := SnapshotValidatorMapKey{ + SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID, + } if _, exists := bav.SnapshotValidatorEntries[mapKey]; !exists { bav._setValidatorEntryMappings(validatorEntry) } @@ -197,7 +199,7 @@ func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit uint64, snapsho // Pull !isDeleted, active ValidatorEntries from the UtxoView with stake > 0. var validatorEntries []*ValidatorEntry for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { - if mapKey.EpochNumber == snapshotAtEpochNumber && + if mapKey.SnapshotAtEpochNumber == snapshotAtEpochNumber && !validatorEntry.isDeleted && validatorEntry.Status() == ValidatorStatusActive && !validatorEntry.TotalStakeAmountNanos.IsZero() { @@ -218,7 +220,9 @@ func (bav *UtxoView) _setSnapshotValidatorEntry(validatorEntry *ValidatorEntry, glog.Errorf("_setSnapshotValidatorEntry: called with nil entry, this should never happen") return } - mapKey := SnapshotValidatorMapKey{EpochNumber: snapshotAtEpochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID} + mapKey := SnapshotValidatorMapKey{ + SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID, + } bav.SnapshotValidatorEntries[mapKey] = validatorEntry.Copy() } @@ -227,16 +231,16 @@ func (bav *UtxoView) _flushSnapshotValidatorEntriesToDbWithTxn(txn *badger.Txn, if validatorEntry == nil { return fmt.Errorf( "_flushSnapshotValidatorEntriesToDb: found nil entry for EpochNumber %d, this should never happen", - mapKey.EpochNumber, + mapKey.SnapshotAtEpochNumber, ) } if err := DBPutSnapshotValidatorEntryWithTxn( - txn, bav.Snapshot, validatorEntry, mapKey.EpochNumber, blockHeight, + txn, bav.Snapshot, validatorEntry, mapKey.SnapshotAtEpochNumber, blockHeight, ); err != nil { return errors.Wrapf( err, "_flushSnapshotValidatorEntriesToDb: problem 
setting ValidatorEntry for EpochNumber %d: ", - mapKey.EpochNumber, + mapKey.SnapshotAtEpochNumber, ) } } From c46550ba264376b63d186dbc14d28063c7503d6f Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 31 May 2023 13:05:09 -0400 Subject: [PATCH 077/762] Rename to SnapshotAtEpochNumber more more. --- lib/block_view.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 5cd96d3d4..90eba26de 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -133,21 +133,21 @@ type UtxoView struct { // Current RandomSeedHash CurrentRandomSeedHash *RandomSeedHash - // SnapshotGlobalParamEntries is a map of EpochNumber to GlobalParamsEntry. - // It contains the snapshot value of the GlobalParamsEntry at the given EpochNumber. + // SnapshotGlobalParamEntries is a map of SnapshotAtEpochNumber to GlobalParamsEntry. + // It contains the snapshot value of the GlobalParamsEntry at the given SnapshotAtEpochNumber. SnapshotGlobalParamEntries map[uint64]*GlobalParamsEntry - // SnapshotValidatorEntries is a map of to a ValidatorEntry. - // It contains the snapshot value of a ValidatorEntry at the given EpochNumber. + // SnapshotValidatorEntries is a map of to a ValidatorEntry. + // It contains the snapshot value of a ValidatorEntry at the given SnapshotAtEpochNumber. SnapshotValidatorEntries map[SnapshotValidatorMapKey]*ValidatorEntry - // SnapshotGlobalActiveStakeAmountNanos is a map of EpochNumber to GlobalActiveStakeAmountNanos. - // It contains the snapshot value of the GlobalActiveStakeAmountNanos at the given EpochNumber. + // SnapshotGlobalActiveStakeAmountNanos is a map of SnapshotAtEpochNumber to GlobalActiveStakeAmountNanos. + // It contains the snapshot value of the GlobalActiveStakeAmountNanos at the given SnapshotAtEpochNumber. SnapshotGlobalActiveStakeAmountNanos map[uint64]*uint256.Int - // SnapshotLeaderSchedule is a map of to a ValidatorPKID. 
+ // SnapshotLeaderSchedule is a map of to a ValidatorPKID. // It contains the PKID of the validator at the given index in the leader schedule - // generated at the given EpochNumber. + // generated at the given SnapshotAtEpochNumber. SnapshotLeaderSchedule map[SnapshotLeaderScheduleMapKey]*PKID // The hash of the tip the view is currently referencing. Mainly used From d4c164e70a68cbfa13e8e5d9dff9103e23ba9a79 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 31 May 2023 13:08:46 -0400 Subject: [PATCH 078/762] Fix gofmt. --- lib/block_view.go | 4 ++-- lib/pos_epoch_complete_hook_test.go | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 90eba26de..1b2cb3d1b 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -133,7 +133,7 @@ type UtxoView struct { // Current RandomSeedHash CurrentRandomSeedHash *RandomSeedHash - // SnapshotGlobalParamEntries is a map of SnapshotAtEpochNumber to GlobalParamsEntry. + // SnapshotGlobalParamEntries is a map of SnapshotAtEpochNumber to a GlobalParamsEntry. // It contains the snapshot value of the GlobalParamsEntry at the given SnapshotAtEpochNumber. SnapshotGlobalParamEntries map[uint64]*GlobalParamsEntry @@ -141,7 +141,7 @@ type UtxoView struct { // It contains the snapshot value of a ValidatorEntry at the given SnapshotAtEpochNumber. SnapshotValidatorEntries map[SnapshotValidatorMapKey]*ValidatorEntry - // SnapshotGlobalActiveStakeAmountNanos is a map of SnapshotAtEpochNumber to GlobalActiveStakeAmountNanos. + // SnapshotGlobalActiveStakeAmountNanos is a map of SnapshotAtEpochNumber to a GlobalActiveStakeAmountNanos. // It contains the snapshot value of the GlobalActiveStakeAmountNanos at the given SnapshotAtEpochNumber. 
SnapshotGlobalActiveStakeAmountNanos map[uint64]*uint256.Int diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 41db6fd2d..8426ee132 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -202,7 +202,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Nil(t, snapshotGlobalActiveStakeAmountNanos) // Test SnapshotLeaderSchedule is nil. - for index, _ := range validatorPKIDs { + for index := range validatorPKIDs { snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint8(index), 1) require.NoError(t, err) require.Nil(t, snapshotLeaderScheduleValidator) @@ -247,7 +247,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, snapshotGlobalActiveStakeAmountNanos, uint256.NewInt().SetUint64(2800)) // Test SnapshotLeaderSchedule is populated. - for index, _ := range validatorPKIDs { + for index := range validatorPKIDs { snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint8(index), 1) require.NoError(t, err) require.NotNil(t, snapshotLeaderScheduleValidator) @@ -277,7 +277,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Nil(t, snapshotGlobalActiveStakeAmountNanos) // Test SnapshotLeaderSchedule for a future epoch is nil. - for index, _ := range validatorPKIDs { + for index := range validatorPKIDs { snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint8(index), 2) require.NoError(t, err) require.Nil(t, snapshotLeaderScheduleValidator) From f832130a4c6928f0f56ee4e163adec92b1e1017b Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Thu, 1 Jun 2023 11:17:16 -0400 Subject: [PATCH 079/762] Add default epoch entry. 
--- lib/block_view_stake_test.go | 4 ---- lib/block_view_validator_test.go | 29 ----------------------------- lib/pos_epoch.go | 7 ++++++- lib/pos_epoch_test.go | 7 +++++-- 4 files changed, 11 insertions(+), 36 deletions(-) diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index f1ff8576f..81b6f8a54 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -933,10 +933,6 @@ func TestStakingWithDerivedKey(t *testing.T) { return fees, nil } - // Seed a CurrentEpochEntry. - epochUtxoView := newUtxoView() - epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) - require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) currentEpochNumber, err := newUtxoView().GetCurrentEpochNumber() require.NoError(t, err) diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 33a4d210d..3fe943f2b 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -65,12 +65,6 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID - // Seed a CurrentEpochEntry. - epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) - require.NoError(t, err) - epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) - require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) - { // ParamUpdater set min fee rate params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true @@ -576,12 +570,6 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { return nil } - // Seed a CurrentEpochEntry. 
- epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) - require.NoError(t, err) - epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) - require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) - { // ParamUpdater set min fee rate params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true @@ -750,12 +738,6 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID - // Seed a CurrentEpochEntry. - epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) - require.NoError(t, err) - epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) - require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) - { // ParamUpdater set min fee rate params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true @@ -1188,12 +1170,6 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID - // Seed a CurrentEpochEntry. - epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) - require.NoError(t, err) - epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) - require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) - { // ParamUpdater set min fee rate params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true @@ -1384,11 +1360,6 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID - // Seed a CurrentEpochEntry. 
- epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) - require.NoError(t, err) - epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) - require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) currentEpochNumber, err := utxoView().GetCurrentEpochNumber() require.NoError(t, err) diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index cb4e2c16e..1e4f90612 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -5,6 +5,7 @@ import ( "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" + "math" ) // @@ -78,8 +79,12 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { if epochEntry != nil { // Cache in the UtxoView. bav._setCurrentEpochEntry(epochEntry) + return epochEntry, nil } - return epochEntry, nil + + // If still not found, return the DefaultEpochEntry. This will + // be the case prior to the ProofOfStakeSnapshottingBlockHeight. + return &EpochEntry{EpochNumber: 0, FinalBlockHeight: math.MaxUint64}, nil } func (bav *UtxoView) GetCurrentEpochNumber() (uint64, error) { diff --git a/lib/pos_epoch_test.go b/lib/pos_epoch_test.go index 3d84f1ec9..fbc25e538 100644 --- a/lib/pos_epoch_test.go +++ b/lib/pos_epoch_test.go @@ -2,6 +2,7 @@ package lib import ( "github.com/stretchr/testify/require" + "math" "testing" ) @@ -23,10 +24,12 @@ func TestCurrentEpoch(t *testing.T) { // Test that the CurrentEpoch is nil in the UtxoView. require.Nil(t, utxoView.CurrentEpochEntry) - // Test GetCurrentEpoch(). + // Test GetCurrentEpoch() returns the DefaultEpochEntry. epochEntry, err = utxoView.GetCurrentEpochEntry() require.NoError(t, err) - require.Nil(t, epochEntry) + require.NotNil(t, epochEntry) + require.Equal(t, epochEntry.EpochNumber, uint64(0)) + require.Equal(t, epochEntry.FinalBlockHeight, uint64(math.MaxUint64)) // Set the CurrentEpoch. 
epochEntry = &EpochEntry{ From 32770d07c13cf85f58a3b63f95bdbc17d09793b6 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Thu, 1 Jun 2023 11:24:11 -0400 Subject: [PATCH 080/762] Add special logic to handle the first run of snapshotting. --- lib/pos_epoch_complete_hook.go | 4 ++++ lib/pos_epoch_complete_hook_test.go | 9 ++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 5ec303ec5..2b74e2576 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -12,6 +12,10 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) // Return false if we have not started snapshotting the relevant PoS entries yet. return false, nil } + if blockHeight == uint64(bav.Params.ForkHeights.ProofOfStakeSnapshottingBlockHeight) { + // As soon as we enable snapshotting for the first time, we should run the OnEpochCompleteHook. + return true, nil + } currentEpochEntry, err := bav.GetCurrentEpochEntry() if err != nil { return false, errors.Wrapf(err, "IsEpochComplete: problem retrieving CurrentEpochEntry: ") diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 8426ee132..e77781286 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -28,16 +28,15 @@ func TestIsLastBlockInCurrentEpoch(t *testing.T) { utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) - // The BlockHeight is before the PoS fork height. + // The BlockHeight is before the PoS snapshotting fork height. isLastBlockInCurrentEpoch, err = utxoView.IsLastBlockInCurrentEpoch(0) require.NoError(t, err) require.False(t, isLastBlockInCurrentEpoch) - // The CurrentEpochEntry is nil. + // The BlockHeight is equal to the PoS snapshotting fork height. 
isLastBlockInCurrentEpoch, err = utxoView.IsLastBlockInCurrentEpoch(1) - require.Error(t, err) - require.Contains(t, err.Error(), "CurrentEpochEntry is nil, this should never happen") - require.False(t, isLastBlockInCurrentEpoch) + require.NoError(t, err) + require.True(t, isLastBlockInCurrentEpoch) // Seed a CurrentEpochEntry. utxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: 5}) From 72ec00a13ea27b4105a3f407543d3a58191c77a9 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Thu, 1 Jun 2023 11:25:39 -0400 Subject: [PATCH 081/762] Rename DefaultEpochEntry to GenesisEpochEntry. --- lib/pos_epoch.go | 2 +- lib/pos_epoch_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index 1e4f90612..b16c986b3 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -82,7 +82,7 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { return epochEntry, nil } - // If still not found, return the DefaultEpochEntry. This will + // If still not found, return the GenesisEpochEntry. This will // be the case prior to the ProofOfStakeSnapshottingBlockHeight. return &EpochEntry{EpochNumber: 0, FinalBlockHeight: math.MaxUint64}, nil } diff --git a/lib/pos_epoch_test.go b/lib/pos_epoch_test.go index fbc25e538..9d3f58ecb 100644 --- a/lib/pos_epoch_test.go +++ b/lib/pos_epoch_test.go @@ -24,7 +24,7 @@ func TestCurrentEpoch(t *testing.T) { // Test that the CurrentEpoch is nil in the UtxoView. require.Nil(t, utxoView.CurrentEpochEntry) - // Test GetCurrentEpoch() returns the DefaultEpochEntry. + // Test GetCurrentEpoch() returns the GenesisEpochEntry. epochEntry, err = utxoView.GetCurrentEpochEntry() require.NoError(t, err) require.NotNil(t, epochEntry) From 96b9aece8c40689c73938d0ef28d99d1b93cc13d Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Thu, 1 Jun 2023 12:11:42 -0400 Subject: [PATCH 082/762] Update comment. 
--- lib/pos_epoch.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index b16c986b3..6025b04cf 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -82,8 +82,8 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { return epochEntry, nil } - // If still not found, return the GenesisEpochEntry. This will - // be the case prior to the ProofOfStakeSnapshottingBlockHeight. + // If still not found, return the GenesisEpochEntry. This will be the + // case prior to the first execution of the OnEpochCompleteHook. return &EpochEntry{EpochNumber: 0, FinalBlockHeight: math.MaxUint64}, nil } From 838e6ee0ff5b1021b33efe1c62e83ebd1536b779 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 2 Jun 2023 11:51:21 -0400 Subject: [PATCH 083/762] Test snapshotting with empty validator set. --- lib/pos_epoch_complete_hook_test.go | 92 ++++++++++++++++++++++------- 1 file changed, 72 insertions(+), 20 deletions(-) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index e77781286..e10e4a798 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -109,9 +109,12 @@ func TestRunEpochCompleteHook(t *testing.T) { validatorPKIDs := []*PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} // Seed a CurrentEpochEntry. - utxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight}) + utxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 0, FinalBlockHeight: blockHeight}) require.NoError(t, utxoView.FlushToDb(blockHeight)) + // For these tests, we set each epoch duration to only one block. + params.EpochDurationNumBlocks = uint64(1) + // Helper utils _registerAndStake := func(publicKey string, privateKey string, stakeAmountNanos uint64) { // Convert PublicKeyBase58Check to PublicKeyBytes. 
@@ -159,60 +162,109 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) } { - // All validators register + stake to themselves. - _registerAndStake(m0Pub, m0Priv, 100) - _registerAndStake(m1Pub, m1Priv, 200) - _registerAndStake(m2Pub, m2Priv, 300) - _registerAndStake(m3Pub, m3Priv, 400) - _registerAndStake(m4Pub, m4Priv, 500) - _registerAndStake(m5Pub, m5Priv, 600) - _registerAndStake(m6Pub, m6Priv, 700) + // Test the state of the snapshots prior to running our first OnEpochCompleteHook. - validatorEntries, err := utxoView.GetTopActiveValidatorsByStake(10) + // Test CurrentEpochNumber. + currentEpochNumber, err := utxoView.GetCurrentEpochNumber() require.NoError(t, err) - require.Len(t, validatorEntries, 7) + require.Equal(t, currentEpochNumber, uint64(0)) + + // Test SnapshotGlobalParamsEntry is nil. + snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(0) + require.NoError(t, err) + require.Nil(t, snapshotGlobalParamsEntry) + + // Test SnapshotValidatorByPKID is nil. + for _, pkid := range validatorPKIDs { + snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorByPKID(pkid, 0) + require.NoError(t, err) + require.Nil(t, snapshotValidatorEntry) + } + + // Test SnapshotTopActiveValidatorsByStake is empty. + validatorEntries, err := utxoView.GetSnapshotTopActiveValidatorsByStake(10, 0) + require.NoError(t, err) + require.Empty(t, validatorEntries) + + // Test SnapshotGlobalActiveStakeAmountNanos is nil. + snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(0) + require.NoError(t, err) + require.Nil(t, snapshotGlobalActiveStakeAmountNanos) + + // Test SnapshotLeaderSchedule is nil. + for index := range validatorPKIDs { + snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint8(index), 0) + require.NoError(t, err) + require.Nil(t, snapshotLeaderScheduleValidator) + } } { + // Test RunOnEpochCompleteHook() with no validators or stakers. 
+ require.NoError(t, utxoView.RunEpochCompleteHook(blockHeight)) + require.NoError(t, utxoView.FlushToDb(blockHeight)) + } + { + // Test the state of the snapshots after running our first OnEpochCompleteHook + // but with no existing validators or stakers. + // Test CurrentEpochNumber. currentEpochNumber, err := utxoView.GetCurrentEpochNumber() require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(1)) // Test SnapshotGlobalParamsEntry is nil. - snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(1) + snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(0) require.NoError(t, err) - require.Nil(t, snapshotGlobalParamsEntry) + require.NotNil(t, snapshotGlobalParamsEntry) + require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) // Test SnapshotValidatorByPKID is nil. for _, pkid := range validatorPKIDs { - snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorByPKID(pkid, 1) + snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorByPKID(pkid, 0) require.NoError(t, err) require.Nil(t, snapshotValidatorEntry) } // Test SnapshotTopActiveValidatorsByStake is empty. - validatorEntries, err := utxoView.GetSnapshotTopActiveValidatorsByStake(10, 1) + validatorEntries, err := utxoView.GetSnapshotTopActiveValidatorsByStake(10, 0) require.NoError(t, err) require.Empty(t, validatorEntries) - // Test SnapshotGlobalActiveStakeAmountNanos is nil. - snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(1) + // Test SnapshotGlobalActiveStakeAmountNanos is zero. + snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(0) require.NoError(t, err) - require.Nil(t, snapshotGlobalActiveStakeAmountNanos) + require.True(t, snapshotGlobalActiveStakeAmountNanos.IsZero()) // Test SnapshotLeaderSchedule is nil. 
for index := range validatorPKIDs { - snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint8(index), 1) + snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint8(index), 0) require.NoError(t, err) require.Nil(t, snapshotLeaderScheduleValidator) } } + { + // All validators register + stake to themselves. + _registerAndStake(m0Pub, m0Priv, 100) + _registerAndStake(m1Pub, m1Priv, 200) + _registerAndStake(m2Pub, m2Priv, 300) + _registerAndStake(m3Pub, m3Priv, 400) + _registerAndStake(m4Pub, m4Priv, 500) + _registerAndStake(m5Pub, m5Priv, 600) + _registerAndStake(m6Pub, m6Priv, 700) + + validatorEntries, err := utxoView.GetTopActiveValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, validatorEntries, 7) + } { // Test RunOnEpochCompleteHook(). - require.NoError(t, utxoView.RunEpochCompleteHook(blockHeight)) + require.NoError(t, utxoView.RunEpochCompleteHook(blockHeight+1)) require.NoError(t, utxoView.FlushToDb(blockHeight)) } { + // Test the state of the snapshots after running the second + // OnEpochCompleteHook with existing validators and stakers. + // Test CurrentEpochNumber. currentEpochNumber, err := utxoView.GetCurrentEpochNumber() require.NoError(t, err) From 582da4615615b689bb8449e8678ed7c01db56731 Mon Sep 17 00:00:00 2001 From: Matt Foley <100429827+mattfoley8@users.noreply.github.com> Date: Mon, 5 Jun 2023 11:21:42 -0400 Subject: [PATCH 084/762] Mf/param updater update pos global params (#543) * Allow ParamUpdater to update PoS GlobalParams. * Pass SnapshotAtEpochNumber to global param getters. * Add TODOs to retrieve snapshot values. * Add nil check for extra data param. 
--- lib/block_view.go | 16 +++++++ lib/block_view_bitcoin_test.go | 1 + lib/block_view_derived_key_test.go | 1 + lib/block_view_stake.go | 3 +- lib/block_view_stake_test.go | 49 ++++++------------- lib/block_view_test.go | 40 ++++++++++++++-- lib/block_view_types.go | 24 +++++++++- lib/block_view_validator.go | 3 +- lib/block_view_validator_test.go | 75 ++++++++++-------------------- lib/blockchain.go | 7 ++- lib/blockchain_test.go | 2 +- lib/constants.go | 25 +++++----- lib/db_utils.go | 2 +- lib/pos_global_params.go | 23 +++++++++ 14 files changed, 163 insertions(+), 108 deletions(-) create mode 100644 lib/pos_global_params.go diff --git a/lib/block_view.go b/lib/block_view.go index 3b3a0c497..8552d764d 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -2955,6 +2955,22 @@ func (bav *UtxoView) _connectUpdateGlobalParams( newGlobalParamsEntry.MaxNonceExpirationBlockHeightOffset = newMaxNonceExpirationBlockHeightOffset } + if blockHeight >= bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { + var bytesRead int + if len(extraData[StakeLockupEpochDuration]) > 0 { + newGlobalParamsEntry.StakeLockupEpochDuration, bytesRead = Uvarint(extraData[StakeLockupEpochDuration]) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode StakeLockupEpochDuration as uint64") + } + } + if len(extraData[ValidatorJailEpochDuration]) > 0 { + newGlobalParamsEntry.ValidatorJailEpochDuration, bytesRead = Uvarint(extraData[ValidatorJailEpochDuration]) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode ValidatorJailEpochDuration as uint64") + } + } + } + var newForbiddenPubKeyEntry *ForbiddenPubKeyEntry var prevForbiddenPubKeyEntry *ForbiddenPubKeyEntry var forbiddenPubKey []byte diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index fa288c98a..cb037f1d4 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -1918,6 +1918,7 @@ func 
TestSpendOffOfUnminedTxnsBitcoinExchange(t *testing.T) { 0, nil, -1, + map[string][]byte{}, 100, /*feeRateNanosPerKB*/ nil, []*DeSoOutput{}) diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index 65d39fd29..37dcfc481 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -457,6 +457,7 @@ func _doTxnWithBlockHeight( minNetworkFeeNanosPerKB, nil, -1, + map[string][]byte{}, feeRateNanosPerKB, testMeta.mempool, nil, diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 66df9a90b..f1a16933a 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1955,7 +1955,8 @@ func (bav *UtxoView) IsValidUnlockStakeMetadata(transactorPkBytes []byte, metada } // Validate EndEpochNumber + StakeLockupEpochDuration <= CurrentEpochNumber. - if metadata.EndEpochNumber+bav.Params.StakeLockupEpochDuration > currentEpochNumber { + // TODO: Retrieve snapshot StakeLockupEpochDuration, not current value. + if metadata.EndEpochNumber+bav.GetStakeLockupEpochDuration(0) > currentEpochNumber { return errors.Wrapf(RuleErrorInvalidUnlockStakeMustWaitLockupDuration, "UtxoView.IsValidUnlockStakeMetadata: ") } diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 81b6f8a54..1c057375f 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -32,7 +32,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // For these tests, we set StakeLockupEpochDuration to zero. // We test the lockup logic in a separate test. - params.StakeLockupEpochDuration = 0 + params.DefaultStakeLockupEpochDuration = 0 // Mine a few blocks to give the senderPkString some money. for ii := 0; ii < 10; ii++ { @@ -82,18 +82,14 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) { - // Param Updater set min fee rate to 101 nanos per KB + // ParamUpdater set MinFeeRateNanos. 
params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - _updateGlobalParamsEntryWithTestMeta( + _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - -1, - int64(testMeta.feeRateNanosPerKb), - -1, - -1, - -1, + map[string][]byte{}, ) } { @@ -779,7 +775,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // For these tests, we set StakeLockupEpochDuration to zero. // We test the lockup logic in a separate test. - params.StakeLockupEpochDuration = 0 + params.DefaultStakeLockupEpochDuration = 0 // Mine a few blocks to give the senderPkString some money. for ii := 0; ii < 10; ii++ { @@ -937,18 +933,14 @@ func TestStakingWithDerivedKey(t *testing.T) { require.NoError(t, err) { - // ParamUpdater set min fee rate + // ParamUpdater set MinFeeRateNanos. params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - _updateGlobalParamsEntryWithTestMeta( + _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - -1, - int64(testMeta.feeRateNanosPerKb), - -1, - -1, - -1, + map[string][]byte{}, ) } { @@ -1783,9 +1775,6 @@ func TestStakeLockupEpochDuration(t *testing.T) { GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) chain.snapshot = nil - // For these tests, we set StakeLockupEpochDuration to 3. - params.StakeLockupEpochDuration = 3 - // Mine a few blocks to give the senderPkString some money. for ii := 0; ii < 10; ii++ { _, err = miner.MineAndProcessSingleBlock(0, mempool) @@ -1824,18 +1813,14 @@ func TestStakeLockupEpochDuration(t *testing.T) { require.NoError(t, err) { - // ParamUpdater set min fee rate + // ParamUpdater set MinFeeRateNanos and StakeLockupEpochDuration=3. 
params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - _updateGlobalParamsEntryWithTestMeta( + _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - -1, - int64(testMeta.feeRateNanosPerKb), - -1, - -1, - -1, + map[string][]byte{StakeLockupEpochDuration: UintToBuf(3)}, ) } { @@ -1954,7 +1939,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { chain.snapshot = nil // For these tests, we set ValidatorJailEpochDuration to 0. - params.ValidatorJailEpochDuration = 0 + params.DefaultValidatorJailEpochDuration = 0 // Mine a few blocks to give the senderPkString some money. for ii := 0; ii < 10; ii++ { @@ -2019,18 +2004,14 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) { - // ParamUpdater set min fee rate + // ParamUpdater set MinFeeRateNanos. params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - _updateGlobalParamsEntryWithTestMeta( + _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - -1, - int64(testMeta.feeRateNanosPerKb), - -1, - -1, - -1, + map[string][]byte{}, ) } { diff --git a/lib/block_view_test.go b/lib/block_view_test.go index a2e81b278..e56190a72 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -918,7 +918,7 @@ func _updateGlobalParamsEntry(t *testing.T, chain *Blockchain, db *badger.DB, _utxoOps []*UtxoOperation, _txn *MsgDeSoTxn, _height uint32, _err error) { return _updateGlobalParamsEntryWithMempool(t, chain, db, params, feeRateNanosPerKB, updaterPkBase58Check, updaterPrivBase58Check, usdCentsPerBitcoin, minimumNetworkFeesNanosPerKB, createProfileFeeNanos, - createNFTFeeNanos, maxCopiesPerNFT, -1, flushToDb, nil) + createNFTFeeNanos, maxCopiesPerNFT, -1, map[string][]byte{}, flushToDb, nil) } func _updateGlobalParamsEntryWithMaxNonceExpirationBlockHeightOffset(t 
*testing.T, chain *Blockchain, db *badger.DB, @@ -928,14 +928,14 @@ func _updateGlobalParamsEntryWithMaxNonceExpirationBlockHeightOffset(t *testing. _utxoOps []*UtxoOperation, _txn *MsgDeSoTxn, _height uint32, _err error) { return _updateGlobalParamsEntryWithMempool(t, chain, db, params, feeRateNanosPerKB, updaterPkBase58Check, updaterPrivBase58Check, usdCentsPerBitcoin, minimumNetworkFeesNanosPerKB, createProfileFeeNanos, - createNFTFeeNanos, maxCopiesPerNFT, maxNonceExpirationBlockHeightOffset, flushToDb, nil) + createNFTFeeNanos, maxCopiesPerNFT, maxNonceExpirationBlockHeightOffset, map[string][]byte{}, flushToDb, nil) } func _updateGlobalParamsEntryWithMempool(t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoParams, feeRateNanosPerKB uint64, updaterPkBase58Check string, updaterPrivBase58Check string, usdCentsPerBitcoin int64, minimumNetworkFeesNanosPerKB int64, createProfileFeeNanos int64, createNFTFeeNanos int64, maxCopiesPerNFT int64, maxNonceExpirationBlockHeightOffset int64, - flushToDb bool, mempool *DeSoMempool) ( + extraData map[string][]byte, flushToDb bool, mempool *DeSoMempool) ( _utxoOps []*UtxoOperation, _txn *MsgDeSoTxn, _height uint32, _err error) { assert := assert.New(t) @@ -955,6 +955,7 @@ func _updateGlobalParamsEntryWithMempool(t *testing.T, chain *Blockchain, db *ba minimumNetworkFeesNanosPerKB, nil, maxNonceExpirationBlockHeightOffset, + extraData, feeRateNanosPerKB, mempool, []*DeSoOutput{}) @@ -1015,7 +1016,6 @@ func _updateGlobalParamsEntryWithTestMeta( createNFTFeeNanos int64, maxCopiesPerNFT int64, ) { - testMeta.expectedSenderBalances = append( testMeta.expectedSenderBalances, _getBalance(testMeta.t, testMeta.chain, nil, updaterPkBase58Check)) @@ -1031,6 +1031,37 @@ func _updateGlobalParamsEntryWithTestMeta( createNFTFeeNanos, maxCopiesPerNFT, -1, + map[string][]byte{}, + true, + testMeta.mempool) /*flushToDB*/ + require.NoError(testMeta.t, err) + testMeta.txnOps = append(testMeta.txnOps, currentOps) + testMeta.txns = 
append(testMeta.txns, currentTxn) +} + +func _updateGlobalParamsEntryWithExtraData( + testMeta *TestMeta, + feeRateNanosPerKB uint64, + updaterPkBase58Check string, + updaterPrivBase58Check string, + extraData map[string][]byte, +) { + testMeta.expectedSenderBalances = append( + testMeta.expectedSenderBalances, + _getBalance(testMeta.t, testMeta.chain, nil, updaterPkBase58Check)) + + currentOps, currentTxn, _, err := _updateGlobalParamsEntryWithMempool( + testMeta.t, testMeta.chain, testMeta.db, testMeta.params, + feeRateNanosPerKB, + updaterPkBase58Check, + updaterPrivBase58Check, + -1, + int64(feeRateNanosPerKB), + -1, + -1, + -1, + -1, + extraData, true, testMeta.mempool) /*flushToDB*/ require.NoError(testMeta.t, err) @@ -1065,6 +1096,7 @@ func _updateGlobalParamsEntryWithMaxNonceExpirationBlockHeightOffsetAndTestMeta( createNFTFeeNanos, maxCopiesPerNFT, maxNonceExpirationBlockHeightOffset, + map[string][]byte{}, true, testMeta.mempool) /*flushToDB*/ require.NoError(testMeta.t, err) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 7641a34df..aed6a0ebe 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3772,6 +3772,14 @@ type GlobalParamsEntry struct { // and the expiration block height specified in the nonce for a // transaction. MaxNonceExpirationBlockHeightOffset uint64 + + // StakeLockupEpochDuration is the number of epochs that a + // user must wait before unlocking their unstaked stake. + StakeLockupEpochDuration uint64 + + // ValidatorJailEpochDuration is the number of epochs that a validator must + // wait after being jailed before submitting an UnjailValidator txn. 
+ ValidatorJailEpochDuration uint64 } func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -3785,6 +3793,10 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe if MigrationTriggered(blockHeight, BalanceModelMigration) { data = append(data, UintToBuf(gp.MaxNonceExpirationBlockHeightOffset)...) } + if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + data = append(data, UintToBuf(gp.StakeLockupEpochDuration)...) + data = append(data, UintToBuf(gp.ValidatorJailEpochDuration)...) + } return data } @@ -3817,11 +3829,21 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MaxNonceExpirationBlockHeightOffset") } } + if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + gp.StakeLockupEpochDuration, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading StakeLockupEpochDuration") + } + gp.ValidatorJailEpochDuration, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading ValidatorJailEpochDuration") + } + } return nil } func (gp *GlobalParamsEntry) GetVersionByte(blockHeight uint64) byte { - return GetMigrationVersion(blockHeight, BalanceModelMigration) + return GetMigrationVersion(blockHeight, BalanceModelMigration, ProofOfStakeNewTxnTypesMigration) } func (gp *GlobalParamsEntry) GetEncoderType() EncoderType { diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 5d5082e81..60f643537 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1715,7 +1715,8 @@ func (bav *UtxoView) IsValidUnjailValidatorMetadata(transactorPublicKey []byte) } // Validate sufficient epochs have elapsed for validator to be unjailed. 
- if validatorEntry.JailedAtEpochNumber+bav.Params.ValidatorJailEpochDuration > currentEpochNumber { + // TODO: Retrieve snapshot ValidatorJailEpochDuration, not current value. + if validatorEntry.JailedAtEpochNumber+bav.GetValidatorJailEpochDuration(0) > currentEpochNumber { return errors.Wrapf(RuleErrorUnjailingValidatorTooEarly, "UtxoView.IsValidUnjailValidatorMetadata: ") } diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 3fe943f2b..1151dd913 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -35,6 +35,10 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) + params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + utxoView := func() *UtxoView { newUtxoView, err := mempool.GetAugmentedUniversalView() require.NoError(t, err) @@ -68,16 +72,12 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { { // ParamUpdater set min fee rate params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - _updateGlobalParamsEntryWithTestMeta( + _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - -1, - int64(testMeta.feeRateNanosPerKb), - -1, - -1, - -1, + map[string][]byte{}, ) } { @@ -571,18 +571,14 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { } { - // ParamUpdater set min fee rate + // ParamUpdater set MinFeeRateNanos. 
params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - _updateGlobalParamsEntryWithTestMeta( + _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - -1, - int64(testMeta.feeRateNanosPerKb), - -1, - -1, - -1, + map[string][]byte{}, ) } { @@ -739,18 +735,14 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID { - // ParamUpdater set min fee rate + // ParamUpdater set MinFeeRateNanos. params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - _updateGlobalParamsEntryWithTestMeta( + _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - -1, - int64(testMeta.feeRateNanosPerKb), - -1, - -1, - -1, + map[string][]byte{}, ) } { @@ -1171,18 +1163,14 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID { - // ParamUpdater set min fee rate + // ParamUpdater set MinFeeRateNanos. params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - _updateGlobalParamsEntryWithTestMeta( + _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - -1, - int64(testMeta.feeRateNanosPerKb), - -1, - -1, - -1, + map[string][]byte{}, ) } { @@ -1364,18 +1352,14 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NoError(t, err) { - // ParamUpdater set min fee rate + // ParamUpdater set MinFeeRateNanos. 
params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - _updateGlobalParamsEntryWithTestMeta( + _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - -1, - int64(testMeta.feeRateNanosPerKb), - -1, - -1, - -1, + map[string][]byte{}, ) } { @@ -1526,9 +1510,6 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) chain.snapshot = nil - // For these tests, we set ValidatorJailEpochDuration to 3. - params.ValidatorJailEpochDuration = 3 - utxoView := func() *UtxoView { newUtxoView, err := mempool.GetAugmentedUniversalView() require.NoError(t, err) @@ -1570,18 +1551,14 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { require.NoError(t, err) { - // ParamUpdater set min fee rate + // ParamUpdater set MinFeeRateNanos and ValidatorJailEpochDuration=3. params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - _updateGlobalParamsEntryWithTestMeta( + _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - -1, - int64(testMeta.feeRateNanosPerKb), - -1, - -1, - -1, + map[string][]byte{ValidatorJailEpochDuration: UintToBuf(3)}, ) } { @@ -1872,18 +1849,14 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { require.NoError(t, err) { - // ParamUpdater set min fee rate + // ParamUpdater set MinFeeRateNanos. 
params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - _updateGlobalParamsEntryWithTestMeta( + _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - -1, - int64(testMeta.feeRateNanosPerKb), - -1, - -1, - -1, + map[string][]byte{}, ) } { diff --git a/lib/blockchain.go b/lib/blockchain.go index 488434b5f..59f38840e 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -3162,11 +3162,14 @@ func (bc *Blockchain) CreateUpdateGlobalParamsTxn(updaterPublicKey []byte, forbiddenPubKey []byte, maxNonceExpirationBlockHeightOffset int64, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + extraData map[string][]byte, minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { + if extraData == nil { + extraData = make(map[string][]byte) + } + // Set RepostedPostHash and IsQuotedRepost on the extra data map as necessary to track reposting. - extraData := make(map[string][]byte) if usdCentsPerBitcoin >= 0 { extraData[USDCentsPerBitcoinKey] = UintToBuf(uint64(usdCentsPerBitcoin)) } diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index dadfc77b8..11fa941a4 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -1711,7 +1711,7 @@ func TestForbiddenBlockSignaturePubKey(t *testing.T) { blockSignerPkBytes, _, err := Base58CheckDecode(blockSignerPk) require.NoError(err) txn, _, _, _, err := chain.CreateUpdateGlobalParamsTxn( - senderPkBytes, -1, -1, -1, -1, -1, blockSignerPkBytes, -1, 100 /*feeRateNanosPerKB*/, nil, []*DeSoOutput{}) + senderPkBytes, -1, -1, -1, -1, -1, blockSignerPkBytes, -1, map[string][]byte{}, 100 /*feeRateNanosPerKB*/, nil, []*DeSoOutput{}) require.NoError(err) // Mine a few blocks to give the senderPkString some money. 
diff --git a/lib/constants.go b/lib/constants.go index 3ebccda33..206c761e0 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -597,15 +597,14 @@ type DeSoParams struct { // attack the bancor curve to any meaningful measure. CreatorCoinAutoSellThresholdNanos uint64 - // StakeLockupEpochDuration is the number of epochs that a - // user must wait before unlocking their unstaked stake. - // TODO: Move this to GlobalParamsEntry. - StakeLockupEpochDuration uint64 + // DefaultStakeLockupEpochDuration is the default number of epochs + // that a user must wait before unlocking their unstaked stake. + DefaultStakeLockupEpochDuration uint64 - // ValidatorJailEpochDuration is the number of epochs that a validator must - // wait after being jailed before submitting an UnjailValidator txn. - // TODO: Move this to GlobalParamsEntry. - ValidatorJailEpochDuration uint64 + // DefaultValidatorJailEpochDuration is the default number of epochs + // that a validator must wait after being jailed before submitting + // an UnjailValidator txn. + DefaultValidatorJailEpochDuration uint64 ForkHeights ForkHeights @@ -980,10 +979,10 @@ var DeSoMainnetParams = DeSoParams{ CreatorCoinAutoSellThresholdNanos: uint64(10), // Unstaked stake can be unlocked after a minimum of N elapsed epochs. - StakeLockupEpochDuration: uint64(3), + DefaultStakeLockupEpochDuration: uint64(3), // Jailed validators can be unjailed after a minimum of N elapsed epochs. - ValidatorJailEpochDuration: uint64(3), + DefaultValidatorJailEpochDuration: uint64(3), ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), @@ -1214,10 +1213,10 @@ var DeSoTestnetParams = DeSoParams{ CreatorCoinAutoSellThresholdNanos: uint64(10), // Unstaked stake can be unlocked after a minimum of N elapsed epochs. - StakeLockupEpochDuration: uint64(3), + DefaultStakeLockupEpochDuration: uint64(3), // Jailed validators can be unjailed after a minimum of N elapsed epochs. 
- ValidatorJailEpochDuration: uint64(3), + DefaultValidatorJailEpochDuration: uint64(3), ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), @@ -1263,6 +1262,8 @@ const ( MaxCopiesPerNFTKey = "MaxCopiesPerNFT" MaxNonceExpirationBlockHeightOffsetKey = "MaxNonceExpirationBlockHeightOffset" ForbiddenBlockSignaturePubKeyKey = "ForbiddenBlockSignaturePubKey" + StakeLockupEpochDuration = "StakeLockupEpochDuration" + ValidatorJailEpochDuration = "ValidatorJailEpochDuration" DiamondLevelKey = "DiamondLevel" DiamondPostHashKey = "DiamondPostHash" diff --git a/lib/db_utils.go b/lib/db_utils.go index d34842f82..f374c93e5 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -482,7 +482,7 @@ type DBPrefixes struct { PrefixValidatorByPKID []byte `prefix_id:"[78]" is_state:"true"` // PrefixValidatorByStake: Retrieve the top N validators by stake. - // Prefix, TotalStakeAmountNanos, ValidatorPKID -> nil + // Prefix, Status, TotalStakeAmountNanos, ValidatorPKID -> nil // Note that we save space by storing a nil value and parsing the ValidatorPKID from the key. PrefixValidatorByStake []byte `prefix_id:"[79]" is_state:"true"` diff --git a/lib/pos_global_params.go b/lib/pos_global_params.go new file mode 100644 index 000000000..ff7670c38 --- /dev/null +++ b/lib/pos_global_params.go @@ -0,0 +1,23 @@ +package lib + +func (bav *UtxoView) GetStakeLockupEpochDuration(snapshotAtEpochNumber uint64) uint64 { + if snapshotAtEpochNumber > 0 { + // TODO: Return the SnapshotGlobalParamsEntry.StakeLockupEpochDuration if set. + } else if bav.GlobalParamsEntry.StakeLockupEpochDuration != uint64(0) { + // Return the CurrentGlobalParamsEntry.StakeLockupEpochDuration if set. + return bav.GlobalParamsEntry.StakeLockupEpochDuration + } + // Return the DefaultStakeLockupEpochDuration. 
+ return bav.Params.DefaultStakeLockupEpochDuration +} + +func (bav *UtxoView) GetValidatorJailEpochDuration(snapshotAtEpochNumber uint64) uint64 { + if snapshotAtEpochNumber > 0 { + // TODO: Return the SnapshotGlobalParamsEntry.ValidatorJailEpochDuration if set. + } else if bav.GlobalParamsEntry.ValidatorJailEpochDuration != uint64(0) { + // Return the CurrentGlobalParamsEntry.ValidatorJailEpochDuration if set. + return bav.GlobalParamsEntry.ValidatorJailEpochDuration + } + // Return the DefaultValidatorJailEpochDuration. + return bav.Params.DefaultValidatorJailEpochDuration +} From 788a62a234dedb7c2bd85a2a8bb26574381c6a4a Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 5 Jun 2023 12:10:48 -0400 Subject: [PATCH 085/762] Consolidate PoS block heights. --- lib/block_view_derived_key.go | 4 +- lib/block_view_stake.go | 12 +++--- lib/block_view_stake_test.go | 28 +++++++------- lib/block_view_types.go | 6 +-- lib/block_view_validator.go | 12 +++--- lib/block_view_validator_test.go | 26 ++++++------- lib/constants.go | 63 +++++++++++++++++++++++--------- lib/db_utils.go | 6 +-- lib/network.go | 6 +-- lib/pos_leader_schedule_test.go | 6 +-- 10 files changed, 99 insertions(+), 70 deletions(-) diff --git a/lib/block_view_derived_key.go b/lib/block_view_derived_key.go index c39055ab8..0ed063d47 100644 --- a/lib/block_view_derived_key.go +++ b/lib/block_view_derived_key.go @@ -310,8 +310,8 @@ func (bav *UtxoView) _connectAuthorizeDerivedKey( } } - // ====== Proof of Stake New Txn Types Fork ====== - if blockHeight >= bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { + // ====== Proof of Stake State Setup Fork ====== + if blockHeight >= bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { // StakeLimitMap for stakeLimitKey, stakingLimit := range transactionSpendingLimit.StakeLimitMap { if err = bav.IsValidStakeLimitKey(txn.PublicKey, stakeLimitKey); err != nil { diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 66df9a90b..02ef3f37c 
100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1103,7 +1103,7 @@ func (bav *UtxoView) _connectStake( _err error, ) { // Validate the starting block height. - if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight || + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectStake: ") } @@ -1275,7 +1275,7 @@ func (bav *UtxoView) _disconnectStake( blockHeight uint32, ) error { // Validate the starting block height. - if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight || + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_disconnectStake: ") } @@ -1365,7 +1365,7 @@ func (bav *UtxoView) _connectUnstake( _err error, ) { // Validate the starting block height. - if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight || + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectUnstake: ") } @@ -1553,7 +1553,7 @@ func (bav *UtxoView) _disconnectUnstake( blockHeight uint32, ) error { // Validate the starting block height. - if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight || + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_disconnectUnstake: ") } @@ -1669,7 +1669,7 @@ func (bav *UtxoView) _connectUnlockStake( _err error, ) { // Validate the starting block height. 
- if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight || + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectUnlockStake: ") } @@ -1795,7 +1795,7 @@ func (bav *UtxoView) _disconnectUnlockStake( blockHeight uint32, ) error { // Validate the starting block height. - if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight || + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_disconnectUnlockStake: ") } diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index f1ff8576f..30175aca9 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -21,7 +21,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // Local variables var err error - // Initialize fork heights. + // Initialize balance model fork heights. setBalanceModelBlockHeights() defer resetBalanceModelBlockHeights() @@ -98,7 +98,7 @@ func _testStaking(t *testing.T, flushToDB bool) { } { // m0 registers as a validator. 
- params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -123,7 +123,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // { // RuleErrorProofOfStakeTxnBeforeBlockHeight - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = math.MaxUint32 + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = math.MaxUint32 GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -137,7 +137,7 @@ func _testStaking(t *testing.T, flushToDB bool) { require.Error(t, err) require.Contains(t, err.Error(), RuleErrorProofofStakeTxnBeforeBlockHeight) - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) } @@ -276,7 +276,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // { // RuleErrorProofOfStakeTxnBeforeBlockHeight - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = math.MaxUint32 + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = math.MaxUint32 GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -290,7 +290,7 @@ func _testStaking(t *testing.T, flushToDB bool) { require.Error(t, err) require.Contains(t, err.Error(), RuleErrorProofofStakeTxnBeforeBlockHeight) - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = 
uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) } @@ -472,7 +472,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // { // RuleErrorProofOfStakeTxnBeforeBlockHeight - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = math.MaxUint32 + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = math.MaxUint32 GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -487,7 +487,7 @@ func _testStaking(t *testing.T, flushToDB bool) { require.Error(t, err) require.Contains(t, err.Error(), RuleErrorProofofStakeTxnBeforeBlockHeight) - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) } @@ -770,9 +770,9 @@ func TestStakingWithDerivedKey(t *testing.T) { chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) - // Initialize fork heights. + // Initialize PoS fork heights. 
params.ForkHeights.DeSoUnlimitedDerivedKeysBlockHeight = uint32(0) - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) chain.snapshot = nil @@ -1694,7 +1694,7 @@ func TestGetLockedStakeEntriesInRange(t *testing.T) { utxoView._setLockedStakeEntryMappings(lockedStakeEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) - // Fetch the LockedStakeEntry so it is also cached in the UtxoView. + // Fetch the LockedStakeEntry, so it is also cached in the UtxoView. lockedStakeEntry, err = utxoView.GetLockedStakeEntry(m0PKID, m0PKID, 2) require.NoError(t, err) require.NotNil(t, lockedStakeEntry) @@ -1782,7 +1782,7 @@ func TestStakeLockupEpochDuration(t *testing.T) { // Initialize fork heights. params.ForkHeights.DeSoUnlimitedDerivedKeysBlockHeight = uint32(0) - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) chain.snapshot = nil @@ -1951,8 +1951,8 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) - // Initialize fork heights. - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + // Initialize PoS fork heights. 
+ params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) chain.snapshot = nil diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 7641a34df..d173c354f 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -1241,7 +1241,7 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada } } - if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { // PrevValidatorEntry data = append(data, EncodeToBytes(blockHeight, op.PrevValidatorEntry, skipMetadata...)...) @@ -1870,7 +1870,7 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. } } - if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { // PrevValidatorEntry if op.PrevValidatorEntry, err = DecodeDeSoEncoder(&ValidatorEntry{}, rr); err != nil { return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevValidatorEntry: ") @@ -1900,7 +1900,7 @@ func (op *UtxoOperation) GetVersionByte(blockHeight uint64) byte { blockHeight, AssociationsAndAccessGroupsMigration, BalanceModelMigration, - ProofOfStakeNewTxnTypesMigration, + ProofOfStake1StateSetupMigration, ) } diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 8ba3e82ed..864759b5f 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -994,7 +994,7 @@ func (bav *UtxoView) _connectRegisterAsValidator( _err error, ) { // Validate the starting block height. 
- if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectRegisterAsValidator: ") } @@ -1115,7 +1115,7 @@ func (bav *UtxoView) _disconnectRegisterAsValidator( blockHeight uint32, ) error { // Validate the starting block height. - if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { return errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_disconnectRegisterAsValidator: ") } @@ -1178,7 +1178,7 @@ func (bav *UtxoView) _connectUnregisterAsValidator( _err error, ) { // Validate the starting block height. - if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectUnregisterAsValidator: ") } @@ -1350,7 +1350,7 @@ func (bav *UtxoView) _disconnectUnregisterAsValidator( blockHeight uint32, ) error { // Validate the starting block height. - if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { return errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_disconnectUnregisterAsValidator: ") } @@ -1436,7 +1436,7 @@ func (bav *UtxoView) _connectUnjailValidator( _err error, ) { // Validate the starting block height. 
- if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectUnjailValidator: ") } @@ -1541,7 +1541,7 @@ func (bav *UtxoView) _disconnectUnjailValidator( blockHeight uint32, ) error { // Validate the starting block height. - if blockHeight < bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { return errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_disconnectUnjailValidator: ") } diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 33a4d210d..102a1b2df 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -27,7 +27,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { var globalActiveStakeAmountNanos *uint256.Int var err error - // Initialize fork heights. + // Initialize balance model fork heights. 
setBalanceModelBlockHeights() defer resetBalanceModelBlockHeights() @@ -88,7 +88,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { } { // RuleErrorProofOfStakeTxnBeforeBlockHeight - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = math.MaxUint32 + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = math.MaxUint32 GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -103,7 +103,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Error(t, err) require.Contains(t, err.Error(), RuleErrorProofofStakeTxnBeforeBlockHeight) - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) } @@ -432,8 +432,8 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) - // Initialize fork heights. - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + // Initialize PoS fork height. + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -710,7 +710,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { mempool, miner := NewTestMiner(t, chain, params, true) // Initialize PoS fork height. 
- params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -1153,7 +1153,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { mempool, miner := NewTestMiner(t, chain, params, true) // Initialize PoS fork height. - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -1348,7 +1348,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { mempool, miner := NewTestMiner(t, chain, params, true) // Initialize PoS fork height. - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -1550,7 +1550,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { mempool, miner := NewTestMiner(t, chain, params, true) // Initialize PoS fork height. 
- params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) chain.snapshot = nil @@ -1721,7 +1721,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { } { // RuleErrorProofofStakeTxnBeforeBlockHeight - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = math.MaxUint32 + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = math.MaxUint32 GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -1729,7 +1729,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { require.Error(t, err) require.Contains(t, err.Error(), RuleErrorProofofStakeTxnBeforeBlockHeight) - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) } @@ -1767,8 +1767,8 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) - // Initialize fork heights. - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + // Initialize PoS fork height. 
+ params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) diff --git a/lib/constants.go b/lib/constants.go index 52c651669..cbdaeb3ed 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -270,10 +270,20 @@ type ForkHeights struct { // to an account balance model for accounting. BalanceModelBlockHeight uint32 - // ProofOfStakeNewTxnTypesBlockHeight defines the height at which we introduced the - // new txn types to support Proof of Stake. These txns include: RegisterAsValidator, - // UnregisterAsValidator, Stake, Unstake, and UnlockStake. - ProofOfStakeNewTxnTypesBlockHeight uint32 + // ProofOfStake1StateSetupBlockHeight defines the height at which we introduced all + // changes to set up the prerequisite state for cutting over to PoS consensus. These + // changes include, for example, introducing the new PoS txn types, consensus params, + // leader schedule generation, and snapshotting. + // + // The ProofOfStake1StateSetupBlockHeight needs to be set before the + // ProofOfStake2ConsensusCutoverBlockHeight so that we allow time for validators to + // register, stake to be assigned, and the validator set, consensus params, and + // leader schedule snapshots to be generated in advance. + ProofOfStake1StateSetupBlockHeight uint32 + + // ProofOfStake2ConsensusCutoverBlockHeight defines the height at which we cut over + // from PoW consensus to PoS consensus. + ProofOfStake2ConsensusCutoverBlockHeight uint32 // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. 
@@ -335,11 +345,12 @@ type MigrationHeight struct { } const ( - DefaultMigration MigrationName = "DefaultMigration" - UnlimitedDerivedKeysMigration MigrationName = "UnlimitedDerivedKeysMigration" - AssociationsAndAccessGroupsMigration MigrationName = "AssociationsAndAccessGroupsMigration" - BalanceModelMigration MigrationName = "BalanceModelMigration" - ProofOfStakeNewTxnTypesMigration MigrationName = "ProofOfStakeNewTxnTypesMigration" + DefaultMigration MigrationName = "DefaultMigration" + UnlimitedDerivedKeysMigration MigrationName = "UnlimitedDerivedKeysMigration" + AssociationsAndAccessGroupsMigration MigrationName = "AssociationsAndAccessGroupsMigration" + BalanceModelMigration MigrationName = "BalanceModelMigration" + ProofOfStake1StateSetupMigration MigrationName = "ProofOfStake1StateSetupMigration" + ProofOfStake2ConsensusCutoverMigration MigrationName = "ProofOfStake2ConsensusCutoverMigration" ) type EncoderMigrationHeights struct { @@ -354,8 +365,11 @@ type EncoderMigrationHeights struct { // This coincides with the BalanceModel block BalanceModel MigrationHeight - // This coincides with the ProofOfStakeNewTxnTypesBlockHeight - ProofOfStakeNewTxnTypesMigration MigrationHeight + // This coincides with the ProofOfStake1StateSetupBlockHeight + ProofOfStake1StateSetupMigration MigrationHeight + + // This coincides with the ProofOfStake2ConsensusCutoverBlockHeight + ProofOfStake2ConsensusCutoverMigration MigrationHeight } func GetEncoderMigrationHeights(forkHeights *ForkHeights) *EncoderMigrationHeights { @@ -380,13 +394,19 @@ func GetEncoderMigrationHeights(forkHeights *ForkHeights) *EncoderMigrationHeigh Height: uint64(forkHeights.BalanceModelBlockHeight), Name: BalanceModelMigration, }, - ProofOfStakeNewTxnTypesMigration: MigrationHeight{ + ProofOfStake1StateSetupMigration: MigrationHeight{ Version: 4, - Height: uint64(forkHeights.ProofOfStakeNewTxnTypesBlockHeight), - Name: ProofOfStakeNewTxnTypesMigration, + Height: 
uint64(forkHeights.ProofOfStake1StateSetupBlockHeight), + Name: ProofOfStake1StateSetupMigration, + }, + ProofOfStake2ConsensusCutoverMigration: MigrationHeight{ + Version: 5, + Height: uint64(forkHeights.ProofOfStake2ConsensusCutoverBlockHeight), + Name: ProofOfStake2ConsensusCutoverMigration, }, } } + func GetEncoderMigrationHeightsList(forkHeights *ForkHeights) ( _migrationHeightsList []*MigrationHeight) { @@ -645,7 +665,10 @@ var RegtestForkHeights = ForkHeights{ // For convenience, we set the block height to 1 since the // genesis block was created using the utxo model. BalanceModelBlockHeight: uint32(1), - ProofOfStakeNewTxnTypesBlockHeight: uint32(1), + ProofOfStake1StateSetupBlockHeight: uint32(1), + + // FIXME: set to real block height when ready + ProofOfStake2ConsensusCutoverBlockHeight: uint32(math.MaxUint32), // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. @@ -799,7 +822,10 @@ var MainnetForkHeights = ForkHeights{ BalanceModelBlockHeight: uint32(226839), // FIXME: set to real block height when ready - ProofOfStakeNewTxnTypesBlockHeight: uint32(math.MaxUint32), + ProofOfStake1StateSetupBlockHeight: uint32(math.MaxUint32), + + // FIXME: set to real block height when ready + ProofOfStake2ConsensusCutoverBlockHeight: uint32(math.MaxUint32), // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. @@ -1074,7 +1100,10 @@ var TestnetForkHeights = ForkHeights{ BalanceModelBlockHeight: uint32(683058), // FIXME: set to real block height when ready - ProofOfStakeNewTxnTypesBlockHeight: uint32(math.MaxUint32), + ProofOfStake1StateSetupBlockHeight: uint32(math.MaxUint32), + + // FIXME: set to real block height when ready + ProofOfStake2ConsensusCutoverBlockHeight: uint32(math.MaxUint32), // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. 
diff --git a/lib/db_utils.go b/lib/db_utils.go index 409aa03ed..01eae3507 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -6912,7 +6912,7 @@ func (txnMeta *TransactionMetadata) RawEncodeWithoutMetadata(blockHeight uint64, data = append(data, EncodeToBytes(blockHeight, txnMeta.NewMessageTxindexMetadata, skipMetadata...)...) } - if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { // encoding RegisterAsValidatorTxindexMetadata data = append(data, EncodeToBytes(blockHeight, txnMeta.RegisterAsValidatorTxindexMetadata, skipMetadata...)...) // encoding UnregisterAsValidatorTxindexMetadata @@ -7176,7 +7176,7 @@ func (txnMeta *TransactionMetadata) RawDecodeWithoutMetadata(blockHeight uint64, } } - if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { // decoding RegisterAsValidatorTxindexMetadata if txnMeta.RegisterAsValidatorTxindexMetadata, err = DecodeDeSoEncoder(&RegisterAsValidatorTxindexMetadata{}, rr); err != nil { return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading RegisterAsValidatorTxindexMetadata: ") @@ -7207,7 +7207,7 @@ func (txnMeta *TransactionMetadata) RawDecodeWithoutMetadata(blockHeight uint64, } func (txnMeta *TransactionMetadata) GetVersionByte(blockHeight uint64) byte { - return GetMigrationVersion(blockHeight, AssociationsAndAccessGroupsMigration, ProofOfStakeNewTxnTypesMigration) + return GetMigrationVersion(blockHeight, AssociationsAndAccessGroupsMigration, ProofOfStake1StateSetupMigration) } func (txnMeta *TransactionMetadata) GetEncoderType() EncoderType { diff --git a/lib/network.go b/lib/network.go index 0ce83d3e4..b407e51c3 100644 --- a/lib/network.go +++ b/lib/network.go @@ -5389,7 +5389,7 @@ type TransactionSpendingLimit struct { // - AssociationOperation: one of { Any, Create, Delete } AssociationLimitMap map[AssociationLimitKey]uint64 - // 
===== ENCODER MIGRATION ProofOfStakeNewTxnTypesMigration ===== + // ===== ENCODER MIGRATION ProofOfStake1StateSetupMigration ===== // ValidatorPKID || StakerPKID to amount of stake-able $DESO. // Note that this is not a limit on the number of Stake txns that // this derived key can perform but instead a limit on the amount @@ -5956,7 +5956,7 @@ func (tsl *TransactionSpendingLimit) ToBytes(blockHeight uint64) ([]byte, error) } // StakeLimitMap, UnstakeLimitMap, and UnlockStakeLimitMap, gated by the encoder migration. - if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { // StakeLimitMap stakeLimitMapLength := uint64(len(tsl.StakeLimitMap)) data = append(data, UintToBuf(stakeLimitMapLength)...) @@ -6231,7 +6231,7 @@ func (tsl *TransactionSpendingLimit) FromBytes(blockHeight uint64, rr *bytes.Rea } // StakeLimitMap, UnstakeLimitMap, and UnlockStakeLimitMap, gated by the encoder migration. - if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { // StakeLimitMap stakeLimitMapLen, err := ReadUvarint(rr) if err != nil { diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index c4c44ebe9..207bcfa70 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -11,7 +11,7 @@ import ( ) func TestGenerateLeaderSchedule(t *testing.T) { - // Initialize fork heights. + // Initialize balance model fork heights. setBalanceModelBlockHeights() defer resetBalanceModelBlockHeights() @@ -19,8 +19,8 @@ func TestGenerateLeaderSchedule(t *testing.T) { chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) - // Initialize PoS txn types block height. - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + // Initialize PoS fork height. 
+ params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) From 73293cca68f818022ab973ce5ee6974af7e85cad Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Mon, 5 Jun 2023 14:30:58 -0400 Subject: [PATCH 086/762] Update sign BLS test helper func signature. --- lib/block_view_stake_test.go | 10 +++--- lib/block_view_validator_test.go | 52 ++++++++++++++++---------------- lib/pos_leader_schedule_test.go | 2 +- 3 files changed, 32 insertions(+), 32 deletions(-) diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index f1ff8576f..baa83b8e9 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -102,7 +102,7 @@ func _testStaking(t *testing.T, flushToDB bool) { GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example.com")}, VotingPublicKey: votingPublicKey, @@ -957,7 +957,7 @@ func TestStakingWithDerivedKey(t *testing.T) { } { // m0 registers as a validator. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example1.com")}, VotingPublicKey: votingPublicKey, @@ -968,7 +968,7 @@ func TestStakingWithDerivedKey(t *testing.T) { } { // m1 registers as a validator. 
- votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example2.com")}, VotingPublicKey: votingPublicKey, @@ -1844,7 +1844,7 @@ func TestStakeLockupEpochDuration(t *testing.T) { } { // m0 registers as a validator. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m1.com")}, VotingPublicKey: votingPublicKey, @@ -2039,7 +2039,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { } { // m0 registers as a validator. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m0.example.com")}, VotingPublicKey: votingPublicKey, diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 33a4d210d..1e5ae328e 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -92,7 +92,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example.com")}, DisableDelegatedStake: false, @@ 
-164,7 +164,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { } { // RuleErrorValidatorMissingVotingPublicKeySignature - votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example.com")}, VotingPublicKey: votingPublicKey, @@ -177,7 +177,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { } { // RuleErrorValidatorInvalidVotingPublicKeySignature: invalid TransactorPkBytes - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example.com")}, VotingPublicKey: votingPublicKey, @@ -191,8 +191,8 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { } { // RuleErrorValidatorInvalidVotingPublicKeySignature: invalid VotingPublicKey - votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) - _, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + _, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example.com")}, VotingPublicKey: votingPublicKey, @@ -206,7 +206,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { } { // Happy path: register a validator - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example.com")}, DisableDelegatedStake: false, @@ 
-242,7 +242,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { } { // Happy path: update a validator - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example1.com"), []byte("https://example2.com")}, DisableDelegatedStake: false, @@ -605,7 +605,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { require.NoError(t, err) // Create a VotingPublicKey and VotingSignature. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes) // Perform a RegisterAsValidator txn. No error expected. registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ @@ -671,7 +671,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { require.Contains(t, err.Error(), RuleErrorValidatorNotFound) // Create a VotingPublicKey and VotingSignature. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes) // Perform a RegisterAsValidator txn. Error expected. registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ @@ -773,7 +773,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { } { // m0 registers as a validator. 
- votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m0.com")}, VotingPublicKey: votingPublicKey, @@ -789,7 +789,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { } { // m1 registers as a validator. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m1.com")}, VotingPublicKey: votingPublicKey, @@ -805,7 +805,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { } { // m2 registers as a validator. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m2PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m2PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m2.com")}, VotingPublicKey: votingPublicKey, @@ -977,7 +977,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { m5PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m5PkBytes).PKID // Store m0's ValidatorEntry in the db with TotalStake = 100 nanos. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) validatorEntry := &ValidatorEntry{ ValidatorPKID: m0PKID, TotalStakeAmountNanos: uint256.NewInt().SetUint64(100), @@ -997,7 +997,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Empty(t, utxoView.ValidatorPKIDToValidatorEntry) // Store m1's jailed ValidatorEntry in the db with TotalStake = 400 nanos. 
- votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m1PkBytes, blockHeight) + votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m1PkBytes) validatorEntry = &ValidatorEntry{ ValidatorPKID: m1PKID, TotalStakeAmountNanos: uint256.NewInt().SetUint64(400), @@ -1016,7 +1016,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) // Store m2's ValidatorEntry in the db with TotalStake = 300 nanos. - votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m2PkBytes, blockHeight) + votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m2PkBytes) m2ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m2PKID, TotalStakeAmountNanos: uint256.NewInt().SetUint64(300), @@ -1033,7 +1033,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) // Store m3's ValidatorEntry in the db with TotalStake = 600 nanos. - votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m3PkBytes, blockHeight) + votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m3PkBytes) m3ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m3PKID, TotalStakeAmountNanos: uint256.NewInt().SetUint64(600), @@ -1071,7 +1071,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.True(t, utxoView.ValidatorPKIDToValidatorEntry[*m3ValidatorEntry.ValidatorPKID].isDeleted) // Store m4's ValidatorEntry in the UtxoView with TotalStake = 50 nanos. 
- votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m4PkBytes, blockHeight) + votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m4PkBytes) m4ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m4PKID, TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), @@ -1095,7 +1095,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { ) // Store m5's jailed ValidatorEntry in the UtxoView with TotalStake = 500 nanos. - votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m5PkBytes, blockHeight) + votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m5PkBytes) m5ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m5PKID, TotalStakeAmountNanos: uint256.NewInt().SetUint64(500), @@ -1211,7 +1211,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { } { // m0 registers as a validator with DisableDelegatedStake = FALSE. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m0.com")}, DisableDelegatedStake: false, @@ -1232,7 +1232,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { } { // m0 updates DisableDelegatedStake = TRUE. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m0.com")}, DisableDelegatedStake: true, @@ -1277,7 +1277,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { } { // m0 updates DisableDelegatedStake = FALSE. 
- votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m0.com")}, DisableDelegatedStake: false, @@ -1309,7 +1309,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { } { // m0 tries to update DisableDelegateStake = TRUE. Errors. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m0.com")}, DisableDelegatedStake: true, @@ -1409,7 +1409,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { } { // m0 registers as a validator. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m0.com")}, VotingPublicKey: votingPublicKey, @@ -1615,7 +1615,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { } { // m0 registers as a validator. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example.com")}, VotingPublicKey: votingPublicKey, @@ -1917,7 +1917,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { } { // sender registers as a validator. 
- votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example.com")}, VotingPublicKey: votingPublicKey, @@ -2060,7 +2060,7 @@ func _submitUnjailValidatorTxn( return fees, nil } -func _generateVotingPublicKeyAndSignature(t *testing.T, transactorPkBytes []byte, blockHeight uint64) (*bls.PublicKey, *bls.Signature) { +func _generateVotingPublicKeyAndSignature(t *testing.T, transactorPkBytes []byte) (*bls.PublicKey, *bls.Signature) { blsPrivateKey, err := bls.NewPrivateKey() require.NoError(t, err) votingPublicKey := blsPrivateKey.PublicKey() diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index c4c44ebe9..0c44511d2 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -73,7 +73,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.NoError(t, err) // Validator registers. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, pkBytes, blockHeight) + votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, pkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, VotingPublicKey: votingPublicKey, From 88a3565c428e91295d00a9f504694a0a0ae0356b Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 6 Jun 2023 09:15:39 -0400 Subject: [PATCH 087/762] Delete the CutoverMigration for now. 
--- lib/constants.go | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/lib/constants.go b/lib/constants.go index cbdaeb3ed..6d81834d4 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -345,12 +345,11 @@ type MigrationHeight struct { } const ( - DefaultMigration MigrationName = "DefaultMigration" - UnlimitedDerivedKeysMigration MigrationName = "UnlimitedDerivedKeysMigration" - AssociationsAndAccessGroupsMigration MigrationName = "AssociationsAndAccessGroupsMigration" - BalanceModelMigration MigrationName = "BalanceModelMigration" - ProofOfStake1StateSetupMigration MigrationName = "ProofOfStake1StateSetupMigration" - ProofOfStake2ConsensusCutoverMigration MigrationName = "ProofOfStake2ConsensusCutoverMigration" + DefaultMigration MigrationName = "DefaultMigration" + UnlimitedDerivedKeysMigration MigrationName = "UnlimitedDerivedKeysMigration" + AssociationsAndAccessGroupsMigration MigrationName = "AssociationsAndAccessGroupsMigration" + BalanceModelMigration MigrationName = "BalanceModelMigration" + ProofOfStake1StateSetupMigration MigrationName = "ProofOfStake1StateSetupMigration" ) type EncoderMigrationHeights struct { @@ -367,9 +366,6 @@ type EncoderMigrationHeights struct { // This coincides with the ProofOfStake1StateSetupBlockHeight ProofOfStake1StateSetupMigration MigrationHeight - - // This coincides with the ProofOfStake2ConsensusCutoverBlockHeight - ProofOfStake2ConsensusCutoverMigration MigrationHeight } func GetEncoderMigrationHeights(forkHeights *ForkHeights) *EncoderMigrationHeights { @@ -399,11 +395,6 @@ func GetEncoderMigrationHeights(forkHeights *ForkHeights) *EncoderMigrationHeigh Height: uint64(forkHeights.ProofOfStake1StateSetupBlockHeight), Name: ProofOfStake1StateSetupMigration, }, - ProofOfStake2ConsensusCutoverMigration: MigrationHeight{ - Version: 5, - Height: uint64(forkHeights.ProofOfStake2ConsensusCutoverBlockHeight), - Name: ProofOfStake2ConsensusCutoverMigration, - }, } } From 
5d58c0953916f507ea1a50198e0c85797e927da4 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 6 Jun 2023 09:23:16 -0400 Subject: [PATCH 088/762] Merge upstream branch. Resolve conflicts. --- lib/block_view.go | 2 +- lib/block_view_types.go | 6 +++--- lib/block_view_validator_test.go | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 1dbac280d..0321f0131 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -2963,7 +2963,7 @@ func (bav *UtxoView) _connectUpdateGlobalParams( newGlobalParamsEntry.MaxNonceExpirationBlockHeightOffset = newMaxNonceExpirationBlockHeightOffset } - if blockHeight >= bav.Params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight { + if blockHeight >= bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { var bytesRead int if len(extraData[StakeLockupEpochDuration]) > 0 { newGlobalParamsEntry.StakeLockupEpochDuration, bytesRead = Uvarint(extraData[StakeLockupEpochDuration]) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index c6640015a..2fcab1871 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3793,7 +3793,7 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe if MigrationTriggered(blockHeight, BalanceModelMigration) { data = append(data, UintToBuf(gp.MaxNonceExpirationBlockHeightOffset)...) } - if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { data = append(data, UintToBuf(gp.StakeLockupEpochDuration)...) data = append(data, UintToBuf(gp.ValidatorJailEpochDuration)...) 
} @@ -3829,7 +3829,7 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MaxNonceExpirationBlockHeightOffset") } } - if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { gp.StakeLockupEpochDuration, err = ReadUvarint(rr) if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading StakeLockupEpochDuration") @@ -3843,7 +3843,7 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by } func (gp *GlobalParamsEntry) GetVersionByte(blockHeight uint64) byte { - return GetMigrationVersion(blockHeight, BalanceModelMigration, ProofOfStakeNewTxnTypesMigration) + return GetMigrationVersion(blockHeight, BalanceModelMigration, ProofOfStake1StateSetupMigration) } func (gp *GlobalParamsEntry) GetEncoderType() EncoderType { diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 385f6fc89..b2ef8b20e 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -35,7 +35,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) - params.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) From 62afad7d6211540379ff72af08d706b90e1e5e1a Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 6 Jun 2023 10:13:55 -0400 Subject: [PATCH 089/762] Read in params from GlobalParamsEntry. 
--- lib/block_view_types.go | 25 +++++++++++++++++++++++-- lib/constants.go | 20 +++++++++----------- lib/pos_epoch_complete_hook.go | 2 +- lib/pos_epoch_complete_hook_test.go | 2 +- lib/pos_global_params.go | 22 ++++++++++++++++++++++ lib/pos_leader_schedule.go | 2 +- lib/pos_leader_schedule_test.go | 4 ++-- 7 files changed, 59 insertions(+), 18 deletions(-) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 7175ca2d0..3f178aafe 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3780,6 +3780,13 @@ type GlobalParamsEntry struct { // ValidatorJailEpochDuration is the number of epochs that a validator must // wait after being jailed before submitting an UnjailValidator txn. ValidatorJailEpochDuration uint64 + + // LeaderScheduleMaxNumValidators is the maximum number of validators that + // are included when generating a new Proof-of-Stake leader schedule. + LeaderScheduleMaxNumValidators uint64 + + // EpochDurationNumBlocks is the number of blocks included in one epoch. + EpochDurationNumBlocks uint64 } func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { @@ -3790,6 +3797,10 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { MaxCopiesPerNFT: gp.MaxCopiesPerNFT, MinimumNetworkFeeNanosPerKB: gp.MinimumNetworkFeeNanosPerKB, MaxNonceExpirationBlockHeightOffset: gp.MaxNonceExpirationBlockHeightOffset, + StakeLockupEpochDuration: gp.StakeLockupEpochDuration, + ValidatorJailEpochDuration: gp.ValidatorJailEpochDuration, + LeaderScheduleMaxNumValidators: gp.LeaderScheduleMaxNumValidators, + EpochDurationNumBlocks: gp.EpochDurationNumBlocks, } } @@ -3807,6 +3818,8 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { data = append(data, UintToBuf(gp.StakeLockupEpochDuration)...) data = append(data, UintToBuf(gp.ValidatorJailEpochDuration)...) + data = append(data, UintToBuf(gp.LeaderScheduleMaxNumValidators)...) 
+ data = append(data, UintToBuf(gp.EpochDurationNumBlocks)...) } return data } @@ -3843,11 +3856,19 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { gp.StakeLockupEpochDuration, err = ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading StakeLockupEpochDuration") + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading StakeLockupEpochDuration: ") } gp.ValidatorJailEpochDuration, err = ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading ValidatorJailEpochDuration") + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading ValidatorJailEpochDuration: ") + } + gp.LeaderScheduleMaxNumValidators, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading LeaderScheduleMaxNumValidators: ") + } + gp.EpochDurationNumBlocks, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading EpochDurationNumBlocks: ") } } return nil diff --git a/lib/constants.go b/lib/constants.go index 1b3d63c31..9138c43b9 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -617,14 +617,12 @@ type DeSoParams struct { // an UnjailValidator txn. DefaultValidatorJailEpochDuration uint64 - // LeaderScheduleMaxNumValidators is the maximum number of validators that - // are included when generating a new Proof-of-Stake leader schedule. - // TODO: Move this to GlobalParamsEntry. - LeaderScheduleMaxNumValidators uint64 + // DefaultLeaderScheduleMaxNumValidators is the default maximum number of validators + // that are included when generating a new Proof-of-Stake leader schedule. + DefaultLeaderScheduleMaxNumValidators uint64 - // EpochDurationNumBlocks is the number of blocks included in one epoch. - // TODO: Move this to GlobalParamsEntry. 
- EpochDurationNumBlocks uint64 + // DefaultEpochDurationNumBlocks is the default number of blocks included in one epoch. + DefaultEpochDurationNumBlocks uint64 ForkHeights ForkHeights @@ -1011,10 +1009,10 @@ var DeSoMainnetParams = DeSoParams{ DefaultValidatorJailEpochDuration: uint64(3), // The max number of validators included in a leader schedule. - LeaderScheduleMaxNumValidators: uint64(100), + DefaultLeaderScheduleMaxNumValidators: uint64(100), // The number of blocks in one epoch - EpochDurationNumBlocks: uint64(3600), + DefaultEpochDurationNumBlocks: uint64(3600), ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), @@ -1254,10 +1252,10 @@ var DeSoTestnetParams = DeSoParams{ DefaultValidatorJailEpochDuration: uint64(3), // The max number of validators included in a leader schedule. - LeaderScheduleMaxNumValidators: uint64(100), + DefaultLeaderScheduleMaxNumValidators: uint64(100), // The number of blocks in one epoch - EpochDurationNumBlocks: uint64(3600), + DefaultEpochDurationNumBlocks: uint64(3600), ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 0f60698b6..d6f6549e6 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -80,7 +80,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Roll-over a new epoch by setting a new CurrentEpochEntry. 
nextEpochEntry := &EpochEntry{ EpochNumber: currentEpochEntry.EpochNumber + 1, - FinalBlockHeight: blockHeight + bav.Params.EpochDurationNumBlocks, + FinalBlockHeight: blockHeight + bav.GetEpochDurationNumBlocks(0), } bav._setCurrentEpochEntry(nextEpochEntry) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index c3fa4febf..23d2cc333 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -111,7 +111,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, utxoView.FlushToDb(blockHeight)) // For these tests, we set each epoch duration to only one block. - params.EpochDurationNumBlocks = uint64(1) + params.DefaultEpochDurationNumBlocks = uint64(1) // Helper utils _registerAndStake := func(publicKey string, privateKey string, stakeAmountNanos uint64) { diff --git a/lib/pos_global_params.go b/lib/pos_global_params.go index ff7670c38..03c7d2721 100644 --- a/lib/pos_global_params.go +++ b/lib/pos_global_params.go @@ -21,3 +21,25 @@ func (bav *UtxoView) GetValidatorJailEpochDuration(snapshotAtEpochNumber uint64) // Return the DefaultValidatorJailEpochDuration. return bav.Params.DefaultValidatorJailEpochDuration } + +func (bav *UtxoView) GetLeaderScheduleMaxNumValidators(snapshotAtEpochNumber uint64) uint64 { + if snapshotAtEpochNumber > 0 { + // TODO: Return the SnapshotGlobalParamsEntry.LeaderScheduleMaxNumValidators if set. + } else if bav.GlobalParamsEntry.LeaderScheduleMaxNumValidators != uint64(0) { + // Return the CurrentGlobalParamsEntry.LeaderScheduleMaxNumValidators if set. + return bav.GlobalParamsEntry.LeaderScheduleMaxNumValidators + } + // Return the DefaultLeaderScheduleMaxNumValidators. + return bav.Params.DefaultLeaderScheduleMaxNumValidators +} + +func (bav *UtxoView) GetEpochDurationNumBlocks(snapshotAtEpochNumber uint64) uint64 { + if snapshotAtEpochNumber > 0 { + // TODO: Return the SnapshotGlobalParamsEntry.EpochDurationNumBlocks if set. 
+ } else if bav.GlobalParamsEntry.EpochDurationNumBlocks != uint64(0) { + // Return the CurrentGlobalParamsEntry.EpochDurationNumBlocks if set. + return bav.GlobalParamsEntry.EpochDurationNumBlocks + } + // Return the DefaultEpochDurationNumBlocks. + return bav.Params.DefaultEpochDurationNumBlocks +} diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index bbd4e72ee..a0f487dd0 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -14,7 +14,7 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) { } // Retrieve top, active validators ordered by stake. - validatorEntries, err := bav.GetTopActiveValidatorsByStake(bav.Params.LeaderScheduleMaxNumValidators) + validatorEntries, err := bav.GetTopActiveValidatorsByStake(bav.GetLeaderScheduleMaxNumValidators(0)) if err != nil { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving top ValidatorEntries: ") } diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 75f45f4c0..f59fd6150 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -246,8 +246,8 @@ func TestGenerateLeaderSchedule(t *testing.T) { testGenerateLeaderSchedule([]*PKID{m3PKID, m5PKID, m6PKID, m4PKID, m2PKID, m1PKID, m0PKID}) } { - // Test changing params.LeaderScheduleMaxNumValidators. - params.LeaderScheduleMaxNumValidators = 5 + // Test changing LeaderScheduleMaxNumValidators. + params.DefaultLeaderScheduleMaxNumValidators = 5 leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() require.NoError(t, err) require.Len(t, leaderSchedule, 5) From 96479a3c3ce5ab332641d296eda6c8f069b54986 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 6 Jun 2023 10:34:07 -0400 Subject: [PATCH 090/762] Consolidate fetching of snapshot global params into util. 
--- lib/block_view_stake.go | 10 +++++-- lib/block_view_validator.go | 10 +++++-- lib/constants.go | 2 ++ lib/pos_epoch_complete_hook.go | 9 +++++- lib/pos_global_params.go | 45 ----------------------------- lib/pos_leader_schedule.go | 9 +++++- lib/pos_snapshot_entries.go | 52 +++++++++++++++++++++++++++++++++- 7 files changed, 85 insertions(+), 52 deletions(-) delete mode 100644 lib/pos_global_params.go diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index ba9aa7b84..b5be72295 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1954,9 +1954,15 @@ func (bav *UtxoView) IsValidUnlockStakeMetadata(transactorPkBytes []byte, metada return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error retrieving CurrentEpochNumber: ") } + // Retrieve SnapshotGlobalParam: StakeLockupEpochDuration. + // TODO: Retrieve snapshot value, not current value. Update SnapshotAtEpochNumber. + stakeLockupEpochDuration, err := bav.GetSnapshotGlobalParam(StakeLockupEpochDuration, 0) + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error retrieving StakeLockupEpochDuration: ") + } + // Validate EndEpochNumber + StakeLockupEpochDuration <= CurrentEpochNumber. - // TODO: Retrieve snapshot StakeLockupEpochDuration, not current value. 
- if metadata.EndEpochNumber+bav.GetStakeLockupEpochDuration(0) > currentEpochNumber { + if metadata.EndEpochNumber+stakeLockupEpochDuration > currentEpochNumber { return errors.Wrapf(RuleErrorInvalidUnlockStakeMustWaitLockupDuration, "UtxoView.IsValidUnlockStakeMetadata: ") } diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index c30c00e26..15db872c3 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1713,9 +1713,15 @@ func (bav *UtxoView) IsValidUnjailValidatorMetadata(transactorPublicKey []byte) return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error retrieving CurrentEpochNumber: ") } + // Retrieve SnapshotGlobalParam: ValidatorJailEpochDuration. + // TODO: Retrieve snapshot value, not current value. Update SnapshotAtEpochNumber. + validatorJailEpochDuration, err := bav.GetSnapshotGlobalParam(ValidatorJailEpochDuration, 0) + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error retrieving ValidatorJailEpochDuration: ") + } + // Validate sufficient epochs have elapsed for validator to be unjailed. - // TODO: Retrieve snapshot ValidatorJailEpochDuration, not current value. 
- if validatorEntry.JailedAtEpochNumber+bav.GetValidatorJailEpochDuration(0) > currentEpochNumber { + if validatorEntry.JailedAtEpochNumber+validatorJailEpochDuration > currentEpochNumber { return errors.Wrapf(RuleErrorUnjailingValidatorTooEarly, "UtxoView.IsValidUnjailValidatorMetadata: ") } diff --git a/lib/constants.go b/lib/constants.go index 9138c43b9..d28462090 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1303,6 +1303,8 @@ const ( ForbiddenBlockSignaturePubKeyKey = "ForbiddenBlockSignaturePubKey" StakeLockupEpochDuration = "StakeLockupEpochDuration" ValidatorJailEpochDuration = "ValidatorJailEpochDuration" + LeaderScheduleMaxNumValidators = "LeaderScheduleMaxNumValidators" + EpochDurationNumBlocks = "EpochDurationNumBlocks" DiamondLevelKey = "DiamondLevel" DiamondPostHashKey = "DiamondPostHash" diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index d6f6549e6..77954ef48 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -47,6 +47,13 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { return errors.New("RunEpochCompleteHook: CurrentEpochEntry is nil, this should never happen") } + // Retrieve the SnapshotGlobalParam: EpochDurationNumBlocks. + // TODO: Retrieve snapshot value, not current value. Update SnapshotAtEpochNumber. + epochDurationNumBlocks, err := bav.GetSnapshotGlobalParam(EpochDurationNumBlocks, 0) + if err != nil { + return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving EpochDurationNumBlocks: ") + } + // Snapshot the current GlobalParamsEntry. bav._setSnapshotGlobalParamsEntry(bav.GlobalParamsEntry, currentEpochEntry.EpochNumber) @@ -80,7 +87,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Roll-over a new epoch by setting a new CurrentEpochEntry. 
nextEpochEntry := &EpochEntry{ EpochNumber: currentEpochEntry.EpochNumber + 1, - FinalBlockHeight: blockHeight + bav.GetEpochDurationNumBlocks(0), + FinalBlockHeight: blockHeight + epochDurationNumBlocks, } bav._setCurrentEpochEntry(nextEpochEntry) diff --git a/lib/pos_global_params.go b/lib/pos_global_params.go deleted file mode 100644 index 03c7d2721..000000000 --- a/lib/pos_global_params.go +++ /dev/null @@ -1,45 +0,0 @@ -package lib - -func (bav *UtxoView) GetStakeLockupEpochDuration(snapshotAtEpochNumber uint64) uint64 { - if snapshotAtEpochNumber > 0 { - // TODO: Return the SnapshotGlobalParamsEntry.StakeLockupEpochDuration if set. - } else if bav.GlobalParamsEntry.StakeLockupEpochDuration != uint64(0) { - // Return the CurrentGlobalParamsEntry.StakeLockupEpochDuration if set. - return bav.GlobalParamsEntry.StakeLockupEpochDuration - } - // Return the DefaultStakeLockupEpochDuration. - return bav.Params.DefaultStakeLockupEpochDuration -} - -func (bav *UtxoView) GetValidatorJailEpochDuration(snapshotAtEpochNumber uint64) uint64 { - if snapshotAtEpochNumber > 0 { - // TODO: Return the SnapshotGlobalParamsEntry.ValidatorJailEpochDuration if set. - } else if bav.GlobalParamsEntry.ValidatorJailEpochDuration != uint64(0) { - // Return the CurrentGlobalParamsEntry.ValidatorJailEpochDuration if set. - return bav.GlobalParamsEntry.ValidatorJailEpochDuration - } - // Return the DefaultValidatorJailEpochDuration. - return bav.Params.DefaultValidatorJailEpochDuration -} - -func (bav *UtxoView) GetLeaderScheduleMaxNumValidators(snapshotAtEpochNumber uint64) uint64 { - if snapshotAtEpochNumber > 0 { - // TODO: Return the SnapshotGlobalParamsEntry.LeaderScheduleMaxNumValidators if set. - } else if bav.GlobalParamsEntry.LeaderScheduleMaxNumValidators != uint64(0) { - // Return the CurrentGlobalParamsEntry.LeaderScheduleMaxNumValidators if set. - return bav.GlobalParamsEntry.LeaderScheduleMaxNumValidators - } - // Return the DefaultLeaderScheduleMaxNumValidators. 
- return bav.Params.DefaultLeaderScheduleMaxNumValidators -} - -func (bav *UtxoView) GetEpochDurationNumBlocks(snapshotAtEpochNumber uint64) uint64 { - if snapshotAtEpochNumber > 0 { - // TODO: Return the SnapshotGlobalParamsEntry.EpochDurationNumBlocks if set. - } else if bav.GlobalParamsEntry.EpochDurationNumBlocks != uint64(0) { - // Return the CurrentGlobalParamsEntry.EpochDurationNumBlocks if set. - return bav.GlobalParamsEntry.EpochDurationNumBlocks - } - // Return the DefaultEpochDurationNumBlocks. - return bav.Params.DefaultEpochDurationNumBlocks -} diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index a0f487dd0..32e918515 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -13,8 +13,15 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving CurrentRandomSeedHash: ") } + // Retrieve the SnapshotGlobalParam: LeaderScheduleMaxNumValidators. + // TODO: Retrieve snapshot value, not current value. Update SnapshotAtEpochNumber. + leaderScheduleMaxNumValidators, err := bav.GetSnapshotGlobalParam(LeaderScheduleMaxNumValidators, 0) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving LeaderScheduleMaxNumValidators: ") + } + // Retrieve top, active validators ordered by stake. 
- validatorEntries, err := bav.GetTopActiveValidatorsByStake(bav.GetLeaderScheduleMaxNumValidators(0)) + validatorEntries, err := bav.GetTopActiveValidatorsByStake(leaderScheduleMaxNumValidators) if err != nil { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving top ValidatorEntries: ") } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 0a1451c27..260ef6b5c 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -15,6 +15,56 @@ import ( // SnapshotGlobalParamsEntry // +func (bav *UtxoView) GetSnapshotGlobalParam(field string, snapshotAtEpochNumber uint64) (uint64, error) { + var err error + + // Retrieve the CurrentGlobalParamsEntry. + globalParamsEntry := bav.GlobalParamsEntry + + // If SnapshotAtEpochNumber > 0, retrieve the corresponding SnapshotGlobalParamsEntry. + if snapshotAtEpochNumber > 0 { + globalParamsEntry, err = bav.GetSnapshotGlobalParamsEntry(snapshotAtEpochNumber) + if err != nil { + return 0, errors.Wrapf(err, "GetSnapshotGlobalParam: error retrieving SnapshotGlobalParamsEntry: ") + } + } + + // Error if no GlobalParamsEntry was found. + if globalParamsEntry == nil { + return 0, errors.New("GetSnapshotGlobalParam: GlobalParamsEntry is nil") + } + + // Return the corresponding field. Either the updated value if + // set on the GlobalParamsEntry or the default value otherwise. 
+ switch field { + case StakeLockupEpochDuration: + if globalParamsEntry.StakeLockupEpochDuration != 0 { + return globalParamsEntry.StakeLockupEpochDuration, nil + } + return bav.Params.DefaultStakeLockupEpochDuration, nil + + case ValidatorJailEpochDuration: + if globalParamsEntry.ValidatorJailEpochDuration != 0 { + return globalParamsEntry.ValidatorJailEpochDuration, nil + } + return bav.Params.DefaultValidatorJailEpochDuration, nil + + case LeaderScheduleMaxNumValidators: + if globalParamsEntry.LeaderScheduleMaxNumValidators != 0 { + return globalParamsEntry.LeaderScheduleMaxNumValidators, nil + } + return bav.Params.DefaultLeaderScheduleMaxNumValidators, nil + + case EpochDurationNumBlocks: + if globalParamsEntry.EpochDurationNumBlocks != 0 { + return globalParamsEntry.EpochDurationNumBlocks, nil + } + return bav.Params.DefaultEpochDurationNumBlocks, nil + } + + return 0, fmt.Errorf("GetSnapshotGlobalParam: invalid field provided: %s", field) +} + func (bav *UtxoView) GetSnapshotGlobalParamsEntry(snapshotAtEpochNumber uint64) (*GlobalParamsEntry, error) { // Check the UtxoView first. if globalParamsEntry, exists := bav.SnapshotGlobalParamEntries[snapshotAtEpochNumber]; exists { @@ -413,7 +463,7 @@ func DBEnumerateAllCurrentValidators(handle *badger.DB, pkidsToSkip []*PKID) ([] func (bav *UtxoView) GetSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumber uint64) (*uint256.Int, error) { // Check the UtxoView first. if globalActiveStakeAmountNanos, exists := bav.SnapshotGlobalActiveStakeAmountNanos[snapshotAtEpochNumber]; exists { - return globalActiveStakeAmountNanos, nil + return globalActiveStakeAmountNanos.Clone(), nil } // If we don't have it in the UtxoView, check the db. 
globalActiveStakeAmountNanos, err := DBGetSnapshotGlobalActiveStakeAmountNanos(bav.Handle, bav.Snapshot, snapshotAtEpochNumber) From ade1bdd4215f3bf09b8d42d38379996e5d7a46fe Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 6 Jun 2023 10:47:35 -0400 Subject: [PATCH 091/762] Store LeaderIndex as uint16. --- lib/db_utils.go | 10 ++++++++++ lib/db_utils_test.go | 12 ++++++++++++ lib/pos_epoch_complete_hook.go | 6 +++--- lib/pos_epoch_complete_hook_test.go | 8 ++++---- lib/pos_snapshot_entries.go | 16 ++++++++-------- 5 files changed, 37 insertions(+), 15 deletions(-) diff --git a/lib/db_utils.go b/lib/db_utils.go index 9dd883d61..c6c58ab96 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -4285,6 +4285,16 @@ func DecodeUint64(scoreBytes []byte) uint64 { return binary.BigEndian.Uint64(scoreBytes) } +func EncodeUint16(num uint16) []byte { + numBytes := make([]byte, 2) + binary.BigEndian.PutUint16(numBytes, num) + return numBytes +} + +func DecodeUint16(numBytes []byte) uint16 { + return binary.BigEndian.Uint16(numBytes) +} + func EncodeUint8(num uint8) []byte { return []byte{num} } diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index d20d17490..fb1578455 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -668,6 +668,18 @@ func TestDeleteExpiredTransactorNonceEntries(t *testing.T) { } +func TestEncodeUint16(t *testing.T) { + for _, num := range []uint16{0, 5819, math.MaxUint16} { + // Encode to bytes. + encoded := EncodeUint16(num) + require.Len(t, encoded, 2) + + // Decode from bytes. + decoded := DecodeUint16(encoded) + require.Equal(t, num, decoded) + } +} + func TestEncodeUint8(t *testing.T) { for _, num := range []uint8{0, 95, math.MaxUint8} { // Encode to bytes. 
diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 77954ef48..8a8d98e42 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -75,10 +75,10 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { return errors.Wrapf(err, "RunEpochCompleteHook: problem generating leader schedule: ") } for index, validatorPKID := range leaderSchedule { - if index > math.MaxUint8 { - return errors.Errorf("RunEpochCompleteHook: LeaderIndex %d overflows uint8", index) + if index > math.MaxUint16 { + return errors.Errorf("RunEpochCompleteHook: LeaderIndex %d overflows uint16", index) } - bav._setSnapshotLeaderScheduleValidator(validatorPKID, uint8(index), currentEpochEntry.EpochNumber) + bav._setSnapshotLeaderScheduleValidator(validatorPKID, uint16(index), currentEpochEntry.EpochNumber) } // TODO: Jail inactive validators. diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 23d2cc333..40eb00abe 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -191,7 +191,7 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test SnapshotLeaderSchedule is nil. for index := range validatorPKIDs { - snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint8(index), 0) + snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint16(index), 0) require.NoError(t, err) require.Nil(t, snapshotLeaderScheduleValidator) } @@ -235,7 +235,7 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test SnapshotLeaderSchedule is nil. 
for index := range validatorPKIDs { - snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint8(index), 0) + snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint16(index), 0) require.NoError(t, err) require.Nil(t, snapshotLeaderScheduleValidator) } @@ -297,7 +297,7 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test SnapshotLeaderSchedule is populated. for index := range validatorPKIDs { - snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint8(index), 1) + snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint16(index), 1) require.NoError(t, err) require.NotNil(t, snapshotLeaderScheduleValidator) } @@ -327,7 +327,7 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test SnapshotLeaderSchedule for a future epoch is nil. for index := range validatorPKIDs { - snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint8(index), 2) + snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint16(index), 2) require.NoError(t, err) require.Nil(t, snapshotLeaderScheduleValidator) } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 260ef6b5c..2ade09600 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -568,10 +568,10 @@ func DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn( type SnapshotLeaderScheduleMapKey struct { SnapshotAtEpochNumber uint64 - LeaderIndex uint8 + LeaderIndex uint16 } -func (bav *UtxoView) GetSnapshotLeaderScheduleValidator(leaderIndex uint8, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { +func (bav *UtxoView) GetSnapshotLeaderScheduleValidator(leaderIndex uint16, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { // First, check the UtxoView. 
mapKey := SnapshotLeaderScheduleMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, LeaderIndex: leaderIndex} if validatorPKID, exists := bav.SnapshotLeaderSchedule[mapKey]; exists { @@ -589,7 +589,7 @@ func (bav *UtxoView) GetSnapshotLeaderScheduleValidator(leaderIndex uint8, snaps return validatorEntry, nil } -func (bav *UtxoView) _setSnapshotLeaderScheduleValidator(validatorPKID *PKID, index uint8, snapshotAtEpochNumber uint64) { +func (bav *UtxoView) _setSnapshotLeaderScheduleValidator(validatorPKID *PKID, index uint16, snapshotAtEpochNumber uint64) { if validatorPKID == nil { glog.Errorf("_setSnapshotLeaderScheduleValidator: called with nil ValidatorPKID, this should never happen") return @@ -619,17 +619,17 @@ func (bav *UtxoView) _flushSnapshotLeaderScheduleToDbWithTxn(txn *badger.Txn, bl return nil } -func DBKeyForSnapshotLeaderScheduleValidator(leaderIndex uint8, snapshotAtEpochNumber uint64) []byte { +func DBKeyForSnapshotLeaderScheduleValidator(leaderIndex uint16, snapshotAtEpochNumber uint64) []byte { data := append([]byte{}, Prefixes.PrefixSnapshotLeaderSchedule...) data = append(data, UintToBuf(snapshotAtEpochNumber)...) - data = append(data, EncodeUint8(leaderIndex)...) + data = append(data, EncodeUint16(leaderIndex)...) return data } func DBGetSnapshotLeaderScheduleValidator( handle *badger.DB, snap *Snapshot, - leaderIndex uint8, + leaderIndex uint16, snapshotAtEpochNumber uint64, ) (*ValidatorEntry, error) { var ret *ValidatorEntry @@ -644,7 +644,7 @@ func DBGetSnapshotLeaderScheduleValidator( func DBGetSnapshotLeaderScheduleValidatorWithTxn( txn *badger.Txn, snap *Snapshot, - leaderIndex uint8, + leaderIndex uint16, snapshotAtEpochNumber uint64, ) (*ValidatorEntry, error) { // Retrieve ValidatorPKID from db. 
@@ -673,7 +673,7 @@ func DBPutSnapshotLeaderScheduleValidatorWithTxn( txn *badger.Txn, snap *Snapshot, validatorPKID *PKID, - leaderIndex uint8, + leaderIndex uint16, snapshotAtEpochNumber uint64, blockHeight uint64, ) error { From 76fa3afcf18ccb06af46c59bd1c1ad38b372c704 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 6 Jun 2023 11:36:56 -0400 Subject: [PATCH 092/762] Hard-code SnapshotAtEpochNumber when fetching snapshot global params. --- lib/block_view_stake.go | 5 ++--- lib/block_view_stake_test.go | 4 +++- lib/block_view_validator.go | 5 ++--- lib/block_view_validator_test.go | 6 ++++++ lib/pos_epoch_complete_hook.go | 5 ++--- lib/pos_leader_schedule.go | 5 ++--- lib/pos_snapshot_entries.go | 31 +++++++++++++++++++------------ 7 files changed, 36 insertions(+), 25 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index b5be72295..fca40f504 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1955,10 +1955,9 @@ func (bav *UtxoView) IsValidUnlockStakeMetadata(transactorPkBytes []byte, metada } // Retrieve SnapshotGlobalParam: StakeLockupEpochDuration. - // TODO: Retrieve snapshot value, not current value. Update SnapshotAtEpochNumber. - stakeLockupEpochDuration, err := bav.GetSnapshotGlobalParam(StakeLockupEpochDuration, 0) + stakeLockupEpochDuration, err := bav.GetSnapshotGlobalParam(StakeLockupEpochDuration) if err != nil { - return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error retrieving StakeLockupEpochDuration: ") + return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error retrieving snapshot StakeLockupEpochDuration: ") } // Validate EndEpochNumber + StakeLockupEpochDuration <= CurrentEpochNumber. diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 27e53b866..7e4900a1f 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -1807,7 +1807,7 @@ func TestStakeLockupEpochDuration(t *testing.T) { // Seed a CurrentEpochEntry. 
epochUtxoView := newUtxoView() - epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 5, FinalBlockHeight: blockHeight + 10}) + epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) currentEpochNumber, err := newUtxoView().GetCurrentEpochNumber() require.NoError(t, err) @@ -1889,6 +1889,8 @@ func TestStakeLockupEpochDuration(t *testing.T) { epochUtxoView._setCurrentEpochEntry( &EpochEntry{EpochNumber: currentEpochNumber + 3, FinalBlockHeight: blockHeight + 10}, ) + // Also store a SnapshotGlobalParamsEntry in the db. + epochUtxoView._setSnapshotGlobalParamsEntry(&GlobalParamsEntry{}, currentEpochNumber+1) require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) currentEpochNumber, err = newUtxoView().GetCurrentEpochNumber() require.NoError(t, err) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 15db872c3..996bdb914 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1714,10 +1714,9 @@ func (bav *UtxoView) IsValidUnjailValidatorMetadata(transactorPublicKey []byte) } // Retrieve SnapshotGlobalParam: ValidatorJailEpochDuration. - // TODO: Retrieve snapshot value, not current value. Update SnapshotAtEpochNumber. - validatorJailEpochDuration, err := bav.GetSnapshotGlobalParam(ValidatorJailEpochDuration, 0) + validatorJailEpochDuration, err := bav.GetSnapshotGlobalParam(ValidatorJailEpochDuration) if err != nil { - return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error retrieving ValidatorJailEpochDuration: ") + return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error retrieving snapshot ValidatorJailEpochDuration: ") } // Validate sufficient epochs have elapsed for validator to be unjailed. 
diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index b2ef8b20e..7e95dfb0d 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -1660,6 +1660,9 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { epochUtxoView._setCurrentEpochEntry( &EpochEntry{EpochNumber: currentEpochNumber + 3, FinalBlockHeight: blockHeight + 10}, ) + + // Store a SnapshotGlobalParamsEntry in the db. + epochUtxoView._setSnapshotGlobalParamsEntry(&GlobalParamsEntry{}, currentEpochNumber+1) require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) // Verify CurrentEpochNumber. @@ -1920,6 +1923,9 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { epochUtxoView._setCurrentEpochEntry( &EpochEntry{EpochNumber: currentEpochNumber + 3, FinalBlockHeight: blockHeight + 10}, ) + + // Store a SnapshotGlobalParamsEntry in the db. + epochUtxoView._setSnapshotGlobalParamsEntry(&GlobalParamsEntry{}, currentEpochNumber+1) require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) // Verify CurrentEpochNumber. diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 8a8d98e42..30157798a 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -48,10 +48,9 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { } // Retrieve the SnapshotGlobalParam: EpochDurationNumBlocks. - // TODO: Retrieve snapshot value, not current value. Update SnapshotAtEpochNumber. - epochDurationNumBlocks, err := bav.GetSnapshotGlobalParam(EpochDurationNumBlocks, 0) + epochDurationNumBlocks, err := bav.GetSnapshotGlobalParam(EpochDurationNumBlocks) if err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving EpochDurationNumBlocks: ") + return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving snapshot EpochDurationNumBlocks: ") } // Snapshot the current GlobalParamsEntry. 
diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 32e918515..072eda3fb 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -14,10 +14,9 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) { } // Retrieve the SnapshotGlobalParam: LeaderScheduleMaxNumValidators. - // TODO: Retrieve snapshot value, not current value. Update SnapshotAtEpochNumber. - leaderScheduleMaxNumValidators, err := bav.GetSnapshotGlobalParam(LeaderScheduleMaxNumValidators, 0) + leaderScheduleMaxNumValidators, err := bav.GetSnapshotGlobalParam(LeaderScheduleMaxNumValidators) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving LeaderScheduleMaxNumValidators: ") + return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving snapshot LeaderScheduleMaxNumValidators: ") } // Retrieve top, active validators ordered by stake. diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 2ade09600..ce7d4b8e7 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -15,23 +15,30 @@ import ( // SnapshotGlobalParamsEntry // -func (bav *UtxoView) GetSnapshotGlobalParam(field string, snapshotAtEpochNumber uint64) (uint64, error) { - var err error +const SnapshotLookbackNumEpochs uint64 = 2 - // Retrieve the CurrentGlobalParamsEntry. - globalParamsEntry := bav.GlobalParamsEntry +func (bav *UtxoView) GetSnapshotGlobalParam(field string) (uint64, error) { + // Retrieve the CurrentEpochEntry. + currentEpochEntry, err := bav.GetCurrentEpochEntry() + if err != nil { + return 0, errors.Wrapf(err, "GetSnapshotGlobalParam: problem retrieving CurrentEpochEntry: ") + } - // If SnapshotAtEpochNumber > 0, retrieve the corresponding SnapshotGlobalParamsEntry. - if snapshotAtEpochNumber > 0 { + // Calculate the SnapshotAtEpochNumber from which to retrieve the SnapshotGlobalParamsEntry. 
+ // If the SnapshotAtEpochNumber < 0, then skip and just use the default value. + globalParamsEntry := &GlobalParamsEntry{} + if currentEpochEntry.EpochNumber >= SnapshotLookbackNumEpochs { + snapshotAtEpochNumber, err := SafeUint64().Sub(currentEpochEntry.EpochNumber, SnapshotLookbackNumEpochs) + if err != nil { + return 0, errors.Wrapf(err, "GetSnapshotGlobalParam: problem calculating SnapshotAtEpochNumber: ") + } globalParamsEntry, err = bav.GetSnapshotGlobalParamsEntry(snapshotAtEpochNumber) if err != nil { - return 0, errors.Wrapf(err, "GetSnapshotGlobalParam: error retrieving SnapshotGlobalParamsEntry: ") + return 0, errors.Wrapf(err, "GetSnapshotGlobalParam: problem retrieving SnapshotGlobalParamsEntry: ") + } + if globalParamsEntry == nil { + return 0, errors.New("GetSnapshotGlobalParam: SnapshotGlobalParamsEntry is nil") } - } - - // Error if no GlobalParamsEntry was found. - if globalParamsEntry == nil { - return 0, errors.New("GetSnapshotGlobalParam: GlobalParamsEntry is nil") } // Return the corresponding field. Either the updated value if From a6b4fce515db95798c8eebc482ca30680ef37aa5 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 6 Jun 2023 11:46:31 -0400 Subject: [PATCH 093/762] Rename PrefixValidatorByStatusAndStake. --- lib/block_view_validator.go | 12 ++++++------ lib/db_utils.go | 12 ++++++------ lib/pos_snapshot_entries.go | 4 ++-- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 996bdb914..407f5ab93 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -539,7 +539,7 @@ func DBKeyForValidatorByPKID(validatorEntry *ValidatorEntry) []byte { } func DBKeyForValidatorByStake(validatorEntry *ValidatorEntry) []byte { - key := append([]byte{}, Prefixes.PrefixValidatorByStake...) + key := append([]byte{}, Prefixes.PrefixValidatorByStatusAndStake...) key = append(key, EncodeUint8(uint8(validatorEntry.Status()))...) 
key = append(key, FixedWidthEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) @@ -596,7 +596,7 @@ func DBGetTopActiveValidatorsByStake( } // Retrieve top N active ValidatorEntry keys by stake. - key := append([]byte{}, Prefixes.PrefixValidatorByStake...) + key := append([]byte{}, Prefixes.PrefixValidatorByStatusAndStake...) key = append(key, EncodeUint8(uint8(ValidatorStatusActive))...) keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( handle, key, int(limit), nil, true, validatorKeysToSkip, @@ -678,12 +678,12 @@ func DBPutValidatorWithTxn( ) } - // Set ValidatorEntry key in PrefixValidatorByStake. The value should be nil. + // Set ValidatorEntry key in PrefixValidatorByStatusAndStake. The value should be nil. // We parse the ValidatorPKID from the key for this index. key = DBKeyForValidatorByStake(validatorEntry) if err := DBSetWithTxn(txn, snap, key, nil); err != nil { return errors.Wrapf( - err, "DBPutValidatorWithTxn: problem storing ValidatorEntry in index PrefixValidatorByStake", + err, "DBPutValidatorWithTxn: problem storing ValidatorEntry in index PrefixValidatorByStatusAndStake", ) } @@ -719,11 +719,11 @@ func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorPKID *PK ) } - // Delete ValidatorEntry.PKID from PrefixValidatorByStake. + // Delete ValidatorEntry.PKID from PrefixValidatorByStatusAndStake. 
key = DBKeyForValidatorByStake(validatorEntry) if err := DBDeleteWithTxn(txn, snap, key); err != nil { return errors.Wrapf( - err, "DBDeleteValidatorWithTxn: problem deleting ValidatorEntry from index PrefixValidatorByStake", + err, "DBDeleteValidatorWithTxn: problem deleting ValidatorEntry from index PrefixValidatorByStatusAndStake", ) } diff --git a/lib/db_utils.go b/lib/db_utils.go index c6c58ab96..0ed4630fe 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -481,10 +481,10 @@ type DBPrefixes struct { // Prefix, ValidatorPKID -> ValidatorEntry PrefixValidatorByPKID []byte `prefix_id:"[78]" is_state:"true"` - // PrefixValidatorByStake: Retrieve the top N validators by stake. + // PrefixValidatorByStatusAndStake: Retrieve the top N active validators by stake. // Prefix, Status, TotalStakeAmountNanos, ValidatorPKID -> nil // Note that we save space by storing a nil value and parsing the ValidatorPKID from the key. - PrefixValidatorByStake []byte `prefix_id:"[79]" is_state:"true"` + PrefixValidatorByStatusAndStake []byte `prefix_id:"[79]" is_state:"true"` // PrefixGlobalActiveStakeAmountNanos: Retrieve the cumulative stake across all validators. // Prefix -> *uint256.Int @@ -537,10 +537,10 @@ type DBPrefixes struct { // Prefix, SnapshotAtEpochNumber, ValidatorPKID -> *ValidatorEntry PrefixSnapshotValidatorByPKID []byte `prefix_id:"[86]" is_state:"true"` - // PrefixSnapshotValidatorByStake: Retrieve stake-ordered ValidatorEntries by SnapshotAtEpochNumber. + // PrefixSnapshotValidatorByStatusAndStake: Retrieve stake-ordered active ValidatorEntries by SnapshotAtEpochNumber. // Prefix, SnapshotAtEpochNumber, Status, TotalStakeAmountNanos, ValidatorPKID -> nil // Note: we parse the ValidatorPKID from the key and the value is nil to save space. 
- PrefixSnapshotValidatorByStake []byte `prefix_id:"[87]" is_state:"true"` + PrefixSnapshotValidatorByStatusAndStake []byte `prefix_id:"[87]" is_state:"true"` // PrefixSnapshotGlobalActiveStakeAmountNanos: Retrieve a snapshot GlobalActiveStakeAmountNanos by SnapshotAtEpochNumber. // Prefix, SnapshotAtEpochNumber -> *uint256.Int @@ -755,7 +755,7 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByPKID) { // prefix_id:"[78]" return true, &ValidatorEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByStake) { + } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByStatusAndStake) { // prefix_id:"[79]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixGlobalActiveStakeAmountNanos) { @@ -779,7 +779,7 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByPKID) { // prefix_id:"[86]" return true, &ValidatorEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByStake) { + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByStatusAndStake) { // prefix_id:"[87]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanos) { diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index ce7d4b8e7..74f8ea7a9 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -312,7 +312,7 @@ func DBKeyForSnapshotValidatorByPKID(validatorEntry *ValidatorEntry, snapshotAtE } func DBKeyForSnapshotValidatorByStake(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) []byte { - key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStake...) + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStatusAndStake...) key = append(key, UintToBuf(snapshotAtEpochNumber)...) key = append(key, EncodeUint8(uint8(validatorEntry.Status()))...) 
key = append(key, FixedWidthEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) @@ -372,7 +372,7 @@ func DBGetSnapshotTopActiveValidatorsByStake( } // Retrieve top N active ValidatorEntry keys by stake. - key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStake...) + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStatusAndStake...) key = append(key, UintToBuf(snapshotAtEpochNumber)...) key = append(key, EncodeUint8(uint8(ValidatorStatusActive))...) keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( From 78f7ea1bb92dbba0f8a9e34d95f08a2122c3f63d Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 6 Jun 2023 11:54:00 -0400 Subject: [PATCH 094/762] Use ValidatorPKID as tie-breaker when storing validators by stake. --- lib/block_view_validator.go | 12 ++++++++++-- lib/pos_snapshot_entries.go | 12 ++++++++++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 407f5ab93..58069c08b 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1884,8 +1884,16 @@ func (bav *UtxoView) GetTopActiveValidatorsByStake(limit uint64) ([]*ValidatorEn } } // Sort the ValidatorEntries DESC by TotalStakeAmountNanos. - sort.Slice(validatorEntries, func(ii, jj int) bool { - return validatorEntries[ii].TotalStakeAmountNanos.Cmp(validatorEntries[jj].TotalStakeAmountNanos) > 0 + sort.SliceStable(validatorEntries, func(ii, jj int) bool { + stakeCmp := validatorEntries[ii].TotalStakeAmountNanos.Cmp(validatorEntries[jj].TotalStakeAmountNanos) + if stakeCmp == 0 { + // Use ValidatorPKID as a tie-breaker if equal TotalStakeAmountNanos. + return bytes.Compare( + validatorEntries[ii].ValidatorPKID.ToBytes(), + validatorEntries[jj].ValidatorPKID.ToBytes(), + ) > 0 + } + return stakeCmp > 0 }) // Return top N. 
upperBound := int(math.Min(float64(limit), float64(len(validatorEntries)))) diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 74f8ea7a9..01ad5a747 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -264,8 +264,16 @@ func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit uint64, snapsho } } // Sort the ValidatorEntries DESC by TotalStakeAmountNanos. - sort.Slice(validatorEntries, func(ii, jj int) bool { - return validatorEntries[ii].TotalStakeAmountNanos.Cmp(validatorEntries[jj].TotalStakeAmountNanos) > 0 + sort.SliceStable(validatorEntries, func(ii, jj int) bool { + stakeCmp := validatorEntries[ii].TotalStakeAmountNanos.Cmp(validatorEntries[jj].TotalStakeAmountNanos) + if stakeCmp == 0 { + // Use ValidatorPKID as a tie-breaker if equal TotalStakeAmountNanos. + return bytes.Compare( + validatorEntries[ii].ValidatorPKID.ToBytes(), + validatorEntries[jj].ValidatorPKID.ToBytes(), + ) > 0 + } + return stakeCmp > 0 }) // Return top N. upperBound := int(math.Min(float64(limit), float64(len(validatorEntries)))) From a143fdb3c5ccbe95b026396c148de7df8c0a1aa0 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 7 Jun 2023 10:51:04 -0400 Subject: [PATCH 095/762] Hard-code how many snapshots back to look. 
--- lib/block_view.go | 20 +- lib/block_view_stake_test.go | 2 +- lib/block_view_validator.go | 4 +- lib/block_view_validator_test.go | 4 +- lib/constants.go | 4 - lib/pos_epoch_complete_hook_test.go | 279 +++++++++++++++++----------- lib/pos_leader_schedule_test.go | 10 +- lib/pos_snapshot_entries.go | 101 ++++++---- 8 files changed, 266 insertions(+), 158 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 120e2d6a1..9fcbb9450 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3017,18 +3017,30 @@ func (bav *UtxoView) _connectUpdateGlobalParams( if blockHeight >= bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { var bytesRead int - if len(extraData[StakeLockupEpochDuration]) > 0 { - newGlobalParamsEntry.StakeLockupEpochDuration, bytesRead = Uvarint(extraData[StakeLockupEpochDuration]) + if len(extraData[StakeLockupEpochDuration.ToString()]) > 0 { + newGlobalParamsEntry.StakeLockupEpochDuration, bytesRead = Uvarint(extraData[StakeLockupEpochDuration.ToString()]) if bytesRead <= 0 { return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode StakeLockupEpochDuration as uint64") } } - if len(extraData[ValidatorJailEpochDuration]) > 0 { - newGlobalParamsEntry.ValidatorJailEpochDuration, bytesRead = Uvarint(extraData[ValidatorJailEpochDuration]) + if len(extraData[ValidatorJailEpochDuration.ToString()]) > 0 { + newGlobalParamsEntry.ValidatorJailEpochDuration, bytesRead = Uvarint(extraData[ValidatorJailEpochDuration.ToString()]) if bytesRead <= 0 { return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode ValidatorJailEpochDuration as uint64") } } + if len(extraData[LeaderScheduleMaxNumValidators.ToString()]) > 0 { + newGlobalParamsEntry.LeaderScheduleMaxNumValidators, bytesRead = Uvarint(extraData[LeaderScheduleMaxNumValidators.ToString()]) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode LeaderScheduleMaxNumValidators as uint64") + } + } + if 
len(extraData[EpochDurationNumBlocks.ToString()]) > 0 {
+			newGlobalParamsEntry.EpochDurationNumBlocks, bytesRead = Uvarint(extraData[EpochDurationNumBlocks.ToString()])
+			if bytesRead <= 0 {
+				return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode EpochDurationNumBlocks as uint64")
+			}
+		}
 	}
 
 	var newForbiddenPubKeyEntry *ForbiddenPubKeyEntry
diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go
index 7e4900a1f..d66496e7a 100644
--- a/lib/block_view_stake_test.go
+++ b/lib/block_view_stake_test.go
@@ -1820,7 +1820,7 @@ func TestStakeLockupEpochDuration(t *testing.T) {
 			testMeta.feeRateNanosPerKb,
 			paramUpdaterPub,
 			paramUpdaterPriv,
-			map[string][]byte{StakeLockupEpochDuration: UintToBuf(3)},
+			map[string][]byte{StakeLockupEpochDuration.ToString(): UintToBuf(3)},
 		)
 	}
 	{
diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go
index 58069c08b..d77e79918 100644
--- a/lib/block_view_validator.go
+++ b/lib/block_view_validator.go
@@ -641,9 +641,9 @@ func DBGetGlobalActiveStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot) (
 	key := DBKeyForGlobalActiveStakeAmountNanos()
 	globalActiveStakeAmountNanosBytes, err := DBGetWithTxn(txn, snap, key)
 	if err != nil {
-		// We don't want to error if the key isn't found. Instead, return 0.
+		// We don't want to error if the key isn't found. Instead, return nil.
 		if err == badger.ErrKeyNotFound {
-			return uint256.NewInt(), nil
+			return nil, nil
 		}
 		return nil, errors.Wrapf(err, "DBGetGlobalActiveStakeAmountNanosWithTxn: problem retrieving value")
 	}
diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go
index 7e95dfb0d..817c811ca 100644
--- a/lib/block_view_validator_test.go
+++ b/lib/block_view_validator_test.go
@@ -70,7 +70,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) {
 	m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID
 
 	{
-		// ParamUpdater set min fee rate
+		// ParamUpdater set MinFeeRateNanos.
params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true _updateGlobalParamsEntryWithExtraData( testMeta, @@ -1558,7 +1558,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - map[string][]byte{ValidatorJailEpochDuration: UintToBuf(3)}, + map[string][]byte{ValidatorJailEpochDuration.ToString(): UintToBuf(3)}, ) } { diff --git a/lib/constants.go b/lib/constants.go index 248411173..7741090fd 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1312,10 +1312,6 @@ const ( MaxCopiesPerNFTKey = "MaxCopiesPerNFT" MaxNonceExpirationBlockHeightOffsetKey = "MaxNonceExpirationBlockHeightOffset" ForbiddenBlockSignaturePubKeyKey = "ForbiddenBlockSignaturePubKey" - StakeLockupEpochDuration = "StakeLockupEpochDuration" - ValidatorJailEpochDuration = "ValidatorJailEpochDuration" - LeaderScheduleMaxNumValidators = "LeaderScheduleMaxNumValidators" - EpochDurationNumBlocks = "EpochDurationNumBlocks" DiamondLevelKey = "DiamondLevel" DiamondPostHashKey = "DiamondPostHash" diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 40eb00abe..ed9381795 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -66,12 +66,9 @@ func TestRunEpochCompleteHook(t *testing.T) { GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) - require.NoError(t, err) - // Mine a few blocks to give the senderPkString some money. 
for ii := 0; ii < 10; ii++ { - _, err = miner.MineAndProcessSingleBlock(0, mempool) + _, err := miner.MineAndProcessSingleBlock(0, mempool) require.NoError(t, err) } @@ -106,14 +103,13 @@ func TestRunEpochCompleteHook(t *testing.T) { m6PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m6PkBytes).PKID validatorPKIDs := []*PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} - // Seed a CurrentEpochEntry. - utxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 0, FinalBlockHeight: blockHeight}) - require.NoError(t, utxoView.FlushToDb(blockHeight)) - - // For these tests, we set each epoch duration to only one block. - params.DefaultEpochDurationNumBlocks = uint64(1) - // Helper utils + utxoView := func() *UtxoView { + newUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(t, err) + return newUtxoView + } + _registerAndStake := func(publicKey string, privateKey string, stakeAmountNanos uint64) { // Convert PublicKeyBase58Check to PublicKeyBytes. pkBytes, _, err := Base58CheckDecode(publicKey) @@ -141,104 +137,92 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) } - { - // ParamUpdater set min fee rate - params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - _updateGlobalParamsEntryWithTestMeta( - testMeta, - testMeta.feeRateNanosPerKb, - paramUpdaterPub, - paramUpdaterPriv, - -1, - int64(testMeta.feeRateNanosPerKb), - -1, - -1, - -1, - ) - // Reconstruct the UtxoView to pull in the updated GlobalParamsEntry. - utxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot) - require.NoError(t, err) + _runOnEpochCompleteHook := func() { + tmpUtxoView := utxoView() + blockHeight += 1 + require.NoError(t, tmpUtxoView.RunEpochCompleteHook(blockHeight)) + require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) } - { - // Test the state of the snapshots prior to running our first OnEpochCompleteHook. - - // Test CurrentEpochNumber. 
- currentEpochNumber, err := utxoView.GetCurrentEpochNumber() - require.NoError(t, err) - require.Equal(t, currentEpochNumber, uint64(0)) - - // Test SnapshotGlobalParamsEntry is nil. - snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(0) - require.NoError(t, err) - require.Nil(t, snapshotGlobalParamsEntry) + _assertEmptyValidatorSnapshots := func() { // Test SnapshotValidatorByPKID is nil. for _, pkid := range validatorPKIDs { - snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorByPKID(pkid, 0) + snapshotValidatorEntry, err := utxoView().GetSnapshotValidatorByPKID(pkid) require.NoError(t, err) require.Nil(t, snapshotValidatorEntry) } // Test SnapshotTopActiveValidatorsByStake is empty. - validatorEntries, err := utxoView.GetSnapshotTopActiveValidatorsByStake(10, 0) + validatorEntries, err := utxoView().GetSnapshotTopActiveValidatorsByStake(10) require.NoError(t, err) require.Empty(t, validatorEntries) - // Test SnapshotGlobalActiveStakeAmountNanos is nil. - snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(0) + // Test SnapshotGlobalActiveStakeAmountNanos is zero. + snapshotGlobalActiveStakeAmountNanos, err := utxoView().GetSnapshotGlobalActiveStakeAmountNanos() require.NoError(t, err) - require.Nil(t, snapshotGlobalActiveStakeAmountNanos) + require.True(t, snapshotGlobalActiveStakeAmountNanos.IsZero()) // Test SnapshotLeaderSchedule is nil. for index := range validatorPKIDs { - snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint16(index), 0) + snapshotLeaderScheduleValidator, err := utxoView().GetSnapshotLeaderScheduleValidator(uint16(index)) require.NoError(t, err) require.Nil(t, snapshotLeaderScheduleValidator) } } + + // Seed a CurrentEpochEntry. 
+ tmpUtxoView := utxoView() + tmpUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 0, FinalBlockHeight: blockHeight + 1}) + require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) + + // For these tests, we set each epoch duration to only one block. + params.DefaultEpochDurationNumBlocks = uint64(1) + { - // Test RunOnEpochCompleteHook() with no validators or stakers. - require.NoError(t, utxoView.RunEpochCompleteHook(blockHeight)) - require.NoError(t, utxoView.FlushToDb(blockHeight)) + // ParamUpdater set MinFeeRateNanos and ValidatorJailEpochDuration. + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithExtraData( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + map[string][]byte{ValidatorJailEpochDuration.ToString(): UintToBuf(4)}, + ) } { - // Test the state of the snapshots after running our first OnEpochCompleteHook - // but with no existing validators or stakers. + // Test the state of the snapshots prior to running our first OnEpochCompleteHook. // Test CurrentEpochNumber. - currentEpochNumber, err := utxoView.GetCurrentEpochNumber() + currentEpochNumber, err := utxoView().GetCurrentEpochNumber() require.NoError(t, err) - require.Equal(t, currentEpochNumber, uint64(1)) + require.Equal(t, currentEpochNumber, uint64(0)) // Test SnapshotGlobalParamsEntry is nil. - snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(0) + snapshotGlobalParamsEntry, err := utxoView().GetSnapshotGlobalParamsEntry() require.NoError(t, err) - require.NotNil(t, snapshotGlobalParamsEntry) - require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) + require.Nil(t, snapshotGlobalParamsEntry) - // Test SnapshotValidatorByPKID is nil. 
-		for _, pkid := range validatorPKIDs {
-			snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorByPKID(pkid, 0)
-			require.NoError(t, err)
-			require.Nil(t, snapshotValidatorEntry)
-		}
+		_assertEmptyValidatorSnapshots()
+	}
+	{
+		// Test RunOnEpochCompleteHook() with no validators or stakers.
+		_runOnEpochCompleteHook()
+	}
+	{
+		// Test the state of the snapshots after running our first OnEpochCompleteHook
+		// but with no existing validators or stakers.
 
-		// Test SnapshotTopActiveValidatorsByStake is empty.
-		validatorEntries, err := utxoView.GetSnapshotTopActiveValidatorsByStake(10, 0)
+		// Test CurrentEpochNumber.
+		currentEpochNumber, err := utxoView().GetCurrentEpochNumber()
 		require.NoError(t, err)
-		require.Empty(t, validatorEntries)
+		require.Equal(t, currentEpochNumber, uint64(1))
 
-		// Test SnapshotGlobalActiveStakeAmountNanos is zero.
-		snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(0)
+		// Test SnapshotGlobalParamsEntry is populated.
+		snapshotGlobalParamsEntry, err := utxoView().GetSnapshotGlobalParamsEntry()
 		require.NoError(t, err)
-		require.True(t, snapshotGlobalActiveStakeAmountNanos.IsZero())
+		require.NotNil(t, snapshotGlobalParamsEntry)
 
-		// Test SnapshotLeaderSchedule is nil.
-		for index := range validatorPKIDs {
-			snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint16(index), 0)
-			require.NoError(t, err)
-			require.Nil(t, snapshotLeaderScheduleValidator)
-		}
+		_assertEmptyValidatorSnapshots()
 	}
- require.NoError(t, utxoView.RunEpochCompleteHook(blockHeight+1)) - require.NoError(t, utxoView.FlushToDb(blockHeight)) + _runOnEpochCompleteHook() } { - // Test the state of the snapshots after running the second - // OnEpochCompleteHook with existing validators and stakers. - // Test CurrentEpochNumber. - currentEpochNumber, err := utxoView.GetCurrentEpochNumber() + currentEpochNumber, err := utxoView().GetCurrentEpochNumber() require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(2)) // Test SnapshotGlobalParamsEntry is populated. - snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(1) + snapshotGlobalParamsEntry, err := utxoView().GetSnapshotGlobalParamsEntry() require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) + _assertEmptyValidatorSnapshots() + } + { + // Test RunOnEpochCompleteHook(). + _runOnEpochCompleteHook() + } + { + // Test CurrentEpochNumber. + currentEpochNumber, err := utxoView().GetCurrentEpochNumber() + require.NoError(t, err) + require.Equal(t, currentEpochNumber, uint64(3)) + + // Test SnapshotGlobalParamsEntry is populated. + snapshotGlobalParamsEntry, err := utxoView().GetSnapshotGlobalParamsEntry() + require.NoError(t, err) + require.NotNil(t, snapshotGlobalParamsEntry) + //require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) + // Test SnapshotValidatorByPKID is populated. for _, pkid := range validatorPKIDs { - snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorByPKID(pkid, 1) + snapshotValidatorEntry, err := utxoView().GetSnapshotValidatorByPKID(pkid) require.NoError(t, err) require.NotNil(t, snapshotValidatorEntry) } // Test SnapshotTopActiveValidatorsByStake is populated. 
- validatorEntries, err := utxoView.GetSnapshotTopActiveValidatorsByStake(10, 1) + validatorEntries, err := utxoView().GetSnapshotTopActiveValidatorsByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 7) require.Equal(t, validatorEntries[0].ValidatorPKID, m6PKID) @@ -291,45 +289,112 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, validatorEntries[6].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) // Test SnapshotGlobalActiveStakeAmountNanos is populated. - snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(1) + snapshotGlobalActiveStakeAmountNanos, err := utxoView().GetSnapshotGlobalActiveStakeAmountNanos() require.NoError(t, err) require.Equal(t, snapshotGlobalActiveStakeAmountNanos, uint256.NewInt().SetUint64(2800)) // Test SnapshotLeaderSchedule is populated. for index := range validatorPKIDs { - snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint16(index), 1) + snapshotLeaderScheduleValidator, err := utxoView().GetSnapshotLeaderScheduleValidator(uint16(index)) require.NoError(t, err) require.NotNil(t, snapshotLeaderScheduleValidator) } } { - // Test SnapshotGlobalParamsEntry for a future epoch is nil. - snapshotGlobalParamsEntry, err := utxoView.GetSnapshotGlobalParamsEntry(2) + // Test snapshotting changing stake. + + // m5 has 600 staked. + validatorEntry, err := utxoView().GetValidatorByPKID(m5PKID) require.NoError(t, err) - require.Nil(t, snapshotGlobalParamsEntry) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(600)) - // Test SnapshotValidatorByPKID for a future epoch is nil. - for _, pkid := range validatorPKIDs { - snapshotValidatorEntry, err := utxoView.GetSnapshotValidatorByPKID(pkid, 2) - require.NoError(t, err) - require.Nil(t, snapshotValidatorEntry) - } + // m5 stakes another 200. 
+ _registerAndStake(m5Pub, m5Priv, 200) - // Test SnapshotTopActiveValidatorsByStake for a future epoch is empty. - validatorEntries, err := utxoView.GetSnapshotTopActiveValidatorsByStake(10, 2) + // m5 has 800 staked. + validatorEntry, err = utxoView().GetValidatorByPKID(m5PKID) require.NoError(t, err) - require.Empty(t, validatorEntries) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(800)) + + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook() - // Test SnapshotGlobalActiveStakeAmountNanos for a future epoch is nil. - snapshotGlobalActiveStakeAmountNanos, err := utxoView.GetSnapshotGlobalActiveStakeAmountNanos(2) + // Snapshot m5 still has 600 staked. + validatorEntry, err = utxoView().GetSnapshotValidatorByPKID(m5PKID) require.NoError(t, err) - require.Nil(t, snapshotGlobalActiveStakeAmountNanos) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(600)) - // Test SnapshotLeaderSchedule for a future epoch is nil. - for index := range validatorPKIDs { - snapshotLeaderScheduleValidator, err := utxoView.GetSnapshotLeaderScheduleValidator(uint16(index), 2) - require.NoError(t, err) - require.Nil(t, snapshotLeaderScheduleValidator) - } + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook() + + // Snapshot m5 now has 800 staked. + validatorEntry, err = utxoView().GetSnapshotValidatorByPKID(m5PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(800)) + } + { + // Test snapshotting changing GlobalParams. + + // Update StakeLockupEpochDuration from default of 3 to 2. 
+ stakeLockupEpochDuration, err := utxoView().GetSnapshotGlobalParam(StakeLockupEpochDuration) + require.NoError(t, err) + require.Equal(t, stakeLockupEpochDuration, uint64(3)) + + _updateGlobalParamsEntryWithExtraData( + testMeta, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + map[string][]byte{StakeLockupEpochDuration.ToString(): UintToBuf(2)}, + ) + + require.Equal(t, utxoView().GlobalParamsEntry.StakeLockupEpochDuration, uint64(2)) + + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook() + + // Snapshot StakeLockupEpochDuration is still 3. + stakeLockupEpochDuration, err = utxoView().GetSnapshotGlobalParam(StakeLockupEpochDuration) + require.NoError(t, err) + require.Equal(t, stakeLockupEpochDuration, uint64(3)) + + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook() + + // Snapshot StakeLockupEpochDuration is updated to 2. + stakeLockupEpochDuration, err = utxoView().GetSnapshotGlobalParam(StakeLockupEpochDuration) + require.NoError(t, err) + require.Equal(t, stakeLockupEpochDuration, uint64(2)) + } + { + // Test snapshotting changing validator set. + + // m0 unregisters as a validator. + snapshotValidatorEntries, err := utxoView().GetSnapshotTopActiveValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, snapshotValidatorEntries, 7) + + _, err = _submitUnregisterAsValidatorTxn(testMeta, m0Pub, m0Priv, true) + require.NoError(t, err) + + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook() + + // m0 is still in the snapshot validator set. + snapshotValidatorEntries, err = utxoView().GetSnapshotTopActiveValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, snapshotValidatorEntries, 7) + + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook() + + // m0 is dropped from the snapshot validator set. 
+ snapshotValidatorEntries, err = utxoView().GetSnapshotTopActiveValidatorsByStake(10) + require.NoError(t, err) + require.Len(t, snapshotValidatorEntries, 6) } } diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index f59fd6150..a84d7397d 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -124,18 +124,14 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) { - // ParamUpdater set min fee rate + // ParamUpdater set MinFeeRateNanos. params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - _updateGlobalParamsEntryWithTestMeta( + _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - -1, - int64(testMeta.feeRateNanosPerKb), - -1, - -1, - -1, + map[string][]byte{}, ) } { diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 01ad5a747..8f4006981 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -11,34 +11,46 @@ import ( "sort" ) +const SnapshotLookbackNumEpochs uint64 = 2 + +func (bav *UtxoView) GetSnapshotEpochNumber() (uint64, error) { + // Retrieve the CurrentEpochNumber. + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return 0, errors.Wrapf(err, "GetSnapshotEpochNumber: problem retrieving CurrentEpochNumber: ") + } + if currentEpochNumber < SnapshotLookbackNumEpochs { + return 0, nil + } + return SafeUint64().Sub(currentEpochNumber, SnapshotLookbackNumEpochs) +} + // // SnapshotGlobalParamsEntry // -const SnapshotLookbackNumEpochs uint64 = 2 +type SnapshotGlobalParam string -func (bav *UtxoView) GetSnapshotGlobalParam(field string) (uint64, error) { - // Retrieve the CurrentEpochEntry. 
-	currentEpochEntry, err := bav.GetCurrentEpochEntry()
+const (
+	StakeLockupEpochDuration       SnapshotGlobalParam = "StakeLockupEpochDuration"
+	ValidatorJailEpochDuration     SnapshotGlobalParam = "ValidatorJailEpochDuration"
+	LeaderScheduleMaxNumValidators SnapshotGlobalParam = "LeaderScheduleMaxNumValidators"
+	EpochDurationNumBlocks         SnapshotGlobalParam = "EpochDurationNumBlocks"
+)
+
+func (snapshotGlobalParam SnapshotGlobalParam) ToString() string {
+	return string(snapshotGlobalParam)
+}
+
+func (bav *UtxoView) GetSnapshotGlobalParam(field SnapshotGlobalParam) (uint64, error) {
+	// Retrieve the SnapshotGlobalParamsEntry.
+	globalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry()
 	if err != nil {
-		return 0, errors.Wrapf(err, "GetSnapshotGlobalParam: problem retrieving CurrentEpochEntry: ")
+		return 0, errors.Wrapf(err, "GetSnapshotGlobalParam: problem retrieving SnapshotGlobalParamsEntry: ")
 	}
-
-	// Calculate the SnapshotAtEpochNumber from which to retrieve the SnapshotGlobalParamsEntry.
-	// If the SnapshotAtEpochNumber < 0, then skip and just use the default value.
-	globalParamsEntry := &GlobalParamsEntry{}
-	if currentEpochEntry.EpochNumber >= SnapshotLookbackNumEpochs {
-		snapshotAtEpochNumber, err := SafeUint64().Sub(currentEpochEntry.EpochNumber, SnapshotLookbackNumEpochs)
-		if err != nil {
-			return 0, errors.Wrapf(err, "GetSnapshotGlobalParam: problem calculating SnapshotAtEpochNumber: ")
-		}
-		globalParamsEntry, err = bav.GetSnapshotGlobalParamsEntry(snapshotAtEpochNumber)
-		if err != nil {
-			return 0, errors.Wrapf(err, "GetSnapshotGlobalParam: problem retrieving SnapshotGlobalParamsEntry: ")
-		}
-		if globalParamsEntry == nil {
-			return 0, errors.New("GetSnapshotGlobalParam: SnapshotGlobalParamsEntry is nil")
-		}
+	if globalParamsEntry == nil {
+		// We will use the default values below.
+		globalParamsEntry = &GlobalParamsEntry{}
 	}
 
 	// Return the corresponding field.
Either the updated value if @@ -72,7 +84,12 @@ func (bav *UtxoView) GetSnapshotGlobalParam(field string) (uint64, error) { return 0, fmt.Errorf("GetSnapshotGlobalParam: invalid field provided: %s", field) } -func (bav *UtxoView) GetSnapshotGlobalParamsEntry(snapshotAtEpochNumber uint64) (*GlobalParamsEntry, error) { +func (bav *UtxoView) GetSnapshotGlobalParamsEntry() (*GlobalParamsEntry, error) { + // Calculate the SnapshotEpochNumber. + snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() + if err != nil { + return nil, errors.Wrapf(err, "GetSnapshotGlobalParamsEntry: problem calculating SnapshotEpochNumber: ") + } // Check the UtxoView first. if globalParamsEntry, exists := bav.SnapshotGlobalParamEntries[snapshotAtEpochNumber]; exists { return globalParamsEntry, nil @@ -204,7 +221,12 @@ func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64) err return nil } -func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { +func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID) (*ValidatorEntry, error) { + // Calculate the SnapshotEpochNumber. + snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() + if err != nil { + return nil, errors.Wrapf(err, "GetSnapshotValidatorByPKID: problem calculating SnapshotEpochNumber: ") + } // Check the UtxoView first. mapKey := SnapshotValidatorMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *pkid} if validatorEntry, exists := bav.SnapshotValidatorEntries[mapKey]; exists { @@ -225,7 +247,13 @@ func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID, snapshotAtEpochNumbe return validatorEntry, nil } -func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit uint64, snapshotAtEpochNumber uint64) ([]*ValidatorEntry, error) { +func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit uint64) ([]*ValidatorEntry, error) { + // Calculate the SnapshotEpochNumber. 
+ snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() + if err != nil { + return nil, errors.Wrapf(err, "GetSnapshotTopActiveValidatorsByStake: problem calculating SnapshotEpochNumber: ") + } + // Create a slice of all UtxoView ValidatorEntries to prevent pulling them from the db. var utxoViewValidatorEntries []*ValidatorEntry for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { if mapKey.SnapshotAtEpochNumber == snapshotAtEpochNumber { @@ -250,7 +278,7 @@ func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit uint64, snapsho SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID, } if _, exists := bav.SnapshotValidatorEntries[mapKey]; !exists { - bav._setValidatorEntryMappings(validatorEntry) + bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) } } // Pull !isDeleted, active ValidatorEntries from the UtxoView with stake > 0. @@ -475,7 +503,12 @@ func DBEnumerateAllCurrentValidators(handle *badger.DB, pkidsToSkip []*PKID) ([] // SnapshotGlobalActiveStakeAmountNanos // -func (bav *UtxoView) GetSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumber uint64) (*uint256.Int, error) { +func (bav *UtxoView) GetSnapshotGlobalActiveStakeAmountNanos() (*uint256.Int, error) { + // Calculate the SnapshotEpochNumber. + snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() + if err != nil { + return nil, errors.Wrapf(err, "GetSnapshotGlobalActiveStakeAmountNanos: problem calculating SnapshotEpochNumber: ") + } // Check the UtxoView first. 
if globalActiveStakeAmountNanos, exists := bav.SnapshotGlobalActiveStakeAmountNanos[snapshotAtEpochNumber]; exists { return globalActiveStakeAmountNanos.Clone(), nil @@ -488,10 +521,11 @@ func (bav *UtxoView) GetSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumb "GetSnapshotGlobalActiveStakeAmountNanos: problem retrieving SnapshotGlobalActiveStakeAmountNanos from db: ", ) } - if globalActiveStakeAmountNanos != nil { - // Cache the result in the UtxoView. - bav._setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos, snapshotAtEpochNumber) + if globalActiveStakeAmountNanos == nil { + globalActiveStakeAmountNanos = uint256.NewInt() } + // Cache the result in the UtxoView. + bav._setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos, snapshotAtEpochNumber) return globalActiveStakeAmountNanos, nil } @@ -586,11 +620,16 @@ type SnapshotLeaderScheduleMapKey struct { LeaderIndex uint16 } -func (bav *UtxoView) GetSnapshotLeaderScheduleValidator(leaderIndex uint16, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { +func (bav *UtxoView) GetSnapshotLeaderScheduleValidator(leaderIndex uint16) (*ValidatorEntry, error) { + // Calculate the SnapshotEpochNumber. + snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() + if err != nil { + return nil, errors.Wrapf(err, " GetSnapshotLeaderScheduleValidator: problem calculating SnapshotEpochNumber: ") + } // First, check the UtxoView. mapKey := SnapshotLeaderScheduleMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, LeaderIndex: leaderIndex} if validatorPKID, exists := bav.SnapshotLeaderSchedule[mapKey]; exists { - return bav.GetSnapshotValidatorByPKID(validatorPKID, snapshotAtEpochNumber) + return bav.GetSnapshotValidatorByPKID(validatorPKID) } // Next, check the db. 
validatorEntry, err := DBGetSnapshotLeaderScheduleValidator(bav.Handle, bav.Snapshot, leaderIndex, snapshotAtEpochNumber) From 3b894f8beecd696ca18bb58379901fa3db227b92 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 7 Jun 2023 13:59:43 -0400 Subject: [PATCH 096/762] Safely add uint64s. --- lib/block_view_stake.go | 8 +++++++- lib/block_view_validator.go | 8 +++++++- lib/pos_epoch_complete_hook.go | 20 +++++++++++++------- lib/pos_epoch_complete_hook_test.go | 18 +++++++++++++++++- lib/pos_snapshot_entries.go | 2 ++ 5 files changed, 46 insertions(+), 10 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index fca40f504..bfd340407 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1960,8 +1960,14 @@ func (bav *UtxoView) IsValidUnlockStakeMetadata(transactorPkBytes []byte, metada return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error retrieving snapshot StakeLockupEpochDuration: ") } + // Calculate UnlockableAtEpochNumber. + unlockableAtEpochNumber, err := SafeUint64().Add(metadata.EndEpochNumber, stakeLockupEpochDuration) + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error calculating UnlockableAtEpochNumber: ") + } + // Validate EndEpochNumber + StakeLockupEpochDuration <= CurrentEpochNumber. - if metadata.EndEpochNumber+stakeLockupEpochDuration > currentEpochNumber { + if unlockableAtEpochNumber > currentEpochNumber { return errors.Wrapf(RuleErrorInvalidUnlockStakeMustWaitLockupDuration, "UtxoView.IsValidUnlockStakeMetadata: ") } diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index d77e79918..7e5f4369c 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1719,8 +1719,14 @@ func (bav *UtxoView) IsValidUnjailValidatorMetadata(transactorPublicKey []byte) return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error retrieving snapshot ValidatorJailEpochDuration: ") } + // Calculate UnjailableAtEpochNumber. 
+ unjailableAtEpochNumber, err := SafeUint64().Add(validatorEntry.JailedAtEpochNumber, validatorJailEpochDuration) + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error calculating UnjailableAtEpochNumber: ") + } + // Validate sufficient epochs have elapsed for validator to be unjailed. - if validatorEntry.JailedAtEpochNumber+validatorJailEpochDuration > currentEpochNumber { + if unjailableAtEpochNumber > currentEpochNumber { return errors.Wrapf(RuleErrorUnjailingValidatorTooEarly, "UtxoView.IsValidUnjailValidatorMetadata: ") } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 30157798a..86aafa006 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -47,12 +47,6 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { return errors.New("RunEpochCompleteHook: CurrentEpochEntry is nil, this should never happen") } - // Retrieve the SnapshotGlobalParam: EpochDurationNumBlocks. - epochDurationNumBlocks, err := bav.GetSnapshotGlobalParam(EpochDurationNumBlocks) - if err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving snapshot EpochDurationNumBlocks: ") - } - // Snapshot the current GlobalParamsEntry. bav._setSnapshotGlobalParamsEntry(bav.GlobalParamsEntry, currentEpochEntry.EpochNumber) @@ -83,10 +77,22 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // TODO: Jail inactive validators. // TODO: Delete old snapshots that are no longer used. + // Retrieve the SnapshotGlobalParam: EpochDurationNumBlocks. + epochDurationNumBlocks, err := bav.GetSnapshotGlobalParam(EpochDurationNumBlocks) + if err != nil { + return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving snapshot EpochDurationNumBlocks: ") + } + + // Calculate the NextEpochFinalBlockHeight. 
+ nextEpochFinalBlockHeight, err := SafeUint64().Add(blockHeight, epochDurationNumBlocks) + if err != nil { + return errors.Wrapf(err, "RunEpochCompleteHook: problem calculating NextEpochFinalBlockHeight: ") + } + // Roll-over a new epoch by setting a new CurrentEpochEntry. nextEpochEntry := &EpochEntry{ EpochNumber: currentEpochEntry.EpochNumber + 1, - FinalBlockHeight: blockHeight + epochDurationNumBlocks, + FinalBlockHeight: nextEpochFinalBlockHeight, } bav._setCurrentEpochEntry(nextEpochEntry) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index ed9381795..194b1f5c3 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -181,6 +181,9 @@ func TestRunEpochCompleteHook(t *testing.T) { { // ParamUpdater set MinFeeRateNanos and ValidatorJailEpochDuration. params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + require.Zero(t, utxoView().GlobalParamsEntry.MinimumNetworkFeeNanosPerKB) + require.Zero(t, utxoView().GlobalParamsEntry.ValidatorJailEpochDuration) + _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, @@ -188,6 +191,15 @@ func TestRunEpochCompleteHook(t *testing.T) { paramUpdaterPriv, map[string][]byte{ValidatorJailEpochDuration.ToString(): UintToBuf(4)}, ) + + require.Equal(t, utxoView().GlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) + require.Equal(t, utxoView().GlobalParamsEntry.ValidatorJailEpochDuration, uint64(4)) + + // We need to reset the UniversalUtxoView since the RegisterAsValidator and Stake + // txn test helper utils use and flush the UniversalUtxoView. Otherwise, the + // updated GlobalParamsEntry will be overwritten by the default one cached in + // the UniversalUtxoView when it is flushed. + mempool.universalUtxoView._ResetViewMappingsAfterFlush() } { // Test the state of the snapshots prior to running our first OnEpochCompleteHook. 
@@ -221,6 +233,8 @@ func TestRunEpochCompleteHook(t *testing.T) { snapshotGlobalParamsEntry, err := utxoView().GetSnapshotGlobalParamsEntry() require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) + require.Equal(t, utxoView().GlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) + require.Equal(t, snapshotGlobalParamsEntry.ValidatorJailEpochDuration, uint64(4)) _assertEmptyValidatorSnapshots() } @@ -253,6 +267,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) + require.Equal(t, snapshotGlobalParamsEntry.ValidatorJailEpochDuration, uint64(4)) _assertEmptyValidatorSnapshots() } @@ -270,7 +285,8 @@ func TestRunEpochCompleteHook(t *testing.T) { snapshotGlobalParamsEntry, err := utxoView().GetSnapshotGlobalParamsEntry() require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) - //require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) + require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) + require.Equal(t, snapshotGlobalParamsEntry.ValidatorJailEpochDuration, uint64(4)) // Test SnapshotValidatorByPKID is populated. 
for _, pkid := range validatorPKIDs { diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 8f4006981..228bbd0da 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -112,6 +112,7 @@ func (bav *UtxoView) GetSnapshotGlobalParamsEntry() (*GlobalParamsEntry, error) func (bav *UtxoView) _setSnapshotGlobalParamsEntry(globalParamsEntry *GlobalParamsEntry, snapshotAtEpochNumber uint64) { if globalParamsEntry == nil { glog.Errorf("_setSnapshotGlobalParamsEntry: called with nil entry, this should never happen") + return } bav.SnapshotGlobalParamEntries[snapshotAtEpochNumber] = globalParamsEntry.Copy() } @@ -532,6 +533,7 @@ func (bav *UtxoView) GetSnapshotGlobalActiveStakeAmountNanos() (*uint256.Int, er func (bav *UtxoView) _setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos *uint256.Int, snapshotAtEpochNumber uint64) { if globalActiveStakeAmountNanos == nil { glog.Errorf("_setSnapshotGlobalActiveStakeAmountNanos: called with nil entry, this should never happen") + return } bav.SnapshotGlobalActiveStakeAmountNanos[snapshotAtEpochNumber] = globalActiveStakeAmountNanos.Clone() } From e6bc498adb1735f01d94dbd5c4950dc807511525 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 7 Jun 2023 14:01:37 -0400 Subject: [PATCH 097/762] Retrigger CI. 
From 4097260615872f53bccc0811e4144b720cbb86df Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 7 Jun 2023 14:38:57 -0400 Subject: [PATCH 098/762] Add jailing validator logic to OnEpochCompleteHook.a --- lib/block_view_stake.go | 2 +- lib/block_view_types.go | 11 ++++++++ lib/block_view_validator.go | 32 ++++++++++++++++++++- lib/constants.go | 11 ++++++++ lib/pos_epoch_complete_hook.go | 6 ++-- lib/pos_snapshot_entries.go | 51 ++++++++++++++++++++++++++++++---- 6 files changed, 104 insertions(+), 9 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index bfd340407..ef24e56e2 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1954,7 +1954,7 @@ func (bav *UtxoView) IsValidUnlockStakeMetadata(transactorPkBytes []byte, metada return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error retrieving CurrentEpochNumber: ") } - // Retrieve SnapshotGlobalParam: StakeLockupEpochDuration. + // Retrieve the SnapshotGlobalParam: StakeLockupEpochDuration. stakeLockupEpochDuration, err := bav.GetSnapshotGlobalParam(StakeLockupEpochDuration) if err != nil { return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error retrieving snapshot StakeLockupEpochDuration: ") diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 3f178aafe..82ec9e9e1 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3787,6 +3787,11 @@ type GlobalParamsEntry struct { // EpochDurationNumBlocks is the number of blocks included in one epoch. EpochDurationNumBlocks uint64 + + // JailInactiveValidatorEpochThreshold is the number of epochs we allow + // a validator to be inactive for (neither voting nor proposing blocks) + // before they are jailed. 
+ JailInactiveValidatorEpochThreshold uint64 } func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { @@ -3801,6 +3806,7 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { ValidatorJailEpochDuration: gp.ValidatorJailEpochDuration, LeaderScheduleMaxNumValidators: gp.LeaderScheduleMaxNumValidators, EpochDurationNumBlocks: gp.EpochDurationNumBlocks, + JailInactiveValidatorEpochThreshold: gp.JailInactiveValidatorEpochThreshold, } } @@ -3820,6 +3826,7 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.ValidatorJailEpochDuration)...) data = append(data, UintToBuf(gp.LeaderScheduleMaxNumValidators)...) data = append(data, UintToBuf(gp.EpochDurationNumBlocks)...) + data = append(data, UintToBuf(gp.JailInactiveValidatorEpochThreshold)...) } return data } @@ -3870,6 +3877,10 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading EpochDurationNumBlocks: ") } + gp.JailInactiveValidatorEpochThreshold, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading JailInactiveValidatorEpochThreshold: ") + } } return nil } diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 7e5f4369c..cdba45fd2 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1713,7 +1713,7 @@ func (bav *UtxoView) IsValidUnjailValidatorMetadata(transactorPublicKey []byte) return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error retrieving CurrentEpochNumber: ") } - // Retrieve SnapshotGlobalParam: ValidatorJailEpochDuration. + // Retrieve the SnapshotGlobalParam: ValidatorJailEpochDuration. 
validatorJailEpochDuration, err := bav.GetSnapshotGlobalParam(ValidatorJailEpochDuration) if err != nil { return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error retrieving snapshot ValidatorJailEpochDuration: ") @@ -1924,6 +1924,36 @@ func (bav *UtxoView) GetGlobalActiveStakeAmountNanos() (*uint256.Int, error) { return globalActiveStakeAmountNanos, nil } +func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry) (bool, error) { + // Return false if the validator is already jailed. We do not want to jail + // them again. And we want to retain their original JailedAtEpochNumber so + // that they can eventually unjail themselves. + if validatorEntry.Status() == ValidatorStatusJailed { + return false, nil + } + + // Retrieve the CurrentEpochNumber. + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving CurrentEpochNumber: ") + } + + // Retrieve the SnapshotGlobalParam: JailInactiveValidatorEpochThreshold. + jailInactiveValidatorEpochThreshold, err := bav.GetSnapshotGlobalParam(JailInactiveValidatorEpochThreshold) + if err != nil { + return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving JailInactiveValidatorEpochThreshold: ") + } + + // Calculate JailAtEpochNumber. + jailAtEpochNumber, err := SafeUint64().Add(validatorEntry.LastActiveAtEpochNumber, jailInactiveValidatorEpochThreshold) + if err != nil { + return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error calculating JailAtEpochNumber: ") + } + + // Return true if LastActiveAtEpochNumber + JailInactiveValidatorEpochThreshold <= CurrentEpochNumber. + return jailAtEpochNumber <= currentEpochNumber, nil +} + func (bav *UtxoView) JailValidator(validatorEntry *ValidatorEntry) error { // Retrieve the CurrentEpochNumber. 
currentEpochNumber, err := bav.GetCurrentEpochNumber() diff --git a/lib/constants.go b/lib/constants.go index 7741090fd..c8cae81bd 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -630,6 +630,11 @@ type DeSoParams struct { // DefaultEpochDurationNumBlocks is the default number of blocks included in one epoch. DefaultEpochDurationNumBlocks uint64 + // DefaultJailInactiveValidatorEpochThreshold is the default number of epochs + // we allow a validator to be inactive for (neither voting nor proposing blocks) + // before they are jailed. + DefaultJailInactiveValidatorEpochThreshold uint64 + ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -1022,6 +1027,9 @@ var DeSoMainnetParams = DeSoParams{ // The number of blocks in one epoch DefaultEpochDurationNumBlocks: uint64(3600), + // The number of epochs before an inactive validator is jailed + DefaultJailInactiveValidatorEpochThreshold: uint64(48), + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1268,6 +1276,9 @@ var DeSoTestnetParams = DeSoParams{ // The number of blocks in one epoch DefaultEpochDurationNumBlocks: uint64(3600), + // The number of epochs before an inactive validator is jailed + DefaultJailInactiveValidatorEpochThreshold: uint64(48), + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 86aafa006..1d80fe938 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -50,7 +50,10 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Snapshot the current GlobalParamsEntry. 
bav._setSnapshotGlobalParamsEntry(bav.GlobalParamsEntry, currentEpochEntry.EpochNumber) - // Snapshot the current ValidatorEntries. + // Snapshot the current ValidatorEntries. This loops through all validators to snapshot them, O(N). + // To save on runtime, we also check if we should jail each validator and jail them if so. + // We optionally jail a validator after we snapshot them. A jailed validator should be considered jailed + // in the new epoch starting after this OnEpochCompleteHook, and not the previous epoch which is snapshot. if err = bav.SnapshotCurrentValidators(currentEpochEntry.EpochNumber); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: problem snapshotting validators: ") } @@ -74,7 +77,6 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { bav._setSnapshotLeaderScheduleValidator(validatorPKID, uint16(index), currentEpochEntry.EpochNumber) } - // TODO: Jail inactive validators. // TODO: Delete old snapshots that are no longer used. // Retrieve the SnapshotGlobalParam: EpochDurationNumBlocks. 
diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 228bbd0da..1e1e6a4ac 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -32,10 +32,11 @@ func (bav *UtxoView) GetSnapshotEpochNumber() (uint64, error) { type SnapshotGlobalParam string const ( - StakeLockupEpochDuration SnapshotGlobalParam = "StakeLockupEpochDuration" - ValidatorJailEpochDuration SnapshotGlobalParam = "ValidatorJailEpochDuration" - LeaderScheduleMaxNumValidators SnapshotGlobalParam = "LeaderScheduleMaxNumValidators" - EpochDurationNumBlocks SnapshotGlobalParam = "EpochDurationNumBlocks" + StakeLockupEpochDuration SnapshotGlobalParam = "StakeLockupEpochDuration" + ValidatorJailEpochDuration SnapshotGlobalParam = "ValidatorJailEpochDuration" + LeaderScheduleMaxNumValidators SnapshotGlobalParam = "LeaderScheduleMaxNumValidators" + EpochDurationNumBlocks SnapshotGlobalParam = "EpochDurationNumBlocks" + JailInactiveValidatorEpochThreshold SnapshotGlobalParam = "JailInactiveValidatorEpochThreshold" ) func (snapshotGlobalParam SnapshotGlobalParam) ToString() string { @@ -49,7 +50,7 @@ func (bav *UtxoView) GetSnapshotGlobalParam(field SnapshotGlobalParam) (uint64, return 0, errors.Wrapf(err, "GetSnapshotGlobalParam: problem retrieving SnapshotGlobalParamsEntry: ") } if globalParamsEntry == nil { - // We will use hte default values below. + // We will use the default values below. 
globalParamsEntry = &GlobalParamsEntry{} } @@ -79,6 +80,12 @@ func (bav *UtxoView) GetSnapshotGlobalParam(field SnapshotGlobalParam) (uint64, return globalParamsEntry.EpochDurationNumBlocks, nil } return bav.Params.DefaultEpochDurationNumBlocks, nil + + case JailInactiveValidatorEpochThreshold: + if globalParamsEntry.JailInactiveValidatorEpochThreshold != 0 { + return globalParamsEntry.JailInactiveValidatorEpochThreshold, nil + } + return bav.Params.DefaultJailInactiveValidatorEpochThreshold, nil } return 0, fmt.Errorf("GetSnapshotGlobalParam: invalid field provided: %s", field) @@ -207,6 +214,23 @@ func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64) err if !validatorEntry.isDeleted { // We only want to snapshot !isDeleted ValidatorEntries. bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) + + // Check if we should jail the validator. + shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry) + if err != nil { + return errors.Wrapf( + err, "SnapshotValidators: problem determining if should jail validator %v: ", validatorEntry.ValidatorPKID, + ) + } + // Jail them if so. + if shouldJailValidator { + err = bav.JailValidator(validatorEntry) + if err != nil { + return errors.Wrapf( + err, "SnapshotValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, + ) + } + } } // We don't want to retrieve any ValidatorEntries from the db that are present in the UtxoView. utxoViewValidatorPKIDs = append(utxoViewValidatorPKIDs, validatorEntry.ValidatorPKID) @@ -218,6 +242,23 @@ func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64) err } for _, validatorEntry := range dbValidatorEntries { bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) + + // Check if we should jail the validator. 
+ shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry) + if err != nil { + return errors.Wrapf( + err, "SnapshotValidators: problem determining if should jail validator %v: ", validatorEntry.ValidatorPKID, + ) + } + // Jail them if so. + if shouldJailValidator { + err = bav.JailValidator(validatorEntry) + if err != nil { + return errors.Wrapf( + err, "SnapshotValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, + ) + } + } } return nil } From 1f1e6daa5ad8ce549d4b54f01fd1bca71948234d Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 7 Jun 2023 14:47:05 -0400 Subject: [PATCH 099/762] Rename JailInactiveValidatorGracePeriodEpochs. --- lib/block_view_types.go | 36 ++++++++++++++++++------------------ lib/block_view_validator.go | 10 +++++----- lib/constants.go | 8 ++++---- lib/pos_snapshot_entries.go | 28 ++++++++++++++-------------- 4 files changed, 41 insertions(+), 41 deletions(-) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 82ec9e9e1..52f8a29a1 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3788,25 +3788,25 @@ type GlobalParamsEntry struct { // EpochDurationNumBlocks is the number of blocks included in one epoch. EpochDurationNumBlocks uint64 - // JailInactiveValidatorEpochThreshold is the number of epochs we allow - // a validator to be inactive for (neither voting nor proposing blocks) - // before they are jailed. - JailInactiveValidatorEpochThreshold uint64 + // JailInactiveValidatorGracePeriodEpochs is the number of epochs we + // allow a validator to be inactive for (neither voting nor proposing + // blocks) before they are jailed. 
+ JailInactiveValidatorGracePeriodEpochs uint64 } func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { return &GlobalParamsEntry{ - USDCentsPerBitcoin: gp.USDCentsPerBitcoin, - CreateProfileFeeNanos: gp.CreateProfileFeeNanos, - CreateNFTFeeNanos: gp.CreateNFTFeeNanos, - MaxCopiesPerNFT: gp.MaxCopiesPerNFT, - MinimumNetworkFeeNanosPerKB: gp.MinimumNetworkFeeNanosPerKB, - MaxNonceExpirationBlockHeightOffset: gp.MaxNonceExpirationBlockHeightOffset, - StakeLockupEpochDuration: gp.StakeLockupEpochDuration, - ValidatorJailEpochDuration: gp.ValidatorJailEpochDuration, - LeaderScheduleMaxNumValidators: gp.LeaderScheduleMaxNumValidators, - EpochDurationNumBlocks: gp.EpochDurationNumBlocks, - JailInactiveValidatorEpochThreshold: gp.JailInactiveValidatorEpochThreshold, + USDCentsPerBitcoin: gp.USDCentsPerBitcoin, + CreateProfileFeeNanos: gp.CreateProfileFeeNanos, + CreateNFTFeeNanos: gp.CreateNFTFeeNanos, + MaxCopiesPerNFT: gp.MaxCopiesPerNFT, + MinimumNetworkFeeNanosPerKB: gp.MinimumNetworkFeeNanosPerKB, + MaxNonceExpirationBlockHeightOffset: gp.MaxNonceExpirationBlockHeightOffset, + StakeLockupEpochDuration: gp.StakeLockupEpochDuration, + ValidatorJailEpochDuration: gp.ValidatorJailEpochDuration, + LeaderScheduleMaxNumValidators: gp.LeaderScheduleMaxNumValidators, + EpochDurationNumBlocks: gp.EpochDurationNumBlocks, + JailInactiveValidatorGracePeriodEpochs: gp.JailInactiveValidatorGracePeriodEpochs, } } @@ -3826,7 +3826,7 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.ValidatorJailEpochDuration)...) data = append(data, UintToBuf(gp.LeaderScheduleMaxNumValidators)...) data = append(data, UintToBuf(gp.EpochDurationNumBlocks)...) - data = append(data, UintToBuf(gp.JailInactiveValidatorEpochThreshold)...) + data = append(data, UintToBuf(gp.JailInactiveValidatorGracePeriodEpochs)...) 
} return data } @@ -3877,9 +3877,9 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading EpochDurationNumBlocks: ") } - gp.JailInactiveValidatorEpochThreshold, err = ReadUvarint(rr) + gp.JailInactiveValidatorGracePeriodEpochs, err = ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading JailInactiveValidatorEpochThreshold: ") + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading JailInactiveValidatorGracePeriodEpochs: ") } } return nil diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index cdba45fd2..193a103f0 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1938,19 +1938,19 @@ func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry) (bool, return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving CurrentEpochNumber: ") } - // Retrieve the SnapshotGlobalParam: JailInactiveValidatorEpochThreshold. - jailInactiveValidatorEpochThreshold, err := bav.GetSnapshotGlobalParam(JailInactiveValidatorEpochThreshold) + // Retrieve the SnapshotGlobalParam: JailInactiveValidatorGracePeriodEpochs. + jailInactiveValidatorGracePeriodEpochs, err := bav.GetSnapshotGlobalParam(JailInactiveValidatorGracePeriodEpochs) if err != nil { - return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving JailInactiveValidatorEpochThreshold: ") + return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving JailInactiveValidatorGracePeriodEpochs: ") } // Calculate JailAtEpochNumber. 
- jailAtEpochNumber, err := SafeUint64().Add(validatorEntry.LastActiveAtEpochNumber, jailInactiveValidatorEpochThreshold) + jailAtEpochNumber, err := SafeUint64().Add(validatorEntry.LastActiveAtEpochNumber, jailInactiveValidatorGracePeriodEpochs) if err != nil { return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error calculating JailAtEpochNumber: ") } - // Return true if LastActiveAtEpochNumber + JailInactiveValidatorEpochThreshold <= CurrentEpochNumber. + // Return true if LastActiveAtEpochNumber + JailInactiveValidatorGracePeriodEpochs <= CurrentEpochNumber. return jailAtEpochNumber <= currentEpochNumber, nil } diff --git a/lib/constants.go b/lib/constants.go index c8cae81bd..44f38127a 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -630,10 +630,10 @@ type DeSoParams struct { // DefaultEpochDurationNumBlocks is the default number of blocks included in one epoch. DefaultEpochDurationNumBlocks uint64 - // DefaultJailInactiveValidatorEpochThreshold is the default number of epochs + // DefaultJailInactiveValidatorGracePeriodEpochs is the default number of epochs // we allow a validator to be inactive for (neither voting nor proposing blocks) // before they are jailed. 
- DefaultJailInactiveValidatorEpochThreshold uint64 + DefaultJailInactiveValidatorGracePeriodEpochs uint64 ForkHeights ForkHeights @@ -1028,7 +1028,7 @@ var DeSoMainnetParams = DeSoParams{ DefaultEpochDurationNumBlocks: uint64(3600), // The number of epochs before an inactive validator is jailed - DefaultJailInactiveValidatorEpochThreshold: uint64(48), + DefaultJailInactiveValidatorGracePeriodEpochs: uint64(48), ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), @@ -1277,7 +1277,7 @@ var DeSoTestnetParams = DeSoParams{ DefaultEpochDurationNumBlocks: uint64(3600), // The number of epochs before an inactive validator is jailed - DefaultJailInactiveValidatorEpochThreshold: uint64(48), + DefaultJailInactiveValidatorGracePeriodEpochs: uint64(48), ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 1e1e6a4ac..3fef0e0db 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -32,11 +32,11 @@ func (bav *UtxoView) GetSnapshotEpochNumber() (uint64, error) { type SnapshotGlobalParam string const ( - StakeLockupEpochDuration SnapshotGlobalParam = "StakeLockupEpochDuration" - ValidatorJailEpochDuration SnapshotGlobalParam = "ValidatorJailEpochDuration" - LeaderScheduleMaxNumValidators SnapshotGlobalParam = "LeaderScheduleMaxNumValidators" - EpochDurationNumBlocks SnapshotGlobalParam = "EpochDurationNumBlocks" - JailInactiveValidatorEpochThreshold SnapshotGlobalParam = "JailInactiveValidatorEpochThreshold" + StakeLockupEpochDuration SnapshotGlobalParam = "StakeLockupEpochDuration" + ValidatorJailEpochDuration SnapshotGlobalParam = "ValidatorJailEpochDuration" + LeaderScheduleMaxNumValidators SnapshotGlobalParam = "LeaderScheduleMaxNumValidators" + EpochDurationNumBlocks SnapshotGlobalParam = "EpochDurationNumBlocks" + JailInactiveValidatorGracePeriodEpochs 
SnapshotGlobalParam = "JailInactiveValidatorGracePeriodEpochs" ) func (snapshotGlobalParam SnapshotGlobalParam) ToString() string { @@ -81,11 +81,11 @@ func (bav *UtxoView) GetSnapshotGlobalParam(field SnapshotGlobalParam) (uint64, } return bav.Params.DefaultEpochDurationNumBlocks, nil - case JailInactiveValidatorEpochThreshold: - if globalParamsEntry.JailInactiveValidatorEpochThreshold != 0 { - return globalParamsEntry.JailInactiveValidatorEpochThreshold, nil + case JailInactiveValidatorGracePeriodEpochs: + if globalParamsEntry.JailInactiveValidatorGracePeriodEpochs != 0 { + return globalParamsEntry.JailInactiveValidatorGracePeriodEpochs, nil } - return bav.Params.DefaultJailInactiveValidatorEpochThreshold, nil + return bav.Params.DefaultJailInactiveValidatorGracePeriodEpochs, nil } return 0, fmt.Errorf("GetSnapshotGlobalParam: invalid field provided: %s", field) @@ -219,13 +219,14 @@ func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64) err shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry) if err != nil { return errors.Wrapf( - err, "SnapshotValidators: problem determining if should jail validator %v: ", validatorEntry.ValidatorPKID, + err, + "SnapshotValidators: problem determining if should jail validator %v: ", + validatorEntry.ValidatorPKID, ) } // Jail them if so. if shouldJailValidator { - err = bav.JailValidator(validatorEntry) - if err != nil { + if err = bav.JailValidator(validatorEntry); err != nil { return errors.Wrapf( err, "SnapshotValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, ) @@ -252,8 +253,7 @@ func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64) err } // Jail them if so. 
if shouldJailValidator { - err = bav.JailValidator(validatorEntry) - if err != nil { + if err = bav.JailValidator(validatorEntry); err != nil { return errors.Wrapf( err, "SnapshotValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, ) From 810b551e1848ca660e16eb8010a5e5d00b687ad9 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Thu, 8 Jun 2023 11:29:04 -0400 Subject: [PATCH 100/762] Add test for jailing logic. --- lib/block_view.go | 10 ++++ lib/pos_epoch_complete_hook_test.go | 87 ++++++++++++++++++++++++++++- 2 files changed, 95 insertions(+), 2 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 9fcbb9450..2ba6de0e3 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3041,6 +3041,16 @@ func (bav *UtxoView) _connectUpdateGlobalParams( return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode EpochDurationNumBlocks as uint64") } } + if len(extraData[JailInactiveValidatorGracePeriodEpochs.ToString()]) > 0 { + newGlobalParamsEntry.JailInactiveValidatorGracePeriodEpochs, bytesRead = Uvarint( + extraData[JailInactiveValidatorGracePeriodEpochs.ToString()], + ) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode JailInactiveValidatorGracePeriodEpochs as uint64", + ) + } + } } var newForbiddenPubKeyEntry *ForbiddenPubKeyEntry diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 194b1f5c3..26536c93f 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -179,7 +179,8 @@ func TestRunEpochCompleteHook(t *testing.T) { params.DefaultEpochDurationNumBlocks = uint64(1) { - // ParamUpdater set MinFeeRateNanos and ValidatorJailEpochDuration. + // ParamUpdater set MinFeeRateNanos, ValidatorJailEpochDuration, + // and JailInactiveValidatorGracePeriodEpochs. 
params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true require.Zero(t, utxoView().GlobalParamsEntry.MinimumNetworkFeeNanosPerKB) require.Zero(t, utxoView().GlobalParamsEntry.ValidatorJailEpochDuration) @@ -189,7 +190,10 @@ func TestRunEpochCompleteHook(t *testing.T) { testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - map[string][]byte{ValidatorJailEpochDuration.ToString(): UintToBuf(4)}, + map[string][]byte{ + ValidatorJailEpochDuration.ToString(): UintToBuf(4), + JailInactiveValidatorGracePeriodEpochs.ToString(): UintToBuf(10), + }, ) require.Equal(t, utxoView().GlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) @@ -413,4 +417,83 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Len(t, snapshotValidatorEntries, 6) } + { + // Test jailing inactive validators. + // + // The CurrentEpochNumber is 9. All validators were last active in epoch 1 + // which is the epoch in which they registered. + // + // The JailInactiveValidatorGracePeriodEpochs is 10 epochs. So all + // validators should be jailed after epoch 11, at the start of epoch 12. + // + // The SnapshotLookbackNumEpochs is 2, so all registered snapshot validators + // should be considered jailed after epoch 13, at the start of epoch 14. + + // Define helper utils. + getCurrentEpochNumber := func() int { + currentEpochNumber, err := utxoView().GetCurrentEpochNumber() + require.NoError(t, err) + return int(currentEpochNumber) + } + + getNumCurrentActiveValidators := func() int { + validatorEntries, err := utxoView().GetTopActiveValidatorsByStake(10) + require.NoError(t, err) + return len(validatorEntries) + } + + getNumSnapshotActiveValidators := func() int { + snapshotValidatorEntries, err := utxoView().GetSnapshotTopActiveValidatorsByStake(10) + require.NoError(t, err) + return len(snapshotValidatorEntries) + } + + // In epoch 9, all registered validators have Status = Active. 
+ require.Equal(t, getCurrentEpochNumber(), 9) + require.Equal(t, getNumCurrentActiveValidators(), 6) + require.Equal(t, getNumSnapshotActiveValidators(), 6) + + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook() + + // In epoch 10, all registered validators have Status = Active. + require.Equal(t, getCurrentEpochNumber(), 10) + require.Equal(t, getNumCurrentActiveValidators(), 6) + require.Equal(t, getNumSnapshotActiveValidators(), 6) + + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook() + + // In epoch 11, all registered validators have Status = Active. + require.Equal(t, getCurrentEpochNumber(), 11) + require.Equal(t, getNumCurrentActiveValidators(), 6) + require.Equal(t, getNumSnapshotActiveValidators(), 6) + + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook() + + // In epoch 12, all current registered validators have Status = Jailed. + // In snapshot 10, all snapshot registered validators have Status = Active. + require.Equal(t, getCurrentEpochNumber(), 12) + require.Empty(t, getNumCurrentActiveValidators()) + require.Equal(t, getNumSnapshotActiveValidators(), 6) + + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook() + + // In epoch 13, all current registered validators have Status = Jailed. + // In snapshot 11, all snapshot registered validators have Status = Active. + require.Equal(t, getCurrentEpochNumber(), 13) + require.Empty(t, getNumCurrentActiveValidators()) + require.Equal(t, getNumSnapshotActiveValidators(), 6) + + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook() + + // In epoch 14, all current registered validators have Status = Jailed. + // In snapshot 12, all snapshot registered validators have Status = Jailed. 
+ require.Equal(t, getCurrentEpochNumber(), 14) + require.Empty(t, getNumCurrentActiveValidators()) + require.Empty(t, getNumSnapshotActiveValidators()) + } } From 5e32c356fba1d16a86b90b91b87c38d7bd61d7a3 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Thu, 8 Jun 2023 11:59:20 -0400 Subject: [PATCH 101/762] Clean up comments. --- lib/block_view_validator.go | 4 ++-- lib/pos_epoch_complete_hook.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 193a103f0..5d175467f 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1926,7 +1926,7 @@ func (bav *UtxoView) GetGlobalActiveStakeAmountNanos() (*uint256.Int, error) { func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry) (bool, error) { // Return false if the validator is already jailed. We do not want to jail - // them again. And we want to retain their original JailedAtEpochNumber so + // them again as we want to retain their original JailedAtEpochNumber so // that they can eventually unjail themselves. if validatorEntry.Status() == ValidatorStatusJailed { return false, nil @@ -1944,7 +1944,7 @@ func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry) (bool, return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving JailInactiveValidatorGracePeriodEpochs: ") } - // Calculate JailAtEpochNumber. + // Calculate the JailAtEpochNumber. 
jailAtEpochNumber, err := SafeUint64().Add(validatorEntry.LastActiveAtEpochNumber, jailInactiveValidatorGracePeriodEpochs) if err != nil { return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error calculating JailAtEpochNumber: ") diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 1d80fe938..e83b08239 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -50,8 +50,8 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Snapshot the current GlobalParamsEntry. bav._setSnapshotGlobalParamsEntry(bav.GlobalParamsEntry, currentEpochEntry.EpochNumber) - // Snapshot the current ValidatorEntries. This loops through all validators to snapshot them, O(N). - // To save on runtime, we also check if we should jail each validator and jail them if so. + // Snapshot the current ValidatorEntries. This loops through all validators to snapshot them in O(N). + // To save on runtime, in this loop we also check if we should jail each validator and jail them if so. // We optionally jail a validator after we snapshot them. A jailed validator should be considered jailed // in the new epoch starting after this OnEpochCompleteHook, and not the previous epoch which is snapshot. if err = bav.SnapshotCurrentValidators(currentEpochEntry.EpochNumber); err != nil { From 614ba9bf9615eea16c639585e7cf89811b62ad08 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Thu, 8 Jun 2023 12:58:09 -0400 Subject: [PATCH 102/762] Dont jail validators until the ConsensusCutover block height. 
--- lib/block_view_validator.go | 10 +++++++++- lib/pos_epoch_complete_hook.go | 2 +- lib/pos_epoch_complete_hook_test.go | 17 +++++++++++++++++ lib/pos_snapshot_entries.go | 6 +++--- 4 files changed, 30 insertions(+), 5 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 5d175467f..0faddc29c 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1924,7 +1924,15 @@ func (bav *UtxoView) GetGlobalActiveStakeAmountNanos() (*uint256.Int, error) { return globalActiveStakeAmountNanos, nil } -func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry) (bool, error) { +func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry, blockHeight uint64) (bool, error) { + // Return false if we haven't switched from PoW to PoS yet. Otherwise, + // there would be an edge case where all validators will get jailed + // after we deploy the StateSetup block height, but before we deploy + // the ConsensusCutover block height. + if blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + return false, nil + } + // Return false if the validator is already jailed. We do not want to jail // them again as we want to retain their original JailedAtEpochNumber so // that they can eventually unjail themselves. diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index e83b08239..ed523f545 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -54,7 +54,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // To save on runtime, in this loop we also check if we should jail each validator and jail them if so. // We optionally jail a validator after we snapshot them. A jailed validator should be considered jailed // in the new epoch starting after this OnEpochCompleteHook, and not the previous epoch which is snapshot. 
- if err = bav.SnapshotCurrentValidators(currentEpochEntry.EpochNumber); err != nil { + if err = bav.SnapshotCurrentValidators(currentEpochEntry.EpochNumber, blockHeight); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: problem snapshotting validators: ") } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 26536c93f..858a691b3 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -63,6 +63,7 @@ func TestRunEpochCompleteHook(t *testing.T) { // Initialize PoS fork heights. params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -448,6 +449,18 @@ func TestRunEpochCompleteHook(t *testing.T) { return len(snapshotValidatorEntries) } + getCurrentValidator := func(validatorPKID *PKID) *ValidatorEntry { + validatorEntry, err := utxoView().GetValidatorByPKID(validatorPKID) + require.NoError(t, err) + return validatorEntry + } + + getSnapshotValidator := func(validatorPKID *PKID) *ValidatorEntry { + snapshotValidatorEntry, err := utxoView().GetSnapshotValidatorByPKID(validatorPKID) + require.NoError(t, err) + return snapshotValidatorEntry + } + // In epoch 9, all registered validators have Status = Active. require.Equal(t, getCurrentEpochNumber(), 9) require.Equal(t, getNumCurrentActiveValidators(), 6) @@ -477,6 +490,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getCurrentEpochNumber(), 12) require.Empty(t, getNumCurrentActiveValidators()) require.Equal(t, getNumSnapshotActiveValidators(), 6) + require.Equal(t, getCurrentValidator(m6PKID).Status(), ValidatorStatusJailed) + require.Equal(t, getCurrentValidator(m6PKID).JailedAtEpochNumber, uint64(11)) // Run OnEpochCompleteHook(). 
_runOnEpochCompleteHook() @@ -495,5 +510,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getCurrentEpochNumber(), 14) require.Empty(t, getNumCurrentActiveValidators()) require.Empty(t, getNumSnapshotActiveValidators()) + require.Equal(t, getSnapshotValidator(m6PKID).Status(), ValidatorStatusJailed) + require.Equal(t, getSnapshotValidator(m6PKID).JailedAtEpochNumber, uint64(11)) } } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 3fef0e0db..46a1fe902 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -207,7 +207,7 @@ type SnapshotValidatorMapKey struct { ValidatorPKID PKID } -func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64) error { +func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64, blockHeight uint64) error { // First, snapshot any !isDeleted ValidatorEntries in the UtxoView. var utxoViewValidatorPKIDs []*PKID for _, validatorEntry := range bav.ValidatorPKIDToValidatorEntry { @@ -216,7 +216,7 @@ func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64) err bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) // Check if we should jail the validator. - shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry) + shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry, blockHeight) if err != nil { return errors.Wrapf( err, @@ -245,7 +245,7 @@ func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64) err bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) // Check if we should jail the validator. 
- shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry) + shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry, blockHeight) if err != nil { return errors.Wrapf( err, "SnapshotValidators: problem determining if should jail validator %v: ", validatorEntry.ValidatorPKID, From b0bfc4bedbf33faf1332409731e5604f97a0cb85 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Thu, 8 Jun 2023 14:12:44 -0400 Subject: [PATCH 103/762] Add a larger buffer once we cutover to PoS before jailing. --- lib/block_view_validator.go | 44 ++++++++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 0faddc29c..7b0bb837a 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1929,6 +1929,12 @@ func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry, blockHe // there would be an edge case where all validators will get jailed // after we deploy the StateSetup block height, but before we deploy // the ConsensusCutover block height. + // + // We do another check below to make sure enough blocks have passed even + // after we cut-over to PoS, but since this check is so quick to perform, + // we keep this one here as well, since this will catch all OnEpochCompleteHooks + // after the StateSetup block height and before the CutoverConsensus block height + // and saves us a few look-ups and computations. if blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { return false, nil } @@ -1940,18 +1946,44 @@ func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry, blockHe return false, nil } - // Retrieve the CurrentEpochNumber. - currentEpochNumber, err := bav.GetCurrentEpochNumber() - if err != nil { - return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving CurrentEpochNumber: ") - } - // Retrieve the SnapshotGlobalParam: JailInactiveValidatorGracePeriodEpochs. 
jailInactiveValidatorGracePeriodEpochs, err := bav.GetSnapshotGlobalParam(JailInactiveValidatorGracePeriodEpochs) if err != nil { return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving JailInactiveValidatorGracePeriodEpochs: ") } + // Retrieve the SnapshotGlobalParam: EpochDurationNumBlocks. + epochDurationNumBlocks, err := bav.GetSnapshotGlobalParam(EpochDurationNumBlocks) + if err != nil { + return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving EpochDurationNumBlocks: ") + } + + // Calculate if enough blocks have passed since cutting over to PoS to start jailing validators. + // We want to allow a buffer after we cut-over to PoS to allow validators enough time to vote. + // Otherwise, validators may be jailed prior to the "jail inactive validators grace period" + // elapsing since all validators' LastActiveAtEpochNumber = 0 prior to the PoS cut-over. + // + // StartJailingBlockHeight = ConsensusCutoverBlockHeight + (JailInactiveValidatorGracePeriodEpochs * EpochDurationNumBlocks) + startJailingGracePeriodBlocks, err := SafeUint64().Mul(jailInactiveValidatorGracePeriodEpochs, epochDurationNumBlocks) + if err != nil { + return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error calculating StartJailingGracePeriod: ") + } + startJailingBlockHeight, err := SafeUint64().Add( + uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight), startJailingGracePeriodBlocks, + ) + if err != nil { + return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error calculating StartJailingBlockHeight: ") + } + if blockHeight < startJailingBlockHeight { + return false, nil + } + + // Retrieve the CurrentEpochNumber. + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving CurrentEpochNumber: ") + } + // Calculate the JailAtEpochNumber. 
jailAtEpochNumber, err := SafeUint64().Add(validatorEntry.LastActiveAtEpochNumber, jailInactiveValidatorGracePeriodEpochs) if err != nil { From 93ea8673b3e6e8a35500ca0eda0fd83a0081f628 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 9 Jun 2023 14:24:13 -0400 Subject: [PATCH 104/762] Fix bugs in updating global params. --- lib/block_view.go | 2 +- lib/pos_snapshot_entries.go | 45 +++++++++++++++++++++++-------------- 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 2ba6de0e3..ea4aa7996 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3036,7 +3036,7 @@ func (bav *UtxoView) _connectUpdateGlobalParams( } } if len(extraData[EpochDurationNumBlocks.ToString()]) > 0 { - newGlobalParamsEntry.LeaderScheduleMaxNumValidators, bytesRead = Uvarint(extraData[EpochDurationNumBlocks.ToString()]) + newGlobalParamsEntry.EpochDurationNumBlocks, bytesRead = Uvarint(extraData[EpochDurationNumBlocks.ToString()]) if bytesRead <= 0 { return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode EpochDurationNumBlocks as uint64") } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 46a1fe902..52e423947 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -43,6 +43,10 @@ func (snapshotGlobalParam SnapshotGlobalParam) ToString() string { return string(snapshotGlobalParam) } +func (bav *UtxoView) GetCurrentGlobalParam(field SnapshotGlobalParam) uint64 { + return _getGlobalParam(bav, bav.GlobalParamsEntry, field) +} + func (bav *UtxoView) GetSnapshotGlobalParam(field SnapshotGlobalParam) (uint64, error) { // Retrieve the SnapshotGlobalParamsEntry. globalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() @@ -53,42 +57,49 @@ func (bav *UtxoView) GetSnapshotGlobalParam(field SnapshotGlobalParam) (uint64, // We will use the default values below. 
globalParamsEntry = &GlobalParamsEntry{} } + return _getGlobalParam(bav, globalParamsEntry, field), nil +} +func _getGlobalParam(bav *UtxoView, globalParamsEntry *GlobalParamsEntry, field SnapshotGlobalParam) uint64 { // Return the corresponding field. Either the updated value if // set on the GlobalParamsEntry or the default value otherwise. switch field { case StakeLockupEpochDuration: - if globalParamsEntry.StakeLockupEpochDuration != 0 { - return globalParamsEntry.StakeLockupEpochDuration, nil + if globalParamsEntry.StakeLockupEpochDuration > 0 { + return globalParamsEntry.StakeLockupEpochDuration } - return bav.Params.DefaultStakeLockupEpochDuration, nil + return bav.Params.DefaultStakeLockupEpochDuration case ValidatorJailEpochDuration: - if globalParamsEntry.ValidatorJailEpochDuration != 0 { - return globalParamsEntry.ValidatorJailEpochDuration, nil + if globalParamsEntry.ValidatorJailEpochDuration > 0 { + return globalParamsEntry.ValidatorJailEpochDuration } - return bav.Params.DefaultValidatorJailEpochDuration, nil + return bav.Params.DefaultValidatorJailEpochDuration case LeaderScheduleMaxNumValidators: - if globalParamsEntry.LeaderScheduleMaxNumValidators != 0 { - return globalParamsEntry.LeaderScheduleMaxNumValidators, nil + if globalParamsEntry.LeaderScheduleMaxNumValidators > 0 { + return globalParamsEntry.LeaderScheduleMaxNumValidators } - return bav.Params.DefaultLeaderScheduleMaxNumValidators, nil + return bav.Params.DefaultLeaderScheduleMaxNumValidators case EpochDurationNumBlocks: - if globalParamsEntry.EpochDurationNumBlocks != 0 { - return globalParamsEntry.EpochDurationNumBlocks, nil + if globalParamsEntry.EpochDurationNumBlocks > 0 { + return globalParamsEntry.EpochDurationNumBlocks } - return bav.Params.DefaultEpochDurationNumBlocks, nil + return bav.Params.DefaultEpochDurationNumBlocks case JailInactiveValidatorGracePeriodEpochs: - if globalParamsEntry.JailInactiveValidatorGracePeriodEpochs != 0 { - return 
globalParamsEntry.JailInactiveValidatorGracePeriodEpochs, nil + if globalParamsEntry.JailInactiveValidatorGracePeriodEpochs > 0 { + return globalParamsEntry.JailInactiveValidatorGracePeriodEpochs } - return bav.Params.DefaultJailInactiveValidatorGracePeriodEpochs, nil - } + return bav.Params.DefaultJailInactiveValidatorGracePeriodEpochs - return 0, fmt.Errorf("GetSnapshotGlobalParam: invalid field provided: %s", field) + default: + // This can never happen in production since we pass in a restricted enum + // field param. This would only catch the edge case where a developer adds + // a new SnapshotGlobalParam but forgets to add it to this case statement. + panic(fmt.Sprintf("GetSnapshotGlobalParam: invalid field provided: %s", field)) + } } func (bav *UtxoView) GetSnapshotGlobalParamsEntry() (*GlobalParamsEntry, error) { From de482d000d7d69c87d7bf3c4643d2b0e992535c2 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Tue, 13 Jun 2023 10:36:24 -0400 Subject: [PATCH 105/762] Add JSON encoding/decoding helpers. --- bls/signature.go | 46 ++++++++++++++++++++++++++++++++++++++- bls/signature_no_relic.go | 24 ++++++++++++++++++++ bls/signature_test.go | 32 ++++++++++++++++++++++++++- 3 files changed, 100 insertions(+), 2 deletions(-) diff --git a/bls/signature.go b/bls/signature.go index 7f9e3d9b6..e7fb72494 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -6,9 +6,11 @@ import ( "bytes" "crypto/rand" "encoding/hex" + "encoding/json" "errors" - flowCrypto "github.com/onflow/flow-go/crypto" "strings" + + flowCrypto "github.com/onflow/flow-go/crypto" ) // The SigningAlgorithm for BLS keys is BLSBLS12381 which is BLS on the BLS 12-381 curve. 
@@ -112,6 +114,20 @@ func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, return privateKey, err } +func (privateKey *PrivateKey) MarshalJSON() ([]byte, error) { + return json.Marshal(privateKey.ToString()) +} + +func (privateKey *PrivateKey) UnmarshalJSON(data []byte) error { + privateKeyString := "" + err := json.Unmarshal(data, &privateKeyString) + if err != nil { + return err + } + _, err = privateKey.FromString(privateKeyString) + return err +} + func (privateKey *PrivateKey) Eq(other *PrivateKey) bool { if privateKey == nil || privateKey.flowPrivateKey == nil || other == nil { return false @@ -174,6 +190,20 @@ func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, erro return publicKey, err } +func (publicKey *PublicKey) MarshalJSON() ([]byte, error) { + return json.Marshal(publicKey.ToString()) +} + +func (publicKey *PublicKey) UnmarshalJSON(data []byte) error { + publicKeyString := "" + err := json.Unmarshal(data, &publicKeyString) + if err != nil { + return err + } + _, err = publicKey.FromString(publicKeyString) + return err +} + func (publicKey *PublicKey) Eq(other *PublicKey) bool { if publicKey == nil || publicKey.flowPublicKey == nil || other == nil { return false @@ -237,6 +267,20 @@ func (signature *Signature) FromString(signatureString string) (*Signature, erro return signature, nil } +func (signature *Signature) MarshalJSON() ([]byte, error) { + return json.Marshal(signature.ToString()) +} + +func (signature *Signature) UnmarshalJSON(data []byte) error { + signatureString := "" + err := json.Unmarshal(data, &signatureString) + if err != nil { + return err + } + _, err = signature.FromString(signatureString) + return err +} + func (signature *Signature) Eq(other *Signature) bool { if signature == nil || signature.flowSignature == nil || other == nil { return false diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go index 71564d6c0..9833a6aab 100644 --- a/bls/signature_no_relic.go +++ 
b/bls/signature_no_relic.go @@ -38,6 +38,14 @@ func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, panic(BLSNoRelicError) } +func (privateKey *PrivateKey) MarshalJSON() ([]byte, error) { + panic(BLSNoRelicError) +} + +func (privateKey *PrivateKey) UnmarshalJSON(data []byte) error { + panic(BLSNoRelicError) +} + func (privateKey *PrivateKey) Eq(other *PrivateKey) bool { panic(BLSNoRelicError) } @@ -68,6 +76,14 @@ func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, erro panic(BLSNoRelicError) } +func (publicKey *PublicKey) MarshalJSON() ([]byte, error) { + panic(BLSNoRelicError) +} + +func (publicKey *PublicKey) UnmarshalJSON(data []byte) error { + panic(BLSNoRelicError) +} + func (publicKey *PublicKey) Eq(other *PublicKey) bool { panic(BLSNoRelicError) } @@ -98,6 +114,14 @@ func (signature *Signature) FromString(signatureString string) (*Signature, erro panic(BLSNoRelicError) } +func (signature *Signature) MarshalJSON() ([]byte, error) { + panic(BLSNoRelicError) +} + +func (signature *Signature) UnmarshalJSON(data []byte) error { + panic(BLSNoRelicError) +} + func (signature *Signature) Eq(other *Signature) bool { panic(BLSNoRelicError) } diff --git a/bls/signature_test.go b/bls/signature_test.go index b734db286..63637f8df 100644 --- a/bls/signature_test.go +++ b/bls/signature_test.go @@ -5,6 +5,7 @@ package bls import ( "bytes" "crypto/rand" + "encoding/json" "github.com/stretchr/testify/require" "testing" ) @@ -212,6 +213,35 @@ func TestVerifyingBLSSignatures(t *testing.T) { require.Nil(t, (&Signature{}).Copy().flowSignature) } +func TestJsonMarshalingBLSKeys(t *testing.T) { + // Generate random BLS PrivateKey, PublicKey, and Signature. + privateKey := _generateRandomBLSPrivateKey(t) + publicKey := privateKey.PublicKey() + signature, err := privateKey.Sign(_generateRandomBytes(t, 64)) + require.NoError(t, err) + + // Test JSON marshaling of bls.PrivateKey. 
+ privateKeyEncoded, err := json.Marshal(privateKey) + require.NoError(t, err) + privateKeyDecoded := &PrivateKey{} + require.NoError(t, json.Unmarshal(privateKeyEncoded, privateKeyDecoded)) + require.True(t, privateKey.Eq(privateKeyDecoded)) + + // Test JSON marshaling of bls.PublicKey. + publicKeyEncoded, err := json.Marshal(publicKey) + require.NoError(t, err) + publicKeyDecoded := &PublicKey{} + require.NoError(t, json.Unmarshal(publicKeyEncoded, publicKeyDecoded)) + require.True(t, publicKey.Eq(publicKeyDecoded)) + + // Test JSON marshaling of bls.Signature. + signatureEncoded, err := json.Marshal(signature) + require.NoError(t, err) + signatureDecoded := &Signature{} + require.NoError(t, json.Unmarshal(signatureEncoded, signatureDecoded)) + require.True(t, signature.Eq(signatureDecoded)) +} + func _generateRandomBLSPrivateKey(t *testing.T) *PrivateKey { privateKey, err := NewPrivateKey() require.NoError(t, err) @@ -219,7 +249,7 @@ func _generateRandomBLSPrivateKey(t *testing.T) *PrivateKey { } func _generateRandomBytes(t *testing.T, numBytes int) []byte { - randomBytes := make([]byte, 64) + randomBytes := make([]byte, numBytes) _, err := rand.Read(randomBytes) require.NoError(t, err) return randomBytes From 0f7e09d910c97a754cfdf4a3c51e595aed583563 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 14 Jun 2023 12:20:06 -0400 Subject: [PATCH 106/762] Add validator status ToString util. 
--- lib/block_view_validator.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 7b0bb837a..9c76b8f1c 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -99,6 +99,17 @@ const ( ValidatorStatusJailed ValidatorStatus = 2 ) +func (validatorStatus ValidatorStatus) ToString() string { + switch validatorStatus { + case ValidatorStatusActive: + return "Active" + case ValidatorStatusJailed: + return "Jailed" + default: + return "Unknown" + } +} + func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { // Copy domains. var domainsCopy [][]byte From b6424acf8813de1204646c4cb3fe3261646bc8eb Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 14 Jun 2023 16:48:39 -0400 Subject: [PATCH 107/762] Install relic in prod dockerfile. --- Dockerfile | 10 ++++++++-- test.Dockerfile | 8 ++++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/Dockerfile b/Dockerfile index 04a49132f..0d90aad8b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,10 @@ FROM alpine:latest AS core RUN apk update && apk upgrade -RUN apk add --update go gcc g++ vips vips-dev +RUN apk add --update bash cmake g++ gcc git make vips vips-dev + +COPY --from=golang:1.20-alpine /usr/local/go/ /usr/local/go/ +ENV PATH="/usr/local/go/bin:${PATH}" WORKDIR /deso/src/core @@ -15,11 +18,14 @@ COPY cmd cmd COPY desohash desohash COPY lib lib COPY migrate migrate +COPY scripts scripts COPY test_data test_data COPY main.go . 
+RUN ./scripts/install-relic.sh + # build backend -RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core main.go +RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core -tags=relic main.go # create tiny image FROM alpine:edge diff --git a/test.Dockerfile b/test.Dockerfile index 6eb62c4f9..3cb9a8b1c 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -2,7 +2,7 @@ FROM alpine:latest AS core RUN apk update RUN apk upgrade -RUN apk add --update bash cmake git gcc g++ make vips vips-dev +RUN apk add --update bash cmake g++ gcc git make vips vips-dev COPY --from=golang:1.20-alpine /usr/local/go/ /usr/local/go/ ENV PATH="/usr/local/go/bin:${PATH}" @@ -14,17 +14,17 @@ COPY go.sum . RUN go mod download -COPY scripts/install-relic.sh . -RUN ./install-relic.sh - COPY bls bls COPY cmd cmd COPY desohash desohash COPY lib lib COPY migrate migrate +COPY scripts scripts COPY test_data test_data COPY main.go . +RUN ./scripts/install-relic.sh + # build backend RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core main.go From 80cb0776b7c7f4810de05ea98454e5062289dfbc Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Thu, 15 Jun 2023 09:36:54 -0400 Subject: [PATCH 108/762] Rename VotingPublicKeySignature to VotingSignature. 
--- lib/block_view_stake_test.go | 30 ++--- lib/block_view_validator.go | 94 +++++++-------- lib/block_view_validator_test.go | 180 ++++++++++++++-------------- lib/pos_epoch_complete_hook_test.go | 6 +- lib/pos_leader_schedule_test.go | 6 +- 5 files changed, 158 insertions(+), 158 deletions(-) diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index d66496e7a..ac9c39b1f 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -100,9 +100,9 @@ func _testStaking(t *testing.T, flushToDB bool) { votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, flushToDB) require.NoError(t, err) @@ -947,9 +947,9 @@ func TestStakingWithDerivedKey(t *testing.T) { // m0 registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example1.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://example1.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) @@ -958,9 +958,9 @@ func TestStakingWithDerivedKey(t *testing.T) { // m1 registers as a validator. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example2.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://example2.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) @@ -1827,9 +1827,9 @@ func TestStakeLockupEpochDuration(t *testing.T) { // m0 registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m1.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://m1.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, true) require.NoError(t, err) @@ -2020,9 +2020,9 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 registers as a validator. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://m0.example.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 9c76b8f1c..27898069f 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -54,11 +54,11 @@ type ValidatorEntry struct { // other validators can reliably prove the message came from this validator // by verifying against their VotingPublicKey. VotingPublicKey *bls.PublicKey - // The VotingPublicKeySignature is the signature of the SHA256(TransactorPublicKey) + // The VotingSignature is the signature of the SHA256(TransactorPublicKey) // by the VotingPrivateKey. // This proves that this validator is indeed the proper owner of the corresponding // VotingPrivateKey. See comment on CreateValidatorVotingSignaturePayload for more details. - VotingPublicKeySignature *bls.Signature + VotingSignature *bls.Signature // TotalStakeAmountNanos is a cached value of this validator's total stake, calculated // by summing all the corresponding StakeEntries assigned to this validator. We cache // the value here to avoid the O(N) operation of recomputing when determining a @@ -119,16 +119,16 @@ func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { // Return new ValidatorEntry. 
return &ValidatorEntry{ - ValidatorPKID: validatorEntry.ValidatorPKID.NewPKID(), - Domains: domainsCopy, - DisableDelegatedStake: validatorEntry.DisableDelegatedStake, - VotingPublicKey: validatorEntry.VotingPublicKey.Copy(), - VotingPublicKeySignature: validatorEntry.VotingPublicKeySignature.Copy(), - TotalStakeAmountNanos: validatorEntry.TotalStakeAmountNanos.Clone(), - LastActiveAtEpochNumber: validatorEntry.LastActiveAtEpochNumber, - JailedAtEpochNumber: validatorEntry.JailedAtEpochNumber, - ExtraData: copyExtraData(validatorEntry.ExtraData), - isDeleted: validatorEntry.isDeleted, + ValidatorPKID: validatorEntry.ValidatorPKID.NewPKID(), + Domains: domainsCopy, + DisableDelegatedStake: validatorEntry.DisableDelegatedStake, + VotingPublicKey: validatorEntry.VotingPublicKey.Copy(), + VotingSignature: validatorEntry.VotingSignature.Copy(), + TotalStakeAmountNanos: validatorEntry.TotalStakeAmountNanos.Clone(), + LastActiveAtEpochNumber: validatorEntry.LastActiveAtEpochNumber, + JailedAtEpochNumber: validatorEntry.JailedAtEpochNumber, + ExtraData: copyExtraData(validatorEntry.ExtraData), + isDeleted: validatorEntry.isDeleted, } } @@ -144,7 +144,7 @@ func (validatorEntry *ValidatorEntry) RawEncodeWithoutMetadata(blockHeight uint6 data = append(data, BoolToByte(validatorEntry.DisableDelegatedStake)) data = append(data, EncodeBLSPublicKey(validatorEntry.VotingPublicKey)...) - data = append(data, EncodeBLSSignature(validatorEntry.VotingPublicKeySignature)...) + data = append(data, EncodeBLSSignature(validatorEntry.VotingSignature)...) data = append(data, VariableEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) data = append(data, UintToBuf(validatorEntry.LastActiveAtEpochNumber)...) data = append(data, UintToBuf(validatorEntry.JailedAtEpochNumber)...) 
@@ -186,10 +186,10 @@ func (validatorEntry *ValidatorEntry) RawDecodeWithoutMetadata(blockHeight uint6 return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingPublicKey: ") } - // VotingPublicKeySignature - validatorEntry.VotingPublicKeySignature, err = DecodeBLSSignature(rr) + // VotingSignature + validatorEntry.VotingSignature, err = DecodeBLSSignature(rr) if err != nil { - return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingPublicKeySignature: ") + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingSignature: ") } // TotalStakeAmountNanos @@ -232,10 +232,10 @@ func (validatorEntry *ValidatorEntry) GetEncoderType() EncoderType { // type RegisterAsValidatorMetadata struct { - Domains [][]byte - DisableDelegatedStake bool - VotingPublicKey *bls.PublicKey - VotingPublicKeySignature *bls.Signature + Domains [][]byte + DisableDelegatedStake bool + VotingPublicKey *bls.PublicKey + VotingSignature *bls.Signature } func (txnData *RegisterAsValidatorMetadata) GetTxnType() TxnType { @@ -253,7 +253,7 @@ func (txnData *RegisterAsValidatorMetadata) ToBytes(preSignature bool) ([]byte, data = append(data, BoolToByte(txnData.DisableDelegatedStake)) data = append(data, EncodeBLSPublicKey(txnData.VotingPublicKey)...) - data = append(data, EncodeBLSSignature(txnData.VotingPublicKeySignature)...) + data = append(data, EncodeBLSSignature(txnData.VotingSignature)...) 
return data, nil } @@ -285,10 +285,10 @@ func (txnData *RegisterAsValidatorMetadata) FromBytes(data []byte) error { return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingPublicKey: ") } - // VotingPublicKeySignature - txnData.VotingPublicKeySignature, err = DecodeBLSSignature(rr) + // VotingSignature + txnData.VotingSignature, err = DecodeBLSSignature(rr) if err != nil { - return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingPublicKeySignature: ") + return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingSignature: ") } return nil @@ -351,7 +351,7 @@ type RegisterAsValidatorTxindexMetadata struct { Domains []string DisableDelegatedStake bool VotingPublicKey string - VotingPublicKeySignature string + VotingSignature string } func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -366,7 +366,7 @@ func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawEncodeWithoutMetad data = append(data, BoolToByte(txindexMetadata.DisableDelegatedStake)) data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingPublicKey))...) - data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingPublicKeySignature))...) + data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingSignature))...) 
return data } @@ -406,12 +406,12 @@ func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawDecodeWithoutMetad } txindexMetadata.VotingPublicKey = string(votingPublicKeyBytes) - // VotingPublicKeySignature - votingPublicKeySignatureBytes, err := DecodeByteArray(rr) + // VotingSignature + votingSignatureBytes, err := DecodeByteArray(rr) if err != nil { - return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading VotingPublicKeySignature: ") + return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading VotingSignature: ") } - txindexMetadata.VotingPublicKeySignature = string(votingPublicKeySignatureBytes) + txindexMetadata.VotingSignature = string(votingSignatureBytes) return nil } @@ -1098,14 +1098,14 @@ func (bav *UtxoView) _connectRegisterAsValidator( ValidatorPKID: transactorPKIDEntry.PKID, // Note: if someone is updating their ValidatorEntry, they need to include // all domains. The Domains field is not appended to. It is overwritten. - Domains: txMeta.Domains, - DisableDelegatedStake: txMeta.DisableDelegatedStake, - VotingPublicKey: txMeta.VotingPublicKey, - VotingPublicKeySignature: txMeta.VotingPublicKeySignature, - TotalStakeAmountNanos: totalStakeAmountNanos, - LastActiveAtEpochNumber: lastActiveAtEpochNumber, - JailedAtEpochNumber: jailedAtEpochNumber, - ExtraData: mergeExtraData(prevExtraData, txn.ExtraData), + Domains: txMeta.Domains, + DisableDelegatedStake: txMeta.DisableDelegatedStake, + VotingPublicKey: txMeta.VotingPublicKey, + VotingSignature: txMeta.VotingSignature, + TotalStakeAmountNanos: totalStakeAmountNanos, + LastActiveAtEpochNumber: lastActiveAtEpochNumber, + JailedAtEpochNumber: jailedAtEpochNumber, + ExtraData: mergeExtraData(prevExtraData, txn.ExtraData), } // Set the ValidatorEntry. 
bav._setValidatorEntryMappings(currentValidatorEntry) @@ -1641,17 +1641,17 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata( return errors.Wrapf(RuleErrorValidatorMissingVotingPublicKey, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } - // Validate VotingPublicKeySignature. - if metadata.VotingPublicKeySignature == nil { - return errors.Wrapf(RuleErrorValidatorMissingVotingPublicKeySignature, "UtxoView.IsValidRegisterAsValidatorMetadata: ") + // Validate VotingSignature. + if metadata.VotingSignature == nil { + return errors.Wrapf(RuleErrorValidatorMissingVotingSignature, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } votingSignaturePayload := CreateValidatorVotingSignaturePayload(transactorPublicKey) - isValidBLSSignature, err := metadata.VotingPublicKey.Verify(metadata.VotingPublicKeySignature, votingSignaturePayload) + isValidBLSSignature, err := metadata.VotingPublicKey.Verify(metadata.VotingSignature, votingSignaturePayload) if err != nil { - return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error verifying VotingPublicKeySignature: ") + return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error verifying VotingSignature: ") } if !isValidBLSSignature { - return errors.Wrapf(RuleErrorValidatorInvalidVotingPublicKeySignature, "UtxoView.IsValidRegisterAsValidatorMetadata: ") + return errors.Wrapf(RuleErrorValidatorInvalidVotingSignature, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } // Error if updating DisableDelegatedStake from false to @@ -2147,7 +2147,7 @@ func (bav *UtxoView) CreateRegisterAsValidatorTxindexMetadata( Domains: domains, DisableDelegatedStake: metadata.DisableDelegatedStake, VotingPublicKey: metadata.VotingPublicKey.ToString(), - VotingPublicKeySignature: metadata.VotingPublicKeySignature.ToString(), + VotingSignature: metadata.VotingSignature.ToString(), } // Construct AffectedPublicKeys. 
@@ -2319,8 +2319,8 @@ const RuleErrorValidatorInvalidDomain RuleError = "RuleErrorValidatorInvalidDoma const RuleErrorValidatorDuplicateDomains RuleError = "RuleErrorValidatorDuplicateDomains" const RuleErrorValidatorNotFound RuleError = "RuleErrorValidatorNotFound" const RuleErrorValidatorMissingVotingPublicKey RuleError = "RuleErrorValidatorMissingVotingPublicKey" -const RuleErrorValidatorMissingVotingPublicKeySignature RuleError = "RuleErrorValidatorMissingVotingPublicKeySignature" -const RuleErrorValidatorInvalidVotingPublicKeySignature RuleError = "RuleErrorValidatorInvalidVotingPublicKeySignature" +const RuleErrorValidatorMissingVotingSignature RuleError = "RuleErrorValidatorMissingVotingSignature" +const RuleErrorValidatorInvalidVotingSignature RuleError = "RuleErrorValidatorInvalidVotingSignature" const RuleErrorValidatorDisablingExistingDelegatedStakers RuleError = "RuleErrorValidatorDisablingExistingDelegatedStakers" const RuleErrorUnjailingNonjailedValidator RuleError = "RuleErrorUnjailingNonjailedValidator" const RuleErrorUnjailingValidatorTooEarly RuleError = "RuleErrorUnjailingValidatorTooEarly" diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 817c811ca..ffb4325db 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -88,10 +88,10 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) @@ -157,7 +157,7 
@@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Contains(t, err.Error(), RuleErrorValidatorMissingVotingPublicKey) } { - // RuleErrorValidatorMissingVotingPublicKeySignature + // RuleErrorValidatorMissingVotingSignature votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example.com")}, @@ -167,45 +167,45 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, ) require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorValidatorMissingVotingPublicKeySignature) + require.Contains(t, err.Error(), RuleErrorValidatorMissingVotingSignature) } { - // RuleErrorValidatorInvalidVotingPublicKeySignature: invalid TransactorPkBytes + // RuleErrorValidatorInvalidVotingSignature: invalid TransactorPkBytes votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn( testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, ) require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingPublicKeySignature) + require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingSignature) } { - // RuleErrorValidatorInvalidVotingPublicKeySignature: invalid VotingPublicKey + // RuleErrorValidatorInvalidVotingSignature: invalid VotingPublicKey votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes) _, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - 
VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn( testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, ) require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingPublicKeySignature) + require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingSignature) } { // Happy path: register a validator votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } extraData := map[string][]byte{"TestKey": []byte("TestValue1")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -238,10 +238,10 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // Happy path: update a validator votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example1.com"), []byte("https://example2.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://example1.com"), []byte("https://example2.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -593,9 +593,9 @@ func 
TestValidatorRegistrationWithDerivedKey(t *testing.T) { // Perform a RegisterAsValidator txn. No error expected. registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } err = _submitValidatorTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: registerAsValidatorMetadata}, @@ -659,9 +659,9 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { // Perform a RegisterAsValidator txn. Error expected. registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } err = _submitValidatorTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: registerAsValidatorMetadata}, @@ -749,9 +749,9 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { // m0 registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://m0.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -765,9 +765,9 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { // m1 registers as a validator. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m1.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://m1.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -781,9 +781,9 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { // m2 registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m2PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m2.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://m2.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m2Pub, m2Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -953,10 +953,10 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Store m0's ValidatorEntry in the db with TotalStake = 100 nanos. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) validatorEntry := &ValidatorEntry{ - ValidatorPKID: m0PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(100), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + ValidatorPKID: m0PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(100), + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } utxoView._setValidatorEntryMappings(validatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -973,11 +973,11 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Store m1's jailed ValidatorEntry in the db with TotalStake = 400 nanos. votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m1PkBytes) validatorEntry = &ValidatorEntry{ - ValidatorPKID: m1PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(400), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - JailedAtEpochNumber: 1, + ValidatorPKID: m1PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(400), + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, + JailedAtEpochNumber: 1, } utxoView._setValidatorEntryMappings(validatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -992,10 +992,10 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Store m2's ValidatorEntry in the db with TotalStake = 300 nanos. 
votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m2PkBytes) m2ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m2PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(300), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + ValidatorPKID: m2PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(300), + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } utxoView._setValidatorEntryMappings(m2ValidatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -1009,10 +1009,10 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Store m3's ValidatorEntry in the db with TotalStake = 600 nanos. votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m3PkBytes) m3ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m3PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(600), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + ValidatorPKID: m3PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(600), + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } utxoView._setValidatorEntryMappings(m3ValidatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -1047,10 +1047,10 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Store m4's ValidatorEntry in the UtxoView with TotalStake = 50 nanos. 
votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m4PkBytes) m4ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m4PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + ValidatorPKID: m4PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } utxoView._setValidatorEntryMappings(m4ValidatorEntry) @@ -1071,11 +1071,11 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Store m5's jailed ValidatorEntry in the UtxoView with TotalStake = 500 nanos. votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m5PkBytes) m5ValidatorEntry := &ValidatorEntry{ - ValidatorPKID: m5PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(500), - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, - JailedAtEpochNumber: 1, + ValidatorPKID: m5PKID, + TotalStakeAmountNanos: uint256.NewInt().SetUint64(500), + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, + JailedAtEpochNumber: 1, } utxoView._setValidatorEntryMappings(m5ValidatorEntry) @@ -1177,10 +1177,10 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 registers as a validator with DisableDelegatedStake = FALSE. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1198,10 +1198,10 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 updates DisableDelegatedStake = TRUE. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: true, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: true, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1243,10 +1243,10 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 updates DisableDelegatedStake = FALSE. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: false, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1275,10 +1275,10 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 tries to update DisableDelegateStake = TRUE. Errors. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - DisableDelegatedStake: true, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://m0.com")}, + DisableDelegatedStake: true, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) @@ -1366,9 +1366,9 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { // m0 registers as a validator. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://m0.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1565,9 +1565,9 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // m0 registers as a validator. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } extraData := map[string][]byte{"TestKey": []byte("TestValue1")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -1866,9 +1866,9 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { // sender registers as a validator. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, senderPkString, senderPrivString, registerMetadata, nil, true) require.NoError(t, err) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 858a691b3..241ae31d7 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -119,9 +119,9 @@ func TestRunEpochCompleteHook(t *testing.T) { // Validator registers. votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, pkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, publicKey, privateKey, registerMetadata, nil, true) require.NoError(t, err) diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index a84d7397d..765ef60c3 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -75,9 +75,9 @@ func TestGenerateLeaderSchedule(t *testing.T) { // Validator registers. 
votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, pkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, - VotingPublicKey: votingPublicKey, - VotingPublicKeySignature: votingSignature, + Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, + VotingPublicKey: votingPublicKey, + VotingSignature: votingSignature, } _, err = _submitRegisterAsValidatorTxn(testMeta, publicKey, privateKey, registerMetadata, nil, true) require.NoError(t, err) From 4e7eb02009550fe9eaff2b7be26ec0a6495e2148 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Thu, 15 Jun 2023 09:49:37 -0400 Subject: [PATCH 109/762] Rename to VotingAuthorization. --- lib/block_view_stake_test.go | 40 +++---- lib/block_view_validator.go | 66 ++++++------ lib/block_view_validator_test.go | 160 ++++++++++++++-------------- lib/pos_epoch_complete_hook_test.go | 8 +- lib/pos_leader_schedule_test.go | 8 +- 5 files changed, 141 insertions(+), 141 deletions(-) diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index ac9c39b1f..c20a61c43 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -98,11 +98,11 @@ func _testStaking(t *testing.T, flushToDB bool) { GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = 
_submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, flushToDB) require.NoError(t, err) @@ -945,22 +945,22 @@ func TestStakingWithDerivedKey(t *testing.T) { } { // m0 registers as a validator. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example1.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://example1.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) } { // m1 registers as a validator. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m1PkBytes) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example2.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://example2.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) @@ -1825,11 +1825,11 @@ func TestStakeLockupEpochDuration(t *testing.T) { } { // m0 registers as a validator. 
- votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m1.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://m1.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, true) require.NoError(t, err) @@ -2018,11 +2018,11 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { } { // m0 registers as a validator. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.example.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://m0.example.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 27898069f..b86f765b6 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -54,11 +54,11 @@ type ValidatorEntry struct { // other validators can reliably prove the message came from this validator // by verifying against their VotingPublicKey. VotingPublicKey *bls.PublicKey - // The VotingSignature is the signature of the SHA256(TransactorPublicKey) - // by the VotingPrivateKey. - // This proves that this validator is indeed the proper owner of the corresponding - // VotingPrivateKey. 
See comment on CreateValidatorVotingSignaturePayload for more details. - VotingSignature *bls.Signature + // The VotingAuthorization is the BLS signature of the SHA256(TransactorPublicKey) + // by the VotingPrivateKey. This proves that this validator is indeed the proper + // owner of the corresponding VotingPrivateKey. See comment on + // CreateValidatorVotingAuthorizationPayload for more details. + VotingAuthorization *bls.Signature // TotalStakeAmountNanos is a cached value of this validator's total stake, calculated // by summing all the corresponding StakeEntries assigned to this validator. We cache // the value here to avoid the O(N) operation of recomputing when determining a @@ -123,7 +123,7 @@ func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { Domains: domainsCopy, DisableDelegatedStake: validatorEntry.DisableDelegatedStake, VotingPublicKey: validatorEntry.VotingPublicKey.Copy(), - VotingSignature: validatorEntry.VotingSignature.Copy(), + VotingAuthorization: validatorEntry.VotingAuthorization.Copy(), TotalStakeAmountNanos: validatorEntry.TotalStakeAmountNanos.Clone(), LastActiveAtEpochNumber: validatorEntry.LastActiveAtEpochNumber, JailedAtEpochNumber: validatorEntry.JailedAtEpochNumber, @@ -144,7 +144,7 @@ func (validatorEntry *ValidatorEntry) RawEncodeWithoutMetadata(blockHeight uint6 data = append(data, BoolToByte(validatorEntry.DisableDelegatedStake)) data = append(data, EncodeBLSPublicKey(validatorEntry.VotingPublicKey)...) - data = append(data, EncodeBLSSignature(validatorEntry.VotingSignature)...) + data = append(data, EncodeBLSSignature(validatorEntry.VotingAuthorization)...) data = append(data, VariableEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) data = append(data, UintToBuf(validatorEntry.LastActiveAtEpochNumber)...) data = append(data, UintToBuf(validatorEntry.JailedAtEpochNumber)...) 
@@ -186,10 +186,10 @@ func (validatorEntry *ValidatorEntry) RawDecodeWithoutMetadata(blockHeight uint6 return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingPublicKey: ") } - // VotingSignature - validatorEntry.VotingSignature, err = DecodeBLSSignature(rr) + // VotingAuthorization + validatorEntry.VotingAuthorization, err = DecodeBLSSignature(rr) if err != nil { - return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingSignature: ") + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading VotingAuthorization: ") } // TotalStakeAmountNanos @@ -235,7 +235,7 @@ type RegisterAsValidatorMetadata struct { Domains [][]byte DisableDelegatedStake bool VotingPublicKey *bls.PublicKey - VotingSignature *bls.Signature + VotingAuthorization *bls.Signature } func (txnData *RegisterAsValidatorMetadata) GetTxnType() TxnType { @@ -253,7 +253,7 @@ func (txnData *RegisterAsValidatorMetadata) ToBytes(preSignature bool) ([]byte, data = append(data, BoolToByte(txnData.DisableDelegatedStake)) data = append(data, EncodeBLSPublicKey(txnData.VotingPublicKey)...) - data = append(data, EncodeBLSSignature(txnData.VotingSignature)...) + data = append(data, EncodeBLSSignature(txnData.VotingAuthorization)...) 
return data, nil } @@ -285,10 +285,10 @@ func (txnData *RegisterAsValidatorMetadata) FromBytes(data []byte) error { return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingPublicKey: ") } - // VotingSignature - txnData.VotingSignature, err = DecodeBLSSignature(rr) + // VotingAuthorization + txnData.VotingAuthorization, err = DecodeBLSSignature(rr) if err != nil { - return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingSignature: ") + return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading VotingAuthorization: ") } return nil @@ -351,7 +351,7 @@ type RegisterAsValidatorTxindexMetadata struct { Domains []string DisableDelegatedStake bool VotingPublicKey string - VotingSignature string + VotingAuthorization string } func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -366,7 +366,7 @@ func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawEncodeWithoutMetad data = append(data, BoolToByte(txindexMetadata.DisableDelegatedStake)) data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingPublicKey))...) - data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingSignature))...) + data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingAuthorization))...) 
return data } @@ -406,12 +406,12 @@ func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawDecodeWithoutMetad } txindexMetadata.VotingPublicKey = string(votingPublicKeyBytes) - // VotingSignature - votingSignatureBytes, err := DecodeByteArray(rr) + // VotingAuthorization + votingAuthorizationBytes, err := DecodeByteArray(rr) if err != nil { - return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading VotingSignature: ") + return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading VotingAuthorization: ") } - txindexMetadata.VotingSignature = string(votingSignatureBytes) + txindexMetadata.VotingAuthorization = string(votingAuthorizationBytes) return nil } @@ -1101,7 +1101,7 @@ func (bav *UtxoView) _connectRegisterAsValidator( Domains: txMeta.Domains, DisableDelegatedStake: txMeta.DisableDelegatedStake, VotingPublicKey: txMeta.VotingPublicKey, - VotingSignature: txMeta.VotingSignature, + VotingAuthorization: txMeta.VotingAuthorization, TotalStakeAmountNanos: totalStakeAmountNanos, LastActiveAtEpochNumber: lastActiveAtEpochNumber, JailedAtEpochNumber: jailedAtEpochNumber, @@ -1641,17 +1641,17 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata( return errors.Wrapf(RuleErrorValidatorMissingVotingPublicKey, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } - // Validate VotingSignature. - if metadata.VotingSignature == nil { - return errors.Wrapf(RuleErrorValidatorMissingVotingSignature, "UtxoView.IsValidRegisterAsValidatorMetadata: ") + // Validate VotingAuthorization. 
+ if metadata.VotingAuthorization == nil { + return errors.Wrapf(RuleErrorValidatorMissingVotingAuthorization, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } - votingSignaturePayload := CreateValidatorVotingSignaturePayload(transactorPublicKey) - isValidBLSSignature, err := metadata.VotingPublicKey.Verify(metadata.VotingSignature, votingSignaturePayload) + votingAuthorizationPayload := CreateValidatorVotingAuthorizationPayload(transactorPublicKey) + isValidBLSSignature, err := metadata.VotingPublicKey.Verify(metadata.VotingAuthorization, votingAuthorizationPayload) if err != nil { - return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error verifying VotingSignature: ") + return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error verifying VotingAuthorization: ") } if !isValidBLSSignature { - return errors.Wrapf(RuleErrorValidatorInvalidVotingSignature, "UtxoView.IsValidRegisterAsValidatorMetadata: ") + return errors.Wrapf(RuleErrorValidatorInvalidVotingAuthorization, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } // Error if updating DisableDelegatedStake from false to @@ -2147,7 +2147,7 @@ func (bav *UtxoView) CreateRegisterAsValidatorTxindexMetadata( Domains: domains, DisableDelegatedStake: metadata.DisableDelegatedStake, VotingPublicKey: metadata.VotingPublicKey.ToString(), - VotingSignature: metadata.VotingSignature.ToString(), + VotingAuthorization: metadata.VotingAuthorization.ToString(), } // Construct AffectedPublicKeys. @@ -2299,7 +2299,7 @@ func DecodeBLSSignature(rr io.Reader) (*bls.Signature, error) { // This means that nobody can construct a transaction to re-register this validator // without constructing a new transaction with a fresh nonce, thus requiring a new // signature from the same transactorPublicKey, which they wouldn't have access to. 
-func CreateValidatorVotingSignaturePayload( +func CreateValidatorVotingAuthorizationPayload( transactorPublicKeyBytes []byte, ) []byte { // HASH(TransactorPublicKey) @@ -2319,8 +2319,8 @@ const RuleErrorValidatorInvalidDomain RuleError = "RuleErrorValidatorInvalidDoma const RuleErrorValidatorDuplicateDomains RuleError = "RuleErrorValidatorDuplicateDomains" const RuleErrorValidatorNotFound RuleError = "RuleErrorValidatorNotFound" const RuleErrorValidatorMissingVotingPublicKey RuleError = "RuleErrorValidatorMissingVotingPublicKey" -const RuleErrorValidatorMissingVotingSignature RuleError = "RuleErrorValidatorMissingVotingSignature" -const RuleErrorValidatorInvalidVotingSignature RuleError = "RuleErrorValidatorInvalidVotingSignature" +const RuleErrorValidatorMissingVotingAuthorization RuleError = "RuleErrorValidatorMissingVotingAuthorization" +const RuleErrorValidatorInvalidVotingAuthorization RuleError = "RuleErrorValidatorInvalidVotingAuthorization" const RuleErrorValidatorDisablingExistingDelegatedStakers RuleError = "RuleErrorValidatorDisablingExistingDelegatedStakers" const RuleErrorUnjailingNonjailedValidator RuleError = "RuleErrorUnjailingNonjailedValidator" const RuleErrorUnjailingValidatorTooEarly RuleError = "RuleErrorUnjailingValidatorTooEarly" diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index ffb4325db..ede4a5dae 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -86,12 +86,12 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ Domains: 
[][]byte{[]byte("https://example.com")}, DisableDelegatedStake: false, VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) @@ -157,8 +157,8 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Contains(t, err.Error(), RuleErrorValidatorMissingVotingPublicKey) } { - // RuleErrorValidatorMissingVotingSignature - votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + // RuleErrorValidatorMissingVotingAuthorization + votingPublicKey, _ := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example.com")}, VotingPublicKey: votingPublicKey, @@ -167,45 +167,45 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, ) require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorValidatorMissingVotingSignature) + require.Contains(t, err.Error(), RuleErrorValidatorMissingVotingAuthorization) } { - // RuleErrorValidatorInvalidVotingSignature: invalid TransactorPkBytes - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes) + // RuleErrorValidatorInvalidVotingAuthorization: invalid TransactorPkBytes + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m1PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn( testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, ) require.Error(t, err) - require.Contains(t, err.Error(), 
RuleErrorValidatorInvalidVotingSignature) + require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingAuthorization) } { - // RuleErrorValidatorInvalidVotingSignature: invalid VotingPublicKey - votingPublicKey, _ := _generateVotingPublicKeyAndSignature(t, m0PkBytes) - _, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + // RuleErrorValidatorInvalidVotingAuthorization: invalid VotingPublicKey + votingPublicKey, _ := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) + _, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn( testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, ) require.Error(t, err) - require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingSignature) + require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingAuthorization) } { // Happy path: register a validator - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example.com")}, DisableDelegatedStake: false, VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + VotingAuthorization: votingAuthorization, } extraData := map[string][]byte{"TestKey": []byte("TestValue1")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -236,12 +236,12 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { } { // Happy path: update a validator - votingPublicKey, votingSignature := 
_generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example1.com"), []byte("https://example2.com")}, DisableDelegatedStake: false, VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + VotingAuthorization: votingAuthorization, } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -588,14 +588,14 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { derivedKeyPriv, err := _submitAuthorizeDerivedKeyTxn(TxnTypeRegisterAsValidator, 1) require.NoError(t, err) - // Create a VotingPublicKey and VotingSignature. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes) + // Create a VotingPublicKey and VotingAuthorization. + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, senderPkBytes) // Perform a RegisterAsValidator txn. No error expected. registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } err = _submitValidatorTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: registerAsValidatorMetadata}, @@ -654,14 +654,14 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), RuleErrorValidatorNotFound) - // Create a VotingPublicKey and VotingSignature. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes) + // Create a VotingPublicKey and VotingAuthorization. 
+ votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, senderPkBytes) // Perform a RegisterAsValidator txn. Error expected. registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } err = _submitValidatorTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: registerAsValidatorMetadata}, @@ -747,11 +747,11 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { } { // m0 registers as a validator. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://m0.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -763,11 +763,11 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { } { // m1 registers as a validator. 
- votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m1PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m1PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m1.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://m1.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -779,11 +779,11 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { } { // m2 registers as a validator. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m2PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m2PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m2.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://m2.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, m2Pub, m2Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -951,12 +951,12 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { m5PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m5PkBytes).PKID // Store m0's ValidatorEntry in the db with TotalStake = 100 nanos. 
- votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) validatorEntry := &ValidatorEntry{ ValidatorPKID: m0PKID, TotalStakeAmountNanos: uint256.NewInt().SetUint64(100), VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + VotingAuthorization: votingAuthorization, } utxoView._setValidatorEntryMappings(validatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -971,12 +971,12 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Empty(t, utxoView.ValidatorPKIDToValidatorEntry) // Store m1's jailed ValidatorEntry in the db with TotalStake = 400 nanos. - votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m1PkBytes) + votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m1PkBytes) validatorEntry = &ValidatorEntry{ ValidatorPKID: m1PKID, TotalStakeAmountNanos: uint256.NewInt().SetUint64(400), VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + VotingAuthorization: votingAuthorization, JailedAtEpochNumber: 1, } utxoView._setValidatorEntryMappings(validatorEntry) @@ -990,12 +990,12 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) // Store m2's ValidatorEntry in the db with TotalStake = 300 nanos. 
- votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m2PkBytes) + votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m2PkBytes) m2ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m2PKID, TotalStakeAmountNanos: uint256.NewInt().SetUint64(300), VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + VotingAuthorization: votingAuthorization, } utxoView._setValidatorEntryMappings(m2ValidatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -1007,12 +1007,12 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) // Store m3's ValidatorEntry in the db with TotalStake = 600 nanos. - votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m3PkBytes) + votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m3PkBytes) m3ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m3PKID, TotalStakeAmountNanos: uint256.NewInt().SetUint64(600), VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + VotingAuthorization: votingAuthorization, } utxoView._setValidatorEntryMappings(m3ValidatorEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -1045,12 +1045,12 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.True(t, utxoView.ValidatorPKIDToValidatorEntry[*m3ValidatorEntry.ValidatorPKID].isDeleted) // Store m4's ValidatorEntry in the UtxoView with TotalStake = 50 nanos. 
- votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m4PkBytes) + votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m4PkBytes) m4ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m4PKID, TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + VotingAuthorization: votingAuthorization, } utxoView._setValidatorEntryMappings(m4ValidatorEntry) @@ -1069,12 +1069,12 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { ) // Store m5's jailed ValidatorEntry in the UtxoView with TotalStake = 500 nanos. - votingPublicKey, votingSignature = _generateVotingPublicKeyAndSignature(t, m5PkBytes) + votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m5PkBytes) m5ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m5PKID, TotalStakeAmountNanos: uint256.NewInt().SetUint64(500), VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + VotingAuthorization: votingAuthorization, JailedAtEpochNumber: 1, } utxoView._setValidatorEntryMappings(m5ValidatorEntry) @@ -1175,12 +1175,12 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { } { // m0 registers as a validator with DisableDelegatedStake = FALSE. 
- votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m0.com")}, DisableDelegatedStake: false, VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1196,12 +1196,12 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { } { // m0 updates DisableDelegatedStake = TRUE. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m0.com")}, DisableDelegatedStake: true, VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1241,12 +1241,12 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { } { // m0 updates DisableDelegatedStake = FALSE. 
- votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m0.com")}, DisableDelegatedStake: false, VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1273,12 +1273,12 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { } { // m0 tries to update DisableDelegateStake = TRUE. Errors. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://m0.com")}, DisableDelegatedStake: true, VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.Error(t, err) @@ -1364,11 +1364,11 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { } { // m0 registers as a validator. 
- votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://m0.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) require.NoError(t, err) @@ -1563,11 +1563,11 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { } { // m0 registers as a validator. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, m0PkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } extraData := map[string][]byte{"TestKey": []byte("TestValue1")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) @@ -1864,11 +1864,11 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { } { // sender registers as a validator. 
- votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, senderPkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, senderPkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte("https://example.com")}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, senderPkString, senderPrivString, registerMetadata, nil, true) require.NoError(t, err) @@ -2010,12 +2010,12 @@ func _submitUnjailValidatorTxn( return fees, nil } -func _generateVotingPublicKeyAndSignature(t *testing.T, transactorPkBytes []byte) (*bls.PublicKey, *bls.Signature) { +func _generateVotingPublicKeyAndAuthorization(t *testing.T, transactorPkBytes []byte) (*bls.PublicKey, *bls.Signature) { blsPrivateKey, err := bls.NewPrivateKey() require.NoError(t, err) votingPublicKey := blsPrivateKey.PublicKey() - signaturePayload := CreateValidatorVotingSignaturePayload(transactorPkBytes) - votingSignature, err := blsPrivateKey.Sign(signaturePayload) + votingAuthorizationPayload := CreateValidatorVotingAuthorizationPayload(transactorPkBytes) + votingAuthorization, err := blsPrivateKey.Sign(votingAuthorizationPayload) require.NoError(t, err) - return votingPublicKey, votingSignature + return votingPublicKey, votingAuthorization } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 241ae31d7..de250fca4 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -117,11 +117,11 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) // Validator registers. 
- votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, pkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, pkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, publicKey, privateKey, registerMetadata, nil, true) require.NoError(t, err) diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 765ef60c3..e68493ab6 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -73,11 +73,11 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.NoError(t, err) // Validator registers. - votingPublicKey, votingSignature := _generateVotingPublicKeyAndSignature(t, pkBytes) + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, pkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, - VotingPublicKey: votingPublicKey, - VotingSignature: votingSignature, + Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, publicKey, privateKey, registerMetadata, nil, true) require.NoError(t, err) From 6ac52c7b8dd49cb034dc03babf220add1e6c23fb Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Fri, 16 Jun 2023 09:24:18 -0400 Subject: [PATCH 110/762] Refactor merging GlobalParamsEntry defaults. 
--- lib/block_view.go | 20 ++--- lib/block_view_stake.go | 10 ++- lib/block_view_stake_test.go | 2 +- lib/block_view_validator.go | 33 +++++---- lib/block_view_validator_test.go | 2 +- lib/constants.go | 19 +++-- lib/pos_epoch_complete_hook.go | 8 +- lib/pos_epoch_complete_hook_test.go | 23 +++--- lib/pos_leader_schedule.go | 8 +- lib/pos_snapshot_entries.go | 109 +++++++++------------------- 10 files changed, 104 insertions(+), 130 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index ea4aa7996..c22e10a7a 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3017,33 +3017,33 @@ func (bav *UtxoView) _connectUpdateGlobalParams( if blockHeight >= bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { var bytesRead int - if len(extraData[StakeLockupEpochDuration.ToString()]) > 0 { - newGlobalParamsEntry.StakeLockupEpochDuration, bytesRead = Uvarint(extraData[StakeLockupEpochDuration.ToString()]) + if len(extraData[StakeLockupEpochDurationKey]) > 0 { + newGlobalParamsEntry.StakeLockupEpochDuration, bytesRead = Uvarint(extraData[StakeLockupEpochDurationKey]) if bytesRead <= 0 { return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode StakeLockupEpochDuration as uint64") } } - if len(extraData[ValidatorJailEpochDuration.ToString()]) > 0 { - newGlobalParamsEntry.ValidatorJailEpochDuration, bytesRead = Uvarint(extraData[ValidatorJailEpochDuration.ToString()]) + if len(extraData[ValidatorJailEpochDurationKey]) > 0 { + newGlobalParamsEntry.ValidatorJailEpochDuration, bytesRead = Uvarint(extraData[ValidatorJailEpochDurationKey]) if bytesRead <= 0 { return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode ValidatorJailEpochDuration as uint64") } } - if len(extraData[LeaderScheduleMaxNumValidators.ToString()]) > 0 { - newGlobalParamsEntry.LeaderScheduleMaxNumValidators, bytesRead = Uvarint(extraData[LeaderScheduleMaxNumValidators.ToString()]) + if len(extraData[LeaderScheduleMaxNumValidatorsKey]) > 0 { + 
newGlobalParamsEntry.LeaderScheduleMaxNumValidators, bytesRead = Uvarint(extraData[LeaderScheduleMaxNumValidatorsKey]) if bytesRead <= 0 { return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode LeaderScheduleMaxNumValidators as uint64") } } - if len(extraData[EpochDurationNumBlocks.ToString()]) > 0 { - newGlobalParamsEntry.EpochDurationNumBlocks, bytesRead = Uvarint(extraData[EpochDurationNumBlocks.ToString()]) + if len(extraData[EpochDurationNumBlocksKey]) > 0 { + newGlobalParamsEntry.EpochDurationNumBlocks, bytesRead = Uvarint(extraData[EpochDurationNumBlocksKey]) if bytesRead <= 0 { return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode EpochDurationNumBlocks as uint64") } } - if len(extraData[JailInactiveValidatorGracePeriodEpochs.ToString()]) > 0 { + if len(extraData[JailInactiveValidatorGracePeriodEpochsKey]) > 0 { newGlobalParamsEntry.JailInactiveValidatorGracePeriodEpochs, bytesRead = Uvarint( - extraData[JailInactiveValidatorGracePeriodEpochs.ToString()], + extraData[JailInactiveValidatorGracePeriodEpochsKey], ) if bytesRead <= 0 { return 0, 0, nil, fmt.Errorf( diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index ef24e56e2..b591a29a2 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1954,14 +1954,16 @@ func (bav *UtxoView) IsValidUnlockStakeMetadata(transactorPkBytes []byte, metada return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error retrieving CurrentEpochNumber: ") } - // Retrieve the SnapshotGlobalParam: StakeLockupEpochDuration. - stakeLockupEpochDuration, err := bav.GetSnapshotGlobalParam(StakeLockupEpochDuration) + // Retrieve the SnapshotGlobalParamsEntry.StakeLockupEpochDuration. 
+ snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() if err != nil { - return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error retrieving snapshot StakeLockupEpochDuration: ") + return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error retrieving SnapshotGlobalParamsEntry: ") } // Calculate UnlockableAtEpochNumber. - unlockableAtEpochNumber, err := SafeUint64().Add(metadata.EndEpochNumber, stakeLockupEpochDuration) + unlockableAtEpochNumber, err := SafeUint64().Add( + metadata.EndEpochNumber, snapshotGlobalParamsEntry.StakeLockupEpochDuration, + ) if err != nil { return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error calculating UnlockableAtEpochNumber: ") } diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index c20a61c43..29a7eabc4 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -1820,7 +1820,7 @@ func TestStakeLockupEpochDuration(t *testing.T) { testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - map[string][]byte{StakeLockupEpochDuration.ToString(): UintToBuf(3)}, + map[string][]byte{StakeLockupEpochDurationKey: UintToBuf(3)}, ) } { diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index b86f765b6..7bcf2fbc6 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1724,14 +1724,16 @@ func (bav *UtxoView) IsValidUnjailValidatorMetadata(transactorPublicKey []byte) return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error retrieving CurrentEpochNumber: ") } - // Retrieve the SnapshotGlobalParam: ValidatorJailEpochDuration. - validatorJailEpochDuration, err := bav.GetSnapshotGlobalParam(ValidatorJailEpochDuration) + // Retrieve the SnapshotGlobalParamsEntry.ValidatorJailEpochDuration. 
+ snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() if err != nil { - return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error retrieving snapshot ValidatorJailEpochDuration: ") + return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error retrieving SnapshotGlobalParamsEntry: ") } // Calculate UnjailableAtEpochNumber. - unjailableAtEpochNumber, err := SafeUint64().Add(validatorEntry.JailedAtEpochNumber, validatorJailEpochDuration) + unjailableAtEpochNumber, err := SafeUint64().Add( + validatorEntry.JailedAtEpochNumber, snapshotGlobalParamsEntry.ValidatorJailEpochDuration, + ) if err != nil { return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error calculating UnjailableAtEpochNumber: ") } @@ -1957,16 +1959,12 @@ func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry, blockHe return false, nil } - // Retrieve the SnapshotGlobalParam: JailInactiveValidatorGracePeriodEpochs. - jailInactiveValidatorGracePeriodEpochs, err := bav.GetSnapshotGlobalParam(JailInactiveValidatorGracePeriodEpochs) - if err != nil { - return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving JailInactiveValidatorGracePeriodEpochs: ") - } - - // Retrieve the SnapshotGlobalParam: EpochDurationNumBlocks. - epochDurationNumBlocks, err := bav.GetSnapshotGlobalParam(EpochDurationNumBlocks) + // Retrieve the SnapshotGlobalParamsEntry: + // - JailInactiveValidatorGracePeriodEpochs + // - EpochDurationNumBlocks + snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() if err != nil { - return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving EpochDurationNumBlocks: ") + return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving SnapshotGlobalParamsEntry: ") } // Calculate if enough blocks have passed since cutting over to PoS to start jailing validators. 
@@ -1975,7 +1973,10 @@ func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry, blockHe // elapsing since all validators' LastActiveAtEpochNumber = 0 prior to the PoS cut-over. // // StartJailingBlockHeight = ConsensusCutoverBlockHeight + (JailInactiveValidatorGracePeriodEpochs * EpochDurationNumBlocks) - startJailingGracePeriodBlocks, err := SafeUint64().Mul(jailInactiveValidatorGracePeriodEpochs, epochDurationNumBlocks) + startJailingGracePeriodBlocks, err := SafeUint64().Mul( + snapshotGlobalParamsEntry.JailInactiveValidatorGracePeriodEpochs, + snapshotGlobalParamsEntry.EpochDurationNumBlocks, + ) if err != nil { return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error calculating StartJailingGracePeriod: ") } @@ -1996,7 +1997,9 @@ func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry, blockHe } // Calculate the JailAtEpochNumber. - jailAtEpochNumber, err := SafeUint64().Add(validatorEntry.LastActiveAtEpochNumber, jailInactiveValidatorGracePeriodEpochs) + jailAtEpochNumber, err := SafeUint64().Add( + validatorEntry.LastActiveAtEpochNumber, snapshotGlobalParamsEntry.JailInactiveValidatorGracePeriodEpochs, + ) if err != nil { return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error calculating JailAtEpochNumber: ") } diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index ede4a5dae..628743548 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -1558,7 +1558,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - map[string][]byte{ValidatorJailEpochDuration.ToString(): UintToBuf(3)}, + map[string][]byte{ValidatorJailEpochDurationKey: UintToBuf(3)}, ) } { diff --git a/lib/constants.go b/lib/constants.go index 44f38127a..ef767666d 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1316,13 +1316,18 @@ const ( IsFrozenKey = "IsFrozen" // Keys for a GlobalParamUpdate 
transaction's extra data map. - USDCentsPerBitcoinKey = "USDCentsPerBitcoin" - MinNetworkFeeNanosPerKBKey = "MinNetworkFeeNanosPerKB" - CreateProfileFeeNanosKey = "CreateProfileFeeNanos" - CreateNFTFeeNanosKey = "CreateNFTFeeNanos" - MaxCopiesPerNFTKey = "MaxCopiesPerNFT" - MaxNonceExpirationBlockHeightOffsetKey = "MaxNonceExpirationBlockHeightOffset" - ForbiddenBlockSignaturePubKeyKey = "ForbiddenBlockSignaturePubKey" + USDCentsPerBitcoinKey = "USDCentsPerBitcoin" + MinNetworkFeeNanosPerKBKey = "MinNetworkFeeNanosPerKB" + CreateProfileFeeNanosKey = "CreateProfileFeeNanos" + CreateNFTFeeNanosKey = "CreateNFTFeeNanos" + MaxCopiesPerNFTKey = "MaxCopiesPerNFT" + MaxNonceExpirationBlockHeightOffsetKey = "MaxNonceExpirationBlockHeightOffset" + ForbiddenBlockSignaturePubKeyKey = "ForbiddenBlockSignaturePubKey" + StakeLockupEpochDurationKey = "StakeLockupEpochDuration" + ValidatorJailEpochDurationKey = "ValidatorJailEpochDuration" + LeaderScheduleMaxNumValidatorsKey = "LeaderScheduleMaxNumValidators" + EpochDurationNumBlocksKey = "EpochDurationNumBlocks" + JailInactiveValidatorGracePeriodEpochsKey = "JailInactiveValidatorGracePeriodEpochs" DiamondLevelKey = "DiamondLevel" DiamondPostHashKey = "DiamondPostHash" diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index ed523f545..7424f1a72 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -79,14 +79,14 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // TODO: Delete old snapshots that are no longer used. - // Retrieve the SnapshotGlobalParam: EpochDurationNumBlocks. - epochDurationNumBlocks, err := bav.GetSnapshotGlobalParam(EpochDurationNumBlocks) + // Retrieve the SnapshotGlobalParamsEntry.EpochDurationNumBlocks. 
+ snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() if err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving snapshot EpochDurationNumBlocks: ") + return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving SnapshotGlobalParamsEntry: ") } // Calculate the NextEpochFinalBlockHeight. - nextEpochFinalBlockHeight, err := SafeUint64().Add(blockHeight, epochDurationNumBlocks) + nextEpochFinalBlockHeight, err := SafeUint64().Add(blockHeight, snapshotGlobalParamsEntry.EpochDurationNumBlocks) if err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: problem calculating NextEpochFinalBlockHeight: ") } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index de250fca4..522129103 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -192,8 +192,8 @@ func TestRunEpochCompleteHook(t *testing.T) { paramUpdaterPub, paramUpdaterPriv, map[string][]byte{ - ValidatorJailEpochDuration.ToString(): UintToBuf(4), - JailInactiveValidatorGracePeriodEpochs.ToString(): UintToBuf(10), + ValidatorJailEpochDurationKey: UintToBuf(4), + JailInactiveValidatorGracePeriodEpochsKey: UintToBuf(10), }, ) @@ -214,10 +214,11 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(0)) - // Test SnapshotGlobalParamsEntry is nil. + // Test SnapshotGlobalParamsEntry is non-nil and contains the default values. snapshotGlobalParamsEntry, err := utxoView().GetSnapshotGlobalParamsEntry() require.NoError(t, err) - require.Nil(t, snapshotGlobalParamsEntry) + require.NotNil(t, snapshotGlobalParamsEntry) + require.Equal(t, snapshotGlobalParamsEntry.ValidatorJailEpochDuration, uint64(3)) _assertEmptyValidatorSnapshots() } @@ -361,16 +362,16 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test snapshotting changing GlobalParams. // Update StakeLockupEpochDuration from default of 3 to 2. 
- stakeLockupEpochDuration, err := utxoView().GetSnapshotGlobalParam(StakeLockupEpochDuration) + snapshotGlobalsParamsEntry, err := utxoView().GetSnapshotGlobalParamsEntry() require.NoError(t, err) - require.Equal(t, stakeLockupEpochDuration, uint64(3)) + require.Equal(t, snapshotGlobalsParamsEntry.StakeLockupEpochDuration, uint64(3)) _updateGlobalParamsEntryWithExtraData( testMeta, testMeta.feeRateNanosPerKb, paramUpdaterPub, paramUpdaterPriv, - map[string][]byte{StakeLockupEpochDuration.ToString(): UintToBuf(2)}, + map[string][]byte{StakeLockupEpochDurationKey: UintToBuf(2)}, ) require.Equal(t, utxoView().GlobalParamsEntry.StakeLockupEpochDuration, uint64(2)) @@ -379,17 +380,17 @@ func TestRunEpochCompleteHook(t *testing.T) { _runOnEpochCompleteHook() // Snapshot StakeLockupEpochDuration is still 3. - stakeLockupEpochDuration, err = utxoView().GetSnapshotGlobalParam(StakeLockupEpochDuration) + snapshotGlobalsParamsEntry, err = utxoView().GetSnapshotGlobalParamsEntry() require.NoError(t, err) - require.Equal(t, stakeLockupEpochDuration, uint64(3)) + require.Equal(t, snapshotGlobalsParamsEntry.StakeLockupEpochDuration, uint64(3)) // Run OnEpochCompleteHook(). _runOnEpochCompleteHook() // Snapshot StakeLockupEpochDuration is updated to 2. - stakeLockupEpochDuration, err = utxoView().GetSnapshotGlobalParam(StakeLockupEpochDuration) + snapshotGlobalsParamsEntry, err = utxoView().GetSnapshotGlobalParamsEntry() require.NoError(t, err) - require.Equal(t, stakeLockupEpochDuration, uint64(2)) + require.Equal(t, snapshotGlobalsParamsEntry.StakeLockupEpochDuration, uint64(2)) } { // Test snapshotting changing validator set. 
diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 072eda3fb..5193b84fd 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -13,14 +13,14 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving CurrentRandomSeedHash: ") } - // Retrieve the SnapshotGlobalParam: LeaderScheduleMaxNumValidators. - leaderScheduleMaxNumValidators, err := bav.GetSnapshotGlobalParam(LeaderScheduleMaxNumValidators) + // Retrieve the SnapshotGlobalParamsEntry.LeaderScheduleMaxNumValidators. + snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving snapshot LeaderScheduleMaxNumValidators: ") + return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving SnapshotGlobalParamsEntry: ") } // Retrieve top, active validators ordered by stake. - validatorEntries, err := bav.GetTopActiveValidatorsByStake(leaderScheduleMaxNumValidators) + validatorEntries, err := bav.GetTopActiveValidatorsByStake(snapshotGlobalParamsEntry.LeaderScheduleMaxNumValidators) if err != nil { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving top ValidatorEntries: ") } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 52e423947..f490547d2 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -29,77 +29,8 @@ func (bav *UtxoView) GetSnapshotEpochNumber() (uint64, error) { // SnapshotGlobalParamsEntry // -type SnapshotGlobalParam string - -const ( - StakeLockupEpochDuration SnapshotGlobalParam = "StakeLockupEpochDuration" - ValidatorJailEpochDuration SnapshotGlobalParam = "ValidatorJailEpochDuration" - LeaderScheduleMaxNumValidators SnapshotGlobalParam = "LeaderScheduleMaxNumValidators" - EpochDurationNumBlocks SnapshotGlobalParam = "EpochDurationNumBlocks" - 
JailInactiveValidatorGracePeriodEpochs SnapshotGlobalParam = "JailInactiveValidatorGracePeriodEpochs" -) - -func (snapshotGlobalParam SnapshotGlobalParam) ToString() string { - return string(snapshotGlobalParam) -} - -func (bav *UtxoView) GetCurrentGlobalParam(field SnapshotGlobalParam) uint64 { - return _getGlobalParam(bav, bav.GlobalParamsEntry, field) -} - -func (bav *UtxoView) GetSnapshotGlobalParam(field SnapshotGlobalParam) (uint64, error) { - // Retrieve the SnapshotGlobalParamsEntry. - globalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() - if err != nil { - return 0, errors.Wrapf(err, "GetSnapshotGlobalParam: problem retrieving SnapshotGlobalParamsEntry: ") - } - if globalParamsEntry == nil { - // We will use the default values below. - globalParamsEntry = &GlobalParamsEntry{} - } - return _getGlobalParam(bav, globalParamsEntry, field), nil -} - -func _getGlobalParam(bav *UtxoView, globalParamsEntry *GlobalParamsEntry, field SnapshotGlobalParam) uint64 { - // Return the corresponding field. Either the updated value if - // set on the GlobalParamsEntry or the default value otherwise. 
- switch field { - case StakeLockupEpochDuration: - if globalParamsEntry.StakeLockupEpochDuration > 0 { - return globalParamsEntry.StakeLockupEpochDuration - } - return bav.Params.DefaultStakeLockupEpochDuration - - case ValidatorJailEpochDuration: - if globalParamsEntry.ValidatorJailEpochDuration > 0 { - return globalParamsEntry.ValidatorJailEpochDuration - } - return bav.Params.DefaultValidatorJailEpochDuration - - case LeaderScheduleMaxNumValidators: - if globalParamsEntry.LeaderScheduleMaxNumValidators > 0 { - return globalParamsEntry.LeaderScheduleMaxNumValidators - } - return bav.Params.DefaultLeaderScheduleMaxNumValidators - - case EpochDurationNumBlocks: - if globalParamsEntry.EpochDurationNumBlocks > 0 { - return globalParamsEntry.EpochDurationNumBlocks - } - return bav.Params.DefaultEpochDurationNumBlocks - - case JailInactiveValidatorGracePeriodEpochs: - if globalParamsEntry.JailInactiveValidatorGracePeriodEpochs > 0 { - return globalParamsEntry.JailInactiveValidatorGracePeriodEpochs - } - return bav.Params.DefaultJailInactiveValidatorGracePeriodEpochs - - default: - // This can never happen in production since we pass in a restricted enum - // field param. This would only catch the edge case where a developer adds - // a new SnapshotGlobalParam but forgets to add it to this case statement. - panic(fmt.Sprintf("GetSnapshotGlobalParam: invalid field provided: %s", field)) - } +func (bav *UtxoView) GetCurrentGlobalParamsEntry() *GlobalParamsEntry { + return _mergeGlobalParamEntryDefaults(bav, bav.GlobalParamsEntry) } func (bav *UtxoView) GetSnapshotGlobalParamsEntry() (*GlobalParamsEntry, error) { @@ -110,7 +41,7 @@ func (bav *UtxoView) GetSnapshotGlobalParamsEntry() (*GlobalParamsEntry, error) } // Check the UtxoView first. 
if globalParamsEntry, exists := bav.SnapshotGlobalParamEntries[snapshotAtEpochNumber]; exists { - return globalParamsEntry, nil + return _mergeGlobalParamEntryDefaults(bav, globalParamsEntry), nil } // If we don't have it in the UtxoView, check the db. globalParamsEntry, err := DBGetSnapshotGlobalParamsEntry(bav.Handle, bav.Snapshot, snapshotAtEpochNumber) @@ -124,7 +55,39 @@ func (bav *UtxoView) GetSnapshotGlobalParamsEntry() (*GlobalParamsEntry, error) // Cache the result in the UtxoView. bav._setSnapshotGlobalParamsEntry(globalParamsEntry, snapshotAtEpochNumber) } - return globalParamsEntry, nil + return _mergeGlobalParamEntryDefaults(bav, globalParamsEntry), nil +} + +func _mergeGlobalParamEntryDefaults(bav *UtxoView, globalParamsEntry *GlobalParamsEntry) *GlobalParamsEntry { + // Merge the input GlobalParamsEntry with the default param values. + if globalParamsEntry == nil { + // This could happen before we have any SnapshotGlobalParamEntries set. + // In this case, we fall back to all default values. + globalParamsEntry = &GlobalParamsEntry{} + } + + // Take a copy, so we don't modify the original. + globalParamsEntryCopy := globalParamsEntry.Copy() + + // Merge the default values. 
+ if globalParamsEntryCopy.StakeLockupEpochDuration == 0 { + globalParamsEntryCopy.StakeLockupEpochDuration = bav.Params.DefaultStakeLockupEpochDuration + } + if globalParamsEntryCopy.ValidatorJailEpochDuration == 0 { + globalParamsEntryCopy.ValidatorJailEpochDuration = bav.Params.DefaultValidatorJailEpochDuration + } + if globalParamsEntryCopy.LeaderScheduleMaxNumValidators == 0 { + globalParamsEntryCopy.LeaderScheduleMaxNumValidators = bav.Params.DefaultLeaderScheduleMaxNumValidators + } + if globalParamsEntryCopy.EpochDurationNumBlocks == 0 { + globalParamsEntryCopy.EpochDurationNumBlocks = bav.Params.DefaultEpochDurationNumBlocks + } + if globalParamsEntryCopy.JailInactiveValidatorGracePeriodEpochs == 0 { + globalParamsEntryCopy.JailInactiveValidatorGracePeriodEpochs = bav.Params.DefaultJailInactiveValidatorGracePeriodEpochs + } + + // Return the merged result. + return globalParamsEntryCopy } func (bav *UtxoView) _setSnapshotGlobalParamsEntry(globalParamsEntry *GlobalParamsEntry, snapshotAtEpochNumber uint64) { From 46cad1636db854d3446b762e83029800d63308d7 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 21 Jun 2023 14:30:08 -0400 Subject: [PATCH 111/762] Add comments. --- bls/signature.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/bls/signature.go b/bls/signature.go index e7fb72494..b05dbbee7 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -115,10 +115,18 @@ func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, } func (privateKey *PrivateKey) MarshalJSON() ([]byte, error) { + // This is called automatically by the JSON library when converting a + // bls.PrivateKey to JSON. This is currently not used, since the client + // never shares their bls.PrivateKey over the network, but is included + // here are as a nicety utility for completeness. 
 return json.Marshal(privateKey.ToString())
 }
 
 func (privateKey *PrivateKey) UnmarshalJSON(data []byte) error {
+	// This is called automatically by the JSON library when converting a
+	// bls.PrivateKey from JSON. This is currently not used, since the client
+	// never shares their bls.PrivateKey over the network, but is included
+	// here as a utility for completeness.
 	privateKeyString := ""
 	err := json.Unmarshal(data, &privateKeyString)
 	if err != nil {
@@ -191,10 +199,16 @@ func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, erro
 }
 
 func (publicKey *PublicKey) MarshalJSON() ([]byte, error) {
+	// This is called automatically by the JSON library when converting a
+	// bls.PublicKey to JSON. This is useful when passing a bls.PublicKey
+	// back and forth from the backend to the frontend as JSON.
 	return json.Marshal(publicKey.ToString())
 }
 
 func (publicKey *PublicKey) UnmarshalJSON(data []byte) error {
+	// This is called automatically by the JSON library when converting a
+	// bls.PublicKey from JSON. This is useful when passing a bls.PublicKey
+	// back and forth from the frontend to the backend as JSON.
 	publicKeyString := ""
 	err := json.Unmarshal(data, &publicKeyString)
 	if err != nil {
@@ -268,10 +282,16 @@ func (signature *Signature) FromString(signatureString string) (*Signature, erro
 }
 
 func (signature *Signature) MarshalJSON() ([]byte, error) {
+	// This is called automatically by the JSON library when converting a
+	// bls.Signature to JSON. This is useful when passing a bls.Signature
+	// back and forth from the backend to the frontend as JSON.
 	return json.Marshal(signature.ToString())
 }
 
 func (signature *Signature) UnmarshalJSON(data []byte) error {
+	// This is called automatically by the JSON library when converting a
+	// bls.Signature from JSON. This is useful when passing a bls.Signature
+	// back and forth from the frontend to the backend as JSON.
signatureString := "" err := json.Unmarshal(data, &signatureString) if err != nil { From 01e0e9d1f9c0fbacd9ae486b2c6f0efb5a1b7768 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 21 Jun 2023 15:12:49 -0400 Subject: [PATCH 112/762] Address PR feedback pt 1. --- lib/block_view_stake.go | 2 +- lib/db_utils.go | 18 +++++++++--------- lib/pos_snapshot_entries.go | 30 ++++++++++++++++++++++++------ 3 files changed, 34 insertions(+), 16 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index b591a29a2..0c8d6656c 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -536,7 +536,7 @@ func DBKeyForStakeByValidator(stakeEntry *StakeEntry) []byte { func DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(lockedStakeEntry *LockedStakeEntry) []byte { data := DBPrefixKeyForLockedStakeByValidatorAndStaker(lockedStakeEntry) - data = append(data, UintToBuf(lockedStakeEntry.LockedAtEpochNumber)...) + data = append(data, EncodeUint64(lockedStakeEntry.LockedAtEpochNumber)...) return data } diff --git a/lib/db_utils.go b/lib/db_utils.go index 0ed4630fe..c6a28f06f 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -478,11 +478,11 @@ type DBPrefixes struct { PrefixNoncePKIDIndex []byte `prefix_id:"[77]" is_state:"true"` // PrefixValidatorByPKID: Retrieve a validator by PKID. - // Prefix, ValidatorPKID -> ValidatorEntry + // Prefix, -> ValidatorEntry PrefixValidatorByPKID []byte `prefix_id:"[78]" is_state:"true"` // PrefixValidatorByStatusAndStake: Retrieve the top N active validators by stake. - // Prefix, Status, TotalStakeAmountNanos, ValidatorPKID -> nil + // Prefix, , , -> nil // Note that we save space by storing a nil value and parsing the ValidatorPKID from the key. PrefixValidatorByStatusAndStake []byte `prefix_id:"[79]" is_state:"true"` @@ -491,11 +491,11 @@ type DBPrefixes struct { PrefixGlobalActiveStakeAmountNanos []byte `prefix_id:"[80]" is_state:"true"` // PrefixStakeByValidatorAndStaker: Retrieve a StakeEntry. 
- // Prefix, ValidatorPKID, StakerPKID -> StakeEntry + // Prefix, , -> StakeEntry PrefixStakeByValidatorAndStaker []byte `prefix_id:"[81]" is_state:"true"` // PrefixLockedStakeByValidatorAndStakerAndLockedAt: Retrieve a LockedStakeEntry. - // Prefix, ValidatorPKID, StakerPKID, LockedAtEpochNumber -> LockedStakeEntry + // Prefix, , , -> LockedStakeEntry // // The way staking works is that staking to a validator is instant and creates a StakeEntry // immediately, but UNstaking from a validator has a "cooldown" period before the funds @@ -530,24 +530,24 @@ type DBPrefixes struct { PrefixCurrentRandomSeedHash []byte `prefix_id:"[84]" is_state:"true"` // PrefixSnapshotGlobalParamsEntry: Retrieve a snapshot GlobalParamsEntry by SnapshotAtEpochNumber. - // Prefix, SnapshotAtEpochNumber -> *GlobalParamsEntry + // Prefix, -> *GlobalParamsEntry PrefixSnapshotGlobalParamsEntry []byte `prefix_id:"[85]" is_state:"true"` // PrefixSnapshotValidatorByPKID: Retrieve a snapshot ValidatorEntry by . - // Prefix, SnapshotAtEpochNumber, ValidatorPKID -> *ValidatorEntry + // Prefix, , -> *ValidatorEntry PrefixSnapshotValidatorByPKID []byte `prefix_id:"[86]" is_state:"true"` // PrefixSnapshotValidatorByStatusAndStake: Retrieve stake-ordered active ValidatorEntries by SnapshotAtEpochNumber. - // Prefix, SnapshotAtEpochNumber, Status, TotalStakeAmountNanos, ValidatorPKID -> nil + // Prefix, , , , -> nil // Note: we parse the ValidatorPKID from the key and the value is nil to save space. PrefixSnapshotValidatorByStatusAndStake []byte `prefix_id:"[87]" is_state:"true"` // PrefixSnapshotGlobalActiveStakeAmountNanos: Retrieve a snapshot GlobalActiveStakeAmountNanos by SnapshotAtEpochNumber. - // Prefix, SnapshotAtEpochNumber -> *uint256.Int + // Prefix, -> *uint256.Int PrefixSnapshotGlobalActiveStakeAmountNanos []byte `prefix_id:"[88]" is_state:"true"` // PrefixSnapshotLeaderSchedule: Retrieve a ValidatorPKID by . 
- // Prefix, SnapshotAtEpochNumber, LeaderIndex -> ValidatorPKID + // Prefix, , -> ValidatorPKID PrefixSnapshotLeaderSchedule []byte `prefix_id:"[89]" is_state:"true"` // NEXT_TAG: 90 diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index f490547d2..46ee76c4c 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -20,6 +20,24 @@ func (bav *UtxoView) GetSnapshotEpochNumber() (uint64, error) { return 0, errors.Wrapf(err, "GetSnapshotEpochNumber: problem retrieving CurrentEpochNumber: ") } if currentEpochNumber < SnapshotLookbackNumEpochs { + // We want to return 0 in this case and not error. We start snapshotting with our StateSetup block height, + // so we should have the correct number of snapshots and not hit this case once we hit the ConsensusCutover + // block height. This case will only be hit immediately following the StateSetup block height. We run one + // OnEpochCompleteHook right away on the StateSetup block height which will increment our CurrentEpochNumber + // from zero (the starting default) to one. Then we wait one epoch and run our second OnEpochCompleteHook to + // increment our CurrentEpochNumber from one to two. At this point, we will have the correct number of + // snapshots and no longer hit this edge case. + // + // The problem is what about snapshot values we need to use in that first block where CurrentBlockHeight = + // StateSetup block height and then the first epoch after that? The only snapshot values that we use relate + // to our new PoS txn types. We pull the snapshot GlobalParamsEntry to retrieve the StakeLockupEpochDuration + // and the ValidatorJailEpochDuration. Both of these impact the new PoS txn types which are unlocked after + // the StateSetup block height. The ValidatorJailEpochDuration value doesn't really matter since no validators + // will be jailed until the ConsensusCutover block height. 
For the StakeLockupEpochDuration (and all other + // snapshot GlobalParamsEntry values), if there is no snapshot value, we return an empty GlobalParamsEntry with + // just our defaults, which is what we intend. There's one other small edge case here which is if we update the + // StakeLockupEpochDuration parameter within that first block (which would be weird and should not happen), + // then that value will take immediate effect in the first epoch with no lagged snapshot wait period. return 0, nil } return SafeUint64().Sub(currentEpochNumber, SnapshotLookbackNumEpochs) @@ -121,7 +139,7 @@ func (bav *UtxoView) _flushSnapshotGlobalParamsEntryToDbWithTxn(txn *badger.Txn, func DBKeyForSnapshotGlobalParamsEntry(snapshotEpochNumber uint64) []byte { key := append([]byte{}, Prefixes.PrefixSnapshotGlobalParamsEntry...) - key = append(key, UintToBuf(snapshotEpochNumber)...) + key = append(key, EncodeUint64(snapshotEpochNumber)...) return key } @@ -358,14 +376,14 @@ func (bav *UtxoView) _flushSnapshotValidatorEntriesToDbWithTxn(txn *badger.Txn, func DBKeyForSnapshotValidatorByPKID(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) []byte { key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByPKID...) - key = append(key, UintToBuf(snapshotAtEpochNumber)...) + key = append(key, EncodeUint64(snapshotAtEpochNumber)...) key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) return key } func DBKeyForSnapshotValidatorByStake(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) []byte { key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStatusAndStake...) - key = append(key, UintToBuf(snapshotAtEpochNumber)...) + key = append(key, EncodeUint64(snapshotAtEpochNumber)...) key = append(key, EncodeUint8(uint8(validatorEntry.Status()))...) key = append(key, FixedWidthEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) 
@@ -425,7 +443,7 @@ func DBGetSnapshotTopActiveValidatorsByStake( // Retrieve top N active ValidatorEntry keys by stake. key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStatusAndStake...) - key = append(key, UintToBuf(snapshotAtEpochNumber)...) + key = append(key, EncodeUint64(snapshotAtEpochNumber)...) key = append(key, EncodeUint8(uint8(ValidatorStatusActive))...) keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( handle, key, int(limit), nil, true, validatorKeysToSkip, @@ -576,7 +594,7 @@ func (bav *UtxoView) _flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn * func DBKeyForSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumber uint64) []byte { key := append([]byte{}, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanos...) - key = append(key, UintToBuf(snapshotAtEpochNumber)...) + key = append(key, EncodeUint64(snapshotAtEpochNumber)...) return key } @@ -692,7 +710,7 @@ func (bav *UtxoView) _flushSnapshotLeaderScheduleToDbWithTxn(txn *badger.Txn, bl func DBKeyForSnapshotLeaderScheduleValidator(leaderIndex uint16, snapshotAtEpochNumber uint64) []byte { data := append([]byte{}, Prefixes.PrefixSnapshotLeaderSchedule...) - data = append(data, UintToBuf(snapshotAtEpochNumber)...) + data = append(data, EncodeUint64(snapshotAtEpochNumber)...) data = append(data, EncodeUint16(leaderIndex)...) return data } From 6a4264207a1cfdcc2182f53ca27ae2b09d987cf6 Mon Sep 17 00:00:00 2001 From: mattfoley8 Date: Wed, 21 Jun 2023 15:41:17 -0400 Subject: [PATCH 113/762] Delete then set snapshot validator entries. 
--- lib/pos_snapshot_entries.go | 81 ++++++++++++++++++++++++++++++++++++- 1 file changed, 80 insertions(+), 1 deletion(-) diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 46ee76c4c..464a283ad 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -353,7 +353,21 @@ func (bav *UtxoView) _setSnapshotValidatorEntry(validatorEntry *ValidatorEntry, bav.SnapshotValidatorEntries[mapKey] = validatorEntry.Copy() } +func (bav *UtxoView) _deleteSnapshotValidatorEntry(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) { + // This function shouldn't be called with nil. + if validatorEntry == nil { + glog.Errorf("_deleteSnapshotValidatorEntry: called with nil entry, this should never happen") + return + } + // Create a tombstone entry. + tombstoneEntry := *validatorEntry + tombstoneEntry.isDeleted = true + // Set the mappings to the point to the tombstone entry. + bav._setSnapshotValidatorEntry(&tombstoneEntry, snapshotAtEpochNumber) +} + func (bav *UtxoView) _flushSnapshotValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + // Delete all SnapshotValidatorEntries from the db that are in the UtxoView. for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { if validatorEntry == nil { return fmt.Errorf( @@ -361,6 +375,29 @@ func (bav *UtxoView) _flushSnapshotValidatorEntriesToDbWithTxn(txn *badger.Txn, mapKey.SnapshotAtEpochNumber, ) } + if err := DBDeleteSnapshotValidatorEntryWithTxn( + txn, bav.Snapshot, &mapKey.ValidatorPKID, mapKey.SnapshotAtEpochNumber, + ); err != nil { + return errors.Wrapf( + err, + "_flushSnapshotValidatorEntriesToDb: problem deleting ValidatorEntry for EpochNumber %d: ", + mapKey.SnapshotAtEpochNumber, + ) + } + } + + // Set all !isDeleted SnapshotValidatorEntries into the db from the UtxoView. 
+ for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { + if validatorEntry == nil { + return fmt.Errorf( + "_flushSnapshotValidatorEntriesToDb: found nil entry for EpochNumber %d, this should never happen", + mapKey.SnapshotAtEpochNumber, + ) + } + if validatorEntry.isDeleted { + // Skip any deleted SnapshotValidatorEntries. + continue + } if err := DBPutSnapshotValidatorEntryWithTxn( txn, bav.Snapshot, validatorEntry, mapKey.SnapshotAtEpochNumber, blockHeight, ); err != nil { @@ -495,7 +532,7 @@ func DBPutSnapshotValidatorEntryWithTxn( ) } - // Put the ValidatorPKID in the SnapshotValidatorByStake index. + // Put the ValidatorPKID in the SnapshotValidatorByStatusAndStake index. key = DBKeyForSnapshotValidatorByStake(validatorEntry, snapshotAtEpochNumber) if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID)); err != nil { return errors.Wrapf( @@ -507,6 +544,48 @@ func DBPutSnapshotValidatorEntryWithTxn( return nil } +func DBDeleteSnapshotValidatorEntryWithTxn( + txn *badger.Txn, snap *Snapshot, validatorPKID *PKID, snapshotAtEpochNumber uint64, +) error { + if validatorPKID == nil { + // This should never happen but is a sanity check. + glog.Errorf("DBDeleteSnapshotValidatorEntryWithTxn: called with nil ValidatorPKID") + return nil + } + + // Look up the existing SnapshotValidatorEntry in the db using the PKID. + // We need to use this validator's values to delete the corresponding indexes. + validatorEntry, err := DBGetSnapshotValidatorByPKIDWithTxn(txn, snap, validatorPKID, snapshotAtEpochNumber) + if err != nil { + return errors.Wrapf( + err, "DBDeleteSnapshotValidatorEntryWithTxn: problem retrieving ValidatorEntry for PKID %v: ", validatorPKID, + ) + } + + // If there is no ValidatorEntry in the DB for this PKID, then there is nothing to delete. + if validatorEntry == nil { + return nil + } + + // Delete ValidatorEntry from PrefixSnapshotValidatorByPKID. 
+ key := DBKeyForSnapshotValidatorByPKID(validatorEntry, snapshotAtEpochNumber) + if err = DBDeleteWithTxn(txn, snap, key); err != nil { + return errors.Wrapf( + err, "DBDeleteSnapshotValidatorEntryWithTxn: problem deleting ValidatorEntry from index PrefixSnapshotValidatorByPKID", + ) + } + + // Delete ValidatorEntry.PKID from PrefixSnapshotValidatorByStatusAndStake. + key = DBKeyForSnapshotValidatorByStake(validatorEntry, snapshotAtEpochNumber) + if err = DBDeleteWithTxn(txn, snap, key); err != nil { + return errors.Wrapf( + err, "DBDeleteSnapshotValidatorEntryWithTxn: problem deleting ValidatorEntry from index PrefixSnapshotValidatorByStatusAndStake", + ) + } + + return nil +} + func DBEnumerateAllCurrentValidators(handle *badger.DB, pkidsToSkip []*PKID) ([]*ValidatorEntry, error) { // Convert []*PKIDs of validators to skip to a Set[string] of db keys to skip. skipKeys := NewSet([]string{}) From fbb5b71b1c709ad406e749c2514c378a507e4826 Mon Sep 17 00:00:00 2001 From: diamondhands Date: Fri, 23 Jun 2023 06:25:10 -0700 Subject: [PATCH 114/762] Add comment --- lib/pos_epoch_complete_hook.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 7424f1a72..b0e12c93b 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -30,6 +30,10 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Rolls-over the current epoch into a new one. Handles the associated snapshotting + accounting. // Sanity-check that the current block is the last block in the current epoch. + // + // Note that this will also return true if we're currently at the + // ProofOfStake1StateSetupBlockHeight so that we can run the hook for the first time + // to initialize the CurrentEpochEntry. 
isLastBlockInCurrentEpoch, err := bav.IsLastBlockInCurrentEpoch(blockHeight) if err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: ") From fbf7bec9d9df0a38adb1ac1bb55104e0ebd1db2a Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Sun, 25 Jun 2023 15:00:28 -0600 Subject: [PATCH 115/762] PoS Add P2P vote and timeout messages (#555) * Add p2p msgs for votes and timeouts * Implement remaining tests * Add ProposedInView field to vote message * Cleanup * Fix failing TestEnumExtras unit test * Comments * Add validators list to aggregated signature * Fix naming for QC type * Use io.reader * Prefix new file names with 'pos_' * Add deep equality check functions to new types * Add ECDSA public key to new message types * Address Nina's comments * Address nits --- .gitignore | 1 + go.mod | 5 +- go.sum | 23 +-- lib/constants.go | 16 ++ lib/network.go | 14 +- lib/pos_network.go | 402 ++++++++++++++++++++++++++++++++++++++++ lib/pos_network_test.go | 101 ++++++++++ 7 files changed, 538 insertions(+), 24 deletions(-) create mode 100644 lib/pos_network.go create mode 100644 lib/pos_network_test.go diff --git a/.gitignore b/.gitignore index dc0267338..608a45416 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ backend core .idea +.vscode local_scripts/* lib/mem.log diff --git a/go.mod b/go.mod index 1e34c1732..7db2ed102 100644 --- a/go.mod +++ b/go.mod @@ -23,6 +23,7 @@ require ( github.com/holiman/uint256 v1.1.1 github.com/mitchellh/go-homedir v1.1.0 github.com/oleiade/lane v1.0.1 + github.com/onflow/flow-go/crypto v0.24.7 github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 github.com/robinjoseph08/go-pg-migrations/v3 v3.0.0 @@ -45,9 +46,7 @@ require ( ) require ( - github.com/DataDog/zstd v1.4.8 // indirect github.com/Microsoft/go-winio v0.4.16 // indirect - github.com/NVIDIA/cstruct v0.0.0-20210817223100-441a06a021c8 // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect 
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect github.com/bwesterb/go-ristretto v1.2.0 // indirect @@ -79,7 +78,6 @@ require ( github.com/mattn/goveralls v0.0.6 // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect github.com/mitchellh/mapstructure v1.1.2 // indirect - github.com/onflow/flow-go/crypto v0.24.7 // indirect github.com/onsi/ginkgo v1.15.0 // indirect github.com/onsi/gomega v1.10.5 // indirect github.com/pelletier/go-toml v1.7.0 // indirect @@ -100,7 +98,6 @@ require ( github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect go.opencensus.io v0.23.0 // indirect - golang.org/x/mod v0.4.2 // indirect golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect golang.org/x/sys v0.0.0-20221010170243-090e33056c14 // indirect golang.org/x/text v0.3.6 // indirect diff --git a/go.sum b/go.sum index c2e35d0c6..f0f9a57dc 100644 --- a/go.sum +++ b/go.sum @@ -30,12 +30,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/DataDog/datadog-go v4.4.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v4.5.0+incompatible h1:MyyuIz5LVAI3Im+0F/tfo64ETyH4sNVynZ29yOiHm50= github.com/DataDog/datadog-go v4.5.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.4.8 h1:Rpmta4xZ/MgZnriKNd24iZMhGpP5dvUcs/uqfBapKZY= -github.com/DataDog/zstd v1.4.8/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/NVIDIA/cstruct v0.0.0-20210817223100-441a06a021c8/go.mod h1:GPbuJvLD4QWiHPS6vivLzh+XMAx6va0Aucm6ipa5S0I= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 
h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= @@ -125,13 +121,8 @@ github.com/deso-protocol/go-deadlock v1.0.0 h1:mw0pHy/19zgC+JFBStuQt1+1Ehv5OKA5N github.com/deso-protocol/go-deadlock v1.0.0/go.mod h1:K0Wd2OV2x7ck7SMYDraWerpKjFKUeBqaFcwz21tmkb8= github.com/deso-protocol/go-merkle-tree v1.0.0 h1:9zkI5dQsITYy77s4kbTGPQmZnhQ+LsH/kRdL5l/Yzvg= github.com/deso-protocol/go-merkle-tree v1.0.0/go.mod h1:V/vbg/maaNv6G7zf9VVs645nLFx/jsO2L/awFB/S/ZU= -github.com/dgraph-io/badger/v3 v3.2103.0 h1:abkD2EnP3+6Tj8h5LI1y00dJ9ICKTIAzvG9WmZ8S2c4= -github.com/dgraph-io/badger/v3 v3.2103.0/go.mod h1:GHMCYxuDWyzbHkh4k3yyg4PM61tJPFfEGSMbE3Vd5QE= github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= -github.com/dgraph-io/ristretto v0.0.4-0.20210309073149-3836124cdc5a/go.mod h1:MIonLggsKgZLUSt414ExgwNtlOL5MuEoAJP514mwGe8= -github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= -github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -215,7 +206,6 @@ github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
-github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= @@ -309,6 +299,7 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -434,6 +425,7 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5I github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= 
@@ -462,7 +454,6 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZL github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -472,13 +463,13 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/supranational/blst v0.3.10 h1:CMciDZ/h4pXDDXQASe8ZGTNKUiVNxVVA5hpci2Uuhuk= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/tinylib/msgp v1.1.2 
h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= @@ -546,7 +537,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -606,8 +596,6 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -655,13 +643,11 @@ golang.org/x/sys 
v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201026173827-119d4633e4d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14 h1:k5II8e6QD8mITdi+okbbmR/cIyEbeXLBhy5Ha4nevyc= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -779,8 +765,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= @@ -791,4 +775,5 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= mellium.im/sasl v0.2.1 h1:nspKSRg7/SyO0cRGY71OkfHab8tf9kCts6a6oTDut0w= mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ= +pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/lib/constants.go b/lib/constants.go index 52c651669..0920238cb 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -105,6 +105,22 @@ const ( CurrentHeaderVersion = HeaderVersion1 ) +// Versioning for the MsgValidatorVote message type. This type alias is equivalent +// to a uint8, and supports the same byte encoders/decoders. +type MsgValidatorVoteVersion = byte + +const ( + MsgValidatorVoteVersion0 MsgValidatorVoteVersion = 0 +) + +// Versioning for the MsgValidatorTimeout message type. This type alias is equivalent +// to a uint8, and supports the same byte encoders/decoders. +type MsgValidatorTimeoutVersion = byte + +const ( + MsgValidatorTimeoutVersion0 MsgValidatorTimeoutVersion = 0 +) + var ( MaxUint256, _ = uint256.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") diff --git a/lib/network.go b/lib/network.go index 0ce83d3e4..099c7b2f8 100644 --- a/lib/network.go +++ b/lib/network.go @@ -93,7 +93,11 @@ const ( // MsgTypeTransactionBundleV2 contains transactions after the balance model block height from a peer. 
MsgTypeTransactionBundleV2 MsgType = 19 - // NEXT_TAG = 20 + // Proof of stake vote and timeout messages + MsgTypeValidatorVote MsgType = 20 + MsgTypeValidatorTimeout MsgType = 21 + + // NEXT_TAG = 22 // Below are control messages used to signal to the Server from other parts of // the code but not actually sent among peers. @@ -151,6 +155,10 @@ func (msgType MsgType) String() string { return "TRANSACTION_BUNDLE" case MsgTypeTransactionBundleV2: return "TRANSACTION_BUNDLE_V2" + case MsgTypeValidatorVote: + return "VALIDATOR_VOTE" + case MsgTypeValidatorTimeout: + return "VALIDATOR_TIMEOUT" case MsgTypeMempool: return "MEMPOOL" case MsgTypeAddr: @@ -745,6 +753,10 @@ func NewMessage(msgType MsgType) DeSoMessage { return &MsgDeSoTransactionBundle{} case MsgTypeTransactionBundleV2: return &MsgDeSoTransactionBundleV2{} + case MsgTypeValidatorVote: + return &MsgDeSoValidatorVote{} + case MsgTypeValidatorTimeout: + return &MsgDeSoValidatorTimeout{} case MsgTypeMempool: return &MsgDeSoMempool{} case MsgTypeGetHeaders: diff --git a/lib/pos_network.go b/lib/pos_network.go new file mode 100644 index 000000000..f2db4117d --- /dev/null +++ b/lib/pos_network.go @@ -0,0 +1,402 @@ +package lib + +import ( + "bytes" + "fmt" + "io" + + "github.com/deso-protocol/core/bls" + "github.com/pkg/errors" +) + +// ================================================================== +// Proof of Stake Vote Message +// ================================================================== + +type MsgDeSoValidatorVote struct { + MsgVersion MsgValidatorVoteVersion + + // The ECDSA public key for the validator who constructed this vote message. + // Given the validator's ECDSA public key, we can look up their Validator PKID + // and their stake in consensus. This allows us to verify that the vote message + // was sent by a registered validator. + ValidatorPublicKey *PublicKey + // The BLS voting public key for the validator who constructed this vote message. 
+ // The BLS public key is included in the vote message because it allows us to + // easily verify if the BLS VotePartialSignature is correctly formed, without having + // to first look up the validator's BLS public key in consensus. It helps optimize + // vote validation. + ValidatorVotingPublicKey *bls.PublicKey + + // The block hash corresponding to the block that this vote is for. + BlockHash *BlockHash + + // The view number when the the block was proposed. + ProposedInView uint64 + + // The validator's partial BLS signature of the (ProposedInView, BlockHash) pair + // This represents the validator's vote for this block. The block height is implicitly + // captured in the block hash. + VotePartialSignature *bls.Signature +} + +func (msg *MsgDeSoValidatorVote) GetMsgType() MsgType { + return MsgTypeValidatorVote +} + +func (msg *MsgDeSoValidatorVote) ToBytes(bool) ([]byte, error) { + if msg.MsgVersion != MsgValidatorVoteVersion0 { + return nil, fmt.Errorf("MsgDeSoValidatorVote.ToBytes: Invalid MsgVersion %d", msg.MsgVersion) + } + + retBytes := []byte{} + + // MsgVersion + retBytes = append(retBytes, msg.MsgVersion) + + // ValidatorPublicKey + if msg.ValidatorPublicKey == nil { + return nil, errors.New("MsgDeSoValidatorVote.ToBytes: ValidatorPublicKey must not be nil") + } + retBytes = append(retBytes, msg.ValidatorPublicKey.ToBytes()...) + + // ValidatorVotingPublicKey + if msg.ValidatorVotingPublicKey == nil { + return nil, errors.New("MsgDeSoValidatorVote.ToBytes: ValidatorVotingPublicKey must not be nil") + } + retBytes = append(retBytes, EncodeBLSPublicKey(msg.ValidatorVotingPublicKey)...) + + // BlockHash + if msg.BlockHash == nil { + return nil, errors.New("MsgDeSoValidatorVote.ToBytes: BlockHash must not be nil") + } + retBytes = append(retBytes, msg.BlockHash.ToBytes()...) + + // ProposedInView + retBytes = append(retBytes, UintToBuf(msg.ProposedInView)...) 
+ + // VotePartialSignature + if msg.VotePartialSignature == nil { + return nil, errors.New("MsgDeSoValidatorVote.ToBytes: VotePartialSignature must not be nil") + } + retBytes = append(retBytes, EncodeBLSSignature(msg.VotePartialSignature)...) + + return retBytes, nil +} + +func (msg *MsgDeSoValidatorVote) FromBytes(data []byte) error { + rr := bytes.NewReader(data) + + // MsgVersion + msgVersion, err := rr.ReadByte() + if err != nil { + return errors.Wrapf(err, "MsgDeSoValidatorVote.FromBytes: Error decoding MsgVersion") + } + if msgVersion != MsgValidatorVoteVersion0 { + return fmt.Errorf("MsgDeSoValidatorVote.FromBytes: Invalid MsgVersion %d", msgVersion) + } + msg.MsgVersion = msgVersion + + // ValidatorPublicKey + msg.ValidatorPublicKey, err = ReadPublicKey(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoValidatorVote.FromBytes: Error decoding ValidatorPublicKey") + } + + // ValidatorVotingPublicKey + msg.ValidatorVotingPublicKey, err = DecodeBLSPublicKey(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoValidatorVote.FromBytes: Error decoding ValidatorVotingPublicKey") + } + + // BlockHash + msg.BlockHash, err = ReadBlockHash(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoValidatorVote.FromBytes: Error decoding BlockHash") + } + + // ProposedInView + msg.ProposedInView, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoValidatorVote.FromBytes: Error decoding ProposedInView") + } + + // VotePartialSignature + msg.VotePartialSignature, err = DecodeBLSSignature(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoValidatorVote.FromBytes: Error decoding VotePartialSignature") + } + + return nil +} + +// ================================================================== +// Proof of Stake Timeout Message +// ================================================================== + +type MsgDeSoValidatorTimeout struct { + MsgVersion MsgValidatorTimeoutVersion + + // The ECDSA public key for the validator who 
constructed this timeout message. + // Given the validator's ECDSA public key, we can look up their Validator PKID. + // This allows us to verify that the timeout originated from a registered validator. + ValidatorPublicKey *PublicKey + // The BLS voting public key for the validator who constructed this timeout. The BLS + // public key is included in the timeout message because it allows us to easily + // verify that the BLS TimeoutPartialSignature is correctly formed, without having to + // first look up the validator's BLS public key in consensus. It helps optimize timeout + // message validation. + ValidatorVotingPublicKey *bls.PublicKey + + // The view that the validator has timed out on. + TimedOutView uint64 + + // This QC has the highest view that the validator is aware of. This QC allows + // the leader to link back to the most recent block that 2/3rds of validators + // are aware of when constructing the next block. + HighQC *QuorumCertificate + + // The validator's BLS signature on (TimedOutView, HighQC.View). Notice that we + // include the HighQC.View in the signature payload rather than signing the full + // serialized HighQC itself. This allows the leader to better aggregate validator + // signatures without compromising the integrity of the protocol. + TimeoutPartialSignature *bls.Signature +} + +func (msg *MsgDeSoValidatorTimeout) GetMsgType() MsgType { + return MsgTypeValidatorTimeout +} + +func (msg *MsgDeSoValidatorTimeout) ToBytes(bool) ([]byte, error) { + if msg.MsgVersion != MsgValidatorTimeoutVersion0 { + return nil, fmt.Errorf("MsgDeSoValidatorTimeout.ToBytes: Invalid MsgVersion %d", msg.MsgVersion) + } + + retBytes := []byte{} + + // MsgVersion + retBytes = append(retBytes, msg.MsgVersion) + + // ValidatorPublicKey + if msg.ValidatorPublicKey == nil { + return nil, errors.New("MsgDeSoValidatorTimeout.ToBytes: ValidatorPublicKey must not be nil") + } + retBytes = append(retBytes, msg.ValidatorPublicKey.ToBytes()...) 
+
+ // ValidatorVotingPublicKey
+ if msg.ValidatorVotingPublicKey == nil {
+ return nil, errors.New("MsgDeSoValidatorTimeout.ToBytes: ValidatorVotingPublicKey must not be nil")
+ }
+ retBytes = append(retBytes, EncodeBLSPublicKey(msg.ValidatorVotingPublicKey)...)
+
+ // TimedOutView
+ retBytes = append(retBytes, UintToBuf(msg.TimedOutView)...)
+
+ // HighQC
+ if msg.HighQC == nil {
+ return nil, errors.New("MsgDeSoValidatorTimeout.ToBytes: HighQC must not be nil")
+ }
+ encodedHighQC, err := msg.HighQC.ToBytes()
+ if err != nil {
+ return nil, errors.Wrapf(err, "MsgDeSoValidatorTimeout.ToBytes: Error encoding HighQC")
+ }
+ retBytes = append(retBytes, encodedHighQC...)
+
+ // TimeoutPartialSignature
+ if msg.TimeoutPartialSignature == nil {
+ return nil, errors.New("MsgDeSoValidatorTimeout.ToBytes: TimeoutPartialSignature must not be nil")
+ }
+ retBytes = append(retBytes, EncodeBLSSignature(msg.TimeoutPartialSignature)...)
+
+ return retBytes, nil
+}
+
+func (msg *MsgDeSoValidatorTimeout) FromBytes(data []byte) error {
+ rr := bytes.NewReader(data)
+
+ // MsgVersion
+ msgVersion, err := rr.ReadByte()
+ if err != nil {
+ return errors.Wrapf(err, "MsgDeSoValidatorTimeout.FromBytes: Error decoding MsgVersion")
+ }
+ if msgVersion != MsgValidatorTimeoutVersion0 {
+ return fmt.Errorf("MsgDeSoValidatorTimeout.FromBytes: Invalid MsgVersion %d", msgVersion)
+ }
+ msg.MsgVersion = msgVersion
+
+ // ValidatorPublicKey
+ msg.ValidatorPublicKey, err = ReadPublicKey(rr)
+ if err != nil {
+ return errors.Wrapf(err, "MsgDeSoValidatorTimeout.FromBytes: Error decoding ValidatorPublicKey")
+ }
+
+ // ValidatorVotingPublicKey
+ msg.ValidatorVotingPublicKey, err = DecodeBLSPublicKey(rr)
+ if err != nil {
+ return errors.Wrapf(err, "MsgDeSoValidatorTimeout.FromBytes: Error decoding ValidatorVotingPublicKey")
+ }
+
+ // TimedOutView
+ msg.TimedOutView, err = ReadUvarint(rr)
+ if err != nil {
+ return errors.Wrapf(err, "MsgDeSoValidatorTimeout.FromBytes: Error decoding TimedOutView")
+ }
+
+
+ // HighQC
+ msg.HighQC, err = DecodeQuorumCertificate(rr)
+ if err != nil {
+ return errors.Wrapf(err, "MsgDeSoValidatorTimeout.FromBytes: Error decoding HighQC")
+ }
+
+ // TimeoutPartialSignature
+ msg.TimeoutPartialSignature, err = DecodeBLSSignature(rr)
+ if err != nil {
+ return errors.Wrapf(err, "MsgDeSoValidatorTimeout.FromBytes: Error decoding TimeoutPartialSignature")
+ }
+
+ return nil
+}
+
+// A QuorumCertificate contains an aggregated signature from 2/3rds of the validators
+// on the network, weighted by stake. The signatures are associated with a block hash
+// and a view, both of which are identified in the certificate.
+type QuorumCertificate struct {
+ // No versioning field is needed for this type since it is a member field
+ // for other top-level P2P messages, which will be versioned themselves.
+
+ // The block hash corresponding to the block that this QC authorizes.
+ BlockHash *BlockHash
+
+ // The view number when the block was proposed.
+ ProposedInView uint64
+
+ // This BLS signature is aggregated from all of the partial BLS signatures for
+ // vote messages that have been aggregated by the leader. The partial signatures
+ // sign the (ProposedInView, BlockHash) pair for the block.
+ //
+ // From the block hash, we can look up the block height, the validator set at that
+ // block height, and the ordering of validators in consensus which identifies the
+ // present signers in the provided signers list. We can then use this to determine
+ // if the QC has 2/3rds of the total stake.
+ ValidatorsVoteAggregatedSignature *AggregatedBLSSignature
+}
+
+// Performs a deep equality check between two QuorumCertificates, and returns
+// true if the values of the two are identical.
+func (qc *QuorumCertificate) Eq(other *QuorumCertificate) bool { + if qc == nil && other == nil { + return true + } + + if (qc == nil) != (other == nil) { + return false + } + + return bytes.Equal(qc.BlockHash.ToBytes(), other.BlockHash.ToBytes()) && + qc.ProposedInView == other.ProposedInView && + qc.ValidatorsVoteAggregatedSignature.Eq(other.ValidatorsVoteAggregatedSignature) +} + +func (qc *QuorumCertificate) ToBytes() ([]byte, error) { + retBytes := []byte{} + + // BlockHash + if qc.BlockHash == nil { + return nil, errors.New("QuorumCertificate.ToBytes: BlockHash must not be nil") + } + retBytes = append(retBytes, qc.BlockHash.ToBytes()...) + + // ProposedInView + retBytes = append(retBytes, UintToBuf(qc.ProposedInView)...) + + // ValidatorsVoteAggregatedSignature + if qc.ValidatorsVoteAggregatedSignature == nil { + return nil, errors.New("QuorumCertificate.ToBytes: ValidatorsVoteAggregatedSignature must not be nil") + } + encodedValidatorsVoteAggregatedSignature, err := qc.ValidatorsVoteAggregatedSignature.ToBytes() + if err != nil { + return nil, errors.Wrapf(err, "QuorumCertificate.ToBytes: Error encoding ValidatorsVoteAggregatedSignature") + } + retBytes = append(retBytes, encodedValidatorsVoteAggregatedSignature...) 
+ + return retBytes, nil +} + +func DecodeQuorumCertificate(rr io.Reader) (*QuorumCertificate, error) { + var qc QuorumCertificate + var err error + + qc.BlockHash, err = ReadBlockHash(rr) + if err != nil { + return nil, errors.Wrapf(err, "DecodeQuorumCertificate: Error decoding BlockHash") + } + + qc.ProposedInView, err = ReadUvarint(rr) + if err != nil { + return nil, errors.Wrapf(err, "DecodeQuorumCertificate: Error decoding ProposedInView") + } + + qc.ValidatorsVoteAggregatedSignature, err = DecodeAggregatedBLSSignature(rr) + if err != nil { + return nil, errors.Wrapf(err, "DecodeQuorumCertificate: Error decoding ValidatorsVoteAggregatedSignature") + } + + return &qc, nil +} + +// This is an aggregated BLS signature from a set of validators. Each validator's +// presence in the signature is denoted in the provided signers list. I.e. if the +// list's value at index 0 is 1, then the validator identified by that index is +// present in the aggregated signature. The indices of all validators are expected +// to be known by the caller. +type AggregatedBLSSignature struct { + // TODO: Switch this to a bitlist, which will result in ~8x reduction in total + // size of this construct. + SignersList []byte + Signature *bls.Signature +} + +func (sig *AggregatedBLSSignature) Eq(other *AggregatedBLSSignature) bool { + if sig == nil && other == nil { + return true + } + + if (sig == nil) != (other == nil) { + return false + } + + return bytes.Equal(sig.SignersList, other.SignersList) && + sig.Signature.Eq(other.Signature) +} + +func (sig *AggregatedBLSSignature) ToBytes() ([]byte, error) { + retBytes := []byte{} + + // SignersList + retBytes = append(retBytes, EncodeByteArray(sig.SignersList)...) + + // Signature + if sig.Signature == nil { + return nil, errors.New("AggregatedBLSSignature.ToBytes: Signature must not be nil") + } + retBytes = append(retBytes, EncodeBLSSignature(sig.Signature)...) 
+ + return retBytes, nil +} + +func DecodeAggregatedBLSSignature(rr io.Reader) (*AggregatedBLSSignature, error) { + var sig AggregatedBLSSignature + var err error + + sig.SignersList, err = DecodeByteArray(rr) + if err != nil { + return nil, errors.Wrapf(err, "DecodeAggregatedBLSSignature: Error decoding SignersList") + } + + sig.Signature, err = DecodeBLSSignature(rr) + if err != nil { + return nil, errors.Wrapf(err, "DecodeAggregatedBLSSignature: Error decoding Signature") + } + + return &sig, nil +} diff --git a/lib/pos_network_test.go b/lib/pos_network_test.go new file mode 100644 index 000000000..76514eabc --- /dev/null +++ b/lib/pos_network_test.go @@ -0,0 +1,101 @@ +//go:build relic + +package lib + +import ( + "testing" + + "github.com/deso-protocol/core/bls" + "github.com/stretchr/testify/require" +) + +func TestValidatorVoteEncodeDecode(t *testing.T) { + validatorVotingPublicKey, votePartialSignature := _generateValidatorVotingPublicKeyAndSignature(t) + + originalMsg := MsgDeSoValidatorVote{ + MsgVersion: MsgValidatorVoteVersion0, + ValidatorPublicKey: &PublicKey{}, + ValidatorVotingPublicKey: validatorVotingPublicKey, + BlockHash: &BlockHash{}, + ProposedInView: 9910, + VotePartialSignature: votePartialSignature, + } + + // Encode the message and verify the length is correct. + encodedMsgBytes, err := originalMsg.ToBytes(false) + require.NoError(t, err) + require.Equal(t, 214, len(encodedMsgBytes)) + + // Decode the message. + decodedMsg := &MsgDeSoValidatorVote{} + err = decodedMsg.FromBytes(encodedMsgBytes) + require.NoError(t, err) + + // Check that the message bodies are the same. 
+ require.Equal(t, originalMsg.MsgVersion, decodedMsg.MsgVersion) + require.True(t, originalMsg.ValidatorPublicKey.Equal(*decodedMsg.ValidatorPublicKey)) + require.True(t, originalMsg.ValidatorVotingPublicKey.Eq(decodedMsg.ValidatorVotingPublicKey)) + require.Equal(t, originalMsg.BlockHash, decodedMsg.BlockHash) + require.Equal(t, originalMsg.ProposedInView, decodedMsg.ProposedInView) + require.True(t, originalMsg.VotePartialSignature.Eq(decodedMsg.VotePartialSignature)) +} + +func TestValidatorTimeoutEncodeDecode(t *testing.T) { + validatorVotingPublicKey, timeoutPartialSignature := _generateValidatorVotingPublicKeyAndSignature(t) + + _, partialSignature1 := _generateValidatorVotingPublicKeyAndSignature(t) + _, partialSignature2 := _generateValidatorVotingPublicKeyAndSignature(t) + + aggregateSignature, err := bls.AggregateSignatures([]*bls.Signature{partialSignature1, partialSignature2}) + require.NoError(t, err) + + originalMsg := MsgDeSoValidatorTimeout{ + MsgVersion: MsgValidatorTimeoutVersion0, + ValidatorPublicKey: &PublicKey{}, + ValidatorVotingPublicKey: validatorVotingPublicKey, + TimedOutView: 999912, + HighQC: &QuorumCertificate{ + BlockHash: &BlockHash{}, + ProposedInView: 999910, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + SignersList: []byte{1, 2}, + Signature: aggregateSignature, + }, + }, + TimeoutPartialSignature: timeoutPartialSignature, + } + + // Encode the message and verify the length is correct. + encodedMsgBytes, err := originalMsg.ToBytes(false) + require.NoError(t, err) + require.Equal(t, 270, len(encodedMsgBytes)) + + // Decode the message. + decodedMsg := &MsgDeSoValidatorTimeout{} + err = decodedMsg.FromBytes(encodedMsgBytes) + require.NoError(t, err) + + // Check that the message bodies are the same. 
+ require.Equal(t, originalMsg.MsgVersion, decodedMsg.MsgVersion) + require.True(t, originalMsg.ValidatorPublicKey.Equal(*decodedMsg.ValidatorPublicKey)) + require.True(t, originalMsg.ValidatorVotingPublicKey.Eq(decodedMsg.ValidatorVotingPublicKey)) + require.Equal(t, originalMsg.TimedOutView, decodedMsg.TimedOutView) + require.True(t, originalMsg.TimeoutPartialSignature.Eq(decodedMsg.TimeoutPartialSignature)) + + // Check that the high QCs are the same. + require.True(t, + originalMsg.HighQC.ValidatorsVoteAggregatedSignature.Eq( + decodedMsg.HighQC.ValidatorsVoteAggregatedSignature, + ), + ) +} + +// Creates an arbitrary BLS public key and signature for testing. +func _generateValidatorVotingPublicKeyAndSignature(t *testing.T) (*bls.PublicKey, *bls.Signature) { + blsPrivateKey, err := bls.NewPrivateKey() + require.NoError(t, err) + blsPublicKey := blsPrivateKey.PublicKey() + blsSignature, err := blsPrivateKey.Sign([]byte{0x01, 0x02, 0x03}) + require.NoError(t, err) + return blsPublicKey, blsSignature +} From bdd6dee2288ae2fc5500a411ed15330d24f05152 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 27 Jun 2023 11:56:41 -0400 Subject: [PATCH 116/762] PoS Add QC fields to block header schema (#558) * Initial commit * Add tests * Fix broken tests * Better comments and edge case handling * Revert unnecessary comments * Address Nina's feedback * Fix postgres tests * Fix compile error when the relic build tag isn't defined * Address Piotr's comments * Address Nina's comments --- lib/block_view_types.go | 53 +++++++++++ lib/constants.go | 23 ++++- lib/db_utils_test.go | 2 +- lib/network.go | 146 +++++++++++++++++++++++++++++-- lib/network_test.go | 92 ++++++++++++++++--- lib/pos_network.go | 145 ++++++++++++++++++++++++++++-- lib/pos_network_no_relic_test.go | 17 ++++ 7 files changed, 448 insertions(+), 30 deletions(-) create mode 100644 lib/pos_network_no_relic_test.go diff --git a/lib/block_view_types.go 
b/lib/block_view_types.go index 7641a34df..17f67303f 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -2,6 +2,7 @@ package lib import ( "bytes" + "encoding/binary" "encoding/hex" "encoding/json" "fmt" @@ -4647,6 +4648,58 @@ func DecodeByteArray(reader io.Reader) ([]byte, error) { } } +func EncodeUint64Array(uint64s []uint64) []byte { + var data []byte + + data = append(data, UintToBuf(uint64(len(uint64s)))...) + for _, uint64 := range uint64s { + data = append(data, UintToBuf(uint64)...) + } + + return data +} + +func DecodeUint64Array(reader io.Reader) ([]uint64, error) { + arrLen, err := ReadUvarint(reader) + if err != nil { + return nil, errors.Wrapf(err, "DecodeUint64Array: Problem reading array length") + } + + if arrLen == 0 { + return nil, nil + } + + var result []uint64 + result, err = SafeMakeSliceWithLength[uint64](arrLen) + if err != nil { + return nil, errors.Wrapf(err, "DecodeUint64Array: Problem creating slice") + } + + for ii := uint64(0); ii < arrLen; ii++ { + result[ii], err = ReadUvarint(reader) + if err != nil { + return nil, errors.Wrapf(err, "DecodeUint64Array: Problem reading uint64") + } + } + + return result, nil +} + +func EncodeUint64BigEndian(val uint64) []byte { + encodedBytes := [8]byte{} + binary.BigEndian.PutUint64(encodedBytes[:], val) + return encodedBytes[:] +} + +func DecodeUint64BigEndian(rr io.Reader) (uint64, error) { + scratchBytes := [8]byte{} + _, err := io.ReadFull(rr, scratchBytes[:]) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(scratchBytes[:]), nil +} + func EncodePKIDuint64Map(pkidMap map[PKID]uint64) []byte { var data []byte diff --git a/lib/constants.go b/lib/constants.go index 0920238cb..34c04d33e 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -85,9 +85,11 @@ const ( NetworkType_TESTNET NetworkType = 2 ) +type MsgDeSoHeaderVersion = uint32 + const ( // This is the header version that the blockchain started with. 
- HeaderVersion0 = uint32(0) + HeaderVersion0 = MsgDeSoHeaderVersion(0) // This version made several changes to the previous header encoding format: // - The Nonce field was expanded to 64 bits // - Another ExtraNonce field was added to provide *another* 64 bits of entropy, @@ -101,7 +103,24 @@ const ( // // At the time of this writing, the intent is to deploy it in a backwards-compatible // fashion, with the eventual goal of phasing out blocks with the previous version. - HeaderVersion1 = uint32(1) + HeaderVersion1 = MsgDeSoHeaderVersion(1) + // This version introduces the transition from Proof of Work to Proof of Stake blocks. + // It includes several changes to the header format: + // - Nonce field is deprecated + // - ExtraNonce field is deprecated + // - ValidatorsVoteQC field is added + // - ValidatorsTimeoutAggregateQC field is added + // + // This format change is a breaking change that is not backwards-compatible with + // versions 0 and 1. + HeaderVersion2 = MsgDeSoHeaderVersion(2) + // This CurrentHeaderVersion is an implicit version type that represents the latest + // backwards compatible Proof of Work header format. This value is now locked to + // HeaderVersion1 since versions 2 and onwards will be used for Proof of Stake formats. + // + // TODO: rename this constant to "LatestProofOfWorkHeaderVersion". Note, doing so will + // be a breaking change for 3rd party applications that import core and use this + // constant. 
 CurrentHeaderVersion = HeaderVersion1
 )
diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go
index d20d17490..e8af5ce8c 100644
--- a/lib/db_utils_test.go
+++ b/lib/db_utils_test.go
@@ -61,7 +61,7 @@ func _GetTestBlockNode() *BlockNode {
 
 // Header (make a copy)
 bs.Header = NewMessage(MsgTypeHeader).(*MsgDeSoHeader)
- headerBytes, _ := expectedBlockHeader.ToBytes(false)
+ headerBytes, _ := expectedBlockHeaderVersion1.ToBytes(false)
 bs.Header.FromBytes(headerBytes)
 
 // Status
diff --git a/lib/network.go b/lib/network.go
index 099c7b2f8..0adc14d26 100644
--- a/lib/network.go
+++ b/lib/network.go
@@ -1892,6 +1892,10 @@ type MsgDeSoHeader struct {
 // The height of the block this header corresponds to.
 Height uint64
 
+ // Nonce is only used for Proof of Work blocks, and will only be populated
+ // in MsgDeSoHeader versions 0 and 1. For all later versions, this field will
+ // default to a value of zero.
+ //
 // The nonce that is used by miners in order to produce valid blocks.
 //
 // Note: Before the upgrade from HeaderVersion0 to HeaderVersion1, miners would make
@@ -1899,9 +1903,30 @@ // no longer needed since HeaderVersion1 upgraded the nonce to 64 bits from 32 bits.
 Nonce uint64
 
- // An extra nonce that can be used to provice *even more* entropy for miners, in the
+ // ExtraNonce is only used for Proof of Work blocks, and will only be populated
+ // in MsgDeSoHeader versions 0 and 1. For all later versions, this field will
+ // default to zero.
+ //
+ // An extra nonce that can be used to provide *even more* entropy for miners, in the
 // event that ASICs become powerful enough to have birthday problems in the future.
 ExtraNonce uint64
+
+ // ValidatorsVoteQC is only used for Proof of Stake blocks, and will only be
+ // populated in MsgDeSoHeader versions 2 and higher. For all earlier versions, this
+ // field will be null.
+ //
+ // This corresponds to a QC containing votes from 2/3rds of validators, weighted by stake.
+ ValidatorsVoteQC *QuorumCertificate + + // ValidatorsTimeoutAggregateQC is only used for Proof of Stake blocks, and will only be + // populated in MsgDeSoHeader versions 2 and higher. For all earlier version, this field + // will be null. + // + // In the event of a timeout, this field will contain the aggregate QC constructed from + // timeout messages from 2/3 of validators weighted by stake, and proves that they have + // time out. This value is set to null in normal cases where a regular block vote has + // taken place. + ValidatorsTimeoutAggregateQC *TimeoutAggregateQuorumCertificate } func HeaderSizeBytes() int { @@ -2030,6 +2055,63 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion1(preSignature bool) ([]byte, error return retBytes, nil } +func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error) { + retBytes := []byte{} + + // Version + { + scratchBytes := [4]byte{} + binary.BigEndian.PutUint32(scratchBytes[:], msg.Version) + retBytes = append(retBytes, scratchBytes[:]...) + } + + // PrevBlockHash + prevBlockHash := msg.PrevBlockHash + if prevBlockHash == nil { + prevBlockHash = &BlockHash{} + } + retBytes = append(retBytes, prevBlockHash[:]...) + + // TransactionMerkleRoot + transactionMerkleRoot := msg.TransactionMerkleRoot + if transactionMerkleRoot == nil { + transactionMerkleRoot = &BlockHash{} + } + retBytes = append(retBytes, transactionMerkleRoot[:]...) + + // TstampSecs: this field can be encoded to take up the full 64 bits now + // that MsgDeSoHeader version 2 does not need to be backwards compatible. + retBytes = append(retBytes, EncodeUint64BigEndian(msg.TstampSecs)...) + + // Height + retBytes = append(retBytes, EncodeUint64BigEndian(msg.Height)...) + + // The Nonce and ExtraNonce fields are unused in version 2. We skip them + // during both encoding and decoding. 
+ + // ValidatorsVoteQC + if msg.ValidatorsVoteQC == nil { + return nil, fmt.Errorf("EncodeHeaderVersion2: ValidatorsVoteQC must be non-nil") + } + encodedValidatorsVoteQC, err := msg.ValidatorsVoteQC.ToBytes() + if err != nil { + return nil, errors.Wrapf(err, "EncodeHeaderVersion2: error encoding ValidatorsVoteQC") + } + retBytes = append(retBytes, encodedValidatorsVoteQC...) + + // ValidatorsTimeoutAggregateQC + if msg.ValidatorsTimeoutAggregateQC == nil { + return nil, fmt.Errorf("EncodeHeaderVersion2: ValidatorsTimeoutAggregateQC must be non-nil") + } + encodedValidatorsTimeoutAggregateQC, err := msg.ValidatorsTimeoutAggregateQC.ToBytes() + if err != nil { + return nil, errors.Wrapf(err, "EncodeHeaderVersion2: error encoding ValidatorsTimeoutAggregateQC") + } + retBytes = append(retBytes, encodedValidatorsTimeoutAggregateQC...) + + return retBytes, nil +} + func (msg *MsgDeSoHeader) ToBytes(preSignature bool) ([]byte, error) { // Depending on the version, we decode the header differently. @@ -2037,6 +2119,8 @@ func (msg *MsgDeSoHeader) ToBytes(preSignature bool) ([]byte, error) { return msg.EncodeHeaderVersion0(preSignature) } else if msg.Version == HeaderVersion1 { return msg.EncodeHeaderVersion1(preSignature) + } else if msg.Version == HeaderVersion2 { + return msg.EncodeHeaderVersion2(preSignature) } else { // If we have an unrecognized version then we default to serializing with // version 0. 
This is necessary because there are places where we use a @@ -2151,6 +2235,53 @@ func DecodeHeaderVersion1(rr io.Reader) (*MsgDeSoHeader, error) { return retHeader, nil } +func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { + retHeader := NewMessage(MsgTypeHeader).(*MsgDeSoHeader) + + // PrevBlockHash + _, err := io.ReadFull(rr, retHeader.PrevBlockHash[:]) + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding PrevBlockHash") + } + + // TransactionMerkleRoot + _, err = io.ReadFull(rr, retHeader.TransactionMerkleRoot[:]) + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TransactionMerkleRoot") + } + + // TstampSecs + retHeader.TstampSecs, err = DecodeUint64BigEndian(rr) + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TstampSecs") + } + + // Height + retHeader.Height, err = DecodeUint64BigEndian(rr) + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding Height") + } + + // The Nonce and ExtraNonce fields are unused in version 2. We skip them + // during both encoding and decoding. 
+ retHeader.Nonce = 0 + retHeader.ExtraNonce = 0 + + // ValidatorsVoteQC + retHeader.ValidatorsVoteQC, err = DecodeQuorumCertificate(rr) + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ValidatorsVoteQC") + } + + // ValidatorsTimeoutAggregateQC + retHeader.ValidatorsTimeoutAggregateQC = &TimeoutAggregateQuorumCertificate{} + if err = retHeader.ValidatorsTimeoutAggregateQC.FromBytes(rr); err != nil { + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ValidatorsTimeoutAggregateQC") + } + + return retHeader, nil +} + func DecodeHeader(rr io.Reader) (*MsgDeSoHeader, error) { // Read the version to determine scratchBytes := [4]byte{} @@ -2165,15 +2296,16 @@ func DecodeHeader(rr io.Reader) (*MsgDeSoHeader, error) { ret, err = DecodeHeaderVersion0(rr) } else if headerVersion == HeaderVersion1 { ret, err = DecodeHeaderVersion1(rr) + } else if headerVersion == HeaderVersion2 { + ret, err = DecodeHeaderVersion2(rr) } else { - // If we have an unrecognized version then we default to de-serializing with - // version 0. This is necessary because there are places where we use a - // MsgDeSoHeader struct to store Bitcoin headers. - ret, err = DecodeHeaderVersion0(rr) + // If we have an unrecognized version then we return an error. The schema + // differences between header versions 0, 1, 2, and beyond will be large + // enough that no one decoder is a safe fallback. + err = fmt.Errorf("DecodeHeader: Unrecognized header version: %v", headerVersion) } if err != nil { - return nil, fmt.Errorf( - "DecodeHeader: Unrecognized header version: %v", headerVersion) + return nil, errors.Wrapf(err, "DecodeHeader: Error parsing header:") } // Set the version since it's not decoded in the version-specific handlers. 
ret.Version = headerVersion diff --git a/lib/network_test.go b/lib/network_test.go index b99595c94..e098adfed 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -3,7 +3,6 @@ package lib import ( "bytes" "encoding/hex" - "github.com/holiman/uint256" "math/big" "math/rand" "reflect" @@ -12,6 +11,8 @@ import ( "testing" "time" + "github.com/holiman/uint256" + "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" @@ -83,7 +84,7 @@ func TestVerack(t *testing.T) { require.Equal(&MsgDeSoVerack{Nonce: nonce}, testMsg) } -var expectedBlockHeader = &MsgDeSoHeader{ +var expectedBlockHeaderVersion1 = &MsgDeSoHeader{ Version: 1, PrevBlockHash: &BlockHash{ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x11, @@ -104,6 +105,61 @@ var expectedBlockHeader = &MsgDeSoHeader{ ExtraNonce: uint64(101234123456789), } +// Creates fully formatted a PoS block header with random signatures +// and block hashes +func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { + testBlockHash := BlockHash{ + 0x00, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x11, + 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x20, 0x21, + 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x30, 0x31, + 0x32, 0x33, + } + testMerkleRoot := BlockHash{ + 0x00, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40, 0x41, 0x42, 0x43, + 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x50, 0x51, 0x52, 0x53, + 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x60, 0x61, 0x62, 0x63, + 0x64, 0x65, + } + + _, testBLSSignature := _generateValidatorVotingPublicKeyAndSignature(t) + + return &MsgDeSoHeader{ + Version: 2, + PrevBlockHash: &testBlockHash, + TransactionMerkleRoot: &testMerkleRoot, + TstampSecs: uint64(1678943210), + Height: uint64(1321012345), + // Nonce and ExtraNonce are unused and set to 0 starting in version 2. 
+ Nonce: uint64(0), + ExtraNonce: uint64(0), + // Use real signatures and public keys for the PoS fields + ValidatorsVoteQC: &QuorumCertificate{ + BlockHash: &testBlockHash, + ProposedInView: uint64(123456789123), + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + SignersList: []byte{1}, + Signature: testBLSSignature, + }, + }, + ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ + TimedOutView: uint64(234567891234), + ValidatorsHighQC: &QuorumCertificate{ + BlockHash: &testBlockHash, + ProposedInView: uint64(345678912345), + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + SignersList: []byte{2}, + Signature: testBLSSignature, + }, + }, + ValidatorsTimeoutHighQCViews: []uint64{456789123456}, + ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ + SignersList: []byte{3}, + Signature: testBLSSignature, + }, + }, + } +} + func TestHeaderConversionAndReadWriteMessage(t *testing.T) { assert := assert.New(t) require := require.New(t) @@ -111,7 +167,14 @@ func TestHeaderConversionAndReadWriteMessage(t *testing.T) { _ = require networkType := NetworkType_MAINNET - { + expectedBlockHeadersToTest := []*MsgDeSoHeader{ + expectedBlockHeaderVersion1, + createTestBlockHeaderVersion2(t), + } + + // Performs a full E2E byte encode and decode of all the block header + // versions we want to test. + for _, expectedBlockHeader := range expectedBlockHeadersToTest { data, err := expectedBlockHeader.ToBytes(false) assert.NoError(err) @@ -143,12 +206,12 @@ func TestHeaderConversionAndReadWriteMessage(t *testing.T) { hdrPayload, err := expectedBlockHeader.ToBytes(false) assert.NoError(err) assert.Equal(hdrPayload, data) - } - assert.Equalf(7, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), - "Number of fields in HEADER message is different from expected. "+ - "Did you add a new field? 
If so, make sure the serialization code "+ - "works, add the new field to the test case, and fix this error.") + assert.Equalf(9, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), + "Number of fields in HEADER message is different from expected. "+ + "Did you add a new field? If so, make sure the serialization code "+ + "works, add the new field to the test case, and fix this error.") + } } func TestGetHeadersSerialization(t *testing.T) { @@ -157,8 +220,8 @@ func TestGetHeadersSerialization(t *testing.T) { _ = assert _ = require - hash1 := expectedBlockHeader.PrevBlockHash - hash2 := expectedBlockHeader.TransactionMerkleRoot + hash1 := expectedBlockHeaderVersion1.PrevBlockHash + hash2 := expectedBlockHeaderVersion1.TransactionMerkleRoot getHeaders := &MsgDeSoGetHeaders{ StopHash: hash1, @@ -179,10 +242,13 @@ func TestHeaderBundleSerialization(t *testing.T) { _ = assert _ = require - hash1 := expectedBlockHeader.PrevBlockHash + hash1 := expectedBlockHeaderVersion1.PrevBlockHash headerBundle := &MsgDeSoHeaderBundle{ - Headers: []*MsgDeSoHeader{expectedBlockHeader, expectedBlockHeader}, + Headers: []*MsgDeSoHeader{ + expectedBlockHeaderVersion1, + createTestBlockHeaderVersion2(t), + }, TipHash: hash1, TipHeight: 12345, } @@ -256,7 +322,7 @@ func TestReadWrite(t *testing.T) { } var expectedBlock = &MsgDeSoBlock{ - Header: expectedBlockHeader, + Header: expectedBlockHeaderVersion1, Txns: expectedTransactions(true), // originally was effectively false BlockProducerInfo: &BlockProducerInfo{ diff --git a/lib/pos_network.go b/lib/pos_network.go index f2db4117d..5ad15a0c3 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -336,8 +336,8 @@ func DecodeQuorumCertificate(rr io.Reader) (*QuorumCertificate, error) { return nil, errors.Wrapf(err, "DecodeQuorumCertificate: Error decoding ProposedInView") } - qc.ValidatorsVoteAggregatedSignature, err = DecodeAggregatedBLSSignature(rr) - if err != nil { + qc.ValidatorsVoteAggregatedSignature = &AggregatedBLSSignature{} + 
if err = qc.ValidatorsVoteAggregatedSignature.FromBytes(rr); err != nil { return nil, errors.Wrapf(err, "DecodeQuorumCertificate: Error decoding ValidatorsVoteAggregatedSignature") } @@ -384,19 +384,150 @@ func (sig *AggregatedBLSSignature) ToBytes() ([]byte, error) { return retBytes, nil } -func DecodeAggregatedBLSSignature(rr io.Reader) (*AggregatedBLSSignature, error) { - var sig AggregatedBLSSignature +func (sig *AggregatedBLSSignature) FromBytes(rr io.Reader) error { var err error sig.SignersList, err = DecodeByteArray(rr) if err != nil { - return nil, errors.Wrapf(err, "DecodeAggregatedBLSSignature: Error decoding SignersList") + return errors.Wrapf(err, "AggregatedBLSSignature.FromBytes: Error decoding SignersList") } sig.Signature, err = DecodeBLSSignature(rr) if err != nil { - return nil, errors.Wrapf(err, "DecodeAggregatedBLSSignature: Error decoding Signature") + return errors.Wrapf(err, "AggregatedBLSSignature.FromBytes: Error decoding Signature") + } + + return nil +} + +// TimeoutAggregateQuorumCertificate is an aggregation of timeout messages from 2/3rds +// of all validators, weighted by stake, which indicates that these validators want to +// time out a particular view. +// +// When validators want to time out a view, they send their high QCs to the block proposer +// who builds an aggregate QC extending the chain from the highest QC that it received +// from all validators who timed out. To prove that it has selected the highest QC, the +// proposer also includes a list of the high QC views that each validator has sent. +type TimeoutAggregateQuorumCertificate struct { + + // The view that the block proposers has produced a timeout QC for. + TimedOutView uint64 + + // This is the highest QC that the block proposer received from any validator who + // has timed out for the current view. 
+ ValidatorsHighQC *QuorumCertificate + + // Here we include a list of the HighQC.View values we got from each of the + // validators in the ValidatorsTimeoutHighQCViews field. In addition, for each + // unique HighQC.View value we received, we combine all the partial signatures + // for that HighQC.View into a single BLSMultiSignature. + // + // + // The aggregated signature is made up of partial signatures for all present + // validators, each of whom signed a payload with the pair + // (current view, the validator's local HighQC.View). + // + // The ordering of high QC views and validators in the aggregate signature will + // match the ordering of active validators in descending order of stake for the + // current view's epoch. I.e. index 0 will correspond to the highest-staked active + // validator in the epoch, index 1 will correspond to the second-highest-staked active + // validator, ... + ValidatorsTimeoutHighQCViews []uint64 + ValidatorsTimeoutAggregatedSignature *AggregatedBLSSignature +} + +// Performs a deep equality check between two TimeoutAggregateQuorumCertificate, and +// returns true if the values of the two are identical. 
+func (aggQC *TimeoutAggregateQuorumCertificate) Eq( + other *TimeoutAggregateQuorumCertificate, +) bool { + if aggQC == nil && other == nil { + return true + } + + if (aggQC == nil) != (other == nil) { + return false + } + + if len(aggQC.ValidatorsTimeoutHighQCViews) != len(other.ValidatorsTimeoutHighQCViews) { + return false + } + + if aggQC.TimedOutView != other.TimedOutView { + return false + } + + if !aggQC.ValidatorsHighQC.Eq(other.ValidatorsHighQC) { + return false + } + + if !aggQC.ValidatorsTimeoutAggregatedSignature.Eq(other.ValidatorsTimeoutAggregatedSignature) { + return false + } + + for i := 0; i < len(aggQC.ValidatorsTimeoutHighQCViews); i++ { + if aggQC.ValidatorsTimeoutHighQCViews[i] != other.ValidatorsTimeoutHighQCViews[i] { + return false + } + } + + return true +} + +func (aggQC *TimeoutAggregateQuorumCertificate) ToBytes() ([]byte, error) { + retBytes := []byte{} + + // TimedOutView + retBytes = append(retBytes, UintToBuf(aggQC.TimedOutView)...) + + // ValidatorsHighQC + if aggQC.ValidatorsHighQC == nil { + return nil, errors.New("TimeoutAggregateQuorumCertificate.ToBytes: ValidatorsHighQC must not be nil") + } + encodedValidatorsHighQC, err := aggQC.ValidatorsHighQC.ToBytes() + if err != nil { + return nil, errors.Wrapf(err, "TimeoutAggregateQuorumCertificate.ToBytes: Error encoding ValidatorsHighQC") + } + retBytes = append(retBytes, encodedValidatorsHighQC...) + + // ValidatorsTimeoutHighQCViews + retBytes = append(retBytes, EncodeUint64Array(aggQC.ValidatorsTimeoutHighQCViews)...) 
+ + // ValidatorsTimeoutAggregatedSignature + if aggQC.ValidatorsTimeoutAggregatedSignature == nil { + return nil, errors.New("TimeoutAggregateQuorumCertificate.ToBytes: ValidatorsTimeoutAggregatedSignature must not be nil") + } + encodedValidatorsTimeoutAggregatedSignature, err := aggQC.ValidatorsTimeoutAggregatedSignature.ToBytes() + if err != nil { + return nil, errors.Wrapf(err, "TimeoutAggregateQuorumCertificate.ToBytes: Error encoding ValidatorsTimeoutAggregatedSignature") + } + retBytes = append(retBytes, encodedValidatorsTimeoutAggregatedSignature...) + + return retBytes, nil +} + +func (aggQC *TimeoutAggregateQuorumCertificate) FromBytes(rr io.Reader) error { + var err error + + aggQC.TimedOutView, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "TimeoutAggregateQuorumCertificate.FromBytes: Error decoding TimedOutView") + } + + aggQC.ValidatorsHighQC, err = DecodeQuorumCertificate(rr) + if err != nil { + return errors.Wrapf(err, "TimeoutAggregateQuorumCertificate.FromBytes: Error decoding ValidatorsHighQC") + } + + aggQC.ValidatorsTimeoutHighQCViews, err = DecodeUint64Array(rr) + if err != nil { + return errors.Wrapf(err, "TimeoutAggregateQuorumCertificate.FromBytes: Error decoding ValidatorsTimeoutHighQCViews") + } + + aggQC.ValidatorsTimeoutAggregatedSignature = &AggregatedBLSSignature{} + if aggQC.ValidatorsTimeoutAggregatedSignature.FromBytes(rr); err != nil { + return errors.Wrapf(err, "TimeoutAggregateQuorumCertificate.FromBytes: Error decoding ValidatorsTimeoutAggregatedSignature") } - return &sig, nil + return nil } diff --git a/lib/pos_network_no_relic_test.go b/lib/pos_network_no_relic_test.go new file mode 100644 index 000000000..45eaeacf1 --- /dev/null +++ b/lib/pos_network_no_relic_test.go @@ -0,0 +1,17 @@ +//go:build !relic + +package lib + +import ( + "testing" + + "github.com/deso-protocol/core/bls" +) + +// This function is a placeholder needed to make the lib tests compile when the relic build tag +// isn't defined. 
Without the relic build tag, we are not able to generate any BLS keys. This +// function immediately fails the parent test that called it. +func _generateValidatorVotingPublicKeyAndSignature(t *testing.T) (*bls.PublicKey, *bls.Signature) { + t.FailNow() + return nil, nil +} From 6eb792ef999f00d5de45606b6a5fdc884c62744a Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 27 Jun 2023 12:03:42 -0400 Subject: [PATCH 117/762] PoS Implement Bitset data structure (#564) * Implement new Bitset data structure * Update bitset constructor to return a pointer * Better comments * Address PR comments --- Dockerfile | 1 + test.Dockerfile | 1 + utils/bitset/bitset.go | 69 ++++++++++++++++++++++++++ utils/bitset/bitset_test.go | 96 +++++++++++++++++++++++++++++++++++++ 4 files changed, 167 insertions(+) create mode 100644 utils/bitset/bitset.go create mode 100644 utils/bitset/bitset_test.go diff --git a/Dockerfile b/Dockerfile index 04a49132f..c366741b3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,6 +15,7 @@ COPY cmd cmd COPY desohash desohash COPY lib lib COPY migrate migrate +COPY utils utils COPY test_data test_data COPY main.go . diff --git a/test.Dockerfile b/test.Dockerfile index 6eb62c4f9..6200abd07 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -22,6 +22,7 @@ COPY cmd cmd COPY desohash desohash COPY lib lib COPY migrate migrate +COPY utils utils COPY test_data test_data COPY main.go . diff --git a/utils/bitset/bitset.go b/utils/bitset/bitset.go new file mode 100644 index 000000000..cbb75a997 --- /dev/null +++ b/utils/bitset/bitset.go @@ -0,0 +1,69 @@ +package bitset + +import ( + "math/big" +) + +// A Bitset is an ordered list of bits with arbitrary length. It uses +// the built-in big.Int as the underlying storage scheme. The big.Int maintains +// an ordered bit list and provides an interface where we can flip each bit +// individually. 
The Bitset acts as a wrapper and provides boolean Get and Set +// functions on top. +// +// We implement a custom Bitset data structure using Big.Int rather than using +// an off-the-shelf solution because we need to support byte encoding and decoding, +// with known endianness. Out of the box, the built-in big.Int supports individual +// bit operations, safe indexing & boundary checks, dynamic resizing, and big +// endian byte encoding/decoding. It allows us to implement a straightforward +// Bitset data structure while having full transparency into the underlying +// implementation, and no reliance on 3rd party libraries. +type Bitset struct { + store *big.Int +} + +// Initializes a new Bitset with zero value for all indices. +func NewBitset() *Bitset { + return &Bitset{ + store: big.NewInt(0), + } +} + +func (b *Bitset) Get(index int) bool { + return b.store.Bit(index) == 1 +} + +func (b *Bitset) Set(index int, newValue bool) *Bitset { + booleanValue := uint(0) + if newValue { + booleanValue = 1 + } + + b.store.SetBit(b.store, index, booleanValue) + return b +} + +// Returns the total number of bits used by this bitset. This is +// equivalent to the length of the absolute value of the underlying +// big.Int. +// +// This also means that the highest index set to true is b.Size() - 1. +// All indices beyond are implicitly false. +func (b *Bitset) Size() int { + return b.store.BitLen() +} + +// Return the absolute value of the underlying the BigSet as a big-endian +// byte slice. The output is compressed such that if the underlying +// big.Int had zeros at the highest bits, they will be removed +// from the output. +func (b *Bitset) ToBytes() []byte { + return b.store.Bytes() +} + +// Populates the BitSet from a big-endian byte slice. 
+func (b *Bitset) FromBytes(bytes []byte) { + if b.store == nil { + b.store = big.NewInt(0) + } + b.store.SetBytes(bytes) +} diff --git a/utils/bitset/bitset_test.go b/utils/bitset/bitset_test.go new file mode 100644 index 000000000..47bb083f6 --- /dev/null +++ b/utils/bitset/bitset_test.go @@ -0,0 +1,96 @@ +package bitset + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBitsetStorage(t *testing.T) { + b := NewBitset() + + // Set every 3rd bit to true for indices 0 through 999. + // This sets indices 0, 3, 5, 9,... 996, 999 to true. + for ii := 0; ii < 1000; ii++ { + if ii%3 == 0 { + b.Set(ii, true) + } + } + + // Set all bits from 1000 through 1009 to false. + for ii := 1000; ii < 1010; ii++ { + require.False(t, b.Get(ii)) + } + + // Verify that only every 3rd bit is set to true from indices + // 0 through 999. + for ii := 0; ii < 1000; ii++ { + if ii%3 == 0 { + require.True(t, b.Get(ii)) + } else { + require.False(t, b.Get(ii)) + } + } + + // Verify that no additional bits are set beyond index 999. + for ii := 1000; ii < 1010; ii++ { + require.False(t, b.Get(ii)) + } + + // Verify that the size of the bitset is 1000, which means that + // 1000 bits are used, and the highest index set to true is index 999. + // All indices beyond are implicitly false. + require.Equal(t, 1000, b.Size()) +} + +func TestBitsetByteEncodeDecode(t *testing.T) { + b := NewBitset() + + // Sets indices 0, 3, 6, 9 to true. + for ii := 0; ii < 10; ii++ { + if ii%3 == 0 { + b.Set(ii, true) + } + } + + // Set indices 10 through 19 to false. + for ii := 10; ii < 20; ii++ { + require.False(t, b.Get(ii)) + } + + // When byte-encoded in big endian format, the bitset + // has a value of 10 01001001. The highest index set to + // true is at index 9. + byteEncoding := b.ToBytes() + + // When byte encoded, the bitset has size of 2 bytes. 
+ require.Equal(t, 2, len(byteEncoding)) + require.True(t, byteEncoding[0] == 0b00000010) // index 9 is true + require.True(t, byteEncoding[1] == 0b01001001) // indices 0, 3, 6 are true + + decodedBitset := NewBitset() + decodedBitset.FromBytes(byteEncoding) + + // Verify that the decoded bitset has the same size as the original. + require.Equal(t, b.Size(), decodedBitset.Size()) + + // Verify that the decoded bitset has the same values as the original. + for ii := 0; ii < 10; ii++ { + require.Equal(t, b.Get(ii), decodedBitset.Get(ii)) + } +} + +func TestEmptyBitsetByteEncodeDecode(t *testing.T) { + b := NewBitset() + + require.Zero(t, b.Size()) + + byteEncoding := b.ToBytes() + + require.Zero(t, len(byteEncoding)) + + decodedBitset := NewBitset() + decodedBitset.FromBytes(byteEncoding) + + require.Zero(t, decodedBitset.Size()) +} From 8d93c8da163c105f5a427ee64a922c1da650ff91 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 27 Jun 2023 12:20:31 -0400 Subject: [PATCH 118/762] PoS Add Bitset to messaging schema (#565) * Migreate AggregateQC to use bitset * Add tests * Cleanup * Fix broken test * Address PR feedback --- lib/network_test.go | 8 +++++--- lib/pos_network.go | 39 ++++++++++++++++++++++++++++++++------- lib/pos_network_test.go | 5 +++-- utils/bitset/bitset.go | 3 ++- 4 files changed, 42 insertions(+), 13 deletions(-) diff --git a/lib/network_test.go b/lib/network_test.go index e098adfed..39fa573f5 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -17,6 +17,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/bxcodec/faker" + "github.com/deso-protocol/core/utils/bitset" merkletree "github.com/deso-protocol/go-merkle-tree" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -121,6 +122,7 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { 0x64, 0x65, } + testBitset := bitset.NewBitset().Set(0, true).Set(3, true) _, testBLSSignature := 
_generateValidatorVotingPublicKeyAndSignature(t) return &MsgDeSoHeader{ @@ -137,7 +139,7 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { BlockHash: &testBlockHash, ProposedInView: uint64(123456789123), ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ - SignersList: []byte{1}, + SignersList: testBitset, Signature: testBLSSignature, }, }, @@ -147,13 +149,13 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { BlockHash: &testBlockHash, ProposedInView: uint64(345678912345), ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ - SignersList: []byte{2}, + SignersList: testBitset, Signature: testBLSSignature, }, }, ValidatorsTimeoutHighQCViews: []uint64{456789123456}, ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ - SignersList: []byte{3}, + SignersList: testBitset, Signature: testBLSSignature, }, }, diff --git a/lib/pos_network.go b/lib/pos_network.go index 5ad15a0c3..724136629 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -6,6 +6,7 @@ import ( "io" "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/utils/bitset" "github.com/pkg/errors" ) @@ -350,9 +351,7 @@ func DecodeQuorumCertificate(rr io.Reader) (*QuorumCertificate, error) { // present in the aggregated signature. The indices of all validators are expected // to be known by the caller. type AggregatedBLSSignature struct { - // TODO: Switch this to a bitlist, which will result in ~8x reduction in total - // size of this construct. 
- SignersList []byte + SignersList *bitset.Bitset Signature *bls.Signature } @@ -365,15 +364,21 @@ func (sig *AggregatedBLSSignature) Eq(other *AggregatedBLSSignature) bool { return false } - return bytes.Equal(sig.SignersList, other.SignersList) && - sig.Signature.Eq(other.Signature) + if !sig.Signature.Eq(other.Signature) { + return false + } + + return bytes.Equal(sig.SignersList.ToBytes(), other.SignersList.ToBytes()) } func (sig *AggregatedBLSSignature) ToBytes() ([]byte, error) { retBytes := []byte{} // SignersList - retBytes = append(retBytes, EncodeByteArray(sig.SignersList)...) + if sig.SignersList == nil { + return nil, errors.New("AggregatedBLSSignature.ToBytes: SignersList must not be nil") + } + retBytes = append(retBytes, EncodeBitset(sig.SignersList)...) // Signature if sig.Signature == nil { @@ -387,7 +392,7 @@ func (sig *AggregatedBLSSignature) ToBytes() ([]byte, error) { func (sig *AggregatedBLSSignature) FromBytes(rr io.Reader) error { var err error - sig.SignersList, err = DecodeByteArray(rr) + sig.SignersList, err = DecodeBitset(rr) if err != nil { return errors.Wrapf(err, "AggregatedBLSSignature.FromBytes: Error decoding SignersList") } @@ -531,3 +536,23 @@ func (aggQC *TimeoutAggregateQuorumCertificate) FromBytes(rr io.Reader) error { return nil } + +// ================================================================== +// Bitset Utils +// ================================================================== + +func EncodeBitset(b *bitset.Bitset) []byte { + var encodedBytes []byte + if b != nil { + encodedBytes = b.ToBytes() + } + return EncodeByteArray(encodedBytes) +} + +func DecodeBitset(rr io.Reader) (*bitset.Bitset, error) { + encodedBytes, err := DecodeByteArray(rr) + if err != nil { + return nil, errors.Wrapf(err, "DecodeBitset: Error decoding bitset") + } + return (bitset.NewBitset()).FromBytes(encodedBytes), nil +} diff --git a/lib/pos_network_test.go b/lib/pos_network_test.go index 76514eabc..5695d5d73 100644 --- 
a/lib/pos_network_test.go +++ b/lib/pos_network_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/utils/bitset" "github.com/stretchr/testify/require" ) @@ -58,7 +59,7 @@ func TestValidatorTimeoutEncodeDecode(t *testing.T) { BlockHash: &BlockHash{}, ProposedInView: 999910, ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ - SignersList: []byte{1, 2}, + SignersList: bitset.NewBitset().Set(0, true).Set(3, true), Signature: aggregateSignature, }, }, @@ -68,7 +69,7 @@ func TestValidatorTimeoutEncodeDecode(t *testing.T) { // Encode the message and verify the length is correct. encodedMsgBytes, err := originalMsg.ToBytes(false) require.NoError(t, err) - require.Equal(t, 270, len(encodedMsgBytes)) + require.Equal(t, 269, len(encodedMsgBytes)) // Decode the message. decodedMsg := &MsgDeSoValidatorTimeout{} diff --git a/utils/bitset/bitset.go b/utils/bitset/bitset.go index cbb75a997..63bfc7fbf 100644 --- a/utils/bitset/bitset.go +++ b/utils/bitset/bitset.go @@ -61,9 +61,10 @@ func (b *Bitset) ToBytes() []byte { } // Populates the BitSet from a big-endian byte slice. 
-func (b *Bitset) FromBytes(bytes []byte) { +func (b *Bitset) FromBytes(bytes []byte) *Bitset { if b.store == nil { b.store = big.NewInt(0) } b.store.SetBytes(bytes) + return b } From 31609aa9682ece0bc76b61949571929c68bcf410 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:29:46 -0400 Subject: [PATCH 119/762] Simplify public key field names for vote and timeout msg types (#566) --- lib/pos_network.go | 64 ++++++++++++++++++++--------------------- lib/pos_network_test.go | 28 +++++++++--------- 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/lib/pos_network.go b/lib/pos_network.go index 724136629..5fc0a801c 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -21,13 +21,13 @@ type MsgDeSoValidatorVote struct { // Given the validator's ECDSA public key, we can look up their Validator PKID // and their stake in consensus. This allows us to verify that the vote message // was sent by a registered validator. - ValidatorPublicKey *PublicKey + PublicKey *PublicKey // The BLS voting public key for the validator who constructed this vote message. // The BLS public key is included in the vote message because it allows us to // easily verify if the BLS VotePartialSignature is correctly formed, without having // to first look up the validator's BLS public key in consensus. It helps optimize // vote validation. - ValidatorVotingPublicKey *bls.PublicKey + VotingPublicKey *bls.PublicKey // The block hash corresponding to the block that this vote is for. 
BlockHash *BlockHash @@ -55,17 +55,17 @@ func (msg *MsgDeSoValidatorVote) ToBytes(bool) ([]byte, error) { // MsgVersion retBytes = append(retBytes, msg.MsgVersion) - // ValidatorPublicKey - if msg.ValidatorPublicKey == nil { - return nil, errors.New("MsgDeSoValidatorVote.ToBytes: ValidatorPublicKey must not be nil") + // PublicKey + if msg.PublicKey == nil { + return nil, errors.New("MsgDeSoValidatorVote.ToBytes: PublicKey must not be nil") } - retBytes = append(retBytes, msg.ValidatorPublicKey.ToBytes()...) + retBytes = append(retBytes, msg.PublicKey.ToBytes()...) - // ValidatorVotingPublicKey - if msg.ValidatorVotingPublicKey == nil { - return nil, errors.New("MsgDeSoValidatorVote.ToBytes: ValidatorVotingPublicKey must not be nil") + // VotingPublicKey + if msg.VotingPublicKey == nil { + return nil, errors.New("MsgDeSoValidatorVote.ToBytes: VotingPublicKey must not be nil") } - retBytes = append(retBytes, EncodeBLSPublicKey(msg.ValidatorVotingPublicKey)...) + retBytes = append(retBytes, EncodeBLSPublicKey(msg.VotingPublicKey)...) 
// BlockHash if msg.BlockHash == nil { @@ -98,16 +98,16 @@ func (msg *MsgDeSoValidatorVote) FromBytes(data []byte) error { } msg.MsgVersion = msgVersion - // ValidatorPublicKey - msg.ValidatorPublicKey, err = ReadPublicKey(rr) + // PublicKey + msg.PublicKey, err = ReadPublicKey(rr) if err != nil { - return errors.Wrapf(err, "MsgDeSoValidatorVote.FromBytes: Error decoding ValidatorPublicKey") + return errors.Wrapf(err, "MsgDeSoValidatorVote.FromBytes: Error decoding PublicKey") } - // ValidatorVotingPublicKey - msg.ValidatorVotingPublicKey, err = DecodeBLSPublicKey(rr) + // VotingPublicKey + msg.VotingPublicKey, err = DecodeBLSPublicKey(rr) if err != nil { - return errors.Wrapf(err, "MsgDeSoValidatorVote.FromBytes: Error decoding ValidatorVotingPublicKey") + return errors.Wrapf(err, "MsgDeSoValidatorVote.FromBytes: Error decoding VotingPublicKey") } // BlockHash @@ -141,13 +141,13 @@ type MsgDeSoValidatorTimeout struct { // The ECDSA public key for the validator who constructed this timeout message. // Given the validator's ECDSA public key, we can look up their Validator PKID. // This allows us to verify that the timeout originated from a registered validator. - ValidatorPublicKey *PublicKey + PublicKey *PublicKey // The BLS voting public key for the validator who constructed this timeout. The BLS // public key is included in the timeout message because it allows us to easily // verify that the BLS TimeoutPartialSignature is correctly formed, without having to // first look up the validator's BLS public key in consensus. It helps optimize timeout // message validation. - ValidatorVotingPublicKey *bls.PublicKey + VotingPublicKey *bls.PublicKey // The view that the validator has timed out on. 
TimedOutView uint64 @@ -178,17 +178,17 @@ func (msg *MsgDeSoValidatorTimeout) ToBytes(bool) ([]byte, error) { // MsgVersion retBytes = append(retBytes, msg.MsgVersion) - // ValidatorPublicKey - if msg.ValidatorPublicKey == nil { - return nil, errors.New("MsgDeSoValidatorTimeout.ToBytes: ValidatorPublicKey must not be nil") + // PublicKey + if msg.PublicKey == nil { + return nil, errors.New("MsgDeSoValidatorTimeout.ToBytes: PublicKey must not be nil") } - retBytes = append(retBytes, msg.ValidatorPublicKey.ToBytes()...) + retBytes = append(retBytes, msg.PublicKey.ToBytes()...) - // ValidatorVotingPublicKey - if msg.ValidatorVotingPublicKey == nil { - return nil, errors.New("MsgDeSoValidatorTimeout.ToBytes: ValidatorVotingPublicKey must not be nil") + // VotingPublicKey + if msg.VotingPublicKey == nil { + return nil, errors.New("MsgDeSoValidatorTimeout.ToBytes: VotingPublicKey must not be nil") } - retBytes = append(retBytes, EncodeBLSPublicKey(msg.ValidatorVotingPublicKey)...) + retBytes = append(retBytes, EncodeBLSPublicKey(msg.VotingPublicKey)...) // TimeoutView retBytes = append(retBytes, UintToBuf(msg.TimedOutView)...) 
@@ -225,16 +225,16 @@ func (msg *MsgDeSoValidatorTimeout) FromBytes(data []byte) error { } msg.MsgVersion = msgVersion - // ValidatorPublicKey - msg.ValidatorPublicKey, err = ReadPublicKey(rr) + // PublicKey + msg.PublicKey, err = ReadPublicKey(rr) if err != nil { - return errors.Wrapf(err, "MsgDeSoValidatorTimeout.FromBytes: Error decoding ValidatorPublicKey") + return errors.Wrapf(err, "MsgDeSoValidatorTimeout.FromBytes: Error decoding PublicKey") } - // ValidatorVotingPublicKey - msg.ValidatorVotingPublicKey, err = DecodeBLSPublicKey(rr) + // VotingPublicKey + msg.VotingPublicKey, err = DecodeBLSPublicKey(rr) if err != nil { - return errors.Wrapf(err, "MsgDeSoValidatorTimeout.FromBytes: Error decoding ValidatorVotingPublicKey") + return errors.Wrapf(err, "MsgDeSoValidatorTimeout.FromBytes: Error decoding VotingPublicKey") } // TimedOutView diff --git a/lib/pos_network_test.go b/lib/pos_network_test.go index 5695d5d73..8c30d499d 100644 --- a/lib/pos_network_test.go +++ b/lib/pos_network_test.go @@ -14,12 +14,12 @@ func TestValidatorVoteEncodeDecode(t *testing.T) { validatorVotingPublicKey, votePartialSignature := _generateValidatorVotingPublicKeyAndSignature(t) originalMsg := MsgDeSoValidatorVote{ - MsgVersion: MsgValidatorVoteVersion0, - ValidatorPublicKey: &PublicKey{}, - ValidatorVotingPublicKey: validatorVotingPublicKey, - BlockHash: &BlockHash{}, - ProposedInView: 9910, - VotePartialSignature: votePartialSignature, + MsgVersion: MsgValidatorVoteVersion0, + PublicKey: &PublicKey{}, + VotingPublicKey: validatorVotingPublicKey, + BlockHash: &BlockHash{}, + ProposedInView: 9910, + VotePartialSignature: votePartialSignature, } // Encode the message and verify the length is correct. @@ -34,8 +34,8 @@ func TestValidatorVoteEncodeDecode(t *testing.T) { // Check that the message bodies are the same. 
require.Equal(t, originalMsg.MsgVersion, decodedMsg.MsgVersion) - require.True(t, originalMsg.ValidatorPublicKey.Equal(*decodedMsg.ValidatorPublicKey)) - require.True(t, originalMsg.ValidatorVotingPublicKey.Eq(decodedMsg.ValidatorVotingPublicKey)) + require.True(t, originalMsg.PublicKey.Equal(*decodedMsg.PublicKey)) + require.True(t, originalMsg.VotingPublicKey.Eq(decodedMsg.VotingPublicKey)) require.Equal(t, originalMsg.BlockHash, decodedMsg.BlockHash) require.Equal(t, originalMsg.ProposedInView, decodedMsg.ProposedInView) require.True(t, originalMsg.VotePartialSignature.Eq(decodedMsg.VotePartialSignature)) @@ -51,10 +51,10 @@ func TestValidatorTimeoutEncodeDecode(t *testing.T) { require.NoError(t, err) originalMsg := MsgDeSoValidatorTimeout{ - MsgVersion: MsgValidatorTimeoutVersion0, - ValidatorPublicKey: &PublicKey{}, - ValidatorVotingPublicKey: validatorVotingPublicKey, - TimedOutView: 999912, + MsgVersion: MsgValidatorTimeoutVersion0, + PublicKey: &PublicKey{}, + VotingPublicKey: validatorVotingPublicKey, + TimedOutView: 999912, HighQC: &QuorumCertificate{ BlockHash: &BlockHash{}, ProposedInView: 999910, @@ -78,8 +78,8 @@ func TestValidatorTimeoutEncodeDecode(t *testing.T) { // Check that the message bodies are the same. 
require.Equal(t, originalMsg.MsgVersion, decodedMsg.MsgVersion) - require.True(t, originalMsg.ValidatorPublicKey.Equal(*decodedMsg.ValidatorPublicKey)) - require.True(t, originalMsg.ValidatorVotingPublicKey.Eq(decodedMsg.ValidatorVotingPublicKey)) + require.True(t, originalMsg.PublicKey.Equal(*decodedMsg.PublicKey)) + require.True(t, originalMsg.VotingPublicKey.Eq(decodedMsg.VotingPublicKey)) require.Equal(t, originalMsg.TimedOutView, decodedMsg.TimedOutView) require.True(t, originalMsg.TimeoutPartialSignature.Eq(decodedMsg.TimeoutPartialSignature)) From 3e21ba20dddd3588f897bdc1213d78edd7a9914f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 28 Jun 2023 11:12:12 -0400 Subject: [PATCH 120/762] PoS Add BLS public key and signature to BlockProducerInfo (#567) * Add BLS PK and signature to BlockProducerInfo * Add versioning for BlockProducerInfo * Implement byte encoder and decoder * Add tests * Address nits * Update BlockProducerInfo public key field to PublicKey type * Restrict BlockProducerInfo byte encoding/decoding to version 1 * Rename BlockProducerInfo to MsgDeSoBlockProducerInfo --- lib/block_producer.go | 9 +- lib/blockchain.go | 17 +-- lib/blockchain_test.go | 13 +-- lib/constants.go | 16 +++ lib/network.go | 232 ++++++++++++++++++++++++++++++++++------- lib/network_test.go | 41 +++++++- lib/server.go | 3 +- 7 files changed, 272 insertions(+), 59 deletions(-) diff --git a/lib/block_producer.go b/lib/block_producer.go index 158820507..807917a14 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -3,13 +3,14 @@ package lib import ( "encoding/hex" "fmt" - "github.com/btcsuite/btcd/wire" - "github.com/tyler-smith/go-bip39" "math" "sync" "sync/atomic" "time" + "github.com/btcsuite/btcd/wire" + "github.com/tyler-smith/go-bip39" + "github.com/deso-protocol/go-deadlock" "github.com/btcsuite/btcd/btcec" @@ -516,8 +517,8 @@ func (desoBlockProducer *DeSoBlockProducer) SignBlock(blockFound *MsgDeSoBlock) 
// If we get here, we now have a valid signature for the block. // Embed the signature into the block. - blockFound.BlockProducerInfo = &BlockProducerInfo{ - PublicKey: desoBlockProducer.blockProducerPrivateKey.PubKey().SerializeCompressed(), + blockFound.BlockProducerInfo = &MsgDeSoBlockProducerInfo{ + PublicKey: NewPublicKey(desoBlockProducer.blockProducerPrivateKey.PubKey().SerializeCompressed()), Signature: signature, } diff --git a/lib/blockchain.go b/lib/blockchain.go index 488434b5f..792bd06a6 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5,7 +5,6 @@ import ( "container/list" "encoding/hex" "fmt" - "github.com/holiman/uint256" "math" "math/big" "reflect" @@ -14,6 +13,8 @@ import ( "strings" "time" + "github.com/holiman/uint256" + btcdchain "github.com/btcsuite/btcd/blockchain" chainlib "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec" @@ -1830,35 +1831,35 @@ func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures boo } // Verify that the public key is in the allowed set. - if _, exists := bc.trustedBlockProducerPublicKeys[MakePkMapKey(publicKey)]; !exists { + if _, exists := bc.trustedBlockProducerPublicKeys[MakePkMapKey(publicKey.ToBytes())]; !exists { return false, false, errors.Wrapf(RuleErrorBlockProducerPublicKeyNotInWhitelist, "ProcessBlock: Block producer public key %v is not in the allowed list of "+ - "--trusted_block_producer_public_keys: %v.", PkToStringBoth(publicKey), + "--trusted_block_producer_public_keys: %v.", PkToStringBoth(publicKey.ToBytes()), bc.trustedBlockProducerPublicKeys) } // Verify that the public key has not been forbidden. 
- dbEntry := DbGetForbiddenBlockSignaturePubKey(bc.db, bc.snapshot, publicKey) + dbEntry := DbGetForbiddenBlockSignaturePubKey(bc.db, bc.snapshot, publicKey.ToBytes()) if dbEntry != nil { return false, false, errors.Wrapf(RuleErrorForbiddenBlockProducerPublicKey, - "ProcessBlock: Block producer public key %v is forbidden", PkToStringBoth(publicKey)) + "ProcessBlock: Block producer public key %v is forbidden", PkToStringBoth(publicKey.ToBytes())) } // At this point we are confident that we have a valid public key that is // trusted. signature := desoBlock.BlockProducerInfo.Signature - pkObj, err := btcec.ParsePubKey(publicKey, btcec.S256()) + pkObj, err := btcec.ParsePubKey(publicKey.ToBytes(), btcec.S256()) if err != nil { return false, false, errors.Wrapf(err, "ProcessBlock: Error parsing block producer public key: %v.", - PkToStringBoth(publicKey)) + PkToStringBoth(publicKey.ToBytes())) } if !signature.Verify(blockHash[:], pkObj) { return false, false, errors.Wrapf(RuleErrorInvalidBlockProducerSIgnature, "ProcessBlock: Error validating signature %v for public key %v: %v.", hex.EncodeToString(signature.Serialize()), - PkToStringBoth(publicKey), + PkToStringBoth(publicKey.ToBytes()), err) } } diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index dadfc77b8..ad74f4e56 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -4,8 +4,6 @@ import ( "encoding/hex" "flag" "fmt" - embeddedpostgres "github.com/fergusstrange/embedded-postgres" - "github.com/go-pg/pg/v10" "log" "math/big" "math/rand" @@ -14,6 +12,9 @@ import ( "testing" "time" + embeddedpostgres "github.com/fergusstrange/embedded-postgres" + "github.com/go-pg/pg/v10" + chainlib "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v3" @@ -1651,12 +1652,12 @@ func TestBadBlockSignature(t *testing.T) { // Since MineAndProcesssSingleBlock returns a valid block above, we can play with its // signature and re-process the block to see what 
happens. - blockProducerInfoCopy := &BlockProducerInfo{Signature: &btcec.Signature{}} - blockProducerInfoCopy.PublicKey = append([]byte{}, finalBlock1.BlockProducerInfo.PublicKey...) + blockProducerInfoCopy := &MsgDeSoBlockProducerInfo{Signature: &btcec.Signature{}} + blockProducerInfoCopy.PublicKey = NewPublicKey(finalBlock1.BlockProducerInfo.PublicKey[:]) *blockProducerInfoCopy.Signature = *finalBlock1.BlockProducerInfo.Signature // A bad signature with the right public key should fail. - finalBlock1.BlockProducerInfo.PublicKey = senderPkBytes + finalBlock1.BlockProducerInfo.PublicKey = NewPublicKey(senderPkBytes) _, _, err = chain.ProcessBlock(finalBlock1, true) require.Error(err) require.Contains(err.Error(), RuleErrorInvalidBlockProducerSIgnature) @@ -1664,7 +1665,7 @@ func TestBadBlockSignature(t *testing.T) { // A signature that's outright missing should fail blockSignerPkBytes, _, err := Base58CheckDecode(blockSignerPk) require.NoError(err) - finalBlock1.BlockProducerInfo.PublicKey = blockSignerPkBytes + finalBlock1.BlockProducerInfo.PublicKey = NewPublicKey(blockSignerPkBytes) finalBlock1.BlockProducerInfo.Signature = nil _, _, err = chain.ProcessBlock(finalBlock1, true) require.Error(err) diff --git a/lib/constants.go b/lib/constants.go index 34c04d33e..fd64e0329 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -140,6 +140,22 @@ const ( MsgValidatorTimeoutVersion0 MsgValidatorTimeoutVersion = 0 ) +// Versioning for the BlockProducerInfo field included in MsgDeSoBlock. This type alias +// is equivalent to a uint8, and supports the same byte encoders/decoders. +type MsgDeSoBlockProducerInfoVersion = byte + +const ( + // This represents the original schema for the BlockProducerInfo field included in + // Proof of Work blocks. The original schema did not have versioning, so we use a default + // version value of 0 to denote this. The original schema only contains the block producer's + // ECDSA public key and ECDSA signature of the block. 
+ MsgDeSoBlockProducerInfoVersion0 MsgDeSoBlockProducerInfoVersion = 0 + // This version is introduced starting with Proof of Stake blocks. It adds versioning to the + // BlockProducerInfo schema, and adds two new fields for the block producer's BLS public key + // and BLS partial signature for the block. + MsgDeSoBlockProducerInfoVersion1 MsgDeSoBlockProducerInfoVersion = 1 +) + var ( MaxUint256, _ = uint256.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") diff --git a/lib/network.go b/lib/network.go index 0adc14d26..18e2b4ecc 100644 --- a/lib/network.go +++ b/lib/network.go @@ -23,6 +23,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" decredEC "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" + "github.com/deso-protocol/core/bls" merkletree "github.com/deso-protocol/go-merkle-tree" "github.com/ethereum/go-ethereum/crypto/ecies" "github.com/holiman/uint256" @@ -2350,15 +2351,112 @@ func (msg *MsgDeSoHeader) String() string { // BLOCK Message // ================================================================== -type BlockProducerInfo struct { - PublicKey []byte +type MsgDeSoBlockProducerInfo struct { + Version MsgDeSoBlockProducerInfoVersion + + // ECDSA public key for the block producer. + PublicKey *PublicKey + // The block producer's ECDSA signature for the block. This field is used in + // MsgDeSoBlockProducerInfo version 0, and is deprecated from version 1 onwards. Signature *btcec.Signature + + // The BLS public key of the validator who constructed this block. This field is + // populated starting in MsgDeSoBlockProducerInfo version 1. + VotingPublicKey *bls.PublicKey + // The validator's partial BLS signature of the (ProposedInView, BlockHash) pair + // for enclosing block. This signature proves the validator proposed the block, + // and also acts as the validator's vote for this block. This filed is only populated + // starting in MsgDeSoBlockProducerInfo version 1. 
+ VotePartialSignature *bls.Signature +} + +// Byte encoder for the MsgDeSoBlockProducerInfo with support for versioning. The encoder only +// supports MsgDeSoBlockProducerInfo version 1 and above. For the legacy version 0, use the +// MsgDeSoBlockProducerInfo.Serialize_Legacy() method instead. +func (bpi *MsgDeSoBlockProducerInfo) ToBytes() ([]byte, error) { + // Only support byte encoding for BlockProducerInfo version 1. All later versions will + // need custom encoding. + if bpi.Version != MsgDeSoBlockProducerInfoVersion1 { + return nil, fmt.Errorf("MsgDeSoBlockProducerInfo.ToBytes: BlockProducerInfo version %d not supported", bpi.Version) + } + + encodedBytes := []byte{} + + // Required Version field + encodedBytes = append(encodedBytes, bpi.Version) + + // Required ECDSA PublicKey + if bpi.PublicKey == nil { + return nil, fmt.Errorf("MsgDeSoBlockProducerInfo.ToBytes: PublicKey is required") + } + encodedBytes = append(encodedBytes, bpi.PublicKey.ToBytes()...) + + // The ECDSA Signature is redundant, and is removed in MsgDeSoBlockProducerInfo version 1 and above + + // Voting BLS PublicKey + if bpi.VotingPublicKey == nil { + return nil, fmt.Errorf("MsgDeSoBlockProducerInfo.ToBytes: VotingPublicKey is required") + } + encodedBytes = append(encodedBytes, EncodeByteArray(bpi.VotingPublicKey.ToBytes())...) + + // Vote BLS Partial Signature + if bpi.VotePartialSignature == nil { + return nil, fmt.Errorf("MsgDeSoBlockProducerInfo.ToBytes: VotePartialSignature is required") + } + encodedBytes = append(encodedBytes, EncodeByteArray(bpi.VotePartialSignature.ToBytes())...) + + return encodedBytes, nil +} + +// Byte decoder for the MsgDeSoBlockProducerInfo, with support for versioning. The encoder only +// supports MsgDeSoBlockProducerInfo version 1 and above. For the legacy version 0, use the +// MsgDeSoBlockProducerInfo.Deserialize_Legacy() method instead. 
+func (bpi *MsgDeSoBlockProducerInfo) FromBytes(rr *bytes.Reader) error { + var err error + + // Required Version field + bpi.Version, err = rr.ReadByte() + if err != nil { + return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.FromBytes: Problem reading Version") + } + + // Only support byte decoding for BlockProducerInfo version 1. All later versions will + // need custom decoding. + if bpi.Version != MsgDeSoBlockProducerInfoVersion1 { + return fmt.Errorf("MsgDeSoBlockProducerInfo.FromBytes: BlockProducerInfo version %d not supported", bpi.Version) + } + + // Required ECDSA PublicKey + bpi.PublicKey, err = ReadPublicKey(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.FromBytes: Problem reading PublicKey") + } + + // The ECDSA Signature is redundant, and is removed in MsgDeSoBlockProducerInfo version 1 and above + // so we skip it here. + + // Voting BLS PublicKey + bpi.VotingPublicKey, err = DecodeBLSPublicKey(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.FromBytes: Problem reading VotingPublicKey") + } + + // Vote BLS Partial Signature + bpi.VotePartialSignature, err = DecodeBLSSignature(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.FromBytes: Problem reading VotePartialSignature") + } + + return nil } -func (bpi *BlockProducerInfo) Serialize() []byte { +// Legacy byte encoder for MsgDeSoBlockProducerInfo with no support for versioning. +// It encodes just the public key and signature according to the legacy encoding +// format. +func (bpi *MsgDeSoBlockProducerInfo) Serialize_Legacy() []byte { data := []byte{} data = append(data, UintToBuf(uint64(len(bpi.PublicKey)))...) - data = append(data, bpi.PublicKey...) + data = append(data, bpi.PublicKey.ToBytes()...) 
sigBytes := []byte{} if bpi.Signature != nil { @@ -2370,46 +2468,47 @@ func (bpi *BlockProducerInfo) Serialize() []byte { return data } -func (bpi *BlockProducerInfo) Deserialize(data []byte) error { - ret := &BlockProducerInfo{} +// Legacy byte decoder for MsgDeSoBlockProducerInfo with no support for versioning. +// It decodes the public key and signature according to the legacy encoding +// format and then sets the version to 0. +func (bpi *MsgDeSoBlockProducerInfo) Deserialize_Legacy(data []byte) error { + ret := &MsgDeSoBlockProducerInfo{} rr := bytes.NewReader(data) + // Set the version to 0 since this is the legacy format. + ret.Version = MsgDeSoBlockProducerInfoVersion0 + // De-serialize the public key. { pkLen, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "BlockProducerInfo.Deserialize: Error reading public key len") + return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: Error reading public key len") } if pkLen > MaxMessagePayload { - return errors.Wrapf(err, "BlockProducerInfo.Deserialize: pkLen too long: %v", pkLen) + return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: pkLen too long: %v", pkLen) } - pkBytes, err := SafeMakeSliceWithLength[byte](pkLen) + ret.PublicKey, err = ReadPublicKey(rr) if err != nil { - return errors.Wrapf(err, "BlockProducerInfo.Deserialize: Problem making slice for pkBytes") + return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: Error reading public key: ") } - _, err = io.ReadFull(rr, pkBytes) - if err != nil { - return errors.Wrapf(err, "BlockProducerInfo.Deserialize: Error reading public key: ") - } - ret.PublicKey = pkBytes } // De-serialize the signature. 
{ sigLen, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "BlockProducerInfo.Deserialize: Error reading signature len") + return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: Error reading signature len") } if sigLen > MaxMessagePayload { - return errors.Wrapf(err, "BlockProducerInfo.Deserialize: signature len too long: %v", sigLen) + return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: signature len too long: %v", sigLen) } sigBytes, err := SafeMakeSliceWithLength[byte](sigLen) if err != nil { - return errors.Wrapf(err, "BlockProducerInfo.Deserialize: Problem making slice for sigBytes") + return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: Problem making slice for sigBytes") } _, err = io.ReadFull(rr, sigBytes) if err != nil { - return errors.Wrapf(err, "BlockProducerInfo.Deserialize: Error reading signature: ") + return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: Error reading signature: ") } ret.Signature = nil if sigLen > 0 { @@ -2425,22 +2524,33 @@ func (bpi *BlockProducerInfo) Deserialize(data []byte) error { return nil } -func (bpi *BlockProducerInfo) String() string { +func (bpi *MsgDeSoBlockProducerInfo) String() string { if bpi == nil || len(bpi.PublicKey) == 0 { return "Signer Key: NONE" } - return fmt.Sprintf("Signer Key: %v", PkToStringMainnet(bpi.PublicKey)) + return fmt.Sprintf("Signer Key: %v", PkToStringMainnet(bpi.PublicKey.ToBytes())) } type MsgDeSoBlock struct { Header *MsgDeSoHeader Txns []*MsgDeSoTxn - // This field is optional and provides the producer of the block the ability to sign it - // with their private key. Doing this proves that this block was produced by a particular - // entity, which can be useful for nodes that want to restrict who they accept blocks - // from. - BlockProducerInfo *BlockProducerInfo + // This MsgDeSoBlockProducerInfo field describes the producer of the block and their signature + // for the block. 
+ // + // In Proof of Work blocks, this field is optional and provides the producer of the block + // the ability to sign the block with their private key. Doing this proves that this block + // was produced by a particular entity, which can be useful for nodes that want to restrict + // who they accept blocks from. + // + // In Proof of Stake blocks, this field is required and serves two purposes: + // 1. It allows the block producer to sign its block with its ECDSA or BLS private key. + // This allows validators to verify that the block was produced by the expected leader for the + // current block height and view. + // 2. It contains the block producer's BLS partial signature, which acts as their vote on the + // block. This way, its vote can be aggregated into a QC by the next block proposer in the leader + // schedule. + BlockProducerInfo *MsgDeSoBlockProducerInfo } func (msg *MsgDeSoBlock) EncodeBlockCommmon(preSignature bool) ([]byte, error) { @@ -2485,7 +2595,7 @@ func (msg *MsgDeSoBlock) EncodeBlockVersion1(preSignature bool) ([]byte, error) // BlockProducerInfo blockProducerInfoBytes := []byte{} if msg.BlockProducerInfo != nil { - blockProducerInfoBytes = msg.BlockProducerInfo.Serialize() + blockProducerInfoBytes = msg.BlockProducerInfo.Serialize_Legacy() } data = append(data, UintToBuf(uint64(len(blockProducerInfoBytes)))...) data = append(data, blockProducerInfoBytes...) 
@@ -2493,11 +2603,33 @@ func (msg *MsgDeSoBlock) EncodeBlockVersion1(preSignature bool) ([]byte, error) return data, nil } +func (msg *MsgDeSoBlock) EncodeBlockVersion2(preSignature bool) ([]byte, error) { + // Encode MsgDeSoHeader and []*MsgDeSoTxn + encodedBytes, err := msg.EncodeBlockCommmon(preSignature) + if err != nil { + return nil, err + } + + // Encode BlockProducerInfo + if msg.BlockProducerInfo == nil { + return nil, fmt.Errorf("MsgDeSoBlock.EncodeBlockVersion2: BlockProducerInfo should not be nil") + } + blockProducerInfoBytes, err := msg.BlockProducerInfo.ToBytes() + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoBlock.EncodeBlockVersion2: Problem encoding BlockProducerInfo") + } + encodedBytes = append(encodedBytes, EncodeByteArray(blockProducerInfoBytes)...) + + return encodedBytes, nil +} + func (msg *MsgDeSoBlock) ToBytes(preSignature bool) ([]byte, error) { if msg.Header.Version == HeaderVersion0 { return msg.EncodeBlockVersion0(preSignature) } else if msg.Header.Version == HeaderVersion1 { return msg.EncodeBlockVersion1(preSignature) + } else if msg.Header.Version == HeaderVersion2 { + return msg.EncodeBlockVersion2(preSignature) } else { return nil, fmt.Errorf("MsgDeSoBlock.ToBytes: Error encoding version: %v", msg.Header.Version) } @@ -2560,15 +2692,21 @@ func (msg *MsgDeSoBlock) FromBytes(data []byte) error { ret.Txns = append(ret.Txns, currentTxn) } - // Version 1 blocks have a BlockProducerInfo attached to them that - // must be read. If this is not a Version 1 block, then the BlockProducerInfo - // remains nil. + // Version 0 blocks have no BlockProducerInfo attached to them. We can exit early here. + if ret.Header.Version == HeaderVersion0 { + *msg = *ret + return nil + } + + // Starting with version 1, all block versions have a BlockProducerInfo length encoded. 
+ blockProducerInfoLen, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoBlock.FromBytes: Error decoding header length") + } + if ret.Header.Version == HeaderVersion1 { - blockProducerInfoLen, err := ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "MsgDeSoBlock.FromBytes: Error decoding header length") - } - var blockProducerInfo *BlockProducerInfo + // All version 1 blocks have an optional BlockProducerInfo attached. + var blockProducerInfo *MsgDeSoBlockProducerInfo if blockProducerInfoLen > 0 { if blockProducerInfoLen > MaxMessagePayload { return fmt.Errorf("MsgDeSoBlock.FromBytes: Header length %d longer "+ @@ -2582,12 +2720,32 @@ func (msg *MsgDeSoBlock) FromBytes(data []byte) error { if err != nil { return errors.Wrapf(err, "MsgDeSoBlock.FromBytes: Problem reading header") } - blockProducerInfo = &BlockProducerInfo{} - if err = blockProducerInfo.Deserialize(blockProducerInfoBytes); err != nil { + blockProducerInfo = &MsgDeSoBlockProducerInfo{} + if err = blockProducerInfo.Deserialize_Legacy(blockProducerInfoBytes); err != nil { return errors.Wrapf(err, "MsgDeSoBlock.FromBytes: Error deserializing block producer info") } ret.BlockProducerInfo = blockProducerInfo } + } else if ret.Header.Version == HeaderVersion2 { + // All version 2 blocks have a required BlockProducerInfo field. + + // Verify the length for the BlockProducerInfo. + if blockProducerInfoLen > MaxMessagePayload { + return fmt.Errorf("MsgDeSoBlock.FromBytes: BlockProducerInfo length %d longer "+ + "than max %d", blockProducerInfoLen, MaxMessagePayload) + } + + // BlockProducerInfo is a required field for block header version 2 and later. Otherwise, + // the block is considered malformed. 
+ if blockProducerInfoLen == 0 { + return fmt.Errorf("MsgDeSoBlock.FromBytes: BlockProducerInfo length cannot be zero") + } + + blockProducerInfo := &MsgDeSoBlockProducerInfo{} + if blockProducerInfo.FromBytes(rr); err != nil { + return errors.Wrapf(err, "MsgDeSoBlock.FromBytes: Error decoding BlockProducerInfo") + } + ret.BlockProducerInfo = blockProducerInfo } *msg = *ret diff --git a/lib/network_test.go b/lib/network_test.go index 39fa573f5..bbebecf10 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -162,6 +162,17 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { } } +func createTestBlockProducerInfoVersion1(t *testing.T) *MsgDeSoBlockProducerInfo { + testBLSPublicKey, testBLSSignature := _generateValidatorVotingPublicKeyAndSignature(t) + + return &MsgDeSoBlockProducerInfo{ + Version: 1, + PublicKey: NewPublicKey(pkForTesting1), + VotingPublicKey: testBLSPublicKey, + VotePartialSignature: testBLSSignature, + } +} + func TestHeaderConversionAndReadWriteMessage(t *testing.T) { assert := assert.New(t) require := require.New(t) @@ -327,14 +338,14 @@ var expectedBlock = &MsgDeSoBlock{ Header: expectedBlockHeaderVersion1, Txns: expectedTransactions(true), // originally was effectively false - BlockProducerInfo: &BlockProducerInfo{ - PublicKey: []byte{ + BlockProducerInfo: &MsgDeSoBlockProducerInfo{ + PublicKey: NewPublicKey([]byte{ // random bytes 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x10, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x30, 0x21, 0x22, 0x23, - }, + }), }, } @@ -486,6 +497,30 @@ var expectedV0Header = &MsgDeSoHeader{ Nonce: uint64(123456), } +func TestSerializeBlockVersion2(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + _ = assert + _ = require + + originalBlock := &MsgDeSoBlock{ + Header: createTestBlockHeaderVersion2(t), + BlockProducerInfo: createTestBlockProducerInfoVersion1(t), + } + + encodedBytes, err := 
originalBlock.ToBytes(false) + require.NoError(err) + + decodedBlock := NewMessage(MsgTypeBlock).(*MsgDeSoBlock) + err = decodedBlock.FromBytes(encodedBytes) + require.NoError(err) + + assert.Equal(originalBlock.Header.Version, decodedBlock.Header.Version) + assert.Equal(originalBlock.BlockProducerInfo.PublicKey, decodedBlock.BlockProducerInfo.PublicKey) + assert.True(originalBlock.BlockProducerInfo.VotingPublicKey.Eq(decodedBlock.BlockProducerInfo.VotingPublicKey)) + assert.True(originalBlock.BlockProducerInfo.VotePartialSignature.Eq(decodedBlock.BlockProducerInfo.VotePartialSignature)) +} + func TestBlockSerialize(t *testing.T) { assert := assert.New(t) require := require.New(t) diff --git a/lib/server.go b/lib/server.go index 18e9ade70..a6974953f 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1795,7 +1795,8 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { if len(srv.blockchain.trustedBlockProducerPublicKeys) > 0 && blockHeader.Height >= srv.blockchain.trustedBlockProducerStartHeight { if blk.BlockProducerInfo != nil { _, entryExists := srv.mempool.readOnlyUtxoView.ForbiddenPubKeyToForbiddenPubKeyEntry[MakePkMapKey( - blk.BlockProducerInfo.PublicKey)] + blk.BlockProducerInfo.PublicKey.ToBytes(), + )] if entryExists { srv._logAndDisconnectPeer(pp, blk, "Got forbidden block signature public key.") return From def9809fdd366bca8b0695ef450aec35c9ba1402 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 28 Jun 2023 11:21:45 -0400 Subject: [PATCH 121/762] Add ProposedInView field to block header (#568) --- lib/network.go | 16 ++++++++++++++++ lib/network_test.go | 7 ++++--- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/lib/network.go b/lib/network.go index 18e2b4ecc..d4db48dfc 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1912,6 +1912,13 @@ type MsgDeSoHeader struct { // event that ASICs become powerful enough to have birthday problems in the future. 
ExtraNonce uint64 + // ProposedInView is only used for Proof of Stake blocks, and will only be populated + // in MsgDeSoHeader versions 2 and higher. For all earlier version, this field will + // default to zero. + // + // Denotes the view in which this block was proposed. + ProposedInView uint64 + // ValidatorsVoteQC is only used for Proof of Stake blocks, and will only be // populated in MsgDeSoHeader versions 2 and higher. For all earlier version, this // field will be null. @@ -2090,6 +2097,9 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error // The Nonce and ExtraNonce fields are unused in version 2. We skip them // during both encoding and decoding. + // ProposedInView + retBytes = append(retBytes, EncodeUint64BigEndian(msg.ProposedInView)...) + // ValidatorsVoteQC if msg.ValidatorsVoteQC == nil { return nil, fmt.Errorf("EncodeHeaderVersion2: ValidatorsVoteQC must be non-nil") @@ -2268,6 +2278,12 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { retHeader.Nonce = 0 retHeader.ExtraNonce = 0 + // ProposedInView + retHeader.ProposedInView, err = DecodeUint64BigEndian(rr) + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposedInView") + } + // ValidatorsVoteQC retHeader.ValidatorsVoteQC, err = DecodeQuorumCertificate(rr) if err != nil { diff --git a/lib/network_test.go b/lib/network_test.go index bbebecf10..33e0b4737 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -132,8 +132,9 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { TstampSecs: uint64(1678943210), Height: uint64(1321012345), // Nonce and ExtraNonce are unused and set to 0 starting in version 2. 
- Nonce: uint64(0), - ExtraNonce: uint64(0), + Nonce: uint64(0), + ExtraNonce: uint64(0), + ProposedInView: uint64(1432101234), // Use real signatures and public keys for the PoS fields ValidatorsVoteQC: &QuorumCertificate{ BlockHash: &testBlockHash, @@ -220,7 +221,7 @@ func TestHeaderConversionAndReadWriteMessage(t *testing.T) { assert.NoError(err) assert.Equal(hdrPayload, data) - assert.Equalf(9, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), + assert.Equalf(10, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), "Number of fields in HEADER message is different from expected. "+ "Did you add a new field? If so, make sure the serialization code "+ "works, add the new field to the test case, and fix this error.") From ad0f89a5c5618d1809645ee8892a57444cc08047 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 28 Jun 2023 11:34:22 -0400 Subject: [PATCH 122/762] Compress uint64s in block header version 2 byte encoding (#569) --- lib/block_view_types.go | 16 ---------------- lib/network.go | 12 ++++++------ 2 files changed, 6 insertions(+), 22 deletions(-) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 17f67303f..629db038e 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -2,7 +2,6 @@ package lib import ( "bytes" - "encoding/binary" "encoding/hex" "encoding/json" "fmt" @@ -4685,21 +4684,6 @@ func DecodeUint64Array(reader io.Reader) ([]uint64, error) { return result, nil } -func EncodeUint64BigEndian(val uint64) []byte { - encodedBytes := [8]byte{} - binary.BigEndian.PutUint64(encodedBytes[:], val) - return encodedBytes[:] -} - -func DecodeUint64BigEndian(rr io.Reader) (uint64, error) { - scratchBytes := [8]byte{} - _, err := io.ReadFull(rr, scratchBytes[:]) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint64(scratchBytes[:]), nil -} - func EncodePKIDuint64Map(pkidMap map[PKID]uint64) []byte { var data []byte diff --git a/lib/network.go 
b/lib/network.go index d4db48dfc..bd8af47af 100644 --- a/lib/network.go +++ b/lib/network.go @@ -2089,16 +2089,16 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error // TstampSecs: this field can be encoded to take up the full 64 bits now // that MsgDeSoHeader version 2 does not need to be backwards compatible. - retBytes = append(retBytes, EncodeUint64BigEndian(msg.TstampSecs)...) + retBytes = append(retBytes, UintToBuf(msg.TstampSecs)...) // Height - retBytes = append(retBytes, EncodeUint64BigEndian(msg.Height)...) + retBytes = append(retBytes, UintToBuf(msg.Height)...) // The Nonce and ExtraNonce fields are unused in version 2. We skip them // during both encoding and decoding. // ProposedInView - retBytes = append(retBytes, EncodeUint64BigEndian(msg.ProposedInView)...) + retBytes = append(retBytes, UintToBuf(msg.ProposedInView)...) // ValidatorsVoteQC if msg.ValidatorsVoteQC == nil { @@ -2262,13 +2262,13 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { } // TstampSecs - retHeader.TstampSecs, err = DecodeUint64BigEndian(rr) + retHeader.TstampSecs, err = ReadUvarint(rr) if err != nil { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TstampSecs") } // Height - retHeader.Height, err = DecodeUint64BigEndian(rr) + retHeader.Height, err = ReadUvarint(rr) if err != nil { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding Height") } @@ -2279,7 +2279,7 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { retHeader.ExtraNonce = 0 // ProposedInView - retHeader.ProposedInView, err = DecodeUint64BigEndian(rr) + retHeader.ProposedInView, err = ReadUvarint(rr) if err != nil { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposedInView") } From 996ddc72a88d406c50c39d961fe853356fae4dca Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 28 Jun 2023 12:38:48 -0400 Subject: [PATCH 123/762] 
Rename Bitset parent package from utils to collections (#571) * rename-bitset-parent-package-to-collections * Fix Dockerfiles * Better comments for Bitset encoding * Gracefully handle nil bitset encoding --- Dockerfile | 18 +++++++++--------- {utils => collections}/bitset/bitset.go | 15 +++++++++++++++ {utils => collections}/bitset/bitset_test.go | 0 lib/network_test.go | 2 +- lib/pos_network.go | 2 +- lib/pos_network_test.go | 2 +- test.Dockerfile | 18 +++++++++--------- 7 files changed, 36 insertions(+), 21 deletions(-) rename {utils => collections}/bitset/bitset.go (80%) rename {utils => collections}/bitset/bitset_test.go (100%) diff --git a/Dockerfile b/Dockerfile index 51f367b1b..6aac58b66 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,15 +13,15 @@ COPY go.sum . RUN go mod download -COPY bls bls -COPY cmd cmd -COPY desohash desohash -COPY lib lib -COPY migrate migrate -COPY utils utils -COPY scripts scripts -COPY test_data test_data -COPY main.go . +COPY bls bls +COPY cmd cmd +COPY collections collections +COPY desohash desohash +COPY lib lib +COPY migrate migrate +COPY scripts scripts +COPY test_data test_data +COPY main.go . RUN ./scripts/install-relic.sh diff --git a/utils/bitset/bitset.go b/collections/bitset/bitset.go similarity index 80% rename from utils/bitset/bitset.go rename to collections/bitset/bitset.go index 63bfc7fbf..bd9df969e 100644 --- a/utils/bitset/bitset.go +++ b/collections/bitset/bitset.go @@ -48,6 +48,12 @@ func (b *Bitset) Set(index int, newValue bool) *Bitset { // // This also means that the highest index set to true is b.Size() - 1. // All indices beyond are implicitly false. +// +// Example: +// Given a sequence of values 11 values: +// - [true, true, true, false, false, false, false, false, true, false, false] +// This function returns a size of 9. The highest index set to true is 8, and +// all other indices beyond are implicitly false. 
func (b *Bitset) Size() int { return b.store.BitLen() } @@ -56,7 +62,16 @@ func (b *Bitset) Size() int { // byte slice. The output is compressed such that if the underlying // big.Int had zeros at the highest bits, they will be removed // from the output. +// +// Example: +// Given a sequence of values 11 values: +// - [true, true, true, false, false, false, false, false, true, false, false] +// This function returns the byte slice: +// - [00000001, 00000111] func (b *Bitset) ToBytes() []byte { + if b == nil || b.store == nil { + return []byte{} + } return b.store.Bytes() } diff --git a/utils/bitset/bitset_test.go b/collections/bitset/bitset_test.go similarity index 100% rename from utils/bitset/bitset_test.go rename to collections/bitset/bitset_test.go diff --git a/lib/network_test.go b/lib/network_test.go index 33e0b4737..86728e3be 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -17,7 +17,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/bxcodec/faker" - "github.com/deso-protocol/core/utils/bitset" + "github.com/deso-protocol/core/collections/bitset" merkletree "github.com/deso-protocol/go-merkle-tree" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/lib/pos_network.go b/lib/pos_network.go index 5fc0a801c..8e46839dc 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -6,7 +6,7 @@ import ( "io" "github.com/deso-protocol/core/bls" - "github.com/deso-protocol/core/utils/bitset" + "github.com/deso-protocol/core/collections/bitset" "github.com/pkg/errors" ) diff --git a/lib/pos_network_test.go b/lib/pos_network_test.go index 8c30d499d..0c093fb33 100644 --- a/lib/pos_network_test.go +++ b/lib/pos_network_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/deso-protocol/core/bls" - "github.com/deso-protocol/core/utils/bitset" + "github.com/deso-protocol/core/collections/bitset" "github.com/stretchr/testify/require" ) diff --git a/test.Dockerfile b/test.Dockerfile index 6f4a8f952..c6e5bc6ce 100644 
--- a/test.Dockerfile +++ b/test.Dockerfile @@ -14,15 +14,15 @@ COPY go.sum . RUN go mod download -COPY bls bls -COPY cmd cmd -COPY desohash desohash -COPY lib lib -COPY migrate migrate -COPY utils utils -COPY scripts scripts -COPY test_data test_data -COPY main.go . +COPY bls bls +COPY cmd cmd +COPY collections collections +COPY desohash desohash +COPY lib lib +COPY migrate migrate +COPY scripts scripts +COPY test_data test_data +COPY main.go . RUN ./scripts/install-relic.sh From f941647c4ee6bf72838b3241cb9cbde944b477bc Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 28 Jun 2023 13:26:59 -0400 Subject: [PATCH 124/762] sa/pos-messaging-schema-cleanup (#572) * sa/pos-messaging-schema-cleanup * Better comments --- collections/bitset/bitset.go | 12 +++--- lib/constants.go | 9 ++-- lib/network.go | 17 ++++---- lib/pos_network.go | 79 ++++++++++++++++++------------------ 4 files changed, 59 insertions(+), 58 deletions(-) diff --git a/collections/bitset/bitset.go b/collections/bitset/bitset.go index bd9df969e..3cd9428b1 100644 --- a/collections/bitset/bitset.go +++ b/collections/bitset/bitset.go @@ -6,12 +6,11 @@ import ( // A Bitset is an ordered list of bits with arbitrary length. It uses // the built-in big.Int as the underlying storage scheme. The big.Int maintains -// an ordered bit list and provides an interface where we can flip each bit -// individually. The Bitset acts as a wrapper and provides boolean Get and Set -// functions on top. +// an ordered list of bits and provides an interface where we can flip each bit +// individually. The Bitset type acts as a wrapper and a clean interface on top. // // We implement a custom Bitset data structure using Big.Int rather than using -// an off-the-shelf solution because we need to support byte encoding and decoding, +// an off-the-shelf solution because we need to support byte encoding and decoding // with known endianness. 
Out of the box, the built-in big.Int supports individual // bit operations, safe indexing & boundary checks, dynamic resizing, and big // endian byte encoding/decoding. It allows us to implement a straightforward @@ -21,17 +20,20 @@ type Bitset struct { store *big.Int } -// Initializes a new Bitset with zero value for all indices. +// Initializes a new Bitset with zero value for all bits. func NewBitset() *Bitset { return &Bitset{ store: big.NewInt(0), } } +// Gets the value of the bit at the given index. func (b *Bitset) Get(index int) bool { return b.store.Bit(index) == 1 } +// Set the value of the bit at the given index, and returns the updated Bitset +// for method chaining. func (b *Bitset) Set(index int, newValue bool) *Bitset { booleanValue := uint(0) if newValue { diff --git a/lib/constants.go b/lib/constants.go index 016f28042..673856268 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -108,19 +108,20 @@ const ( // It includes several changes to the header format: // - Nonce field is deprecated // - ExtraNonce field is deprecated + // - ProposedInView field is added // - ValidatorsVoteQC field is added // - ValidatorsTimeoutAggregateQC field is added // // This format change is a breaking change that is not backwards-compatible with // versions 0 and 1. HeaderVersion2 = MsgDeSoHeaderVersion(2) + // TODO: rename this "CurrentHeaderVersion" to "LatestProofOfWorkHeaderVersion". Note, + // doing so will be a breaking change for 3rd party applications that import core and + // use this constant. + // // This CurrentHeaderVersion is an implicit version type that represents the latest // backwards compatible Proof of Work header format. This value is now locked to // HeaderVersion1 since versions 2 and onwards will be used for Proof of Stake formats. - // - // TODO: rename this constant to "LatestProofOfWorkHeaderVersion". Note, doing so will - // be a breaking change for 3rd party applications that import core and use this - // constant. 
CurrentHeaderVersion = HeaderVersion1 ) diff --git a/lib/network.go b/lib/network.go index f617fcd6e..e45ce7704 100644 --- a/lib/network.go +++ b/lib/network.go @@ -2391,7 +2391,7 @@ type MsgDeSoBlockProducerInfo struct { // MsgDeSoBlockProducerInfo.Serialize_Legacy() method instead. func (bpi *MsgDeSoBlockProducerInfo) ToBytes() ([]byte, error) { // Only support byte encoding for BlockProducerInfo version 1. All later versions will - // need custom encoding. + // need differ in format, so their encoding can be implemented then. if bpi.Version != MsgDeSoBlockProducerInfoVersion1 { return nil, fmt.Errorf("MsgDeSoBlockProducerInfo.ToBytes: BlockProducerInfo version %d not supported", bpi.Version) } @@ -2424,7 +2424,7 @@ func (bpi *MsgDeSoBlockProducerInfo) ToBytes() ([]byte, error) { return encodedBytes, nil } -// Byte decoder for the MsgDeSoBlockProducerInfo, with support for versioning. The encoder only +// Byte decoder for the MsgDeSoBlockProducerInfo with support for versioning. The decoder only // supports MsgDeSoBlockProducerInfo version 1 and above. For the legacy version 0, use the // MsgDeSoBlockProducerInfo.Deserialize_Legacy() method instead. func (bpi *MsgDeSoBlockProducerInfo) FromBytes(rr *bytes.Reader) error { @@ -2437,7 +2437,7 @@ func (bpi *MsgDeSoBlockProducerInfo) FromBytes(rr *bytes.Reader) error { } // Only support byte decoding for BlockProducerInfo version 1. All later versions will - // need custom decoding. + // need differ in format, so their decoding can be implemented then. if bpi.Version != MsgDeSoBlockProducerInfoVersion1 { return fmt.Errorf("MsgDeSoBlockProducerInfo.FromBytes: BlockProducerInfo version %d not supported", bpi.Version) } @@ -2551,20 +2551,19 @@ type MsgDeSoBlock struct { Header *MsgDeSoHeader Txns []*MsgDeSoTxn - // This MsgDeSoBlockProducerInfo field describes the producer of the block and their signature - // for the block. + // This field describes the producer of the block and their signature for the block. 
// - // In Proof of Work blocks, this field is optional and provides the producer of the block - // the ability to sign the block with their private key. Doing this proves that this block + // In Proof of Work blocks, the field is optional and provides the producer of the block + // the ability to sign the block with its ECDSA private key. Doing this proves that this block // was produced by a particular entity, which can be useful for nodes that want to restrict // who they accept blocks from. // // In Proof of Stake blocks, this field is required and serves two purposes: - // 1. It allows the block producer to sign its block with its ECDSA or BLS private key. + // 1. It allows the block producer to sign the block with its BLS private key. // This allows validators to verify that the block was produced by the expected leader for the // current block height and view. // 2. It contains the block producer's BLS partial signature, which acts as their vote on the - // block. This way, its vote can be aggregated into a QC by the next block proposer in the leader + // block. This way, the vote can be aggregated into a QC by the next block proposer in the leader // schedule. BlockProducerInfo *MsgDeSoBlockProducerInfo } diff --git a/lib/pos_network.go b/lib/pos_network.go index 8e46839dc..fc92b81c7 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -282,20 +282,25 @@ type QuorumCertificate struct { ValidatorsVoteAggregatedSignature *AggregatedBLSSignature } -// Performs a deep equality check between two QuorumCertificates, and returns -// true if the values of the two are identical. +// Performs a deep equality check between two QuorumCertificates, and returns true +// if the two are fully initialized and have identical values. In all other cases, +// it return false. 
func (qc *QuorumCertificate) Eq(other *QuorumCertificate) bool { - if qc == nil && other == nil { - return true + if qc == nil || other == nil { + return false + } + + qcEncodedBytes, err := qc.ToBytes() + if err != nil { + return false } - if (qc == nil) != (other == nil) { + otherEncodedBytes, err := other.ToBytes() + if err != nil { return false } - return bytes.Equal(qc.BlockHash.ToBytes(), other.BlockHash.ToBytes()) && - qc.ProposedInView == other.ProposedInView && - qc.ValidatorsVoteAggregatedSignature.Eq(other.ValidatorsVoteAggregatedSignature) + return bytes.Equal(qcEncodedBytes, otherEncodedBytes) } func (qc *QuorumCertificate) ToBytes() ([]byte, error) { @@ -348,27 +353,36 @@ func DecodeQuorumCertificate(rr io.Reader) (*QuorumCertificate, error) { // This is an aggregated BLS signature from a set of validators. Each validator's // presence in the signature is denoted in the provided signers list. I.e. if the // list's value at index 0 is 1, then the validator identified by that index is -// present in the aggregated signature. The indices of all validators are expected -// to be known by the caller. +// present in the aggregated signature. +// +// The validators in the signers list will match the ordering of active validators +// in descending order of stake for the relevant view's epoch. I.e. index 0 will +// correspond to the highest-staked active validator in the epoch, index 1 will +// correspond to the second-highest-staked active validator, ... type AggregatedBLSSignature struct { SignersList *bitset.Bitset Signature *bls.Signature } +// Performs a deep equality check between two AggregatedBLSSignatures, and returns true +// if the two are fully initialized and have identical values. In all other cases, +// it return false. 
func (sig *AggregatedBLSSignature) Eq(other *AggregatedBLSSignature) bool { - if sig == nil && other == nil { - return true + if sig == nil || other == nil { + return false } - if (sig == nil) != (other == nil) { + sigEncodedBytes, err := sig.ToBytes() + if err != nil { return false } - if !sig.Signature.Eq(other.Signature) { + otherEncodedBytes, err := other.ToBytes() + if err != nil { return false } - return bytes.Equal(sig.SignersList.ToBytes(), other.SignersList.ToBytes()) + return bytes.Equal(sigEncodedBytes, otherEncodedBytes) } func (sig *AggregatedBLSSignature) ToBytes() ([]byte, error) { @@ -434,49 +448,34 @@ type TimeoutAggregateQuorumCertificate struct { // // The ordering of high QC views and validators in the aggregate signature will // match the ordering of active validators in descending order of stake for the - // current view's epoch. I.e. index 0 will correspond to the highest-staked active + // timed out view's epoch. I.e. index 0 will correspond to the highest-staked active // validator in the epoch, index 1 will correspond to the second-highest-staked active // validator, ... ValidatorsTimeoutHighQCViews []uint64 ValidatorsTimeoutAggregatedSignature *AggregatedBLSSignature } -// Performs a deep equality check between two TimeoutAggregateQuorumCertificate, and -// returns true if the values of the two are identical. +// Performs a deep equality check between two TimeoutAggregateQuorumCertificates, and +// returns true if the two are fully initialized and have identical values. In all other +// cases, it return false. 
func (aggQC *TimeoutAggregateQuorumCertificate) Eq( other *TimeoutAggregateQuorumCertificate, ) bool { - if aggQC == nil && other == nil { - return true - } - - if (aggQC == nil) != (other == nil) { - return false - } - - if len(aggQC.ValidatorsTimeoutHighQCViews) != len(other.ValidatorsTimeoutHighQCViews) { + if aggQC == nil || other == nil { return false } - if aggQC.TimedOutView != other.TimedOutView { - return false - } - - if !aggQC.ValidatorsHighQC.Eq(other.ValidatorsHighQC) { + aggQcEncodedBytes, err := aggQC.ToBytes() + if err != nil { return false } - if !aggQC.ValidatorsTimeoutAggregatedSignature.Eq(other.ValidatorsTimeoutAggregatedSignature) { + otherEncodedBytes, err := other.ToBytes() + if err != nil { return false } - for i := 0; i < len(aggQC.ValidatorsTimeoutHighQCViews); i++ { - if aggQC.ValidatorsTimeoutHighQCViews[i] != other.ValidatorsTimeoutHighQCViews[i] { - return false - } - } - - return true + return bytes.Equal(aggQcEncodedBytes, otherEncodedBytes) } func (aggQC *TimeoutAggregateQuorumCertificate) ToBytes() ([]byte, error) { From ec53d4d811effe6e8fbd988ffc20fdedacfea7b2 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 5 Jul 2023 16:22:13 -0400 Subject: [PATCH 125/762] Add BLS Signature Verification for Multiple Payloads (#575) * sa/add-bls-signature-verification-for-multiple-payloads * Rename signature verification functions * Make hashingAlgorithm and signingAlgorithm private to the bls package * Fix gofmt error * Address Nina's feedback --- bls/signature.go | 57 ++++++++++++++++++++++++++++----------- bls/signature_no_relic.go | 6 ++++- bls/signature_test.go | 50 +++++++++++++++++++++++++++++----- 3 files changed, 90 insertions(+), 23 deletions(-) diff --git a/bls/signature.go b/bls/signature.go index b05dbbee7..6a83d1079 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -8,23 +8,25 @@ import ( "encoding/hex" "encoding/json" "errors" + "fmt" "strings" flowCrypto 
"github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/crypto/hash" ) -// The SigningAlgorithm for BLS keys is BLSBLS12381 which is BLS on the BLS 12-381 curve. +// The signingAlgorithm for BLS keys is BLSBLS12381 which is BLS on the BLS 12-381 curve. // This is the only supported BLS signing algorithm in the flowCrypto package. // BLS is used such that we can aggregate signatures into one signature. -const SigningAlgorithm = flowCrypto.BLSBLS12381 +const signingAlgorithm = flowCrypto.BLSBLS12381 -// The HashingAlgorithm for BLS keys is the following. This algorithm is used to hash input data onto the +// The hashingAlgorithm for BLS keys is the following. This algorithm is used to hash input data onto the // BLS 12-381 curve for generating signatures. The returned instance is a Hasher and can be used to // generate BLS signatures with the Sign() method. This is the only supported BLS Hasher in the flowCrypto // package. The input domainTag is a separation tag that defines the protocol and its subdomain. Such tag // should be of the format: -V-CS-with- where is the name of the protocol, // the protocol version number, and the index of the ciphersuite in the protocol. -var HashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-V1-CS01-with-") +var hashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-V1-CS01-with-") // AggregateSignatures takes in an input slice of bls.Signatures and aggregates them // into a single bls.Signature. The assumption is that each of the input bls.Signatures @@ -41,15 +43,30 @@ func AggregateSignatures(signatures []*Signature) (*Signature, error) { return &Signature{flowSignature: aggregateFlowSignature}, nil } -// VerifyAggregateSignature takes in a slice of bls.PublicKeys, a bls.Signature, and a payload and returns +// VerifyAggregateSignatureSinglePayload takes in a slice of bls.PublicKeys, a bls.Signature, and a single payload and returns // true if every bls.PublicKey in the slice signed the payload. 
The input bls.Signature is the aggregate // signature of each of their respective bls.Signatures for that payload. -func VerifyAggregateSignature(publicKeys []*PublicKey, signature *Signature, payloadBytes []byte) (bool, error) { - var flowPublicKeys []flowCrypto.PublicKey - for _, publicKey := range publicKeys { - flowPublicKeys = append(flowPublicKeys, publicKey.flowPublicKey) +func VerifyAggregateSignatureSinglePayload(publicKeys []*PublicKey, signature *Signature, payloadBytes []byte) (bool, error) { + flowPublicKeys := extractFlowPublicKeys(publicKeys) + return flowCrypto.VerifyBLSSignatureOneMessage(flowPublicKeys, signature.flowSignature, payloadBytes, hashingAlgorithm) +} + +// VerifyAggregateSignatureMultiplePayloads takes in a slice of bls.PublicKeys, a bls.Signature, and a slice of payloads. +// It returns true if each bls.PublicKey at index i has signed its respective payload at index i in the payloads slice. +// The input bls.Signature is the aggregate signature of each public key's partial bls.Signatures for its respective payload. 
+func VerifyAggregateSignatureMultiplePayloads(publicKeys []*PublicKey, signature *Signature, payloadsBytes [][]byte) (bool, error) { + if len(publicKeys) != len(payloadsBytes) { + return false, fmt.Errorf("number of public keys %d does not equal number of payloads %d", len(publicKeys), len(payloadsBytes)) + } + + flowPublicKeys := extractFlowPublicKeys(publicKeys) + + var hashingAlgorithms []hash.Hasher + for ii := 0; ii < len(publicKeys); ii++ { + hashingAlgorithms = append(hashingAlgorithms, hashingAlgorithm) } - return flowCrypto.VerifyBLSSignatureOneMessage(flowPublicKeys, signature.flowSignature, payloadBytes, HashingAlgorithm) + + return flowCrypto.VerifyBLSSignatureManyMessages(flowPublicKeys, signature.flowSignature, payloadsBytes, hashingAlgorithms) } // @@ -66,7 +83,7 @@ func NewPrivateKey() (*PrivateKey, error) { if err != nil { return nil, err } - flowPrivateKey, err := flowCrypto.GeneratePrivateKey(SigningAlgorithm, randomBytes) + flowPrivateKey, err := flowCrypto.GeneratePrivateKey(signingAlgorithm, randomBytes) if err != nil { return nil, err } @@ -77,7 +94,7 @@ func (privateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { if privateKey == nil || privateKey.flowPrivateKey == nil { return nil, errors.New("PrivateKey is nil") } - flowSignature, err := privateKey.flowPrivateKey.Sign(payloadBytes, HashingAlgorithm) + flowSignature, err := privateKey.flowPrivateKey.Sign(payloadBytes, hashingAlgorithm) if err != nil { return nil, err } @@ -110,7 +127,7 @@ func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, return nil, err } // Convert from byte slice to bls.PrivateKey. 
- privateKey.flowPrivateKey, err = flowCrypto.DecodePrivateKey(SigningAlgorithm, privateKeyBytes) + privateKey.flowPrivateKey, err = flowCrypto.DecodePrivateKey(signingAlgorithm, privateKeyBytes) return privateKey, err } @@ -155,7 +172,7 @@ func (publicKey *PublicKey) Verify(signature *Signature, input []byte) (bool, er if publicKey == nil || publicKey.flowPublicKey == nil { return false, errors.New("bls.PublicKey is nil") } - return publicKey.flowPublicKey.Verify(signature.flowSignature, input, HashingAlgorithm) + return publicKey.flowPublicKey.Verify(signature.flowSignature, input, hashingAlgorithm) } func (publicKey *PublicKey) ToBytes() []byte { @@ -171,7 +188,7 @@ func (publicKey *PublicKey) FromBytes(publicKeyBytes []byte) (*PublicKey, error) return nil, nil } var err error - publicKey.flowPublicKey, err = flowCrypto.DecodePublicKey(SigningAlgorithm, publicKeyBytes) + publicKey.flowPublicKey, err = flowCrypto.DecodePublicKey(signingAlgorithm, publicKeyBytes) return publicKey, err } @@ -194,7 +211,7 @@ func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, erro return nil, err } // Convert from byte slice to bls.PublicKey. 
- publicKey.flowPublicKey, err = flowCrypto.DecodePublicKey(SigningAlgorithm, publicKeyBytes) + publicKey.flowPublicKey, err = flowCrypto.DecodePublicKey(signingAlgorithm, publicKeyBytes) return publicKey, err } @@ -319,3 +336,11 @@ func (signature *Signature) Copy() *Signature { flowSignature: append([]byte{}, signature.flowSignature.Bytes()...), } } + +func extractFlowPublicKeys(publicKeys []*PublicKey) []flowCrypto.PublicKey { + flowPublicKeys := make([]flowCrypto.PublicKey, len(publicKeys)) + for i, publicKey := range publicKeys { + flowPublicKeys[i] = publicKey.flowPublicKey + } + return flowPublicKeys +} diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go index 9833a6aab..093a06331 100644 --- a/bls/signature_no_relic.go +++ b/bls/signature_no_relic.go @@ -8,7 +8,11 @@ func AggregateSignatures(signatures []*Signature) (*Signature, error) { panic(BLSNoRelicError) } -func VerifyAggregateSignature(publicKeys []*PublicKey, signature *Signature, payloadBytes []byte) (bool, error) { +func VerifyAggregateSignatureSinglePayload(publicKeys []*PublicKey, signature *Signature, payloadBytes []byte) (bool, error) { + panic(BLSNoRelicError) +} + +func VerifyAggregateSignatureMultiplePayloads(publicKeys []*PublicKey, signature *Signature, payloadsBytes [][]byte) (bool, error) { panic(BLSNoRelicError) } diff --git a/bls/signature_test.go b/bls/signature_test.go index 63637f8df..d41bd6646 100644 --- a/bls/signature_test.go +++ b/bls/signature_test.go @@ -6,8 +6,9 @@ import ( "bytes" "crypto/rand" "encoding/json" - "github.com/stretchr/testify/require" "testing" + + "github.com/stretchr/testify/require" ) func TestVerifyingBLSSignatures(t *testing.T) { @@ -45,7 +46,7 @@ func TestVerifyingBLSSignatures(t *testing.T) { require.NoError(t, err) require.True(t, isVerified) - // Test AggregateSignatures() and VerifyAggregateSignature(). + // Test AggregateSignatures() and VerifyAggregateSignatureSinglePayload(). // 1. PrivateKey1 signs a random payload. 
randomPayload3 := _generateRandomBytes(t, 256) blsSignature1, err = blsPrivateKey1.Sign(randomPayload3) @@ -57,26 +58,63 @@ func TestVerifyingBLSSignatures(t *testing.T) { aggregateSignature, err := AggregateSignatures([]*Signature{blsSignature1, blsSignature2}) require.NoError(t, err) // 4. Verify the AggregateSignature. - isVerified, err = VerifyAggregateSignature( + isVerified, err = VerifyAggregateSignatureSinglePayload( []*PublicKey{blsPublicKey1, blsPublicKey2}, aggregateSignature, randomPayload3, ) require.NoError(t, err) require.True(t, isVerified) // 5. Verify PrivateKey1's signature doesn't work on its own. - isVerified, err = VerifyAggregateSignature([]*PublicKey{blsPublicKey1}, aggregateSignature, randomPayload3) + isVerified, err = VerifyAggregateSignatureSinglePayload([]*PublicKey{blsPublicKey1}, aggregateSignature, randomPayload3) require.NoError(t, err) require.False(t, isVerified) // 6. Verify PrivateKey2's signature doesn't work on its own. - isVerified, err = VerifyAggregateSignature([]*PublicKey{blsPublicKey2}, aggregateSignature, randomPayload3) + isVerified, err = VerifyAggregateSignatureSinglePayload([]*PublicKey{blsPublicKey2}, aggregateSignature, randomPayload3) require.NoError(t, err) require.False(t, isVerified) // 7. Verify the AggregateSignature doesn't work on a different payload. - isVerified, err = VerifyAggregateSignature( + isVerified, err = VerifyAggregateSignatureSinglePayload( []*PublicKey{blsPublicKey1, blsPublicKey2}, aggregateSignature, randomPayload1, ) require.NoError(t, err) require.False(t, isVerified) + // Test AggregateSignatures() and VerifyMultiPayloadAggregateSignature() on different payloads. + // 1. PrivateKey1 signs a random payload. + randomPayload4 := _generateRandomBytes(t, 256) + blsSignature1, err = blsPrivateKey1.Sign(randomPayload4) + require.NoError(t, err) + // 2. PrivateKey2 signs a different random payload. 
+ randomPayload5 := _generateRandomBytes(t, 256) + blsSignature2, err = blsPrivateKey2.Sign(randomPayload5) + require.NoError(t, err) + // 3. Aggregate their signatures. + aggregateSignature, err = AggregateSignatures([]*Signature{blsSignature1, blsSignature2}) + require.NoError(t, err) + // 4. Verify the AggregateSignature on the different payloads. + isVerified, err = VerifyAggregateSignatureMultiplePayloads( + []*PublicKey{blsPublicKey1, blsPublicKey2}, aggregateSignature, [][]byte{randomPayload4, randomPayload5}, + ) + require.NoError(t, err) + require.True(t, isVerified) + // 5. Verify PrivateKey1's signature doesn't work on its own. + isVerified, err = VerifyAggregateSignatureMultiplePayloads( + []*PublicKey{blsPublicKey1}, aggregateSignature, [][]byte{randomPayload4}, + ) + require.NoError(t, err) + require.False(t, isVerified) + // 6. Verify PrivateKey2's signature doesn't work on its own. + isVerified, err = VerifyAggregateSignatureMultiplePayloads( + []*PublicKey{blsPublicKey2}, aggregateSignature, [][]byte{randomPayload5}, + ) + require.NoError(t, err) + require.False(t, isVerified) + // 7. Verify the AggregateSignature doesn't work on different ordering of payloads. + isVerified, err = VerifyAggregateSignatureMultiplePayloads( + []*PublicKey{blsPublicKey1, blsPublicKey2}, aggregateSignature, [][]byte{randomPayload5, randomPayload4}, + ) + require.NoError(t, err) + require.False(t, isVerified) + // Test bls.PrivateKey.Eq(). 
require.True(t, blsPrivateKey1.Eq(blsPrivateKey1)) require.True(t, blsPrivateKey2.Eq(blsPrivateKey2)) From 9b1a8b7497138ff090e8c6dbeac329a758462012 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 5 Jul 2023 16:51:46 -0400 Subject: [PATCH 126/762] Revert BLS public key and signature in BlockProducerInfo (#567) (#576) --- lib/block_producer.go | 9 +- lib/blockchain.go | 17 ++- lib/blockchain_test.go | 13 ++- lib/constants.go | 16 --- lib/network.go | 231 +++++++---------------------------------- lib/network_test.go | 41 +------- lib/server.go | 3 +- 7 files changed, 59 insertions(+), 271 deletions(-) diff --git a/lib/block_producer.go b/lib/block_producer.go index 869531598..b4433124e 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -3,14 +3,13 @@ package lib import ( "encoding/hex" "fmt" + "github.com/btcsuite/btcd/wire" + "github.com/tyler-smith/go-bip39" "math" "sync" "sync/atomic" "time" - "github.com/btcsuite/btcd/wire" - "github.com/tyler-smith/go-bip39" - "github.com/deso-protocol/go-deadlock" "github.com/btcsuite/btcd/btcec" @@ -577,8 +576,8 @@ func (desoBlockProducer *DeSoBlockProducer) SignBlock(blockFound *MsgDeSoBlock) // If we get here, we now have a valid signature for the block. // Embed the signature into the block. 
- blockFound.BlockProducerInfo = &MsgDeSoBlockProducerInfo{ - PublicKey: NewPublicKey(desoBlockProducer.blockProducerPrivateKey.PubKey().SerializeCompressed()), + blockFound.BlockProducerInfo = &BlockProducerInfo{ + PublicKey: desoBlockProducer.blockProducerPrivateKey.PubKey().SerializeCompressed(), Signature: signature, } diff --git a/lib/blockchain.go b/lib/blockchain.go index 0e1ad5d63..59f38840e 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5,6 +5,7 @@ import ( "container/list" "encoding/hex" "fmt" + "github.com/holiman/uint256" "math" "math/big" "reflect" @@ -13,8 +14,6 @@ import ( "strings" "time" - "github.com/holiman/uint256" - btcdchain "github.com/btcsuite/btcd/blockchain" chainlib "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec" @@ -1831,35 +1830,35 @@ func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures boo } // Verify that the public key is in the allowed set. - if _, exists := bc.trustedBlockProducerPublicKeys[MakePkMapKey(publicKey.ToBytes())]; !exists { + if _, exists := bc.trustedBlockProducerPublicKeys[MakePkMapKey(publicKey)]; !exists { return false, false, errors.Wrapf(RuleErrorBlockProducerPublicKeyNotInWhitelist, "ProcessBlock: Block producer public key %v is not in the allowed list of "+ - "--trusted_block_producer_public_keys: %v.", PkToStringBoth(publicKey.ToBytes()), + "--trusted_block_producer_public_keys: %v.", PkToStringBoth(publicKey), bc.trustedBlockProducerPublicKeys) } // Verify that the public key has not been forbidden. 
- dbEntry := DbGetForbiddenBlockSignaturePubKey(bc.db, bc.snapshot, publicKey.ToBytes()) + dbEntry := DbGetForbiddenBlockSignaturePubKey(bc.db, bc.snapshot, publicKey) if dbEntry != nil { return false, false, errors.Wrapf(RuleErrorForbiddenBlockProducerPublicKey, - "ProcessBlock: Block producer public key %v is forbidden", PkToStringBoth(publicKey.ToBytes())) + "ProcessBlock: Block producer public key %v is forbidden", PkToStringBoth(publicKey)) } // At this point we are confident that we have a valid public key that is // trusted. signature := desoBlock.BlockProducerInfo.Signature - pkObj, err := btcec.ParsePubKey(publicKey.ToBytes(), btcec.S256()) + pkObj, err := btcec.ParsePubKey(publicKey, btcec.S256()) if err != nil { return false, false, errors.Wrapf(err, "ProcessBlock: Error parsing block producer public key: %v.", - PkToStringBoth(publicKey.ToBytes())) + PkToStringBoth(publicKey)) } if !signature.Verify(blockHash[:], pkObj) { return false, false, errors.Wrapf(RuleErrorInvalidBlockProducerSIgnature, "ProcessBlock: Error validating signature %v for public key %v: %v.", hex.EncodeToString(signature.Serialize()), - PkToStringBoth(publicKey.ToBytes()), + PkToStringBoth(publicKey), err) } } diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index abce936db..11fa941a4 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -4,6 +4,8 @@ import ( "encoding/hex" "flag" "fmt" + embeddedpostgres "github.com/fergusstrange/embedded-postgres" + "github.com/go-pg/pg/v10" "log" "math/big" "math/rand" @@ -12,9 +14,6 @@ import ( "testing" "time" - embeddedpostgres "github.com/fergusstrange/embedded-postgres" - "github.com/go-pg/pg/v10" - chainlib "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v3" @@ -1652,12 +1651,12 @@ func TestBadBlockSignature(t *testing.T) { // Since MineAndProcesssSingleBlock returns a valid block above, we can play with its // signature and re-process the block to see what 
happens. - blockProducerInfoCopy := &MsgDeSoBlockProducerInfo{Signature: &btcec.Signature{}} - blockProducerInfoCopy.PublicKey = NewPublicKey(finalBlock1.BlockProducerInfo.PublicKey[:]) + blockProducerInfoCopy := &BlockProducerInfo{Signature: &btcec.Signature{}} + blockProducerInfoCopy.PublicKey = append([]byte{}, finalBlock1.BlockProducerInfo.PublicKey...) *blockProducerInfoCopy.Signature = *finalBlock1.BlockProducerInfo.Signature // A bad signature with the right public key should fail. - finalBlock1.BlockProducerInfo.PublicKey = NewPublicKey(senderPkBytes) + finalBlock1.BlockProducerInfo.PublicKey = senderPkBytes _, _, err = chain.ProcessBlock(finalBlock1, true) require.Error(err) require.Contains(err.Error(), RuleErrorInvalidBlockProducerSIgnature) @@ -1665,7 +1664,7 @@ func TestBadBlockSignature(t *testing.T) { // A signature that's outright missing should fail blockSignerPkBytes, _, err := Base58CheckDecode(blockSignerPk) require.NoError(err) - finalBlock1.BlockProducerInfo.PublicKey = NewPublicKey(blockSignerPkBytes) + finalBlock1.BlockProducerInfo.PublicKey = blockSignerPkBytes finalBlock1.BlockProducerInfo.Signature = nil _, _, err = chain.ProcessBlock(finalBlock1, true) require.Error(err) diff --git a/lib/constants.go b/lib/constants.go index 673856268..af7d892c5 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -141,22 +141,6 @@ const ( MsgValidatorTimeoutVersion0 MsgValidatorTimeoutVersion = 0 ) -// Versioning for the BlockProducerInfo field included in MsgDeSoBlock. This type alias -// is equivalent to a uint8, and supports the same byte encoders/decoders. -type MsgDeSoBlockProducerInfoVersion = byte - -const ( - // This represents the original schema for the BlockProducerInfo field included in - // Proof of Work blocks. The original schema did not have versioning, so we use a default - // version value of 0 to denote this. The original schema only contains the block producer's - // ECDSA public key and ECDSA signature of the block. 
- MsgDeSoBlockProducerInfoVersion0 MsgDeSoBlockProducerInfoVersion = 0 - // This version is introduced starting with Proof of Stake blocks. It adds versioning to the - // BlockProducerInfo schema, and adds two new fields for the block producer's BLS public key - // and BLS partial signature for the block. - MsgDeSoBlockProducerInfoVersion1 MsgDeSoBlockProducerInfoVersion = 1 -) - var ( MaxUint256, _ = uint256.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") diff --git a/lib/network.go b/lib/network.go index e45ce7704..07091a1f9 100644 --- a/lib/network.go +++ b/lib/network.go @@ -23,7 +23,6 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" decredEC "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" - "github.com/deso-protocol/core/bls" merkletree "github.com/deso-protocol/go-merkle-tree" "github.com/ethereum/go-ethereum/crypto/ecies" "github.com/holiman/uint256" @@ -2367,112 +2366,15 @@ func (msg *MsgDeSoHeader) String() string { // BLOCK Message // ================================================================== -type MsgDeSoBlockProducerInfo struct { - Version MsgDeSoBlockProducerInfoVersion - - // ECDSA public key for the block producer. - PublicKey *PublicKey - // The block producer's ECDSA signature for the block. This field is used in - // MsgDeSoBlockProducerInfo version 0, and is deprecated from version 1 onwards. +type BlockProducerInfo struct { + PublicKey []byte Signature *btcec.Signature - - // The BLS public key of the validator who constructed this block. This field is - // populated starting in MsgDeSoBlockProducerInfo version 1. - VotingPublicKey *bls.PublicKey - // The validator's partial BLS signature of the (ProposedInView, BlockHash) pair - // for enclosing block. This signature proves the validator proposed the block, - // and also acts as the validator's vote for this block. This filed is only populated - // starting in MsgDeSoBlockProducerInfo version 1. 
- VotePartialSignature *bls.Signature -} - -// Byte encoder for the MsgDeSoBlockProducerInfo with support for versioning. The encoder only -// supports MsgDeSoBlockProducerInfo version 1 and above. For the legacy version 0, use the -// MsgDeSoBlockProducerInfo.Serialize_Legacy() method instead. -func (bpi *MsgDeSoBlockProducerInfo) ToBytes() ([]byte, error) { - // Only support byte encoding for BlockProducerInfo version 1. All later versions will - // need differ in format, so their encoding can be implemented then. - if bpi.Version != MsgDeSoBlockProducerInfoVersion1 { - return nil, fmt.Errorf("MsgDeSoBlockProducerInfo.ToBytes: BlockProducerInfo version %d not supported", bpi.Version) - } - - encodedBytes := []byte{} - - // Required Version field - encodedBytes = append(encodedBytes, bpi.Version) - - // Required ECDSA PublicKey - if bpi.PublicKey == nil { - return nil, fmt.Errorf("MsgDeSoBlockProducerInfo.ToBytes: PublicKey is required") - } - encodedBytes = append(encodedBytes, bpi.PublicKey.ToBytes()...) - - // The ECDSA Signature is redundant, and is removed in MsgDeSoBlockProducerInfo version 1 and above - - // Voting BLS PublicKey - if bpi.VotingPublicKey == nil { - return nil, fmt.Errorf("MsgDeSoBlockProducerInfo.ToBytes: VotingPublicKey is required") - } - encodedBytes = append(encodedBytes, EncodeByteArray(bpi.VotingPublicKey.ToBytes())...) - - // Vote BLS Partial Signature - if bpi.VotePartialSignature == nil { - return nil, fmt.Errorf("MsgDeSoBlockProducerInfo.ToBytes: VotePartialSignature is required") - } - encodedBytes = append(encodedBytes, EncodeByteArray(bpi.VotePartialSignature.ToBytes())...) - - return encodedBytes, nil -} - -// Byte decoder for the MsgDeSoBlockProducerInfo with support for versioning. The decoder only -// supports MsgDeSoBlockProducerInfo version 1 and above. For the legacy version 0, use the -// MsgDeSoBlockProducerInfo.Deserialize_Legacy() method instead. 
-func (bpi *MsgDeSoBlockProducerInfo) FromBytes(rr *bytes.Reader) error { - var err error - - // Required Version field - bpi.Version, err = rr.ReadByte() - if err != nil { - return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.FromBytes: Problem reading Version") - } - - // Only support byte decoding for BlockProducerInfo version 1. All later versions will - // need differ in format, so their decoding can be implemented then. - if bpi.Version != MsgDeSoBlockProducerInfoVersion1 { - return fmt.Errorf("MsgDeSoBlockProducerInfo.FromBytes: BlockProducerInfo version %d not supported", bpi.Version) - } - - // Required ECDSA PublicKey - bpi.PublicKey, err = ReadPublicKey(rr) - if err != nil { - return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.FromBytes: Problem reading PublicKey") - } - - // The ECDSA Signature is redundant, and is removed in MsgDeSoBlockProducerInfo version 1 and above - // so we skip it here. - - // Voting BLS PublicKey - bpi.VotingPublicKey, err = DecodeBLSPublicKey(rr) - if err != nil { - return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.FromBytes: Problem reading VotingPublicKey") - } - - // Vote BLS Partial Signature - bpi.VotePartialSignature, err = DecodeBLSSignature(rr) - if err != nil { - return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.FromBytes: Problem reading VotePartialSignature") - } - - return nil } -// Legacy byte encoder for MsgDeSoBlockProducerInfo with no support for versioning. -// It encodes just the public key and signature according to the legacy encoding -// format. -func (bpi *MsgDeSoBlockProducerInfo) Serialize_Legacy() []byte { +func (bpi *BlockProducerInfo) Serialize() []byte { data := []byte{} data = append(data, UintToBuf(uint64(len(bpi.PublicKey)))...) - data = append(data, bpi.PublicKey.ToBytes()...) + data = append(data, bpi.PublicKey...) 
sigBytes := []byte{} if bpi.Signature != nil { @@ -2484,47 +2386,46 @@ func (bpi *MsgDeSoBlockProducerInfo) Serialize_Legacy() []byte { return data } -// Legacy byte decoder for MsgDeSoBlockProducerInfo with no support for versioning. -// It decodes the public key and signature according to the legacy encoding -// format and then sets the version to 0. -func (bpi *MsgDeSoBlockProducerInfo) Deserialize_Legacy(data []byte) error { - ret := &MsgDeSoBlockProducerInfo{} +func (bpi *BlockProducerInfo) Deserialize(data []byte) error { + ret := &BlockProducerInfo{} rr := bytes.NewReader(data) - // Set the version to 0 since this is the legacy format. - ret.Version = MsgDeSoBlockProducerInfoVersion0 - // De-serialize the public key. { pkLen, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: Error reading public key len") + return errors.Wrapf(err, "BlockProducerInfo.Deserialize: Error reading public key len") } if pkLen > MaxMessagePayload { - return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: pkLen too long: %v", pkLen) + return errors.Wrapf(err, "BlockProducerInfo.Deserialize: pkLen too long: %v", pkLen) } - ret.PublicKey, err = ReadPublicKey(rr) + pkBytes, err := SafeMakeSliceWithLength[byte](pkLen) if err != nil { - return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: Error reading public key: ") + return errors.Wrapf(err, "BlockProducerInfo.Deserialize: Problem making slice for pkBytes") } + _, err = io.ReadFull(rr, pkBytes) + if err != nil { + return errors.Wrapf(err, "BlockProducerInfo.Deserialize: Error reading public key: ") + } + ret.PublicKey = pkBytes } // De-serialize the signature. 
{ sigLen, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: Error reading signature len") + return errors.Wrapf(err, "BlockProducerInfo.Deserialize: Error reading signature len") } if sigLen > MaxMessagePayload { - return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: signature len too long: %v", sigLen) + return errors.Wrapf(err, "BlockProducerInfo.Deserialize: signature len too long: %v", sigLen) } sigBytes, err := SafeMakeSliceWithLength[byte](sigLen) if err != nil { - return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: Problem making slice for sigBytes") + return errors.Wrapf(err, "BlockProducerInfo.Deserialize: Problem making slice for sigBytes") } _, err = io.ReadFull(rr, sigBytes) if err != nil { - return errors.Wrapf(err, "MsgDeSoBlockProducerInfo.Deserialize: Error reading signature: ") + return errors.Wrapf(err, "BlockProducerInfo.Deserialize: Error reading signature: ") } ret.Signature = nil if sigLen > 0 { @@ -2540,32 +2441,22 @@ func (bpi *MsgDeSoBlockProducerInfo) Deserialize_Legacy(data []byte) error { return nil } -func (bpi *MsgDeSoBlockProducerInfo) String() string { +func (bpi *BlockProducerInfo) String() string { if bpi == nil || len(bpi.PublicKey) == 0 { return "Signer Key: NONE" } - return fmt.Sprintf("Signer Key: %v", PkToStringMainnet(bpi.PublicKey.ToBytes())) + return fmt.Sprintf("Signer Key: %v", PkToStringMainnet(bpi.PublicKey)) } type MsgDeSoBlock struct { Header *MsgDeSoHeader Txns []*MsgDeSoTxn - // This field describes the producer of the block and their signature for the block. - // - // In Proof of Work blocks, the field is optional and provides the producer of the block - // the ability to sign the block with its ECDSA private key. Doing this proves that this block - // was produced by a particular entity, which can be useful for nodes that want to restrict - // who they accept blocks from. 
- // - // In Proof of Stake blocks, this field is required and serves two purposes: - // 1. It allows the block producer to sign the block with its BLS private key. - // This allows validators to verify that the block was produced by the expected leader for the - // current block height and view. - // 2. It contains the block producer's BLS partial signature, which acts as their vote on the - // block. This way, the vote can be aggregated into a QC by the next block proposer in the leader - // schedule. - BlockProducerInfo *MsgDeSoBlockProducerInfo + // This field is optional and provides the producer of the block the ability to sign it + // with their private key. Doing this proves that this block was produced by a particular + // entity, which can be useful for nodes that want to restrict who they accept blocks + // from. + BlockProducerInfo *BlockProducerInfo } func (msg *MsgDeSoBlock) EncodeBlockCommmon(preSignature bool) ([]byte, error) { @@ -2610,7 +2501,7 @@ func (msg *MsgDeSoBlock) EncodeBlockVersion1(preSignature bool) ([]byte, error) // BlockProducerInfo blockProducerInfoBytes := []byte{} if msg.BlockProducerInfo != nil { - blockProducerInfoBytes = msg.BlockProducerInfo.Serialize_Legacy() + blockProducerInfoBytes = msg.BlockProducerInfo.Serialize() } data = append(data, UintToBuf(uint64(len(blockProducerInfoBytes)))...) data = append(data, blockProducerInfoBytes...) 
@@ -2618,33 +2509,11 @@ func (msg *MsgDeSoBlock) EncodeBlockVersion1(preSignature bool) ([]byte, error) return data, nil } -func (msg *MsgDeSoBlock) EncodeBlockVersion2(preSignature bool) ([]byte, error) { - // Encode MsgDeSoHeader and []*MsgDeSoTxn - encodedBytes, err := msg.EncodeBlockCommmon(preSignature) - if err != nil { - return nil, err - } - - // Encode BlockProducerInfo - if msg.BlockProducerInfo == nil { - return nil, fmt.Errorf("MsgDeSoBlock.EncodeBlockVersion2: BlockProducerInfo should not be nil") - } - blockProducerInfoBytes, err := msg.BlockProducerInfo.ToBytes() - if err != nil { - return nil, errors.Wrapf(err, "MsgDeSoBlock.EncodeBlockVersion2: Problem encoding BlockProducerInfo") - } - encodedBytes = append(encodedBytes, EncodeByteArray(blockProducerInfoBytes)...) - - return encodedBytes, nil -} - func (msg *MsgDeSoBlock) ToBytes(preSignature bool) ([]byte, error) { if msg.Header.Version == HeaderVersion0 { return msg.EncodeBlockVersion0(preSignature) } else if msg.Header.Version == HeaderVersion1 { return msg.EncodeBlockVersion1(preSignature) - } else if msg.Header.Version == HeaderVersion2 { - return msg.EncodeBlockVersion2(preSignature) } else { return nil, fmt.Errorf("MsgDeSoBlock.ToBytes: Error encoding version: %v", msg.Header.Version) } @@ -2707,21 +2576,15 @@ func (msg *MsgDeSoBlock) FromBytes(data []byte) error { ret.Txns = append(ret.Txns, currentTxn) } - // Version 0 blocks have no BlockProducerInfo attached to them. We can exit early here. - if ret.Header.Version == HeaderVersion0 { - *msg = *ret - return nil - } - - // Starting with version 1, all block versions have a BlockProducerInfo length encoded. - blockProducerInfoLen, err := ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "MsgDeSoBlock.FromBytes: Error decoding header length") - } - + // Version 1 blocks have a BlockProducerInfo attached to them that + // must be read. If this is not a Version 1 block, then the BlockProducerInfo + // remains nil. 
if ret.Header.Version == HeaderVersion1 { - // All version 1 blocks have an optional BlockProducerInfo attached. - var blockProducerInfo *MsgDeSoBlockProducerInfo + blockProducerInfoLen, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoBlock.FromBytes: Error decoding header length") + } + var blockProducerInfo *BlockProducerInfo if blockProducerInfoLen > 0 { if blockProducerInfoLen > MaxMessagePayload { return fmt.Errorf("MsgDeSoBlock.FromBytes: Header length %d longer "+ @@ -2735,32 +2598,12 @@ func (msg *MsgDeSoBlock) FromBytes(data []byte) error { if err != nil { return errors.Wrapf(err, "MsgDeSoBlock.FromBytes: Problem reading header") } - blockProducerInfo = &MsgDeSoBlockProducerInfo{} - if err = blockProducerInfo.Deserialize_Legacy(blockProducerInfoBytes); err != nil { + blockProducerInfo = &BlockProducerInfo{} + if err = blockProducerInfo.Deserialize(blockProducerInfoBytes); err != nil { return errors.Wrapf(err, "MsgDeSoBlock.FromBytes: Error deserializing block producer info") } ret.BlockProducerInfo = blockProducerInfo } - } else if ret.Header.Version == HeaderVersion2 { - // All version 2 blocks have a required BlockProducerInfo field. - - // Verify the length for the BlockProducerInfo. - if blockProducerInfoLen > MaxMessagePayload { - return fmt.Errorf("MsgDeSoBlock.FromBytes: BlockProducerInfo length %d longer "+ - "than max %d", blockProducerInfoLen, MaxMessagePayload) - } - - // BlockProducerInfo is a required field for block header version 2 and later. Otherwise, - // the block is considered malformed. 
- if blockProducerInfoLen == 0 { - return fmt.Errorf("MsgDeSoBlock.FromBytes: BlockProducerInfo length cannot be zero") - } - - blockProducerInfo := &MsgDeSoBlockProducerInfo{} - if blockProducerInfo.FromBytes(rr); err != nil { - return errors.Wrapf(err, "MsgDeSoBlock.FromBytes: Error decoding BlockProducerInfo") - } - ret.BlockProducerInfo = blockProducerInfo } *msg = *ret diff --git a/lib/network_test.go b/lib/network_test.go index 86728e3be..0afb5322a 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -163,17 +163,6 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { } } -func createTestBlockProducerInfoVersion1(t *testing.T) *MsgDeSoBlockProducerInfo { - testBLSPublicKey, testBLSSignature := _generateValidatorVotingPublicKeyAndSignature(t) - - return &MsgDeSoBlockProducerInfo{ - Version: 1, - PublicKey: NewPublicKey(pkForTesting1), - VotingPublicKey: testBLSPublicKey, - VotePartialSignature: testBLSSignature, - } -} - func TestHeaderConversionAndReadWriteMessage(t *testing.T) { assert := assert.New(t) require := require.New(t) @@ -339,14 +328,14 @@ var expectedBlock = &MsgDeSoBlock{ Header: expectedBlockHeaderVersion1, Txns: expectedTransactions(true), // originally was effectively false - BlockProducerInfo: &MsgDeSoBlockProducerInfo{ - PublicKey: NewPublicKey([]byte{ + BlockProducerInfo: &BlockProducerInfo{ + PublicKey: []byte{ // random bytes 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x10, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x30, 0x21, 0x22, 0x23, - }), + }, }, } @@ -498,30 +487,6 @@ var expectedV0Header = &MsgDeSoHeader{ Nonce: uint64(123456), } -func TestSerializeBlockVersion2(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - _ = assert - _ = require - - originalBlock := &MsgDeSoBlock{ - Header: createTestBlockHeaderVersion2(t), - BlockProducerInfo: createTestBlockProducerInfoVersion1(t), - } - - encodedBytes, err := 
originalBlock.ToBytes(false) - require.NoError(err) - - decodedBlock := NewMessage(MsgTypeBlock).(*MsgDeSoBlock) - err = decodedBlock.FromBytes(encodedBytes) - require.NoError(err) - - assert.Equal(originalBlock.Header.Version, decodedBlock.Header.Version) - assert.Equal(originalBlock.BlockProducerInfo.PublicKey, decodedBlock.BlockProducerInfo.PublicKey) - assert.True(originalBlock.BlockProducerInfo.VotingPublicKey.Eq(decodedBlock.BlockProducerInfo.VotingPublicKey)) - assert.True(originalBlock.BlockProducerInfo.VotePartialSignature.Eq(decodedBlock.BlockProducerInfo.VotePartialSignature)) -} - func TestBlockSerialize(t *testing.T) { assert := assert.New(t) require := require.New(t) diff --git a/lib/server.go b/lib/server.go index a6974953f..18e9ade70 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1795,8 +1795,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { if len(srv.blockchain.trustedBlockProducerPublicKeys) > 0 && blockHeader.Height >= srv.blockchain.trustedBlockProducerStartHeight { if blk.BlockProducerInfo != nil { _, entryExists := srv.mempool.readOnlyUtxoView.ForbiddenPubKeyToForbiddenPubKeyEntry[MakePkMapKey( - blk.BlockProducerInfo.PublicKey.ToBytes(), - )] + blk.BlockProducerInfo.PublicKey)] if entryExists { srv._logAndDisconnectPeer(pp, blk, "Got forbidden block signature public key.") return From 7bcffd1bbe4f899c276b59ede70c446208e2c364 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Wed, 5 Jul 2023 16:45:40 -0700 Subject: [PATCH 127/762] PoS Mempool Data Structures (#561) * TransactionRegister * Block construction routine & stateConnected transaction encoding * RevolutionRegister and partial Revolution Rule * RevolutionModule draft finished * BMF and Fee bucket multiplier * Use RB TreeSet for TransactionRegister; Add Iterator to TransactionRegister * AddTransaction for DeSoMempoolPoS * Refactor out non-mempool code * gofmt * Remove transaction * Remove DeSoMempoolPoS * Remove Fee 
& Time Bucket data structures --- go.mod | 6 ++---- go.sum | 25 ++++++------------------- 2 files changed, 8 insertions(+), 23 deletions(-) diff --git a/go.mod b/go.mod index 1e34c1732..d91530157 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,7 @@ require ( github.com/deso-protocol/go-deadlock v1.0.0 github.com/deso-protocol/go-merkle-tree v1.0.0 github.com/dgraph-io/badger/v3 v3.2103.5 + github.com/emirpasic/gods v1.18.1 github.com/ethereum/go-ethereum v1.9.25 github.com/fatih/color v1.13.0 github.com/gernest/mention v2.0.0+incompatible @@ -23,6 +24,7 @@ require ( github.com/holiman/uint256 v1.1.1 github.com/mitchellh/go-homedir v1.1.0 github.com/oleiade/lane v1.0.1 + github.com/onflow/flow-go/crypto v0.24.7 github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 github.com/robinjoseph08/go-pg-migrations/v3 v3.0.0 @@ -45,9 +47,7 @@ require ( ) require ( - github.com/DataDog/zstd v1.4.8 // indirect github.com/Microsoft/go-winio v0.4.16 // indirect - github.com/NVIDIA/cstruct v0.0.0-20210817223100-441a06a021c8 // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect github.com/bwesterb/go-ristretto v1.2.0 // indirect @@ -79,7 +79,6 @@ require ( github.com/mattn/goveralls v0.0.6 // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect github.com/mitchellh/mapstructure v1.1.2 // indirect - github.com/onflow/flow-go/crypto v0.24.7 // indirect github.com/onsi/ginkgo v1.15.0 // indirect github.com/onsi/gomega v1.10.5 // indirect github.com/pelletier/go-toml v1.7.0 // indirect @@ -100,7 +99,6 @@ require ( github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect go.opencensus.io v0.23.0 // indirect - golang.org/x/mod v0.4.2 // indirect golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect golang.org/x/sys v0.0.0-20221010170243-090e33056c14 // indirect golang.org/x/text v0.3.6 // indirect diff --git 
a/go.sum b/go.sum index c2e35d0c6..7a4c6513b 100644 --- a/go.sum +++ b/go.sum @@ -30,12 +30,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/DataDog/datadog-go v4.4.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v4.5.0+incompatible h1:MyyuIz5LVAI3Im+0F/tfo64ETyH4sNVynZ29yOiHm50= github.com/DataDog/datadog-go v4.5.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.4.8 h1:Rpmta4xZ/MgZnriKNd24iZMhGpP5dvUcs/uqfBapKZY= -github.com/DataDog/zstd v1.4.8/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/NVIDIA/cstruct v0.0.0-20210817223100-441a06a021c8/go.mod h1:GPbuJvLD4QWiHPS6vivLzh+XMAx6va0Aucm6ipa5S0I= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= @@ -125,13 +121,8 @@ github.com/deso-protocol/go-deadlock v1.0.0 h1:mw0pHy/19zgC+JFBStuQt1+1Ehv5OKA5N github.com/deso-protocol/go-deadlock v1.0.0/go.mod h1:K0Wd2OV2x7ck7SMYDraWerpKjFKUeBqaFcwz21tmkb8= github.com/deso-protocol/go-merkle-tree v1.0.0 h1:9zkI5dQsITYy77s4kbTGPQmZnhQ+LsH/kRdL5l/Yzvg= github.com/deso-protocol/go-merkle-tree v1.0.0/go.mod h1:V/vbg/maaNv6G7zf9VVs645nLFx/jsO2L/awFB/S/ZU= -github.com/dgraph-io/badger/v3 v3.2103.0 h1:abkD2EnP3+6Tj8h5LI1y00dJ9ICKTIAzvG9WmZ8S2c4= -github.com/dgraph-io/badger/v3 v3.2103.0/go.mod h1:GHMCYxuDWyzbHkh4k3yyg4PM61tJPFfEGSMbE3Vd5QE= github.com/dgraph-io/badger/v3 v3.2103.5 
h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= -github.com/dgraph-io/ristretto v0.0.4-0.20210309073149-3836124cdc5a/go.mod h1:MIonLggsKgZLUSt414ExgwNtlOL5MuEoAJP514mwGe8= -github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= -github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -145,6 +136,8 @@ github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -215,7 +208,6 @@ github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree 
v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= @@ -309,6 +301,7 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -434,6 +427,7 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5I github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= 
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= @@ -462,7 +456,6 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZL github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -472,13 +465,13 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/supranational/blst v0.3.10 h1:CMciDZ/h4pXDDXQASe8ZGTNKUiVNxVVA5hpci2Uuhuk= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod 
h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= @@ -546,7 +539,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -606,8 +598,6 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -655,13 +645,11 @@ golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201026173827-119d4633e4d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14 h1:k5II8e6QD8mITdi+okbbmR/cIyEbeXLBhy5Ha4nevyc= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -779,8 +767,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= 
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= @@ -791,4 +777,5 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= mellium.im/sasl v0.2.1 h1:nspKSRg7/SyO0cRGY71OkfHab8tf9kCts6a6oTDut0w= mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ= +pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= From 476216a6d856ca610ca9a7d1bdcf9390b8eeeda4 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Wed, 5 Jul 2023 16:46:01 -0700 Subject: [PATCH 128/762] PoS Mempool Transaction Register (#562) * TransactionRegister * Block construction routine & stateConnected transaction encoding * RevolutionRegister and partial Revolution Rule * RevolutionModule draft finished * BMF and Fee bucket multiplier * Use RB TreeSet for TransactionRegister; Add Iterator to TransactionRegister * AddTransaction for DeSoMempoolPoS * Refactor out non-mempool code * gofmt * Remove transaction * Remove DeSoMempoolPoS * Remove Fee & Time Bucket data structures * Revert "Remove Fee & Time Bucket data structures" This reverts commit f8e6855cfe66d7e1a6ba9ff8ae7d040946e5e61f. 
* Add tests; Add documentation * Review round * Another review round * Remove index from FeeTimeBucket * Rename * CI Fix * Nits * Some more review rounding; Rename file * Aand another review * Nits * quick rename --- lib/block_view_derived_key_test.go | 1 + lib/block_view_test.go | 2 + lib/block_view_types.go | 33 ++- lib/constants.go | 9 + lib/pos_transaction_register.go | 425 +++++++++++++++++++++++++++ lib/pos_transaction_register_test.go | 329 +++++++++++++++++++++ lib/snapshot_test.go | 1 - 7 files changed, 797 insertions(+), 3 deletions(-) create mode 100644 lib/pos_transaction_register.go create mode 100644 lib/pos_transaction_register_test.go diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index 65d39fd29..db26009cd 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -877,6 +877,7 @@ func TestBalanceModelAuthorizeDerivedKey(t *testing.T) { // We need to set the block height here to 7 so that encoder migrations have the proper version and heights. // Otherwise, the access groups and associations migrations do not run when encoding Utxo Operations. 
DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = 7 + DeSoTestnetParams.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = 7 TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t) } diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 455f3ec06..fcc87e2b9 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -74,6 +74,7 @@ func setBalanceModelBlockHeights() { DeSoTestnetParams.ForkHeights.ExtraDataOnEntriesBlockHeight = 0 DeSoTestnetParams.ForkHeights.AssociationsAndAccessGroupsBlockHeight = 0 DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = 1 + DeSoTestnetParams.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = 1 DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) GlobalDeSoParams = DeSoTestnetParams @@ -87,6 +88,7 @@ func resetBalanceModelBlockHeights() { DeSoTestnetParams.ForkHeights.ExtraDataOnEntriesBlockHeight = 1000000 DeSoTestnetParams.ForkHeights.AssociationsAndAccessGroupsBlockHeight = 1000000 DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = 1000000 + DeSoTestnetParams.ForkHeights.ProofOfStakeNewTxnTypesBlockHeight = 1000000 DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) GlobalDeSoParams = DeSoTestnetParams diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 7641a34df..0452050a6 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3764,7 +3764,9 @@ type GlobalParamsEntry struct { // The maximum number of NFT copies that are allowed to be minted. MaxCopiesPerNFT uint64 - // The new minimum fee the network will accept + // MinimumNetworkFeeNanosPerKB is the minimal fee rate in DeSo nanos per KB a transaction can have. 
+ // If a transaction has a lower fee than MinimumNetworkFeeNanosPerKB, it will be + // rejected by the node's mempool. MinimumNetworkFeeNanosPerKB uint64 // MaxNonceExpirationBlockHeightOffset is maximum value nodes will @@ -3772,6 +3774,13 @@ type GlobalParamsEntry struct { // and the expiration block height specified in the nonce for a // transaction. MaxNonceExpirationBlockHeightOffset uint64 + + // FeeBucketRateMultiplierBasisPoints is the rate of growth of the fee bucket ranges. This is part of the new + // PoS Mempool. The multiplier is given as basis points. For example a value of 1000 means that the fee bucket + // ranges will grow by 10% each time. If, let's say, we start with MinimumNetworkFeeNanosPerKB of 1000 nanos, + // then the first bucket will be [1000, 1099], the second bucket will be [1100, 1209], the third bucket will + // be [1210, 1330], etc. + FeeBucketRateMultiplierBasisPoints uint64 } func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -3785,6 +3794,9 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe if MigrationTriggered(blockHeight, BalanceModelMigration) { data = append(data, UintToBuf(gp.MaxNonceExpirationBlockHeightOffset)...) } + if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + data = append(data, UintToBuf(gp.FeeBucketRateMultiplierBasisPoints)...) 
+ } return data } @@ -3817,17 +3829,34 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MaxNonceExpirationBlockHeightOffset") } } + if MigrationTriggered(blockHeight, ProofOfStakeNewTxnTypesMigration) { + gp.FeeBucketRateMultiplierBasisPoints, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading FeeBucketRateMultiplierBasisPoints") + } + } return nil } func (gp *GlobalParamsEntry) GetVersionByte(blockHeight uint64) byte { - return GetMigrationVersion(blockHeight, BalanceModelMigration) + return GetMigrationVersion(blockHeight, BalanceModelMigration, ProofOfStakeNewTxnTypesMigration) } func (gp *GlobalParamsEntry) GetEncoderType() EncoderType { return EncoderTypeGlobalParamsEntry } +// ComputeFeeTimeBucketMinimumFeeAndMultiplier takes the MinimumNetworkFeeNanosPerKB and FeeBucketRateMultiplierBasisPoints for +// the GlobalParamsEntry, and returns them as big.Floats. +func (gp *GlobalParamsEntry) ComputeFeeTimeBucketMinimumFeeAndMultiplier() ( + _minimumRate *big.Float, _bucketMultiplier *big.Float) { + + minimumNetworkFeeNanosPerKB := NewFloat().SetUint64(gp.MinimumNetworkFeeNanosPerKB) + feeBucketMultiplier := NewFloat().SetUint64(10000 + gp.FeeBucketRateMultiplierBasisPoints) + feeBucketMultiplier.Quo(feeBucketMultiplier, NewFloat().SetUint64(10000)) + return minimumNetworkFeeNanosPerKB, feeBucketMultiplier +} + // This struct holds info on a readers interactions (e.g. likes) with a post. // It is added to a post entry response in the frontend server api. 
type PostEntryReaderState struct { diff --git a/lib/constants.go b/lib/constants.go index 52489dc1a..5003fca78 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -622,6 +622,9 @@ type DeSoParams struct { EncoderMigrationHeights *EncoderMigrationHeights EncoderMigrationHeightsList []*MigrationHeight + + // The maximum aggregate number of bytes of transactions included in the PoS mempool. + MaxMempoolPosSizeBytes uint64 } var RegtestForkHeights = ForkHeights{ @@ -1004,6 +1007,8 @@ var DeSoMainnetParams = DeSoParams{ ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), + + MaxMempoolPosSizeBytes: 3 << 30, // 3GB } func mustDecodeHexBlockHashBitcoin(ss string) *BlockHash { @@ -1244,6 +1249,8 @@ var DeSoTestnetParams = DeSoParams{ ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), + + MaxMempoolPosSizeBytes: 3 << 30, // 3GB } // GetDataDir gets the user data directory where we store files @@ -1358,6 +1365,8 @@ var ( // We initialize the CreateNFTFeeNanos to 0 so we do not assess a fee to create an NFT until specified by ParamUpdater. CreateNFTFeeNanos: 0, MaxCopiesPerNFT: 0, + // We initialize the FeeBucketRateMultiplierBasisPoints to 1000, or equivalently, a multiplier of 1.1x. 
+ FeeBucketRateMultiplierBasisPoints: 1000, + } ) diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go new file mode 100644 index 000000000..88c9395e5 --- /dev/null +++ b/lib/pos_transaction_register.go @@ -0,0 +1,425 @@ +package lib + +import ( + "fmt" + "github.com/emirpasic/gods/sets/treeset" + "github.com/golang/glog" + "github.com/pkg/errors" + "math" + "math/big" +) + +// ======================== +// TransactionRegister +// ======================== + +// TransactionRegister is the primary transaction store of the PoS Mempool. The register is responsible for determining +// the Fee-Time ordering of transactions. The operations supported by the register are: adding a transaction, removing +// a transaction, iterating through all transactions in fee-time order, and retrieving all transactions ordered in +// fee-time. The TransactionRegister doesn't perform any validation on the transactions, it just accepts the provided +// MempoolTx and adds it to the appropriate FeeTimeBucket. +type TransactionRegister struct { + // feeTimeBucketSet is a set of FeeTimeBucket objects. The set is ordered by the FeeTimeBucket's ranges, based on feeTimeBucketComparator. + feeTimeBucketSet *treeset.Set + // feeTimeBucketsByMinFeeMap is a map of FeeTimeBucket minimum fees to FeeTimeBucket objects. It is used to quickly find + // a FeeTimeBucket given its min fee. + feeTimeBucketsByMinFeeMap map[uint64]*FeeTimeBucket + // txnMembership is a set of transaction hashes. It is used to determine existence of a transaction in the register. + txnMembership *Set[BlockHash] + // totalTxnsSizeBytes is the total size of all transactions in the register. + totalTxnsSizeBytes uint64 + // maxTxnsSizeBytes is the maximum total size of all transactions in the register, i.e. the mempool's capacity in bytes. + // This value corresponds to DeSoParams' MaxMempoolPosSizeBytes. + maxTxnsSizeBytes uint64 + // minimumNetworkFeeNanosPerKB is the base fee rate for the lowest fee FeeTimeBucket. This value corresponds to + // GlobalParamsEntry's MinimumNetworkFeeNanosPerKB.
+ minimumNetworkFeeNanosPerKB *big.Float + // feeBucketRateMultiplierBasisPoints is the fee rate multiplier for FeeTimeBucket objects. This value corresponds to + // GlobalParamsEntry's FeeBucketRateMultiplierBasisPoints. + feeBucketRateMultiplierBasisPoints *big.Float +} + +func NewTransactionRegister(params *DeSoParams, globalParams *GlobalParamsEntry) *TransactionRegister { + feeTimeBucketSet := treeset.NewWith(feeTimeBucketComparator) + minNetworkFee, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() + + return &TransactionRegister{ + feeTimeBucketSet: feeTimeBucketSet, + feeTimeBucketsByMinFeeMap: make(map[uint64]*FeeTimeBucket), + txnMembership: NewSet([]BlockHash{}), + totalTxnsSizeBytes: 0, + maxTxnsSizeBytes: params.MaxMempoolPosSizeBytes, + minimumNetworkFeeNanosPerKB: minNetworkFee, + feeBucketRateMultiplierBasisPoints: bucketMultiplier, + } +} + +// feeTimeBucketComparator is a comparator function for FeeTimeBucket objects. It is used to order FeeTimeBucket objects +// in the TransactionRegister's feeTimeBucketSet based on fee ranges (higher fee ranges are ordered first). +func feeTimeBucketComparator(a, b interface{}) int { + aVal, aOk := a.(*FeeTimeBucket) + bVal, bOk := b.(*FeeTimeBucket) + if !aOk || !bOk { + glog.Error(CLog(Red, "feeTimeBucketComparator: Invalid types. This is BAD NEWS, we should never get here.")) + return 0 + } + + // Determine the FeeTimeBucket with a higher fee range. We can easily find out by comparing FeeTimeBucket minFeeNanosPerKB. + if aVal.minFeeNanosPerKB < bVal.minFeeNanosPerKB { + return 1 + } else if aVal.minFeeNanosPerKB > bVal.minFeeNanosPerKB { + return -1 + } + + return 0 +} + +// AddTransaction adds a transaction to the register. If the transaction already exists in the register, or its size +// exceeds the maximum mempool capacity, it is not added. Returns nil when transaction was successfully added to the +// register, or an error otherwise. 
+func (tr *TransactionRegister) AddTransaction(txn *MempoolTx) error { + if txn == nil || txn.Hash == nil { + return fmt.Errorf("TransactionRegister.AddTransaction: Transaction or transaction hash is nil") + } + + if tr.txnMembership.Includes(*txn.Hash) { + return fmt.Errorf("TransactionRegister.AddTransaction: Transaction already exists in register") + } + + // If the transaction is too large, reject it. + if tr.totalTxnsSizeBytes > math.MaxUint64-txn.TxSizeBytes { + return fmt.Errorf("TransactionRegister.AddTransaction: Transaction size overflows uint64. Txn size %v, "+ + "total size %v", txn.TxSizeBytes, tr.totalTxnsSizeBytes) + } + + // If the transaction overflows the maximum mempool size, reject it. + if tr.totalTxnsSizeBytes+txn.TxSizeBytes > tr.maxTxnsSizeBytes { + return fmt.Errorf("TransactionRegister.AddTransaction: Transaction size exceeds maximum mempool size") + } + + // Determine the min fee of the bucket based on the transaction's fee rate. + bucketMinFeeNanosPerKb, bucketMaxFeeNanosPerKB := computeFeeTimeBucketRangeFromFeeNanosPerKB(txn.FeePerKB, + tr.minimumNetworkFeeNanosPerKB, tr.feeBucketRateMultiplierBasisPoints) + // Lookup the bucket in the map. + bucket, bucketExists := tr.feeTimeBucketsByMinFeeMap[bucketMinFeeNanosPerKb] + if !bucketExists { + // If the bucket doesn't exist, create it and add the transaction to it. + bucket = NewFeeTimeBucket(bucketMinFeeNanosPerKb, bucketMaxFeeNanosPerKB) + if err := bucket.AddTransaction(txn); err != nil { + return errors.Wrapf(err, "TransactionRegister.AddTransaction: Error adding transaction to bucket: %v", err) + } + } + + // Add the transaction to the bucket. + if err := bucket.AddTransaction(txn); err != nil { + return errors.Wrapf(err, "TransactionRegister.AddTransaction: Error adding transaction to bucket: ") + } + + if !bucketExists { + // If the bucket didn't exist, add it to the set and the map. 
+ tr.feeTimeBucketSet.Add(bucket) + tr.feeTimeBucketsByMinFeeMap[bucketMinFeeNanosPerKb] = bucket + } + + tr.totalTxnsSizeBytes += txn.TxSizeBytes + tr.txnMembership.Add(*txn.Hash) + return nil +} + +// RemoveTransaction removes a transaction from the register. If the transaction does not exist in the register, or its +// size exceeds the current register size (which should never happen), it is not removed. Returns nil when transaction +// was successfully removed from the register, or an error otherwise. +func (tr *TransactionRegister) RemoveTransaction(txn *MempoolTx) error { + if txn == nil || txn.Hash == nil { + return fmt.Errorf("TransactionRegister.RemoveTransaction: Transaction or transaction hash is nil") + } + + if !tr.txnMembership.Includes(*txn.Hash) { + return fmt.Errorf("TransactionRegister.RemoveTransaction: Transaction with transaction hash %v does not "+ + "exist in the register", txn.Hash.String()) + } + + if tr.totalTxnsSizeBytes < txn.TxSizeBytes { + return fmt.Errorf("TransactionRegister.RemoveTransaction: Transaction with transaction hash %v size %v "+ + "exceeds total mempool size %v", txn.Hash.String(), txn.TxSizeBytes, tr.totalTxnsSizeBytes) + } + + // Determine the min fee of the bucket based on the transaction's fee rate. + bucketMinFeeNanosPerKb, _ := computeFeeTimeBucketRangeFromFeeNanosPerKB(txn.FeePerKB, + tr.minimumNetworkFeeNanosPerKB, tr.feeBucketRateMultiplierBasisPoints) + // Remove the transaction from the bucket. + if bucket, exists := tr.feeTimeBucketsByMinFeeMap[bucketMinFeeNanosPerKb]; exists { + bucket.RemoveTransaction(txn) + // If the bucket becomes empty, remove it from the TransactionRegister. 
+ if bucket.Empty() { + tr.feeTimeBucketSet.Remove(bucket) + delete(tr.feeTimeBucketsByMinFeeMap, bucketMinFeeNanosPerKb) + } + } + + tr.txnMembership.Remove(*txn.Hash) + tr.totalTxnsSizeBytes -= txn.TxSizeBytes + return nil +} + +func (tr *TransactionRegister) Empty() bool { + return tr.feeTimeBucketSet.Empty() +} + +// GetFeeTimeIterator returns an iterator over the transactions in the register. The iterator goes through all transactions +// as ordered by Fee-Time. +func (tr *TransactionRegister) GetFeeTimeIterator() *FeeTimeIterator { + return &FeeTimeIterator{ + bucketIterator: tr.feeTimeBucketSet.Iterator(), + mempoolTxIterator: nil, + } +} + +// GetFeeTimeTransactions returns all transactions in the register ordered by Fee-Time. +func (tr *TransactionRegister) GetFeeTimeTransactions() []*MempoolTx { + txns := []*MempoolTx{} + it := tr.GetFeeTimeIterator() + for it.Next() { + if txn, ok := it.Value(); ok { + txns = append(txns, txn) + } + } + return txns +} + +// FeeTimeIterator is an iterator over the transactions in a TransactionRegister. The iterator goes through all transactions +// as ordered by Fee-Time. +type FeeTimeIterator struct { + // bucketIterator is an iterator over the FeeTimeBucket objects in the TransactionRegister. + bucketIterator treeset.Iterator + // mempoolTxIterator is an iterator over the transactions in the current FeeTimeBucket. It is nil if the iterator + // is uninitialized. + mempoolTxIterator *treeset.Iterator +} + +// Next moves the FeeTimeIterator to the next transaction. It returns true if the iterator is pointing at a transaction +// after the move, and false otherwise. +func (fti *FeeTimeIterator) Next() bool { + // If the iterator is uninitialized, then mempoolTxIterator is nil. In this case, we will first advance the + // bucketIterator to the first FeeTime bucket, and then initialize mempoolTxIterator to point at the first transaction + // in the bucket. 
+ // If the iterator is initialized, then mempoolTxIterator is not nil. In this case, we will first see if there are + // more transactions in the current FeeTimeBucket. We do this by advancing the mempoolTxIterator and checking if it + // is pointing at a transaction. + if fti.mempoolTxIterator != nil && fti.mempoolTxIterator.Next() { + return true + } + + // If there are no more transactions in the current FeeTimeBucket, we will advance the bucketIterator. + for fti.bucketIterator.Next() { + // If there are more FeeTimeBucket objects in the TransactionRegister, we will advance the mempoolTxIterator + // to the first transaction in the next FeeTimeBucket. First, we fetch the newly pointed-at FeeTimeBucket. + nextFeeTimeBucket, ok := fti.bucketIterator.Value().(*FeeTimeBucket) + if !ok { + return false + } + + // We will set the mempoolTxIterator to point at the first transaction in the new FeeTimeBucket. + it := nextFeeTimeBucket.txnsSet.Iterator() + fti.mempoolTxIterator = &it + + // Check if the newly found FeeTimeBucket is empty. If it's not empty, then we're done. + if fti.mempoolTxIterator.Next() { + return true + } + } + + return false +} + +// Value returns the transaction that the iterator is pointing at. +func (fti *FeeTimeIterator) Value() (*MempoolTx, bool) { + if !fti.Initialized() { + return nil, false + } + + if txn, ok := fti.mempoolTxIterator.Value().(*MempoolTx); ok { + return txn, true + } + return nil, false +} + +// Initialized returns true if the iterator is initialized, and false otherwise. +func (fti *FeeTimeIterator) Initialized() bool { + return fti.mempoolTxIterator != nil +} + +// ======================== +// FeeTimeBucket +// ======================== + +// FeeTimeBucket is a data structure storing MempoolTx with similar fee rates. +type FeeTimeBucket struct { + // txnsSet is a set of MempoolTx transactions stored in the FeeTimeBucket. 
+ txnsSet *treeset.Set + // minFeeNanosPerKB is the minimum fee rate (inclusive) accepted by the FeeTimeBucket, in nanos per KB. + minFeeNanosPerKB uint64 + // maxFeeNanosPerKB is the maximum fee rate (inclusive) accepted by the FeeTimeBucket, in nanos per KB. It's worth + // noting that the maximum fee rate is always 1 below the minimum fee rate of the FeeTimeBucket with exponent+1. + maxFeeNanosPerKB uint64 +} + +func NewFeeTimeBucket(minFeeNanosPerKB uint64, maxFeeNanosPerKB uint64) *FeeTimeBucket { + + txnsSet := treeset.NewWith(mempoolTxTimeOrderComparator) + return &FeeTimeBucket{ + minFeeNanosPerKB: minFeeNanosPerKB, + maxFeeNanosPerKB: maxFeeNanosPerKB, + txnsSet: txnsSet, + } +} + +// mempoolTxTimeOrderComparator is a comparator function for MempoolTx transactions stored inside a FeeTimeBucket. The comparator +// orders the transactions by smallest timestamp. In case of a tie, transactions are ordered by greatest fee rate. Finally, +// in case of another tie, transactions are ordered by their hash. +func mempoolTxTimeOrderComparator(a, b interface{}) int { + aVal, aOk := a.(*MempoolTx) + bVal, bOk := b.(*MempoolTx) + if !aOk || !bOk { + glog.Error(CLog(Red, "mempoolTxTimeOrderComparator: Invalid types. This is BAD NEWS, we should never get here.")) + return 0 + } + + if aVal.Added.UnixMicro() > bVal.Added.UnixMicro() { + return 1 + } else if aVal.Added.UnixMicro() < bVal.Added.UnixMicro() { + return -1 + } else if aVal.FeePerKB < bVal.FeePerKB { + return 1 + } else if aVal.FeePerKB > bVal.FeePerKB { + return -1 + } + // If the timestamps and fee rates are the same, we order by the transaction hash. + return HashToBigint(aVal.Hash).Cmp(HashToBigint(bVal.Hash)) +} + +// AddTransaction adds a transaction to the FeeTimeBucket. It returns an error if the transaction is outside the +// FeeTimeBucket's fee range, or if the transaction hash is nil. 
+func (tb *FeeTimeBucket) AddTransaction(txn *MempoolTx) error { + if txn == nil || txn.Hash == nil { + return fmt.Errorf("FeeTimeBucket.AddTransaction: Transaction or transaction hash is nil") + } + + if tb.minFeeNanosPerKB > txn.FeePerKB || tb.maxFeeNanosPerKB < txn.FeePerKB { + return fmt.Errorf("FeeTimeBucket.AddTransaction: Transaction fee %d outside of bucket range [%d, %d]", + txn.FeePerKB, tb.minFeeNanosPerKB, tb.maxFeeNanosPerKB) + } + + tb.txnsSet.Add(txn) + return nil +} + +// RemoveTransaction removes a transaction from the FeeTimeBucket. +func (tb *FeeTimeBucket) RemoveTransaction(txn *MempoolTx) { + tb.txnsSet.Remove(txn) +} + +func (tb *FeeTimeBucket) Empty() bool { + return tb.txnsSet.Empty() +} + +// GetIterator returns an iterator over the MempoolTx inside the FeeTimeBucket. +func (tb *FeeTimeBucket) GetIterator() treeset.Iterator { + return tb.txnsSet.Iterator() +} + +// GetTransactions returns a slice of MempoolTx inside the FeeTimeBucket. The slice is ordered according to the +// mempoolTxTimeOrderComparator. +func (tb *FeeTimeBucket) GetTransactions() []*MempoolTx { + txns := []*MempoolTx{} + it := tb.GetIterator() + for it.Next() { + if txn, ok := it.Value().(*MempoolTx); ok { + txns = append(txns, txn) + } + } + return txns +} + +//============================================ +// Fee-Time Bucket Math +//============================================ + +// computeFeeTimeBucketRangeFromFeeNanosPerKB takes a fee rate, minimumNetworkFeeNanosPerKB, and feeBucketMultiplier, +// and returns the [minFeeNanosPerKB, maxFeeNanosPerKB] of the fee range. 
+func computeFeeTimeBucketRangeFromFeeNanosPerKB(feeNanosPerKB uint64, minimumNetworkFeeNanosPerKB *big.Float, + feeBucketMultiplier *big.Float) (uint64, uint64) { + + bucketExponent := computeFeeTimeBucketExponentFromFeeNanosPerKB(feeNanosPerKB, minimumNetworkFeeNanosPerKB, feeBucketMultiplier) + return computeFeeTimeBucketRangeFromExponent(bucketExponent, minimumNetworkFeeNanosPerKB, feeBucketMultiplier) +} + +// computeFeeTimeBucketRangeFromExponent takes a fee range exponent, minimumNetworkFeeNanosPerKB, and feeBucketMultiplier, and returns the +// [minFeeNanosPerKB, maxFeeNanosPerKB] of the fee range. +func computeFeeTimeBucketRangeFromExponent(exponent uint32, minimumNetworkFeeNanosPerKB *big.Float, feeBucketMultiplier *big.Float) ( + _minFeeNanosPerKB uint64, _maxFeeNanosPerKB uint64) { + + minFeeNanosPerKB := computeFeeTimeBucketMinFromExponent(exponent, minimumNetworkFeeNanosPerKB, feeBucketMultiplier) + maxFeeNanosPerKB := computeFeeTimeBucketMinFromExponent(exponent+1, minimumNetworkFeeNanosPerKB, feeBucketMultiplier) - 1 + return minFeeNanosPerKB, maxFeeNanosPerKB +} + +// computeFeeTimeBucketMinFromExponent takes a fee range exponent, minimumNetworkFeeNanosPerKB, and feeBucketMultiplier, and uses them to +// return the minimum fee rate of this fee range. +func computeFeeTimeBucketMinFromExponent(exponent uint32, minimumNetworkFeeNanosPerKB *big.Float, feeBucketMultiplier *big.Float) uint64 { + // The first fee range has a fee rate of minimumNetworkFeeNanosPerKB. + if exponent == 0 { + fee, _ := minimumNetworkFeeNanosPerKB.Uint64() + return fee + } + + // Compute minimumNetworkFeeNanosPerKB * feeBucketMultiplier^exponent. 
+ pow := NewFloat().SetUint64(uint64(exponent)) + multiplier := BigFloatPow(feeBucketMultiplier, pow) + fee := NewFloat().Mul(minimumNetworkFeeNanosPerKB, multiplier) + + feeUint64, _ := fee.Uint64() + return feeUint64 +} + +// computeFeeTimeBucketExponentFromFeeNanosPerKB takes a fee rate, minimumNetworkFeeNanosPerKB, and feeBucketMultiplier, and +// returns the fee range exponent. +func computeFeeTimeBucketExponentFromFeeNanosPerKB(feeNanosPerKB uint64, minimumNetworkFeeNanosPerKB *big.Float, + feeBucketMultiplier *big.Float) uint32 { + + // Compute the fee time bucket exponent for the fee rate. We can compute the exponent as follows: + // feeNanosPerKB = minimumNetworkFeeNanosPerKB * feeBucketMultiplier ^ exponent + // log(feeNanosPerKB) = log(minimumNetworkFeeNanosPerKB) + exponent * log(feeBucketMultiplier) + // exponent = (log(feeNanosPerKB) - log(minimumNetworkFeeNanosPerKB)) / log(feeBucketMultiplier). + + feeFloat := NewFloat().SetUint64(feeNanosPerKB) + // If the fee is less than the base rate, return 0. + if feeFloat.Cmp(minimumNetworkFeeNanosPerKB) < 0 { + return 0 + } + + logFeeFloat := BigFloatLog(feeFloat) + logBaseRate := BigFloatLog(minimumNetworkFeeNanosPerKB) + logMultiplier := BigFloatLog(feeBucketMultiplier) + subFee := Sub(logFeeFloat, logBaseRate) + divFee := Div(subFee, logMultiplier) + feeTimeBucketExponentUint64, _ := divFee.Uint64() + feeTimeBucketExponent := uint32(feeTimeBucketExponentUint64) + if feeTimeBucketExponent < 0 { + return 0 + } + + // Now verify that float precision hasn't caused us to be off by one. + // If this condition is true, then the computed exponent is overestimated by 1. + if computeFeeTimeBucketMinFromExponent(feeTimeBucketExponent, minimumNetworkFeeNanosPerKB, feeBucketMultiplier) > feeNanosPerKB { + return feeTimeBucketExponent - 1 + } + + // This condition gets triggered exactly on fee bucket boundaries, i.e. when feeNanosPerKB = FeeMax for some bucket. 
+ // The float rounding makes the number slightly smaller like 5.9999999991 instead of 6.0. + if computeFeeTimeBucketMinFromExponent(feeTimeBucketExponent+1, minimumNetworkFeeNanosPerKB, feeBucketMultiplier) <= feeNanosPerKB { + return feeTimeBucketExponent + 1 + } + + // If we get here, then the computed exponent is correct. + return feeTimeBucketExponent +} diff --git a/lib/pos_transaction_register_test.go b/lib/pos_transaction_register_test.go new file mode 100644 index 000000000..71cbf6763 --- /dev/null +++ b/lib/pos_transaction_register_test.go @@ -0,0 +1,329 @@ +package lib + +import ( + "bytes" + "github.com/stretchr/testify/require" + "math/rand" + "sort" + "testing" + "time" +) + +func TestSanityCheckTransactionRegister(t *testing.T) { + require := require.New(t) + + // Empty TransactionRegister + txnRegister := NewTransactionRegister(&DeSoTestnetParams, _testGetDefaultGlobalParams()) + require.Equal(true, txnRegister.Empty()) + it := txnRegister.GetFeeTimeIterator() + require.Equal(false, it.Next()) + + // TransactionRegister with a single transaction + txn := &MempoolTx{ + FeePerKB: 100000, + Added: time.UnixMicro(1000000), + Hash: NewBlockHash(RandomBytes(32)), + } + require.Nil(txnRegister.AddTransaction(txn)) + require.Equal(false, txnRegister.Empty()) + it = txnRegister.GetFeeTimeIterator() + require.Equal(true, it.Next()) + recTxn, ok := it.Value() + require.Equal(true, ok) + require.Equal(true, bytes.Equal(txn.Hash[:], recTxn.Hash[:])) + require.Nil(txnRegister.RemoveTransaction(recTxn)) + + // TransactionRegister with no transactions and a single empty FeeTimeBucket. + // This should never happen but let's see what happens. 
+ txnRegister = NewTransactionRegister(&DeSoTestnetParams, _testGetDefaultGlobalParams()) + emptyFeeTimeBucket := NewFeeTimeBucket(0, 1000) + txnRegister.feeTimeBucketSet.Add(emptyFeeTimeBucket) + txnRegister.feeTimeBucketsByMinFeeMap[0] = emptyFeeTimeBucket + newIt := txnRegister.GetFeeTimeIterator() + require.Equal(false, newIt.Next()) +} + +func TestTransactionRegisterWithRemoves(t *testing.T) { + seed := int64(88) + testCases := 1000 + feeRange := uint64(10000) + timestampRange := uint64(10000) + + require := require.New(t) + rand := rand.New(rand.NewSource(seed)) + globalParams := _testGetDefaultGlobalParams() + txnPool := _testGetRandomMempoolTxns(rand, globalParams.MinimumNetworkFeeNanosPerKB, feeRange, timestampRange, testCases) + + txnRegister := NewTransactionRegister(&DeSoTestnetParams, globalParams) + _testBucketStandardRemoveTest(t, txnPool, globalParams, false, + func(tx *MempoolTx) { + require.Nil(txnRegister.AddTransaction(tx)) + }, + func(tx *MempoolTx) { + txnRegister.RemoveTransaction(tx) + }, + func() []*MempoolTx { + return txnRegister.GetFeeTimeTransactions() + }, + func() { + require.Equal(true, txnRegister.Empty()) + }, + ) +} + +func TestTransactionRegisterBasic(t *testing.T) { + seed := int64(77) + testCases := 1000 + feeRange := uint64(10000) + timestampRange := uint64(10000) + + require := require.New(t) + rand := rand.New(rand.NewSource(seed)) + globalParams := _testGetDefaultGlobalParams() + txnPool := _testGetRandomMempoolTxns(rand, globalParams.MinimumNetworkFeeNanosPerKB, feeRange, timestampRange, testCases) + + txnRegister := NewTransactionRegister(&DeSoTestnetParams, globalParams) + _testBucketStandardAddTest(t, txnPool, globalParams, false, + func(tx *MempoolTx) { + require.Nil(txnRegister.AddTransaction(tx)) + }, + func() []*MempoolTx { + return txnRegister.GetFeeTimeTransactions() + }, + ) +} + +func TestFeeTimeBucketRemove(t *testing.T) { + seed := int64(44) + testCases := 1000 + exponentRange := 100 + timestampRange := 
uint64(10000) + + require := require.New(t) + rand := rand.New(rand.NewSource(seed)) + globalParams := _testGetDefaultGlobalParams() + randomExponent := uint32(rand.Intn(exponentRange)) + baseRate, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() + feeMin, feeMax := computeFeeTimeBucketRangeFromExponent(randomExponent, baseRate, bucketMultiplier) + txnPool := _testGetRandomMempoolTxns(rand, feeMin, feeMax, timestampRange, testCases) + + // Create new FeeBucket and add the txn pool + bucketFeeMin, bucketFeeMax := computeFeeTimeBucketRangeFromExponent(randomExponent, baseRate, bucketMultiplier) + timeBucket := NewFeeTimeBucket(bucketFeeMin, bucketFeeMax) + _testBucketStandardRemoveTest(t, txnPool, globalParams, false, + func(tx *MempoolTx) { + require.Equal(nil, timeBucket.AddTransaction(tx)) + }, + func(tx *MempoolTx) { + timeBucket.RemoveTransaction(tx) + }, + func() []*MempoolTx { + return timeBucket.GetTransactions() + }, + func() { + require.Equal(true, timeBucket.Empty()) + }, + ) +} + +func TestFeeTimeBucketBasic(t *testing.T) { + seed := int64(33) + testCases := 1000 + exponentRange := 100 + timestampRange := uint64(10000) + + require := require.New(t) + rand := rand.New(rand.NewSource(seed)) + globalParams := _testGetDefaultGlobalParams() + randomExponent := uint32(rand.Intn(exponentRange)) + baseRate, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() + feeMin, feeMax := computeFeeTimeBucketRangeFromExponent(randomExponent, baseRate, bucketMultiplier) + txnPool := _testGetRandomMempoolTxns(rand, feeMin, feeMax, timestampRange, testCases) + + // Create new FeeBucket and add the txn pool + bucketFeeMin, bucketFeeMax := computeFeeTimeBucketRangeFromExponent(randomExponent, baseRate, bucketMultiplier) + timeBucket := NewFeeTimeBucket(bucketFeeMin, bucketFeeMax) + _testBucketStandardAddTest(t, txnPool, globalParams, true, + func(tx *MempoolTx) { + require.Equal(nil, timeBucket.AddTransaction(tx)) + }, + 
func() []*MempoolTx { + return timeBucket.GetTransactions() + }, + ) +} + +func _testGetDefaultGlobalParams() *GlobalParamsEntry { + globalParams := InitialGlobalParamsEntry + globalParams.MinimumNetworkFeeNanosPerKB = 1000 + globalParams.FeeBucketRateMultiplierBasisPoints = 1000 + + return &globalParams +} + +func _testGetRandomMempoolTxns(rand *rand.Rand, feeMin uint64, feeMax uint64, timestampRange uint64, numTxns int) []*MempoolTx { + txnPool := []*MempoolTx{} + for ii := 0; ii < numTxns; ii++ { + txnPool = append(txnPool, &MempoolTx{ + FeePerKB: rand.Uint64()%(feeMax-feeMin) + feeMin, + Added: time.UnixMicro(int64(rand.Uint64() % timestampRange)), + Hash: NewBlockHash(RandomBytes(32)), + }) + } + return txnPool +} + +func _testMapMempoolTxnToTimeBucket(txn *MempoolTx, globalParams *GlobalParamsEntry) *FeeTimeBucket { + baseRate, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() + timeBucketExponent := computeFeeTimeBucketExponentFromFeeNanosPerKB(txn.FeePerKB, baseRate, bucketMultiplier) + bucketFeeMin, bucketFeeMax := computeFeeTimeBucketRangeFromExponent(timeBucketExponent, baseRate, bucketMultiplier) + return NewFeeTimeBucket(bucketFeeMin, bucketFeeMax) +} + +func _testSortMempoolTxnsByFeeTime(txnPool []*MempoolTx, globalParams *GlobalParamsEntry, timeOnly bool) []*MempoolTx { + if !timeOnly { + sort.Slice(txnPool, func(i, j int) bool { + timeBucketI := _testMapMempoolTxnToTimeBucket(txnPool[i], globalParams) + timeBucketJ := _testMapMempoolTxnToTimeBucket(txnPool[j], globalParams) + + feeComparison := feeTimeBucketComparator(timeBucketI, timeBucketJ) + if feeComparison == 1 { + return false + } else if feeComparison == -1 { + return true + } + + timeComparison := mempoolTxTimeOrderComparator(txnPool[i], txnPool[j]) + if timeComparison == 1 { + return false + } + return true + }) + } else { + sort.Slice(txnPool, func(i, j int) bool { + timeComparison := mempoolTxTimeOrderComparator(txnPool[i], txnPool[j]) + if timeComparison == 
1 { + return false + } + return true + }) + } + return txnPool +} + +func _testBucketStandardAddTest(t *testing.T, txns []*MempoolTx, globalParams *GlobalParamsEntry, timeOnly bool, + add func(tx *MempoolTx), getTxns func() []*MempoolTx) { + + require := require.New(t) + for ii := 0; ii < len(txns); ii++ { + add(txns[ii]) + } + + feeTimeTxns := getTxns() + sortedTxns := _testSortMempoolTxnsByFeeTime(txns, _testGetDefaultGlobalParams(), timeOnly) + require.Equal(len(sortedTxns), len(feeTimeTxns)) + for ii := 0; ii < len(sortedTxns); ii++ { + require.Equal(sortedTxns[ii].FeePerKB, feeTimeTxns[ii].FeePerKB) + require.Equal(sortedTxns[ii].Added.UnixNano(), feeTimeTxns[ii].Added.UnixNano()) + } +} + +func _testBucketStandardRemoveTest(t *testing.T, txns []*MempoolTx, globalParams *GlobalParamsEntry, timeOnly bool, + add func(tx *MempoolTx), remove func(tx *MempoolTx), getTxns func() []*MempoolTx, checkEmpty func()) { + + require := require.New(t) + for ii := 0; ii < len(txns); ii++ { + add(txns[ii]) + } + + // Now remove half of the transactions. + txnToRemove := len(txns) / 2 + for ii := 0; ii < txnToRemove; ii++ { + remove(txns[ii]) + } + + // Iterate through buckets to get Fee-Time ordering. + halfTxnsFeeTime := getTxns() + + // Make a slice of the remaining transactions. + remainingTxns := txns[txnToRemove:] + remainingTxnsSorted := _testSortMempoolTxnsByFeeTime(remainingTxns, globalParams, timeOnly) + + // Now compare the tree set ordering with explicit sort ordering. + require.Equal(len(halfTxnsFeeTime), len(remainingTxnsSorted)) + for ii := 0; ii < len(remainingTxnsSorted); ii++ { + require.Equal(halfTxnsFeeTime[ii].FeePerKB, remainingTxnsSorted[ii].FeePerKB) + require.Equal(halfTxnsFeeTime[ii].Added.UnixNano(), remainingTxnsSorted[ii].Added.UnixNano()) + } + + // Add back the transactions we removed to make sure they are added back in the correct order. 
+ for ii := 0; ii < txnToRemove; ii++ { + add(txns[ii]) + } + + // Iterate through buckets to get Fee-Time ordering. + allTxnsFeeTime := getTxns() + + // Get all the transactions and sort them by Fee-Time. + allTxns := txns[:] + allTxnsSorted := _testSortMempoolTxnsByFeeTime(allTxns, globalParams, timeOnly) + + // Now compare the tree set ordering with explicit sort ordering. + require.Equal(len(allTxnsFeeTime), len(allTxnsSorted)) + for ii := 0; ii < len(allTxnsFeeTime); ii++ { + require.Equal(allTxnsFeeTime[ii].FeePerKB, allTxnsSorted[ii].FeePerKB) + require.Equal(allTxnsFeeTime[ii].Added.UnixNano(), allTxnsSorted[ii].Added.UnixNano()) + } + + // Now remove all the transactions. + for ii := 0; ii < len(txns); ii++ { + remove(txns[ii]) + } + + // Make sure the fee bucket is empty. + checkEmpty() +} + +// TestComputeFeeBucketRanges checks that the fee bucket ranges are computed correctly and deterministically. +func TestComputeFeeBucketRanges(t *testing.T) { + require := require.New(t) + _ = require + + globalParams := _testGetDefaultGlobalParams() + baseRate, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() + + feeMins, feeMaxs := []uint64{}, []uint64{} + for ii := uint32(0); ii < 100; ii++ { + feeMin, feeMax := computeFeeTimeBucketRangeFromExponent(ii, baseRate, bucketMultiplier) + feeMins = append(feeMins, feeMin) + feeMaxs = append(feeMaxs, feeMax) + } + + for ii := 0; ii < 99; ii++ { + require.Equal(feeMaxs[ii], feeMins[ii+1]-1) + } + + for ii := uint32(0); ii < 100; ii++ { + feeMin, feeMax := computeFeeTimeBucketRangeFromExponent(ii, baseRate, bucketMultiplier) + require.Equal(feeMins[ii], feeMin) + require.Equal(feeMaxs[ii], feeMax) + } +} + +// TestComputeFeeBucketWithFee checks that the fee bucket exponent is computed correctly from a fee. 
+func TestComputeFeeBucketWithFee(t *testing.T) { + globalParams := _testGetDefaultGlobalParams() + baseRate, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() + + verifyFeeBucket := func(exponent uint32, fee uint64) bool { + feeMin, feeMax := computeFeeTimeBucketRangeFromExponent(exponent, baseRate, bucketMultiplier) + return fee >= feeMin && fee <= feeMax + } + + require := require.New(t) + _ = require + for ii := uint64(1000); ii < 100000; ii++ { + n := computeFeeTimeBucketExponentFromFeeNanosPerKB(ii, baseRate, bucketMultiplier) + require.True(verifyFeeBucket(n, ii)) + } +} diff --git a/lib/snapshot_test.go b/lib/snapshot_test.go index 64d504752..12a96dd4a 100644 --- a/lib/snapshot_test.go +++ b/lib/snapshot_test.go @@ -276,7 +276,6 @@ func TestFasterHashToCurve(t *testing.T) { if err := sem.Acquire(ctx, int64(maxWorkers)); err != nil { fmt.Println("FAILED") } - fmt.Println("YEAAAH") } func TestStateChecksumBirthdayParadox(t *testing.T) { From cb1d99f0ee48b52555210e30ed89b210fea90aed Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 6 Jul 2023 10:36:23 -0400 Subject: [PATCH 129/762] Add block proposer public keys and signature to block header (#577) * sa/add-block-propser-keys-and-signature-to-block-header * Update tests and block encoding * Add comment to hash function * Simplify hash function for PoS block header * Clean up comments * Address Nina's comments * Add unit test for signature encoding * Add test case for expected hash --- lib/network.go | 148 +++++++++++++++++++++++++++++++++++--------- lib/network_test.go | 66 ++++++++++++++++++-- 2 files changed, 180 insertions(+), 34 deletions(-) diff --git a/lib/network.go b/lib/network.go index 07091a1f9..478781bc2 100644 --- a/lib/network.go +++ b/lib/network.go @@ -23,6 +23,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" decredEC "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" + 
"github.com/deso-protocol/core/bls" merkletree "github.com/deso-protocol/go-merkle-tree" "github.com/ethereum/go-ethereum/crypto/ecies" "github.com/holiman/uint256" @@ -1892,9 +1893,8 @@ type MsgDeSoHeader struct { // The height of the block this header corresponds to. Height uint64 - // Nonce is only used for Proof of Work blocks, and will only be populated - // in MsgDeSoHeader versions 0 and 1. For all later versions, this field will - // default to a value of zero. + // Nonce is only used for Proof of Work blocks, with MsgDeSoHeader versions 0 and 1. + // For all later versions, this field will default to a value of zero. // // The nonce that is used by miners in order to produce valid blocks. // @@ -1903,37 +1903,53 @@ type MsgDeSoHeader struct { // no longer needed since HeaderVersion1 upgraded the nonce to 64 bits from 32 bits. Nonce uint64 - // ExtraNonce is only used for Proof of Work blocks, and will only be populated - // in MsgDeSoHeader versions 0 and 1. For all later versions, this field will - // default to zero. + // ExtraNonce is only used for Proof of Work blocks, with MsgDeSoHeader versions 0 and 1. + // For all later versions, this field will default to zero. // // An extra nonce that can be used to provide *even more* entropy for miners, in the // event that ASICs become powerful enough to have birthday problems in the future. ExtraNonce uint64 - // ProposedInView is only used for Proof of Stake blocks, and will only be populated - // in MsgDeSoHeader versions 2 and higher. For all earlier version, this field will - // default to zero. + // ProposerPublicKey is only used for Proof of Stake blocks, starting with MsgDeSoHeader + // version 2. For all earlier versions, this field will default to nil. // - // Denotes the view in which this block was proposed. + // The ECDSA public key of the validator who proposed this block. 
+ ProposerPublicKey *PublicKey + + // ProposerVotingPublicKey is only used for Proof of Stake blocks, starting with + // MsgDeSoHeader version 2. For all earlier versions, this field will default to nil. + // + // The BLS public key of the validator who proposed this block. + ProposerVotingPublicKey *bls.PublicKey + + // ProposedInView is only used for Proof of Stake blocks, starting with MsgDeSoHeader + // version 2. For all earlier versions, this field will default to nil. + // + // The view in which this block was proposed. ProposedInView uint64 - // ValidatorsVoteQC is only used for Proof of Stake blocks, and will only be - // populated in MsgDeSoHeader versions 2 and higher. For all earlier version, this - // field will be null. + // ValidatorsVoteQC is only used for Proof of Stake blocks, starting with MsgDeSoHeader + // version 2. For all earlier versions, this field will default to nil. // - // This corresponds to QC containing votes from 2/3 of validators for weighted by stake. + // This is a QC containing votes from 2/3 of validators weighted by stake. ValidatorsVoteQC *QuorumCertificate - // ValidatorsTimeoutAggregateQC is only used for Proof of Stake blocks, and will only be - // populated in MsgDeSoHeader versions 2 and higher. For all earlier version, this field - // will be null. + // ValidatorsTimeoutAggregateQC is only used for Proof of Stake blocks, starting with + // MsgDeSoHeader version 2. For all earlier versions, this field will default to nil. // // In the event of a timeout, this field will contain the aggregate QC constructed from // timeout messages from 2/3 of validators weighted by stake, and proves that they have - // time out. This value is set to null in normal cases where a regular block vote has + // timed out. This value is set to null in normal cases where a regular block vote has // taken place. 
ValidatorsTimeoutAggregateQC *TimeoutAggregateQuorumCertificate + + // ProposerVotePartialSignature is only used for Proof of Stake blocks, starting with + // MsgDeSoHeader version 2. For all earlier versions, this field will default to nil. + // + // The block proposer's partial BLS signature of the (ProposedInView, BlockHash) pair + // for the block. This signature proves that a particular validator proposed the block, + // and also acts as the proposer's vote for the block. + ProposerVotePartialSignature *bls.Signature } func HeaderSizeBytes() int { @@ -2090,12 +2106,26 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error // that MsgDeSoHeader version 2 does not need to be backwards compatible. retBytes = append(retBytes, UintToBuf(msg.TstampSecs)...) - // Height + // Height: similar to the field above, this field can be encoded to take + // up to the full 64 bits now that MsgDeSoHeader version 2 does not need to + // be backwards compatible. retBytes = append(retBytes, UintToBuf(msg.Height)...) // The Nonce and ExtraNonce fields are unused in version 2. We skip them // during both encoding and decoding. + // ProposerPublicKey + if msg.ProposerPublicKey == nil { + return nil, fmt.Errorf("EncodeHeaderVersion2: ProposerPublicKey must be non-nil") + } + retBytes = append(retBytes, msg.ProposerPublicKey.ToBytes()...) + + // ProposerVotingPublicKey + if msg.ProposerVotingPublicKey == nil { + return nil, fmt.Errorf("EncodeHeaderVersion2: ProposerVotingPublicKey must be non-nil") + } + retBytes = append(retBytes, EncodeBLSPublicKey(msg.ProposerVotingPublicKey)...) + // ProposedInView retBytes = append(retBytes, UintToBuf(msg.ProposedInView)...) @@ -2119,11 +2149,25 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error } retBytes = append(retBytes, encodedValidatorsTimeoutAggregateQC...) + // ProposerVotePartialSignature: we encode the signature if it's present and the preSignature + // flag is set to false. 
Otherwise, we encode an empty byte array as a placeholder. Note, any + // byte encoded output from this function must be decodable by the DecodeHeaderVersion2 + // function. + proposerSignatureBytes := []byte{} + if !preSignature { + // If the the preSignature flag is set to false, then the caller intends to encode the signature. + // The signature must be non-nil. + if msg.ProposerVotePartialSignature == nil { + return nil, fmt.Errorf("EncodeHeaderVersion2: ProposerVotePartialSignature must be non-nil") + } + proposerSignatureBytes = msg.ProposerVotePartialSignature.ToBytes() + } + retBytes = append(retBytes, EncodeByteArray(proposerSignatureBytes)...) + return retBytes, nil } func (msg *MsgDeSoHeader) ToBytes(preSignature bool) ([]byte, error) { - // Depending on the version, we decode the header differently. if msg.Version == HeaderVersion0 { return msg.EncodeHeaderVersion0(preSignature) @@ -2277,6 +2321,18 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { retHeader.Nonce = 0 retHeader.ExtraNonce = 0 + // ProposerPublicKey + retHeader.ProposerPublicKey, err = ReadPublicKey(rr) + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposerPublicKey") + } + + // ProposerVotingPublicKey + retHeader.ProposerVotingPublicKey, err = DecodeBLSPublicKey(rr) + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposerVotingPublicKey") + } + // ProposedInView retHeader.ProposedInView, err = ReadUvarint(rr) if err != nil { @@ -2295,6 +2351,19 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ValidatorsTimeoutAggregateQC") } + // ProposerVotePartialSignature: we decode the signature if it's present in the byte encoding. + // If it's not present, then we set the signature to nil. 
+ proposerSignatureBytes, err := DecodeByteArray(rr) + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposerVotePartialSignature") + } + if len(proposerSignatureBytes) != 0 { + retHeader.ProposerVotePartialSignature, err = (&bls.Signature{}).FromBytes(proposerSignatureBytes) + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposerVotePartialSignature") + } + } + return retHeader, nil } @@ -2344,17 +2413,31 @@ func (msg *MsgDeSoHeader) GetMsgType() MsgType { return MsgTypeHeader } -// Hash is a helper function to compute a hash of the header. Note that the header -// hash is special in that we always hash it using the ProofOfWorkHash rather than -// Sha256DoubleHash. +// Hash is a helper function to compute a hash of the header. For Proof of Work +// blocks headers, which have header version 0 or 1, this uses the specialized +// ProofOfWorkHash, which takes mining difficulty and hardware into consideration. +// +// For Proof of Stake block headers, which start header versions 2, it uses the +// simpler Sha256DoubleHash function. func (msg *MsgDeSoHeader) Hash() (*BlockHash, error) { - preSignature := false - headerBytes, err := msg.ToBytes(preSignature) + // The preSignature flag is unused during byte encoding in + // in header versions 0 and 1. We set it to true to ensure that + // it's forward compatible for versions 2 and beyond. + headerBytes, err := msg.ToBytes(true) if err != nil { return nil, errors.Wrap(err, "MsgDeSoHeader.Hash: ") } - return ProofOfWorkHash(headerBytes, msg.Version), nil + // Compute the specialized PoW hash for header versions 0 and 1. + if msg.Version == HeaderVersion0 || msg.Version == HeaderVersion1 { + return ProofOfWorkHash(headerBytes, msg.Version), nil + } + + // TODO: Do we need a new specialized hash function for Proof of Stake + // block headers? A simple SHA256 hash seems like it would be sufficient. 
+ // The use of ASICS is no longer a consideration, so we should be able to + // simplify the hash function used. + return Sha256DoubleHash(headerBytes), nil } func (msg *MsgDeSoHeader) String() string { @@ -2509,12 +2592,19 @@ func (msg *MsgDeSoBlock) EncodeBlockVersion1(preSignature bool) ([]byte, error) return data, nil } +func (msg *MsgDeSoBlock) EncodeBlockVersion2(preSignature bool) ([]byte, error) { + return msg.EncodeBlockCommmon(preSignature) +} + func (msg *MsgDeSoBlock) ToBytes(preSignature bool) ([]byte, error) { - if msg.Header.Version == HeaderVersion0 { + switch msg.Header.Version { + case HeaderVersion0: return msg.EncodeBlockVersion0(preSignature) - } else if msg.Header.Version == HeaderVersion1 { + case HeaderVersion1: return msg.EncodeBlockVersion1(preSignature) - } else { + case HeaderVersion2: + return msg.EncodeBlockVersion2(preSignature) + default: return nil, fmt.Errorf("MsgDeSoBlock.ToBytes: Error encoding version: %v", msg.Header.Version) } } diff --git a/lib/network_test.go b/lib/network_test.go index 0afb5322a..f56b318ef 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -123,7 +123,7 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { } testBitset := bitset.NewBitset().Set(0, true).Set(3, true) - _, testBLSSignature := _generateValidatorVotingPublicKeyAndSignature(t) + testBLSPublicKey, testBLSSignature := _generateValidatorVotingPublicKeyAndSignature(t) return &MsgDeSoHeader{ Version: 2, @@ -132,9 +132,11 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { TstampSecs: uint64(1678943210), Height: uint64(1321012345), // Nonce and ExtraNonce are unused and set to 0 starting in version 2. 
- Nonce: uint64(0), - ExtraNonce: uint64(0), - ProposedInView: uint64(1432101234), + Nonce: uint64(0), + ExtraNonce: uint64(0), + ProposerPublicKey: NewPublicKey(pkForTesting1), + ProposerVotingPublicKey: testBLSPublicKey, + ProposedInView: uint64(1432101234), // Use real signatures and public keys for the PoS fields ValidatorsVoteQC: &QuorumCertificate{ BlockHash: &testBlockHash, @@ -160,6 +162,7 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { Signature: testBLSSignature, }, }, + ProposerVotePartialSignature: testBLSSignature, } } @@ -210,13 +213,66 @@ func TestHeaderConversionAndReadWriteMessage(t *testing.T) { assert.NoError(err) assert.Equal(hdrPayload, data) - assert.Equalf(10, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), + assert.Equalf(13, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), "Number of fields in HEADER message is different from expected. "+ "Did you add a new field? If so, make sure the serialization code "+ "works, add the new field to the test case, and fix this error.") } } +func TestHeaderVersion2SignatureByteEncoding(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + _ = assert + _ = require + + expectedBlockHeader := createTestBlockHeaderVersion2(t) + + preSignatureBytes, err := expectedBlockHeader.ToBytes(true) + require.NoError(err) + require.NotZero(preSignatureBytes) + + postSignatureBytes, err := expectedBlockHeader.ToBytes(false) + require.NoError(err) + require.NotZero(postSignatureBytes) + + // The length of the post-signature bytes will always be equal to the length of the + // pre-signature bytes + the length of the signature. This is always the case for the + // following reason: + // - The end of the pre-signature bytes have a []byte{0} appended to them to indicate + // that the signature is not present. + // - The end of the post-signature bytes have []byte{len(signature)} + signature.ToBytes() + // appended, which encode the signature. 
+ // The difference in length between the two will always be the length of the signature, which + // is a fixed size 32 byte BLS signature. + require.Equal( + len(postSignatureBytes), + len(preSignatureBytes)+len(expectedBlockHeader.ProposerVotePartialSignature.ToBytes()), + ) +} + +func TestHeaderVersion2Hash(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + _ = assert + _ = require + + expectedBlockHeader := createTestBlockHeaderVersion2(t) + + headerHash, err := expectedBlockHeader.Hash() + require.NoError(err) + require.NotZero(len(headerHash)) + + preSignatureBytes, err := expectedBlockHeader.ToBytes(true) + require.NoError(err) + require.NotZero(preSignatureBytes) + + // Re-compute the expected hash manually and make sure it's using Sha256DoubleHash + // as expected. + expectedHeaderHash := Sha256DoubleHash(preSignatureBytes) + require.Equal(expectedHeaderHash[:], headerHash[:]) +} + func TestGetHeadersSerialization(t *testing.T) { assert := assert.New(t) require := require.New(t) From 603f52542e9aad1d04b1f85fc045bcd2a3b280e8 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Thu, 6 Jul 2023 10:51:57 -0400 Subject: [PATCH 130/762] Clean up straggler comments --- collections/bitset/bitset.go | 2 +- lib/constants.go | 3 +++ lib/network.go | 9 +++++---- lib/pos_network.go | 12 ++++++++++++ 4 files changed, 21 insertions(+), 5 deletions(-) diff --git a/collections/bitset/bitset.go b/collections/bitset/bitset.go index 3cd9428b1..085013b67 100644 --- a/collections/bitset/bitset.go +++ b/collections/bitset/bitset.go @@ -60,7 +60,7 @@ func (b *Bitset) Size() int { return b.store.BitLen() } -// Return the absolute value of the underlying the BigSet as a big-endian +// Return the absolute value of the underlying the BigInt as a big-endian // byte slice. The output is compressed such that if the underlying // big.Int had zeros at the highest bits, they will be removed // from the output. 
diff --git a/lib/constants.go b/lib/constants.go index af7d892c5..7d5abb260 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -108,9 +108,12 @@ const ( // It includes several changes to the header format: // - Nonce field is deprecated // - ExtraNonce field is deprecated + // - ProposerPublicKey field is added + // - ProposerVotingPublicKey field is added // - ProposedInView field is added // - ValidatorsVoteQC field is added // - ValidatorsTimeoutAggregateQC field is added + // - ProposerVotePartialSignature field is added // // This format change is a breaking change that is not backwards-compatible with // versions 0 and 1. diff --git a/lib/network.go b/lib/network.go index 478781bc2..3068164b8 100644 --- a/lib/network.go +++ b/lib/network.go @@ -2102,7 +2102,7 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error } retBytes = append(retBytes, transactionMerkleRoot[:]...) - // TstampSecs: this field can be encoded to take up the full 64 bits now + // TstampSecs: this field can be encoded to take up to the full 64 bits now // that MsgDeSoHeader version 2 does not need to be backwards compatible. retBytes = append(retBytes, UintToBuf(msg.TstampSecs)...) @@ -2150,9 +2150,10 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error retBytes = append(retBytes, encodedValidatorsTimeoutAggregateQC...) // ProposerVotePartialSignature: we encode the signature if it's present and the preSignature - // flag is set to false. Otherwise, we encode an empty byte array as a placeholder. Note, any - // byte encoded output from this function must be decodable by the DecodeHeaderVersion2 - // function. + // flag is set to false. Otherwise, we encode an empty byte array as a placeholder. The placeholder + // ensures, that the DecodeHeaderVersion2 function can properly recognize encoding where a signature + // isn't populated. 
It ensures that every possible output from EncodeHeaderVersion2 can be decoded by + // DecodeHeaderVersion2. proposerSignatureBytes := []byte{} if !preSignature { // If the the preSignature flag is set to false, then the caller intends to encode the signature. diff --git a/lib/pos_network.go b/lib/pos_network.go index fc92b81c7..775281ee7 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -15,6 +15,12 @@ import ( // ================================================================== type MsgDeSoValidatorVote struct { + // We use the MsgVersion field to determine how to encode and decode this message to + // bytes when sending it over the wire. Note, when receiving a message for a peer, + // we won't always know the block height ahead of time, so we can't rely on block + // height gating or encoder migrations to determine how to decode the message. So, + // we rely on a separate message version field whose sole purpose to define how to + // encode and decode the message. MsgVersion MsgValidatorVoteVersion // The ECDSA public key for the validator who constructed this vote message. @@ -136,6 +142,12 @@ func (msg *MsgDeSoValidatorVote) FromBytes(data []byte) error { // ================================================================== type MsgDeSoValidatorTimeout struct { + // We use the MsgVersion field to determine how to encode and decode this message to + // bytes when sending it over the wire. Note, when receiving a message for a peer, + // we won't always know the block height ahead of time, so we can't rely on block + // height gating or encoder migrations to determine how to decode the message. So, + // we rely on a separate message version field whose sole purpose to define how to + // encode and decode the message. MsgVersion MsgValidatorTimeoutVersion // The ECDSA public key for the validator who constructed this timeout message. 
From 03b003e38eecf9c30b22365eef703e1c5efbd41a Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 7 Jul 2023 13:48:24 -0400 Subject: [PATCH 131/762] sa/add-max-validators-and-max-stakers-global-params (#573) --- .gitignore | 1 + lib/block_view.go | 12 ++++++++++++ lib/block_view_types.go | 22 ++++++++++++++++++++++ lib/constants.go | 22 ++++++++++++++++++++++ lib/pos_snapshot_entries.go | 11 +++++++++-- 5 files changed, 66 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index dc0267338..608a45416 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ backend core .idea +.vscode local_scripts/* lib/mem.log diff --git a/lib/block_view.go b/lib/block_view.go index c22e10a7a..10979be89 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3035,6 +3035,18 @@ func (bav *UtxoView) _connectUpdateGlobalParams( return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode LeaderScheduleMaxNumValidators as uint64") } } + if len(extraData[ValidatorSetMaxNumValidatorsKey]) > 0 { + newGlobalParamsEntry.ValidatorSetMaxNumValidators, bytesRead = Uvarint(extraData[ValidatorSetMaxNumValidatorsKey]) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode ValidatorSetMaxNumValidators as uint64") + } + } + if len(extraData[StakingRewardDistributionMaxNumStakersKey]) > 0 { + newGlobalParamsEntry.StakingRewardDistributionMaxNumStakers, bytesRead = Uvarint(extraData[StakingRewardDistributionMaxNumStakersKey]) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode StakingRewardDistributionMaxNumStakers as uint64") + } + } if len(extraData[EpochDurationNumBlocksKey]) > 0 { newGlobalParamsEntry.EpochDurationNumBlocks, bytesRead = Uvarint(extraData[EpochDurationNumBlocksKey]) if bytesRead <= 0 { diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 52f8a29a1..7e19d48e5 100644 --- a/lib/block_view_types.go 
+++ b/lib/block_view_types.go @@ -3785,6 +3785,16 @@ type GlobalParamsEntry struct { // are included when generating a new Proof-of-Stake leader schedule. LeaderScheduleMaxNumValidators uint64 + // ValidatorSetMaxNumValidators is the maximum number of validators that + // are included in the active validator set every epoch in the Proof-of-Stake + // consensus. + ValidatorSetMaxNumValidators uint64 + + // StakingRewardDistributionMaxNumStakers is the maximum number of stake entries + // that are eligible to receive block rewards every epoch in the Proof-of-Stake + // consensus. + StakingRewardDistributionMaxNumStakers uint64 + // EpochDurationNumBlocks is the number of blocks included in one epoch. EpochDurationNumBlocks uint64 @@ -3805,6 +3815,8 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { StakeLockupEpochDuration: gp.StakeLockupEpochDuration, ValidatorJailEpochDuration: gp.ValidatorJailEpochDuration, LeaderScheduleMaxNumValidators: gp.LeaderScheduleMaxNumValidators, + ValidatorSetMaxNumValidators: gp.ValidatorSetMaxNumValidators, + StakingRewardDistributionMaxNumStakers: gp.StakingRewardDistributionMaxNumStakers, EpochDurationNumBlocks: gp.EpochDurationNumBlocks, JailInactiveValidatorGracePeriodEpochs: gp.JailInactiveValidatorGracePeriodEpochs, } @@ -3825,6 +3837,8 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.StakeLockupEpochDuration)...) data = append(data, UintToBuf(gp.ValidatorJailEpochDuration)...) data = append(data, UintToBuf(gp.LeaderScheduleMaxNumValidators)...) + data = append(data, UintToBuf(gp.ValidatorSetMaxNumValidators)...) + data = append(data, UintToBuf(gp.StakingRewardDistributionMaxNumStakers)...) data = append(data, UintToBuf(gp.EpochDurationNumBlocks)...) data = append(data, UintToBuf(gp.JailInactiveValidatorGracePeriodEpochs)...) 
} @@ -3873,6 +3887,14 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading LeaderScheduleMaxNumValidators: ") } + gp.ValidatorSetMaxNumValidators, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading ValidatorSetMaxNumValidators: ") + } + gp.StakingRewardDistributionMaxNumStakers, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading StakingRewardDistributionMaxNumStakers: ") + } gp.EpochDurationNumBlocks, err = ReadUvarint(rr) if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading EpochDurationNumBlocks: ") diff --git a/lib/constants.go b/lib/constants.go index ef767666d..2460783c7 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -627,6 +627,14 @@ type DeSoParams struct { // that are included when generating a new Proof-of-Stake leader schedule. DefaultLeaderScheduleMaxNumValidators uint64 + // DefaultValidatorSetMaxNumValidators is the default maximum number of validators + // that are included in the validator set for any given epoch. + DefaultValidatorSetMaxNumValidators uint64 + + // DefaultStakingRewardDistributionMaxNumStakers is the default number of stake entries + // that are included in the staking reward distribution in each epoch. + DefaultStakingRewardDistributionMaxNumStakers uint64 + // DefaultEpochDurationNumBlocks is the default number of blocks included in one epoch. DefaultEpochDurationNumBlocks uint64 @@ -1024,6 +1032,12 @@ var DeSoMainnetParams = DeSoParams{ // The max number of validators included in a leader schedule. DefaultLeaderScheduleMaxNumValidators: uint64(100), + // The max number of validators included in a validator set for any given epoch. 
+ DefaultValidatorSetMaxNumValidators: uint64(1000), + + // The max number of stakers included in a staking rewards distribution every epoch. + DefaultStakingRewardDistributionMaxNumStakers: uint64(10000), + // The number of blocks in one epoch DefaultEpochDurationNumBlocks: uint64(3600), @@ -1273,6 +1287,12 @@ var DeSoTestnetParams = DeSoParams{ // The max number of validators included in a leader schedule. DefaultLeaderScheduleMaxNumValidators: uint64(100), + // The max number of validators included in a validator set for any given epoch. + DefaultValidatorSetMaxNumValidators: uint64(1000), + + // The max number of stakers included in a staking rewards distribution every epoch. + DefaultStakingRewardDistributionMaxNumStakers: uint64(10000), + // The number of blocks in one epoch DefaultEpochDurationNumBlocks: uint64(3600), @@ -1326,6 +1346,8 @@ const ( StakeLockupEpochDurationKey = "StakeLockupEpochDuration" ValidatorJailEpochDurationKey = "ValidatorJailEpochDuration" LeaderScheduleMaxNumValidatorsKey = "LeaderScheduleMaxNumValidators" + ValidatorSetMaxNumValidatorsKey = "ValidatorSetMaxNumValidators" + StakingRewardDistributionMaxNumStakersKey = "StakingRewardDistributionMaxNumStakers" EpochDurationNumBlocksKey = "EpochDurationNumBlocks" JailInactiveValidatorGracePeriodEpochsKey = "JailInactiveValidatorGracePeriodEpochs" diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 464a283ad..6146d66f4 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -3,12 +3,13 @@ package lib import ( "bytes" "fmt" + "math" + "sort" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" - "math" - "sort" ) const SnapshotLookbackNumEpochs uint64 = 2 @@ -97,6 +98,12 @@ func _mergeGlobalParamEntryDefaults(bav *UtxoView, globalParamsEntry *GlobalPara if globalParamsEntryCopy.LeaderScheduleMaxNumValidators == 0 { globalParamsEntryCopy.LeaderScheduleMaxNumValidators = 
bav.Params.DefaultLeaderScheduleMaxNumValidators } + if globalParamsEntryCopy.ValidatorSetMaxNumValidators == 0 { + globalParamsEntryCopy.ValidatorSetMaxNumValidators = bav.Params.DefaultValidatorSetMaxNumValidators + } + if globalParamsEntryCopy.StakingRewardDistributionMaxNumStakers == 0 { + globalParamsEntryCopy.StakingRewardDistributionMaxNumStakers = bav.Params.DefaultStakingRewardDistributionMaxNumStakers + } if globalParamsEntryCopy.EpochDurationNumBlocks == 0 { globalParamsEntryCopy.EpochDurationNumBlocks = bav.Params.DefaultEpochDurationNumBlocks } From b3bb387d44210f8cdf42f4019ab8e1f803cdf31b Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 7 Jul 2023 14:30:42 -0400 Subject: [PATCH 132/762] Add byte encoder/decoder for optional BLS signatures (#580) --- lib/block_view_validator.go | 15 +++++++++++++++ lib/network.go | 28 +++++++++++----------------- 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 7bcf2fbc6..a84d0b226 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -2251,6 +2251,15 @@ func DecodeBLSPublicKey(rr io.Reader) (*bls.PublicKey, error) { return (&bls.PublicKey{}).FromBytes(publicKeyBytes) } +// EncodeOptionalBLSSignature is defined explicitly as a wrapper encoding function +// that has known behavior for nil values. 
It will always have the following behavior: +// - If blsSignature is nil, then it returns []byte{0} as a placeholder +// - If the blsSignature is not nil, then it returns +// []byte{len(blsSignatureBytes), blsSignatureBytes...} +func EncodeOptionalBLSSignature(blsSignature *bls.Signature) []byte { + return EncodeBLSSignature(blsSignature) +} + func EncodeBLSSignature(blsSignature *bls.Signature) []byte { var blsSignatureBytes []byte if blsSignature != nil { @@ -2259,6 +2268,12 @@ func EncodeBLSSignature(blsSignature *bls.Signature) []byte { return EncodeByteArray(blsSignatureBytes) } +// DecodeOptionalBLSSignature is defined explicitly as a bijective decoding function +// for EncodeOptionalBLSSignature above. +func DecodeOptionalBLSSignature(rr io.Reader) (*bls.Signature, error) { + return DecodeBLSSignature(rr) +} + func DecodeBLSSignature(rr io.Reader) (*bls.Signature, error) { signatureBytes, err := DecodeByteArray(rr) if err != nil { diff --git a/lib/network.go b/lib/network.go index 3068164b8..6954ecbee 100644 --- a/lib/network.go +++ b/lib/network.go @@ -2149,21 +2149,21 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error } retBytes = append(retBytes, encodedValidatorsTimeoutAggregateQC...) + // If preSignature=false, then the ProposerVotePartialSignature must be populated. + if !preSignature && msg.ProposerVotePartialSignature == nil { + return nil, fmt.Errorf("EncodeHeaderVersion2: ProposerVotePartialSignature must be non-nil when preSignature=false") + } + // ProposerVotePartialSignature: we encode the signature if it's present and the preSignature // flag is set to false. Otherwise, we encode an empty byte array as a placeholder. The placeholder - // ensures, that the DecodeHeaderVersion2 function can properly recognize encoding where a signature + // ensures that the DecodeHeaderVersion2 function can properly recognize encodings where the signature // isn't populated. 
It ensures that every possible output from EncodeHeaderVersion2 can be decoded by // DecodeHeaderVersion2. - proposerSignatureBytes := []byte{} - if !preSignature { - // If the the preSignature flag is set to false, then the caller intends to encode the signature. - // The signature must be non-nil. - if msg.ProposerVotePartialSignature == nil { - return nil, fmt.Errorf("EncodeHeaderVersion2: ProposerVotePartialSignature must be non-nil") - } - proposerSignatureBytes = msg.ProposerVotePartialSignature.ToBytes() + if preSignature { + retBytes = append(retBytes, EncodeOptionalBLSSignature(nil)...) + } else { + retBytes = append(retBytes, EncodeOptionalBLSSignature(msg.ProposerVotePartialSignature)...) } - retBytes = append(retBytes, EncodeByteArray(proposerSignatureBytes)...) return retBytes, nil } @@ -2354,16 +2354,10 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { // ProposerVotePartialSignature: we decode the signature if it's present in the byte encoding. // If it's not present, then we set the signature to nil. 
- proposerSignatureBytes, err := DecodeByteArray(rr) + retHeader.ProposerVotePartialSignature, err = DecodeOptionalBLSSignature(rr) if err != nil { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposerVotePartialSignature") } - if len(proposerSignatureBytes) != 0 { - retHeader.ProposerVotePartialSignature, err = (&bls.Signature{}).FromBytes(proposerSignatureBytes) - if err != nil { - return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposerVotePartialSignature") - } - } return retHeader, nil } From 2d2a104ea3318645072dab3b35039d926fed6e25 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Fri, 7 Jul 2023 17:59:50 -0400 Subject: [PATCH 133/762] Decouple validator jailing from validator snapshotting --- lib/block_view_validator.go | 59 ++++++++++++++++++++++++++++++++++ lib/pos_epoch_complete_hook.go | 24 +++++++++++--- lib/pos_snapshot_entries.go | 34 +------------------- 3 files changed, 79 insertions(+), 38 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 7bcf2fbc6..9e8421245 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1937,6 +1937,65 @@ func (bav *UtxoView) GetGlobalActiveStakeAmountNanos() (*uint256.Int, error) { return globalActiveStakeAmountNanos, nil } +func (bav *UtxoView) JailInactiveValidators(blockHeight uint64) error { + // First, iterate through all of the !isDeleted ValidatorEntries in the UtxoView and + // jail any that are inactive. + var utxoViewValidatorPKIDs []*PKID + for _, validatorEntry := range bav.ValidatorPKIDToValidatorEntry { + if validatorEntry.isDeleted { + continue + } + + // Check if we should jail the validator. + shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry, blockHeight) + if err != nil { + return errors.Wrapf( + err, + "JailInactiveValidators: problem determining if should jail validator %v: ", + validatorEntry.ValidatorPKID, + ) + } + + // Jail them if so. 
+ if shouldJailValidator { + if err = bav.JailValidator(validatorEntry); err != nil { + return errors.Wrapf( + err, "JailInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, + ) + } + } + + // We don't want to retrieve any ValidatorEntries from the db that are present in the UtxoView. + utxoViewValidatorPKIDs = append(utxoViewValidatorPKIDs, validatorEntry.ValidatorPKID) + } + + // Second, iterate through all the ValidatorEntries in the db and jail any that are inactive. + dbValidatorEntries, err := DBEnumerateAllCurrentValidators(bav.Handle, utxoViewValidatorPKIDs) + if err != nil { + return errors.Wrapf(err, "JailInactiveValidators: problem retrieving ValidatorEntries: ") + } + + for _, validatorEntry := range dbValidatorEntries { + + // Check if we should jail the validator. + shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry, blockHeight) + if err != nil { + return errors.Wrapf( + err, "JailInactiveValidators: problem determining if should jail validator %v: ", validatorEntry.ValidatorPKID, + ) + } + // Jail them if so. + if shouldJailValidator { + if err = bav.JailValidator(validatorEntry); err != nil { + return errors.Wrapf( + err, "JailInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, + ) + } + } + } + return nil +} + func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry, blockHeight uint64) (bool, error) { // Return false if we haven't switched from PoW to PoS yet. 
Otherwise, // there would be an edge case where all validators will get jailed diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index b0e12c93b..7a2e6ff6b 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -1,8 +1,9 @@ package lib import ( - "github.com/pkg/errors" "math" + + "github.com/pkg/errors" ) func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) { @@ -26,6 +27,15 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) return currentEpochEntry.FinalBlockHeight == blockHeight, nil } +// RunEpochCompleteHook performs all of the necessary end-of-epoch operations, when connecting the final +// block of a epoch. Order of operations: +// 1. Snapshot the current GlobalParamsEntry. +// 2. Jail all inactive validators from the current snapshot validator set. +// 3. Snapshot the current validator set. +// 4. Snapshot the current GlobalActiveStakeAmountNanos. +// 5. Generate + snapshot a leader schedule. +// 6. Compute the final block height for the next epoch. +// 7. Transition CurrentEpochEntry to the next epoch. func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Rolls-over the current epoch into a new one. Handles the associated snapshotting + accounting. @@ -54,10 +64,14 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Snapshot the current GlobalParamsEntry. bav._setSnapshotGlobalParamsEntry(bav.GlobalParamsEntry, currentEpochEntry.EpochNumber) - // Snapshot the current ValidatorEntries. This loops through all validators to snapshot them in O(N). - // To save on runtime, in this loop we also check if we should jail each validator and jail them if so. - // We optionally jail a validator after we snapshot them. A jailed validator should be considered jailed - // in the new epoch starting after this OnEpochCompleteHook, and not the previous epoch which is snapshot. 
+ // Jail all inactive validators from the current snapshot validator set. This is an O(n) operation + // that loops through all validators and jails them if they are inactive. A jailed validator should be + // considered jailed in the next epoch we are transition into. + if err = bav.JailInactiveValidators(blockHeight); err != nil { + return errors.Wrapf(err, "RunEpochCompleteHook: problem jailing inactive validators: ") + } + + // Snapshot the current ValidatorEntries. This loops through all validators to snapshot them in O(n). if err = bav.SnapshotCurrentValidators(currentEpochEntry.EpochNumber, blockHeight); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: problem snapshotting validators: ") } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 6146d66f4..88607becb 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -213,25 +213,8 @@ func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64, blo if !validatorEntry.isDeleted { // We only want to snapshot !isDeleted ValidatorEntries. bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) - - // Check if we should jail the validator. - shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry, blockHeight) - if err != nil { - return errors.Wrapf( - err, - "SnapshotValidators: problem determining if should jail validator %v: ", - validatorEntry.ValidatorPKID, - ) - } - // Jail them if so. - if shouldJailValidator { - if err = bav.JailValidator(validatorEntry); err != nil { - return errors.Wrapf( - err, "SnapshotValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, - ) - } - } } + // We don't want to retrieve any ValidatorEntries from the db that are present in the UtxoView. 
utxoViewValidatorPKIDs = append(utxoViewValidatorPKIDs, validatorEntry.ValidatorPKID) } @@ -243,21 +226,6 @@ func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64, blo for _, validatorEntry := range dbValidatorEntries { bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) - // Check if we should jail the validator. - shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry, blockHeight) - if err != nil { - return errors.Wrapf( - err, "SnapshotValidators: problem determining if should jail validator %v: ", validatorEntry.ValidatorPKID, - ) - } - // Jail them if so. - if shouldJailValidator { - if err = bav.JailValidator(validatorEntry); err != nil { - return errors.Wrapf( - err, "SnapshotValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, - ) - } - } } return nil } From 5acd4ed536036644929d83d0afb786b6886bcdc6 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Fri, 7 Jul 2023 18:32:16 -0400 Subject: [PATCH 134/762] Break up epoch completed hook into two phases --- lib/block_view_validator.go | 14 +++++------ lib/pos_epoch_complete_hook.go | 38 +++++++++++++++++------------ lib/pos_epoch_complete_hook_test.go | 3 ++- 3 files changed, 32 insertions(+), 23 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 9e8421245..7fb6439a6 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1937,7 +1937,7 @@ func (bav *UtxoView) GetGlobalActiveStakeAmountNanos() (*uint256.Int, error) { return globalActiveStakeAmountNanos, nil } -func (bav *UtxoView) JailInactiveValidators(blockHeight uint64) error { +func (bav *UtxoView) JailAllInactiveValidators(blockHeight uint64) error { // First, iterate through all of the !isDeleted ValidatorEntries in the UtxoView and // jail any that are inactive. 
var utxoViewValidatorPKIDs []*PKID @@ -1951,7 +1951,7 @@ func (bav *UtxoView) JailInactiveValidators(blockHeight uint64) error { if err != nil { return errors.Wrapf( err, - "JailInactiveValidators: problem determining if should jail validator %v: ", + "JailAllInactiveValidators: problem determining if should jail validator %v: ", validatorEntry.ValidatorPKID, ) } @@ -1960,7 +1960,7 @@ func (bav *UtxoView) JailInactiveValidators(blockHeight uint64) error { if shouldJailValidator { if err = bav.JailValidator(validatorEntry); err != nil { return errors.Wrapf( - err, "JailInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, + err, "JailAllInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, ) } } @@ -1972,27 +1972,27 @@ func (bav *UtxoView) JailInactiveValidators(blockHeight uint64) error { // Second, iterate through all the ValidatorEntries in the db and jail any that are inactive. dbValidatorEntries, err := DBEnumerateAllCurrentValidators(bav.Handle, utxoViewValidatorPKIDs) if err != nil { - return errors.Wrapf(err, "JailInactiveValidators: problem retrieving ValidatorEntries: ") + return errors.Wrapf(err, "JailAllInactiveValidators: problem retrieving ValidatorEntries: ") } for _, validatorEntry := range dbValidatorEntries { - // Check if we should jail the validator. shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry, blockHeight) if err != nil { return errors.Wrapf( - err, "JailInactiveValidators: problem determining if should jail validator %v: ", validatorEntry.ValidatorPKID, + err, "JailAllInactiveValidators: problem determining if should jail validator %v: ", validatorEntry.ValidatorPKID, ) } // Jail them if so. 
if shouldJailValidator { if err = bav.JailValidator(validatorEntry); err != nil { return errors.Wrapf( - err, "JailInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, + err, "JailAllInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, ) } } } + return nil } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 7a2e6ff6b..3f54c0cdc 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -27,15 +27,23 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) return currentEpochEntry.FinalBlockHeight == blockHeight, nil } -// RunEpochCompleteHook performs all of the necessary end-of-epoch operations, when connecting the final -// block of a epoch. Order of operations: +// RunEpochCompleteHook performs all of the end-of-epoch operations when connecting the final +// block of a epoch. There epoch completion has two steps. +// +// Step 1: Create snapshots of current state. Snapshotting operations here should only create new +// snapshot state. They should have no other side effects that mutate the existing state of the view. // 1. Snapshot the current GlobalParamsEntry. -// 2. Jail all inactive validators from the current snapshot validator set. -// 3. Snapshot the current validator set. -// 4. Snapshot the current GlobalActiveStakeAmountNanos. -// 5. Generate + snapshot a leader schedule. -// 6. Compute the final block height for the next epoch. -// 7. Transition CurrentEpochEntry to the next epoch. +// 2. Snapshot the current validator set. +// 3. Snapshot the current GlobalActiveStakeAmountNanos. +// 4. Snapshot the leader schedule. +// +// Step 2: Transition to the next epoch. This runs all state-mutating operations that need to be run for +// the epoch transition. We always perform state-mutating operations after creating snapshots. 
This way, +// the snapshot created at the end of epoch n always reflects the state of the view at the end of epoch n. +// And it does not reflect the state changes that occur AFTER epoch n ends and before epoch n-1 BEGINS. +// 1. Jail all inactive validators from the current snapshot validator set. +// 2. Compute the final block height for the next epoch. +// 3. Transition CurrentEpochEntry to the next epoch. func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Rolls-over the current epoch into a new one. Handles the associated snapshotting + accounting. @@ -64,13 +72,6 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Snapshot the current GlobalParamsEntry. bav._setSnapshotGlobalParamsEntry(bav.GlobalParamsEntry, currentEpochEntry.EpochNumber) - // Jail all inactive validators from the current snapshot validator set. This is an O(n) operation - // that loops through all validators and jails them if they are inactive. A jailed validator should be - // considered jailed in the next epoch we are transition into. - if err = bav.JailInactiveValidators(blockHeight); err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: problem jailing inactive validators: ") - } - // Snapshot the current ValidatorEntries. This loops through all validators to snapshot them in O(n). if err = bav.SnapshotCurrentValidators(currentEpochEntry.EpochNumber, blockHeight); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: problem snapshotting validators: ") @@ -103,6 +104,13 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving SnapshotGlobalParamsEntry: ") } + // Jail all inactive validators from the current snapshot validator set. This is an O(n) operation + // that loops through all validators and jails them if they are inactive. A jailed validator should be + // considered jailed in the next epoch we are transition into. 
+ if err = bav.JailAllInactiveValidators(blockHeight); err != nil { + return errors.Wrapf(err, "RunEpochCompleteHook: problem jailing all inactive validators: ") + } + // Calculate the NextEpochFinalBlockHeight. nextEpochFinalBlockHeight, err := SafeUint64().Add(blockHeight, snapshotGlobalParamsEntry.EpochDurationNumBlocks) if err != nil { diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 522129103..757d61a98 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -4,9 +4,10 @@ package lib import ( "fmt" + "testing" + "github.com/holiman/uint256" "github.com/stretchr/testify/require" - "testing" ) func TestIsLastBlockInCurrentEpoch(t *testing.T) { From 42f9d9e79d7955ed4af993459c596cdebddb9fb2 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Mon, 10 Jul 2023 13:56:44 -0400 Subject: [PATCH 135/762] Address Nina's feedback --- lib/block_view_validator.go | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 7fb6439a6..0b45a4a12 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1942,6 +1942,9 @@ func (bav *UtxoView) JailAllInactiveValidators(blockHeight uint64) error { // jail any that are inactive. var utxoViewValidatorPKIDs []*PKID for _, validatorEntry := range bav.ValidatorPKIDToValidatorEntry { + // We don't want to retrieve any ValidatorEntries from the db that are present in the UtxoView. + utxoViewValidatorPKIDs = append(utxoViewValidatorPKIDs, validatorEntry.ValidatorPKID) + if validatorEntry.isDeleted { continue } @@ -1957,16 +1960,15 @@ func (bav *UtxoView) JailAllInactiveValidators(blockHeight uint64) error { } // Jail them if so. 
- if shouldJailValidator { - if err = bav.JailValidator(validatorEntry); err != nil { - return errors.Wrapf( - err, "JailAllInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, - ) - } + if !shouldJailValidator { + continue } - // We don't want to retrieve any ValidatorEntries from the db that are present in the UtxoView. - utxoViewValidatorPKIDs = append(utxoViewValidatorPKIDs, validatorEntry.ValidatorPKID) + if err = bav.JailValidator(validatorEntry); err != nil { + return errors.Wrapf( + err, "JailAllInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, + ) + } } // Second, iterate through all the ValidatorEntries in the db and jail any that are inactive. @@ -1984,12 +1986,14 @@ func (bav *UtxoView) JailAllInactiveValidators(blockHeight uint64) error { ) } // Jail them if so. - if shouldJailValidator { - if err = bav.JailValidator(validatorEntry); err != nil { - return errors.Wrapf( - err, "JailAllInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, - ) - } + if !shouldJailValidator { + continue + } + + if err = bav.JailValidator(validatorEntry); err != nil { + return errors.Wrapf( + err, "JailAllInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, + ) } } From 770875769a1c8c1c2d181b7e99e63a11076d367c Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Mon, 10 Jul 2023 14:55:42 -0400 Subject: [PATCH 136/762] Address typo --- lib/pos_epoch_complete_hook.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 3f54c0cdc..57a585fda 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -40,7 +40,7 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) // Step 2: Transition to the next epoch. This runs all state-mutating operations that need to be run for // the epoch transition. 
We always perform state-mutating operations after creating snapshots. This way, // the snapshot created at the end of epoch n always reflects the state of the view at the end of epoch n. -// And it does not reflect the state changes that occur AFTER epoch n ends and before epoch n-1 BEGINS. +// And it does not reflect the state changes that occur AFTER epoch n ends and before epoch n+1 BEGINS. // 1. Jail all inactive validators from the current snapshot validator set. // 2. Compute the final block height for the next epoch. // 3. Transition CurrentEpochEntry to the next epoch. From 7631183e55078e1e454003d0e3ef59482610be71 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Mon, 10 Jul 2023 17:02:18 -0400 Subject: [PATCH 137/762] Clean up commments --- lib/block_view_validator.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 0b45a4a12..ca38ed7a4 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1959,11 +1959,12 @@ func (bav *UtxoView) JailAllInactiveValidators(blockHeight uint64) error { ) } - // Jail them if so. + // If this validator should not be jailed, continue to the next validator. if !shouldJailValidator { continue } + // If we get here, then the validator should be jailed. if err = bav.JailValidator(validatorEntry); err != nil { return errors.Wrapf( err, "JailAllInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, @@ -1985,11 +1986,13 @@ func (bav *UtxoView) JailAllInactiveValidators(blockHeight uint64) error { err, "JailAllInactiveValidators: problem determining if should jail validator %v: ", validatorEntry.ValidatorPKID, ) } - // Jail them if so. + + // If this validator should not be jailed, continue to the next validator. if !shouldJailValidator { continue } + // If we get here, then the validator should be jailed. 
if err = bav.JailValidator(validatorEntry); err != nil { return errors.Wrapf( err, "JailAllInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, From 152fab9341dd66ff1125d644c123108629276915 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 10 Jul 2023 17:36:43 -0400 Subject: [PATCH 138/762] PoS Snapshot Top n Active Validators as the Validator Set (#582) * PoS Snapshot Top n Active Validators as the Validator Set * Fix tests * Typos in comments * Rearrange things * Undo renames * Update badger indices --- lib/db_utils.go | 6 ++--- lib/pos_epoch_complete_hook.go | 14 +++++++---- lib/pos_epoch_complete_hook_test.go | 23 +++++++----------- lib/pos_leader_schedule.go | 8 +++---- lib/pos_snapshot_entries.go | 37 +++++------------------------ 5 files changed, 30 insertions(+), 58 deletions(-) diff --git a/lib/db_utils.go b/lib/db_utils.go index c6a28f06f..7d97ed759 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -538,9 +538,9 @@ type DBPrefixes struct { PrefixSnapshotValidatorByPKID []byte `prefix_id:"[86]" is_state:"true"` // PrefixSnapshotValidatorByStatusAndStake: Retrieve stake-ordered active ValidatorEntries by SnapshotAtEpochNumber. - // Prefix, , , , -> nil + // Prefix, , , -> nil // Note: we parse the ValidatorPKID from the key and the value is nil to save space. - PrefixSnapshotValidatorByStatusAndStake []byte `prefix_id:"[87]" is_state:"true"` + PrefixSnapshotValidatorByStake []byte `prefix_id:"[87]" is_state:"true"` // PrefixSnapshotGlobalActiveStakeAmountNanos: Retrieve a snapshot GlobalActiveStakeAmountNanos by SnapshotAtEpochNumber. 
// Prefix, -> *uint256.Int @@ -779,7 +779,7 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByPKID) { // prefix_id:"[86]" return true, &ValidatorEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByStatusAndStake) { + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByStake) { // prefix_id:"[87]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanos) { diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 57a585fda..b24c2e3de 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -69,12 +69,18 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { return errors.New("RunEpochCompleteHook: CurrentEpochEntry is nil, this should never happen") } + currentGlobalParamsEntry := bav.GetCurrentGlobalParamsEntry() + // Snapshot the current GlobalParamsEntry. bav._setSnapshotGlobalParamsEntry(bav.GlobalParamsEntry, currentEpochEntry.EpochNumber) - // Snapshot the current ValidatorEntries. This loops through all validators to snapshot them in O(n). - if err = bav.SnapshotCurrentValidators(currentEpochEntry.EpochNumber, blockHeight); err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: problem snapshotting validators: ") + // Snapshot the current top n active validators as the current validator set. + validatorSet, err := bav.GetTopActiveValidatorsByStake(currentGlobalParamsEntry.ValidatorSetMaxNumValidators) + if err != nil { + return errors.Wrapf(err, "RunEpochCompleteHook: error retrieving top ValidatorEntries: ") + } + for _, validatorEntry := range validatorSet { + bav._setSnapshotValidatorEntry(validatorEntry, currentEpochEntry.EpochNumber) } // Snapshot the current GlobalActiveStakeAmountNanos. 
@@ -98,7 +104,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // TODO: Delete old snapshots that are no longer used. - // Retrieve the SnapshotGlobalParamsEntry.EpochDurationNumBlocks. + // Retrieve the SnapshotGlobalParamsEntry. snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() if err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving SnapshotGlobalParamsEntry: ") diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 757d61a98..f2b665bf5 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -155,7 +155,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } // Test SnapshotTopActiveValidatorsByStake is empty. - validatorEntries, err := utxoView().GetSnapshotTopActiveValidatorsByStake(10) + validatorEntries, err := utxoView().GetSnapshotValidatorSetByStake(10) require.NoError(t, err) require.Empty(t, validatorEntries) @@ -303,7 +303,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } // Test SnapshotTopActiveValidatorsByStake is populated. - validatorEntries, err := utxoView().GetSnapshotTopActiveValidatorsByStake(10) + validatorEntries, err := utxoView().GetSnapshotValidatorSetByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 7) require.Equal(t, validatorEntries[0].ValidatorPKID, m6PKID) @@ -397,7 +397,7 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test snapshotting changing validator set. // m0 unregisters as a validator. - snapshotValidatorEntries, err := utxoView().GetSnapshotTopActiveValidatorsByStake(10) + snapshotValidatorEntries, err := utxoView().GetSnapshotValidatorSetByStake(10) require.NoError(t, err) require.Len(t, snapshotValidatorEntries, 7) @@ -408,7 +408,7 @@ func TestRunEpochCompleteHook(t *testing.T) { _runOnEpochCompleteHook() // m0 is still in the snapshot validator set. 
- snapshotValidatorEntries, err = utxoView().GetSnapshotTopActiveValidatorsByStake(10) + snapshotValidatorEntries, err = utxoView().GetSnapshotValidatorSetByStake(10) require.NoError(t, err) require.Len(t, snapshotValidatorEntries, 7) @@ -416,7 +416,7 @@ func TestRunEpochCompleteHook(t *testing.T) { _runOnEpochCompleteHook() // m0 is dropped from the snapshot validator set. - snapshotValidatorEntries, err = utxoView().GetSnapshotTopActiveValidatorsByStake(10) + snapshotValidatorEntries, err = utxoView().GetSnapshotValidatorSetByStake(10) require.NoError(t, err) require.Len(t, snapshotValidatorEntries, 6) } @@ -446,7 +446,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } getNumSnapshotActiveValidators := func() int { - snapshotValidatorEntries, err := utxoView().GetSnapshotTopActiveValidatorsByStake(10) + snapshotValidatorEntries, err := utxoView().GetSnapshotValidatorSetByStake(10) require.NoError(t, err) return len(snapshotValidatorEntries) } @@ -457,12 +457,6 @@ func TestRunEpochCompleteHook(t *testing.T) { return validatorEntry } - getSnapshotValidator := func(validatorPKID *PKID) *ValidatorEntry { - snapshotValidatorEntry, err := utxoView().GetSnapshotValidatorByPKID(validatorPKID) - require.NoError(t, err) - return snapshotValidatorEntry - } - // In epoch 9, all registered validators have Status = Active. require.Equal(t, getCurrentEpochNumber(), 9) require.Equal(t, getNumCurrentActiveValidators(), 6) @@ -508,11 +502,10 @@ func TestRunEpochCompleteHook(t *testing.T) { _runOnEpochCompleteHook() // In epoch 14, all current registered validators have Status = Jailed. - // In snapshot 12, all snapshot registered validators have Status = Jailed. + // In snapshot 12, the validator set is empty because all validators have Status = Jailed. 
+ require.Equal(t, getCurrentEpochNumber(), 14) require.Empty(t, getNumCurrentActiveValidators()) require.Empty(t, getNumSnapshotActiveValidators()) - require.Equal(t, getSnapshotValidator(m6PKID).Status(), ValidatorStatusJailed) - require.Equal(t, getSnapshotValidator(m6PKID).JailedAtEpochNumber, uint64(11)) } } diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 5193b84fd..518f08dd2 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -2,6 +2,7 @@ package lib import ( "crypto/sha256" + "github.com/holiman/uint256" "github.com/pkg/errors" ) @@ -14,13 +15,10 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) { } // Retrieve the SnapshotGlobalParamsEntry.LeaderScheduleMaxNumValidators. - snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() - if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving SnapshotGlobalParamsEntry: ") - } + currentGlobalParamsEntry := bav.GetCurrentGlobalParamsEntry() // Retrieve top, active validators ordered by stake. - validatorEntries, err := bav.GetTopActiveValidatorsByStake(snapshotGlobalParamsEntry.LeaderScheduleMaxNumValidators) + validatorEntries, err := bav.GetTopActiveValidatorsByStake(currentGlobalParamsEntry.LeaderScheduleMaxNumValidators) if err != nil { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving top ValidatorEntries: ") } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 88607becb..6ae7c6ca8 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -206,30 +206,6 @@ type SnapshotValidatorMapKey struct { ValidatorPKID PKID } -func (bav *UtxoView) SnapshotCurrentValidators(snapshotAtEpochNumber uint64, blockHeight uint64) error { - // First, snapshot any !isDeleted ValidatorEntries in the UtxoView. 
- var utxoViewValidatorPKIDs []*PKID - for _, validatorEntry := range bav.ValidatorPKIDToValidatorEntry { - if !validatorEntry.isDeleted { - // We only want to snapshot !isDeleted ValidatorEntries. - bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) - } - - // We don't want to retrieve any ValidatorEntries from the db that are present in the UtxoView. - utxoViewValidatorPKIDs = append(utxoViewValidatorPKIDs, validatorEntry.ValidatorPKID) - } - // Second, snapshot the ValidatorEntries in the db (skipping any in the UtxoView). - dbValidatorEntries, err := DBEnumerateAllCurrentValidators(bav.Handle, utxoViewValidatorPKIDs) - if err != nil { - return errors.Wrapf(err, "SnapshotValidators: problem retrieving ValidatorEntries: ") - } - for _, validatorEntry := range dbValidatorEntries { - bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) - - } - return nil -} - func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID) (*ValidatorEntry, error) { // Calculate the SnapshotEpochNumber. snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() @@ -256,12 +232,13 @@ func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID) (*ValidatorEntry, er return validatorEntry, nil } -func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit uint64) ([]*ValidatorEntry, error) { +func (bav *UtxoView) GetSnapshotValidatorSetByStake(limit uint64) ([]*ValidatorEntry, error) { // Calculate the SnapshotEpochNumber. snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotTopActiveValidatorsByStake: problem calculating SnapshotEpochNumber: ") + return nil, errors.Wrapf(err, "GetSnapshotValidatorSetByStake: problem calculating SnapshotEpochNumber: ") } + // Create a slice of all UtxoView ValidatorEntries to prevent pulling them from the db. 
var utxoViewValidatorEntries []*ValidatorEntry for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { @@ -276,7 +253,7 @@ func (bav *UtxoView) GetSnapshotTopActiveValidatorsByStake(limit uint64) ([]*Val bav.Handle, bav.Snapshot, limit, snapshotAtEpochNumber, utxoViewValidatorEntries, ) if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotTopActiveValidatorsByStake: error retrieving entries from db: ") + return nil, errors.Wrapf(err, "GetSnapshotValidatorSetByStake: error retrieving entries from db: ") } // Cache top N active ValidatorEntries from the db in the UtxoView. for _, validatorEntry := range dbValidatorEntries { @@ -394,9 +371,8 @@ func DBKeyForSnapshotValidatorByPKID(validatorEntry *ValidatorEntry, snapshotAtE } func DBKeyForSnapshotValidatorByStake(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) []byte { - key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStatusAndStake...) + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStake...) key = append(key, EncodeUint64(snapshotAtEpochNumber)...) - key = append(key, EncodeUint8(uint8(validatorEntry.Status()))...) key = append(key, FixedWidthEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) return key @@ -454,9 +430,8 @@ func DBGetSnapshotTopActiveValidatorsByStake( } // Retrieve top N active ValidatorEntry keys by stake. - key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStatusAndStake...) + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStake...) key = append(key, EncodeUint64(snapshotAtEpochNumber)...) - key = append(key, EncodeUint8(uint8(ValidatorStatusActive))...) 
keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( handle, key, int(limit), nil, true, validatorKeysToSkip, ) From 9f34fad0c47ec257d3a294783303bbe5e7578ab4 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 11 Jul 2023 15:17:19 -0400 Subject: [PATCH 139/762] PoS Rename SnapshotValidatorEntries to SnapshotValidatorSet (#583) * PoS Rename SnapshotValidatorEntries to SnapshotValidatorSet * Finish refactor --- lib/block_view.go | 17 ++-- lib/block_view_flush.go | 5 +- lib/db_utils.go | 14 +-- lib/pos_epoch_complete_hook.go | 2 +- lib/pos_epoch_complete_hook_test.go | 28 +++--- lib/pos_snapshot_entries.go | 148 ++++++++++++++-------------- 6 files changed, 109 insertions(+), 105 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 10979be89..ad541719f 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -137,9 +137,10 @@ type UtxoView struct { // It contains the snapshot value of the GlobalParamsEntry at the given SnapshotAtEpochNumber. SnapshotGlobalParamEntries map[uint64]*GlobalParamsEntry - // SnapshotValidatorEntries is a map of to a ValidatorEntry. - // It contains the snapshot value of a ValidatorEntry at the given SnapshotAtEpochNumber. - SnapshotValidatorEntries map[SnapshotValidatorMapKey]*ValidatorEntry + // SnapshotValidatorSet is a map of to a ValidatorEntry. + // It contains the snapshot value of every ValidatorEntry that makes up the validator set at + // the given SnapshotAtEpochNumber. + SnapshotValidatorSet map[SnapshotValidatorSetMapKey]*ValidatorEntry // SnapshotGlobalActiveStakeAmountNanos is a map of SnapshotAtEpochNumber to a GlobalActiveStakeAmountNanos. // It contains the snapshot value of the GlobalActiveStakeAmountNanos at the given SnapshotAtEpochNumber. 
@@ -259,8 +260,8 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // SnapshotGlobalParamEntries bav.SnapshotGlobalParamEntries = make(map[uint64]*GlobalParamsEntry) - // SnapshotValidatorEntries - bav.SnapshotValidatorEntries = make(map[SnapshotValidatorMapKey]*ValidatorEntry) + // SnapshotValidatorSet + bav.SnapshotValidatorSet = make(map[SnapshotValidatorSetMapKey]*ValidatorEntry) // SnapshotGlobalActiveStakeAmountNanos bav.SnapshotGlobalActiveStakeAmountNanos = make(map[uint64]*uint256.Int) @@ -559,9 +560,9 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.SnapshotGlobalParamEntries[epochNumber] = globalParamsEntry.Copy() } - // Copy the SnapshotValidatorEntries - for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { - newView.SnapshotValidatorEntries[mapKey] = validatorEntry.Copy() + // Copy the SnapshotValidatorSet + for mapKey, validatorEntry := range bav.SnapshotValidatorSet { + newView.SnapshotValidatorSet[mapKey] = validatorEntry.Copy() } // Copy the SnapshotGlobalActiveStakeAmountNanos diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index b414e5fc4..4cf1e724d 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -2,11 +2,12 @@ package lib import ( "fmt" + "reflect" + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" - "reflect" ) func (bav *UtxoView) FlushToDb(blockHeight uint64) error { @@ -164,7 +165,7 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushSnapshotGlobalParamsEntryToDbWithTxn(txn, blockHeight); err != nil { return err } - if err := bav._flushSnapshotValidatorEntriesToDbWithTxn(txn, blockHeight); err != nil { + if err := bav._flushSnapshotValidatorSetToDbWithTxn(txn, blockHeight); err != nil { return err } if err := bav._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { diff --git a/lib/db_utils.go b/lib/db_utils.go index 
7d97ed759..157d75010 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -533,14 +533,16 @@ type DBPrefixes struct { // Prefix, -> *GlobalParamsEntry PrefixSnapshotGlobalParamsEntry []byte `prefix_id:"[85]" is_state:"true"` - // PrefixSnapshotValidatorByPKID: Retrieve a snapshot ValidatorEntry by . + // PrefixSnapshotValidatorSetByPKID: Retrieve a ValidatorEntry from a snapshot validator set by + // . // Prefix, , -> *ValidatorEntry - PrefixSnapshotValidatorByPKID []byte `prefix_id:"[86]" is_state:"true"` + PrefixSnapshotValidatorSetByPKID []byte `prefix_id:"[86]" is_state:"true"` - // PrefixSnapshotValidatorByStatusAndStake: Retrieve stake-ordered active ValidatorEntries by SnapshotAtEpochNumber. + // PrefixSnapshotValidatorSetByStake: Retrieve stake-ordered ValidatorEntries from a snapshot validator set + // by SnapshotAtEpochNumber. // Prefix, , , -> nil // Note: we parse the ValidatorPKID from the key and the value is nil to save space. - PrefixSnapshotValidatorByStake []byte `prefix_id:"[87]" is_state:"true"` + PrefixSnapshotValidatorSetByStake []byte `prefix_id:"[87]" is_state:"true"` // PrefixSnapshotGlobalActiveStakeAmountNanos: Retrieve a snapshot GlobalActiveStakeAmountNanos by SnapshotAtEpochNumber. 
// Prefix, -> *uint256.Int @@ -776,10 +778,10 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalParamsEntry) { // prefix_id:"[85]" return true, &GlobalParamsEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByPKID) { + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetByPKID) { // prefix_id:"[86]" return true, &ValidatorEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorByStake) { + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetByStake) { // prefix_id:"[87]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanos) { diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index b24c2e3de..b3983ad7c 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -80,7 +80,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { return errors.Wrapf(err, "RunEpochCompleteHook: error retrieving top ValidatorEntries: ") } for _, validatorEntry := range validatorSet { - bav._setSnapshotValidatorEntry(validatorEntry, currentEpochEntry.EpochNumber) + bav._setSnapshotValidatorSetEntry(validatorEntry, currentEpochEntry.EpochNumber) } // Snapshot the current GlobalActiveStakeAmountNanos. diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index f2b665bf5..a41f44b72 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -149,9 +149,9 @@ func TestRunEpochCompleteHook(t *testing.T) { _assertEmptyValidatorSnapshots := func() { // Test SnapshotValidatorByPKID is nil. 
for _, pkid := range validatorPKIDs { - snapshotValidatorEntry, err := utxoView().GetSnapshotValidatorByPKID(pkid) + snapshotValidatorSetEntry, err := utxoView().GetSnapshotValidatorSetEntryByPKID(pkid) require.NoError(t, err) - require.Nil(t, snapshotValidatorEntry) + require.Nil(t, snapshotValidatorSetEntry) } // Test SnapshotTopActiveValidatorsByStake is empty. @@ -297,9 +297,9 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test SnapshotValidatorByPKID is populated. for _, pkid := range validatorPKIDs { - snapshotValidatorEntry, err := utxoView().GetSnapshotValidatorByPKID(pkid) + snapshotValidatorSetEntry, err := utxoView().GetSnapshotValidatorSetEntryByPKID(pkid) require.NoError(t, err) - require.NotNil(t, snapshotValidatorEntry) + require.NotNil(t, snapshotValidatorSetEntry) } // Test SnapshotTopActiveValidatorsByStake is populated. @@ -345,7 +345,7 @@ func TestRunEpochCompleteHook(t *testing.T) { _runOnEpochCompleteHook() // Snapshot m5 still has 600 staked. - validatorEntry, err = utxoView().GetSnapshotValidatorByPKID(m5PKID) + validatorEntry, err = utxoView().GetSnapshotValidatorSetEntryByPKID(m5PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(600)) @@ -354,7 +354,7 @@ func TestRunEpochCompleteHook(t *testing.T) { _runOnEpochCompleteHook() // Snapshot m5 now has 800 staked. - validatorEntry, err = utxoView().GetSnapshotValidatorByPKID(m5PKID) + validatorEntry, err = utxoView().GetSnapshotValidatorSetEntryByPKID(m5PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(800)) @@ -397,9 +397,9 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test snapshotting changing validator set. // m0 unregisters as a validator. 
- snapshotValidatorEntries, err := utxoView().GetSnapshotValidatorSetByStake(10) + snapshotValidatorSet, err := utxoView().GetSnapshotValidatorSetByStake(10) require.NoError(t, err) - require.Len(t, snapshotValidatorEntries, 7) + require.Len(t, snapshotValidatorSet, 7) _, err = _submitUnregisterAsValidatorTxn(testMeta, m0Pub, m0Priv, true) require.NoError(t, err) @@ -408,17 +408,17 @@ func TestRunEpochCompleteHook(t *testing.T) { _runOnEpochCompleteHook() // m0 is still in the snapshot validator set. - snapshotValidatorEntries, err = utxoView().GetSnapshotValidatorSetByStake(10) + snapshotValidatorSet, err = utxoView().GetSnapshotValidatorSetByStake(10) require.NoError(t, err) - require.Len(t, snapshotValidatorEntries, 7) + require.Len(t, snapshotValidatorSet, 7) // Run OnEpochCompleteHook(). _runOnEpochCompleteHook() // m0 is dropped from the snapshot validator set. - snapshotValidatorEntries, err = utxoView().GetSnapshotValidatorSetByStake(10) + snapshotValidatorSet, err = utxoView().GetSnapshotValidatorSetByStake(10) require.NoError(t, err) - require.Len(t, snapshotValidatorEntries, 6) + require.Len(t, snapshotValidatorSet, 6) } { // Test jailing inactive validators. 
@@ -446,9 +446,9 @@ func TestRunEpochCompleteHook(t *testing.T) { } getNumSnapshotActiveValidators := func() int { - snapshotValidatorEntries, err := utxoView().GetSnapshotValidatorSetByStake(10) + snapshotValidatorSet, err := utxoView().GetSnapshotValidatorSetByStake(10) require.NoError(t, err) - return len(snapshotValidatorEntries) + return len(snapshotValidatorSet) } getCurrentValidator := func(validatorPKID *PKID) *ValidatorEntry { diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 6ae7c6ca8..4fea4508f 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -198,36 +198,36 @@ func DBPutSnapshotGlobalParamsEntryWithTxn( } // -// SnapshotValidatorEntry +// SnapshotValidatorSet // -type SnapshotValidatorMapKey struct { +type SnapshotValidatorSetMapKey struct { SnapshotAtEpochNumber uint64 ValidatorPKID PKID } -func (bav *UtxoView) GetSnapshotValidatorByPKID(pkid *PKID) (*ValidatorEntry, error) { +func (bav *UtxoView) GetSnapshotValidatorSetEntryByPKID(pkid *PKID) (*ValidatorEntry, error) { // Calculate the SnapshotEpochNumber. snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotValidatorByPKID: problem calculating SnapshotEpochNumber: ") + return nil, errors.Wrapf(err, "GetSnapshotValidatorSetEntryByPKID: problem calculating SnapshotEpochNumber: ") } // Check the UtxoView first. - mapKey := SnapshotValidatorMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *pkid} - if validatorEntry, exists := bav.SnapshotValidatorEntries[mapKey]; exists { + mapKey := SnapshotValidatorSetMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *pkid} + if validatorEntry, exists := bav.SnapshotValidatorSet[mapKey]; exists { return validatorEntry, nil } // If we don't have it in the UtxoView, check the db. 
- validatorEntry, err := DBGetSnapshotValidatorByPKID(bav.Handle, bav.Snapshot, pkid, snapshotAtEpochNumber) + validatorEntry, err := DBGetSnapshotValidatorSetEntryByPKID(bav.Handle, bav.Snapshot, pkid, snapshotAtEpochNumber) if err != nil { return nil, errors.Wrapf( err, - "GetSnapshotValidatorByPKID: problem retrieving ValidatorEntry from db: ", + "GetSnapshotValidatorSetEntryByPKID: problem retrieving ValidatorEntry from db: ", ) } if validatorEntry != nil { // Cache the result in the UtxoView. - bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) + bav._setSnapshotValidatorSetEntry(validatorEntry, snapshotAtEpochNumber) } return validatorEntry, nil } @@ -236,40 +236,40 @@ func (bav *UtxoView) GetSnapshotValidatorSetByStake(limit uint64) ([]*ValidatorE // Calculate the SnapshotEpochNumber. snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotValidatorSetByStake: problem calculating SnapshotEpochNumber: ") + return nil, errors.Wrapf(err, "GetSnapshotValidatorSetEntriesByStake: problem calculating SnapshotEpochNumber: ") } // Create a slice of all UtxoView ValidatorEntries to prevent pulling them from the db. var utxoViewValidatorEntries []*ValidatorEntry - for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { + for mapKey, validatorEntry := range bav.SnapshotValidatorSet { if mapKey.SnapshotAtEpochNumber == snapshotAtEpochNumber { utxoViewValidatorEntries = append(utxoViewValidatorEntries, validatorEntry) } } - // Pull top N active ValidatorEntries from the database (not present in the UtxoView). + // Pull top N ValidatorEntries from the database (not present in the UtxoView). // Note that we will skip validators that are present in the view because we pass // utxoViewValidatorEntries to the function. 
- dbValidatorEntries, err := DBGetSnapshotTopActiveValidatorsByStake( + dbValidatorEntries, err := DBGetSnapshotValidatorSetByStake( bav.Handle, bav.Snapshot, limit, snapshotAtEpochNumber, utxoViewValidatorEntries, ) if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotValidatorSetByStake: error retrieving entries from db: ") + return nil, errors.Wrapf(err, "GetSnapshotValidatorSetEntriesByStake: error retrieving entries from db: ") } // Cache top N active ValidatorEntries from the db in the UtxoView. for _, validatorEntry := range dbValidatorEntries { // We only pull ValidatorEntries from the db that are not present in the // UtxoView. As a sanity check, we double-check that the ValidatorEntry // is not already in the UtxoView here. - mapKey := SnapshotValidatorMapKey{ + mapKey := SnapshotValidatorSetMapKey{ SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID, } - if _, exists := bav.SnapshotValidatorEntries[mapKey]; !exists { - bav._setSnapshotValidatorEntry(validatorEntry, snapshotAtEpochNumber) + if _, exists := bav.SnapshotValidatorSet[mapKey]; !exists { + bav._setSnapshotValidatorSetEntry(validatorEntry, snapshotAtEpochNumber) } } // Pull !isDeleted, active ValidatorEntries from the UtxoView with stake > 0. 
var validatorEntries []*ValidatorEntry - for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { + for mapKey, validatorEntry := range bav.SnapshotValidatorSet { if mapKey.SnapshotAtEpochNumber == snapshotAtEpochNumber && !validatorEntry.isDeleted && validatorEntry.Status() == ValidatorStatusActive && @@ -294,68 +294,68 @@ func (bav *UtxoView) GetSnapshotValidatorSetByStake(limit uint64) ([]*ValidatorE return validatorEntries[0:upperBound], nil } -func (bav *UtxoView) _setSnapshotValidatorEntry(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) { +func (bav *UtxoView) _setSnapshotValidatorSetEntry(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) { if validatorEntry == nil { - glog.Errorf("_setSnapshotValidatorEntry: called with nil entry, this should never happen") + glog.Errorf("_setSnapshotValidatorSetEntry: called with nil entry, this should never happen") return } - mapKey := SnapshotValidatorMapKey{ + mapKey := SnapshotValidatorSetMapKey{ SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID, } - bav.SnapshotValidatorEntries[mapKey] = validatorEntry.Copy() + bav.SnapshotValidatorSet[mapKey] = validatorEntry.Copy() } -func (bav *UtxoView) _deleteSnapshotValidatorEntry(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) { +func (bav *UtxoView) _deleteSnapshotValidatorSetEntry(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) { // This function shouldn't be called with nil. if validatorEntry == nil { - glog.Errorf("_deleteSnapshotValidatorEntry: called with nil entry, this should never happen") + glog.Errorf("_deleteSnapshotValidatorSetEntry: called with nil entry, this should never happen") return } // Create a tombstone entry. tombstoneEntry := *validatorEntry tombstoneEntry.isDeleted = true // Set the mappings to the point to the tombstone entry. 
- bav._setSnapshotValidatorEntry(&tombstoneEntry, snapshotAtEpochNumber) + bav._setSnapshotValidatorSetEntry(&tombstoneEntry, snapshotAtEpochNumber) } -func (bav *UtxoView) _flushSnapshotValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { - // Delete all SnapshotValidatorEntries from the db that are in the UtxoView. - for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { +func (bav *UtxoView) _flushSnapshotValidatorSetToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + // Delete all SnapshotValidatorSet entries from the db that are in the UtxoView. + for mapKey, validatorEntry := range bav.SnapshotValidatorSet { if validatorEntry == nil { return fmt.Errorf( - "_flushSnapshotValidatorEntriesToDb: found nil entry for EpochNumber %d, this should never happen", + "_flushSnapshotValidatorSetToDbWithTxn: found nil entry for EpochNumber %d, this should never happen", mapKey.SnapshotAtEpochNumber, ) } - if err := DBDeleteSnapshotValidatorEntryWithTxn( + if err := DBDeleteSnapshotValidatorSetEntryWithTxn( txn, bav.Snapshot, &mapKey.ValidatorPKID, mapKey.SnapshotAtEpochNumber, ); err != nil { return errors.Wrapf( err, - "_flushSnapshotValidatorEntriesToDb: problem deleting ValidatorEntry for EpochNumber %d: ", + "_flushSnapshotValidatorSetToDbWithTxn: problem deleting ValidatorEntry for EpochNumber %d: ", mapKey.SnapshotAtEpochNumber, ) } } - // Set all !isDeleted SnapshotValidatorEntries into the db from the UtxoView. - for mapKey, validatorEntry := range bav.SnapshotValidatorEntries { + // Set all !isDeleted SnapshotValidatorSet into the db from the UtxoView. 
+ for mapKey, validatorEntry := range bav.SnapshotValidatorSet { if validatorEntry == nil { return fmt.Errorf( - "_flushSnapshotValidatorEntriesToDb: found nil entry for EpochNumber %d, this should never happen", + "_flushSnapshotValidatorSetToDbWithTxn: found nil entry for EpochNumber %d, this should never happen", mapKey.SnapshotAtEpochNumber, ) } if validatorEntry.isDeleted { - // Skip any deleted SnapshotValidatorEntries. + // Skip any deleted SnapshotValidatorSet. continue } - if err := DBPutSnapshotValidatorEntryWithTxn( + if err := DBPutSnapshotValidatorSetEntryWithTxn( txn, bav.Snapshot, validatorEntry, mapKey.SnapshotAtEpochNumber, blockHeight, ); err != nil { return errors.Wrapf( err, - "_flushSnapshotValidatorEntriesToDb: problem setting ValidatorEntry for EpochNumber %d: ", + "_flushSnapshotValidatorSetToDbWithTxn: problem setting ValidatorEntry for EpochNumber %d: ", mapKey.SnapshotAtEpochNumber, ) } @@ -363,58 +363,58 @@ func (bav *UtxoView) _flushSnapshotValidatorEntriesToDbWithTxn(txn *badger.Txn, return nil } -func DBKeyForSnapshotValidatorByPKID(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) []byte { - key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByPKID...) +func DBKeyForSnapshotValidatorSetByPKID(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) []byte { + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorSetByPKID...) key = append(key, EncodeUint64(snapshotAtEpochNumber)...) key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) return key } -func DBKeyForSnapshotValidatorByStake(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) []byte { - key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStake...) +func DBKeyForSnapshotValidatorSetByStake(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) []byte { + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorSetByStake...) key = append(key, EncodeUint64(snapshotAtEpochNumber)...) 
key = append(key, FixedWidthEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) return key } -func DBGetSnapshotValidatorByPKID(handle *badger.DB, snap *Snapshot, pkid *PKID, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { +func DBGetSnapshotValidatorSetEntryByPKID(handle *badger.DB, snap *Snapshot, pkid *PKID, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { var ret *ValidatorEntry err := handle.View(func(txn *badger.Txn) error { var innerErr error - ret, innerErr = DBGetSnapshotValidatorByPKIDWithTxn(txn, snap, pkid, snapshotAtEpochNumber) + ret, innerErr = DBGetSnapshotValidatorSetEntryByPKIDWithTxn(txn, snap, pkid, snapshotAtEpochNumber) return innerErr }) return ret, err } -func DBGetSnapshotValidatorByPKIDWithTxn( +func DBGetSnapshotValidatorSetEntryByPKIDWithTxn( txn *badger.Txn, snap *Snapshot, pkid *PKID, snapshotAtEpochNumber uint64, ) (*ValidatorEntry, error) { // Retrieve ValidatorEntry from db. - key := DBKeyForSnapshotValidatorByPKID(&ValidatorEntry{ValidatorPKID: pkid}, snapshotAtEpochNumber) + key := DBKeyForSnapshotValidatorSetByPKID(&ValidatorEntry{ValidatorPKID: pkid}, snapshotAtEpochNumber) validatorBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { // We don't want to error if the key isn't found. Instead, return nil. if err == badger.ErrKeyNotFound { return nil, nil } - return nil, errors.Wrapf(err, "DBGetSnapshotValidatorByPKID: problem retrieving ValidatorEntry") + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetEntryByPKIDWithTxn: problem retrieving ValidatorEntry") } // Decode ValidatorEntry from bytes. 
validatorEntry := &ValidatorEntry{} rr := bytes.NewReader(validatorBytes) if exist, err := DecodeFromBytes(validatorEntry, rr); !exist || err != nil { - return nil, errors.Wrapf(err, "DBGetSnapshotValidatorByPKID: problem decoding ValidatorEntry") + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetEntryByPKIDWithTxn: problem decoding ValidatorEntry") } return validatorEntry, nil } -func DBGetSnapshotTopActiveValidatorsByStake( +func DBGetSnapshotValidatorSetByStake( handle *badger.DB, snap *Snapshot, limit uint64, @@ -426,17 +426,17 @@ func DBGetSnapshotTopActiveValidatorsByStake( // Convert ValidatorEntriesToSkip to ValidatorEntryKeysToSkip. validatorKeysToSkip := NewSet([]string{}) for _, validatorEntryToSkip := range validatorEntriesToSkip { - validatorKeysToSkip.Add(string(DBKeyForSnapshotValidatorByStake(validatorEntryToSkip, snapshotAtEpochNumber))) + validatorKeysToSkip.Add(string(DBKeyForSnapshotValidatorSetByStake(validatorEntryToSkip, snapshotAtEpochNumber))) } // Retrieve top N active ValidatorEntry keys by stake. - key := append([]byte{}, Prefixes.PrefixSnapshotValidatorByStake...) + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorSetByStake...) key = append(key, EncodeUint64(snapshotAtEpochNumber)...) keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( handle, key, int(limit), nil, true, validatorKeysToSkip, ) if err != nil { - return nil, errors.Wrapf(err, "DBGetSnapshotTopActiveValidatorsByStake: problem retrieving top validators: ") + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetByStake: problem retrieving top validators: ") } // For each key found, parse the ValidatorPKID from the key, @@ -447,12 +447,12 @@ func DBGetSnapshotTopActiveValidatorsByStake( // Convert PKIDBytes to PKID. 
validatorPKID := &PKID{} if err = validatorPKID.FromBytes(bytes.NewReader(validatorPKIDBytes)); err != nil { - return nil, errors.Wrapf(err, "DBGetSnapshotTopActiveValidatorsByStake: problem reading ValidatorPKID: ") + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetByStake: problem reading ValidatorPKID: ") } // Retrieve ValidatorEntry by PKID. - validatorEntry, err := DBGetSnapshotValidatorByPKID(handle, snap, validatorPKID, snapshotAtEpochNumber) + validatorEntry, err := DBGetSnapshotValidatorSetEntryByPKID(handle, snap, validatorPKID, snapshotAtEpochNumber) if err != nil { - return nil, errors.Wrapf(err, "DBGetSnapshotTopActiveValidatorsByStake: problem retrieving validator by PKID: ") + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetByStake: problem retrieving validator by PKID: ") } validatorEntries = append(validatorEntries, validatorEntry) } @@ -460,7 +460,7 @@ func DBGetSnapshotTopActiveValidatorsByStake( return validatorEntries, nil } -func DBPutSnapshotValidatorEntryWithTxn( +func DBPutSnapshotValidatorSetEntryWithTxn( txn *badger.Txn, snap *Snapshot, validatorEntry *ValidatorEntry, @@ -469,67 +469,67 @@ func DBPutSnapshotValidatorEntryWithTxn( ) error { if validatorEntry == nil { // This should never happen but is a sanity check. - glog.Errorf("DBPutSnapshotValidatorEntryWithTxn: called with nil ValidatorEntry, this should never happen") + glog.Errorf("DBPutSnapshotValidatorSetEntryWithTxn: called with nil ValidatorEntry, this should never happen") return nil } - // Put the ValidatorEntry in the SnapshotValidatorByPKID index. - key := DBKeyForSnapshotValidatorByPKID(validatorEntry, snapshotAtEpochNumber) + // Put the ValidatorEntry in the SnapshotSetByPKID index. 
+ key := DBKeyForSnapshotValidatorSetByPKID(validatorEntry, snapshotAtEpochNumber) if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry)); err != nil { return errors.Wrapf( err, - "DBPutSnapshotValidatorEntryWithTxn: problem putting ValidatorEntry in the SnapshotValidatorByPKID index: ", + "DBPutSnapshotValidatorSetEntryWithTxn: problem putting ValidatorEntry in the SnapshotValidatorByPKID index: ", ) } // Put the ValidatorPKID in the SnapshotValidatorByStatusAndStake index. - key = DBKeyForSnapshotValidatorByStake(validatorEntry, snapshotAtEpochNumber) + key = DBKeyForSnapshotValidatorSetByStake(validatorEntry, snapshotAtEpochNumber) if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID)); err != nil { return errors.Wrapf( err, - "DBPutSnapshotValidatorEntryWithTxn: problem putting ValidatorPKID in the SnapshotValidatorByStake index: ", + "DBPutSnapshotValidatorSetEntryWithTxn: problem putting ValidatorPKID in the SnapshotValidatorByStake index: ", ) } return nil } -func DBDeleteSnapshotValidatorEntryWithTxn( +func DBDeleteSnapshotValidatorSetEntryWithTxn( txn *badger.Txn, snap *Snapshot, validatorPKID *PKID, snapshotAtEpochNumber uint64, ) error { if validatorPKID == nil { // This should never happen but is a sanity check. - glog.Errorf("DBDeleteSnapshotValidatorEntryWithTxn: called with nil ValidatorPKID") + glog.Errorf("DBDeleteSnapshotValidatorSetEntryWithTxn: called with nil ValidatorPKID") return nil } - // Look up the existing SnapshotValidatorEntry in the db using the PKID. + // Look up the existing SnapshotValidatorSetEntry in the db using the PKID. // We need to use this validator's values to delete the corresponding indexes. 
- validatorEntry, err := DBGetSnapshotValidatorByPKIDWithTxn(txn, snap, validatorPKID, snapshotAtEpochNumber) + snapshotValidatorSetEntry, err := DBGetSnapshotValidatorSetEntryByPKIDWithTxn(txn, snap, validatorPKID, snapshotAtEpochNumber) if err != nil { return errors.Wrapf( - err, "DBDeleteSnapshotValidatorEntryWithTxn: problem retrieving ValidatorEntry for PKID %v: ", validatorPKID, + err, "DBDeleteSnapshotValidatorSetEntryWithTxn: problem retrieving ValidatorEntry for PKID %v: ", validatorPKID, ) } // If there is no ValidatorEntry in the DB for this PKID, then there is nothing to delete. - if validatorEntry == nil { + if snapshotValidatorSetEntry == nil { return nil } - // Delete ValidatorEntry from PrefixSnapshotValidatorByPKID. - key := DBKeyForSnapshotValidatorByPKID(validatorEntry, snapshotAtEpochNumber) + // Delete ValidatorEntry from PrefixSnapshotSetByPKID. + key := DBKeyForSnapshotValidatorSetByPKID(snapshotValidatorSetEntry, snapshotAtEpochNumber) if err = DBDeleteWithTxn(txn, snap, key); err != nil { return errors.Wrapf( - err, "DBDeleteSnapshotValidatorEntryWithTxn: problem deleting ValidatorEntry from index PrefixSnapshotValidatorByPKID", + err, "DBDeleteSnapshotValidatorSetEntryWithTxn: problem deleting ValidatorEntry from index PrefixSnapshotSetByPKID", ) } // Delete ValidatorEntry.PKID from PrefixSnapshotValidatorByStatusAndStake. 
- key = DBKeyForSnapshotValidatorByStake(validatorEntry, snapshotAtEpochNumber) + key = DBKeyForSnapshotValidatorSetByStake(snapshotValidatorSetEntry, snapshotAtEpochNumber) if err = DBDeleteWithTxn(txn, snap, key); err != nil { return errors.Wrapf( - err, "DBDeleteSnapshotValidatorEntryWithTxn: problem deleting ValidatorEntry from index PrefixSnapshotValidatorByStatusAndStake", + err, "DBDeleteSnapshotValidatorSetEntryWithTxn: problem deleting ValidatorEntry from index PrefixSnapshotValidatorByStatusAndStake", ) } @@ -693,7 +693,7 @@ func (bav *UtxoView) GetSnapshotLeaderScheduleValidator(leaderIndex uint16) (*Va // First, check the UtxoView. mapKey := SnapshotLeaderScheduleMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, LeaderIndex: leaderIndex} if validatorPKID, exists := bav.SnapshotLeaderSchedule[mapKey]; exists { - return bav.GetSnapshotValidatorByPKID(validatorPKID) + return bav.GetSnapshotValidatorSetEntryByPKID(validatorPKID) } // Next, check the db. validatorEntry, err := DBGetSnapshotLeaderScheduleValidator(bav.Handle, bav.Snapshot, leaderIndex, snapshotAtEpochNumber) @@ -784,7 +784,7 @@ func DBGetSnapshotLeaderScheduleValidatorWithTxn( } // Retrieve ValidatorEntry by PKID from db. 
- return DBGetSnapshotValidatorByPKIDWithTxn(txn, snap, validatorPKID, snapshotAtEpochNumber) + return DBGetSnapshotValidatorSetEntryByPKIDWithTxn(txn, snap, validatorPKID, snapshotAtEpochNumber) } func DBPutSnapshotLeaderScheduleValidatorWithTxn( From e9c38f97558b2d93dfeca65b961c74b51a228586 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 11 Jul 2023 15:20:46 -0400 Subject: [PATCH 140/762] PoS Deprecate GlobalActiveStakeAmountNanos (#584) * Initial commit * Clear out all references to GlobalActiveStakeAmountNanos * Update db_utils * Rename dynamic stake sum function * Address Nina's comments --- lib/block_view.go | 13 --- lib/block_view_flush.go | 3 - lib/block_view_stake.go | 104 ++--------------- lib/block_view_stake_test.go | 88 -------------- lib/block_view_types.go | 12 -- lib/block_view_validator.go | 193 +++---------------------------- lib/block_view_validator_test.go | 32 ----- lib/db_utils.go | 45 +++---- lib/pos_epoch_complete_hook.go | 8 +- 9 files changed, 43 insertions(+), 455 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index ad541719f..7192aaf32 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -118,9 +118,6 @@ type UtxoView struct { // Validator mappings ValidatorPKIDToValidatorEntry map[PKID]*ValidatorEntry - // The global active stake is the sum of all stake across validators who have Status = Active. - GlobalActiveStakeAmountNanos *uint256.Int - // Stake mappings StakeMapKeyToStakeEntry map[StakeMapKey]*StakeEntry @@ -243,11 +240,6 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // ValidatorEntries bav.ValidatorPKIDToValidatorEntry = make(map[PKID]*ValidatorEntry) - // Global active stake across validators. We deliberately want this to initialize to nil and not zero - // since a zero value will overwrite an existing GlobalActiveStakeAmountNanos value in the db, whereas - // a nil GlobalActiveStakeAmountNanos value signifies that this value was never set. 
- bav.GlobalActiveStakeAmountNanos = nil - // StakeEntries bav.StakeMapKeyToStakeEntry = make(map[StakeMapKey]*StakeEntry) @@ -526,11 +518,6 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.ValidatorPKIDToValidatorEntry[entryKey] = entry.Copy() } - // Copy the GlobalActiveStakeAmountNanos. - if bav.GlobalActiveStakeAmountNanos != nil { - newView.GlobalActiveStakeAmountNanos = bav.GlobalActiveStakeAmountNanos.Clone() - } - // Copy the StakeEntries newView.StakeMapKeyToStakeEntry = make(map[StakeMapKey]*StakeEntry, len(bav.StakeMapKeyToStakeEntry)) for entryKey, entry := range bav.StakeMapKeyToStakeEntry { diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 4cf1e724d..9e179a706 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -144,9 +144,6 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushValidatorEntriesToDbWithTxn(txn, blockHeight); err != nil { return err } - if err := bav._flushGlobalActiveStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { - return err - } if err := bav._flushStakeEntriesToDbWithTxn(txn, blockHeight); err != nil { return err } diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 0c8d6656c..0571b84e8 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1226,23 +1226,6 @@ func (bav *UtxoView) _connectStake( // 4. Set the new ValidatorEntry. bav._setValidatorEntryMappings(currentValidatorEntry) - // Increase the GlobalActiveStakeAmountNanos if the validator is active. - var prevGlobalActiveStakeAmountNanos *uint256.Int - if currentValidatorEntry.Status() == ValidatorStatusActive { - // Retrieve the existing GlobalActiveStakeAmountNanos. - // The PrevGlobalActiveStakeAmountNanos will be restored if we disconnect this transaction. 
- prevGlobalActiveStakeAmountNanos, err = bav.GetGlobalActiveStakeAmountNanos() - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectStake: error retrieving GlobalActiveStakeAmountNanos: ") - } - globalActiveStakeAmountNanos, err := SafeUint256().Add(prevGlobalActiveStakeAmountNanos, txMeta.StakeAmountNanos) - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectStake: error adding StakeAmountNanos to GlobalActiveStakeAmountNanos: ") - } - // Set the new GlobalActiveStakeAmountNanos. - bav._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) - } - // Add the StakeAmountNanos to TotalOutput. The coins being staked are already // part of the TotalInput. But they are not burned, so they are an implicit // output even though they do not go to a specific public key's balance. @@ -1253,10 +1236,9 @@ func (bav *UtxoView) _connectStake( // Create a UTXO operation utxoOpForTxn := &UtxoOperation{ - Type: OperationTypeStake, - PrevValidatorEntry: prevValidatorEntry, - PrevGlobalActiveStakeAmountNanos: prevGlobalActiveStakeAmountNanos, - PrevStakeEntries: prevStakeEntries, + Type: OperationTypeStake, + PrevValidatorEntry: prevValidatorEntry, + PrevStakeEntries: prevStakeEntries, } if err = bav.SanityCheckStakeTxn( transactorPKIDEntry.PKID, utxoOpForTxn, txMeta.StakeAmountNanos, txn.TxnFeeNanos, prevBalanceNanos, @@ -1341,11 +1323,6 @@ func (bav *UtxoView) _disconnectStake( bav._setStakeEntryMappings(operationData.PrevStakeEntries[0]) } - // Restore the PrevGlobalActiveStakeAmountNanos, if exists. - if operationData.PrevGlobalActiveStakeAmountNanos != nil { - bav._setGlobalActiveStakeAmountNanos(operationData.PrevGlobalActiveStakeAmountNanos) - } - // Disconnect the BasicTransfer. Disconnecting the BasicTransfer also returns // the extra spend associated with the amount the transactor staked. return bav._disconnectBasicTransfer( @@ -1466,22 +1443,6 @@ func (bav *UtxoView) _connectUnstake( // 4. Set the new ValidatorEntry. 
bav._setValidatorEntryMappings(currentValidatorEntry) - // Decrease the GlobalActiveStakeAmountNanos if the validator is active. - var prevGlobalActiveStakeAmountNanos *uint256.Int - if currentValidatorEntry.Status() == ValidatorStatusActive { - // 1. Retrieve the existing GlobalActiveStakeAmountNanos. This will be restored if we disconnect this txn. - prevGlobalActiveStakeAmountNanos, err = bav.GetGlobalActiveStakeAmountNanos() - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error retrieving GlobalActiveStakeAmountNanos: ") - } - globalActiveStakeAmountNanos, err := SafeUint256().Sub(prevGlobalActiveStakeAmountNanos, txMeta.UnstakeAmountNanos) - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: error subtracting UnstakeAmountNanos from GlobalActiveStakeAmountNanos: ") - } - // 2. Set the new GlobalActiveStakeAmountNanos. - bav._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) - } - // Retrieve the CurrentEpochNumber. currentEpochNumber, err := bav.GetCurrentEpochNumber() if err != nil { @@ -1532,11 +1493,10 @@ func (bav *UtxoView) _connectUnstake( // Create a UTXO operation. utxoOpForTxn := &UtxoOperation{ - Type: OperationTypeUnstake, - PrevValidatorEntry: prevValidatorEntry, - PrevGlobalActiveStakeAmountNanos: prevGlobalActiveStakeAmountNanos, - PrevStakeEntries: prevStakeEntries, - PrevLockedStakeEntries: prevLockedStakeEntries, + Type: OperationTypeUnstake, + PrevValidatorEntry: prevValidatorEntry, + PrevStakeEntries: prevStakeEntries, + PrevLockedStakeEntries: prevLockedStakeEntries, } if err = bav.SanityCheckUnstakeTxn(transactorPKIDEntry.PKID, utxoOpForTxn, txMeta.UnstakeAmountNanos); err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ") @@ -1620,11 +1580,6 @@ func (bav *UtxoView) _disconnectUnstake( } bav._setStakeEntryMappings(operationData.PrevStakeEntries[0]) - // Restore the PrevGlobalActiveStakeAmountNanos, if exists. 
- if operationData.PrevGlobalActiveStakeAmountNanos != nil { - bav._setGlobalActiveStakeAmountNanos(operationData.PrevGlobalActiveStakeAmountNanos) - } - // Retrieve the CurrentEpochNumber. currentEpochNumber, err := bav.GetCurrentEpochNumber() if err != nil { @@ -2045,28 +2000,6 @@ func (bav *UtxoView) SanityCheckStakeTxn( return errors.New("SanityCheckStakeTxn: StakeAmountNanos increase does not match") } - // Validate GlobalActiveStakeAmountNanos increase if validator is active. - if currentValidatorEntry.Status() == ValidatorStatusActive { - if utxoOp.PrevGlobalActiveStakeAmountNanos == nil { - return errors.New("SanityCheckStakeTxn: nil PrevGlobalActiveStakeAmountNanos provided") - } - currentGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() - if err != nil { - return errors.Wrapf(err, "SanityCheckStakeTxn: error retrieving CurrentGlobalActiveStakeAmountNanos: ") - } - globalActiveStakeAmountNanosIncrease, err := SafeUint256().Sub( - currentGlobalActiveStakeAmountNanos, utxoOp.PrevGlobalActiveStakeAmountNanos, - ) - if err != nil { - return errors.Wrapf(err, "SanityCheckStakeTxn: error calculating GlobalActiveStakeAmountNanos increase: ") - } - if !globalActiveStakeAmountNanosIncrease.Eq(amountNanos) { - return errors.New("SanityCheckStakeTxn: GlobalActiveStakeAmountNanos increase does not match") - } - } else if utxoOp.PrevGlobalActiveStakeAmountNanos != nil { - return errors.New("SanityCheckStakeTxn: non-nil PrevGlobalActiveStakeAmountNanos provided for inactive validator, this should never happen") - } - // Validate TransactorBalance decrease. 
// PrevTransactorBalanceNanos = CurrentTransactorBalanceNanos + AmountNanos + FeeNanos // PrevTransactorBalanceNanos - CurrentTransactorBalanceNanos - FeeNanos = AmountNanos @@ -2162,29 +2095,6 @@ func (bav *UtxoView) SanityCheckUnstakeTxn(transactorPKID *PKID, utxoOp *UtxoOpe return errors.New("SanityCheckUnstakeTxn: LockedAmountNanos increase does not match") } - // Validate GlobalActiveStakeAmountNanos decrease if validator is active. - if currentValidatorEntry.Status() == ValidatorStatusActive { - if utxoOp.PrevGlobalActiveStakeAmountNanos == nil { - return errors.New("SanityCheckUnstakeTxn: nil PrevGlobalActiveStakeAmountNanos provided") - } - currentGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() - if err != nil { - return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving CurrentGlobalActiveStakeAmountNanos: ") - } - if currentGlobalActiveStakeAmountNanos == nil { - return errors.New("SanityCheckUnstakeTxn: no CurrentGlobalActiveStakeAmountNanos found") - } - globalActiveStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalActiveStakeAmountNanos, currentGlobalActiveStakeAmountNanos) - if err != nil { - return errors.Wrapf(err, "SanityCheckUnstakeTxn: error calculating GlobalActiveStakeAmountNanos decrease: ") - } - if !globalActiveStakeAmountNanosDecrease.Eq(amountNanos) { - return errors.New("SanityCheckUnstakeTxn: GlobalActiveStakeAmountNanos decrease does not match") - } - } else if utxoOp.PrevGlobalActiveStakeAmountNanos != nil { - return errors.New("SanityCheckUnstakeTxn: non-nil PrevGlobalActiveStakeAmountNanos provided for inactive validator, this should never happen") - } - return nil } diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 29a7eabc4..ca4bbc593 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -223,11 +223,6 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NotNil(t, validatorEntry) require.Equal(t, 
validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - // Verify GlobalActiveStakeAmountNanos. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(100)) - // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) require.Equal(t, m1OldDESOBalanceNanos-feeNanos-stakeMetadata.StakeAmountNanos.Uint64(), m1NewDESOBalanceNanos) @@ -258,11 +253,6 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) - // Verify GlobalActiveStakeAmountNanos. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(150)) - // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) require.Equal(t, m1OldDESOBalanceNanos-feeNanos-stakeMetadata.StakeAmountNanos.Uint64(), m1NewDESOBalanceNanos) @@ -373,11 +363,6 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(110)) - // Verify GlobalActiveStakeAmountNanos. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(110)) - // Verify LockedStakeEntry.UnstakeAmountNanos. 
lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) @@ -411,11 +396,6 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(80)) - // Verify GlobalActiveStakeAmountNanos. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(80)) - // Verify LockedStakeEntry.UnstakeAmountNanos. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) @@ -448,11 +428,6 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt()) - // Verify GlobalActiveStakeAmountNanos. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) - // Verify LockedStakeEntry.UnstakeAmountNanos. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) @@ -556,11 +531,6 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt()) - // Verify GlobalActiveStakeAmountNanos. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) - // Verify LockedStakeEntry.isDeleted. 
lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) @@ -1980,14 +1950,9 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { validatorEntry, err := utxoView().GetValidatorByPKID(validatorPKID) require.NoError(t, err) - // Retrieve current GlobalActiveStakeAmountNanos. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - // Jail the validator. tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) - tmpUtxoView._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) require.NoError(t, tmpUtxoView.JailValidator(validatorEntry)) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) @@ -1995,9 +1960,6 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // from the UtxoView so that they are next read from the db. delete(mempool.universalUtxoView.ValidatorPKIDToValidatorEntry, *validatorPKID) delete(mempool.readOnlyUtxoView.ValidatorPKIDToValidatorEntry, *validatorPKID) - mempool.universalUtxoView.GlobalActiveStakeAmountNanos = nil - mempool.readOnlyUtxoView.GlobalActiveStakeAmountNanos = nil - } // Seed a CurrentEpochEntry. @@ -2040,11 +2002,6 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) - - // GlobalActiveStakeAmountNanos increases. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(150)) } { // m1 unstakes some from m0. m0 is active. 
@@ -2059,11 +2016,6 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - - // GlobalActiveStakeAmountNanos decreases. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // Jail m0. Since this update takes place outside a transaction, @@ -2079,11 +2031,6 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos stays the same. require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - - // GlobalActiveStakeAmountNanos decreases. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) } { // m1 stakes more with m0. m0 is jailed. @@ -2098,11 +2045,6 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) - - // GlobalActiveStakeAmountNanos stays the same. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) } { // m1 unstakes some from m0. m0 is jailed. @@ -2117,11 +2059,6 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - - // GlobalActiveStakeAmountNanos stays the same. 
- globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) } { // m0 unjails himself. @@ -2132,11 +2069,6 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - - // GlobalActiveStakeAmountNanos increases. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // m1 stakes more with m0. m0 is active. @@ -2151,11 +2083,6 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) - - // GlobalActiveStakeAmountNanos increases. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(150)) } { // m1 unstakes some from m0. m0 is active. @@ -2170,11 +2097,6 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - - // GlobalActiveStakeAmountNanos decreases. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // Jail m0 again. 
Since this update takes place outside a transaction, @@ -2186,11 +2108,6 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - - // GlobalActiveStakeAmountNanos decreases. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) } { // m0 unregisters as a validator. @@ -2201,10 +2118,5 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.Nil(t, validatorEntry) - - // GlobalActiveStakeAmountNanos stays the same. - globalActiveStakeAmountNanos, err := utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) } } diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 7e19d48e5..b77782709 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -913,10 +913,6 @@ type UtxoOperation struct { // register, unregister, stake, or unstake txn. PrevValidatorEntry *ValidatorEntry - // PrevGlobalActiveStakeAmountNanos is the previous GlobalActiveStakeAmountNanos - // prior to a stake or unstake operation txn. - PrevGlobalActiveStakeAmountNanos *uint256.Int - // PrevStakeEntries is a slice of StakeEntries prior to // a register, unregister, stake, or unstake txn. PrevStakeEntries []*StakeEntry @@ -1245,9 +1241,6 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada // PrevValidatorEntry data = append(data, EncodeToBytes(blockHeight, op.PrevValidatorEntry, skipMetadata...)...) - // PrevGlobalActiveStakeAmountNanos - data = append(data, VariableEncodeUint256(op.PrevGlobalActiveStakeAmountNanos)...) 
- // PrevStakeEntries data = append(data, EncodeDeSoEncoderSlice(op.PrevStakeEntries, blockHeight, skipMetadata...)...) @@ -1876,11 +1869,6 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevValidatorEntry: ") } - // PrevGlobalActiveStakeAmountNanos - if op.PrevGlobalActiveStakeAmountNanos, err = VariableDecodeUint256(rr); err != nil { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevGlobalActiveStakeAmountNanos: ") - } - // PrevStakeEntries if op.PrevStakeEntries, err = DecodeDeSoEncoderSlice[*StakeEntry](rr); err != nil { return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevStakeEntries: ") diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index ca38ed7a4..7e4ba2009 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -557,10 +557,6 @@ func DBKeyForValidatorByStake(validatorEntry *ValidatorEntry) []byte { return key } -func DBKeyForGlobalActiveStakeAmountNanos() []byte { - return append([]byte{}, Prefixes.PrefixGlobalActiveStakeAmountNanos...) -} - func DBGetValidatorByPKID(handle *badger.DB, snap *Snapshot, pkid *PKID) (*ValidatorEntry, error) { var ret *ValidatorEntry err := handle.View(func(txn *badger.Txn) error { @@ -637,38 +633,6 @@ func DBGetTopActiveValidatorsByStake( return validatorEntries, nil } -func DBGetGlobalActiveStakeAmountNanos(handle *badger.DB, snap *Snapshot) (*uint256.Int, error) { - var ret *uint256.Int - err := handle.View(func(txn *badger.Txn) error { - var innerErr error - ret, innerErr = DBGetGlobalActiveStakeAmountNanosWithTxn(txn, snap) - return innerErr - }) - return ret, err -} - -func DBGetGlobalActiveStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot) (*uint256.Int, error) { - // Retrieve from db. 
- key := DBKeyForGlobalActiveStakeAmountNanos() - globalActiveStakeAmountNanosBytes, err := DBGetWithTxn(txn, snap, key) - if err != nil { - // We don't want to error if the key isn't found. Instead, return nil. - if err == badger.ErrKeyNotFound { - return nil, nil - } - return nil, errors.Wrapf(err, "DBGetGlobalActiveStakeAmountNanosWithTxn: problem retrieving value") - } - - // Decode from bytes. - var globalActiveStakeAmountNanos *uint256.Int - rr := bytes.NewReader(globalActiveStakeAmountNanosBytes) - globalActiveStakeAmountNanos, err = VariableDecodeUint256(rr) - if err != nil { - return nil, errors.Wrapf(err, "DBGetGlobalActiveStakeAmountNanosWithTxn: problem decoding value") - } - return globalActiveStakeAmountNanos, nil -} - func DBPutValidatorWithTxn( txn *badger.Txn, snap *Snapshot, @@ -741,21 +705,6 @@ func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorPKID *PK return nil } -func DBPutGlobalActiveStakeAmountNanosWithTxn( - txn *badger.Txn, - snap *Snapshot, - globalActiveStakeAmountNanos *uint256.Int, - blockHeight uint64, -) error { - if globalActiveStakeAmountNanos == nil { - // This should never happen but is a sanity check. - glog.Errorf("DBPutGlobalActiveStakeAmountNanosWithTxn: called with nil GlobalActiveStakeAmountNanos") - return nil - } - key := DBKeyForGlobalActiveStakeAmountNanos() - return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalActiveStakeAmountNanos)) -} - // // BLOCKCHAIN UTILS // @@ -1316,35 +1265,12 @@ func (bav *UtxoView) _connectUnregisterAsValidator( ) } - // If the validator was active, decrease the GlobalActiveStakeAmountNanos - // by the amount that was unstaked. Do nothing if the validator was jailed. - var prevGlobalActiveStakeAmountNanos *uint256.Int - if prevValidatorEntry.Status() == ValidatorStatusActive { - // Fetch the existing GlobalActiveStakeAmountNanos. 
- prevGlobalActiveStakeAmountNanos, err = bav.GetGlobalActiveStakeAmountNanos() - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: error fetching GlobalActiveStakeAmountNanos: ") - } - // Subtract the amount that was unstaked. - globalActiveStakeAmountNanos, err := SafeUint256().Sub( - prevGlobalActiveStakeAmountNanos, totalUnstakedAmountNanos, - ) - if err != nil { - return 0, 0, nil, errors.Wrapf( - err, "_connectUnregisterAsValidator: error subtracting TotalUnstakedAmountNanos from GlobalActiveStakeAmountNanos: ", - ) - } - // Set the new GlobalActiveStakeAmountNanos. - bav._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) - } - // Create a UTXO operation. utxoOpForTxn := &UtxoOperation{ - Type: OperationTypeUnregisterAsValidator, - PrevValidatorEntry: prevValidatorEntry, - PrevGlobalActiveStakeAmountNanos: prevGlobalActiveStakeAmountNanos, - PrevStakeEntries: prevStakeEntries, - PrevLockedStakeEntries: prevLockedStakeEntries, + Type: OperationTypeUnregisterAsValidator, + PrevValidatorEntry: prevValidatorEntry, + PrevStakeEntries: prevStakeEntries, + PrevLockedStakeEntries: prevLockedStakeEntries, } if err = bav.SanityCheckUnregisterAsValidatorTxn(transactorPKIDEntry.PKID, utxoOpForTxn, totalUnstakedAmountNanos); err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: ") @@ -1424,11 +1350,6 @@ func (bav *UtxoView) _disconnectUnregisterAsValidator( bav._setLockedStakeEntryMappings(prevLockedStakeEntry) } - // Restore the PrevGlobalActiveStakeAmountNanos, if exists. - if operationData.PrevGlobalActiveStakeAmountNanos != nil { - bav._setGlobalActiveStakeAmountNanos(operationData.PrevGlobalActiveStakeAmountNanos) - } - // Disconnect the BasicTransfer. return bav._disconnectBasicTransfer( currentTxn, txHash, utxoOpsForTxn[:operationIndex], blockHeight, @@ -1522,24 +1443,10 @@ func (bav *UtxoView) _connectUnjailValidator( // Set the CurrentValidatorEntry. 
bav._setValidatorEntryMappings(currentValidatorEntry) - // Increase the GlobalActiveStakeAmountNanos. - prevGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectUnjailValidator: error retrieving existing GlobalActiveStakeAmountNanos: ") - } - currentGlobalActiveStakeAmountNanos, err := SafeUint256().Add( - prevGlobalActiveStakeAmountNanos, currentValidatorEntry.TotalStakeAmountNanos, - ) - if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectUnjailValidator: error calculating updated GlobalActiveStakeAmountNanos ") - } - bav._setGlobalActiveStakeAmountNanos(currentGlobalActiveStakeAmountNanos) - // Add a UTXO operation utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ - Type: OperationTypeUnjailValidator, - PrevValidatorEntry: prevValidatorEntry, - PrevGlobalActiveStakeAmountNanos: prevGlobalActiveStakeAmountNanos, + Type: OperationTypeUnjailValidator, + PrevValidatorEntry: prevValidatorEntry, }) return totalInput, totalOutput, utxoOpsForTxn, nil } @@ -1593,13 +1500,6 @@ func (bav *UtxoView) _disconnectUnjailValidator( } bav._setValidatorEntryMappings(prevValidatorEntry) - // Restore the PrevGlobalActiveStakeAmountNanos. - prevGlobalActiveStakeAmountNanos := operationData.PrevGlobalActiveStakeAmountNanos - if prevGlobalActiveStakeAmountNanos == nil { - return errors.New("_disconnectUnjailValidator: PrevGlobalActiveStakeAmountNanos is nil, this should never happen") - } - bav._setGlobalActiveStakeAmountNanos(prevGlobalActiveStakeAmountNanos) - // Disconnect the BasicTransfer. 
return bav._disconnectBasicTransfer( currentTxn, txHash, utxoOpsForTxn[:operationIndex], blockHeight, @@ -1794,27 +1694,6 @@ func (bav *UtxoView) SanityCheckUnregisterAsValidatorTxn( return errors.New("SanityCheckUnregisterAsValidatorTxn: TotalUnstakedAmountNanos doesn't match") } - // Sanity check that the GlobalActiveStakeAmountNanos was decreased - // by amountNanos if the PrevValidatorEntry was active. - if utxoOp.PrevValidatorEntry.Status() == ValidatorStatusActive { - if utxoOp.PrevGlobalActiveStakeAmountNanos == nil { - return errors.New("SanityCheckUnregisterAsValidatorTxn: nil PrevGlobalActiveStakeAmountNanos provided") - } - currentGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() - if err != nil { - return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error retrieving GlobalActiveStakeAmountNanos: ") - } - globalActiveStakeAmountNanosDecrease, err := SafeUint256().Sub(utxoOp.PrevGlobalActiveStakeAmountNanos, currentGlobalActiveStakeAmountNanos) - if err != nil { - return errors.Wrapf(err, "SanityCheckUnregisterAsValidatorTxn: error calculating GlobalActiveStakeAmountNanos decrease: ") - } - if !globalActiveStakeAmountNanosDecrease.Eq(amountNanos) { - return errors.New("SanityCheckUnregisterAsValidatorTxn: GlobalActiveStakeAmountNanos decrease doesn't match") - } - } else if utxoOp.PrevGlobalActiveStakeAmountNanos != nil { - return errors.New("SanityCheckUnregisterAsValidatorTxn: non-nil PrevGlobalActiveStakeAmountNanos provided for inactive validator") - } - return nil } @@ -1919,24 +1798,6 @@ func (bav *UtxoView) GetTopActiveValidatorsByStake(limit uint64) ([]*ValidatorEn return validatorEntries[0:upperBound], nil } -func (bav *UtxoView) GetGlobalActiveStakeAmountNanos() (*uint256.Int, error) { - // Read the GlobalActiveStakeAmountNanos from the UtxoView. 
- if bav.GlobalActiveStakeAmountNanos != nil { - return bav.GlobalActiveStakeAmountNanos.Clone(), nil - } - // If not set, read the GlobalActiveStakeAmountNanos from the db. - globalActiveStakeAmountNanos, err := DBGetGlobalActiveStakeAmountNanos(bav.Handle, bav.Snapshot) - if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetGlobalActiveStakeAmountNanos: ") - } - if globalActiveStakeAmountNanos == nil { - globalActiveStakeAmountNanos = uint256.NewInt() - } - // Cache the GlobalActiveStakeAmountNanos from the db in the UtxoView. - bav._setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos) - return globalActiveStakeAmountNanos, nil -} - func (bav *UtxoView) JailAllInactiveValidators(blockHeight uint64) error { // First, iterate through all of the !isDeleted ValidatorEntries in the UtxoView and // jail any that are inactive. @@ -2084,24 +1945,9 @@ func (bav *UtxoView) JailValidator(validatorEntry *ValidatorEntry) error { // Set ValidatorEntry.JailedAtEpochNumber to the CurrentEpochNumber. validatorEntry.JailedAtEpochNumber = currentEpochNumber - // Remove the validator's stake from the GlobalActiveStakeAmountNanos. - prevGlobalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() - if err != nil { - return errors.Wrapf(err, "UtxoView.JailValidator: error retrieving GlobalActiveStakeAmountNanos: ") - } - currentGlobalActiveStakeAmountNanos, err := SafeUint256().Sub( - prevGlobalActiveStakeAmountNanos, validatorEntry.TotalStakeAmountNanos, - ) - if err != nil { - return errors.Wrapf(err, "UtxoView.JailValidator: error calculating updated GlobalActiveStakeAmountNanos: ") - } - // Store the updated ValidatorEntry. bav._setValidatorEntryMappings(validatorEntry) - // Store the updated GlobalActiveStakeAmountNanos. 
- bav._setGlobalActiveStakeAmountNanos(currentGlobalActiveStakeAmountNanos) - return nil } @@ -2127,15 +1973,6 @@ func (bav *UtxoView) _deleteValidatorEntryMappings(validatorEntry *ValidatorEntr bav._setValidatorEntryMappings(&tombstoneEntry) } -func (bav *UtxoView) _setGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos *uint256.Int) { - // This function shouldn't be called with nil. - if globalActiveStakeAmountNanos == nil { - glog.Errorf("_setGlobalActiveStakeAmountNanos: called with nil entry, this should never happen") - return - } - bav.GlobalActiveStakeAmountNanos = globalActiveStakeAmountNanos.Clone() -} - func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { // Delete all entries in the ValidatorMapKeyToValidatorEntry UtxoView map. for validatorMapKeyIter, validatorEntryIter := range bav.ValidatorPKIDToValidatorEntry { @@ -2178,16 +2015,6 @@ func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHei return nil } -func (bav *UtxoView) _flushGlobalActiveStakeAmountNanosToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { - // If GlobalActiveStakeAmountNanos is nil, then it was never - // set and shouldn't overwrite the value in the db. 
- if bav.GlobalActiveStakeAmountNanos == nil { - return nil - } - - return DBPutGlobalActiveStakeAmountNanosWithTxn(txn, bav.Snapshot, bav.GlobalActiveStakeAmountNanos, blockHeight) -} - // // MEMPOOL UTILS // @@ -2297,6 +2124,14 @@ func (bav *UtxoView) CreateUnjailValidatorTxindexMetadata( return &UnjailValidatorTxindexMetadata{}, affectedPublicKeys } +func SumValidatorEntriesTotalStakeAmountNanos(validatorEntries []*ValidatorEntry) *uint256.Int { + totalStakeAmountNanos := uint256.NewInt() + for _, validatorEntry := range validatorEntries { + totalStakeAmountNanos.Add(totalStakeAmountNanos, validatorEntry.TotalStakeAmountNanos) + } + return totalStakeAmountNanos +} + // // BLS UTILS // diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 628743548..6739781c2 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -24,7 +24,6 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { var registerMetadata *RegisterAsValidatorMetadata var validatorEntry *ValidatorEntry var validatorEntries []*ValidatorEntry - var globalActiveStakeAmountNanos *uint256.Int var err error // Initialize balance model fork heights. 
@@ -228,12 +227,6 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Empty(t, validatorEntries) } - { - // Query: retrieve GlobalActiveStakeAmountNanos - globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) - } { // Happy path: update a validator votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) @@ -287,12 +280,6 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Empty(t, validatorEntries) } - { - // Query: retrieve GlobalActiveStakeAmountNanos - globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) - } // Flush mempool to the db and test rollbacks. require.NoError(t, mempool.universalUtxoView.FlushToDb(blockHeight)) @@ -1300,7 +1287,6 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { var stakeEntry *StakeEntry var lockedStakeEntry *LockedStakeEntry _ = lockedStakeEntry - var globalActiveStakeAmountNanos *uint256.Int var err error // Initialize balance model fork heights. @@ -1392,10 +1378,6 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NoError(t, err) require.NotNil(t, stakeEntry) require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(600)) - - globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(600)) } { // m1 stakes with m0. 
@@ -1412,10 +1394,6 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NoError(t, err) require.NotNil(t, stakeEntry) require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(400)) - - globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(1000)) } { // m1 partially unstakes with m0. @@ -1438,11 +1416,6 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NoError(t, err) require.NotNil(t, lockedStakeEntry) require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(100)) - - // GlobalActiveStakeAmountNanos is updated. - globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt().SetUint64(900)) } { // m0 unregisters as a validator. @@ -1475,11 +1448,6 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { require.NoError(t, err) require.NotNil(t, lockedStakeEntry) require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(400)) - - // GlobalActiveStakeAmountNanos is updated. - globalActiveStakeAmountNanos, err = utxoView().GetGlobalActiveStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, globalActiveStakeAmountNanos, uint256.NewInt()) } // Flush mempool to the db and test rollbacks. diff --git a/lib/db_utils.go b/lib/db_utils.go index 157d75010..b26ba062d 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -486,13 +486,9 @@ type DBPrefixes struct { // Note that we save space by storing a nil value and parsing the ValidatorPKID from the key. PrefixValidatorByStatusAndStake []byte `prefix_id:"[79]" is_state:"true"` - // PrefixGlobalActiveStakeAmountNanos: Retrieve the cumulative stake across all validators. 
- // Prefix -> *uint256.Int - PrefixGlobalActiveStakeAmountNanos []byte `prefix_id:"[80]" is_state:"true"` - // PrefixStakeByValidatorAndStaker: Retrieve a StakeEntry. // Prefix, , -> StakeEntry - PrefixStakeByValidatorAndStaker []byte `prefix_id:"[81]" is_state:"true"` + PrefixStakeByValidatorAndStaker []byte `prefix_id:"[80]" is_state:"true"` // PrefixLockedStakeByValidatorAndStakerAndLockedAt: Retrieve a LockedStakeEntry. // Prefix, , , -> LockedStakeEntry @@ -519,40 +515,40 @@ type DBPrefixes struct { // (CurrentEpoch - LockedAtEpochNumber) = 133 - 123 = 10, which is greater than // cooldown=3. Thus the UnlockStake will succeed, which will result in the // LockedStakeEntry being deleted and 25 DESO being added to the user's balance. - PrefixLockedStakeByValidatorAndStakerAndLockedAt []byte `prefix_id:"[82]" is_state:"true"` + PrefixLockedStakeByValidatorAndStakerAndLockedAt []byte `prefix_id:"[81]" is_state:"true"` // PrefixCurrentEpoch: Retrieve the current EpochEntry. // Prefix -> EpochEntry - PrefixCurrentEpoch []byte `prefix_id:"[83]" is_state:"true"` + PrefixCurrentEpoch []byte `prefix_id:"[82]" is_state:"true"` // PrefixCurrentRandomSeedHash: Retrieve the current RandomSeedHash. // Prefix -> . - PrefixCurrentRandomSeedHash []byte `prefix_id:"[84]" is_state:"true"` + PrefixCurrentRandomSeedHash []byte `prefix_id:"[83]" is_state:"true"` // PrefixSnapshotGlobalParamsEntry: Retrieve a snapshot GlobalParamsEntry by SnapshotAtEpochNumber. // Prefix, -> *GlobalParamsEntry - PrefixSnapshotGlobalParamsEntry []byte `prefix_id:"[85]" is_state:"true"` + PrefixSnapshotGlobalParamsEntry []byte `prefix_id:"[84]" is_state:"true"` // PrefixSnapshotValidatorSetByPKID: Retrieve a ValidatorEntry from a snapshot validator set by // . 
// Prefix, , -> *ValidatorEntry - PrefixSnapshotValidatorSetByPKID []byte `prefix_id:"[86]" is_state:"true"` + PrefixSnapshotValidatorSetByPKID []byte `prefix_id:"[85]" is_state:"true"` // PrefixSnapshotValidatorSetByStake: Retrieve stake-ordered ValidatorEntries from a snapshot validator set // by SnapshotAtEpochNumber. // Prefix, , , -> nil // Note: we parse the ValidatorPKID from the key and the value is nil to save space. - PrefixSnapshotValidatorSetByStake []byte `prefix_id:"[87]" is_state:"true"` + PrefixSnapshotValidatorSetByStake []byte `prefix_id:"[86]" is_state:"true"` // PrefixSnapshotGlobalActiveStakeAmountNanos: Retrieve a snapshot GlobalActiveStakeAmountNanos by SnapshotAtEpochNumber. // Prefix, -> *uint256.Int - PrefixSnapshotGlobalActiveStakeAmountNanos []byte `prefix_id:"[88]" is_state:"true"` + PrefixSnapshotGlobalActiveStakeAmountNanos []byte `prefix_id:"[87]" is_state:"true"` // PrefixSnapshotLeaderSchedule: Retrieve a ValidatorPKID by . // Prefix, , -> ValidatorPKID - PrefixSnapshotLeaderSchedule []byte `prefix_id:"[89]" is_state:"true"` + PrefixSnapshotLeaderSchedule []byte `prefix_id:"[88]" is_state:"true"` - // NEXT_TAG: 90 + // NEXT_TAG: 89 } // StatePrefixToDeSoEncoder maps each state prefix to a DeSoEncoder type that is stored under that prefix. 
@@ -760,35 +756,32 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByStatusAndStake) { // prefix_id:"[79]" return false, nil - } else if bytes.Equal(prefix, Prefixes.PrefixGlobalActiveStakeAmountNanos) { - // prefix_id:"[80]" - return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixStakeByValidatorAndStaker) { - // prefix_id:"[81]" + // prefix_id:"[80]" return true, &StakeEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixLockedStakeByValidatorAndStakerAndLockedAt) { - // prefix_id:"[82]" + // prefix_id:"[81]" return true, &LockedStakeEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixCurrentEpoch) { - // prefix_id:"[83]" + // prefix_id:"[82]" return true, &EpochEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixCurrentRandomSeedHash) { - // prefix_id:"[84]" + // prefix_id:"[83]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalParamsEntry) { - // prefix_id:"[85]" + // prefix_id:"[84]" return true, &GlobalParamsEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetByPKID) { - // prefix_id:"[86]" + // prefix_id:"[85]" return true, &ValidatorEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetByStake) { - // prefix_id:"[87]" + // prefix_id:"[86]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanos) { - // prefix_id:"[88]" + // prefix_id:"[87]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotLeaderSchedule) { - // prefix_id:"[89]" + // prefix_id:"[88]" return true, &PKID{} } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index b3983ad7c..d17f35a50 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -83,11 +83,9 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { bav._setSnapshotValidatorSetEntry(validatorEntry, currentEpochEntry.EpochNumber) } - // 
Snapshot the current GlobalActiveStakeAmountNanos. - globalActiveStakeAmountNanos, err := bav.GetGlobalActiveStakeAmountNanos() - if err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving GlobalActiveStakeAmountNanos: ") - } + // Snapshot the current validator set's total stake. Note, the validator set is already filtered to the top n + // active validators for the epoch. The total stake is the sum of all of the active validators' stakes. + globalActiveStakeAmountNanos := SumValidatorEntriesTotalStakeAmountNanos(validatorSet) bav._setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos, currentEpochEntry.EpochNumber) // Generate + snapshot a leader schedule. From 24f73b6c750b4e93cb1ebdbc29214b02fa947f3c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 11 Jul 2023 15:26:27 -0400 Subject: [PATCH 141/762] PoS Rename SnapshotGlobalActiveStakeAmountNanos to SnapshotValidatorSetTotalStakeAmountNanos (#585) * PoS Rename SnapshotGlobalActiveStakeAmountNanos to SnapshotValidatorSetTotalStakeAmountNanos * Fix build issues --- lib/block_view.go | 14 ++++---- lib/block_view_flush.go | 2 +- lib/db_utils.go | 7 ++-- lib/pos_epoch_complete_hook.go | 2 +- lib/pos_epoch_complete_hook_test.go | 12 +++---- lib/pos_snapshot_entries.go | 52 ++++++++++++++--------------- 6 files changed, 45 insertions(+), 44 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 7192aaf32..4f89f1ac3 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -139,9 +139,9 @@ type UtxoView struct { // the given SnapshotAtEpochNumber. SnapshotValidatorSet map[SnapshotValidatorSetMapKey]*ValidatorEntry - // SnapshotGlobalActiveStakeAmountNanos is a map of SnapshotAtEpochNumber to a GlobalActiveStakeAmountNanos. + // SnapshotValidatorSetTotalStakeAmountNanos is a map of SnapshotAtEpochNumber to a GlobalActiveStakeAmountNanos. 
// It contains the snapshot value of the GlobalActiveStakeAmountNanos at the given SnapshotAtEpochNumber. - SnapshotGlobalActiveStakeAmountNanos map[uint64]*uint256.Int + SnapshotValidatorSetTotalStakeAmountNanos map[uint64]*uint256.Int // SnapshotLeaderSchedule is a map of to a ValidatorPKID. // It contains the PKID of the validator at the given index in the leader schedule @@ -255,8 +255,8 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // SnapshotValidatorSet bav.SnapshotValidatorSet = make(map[SnapshotValidatorSetMapKey]*ValidatorEntry) - // SnapshotGlobalActiveStakeAmountNanos - bav.SnapshotGlobalActiveStakeAmountNanos = make(map[uint64]*uint256.Int) + // SnapshotValidatorSetTotalStakeAmountNanos + bav.SnapshotValidatorSetTotalStakeAmountNanos = make(map[uint64]*uint256.Int) // SnapshotLeaderSchedule bav.SnapshotLeaderSchedule = make(map[SnapshotLeaderScheduleMapKey]*PKID) @@ -552,9 +552,9 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.SnapshotValidatorSet[mapKey] = validatorEntry.Copy() } - // Copy the SnapshotGlobalActiveStakeAmountNanos - for epochNumber, globalActiveStakeAmountNanos := range bav.SnapshotGlobalActiveStakeAmountNanos { - newView.SnapshotGlobalActiveStakeAmountNanos[epochNumber] = globalActiveStakeAmountNanos.Clone() + // Copy the SnapshotValidatorSetTotalStakeAmountNanos + for epochNumber, globalActiveStakeAmountNanos := range bav.SnapshotValidatorSetTotalStakeAmountNanos { + newView.SnapshotValidatorSetTotalStakeAmountNanos[epochNumber] = globalActiveStakeAmountNanos.Clone() } // Copy the SnapshotLeaderSchedule diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 9e179a706..0e41e9ae5 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -165,7 +165,7 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushSnapshotValidatorSetToDbWithTxn(txn, blockHeight); err != nil { return err } - if err := 
bav._flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { + if err := bav._flushSnapshotValidatorSetTotalStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { return err } if err := bav._flushSnapshotLeaderScheduleToDbWithTxn(txn, blockHeight); err != nil { diff --git a/lib/db_utils.go b/lib/db_utils.go index b26ba062d..a4cac33e8 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -540,9 +540,10 @@ type DBPrefixes struct { // Note: we parse the ValidatorPKID from the key and the value is nil to save space. PrefixSnapshotValidatorSetByStake []byte `prefix_id:"[86]" is_state:"true"` - // PrefixSnapshotGlobalActiveStakeAmountNanos: Retrieve a snapshot GlobalActiveStakeAmountNanos by SnapshotAtEpochNumber. + // PrefixSnapshotValidatorSetTotalStakeAmountNanos: Retrieve a snapshot of the validator set's total amount of + // staked DESO by SnapshotAtEpochNumber. // Prefix, -> *uint256.Int - PrefixSnapshotGlobalActiveStakeAmountNanos []byte `prefix_id:"[87]" is_state:"true"` + PrefixSnapshotValidatorSetTotalStakeAmountNanos []byte `prefix_id:"[87]" is_state:"true"` // PrefixSnapshotLeaderSchedule: Retrieve a ValidatorPKID by . 
// Prefix, , -> ValidatorPKID @@ -777,7 +778,7 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetByStake) { // prefix_id:"[86]" return false, nil - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanos) { + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetTotalStakeAmountNanos) { // prefix_id:"[87]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotLeaderSchedule) { diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index d17f35a50..bb087c6a6 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -86,7 +86,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Snapshot the current validator set's total stake. Note, the validator set is already filtered to the top n // active validators for the epoch. The total stake is the sum of all of the active validators' stakes. globalActiveStakeAmountNanos := SumValidatorEntriesTotalStakeAmountNanos(validatorSet) - bav._setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos, currentEpochEntry.EpochNumber) + bav._setSnapshotValidatorSetTotalStakeAmountNanos(globalActiveStakeAmountNanos, currentEpochEntry.EpochNumber) // Generate + snapshot a leader schedule. leaderSchedule, err := bav.GenerateLeaderSchedule() diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index a41f44b72..20a637c30 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -159,10 +159,10 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Empty(t, validatorEntries) - // Test SnapshotGlobalActiveStakeAmountNanos is zero. - snapshotGlobalActiveStakeAmountNanos, err := utxoView().GetSnapshotGlobalActiveStakeAmountNanos() + // Test SnapshotValidatorSetTotalStakeAmountNanos is zero. 
+ snapshotValidatorSetTotalStakeAmountNanos, err := utxoView().GetSnapshotValidatorSetTotalStakeAmountNanos() require.NoError(t, err) - require.True(t, snapshotGlobalActiveStakeAmountNanos.IsZero()) + require.True(t, snapshotValidatorSetTotalStakeAmountNanos.IsZero()) // Test SnapshotLeaderSchedule is nil. for index := range validatorPKIDs { @@ -311,10 +311,10 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(700)) require.Equal(t, validatorEntries[6].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) - // Test SnapshotGlobalActiveStakeAmountNanos is populated. - snapshotGlobalActiveStakeAmountNanos, err := utxoView().GetSnapshotGlobalActiveStakeAmountNanos() + // Test SnapshotValidatorSetTotalStakeAmountNanos is populated. + snapshotValidatorSetTotalStakeAmountNanos, err := utxoView().GetSnapshotValidatorSetTotalStakeAmountNanos() require.NoError(t, err) - require.Equal(t, snapshotGlobalActiveStakeAmountNanos, uint256.NewInt().SetUint64(2800)) + require.Equal(t, snapshotValidatorSetTotalStakeAmountNanos, uint256.NewInt().SetUint64(2800)) // Test SnapshotLeaderSchedule is populated. for index := range validatorPKIDs { diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 4fea4508f..33d4cbbd5 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -563,57 +563,57 @@ func DBEnumerateAllCurrentValidators(handle *badger.DB, pkidsToSkip []*PKID) ([] } // -// SnapshotGlobalActiveStakeAmountNanos +// SnapshotValidatorSetTotalStakeAmountNanos // -func (bav *UtxoView) GetSnapshotGlobalActiveStakeAmountNanos() (*uint256.Int, error) { +func (bav *UtxoView) GetSnapshotValidatorSetTotalStakeAmountNanos() (*uint256.Int, error) { // Calculate the SnapshotEpochNumber. 
snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotGlobalActiveStakeAmountNanos: problem calculating SnapshotEpochNumber: ") + return nil, errors.Wrapf(err, "GetSnapshotValidatorSetTotalStakeAmountNanos: problem calculating SnapshotEpochNumber: ") } // Check the UtxoView first. - if globalActiveStakeAmountNanos, exists := bav.SnapshotGlobalActiveStakeAmountNanos[snapshotAtEpochNumber]; exists { + if globalActiveStakeAmountNanos, exists := bav.SnapshotValidatorSetTotalStakeAmountNanos[snapshotAtEpochNumber]; exists { return globalActiveStakeAmountNanos.Clone(), nil } // If we don't have it in the UtxoView, check the db. - globalActiveStakeAmountNanos, err := DBGetSnapshotGlobalActiveStakeAmountNanos(bav.Handle, bav.Snapshot, snapshotAtEpochNumber) + globalActiveStakeAmountNanos, err := DBGetSnapshotValidatorSetTotalStakeAmountNanos(bav.Handle, bav.Snapshot, snapshotAtEpochNumber) if err != nil { return nil, errors.Wrapf( err, - "GetSnapshotGlobalActiveStakeAmountNanos: problem retrieving SnapshotGlobalActiveStakeAmountNanos from db: ", + "GetSnapshotValidatorSetTotalStakeAmountNanos: problem retrieving SnapshotValidatorSetTotalStakeAmountNanos from db: ", ) } if globalActiveStakeAmountNanos == nil { globalActiveStakeAmountNanos = uint256.NewInt() } // Cache the result in the UtxoView. 
- bav._setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos, snapshotAtEpochNumber) + bav._setSnapshotValidatorSetTotalStakeAmountNanos(globalActiveStakeAmountNanos, snapshotAtEpochNumber) return globalActiveStakeAmountNanos, nil } -func (bav *UtxoView) _setSnapshotGlobalActiveStakeAmountNanos(globalActiveStakeAmountNanos *uint256.Int, snapshotAtEpochNumber uint64) { +func (bav *UtxoView) _setSnapshotValidatorSetTotalStakeAmountNanos(globalActiveStakeAmountNanos *uint256.Int, snapshotAtEpochNumber uint64) { if globalActiveStakeAmountNanos == nil { - glog.Errorf("_setSnapshotGlobalActiveStakeAmountNanos: called with nil entry, this should never happen") + glog.Errorf("_setSnapshotValidatorSetTotalStakeAmountNanos: called with nil entry, this should never happen") return } - bav.SnapshotGlobalActiveStakeAmountNanos[snapshotAtEpochNumber] = globalActiveStakeAmountNanos.Clone() + bav.SnapshotValidatorSetTotalStakeAmountNanos[snapshotAtEpochNumber] = globalActiveStakeAmountNanos.Clone() } -func (bav *UtxoView) _flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { - for snapshotAtEpochNumber, globalActiveStakeAmountNanos := range bav.SnapshotGlobalActiveStakeAmountNanos { +func (bav *UtxoView) _flushSnapshotValidatorSetTotalStakeAmountNanosToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + for snapshotAtEpochNumber, globalActiveStakeAmountNanos := range bav.SnapshotValidatorSetTotalStakeAmountNanos { if globalActiveStakeAmountNanos == nil { return fmt.Errorf( - "_flushSnapshotGlobalActiveStakeToDb: found nil entry for EpochNumber %d, this should never happen", + "_flushSnapshotValidatorSetTotalStakeAmountNanosToDbWithTxn: found nil entry for EpochNumber %d, this should never happen", snapshotAtEpochNumber, ) } - if err := DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn( + if err := DBPutSnapshotValidatorSetTotalStakeAmountNanosWithTxn( txn, bav.Snapshot, globalActiveStakeAmountNanos, 
snapshotAtEpochNumber, blockHeight, ); err != nil { return errors.Wrapf( err, - "_flushSnapshotGlobalActiveStakeToDb: problem setting SnapshotGlobalActiveStake for EpochNumber %d: ", + "_flushSnapshotValidatorSetTotalStakeAmountNanosToDbWithTxn: problem setting SnapshotValidatorSetTotalStake for EpochNumber %d: ", snapshotAtEpochNumber, ) } @@ -621,32 +621,32 @@ func (bav *UtxoView) _flushSnapshotGlobalActiveStakeAmountNanosToDbWithTxn(txn * return nil } -func DBKeyForSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumber uint64) []byte { - key := append([]byte{}, Prefixes.PrefixSnapshotGlobalActiveStakeAmountNanos...) +func DBKeyForSnapshotValidatorSetTotalStakeAmountNanos(snapshotAtEpochNumber uint64) []byte { + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorSetTotalStakeAmountNanos...) key = append(key, EncodeUint64(snapshotAtEpochNumber)...) return key } -func DBGetSnapshotGlobalActiveStakeAmountNanos(handle *badger.DB, snap *Snapshot, snapshotAtEpochNumber uint64) (*uint256.Int, error) { +func DBGetSnapshotValidatorSetTotalStakeAmountNanos(handle *badger.DB, snap *Snapshot, snapshotAtEpochNumber uint64) (*uint256.Int, error) { var ret *uint256.Int err := handle.View(func(txn *badger.Txn) error { var innerErr error - ret, innerErr = DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn(txn, snap, snapshotAtEpochNumber) + ret, innerErr = DBGetSnapshotValidatorSetTotalStakeAmountNanosWithTxn(txn, snap, snapshotAtEpochNumber) return innerErr }) return ret, err } -func DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot, snapshotAtEpochNumber uint64) (*uint256.Int, error) { +func DBGetSnapshotValidatorSetTotalStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot, snapshotAtEpochNumber uint64) (*uint256.Int, error) { // Retrieve from db. 
- key := DBKeyForSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumber) + key := DBKeyForSnapshotValidatorSetTotalStakeAmountNanos(snapshotAtEpochNumber) globalActiveStakeAmountNanosBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { // We don't want to error if the key isn't found. Instead, return nil. if err == badger.ErrKeyNotFound { return nil, nil } - return nil, errors.Wrapf(err, "DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn: problem retrieving value") + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetTotalStakeAmountNanosWithTxn: problem retrieving value") } // Decode from bytes. @@ -654,12 +654,12 @@ func DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn(txn *badger.Txn, snap *Sna rr := bytes.NewReader(globalActiveStakeAmountNanosBytes) globalActiveStakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { - return nil, errors.Wrapf(err, "DBGetSnapshotGlobalActiveStakeAmountNanosWithTxn: problem decoding value") + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetTotalStakeAmountNanosWithTxn: problem decoding value") } return globalActiveStakeAmountNanos, nil } -func DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn( +func DBPutSnapshotValidatorSetTotalStakeAmountNanosWithTxn( txn *badger.Txn, snap *Snapshot, globalActiveStakeAmountNanos *uint256.Int, @@ -668,10 +668,10 @@ func DBPutSnapshotGlobalActiveStakeAmountNanosWithTxn( ) error { if globalActiveStakeAmountNanos == nil { // This should never happen but is a sanity check. 
- glog.Errorf("DBPutSnapshotGlobalActiveStakeAmountNanos: called with nil GlobalActiveStake, this should never happen") + glog.Errorf("DBPutSnapshotValidatorSetTotalStakeAmountNanosWithTxn: called with nil GlobalActiveStake, this should never happen") return nil } - key := DBKeyForSnapshotGlobalActiveStakeAmountNanos(snapshotAtEpochNumber) + key := DBKeyForSnapshotValidatorSetTotalStakeAmountNanos(snapshotAtEpochNumber) return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalActiveStakeAmountNanos)) } From a5b59599bc05059f4e335f12012b3ba41b53f04b Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 11 Jul 2023 17:48:36 -0400 Subject: [PATCH 142/762] Ln/merge mem reduction into feature pos (#588) * fix pg genesis block (#554) * fix pg genesis block * add TestPGGenesisBlock to gh actions --------- Co-authored-by: Lazy Nina <> * [stable] Release 3.4.4 * Fix copy paste mistake in function comment (#574) * reduce memory requirements for tests (#587) Co-authored-by: Lazy Nina <> --------- Co-authored-by: Lazy Nina <> Co-authored-by: Peter Elmers --- .github/workflows/ci.yml | 3 ++- integration_testing/tools.go | 3 ++- lib/block_view_bitcoin_test.go | 10 +++++----- lib/block_view_test.go | 4 ++-- lib/block_view_types_test.go | 2 +- lib/blockchain.go | 6 ++++-- lib/blockchain_test.go | 34 ++++++++++++++++++++++++++++------ lib/db_utils.go | 9 ++++++--- lib/db_utils_test.go | 4 ++-- lib/load_test.go | 2 +- lib/mempool.go | 25 +++++++++++++++++++------ lib/mempool_test.go | 10 +++++----- lib/server.go | 4 ++-- lib/snapshot.go | 5 ++++- 14 files changed, 83 insertions(+), 38 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 66d11e57c..afb4dc21c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -84,7 +84,8 @@ jobs: go test -run TestAssociations -v ./lib && go test -run TestDAOCoinLimitOrder -v ./lib && go test -run TestFreezingPosts -v ./lib && - go test -run 
TestBalanceModelAssociations -v ./lib + go test -run TestBalanceModelAssociations -v ./lib && + go test -run TestPGGenesisBlock -v ./lib - name: Rollback migrations run: go run scripts/migrate.go rollback diff --git a/integration_testing/tools.go b/integration_testing/tools.go index cb7fe448c..c73b82873 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -399,7 +399,8 @@ func listenForBlockHeight(t *testing.T, node *cmd.Node, height uint32, signal ch }() } -// listenForBlockHeight busy-waits until the node's block tip reaches provided height. +// disconnectAtBlockHeight busy-waits until the node's block tip reaches provided height, and then disconnects +// from the provided bridge. func disconnectAtBlockHeight(t *testing.T, syncingNode *cmd.Node, bridge *ConnectionBridge, height uint32) { listener := make(chan bool) listenForBlockHeight(t, syncingNode, height, listener) diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index cb037f1d4..0fc04547b 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -74,7 +74,7 @@ func _dumpAndLoadMempool(t *testing.T, mempool *DeSoMempool) { newMempool := NewDeSoMempool( mempool.bc, 0, /* rateLimitFeeRateNanosPerKB */ 0 /* minFeeRateNanosPerKB */, "", true, - mempool.dataDir, "") + mempool.dataDir, "", mempool.useDefaultBadgerOptions) mempool.mempoolDir = "" newMempool.mempoolDir = mempoolDir newMempool.LoadTxnsFromDB() @@ -370,7 +370,7 @@ func TestBitcoinExchange(t *testing.T) { newMP := NewDeSoMempool(chain, 0, /* rateLimitFeeRateNanosPerKB */ 0, /* minFeeRateNanosPerKB */ "" /*blockCypherAPIKey*/, false, - "" /*dataDir*/, "") + "" /*dataDir*/, "", true) mempool.resetPool(newMP) // Validating the first Bitcoin burn transaction via a UtxoView should @@ -1076,7 +1076,7 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { newMP := NewDeSoMempool(chain, 0, /* rateLimitFeeRateNanosPerKB */ 0, /* minFeeRateNanosPerKB */ "" /*blockCypherAPIKey*/, 
false, - "" /*dataDir*/, "") + "" /*dataDir*/, "", true) mempool.resetPool(newMP) //// Validating the first Bitcoin burn transaction via a UtxoView should @@ -1808,7 +1808,7 @@ func TestSpendOffOfUnminedTxnsBitcoinExchange(t *testing.T) { newMP := NewDeSoMempool(chain, 0, /* rateLimitFeeRateNanosPerKB */ 0, /* minFeeRateNanosPerKB */ "" /*blockCypherAPIKey*/, false, - "" /*dataDir*/, "") + "" /*dataDir*/, "", true) mempool.resetPool(newMP) // The amount of work on the first burn transaction should be zero. @@ -2419,7 +2419,7 @@ func TestBitcoinExchangeWithAmountNanosNonZeroAtGenesis(t *testing.T) { newMP := NewDeSoMempool(chain, 0, /* rateLimitFeeRateNanosPerKB */ 0, /* minFeeRateNanosPerKB */ "" /*blockCypherAPIKey*/, false, - "" /*dataDir*/, "") + "" /*dataDir*/, "", true) mempool.resetPool(newMP) // The amount of work on the first burn transaction should be zero. diff --git a/lib/block_view_test.go b/lib/block_view_test.go index dc0cc8516..bcc57b653 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -418,7 +418,7 @@ func (tes *transactionTestSuite) InitializeChainAndGetTestMeta(useBadger bool, u pgPort = config.testPostgresPort } chain, params, embpg = NewLowDifficultyBlockchainWithParamsAndDb(tes.t, &DeSoTestnetParams, - true, pgPort) + true, pgPort, false) mempool, miner = NewTestMiner(config.t, chain, params, true /*isSender*/) pg = chain.postgres db = chain.db @@ -1431,7 +1431,7 @@ func TestUpdateGlobalParams(t *testing.T) { txn := _assembleBasicTransferTxnFullySigned(t, chain, 200, 200, m0Pub, moneyPkString, m0Priv, mempool) txn.TxnNonce.ExpirationBlockHeight = uint64(chain.blockTip().Height + 1 + 5001) _signTxn(t, txn, m0Priv) - newMP := NewDeSoMempool(chain, 0, 0, "", true, "", "") + newMP := NewDeSoMempool(chain, 0, 0, "", true, "", "", true) _, _, err = newMP.TryAcceptTransaction(txn, false, false) require.Error(err) require.Contains(err.Error(), TxErrorNonceExpirationBlockHeightOffsetExceeded) diff --git a/lib/block_view_types_test.go 
b/lib/block_view_types_test.go index db66e746e..46bf612ad 100644 --- a/lib/block_view_types_test.go +++ b/lib/block_view_types_test.go @@ -263,7 +263,7 @@ func TestUtxoEntryEncodeDecode(t *testing.T) { newMP := NewDeSoMempool(chain, 0, /* rateLimitFeeRateNanosPerKB */ 0, /* minFeeRateNanosPerKB */ "" /*blockCypherAPIKey*/, false, - "" /*dataDir*/, "") + "" /*dataDir*/, "", true) mempool.resetPool(newMP) // Validating the first Bitcoin burn transaction via a UtxoView should diff --git a/lib/blockchain.go b/lib/blockchain.go index 59f38840e..7c2586229 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -528,9 +528,11 @@ func (bc *Blockchain) _initChain() error { if bc.postgres != nil { err = bc.postgres.InitGenesisBlock(bc.params, bc.db) - } else { - err = InitDbWithDeSoGenesisBlock(bc.params, bc.db, bc.eventManager, bc.snapshot) + if err != nil { + return errors.Wrapf(err, "_initChain: Problem initializing postgres with genesis block") + } } + err = InitDbWithDeSoGenesisBlock(bc.params, bc.db, bc.eventManager, bc.snapshot, bc.postgres) if err != nil { return errors.Wrapf(err, "_initChain: Problem initializing db with genesis block") } diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index 11fa941a4..7928a51c7 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -184,7 +184,7 @@ func AppendToMemLog(t *testing.T, prefix string) { f, err := os.OpenFile("mem.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err == nil { defer f.Close() - if _, err := f.WriteString(fmt.Sprintf("%s\t%s\tMemory Usage\t%v\n", prefix, t.Name(), float64(mem.Alloc)/float64(1e9))); err != nil { + if _, err := f.WriteString(fmt.Sprintf("%s\t%s\tMemory Usage\t%v\tTotal Alloc\t%v\n", prefix, t.Name(), float64(mem.Alloc)/float64(1e9), float64(mem.TotalAlloc)/float64(1e9))); err != nil { log.Println(err) } } @@ -205,11 +205,11 @@ func NewLowDifficultyBlockchainWithParams(t *testing.T, params *DeSoParams) ( // Set the number of txns per view regeneration to one 
while creating the txns ReadOnlyUtxoViewRegenerationIntervalTxns = 1 - chain, params, _ := NewLowDifficultyBlockchainWithParamsAndDb(t, params, len(os.Getenv("POSTGRES_URI")) > 0, 0) + chain, params, _ := NewLowDifficultyBlockchainWithParamsAndDb(t, params, len(os.Getenv("POSTGRES_URI")) > 0, 0, false) return chain, params, chain.db } -func NewLowDifficultyBlockchainWithParamsAndDb(t *testing.T, params *DeSoParams, usePostgres bool, postgresPort uint32) ( +func NewLowDifficultyBlockchainWithParamsAndDb(t *testing.T, params *DeSoParams, usePostgres bool, postgresPort uint32, useProvidedParams bool) ( *Blockchain, *DeSoParams, *embeddedpostgres.EmbeddedPostgres) { TestDeSoEncoderSetup(t) AppendToMemLog(t, "START") @@ -237,13 +237,16 @@ func NewLowDifficultyBlockchainWithParamsAndDb(t *testing.T, params *DeSoParams, } timesource := chainlib.NewMedianTime() - testParams := NewTestParams(params) + testParams := *params + if !useProvidedParams { + testParams = NewTestParams(params) + } // Temporarily modify the seed balances to make a specific public // key have some DeSo var snap *Snapshot if !usePostgres { - snap, err, _ = NewSnapshot(db, dbDir, SnapshotBlockHeightPeriod, false, false, &testParams, false) + snap, err, _ = NewSnapshot(db, dbDir, SnapshotBlockHeightPeriod, false, false, &testParams, false, true) if err != nil { log.Fatal(err) } @@ -260,6 +263,12 @@ func NewLowDifficultyBlockchainWithParamsAndDb(t *testing.T, params *DeSoParams, snap.Stop() CleanUpBadger(snap.SnapshotDb) } + if embpg != nil { + err = embpg.Stop() + if err != nil { + glog.Errorf("Error stopping embedded pg: %v", err) + } + } CleanUpBadger(db) TestDeSoEncoderShutdown(t) AppendToMemLog(t, "CLEANUP_END") @@ -324,7 +333,7 @@ func NewTestMiner(t *testing.T, chain *Blockchain, params *DeSoParams, isSender mempool := NewDeSoMempool( chain, 0, /* rateLimitFeeRateNanosPerKB */ 0 /* minFeeRateNanosPerKB */, "", true, - "" /*dataDir*/, "") + "" /*dataDir*/, "", true) minerPubKeys := []string{} if 
isSender { minerPubKeys = append(minerPubKeys, senderPkString) @@ -1742,3 +1751,16 @@ func TestForbiddenBlockSignaturePubKey(t *testing.T) { require.Error(err) require.Contains(err.Error(), RuleErrorForbiddenBlockProducerPublicKey) } + +func TestPGGenesisBlock(t *testing.T) { + // We skip this test in buildkite CI, but include it in GH actions postgres testing. + // Comment out this conditional to test locally. + if len(os.Getenv("POSTGRES_URI")) == 0 { + return + } + chain, params, _ := NewLowDifficultyBlockchainWithParamsAndDb(t, &DeSoTestnetParams, true, 5435, true) + for _, seedBalance := range params.SeedBalances { + bal := chain.postgres.GetBalance(NewPublicKey(seedBalance.PublicKey)) + require.Equal(t, bal, seedBalance.AmountNanos) + } +} diff --git a/lib/db_utils.go b/lib/db_utils.go index c6a28f06f..7980bdc8a 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -5096,7 +5096,7 @@ func DbBulkDeleteHeightHashToNodeInfo(handle *badger.DB, snap *Snapshot, // InitDbWithGenesisBlock initializes the database to contain only the genesis // block. func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, - eventManager *EventManager, snap *Snapshot) error { + eventManager *EventManager, snap *Snapshot, postgres *Postgres) error { // Construct a node for the genesis block. Its height is zero and it has // no parents. Its difficulty should be set to the initial // difficulty specified in the parameters and it should be assumed to be @@ -5154,7 +5154,7 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, // think things are initialized because we set the best block hash at the // top. We should fix this at some point so that an error in this step // wipes out the best hash. 
- utxoView, err := NewUtxoView(handle, params, nil, snap) + utxoView, err := NewUtxoView(handle, params, postgres, snap) if err != nil { return fmt.Errorf( "InitDbWithDeSoGenesisBlock: Error initializing UtxoView") @@ -5224,7 +5224,6 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, UtxoOps: utxoOpsForBlock, }) } - // Flush all the data in the view. err = utxoView.FlushToDb(0) if err != nil { @@ -9849,6 +9848,10 @@ func PerformanceBadgerOptions(dir string) badger.Options { return opts } +func DefaultBadgerOptions(dir string) badger.Options { + return badger.DefaultOptions(dir) +} + // --------------------------------------------- // Associations // --------------------------------------------- diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index a0e063deb..ccc69a7b3 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -77,7 +77,7 @@ func GetTestBadgerDb() (_db *badger.DB, _dir string) { } // Open a badgerdb in a temporary directory. - opts := PerformanceBadgerOptions(dir) + opts := DefaultBadgerOptions(dir) opts.Dir = dir opts.ValueDir = dir // Turn off logging for tests. @@ -196,7 +196,7 @@ func TestInitDbWithGenesisBlock(t *testing.T) { db, _ := GetTestBadgerDb() defer CleanUpBadger(db) - err := InitDbWithDeSoGenesisBlock(&DeSoTestnetParams, db, nil, nil) + err := InitDbWithDeSoGenesisBlock(&DeSoTestnetParams, db, nil, nil, nil) require.NoError(err) // Check the block index. 
diff --git a/lib/load_test.go b/lib/load_test.go index 0d8a20dd4..af413511c 100644 --- a/lib/load_test.go +++ b/lib/load_test.go @@ -151,7 +151,7 @@ func TestComputeMaxTPS(t *testing.T) { newMP := NewDeSoMempool(mempool.bc, 0, /* rateLimitFeeRateNanosPerKB */ 0, /* minFeeRateNanosPerKB */ "" /*blockCypherAPIKey*/, false, - "" /*dataDir*/, "") + "" /*dataDir*/, "", true) mempool.resetPool(newMP) { timeStart := time.Now() diff --git a/lib/mempool.go b/lib/mempool.go index 6e7373986..748b5977c 100644 --- a/lib/mempool.go +++ b/lib/mempool.go @@ -255,6 +255,17 @@ type DeSoMempool struct { // We pass a copy of the data dir flag to the tx pool so that we can instantiate // temp badger db instances and dump mempool txns to them. dataDir string + + // set to true for tests. This lowers the memory requirements of the mempool + // by using DefaultBadgerOptions instead of PerformanceBadgerOptions. + useDefaultBadgerOptions bool +} + +func (mp *DeSoMempool) getBadgerOptions(dir string) badger.Options { + if mp.useDefaultBadgerOptions { + return DefaultBadgerOptions(dir) + } + return PerformanceBadgerOptions(dir) } // See comment on RemoveUnconnectedTxn. The mempool lock must be called for writing @@ -321,6 +332,7 @@ func (mp *DeSoMempool) resetPool(newPool *DeSoMempool) { mp.backupUniversalUtxoView = newPool.backupUniversalUtxoView mp.universalUtxoView = newPool.universalUtxoView mp.universalTransactionList = newPool.universalTransactionList + mp.useDefaultBadgerOptions = newPool.useDefaultBadgerOptions // We don't adjust blockCypherAPIKey or blockCypherCheckDoubleSpendChan // since those should be unaffected @@ -381,7 +393,7 @@ func (mp *DeSoMempool) UpdateAfterConnectBlock(blk *MsgDeSoBlock) (_txnsAddedToM 0, /* minFeeRateNanosPerKB */ "", /*blockCypherAPIKey*/ false, /*runReadOnlyViewUpdater*/ - "" /*dataDir*/, "") + "" /*dataDir*/, "", mp.useDefaultBadgerOptions) // Get all the transactions from the old pool object. 
oldMempoolTxns, oldUnconnectedTxns, err := mp._getTransactionsOrderedByTimeAdded() @@ -485,7 +497,7 @@ func (mp *DeSoMempool) UpdateAfterDisconnectBlock(blk *MsgDeSoBlock) { newPool := NewDeSoMempool(mp.bc, 0, /* rateLimitFeeRateNanosPerKB */ 0, /* minFeeRateNanosPerKB */ "" /*blockCypherAPIKey*/, false, - "" /*dataDir*/, "") + "" /*dataDir*/, "", mp.useDefaultBadgerOptions) // Add the transactions from the block to the new pool (except for the block reward, // which should always be the first transaction). Break out if we encounter @@ -767,7 +779,7 @@ func (mp *DeSoMempool) OpenTempDBAndDumpTxns() error { if err != nil { return fmt.Errorf("OpenTempDBAndDumpTxns: Error making top-level dir: %v", err) } - tempMempoolDBOpts := PerformanceBadgerOptions(tempMempoolDBDir) + tempMempoolDBOpts := mp.getBadgerOptions(tempMempoolDBDir) tempMempoolDBOpts.ValueDir = tempMempoolDBDir tempMempoolDB, err := badger.Open(tempMempoolDBOpts) if err != nil { @@ -2388,7 +2400,7 @@ func (mp *DeSoMempool) inefficientRemoveTransaction(tx *MsgDeSoTxn) { newPool := NewDeSoMempool(mp.bc, 0, /* rateLimitFeeRateNanosPerKB */ 0, /* minFeeRateNanosPerKB */ "" /*blockCypherAPIKey*/, false, - "" /*dataDir*/, "") + "" /*dataDir*/, "", mp.useDefaultBadgerOptions) // At this point the block txns have been added to the new pool. Now we need to // add the txns from the original pool. Start by fetching them in slice form. oldMempoolTxns, oldUnconnectedTxns, err := mp._getTransactionsOrderedByTimeAdded() @@ -2562,7 +2574,7 @@ func (mp *DeSoMempool) LoadTxnsFromDB() { } // If we make it this far, we found a mempool dump to load. Woohoo! 
- tempMempoolDBOpts := PerformanceBadgerOptions(savedTxnsDir) + tempMempoolDBOpts := mp.getBadgerOptions(savedTxnsDir) tempMempoolDBOpts.ValueDir = savedTxnsDir glog.Infof("LoadTxnsFrom: Opening new temp db %v", savedTxnsDir) tempMempoolDB, err := badger.Open(tempMempoolDBOpts) @@ -2600,7 +2612,7 @@ func (mp *DeSoMempool) Stop() { // Create a new pool with no transactions in it. func NewDeSoMempool(_bc *Blockchain, _rateLimitFeerateNanosPerKB uint64, _minFeerateNanosPerKB uint64, _blockCypherAPIKey string, - _runReadOnlyViewUpdater bool, _dataDir string, _mempoolDumpDir string) *DeSoMempool { + _runReadOnlyViewUpdater bool, _dataDir string, _mempoolDumpDir string, useDefaultBadgerOptions bool) *DeSoMempool { utxoView, _ := NewUtxoView(_bc.db, _bc.params, _bc.postgres, _bc.snapshot) backupUtxoView, _ := NewUtxoView(_bc.db, _bc.params, _bc.postgres, _bc.snapshot) @@ -2624,6 +2636,7 @@ func NewDeSoMempool(_bc *Blockchain, _rateLimitFeerateNanosPerKB uint64, readOnlyUniversalTransactionMap: make(map[BlockHash]*MempoolTx), readOnlyOutpoints: make(map[UtxoKey]*MsgDeSoTxn), dataDir: _dataDir, + useDefaultBadgerOptions: useDefaultBadgerOptions, } if newPool.mempoolDir != "" { diff --git a/lib/mempool_test.go b/lib/mempool_test.go index d09be0241..d7670034d 100644 --- a/lib/mempool_test.go +++ b/lib/mempool_test.go @@ -54,7 +54,7 @@ func TestMempoolLongChainOfDependencies(t *testing.T) { mp := NewDeSoMempool( chain, 0, /* rateLimitFeeRateNanosPerKB */ 0 /* minFeeRateNanosPerKB */, "", true, - "" /*dataDir*/, "") + "" /*dataDir*/, "", true) _, err := mp.processTransaction(txn1, false /*allowUnconnectedTxn*/, false /*rateLimit*/, 0 /*peerID*/, true /*verifySignatures*/) require.NoError(err) @@ -118,7 +118,7 @@ func TestMempoolRateLimit(t *testing.T) { mpNoMinFees := NewDeSoMempool( chain, 0, /* rateLimitFeeRateNanosPerKB */ 0 /* minFeeRateNanosPerKB */, "", true, - "" /*dataDir*/, "") + "" /*dataDir*/, "", true) // Create a transaction that sends 1 DeSo to the recipient as 
its // zeroth output. @@ -134,7 +134,7 @@ func TestMempoolRateLimit(t *testing.T) { mpWithMinFee := NewDeSoMempool( chain, 0, /* rateLimitFeeRateNanosPerKB */ 100 /* minFeeRateNanosPerKB */, "", true, - "" /*dataDir*/, "") + "" /*dataDir*/, "", true) _, err = mpWithMinFee.processTransaction(txn1, false /*allowUnconnectedTxn*/, true /*rateLimit*/, 0 /*peerID*/, false /*verifySignatures*/) require.Error(err) require.Contains(err.Error(), TxErrorInsufficientFeeMinFee) @@ -183,7 +183,7 @@ func TestMempoolRateLimit(t *testing.T) { mpWithRateLimit := NewDeSoMempool( chain, 100, /* rateLimitFeeRateNanosPerKB */ 0 /* minFeeRateNanosPerKB */, "", true, - "" /*dataDir*/, "") + "" /*dataDir*/, "", true) processingErrors := []error{} for _, txn := range txnsCreated { _, err := mpWithRateLimit.processTransaction(txn, false /*allowUnconnectedTxn*/, true /*rateLimit*/, 0 /*peerID*/, false /*verifySignatures*/) @@ -311,7 +311,7 @@ func TestMempoolAugmentedUtxoViewTransactionChain(t *testing.T) { mp := NewDeSoMempool( chain, 0, /* rateLimitFeeRateNanosPerKB */ 0 /* minFeeRateNanosPerKB */, "", true, - "" /*dataDir*/, "") + "" /*dataDir*/, "", true) // Process the first transaction. mempoolTx1, err := mp.processTransaction(txn1, false /*allowUnconnectedTxn*/, false /*rateLimit*/, 0 /*peerID*/, true /*verifySignatures*/) diff --git a/lib/server.go b/lib/server.go index 18e9ade70..ade962f21 100644 --- a/lib/server.go +++ b/lib/server.go @@ -376,7 +376,7 @@ func NewServer( archivalMode := false if _hyperSync { _snapshot, err, shouldRestart = NewSnapshot(_db, _dataDir, _snapshotBlockHeightPeriod, - false, false, _params, _disableEncoderMigrations) + false, false, _params, _disableEncoderMigrations, false) if err != nil { panic(err) } @@ -446,7 +446,7 @@ func NewServer( // blocks. 
_mempool := NewDeSoMempool(_chain, _rateLimitFeerateNanosPerKB, _minFeeRateNanosPerKB, _blockCypherAPIKey, _runReadOnlyUtxoViewUpdater, _dataDir, - _mempoolDumpDir) + _mempoolDumpDir, false) // Useful for debugging. Every second, it outputs the contents of the mempool // and the contents of the addrmanager. diff --git a/lib/snapshot.go b/lib/snapshot.go index b4fbd3fdf..0e01231cf 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -307,11 +307,14 @@ type Snapshot struct { // NewSnapshot creates a new snapshot instance. func NewSnapshot(mainDb *badger.DB, mainDbDirectory string, snapshotBlockHeightPeriod uint64, isTxIndex bool, - disableChecksum bool, params *DeSoParams, disableMigrations bool) (_snap *Snapshot, _err error, _shouldRestart bool) { + disableChecksum bool, params *DeSoParams, disableMigrations bool, useDefaultBadgerOptions bool) (_snap *Snapshot, _err error, _shouldRestart bool) { // Initialize the ancestral records database snapshotDirectory := filepath.Join(GetBadgerDbPath(mainDbDirectory), "snapshot") snapshotOpts := PerformanceBadgerOptions(snapshotDirectory) + if useDefaultBadgerOptions { + snapshotOpts = DefaultBadgerOptions(snapshotDirectory) + } snapshotOpts.ValueDir = GetBadgerDbPath(snapshotDirectory) snapshotDb, err := badger.Open(snapshotOpts) if err != nil { From eb9832df82a32a4f8bbdb3cfa6bb440133389a16 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 12 Jul 2023 14:10:58 -0400 Subject: [PATCH 143/762] PoS Add RestakeRewards flag to TxnTypeStake (#586) * PoS Add RestakeRewards flag to TxnTypeStake * Update tests * Better edge case handling --- lib/block_view_stake.go | 42 ++++++++++++++++++++++++++++++++---- lib/block_view_stake_test.go | 42 ++++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 4 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 0571b84e8..03c55d3a6 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ 
-35,6 +35,7 @@ import ( type StakeEntry struct { StakerPKID *PKID ValidatorPKID *PKID + RestakeRewards bool StakeAmountNanos *uint256.Int ExtraData map[string][]byte isDeleted bool @@ -49,6 +50,7 @@ func (stakeEntry *StakeEntry) Copy() *StakeEntry { return &StakeEntry{ StakerPKID: stakeEntry.StakerPKID.NewPKID(), ValidatorPKID: stakeEntry.ValidatorPKID.NewPKID(), + RestakeRewards: stakeEntry.RestakeRewards, StakeAmountNanos: stakeEntry.StakeAmountNanos.Clone(), ExtraData: copyExtraData(stakeEntry.ExtraData), isDeleted: stakeEntry.isDeleted, @@ -70,6 +72,7 @@ func (stakeEntry *StakeEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipM var data []byte data = append(data, EncodeToBytes(blockHeight, stakeEntry.StakerPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, stakeEntry.ValidatorPKID, skipMetadata...)...) + data = append(data, BoolToByte(stakeEntry.RestakeRewards)) data = append(data, VariableEncodeUint256(stakeEntry.StakeAmountNanos)...) data = append(data, EncodeExtraData(stakeEntry.ExtraData)...) return data @@ -90,6 +93,12 @@ func (stakeEntry *StakeEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *b return errors.Wrapf(err, "StakeEntry.Decode: Problem reading ValidatorPKID: ") } + // RestakeRewards + stakeEntry.RestakeRewards, err = ReadBoolByte(rr) + if err != nil { + return errors.Wrapf(err, "StakeEntry.Decode: Problem reading RestakeRewards") + } + // StakeAmountNanos stakeEntry.StakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { @@ -215,6 +224,7 @@ func (lockedStakeEntry *LockedStakeEntry) GetEncoderType() EncoderType { type StakeMetadata struct { ValidatorPublicKey *PublicKey + RestakeRewards bool StakeAmountNanos *uint256.Int } @@ -225,6 +235,7 @@ func (txnData *StakeMetadata) GetTxnType() TxnType { func (txnData *StakeMetadata) ToBytes(preSignature bool) ([]byte, error) { var data []byte data = append(data, EncodeByteArray(txnData.ValidatorPublicKey.ToBytes())...) 
+ data = append(data, BoolToByte(txnData.RestakeRewards)) data = append(data, VariableEncodeUint256(txnData.StakeAmountNanos)...) return data, nil } @@ -239,6 +250,12 @@ func (txnData *StakeMetadata) FromBytes(data []byte) error { } txnData.ValidatorPublicKey = NewPublicKey(validatorPublicKeyBytes) + // RestakeRewards + txnData.RestakeRewards, err = ReadBoolByte(rr) + if err != nil { + return errors.Wrapf(err, "StakeMetadata.FromBytes: Problem reading RestakeRewards: ") + } + // StakeAmountNanos txnData.StakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { @@ -353,6 +370,7 @@ func (txnData *UnlockStakeMetadata) New() DeSoTxnMetadata { type StakeTxindexMetadata struct { StakerPublicKeyBase58Check string ValidatorPublicKeyBase58Check string + RestakeRewards bool StakeAmountNanos *uint256.Int } @@ -360,6 +378,7 @@ func (txindexMetadata *StakeTxindexMetadata) RawEncodeWithoutMetadata(blockHeigh var data []byte data = append(data, EncodeByteArray([]byte(txindexMetadata.StakerPublicKeyBase58Check))...) data = append(data, EncodeByteArray([]byte(txindexMetadata.ValidatorPublicKeyBase58Check))...) + data = append(data, BoolToByte(txindexMetadata.RestakeRewards)) data = append(data, VariableEncodeUint256(txindexMetadata.StakeAmountNanos)...) 
return data } @@ -381,6 +400,12 @@ func (txindexMetadata *StakeTxindexMetadata) RawDecodeWithoutMetadata(blockHeigh } txindexMetadata.ValidatorPublicKeyBase58Check = string(validatorPublicKeyBase58CheckBytes) + // RestakeRewards + txindexMetadata.RestakeRewards, err = ReadBoolByte(rr) + if err != nil { + return errors.Wrapf(err, "StakeTxindexMetadata.Decode: Problem reading RestakeRewards: ") + } + // StakeAmountNanos txindexMetadata.StakeAmountNanos, err = VariableDecodeUint256(rr) if err != nil { @@ -1202,6 +1227,7 @@ func (bav *UtxoView) _connectStake( currentStakeEntry := &StakeEntry{ StakerPKID: transactorPKIDEntry.PKID, ValidatorPKID: prevValidatorEntry.ValidatorPKID, + RestakeRewards: txMeta.RestakeRewards, StakeAmountNanos: stakeAmountNanos, ExtraData: mergeExtraData(prevExtraData, txn.ExtraData), } @@ -1826,13 +1852,11 @@ func (bav *UtxoView) IsValidStakeMetadata(transactorPkBytes []byte, metadata *St return errors.Wrapf(RuleErrorInvalidStakeValidatorDisabledDelegatedStake, "UtxoView.IsValidStakeMetadata: ") } - // Validate 0 < StakeAmountNanos <= transactor's DESO Balance. We ignore + // Validate 0 <= StakeAmountNanos <= transactor's DESO Balance. We ignore // the txn fees in this check. The StakeAmountNanos will be validated to // be less than the transactor's DESO balance net of txn fees in the call // to connectBasicTransferWithExtraSpend. 
- if metadata.StakeAmountNanos == nil || - metadata.StakeAmountNanos.IsZero() || - !metadata.StakeAmountNanos.IsUint64() { + if metadata.StakeAmountNanos == nil || !metadata.StakeAmountNanos.IsUint64() { return errors.Wrapf(RuleErrorInvalidStakeAmountNanos, "UtxoView.IsValidStakeMetadata: ") } transactorDeSoBalanceNanos, err := bav.GetSpendableDeSoBalanceNanosForPublicKey(transactorPkBytes, blockHeight-1) @@ -1843,6 +1867,16 @@ func (bav *UtxoView) IsValidStakeMetadata(transactorPkBytes []byte, metadata *St return errors.Wrapf(RuleErrorInvalidStakeInsufficientBalance, "UtxoView.IsValidStakeMetadata: ") } + // Validate StakeAmountNanos > 0 when this is the first stake operation where the transactor is staking + // to the validator. It should not be possible for a validator to stake 0 DESO to a validator. + stakeEntry, err := bav.GetStakeEntry(validatorEntry.ValidatorPKID, transactorPKIDEntry.PKID) + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidStakeMetadata: ") + } + if stakeEntry == nil && metadata.StakeAmountNanos.IsZero() { + return errors.Wrapf(RuleErrorInvalidStakeAmountNanos, "UtxoView.IsValidStakeMetadata: ") + } + return nil } diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index ca4bbc593..54f53d4bc 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -125,6 +125,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), + RestakeRewards: false, StakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxn( @@ -141,6 +142,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidValidatorPKID stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m2PkBytes), + RestakeRewards: false, StakeAmountNanos: uint256.NewInt(), } _, err = _submitStakeTxn( @@ -153,6 +155,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidStakeAmountNanos stakeMetadata := 
&StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), + RestakeRewards: false, StakeAmountNanos: nil, } _, err = _submitStakeTxn( @@ -189,6 +192,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidStakeInsufficientBalance stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), + RestakeRewards: false, StakeAmountNanos: uint256.NewInt().SetUint64(math.MaxUint64), } _, err = _submitStakeTxn( @@ -202,6 +206,7 @@ func _testStaking(t *testing.T, flushToDB bool) { m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), + RestakeRewards: false, StakeAmountNanos: uint256.NewInt().SetUint64(100), } extraData := map[string][]byte{"TestKey": []byte("TestValue")} @@ -214,6 +219,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) + require.False(t, stakeEntry.RestakeRewards) require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(100)) require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue")) @@ -232,6 +238,7 @@ func _testStaking(t *testing.T, flushToDB bool) { m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), + RestakeRewards: false, StakeAmountNanos: uint256.NewInt().SetUint64(50), } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} @@ -244,6 +251,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) + require.False(t, stakeEntry.RestakeRewards) require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(150)) require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue2")) @@ -257,6 +265,40 @@ func _testStaking(t *testing.T, flushToDB bool) { m1NewDESOBalanceNanos := 
getDESOBalanceNanos(m1PkBytes) require.Equal(t, m1OldDESOBalanceNanos-feeNanos-stakeMetadata.StakeAmountNanos.Uint64(), m1NewDESOBalanceNanos) } + { + // m1 changes the RestakeRewards flag on their stake with m0. + m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + RestakeRewards: true, + StakeAmountNanos: uint256.NewInt(), + } + extraData := map[string][]byte{"TestKey": []byte("TestValue2")} + feeNanos, err := _submitStakeTxn( + testMeta, m1Pub, m1Priv, stakeMetadata, extraData, flushToDB, + ) + require.NoError(t, err) + + // Verify the StakeEntry.StakeAmountNanos does not change. + stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) + require.NoError(t, err) + require.NotNil(t, stakeEntry) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue2")) + + // Verify the StakeEntry.RestakeRewards flag is updated to true. + require.True(t, stakeEntry.RestakeRewards) + + // Verify the ValidatorEntry.TotalStakeAmountNanos does not change. + validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.NotNil(t, validatorEntry) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + + // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). 
+ m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) + require.Equal(t, m1OldDESOBalanceNanos-feeNanos-stakeMetadata.StakeAmountNanos.Uint64(), m1NewDESOBalanceNanos) + } // // UNSTAKING // From 6ea1915509d2fedb7a43b6debb4b901396772e33 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 12 Jul 2023 15:30:35 -0400 Subject: [PATCH 144/762] Initial commit (#590) --- lib/block_view_validator.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index d13450740..fb6c174d3 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -549,7 +549,7 @@ func DBKeyForValidatorByPKID(validatorEntry *ValidatorEntry) []byte { return key } -func DBKeyForValidatorByStake(validatorEntry *ValidatorEntry) []byte { +func DBKeyForValidatorByStatusAndStake(validatorEntry *ValidatorEntry) []byte { key := append([]byte{}, Prefixes.PrefixValidatorByStatusAndStake...) key = append(key, EncodeUint8(uint8(validatorEntry.Status()))...) key = append(key, FixedWidthEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) @@ -599,7 +599,7 @@ func DBGetTopActiveValidatorsByStake( // Convert ValidatorEntriesToSkip to ValidatorEntryKeysToSkip. validatorKeysToSkip := NewSet([]string{}) for _, validatorEntryToSkip := range validatorEntriesToSkip { - validatorKeysToSkip.Add(string(DBKeyForValidatorByStake(validatorEntryToSkip))) + validatorKeysToSkip.Add(string(DBKeyForValidatorByStatusAndStake(validatorEntryToSkip))) } // Retrieve top N active ValidatorEntry keys by stake. @@ -655,7 +655,7 @@ func DBPutValidatorWithTxn( // Set ValidatorEntry key in PrefixValidatorByStatusAndStake. The value should be nil. // We parse the ValidatorPKID from the key for this index. 
- key = DBKeyForValidatorByStake(validatorEntry) + key = DBKeyForValidatorByStatusAndStake(validatorEntry) if err := DBSetWithTxn(txn, snap, key, nil); err != nil { return errors.Wrapf( err, "DBPutValidatorWithTxn: problem storing ValidatorEntry in index PrefixValidatorByStatusAndStake", @@ -695,7 +695,7 @@ func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorPKID *PK } // Delete ValidatorEntry.PKID from PrefixValidatorByStatusAndStake. - key = DBKeyForValidatorByStake(validatorEntry) + key = DBKeyForValidatorByStatusAndStake(validatorEntry) if err := DBDeleteWithTxn(txn, snap, key); err != nil { return errors.Wrapf( err, "DBDeleteValidatorWithTxn: problem deleting ValidatorEntry from index PrefixValidatorByStatusAndStake", From a8f73bbded0907465bdfe48edb095b1d81ad8227 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 12 Jul 2023 15:58:09 -0400 Subject: [PATCH 145/762] Merge branch 'main' into feature/proof-of-stake (#593) * Merge branch 'main' into feature/proof-of-stake * Update PoS tests --- lib/block_view_access_group_members_test.go | 3 +-- lib/block_view_access_group_test.go | 6 ++---- lib/block_view_association_test.go | 3 +-- lib/block_view_creator_coin_test.go | 12 ++++-------- lib/block_view_dao_coin_limit_order_test.go | 3 +-- lib/block_view_dao_coin_test.go | 3 +-- lib/block_view_derived_key_test.go | 3 +-- lib/block_view_follow_test.go | 3 +-- lib/block_view_like_test.go | 3 +-- lib/block_view_message_test.go | 3 +-- lib/block_view_new_message_test.go | 3 +-- lib/block_view_nft_test.go | 9 +++------ lib/block_view_post_test.go | 3 +-- lib/block_view_profile_test.go | 6 ++---- lib/block_view_stake_test.go | 12 ++++-------- lib/block_view_test.go | 17 +++++++++-------- lib/block_view_types.go | 1 - lib/block_view_validator_test.go | 21 +++++++-------------- lib/blockchain_test.go | 14 +++++++------- lib/db_utils.go | 4 +++- lib/db_utils_test.go | 4 ++-- 
lib/pos_epoch_complete_hook_test.go | 9 ++++----- lib/pos_leader_schedule_test.go | 6 +++--- 23 files changed, 60 insertions(+), 91 deletions(-) diff --git a/lib/block_view_access_group_members_test.go b/lib/block_view_access_group_members_test.go index bd37f430d..fd357236f 100644 --- a/lib/block_view_access_group_members_test.go +++ b/lib/block_view_access_group_members_test.go @@ -49,8 +49,7 @@ func (data *accessGroupMembersTestData) GetInputType() transactionTestInputType } func TestBalanceModelAccessGroupMembers(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestAccessGroupMembersAdd(t) TestAccessGroupMembersUpdate(t) diff --git a/lib/block_view_access_group_test.go b/lib/block_view_access_group_test.go index 7a32a7f49..0518acbd8 100644 --- a/lib/block_view_access_group_test.go +++ b/lib/block_view_access_group_test.go @@ -32,8 +32,7 @@ func (data *AccessGroupTestData) GetInputType() transactionTestInputType { } func TestBalanceModelAccessGroups(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestAccessGroup(t) TestAccessGroupTxnWithDerivedKey(t) @@ -493,8 +492,7 @@ func _customCreateAccessGroupTxn( func TestAccessGroupTxnWithDerivedKey(t *testing.T) { // Initialize fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. 
var err error diff --git a/lib/block_view_association_test.go b/lib/block_view_association_test.go index 4e2a1ed0e..43b5f9b65 100644 --- a/lib/block_view_association_test.go +++ b/lib/block_view_association_test.go @@ -11,8 +11,7 @@ import ( ) func TestBalanceModelAssociations(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestAssociations(t) } diff --git a/lib/block_view_creator_coin_test.go b/lib/block_view_creator_coin_test.go index 35e44ad1f..62f32bc7e 100644 --- a/lib/block_view_creator_coin_test.go +++ b/lib/block_view_creator_coin_test.go @@ -792,8 +792,7 @@ func _helpTestCreatorCoinBuySell( } func TestBalanceModelCreatorCoins(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestCreatorCoinWithDiamonds(t) TestCreatorCoinWithDiamondsFailureCases(t) @@ -803,8 +802,7 @@ func TestBalanceModelCreatorCoins(t *testing.T) { } func TestBalanceModelCreatorCoins2(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestCreatorCoinTransferWithSwapIdentity(t) TestCreatorCoinTransferWithSmallBalancesLeftOver(t) @@ -814,8 +812,7 @@ func TestBalanceModelCreatorCoins2(t *testing.T) { } func TestBalanceModelCreatorCoins3(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestCreatorCoinBuySellSimple_DeSoFounderReward(t) TestCreatorCoinSelfBuying_DeSoAndCreatorCoinFounderReward(t) @@ -825,8 +822,7 @@ func TestBalanceModelCreatorCoins3(t *testing.T) { } func TestBalanceModelCreatorCoins4(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestCreatorCoinLargeFounderRewardBuySellAmounts(t) TestCreatorCoinAroundThresholdBuySellAmounts(t) diff --git a/lib/block_view_dao_coin_limit_order_test.go 
b/lib/block_view_dao_coin_limit_order_test.go index d1b332383..e50030b8d 100644 --- a/lib/block_view_dao_coin_limit_order_test.go +++ b/lib/block_view_dao_coin_limit_order_test.go @@ -13,8 +13,7 @@ import ( ) func TestBalanceModelDAOCoinLimitOrders(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t) TestDAOCoinLimitOrder(t) diff --git a/lib/block_view_dao_coin_test.go b/lib/block_view_dao_coin_test.go index 1c436ad75..0d22658ac 100644 --- a/lib/block_view_dao_coin_test.go +++ b/lib/block_view_dao_coin_test.go @@ -175,8 +175,7 @@ func _daoCoinTransferTxnWithTestMeta( } func TestBalanceModelDAOCoins(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestDAOCoinBasic(t) } diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index 37dcfc481..97ba47eb4 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -869,8 +869,7 @@ func _doAuthorizeTxnWithExtraDataAndSpendingLimits(testMeta *TestMeta, utxoView } func TestBalanceModelAuthorizeDerivedKey(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestAuthorizeDerivedKeyBasic(t) TestAuthorizeDerivedKeyBasicWithTransactionLimits(t) diff --git a/lib/block_view_follow_test.go b/lib/block_view_follow_test.go index 5d5e0bc5a..fb819796d 100644 --- a/lib/block_view_follow_test.go +++ b/lib/block_view_follow_test.go @@ -70,8 +70,7 @@ func _doFollowTxn(t *testing.T, chain *Blockchain, db *badger.DB, } func TestBalanceModelFollows(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestFollowTxns(t) } diff --git a/lib/block_view_like_test.go b/lib/block_view_like_test.go index c9afadd1b..a59bee193 100644 --- a/lib/block_view_like_test.go +++ 
b/lib/block_view_like_test.go @@ -68,8 +68,7 @@ func _doLikeTxn(t *testing.T, chain *Blockchain, db *badger.DB, } func TestBalanceModelLikes(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestLikeTxns(t) } diff --git a/lib/block_view_message_test.go b/lib/block_view_message_test.go index c70587791..8e3fcc836 100644 --- a/lib/block_view_message_test.go +++ b/lib/block_view_message_test.go @@ -114,8 +114,7 @@ func _privateMessageWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB } func TestBalanceModelPrivateMessages(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestPrivateMessages(t) TestMessagingKeys(t) diff --git a/lib/block_view_new_message_test.go b/lib/block_view_new_message_test.go index 62f88d2d7..24fb6acca 100644 --- a/lib/block_view_new_message_test.go +++ b/lib/block_view_new_message_test.go @@ -50,8 +50,7 @@ func (data *NewMessageTestData) GetInputType() transactionTestInputType { } func TestBalanceModelNewMessages(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestNewMessage(t) } diff --git a/lib/block_view_nft_test.go b/lib/block_view_nft_test.go index 22d1f7d9f..126109900 100644 --- a/lib/block_view_nft_test.go +++ b/lib/block_view_nft_test.go @@ -829,8 +829,7 @@ func _burnNFTWithTestMeta( } func TestBalanceModelNFTs(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestNFTBasic(t) TestNFTRoyaltiesAndSpendingOfBidderUTXOs(t) @@ -841,8 +840,7 @@ func TestBalanceModelNFTs(t *testing.T) { // Break up into multiple tests to keep memory footprint lower func TestBalanceModelNFTs2(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestNFTMoreErrorCases(t) TestNFTBidsAreCanceledAfterAccept(t) 
@@ -852,8 +850,7 @@ func TestBalanceModelNFTs2(t *testing.T) { } func TestBalanceModelNFTs3(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestNFTTransfersAndBurns(t) TestBidAmountZero(t) diff --git a/lib/block_view_post_test.go b/lib/block_view_post_test.go index 6853ddfcd..a630822e6 100644 --- a/lib/block_view_post_test.go +++ b/lib/block_view_post_test.go @@ -292,8 +292,7 @@ func _doSubmitPostTxn(t *testing.T, chain *Blockchain, db *badger.DB, } func TestBalanceModelSubmitPost(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestSubmitPost(t) TestDeSoDiamonds(t) diff --git a/lib/block_view_profile_test.go b/lib/block_view_profile_test.go index 6883c23ef..18651989a 100644 --- a/lib/block_view_profile_test.go +++ b/lib/block_view_profile_test.go @@ -218,8 +218,7 @@ const ( ) func TestBalanceModelUpdateProfile(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestUpdateProfile(t) TestSpamUpdateProfile(t) @@ -227,8 +226,7 @@ func TestBalanceModelUpdateProfile(t *testing.T) { } func TestBalanceModelSwapIdentity(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestSwapIdentityNOOPCreatorCoinBuySimple(t) TestSwapIdentityCreatorCoinBuySimple(t) diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 29a7eabc4..2aec1d844 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -22,8 +22,7 @@ func _testStaking(t *testing.T, flushToDB bool) { var err error // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. 
chain, params, db := NewLowDifficultyBlockchain(t) @@ -759,8 +758,7 @@ func TestStakingWithDerivedKey(t *testing.T) { var err error // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) @@ -1761,8 +1759,7 @@ func TestStakeLockupEpochDuration(t *testing.T) { var err error // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) @@ -1927,8 +1924,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { var err error // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) diff --git a/lib/block_view_test.go b/lib/block_view_test.go index bcc57b653..470c29d33 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -66,7 +66,7 @@ var ( paramUpdaterPkBytes, _, _ = Base58CheckDecode(paramUpdaterPub) ) -func setBalanceModelBlockHeights() { +func setBalanceModelBlockHeights(t *testing.T) { DeSoTestnetParams.ForkHeights.NFTTransferOrBurnAndDerivedKeysBlockHeight = 0 DeSoTestnetParams.ForkHeights.DerivedKeySetSpendingLimitsBlockHeight = 0 DeSoTestnetParams.ForkHeights.DerivedKeyTrackSpendingLimitsBlockHeight = 0 @@ -77,6 +77,8 @@ func setBalanceModelBlockHeights() { DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) GlobalDeSoParams = DeSoTestnetParams + + t.Cleanup(resetBalanceModelBlockHeights) } func resetBalanceModelBlockHeights() { @@ -305,18 +307,19 @@ const 
TestDeSoEncoderRetries = 3 func TestDeSoEncoderSetup(t *testing.T) { EncodeToBytesImpl = func(blockHeight uint64, encoder DeSoEncoder, skipMetadata ...bool) []byte { + versionByte := encoder.GetVersionByte(blockHeight) encodingBytes := encodeToBytes(blockHeight, encoder, skipMetadata...) - // Check for deterministic encoding, try re-encoding the same encoder a couple of times and compare it with // the original bytes. { for ii := 0; ii < TestDeSoEncoderRetries; ii++ { + newVersionByte := encoder.GetVersionByte(blockHeight) reEncodingBytes := encodeToBytes(blockHeight, encoder, skipMetadata...) if !bytes.Equal(encodingBytes, reEncodingBytes) { t.Fatalf("EncodeToBytes: Found non-deterministic encoding for a DeSoEncoder. Attempted "+ - "encoder type (%v), version byte (%v) at block height (%v).\n "+ + "encoder type (%v), version byte (original: %v, reEncoding: %v) at block height (%v).\n "+ "First encoding: (%v)\n"+"Second encoding: (%v)\n", - encoder.GetEncoderType(), encoder.GetVersionByte(blockHeight), + encoder.GetEncoderType(), versionByte, newVersionByte, blockHeight, hex.EncodeToString(encodingBytes), hex.EncodeToString(reEncodingBytes)) } } @@ -1241,8 +1244,7 @@ func _connectBlockThenDisconnectBlockAndFlush(testMeta *TestMeta) { } func TestBalanceModelUpdateGlobalParams(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestUpdateGlobalParams(t) } @@ -1460,8 +1462,7 @@ func TestUpdateGlobalParams(t *testing.T) { } func TestBalanceModelBasicTransfers(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) TestBasicTransfer(t) TestBasicTransferSignatures(t) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 84628c98f..deceeee8a 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -487,7 +487,6 @@ func GetMigrationVersion(blockHeight uint64, appliedMigrationNames ...MigrationN } } } - return 
maxMigrationVersion } diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 628743548..4a92966d9 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -28,8 +28,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { var err error // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) @@ -419,8 +418,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { var err error // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) @@ -686,8 +684,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { var err error // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) @@ -1119,8 +1116,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { var err error // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) @@ -1304,8 +1300,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { var err error // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. 
chain, params, db := NewLowDifficultyBlockchain(t) @@ -1497,8 +1492,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { var err error // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) @@ -1711,8 +1705,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { var err error // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index 7928a51c7..c3d182898 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -259,6 +259,7 @@ func NewLowDifficultyBlockchainWithParamsAndDb(t *testing.T, params *DeSoParams, t.Cleanup(func() { AppendToMemLog(t, "CLEANUP_START") + TestDeSoEncoderShutdown(t) if snap != nil { snap.Stop() CleanUpBadger(snap.SnapshotDb) @@ -270,7 +271,6 @@ func NewLowDifficultyBlockchainWithParamsAndDb(t *testing.T, params *DeSoParams, } } CleanUpBadger(db) - TestDeSoEncoderShutdown(t) AppendToMemLog(t, "CLEANUP_END") }) @@ -444,8 +444,8 @@ func _getBalanceWithView(t *testing.T, chain *Blockchain, utxoView *UtxoView, pk } func TestBalanceModelBlockTests(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) + TestBasicTransferReorg(t) TestProcessBlockConnectBlocks(t) TestProcessHeaderskReorgBlocks(t) @@ -456,8 +456,8 @@ func TestBalanceModelBlockTests(t *testing.T) { } func TestBalanceModelBlockTests2(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) + TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t) TestCalcNextDifficultyTargetHittingLimitsSlow(t) 
TestCalcNextDifficultyTargetHittingLimitsFast(t) @@ -465,8 +465,8 @@ func TestBalanceModelBlockTests2(t *testing.T) { } func TestBalanceModelBlockTests3(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) + TestCalcNextDifficultyTargetSlightlyOff(t) TestBadMerkleRoot(t) TestBadBlockSignature(t) diff --git a/lib/db_utils.go b/lib/db_utils.go index 7980bdc8a..4d4aaa0d9 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -9849,7 +9849,9 @@ func PerformanceBadgerOptions(dir string) badger.Options { } func DefaultBadgerOptions(dir string) badger.Options { - return badger.DefaultOptions(dir) + opts := badger.DefaultOptions(dir) + opts.Logger = nil + return opts } // --------------------------------------------- diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index ccc69a7b3..64c8b340e 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -593,8 +593,8 @@ func TestFollows(t *testing.T) { } func TestDeleteExpiredTransactorNonceEntries(t *testing.T) { - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) + assert := assert.New(t) require := require.New(t) _ = assert diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 522129103..9fc61ecf6 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -4,17 +4,17 @@ package lib import ( "fmt" + "testing" + "github.com/holiman/uint256" "github.com/stretchr/testify/require" - "testing" ) func TestIsLastBlockInCurrentEpoch(t *testing.T) { var isLastBlockInCurrentEpoch bool // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. 
chain, params, db := NewLowDifficultyBlockchain(t) @@ -54,8 +54,7 @@ func TestIsLastBlockInCurrentEpoch(t *testing.T) { func TestRunEpochCompleteHook(t *testing.T) { // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index e68493ab6..8cdb48460 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -5,15 +5,15 @@ package lib import ( "crypto/sha256" "fmt" + "testing" + "github.com/holiman/uint256" "github.com/stretchr/testify/require" - "testing" ) func TestGenerateLeaderSchedule(t *testing.T) { // Initialize balance model fork heights. - setBalanceModelBlockHeights() - defer resetBalanceModelBlockHeights() + setBalanceModelBlockHeights(t) // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) From 7167292cbe2e42842de700363103e8358cea8979 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 13 Jul 2023 11:40:56 -0400 Subject: [PATCH 146/762] PoS Add StakeByStakeAmount badger index (#591) * PoS Add StakeByStakeAmount badger index * Update DBPut and DBDelete functions * Fix tests * Use FixedWidthEncodeUint256 to encode stake amount in index * Fix encoder tests --- lib/block_view_stake.go | 68 ++++++++++++++++++++++++++++++++--------- lib/db_utils.go | 41 ++++++++++++++----------- 2 files changed, 77 insertions(+), 32 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 03c55d3a6..6d90af1f3 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -547,15 +547,23 @@ func (txindexMetadata *UnlockStakeTxindexMetadata) GetEncoderType() EncoderType // DB UTILS // -func DBKeyForStakeByValidatorAndStaker(stakeEntry *StakeEntry) []byte { - data := 
DBKeyForStakeByValidator(stakeEntry) - data = append(data, stakeEntry.StakerPKID.ToBytes()...) +func DBKeyForStakeByValidatorAndStaker(validatorPKID *PKID, stakerPKID *PKID) []byte { + data := DBKeyForStakeByValidator(validatorPKID) + data = append(data, stakerPKID.ToBytes()...) return data } -func DBKeyForStakeByValidator(stakeEntry *StakeEntry) []byte { +func DBKeyForStakeByValidator(validatorPKID *PKID) []byte { data := append([]byte{}, Prefixes.PrefixStakeByValidatorAndStaker...) + data = append(data, validatorPKID.ToBytes()...) + return data +} + +func DBKeyForStakeByStakeAmount(stakeEntry *StakeEntry) []byte { + data := append([]byte{}, Prefixes.PrefixStakeByStakeAmount...) + data = append(data, FixedWidthEncodeUint256(stakeEntry.StakeAmountNanos)...) data = append(data, stakeEntry.ValidatorPKID.ToBytes()...) + data = append(data, stakeEntry.StakerPKID.ToBytes()...) return data } @@ -594,7 +602,7 @@ func DBGetStakeEntryWithTxn( stakerPKID *PKID, ) (*StakeEntry, error) { // Retrieve StakeEntry from db. - key := DBKeyForStakeByValidatorAndStaker(&StakeEntry{ValidatorPKID: validatorPKID, StakerPKID: stakerPKID}) + key := DBKeyForStakeByValidatorAndStaker(validatorPKID, stakerPKID) stakeEntryBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { // We don't want to error if the key isn't found. Instead, return nil. @@ -615,7 +623,7 @@ func DBGetStakeEntryWithTxn( func DBGetStakeEntriesForValidatorPKID(handle *badger.DB, snap *Snapshot, validatorPKID *PKID) ([]*StakeEntry, error) { // Retrieve StakeEntries from db. - prefix := DBKeyForStakeByValidator(&StakeEntry{ValidatorPKID: validatorPKID}) + prefix := DBKeyForStakeByValidator(validatorPKID) _, valsFound, err := EnumerateKeysForPrefixWithLimitOffsetOrder( handle, prefix, 0, nil, false, NewSet([]string{}), ) @@ -644,16 +652,16 @@ func DBValidatorHasDelegatedStake( ) (bool, error) { // Skip any stake the validator has assigned to himself (if exists). 
skipKeys := NewSet([]string{ - string(DBKeyForStakeByValidatorAndStaker(&StakeEntry{ValidatorPKID: validatorPKID, StakerPKID: validatorPKID})), + string(DBKeyForStakeByValidatorAndStaker(validatorPKID, validatorPKID)), }) // Skip any StakeEntries deleted in the UtxoView. for _, utxoDeletedStakeEntry := range utxoDeletedStakeEntries { - skipKeys.Add(string(DBKeyForStakeByValidatorAndStaker(utxoDeletedStakeEntry))) + skipKeys.Add(string(DBKeyForStakeByValidatorAndStaker(utxoDeletedStakeEntry.ValidatorPKID, utxoDeletedStakeEntry.StakerPKID))) } // Scan for any delegated StakeEntries (limiting to at most one row). - prefix := DBKeyForStakeByValidator(&StakeEntry{ValidatorPKID: validatorPKID}) + prefix := DBKeyForStakeByValidator(validatorPKID) keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( handle, prefix, 1, nil, false, skipKeys, ) @@ -806,13 +814,21 @@ func DBPutStakeEntryWithTxn( } // Set StakeEntry in PrefixStakeByValidatorByStaker. - key := DBKeyForStakeByValidatorAndStaker(stakeEntry) - if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, stakeEntry)); err != nil { + stakeByValidatorAndStakerKey := DBKeyForStakeByValidatorAndStaker(stakeEntry.ValidatorPKID, stakeEntry.StakerPKID) + if err := DBSetWithTxn(txn, snap, stakeByValidatorAndStakerKey, EncodeToBytes(blockHeight, stakeEntry)); err != nil { return errors.Wrapf( err, "DBPutStakeEntryWithTxn: problem storing StakeEntry in index PrefixStakeByValidatorByStaker: ", ) } + // Set StakeEntry in PrefixStakeByStakeAmount. 
+ stakeByStakeAmountKey := DBKeyForStakeByStakeAmount(stakeEntry) + if err := DBSetWithTxn(txn, snap, stakeByStakeAmountKey, nil); err != nil { + return errors.Wrapf( + err, "DBPutStakeEntryWithTxn: problem storing StakeEntry in index PrefixStakeByStakeAmount: ", + ) + } + return nil } @@ -840,21 +856,43 @@ func DBPutLockedStakeEntryWithTxn( func DBDeleteStakeEntryWithTxn( txn *badger.Txn, snap *Snapshot, - stakeEntry *StakeEntry, + validatorPKID *PKID, + stakerPKID *PKID, blockHeight uint64, ) error { + if validatorPKID == nil || stakerPKID == nil { + return nil + } + + // Look up the existing StakeEntry in the db using the validator and staker PKIDs. + // We need to use this stakeEntry's values to delete all corresponding indexes. + stakeEntry, err := DBGetStakeEntryWithTxn(txn, snap, validatorPKID, stakerPKID) + if err != nil { + return errors.Wrapf(err, "DBDeleteStakeEntryWithTxn: problem retrieving "+ + "StakeEntry for ValidatorPKID %v and StakerPKID %v: ", validatorPKID, stakerPKID) + } + + // If the StakeEntry doesn't exist in the DB, then there's nothing to delete. Exit early. if stakeEntry == nil { return nil } // Delete StakeEntry from PrefixStakeByValidatorByStaker. - key := DBKeyForStakeByValidatorAndStaker(stakeEntry) - if err := DBDeleteWithTxn(txn, snap, key); err != nil { + stakeByValidatorAndStakerKey := DBKeyForStakeByValidatorAndStaker(validatorPKID, stakerPKID) + if err := DBDeleteWithTxn(txn, snap, stakeByValidatorAndStakerKey); err != nil { return errors.Wrapf( err, "DBDeleteStakeEntryWithTxn: problem deleting StakeEntry from index PrefixStakeByValidatorByStaker: ", ) } + // Delete the StakeEntry from PrefixStakeByStakeAmount. 
+ stakeByStakeAmountKey := DBKeyForStakeByStakeAmount(stakeEntry) + if err := DBDeleteWithTxn(txn, snap, stakeByStakeAmountKey); err != nil { + return errors.Wrapf( + err, "DBDeleteStakeEntryWithTxn: problem deleting StakeEntry from index PrefixStakeByStakeAmount: ", + ) + } + return nil } @@ -2425,7 +2463,7 @@ func (bav *UtxoView) _flushStakeEntriesToDbWithTxn(txn *badger.Txn, blockHeight // Delete the existing mappings in the db for this MapKey. They will be // re-added if the corresponding entry in-memory has isDeleted=false. - if err := DBDeleteStakeEntryWithTxn(txn, bav.Snapshot, &entry, blockHeight); err != nil { + if err := DBDeleteStakeEntryWithTxn(txn, bav.Snapshot, entry.ValidatorPKID, entry.StakerPKID, blockHeight); err != nil { return errors.Wrapf(err, "_flushStakeEntriesToDbWithTxn: ") } } diff --git a/lib/db_utils.go b/lib/db_utils.go index a80c8c03b..00a287bb5 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -490,6 +490,10 @@ type DBPrefixes struct { // Prefix, , -> StakeEntry PrefixStakeByValidatorAndStaker []byte `prefix_id:"[80]" is_state:"true"` + // PrefixStakeByStakeAmount: Retrieve the top N stake entries by stake amount. + // Prefix, , , -> nil + PrefixStakeByStakeAmount []byte `prefix_id:"[81]" is_state:"true"` + // PrefixLockedStakeByValidatorAndStakerAndLockedAt: Retrieve a LockedStakeEntry. // Prefix, , , -> LockedStakeEntry // @@ -515,41 +519,41 @@ type DBPrefixes struct { // (CurrentEpoch - LockedAtEpochNumber) = 133 - 123 = 10, which is greater than // cooldown=3. Thus the UnlockStake will succeed, which will result in the // LockedStakeEntry being deleted and 25 DESO being added to the user's balance. - PrefixLockedStakeByValidatorAndStakerAndLockedAt []byte `prefix_id:"[81]" is_state:"true"` + PrefixLockedStakeByValidatorAndStakerAndLockedAt []byte `prefix_id:"[82]" is_state:"true"` // PrefixCurrentEpoch: Retrieve the current EpochEntry. 
// Prefix -> EpochEntry - PrefixCurrentEpoch []byte `prefix_id:"[82]" is_state:"true"` + PrefixCurrentEpoch []byte `prefix_id:"[83]" is_state:"true"` // PrefixCurrentRandomSeedHash: Retrieve the current RandomSeedHash. // Prefix -> . - PrefixCurrentRandomSeedHash []byte `prefix_id:"[83]" is_state:"true"` + PrefixCurrentRandomSeedHash []byte `prefix_id:"[84]" is_state:"true"` // PrefixSnapshotGlobalParamsEntry: Retrieve a snapshot GlobalParamsEntry by SnapshotAtEpochNumber. // Prefix, -> *GlobalParamsEntry - PrefixSnapshotGlobalParamsEntry []byte `prefix_id:"[84]" is_state:"true"` + PrefixSnapshotGlobalParamsEntry []byte `prefix_id:"[85]" is_state:"true"` // PrefixSnapshotValidatorSetByPKID: Retrieve a ValidatorEntry from a snapshot validator set by // . // Prefix, , -> *ValidatorEntry - PrefixSnapshotValidatorSetByPKID []byte `prefix_id:"[85]" is_state:"true"` + PrefixSnapshotValidatorSetByPKID []byte `prefix_id:"[86]" is_state:"true"` // PrefixSnapshotValidatorSetByStake: Retrieve stake-ordered ValidatorEntries from a snapshot validator set // by SnapshotAtEpochNumber. // Prefix, , , -> nil // Note: we parse the ValidatorPKID from the key and the value is nil to save space. - PrefixSnapshotValidatorSetByStake []byte `prefix_id:"[86]" is_state:"true"` + PrefixSnapshotValidatorSetByStake []byte `prefix_id:"[87]" is_state:"true"` // PrefixSnapshotValidatorSetTotalStakeAmountNanos: Retrieve a snapshot of the validator set's total amount of // staked DESO by SnapshotAtEpochNumber. // Prefix, -> *uint256.Int - PrefixSnapshotValidatorSetTotalStakeAmountNanos []byte `prefix_id:"[87]" is_state:"true"` + PrefixSnapshotValidatorSetTotalStakeAmountNanos []byte `prefix_id:"[88]" is_state:"true"` // PrefixSnapshotLeaderSchedule: Retrieve a ValidatorPKID by . 
// Prefix, , -> ValidatorPKID - PrefixSnapshotLeaderSchedule []byte `prefix_id:"[88]" is_state:"true"` + PrefixSnapshotLeaderSchedule []byte `prefix_id:"[89]" is_state:"true"` - // NEXT_TAG: 89 + // NEXT_TAG: 90 } // StatePrefixToDeSoEncoder maps each state prefix to a DeSoEncoder type that is stored under that prefix. @@ -760,29 +764,32 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixStakeByValidatorAndStaker) { // prefix_id:"[80]" return true, &StakeEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixLockedStakeByValidatorAndStakerAndLockedAt) { + } else if bytes.Equal(prefix, Prefixes.PrefixStakeByStakeAmount) { // prefix_id:"[81]" + return false, nil + } else if bytes.Equal(prefix, Prefixes.PrefixLockedStakeByValidatorAndStakerAndLockedAt) { + // prefix_id:"[82]" return true, &LockedStakeEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixCurrentEpoch) { - // prefix_id:"[82]" + // prefix_id:"[83]" return true, &EpochEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixCurrentRandomSeedHash) { - // prefix_id:"[83]" + // prefix_id:"[84]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalParamsEntry) { - // prefix_id:"[84]" + // prefix_id:"[85]" return true, &GlobalParamsEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetByPKID) { - // prefix_id:"[85]" + // prefix_id:"[86]" return true, &ValidatorEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetByStake) { - // prefix_id:"[86]" + // prefix_id:"[87]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetTotalStakeAmountNanos) { - // prefix_id:"[87]" + // prefix_id:"[88]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotLeaderSchedule) { - // prefix_id:"[88]" + // prefix_id:"[89]" return true, &PKID{} } From 41d8fe51dac0e28d92bc760d20d7f91edd36dfe3 Mon Sep 17 00:00:00 2001 From: tholonious 
<99746187+tholonious@users.noreply.github.com> Date: Thu, 13 Jul 2023 12:21:54 -0400 Subject: [PATCH 147/762] Implement badger seek with arbitrary key skip function (#596) * Implement badger seek with arbitrary key skip function * Better comments --- lib/block_view_validator.go | 38 ++++++++++++++++++++++++--------- lib/db_utils.go | 42 +++++++++++++++++++++++++++++++++---- 2 files changed, 66 insertions(+), 14 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index fb6c174d3..a7a365b24 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -557,6 +557,15 @@ func DBKeyForValidatorByStatusAndStake(validatorEntry *ValidatorEntry) []byte { return key } +func GetValidatorPKIDFromDBKeyForValidatorByStatusAndStake(key []byte) (*PKID, error) { + validatorPKIDBytes := key[len(key)-PublicKeyLenCompressed:] + validatorPKID := PKID{} + if err := validatorPKID.FromBytes(bytes.NewReader(validatorPKIDBytes)); err != nil { + return nil, errors.Wrapf(err, "GetValidatorPKIDFromDBKeyForValidatorByStatusAndStake: problem reading ValidatorPKID: ") + } + return &validatorPKID, nil +} + func DBGetValidatorByPKID(handle *badger.DB, snap *Snapshot, pkid *PKID) (*ValidatorEntry, error) { var ret *ValidatorEntry err := handle.View(func(txn *badger.Txn) error { @@ -596,17 +605,29 @@ func DBGetTopActiveValidatorsByStake( ) ([]*ValidatorEntry, error) { var validatorEntries []*ValidatorEntry - // Convert ValidatorEntriesToSkip to ValidatorEntryKeysToSkip. - validatorKeysToSkip := NewSet([]string{}) + // Convert validatorEntriesToSkip to the ValidatorPKIDs we need to skip. + validatorPKIDsToSkip := NewSet([]PKID{}) for _, validatorEntryToSkip := range validatorEntriesToSkip { - validatorKeysToSkip.Add(string(DBKeyForValidatorByStatusAndStake(validatorEntryToSkip))) + validatorPKIDsToSkip.Add(*validatorEntryToSkip.ValidatorPKID) + } + + // Define a function to filter out validators PKIDs we want to skip while seeking through the DB. 
+ canSkipValidatorInBadgerSeek := func(badgerKey []byte) bool { + validatorPKID, err := GetValidatorPKIDFromDBKeyForValidatorByStatusAndStake(badgerKey) + if err != nil { + // We return false here to be safe. Once the seek has completed, we attempt to parse the + // keys a second time below. Any failures there will result in an error that we can propagate + // to the caller. + return false + } + return validatorPKIDsToSkip.Includes(*validatorPKID) } // Retrieve top N active ValidatorEntry keys by stake. key := append([]byte{}, Prefixes.PrefixValidatorByStatusAndStake...) key = append(key, EncodeUint8(uint8(ValidatorStatusActive))...) - keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( - handle, key, int(limit), nil, true, validatorKeysToSkip, + keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrderAndSkipFunc( + handle, key, int(limit), nil, true, canSkipValidatorInBadgerSeek, ) if err != nil { return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem retrieving top validators: ") @@ -615,11 +636,8 @@ func DBGetTopActiveValidatorsByStake( // For each key found, parse the ValidatorPKID from the key, // then retrieve the ValidatorEntry by the ValidatorPKID. for _, keyFound := range keysFound { - // Parse the PKIDBytes from the key. The ValidatorPKID is the last component of the key. - validatorPKIDBytes := keyFound[len(keyFound)-PublicKeyLenCompressed:] - // Convert PKIDBytes to PKID. - validatorPKID := &PKID{} - if err = validatorPKID.FromBytes(bytes.NewReader(validatorPKIDBytes)); err != nil { + validatorPKID, err := GetValidatorPKIDFromDBKeyForValidatorByStatusAndStake(keyFound) + if err != nil { return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem reading ValidatorPKID: ") } // Retrieve ValidatorEntry by PKID. 
diff --git a/lib/db_utils.go b/lib/db_utils.go index 00a287bb5..9db543015 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -10784,7 +10784,7 @@ func EnumerateKeysForPrefixWithLimitOffsetOrder( dbErr := db.View(func(txn *badger.Txn) error { var err error keysFound, valsFound, err = _enumerateKeysForPrefixWithLimitOffsetOrderWithTxn( - txn, prefix, limit, lastSeenKey, sortDescending, skipKeys, + txn, prefix, limit, lastSeenKey, sortDescending, _setMembershipCheckFunc(skipKeys), ) return err }) @@ -10798,13 +10798,41 @@ func EnumerateKeysForPrefixWithLimitOffsetOrder( return keysFound, valsFound, nil } +func EnumerateKeysForPrefixWithLimitOffsetOrderAndSkipFunc( + db *badger.DB, + prefix []byte, + limit int, + lastSeenKey []byte, + sortDescending bool, + canSkipKey func([]byte) bool, +) ([][]byte, [][]byte, error) { + keysFound := [][]byte{} + valsFound := [][]byte{} + + dbErr := db.View(func(txn *badger.Txn) error { + var err error + keysFound, valsFound, err = _enumerateKeysForPrefixWithLimitOffsetOrderWithTxn( + txn, prefix, limit, lastSeenKey, sortDescending, canSkipKey, + ) + return err + }) + if dbErr != nil { + return nil, nil, errors.Wrapf( + dbErr, + "EnumerateKeysForPrefixWithLimitOffsetOrderAndSkipFunc: problem fetching keys and values from db: ", + ) + } + + return keysFound, valsFound, nil +} + func _enumerateKeysForPrefixWithLimitOffsetOrderWithTxn( txn *badger.Txn, prefix []byte, limit int, lastSeenKey []byte, sortDescending bool, - skipKeys *Set[string], + canSkipKey func([]byte) bool, ) ([][]byte, [][]byte, error) { keysFound := [][]byte{} valsFound := [][]byte{} @@ -10846,8 +10874,8 @@ func _enumerateKeysForPrefixWithLimitOffsetOrderWithTxn( } haveSeenLastSeenKey = true } - // Skip if key is included in the set of skipKeys. - if skipKeys.Includes(string(key)) { + // Skip if key can be skipped. + if canSkipKey(key) { continue } // Copy key. 
@@ -10864,3 +10892,9 @@ func _enumerateKeysForPrefixWithLimitOffsetOrderWithTxn( } return keysFound, valsFound, nil } + +func _setMembershipCheckFunc(set *Set[string]) func([]byte) bool { + return func(key []byte) bool { + return set.Includes(string(key)) + } +} From fae7f8c4ae066f36bbd26e87f3615b17fc2e04cb Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 13 Jul 2023 13:21:13 -0400 Subject: [PATCH 148/762] PoS Add UtxoView function to fetch top n stakers (#594) * PoS Add function to fetch top n stakers * Add unit tests * Fix typos * Address some of Nina's comments * Incorporate new badger seek function * Address Nina's remaining comments * Clean up comments --- lib/block_view_stake.go | 168 +++++++++++++++++++++++++++++++++++ lib/block_view_stake_test.go | 161 +++++++++++++++++++++++++++++++++ 2 files changed, 329 insertions(+) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 6d90af1f3..de1c75a4e 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -567,6 +567,24 @@ func DBKeyForStakeByStakeAmount(stakeEntry *StakeEntry) []byte { return data } +func GetValidatorPKIDFromDBKeyForStakeByStakeAmount(key []byte) (*PKID, error) { + validatorPKIDBytes := key[len(key)-(PublicKeyLenCompressed*2) : len(key)-PublicKeyLenCompressed] + validatorPKID := PKID{} + if err := validatorPKID.FromBytes(bytes.NewReader(validatorPKIDBytes)); err != nil { + return nil, errors.Wrapf(err, "GetValidatorPKIDFromDBKeyForStakeByStakeAmount: ") + } + return &validatorPKID, nil +} + +func GetStakerPKIDFromDBKeyForStakeByStakeAmount(key []byte) (*PKID, error) { + stakerPKIDBytes := key[len(key)-(PublicKeyLenCompressed):] + stakerPKID := PKID{} + if err := stakerPKID.FromBytes(bytes.NewReader(stakerPKIDBytes)); err != nil { + return nil, errors.Wrapf(err, "GetStakerPKIDFromDBKeyForStakeByStakeAmount: ") + } + return &stakerPKID, nil +} + func DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(lockedStakeEntry 
*LockedStakeEntry) []byte { data := DBPrefixKeyForLockedStakeByValidatorAndStaker(lockedStakeEntry) data = append(data, EncodeUint64(lockedStakeEntry.LockedAtEpochNumber)...) @@ -644,6 +662,77 @@ func DBGetStakeEntriesForValidatorPKID(handle *badger.DB, snap *Snapshot, valida return stakeEntries, nil } +func DBGetTopStakesByStakeAmount( + handle *badger.DB, + snap *Snapshot, + limit uint64, + stakeEntriesToSkip []*StakeEntry, +) ([]*StakeEntry, error) { + var stakeEntries []*StakeEntry + + // Convert StakeEntriesToSkip to StakeMapKey we need to skip. We use StakeMapKey + // here because we need to skip each StakeEntry based on both its ValidatorPKID and + // StakerPKID. + stakeMapKeysToSkip := NewSet([]StakeMapKey{}) + for _, stakeEntryToSkip := range stakeEntriesToSkip { + stakeMapKeysToSkip.Add(stakeEntryToSkip.ToMapKey()) + } + + // Define a function to filter out ValidatorPKID-StakerPKID pairs that we want to skip + // while seeking through the DB. + canSkipValidatorPKIDAndStakerPKIDInBadgerSeek := func(badgerKey []byte) bool { + // Parse both the validator PKID and staker PKID from the key. Just to be safe, we return false if + // we fail to parse them. Once the seek has completed, we attempt to parse all of the same keys a + // second time below. Any failures there will result in an error that we can propagate to the caller. + validatorPKID, err := GetValidatorPKIDFromDBKeyForStakeByStakeAmount(badgerKey) + if err != nil { + return false + } + stakerPKID, err := GetStakerPKIDFromDBKeyForStakeByStakeAmount(badgerKey) + if err != nil { + return false + } + + return stakeMapKeysToSkip.Includes(StakeMapKey{ + ValidatorPKID: *validatorPKID, + StakerPKID: *stakerPKID, + }) + } + + // Retrieve top N StakeEntry keys by stake amount. + key := append([]byte{}, Prefixes.PrefixStakeByStakeAmount...) 
+ keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrderAndSkipFunc( + handle, key, int(limit), nil, true, canSkipValidatorPKIDAndStakerPKIDInBadgerSeek, + ) + if err != nil { + return nil, errors.Wrapf(err, "DBGetTopStakesByStakeAmount: problem retrieving top stakes: ") + } + + // For each key found, parse the staker PKID and validator PKID from the key, then retrieve the StakeEntry.:len(keyFound)-PublicKeyLenCompressed + for _, keyFound := range keysFound { + // Extract the validator PKID from the key. + validatorPKID, err := GetValidatorPKIDFromDBKeyForStakeByStakeAmount(keyFound) + if err != nil { + return nil, errors.Wrapf(err, "DBGetTopStakesByStakeAmount: problem reading ValidatorPKID: ") + } + + // Extract the staker PKID from the key. + stakerPKID, err := GetStakerPKIDFromDBKeyForStakeByStakeAmount(keyFound) + if err != nil { + return nil, errors.Wrapf(err, "DBGetTopStakesByStakeAmount: problem reading StakerPKID: ") + } + + // Retrieve StakeEntry from db. + stakeEntry, err := DBGetStakeEntry(handle, snap, validatorPKID, stakerPKID) + if err != nil { + return nil, errors.Wrapf(err, "DBGetTopStakesByStakeAmount: problem retrieving stake entry: ") + } + stakeEntries = append(stakeEntries, stakeEntry) + } + + return stakeEntries, nil +} + func DBValidatorHasDelegatedStake( handle *badger.DB, snap *Snapshot, @@ -2285,6 +2374,85 @@ func (bav *UtxoView) GetStakeEntriesForValidatorPKID(validatorPKID *PKID) ([]*St return stakeEntries, nil } +func (bav *UtxoView) GetTopStakesByStakeAmount(limit uint64) ([]*StakeEntry, error) { + // Validate limit param. + if limit == uint64(0) { + return []*StakeEntry{}, nil + } + + // Create a slice of UtxoViewStakeEntries. We want to skip pulling these from the database for two + // reasons: + // 1. It's possible that they have been updated in the UtxoView and the changes have not yet flushed + // to the database. + // 2. 
By skipping these entries from the DB seek, we ensure that the DB seek always returns the top n + // entries not found in the UtxoView. When we merge the entries from the UtxoView and the DB, this + // guarantee that the top n entries will exist in the merged set of entries. + var utxoViewStakeEntries []*StakeEntry + for _, stakeEntry := range bav.StakeMapKeyToStakeEntry { + utxoViewStakeEntries = append(utxoViewStakeEntries, stakeEntry) + } + + // Pull top N StakeEntries from the database (not present in the UtxoView). + // Note that we will skip stakers that are present in the view because we pass + // utxoViewStakeEntries to the function. + dbStakeEntries, err := DBGetTopStakesByStakeAmount(bav.Handle, bav.Snapshot, limit, utxoViewStakeEntries) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetTopStakesByStakeAmount: error retrieving stake entries from db: ") + } + + // Cache top N StakeEntries from the db in the UtxoView. + for _, stakeEntry := range dbStakeEntries { + stakeMapKey := stakeEntry.ToMapKey() + // If the utxoViewStakeEntries have been properly skipped when doing the DB seek, then there + // should be no duplicates here. We perform a sanity check to ensure that is the case. If we + // find duplicates here, then something is wrong. It would unsafe to continue as it may result + // in an invalid ordering of stakes. + if _, exists := bav.StakeMapKeyToStakeEntry[stakeMapKey]; exists { + return nil, fmt.Errorf("UtxoView.GetTopStakesByStakeAmount: duplicate StakeEntry returned from the DB: %v", stakeEntry) + } + + bav._setStakeEntryMappings(stakeEntry) + } + + // Pull !isDeleted StakeEntries with non-zero stake from the UtxoView. + var stakeEntries []*StakeEntry + for _, stakeEntry := range bav.StakeMapKeyToStakeEntry { + if !stakeEntry.isDeleted && !stakeEntry.StakeAmountNanos.IsZero() { + stakeEntries = append(stakeEntries, stakeEntry) + } + } + + // Sort the StakeEntries by StakeAmountNanos DESC. 
+ sort.Slice(stakeEntries, func(ii, jj int) bool { + stakeCmp := stakeEntries[ii].StakeAmountNanos.Cmp(stakeEntries[jj].StakeAmountNanos) + if stakeCmp != 0 { + return stakeCmp > 0 + } + + // Use ValidatorPKID as a tie-breaker if equal StakeAmountNanos. + validatorPKIDCmp := bytes.Compare( + stakeEntries[ii].ValidatorPKID.ToBytes(), + stakeEntries[jj].ValidatorPKID.ToBytes(), + ) + if validatorPKIDCmp != 0 { + return validatorPKIDCmp > 0 + } + + // Use StakerPKID as a tie-breaker if equal ValidatorPKID. + return bytes.Compare( + stakeEntries[ii].StakerPKID.ToBytes(), + stakeEntries[jj].StakerPKID.ToBytes(), + ) > 0 + }) + + // Return top N. + upperBound := limit + if uint64(len(stakeEntries)) < upperBound { + upperBound = uint64(len(stakeEntries)) + } + return stakeEntries[0:upperBound], nil +} + func (bav *UtxoView) ValidatorHasDelegatedStake(validatorPKID *PKID) (bool, error) { // True if the validator has any delegated stake assigned to them. diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 90dc75810..9ec7c4625 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -3,6 +3,7 @@ package lib import ( + "bytes" "errors" "math" "testing" @@ -1653,6 +1654,166 @@ func TestStakingWithDerivedKey(t *testing.T) { _executeAllTestRollbackAndFlush(testMeta) } +func TestGetTopStakesByStakeAmount(t *testing.T) { + _testGetTopStakesByStakeAmount(t, false) + _testGetTopStakesByStakeAmount(t, true) +} + +func _testGetTopStakesByStakeAmount(t *testing.T, flushToDB bool) { + // Initialize balance model fork heights. + setBalanceModelBlockHeights(t) + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize PoS fork height. 
+ params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err := miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. + blockHeight := uint64(chain.blockTip().Height) + 1 + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID + m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID + m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID + m3PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m3PkBytes).PKID + m4PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m4PkBytes).PKID + + // Helper utils + utxoView := func() *UtxoView { + newUtxoView, err := mempool.GetAugmentedUniversalView() + require.NoError(t, err) + return newUtxoView + } + + constructAndSubmitRegisterValidatorTxn := func(validatorPk string, validatorPriv string, 
validatorPkBytes []byte, domain string) { + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, validatorPkBytes) + registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte(domain)}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, + } + _, err := _submitRegisterAsValidatorTxn(testMeta, validatorPk, validatorPriv, registerAsValidatorMetadata, nil, flushToDB) + require.NoError(t, err) + } + + constructAndSubmitStakeTxn := func(stakerPk string, stakerPriv string, validatorPkBytes []byte, amountNanos uint64) { + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(validatorPkBytes), + RestakeRewards: false, + StakeAmountNanos: uint256.NewInt().SetUint64(amountNanos), + } + _, err := _submitStakeTxn(testMeta, stakerPk, stakerPriv, stakeMetadata, nil, flushToDB) + require.NoError(t, err) + } + + { + // m0 and m1 register as validators. + constructAndSubmitRegisterValidatorTxn(m0Pub, m0Priv, m0PkBytes, "https://example.com") + constructAndSubmitRegisterValidatorTxn(m1Pub, m1Priv, m1PkBytes, "https://example2.com") + } + + { + // m0 stakes 100 nanos to themselves. + constructAndSubmitStakeTxn(m0Pub, m0Priv, m0PkBytes, 100) + // m0 stakes 200 nanos to m1. + constructAndSubmitStakeTxn(m0Pub, m0Priv, m1PkBytes, 200) + + // m1 stakes 110 nanos to m0. + constructAndSubmitStakeTxn(m1Pub, m1Priv, m0PkBytes, 110) + // m1 stakes 210 nanos to themselves. + constructAndSubmitStakeTxn(m1Pub, m1Priv, m1PkBytes, 210) + + // m2 stakes 120 nanos to m0. + constructAndSubmitStakeTxn(m2Pub, m2Priv, m0PkBytes, 120) + // m2 stakes 220 nanos to m1. + constructAndSubmitStakeTxn(m2Pub, m2Priv, m1PkBytes, 220) + + // m3 stakes 130 nanos to m0. + constructAndSubmitStakeTxn(m3Pub, m3Priv, m0PkBytes, 130) + // m3 stakes 230 nanos to m1. + constructAndSubmitStakeTxn(m3Pub, m3Priv, m1PkBytes, 230) + + // m4 stakes 100 nanos to m0. 
+ constructAndSubmitStakeTxn(m4Pub, m4Priv, m0PkBytes, 100) + // m4 stakes 200 nanos to m1. + constructAndSubmitStakeTxn(m4Pub, m4Priv, m1PkBytes, 200) + } + + { + // Verify when query limit 3 is lower than number of stake entries 10. + + topStakeEntries, err := utxoView().GetTopStakesByStakeAmount(3) + require.NoError(t, err) + require.Equal(t, 3, len(topStakeEntries)) + } + + { + // Verify when query limit 1000 is higher than number of stake entries 10. + + topStakeEntries, err := utxoView().GetTopStakesByStakeAmount(1000) + require.NoError(t, err) + require.Equal(t, 10, len(topStakeEntries)) + } + + { + // Verify ordering of top 5 stake entries, which includes breaking ties. + + topStakeEntries, err := utxoView().GetTopStakesByStakeAmount(6) + require.NoError(t, err) + require.Equal(t, 6, len(topStakeEntries)) + + require.Equal(t, uint64(230), topStakeEntries[0].StakeAmountNanos.Uint64()) + require.True(t, bytes.Equal(m1PKID.ToBytes(), topStakeEntries[0].ValidatorPKID.ToBytes())) + require.True(t, bytes.Equal(m3PKID.ToBytes(), topStakeEntries[0].StakerPKID.ToBytes())) + + require.Equal(t, uint64(220), topStakeEntries[1].StakeAmountNanos.Uint64()) + require.True(t, bytes.Equal(m1PKID.ToBytes(), topStakeEntries[1].ValidatorPKID.ToBytes())) + require.True(t, bytes.Equal(m2PKID.ToBytes(), topStakeEntries[1].StakerPKID.ToBytes())) + + require.Equal(t, uint64(210), topStakeEntries[2].StakeAmountNanos.Uint64()) + require.True(t, bytes.Equal(m1PKID.ToBytes(), topStakeEntries[2].ValidatorPKID.ToBytes())) + require.True(t, bytes.Equal(m1PKID.ToBytes(), topStakeEntries[2].StakerPKID.ToBytes())) + + require.Equal(t, uint64(200), topStakeEntries[3].StakeAmountNanos.Uint64()) + require.True(t, bytes.Equal(m1PKID.ToBytes(), topStakeEntries[3].ValidatorPKID.ToBytes())) + require.True(t, bytes.Equal(m0PKID.ToBytes(), topStakeEntries[3].StakerPKID.ToBytes())) + + require.Equal(t, uint64(200), topStakeEntries[4].StakeAmountNanos.Uint64()) + require.True(t, 
bytes.Equal(m1PKID.ToBytes(), topStakeEntries[4].ValidatorPKID.ToBytes())) + require.True(t, bytes.Equal(m4PKID.ToBytes(), topStakeEntries[4].StakerPKID.ToBytes())) + + require.Equal(t, uint64(130), topStakeEntries[5].StakeAmountNanos.Uint64()) + require.True(t, bytes.Equal(m0PKID.ToBytes(), topStakeEntries[5].ValidatorPKID.ToBytes())) + require.True(t, bytes.Equal(m3PKID.ToBytes(), topStakeEntries[5].StakerPKID.ToBytes())) + } +} + func TestGetLockedStakeEntriesInRange(t *testing.T) { // For this test, we manually place LockedStakeEntries in the database and // UtxoView to test merging the two to GetLockedStakeEntriesInRange. From 77593f1bf60908ccc786d5ee793fe14ce8a22e62 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 13 Jul 2023 14:57:18 -0400 Subject: [PATCH 149/762] More consistent naming for stake ordered badger indices (#597) --- lib/block_view_validator.go | 26 ++++++++++++------------- lib/db_utils.go | 12 ++++++------ lib/pos_epoch_complete_hook_test.go | 12 ++++++------ lib/pos_snapshot_entries.go | 30 ++++++++++++++--------------- 4 files changed, 40 insertions(+), 40 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index a7a365b24..dc78857fb 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -549,19 +549,19 @@ func DBKeyForValidatorByPKID(validatorEntry *ValidatorEntry) []byte { return key } -func DBKeyForValidatorByStatusAndStake(validatorEntry *ValidatorEntry) []byte { - key := append([]byte{}, Prefixes.PrefixValidatorByStatusAndStake...) +func DBKeyForValidatorByStatusAndStakeAmount(validatorEntry *ValidatorEntry) []byte { + key := append([]byte{}, Prefixes.PrefixValidatorByStatusAndStakeAmount...) key = append(key, EncodeUint8(uint8(validatorEntry.Status()))...) key = append(key, FixedWidthEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) 
return key } -func GetValidatorPKIDFromDBKeyForValidatorByStatusAndStake(key []byte) (*PKID, error) { +func GetValidatorPKIDFromDBKeyForValidatorByStatusAndStakeAmount(key []byte) (*PKID, error) { validatorPKIDBytes := key[len(key)-PublicKeyLenCompressed:] validatorPKID := PKID{} if err := validatorPKID.FromBytes(bytes.NewReader(validatorPKIDBytes)); err != nil { - return nil, errors.Wrapf(err, "GetValidatorPKIDFromDBKeyForValidatorByStatusAndStake: problem reading ValidatorPKID: ") + return nil, errors.Wrapf(err, "GetValidatorPKIDFromDBKeyForValidatorByStatusAndStakeAmount: problem reading ValidatorPKID: ") } return &validatorPKID, nil } @@ -613,7 +613,7 @@ func DBGetTopActiveValidatorsByStake( // Define a function to filter out validators PKIDs we want to skip while seeking through the DB. canSkipValidatorInBadgerSeek := func(badgerKey []byte) bool { - validatorPKID, err := GetValidatorPKIDFromDBKeyForValidatorByStatusAndStake(badgerKey) + validatorPKID, err := GetValidatorPKIDFromDBKeyForValidatorByStatusAndStakeAmount(badgerKey) if err != nil { // We return false here to be safe. Once the seek has completed, we attempt to parse the // keys a second time below. Any failures there will result in an error that we can propagate @@ -624,7 +624,7 @@ func DBGetTopActiveValidatorsByStake( } // Retrieve top N active ValidatorEntry keys by stake. - key := append([]byte{}, Prefixes.PrefixValidatorByStatusAndStake...) + key := append([]byte{}, Prefixes.PrefixValidatorByStatusAndStakeAmount...) key = append(key, EncodeUint8(uint8(ValidatorStatusActive))...) keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrderAndSkipFunc( handle, key, int(limit), nil, true, canSkipValidatorInBadgerSeek, @@ -636,7 +636,7 @@ func DBGetTopActiveValidatorsByStake( // For each key found, parse the ValidatorPKID from the key, // then retrieve the ValidatorEntry by the ValidatorPKID. 
for _, keyFound := range keysFound { - validatorPKID, err := GetValidatorPKIDFromDBKeyForValidatorByStatusAndStake(keyFound) + validatorPKID, err := GetValidatorPKIDFromDBKeyForValidatorByStatusAndStakeAmount(keyFound) if err != nil { return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem reading ValidatorPKID: ") } @@ -671,12 +671,12 @@ func DBPutValidatorWithTxn( ) } - // Set ValidatorEntry key in PrefixValidatorByStatusAndStake. The value should be nil. + // Set ValidatorEntry key in PrefixValidatorByStatusAndStakeAmount. The value should be nil. // We parse the ValidatorPKID from the key for this index. - key = DBKeyForValidatorByStatusAndStake(validatorEntry) + key = DBKeyForValidatorByStatusAndStakeAmount(validatorEntry) if err := DBSetWithTxn(txn, snap, key, nil); err != nil { return errors.Wrapf( - err, "DBPutValidatorWithTxn: problem storing ValidatorEntry in index PrefixValidatorByStatusAndStake", + err, "DBPutValidatorWithTxn: problem storing ValidatorEntry in index PrefixValidatorByStatusAndStakeAmount", ) } @@ -712,11 +712,11 @@ func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorPKID *PK ) } - // Delete ValidatorEntry.PKID from PrefixValidatorByStatusAndStake. - key = DBKeyForValidatorByStatusAndStake(validatorEntry) + // Delete ValidatorEntry.PKID from PrefixValidatorByStatusAndStakeAmount. 
+ key = DBKeyForValidatorByStatusAndStakeAmount(validatorEntry) if err := DBDeleteWithTxn(txn, snap, key); err != nil { return errors.Wrapf( - err, "DBDeleteValidatorWithTxn: problem deleting ValidatorEntry from index PrefixValidatorByStatusAndStake", + err, "DBDeleteValidatorWithTxn: problem deleting ValidatorEntry from index PrefixValidatorByStatusAndStakeAmount", ) } diff --git a/lib/db_utils.go b/lib/db_utils.go index 9db543015..2543e0fb4 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -481,10 +481,10 @@ type DBPrefixes struct { // Prefix, -> ValidatorEntry PrefixValidatorByPKID []byte `prefix_id:"[78]" is_state:"true"` - // PrefixValidatorByStatusAndStake: Retrieve the top N active validators by stake. + // PrefixValidatorByStatusAndStakeAmount: Retrieve the top N active validators by stake. // Prefix, , , -> nil // Note that we save space by storing a nil value and parsing the ValidatorPKID from the key. - PrefixValidatorByStatusAndStake []byte `prefix_id:"[79]" is_state:"true"` + PrefixValidatorByStatusAndStakeAmount []byte `prefix_id:"[79]" is_state:"true"` // PrefixStakeByValidatorAndStaker: Retrieve a StakeEntry. // Prefix, , -> StakeEntry @@ -538,11 +538,11 @@ type DBPrefixes struct { // Prefix, , -> *ValidatorEntry PrefixSnapshotValidatorSetByPKID []byte `prefix_id:"[86]" is_state:"true"` - // PrefixSnapshotValidatorSetByStake: Retrieve stake-ordered ValidatorEntries from a snapshot validator set + // PrefixSnapshotValidatorSetByStakeAmount: Retrieve stake-ordered ValidatorEntries from a snapshot validator set // by SnapshotAtEpochNumber. // Prefix, , , -> nil // Note: we parse the ValidatorPKID from the key and the value is nil to save space. 
- PrefixSnapshotValidatorSetByStake []byte `prefix_id:"[87]" is_state:"true"` + PrefixSnapshotValidatorSetByStakeAmount []byte `prefix_id:"[87]" is_state:"true"` // PrefixSnapshotValidatorSetTotalStakeAmountNanos: Retrieve a snapshot of the validator set's total amount of // staked DESO by SnapshotAtEpochNumber. @@ -758,7 +758,7 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByPKID) { // prefix_id:"[78]" return true, &ValidatorEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByStatusAndStake) { + } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByStatusAndStakeAmount) { // prefix_id:"[79]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixStakeByValidatorAndStaker) { @@ -782,7 +782,7 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetByPKID) { // prefix_id:"[86]" return true, &ValidatorEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetByStake) { + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetByStakeAmount) { // prefix_id:"[87]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetTotalStakeAmountNanos) { diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index ef329d544..bdfc762bf 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -153,7 +153,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } // Test SnapshotTopActiveValidatorsByStake is empty. - validatorEntries, err := utxoView().GetSnapshotValidatorSetByStake(10) + validatorEntries, err := utxoView().GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) require.Empty(t, validatorEntries) @@ -301,7 +301,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } // Test SnapshotTopActiveValidatorsByStake is populated. 
- validatorEntries, err := utxoView().GetSnapshotValidatorSetByStake(10) + validatorEntries, err := utxoView().GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) require.Len(t, validatorEntries, 7) require.Equal(t, validatorEntries[0].ValidatorPKID, m6PKID) @@ -395,7 +395,7 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test snapshotting changing validator set. // m0 unregisters as a validator. - snapshotValidatorSet, err := utxoView().GetSnapshotValidatorSetByStake(10) + snapshotValidatorSet, err := utxoView().GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) require.Len(t, snapshotValidatorSet, 7) @@ -406,7 +406,7 @@ func TestRunEpochCompleteHook(t *testing.T) { _runOnEpochCompleteHook() // m0 is still in the snapshot validator set. - snapshotValidatorSet, err = utxoView().GetSnapshotValidatorSetByStake(10) + snapshotValidatorSet, err = utxoView().GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) require.Len(t, snapshotValidatorSet, 7) @@ -414,7 +414,7 @@ func TestRunEpochCompleteHook(t *testing.T) { _runOnEpochCompleteHook() // m0 is dropped from the snapshot validator set. 
- snapshotValidatorSet, err = utxoView().GetSnapshotValidatorSetByStake(10) + snapshotValidatorSet, err = utxoView().GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) require.Len(t, snapshotValidatorSet, 6) } @@ -444,7 +444,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } getNumSnapshotActiveValidators := func() int { - snapshotValidatorSet, err := utxoView().GetSnapshotValidatorSetByStake(10) + snapshotValidatorSet, err := utxoView().GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) return len(snapshotValidatorSet) } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 33d4cbbd5..09dfa8edd 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -232,7 +232,7 @@ func (bav *UtxoView) GetSnapshotValidatorSetEntryByPKID(pkid *PKID) (*ValidatorE return validatorEntry, nil } -func (bav *UtxoView) GetSnapshotValidatorSetByStake(limit uint64) ([]*ValidatorEntry, error) { +func (bav *UtxoView) GetSnapshotValidatorSetByStakeAmount(limit uint64) ([]*ValidatorEntry, error) { // Calculate the SnapshotEpochNumber. snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() if err != nil { @@ -249,7 +249,7 @@ func (bav *UtxoView) GetSnapshotValidatorSetByStake(limit uint64) ([]*ValidatorE // Pull top N ValidatorEntries from the database (not present in the UtxoView). // Note that we will skip validators that are present in the view because we pass // utxoViewValidatorEntries to the function. 
- dbValidatorEntries, err := DBGetSnapshotValidatorSetByStake( + dbValidatorEntries, err := DBGetSnapshotValidatorSetByStakeAmount( bav.Handle, bav.Snapshot, limit, snapshotAtEpochNumber, utxoViewValidatorEntries, ) if err != nil { @@ -370,8 +370,8 @@ func DBKeyForSnapshotValidatorSetByPKID(validatorEntry *ValidatorEntry, snapshot return key } -func DBKeyForSnapshotValidatorSetByStake(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) []byte { - key := append([]byte{}, Prefixes.PrefixSnapshotValidatorSetByStake...) +func DBKeyForSnapshotValidatorSetByStakeAmount(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) []byte { + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorSetByStakeAmount...) key = append(key, EncodeUint64(snapshotAtEpochNumber)...) key = append(key, FixedWidthEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) @@ -414,7 +414,7 @@ func DBGetSnapshotValidatorSetEntryByPKIDWithTxn( return validatorEntry, nil } -func DBGetSnapshotValidatorSetByStake( +func DBGetSnapshotValidatorSetByStakeAmount( handle *badger.DB, snap *Snapshot, limit uint64, @@ -426,17 +426,17 @@ func DBGetSnapshotValidatorSetByStake( // Convert ValidatorEntriesToSkip to ValidatorEntryKeysToSkip. validatorKeysToSkip := NewSet([]string{}) for _, validatorEntryToSkip := range validatorEntriesToSkip { - validatorKeysToSkip.Add(string(DBKeyForSnapshotValidatorSetByStake(validatorEntryToSkip, snapshotAtEpochNumber))) + validatorKeysToSkip.Add(string(DBKeyForSnapshotValidatorSetByStakeAmount(validatorEntryToSkip, snapshotAtEpochNumber))) } // Retrieve top N active ValidatorEntry keys by stake. - key := append([]byte{}, Prefixes.PrefixSnapshotValidatorSetByStake...) + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorSetByStakeAmount...) key = append(key, EncodeUint64(snapshotAtEpochNumber)...) 
keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( handle, key, int(limit), nil, true, validatorKeysToSkip, ) if err != nil { - return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetByStake: problem retrieving top validators: ") + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetByStakeAmount: problem retrieving top validators: ") } // For each key found, parse the ValidatorPKID from the key, @@ -447,12 +447,12 @@ func DBGetSnapshotValidatorSetByStake( // Convert PKIDBytes to PKID. validatorPKID := &PKID{} if err = validatorPKID.FromBytes(bytes.NewReader(validatorPKIDBytes)); err != nil { - return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetByStake: problem reading ValidatorPKID: ") + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetByStakeAmount: problem reading ValidatorPKID: ") } // Retrieve ValidatorEntry by PKID. validatorEntry, err := DBGetSnapshotValidatorSetEntryByPKID(handle, snap, validatorPKID, snapshotAtEpochNumber) if err != nil { - return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetByStake: problem retrieving validator by PKID: ") + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetByStakeAmount: problem retrieving validator by PKID: ") } validatorEntries = append(validatorEntries, validatorEntry) } @@ -482,8 +482,8 @@ func DBPutSnapshotValidatorSetEntryWithTxn( ) } - // Put the ValidatorPKID in the SnapshotValidatorByStatusAndStake index. - key = DBKeyForSnapshotValidatorSetByStake(validatorEntry, snapshotAtEpochNumber) + // Put the ValidatorPKID in the SnapshotValidatorByStatusAndStakeAmount index. + key = DBKeyForSnapshotValidatorSetByStakeAmount(validatorEntry, snapshotAtEpochNumber) if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID)); err != nil { return errors.Wrapf( err, @@ -525,11 +525,11 @@ func DBDeleteSnapshotValidatorSetEntryWithTxn( ) } - // Delete ValidatorEntry.PKID from PrefixSnapshotValidatorByStatusAndStake. 
- key = DBKeyForSnapshotValidatorSetByStake(snapshotValidatorSetEntry, snapshotAtEpochNumber) + // Delete ValidatorEntry.PKID from PrefixSnapshotValidatorByStatusAndStakeAmount. + key = DBKeyForSnapshotValidatorSetByStakeAmount(snapshotValidatorSetEntry, snapshotAtEpochNumber) if err = DBDeleteWithTxn(txn, snap, key); err != nil { return errors.Wrapf( - err, "DBDeleteSnapshotValidatorSetEntryWithTxn: problem deleting ValidatorEntry from index PrefixSnapshotValidatorByStatusAndStake", + err, "DBDeleteSnapshotValidatorSetEntryWithTxn: problem deleting ValidatorEntry from index PrefixSnapshotValidatorByStatusAndStakeAmount", ) } From 00f9cd24685ffd9aedf9fca20e822dce3e74b4f4 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 17 Jul 2023 13:33:54 -0400 Subject: [PATCH 150/762] Snapshot top n StakeEntries for staking rewards (#598) * PoS Snapshot top n StakeEntries for staking rewards * Rename GlobalParam * Stub out all functions to implement * Revert global params changes * Move stubbed functions to pos_snapshot_entries file * Add comments for the block view utils * Implement DB key and DB write functions * Implement DB get function * Rename to SnapshotStakeToReward * Cleanup * Implement bav.GetSnapshotStakesToRewardByStakeAmount function * Snapshot the stake entries * Renames * Add unit tests * Cleanup * Filter stakers to top n in the validator set * More cleanup --- collections/slice.go | 11 ++ lib/block_view.go | 20 ++- lib/block_view_flush.go | 3 + lib/db_utils.go | 10 +- lib/pos_epoch_complete_hook.go | 31 +++- lib/pos_epoch_complete_hook_test.go | 75 +++++++- lib/pos_snapshot_entries.go | 262 ++++++++++++++++++++++++++++ 7 files changed, 405 insertions(+), 7 deletions(-) create mode 100644 collections/slice.go diff --git a/collections/slice.go b/collections/slice.go new file mode 100644 index 000000000..eb242c064 --- /dev/null +++ b/collections/slice.go @@ -0,0 +1,11 @@ +package collections + +func SliceFilter[T 
any](slice []T, filterFn func(T) bool) []T { + var result []T + for _, val := range slice { + if filterFn(val) { + result = append(result, val) + } + } + return result +} diff --git a/lib/block_view.go b/lib/block_view.go index 4f89f1ac3..363c52c91 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -139,7 +139,8 @@ type UtxoView struct { // the given SnapshotAtEpochNumber. SnapshotValidatorSet map[SnapshotValidatorSetMapKey]*ValidatorEntry - // SnapshotValidatorSetTotalStakeAmountNanos is a map of SnapshotAtEpochNumber to a GlobalActiveStakeAmountNanos. + // SnapshotValidatorSetTotalStakeAmountNanos is a map of SnapshotAtEpochNumber to the sum TotalStakeAmountNanos + // for the validator set of for an epoch. // It contains the snapshot value of the GlobalActiveStakeAmountNanos at the given SnapshotAtEpochNumber. SnapshotValidatorSetTotalStakeAmountNanos map[uint64]*uint256.Int @@ -148,6 +149,11 @@ type UtxoView struct { // generated at the given SnapshotAtEpochNumber. SnapshotLeaderSchedule map[SnapshotLeaderScheduleMapKey]*PKID + // SnapshotStakesToReward is a map of + // to a SnapshotStakeEntry. It contains a trimmed down struct with the ValidatorPKID, StakerPKID, and + // StakeAmount values of a StakeEntry that has been snapshotted at a given SnapshotAtEpochNumber. + SnapshotStakesToReward map[SnapshotStakeMapKey]*SnapshotStakeEntry + // The hash of the tip the view is currently referencing. Mainly used // for error-checking when doing a bulk operation on the view. 
TipHash *BlockHash @@ -260,6 +266,9 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // SnapshotLeaderSchedule bav.SnapshotLeaderSchedule = make(map[SnapshotLeaderScheduleMapKey]*PKID) + + // SnapshotStakesToReward + bav.SnapshotStakesToReward = make(map[SnapshotStakeMapKey]*SnapshotStakeEntry) } func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { @@ -553,8 +562,8 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { } // Copy the SnapshotValidatorSetTotalStakeAmountNanos - for epochNumber, globalActiveStakeAmountNanos := range bav.SnapshotValidatorSetTotalStakeAmountNanos { - newView.SnapshotValidatorSetTotalStakeAmountNanos[epochNumber] = globalActiveStakeAmountNanos.Clone() + for epochNumber, totalStakeAmountNanos := range bav.SnapshotValidatorSetTotalStakeAmountNanos { + newView.SnapshotValidatorSetTotalStakeAmountNanos[epochNumber] = totalStakeAmountNanos.Clone() } // Copy the SnapshotLeaderSchedule @@ -562,6 +571,11 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.SnapshotLeaderSchedule[mapKey] = validatorPKID.NewPKID() } + // Copy the SnapshotStakesToReward + for mapKey, snapshotStakeToReward := range bav.SnapshotStakesToReward { + newView.SnapshotStakesToReward[mapKey] = snapshotStakeToReward.Copy() + } + return newView, nil } diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 0e41e9ae5..5d85dd0c5 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -171,6 +171,9 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushSnapshotLeaderScheduleToDbWithTxn(txn, blockHeight); err != nil { return err } + if err := bav._flushSnapshotStakesToRewardToDbWithTxn(txn, blockHeight); err != nil { + return err + } return nil } diff --git a/lib/db_utils.go b/lib/db_utils.go index 2543e0fb4..567226cd5 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -553,7 +553,12 @@ type DBPrefixes struct { // Prefix, , -> ValidatorPKID PrefixSnapshotLeaderSchedule 
[]byte `prefix_id:"[89]" is_state:"true"` - // NEXT_TAG: 90 + // PrefixSnapshotStakeToRewardByStakeAmount: Retrieves the top N StakeEntries that can receive staking rewards for an epoch. + // Prefix, , , , -> nil + // Note, we parse the ValidatorPKID and StakerPKID from the key and the value is nil to save space. + PrefixSnapshotStakeToRewardByStakeAmount []byte `prefix_id:"[90]" is_state:"true"` + + // NEXT_TAG: 91 } // StatePrefixToDeSoEncoder maps each state prefix to a DeSoEncoder type that is stored under that prefix. @@ -791,6 +796,9 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotLeaderSchedule) { // prefix_id:"[89]" return true, &PKID{} + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotStakeToRewardByStakeAmount) { + // prefix_id:"[90]" + return false, nil } return true, nil diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index bb087c6a6..336f0629b 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -3,6 +3,7 @@ package lib import ( "math" + "github.com/deso-protocol/core/collections" "github.com/pkg/errors" ) @@ -34,8 +35,9 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) // snapshot state. They should have no other side effects that mutate the existing state of the view. // 1. Snapshot the current GlobalParamsEntry. // 2. Snapshot the current validator set. -// 3. Snapshot the current GlobalActiveStakeAmountNanos. +// 3. Snapshot the current validator set's TotalStakeAmountNanos. // 4. Snapshot the leader schedule. +// 5. Snapshot the current top N stake entries, who will receive staking rewards. // // Step 2: Transition to the next epoch. This runs all state-mutating operations that need to be run for // the epoch transition. We always perform state-mutating operations after creating snapshots. 
This way, @@ -100,6 +102,33 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { bav._setSnapshotLeaderScheduleValidator(validatorPKID, uint16(index), currentEpochEntry.EpochNumber) } + // Snapshot the current top n stake entries. + topStakeEntries, err := bav.GetTopStakesByStakeAmount(currentGlobalParamsEntry.StakingRewardDistributionMaxNumStakers) + if err != nil { + return errors.Wrapf(err, "RunEpochCompleteHook: error retrieving top StakeEntries: ") + } + + // Filter the top n stake entries by the current validator set. We do not want to reward + // stakes that are not in the current validator set. + validatorSetPKIDs := NewSet([]PKID{}) + for _, validatorEntry := range validatorSet { + validatorSetPKIDs.Add(*validatorEntry.ValidatorPKID) + } + topStakesInValidatorSet := collections.SliceFilter(topStakeEntries, func(s *StakeEntry) bool { + return validatorSetPKIDs.Includes(*s.ValidatorPKID) + }) + + // Snapshot only the top n stake entries that are in the validator set. + for _, stakeEntry := range topStakesInValidatorSet { + snapshotStakeEntry := SnapshotStakeEntry{ + SnapshotAtEpochNumber: currentEpochEntry.EpochNumber, + ValidatorPKID: stakeEntry.ValidatorPKID, + StakerPKID: stakeEntry.StakerPKID, + StakeAmountNanos: stakeEntry.StakeAmountNanos, + } + bav._setSnapshotStakeToReward(&snapshotStakeEntry) + } + // TODO: Delete old snapshots that are no longer used. // Retrieve the SnapshotGlobalParamsEntry. diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index bdfc762bf..ad40ba4f5 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -152,7 +152,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Nil(t, snapshotValidatorSetEntry) } - // Test SnapshotTopActiveValidatorsByStake is empty. + // Test GetSnapshotValidatorSetByStakeAmount is empty. 
validatorEntries, err := utxoView().GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) require.Empty(t, validatorEntries) @@ -170,6 +170,13 @@ func TestRunEpochCompleteHook(t *testing.T) { } } + _assertEmptyStakeSnapshots := func() { + // Test GetSnapshotStakesToRewardByStakeAmount is empty. + stakeEntries, err := utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + require.NoError(t, err) + require.Empty(t, stakeEntries) + } + // Seed a CurrentEpochEntry. tmpUtxoView := utxoView() tmpUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 0, FinalBlockHeight: blockHeight + 1}) @@ -220,6 +227,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, snapshotGlobalParamsEntry.ValidatorJailEpochDuration, uint64(3)) _assertEmptyValidatorSnapshots() + + _assertEmptyStakeSnapshots() } { // Test RunOnEpochCompleteHook() with no validators or stakers. @@ -242,6 +251,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, snapshotGlobalParamsEntry.ValidatorJailEpochDuration, uint64(4)) _assertEmptyValidatorSnapshots() + + _assertEmptyStakeSnapshots() } { // All validators register + stake to themselves. @@ -256,6 +267,10 @@ func TestRunEpochCompleteHook(t *testing.T) { validatorEntries, err := utxoView().GetTopActiveValidatorsByStake(10) require.NoError(t, err) require.Len(t, validatorEntries, 7) + + stakeEntries, err := utxoView().GetTopStakesByStakeAmount(10) + require.NoError(t, err) + require.Len(t, stakeEntries, 7) } { // Test RunOnEpochCompleteHook(). @@ -275,6 +290,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, snapshotGlobalParamsEntry.ValidatorJailEpochDuration, uint64(4)) _assertEmptyValidatorSnapshots() + + _assertEmptyStakeSnapshots() } { // Test RunOnEpochCompleteHook(). @@ -300,7 +317,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NotNil(t, snapshotValidatorSetEntry) } - // Test SnapshotTopActiveValidatorsByStake is populated. + // Test GetSnapshotValidatorSetByStakeAmount is populated. 
validatorEntries, err := utxoView().GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) require.Len(t, validatorEntries, 7) @@ -320,6 +337,15 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.NotNil(t, snapshotLeaderScheduleValidator) } + + // Test GetSnapshotStakesToRewardByStakeAmount is populated. + stakeEntries, err := utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + require.NoError(t, err) + require.Len(t, stakeEntries, 7) + require.Equal(t, stakeEntries[0].StakerPKID, m6PKID) + require.Equal(t, stakeEntries[6].StakerPKID, m0PKID) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(700)) + require.Equal(t, stakeEntries[6].StakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // Test snapshotting changing stake. @@ -348,6 +374,12 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(600)) + snapshotStakeEntries, err := utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + require.NoError(t, err) + require.Len(t, snapshotStakeEntries, 7) + require.Equal(t, snapshotStakeEntries[1].StakerPKID, m5PKID) + require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(600)) + // Run OnEpochCompleteHook(). _runOnEpochCompleteHook() @@ -356,6 +388,12 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(800)) + + snapshotStakeEntries, err = utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + require.NoError(t, err) + require.Len(t, snapshotStakeEntries, 7) + require.Equal(t, snapshotStakeEntries[0].StakerPKID, m5PKID) + require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(800)) } { // Test snapshotting changing GlobalParams. 
@@ -410,6 +448,10 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Len(t, snapshotValidatorSet, 7) + snapshotStakeEntries, err := utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + require.NoError(t, err) + require.Len(t, snapshotStakeEntries, 7) + // Run OnEpochCompleteHook(). _runOnEpochCompleteHook() @@ -417,6 +459,10 @@ func TestRunEpochCompleteHook(t *testing.T) { snapshotValidatorSet, err = utxoView().GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) require.Len(t, snapshotValidatorSet, 6) + + snapshotStakeEntries, err = utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + require.NoError(t, err) + require.Len(t, snapshotStakeEntries, 6) } { // Test jailing inactive validators. @@ -455,10 +501,24 @@ func TestRunEpochCompleteHook(t *testing.T) { return validatorEntry } + getNumStakes := func() int { + stakeEntries, err := utxoView().GetTopStakesByStakeAmount(10) + require.NoError(t, err) + return len(stakeEntries) + } + + getNumSnapshotStakes := func() int { + snapshotStakeEntries, err := utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + require.NoError(t, err) + return len(snapshotStakeEntries) + } + // In epoch 9, all registered validators have Status = Active. require.Equal(t, getCurrentEpochNumber(), 9) require.Equal(t, getNumCurrentActiveValidators(), 6) require.Equal(t, getNumSnapshotActiveValidators(), 6) + require.Equal(t, getNumStakes(), 6) + require.Equal(t, getNumSnapshotStakes(), 6) // Run OnEpochCompleteHook(). _runOnEpochCompleteHook() @@ -467,6 +527,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getCurrentEpochNumber(), 10) require.Equal(t, getNumCurrentActiveValidators(), 6) require.Equal(t, getNumSnapshotActiveValidators(), 6) + require.Equal(t, getNumStakes(), 6) + require.Equal(t, getNumSnapshotStakes(), 6) // Run OnEpochCompleteHook(). 
_runOnEpochCompleteHook() @@ -475,6 +537,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getCurrentEpochNumber(), 11) require.Equal(t, getNumCurrentActiveValidators(), 6) require.Equal(t, getNumSnapshotActiveValidators(), 6) + require.Equal(t, getNumStakes(), 6) + require.Equal(t, getNumSnapshotStakes(), 6) // Run OnEpochCompleteHook(). _runOnEpochCompleteHook() @@ -484,6 +548,9 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getCurrentEpochNumber(), 12) require.Empty(t, getNumCurrentActiveValidators()) require.Equal(t, getNumSnapshotActiveValidators(), 6) + require.Equal(t, getNumStakes(), 6) + require.Equal(t, getNumSnapshotStakes(), 6) + require.Equal(t, getCurrentValidator(m6PKID).Status(), ValidatorStatusJailed) require.Equal(t, getCurrentValidator(m6PKID).JailedAtEpochNumber, uint64(11)) @@ -495,6 +562,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getCurrentEpochNumber(), 13) require.Empty(t, getNumCurrentActiveValidators()) require.Equal(t, getNumSnapshotActiveValidators(), 6) + require.Equal(t, getNumStakes(), 6) + require.Equal(t, getNumSnapshotStakes(), 6) // Run OnEpochCompleteHook(). 
_runOnEpochCompleteHook() @@ -505,5 +574,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getCurrentEpochNumber(), 14) require.Empty(t, getNumCurrentActiveValidators()) require.Empty(t, getNumSnapshotActiveValidators()) + require.Equal(t, getNumStakes(), 6) + require.Empty(t, getNumSnapshotStakes()) } } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 09dfa8edd..34d832b54 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -675,6 +675,268 @@ func DBPutSnapshotValidatorSetTotalStakeAmountNanosWithTxn( return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalActiveStakeAmountNanos)) } +// +// SnapshotStakeToReward +// + +type SnapshotStakeMapKey struct { + SnapshotAtEpochNumber uint64 + ValidatorPKID PKID + StakerPKID PKID +} + +// This is a bare bones in-memory only construct used to capture the ValidatorPKID, +// StakerPKID, and StakeAmountNanos from a StakeEntry that has been snapshot. We +// define a new type here rather than re-using the StakeEntry type to reduce the risk +// of bugs. The StakeEntry type has additional fields (ex: RestakeRewards, ExtraData) +// that are not snapshotted. 
+type SnapshotStakeEntry struct { + SnapshotAtEpochNumber uint64 + StakerPKID *PKID + ValidatorPKID *PKID + StakeAmountNanos *uint256.Int +} + +func (s *SnapshotStakeEntry) Copy() *SnapshotStakeEntry { + return &SnapshotStakeEntry{ + SnapshotAtEpochNumber: s.SnapshotAtEpochNumber, + StakerPKID: s.StakerPKID.NewPKID(), + ValidatorPKID: s.ValidatorPKID.NewPKID(), + StakeAmountNanos: s.StakeAmountNanos.Clone(), + } +} + +func (s *SnapshotStakeEntry) ToMapKey() *SnapshotStakeMapKey { + return &SnapshotStakeMapKey{ + SnapshotAtEpochNumber: s.SnapshotAtEpochNumber, + ValidatorPKID: *s.ValidatorPKID, + StakerPKID: *s.StakerPKID, + } +} + +func (bav *UtxoView) _setSnapshotStakeToReward(snapshotStakeEntry *SnapshotStakeEntry) { + if snapshotStakeEntry == nil { + glog.Errorf("_setSnapshotStakeToReward: called with nil snapshotStakeEntry") + return + } + bav.SnapshotStakesToReward[*snapshotStakeEntry.ToMapKey()] = snapshotStakeEntry.Copy() +} + +// GetSnapshotStakesToRewardByStakeAmount returns the top N SnapshotStakeEntries that are eligible +// to receive block rewards for the current snapshot epoch. The entries are sorted by stake amount +// in descending order. +func (bav *UtxoView) GetSnapshotStakesToRewardByStakeAmount( + limit uint64, +) ([]*SnapshotStakeEntry, error) { + // Calculate the SnapshotEpochNumber. + snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() + if err != nil { + return nil, errors.Wrapf(err, "GetSnapshotStakesToRewardByStakeAmount: problem calculating SnapshotEpochNumber: ") + } + + // Create a slice of all UtxoView SnapshotStakeEntries to prevent pulling them from the db. + var utxoViewSnapshotStakeEntries []*SnapshotStakeEntry + for mapKey, stakeEntry := range bav.SnapshotStakesToReward { + if mapKey.SnapshotAtEpochNumber == snapshotAtEpochNumber { + utxoViewSnapshotStakeEntries = append(utxoViewSnapshotStakeEntries, stakeEntry) + } + } + + // Pull top N SnapshotStakeEntries from the database (not present in the UtxoView). 
+ dbSnapshotStakeEntries, err := DBGetSnapshotStakesToRewardByStakeAmount( + bav.Handle, bav.Snapshot, limit, snapshotAtEpochNumber, utxoViewSnapshotStakeEntries, + ) + if err != nil { + return nil, errors.Wrapf(err, "GetSnapshotStakesToRewardByStakeAmount: error retrieving entries from db: ") + } + + // Cache the SnapshotStakeEntries from the db in the UtxoView. + for _, snapshotStakeEntry := range dbSnapshotStakeEntries { + mapKey := snapshotStakeEntry.ToMapKey() + if _, exists := bav.SnapshotStakesToReward[*mapKey]; exists { + // We should never see duplicate entries from the db that are already in the UtxoView. This is a + // sign of a bug and that the utxoViewSnapshotStakeEntries isn't being used correctly. + return nil, fmt.Errorf("GetSnapshotStakesToRewardByStakeAmount: db returned a SnapshotStakeEntry" + + " that already exists in the UtxoView") + } + + bav._setSnapshotStakeToReward(snapshotStakeEntry) + } + + // Pull SnapshotStakeEntries from the UtxoView with stake > 0. All entries should have > 0 stake to begin + // with, but we filter here again just in case. + var mergedSnapshotStakeEntries []*SnapshotStakeEntry + for mapKey, snapshotStakeEntry := range bav.SnapshotStakesToReward { + if mapKey.SnapshotAtEpochNumber == snapshotAtEpochNumber && + !snapshotStakeEntry.StakeAmountNanos.IsZero() { + mergedSnapshotStakeEntries = append(mergedSnapshotStakeEntries, snapshotStakeEntry) + } + } + + // Sort the SnapshotStakeEntries DESC by StakeAmountNanos. 
+ sort.Slice(mergedSnapshotStakeEntries, func(ii, jj int) bool { + stakeAmountCmp := mergedSnapshotStakeEntries[ii].StakeAmountNanos.Cmp( + mergedSnapshotStakeEntries[jj].StakeAmountNanos, + ) + if stakeAmountCmp != 0 { + return stakeAmountCmp > 0 + } + + validatorPKIDCmp := bytes.Compare( + mergedSnapshotStakeEntries[ii].ValidatorPKID.ToBytes(), + mergedSnapshotStakeEntries[jj].ValidatorPKID.ToBytes(), + ) + if validatorPKIDCmp != 0 { + return validatorPKIDCmp > 0 + } + + return bytes.Compare( + mergedSnapshotStakeEntries[ii].StakerPKID.ToBytes(), + mergedSnapshotStakeEntries[jj].StakerPKID.ToBytes(), + ) > 0 + }) + + // Return top N. + upperBound := limit + if uint64(len(mergedSnapshotStakeEntries)) < upperBound { + upperBound = uint64(len(mergedSnapshotStakeEntries)) + } + return mergedSnapshotStakeEntries[0:upperBound], nil +} + +func DBGetSnapshotStakesToRewardByStakeAmount( + handle *badger.DB, + snap *Snapshot, + limit uint64, + snapshotAtEpochNumber uint64, + snapshotStakeEntriesToSkip []*SnapshotStakeEntry, +) ([]*SnapshotStakeEntry, error) { + var snapshotStakeEntries []*SnapshotStakeEntry + + // Convert SnapshotStakeEntriesToSkip to the StakeMapKey we need to skip. + snapshotStakeKeysToSkip := NewSet([]string{}) + for _, snapshotStakeEntryToSkip := range snapshotStakeEntriesToSkip { + snapshotStakeKeysToSkip.Add( + string(DBKeyForSnapshotStakeToRewardByStakeAmount(snapshotStakeEntryToSkip)), + ) + } + + // Retrieve top N SnapshotStakeEntry keys by stake amount. + key := DBKeyForSnapshotStakeToRewardAtEpochNumber(snapshotAtEpochNumber) + keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( + handle, key, int(limit), nil, true, snapshotStakeKeysToSkip, + ) + if err != nil { + return nil, errors.Wrapf(err, "DBGetSnapshotStakesToRewardByStakeAmount:"+ + " problem retrieving top stakes: ") + } + + // For each key found, parse the SnapshotStakeEntry from the key. 
+ for _, keyFound := range keysFound { + snapshotStakeEntry, err := DecodeSnapshotStakeFromDBKey(keyFound) + if err != nil { + return nil, errors.Wrapf(err, "DBGetSnapshotStakesToRewardByStakeAmount:"+ + " problem reading SnapshotStakeEntry: ") + } + + snapshotStakeEntries = append(snapshotStakeEntries, snapshotStakeEntry) + } + + return snapshotStakeEntries, nil +} + +func (bav *UtxoView) _flushSnapshotStakesToRewardToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + for mapKey, snapshotStakeEntry := range bav.SnapshotStakesToReward { + if snapshotStakeEntry == nil { + return fmt.Errorf( + "_flushSnapshotStakesToRewardToDbWithTxn: found nil snapshotStakeEntry for"+ + " EpochNumber %d, this should never happen", + mapKey.SnapshotAtEpochNumber, + ) + } + if err := DBPutSnapshotStakeToRewardWithTxn(txn, bav.Snapshot, snapshotStakeEntry, blockHeight); err != nil { + return errors.Wrapf( + err, + "_flushSnapshotStakesToRewardToDbWithTxn: problem setting snapshotStakeEntry"+ + " for SnapshotAtEpochNumber %d: ", + mapKey.SnapshotAtEpochNumber, + ) + } + } + return nil +} + +func DBPutSnapshotStakeToRewardWithTxn( + txn *badger.Txn, + snap *Snapshot, + snapshotStakeEntry *SnapshotStakeEntry, + blockHeight uint64, +) error { + if snapshotStakeEntry == nil { + // This should never happen but is a sanity check. 
+ glog.Errorf("DBPutSnapshotStakeToRewardWithTxn: called with nil snapshotStakeEntry") + return nil + } + key := DBKeyForSnapshotStakeToRewardByStakeAmount(snapshotStakeEntry) + if err := DBSetWithTxn(txn, snap, key, nil); err != nil { + return errors.Wrapf( + err, + "DBPutSnapshotStakeToRewardWithTxn: problem putting snapshotStakeEntry in the"+ + " SnapshotLeaderSchedule index: ", + ) + } + return nil +} + +func DBKeyForSnapshotStakeToRewardByStakeAmount(snapshotStakeEntry *SnapshotStakeEntry) []byte { + data := DBKeyForSnapshotStakeToRewardAtEpochNumber(snapshotStakeEntry.SnapshotAtEpochNumber) + data = append(data, FixedWidthEncodeUint256(snapshotStakeEntry.StakeAmountNanos)...) + data = append(data, snapshotStakeEntry.ValidatorPKID.ToBytes()...) + data = append(data, snapshotStakeEntry.StakerPKID.ToBytes()...) + return data +} + +func DBKeyForSnapshotStakeToRewardAtEpochNumber(snapshotAtEpochNumber uint64) []byte { + data := append([]byte{}, Prefixes.PrefixSnapshotStakeToRewardByStakeAmount...) + data = append(data, EncodeUint64(snapshotAtEpochNumber)...) + return data +} + +func DecodeSnapshotStakeFromDBKey(stakeToRewardByStakeAmountDBKey []byte) (*SnapshotStakeEntry, error) { + var err error + rr := bytes.NewReader(stakeToRewardByStakeAmountDBKey) + + // Seek past the prefix. + if _, err := rr.Seek(int64(len(Prefixes.PrefixSnapshotStakeToRewardByStakeAmount)), 0); err != nil { + return nil, errors.Wrapf(err, "DecodeSnapshotStakeFromDBKey: Unable to skip past the prefix") + } + + decodedOutput := &SnapshotStakeEntry{} + + // The next 8 bytes are guaranteed to be the snapshotAtEpochNumber, since they are fixed-width. 
+ snapshotAtEpochNumberBytes := make([]byte, 8) + if _, err := rr.Read(snapshotAtEpochNumberBytes); err != nil { + return nil, errors.Wrapf(err, "DecodeSnapshotStakeFromDBKey: Unable to read SnapshotAtEpochNumber") + } + decodedOutput.SnapshotAtEpochNumber = DecodeUint64(snapshotAtEpochNumberBytes) + + if decodedOutput.StakeAmountNanos, err = FixedWidthDecodeUint256(rr); err != nil { + return nil, errors.Wrapf(err, "DecodeSnapshotStakeFromDBKey: Unable to read StakeAmountNanos") + } + + decodedOutput.ValidatorPKID = &PKID{} + if err := decodedOutput.ValidatorPKID.FromBytes(rr); err != nil { + return nil, errors.Wrapf(err, "DecodeSnapshotStakeFromDBKey: unable to read ValidatorPKID") + } + + decodedOutput.StakerPKID = &PKID{} + if err := decodedOutput.StakerPKID.FromBytes(rr); err != nil { + return nil, errors.Wrapf(err, "DecodeSnapshotStakeFromDBKey: unable to read StakerPKID") + } + + return decodedOutput, nil +} + // // SnapshotLeaderScheduleValidator // From b0944ead6ab6357e7e5ea68a63bc35bf45819d23 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 17 Jul 2023 13:41:33 -0400 Subject: [PATCH 151/762] Easy - Rename StakingRewardsMaxNumStakes global param (#605) * Easy - Rename StakingRewardsMaxNumStakes global param * Fix typo --- lib/block_view.go | 6 +++--- lib/block_view_types.go | 14 +++++++------- lib/constants.go | 14 +++++++------- lib/pos_epoch_complete_hook.go | 2 +- lib/pos_snapshot_entries.go | 6 +++--- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 363c52c91..33857c167 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3043,10 +3043,10 @@ func (bav *UtxoView) _connectUpdateGlobalParams( return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode ValidatorSetMaxNumValidators as uint64") } } - if len(extraData[StakingRewardDistributionMaxNumStakersKey]) > 0 { - newGlobalParamsEntry.StakingRewardDistributionMaxNumStakers, 
bytesRead = Uvarint(extraData[StakingRewardDistributionMaxNumStakersKey]) + if len(extraData[StakingRewardsMaxNumStakesKey]) > 0 { + newGlobalParamsEntry.StakingRewardsMaxNumStakes, bytesRead = Uvarint(extraData[StakingRewardsMaxNumStakesKey]) if bytesRead <= 0 { - return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode StakingRewardDistributionMaxNumStakers as uint64") + return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode StakingRewardsMaxNumStakes as uint64") } } if len(extraData[EpochDurationNumBlocksKey]) > 0 { diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 8c75f6738..b3e45339e 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3777,10 +3777,10 @@ type GlobalParamsEntry struct { // consensus. ValidatorSetMaxNumValidators uint64 - // StakingRewardDistributionMaxNumStakers is the maximum number of stake entries - // that are eligible to receive block rewards every epoch in the Proof-of-Stake + // StakingRewardsMaxNumStakes is the maximum number of stake entries that are + // eligible to receive block rewards every epoch in the Proof-of-Stake // consensus. - StakingRewardDistributionMaxNumStakers uint64 + StakingRewardsMaxNumStakes uint64 // EpochDurationNumBlocks is the number of blocks included in one epoch. 
EpochDurationNumBlocks uint64 @@ -3803,7 +3803,7 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { ValidatorJailEpochDuration: gp.ValidatorJailEpochDuration, LeaderScheduleMaxNumValidators: gp.LeaderScheduleMaxNumValidators, ValidatorSetMaxNumValidators: gp.ValidatorSetMaxNumValidators, - StakingRewardDistributionMaxNumStakers: gp.StakingRewardDistributionMaxNumStakers, + StakingRewardsMaxNumStakes: gp.StakingRewardsMaxNumStakes, EpochDurationNumBlocks: gp.EpochDurationNumBlocks, JailInactiveValidatorGracePeriodEpochs: gp.JailInactiveValidatorGracePeriodEpochs, } @@ -3825,7 +3825,7 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.ValidatorJailEpochDuration)...) data = append(data, UintToBuf(gp.LeaderScheduleMaxNumValidators)...) data = append(data, UintToBuf(gp.ValidatorSetMaxNumValidators)...) - data = append(data, UintToBuf(gp.StakingRewardDistributionMaxNumStakers)...) + data = append(data, UintToBuf(gp.StakingRewardsMaxNumStakes)...) data = append(data, UintToBuf(gp.EpochDurationNumBlocks)...) data = append(data, UintToBuf(gp.JailInactiveValidatorGracePeriodEpochs)...) 
} @@ -3878,9 +3878,9 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading ValidatorSetMaxNumValidators: ") } - gp.StakingRewardDistributionMaxNumStakers, err = ReadUvarint(rr) + gp.StakingRewardsMaxNumStakes, err = ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading StakingRewardDistributionMaxNumStakers: ") + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading StakingRewardsMaxNumStakes: ") } gp.EpochDurationNumBlocks, err = ReadUvarint(rr) if err != nil { diff --git a/lib/constants.go b/lib/constants.go index ac2e97948..971df1b31 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -670,9 +670,9 @@ type DeSoParams struct { // that are included in the validator set for any given epoch. DefaultValidatorSetMaxNumValidators uint64 - // DefaultStakingRewardDistributionMaxNumStakers is the default number of stake entries + // DefaultStakingRewardsMaxNumStakes is the default number of stake entries // that are included in the staking reward distribution in each epoch. - DefaultStakingRewardDistributionMaxNumStakers uint64 + DefaultStakingRewardsMaxNumStakes uint64 // DefaultEpochDurationNumBlocks is the default number of blocks included in one epoch. DefaultEpochDurationNumBlocks uint64 @@ -1074,8 +1074,8 @@ var DeSoMainnetParams = DeSoParams{ // The max number of validators included in a validator set for any given epoch. DefaultValidatorSetMaxNumValidators: uint64(1000), - // The max number of stakers included in a staking rewards distribution every epoch. - DefaultStakingRewardDistributionMaxNumStakers: uint64(10000), + // The max number of stakes included in a staking rewards distribution every epoch. 
+ DefaultStakingRewardsMaxNumStakes: uint64(10000), // The number of blocks in one epoch DefaultEpochDurationNumBlocks: uint64(3600), @@ -1329,8 +1329,8 @@ var DeSoTestnetParams = DeSoParams{ // The max number of validators included in a validator set for any given epoch. DefaultValidatorSetMaxNumValidators: uint64(1000), - // The max number of stakers included in a staking rewards distribution every epoch. - DefaultStakingRewardDistributionMaxNumStakers: uint64(10000), + // The max number of stakes included in a staking rewards distribution every epoch. + DefaultStakingRewardsMaxNumStakes: uint64(10000), // The number of blocks in one epoch DefaultEpochDurationNumBlocks: uint64(3600), @@ -1386,7 +1386,7 @@ const ( ValidatorJailEpochDurationKey = "ValidatorJailEpochDuration" LeaderScheduleMaxNumValidatorsKey = "LeaderScheduleMaxNumValidators" ValidatorSetMaxNumValidatorsKey = "ValidatorSetMaxNumValidators" - StakingRewardDistributionMaxNumStakersKey = "StakingRewardDistributionMaxNumStakers" + StakingRewardsMaxNumStakesKey = "StakingRewardsMaxNumStakes" EpochDurationNumBlocksKey = "EpochDurationNumBlocks" JailInactiveValidatorGracePeriodEpochsKey = "JailInactiveValidatorGracePeriodEpochs" diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 336f0629b..bae8af90d 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -103,7 +103,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { } // Snapshot the current top n stake entries. 
- topStakeEntries, err := bav.GetTopStakesByStakeAmount(currentGlobalParamsEntry.StakingRewardDistributionMaxNumStakers) + topStakeEntries, err := bav.GetTopStakesByStakeAmount(currentGlobalParamsEntry.StakingRewardsMaxNumStakes) if err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: error retrieving top StakeEntries: ") } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 34d832b54..44d36881a 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -101,8 +101,8 @@ func _mergeGlobalParamEntryDefaults(bav *UtxoView, globalParamsEntry *GlobalPara if globalParamsEntryCopy.ValidatorSetMaxNumValidators == 0 { globalParamsEntryCopy.ValidatorSetMaxNumValidators = bav.Params.DefaultValidatorSetMaxNumValidators } - if globalParamsEntryCopy.StakingRewardDistributionMaxNumStakers == 0 { - globalParamsEntryCopy.StakingRewardDistributionMaxNumStakers = bav.Params.DefaultStakingRewardDistributionMaxNumStakers + if globalParamsEntryCopy.StakingRewardsMaxNumStakes == 0 { + globalParamsEntryCopy.StakingRewardsMaxNumStakes = bav.Params.DefaultStakingRewardsMaxNumStakes } if globalParamsEntryCopy.EpochDurationNumBlocks == 0 { globalParamsEntryCopy.EpochDurationNumBlocks = bav.Params.DefaultEpochDurationNumBlocks @@ -686,7 +686,7 @@ type SnapshotStakeMapKey struct { } // This is a bare bones in-memory only construct used to capture the ValidatorPKID, -// StakerPKID, and StakeAmountNanos from a StakeEntry that has been snapshot. We +// StakerPKID, and StakeAmountNanos from a StakeEntry that has been snapshotted. We // define a new type here rather than re-using the StakeEntry type to reduce the risk // of bugs. The StakeEntry type has additional fields (ex: RestakeRewards, ExtraData) // that are not snapshotted. 
From 834170858b818ad1adc242bf7515c8911d2c035b Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 17 Jul 2023 13:54:51 -0400 Subject: [PATCH 152/762] Easy - Cleanup and rename GetTopActiveValidatorsByStakeAmount for clarity (#606) --- lib/block_view_validator.go | 14 ++++++------ lib/block_view_validator_test.go | 34 ++++++++++++++--------------- lib/pos_epoch_complete_hook.go | 2 +- lib/pos_epoch_complete_hook_test.go | 4 ++-- lib/pos_leader_schedule.go | 2 +- lib/pos_leader_schedule_test.go | 4 ++-- 6 files changed, 30 insertions(+), 30 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index dc78857fb..a229d26c6 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -597,7 +597,7 @@ func DBGetValidatorByPKIDWithTxn(txn *badger.Txn, snap *Snapshot, pkid *PKID) (* return validatorEntry, nil } -func DBGetTopActiveValidatorsByStake( +func DBGetTopActiveValidatorsByStakeAmount( handle *badger.DB, snap *Snapshot, limit uint64, @@ -630,7 +630,7 @@ func DBGetTopActiveValidatorsByStake( handle, key, int(limit), nil, true, canSkipValidatorInBadgerSeek, ) if err != nil { - return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem retrieving top validators: ") + return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStakeAmount: problem retrieving top validators: ") } // For each key found, parse the ValidatorPKID from the key, @@ -638,12 +638,12 @@ func DBGetTopActiveValidatorsByStake( for _, keyFound := range keysFound { validatorPKID, err := GetValidatorPKIDFromDBKeyForValidatorByStatusAndStakeAmount(keyFound) if err != nil { - return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem reading ValidatorPKID: ") + return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStakeAmount: problem reading ValidatorPKID: ") } // Retrieve ValidatorEntry by PKID. 
validatorEntry, err := DBGetValidatorByPKID(handle, snap, validatorPKID) if err != nil { - return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStake: problem retrieving validator by PKID: ") + return nil, errors.Wrapf(err, "DBGetTopActiveValidatorsByStakeAmount: problem retrieving validator by PKID: ") } validatorEntries = append(validatorEntries, validatorEntry) } @@ -1759,7 +1759,7 @@ func (bav *UtxoView) GetValidatorByPublicKey(validatorPublicKey *PublicKey) (*Va return validatorEntry, nil } -func (bav *UtxoView) GetTopActiveValidatorsByStake(limit uint64) ([]*ValidatorEntry, error) { +func (bav *UtxoView) GetTopActiveValidatorsByStakeAmount(limit uint64) ([]*ValidatorEntry, error) { // Validate limit param. if limit == uint64(0) { return []*ValidatorEntry{}, nil @@ -1777,9 +1777,9 @@ func (bav *UtxoView) GetTopActiveValidatorsByStake(limit uint64) ([]*ValidatorEn // Pull top N active ValidatorEntries from the database (not present in the UtxoView). // Note that we will skip validators that are present in the view because we pass // utxoViewValidatorEntries to the function. - dbValidatorEntries, err := DBGetTopActiveValidatorsByStake(bav.Handle, bav.Snapshot, limit, utxoViewValidatorEntries) + dbValidatorEntries, err := DBGetTopActiveValidatorsByStakeAmount(bav.Handle, bav.Snapshot, limit, utxoViewValidatorEntries) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetTopActiveValidatorsByStake: error retrieving entries from db: ") + return nil, errors.Wrapf(err, "UtxoView.GetTopActiveValidatorsByStakeAmount: error retrieving entries from db: ") } // Cache top N active ValidatorEntries from the db in the UtxoView. 
for _, validatorEntry := range dbValidatorEntries { diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index f6a2e8765..39b1c5e05 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -222,7 +222,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { { // Query: retrieve top active ValidatorEntries by stake. // Should be empty since m0's TotalStakeAmountNanos is zero. - validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(1) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStakeAmount(1) require.NoError(t, err) require.Empty(t, validatorEntries) } @@ -275,7 +275,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { } { // Query: retrieve top active ValidatorEntries by stake - validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(1) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStakeAmount(1) require.NoError(t, err) require.Empty(t, validatorEntries) } @@ -661,12 +661,12 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { _executeAllTestRollbackAndFlush(testMeta) } -func TestGetTopActiveValidatorsByStake(t *testing.T) { - _testGetTopActiveValidatorsByStake(t, false) - _testGetTopActiveValidatorsByStake(t, true) +func TestGetTopActiveValidatorsByStakeAmount(t *testing.T) { + _testGetTopActiveValidatorsByStakeAmount(t, false) + _testGetTopActiveValidatorsByStakeAmount(t, true) } -func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { +func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { var validatorEntries []*ValidatorEntry var err error @@ -741,7 +741,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators is empty since m0's TotalStakeAmountNanos is zero. 
- validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) require.Empty(t, validatorEntries) } @@ -757,7 +757,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators is empty since both validators' TotalStakeAmountNanos are zero. - validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) require.Empty(t, validatorEntries) } @@ -773,7 +773,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators is empty since all three validators' TotalStakeAmountNanos are zero. - validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) require.Empty(t, validatorEntries) } @@ -803,7 +803,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators. - validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) require.Len(t, validatorEntries, 3) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) @@ -822,7 +822,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { _, err = _submitUnstakeTxn(testMeta, m3Pub, m3Priv, unstakeMetadata, nil, flushToDB) // Verify top validators. 
- validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) require.Len(t, validatorEntries, 3) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) @@ -841,7 +841,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { _, err = _submitUnstakeTxn(testMeta, m3Pub, m3Priv, unstakeMetadata, nil, flushToDB) // Verify top validators. - validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) @@ -855,7 +855,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators. - validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) require.Len(t, validatorEntries, 1) require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) @@ -871,7 +871,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators. - validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) @@ -889,7 +889,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { require.NoError(t, err) // Verify top validators. 
- validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(10) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) @@ -899,7 +899,7 @@ func _testGetTopActiveValidatorsByStake(t *testing.T, flushToDB bool) { } { // Verify top validators with LIMIT. - validatorEntries, err = utxoView().GetTopActiveValidatorsByStake(1) + validatorEntries, err = utxoView().GetTopActiveValidatorsByStakeAmount(1) require.NoError(t, err) require.Len(t, validatorEntries, 1) require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) @@ -1081,7 +1081,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { ) // Fetch TopActiveValidatorsByStake merging ValidatorEntries from the db and UtxoView. - validatorEntries, err := utxoView.GetTopActiveValidatorsByStake(6) + validatorEntries, err := utxoView.GetTopActiveValidatorsByStakeAmount(6) require.NoError(t, err) require.Len(t, validatorEntries, 3) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index bae8af90d..5240764f9 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -77,7 +77,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { bav._setSnapshotGlobalParamsEntry(bav.GlobalParamsEntry, currentEpochEntry.EpochNumber) // Snapshot the current top n active validators as the current validator set. 
- validatorSet, err := bav.GetTopActiveValidatorsByStake(currentGlobalParamsEntry.ValidatorSetMaxNumValidators) + validatorSet, err := bav.GetTopActiveValidatorsByStakeAmount(currentGlobalParamsEntry.ValidatorSetMaxNumValidators) if err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: error retrieving top ValidatorEntries: ") } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index ad40ba4f5..10b055372 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -264,7 +264,7 @@ func TestRunEpochCompleteHook(t *testing.T) { _registerAndStake(m5Pub, m5Priv, 600) _registerAndStake(m6Pub, m6Priv, 700) - validatorEntries, err := utxoView().GetTopActiveValidatorsByStake(10) + validatorEntries, err := utxoView().GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) require.Len(t, validatorEntries, 7) @@ -484,7 +484,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } getNumCurrentActiveValidators := func() int { - validatorEntries, err := utxoView().GetTopActiveValidatorsByStake(10) + validatorEntries, err := utxoView().GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) return len(validatorEntries) } diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 518f08dd2..adb8310f1 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -18,7 +18,7 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) { currentGlobalParamsEntry := bav.GetCurrentGlobalParamsEntry() // Retrieve top, active validators ordered by stake. 
- validatorEntries, err := bav.GetTopActiveValidatorsByStake(currentGlobalParamsEntry.LeaderScheduleMaxNumValidators) + validatorEntries, err := bav.GetTopActiveValidatorsByStakeAmount(currentGlobalParamsEntry.LeaderScheduleMaxNumValidators) if err != nil { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving top ValidatorEntries: ") } diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 8cdb48460..38efa57fb 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -182,8 +182,8 @@ func TestGenerateLeaderSchedule(t *testing.T) { registerValidator(m6Pub, m6Priv, 700) } { - // Verify GetTopActiveValidatorsByStake. - validatorEntries, err := newUtxoView().GetTopActiveValidatorsByStake(10) + // Verify GetTopActiveValidatorsByStakeAmount. + validatorEntries, err := newUtxoView().GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) require.Len(t, validatorEntries, 7) require.True(t, validatorEntries[0].ValidatorPKID.Eq(m6PKID)) From 2b24bebd01ad5052b05cbf110a794c4a56b998b4 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 18 Jul 2023 09:38:13 -0400 Subject: [PATCH 153/762] Distribute Staking Rewards on Epoch Transition (#607) * Reward Stakers on Epoch transition * First pass implementation * Cleanup * Add unit tests * Return UtxoOps for distributing staking rewards * Add UtxoOp for restaking * Address feedback --- lib/pos_epoch_complete_hook.go | 17 +++- lib/pos_epoch_complete_hook_test.go | 101 +++++++++++++-------- lib/pos_staking_rewards.go | 134 ++++++++++++++++++++++++++++ 3 files changed, 211 insertions(+), 41 deletions(-) create mode 100644 lib/pos_staking_rewards.go diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 5240764f9..5358b16e2 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -44,8 +44,9 @@ func (bav *UtxoView) 
IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) // the snapshot created at the end of epoch n always reflects the state of the view at the end of epoch n. // And it does not reflect the state changes that occur AFTER epoch n ends and before epoch n+1 BEGINS. // 1. Jail all inactive validators from the current snapshot validator set. -// 2. Compute the final block height for the next epoch. -// 3. Transition CurrentEpochEntry to the next epoch. +// 2. Reward all snapshotted stakes from the current snapshot validator set. +// 3. Compute the final block height for the next epoch. +// 4. Transition CurrentEpochEntry to the next epoch. func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Rolls-over the current epoch into a new one. Handles the associated snapshotting + accounting. @@ -137,13 +138,25 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving SnapshotGlobalParamsEntry: ") } + utxoOps := []*UtxoOperation{} + // Jail all inactive validators from the current snapshot validator set. This is an O(n) operation // that loops through all validators and jails them if they are inactive. A jailed validator should be // considered jailed in the next epoch we are transition into. + // + // TODO: Return UtxoOps for jailing validators. if err = bav.JailAllInactiveValidators(blockHeight); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: problem jailing all inactive validators: ") } + // Reward all snapshotted stakes from the current snapshot validator set. This is an O(n) operation + // that loops through all of the snapshotted stakes and rewards them. + stakeDistributionUtxoOps, err := bav.DistributeStakingRewardsToSnapshotStakes() + if err != nil { + return errors.Wrapf(err, "RunEpochCompleteHook: problem rewarding snapshot stakes: ") + } + utxoOps = append(utxoOps, stakeDistributionUtxoOps...) + // Calculate the NextEpochFinalBlockHeight. 
nextEpochFinalBlockHeight, err := SafeUint64().Add(blockHeight, snapshotGlobalParamsEntry.EpochDurationNumBlocks) if err != nil { diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 10b055372..7b9613b31 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -110,7 +110,7 @@ func TestRunEpochCompleteHook(t *testing.T) { return newUtxoView } - _registerAndStake := func(publicKey string, privateKey string, stakeAmountNanos uint64) { + _registerAndStake := func(publicKey string, privateKey string, stakeAmountNanos uint64, restakeRewards bool) { // Convert PublicKeyBase58Check to PublicKeyBytes. pkBytes, _, err := Base58CheckDecode(publicKey) require.NoError(t, err) @@ -125,12 +125,9 @@ func TestRunEpochCompleteHook(t *testing.T) { _, err = _submitRegisterAsValidatorTxn(testMeta, publicKey, privateKey, registerMetadata, nil, true) require.NoError(t, err) - // Validator stakes to himself. - if stakeAmountNanos == 0 { - return - } stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(pkBytes), + RestakeRewards: restakeRewards, StakeAmountNanos: uint256.NewInt().SetUint64(stakeAmountNanos), } _, err = _submitStakeTxn(testMeta, publicKey, privateKey, stakeMetadata, nil, true) @@ -256,13 +253,13 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // All validators register + stake to themselves. 
- _registerAndStake(m0Pub, m0Priv, 100) - _registerAndStake(m1Pub, m1Priv, 200) - _registerAndStake(m2Pub, m2Priv, 300) - _registerAndStake(m3Pub, m3Priv, 400) - _registerAndStake(m4Pub, m4Priv, 500) - _registerAndStake(m5Pub, m5Priv, 600) - _registerAndStake(m6Pub, m6Priv, 700) + _registerAndStake(m0Pub, m0Priv, 100, false) + _registerAndStake(m1Pub, m1Priv, 200, false) + _registerAndStake(m2Pub, m2Priv, 300, false) + _registerAndStake(m3Pub, m3Priv, 400, false) + _registerAndStake(m4Pub, m4Priv, 500, false) + _registerAndStake(m5Pub, m5Priv, 600, false) + _registerAndStake(m6Pub, m6Priv, 700, false) validatorEntries, err := utxoView().GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) @@ -339,13 +336,13 @@ func TestRunEpochCompleteHook(t *testing.T) { } // Test GetSnapshotStakesToRewardByStakeAmount is populated. - stakeEntries, err := utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + snapshotStakeEntries, err := utxoView().GetSnapshotStakesToRewardByStakeAmount(10) require.NoError(t, err) - require.Len(t, stakeEntries, 7) - require.Equal(t, stakeEntries[0].StakerPKID, m6PKID) - require.Equal(t, stakeEntries[6].StakerPKID, m0PKID) - require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(700)) - require.Equal(t, stakeEntries[6].StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Len(t, snapshotStakeEntries, 7) + require.Equal(t, snapshotStakeEntries[0].StakerPKID, m6PKID) + require.Equal(t, snapshotStakeEntries[6].StakerPKID, m0PKID) + require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(700)) + require.Equal(t, snapshotStakeEntries[6].StakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // Test snapshotting changing stake. @@ -357,7 +354,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(600)) // m5 stakes another 200. 
- _registerAndStake(m5Pub, m5Priv, 200) + _registerAndStake(m5Pub, m5Priv, 200, false) // m5 has 800 staked. validatorEntry, err = utxoView().GetValidatorByPKID(m5PKID) @@ -464,6 +461,52 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Len(t, snapshotStakeEntries, 6) } + { + // Test staking rewards distribution with RestakeRewards enabled. + + // m6 now has a 14333333578 nano balance from staking rewards so far. + balance, err := utxoView().GetDeSoBalanceNanosForPublicKey(m6PkBytes) + require.NoError(t, err) + require.Equal(t, balance, uint64(14333333578)) + + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook() + + // m6 now has 16747126681 after the most recent's epoch's staking rewards. + balance, err = utxoView().GetDeSoBalanceNanosForPublicKey(m6PkBytes) + require.NoError(t, err) + require.Equal(t, balance, uint64(16747126681)) + } + { + // Test staking rewards distribution with RestakeRewards enabled. + + // m6 has 700 nanos staked. + stakeEntry, err := utxoView().GetStakeEntry(m6PKID, m6PKID) + require.NoError(t, err) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(700)) + + // m6 sets their RestakeRewards flag to true. + _registerAndStake(m6Pub, m6Priv, 0, true) + + // m6's wallet balance is 16747126627 after they submit their stake transaction. + balance, err := utxoView().GetDeSoBalanceNanosForPublicKey(m6PkBytes) + require.NoError(t, err) + require.Equal(t, balance, uint64(0x3e634df63)) + + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook() + + // m6 has 2413793803 staked now a after their staking rewards were restaked. + stakeEntry, err = utxoView().GetStakeEntry(m6PKID, m6PKID) + require.NoError(t, err) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(2413793803)) + + // m6's wallet balance has not changed from has 16747126627 now that their rewards + // were restaked. 
+ balance, err = utxoView().GetDeSoBalanceNanosForPublicKey(m6PkBytes) + require.NoError(t, err) + require.Equal(t, balance, uint64(0x3e634df63)) + } { // Test jailing inactive validators. // @@ -513,26 +556,6 @@ func TestRunEpochCompleteHook(t *testing.T) { return len(snapshotStakeEntries) } - // In epoch 9, all registered validators have Status = Active. - require.Equal(t, getCurrentEpochNumber(), 9) - require.Equal(t, getNumCurrentActiveValidators(), 6) - require.Equal(t, getNumSnapshotActiveValidators(), 6) - require.Equal(t, getNumStakes(), 6) - require.Equal(t, getNumSnapshotStakes(), 6) - - // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook() - - // In epoch 10, all registered validators have Status = Active. - require.Equal(t, getCurrentEpochNumber(), 10) - require.Equal(t, getNumCurrentActiveValidators(), 6) - require.Equal(t, getNumSnapshotActiveValidators(), 6) - require.Equal(t, getNumStakes(), 6) - require.Equal(t, getNumSnapshotStakes(), 6) - - // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook() - // In epoch 11, all registered validators have Status = Active. require.Equal(t, getCurrentEpochNumber(), 11) require.Equal(t, getNumCurrentActiveValidators(), 6) diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go new file mode 100644 index 000000000..5afab30d9 --- /dev/null +++ b/lib/pos_staking_rewards.go @@ -0,0 +1,134 @@ +package lib + +import ( + "github.com/holiman/uint256" + "github.com/pkg/errors" +) + +func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes() ([]*UtxoOperation, error) { + // Retrieve the SnapshotGlobalParamsEntry. + snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() + if err != nil { + return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving SnapshotGlobalParamsEntry: ") + } + + totalStakingRewards := bav._placeholderGetStakingRewardsPerEpoch() + + // If the total rewards to pay out are zero, then there's nothing to be done. Exit early here. 
+ if totalStakingRewards.IsZero() { + return nil, nil + } + + // Reward all snapshotted stakes from the current snapshot validator set. This is an O(n) operation + // that loops through all of the snapshotted stakes and rewards them. + snapshotStakesToReward, err := bav.GetSnapshotStakesToRewardByStakeAmount(snapshotGlobalParamsEntry.StakingRewardsMaxNumStakes) + if err != nil { + return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving snapshot stakes to reward: ") + } + + // If there are no stakes to reward, then there's nothing to be done. Exit early here. + if len(snapshotStakesToReward) == 0 { + return nil, nil + } + + // Compute the total stake amount of all snapshot stakes, so we can determine the proportion of each + // staker's staked amount to the total. + snapshotStakesTotalStakeAmount := uint256.NewInt() + for _, snapshotStakeEntry := range snapshotStakesToReward { + snapshotStakesTotalStakeAmount.Add(snapshotStakesTotalStakeAmount, snapshotStakeEntry.StakeAmountNanos) + } + + // Check if the sum of all of the stakes is zero. In practice this should never happen because it's not + // possible for a staker to stake zero DESO. We check it here to make this code more resilient, in case + // that assumption ever changes elsewhere in the codebase. + if snapshotStakesTotalStakeAmount.IsZero() { + return nil, nil + } + + // Create a list of UtxoOperations to return. This will be populated with all of the operations in which + // we distributed rewards to stakers. + utxoOps := []*UtxoOperation{} + prevStakeEntries := []*StakeEntry{} + + // Loop through all of the snapshot stakes and reward them. + for _, snapshotStakeEntry := range snapshotStakesToReward { + rewardAmount := _computeStakingRewardAmount( + snapshotStakeEntry.StakeAmountNanos, + snapshotStakesTotalStakeAmount, + totalStakingRewards, + ) + if rewardAmount.IsZero() { + continue + } + + // At this point, we know that the staker has non-zero rewards. 
We need to determine how to + // distribute the rewards to them. We need to fetch their latest StakeEntry to determine + // whether they want to restake their rewards or not. + + // Fetch the staker's latest StakeEntry. + stakeEntry, err := bav.GetStakeEntry(snapshotStakeEntry.ValidatorPKID, snapshotStakeEntry.StakerPKID) + if err != nil { + return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem fetching staker's StakeEntry: ") + } + + // At this point, there are three possible cases: + // 1. The staker still exists and wants to restake their rewards. + // 2. The staker still exists and does not want to restake their rewards. + // 3. The staker has unstaked since the snapshot was taken. They no longer have a + // StakeEntry. Their stake is currently in lockup. + + // For case 1, we distribute the rewards by adding them to the staker's staked amount. + if stakeEntry != nil && stakeEntry.RestakeRewards { + prevStakeEntries = append(prevStakeEntries, stakeEntry.Copy()) + + stakeEntry.StakeAmountNanos.Add(stakeEntry.StakeAmountNanos, rewardAmount) + bav._setStakeEntryMappings(stakeEntry) + + continue + } + + // For cases 2 and 3, the staker no longer wants their rewards restaked. The staker is still + // eligible to receive rewards because the validator they had staked to was part of the validator + // set for the snapshot epoch. Their stake at the time was used to secure the network. + + stakerPublicKey := bav.GetPublicKeyForPKID(snapshotStakeEntry.StakerPKID) + + utxoOp, err := bav._addBalance(rewardAmount.Uint64(), stakerPublicKey) + if err != nil { + return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem adding rewards to staker's DESO balance: ") + } + + utxoOps = append(utxoOps, utxoOp) + } + + // Merge all UtxoOps. The order doesn't matter here since the total reward distribution per staker + // is independent of the order in which the stakes are processed. 
+ prevStakeEntriesUtxoOp := &UtxoOperation{ + Type: OperationTypeStake, + PrevStakeEntries: prevStakeEntries, + } + utxoOps = append(utxoOps, prevStakeEntriesUtxoOp) + + return utxoOps, nil +} + +// This function is a placeholder that rewards a constant 10 DESO in staking rewards per epoch. +// The staking rewards will be a function of the burn maximizing fee, which has not been +// implemented yet. +// +// TODO: Replace this function once BMF and staking rewards math are complete. +func (bav *UtxoView) _placeholderGetStakingRewardsPerEpoch() *uint256.Int { + return uint256.NewInt().SetUint64(1e10) +} + +// _computeRewardAmount uses integer math to compute the reward amount for each staker rounded down to +// the nearest DESO nano. +func _computeStakingRewardAmount( + stakeAmount *uint256.Int, + totalStakeAmount *uint256.Int, + totalStakingRewards *uint256.Int, +) *uint256.Int { + output := uint256.NewInt() + output.Mul(stakeAmount, totalStakingRewards) + return output.Div(output, totalStakeAmount) +} From 68e2dddfa59faaab0aff45960a18221f8927a4bf Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 18 Jul 2023 13:51:14 -0400 Subject: [PATCH 154/762] Revert UtxoOp Emits in Stake Reward Distribtion (#609) * Revert_UtxoOp_Emits_in_Stake_Reward_Distribtion * Revert Jail function name --- lib/pos_epoch_complete_hook.go | 8 +----- lib/pos_staking_rewards.go | 45 ++++++++++++---------------------- 2 files changed, 17 insertions(+), 36 deletions(-) diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 5358b16e2..cc3120de1 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -138,24 +138,18 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving SnapshotGlobalParamsEntry: ") } - utxoOps := []*UtxoOperation{} - // Jail all inactive validators from the current snapshot validator set. 
This is an O(n) operation // that loops through all validators and jails them if they are inactive. A jailed validator should be // considered jailed in the next epoch we are transition into. - // - // TODO: Return UtxoOps for jailing validators. if err = bav.JailAllInactiveValidators(blockHeight); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: problem jailing all inactive validators: ") } // Reward all snapshotted stakes from the current snapshot validator set. This is an O(n) operation // that loops through all of the snapshotted stakes and rewards them. - stakeDistributionUtxoOps, err := bav.DistributeStakingRewardsToSnapshotStakes() - if err != nil { + if err = bav.DistributeStakingRewardsToSnapshotStakes(blockHeight); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: problem rewarding snapshot stakes: ") } - utxoOps = append(utxoOps, stakeDistributionUtxoOps...) // Calculate the NextEpochFinalBlockHeight. nextEpochFinalBlockHeight, err := SafeUint64().Add(blockHeight, snapshotGlobalParamsEntry.EpochDurationNumBlocks) diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index 5afab30d9..d3b1b9d31 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -5,30 +5,36 @@ import ( "github.com/pkg/errors" ) -func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes() ([]*UtxoOperation, error) { +func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64) error { + // Check if we have switched from PoW to PoS yet. If we have not, then the PoS consensus + // has not started yet. We don't want to distribute any staking rewards until the PoS consensus begins. + if blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + return nil + } + // Retrieve the SnapshotGlobalParamsEntry. 
snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() if err != nil { - return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving SnapshotGlobalParamsEntry: ") + return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving SnapshotGlobalParamsEntry: ") } totalStakingRewards := bav._placeholderGetStakingRewardsPerEpoch() // If the total rewards to pay out are zero, then there's nothing to be done. Exit early here. if totalStakingRewards.IsZero() { - return nil, nil + return nil } // Reward all snapshotted stakes from the current snapshot validator set. This is an O(n) operation // that loops through all of the snapshotted stakes and rewards them. snapshotStakesToReward, err := bav.GetSnapshotStakesToRewardByStakeAmount(snapshotGlobalParamsEntry.StakingRewardsMaxNumStakes) if err != nil { - return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving snapshot stakes to reward: ") + return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving snapshot stakes to reward: ") } // If there are no stakes to reward, then there's nothing to be done. Exit early here. if len(snapshotStakesToReward) == 0 { - return nil, nil + return nil } // Compute the total stake amount of all snapshot stakes, so we can determine the proportion of each @@ -42,14 +48,9 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes() ([]*UtxoOperatio // possible for a staker to stake zero DESO. We check it here to make this code more resilient, in case // that assumption ever changes elsewhere in the codebase. if snapshotStakesTotalStakeAmount.IsZero() { - return nil, nil + return nil } - // Create a list of UtxoOperations to return. This will be populated with all of the operations in which - // we distributed rewards to stakers. - utxoOps := []*UtxoOperation{} - prevStakeEntries := []*StakeEntry{} - // Loop through all of the snapshot stakes and reward them. 
for _, snapshotStakeEntry := range snapshotStakesToReward { rewardAmount := _computeStakingRewardAmount( @@ -68,7 +69,7 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes() ([]*UtxoOperatio // Fetch the staker's latest StakeEntry. stakeEntry, err := bav.GetStakeEntry(snapshotStakeEntry.ValidatorPKID, snapshotStakeEntry.StakerPKID) if err != nil { - return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem fetching staker's StakeEntry: ") + return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem fetching staker's StakeEntry: ") } // At this point, there are three possible cases: @@ -79,8 +80,6 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes() ([]*UtxoOperatio // For case 1, we distribute the rewards by adding them to the staker's staked amount. if stakeEntry != nil && stakeEntry.RestakeRewards { - prevStakeEntries = append(prevStakeEntries, stakeEntry.Copy()) - stakeEntry.StakeAmountNanos.Add(stakeEntry.StakeAmountNanos, rewardAmount) bav._setStakeEntryMappings(stakeEntry) @@ -92,24 +91,12 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes() ([]*UtxoOperatio // set for the snapshot epoch. Their stake at the time was used to secure the network. stakerPublicKey := bav.GetPublicKeyForPKID(snapshotStakeEntry.StakerPKID) - - utxoOp, err := bav._addBalance(rewardAmount.Uint64(), stakerPublicKey) - if err != nil { - return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem adding rewards to staker's DESO balance: ") + if _, err = bav._addBalance(rewardAmount.Uint64(), stakerPublicKey); err != nil { + return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem adding rewards to staker's DESO balance: ") } - - utxoOps = append(utxoOps, utxoOp) - } - - // Merge all UtxoOps. The order doesn't matter here since the total reward distribution per staker - // is independent of the order in which the stakes are processed. 
- prevStakeEntriesUtxoOp := &UtxoOperation{ - Type: OperationTypeStake, - PrevStakeEntries: prevStakeEntries, } - utxoOps = append(utxoOps, prevStakeEntriesUtxoOp) - return utxoOps, nil + return nil } // This function is a placeholder that rewards a constant 10 DESO in staking rewards per epoch. From a51e6eaf45a09c1228eaad6b924faac8772b554e Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 18 Jul 2023 14:11:08 -0400 Subject: [PATCH 155/762] Filter validator jailing to just the current validator set (#610) --- lib/block_view_validator.go | 86 +++++++++++--------------------- lib/block_view_validator_test.go | 3 +- lib/pos_epoch_complete_hook.go | 2 +- 3 files changed, 32 insertions(+), 59 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index a229d26c6..9fe947cda 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1816,65 +1816,53 @@ func (bav *UtxoView) GetTopActiveValidatorsByStakeAmount(limit uint64) ([]*Valid return validatorEntries[0:upperBound], nil } -func (bav *UtxoView) JailAllInactiveValidators(blockHeight uint64) error { - // First, iterate through all of the !isDeleted ValidatorEntries in the UtxoView and - // jail any that are inactive. - var utxoViewValidatorPKIDs []*PKID - for _, validatorEntry := range bav.ValidatorPKIDToValidatorEntry { - // We don't want to retrieve any ValidatorEntries from the db that are present in the UtxoView. - utxoViewValidatorPKIDs = append(utxoViewValidatorPKIDs, validatorEntry.ValidatorPKID) +func (bav *UtxoView) JailInactiveSnapshotValidators(blockHeight uint64) error { + // Check if we have switched from PoW to PoS yet. If we have not, then the PoS consensus + // has not started. We don't want to jail any validators until they have had the opportunity + // to participate in the consensus and are known to be inactive. 
+ if blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + return nil + } - if validatorEntry.isDeleted { - continue - } + snapshotGlobalParams, err := bav.GetSnapshotGlobalParamsEntry() + if err != nil { + return errors.Wrapf(err, "UtxoView.JailInactiveSnapshotValidators: ") + } + + // Get the current snapshot validator set. These are the only validator what were + // able to participate in consensus for the current epoch. Only the inactive validators + // from this set can be jailed. + snapshotValidatorSet, err := bav.GetSnapshotValidatorSetByStakeAmount( + // The max size of the validator set is captured via the global params. This is the same + // param used to define the size of the validator set when first snapshotting it. + snapshotGlobalParams.ValidatorSetMaxNumValidators, + ) - // Check if we should jail the validator. - shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry, blockHeight) + // Iterate through all of the validators in the snapshot validator set and jail any inactive ones. + for _, snapshotValidatorEntry := range snapshotValidatorSet { + currentValidatorEntry, err := bav.GetValidatorByPKID(snapshotValidatorEntry.ValidatorPKID) if err != nil { - return errors.Wrapf( - err, - "JailAllInactiveValidators: problem determining if should jail validator %v: ", - validatorEntry.ValidatorPKID, - ) + return errors.Wrapf(err, "UtxoView.JailInactiveSnapshotValidators: ") } - // If this validator should not be jailed, continue to the next validator. - if !shouldJailValidator { + if currentValidatorEntry == nil || currentValidatorEntry.isDeleted { + // If the validator doesn't exist or has just been deleted, we don't need to do anything. continue } - // If we get here, then the validator should be jailed. 
- if err = bav.JailValidator(validatorEntry); err != nil { - return errors.Wrapf( - err, "JailAllInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, - ) - } - } - - // Second, iterate through all the ValidatorEntries in the db and jail any that are inactive. - dbValidatorEntries, err := DBEnumerateAllCurrentValidators(bav.Handle, utxoViewValidatorPKIDs) - if err != nil { - return errors.Wrapf(err, "JailAllInactiveValidators: problem retrieving ValidatorEntries: ") - } - - for _, validatorEntry := range dbValidatorEntries { - // Check if we should jail the validator. - shouldJailValidator, err := bav.ShouldJailValidator(validatorEntry, blockHeight) + shouldJailValidator, err := bav.ShouldJailValidator(currentValidatorEntry, blockHeight) if err != nil { - return errors.Wrapf( - err, "JailAllInactiveValidators: problem determining if should jail validator %v: ", validatorEntry.ValidatorPKID, - ) + return errors.Wrapf(err, "UtxoView.JailInactiveSnapshotValidators: ") } - // If this validator should not be jailed, continue to the next validator. if !shouldJailValidator { continue } // If we get here, then the validator should be jailed. - if err = bav.JailValidator(validatorEntry); err != nil { + if err = bav.JailValidator(currentValidatorEntry); err != nil { return errors.Wrapf( - err, "JailAllInactiveValidators: problem jailing validator %v: ", validatorEntry.ValidatorPKID, + err, "UtxoView.JailInactiveSnapshotValidators: problem jailing validator %v: ", currentValidatorEntry.ValidatorPKID, ) } } @@ -1883,20 +1871,6 @@ func (bav *UtxoView) JailAllInactiveValidators(blockHeight uint64) error { } func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry, blockHeight uint64) (bool, error) { - // Return false if we haven't switched from PoW to PoS yet. 
Otherwise, - // there would be an edge case where all validators will get jailed - // after we deploy the StateSetup block height, but before we deploy - // the ConsensusCutover block height. - // - // We do another check below to make sure enough blocks have passed even - // after we cut-over to PoS, but since this check is so quick to perform, - // we keep this one here as well, since this will catch all OnEpochCompleteHooks - // after the StateSetup block height and before the CutoverConsensus block height - // and saves us a few look-ups and computations. - if blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { - return false, nil - } - // Return false if the validator is already jailed. We do not want to jail // them again as we want to retain their original JailedAtEpochNumber so // that they can eventually unjail themselves. diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 39b1c5e05..eeafa2149 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -1846,8 +1846,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { // Jail the sender. tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) - err = tmpUtxoView.JailValidator(validatorEntry) - require.NoError(t, err) + require.NoError(t, tmpUtxoView.JailValidator(validatorEntry)) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) // Delete sender's ValidatorEntry from the UtxoView so that it is read from the db. diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index cc3120de1..433ddbb37 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -141,7 +141,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Jail all inactive validators from the current snapshot validator set. This is an O(n) operation // that loops through all validators and jails them if they are inactive. 
A jailed validator should be // considered jailed in the next epoch we are transition into. - if err = bav.JailAllInactiveValidators(blockHeight); err != nil { + if err = bav.JailInactiveSnapshotValidators(blockHeight); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: problem jailing all inactive validators: ") } From aeb722ba0659cc096aeec9fc4a2b45bff8f5290c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 18 Jul 2023 19:22:13 -0400 Subject: [PATCH 156/762] Run State Mutating Operations Before Snapshotting in End of Epoch Hook (#611) * Rearrange mutatating operations to run before snapshotting * Cleanup + unit tests * Clean up comments * Address Nina's comments --- lib/pos_epoch.go | 6 +- lib/pos_epoch_complete_hook.go | 208 ++++++++++++++++++---------- lib/pos_epoch_complete_hook_test.go | 16 +-- 3 files changed, 147 insertions(+), 83 deletions(-) diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index 6025b04cf..6582260fe 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -2,10 +2,11 @@ package lib import ( "bytes" + "math" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" - "math" ) // @@ -84,6 +85,9 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { // If still not found, return the GenesisEpochEntry. This will be the // case prior to the first execution of the OnEpochCompleteHook. + // + // TODO: Should FinalBlockHeight be ProofOfStake1StateSetupBlockHeight for epoch 0? + // The fork height is exactly when epoch 0 ends. Epoch 1 begins at the following height. 
return &EpochEntry{EpochNumber: 0, FinalBlockHeight: math.MaxUint64}, nil } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 433ddbb37..fc3117def 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -29,32 +29,31 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) } // RunEpochCompleteHook performs all of the end-of-epoch operations when connecting the final -// block of a epoch. There epoch completion has two steps. +// block of a epoch. The epoch completion hook has three steps. // -// Step 1: Create snapshots of current state. Snapshotting operations here should only create new +// Step 1: Run all state-mutating operations that need to be run when completing an epoch. We always +// perform state-mutating operations before creating snapshots. This way, the snapshot created at the +// end of epoch n always reflects the state of the view at the end of epoch n after all state-mutating +// operations have been applied in the epoch. +// - Jail all inactive validators from the current snapshot validator set. +// - Reward all snapshotted stakes from the current snapshot validator set. +// +// Step 2: Create snapshots of the current state. Snapshotting operations here should only create new // snapshot state. They should have no other side effects that mutate the existing state of the view. -// 1. Snapshot the current GlobalParamsEntry. -// 2. Snapshot the current validator set. -// 3. Snapshot the current validator set's TotalStakeAmountNanos. -// 4. Snapshot the leader schedule. -// 5. Snapshot the current top N stake entries, who will receive staking rewards. +// - Snapshot the current GlobalParamsEntry. +// - Snapshot the current validator set. +// - Snapshot the current validator set's TotalStakeAmountNanos. +// - Snapshot the leader schedule. +// - Snapshot the current top N stake entries, who will receive staking rewards. // -// Step 2: Transition to the next epoch. 
This runs all state-mutating operations that need to be run for -// the epoch transition. We always perform state-mutating operations after creating snapshots. This way, -// the snapshot created at the end of epoch n always reflects the state of the view at the end of epoch n. -// And it does not reflect the state changes that occur AFTER epoch n ends and before epoch n+1 BEGINS. -// 1. Jail all inactive validators from the current snapshot validator set. -// 2. Reward all snapshotted stakes from the current snapshot validator set. -// 3. Compute the final block height for the next epoch. -// 4. Transition CurrentEpochEntry to the next epoch. +// Step 3: Roll over to the next epoch. +// - Compute the final block height for the next epoch. +// - Update CurrentEpochEntry to the next epoch's. func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { - // Rolls-over the current epoch into a new one. Handles the associated snapshotting + accounting. - // Sanity-check that the current block is the last block in the current epoch. // - // Note that this will also return true if we're currently at the - // ProofOfStake1StateSetupBlockHeight so that we can run the hook for the first time - // to initialize the CurrentEpochEntry. + // Note that this will also return true if we're currently at the ProofOfStake1StateSetupBlockHeight + // so that we can run the hook for the first time to initialize the CurrentEpochEntry. isLastBlockInCurrentEpoch, err := bav.IsLastBlockInCurrentEpoch(blockHeight) if err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: ") @@ -66,51 +65,146 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { // Retrieve the CurrentEpochEntry. 
currentEpochEntry, err := bav.GetCurrentEpochEntry() if err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving CurrentEpochEntry: ") + return errors.Wrapf(err, "runEpochCompleteSnapshotGeneration: problem retrieving CurrentEpochEntry: ") } if currentEpochEntry == nil { - return errors.New("RunEpochCompleteHook: CurrentEpochEntry is nil, this should never happen") + return errors.New("runEpochCompleteSnapshotGeneration: CurrentEpochEntry is nil, this should never happen") + } + + // Step 1: Run All State Mutating Operations + if err := bav.runEpochCompleteStateMutations(blockHeight); err != nil { + return errors.Wrapf(err, "RunEpochCompleteHook: ") + } + + // Step 2: Run All Snapshotting Operations + if err := bav.runEpochCompleteSnapshotGeneration(currentEpochEntry.EpochNumber); err != nil { + return errors.Wrapf(err, "RunEpochCompleteHook: ") + } + + // Step 3: Roll Over to The Next Epoch + if err := bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight); err != nil { + return errors.Wrapf(err, "RunEpochCompleteHook: ") + } + + return nil +} + +// Runs all state-mutating operations required when completing an epoch. +func (bav *UtxoView) runEpochCompleteStateMutations(blockHeight uint64) error { + // Jail all inactive validators from the current snapshot validator set. This is an O(n) operation + // that loops through all active unjailed validators from current epoch's snapshot validator set + // and jails them if they have been inactive. + // + // Note, this this will only run if we are past the ProofOfStake2ConsensusCutoverBlockHeight fork height. + if err := bav.JailInactiveSnapshotValidators(blockHeight); err != nil { + return errors.Wrapf(err, "runEpochCompleteStateMutations: problem jailing all inactive validators: ") + } + + // Reward all snapshotted stakes from the current snapshot validator set. This is an O(n) operation + // that loops through all of the snapshotted stakes and rewards them. 
+ // + // Note, this this will only run if we are past the ProofOfStake2ConsensusCutoverBlockHeight fork height. + if err := bav.DistributeStakingRewardsToSnapshotStakes(blockHeight); err != nil { + return errors.Wrapf(err, "runEpochCompleteStateMutations: problem rewarding snapshot stakes: ") } - currentGlobalParamsEntry := bav.GetCurrentGlobalParamsEntry() + return nil +} +// Generates all required snapshots for the current epoch. +func (bav *UtxoView) runEpochCompleteSnapshotGeneration(epochNumber uint64) error { // Snapshot the current GlobalParamsEntry. - bav._setSnapshotGlobalParamsEntry(bav.GlobalParamsEntry, currentEpochEntry.EpochNumber) + bav._setSnapshotGlobalParamsEntry(bav.GetCurrentGlobalParamsEntry(), epochNumber) + + // Snapshot the current top m validators as the validator set. + validatorSet, err := bav.generateAndSnapshotValidatorSet(epochNumber) + if err != nil { + return errors.Wrapf(err, "runEpochCompleteSnapshotGeneration: problem snapshotting validator set: ") + } + + // Snapshot a randomly generated leader schedule. + if err = bav.generateAndSnapshotLeaderSchedule(epochNumber); err != nil { + return errors.Wrapf(err, "runEpochCompleteSnapshotGeneration: problem snapshotting leader schedule: ") + } + + // Snapshot the current top n stake entries as the stakes to reward. + if err = bav.generateAndSnapshotStakesToReward(epochNumber, validatorSet); err != nil { + return errors.Wrapf(err, "runEpochCompleteSnapshotGeneration: problem snapshotting stakes to reward: ") + } + + return nil +} + +// Updates the currentEpochEntry to the next epoch's. +func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeight uint64) error { + // Retrieve the SnapshotGlobalParamsEntry. + snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() + if err != nil { + return errors.Wrapf(err, "runEpochCompleteEpochRollover: problem retrieving SnapshotGlobalParamsEntry: ") + } + + // Calculate the NextEpochFinalBlockHeight. 
+ nextEpochFinalBlockHeight, err := SafeUint64().Add(blockHeight, snapshotGlobalParamsEntry.EpochDurationNumBlocks) + if err != nil { + return errors.Wrapf(err, "runEpochCompleteEpochRollover: problem calculating NextEpochFinalBlockHeight: ") + } - // Snapshot the current top n active validators as the current validator set. - validatorSet, err := bav.GetTopActiveValidatorsByStakeAmount(currentGlobalParamsEntry.ValidatorSetMaxNumValidators) + // Roll-over a new epoch by setting a new CurrentEpochEntry. + nextEpochEntry := &EpochEntry{ + EpochNumber: epochNumber + 1, + FinalBlockHeight: nextEpochFinalBlockHeight, + } + bav._setCurrentEpochEntry(nextEpochEntry) + + return nil +} + +func (bav *UtxoView) generateAndSnapshotValidatorSet(epochNumber uint64) ([]*ValidatorEntry, error) { + // Snapshot the current top n active validators as the validator set. + validatorSet, err := bav.GetTopActiveValidatorsByStakeAmount( + bav.GetCurrentGlobalParamsEntry().ValidatorSetMaxNumValidators, + ) if err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: error retrieving top ValidatorEntries: ") + return nil, errors.Wrapf(err, "generateAndSnapshotValidatorSet: error retrieving top ValidatorEntries: ") } for _, validatorEntry := range validatorSet { - bav._setSnapshotValidatorSetEntry(validatorEntry, currentEpochEntry.EpochNumber) + bav._setSnapshotValidatorSetEntry(validatorEntry, epochNumber) } // Snapshot the current validator set's total stake. Note, the validator set is already filtered to the top n // active validators for the epoch. The total stake is the sum of all of the active validators' stakes. 
- globalActiveStakeAmountNanos := SumValidatorEntriesTotalStakeAmountNanos(validatorSet) - bav._setSnapshotValidatorSetTotalStakeAmountNanos(globalActiveStakeAmountNanos, currentEpochEntry.EpochNumber) + validatorSetTotalStakeAmountNanos := SumValidatorEntriesTotalStakeAmountNanos(validatorSet) + bav._setSnapshotValidatorSetTotalStakeAmountNanos(validatorSetTotalStakeAmountNanos, epochNumber) + + return validatorSet, nil +} - // Generate + snapshot a leader schedule. +func (bav *UtxoView) generateAndSnapshotLeaderSchedule(epochNumber uint64) error { + // Generate a random leader schedule and snapshot it. leaderSchedule, err := bav.GenerateLeaderSchedule() if err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: problem generating leader schedule: ") + return errors.Wrapf(err, "generateAndSnapshotLeaderSchedule: problem generating leader schedule: ") } + for index, validatorPKID := range leaderSchedule { if index > math.MaxUint16 { - return errors.Errorf("RunEpochCompleteHook: LeaderIndex %d overflows uint16", index) + return errors.Errorf("generateAndSnapshotLeaderSchedule: LeaderIndex %d overflows uint16", index) } - bav._setSnapshotLeaderScheduleValidator(validatorPKID, uint16(index), currentEpochEntry.EpochNumber) + bav._setSnapshotLeaderScheduleValidator(validatorPKID, uint16(index), epochNumber) } - // Snapshot the current top n stake entries. - topStakeEntries, err := bav.GetTopStakesByStakeAmount(currentGlobalParamsEntry.StakingRewardsMaxNumStakes) + return nil +} + +func (bav *UtxoView) generateAndSnapshotStakesToReward(epochNumber uint64, validatorSet []*ValidatorEntry) error { + // Fetch the current top n stake entries. + topStakeEntries, err := bav.GetTopStakesByStakeAmount(bav.GetCurrentGlobalParamsEntry().StakingRewardsMaxNumStakes) if err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: error retrieving top StakeEntries: ") } - // Filter the top n stake entries by the current validator set. 
We do not want to reward - // stakes that are not in the current validator set. + // Filter the top n stake entries by the current validator set. We do not want to reward stakes that are + // not in the current validator set, so we pre-filter them here before snapshotting them. validatorSetPKIDs := NewSet([]PKID{}) for _, validatorEntry := range validatorSet { validatorSetPKIDs.Add(*validatorEntry.ValidatorPKID) @@ -119,10 +213,10 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { return validatorSetPKIDs.Includes(*s.ValidatorPKID) }) - // Snapshot only the top n stake entries that are in the validator set. + // Snapshot only the top m stake entries that are in the validator set. for _, stakeEntry := range topStakesInValidatorSet { snapshotStakeEntry := SnapshotStakeEntry{ - SnapshotAtEpochNumber: currentEpochEntry.EpochNumber, + SnapshotAtEpochNumber: epochNumber, ValidatorPKID: stakeEntry.ValidatorPKID, StakerPKID: stakeEntry.StakerPKID, StakeAmountNanos: stakeEntry.StakeAmountNanos, @@ -130,39 +224,5 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { bav._setSnapshotStakeToReward(&snapshotStakeEntry) } - // TODO: Delete old snapshots that are no longer used. - - // Retrieve the SnapshotGlobalParamsEntry. - snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() - if err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: problem retrieving SnapshotGlobalParamsEntry: ") - } - - // Jail all inactive validators from the current snapshot validator set. This is an O(n) operation - // that loops through all validators and jails them if they are inactive. A jailed validator should be - // considered jailed in the next epoch we are transition into. - if err = bav.JailInactiveSnapshotValidators(blockHeight); err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: problem jailing all inactive validators: ") - } - - // Reward all snapshotted stakes from the current snapshot validator set. 
This is an O(n) operation - // that loops through all of the snapshotted stakes and rewards them. - if err = bav.DistributeStakingRewardsToSnapshotStakes(blockHeight); err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: problem rewarding snapshot stakes: ") - } - - // Calculate the NextEpochFinalBlockHeight. - nextEpochFinalBlockHeight, err := SafeUint64().Add(blockHeight, snapshotGlobalParamsEntry.EpochDurationNumBlocks) - if err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: problem calculating NextEpochFinalBlockHeight: ") - } - - // Roll-over a new epoch by setting a new CurrentEpochEntry. - nextEpochEntry := &EpochEntry{ - EpochNumber: currentEpochEntry.EpochNumber + 1, - FinalBlockHeight: nextEpochFinalBlockHeight, - } - bav._setCurrentEpochEntry(nextEpochEntry) - return nil } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 7b9613b31..1e251b95a 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -532,7 +532,7 @@ func TestRunEpochCompleteHook(t *testing.T) { return len(validatorEntries) } - getNumSnapshotActiveValidators := func() int { + getNumSnapshotValidatorSet := func() int { snapshotValidatorSet, err := utxoView().GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) return len(snapshotValidatorSet) @@ -559,7 +559,7 @@ func TestRunEpochCompleteHook(t *testing.T) { // In epoch 11, all registered validators have Status = Active. require.Equal(t, getCurrentEpochNumber(), 11) require.Equal(t, getNumCurrentActiveValidators(), 6) - require.Equal(t, getNumSnapshotActiveValidators(), 6) + require.Equal(t, getNumSnapshotValidatorSet(), 6) require.Equal(t, getNumStakes(), 6) require.Equal(t, getNumSnapshotStakes(), 6) @@ -567,10 +567,10 @@ func TestRunEpochCompleteHook(t *testing.T) { _runOnEpochCompleteHook() // In epoch 12, all current registered validators have Status = Jailed. 
- // In snapshot 10, all snapshot registered validators have Status = Active. + // In snapshot 10, all snapshot validators have Status = Active. require.Equal(t, getCurrentEpochNumber(), 12) require.Empty(t, getNumCurrentActiveValidators()) - require.Equal(t, getNumSnapshotActiveValidators(), 6) + require.Equal(t, getNumSnapshotValidatorSet(), 6) require.Equal(t, getNumStakes(), 6) require.Equal(t, getNumSnapshotStakes(), 6) @@ -581,12 +581,12 @@ func TestRunEpochCompleteHook(t *testing.T) { _runOnEpochCompleteHook() // In epoch 13, all current registered validators have Status = Jailed. - // In snapshot 11, all snapshot registered validators have Status = Active. + // In snapshot 11, the validator set is empty because all validators have Status = Jailed. require.Equal(t, getCurrentEpochNumber(), 13) require.Empty(t, getNumCurrentActiveValidators()) - require.Equal(t, getNumSnapshotActiveValidators(), 6) + require.Empty(t, getNumSnapshotValidatorSet()) require.Equal(t, getNumStakes(), 6) - require.Equal(t, getNumSnapshotStakes(), 6) + require.Empty(t, getNumSnapshotStakes()) // Run OnEpochCompleteHook(). 
_runOnEpochCompleteHook() @@ -596,7 +596,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getCurrentEpochNumber(), 14) require.Empty(t, getNumCurrentActiveValidators()) - require.Empty(t, getNumSnapshotActiveValidators()) + require.Empty(t, getNumSnapshotValidatorSet()) require.Equal(t, getNumStakes(), 6) require.Empty(t, getNumSnapshotStakes()) } From bed6994e2e62b88ec6daf3bae484019e8bfa0f34 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 18 Jul 2023 21:05:32 -0400 Subject: [PATCH 157/762] Update RestakeRewards boolean to a RewardMethod enum (#612) * Update RestakeRewards boolean to a RewardMethod enum * Update TxIndex * Maming consistency --- lib/block_view_stake.go | 47 ++++++++++++++++++----------- lib/block_view_stake_test.go | 41 +++++++++++++++++-------- lib/pos_epoch_complete_hook_test.go | 13 +++++--- lib/pos_snapshot_entries.go | 2 +- lib/pos_staking_rewards.go | 2 +- 5 files changed, 69 insertions(+), 36 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index de1c75a4e..44d79744d 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -32,10 +32,17 @@ import ( // TYPES: StakeEntry // +type StakingRewardMethod = uint8 + +const ( + StakingRewardMethodPayToBalance StakingRewardMethod = iota + StakingRewardMethodRestake StakingRewardMethod = 1 +) + type StakeEntry struct { StakerPKID *PKID ValidatorPKID *PKID - RestakeRewards bool + RewardMethod StakingRewardMethod StakeAmountNanos *uint256.Int ExtraData map[string][]byte isDeleted bool @@ -50,7 +57,7 @@ func (stakeEntry *StakeEntry) Copy() *StakeEntry { return &StakeEntry{ StakerPKID: stakeEntry.StakerPKID.NewPKID(), ValidatorPKID: stakeEntry.ValidatorPKID.NewPKID(), - RestakeRewards: stakeEntry.RestakeRewards, + RewardMethod: stakeEntry.RewardMethod, StakeAmountNanos: stakeEntry.StakeAmountNanos.Clone(), ExtraData: copyExtraData(stakeEntry.ExtraData), isDeleted: stakeEntry.isDeleted, @@ -72,7 
+79,7 @@ func (stakeEntry *StakeEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipM var data []byte data = append(data, EncodeToBytes(blockHeight, stakeEntry.StakerPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, stakeEntry.ValidatorPKID, skipMetadata...)...) - data = append(data, BoolToByte(stakeEntry.RestakeRewards)) + data = append(data, stakeEntry.RewardMethod) data = append(data, VariableEncodeUint256(stakeEntry.StakeAmountNanos)...) data = append(data, EncodeExtraData(stakeEntry.ExtraData)...) return data @@ -93,10 +100,10 @@ func (stakeEntry *StakeEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *b return errors.Wrapf(err, "StakeEntry.Decode: Problem reading ValidatorPKID: ") } - // RestakeRewards - stakeEntry.RestakeRewards, err = ReadBoolByte(rr) + // RewardMethod + stakeEntry.RewardMethod, err = rr.ReadByte() if err != nil { - return errors.Wrapf(err, "StakeEntry.Decode: Problem reading RestakeRewards") + return errors.Wrapf(err, "StakeEntry.Decode: Problem reading RewardMethod") } // StakeAmountNanos @@ -224,7 +231,7 @@ func (lockedStakeEntry *LockedStakeEntry) GetEncoderType() EncoderType { type StakeMetadata struct { ValidatorPublicKey *PublicKey - RestakeRewards bool + RewardMethod StakingRewardMethod StakeAmountNanos *uint256.Int } @@ -235,7 +242,7 @@ func (txnData *StakeMetadata) GetTxnType() TxnType { func (txnData *StakeMetadata) ToBytes(preSignature bool) ([]byte, error) { var data []byte data = append(data, EncodeByteArray(txnData.ValidatorPublicKey.ToBytes())...) - data = append(data, BoolToByte(txnData.RestakeRewards)) + data = append(data, txnData.RewardMethod) data = append(data, VariableEncodeUint256(txnData.StakeAmountNanos)...) 
return data, nil } @@ -250,10 +257,10 @@ func (txnData *StakeMetadata) FromBytes(data []byte) error { } txnData.ValidatorPublicKey = NewPublicKey(validatorPublicKeyBytes) - // RestakeRewards - txnData.RestakeRewards, err = ReadBoolByte(rr) + // RewardMethod + txnData.RewardMethod, err = rr.ReadByte() if err != nil { - return errors.Wrapf(err, "StakeMetadata.FromBytes: Problem reading RestakeRewards: ") + return errors.Wrapf(err, "StakeMetadata.FromBytes: Problem reading RewardMethod: ") } // StakeAmountNanos @@ -370,7 +377,7 @@ func (txnData *UnlockStakeMetadata) New() DeSoTxnMetadata { type StakeTxindexMetadata struct { StakerPublicKeyBase58Check string ValidatorPublicKeyBase58Check string - RestakeRewards bool + RewardMethod StakingRewardMethod StakeAmountNanos *uint256.Int } @@ -378,7 +385,7 @@ func (txindexMetadata *StakeTxindexMetadata) RawEncodeWithoutMetadata(blockHeigh var data []byte data = append(data, EncodeByteArray([]byte(txindexMetadata.StakerPublicKeyBase58Check))...) data = append(data, EncodeByteArray([]byte(txindexMetadata.ValidatorPublicKeyBase58Check))...) - data = append(data, BoolToByte(txindexMetadata.RestakeRewards)) + data = append(data, txindexMetadata.RewardMethod) data = append(data, VariableEncodeUint256(txindexMetadata.StakeAmountNanos)...) 
return data } @@ -400,10 +407,10 @@ func (txindexMetadata *StakeTxindexMetadata) RawDecodeWithoutMetadata(blockHeigh } txindexMetadata.ValidatorPublicKeyBase58Check = string(validatorPublicKeyBase58CheckBytes) - // RestakeRewards - txindexMetadata.RestakeRewards, err = ReadBoolByte(rr) + // RewardMethod + txindexMetadata.RewardMethod, err = rr.ReadByte() if err != nil { - return errors.Wrapf(err, "StakeTxindexMetadata.Decode: Problem reading RestakeRewards: ") + return errors.Wrapf(err, "StakeTxindexMetadata.Decode: Problem reading RewardMethod: ") } // StakeAmountNanos @@ -1354,7 +1361,7 @@ func (bav *UtxoView) _connectStake( currentStakeEntry := &StakeEntry{ StakerPKID: transactorPKIDEntry.PKID, ValidatorPKID: prevValidatorEntry.ValidatorPKID, - RestakeRewards: txMeta.RestakeRewards, + RewardMethod: txMeta.RewardMethod, StakeAmountNanos: stakeAmountNanos, ExtraData: mergeExtraData(prevExtraData, txn.ExtraData), } @@ -1979,6 +1986,11 @@ func (bav *UtxoView) IsValidStakeMetadata(transactorPkBytes []byte, metadata *St return errors.Wrapf(RuleErrorInvalidStakeValidatorDisabledDelegatedStake, "UtxoView.IsValidStakeMetadata: ") } + // Validate RewardMethod. + if metadata.RewardMethod != StakingRewardMethodPayToBalance && metadata.RewardMethod != StakingRewardMethodRestake { + return errors.Wrapf(RuleErrorInvalidStakingRewardMethod, "UtxoView.IsValidStakeMetadata: ") + } + // Validate 0 <= StakeAmountNanos <= transactor's DESO Balance. We ignore // the txn fees in this check. 
The StakeAmountNanos will be validated to // be less than the transactor's DESO balance net of txn fees in the call @@ -3092,6 +3104,7 @@ func (bav *UtxoView) IsValidStakeLimitKey(transactorPublicKeyBytes []byte, stake // const RuleErrorInvalidStakerPKID RuleError = "RuleErrorInvalidStakerPKID" +const RuleErrorInvalidStakingRewardMethod RuleError = "RuleErrorInvalidStakingRewardMethod" const RuleErrorInvalidStakeAmountNanos RuleError = "RuleErrorInvalidStakeAmountNanos" const RuleErrorInvalidStakeInsufficientBalance RuleError = "RuleErrorInvalidStakeInsufficientBalance" const RuleErrorInvalidStakeValidatorDisabledDelegatedStake RuleError = "RuleErrorInvalidStakeValidatorDisabledDelegatedStake" diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 9ec7c4625..d44208285 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -125,7 +125,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - RestakeRewards: false, + RewardMethod: StakingRewardMethodPayToBalance, StakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxn( @@ -142,7 +142,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidValidatorPKID stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m2PkBytes), - RestakeRewards: false, + RewardMethod: StakingRewardMethodPayToBalance, StakeAmountNanos: uint256.NewInt(), } _, err = _submitStakeTxn( @@ -151,11 +151,24 @@ func _testStaking(t *testing.T, flushToDB bool) { require.Error(t, err) require.Contains(t, err.Error(), RuleErrorInvalidValidatorPKID) } + { + // RuleErrorInvalidStakingRewardMethod + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(m0PkBytes), + RewardMethod: 99, + StakeAmountNanos: uint256.NewInt().SetUint64(1), + } + _, err = _submitStakeTxn( + testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, + ) + require.Error(t, err) + require.Contains(t, 
err.Error(), RuleErrorInvalidStakingRewardMethod) + } { // RuleErrorInvalidStakeAmountNanos stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - RestakeRewards: false, + RewardMethod: StakingRewardMethodPayToBalance, StakeAmountNanos: nil, } _, err = _submitStakeTxn( @@ -168,6 +181,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidStakeAmountNanos stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), + RewardMethod: StakingRewardMethodPayToBalance, StakeAmountNanos: uint256.NewInt(), } _, err = _submitStakeTxn( @@ -180,6 +194,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidStakeAmountNanos stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), + RewardMethod: StakingRewardMethodPayToBalance, StakeAmountNanos: MaxUint256, } _, err = _submitStakeTxn( @@ -192,7 +207,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidStakeInsufficientBalance stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - RestakeRewards: false, + RewardMethod: StakingRewardMethodPayToBalance, StakeAmountNanos: uint256.NewInt().SetUint64(math.MaxUint64), } _, err = _submitStakeTxn( @@ -206,7 +221,7 @@ func _testStaking(t *testing.T, flushToDB bool) { m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - RestakeRewards: false, + RewardMethod: StakingRewardMethodPayToBalance, StakeAmountNanos: uint256.NewInt().SetUint64(100), } extraData := map[string][]byte{"TestKey": []byte("TestValue")} @@ -219,7 +234,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.False(t, stakeEntry.RestakeRewards) + require.Equal(t, stakeEntry.RewardMethod, StakingRewardMethodPayToBalance) require.Equal(t, stakeEntry.StakeAmountNanos, 
uint256.NewInt().SetUint64(100)) require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue")) @@ -238,7 +253,7 @@ func _testStaking(t *testing.T, flushToDB bool) { m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - RestakeRewards: false, + RewardMethod: StakingRewardMethodPayToBalance, StakeAmountNanos: uint256.NewInt().SetUint64(50), } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} @@ -251,7 +266,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.False(t, stakeEntry.RestakeRewards) + require.Equal(t, stakeEntry.RewardMethod, StakingRewardMethodPayToBalance) require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(150)) require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue2")) @@ -266,11 +281,11 @@ func _testStaking(t *testing.T, flushToDB bool) { require.Equal(t, m1OldDESOBalanceNanos-feeNanos-stakeMetadata.StakeAmountNanos.Uint64(), m1NewDESOBalanceNanos) } { - // m1 changes the RestakeRewards flag on their stake with m0. + // m1 changes the RewardMethod value on their stake with m0. m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - RestakeRewards: true, + RewardMethod: StakingRewardMethodRestake, StakeAmountNanos: uint256.NewInt(), } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} @@ -286,8 +301,8 @@ func _testStaking(t *testing.T, flushToDB bool) { require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(150)) require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue2")) - // Verify the StakeEntry.RestakeRewards flag is updated to true. - require.True(t, stakeEntry.RestakeRewards) + // Verify the StakeEntry.RewardMethod has changed to StakingRewardMethodRestake. 
+ require.Equal(t, stakeEntry.RewardMethod, StakingRewardMethodRestake) // Verify the ValidatorEntry.TotalStakeAmountNanos does not change. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) @@ -1725,7 +1740,7 @@ func _testGetTopStakesByStakeAmount(t *testing.T, flushToDB bool) { constructAndSubmitStakeTxn := func(stakerPk string, stakerPriv string, validatorPkBytes []byte, amountNanos uint64) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(validatorPkBytes), - RestakeRewards: false, + RewardMethod: StakingRewardMethodPayToBalance, StakeAmountNanos: uint256.NewInt().SetUint64(amountNanos), } _, err := _submitStakeTxn(testMeta, stakerPk, stakerPriv, stakeMetadata, nil, flushToDB) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 1e251b95a..b6341e9a9 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -125,9 +125,14 @@ func TestRunEpochCompleteHook(t *testing.T) { _, err = _submitRegisterAsValidatorTxn(testMeta, publicKey, privateKey, registerMetadata, nil, true) require.NoError(t, err) + rewardMethod := StakingRewardMethodPayToBalance + if restakeRewards { + rewardMethod = StakingRewardMethodRestake + } + stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(pkBytes), - RestakeRewards: restakeRewards, + RewardMethod: rewardMethod, StakeAmountNanos: uint256.NewInt().SetUint64(stakeAmountNanos), } _, err = _submitStakeTxn(testMeta, publicKey, privateKey, stakeMetadata, nil, true) @@ -462,7 +467,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Len(t, snapshotStakeEntries, 6) } { - // Test staking rewards distribution with RestakeRewards enabled. + // Test staking rewards distribution with restaking enabled. // m6 now has a 14333333578 nano balance from staking rewards so far. 
balance, err := utxoView().GetDeSoBalanceNanosForPublicKey(m6PkBytes) @@ -478,14 +483,14 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, balance, uint64(16747126681)) } { - // Test staking rewards distribution with RestakeRewards enabled. + // Test staking rewards distribution with restaking enabled. // m6 has 700 nanos staked. stakeEntry, err := utxoView().GetStakeEntry(m6PKID, m6PKID) require.NoError(t, err) require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(700)) - // m6 sets their RestakeRewards flag to true. + // m6 enables restaking. _registerAndStake(m6Pub, m6Priv, 0, true) // m6's wallet balance is 16747126627 after they submit their stake transaction. diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 44d36881a..5df397b20 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -688,7 +688,7 @@ type SnapshotStakeMapKey struct { // This is a bare bones in-memory only construct used to capture the ValidatorPKID, // StakerPKID, and StakeAmountNanos from a StakeEntry that has been snapshotted. We // define a new type here rather than re-using the StakeEntry type to reduce the risk -// of bugs. The StakeEntry type has additional fields (ex: RestakeRewards, ExtraData) +// of bugs. The StakeEntry type has additional fields (ex: RewardMethod, ExtraData) // that are not snapshotted. type SnapshotStakeEntry struct { SnapshotAtEpochNumber uint64 diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index d3b1b9d31..fe698df5d 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -79,7 +79,7 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64 // StakeEntry. Their stake is currently in lockup. // For case 1, we distribute the rewards by adding them to the staker's staked amount. 
- if stakeEntry != nil && stakeEntry.RestakeRewards { + if stakeEntry != nil && stakeEntry.RewardMethod == StakingRewardMethodRestake { stakeEntry.StakeAmountNanos.Add(stakeEntry.StakeAmountNanos, rewardAmount) bav._setStakeEntryMappings(stakeEntry) From 059dd5167b36c65d6949dc229094c068fc8b4141 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 19 Jul 2023 13:44:18 -0400 Subject: [PATCH 158/762] Add delegated stake commissions field to ValidatorEntry (#613) * Add delegated stake commissions field to ValidatorEntry * Add unit tests * Better comments --- lib/block_view_validator.go | 93 ++++++++++++++++++++++---------- lib/block_view_validator_test.go | 14 +++++ 2 files changed, 80 insertions(+), 27 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 9fe947cda..c9f0cef2b 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -34,6 +34,8 @@ import ( // TYPES: ValidatorEntry // +const MaxDelegatedStakeCommissionBasisPoints = uint64(10000) // 100% commission + type ValidatorEntry struct { // The ValidatorPKID is the primary key for a ValidatorEntry. It is the PKID // for the transactor who registered the validator. A user's PKID can only @@ -49,6 +51,12 @@ type ValidatorEntry struct { // stake with themselves, but all other users will receive an error if they // try to stake with this validator. DisableDelegatedStake bool + // DelegatedStakeCommissionBasisPoints determines the percentage of + // staking rewards that the validator takes as commission from its stake delegators' + // rewards. For example, if a stake delegator has 5 DESO in staking rewards and the + // validator's commission rate is 10%, then the validator receives 0.5 DESO and the + // stake delegator receives 4.5 DESO. + DelegatedStakeCommissionBasisPoints uint64 // The VotingPublicKey is a BLS PublicKey that is used in consensus messages. 
// A validator signs consensus messages with their VotingPrivateKey and then // other validators can reliably prove the message came from this validator @@ -119,16 +127,17 @@ func (validatorEntry *ValidatorEntry) Copy() *ValidatorEntry { // Return new ValidatorEntry. return &ValidatorEntry{ - ValidatorPKID: validatorEntry.ValidatorPKID.NewPKID(), - Domains: domainsCopy, - DisableDelegatedStake: validatorEntry.DisableDelegatedStake, - VotingPublicKey: validatorEntry.VotingPublicKey.Copy(), - VotingAuthorization: validatorEntry.VotingAuthorization.Copy(), - TotalStakeAmountNanos: validatorEntry.TotalStakeAmountNanos.Clone(), - LastActiveAtEpochNumber: validatorEntry.LastActiveAtEpochNumber, - JailedAtEpochNumber: validatorEntry.JailedAtEpochNumber, - ExtraData: copyExtraData(validatorEntry.ExtraData), - isDeleted: validatorEntry.isDeleted, + ValidatorPKID: validatorEntry.ValidatorPKID.NewPKID(), + Domains: domainsCopy, + DisableDelegatedStake: validatorEntry.DisableDelegatedStake, + DelegatedStakeCommissionBasisPoints: validatorEntry.DelegatedStakeCommissionBasisPoints, + VotingPublicKey: validatorEntry.VotingPublicKey.Copy(), + VotingAuthorization: validatorEntry.VotingAuthorization.Copy(), + TotalStakeAmountNanos: validatorEntry.TotalStakeAmountNanos.Clone(), + LastActiveAtEpochNumber: validatorEntry.LastActiveAtEpochNumber, + JailedAtEpochNumber: validatorEntry.JailedAtEpochNumber, + ExtraData: copyExtraData(validatorEntry.ExtraData), + isDeleted: validatorEntry.isDeleted, } } @@ -143,6 +152,7 @@ func (validatorEntry *ValidatorEntry) RawEncodeWithoutMetadata(blockHeight uint6 } data = append(data, BoolToByte(validatorEntry.DisableDelegatedStake)) + data = append(data, UintToBuf(validatorEntry.DelegatedStakeCommissionBasisPoints)...) data = append(data, EncodeBLSPublicKey(validatorEntry.VotingPublicKey)...) data = append(data, EncodeBLSSignature(validatorEntry.VotingAuthorization)...) 
data = append(data, VariableEncodeUint256(validatorEntry.TotalStakeAmountNanos)...) @@ -180,6 +190,12 @@ func (validatorEntry *ValidatorEntry) RawDecodeWithoutMetadata(blockHeight uint6 return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading DisableDelegatedStake: ") } + // DelegatedStakeCommissionBasisPoints + validatorEntry.DelegatedStakeCommissionBasisPoints, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "ValidatorEntry.Decode: Problem reading DelegatedStakeCommissionBasisPoints: ") + } + // VotingPublicKey validatorEntry.VotingPublicKey, err = DecodeBLSPublicKey(rr) if err != nil { @@ -232,10 +248,11 @@ func (validatorEntry *ValidatorEntry) GetEncoderType() EncoderType { // type RegisterAsValidatorMetadata struct { - Domains [][]byte - DisableDelegatedStake bool - VotingPublicKey *bls.PublicKey - VotingAuthorization *bls.Signature + Domains [][]byte + DisableDelegatedStake bool + DelegatedStakeCommissionBasisPoints uint64 + VotingPublicKey *bls.PublicKey + VotingAuthorization *bls.Signature } func (txnData *RegisterAsValidatorMetadata) GetTxnType() TxnType { @@ -252,6 +269,7 @@ func (txnData *RegisterAsValidatorMetadata) ToBytes(preSignature bool) ([]byte, } data = append(data, BoolToByte(txnData.DisableDelegatedStake)) + data = append(data, UintToBuf(txnData.DelegatedStakeCommissionBasisPoints)...) data = append(data, EncodeBLSPublicKey(txnData.VotingPublicKey)...) data = append(data, EncodeBLSSignature(txnData.VotingAuthorization)...) 
return data, nil @@ -279,6 +297,12 @@ func (txnData *RegisterAsValidatorMetadata) FromBytes(data []byte) error { return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading DisableDelegatedStake: ") } + // DelegatedStakeCommissionBasisPoints + txnData.DelegatedStakeCommissionBasisPoints, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorMetadata.FromBytes: Problem reading DelegatedStakeCommissionBasisPoints: ") + } + // VotingPublicKey txnData.VotingPublicKey, err = DecodeBLSPublicKey(rr) if err != nil { @@ -347,11 +371,12 @@ func (txnData *UnjailValidatorMetadata) New() DeSoTxnMetadata { // type RegisterAsValidatorTxindexMetadata struct { - ValidatorPublicKeyBase58Check string - Domains []string - DisableDelegatedStake bool - VotingPublicKey string - VotingAuthorization string + ValidatorPublicKeyBase58Check string + Domains []string + DisableDelegatedStake bool + DelegatedStakeCommissionBasisPoints uint64 + VotingPublicKey string + VotingAuthorization string } func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -365,6 +390,7 @@ func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawEncodeWithoutMetad } data = append(data, BoolToByte(txindexMetadata.DisableDelegatedStake)) + data = append(data, UintToBuf(txindexMetadata.DelegatedStakeCommissionBasisPoints)...) data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingPublicKey))...) data = append(data, EncodeByteArray([]byte(txindexMetadata.VotingAuthorization))...) 
return data @@ -399,6 +425,12 @@ func (txindexMetadata *RegisterAsValidatorTxindexMetadata) RawDecodeWithoutMetad return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading DisableDelegatedStake: ") } + // DelegatedStakeCommissionBasisPoints + txindexMetadata.DelegatedStakeCommissionBasisPoints, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "RegisterAsValidatorTxindexMetadata.Decode: Problem reading DelegatedStakeCommissionBasisPoints: ") + } + // VotingPublicKey votingPublicKeyBytes, err := DecodeByteArray(rr) if err != nil { @@ -1065,14 +1097,15 @@ func (bav *UtxoView) _connectRegisterAsValidator( ValidatorPKID: transactorPKIDEntry.PKID, // Note: if someone is updating their ValidatorEntry, they need to include // all domains. The Domains field is not appended to. It is overwritten. - Domains: txMeta.Domains, - DisableDelegatedStake: txMeta.DisableDelegatedStake, - VotingPublicKey: txMeta.VotingPublicKey, - VotingAuthorization: txMeta.VotingAuthorization, - TotalStakeAmountNanos: totalStakeAmountNanos, - LastActiveAtEpochNumber: lastActiveAtEpochNumber, - JailedAtEpochNumber: jailedAtEpochNumber, - ExtraData: mergeExtraData(prevExtraData, txn.ExtraData), + Domains: txMeta.Domains, + DisableDelegatedStake: txMeta.DisableDelegatedStake, + DelegatedStakeCommissionBasisPoints: txMeta.DelegatedStakeCommissionBasisPoints, + VotingPublicKey: txMeta.VotingPublicKey, + VotingAuthorization: txMeta.VotingAuthorization, + TotalStakeAmountNanos: totalStakeAmountNanos, + LastActiveAtEpochNumber: lastActiveAtEpochNumber, + JailedAtEpochNumber: jailedAtEpochNumber, + ExtraData: mergeExtraData(prevExtraData, txn.ExtraData), } // Set the ValidatorEntry. 
bav._setValidatorEntryMappings(currentValidatorEntry) @@ -1554,6 +1587,11 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata( return errors.Wrapf(RuleErrorValidatorDuplicateDomains, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } + // Validate DelegatedStakeCommissionBasisPoints. + if metadata.DelegatedStakeCommissionBasisPoints > MaxDelegatedStakeCommissionBasisPoints { + return errors.Wrapf(RuleErrorValidatorInvalidCommissionBasisPoints, "UtxoView.IsValidRegisterAsValidatorMetadata: ") + } + // Validate VotingPublicKey. if metadata.VotingPublicKey == nil { return errors.Wrapf(RuleErrorValidatorMissingVotingPublicKey, "UtxoView.IsValidRegisterAsValidatorMetadata: ") @@ -2228,6 +2266,7 @@ const RuleErrorValidatorNoDomains RuleError = "RuleErrorValidatorNoDomains" const RuleErrorValidatorTooManyDomains RuleError = "RuleErrorValidatorTooManyDomains" const RuleErrorValidatorInvalidDomain RuleError = "RuleErrorValidatorInvalidDomain" const RuleErrorValidatorDuplicateDomains RuleError = "RuleErrorValidatorDuplicateDomains" +const RuleErrorValidatorInvalidCommissionBasisPoints RuleError = "RuleErrorValidatorInvalidCommissionBasisPoints" const RuleErrorValidatorNotFound RuleError = "RuleErrorValidatorNotFound" const RuleErrorValidatorMissingVotingPublicKey RuleError = "RuleErrorValidatorMissingVotingPublicKey" const RuleErrorValidatorMissingVotingAuthorization RuleError = "RuleErrorValidatorMissingVotingAuthorization" diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index eeafa2149..8daeb056a 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -99,6 +99,20 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) } + { + // RuleErrorValidatorInvalidCommissionBasisPoints + votingPublicKey, 
votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + DisableDelegatedStake: true, + DelegatedStakeCommissionBasisPoints: MaxDelegatedStakeCommissionBasisPoints + 1, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, + } + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorInvalidCommissionBasisPoints) + } { // RuleErrorValidatorNoDomains registerMetadata = &RegisterAsValidatorMetadata{ From 354f49eadc5494644c4203bd411407f4436d4af8 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Wed, 19 Jul 2023 17:52:39 -0700 Subject: [PATCH 159/762] PoS Transaction Register Prune (#563) * Revert "Remove DeSoMempoolPoS" This reverts commit a5ba546dd550661d2fc25103c7605d0a23a9ba8a. 
* Rearrange * Txn Validator; Mempool Ledger * Add pruning; signature changes; refactoring * Database Interface; Badger and Bolt db implementations; Refactoring * Add nested bolt bucket support * Mempool persister; Start & Stop Mempool; DatabaseContext; Database interface updates * Split transaction register changes * Revert timestamp * Test Prune * Remove go.mod changes * Fix mutex * Placeholder change * PruneToSize; Add RLocks; More tests; No error on non-existing transactions * Revert bitset placeholder * Nits * Review pass * Rename; Test nits --------- Co-authored-by: iamsofonias --- lib/pos_transaction_register.go | 269 ++++++++++++++++++++++++--- lib/pos_transaction_register_test.go | 230 +++++++++++++++++++++-- 2 files changed, 461 insertions(+), 38 deletions(-) diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index 88c9395e5..bfe2f6ed4 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -1,12 +1,14 @@ package lib import ( + "bytes" "fmt" "github.com/emirpasic/gods/sets/treeset" "github.com/golang/glog" "github.com/pkg/errors" "math" "math/big" + "sync" ) // ======================== @@ -17,8 +19,9 @@ import ( // the Fee-Time ordering of transactions. The operations supported by the register are: adding a transaction, removing // a transaction, iterating through all transactions in fee-time order, and retrieving all transactions ordered in // fee-time. The TransactionRegister doesn't perform any validation on the transactions, it just accepts the provided -// MempoolTx and adds it to the appropriate FeeTimeBucket. +// MempoolTx and adds it to the appropriate FeeTimeBucket. The structure is thread safe. type TransactionRegister struct { + sync.RWMutex // feeTimeBucketSet is a set of FeeTimeBucket objects. The set is ordered by the FeeTimeBucket's ranges, based on feeTimeBucketComparator. 
feeTimeBucketSet *treeset.Set // feeTimeBucketsByMinFeeMap is a map of FeeTimeBucket minimum fees to FeeTimeBucket objects. It is used to quickly find @@ -28,8 +31,6 @@ type TransactionRegister struct { txnMembership *Set[BlockHash] // totalTxnsSizeBytes is the total size of all transactions in the register. totalTxnsSizeBytes uint64 - // maxTxnsSizeBytes is the maximum size of a transaction in the register. - maxTxnsSizeBytes uint64 // minimumNetworkFeeNanosPerKB is the base fee rate for the lowest fee FeeTimeBucket. This value corresponds to // GlobalParamsEntry's MinimumNetworkFeeNanosPerKB. minimumNetworkFeeNanosPerKB *big.Float @@ -38,16 +39,14 @@ type TransactionRegister struct { feeBucketRateMultiplierBasisPoints *big.Float } -func NewTransactionRegister(params *DeSoParams, globalParams *GlobalParamsEntry) *TransactionRegister { +func NewTransactionRegister(globalParams *GlobalParamsEntry) *TransactionRegister { feeTimeBucketSet := treeset.NewWith(feeTimeBucketComparator) minNetworkFee, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() - return &TransactionRegister{ feeTimeBucketSet: feeTimeBucketSet, feeTimeBucketsByMinFeeMap: make(map[uint64]*FeeTimeBucket), txnMembership: NewSet([]BlockHash{}), totalTxnsSizeBytes: 0, - maxTxnsSizeBytes: params.MaxMempoolPosSizeBytes, minimumNetworkFeeNanosPerKB: minNetworkFee, feeBucketRateMultiplierBasisPoints: bucketMultiplier, } @@ -77,12 +76,19 @@ func feeTimeBucketComparator(a, b interface{}) int { // exceeds the maximum mempool capacity, it is not added. Returns nil when transaction was successfully added to the // register, or an error otherwise. 
func (tr *TransactionRegister) AddTransaction(txn *MempoolTx) error { + tr.Lock() + defer tr.Unlock() + + return tr.addTransactionNoLock(txn) +} + +func (tr *TransactionRegister) addTransactionNoLock(txn *MempoolTx) error { if txn == nil || txn.Hash == nil { return fmt.Errorf("TransactionRegister.AddTransaction: Transaction or transaction hash is nil") } if tr.txnMembership.Includes(*txn.Hash) { - return fmt.Errorf("TransactionRegister.AddTransaction: Transaction already exists in register") + return nil } // If the transaction is too large, reject it. @@ -91,11 +97,6 @@ func (tr *TransactionRegister) AddTransaction(txn *MempoolTx) error { "total size %v", txn.TxSizeBytes, tr.totalTxnsSizeBytes) } - // If the transaction overflows the maximum mempool size, reject it. - if tr.totalTxnsSizeBytes+txn.TxSizeBytes > tr.maxTxnsSizeBytes { - return fmt.Errorf("TransactionRegister.AddTransaction: Transaction size exceeds maximum mempool size") - } - // Determine the min fee of the bucket based on the transaction's fee rate. bucketMinFeeNanosPerKb, bucketMaxFeeNanosPerKB := computeFeeTimeBucketRangeFromFeeNanosPerKB(txn.FeePerKB, tr.minimumNetworkFeeNanosPerKB, tr.feeBucketRateMultiplierBasisPoints) @@ -104,9 +105,6 @@ func (tr *TransactionRegister) AddTransaction(txn *MempoolTx) error { if !bucketExists { // If the bucket doesn't exist, create it and add the transaction to it. bucket = NewFeeTimeBucket(bucketMinFeeNanosPerKb, bucketMaxFeeNanosPerKB) - if err := bucket.AddTransaction(txn); err != nil { - return errors.Wrapf(err, "TransactionRegister.AddTransaction: Error adding transaction to bucket: %v", err) - } } // Add the transaction to the bucket. @@ -116,8 +114,7 @@ func (tr *TransactionRegister) AddTransaction(txn *MempoolTx) error { if !bucketExists { // If the bucket didn't exist, add it to the set and the map. 
- tr.feeTimeBucketSet.Add(bucket) - tr.feeTimeBucketsByMinFeeMap[bucketMinFeeNanosPerKb] = bucket + tr.addBucketNoLock(bucket) } tr.totalTxnsSizeBytes += txn.TxSizeBytes @@ -129,13 +126,19 @@ func (tr *TransactionRegister) AddTransaction(txn *MempoolTx) error { // size exceeds the current register size (which should never happen), it is not removed. Returns nil when transaction // was successfully removed from the register, or an error otherwise. func (tr *TransactionRegister) RemoveTransaction(txn *MempoolTx) error { + tr.Lock() + defer tr.Unlock() + + return tr.removeTransactionNoLock(txn) +} + +func (tr *TransactionRegister) removeTransactionNoLock(txn *MempoolTx) error { if txn == nil || txn.Hash == nil { return fmt.Errorf("TransactionRegister.RemoveTransaction: Transaction or transaction hash is nil") } if !tr.txnMembership.Includes(*txn.Hash) { - return fmt.Errorf("TransactionRegister.RemoveTransaction: Transaction with transaction hash %v does not "+ - "exist in the register", txn.Hash.String()) + return nil } if tr.totalTxnsSizeBytes < txn.TxSizeBytes { @@ -148,11 +151,15 @@ func (tr *TransactionRegister) RemoveTransaction(txn *MempoolTx) error { tr.minimumNetworkFeeNanosPerKB, tr.feeBucketRateMultiplierBasisPoints) // Remove the transaction from the bucket. if bucket, exists := tr.feeTimeBucketsByMinFeeMap[bucketMinFeeNanosPerKb]; exists { + if bucket.minFeeNanosPerKB != bucketMinFeeNanosPerKb { + return fmt.Errorf("TransactionRegister.RemoveTransaction: Bucket min fee %v does not match "+ + "bucketMinFeeNanosPerKb %v", bucket.minFeeNanosPerKB, bucketMinFeeNanosPerKb) + } + bucket.RemoveTransaction(txn) // If the bucket becomes empty, remove it from the TransactionRegister. 
if bucket.Empty() { - tr.feeTimeBucketSet.Remove(bucket) - delete(tr.feeTimeBucketsByMinFeeMap, bucketMinFeeNanosPerKb) + tr.removeBucketNoLock(bucket) } } @@ -161,13 +168,66 @@ func (tr *TransactionRegister) RemoveTransaction(txn *MempoolTx) error { return nil } +func (tr *TransactionRegister) addBucketNoLock(bucket *FeeTimeBucket) { + if bucket == nil { + return + } + + tr.feeTimeBucketSet.Add(bucket) + tr.feeTimeBucketsByMinFeeMap[bucket.minFeeNanosPerKB] = bucket +} + +func (tr *TransactionRegister) removeBucketNoLock(bucket *FeeTimeBucket) { + if bucket == nil { + return + } + + tr.feeTimeBucketSet.Remove(bucket) + bucket.Clear() + delete(tr.feeTimeBucketsByMinFeeMap, bucket.minFeeNanosPerKB) +} + func (tr *TransactionRegister) Empty() bool { + tr.RLock() + defer tr.RUnlock() + return tr.feeTimeBucketSet.Empty() } +func (tr *TransactionRegister) Size() uint64 { + tr.RLock() + defer tr.RUnlock() + + return tr.totalTxnsSizeBytes +} + +func (tr *TransactionRegister) Includes(txn *MempoolTx) bool { + tr.RLock() + defer tr.RUnlock() + + if txn == nil || txn.Hash == nil { + return false + } + + return tr.txnMembership.Includes(*txn.Hash) +} + +func (tr *TransactionRegister) Reset() { + tr.Lock() + defer tr.Unlock() + + tr.feeTimeBucketSet.Clear() + tr.feeTimeBucketsByMinFeeMap = make(map[uint64]*FeeTimeBucket) + tr.txnMembership = NewSet([]BlockHash{}) + tr.totalTxnsSizeBytes = 0 +} + // GetFeeTimeIterator returns an iterator over the transactions in the register. The iterator goes through all transactions // as ordered by Fee-Time. func (tr *TransactionRegister) GetFeeTimeIterator() *FeeTimeIterator { + tr.RLock() + defer tr.RUnlock() + return &FeeTimeIterator{ bucketIterator: tr.feeTimeBucketSet.Iterator(), mempoolTxIterator: nil, @@ -176,6 +236,9 @@ func (tr *TransactionRegister) GetFeeTimeIterator() *FeeTimeIterator { // GetFeeTimeTransactions returns all transactions in the register ordered by Fee-Time. 
func (tr *TransactionRegister) GetFeeTimeTransactions() []*MempoolTx { + tr.RLock() + defer tr.RUnlock() + txns := []*MempoolTx{} it := tr.GetFeeTimeIterator() for it.Next() { @@ -186,6 +249,84 @@ func (tr *TransactionRegister) GetFeeTimeTransactions() []*MempoolTx { return txns } +// PruneToSize removes transactions from the end of the register until the size of the register shrinks to the desired +// number of bytes. The returned transactions, _prunedTxns, are ordered by lowest-to-highest priority, i.e. first +// transaction will have the smallest fee, last transaction will have the highest fee. Returns _err = nil if no +// transactions were pruned. +func (tr *TransactionRegister) PruneToSize(maxSizeBytes uint64) (_prunedTxns []*MempoolTx, _err error) { + tr.Lock() + defer tr.Unlock() + + // If the maximum number of bytes is greater or equal to the current size of the register, return. + if maxSizeBytes >= tr.totalTxnsSizeBytes { + return nil, nil + } + + // If the register is empty, return. + if tr.feeTimeBucketSet.Empty() { + return nil, nil + } + + // Determine how many bytes we need to prune and get the transactions to prune. + minPrunedBytes := tr.totalTxnsSizeBytes - maxSizeBytes + prunedTxns, err := tr.getTransactionsToPrune(minPrunedBytes) + if err != nil { + return nil, errors.Wrapf(err, "TransactionRegister.PruneToSize: Error getting transactions to prune") + } + + // Remove the transactions from prunedTxns. + for _, txn := range prunedTxns { + if err := tr.removeTransactionNoLock(txn); err != nil { + return nil, errors.Wrapf(err, "TransactionRegister.PruneToSize: Error removing transaction %v", txn.Hash.String()) + } + } + return prunedTxns, nil +} + +func (tr *TransactionRegister) getTransactionsToPrune(minPrunedBytes uint64) (_prunedTxns []*MempoolTx, _err error) { + if minPrunedBytes == 0 { + return nil, nil + } + + prunedBytes := uint64(0) + prunedTxns := []*MempoolTx{} + + // Find the FeeTime bucket at the end of the Set. 
It'll have the smallest fee among the buckets in the register. + it := tr.feeTimeBucketSet.Iterator() + it.End() + // Iterate through the buckets in reverse order so that we drop transactions ordered by least-to-highest priority. + for it.Prev() { + bucket, ok := it.Value().(*FeeTimeBucket) + if !ok { + return nil, fmt.Errorf("TransactionRegister.getTransactionsToPrune: " + + "Error casting value of FeeTimeBucket") + } + + // Iterate through the transactions in the current FeeTime bucket. We iterate in reverse order, starting from the + // end, so that we drop transactions ordered by least-to-highest priority. + bucketIt := bucket.GetIterator() + bucketIt.End() + for bucketIt.Prev() { + txn, ok := bucketIt.Value().(*MempoolTx) + if !ok { + return nil, fmt.Errorf("TransactionRegister.getTransactionsToPrune: " + + "Error casting value of MempoolTx") + } + // Add the transaction to the prunedTxns list. + prunedTxns = append(prunedTxns, txn) + prunedBytes += txn.TxSizeBytes + // If we've pruned sufficiently many bytes, we can return early. + if prunedBytes >= minPrunedBytes { + return prunedTxns, nil + } + } + } + + // If we reach this point, it means that we've iterated through the entire TransactionRegister and have no remaining + // transactions to prune. We can return the txns we've found so far. + return prunedTxns, nil +} + // FeeTimeIterator is an iterator over the transactions in a TransactionRegister. The iterator goes through all transactions // as ordered by Fee-Time. type FeeTimeIterator struct { @@ -252,24 +393,37 @@ func (fti *FeeTimeIterator) Initialized() bool { // FeeTimeBucket // ======================== -// FeeTimeBucket is a data structure storing MempoolTx with similar fee rates. +// FeeTimeBucket is a data structure storing MempoolTx with similar fee rates. The structure is thread safe. 
+// The transactions accepted by the FeeTimeBucket must have a fee rate above or equal to the configured minFeeNanosPerKB, +// and below or equal to the configured maxFeeNanosPerKB. The transactions are stored in a treeset, which orders the +// transactions by timestamp. The earliest timestamp is at the front of the txnsSet, and the latest timestamp is at the +// back of the txnsSet. In case of a timestamp tie, the transactions are ordered by greatest fee rate first. If a tie +// still exists, the transactions are ordered by greatest lexicographic transaction hash first. type FeeTimeBucket struct { + sync.RWMutex + // txnsSet is a set of MempoolTx transactions stored in the FeeTimeBucket. txnsSet *treeset.Set + // txnMembership is a set of transaction hashes. It is used to determine existence of a transaction in the bucket. + txnMembership *Set[BlockHash] // minFeeNanosPerKB is the minimum fee rate (inclusive) accepted by the FeeTimeBucket, in nanos per KB. minFeeNanosPerKB uint64 // maxFeeNanosPerKB is the maximum fee rate (inclusive) accepted by the FeeTimeBucket, in nanos per KB. It's worth // noting that the maximum fee rate is always 1 below the minimum fee rate of the FeeTimeBucket with exponent+1. maxFeeNanosPerKB uint64 + // totalTxnsSizeBytes is the total size of all transactions in the FeeTimeBucket, in bytes. + totalTxnsSizeBytes uint64 } func NewFeeTimeBucket(minFeeNanosPerKB uint64, maxFeeNanosPerKB uint64) *FeeTimeBucket { txnsSet := treeset.NewWith(mempoolTxTimeOrderComparator) return &FeeTimeBucket{ - minFeeNanosPerKB: minFeeNanosPerKB, - maxFeeNanosPerKB: maxFeeNanosPerKB, - txnsSet: txnsSet, + minFeeNanosPerKB: minFeeNanosPerKB, + maxFeeNanosPerKB: maxFeeNanosPerKB, + totalTxnsSizeBytes: 0, + txnsSet: txnsSet, + txnMembership: NewSet([]BlockHash{}), } } @@ -294,42 +448,78 @@ func mempoolTxTimeOrderComparator(a, b interface{}) int { return -1 } // If the timestamps and fee rates are the same, we order by the transaction hash. 
- return HashToBigint(aVal.Hash).Cmp(HashToBigint(bVal.Hash)) + return bytes.Compare(aVal.Hash[:], bVal.Hash[:]) } // AddTransaction adds a transaction to the FeeTimeBucket. It returns an error if the transaction is outside the // FeeTimeBucket's fee range, or if the transaction hash is nil. func (tb *FeeTimeBucket) AddTransaction(txn *MempoolTx) error { + tb.Lock() + defer tb.Unlock() + if txn == nil || txn.Hash == nil { return fmt.Errorf("FeeTimeBucket.AddTransaction: Transaction or transaction hash is nil") } + if tb.txnMembership.Includes(*txn.Hash) { + return nil + } + + if tb.totalTxnsSizeBytes > math.MaxUint64-txn.TxSizeBytes { + return fmt.Errorf("FeeTimeBucket.AddTransaction: Transaction size %d would overflow bucket size %d", + txn.TxSizeBytes, tb.totalTxnsSizeBytes) + } + if tb.minFeeNanosPerKB > txn.FeePerKB || tb.maxFeeNanosPerKB < txn.FeePerKB { return fmt.Errorf("FeeTimeBucket.AddTransaction: Transaction fee %d outside of bucket range [%d, %d]", txn.FeePerKB, tb.minFeeNanosPerKB, tb.maxFeeNanosPerKB) } tb.txnsSet.Add(txn) + tb.txnMembership.Add(*txn.Hash) + tb.totalTxnsSizeBytes += txn.TxSizeBytes return nil } // RemoveTransaction removes a transaction from the FeeTimeBucket. func (tb *FeeTimeBucket) RemoveTransaction(txn *MempoolTx) { + tb.Lock() + defer tb.Unlock() + + if txn == nil || txn.Hash == nil { + return + } + + if !tb.txnMembership.Includes(*txn.Hash) { + return + } + tb.txnsSet.Remove(txn) + tb.txnMembership.Remove(*txn.Hash) + tb.totalTxnsSizeBytes -= txn.TxSizeBytes } func (tb *FeeTimeBucket) Empty() bool { + tb.RLock() + defer tb.RUnlock() + return tb.txnsSet.Empty() } // GetIterator returns an iterator over the MempoolTx inside the FeeTimeBucket. func (tb *FeeTimeBucket) GetIterator() treeset.Iterator { + tb.RLock() + defer tb.RUnlock() + return tb.txnsSet.Iterator() } // GetTransactions returns a slice of MempoolTx inside the FeeTimeBucket. The slice is ordered according to the // mempoolTxTimeOrderComparator. 
func (tb *FeeTimeBucket) GetTransactions() []*MempoolTx { + tb.RLock() + defer tb.RUnlock() + txns := []*MempoolTx{} it := tb.GetIterator() for it.Next() { @@ -340,6 +530,33 @@ func (tb *FeeTimeBucket) GetTransactions() []*MempoolTx { return txns } +func (tb *FeeTimeBucket) Size() uint64 { + tb.RLock() + defer tb.RUnlock() + + return tb.totalTxnsSizeBytes +} + +func (tb *FeeTimeBucket) Includes(txn *MempoolTx) bool { + tb.RLock() + defer tb.RUnlock() + + if txn == nil || txn.Hash == nil { + return false + } + + return tb.txnMembership.Includes(*txn.Hash) +} + +func (tb *FeeTimeBucket) Clear() { + tb.Lock() + defer tb.Unlock() + + tb.txnsSet.Clear() + tb.txnMembership = NewSet([]BlockHash{}) + tb.totalTxnsSizeBytes = 0 +} + //============================================ // Fee-Time Bucket Math //============================================ diff --git a/lib/pos_transaction_register_test.go b/lib/pos_transaction_register_test.go index 71cbf6763..0a55512bc 100644 --- a/lib/pos_transaction_register_test.go +++ b/lib/pos_transaction_register_test.go @@ -3,6 +3,7 @@ package lib import ( "bytes" "github.com/stretchr/testify/require" + "math" "math/rand" "sort" "testing" @@ -13,7 +14,7 @@ func TestSanityCheckTransactionRegister(t *testing.T) { require := require.New(t) // Empty TransactionRegister - txnRegister := NewTransactionRegister(&DeSoTestnetParams, _testGetDefaultGlobalParams()) + txnRegister := NewTransactionRegister(_testGetDefaultGlobalParams()) require.Equal(true, txnRegister.Empty()) it := txnRegister.GetFeeTimeIterator() require.Equal(false, it.Next()) @@ -26,21 +27,225 @@ func TestSanityCheckTransactionRegister(t *testing.T) { } require.Nil(txnRegister.AddTransaction(txn)) require.Equal(false, txnRegister.Empty()) + require.Len(txnRegister.GetFeeTimeTransactions(), 1) + require.Equal(true, txnRegister.Includes(txn)) it = txnRegister.GetFeeTimeIterator() require.Equal(true, it.Next()) recTxn, ok := it.Value() require.Equal(true, ok) require.Equal(true, 
bytes.Equal(txn.Hash[:], recTxn.Hash[:])) require.Nil(txnRegister.RemoveTransaction(recTxn)) + require.Len(txnRegister.GetFeeTimeTransactions(), 0) + require.Equal(true, txnRegister.Empty()) // TransactionRegister with no transactions and a single empty FeeTimeBucket. // This should never happen but let's see what happens. - txnRegister = NewTransactionRegister(&DeSoTestnetParams, _testGetDefaultGlobalParams()) + txnRegister = NewTransactionRegister(_testGetDefaultGlobalParams()) emptyFeeTimeBucket := NewFeeTimeBucket(0, 1000) txnRegister.feeTimeBucketSet.Add(emptyFeeTimeBucket) txnRegister.feeTimeBucketsByMinFeeMap[0] = emptyFeeTimeBucket newIt := txnRegister.GetFeeTimeIterator() require.Equal(false, newIt.Next()) + require.Len(txnRegister.GetFeeTimeTransactions(), 0) + + // Remove non-existing transaction from empty TransactionRegister. + txn2 := &MempoolTx{ + FeePerKB: 10050, + Added: time.UnixMicro(1000050), + Hash: NewBlockHash(RandomBytes(32)), + } + require.Nil(txnRegister.RemoveTransaction(txn2)) + require.Len(txnRegister.GetFeeTimeTransactions(), 0) + + // Remove non-existing transaction from non-empty TransactionRegister. 
+ require.NoError(txnRegister.AddTransaction(txn)) + require.Len(txnRegister.GetFeeTimeTransactions(), 1) + require.Equal(true, txnRegister.Includes(txn)) + require.Nil(txnRegister.RemoveTransaction(txn2)) + require.Len(txnRegister.GetFeeTimeTransactions(), 1) + require.Equal(true, txnRegister.Includes(txn)) + require.Equal(false, txnRegister.Includes(txn2)) + require.Equal(true, bytes.Equal(txnRegister.GetFeeTimeTransactions()[0].Hash[:], txn.Hash[:])) +} + +func TestSanityCheckFeeTimeBucket(t *testing.T) { + require := require.New(t) + + // Empty FeeTimeBucket + feeTimeBucket := NewFeeTimeBucket(100000, 110000) + require.Len(feeTimeBucket.GetTransactions(), 0) + require.Equal(uint64(0), feeTimeBucket.totalTxnsSizeBytes) + require.Equal(true, feeTimeBucket.Empty()) + + // FeeTimeBucket with a single transaction + txn := &MempoolTx{ + FeePerKB: 100000, + Added: time.UnixMicro(1000000), + Hash: NewBlockHash(RandomBytes(32)), + TxSizeBytes: 100, + } + + require.Nil(feeTimeBucket.AddTransaction(txn)) + require.Len(feeTimeBucket.GetTransactions(), 1) + require.Equal(txn.TxSizeBytes, feeTimeBucket.totalTxnsSizeBytes) + require.Equal(false, feeTimeBucket.Empty()) + + // Try adding the same transaction again. + require.Nil(feeTimeBucket.AddTransaction(txn)) + require.Len(feeTimeBucket.GetTransactions(), 1) + require.Equal(txn.TxSizeBytes, feeTimeBucket.totalTxnsSizeBytes) + require.Equal(false, feeTimeBucket.Empty()) + + // Remove non-existing transaction from non-empty FeeTimeBucket. + txn2 := &MempoolTx{ + FeePerKB: 10050, + Added: time.UnixMicro(1000050), + Hash: NewBlockHash(RandomBytes(32)), + } + feeTimeBucket.RemoveTransaction(txn2) + require.Len(feeTimeBucket.GetTransactions(), 1) + require.Equal(txn.TxSizeBytes, feeTimeBucket.totalTxnsSizeBytes) + require.Equal(false, feeTimeBucket.Empty()) + require.Equal(true, feeTimeBucket.Includes(txn)) + require.Equal(false, feeTimeBucket.Includes(txn2)) + + // Remove existing transactions from FeeTimeBucket. 
+ feeTimeBucket.RemoveTransaction(txn) + require.Len(feeTimeBucket.GetTransactions(), 0) + require.Equal(uint64(0), feeTimeBucket.totalTxnsSizeBytes) + require.Equal(true, feeTimeBucket.Empty()) + require.Equal(false, feeTimeBucket.Includes(txn)) + require.Equal(false, feeTimeBucket.Includes(txn2)) + + // Remove non-existing transaction from empty FeeTimeBucket. + feeTimeBucket.RemoveTransaction(txn2) + require.Len(feeTimeBucket.GetTransactions(), 0) + require.Equal(uint64(0), feeTimeBucket.totalTxnsSizeBytes) + require.Equal(true, feeTimeBucket.Empty()) + require.Equal(false, feeTimeBucket.Includes(txn)) + require.Equal(false, feeTimeBucket.Includes(txn2)) +} + +func TestTransactionRegisterPrune(t *testing.T) { + seed := int64(111) + testCases := 1000 + feeRange := uint64(10000) + timestampRange := uint64(10000) + + require := require.New(t) + rand := rand.New(rand.NewSource(seed)) + globalParams := _testGetDefaultGlobalParams() + txnPool := _testGetRandomMempoolTxns(rand, globalParams.MinimumNetworkFeeNanosPerKB, feeRange, 1000, timestampRange, testCases) + + txnRegister := NewTransactionRegister(globalParams) + totalSize := uint64(0) + for _, tx := range txnPool { + require.Nil(txnRegister.AddTransaction(tx)) + totalSize += tx.TxSizeBytes + } + + // Try pruning 0 bytes + txns, err := txnRegister.PruneToSize(txnRegister.totalTxnsSizeBytes) + require.Nil(err) + require.Len(txns, 0) + + // Remove a single transaction + txns, err = txnRegister.PruneToSize(txnRegister.totalTxnsSizeBytes - 1) + require.Nil(err) + require.Len(txns, 1) + totalSize -= txns[0].TxSizeBytes + require.Equal(totalSize, txnRegister.totalTxnsSizeBytes) + + sortedTxns := _testSortMempoolTxnsByFeeTime(txnPool, globalParams, false) + lastTxn := sortedTxns[len(sortedTxns)-1] + require.Equal(true, bytes.Equal(lastTxn.Hash[:], txns[0].Hash[:])) + sortedTxns = sortedTxns[:len(sortedTxns)-1] + registerTxns := txnRegister.GetFeeTimeTransactions() + require.Equal(len(sortedTxns), len(registerTxns)) + for 
ii := 0; ii < len(sortedTxns); ii++ { + require.Equal(true, bytes.Equal(sortedTxns[ii].Hash[:], registerTxns[ii].Hash[:])) + } + + // Remove 10 transactions + last10Txns := sortedTxns[len(sortedTxns)-10:] + last10TxnsByteSize := uint64(0) + for _, txn := range last10Txns { + last10TxnsByteSize += txn.TxSizeBytes + } + + txns, err = txnRegister.PruneToSize(txnRegister.totalTxnsSizeBytes - last10TxnsByteSize) + require.Nil(err) + require.Equal(10, len(txns)) + totalSize -= last10TxnsByteSize + require.Equal(totalSize, txnRegister.totalTxnsSizeBytes) + + for ii := len(sortedTxns) - 1; ii >= len(sortedTxns)-10; ii-- { + require.Equal(true, bytes.Equal(sortedTxns[ii].Hash[:], txns[len(sortedTxns)-1-ii].Hash[:])) + } + sortedTxns = sortedTxns[:len(sortedTxns)-10] + registerTxns = txnRegister.GetFeeTimeTransactions() + require.Equal(len(sortedTxns), len(registerTxns)) + for ii := 0; ii < len(sortedTxns); ii++ { + require.Equal(true, bytes.Equal(sortedTxns[ii].Hash[:], registerTxns[ii].Hash[:])) + } + + // Remove all but 1 transaction + firstTxn := sortedTxns[0] + txns, err = txnRegister.PruneToSize(firstTxn.TxSizeBytes) + require.Nil(err) + require.Equal(len(sortedTxns)-1, len(txns)) + require.Equal(firstTxn.TxSizeBytes, txnRegister.totalTxnsSizeBytes) + totalSize = firstTxn.TxSizeBytes + for ii := len(sortedTxns) - 1; ii >= 1; ii-- { + require.Equal(true, bytes.Equal(sortedTxns[ii].Hash[:], txns[len(sortedTxns)-1-ii].Hash[:])) + } + sortedTxns = sortedTxns[:1] + registerTxns = txnRegister.GetFeeTimeTransactions() + require.Equal(len(sortedTxns), len(registerTxns)) + for ii := 0; ii < len(sortedTxns); ii++ { + require.Equal(true, bytes.Equal(sortedTxns[ii].Hash[:], registerTxns[ii].Hash[:])) + } + + // Remove the last transaction + txns, err = txnRegister.PruneToSize(txnRegister.totalTxnsSizeBytes - 1) + require.Nil(err) + require.Len(txns, 1) + require.Equal(uint64(0), txnRegister.totalTxnsSizeBytes) + require.Equal(0, len(txnRegister.GetFeeTimeTransactions())) + 
require.Equal(true, bytes.Equal(firstTxn.Hash[:], txns[0].Hash[:])) + + // Try pruning empty register + txns, err = txnRegister.PruneToSize(txnRegister.totalTxnsSizeBytes - 1) + require.Nil(err) + require.Len(txns, 0) + + // Re-add all transactions + totalSize = 0 + for _, tx := range txnPool { + require.Nil(txnRegister.AddTransaction(tx)) + totalSize += tx.TxSizeBytes + } + require.Equal(totalSize, txnRegister.totalTxnsSizeBytes) + + // Remove all transactions + txns, err = txnRegister.PruneToSize(0) + require.Nil(err) + require.Equal(len(txnPool), len(txns)) + require.Equal(uint64(0), txnRegister.totalTxnsSizeBytes) + require.Equal(0, len(txnRegister.GetFeeTimeTransactions())) + + // Re-add all transactions again + totalSize = 0 + for _, tx := range txnPool { + require.Nil(txnRegister.AddTransaction(tx)) + totalSize += tx.TxSizeBytes + } + + // Remove all transactions with higher min byte count + txns, err = txnRegister.PruneToSize(math.MaxUint64) + require.Nil(err) + require.Len(txns, 0) + require.Equal(len(txnPool), len(txnRegister.GetFeeTimeTransactions())) } func TestTransactionRegisterWithRemoves(t *testing.T) { @@ -52,9 +257,9 @@ func TestTransactionRegisterWithRemoves(t *testing.T) { require := require.New(t) rand := rand.New(rand.NewSource(seed)) globalParams := _testGetDefaultGlobalParams() - txnPool := _testGetRandomMempoolTxns(rand, globalParams.MinimumNetworkFeeNanosPerKB, feeRange, timestampRange, testCases) + txnPool := _testGetRandomMempoolTxns(rand, globalParams.MinimumNetworkFeeNanosPerKB, feeRange, 1000, timestampRange, testCases) - txnRegister := NewTransactionRegister(&DeSoTestnetParams, globalParams) + txnRegister := NewTransactionRegister(globalParams) _testBucketStandardRemoveTest(t, txnPool, globalParams, false, func(tx *MempoolTx) { require.Nil(txnRegister.AddTransaction(tx)) @@ -80,9 +285,9 @@ func TestTransactionRegisterBasic(t *testing.T) { require := require.New(t) rand := rand.New(rand.NewSource(seed)) globalParams := 
_testGetDefaultGlobalParams() - txnPool := _testGetRandomMempoolTxns(rand, globalParams.MinimumNetworkFeeNanosPerKB, feeRange, timestampRange, testCases) + txnPool := _testGetRandomMempoolTxns(rand, globalParams.MinimumNetworkFeeNanosPerKB, feeRange, 1000, timestampRange, testCases) - txnRegister := NewTransactionRegister(&DeSoTestnetParams, globalParams) + txnRegister := NewTransactionRegister(globalParams) _testBucketStandardAddTest(t, txnPool, globalParams, false, func(tx *MempoolTx) { require.Nil(txnRegister.AddTransaction(tx)) @@ -105,7 +310,7 @@ func TestFeeTimeBucketRemove(t *testing.T) { randomExponent := uint32(rand.Intn(exponentRange)) baseRate, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() feeMin, feeMax := computeFeeTimeBucketRangeFromExponent(randomExponent, baseRate, bucketMultiplier) - txnPool := _testGetRandomMempoolTxns(rand, feeMin, feeMax, timestampRange, testCases) + txnPool := _testGetRandomMempoolTxns(rand, feeMin, feeMax, 1000, timestampRange, testCases) // Create new FeeBucket and add the txn pool bucketFeeMin, bucketFeeMax := computeFeeTimeBucketRangeFromExponent(randomExponent, baseRate, bucketMultiplier) @@ -138,7 +343,7 @@ func TestFeeTimeBucketBasic(t *testing.T) { randomExponent := uint32(rand.Intn(exponentRange)) baseRate, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() feeMin, feeMax := computeFeeTimeBucketRangeFromExponent(randomExponent, baseRate, bucketMultiplier) - txnPool := _testGetRandomMempoolTxns(rand, feeMin, feeMax, timestampRange, testCases) + txnPool := _testGetRandomMempoolTxns(rand, feeMin, feeMax, 1000, timestampRange, testCases) // Create new FeeBucket and add the txn pool bucketFeeMin, bucketFeeMax := computeFeeTimeBucketRangeFromExponent(randomExponent, baseRate, bucketMultiplier) @@ -161,13 +366,14 @@ func _testGetDefaultGlobalParams() *GlobalParamsEntry { return &globalParams } -func _testGetRandomMempoolTxns(rand *rand.Rand, feeMin uint64, feeMax 
uint64, timestampRange uint64, numTxns int) []*MempoolTx { +func _testGetRandomMempoolTxns(rand *rand.Rand, feeMin uint64, feeMax uint64, sizeMax uint64, timestampRange uint64, numTxns int) []*MempoolTx { txnPool := []*MempoolTx{} for ii := 0; ii < numTxns; ii++ { txnPool = append(txnPool, &MempoolTx{ - FeePerKB: rand.Uint64()%(feeMax-feeMin) + feeMin, - Added: time.UnixMicro(int64(rand.Uint64() % timestampRange)), - Hash: NewBlockHash(RandomBytes(32)), + FeePerKB: rand.Uint64()%(feeMax-feeMin) + feeMin, + Added: time.UnixMicro(int64(rand.Uint64() % timestampRange)), + Hash: NewBlockHash(RandomBytes(32)), + TxSizeBytes: 1 + rand.Uint64()%sizeMax, }) } return txnPool From d347aac51b0327990d2fb79534a58a4b6550273e Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 25 Jul 2023 13:00:09 -0400 Subject: [PATCH 160/762] Add block timestamps to epoch entry and epoch completed hook (#616) * Add block timestamps to epoch entry and epoch completed hook * Populate timestamp on epoch complete hook * Update comment for new TimeStampNanoSecs field * Rename new field comment * Fix word in comment --- lib/network.go | 5 +++++ lib/pos_epoch.go | 26 ++++++++++++++++++++++---- lib/pos_epoch_complete_hook.go | 11 ++++++----- lib/pos_epoch_complete_hook_test.go | 5 ++++- lib/pos_epoch_test.go | 12 +++++++++--- 5 files changed, 46 insertions(+), 13 deletions(-) diff --git a/lib/network.go b/lib/network.go index 6954ecbee..ae50520e8 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1890,6 +1890,11 @@ type MsgDeSoHeader struct { // mined. TstampSecs uint64 + // TODO: Add a new TstampNanoSecs field that will have nanosecond resolution. + // For backwards compatibility with the existing backends and frontends, we will keep + // the existing TstampSecs and populate it as TstampNanoSecs / 1e9 when decoding block + // headers from bytes. + // The height of the block this header corresponds to. 
Height uint64 diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index 6582260fe..a6ca1fb1d 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -16,12 +16,18 @@ import ( type EpochEntry struct { EpochNumber uint64 FinalBlockHeight uint64 + + // This captures the on-chain timestamp when this epoch entry was created. This does not + // represent the first block of the epoch, but rather when this epoch transition was triggered, + // at the end of the previous epoch. + CreatedAtBlockTimestampNanoSecs uint64 } func (epochEntry *EpochEntry) Copy() *EpochEntry { return &EpochEntry{ - EpochNumber: epochEntry.EpochNumber, - FinalBlockHeight: epochEntry.FinalBlockHeight, + EpochNumber: epochEntry.EpochNumber, + FinalBlockHeight: epochEntry.FinalBlockHeight, + CreatedAtBlockTimestampNanoSecs: epochEntry.CreatedAtBlockTimestampNanoSecs, } } @@ -29,6 +35,7 @@ func (epochEntry *EpochEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipM var data []byte data = append(data, UintToBuf(epochEntry.EpochNumber)...) data = append(data, UintToBuf(epochEntry.FinalBlockHeight)...) + data = append(data, UintToBuf(epochEntry.CreatedAtBlockTimestampNanoSecs)...) return data } @@ -47,6 +54,12 @@ func (epochEntry *EpochEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *b return errors.Wrapf(err, "EpochEntry.Decode: Problem reading FinalBlockHeight: ") } + // CreatedAtBlockTimestampNanoSecs + epochEntry.CreatedAtBlockTimestampNanoSecs, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "EpochEntry.Decode: Problem reading CreatedAtBlockTimestampNanoSecs: ") + } + return err } @@ -87,8 +100,13 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { // case prior to the first execution of the OnEpochCompleteHook. // // TODO: Should FinalBlockHeight be ProofOfStake1StateSetupBlockHeight for epoch 0? - // The fork height is exactly when epoch 0 ends. Epoch 1 begins at the following height. 
- return &EpochEntry{EpochNumber: 0, FinalBlockHeight: math.MaxUint64}, nil + // The fork height is exactly when epoch 0 ends. Epoch 1 begins at the next height. + genesisEpochEntry := &EpochEntry{ + EpochNumber: 0, + FinalBlockHeight: math.MaxUint64, + CreatedAtBlockTimestampNanoSecs: 0, + } + return genesisEpochEntry, nil } func (bav *UtxoView) GetCurrentEpochNumber() (uint64, error) { diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index fc3117def..8e48c31e0 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -49,7 +49,7 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) // Step 3: Roll over to the next epoch. // - Compute the final block height for the next epoch. // - Update CurrentEpochEntry to the next epoch's. -func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { +func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, blockTimestampNanoSecs uint64) error { // Sanity-check that the current block is the last block in the current epoch. // // Note that this will also return true if we're currently at the ProofOfStake1StateSetupBlockHeight @@ -82,7 +82,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64) error { } // Step 3: Roll Over to The Next Epoch - if err := bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight); err != nil { + if err := bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight, blockTimestampNanoSecs); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: ") } @@ -136,7 +136,7 @@ func (bav *UtxoView) runEpochCompleteSnapshotGeneration(epochNumber uint64) erro } // Updates the currentEpochEntry to the next epoch's. 
-func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeight uint64) error { +func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeight uint64, blockTimestampNanoSecs uint64) error { // Retrieve the SnapshotGlobalParamsEntry. snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() if err != nil { @@ -151,8 +151,9 @@ func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeig // Roll-over a new epoch by setting a new CurrentEpochEntry. nextEpochEntry := &EpochEntry{ - EpochNumber: epochNumber + 1, - FinalBlockHeight: nextEpochFinalBlockHeight, + EpochNumber: epochNumber + 1, + FinalBlockHeight: nextEpochFinalBlockHeight, + CreatedAtBlockTimestampNanoSecs: blockTimestampNanoSecs, } bav._setCurrentEpochEntry(nextEpochEntry) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index b6341e9a9..0a9c0d085 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -74,6 +74,8 @@ func TestRunEpochCompleteHook(t *testing.T) { // We build the testMeta obj after mining blocks so that we save the correct block height. blockHeight := uint64(chain.blockTip().Height) + 1 + blockTimestampNanoSecs := (chain.blockTip().Header.TstampSecs * 1e9) + 1e9 // 1 second after the last block. 
+ testMeta := &TestMeta{ t: t, chain: chain, @@ -142,7 +144,8 @@ func TestRunEpochCompleteHook(t *testing.T) { _runOnEpochCompleteHook := func() { tmpUtxoView := utxoView() blockHeight += 1 - require.NoError(t, tmpUtxoView.RunEpochCompleteHook(blockHeight)) + blockTimestampNanoSecs += 1e9 + require.NoError(t, tmpUtxoView.RunEpochCompleteHook(blockHeight, blockTimestampNanoSecs)) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) } diff --git a/lib/pos_epoch_test.go b/lib/pos_epoch_test.go index 9d3f58ecb..c73a5fca3 100644 --- a/lib/pos_epoch_test.go +++ b/lib/pos_epoch_test.go @@ -1,9 +1,10 @@ package lib import ( - "github.com/stretchr/testify/require" "math" "testing" + + "github.com/stretchr/testify/require" ) func TestCurrentEpoch(t *testing.T) { @@ -13,6 +14,7 @@ func TestCurrentEpoch(t *testing.T) { // Initialize blockchain. chain, params, db := NewLowDifficultyBlockchain(t) blockHeight := uint64(chain.blockTip().Height) + 1 + blockTimestampNanoSecs := uint64(chain.blockTip().Header.TstampSecs)*1e9 + 1e9 utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) @@ -33,8 +35,9 @@ func TestCurrentEpoch(t *testing.T) { // Set the CurrentEpoch. epochEntry = &EpochEntry{ - EpochNumber: 1, - FinalBlockHeight: blockHeight + 5, + EpochNumber: 1, + FinalBlockHeight: blockHeight + 5, + CreatedAtBlockTimestampNanoSecs: blockTimestampNanoSecs + 5*1e9, } utxoView._setCurrentEpochEntry(epochEntry) require.NoError(t, utxoView.FlushToDb(blockHeight)) @@ -45,6 +48,7 @@ func TestCurrentEpoch(t *testing.T) { require.NotNil(t, epochEntry) require.Equal(t, epochEntry.EpochNumber, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) + require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) // Test that the CurrentEpoch is flushed from the UtxoView. 
require.Nil(t, utxoView.CurrentEpochEntry) @@ -55,12 +59,14 @@ func TestCurrentEpoch(t *testing.T) { require.NotNil(t, epochEntry) require.Equal(t, epochEntry.EpochNumber, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) + require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) // Test that the CurrentEpoch is set in the UtxoView. epochEntry = utxoView.CurrentEpochEntry require.NotNil(t, epochEntry) require.Equal(t, epochEntry.EpochNumber, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) + require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) // Delete CurrentEpoch from the UtxoView. utxoView.CurrentEpochEntry = nil From 28477c05e6d67f550ac1a4ff0b9788d6e9b8da32 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 25 Jul 2023 13:23:59 -0400 Subject: [PATCH 161/762] Add Global Param for Staking Rewards APY (#614) * Add Global Param for Staking Rewards Interest Rate * Clean up comments * Migrate to APY --- lib/block_view.go | 6 ++++++ lib/block_view_types.go | 13 +++++++++++++ lib/constants.go | 11 +++++++++++ lib/pos_snapshot_entries.go | 3 +++ 4 files changed, 33 insertions(+) diff --git a/lib/block_view.go b/lib/block_view.go index 33857c167..6ff7beecd 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3049,6 +3049,12 @@ func (bav *UtxoView) _connectUpdateGlobalParams( return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode StakingRewardsMaxNumStakes as uint64") } } + if len(extraData[StakingRewardsAPYBasisPointsKey]) > 0 { + newGlobalParamsEntry.StakingRewardsAPYBasisPoints, bytesRead = Uvarint(extraData[StakingRewardsAPYBasisPointsKey]) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode StakingRewardsAPYBasisPoints as uint64") + } + } if len(extraData[EpochDurationNumBlocksKey]) > 0 { 
newGlobalParamsEntry.EpochDurationNumBlocks, bytesRead = Uvarint(extraData[EpochDurationNumBlocksKey]) if bytesRead <= 0 { diff --git a/lib/block_view_types.go b/lib/block_view_types.go index b3e45339e..91c9aa3bb 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3782,6 +3782,13 @@ type GlobalParamsEntry struct { // consensus. StakingRewardsMaxNumStakes uint64 + // StakingRewardsAPYBasisPoints determines the annual interest rate that stakers + // receive on their stake in the Proof-of-Stake consensus. Stake rewards are paid + // out at the end of every epoch based on the APY. The APY is configured as basis + // points. Example: + // - An APY of 5% corresponds to a value of 0.05 * 10000 = 500 basis points + StakingRewardsAPYBasisPoints uint64 + // EpochDurationNumBlocks is the number of blocks included in one epoch. EpochDurationNumBlocks uint64 @@ -3804,6 +3811,7 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { LeaderScheduleMaxNumValidators: gp.LeaderScheduleMaxNumValidators, ValidatorSetMaxNumValidators: gp.ValidatorSetMaxNumValidators, StakingRewardsMaxNumStakes: gp.StakingRewardsMaxNumStakes, + StakingRewardsAPYBasisPoints: gp.StakingRewardsAPYBasisPoints, EpochDurationNumBlocks: gp.EpochDurationNumBlocks, JailInactiveValidatorGracePeriodEpochs: gp.JailInactiveValidatorGracePeriodEpochs, } @@ -3826,6 +3834,7 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.LeaderScheduleMaxNumValidators)...) data = append(data, UintToBuf(gp.ValidatorSetMaxNumValidators)...) data = append(data, UintToBuf(gp.StakingRewardsMaxNumStakes)...) + data = append(data, UintToBuf(gp.StakingRewardsAPYBasisPoints)...) data = append(data, UintToBuf(gp.EpochDurationNumBlocks)...) data = append(data, UintToBuf(gp.JailInactiveValidatorGracePeriodEpochs)...) 
} @@ -3882,6 +3891,10 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading StakingRewardsMaxNumStakes: ") } + gp.StakingRewardsAPYBasisPoints, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading StakingRewardsAPYBasisPoints: ") + } gp.EpochDurationNumBlocks, err = ReadUvarint(rr) if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading EpochDurationNumBlocks: ") diff --git a/lib/constants.go b/lib/constants.go index 971df1b31..747d9b8a1 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -674,6 +674,10 @@ type DeSoParams struct { // that are included in the staking reward distribution in each epoch. DefaultStakingRewardsMaxNumStakes uint64 + // DefaultStakingRewardsAPYBasisPoints is the default scaled interest rate + // that is applied to all stake entries in the staking reward distribution in each epoch. + DefaultStakingRewardsAPYBasisPoints uint64 + // DefaultEpochDurationNumBlocks is the default number of blocks included in one epoch. DefaultEpochDurationNumBlocks uint64 @@ -1077,6 +1081,9 @@ var DeSoMainnetParams = DeSoParams{ // The max number of stakes included in a staking rewards distribution every epoch. DefaultStakingRewardsMaxNumStakes: uint64(10000), + // Staking reward APY is defaulted to 0% to be safe. + DefaultStakingRewardsAPYBasisPoints: uint64(0), + // The number of blocks in one epoch DefaultEpochDurationNumBlocks: uint64(3600), @@ -1332,6 +1339,9 @@ var DeSoTestnetParams = DeSoParams{ // The max number of stakes included in a staking rewards distribution every epoch. DefaultStakingRewardsMaxNumStakes: uint64(10000), + // Staking reward APY is defaulted to 0% to be safe. 
+ DefaultStakingRewardsAPYBasisPoints: uint64(0), + // The number of blocks in one epoch DefaultEpochDurationNumBlocks: uint64(3600), @@ -1387,6 +1397,7 @@ const ( LeaderScheduleMaxNumValidatorsKey = "LeaderScheduleMaxNumValidators" ValidatorSetMaxNumValidatorsKey = "ValidatorSetMaxNumValidators" StakingRewardsMaxNumStakesKey = "StakingRewardsMaxNumStakes" + StakingRewardsAPYBasisPointsKey = "StakingRewardsAPYBasisPoints" EpochDurationNumBlocksKey = "EpochDurationNumBlocks" JailInactiveValidatorGracePeriodEpochsKey = "JailInactiveValidatorGracePeriodEpochs" diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 5df397b20..7b07b9120 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -104,6 +104,9 @@ func _mergeGlobalParamEntryDefaults(bav *UtxoView, globalParamsEntry *GlobalPara if globalParamsEntryCopy.StakingRewardsMaxNumStakes == 0 { globalParamsEntryCopy.StakingRewardsMaxNumStakes = bav.Params.DefaultStakingRewardsMaxNumStakes } + if globalParamsEntryCopy.StakingRewardsAPYBasisPoints == 0 { + globalParamsEntryCopy.StakingRewardsAPYBasisPoints = bav.Params.DefaultStakingRewardsAPYBasisPoints + } if globalParamsEntryCopy.EpochDurationNumBlocks == 0 { globalParamsEntryCopy.EpochDurationNumBlocks = bav.Params.DefaultEpochDurationNumBlocks } From 1d0ea29d32f257d221ac51c17a200f81389a4917 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Tue, 25 Jul 2023 10:35:37 -0700 Subject: [PATCH 162/762] PoS Mempool Balance Ledger (#599) * Revert "Split transaction register changes" This reverts commit 4798856f05e8a23098269f67d6d26077fed8b8d1. 
* Split Balance Ledger * Renames; Function signature changes * Comments; Add test * Balance ledger review * Nits * Nit --- lib/pos_mempool_ledger.go | 110 +++++++++++++++++++++++++++++++++ lib/pos_mempool_ledger_test.go | 52 ++++++++++++++++ 2 files changed, 162 insertions(+) create mode 100644 lib/pos_mempool_ledger.go create mode 100644 lib/pos_mempool_ledger_test.go diff --git a/lib/pos_mempool_ledger.go b/lib/pos_mempool_ledger.go new file mode 100644 index 000000000..45d9ea1f9 --- /dev/null +++ b/lib/pos_mempool_ledger.go @@ -0,0 +1,110 @@ +package lib + +import ( + "github.com/pkg/errors" + "math" + "sync" +) + +// BalanceLedger is a simple in-memory ledger of balances for user public keys. The balances in the ledger can be +// increased or decreased, as long as user's new balance doesn't exceed the user's total max balance. +type BalanceLedger struct { + sync.RWMutex + + // Map of public keys to balances. + balances map[PublicKey]uint64 +} + +func NewBalanceLedger() *BalanceLedger { + return &BalanceLedger{ + balances: make(map[PublicKey]uint64), + } +} + +// CanIncreaseBalance checks if the user's balance can be increased by the given amount. If the user's balance + amount +// is less or equal than the provided maxBalance, the increase is allowed. Otherwise, an error is returned. +func (bl *BalanceLedger) CanIncreaseBalance(publicKey PublicKey, amount uint64, maxBalance uint64) error { + bl.RLock() + defer bl.RUnlock() + + balance, exists := bl.balances[publicKey] + + // Check for balance overflow. + if exists && amount > math.MaxUint64-balance { + return errors.Errorf("CanIncreaseBalance: balance overflow") + } + + newBalance := balance + amount + if newBalance > maxBalance { + return errors.Errorf("CanIncreaseBalance: Not enough balance to cover txn fees "+ + "(newBalance: %d, maxBalance: %d)", newBalance, maxBalance) + } + return nil +} + +// CanDecreaseBalance checks if the user's balance can be decreased by the given amount. 
If the user's balance is +// greater or equal to the amount, the decrease is allowed. Otherwise, an error is returned. +func (bl *BalanceLedger) CanDecreaseBalance(publicKey PublicKey, amountNanos uint64) error { + bl.RLock() + defer bl.RUnlock() + + balance, exists := bl.balances[publicKey] + if !exists { + return errors.Errorf("CanDecreaseBalance: No balance for public key") + } + if amountNanos > balance { + return errors.Errorf("CanDecreaseBalance: Amount exceeds current balance") + } + return nil +} + +// IncreaseBalance increases the user's balance by the given amount. CanIncreaseBalance should be called before +// calling this function to ensure the increase is allowed. +func (bl *BalanceLedger) IncreaseBalance(publicKey PublicKey, amount uint64) { + bl.Lock() + defer bl.Unlock() + + balance, _ := bl.balances[publicKey] + // Check for balance overflow. + if amount > math.MaxUint64-balance { + bl.balances[publicKey] = math.MaxUint64 + return + } + + bl.balances[publicKey] = balance + amount +} + +// DecreaseBalance decreases the user's balance by the given amount. CanDecreaseBalance should be called before +// calling this function to ensure the decrease is allowed. +func (bl *BalanceLedger) DecreaseBalance(publicKey PublicKey, amount uint64) { + bl.Lock() + defer bl.Unlock() + + balance, exists := bl.balances[publicKey] + if !exists { + return + } + // Check for balance underflow. + if amount > balance { + delete(bl.balances, publicKey) + return + } + + bl.balances[publicKey] = balance - amount +} + +// GetBalance returns the user's balance. 
+func (bl *BalanceLedger) GetBalance(publicKey PublicKey) uint64 { + bl.RLock() + defer bl.RUnlock() + + balance, _ := bl.balances[publicKey] + return balance +} + +func (bl *BalanceLedger) Reset() { + bl.Lock() + defer bl.Unlock() + + bl.balances = make(map[PublicKey]uint64) +} diff --git a/lib/pos_mempool_ledger_test.go b/lib/pos_mempool_ledger_test.go new file mode 100644 index 000000000..f11ab8f11 --- /dev/null +++ b/lib/pos_mempool_ledger_test.go @@ -0,0 +1,52 @@ +package lib + +import ( + "github.com/stretchr/testify/require" + "math" + "testing" +) + +func TestBalanceLedger(t *testing.T) { + require := require.New(t) + + pk0 := *NewPublicKey(m0PkBytes) + pk1 := *NewPublicKey(m1PkBytes) + pk2 := *NewPublicKey(m2PkBytes) + + // Sanity-check some balance increase and decreases for pk0 + balanceLedger := NewBalanceLedger() + require.NoError(balanceLedger.CanIncreaseBalance(pk0, 100, 100)) + require.NoError(balanceLedger.CanIncreaseBalance(pk0, 0, 100)) + balanceLedger.IncreaseBalance(pk0, 100) + require.Equal(uint64(100), balanceLedger.GetBalance(pk0)) + require.NoError(balanceLedger.CanIncreaseBalance(pk0, 0, 100)) + require.Error(balanceLedger.CanIncreaseBalance(pk0, 1, 100)) + require.Error(balanceLedger.CanIncreaseBalance(pk0, 0, 99)) + require.Error(balanceLedger.CanIncreaseBalance(pk0, math.MaxUint64, math.MaxUint64)) + require.NoError(balanceLedger.CanDecreaseBalance(pk0, 100)) + require.NoError(balanceLedger.CanDecreaseBalance(pk0, 0)) + require.Error(balanceLedger.CanDecreaseBalance(pk0, 101)) + require.Error(balanceLedger.CanDecreaseBalance(pk0, math.MaxUint64)) + balanceLedger.DecreaseBalance(pk0, 100) + require.Equal(uint64(0), balanceLedger.GetBalance(pk0)) + balanceLedger.IncreaseBalance(pk0, 10) + require.Equal(uint64(10), balanceLedger.GetBalance(pk0)) + balanceLedger.DecreaseBalance(pk0, 100) + require.Equal(uint64(0), balanceLedger.GetBalance(pk0)) + balanceLedger.IncreaseBalance(pk0, 100) + + // Increase balance for pk1 and pk2 a couple of 
times + balanceLedger.IncreaseBalance(pk1, 100) + balanceLedger.IncreaseBalance(pk2, 100) + balanceLedger.DecreaseBalance(pk1, 40) + balanceLedger.IncreaseBalance(pk2, 40) + require.Equal(uint64(100), balanceLedger.GetBalance(pk0)) + require.Equal(uint64(60), balanceLedger.GetBalance(pk1)) + require.Equal(uint64(140), balanceLedger.GetBalance(pk2)) + + // Test clearing balance ledger + balanceLedger.Reset() + require.Equal(uint64(0), balanceLedger.GetBalance(pk0)) + require.Equal(uint64(0), balanceLedger.GetBalance(pk1)) + require.Equal(uint64(0), balanceLedger.GetBalance(pk2)) +} From c1a0b6eb0abbb1f3e21569476e915aab7479d49f Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Tue, 25 Jul 2023 12:54:04 -0700 Subject: [PATCH 163/762] PoS Mempool Transaction Validation (#600) * Revert "Split Balance Ledger" This reverts commit d45efdda924b670057d1e30f4db151f7a4bda89f. * Split transaction validation * Add tests * Nits * Fix SafeUint64 addition --- lib/network.go | 24 +++++ lib/validation_deso_txn.go | 166 ++++++++++++++++++++++++++++++++ lib/validation_deso_txn_test.go | 135 ++++++++++++++++++++++++++ 3 files changed, 325 insertions(+) create mode 100644 lib/validation_deso_txn.go create mode 100644 lib/validation_deso_txn_test.go diff --git a/lib/network.go b/lib/network.go index 6954ecbee..1c564ad26 100644 --- a/lib/network.go +++ b/lib/network.go @@ -3779,6 +3779,30 @@ func (msg *MsgDeSoTxn) UnmarshalJSON(data []byte) error { return nil } +// ComputeFeeRatePerKBNanos computes the fee rate per KB for a signed transaction. This function should not be used for +// unsigned transactions because the fee rate will not be accurate. 
+func (txn *MsgDeSoTxn) ComputeFeeRatePerKBNanos() (uint64, error) { + if txn.Signature.Sign == nil { + return 0, fmt.Errorf("ComputeFeeRatePerKBNanos: Cannot compute fee rate for unsigned txn") + } + + txBytes, err := txn.ToBytes(false) + if err != nil { + return 0, errors.Wrapf(err, "ComputeFeeRatePerKBNanos: Problem converting txn to bytes") + } + serializedLen := uint64(len(txBytes)) + if serializedLen == 0 { + return 0, fmt.Errorf("ComputeFeeRatePerKBNanos: Txn has zero length") + } + + fees := txn.TxnFeeNanos + if fees != ((fees * 1000) / 1000) { + return 0, errors.Wrapf(RuleErrorOverflowDetectedInFeeRateCalculation, "ComputeFeeRatePerKBNanos: Overflow detected in fee rate calculation") + } + + return (fees * 1000) / serializedLen, nil +} + // ================================================================== // BasicTransferMetadata // ================================================================== diff --git a/lib/validation_deso_txn.go b/lib/validation_deso_txn.go new file mode 100644 index 000000000..e7f2ad83d --- /dev/null +++ b/lib/validation_deso_txn.go @@ -0,0 +1,166 @@ +package lib + +import ( + "bytes" + "fmt" + "github.com/pkg/errors" +) + +// ValidateDeSoTxnSanityBalanceModel performs a variety of sanity checks to ensure transaction is correctly formatted +// under the balance model. The test checks pretty much everything, except validating the transaction's signature or +// that the transaction is valid given a BlockView. 
+func ValidateDeSoTxnSanityBalanceModel(txn *MsgDeSoTxn, blockHeight uint64, + params *DeSoParams, globalParams *GlobalParamsEntry) error { + + // Validate encoding + if err := ValidateDeSoTxnEncoding(txn, params); err != nil { + return errors.Wrapf(err, "ValidateDeSoTxnSanityBalanceModel: ") + } + // Validate transaction metadata + if err := ValidateDeSoTxnMetadata(txn); err != nil { + return errors.Wrapf(err, "ValidateDeSoTxnSanityBalanceModel: ") + } + // Validate transaction Hash + if err := ValidateDeSoTxnHash(txn); err != nil { + return errors.Wrapf(err, "ValidateTransactionSanityBalanceModel: ") + } + // Validate public key + if err := ValidateDeSoTxnPublicKey(txn); err != nil { + return errors.Wrapf(err, "ValidateDeSoTxnSanityBalanceModel: ") + } + // Validate transaction is above network's minimal fee + if err := ValidateDeSoTxnMinimalNetworkFee(txn, globalParams); err != nil { + return errors.Wrapf(err, "ValidateDeSoTxnSanityBalanceModel: ") + } + // Validate transaction is properly formatted according to the balance model + if err := ValidateDeSoTxnFormatBalanceModel(txn, blockHeight, globalParams); err != nil { + return errors.Wrapf(err, "ValidateDeSoTxnSanityBalanceModel: ") + } + return nil +} + +// ValidateDeSoTxnEncoding validates that the transaction encoding works as expected. 
+func ValidateDeSoTxnEncoding(txn *MsgDeSoTxn, params *DeSoParams) error { + // Validate transaction to/from bytes encoding + txnBytes, err := txn.ToBytes(false) + if err != nil { + return fmt.Errorf("ValidateDeSoTxnEncoding: Problem encoding transaction: %v", err) + } + dummyTxn := &MsgDeSoTxn{} + err = dummyTxn.FromBytes(txnBytes) + if err != nil { + return fmt.Errorf("ValidateDeSoTxnEncoding: Problem decoding transaction: %v", err) + } + reTxnBytes, err := dummyTxn.ToBytes(false) + if err != nil { + return fmt.Errorf("ValidateDeSoTxnEncoding: Problem re-encoding transaction: %v", err) + } + if !bytes.Equal(txnBytes, reTxnBytes) { + return fmt.Errorf("ValidateDeSoTxnEncoding: Transaction bytes are not equal: %v, %v", txnBytes, reTxnBytes) + } + + // TODO: Do we want a separate parameter for transaction size? Should it be a part of GlobalDeSoParams? + // Validate transaction size + if uint64(len(txnBytes)) > params.MaxBlockSizeBytes/2 { + return errors.Wrapf(RuleErrorTxnTooBig, "ValidateDeSoTxnEncoding: Transaction size %d is greater than "+ + "MaxBlockSizeBytes/2 %d", len(txnBytes), params.MaxBlockSizeBytes/2) + } + return nil +} + +// ValidateDeSoTxnMetadata validates that the transaction metadata is correctly formatted. +func ValidateDeSoTxnMetadata(txn *MsgDeSoTxn) error { + // Validate that the transaction has correct metadata + if txn.TxnMeta == nil { + return fmt.Errorf("ValidateDeSoTxnMetadata: Transaction is missing TxnMeta") + } + if _, err := NewTxnMetadata(txn.TxnMeta.GetTxnType()); err != nil { + return errors.Wrapf(err, "ValidateDeSoTxnMetadata: Problem parsing TxnType") + } + return nil +} + +// ValidateDeSoTxnHash validates that the transaction hash is correctly computed. 
+func ValidateDeSoTxnHash(txn *MsgDeSoTxn) error { + // Validate transaction hash + if txn.Hash() == nil { + return fmt.Errorf("ValidateDeSoTxnHash: Problem computing tx hash") + } + return nil +} + +// ValidateDeSoTxnPublicKey validates that the transaction public key is correctly formatted. +func ValidateDeSoTxnPublicKey(txn *MsgDeSoTxn) error { + // Validate public key + if err := IsByteArrayValidPublicKey(txn.PublicKey); err != nil { + return errors.Wrapf(err, "ValidateDeSoTxnPublicKey: Problem with public key") + } + return nil +} + +// ValidateDeSoTxnFormatBalanceModel validates that the transaction is correctly formatted according to the balance model. +func ValidateDeSoTxnFormatBalanceModel(txn *MsgDeSoTxn, blockHeight uint64, globalParams *GlobalParamsEntry) error { + var err error + + // Validate transaction version + if txn.TxnVersion == DeSoTxnVersion0 { + return fmt.Errorf("ValidateDeSoTxnFormatBalanceModel: DeSoTxnVersion0 is outdated in balance model") + } + + if txn.TxnNonce == nil { + return errors.Wrapf(TxErrorNoNonceAfterBalanceModelBlockHeight, "ValidateDeSoTxnFormatBalanceModel: Transaction "+ + "does not have a nonce.") + } + if txn.TxnNonce.ExpirationBlockHeight < blockHeight { + return errors.Wrapf(TxErrorNonceExpired, "ValidateDeSoTxnFormatBalanceModel: Transaction nonce has expired") + } + if globalParams.MaxNonceExpirationBlockHeightOffset != 0 && + txn.TxnNonce.ExpirationBlockHeight > blockHeight+globalParams.MaxNonceExpirationBlockHeightOffset { + return errors.Wrapf(TxErrorNonceExpirationBlockHeightOffsetExceeded, "ValidateDeSoTxnFormatBalanceModel: Transaction "+ + "nonce expiration block height offset exceeded") + } + + // Verify inputs/outputs. + if len(txn.TxInputs) != 0 { + return errors.Wrapf(RuleErrorBalanceModelDoesNotUseUTXOInputs, "ValidateDeSoTxnFormatBalanceModel: Balance model "+ + "transactions should not have any inputs") + } + + // Loop through the outputs and do a few sanity checks. 
+ var totalOutNanos uint64 + for _, txout := range txn.TxOutputs { + // Check that each output's amount is not bigger than the max as a + // sanity check. + if txout.AmountNanos > MaxNanos { + return errors.Wrapf(RuleErrorOutputExceedsMax, "ValidateDeSoTxnFormatBalanceModel: Output amount %d "+ + "exceeds max %d", txout.AmountNanos, MaxNanos) + } + // Check that this output doesn't overflow the total as a sanity + // check. This is frankly impossible since our maximum limit is + // not close to the max size of a uint64 but check it nevertheless. + if totalOutNanos, err = SafeUint64().Add(totalOutNanos, txout.AmountNanos); err != nil { + return errors.Wrapf(RuleErrorOutputOverflowsTotal, "ValidateDeSoTxnFormatBalanceModel: Output amount %d "+ + "overflows total %d", txout.AmountNanos, totalOutNanos) + } + // Check that the total isn't bigger than the max supply. + if totalOutNanos > MaxNanos { + return errors.Wrapf(RuleErrorTotalOutputExceedsMax, "ValidateDeSoTxnFormatBalanceModel: Total output "+ + "amount %d exceeds max %d", totalOutNanos, MaxNanos) + } + } + return nil +} + +// ValidateDeSoTxnMinimalNetworkFee validates that the transaction is above the network's minimal fee. 
+func ValidateDeSoTxnMinimalNetworkFee(txn *MsgDeSoTxn, globalParams *GlobalParamsEntry) error { + // Verify the transaction fee + feeNanosPerKb, err := txn.ComputeFeeRatePerKBNanos() + if err != nil { + return errors.Wrapf(err, "ValidateDeSoTxnMinimalNetworkFee: Problem computing fee per KB") + } + if feeNanosPerKb < globalParams.MinimumNetworkFeeNanosPerKB { + return errors.Wrapf(RuleErrorTxnFeeBelowNetworkMinimum, "ValidateDeSoTxnMinimalNetworkFee: Transaction fee "+ + "per KB %d is less than the network minimum %d", feeNanosPerKb, globalParams.MinimumNetworkFeeNanosPerKB) + } + return nil +} diff --git a/lib/validation_deso_txn_test.go b/lib/validation_deso_txn_test.go new file mode 100644 index 000000000..3b4bce6cd --- /dev/null +++ b/lib/validation_deso_txn_test.go @@ -0,0 +1,135 @@ +package lib + +import ( + "encoding/hex" + "github.com/stretchr/testify/require" + "math" + "testing" +) + +var testTransaction1HexString = "00000583010000727b22426f6479223a2249206d6973732074686f736520646179733b206d6179626520746865792077696c6c20636f6d6520616761696e2e5c6e405768616c65536861726b4554482040496e657669746162696c69747920406469616d6f6e6468616e647320404469616d6f6e64564320227de807d461dcfd9d89be90e2b917002103eed65faac1cb3e8790ab268ebde15da0f207e95ddd88bdfc2b25be0b30e2173a040d456d626564566964656f55524c000f497351756f7465645265636c6f75740101044e6f64650134115265636c6f75746564506f7374486173682080cb73b4308808cb7bf9e4657cc906742ee8fb31312fcf867214ad8c0817138e473145022100ceb6c9d2f16cc06cdd48130ecc8de1a18efc2759df4cc3ddccbc8aeb836309db0220458055db0c8c42ed3468e9d6204352907fe64b149c601b2267c8c4dbfacfc8360194238fae0f9dbd9c9e86f7f0bc34" +var testTransaction2HexString = 
"000102a7d230b752ae31861d6e405fba00260c6c9b6222642705549e5e96ad7e08cd70d00f0548203c449682b1ab2f16cc9c91c95b2f022449983588c8903fd4a7a07407d01491aa00177b22426f6479223a226578706563746174696f6e73227de807d461e1e7e8cda9fedfb917012103ee83d9a6e3f18c443fdc2e3c1d900fd97839fc118a929ed219282da9ce460b1f020f497351756f7465645265636c6f75740101115265636c6f75746564506f73744861736820f030c92536a92545a2ae5d1f2028a881d5989d7ef338053384621c7ad5255991463044022061974a8b6ce1eb337a9e3eeb149073384d4f8762e8eb22d1d2719eed7fd91c9a02207166fdc39b91d99d934edfb865634925dd744fdd7ddd8431d4517a330f6d2df101b902dead0fc6e28aa8e8d7b3e4d701" +var testTransaction3HexString = "000005c0022028b53cc989e26e7762c71042aec7de4b5f8c52f10a5dd6c72e0894ae6dac3f60008e027b22426f6479223a224973206974206a757374206d65206f7220616e796f6e6520656c73652067657474696e672074686973206c61796f757420746f6f3f5c6e4d79206c65667468616e64207461627320617265206d6f76696e672061726f756e6420616e6420626c6f636b2065616368206f746865722e5c6e49747320737570657220616e6e6f79696e6720406469616d6f6e6420407a6f72646f6e20222c22496d61676555524c73223a5b2268747470733a2f2f696d616765732e6465736f2e6f72672f363430396133613065663264666237393831373162643061373530376634643031373933643036336230336165346161616562616136303763393166353230362e77656270225d7de807d461abb7e6a99bffc3b9170021034584aab2cd71d14ec4709015244ed62d58bca0bdb511631838566567b1bef733020d456d626564566964656f55524c00044e6f64650134473045022100a4380b72c94f016781e9fbe87071c83038f8f007da874ec7bc5f9bd075e0047002202e52facbd641f182c7b3a2627e213260f2f8803a2a9f427b6266f6e218add31f01e003b6aa0fa487d6a3f3ab86a4ff01" + +func decodeTestTxns(t *testing.T) []*MsgDeSoTxn { + require := require.New(t) + + tx1Bytes, err := hex.DecodeString(testTransaction1HexString) + require.NoError(err) + tx2Bytes, err := hex.DecodeString(testTransaction2HexString) + require.NoError(err) + tx3Bytes, err := hex.DecodeString(testTransaction3HexString) + require.NoError(err) + + tx1 := &MsgDeSoTxn{} + require.NoError(tx1.FromBytes(tx1Bytes)) + tx2 
:= &MsgDeSoTxn{} + require.NoError(tx2.FromBytes(tx2Bytes)) + tx3 := &MsgDeSoTxn{} + require.NoError(tx3.FromBytes(tx3Bytes)) + + return []*MsgDeSoTxn{tx1, tx2, tx3} +} + +func TestValidateDeSoTxnEncoding(t *testing.T) { + require := require.New(t) + + params := DeSoTestnetParams + txns := decodeTestTxns(t) + + for _, txn := range txns { + require.NoError(ValidateDeSoTxnEncoding(txn, ¶ms)) + } + + params.MaxBlockSizeBytes = 0 + for _, txn := range txns { + require.Contains(ValidateDeSoTxnEncoding(txn, ¶ms).Error(), RuleErrorTxnTooBig) + } +} + +func TestValidateDeSoTxnMetadata(t *testing.T) { + require := require.New(t) + + txns := decodeTestTxns(t) + + for _, txn := range txns { + require.NoError(ValidateDeSoTxnMetadata(txn)) + } +} + +func TestValidateDeSoTxnHash(t *testing.T) { + require := require.New(t) + + txns := decodeTestTxns(t) + + for _, txn := range txns { + require.NoError(ValidateDeSoTxnHash(txn)) + } +} + +func TestValidateDeSoTxnPublicKey(t *testing.T) { + require := require.New(t) + + txns := decodeTestTxns(t) + + for _, txn := range txns { + require.NoError(ValidateDeSoTxnPublicKey(txn)) + } +} + +func TestValidateDeSoTxnFormatBalanceModel(t *testing.T) { + require := require.New(t) + + // txns have expiration block heights equal to: + // 251663, 251614, 251190 + txns := decodeTestTxns(t) + blockHeight := uint64(251000) + globalParams := InitialGlobalParamsEntry + globalParams.MaxNonceExpirationBlockHeightOffset = 1000 + + for _, txn := range txns { + require.NoError(ValidateDeSoTxnFormatBalanceModel(txn, blockHeight, &globalParams)) + } + + globalParams.MaxNonceExpirationBlockHeightOffset = 1 + for _, txn := range txns { + require.Contains(ValidateDeSoTxnFormatBalanceModel(txn, blockHeight, &globalParams).Error(), TxErrorNonceExpirationBlockHeightOffsetExceeded) + } +} + +func TestValidateDeSoTxnMinimalNetworkFee(t *testing.T) { + require := require.New(t) + + txns := decodeTestTxns(t) + globalParams := InitialGlobalParamsEntry + + for _, txn := 
range txns { + require.NoError(ValidateDeSoTxnMinimalNetworkFee(txn, &globalParams)) + } + + globalParams.MinimumNetworkFeeNanosPerKB = math.MaxUint64 + for _, txn := range txns { + require.Contains(ValidateDeSoTxnMinimalNetworkFee(txn, &globalParams).Error(), RuleErrorTxnFeeBelowNetworkMinimum) + } + + txn1 := txns[0] + txn1.TxnFeeNanos = math.MaxUint64 / 999 + require.Contains(ValidateDeSoTxnMinimalNetworkFee(txn1, &globalParams).Error(), RuleErrorOverflowDetectedInFeeRateCalculation) + + txn2 := txns[1] + txn2.Signature.Sign = nil + require.Error(ValidateDeSoTxnMinimalNetworkFee(txn2, &globalParams)) +} + +func TestValidateDeSoTxnSanityBalanceModel(t *testing.T) { + require := require.New(t) + + txns := decodeTestTxns(t) + blockHeight := uint64(251000) + params := DeSoTestnetParams + globalParams := InitialGlobalParamsEntry + + for _, txn := range txns { + require.NoError(ValidateDeSoTxnSanityBalanceModel(txn, blockHeight, ¶ms, &globalParams)) + } +} From 4ad1eb1fa11fa2135a0c4d825fc5e2df3101ff9a Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 26 Jul 2023 11:02:53 -0400 Subject: [PATCH 164/762] Pull out common setup logic out of end of epoch test (#620) * Refactor end of epoch unit test * Remove reward distribution checks --- lib/pos_epoch_complete_hook_test.go | 422 +++++++++++++--------------- 1 file changed, 196 insertions(+), 226 deletions(-) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 0a9c0d085..a6649a6af 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -53,39 +53,9 @@ func TestIsLastBlockInCurrentEpoch(t *testing.T) { } func TestRunEpochCompleteHook(t *testing.T) { - // Initialize balance model fork heights. - setBalanceModelBlockHeights(t) - - // Initialize test chain and miner. 
- chain, params, db := NewLowDifficultyBlockchain(t) - mempool, miner := NewTestMiner(t, chain, params, true) - - // Initialize PoS fork heights. - params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) - params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = uint32(1) - GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) - GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - - // Mine a few blocks to give the senderPkString some money. - for ii := 0; ii < 10; ii++ { - _, err := miner.MineAndProcessSingleBlock(0, mempool) - require.NoError(t, err) - } - - // We build the testMeta obj after mining blocks so that we save the correct block height. - blockHeight := uint64(chain.blockTip().Height) + 1 - blockTimestampNanoSecs := (chain.blockTip().Header.TstampSecs * 1e9) + 1e9 // 1 second after the last block. - testMeta := &TestMeta{ - t: t, - chain: chain, - params: params, - db: db, - mempool: mempool, - miner: miner, - savedHeight: uint32(blockHeight), - feeRateNanosPerKb: uint64(101), - } + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForEpochCompleteTest(t) _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) @@ -96,106 +66,39 @@ func TestRunEpochCompleteHook(t *testing.T) { _registerOrTransferWithTestMeta(testMeta, "m6", senderPkString, m6Pub, senderPrivString, 1e3) _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) - m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID - m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID - m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID - m3PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m3PkBytes).PKID - m4PKID := 
DBGetPKIDEntryForPublicKey(db, chain.snapshot, m4PkBytes).PKID - m5PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m5PkBytes).PKID - m6PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m6PkBytes).PKID - validatorPKIDs := []*PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} - - // Helper utils - utxoView := func() *UtxoView { - newUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) - require.NoError(t, err) - return newUtxoView - } - - _registerAndStake := func(publicKey string, privateKey string, stakeAmountNanos uint64, restakeRewards bool) { - // Convert PublicKeyBase58Check to PublicKeyBytes. - pkBytes, _, err := Base58CheckDecode(publicKey) - require.NoError(t, err) - - // Validator registers. - votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, pkBytes) - registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, - VotingPublicKey: votingPublicKey, - VotingAuthorization: votingAuthorization, - } - _, err = _submitRegisterAsValidatorTxn(testMeta, publicKey, privateKey, registerMetadata, nil, true) - require.NoError(t, err) - - rewardMethod := StakingRewardMethodPayToBalance - if restakeRewards { - rewardMethod = StakingRewardMethodRestake - } + m0PKID := DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m0PkBytes).PKID + m1PKID := DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m1PkBytes).PKID + m2PKID := DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m2PkBytes).PKID + m3PKID := DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m3PkBytes).PKID + m4PKID := DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m4PkBytes).PKID + m5PKID := DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m5PkBytes).PKID + m6PKID := DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m6PkBytes).PKID - stakeMetadata := &StakeMetadata{ - 
ValidatorPublicKey: NewPublicKey(pkBytes), - RewardMethod: rewardMethod, - StakeAmountNanos: uint256.NewInt().SetUint64(stakeAmountNanos), - } - _, err = _submitStakeTxn(testMeta, publicKey, privateKey, stakeMetadata, nil, true) - require.NoError(t, err) - } + validatorPKIDs := []*PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} - _runOnEpochCompleteHook := func() { - tmpUtxoView := utxoView() + blockHeight := uint64(testMeta.chain.blockTip().Height) + 1 + incrBlockHeight := func() uint64 { blockHeight += 1 - blockTimestampNanoSecs += 1e9 - require.NoError(t, tmpUtxoView.RunEpochCompleteHook(blockHeight, blockTimestampNanoSecs)) - require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) - } - - _assertEmptyValidatorSnapshots := func() { - // Test SnapshotValidatorByPKID is nil. - for _, pkid := range validatorPKIDs { - snapshotValidatorSetEntry, err := utxoView().GetSnapshotValidatorSetEntryByPKID(pkid) - require.NoError(t, err) - require.Nil(t, snapshotValidatorSetEntry) - } - - // Test GetSnapshotValidatorSetByStakeAmount is empty. - validatorEntries, err := utxoView().GetSnapshotValidatorSetByStakeAmount(10) - require.NoError(t, err) - require.Empty(t, validatorEntries) - - // Test SnapshotValidatorSetTotalStakeAmountNanos is zero. - snapshotValidatorSetTotalStakeAmountNanos, err := utxoView().GetSnapshotValidatorSetTotalStakeAmountNanos() - require.NoError(t, err) - require.True(t, snapshotValidatorSetTotalStakeAmountNanos.IsZero()) - - // Test SnapshotLeaderSchedule is nil. - for index := range validatorPKIDs { - snapshotLeaderScheduleValidator, err := utxoView().GetSnapshotLeaderScheduleValidator(uint16(index)) - require.NoError(t, err) - require.Nil(t, snapshotLeaderScheduleValidator) - } - } - - _assertEmptyStakeSnapshots := func() { - // Test GetSnapshotStakesToRewardByStakeAmount is empty. 
- stakeEntries, err := utxoView().GetSnapshotStakesToRewardByStakeAmount(10) - require.NoError(t, err) - require.Empty(t, stakeEntries) + return blockHeight } // Seed a CurrentEpochEntry. - tmpUtxoView := utxoView() + tmpUtxoView := _newUtxoView(testMeta) tmpUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 0, FinalBlockHeight: blockHeight + 1}) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) // For these tests, we set each epoch duration to only one block. - params.DefaultEpochDurationNumBlocks = uint64(1) + testMeta.params.DefaultEpochDurationNumBlocks = uint64(1) + + // We set the default staking rewards APY to 10% + testMeta.params.DefaultStakingRewardsAPYBasisPoints = uint64(1000) { // ParamUpdater set MinFeeRateNanos, ValidatorJailEpochDuration, // and JailInactiveValidatorGracePeriodEpochs. - params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - require.Zero(t, utxoView().GlobalParamsEntry.MinimumNetworkFeeNanosPerKB) - require.Zero(t, utxoView().GlobalParamsEntry.ValidatorJailEpochDuration) + testMeta.params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + require.Zero(t, _newUtxoView(testMeta).GlobalParamsEntry.MinimumNetworkFeeNanosPerKB) + require.Zero(t, _newUtxoView(testMeta).GlobalParamsEntry.ValidatorJailEpochDuration) _updateGlobalParamsEntryWithExtraData( testMeta, @@ -208,108 +111,108 @@ func TestRunEpochCompleteHook(t *testing.T) { }, ) - require.Equal(t, utxoView().GlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) - require.Equal(t, utxoView().GlobalParamsEntry.ValidatorJailEpochDuration, uint64(4)) + require.Equal(t, _newUtxoView(testMeta).GlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) + require.Equal(t, _newUtxoView(testMeta).GlobalParamsEntry.ValidatorJailEpochDuration, uint64(4)) // We need to reset the UniversalUtxoView since the RegisterAsValidator and Stake // txn test helper utils use and flush the UniversalUtxoView. 
Otherwise, the // updated GlobalParamsEntry will be overwritten by the default one cached in // the UniversalUtxoView when it is flushed. - mempool.universalUtxoView._ResetViewMappingsAfterFlush() + testMeta.mempool.universalUtxoView._ResetViewMappingsAfterFlush() } { // Test the state of the snapshots prior to running our first OnEpochCompleteHook. // Test CurrentEpochNumber. - currentEpochNumber, err := utxoView().GetCurrentEpochNumber() + currentEpochNumber, err := _newUtxoView(testMeta).GetCurrentEpochNumber() require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(0)) // Test SnapshotGlobalParamsEntry is non-nil and contains the default values. - snapshotGlobalParamsEntry, err := utxoView().GetSnapshotGlobalParamsEntry() + snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) require.Equal(t, snapshotGlobalParamsEntry.ValidatorJailEpochDuration, uint64(3)) - _assertEmptyValidatorSnapshots() + _assertEmptyValidatorSnapshots(testMeta) - _assertEmptyStakeSnapshots() + _assertEmptyStakeSnapshots(testMeta) } { // Test RunOnEpochCompleteHook() with no validators or stakers. - _runOnEpochCompleteHook() + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { // Test the state of the snapshots after running our first OnEpochCompleteHook // but with no existing validators or stakers. // Test CurrentEpochNumber. - currentEpochNumber, err := utxoView().GetCurrentEpochNumber() + currentEpochNumber, err := _newUtxoView(testMeta).GetCurrentEpochNumber() require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(1)) // Test SnapshotGlobalParamsEntry is nil. 
- snapshotGlobalParamsEntry, err := utxoView().GetSnapshotGlobalParamsEntry() + snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) - require.Equal(t, utxoView().GlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) + require.Equal(t, _newUtxoView(testMeta).GlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) require.Equal(t, snapshotGlobalParamsEntry.ValidatorJailEpochDuration, uint64(4)) - _assertEmptyValidatorSnapshots() + _assertEmptyValidatorSnapshots(testMeta) - _assertEmptyStakeSnapshots() + _assertEmptyStakeSnapshots(testMeta) } { // All validators register + stake to themselves. - _registerAndStake(m0Pub, m0Priv, 100, false) - _registerAndStake(m1Pub, m1Priv, 200, false) - _registerAndStake(m2Pub, m2Priv, 300, false) - _registerAndStake(m3Pub, m3Priv, 400, false) - _registerAndStake(m4Pub, m4Priv, 500, false) - _registerAndStake(m5Pub, m5Priv, 600, false) - _registerAndStake(m6Pub, m6Priv, 700, false) - - validatorEntries, err := utxoView().GetTopActiveValidatorsByStakeAmount(10) + _registerValidatorAndStake(testMeta, m0Pub, m0Priv, 100, false) + _registerValidatorAndStake(testMeta, m1Pub, m1Priv, 200, false) + _registerValidatorAndStake(testMeta, m2Pub, m2Priv, 300, false) + _registerValidatorAndStake(testMeta, m3Pub, m3Priv, 400, false) + _registerValidatorAndStake(testMeta, m4Pub, m4Priv, 500, false) + _registerValidatorAndStake(testMeta, m5Pub, m5Priv, 600, false) + _registerValidatorAndStake(testMeta, m6Pub, m6Priv, 700, false) + + validatorEntries, err := _newUtxoView(testMeta).GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) require.Len(t, validatorEntries, 7) - stakeEntries, err := utxoView().GetTopStakesByStakeAmount(10) + stakeEntries, err := _newUtxoView(testMeta).GetTopStakesByStakeAmount(10) require.NoError(t, err) require.Len(t, stakeEntries, 7) } { // Test RunOnEpochCompleteHook(). 
- _runOnEpochCompleteHook() + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { // Test CurrentEpochNumber. - currentEpochNumber, err := utxoView().GetCurrentEpochNumber() + currentEpochNumber, err := _newUtxoView(testMeta).GetCurrentEpochNumber() require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(2)) // Test SnapshotGlobalParamsEntry is populated. - snapshotGlobalParamsEntry, err := utxoView().GetSnapshotGlobalParamsEntry() + snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) require.Equal(t, snapshotGlobalParamsEntry.ValidatorJailEpochDuration, uint64(4)) - _assertEmptyValidatorSnapshots() + _assertEmptyValidatorSnapshots(testMeta) - _assertEmptyStakeSnapshots() + _assertEmptyStakeSnapshots(testMeta) } { // Test RunOnEpochCompleteHook(). - _runOnEpochCompleteHook() + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { // Test CurrentEpochNumber. - currentEpochNumber, err := utxoView().GetCurrentEpochNumber() + currentEpochNumber, err := _newUtxoView(testMeta).GetCurrentEpochNumber() require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(3)) // Test SnapshotGlobalParamsEntry is populated. - snapshotGlobalParamsEntry, err := utxoView().GetSnapshotGlobalParamsEntry() + snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) @@ -317,13 +220,13 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test SnapshotValidatorByPKID is populated. 
for _, pkid := range validatorPKIDs { - snapshotValidatorSetEntry, err := utxoView().GetSnapshotValidatorSetEntryByPKID(pkid) + snapshotValidatorSetEntry, err := _newUtxoView(testMeta).GetSnapshotValidatorSetEntryByPKID(pkid) require.NoError(t, err) require.NotNil(t, snapshotValidatorSetEntry) } // Test GetSnapshotValidatorSetByStakeAmount is populated. - validatorEntries, err := utxoView().GetSnapshotValidatorSetByStakeAmount(10) + validatorEntries, err := _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) require.Len(t, validatorEntries, 7) require.Equal(t, validatorEntries[0].ValidatorPKID, m6PKID) @@ -332,19 +235,19 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, validatorEntries[6].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) // Test SnapshotValidatorSetTotalStakeAmountNanos is populated. - snapshotValidatorSetTotalStakeAmountNanos, err := utxoView().GetSnapshotValidatorSetTotalStakeAmountNanos() + snapshotValidatorSetTotalStakeAmountNanos, err := _newUtxoView(testMeta).GetSnapshotValidatorSetTotalStakeAmountNanos() require.NoError(t, err) require.Equal(t, snapshotValidatorSetTotalStakeAmountNanos, uint256.NewInt().SetUint64(2800)) // Test SnapshotLeaderSchedule is populated. for index := range validatorPKIDs { - snapshotLeaderScheduleValidator, err := utxoView().GetSnapshotLeaderScheduleValidator(uint16(index)) + snapshotLeaderScheduleValidator, err := _newUtxoView(testMeta).GetSnapshotLeaderScheduleValidator(uint16(index)) require.NoError(t, err) require.NotNil(t, snapshotLeaderScheduleValidator) } // Test GetSnapshotStakesToRewardByStakeAmount is populated. 
- snapshotStakeEntries, err := utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + snapshotStakeEntries, err := _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(10) require.NoError(t, err) require.Len(t, snapshotStakeEntries, 7) require.Equal(t, snapshotStakeEntries[0].StakerPKID, m6PKID) @@ -356,45 +259,45 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test snapshotting changing stake. // m5 has 600 staked. - validatorEntry, err := utxoView().GetValidatorByPKID(m5PKID) + validatorEntry, err := _newUtxoView(testMeta).GetValidatorByPKID(m5PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(600)) // m5 stakes another 200. - _registerAndStake(m5Pub, m5Priv, 200, false) + _registerValidatorAndStake(testMeta, m5Pub, m5Priv, 200, false) // m5 has 800 staked. - validatorEntry, err = utxoView().GetValidatorByPKID(m5PKID) + validatorEntry, err = _newUtxoView(testMeta).GetValidatorByPKID(m5PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(800)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook() + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // Snapshot m5 still has 600 staked. 
- validatorEntry, err = utxoView().GetSnapshotValidatorSetEntryByPKID(m5PKID) + validatorEntry, err = _newUtxoView(testMeta).GetSnapshotValidatorSetEntryByPKID(m5PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(600)) - snapshotStakeEntries, err := utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + snapshotStakeEntries, err := _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(10) require.NoError(t, err) require.Len(t, snapshotStakeEntries, 7) require.Equal(t, snapshotStakeEntries[1].StakerPKID, m5PKID) require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(600)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook() + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // Snapshot m5 now has 800 staked. - validatorEntry, err = utxoView().GetSnapshotValidatorSetEntryByPKID(m5PKID) + validatorEntry, err = _newUtxoView(testMeta).GetSnapshotValidatorSetEntryByPKID(m5PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(800)) - snapshotStakeEntries, err = utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + snapshotStakeEntries, err = _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(10) require.NoError(t, err) require.Len(t, snapshotStakeEntries, 7) require.Equal(t, snapshotStakeEntries[0].StakerPKID, m5PKID) @@ -404,7 +307,7 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test snapshotting changing GlobalParams. // Update StakeLockupEpochDuration from default of 3 to 2. 
- snapshotGlobalsParamsEntry, err := utxoView().GetSnapshotGlobalParamsEntry() + snapshotGlobalsParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() require.NoError(t, err) require.Equal(t, snapshotGlobalsParamsEntry.StakeLockupEpochDuration, uint64(3)) @@ -416,21 +319,21 @@ func TestRunEpochCompleteHook(t *testing.T) { map[string][]byte{StakeLockupEpochDurationKey: UintToBuf(2)}, ) - require.Equal(t, utxoView().GlobalParamsEntry.StakeLockupEpochDuration, uint64(2)) + require.Equal(t, _newUtxoView(testMeta).GlobalParamsEntry.StakeLockupEpochDuration, uint64(2)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook() + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // Snapshot StakeLockupEpochDuration is still 3. - snapshotGlobalsParamsEntry, err = utxoView().GetSnapshotGlobalParamsEntry() + snapshotGlobalsParamsEntry, err = _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() require.NoError(t, err) require.Equal(t, snapshotGlobalsParamsEntry.StakeLockupEpochDuration, uint64(3)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook() + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // Snapshot StakeLockupEpochDuration is updated to 2. - snapshotGlobalsParamsEntry, err = utxoView().GetSnapshotGlobalParamsEntry() + snapshotGlobalsParamsEntry, err = _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() require.NoError(t, err) require.Equal(t, snapshotGlobalsParamsEntry.StakeLockupEpochDuration, uint64(2)) } @@ -438,7 +341,7 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test snapshotting changing validator set. // m0 unregisters as a validator. - snapshotValidatorSet, err := utxoView().GetSnapshotValidatorSetByStakeAmount(10) + snapshotValidatorSet, err := _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) require.Len(t, snapshotValidatorSet, 7) @@ -446,74 +349,36 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) // Run OnEpochCompleteHook(). 
- _runOnEpochCompleteHook() + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // m0 is still in the snapshot validator set. - snapshotValidatorSet, err = utxoView().GetSnapshotValidatorSetByStakeAmount(10) + snapshotValidatorSet, err = _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) require.Len(t, snapshotValidatorSet, 7) - snapshotStakeEntries, err := utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + snapshotStakeEntries, err := _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(10) require.NoError(t, err) require.Len(t, snapshotStakeEntries, 7) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook() + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // m0 is dropped from the snapshot validator set. - snapshotValidatorSet, err = utxoView().GetSnapshotValidatorSetByStakeAmount(10) + snapshotValidatorSet, err = _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) require.Len(t, snapshotValidatorSet, 6) - snapshotStakeEntries, err = utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + snapshotStakeEntries, err = _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(10) require.NoError(t, err) require.Len(t, snapshotStakeEntries, 6) } { - // Test staking rewards distribution with restaking enabled. - - // m6 now has a 14333333578 nano balance from staking rewards so far. - balance, err := utxoView().GetDeSoBalanceNanosForPublicKey(m6PkBytes) - require.NoError(t, err) - require.Equal(t, balance, uint64(14333333578)) - // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook() - - // m6 now has 16747126681 after the most recent's epoch's staking rewards. - balance, err = utxoView().GetDeSoBalanceNanosForPublicKey(m6PkBytes) - require.NoError(t, err) - require.Equal(t, balance, uint64(16747126681)) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { - // Test staking rewards distribution with restaking enabled. - - // m6 has 700 nanos staked. 
- stakeEntry, err := utxoView().GetStakeEntry(m6PKID, m6PKID) - require.NoError(t, err) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(700)) - - // m6 enables restaking. - _registerAndStake(m6Pub, m6Priv, 0, true) - - // m6's wallet balance is 16747126627 after they submit their stake transaction. - balance, err := utxoView().GetDeSoBalanceNanosForPublicKey(m6PkBytes) - require.NoError(t, err) - require.Equal(t, balance, uint64(0x3e634df63)) - - // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook() - - // m6 has 2413793803 staked now a after their staking rewards were restaked. - stakeEntry, err = utxoView().GetStakeEntry(m6PKID, m6PKID) - require.NoError(t, err) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(2413793803)) - - // m6's wallet balance has not changed from has 16747126627 now that their rewards - // were restaked. - balance, err = utxoView().GetDeSoBalanceNanosForPublicKey(m6PkBytes) - require.NoError(t, err) - require.Equal(t, balance, uint64(0x3e634df63)) + // Run OnEpochCompleteHook() + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { // Test jailing inactive validators. @@ -529,37 +394,37 @@ func TestRunEpochCompleteHook(t *testing.T) { // Define helper utils. 
getCurrentEpochNumber := func() int { - currentEpochNumber, err := utxoView().GetCurrentEpochNumber() + currentEpochNumber, err := _newUtxoView(testMeta).GetCurrentEpochNumber() require.NoError(t, err) return int(currentEpochNumber) } getNumCurrentActiveValidators := func() int { - validatorEntries, err := utxoView().GetTopActiveValidatorsByStakeAmount(10) + validatorEntries, err := _newUtxoView(testMeta).GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) return len(validatorEntries) } getNumSnapshotValidatorSet := func() int { - snapshotValidatorSet, err := utxoView().GetSnapshotValidatorSetByStakeAmount(10) + snapshotValidatorSet, err := _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(10) require.NoError(t, err) return len(snapshotValidatorSet) } getCurrentValidator := func(validatorPKID *PKID) *ValidatorEntry { - validatorEntry, err := utxoView().GetValidatorByPKID(validatorPKID) + validatorEntry, err := _newUtxoView(testMeta).GetValidatorByPKID(validatorPKID) require.NoError(t, err) return validatorEntry } getNumStakes := func() int { - stakeEntries, err := utxoView().GetTopStakesByStakeAmount(10) + stakeEntries, err := _newUtxoView(testMeta).GetTopStakesByStakeAmount(10) require.NoError(t, err) return len(stakeEntries) } getNumSnapshotStakes := func() int { - snapshotStakeEntries, err := utxoView().GetSnapshotStakesToRewardByStakeAmount(10) + snapshotStakeEntries, err := _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(10) require.NoError(t, err) return len(snapshotStakeEntries) } @@ -572,7 +437,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getNumSnapshotStakes(), 6) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook() + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // In epoch 12, all current registered validators have Status = Jailed. // In snapshot 10, all snapshot validators have Status = Active. 
@@ -586,7 +451,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getCurrentValidator(m6PKID).JailedAtEpochNumber, uint64(11)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook() + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // In epoch 13, all current registered validators have Status = Jailed. // In snapshot 11, the validator set is empty because all validators have Status = Jailed. @@ -597,7 +462,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Empty(t, getNumSnapshotStakes()) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook() + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // In epoch 14, all current registered validators have Status = Jailed. // In snapshot 12, the validator set is empty because all validators have Status = Jailed. @@ -609,3 +474,108 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Empty(t, getNumSnapshotStakes()) } } + +func _setUpMinerAndTestMetaForEpochCompleteTest(t *testing.T) *TestMeta { + // Initialize balance model fork heights. + setBalanceModelBlockHeights(t) + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Initialize PoS fork heights. + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err := miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. 
+ blockHeight := uint64(chain.blockTip().Height) + 1 + + return &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } +} + +func _registerValidatorAndStake(testMeta *TestMeta, publicKey string, privateKey string, stakeAmountNanos uint64, restakeRewards bool) { + // Convert PublicKeyBase58Check to PublicKeyBytes. + pkBytes, _, err := Base58CheckDecode(publicKey) + require.NoError(testMeta.t, err) + + // Validator registers. + votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(testMeta.t, pkBytes) + registerMetadata := &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, + } + _, err = _submitRegisterAsValidatorTxn(testMeta, publicKey, privateKey, registerMetadata, nil, true) + require.NoError(testMeta.t, err) + + rewardMethod := StakingRewardMethodPayToBalance + if restakeRewards { + rewardMethod = StakingRewardMethodRestake + } + + stakeMetadata := &StakeMetadata{ + ValidatorPublicKey: NewPublicKey(pkBytes), + RewardMethod: rewardMethod, + StakeAmountNanos: uint256.NewInt().SetUint64(stakeAmountNanos), + } + _, err = _submitStakeTxn(testMeta, publicKey, privateKey, stakeMetadata, nil, true) + require.NoError(testMeta.t, err) +} + +func _newUtxoView(testMeta *TestMeta) *UtxoView { + newUtxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(testMeta.t, err) + return newUtxoView +} + +func _runOnEpochCompleteHook(testMeta *TestMeta, blockHeight uint64) { + tmpUtxoView := _newUtxoView(testMeta) + // Set blockTimestampNanoSecs to 1 year * block height. 
Every time the block height increments, + // the timestamp increases by 1 year + blockTimestampNanoSecs := blockHeight * 365 * 24 * 3600 * 1e9 + require.NoError(testMeta.t, tmpUtxoView.RunEpochCompleteHook(blockHeight, blockTimestampNanoSecs)) + require.NoError(testMeta.t, tmpUtxoView.FlushToDb(blockHeight)) +} + +func _assertEmptyValidatorSnapshots(testMeta *TestMeta) { + // Test GetSnapshotValidatorSetByStakeAmount is empty. + validatorEntries, err := _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(100) + require.NoError(testMeta.t, err) + require.Empty(testMeta.t, validatorEntries) + + // Test SnapshotValidatorSetTotalStakeAmountNanos is zero. + snapshotValidatorSetTotalStakeAmountNanos, err := _newUtxoView(testMeta).GetSnapshotValidatorSetTotalStakeAmountNanos() + require.NoError(testMeta.t, err) + require.True(testMeta.t, snapshotValidatorSetTotalStakeAmountNanos.IsZero()) + + // Test SnapshotLeaderSchedule is nil. + for index := range validatorEntries { + snapshotLeaderScheduleValidator, err := _newUtxoView(testMeta).GetSnapshotLeaderScheduleValidator(uint16(index)) + require.NoError(testMeta.t, err) + require.Nil(testMeta.t, snapshotLeaderScheduleValidator) + } +} + +func _assertEmptyStakeSnapshots(testMeta *TestMeta) { + // Test GetSnapshotStakesToRewardByStakeAmount is empty. 
+ stakeEntries, err := _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(100) + require.NoError(testMeta.t, err) + require.Empty(testMeta.t, stakeEntries) +} From f14138112c44cb0a8675df99c263c2748984eec4 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 26 Jul 2023 14:17:48 -0400 Subject: [PATCH 165/762] Deprecate old placeholder staking reward distribution logic (#621) --- lib/pos_staking_rewards.go | 116 ------------------------------------- 1 file changed, 116 deletions(-) diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index fe698df5d..633613c22 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -1,121 +1,5 @@ package lib -import ( - "github.com/holiman/uint256" - "github.com/pkg/errors" -) - func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64) error { - // Check if we have switched from PoW to PoS yet. If we have not, then the PoS consensus - // has not started yet. We don't want to distribute any staking rewards until the PoS consensus begins. - if blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { - return nil - } - - // Retrieve the SnapshotGlobalParamsEntry. - snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() - if err != nil { - return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving SnapshotGlobalParamsEntry: ") - } - - totalStakingRewards := bav._placeholderGetStakingRewardsPerEpoch() - - // If the total rewards to pay out are zero, then there's nothing to be done. Exit early here. - if totalStakingRewards.IsZero() { - return nil - } - - // Reward all snapshotted stakes from the current snapshot validator set. This is an O(n) operation - // that loops through all of the snapshotted stakes and rewards them. 
- snapshotStakesToReward, err := bav.GetSnapshotStakesToRewardByStakeAmount(snapshotGlobalParamsEntry.StakingRewardsMaxNumStakes) - if err != nil { - return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving snapshot stakes to reward: ") - } - - // If there are no stakes to reward, then there's nothing to be done. Exit early here. - if len(snapshotStakesToReward) == 0 { - return nil - } - - // Compute the total stake amount of all snapshot stakes, so we can determine the proportion of each - // staker's staked amount to the total. - snapshotStakesTotalStakeAmount := uint256.NewInt() - for _, snapshotStakeEntry := range snapshotStakesToReward { - snapshotStakesTotalStakeAmount.Add(snapshotStakesTotalStakeAmount, snapshotStakeEntry.StakeAmountNanos) - } - - // Check if the sum of all of the stakes is zero. In practice this should never happen because it's not - // possible for a staker to stake zero DESO. We check it here to make this code more resilient, in case - // that assumption ever changes elsewhere in the codebase. - if snapshotStakesTotalStakeAmount.IsZero() { - return nil - } - - // Loop through all of the snapshot stakes and reward them. - for _, snapshotStakeEntry := range snapshotStakesToReward { - rewardAmount := _computeStakingRewardAmount( - snapshotStakeEntry.StakeAmountNanos, - snapshotStakesTotalStakeAmount, - totalStakingRewards, - ) - if rewardAmount.IsZero() { - continue - } - - // At this point, we know that the staker has non-zero rewards. We need to determine how to - // distribute the rewards to them. We need to fetch their latest StakeEntry to determine - // whether they want to restake their rewards or not. - - // Fetch the staker's latest StakeEntry. 
- stakeEntry, err := bav.GetStakeEntry(snapshotStakeEntry.ValidatorPKID, snapshotStakeEntry.StakerPKID) - if err != nil { - return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem fetching staker's StakeEntry: ") - } - - // At this point, there are three possible cases: - // 1. The staker still exists and wants to restake their rewards. - // 2. The staker still exists and does not want to restake their rewards. - // 3. The staker has unstaked since the snapshot was taken. They no longer have a - // StakeEntry. Their stake is currently in lockup. - - // For case 1, we distribute the rewards by adding them to the staker's staked amount. - if stakeEntry != nil && stakeEntry.RewardMethod == StakingRewardMethodRestake { - stakeEntry.StakeAmountNanos.Add(stakeEntry.StakeAmountNanos, rewardAmount) - bav._setStakeEntryMappings(stakeEntry) - - continue - } - - // For cases 2 and 3, the staker no longer wants their rewards restaked. The staker is still - // eligible to receive rewards because the validator they had staked to was part of the validator - // set for the snapshot epoch. Their stake at the time was used to secure the network. - - stakerPublicKey := bav.GetPublicKeyForPKID(snapshotStakeEntry.StakerPKID) - if _, err = bav._addBalance(rewardAmount.Uint64(), stakerPublicKey); err != nil { - return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem adding rewards to staker's DESO balance: ") - } - } - return nil } - -// This function is a placeholder that rewards a constant 10 DESO in staking rewards per epoch. -// The staking rewards will be a function of the burn maximizing fee, which has not been -// implemented yet. -// -// TODO: Replace this function once BMF and staking rewards math are complete. 
-func (bav *UtxoView) _placeholderGetStakingRewardsPerEpoch() *uint256.Int { - return uint256.NewInt().SetUint64(1e10) -} - -// _computeRewardAmount uses integer math to compute the reward amount for each staker rounded down to -// the nearest DESO nano. -func _computeStakingRewardAmount( - stakeAmount *uint256.Int, - totalStakeAmount *uint256.Int, - totalStakingRewards *uint256.Int, -) *uint256.Int { - output := uint256.NewInt() - output.Mul(stakeAmount, totalStakingRewards) - return output.Div(output, totalStakeAmount) -} From 54c95aa2bd8c2f9295c3a96f897859ddf6e0e699 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 26 Jul 2023 17:45:34 -0400 Subject: [PATCH 166/762] Naming, comment, and error message clean up for snapshotting changes (#622) * Clean up naming and error messages on JailAllInactiveSnapshotValidators * More comments * Add comments for DB flushes * More cleanup --- lib/block_view_stake.go | 9 ++++++--- lib/block_view_validator.go | 13 ++++++++----- lib/network.go | 6 +++--- lib/pos_epoch.go | 7 ++++--- lib/pos_epoch_complete_hook.go | 10 ++++++++-- lib/pos_leader_schedule.go | 3 ++- lib/pos_snapshot_entries.go | 11 ++++++++++- 7 files changed, 41 insertions(+), 18 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 44d79744d..1a9b5a866 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -685,8 +685,10 @@ func DBGetTopStakesByStakeAmount( stakeMapKeysToSkip.Add(stakeEntryToSkip.ToMapKey()) } - // Define a function to filter out ValidatorPKID-StakerPKID pairs that we want to skip - // while seeking through the DB. + // Define a function to filter out ValidatorPKID-StakerPKID pairs that we want to skip while + // seeking through the DB. 
We can't simply pass in the exact keys from the UtxoView that we + // need to skip through because it's possible that the stake entries (and their stake amounts) + // have changed in the UtxoView, and no longer match the stake amounts in the DB used to index them. canSkipValidatorPKIDAndStakerPKIDInBadgerSeek := func(badgerKey []byte) bool { // Parse both the validator PKID and staker PKID from the key. Just to be safe, we return false if // we fail to parse them. Once the seek has completed, we attempt to parse all of the same keys a @@ -961,7 +963,8 @@ func DBDeleteStakeEntryWithTxn( } // Look up the existing StakeEntry in the db using the validator and staker PKIDs. - // We need to use this stakeEntry's values to delete all corresponding indexes. + // We need to use the stakeEntry's current values from the DB to delete it from all + // indexes that store it. stakeEntry, err := DBGetStakeEntryWithTxn(txn, snap, validatorPKID, stakerPKID) if err != nil { return errors.Wrapf(err, "DBDeleteStakeEntryWithTxn: problem retrieving "+ diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index c9f0cef2b..387910639 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -644,6 +644,9 @@ func DBGetTopActiveValidatorsByStakeAmount( } // Define a function to filter out validators PKIDs we want to skip while seeking through the DB. + // We can't simply pass in the exact keys from the UtxoView that we need to skip through because + // it's possible that the validator entries (and their total stake amounts) have changed in the + // UtxoView, and no longer match the stake amounts in the DB used to index them. 
canSkipValidatorInBadgerSeek := func(badgerKey []byte) bool { validatorPKID, err := GetValidatorPKIDFromDBKeyForValidatorByStatusAndStakeAmount(badgerKey) if err != nil { @@ -1854,7 +1857,7 @@ func (bav *UtxoView) GetTopActiveValidatorsByStakeAmount(limit uint64) ([]*Valid return validatorEntries[0:upperBound], nil } -func (bav *UtxoView) JailInactiveSnapshotValidators(blockHeight uint64) error { +func (bav *UtxoView) JailAllInactiveSnapshotValidators(blockHeight uint64) error { // Check if we have switched from PoW to PoS yet. If we have not, then the PoS consensus // has not started. We don't want to jail any validators until they have had the opportunity // to participate in the consensus and are known to be inactive. @@ -1864,7 +1867,7 @@ func (bav *UtxoView) JailInactiveSnapshotValidators(blockHeight uint64) error { snapshotGlobalParams, err := bav.GetSnapshotGlobalParamsEntry() if err != nil { - return errors.Wrapf(err, "UtxoView.JailInactiveSnapshotValidators: ") + return errors.Wrapf(err, "UtxoView.JailAllInactiveSnapshotValidators: error retrieving SnapshotGlobalParamsEntry: ") } // Get the current snapshot validator set. 
These are the only validator what were @@ -1880,7 +1883,7 @@ func (bav *UtxoView) JailInactiveSnapshotValidators(blockHeight uint64) error { for _, snapshotValidatorEntry := range snapshotValidatorSet { currentValidatorEntry, err := bav.GetValidatorByPKID(snapshotValidatorEntry.ValidatorPKID) if err != nil { - return errors.Wrapf(err, "UtxoView.JailInactiveSnapshotValidators: ") + return errors.Wrapf(err, "UtxoView.JailAllInactiveSnapshotValidators: error retrieving ValidatorEntry by PKID: ") } if currentValidatorEntry == nil || currentValidatorEntry.isDeleted { @@ -1890,7 +1893,7 @@ func (bav *UtxoView) JailInactiveSnapshotValidators(blockHeight uint64) error { shouldJailValidator, err := bav.ShouldJailValidator(currentValidatorEntry, blockHeight) if err != nil { - return errors.Wrapf(err, "UtxoView.JailInactiveSnapshotValidators: ") + return errors.Wrapf(err, "UtxoView.JailAllInactiveSnapshotValidators: error checking if validator should be jailed: ") } if !shouldJailValidator { @@ -1900,7 +1903,7 @@ func (bav *UtxoView) JailInactiveSnapshotValidators(blockHeight uint64) error { // If we get here, then the validator should be jailed. if err = bav.JailValidator(currentValidatorEntry); err != nil { return errors.Wrapf( - err, "UtxoView.JailInactiveSnapshotValidators: problem jailing validator %v: ", currentValidatorEntry.ValidatorPKID, + err, "UtxoView.JailAllInactiveSnapshotValidators: error jailing validator %v: ", currentValidatorEntry.ValidatorPKID, ) } } diff --git a/lib/network.go b/lib/network.go index ae50520e8..dc0884642 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1891,9 +1891,9 @@ type MsgDeSoHeader struct { TstampSecs uint64 // TODO: Add a new TstampNanoSecs field that will have nanosecond resolution. - // For backwards compatibility with the existing backends and frontends, we will keep - // the existing TstampSecs and populate it as TstampNanoSecs / 1e9 when decoding block - // headers from bytes. 
+ // For backwards compatibility with existing backends and frontends, we will keep + // the existing TstampSecs field and populate it as TstampNanoSecs / 1e9 when decoding + // block headers from bytes. // The height of the block this header corresponds to. Height uint64 diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index a6ca1fb1d..9d558b703 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -18,8 +18,8 @@ type EpochEntry struct { FinalBlockHeight uint64 // This captures the on-chain timestamp when this epoch entry was created. This does not - // represent the first block of the epoch, but rather when this epoch transition was triggered, - // at the end of the previous epoch. + // represent the timestamp for first block of the epoch, but rather when this epoch entry + // was created during that epoch transition at the end of the previous epoch. CreatedAtBlockTimestampNanoSecs uint64 } @@ -100,7 +100,8 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { // case prior to the first execution of the OnEpochCompleteHook. // // TODO: Should FinalBlockHeight be ProofOfStake1StateSetupBlockHeight for epoch 0? - // The fork height is exactly when epoch 0 ends. Epoch 1 begins at the next height. + // The ProofOfStake1StateSetupBlockHeight fork height is exactly when epoch 0 ends. + // Epoch 1 begins at the next height. genesisEpochEntry := &EpochEntry{ EpochNumber: 0, FinalBlockHeight: math.MaxUint64, diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 8e48c31e0..46bf3e1e7 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -81,6 +81,8 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, blockTimestampNano return errors.Wrapf(err, "RunEpochCompleteHook: ") } + // TODO: Evict old snapshots when safe to do so. 
+ // Step 3: Roll Over to The Next Epoch if err := bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight, blockTimestampNanoSecs); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: ") @@ -96,7 +98,7 @@ func (bav *UtxoView) runEpochCompleteStateMutations(blockHeight uint64) error { // and jails them if they have been inactive. // // Note, this this will only run if we are past the ProofOfStake2ConsensusCutoverBlockHeight fork height. - if err := bav.JailInactiveSnapshotValidators(blockHeight); err != nil { + if err := bav.JailAllInactiveSnapshotValidators(blockHeight); err != nil { return errors.Wrapf(err, "runEpochCompleteStateMutations: problem jailing all inactive validators: ") } @@ -198,7 +200,11 @@ func (bav *UtxoView) generateAndSnapshotLeaderSchedule(epochNumber uint64) error } func (bav *UtxoView) generateAndSnapshotStakesToReward(epochNumber uint64, validatorSet []*ValidatorEntry) error { - // Fetch the current top n stake entries. + // Fetch the current top n stake entries. Note, this query will return the top n stake entries regardless of whether + // or not they are in the active validator set of the epoch. By not filtering the stake entries while seeking through + // the DB, we guarantee that we will at most seek through n stake entries. This avoids the edge cases where the global + // set of stake entries is very large, and as a result of additional filters in the seek, we end up seeking through + // the entire global set. 
topStakeEntries, err := bav.GetTopStakesByStakeAmount(bav.GetCurrentGlobalParamsEntry().StakingRewardsMaxNumStakes) if err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: error retrieving top StakeEntries: ") diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index adb8310f1..703f85941 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -14,7 +14,8 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving CurrentRandomSeedHash: ") } - // Retrieve the SnapshotGlobalParamsEntry.LeaderScheduleMaxNumValidators. + // Retrieve the SnapshotGlobalParamsEntry.LeaderScheduleMaxNumValidators. Note, we are safe to use the + // currentGlobalParamsEntry.LeaderScheduleMaxNumValidators here since we are creating a new snapshot. currentGlobalParamsEntry := bav.GetCurrentGlobalParamsEntry() // Retrieve top, active validators ordered by stake. diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 7b07b9120..9c193f078 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -341,7 +341,7 @@ func (bav *UtxoView) _flushSnapshotValidatorSetToDbWithTxn(txn *badger.Txn, bloc } } - // Set all !isDeleted SnapshotValidatorSet into the db from the UtxoView. + // Put all !isDeleted SnapshotValidatorSet entry into the db from the UtxoView. for mapKey, validatorEntry := range bav.SnapshotValidatorSet { if validatorEntry == nil { return fmt.Errorf( @@ -849,6 +849,15 @@ func DBGetSnapshotStakesToRewardByStakeAmount( } func (bav *UtxoView) _flushSnapshotStakesToRewardToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + // Note: the typical DELETE -> PUT pattern we follow in other _flush functions to update re-indexed keys + // will not work here because the SnapshotStakeToRewardByStakeAmount index has all of the information about + // the snapshot stake entry in the key. 
We would need to know the DB stakerPKID, validatorPKID, and stakeAmount + // values for snapshotStakeEntry in order to delete it from the DB. If we want to follow the DELETE -> PUT, we'll + // need to seek through ALL keys in the index and delete them first. This is not ideal. + // + // Omitting the DELETE here is fine in practice because the snapshotStakeEntries that are written when committing + // the final block of an epoch will always be identical, and are never overwritten. + for mapKey, snapshotStakeEntry := range bav.SnapshotStakesToReward { if snapshotStakeEntry == nil { return fmt.Errorf( From b872c38630199ac44b8af70fd1b5b6f14cac47ba Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 27 Jul 2023 12:19:00 -0400 Subject: [PATCH 167/762] Refactor SnapshotStakesToReward Badger Index (#623) * Refactor SnapshotStakesToReward Badger Index * Finish implementation * Cleanup and unit test fixes * Cleanup * Address Nina's feedback --- lib/block_view.go | 7 +- lib/db_utils.go | 13 +- lib/pos_epoch_complete_hook.go | 8 +- lib/pos_epoch_complete_hook_test.go | 46 +++-- lib/pos_snapshot_entries.go | 308 +++++++++++++--------------- 5 files changed, 177 insertions(+), 205 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 6ff7beecd..98debd288 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -150,9 +150,8 @@ type UtxoView struct { SnapshotLeaderSchedule map[SnapshotLeaderScheduleMapKey]*PKID // SnapshotStakesToReward is a map of - // to a SnapshotStakeEntry. It contains a trimmed down struct with the ValidatorPKID, StakerPKID, and - // StakeAmount values of a StakeEntry that has been snapshotted at a given SnapshotAtEpochNumber. - SnapshotStakesToReward map[SnapshotStakeMapKey]*SnapshotStakeEntry + // to a snapshotted StakeEntry for the ValidatorPKID and StakerPKID pair at a given SnapshotAtEpochNumber. 
+ SnapshotStakesToReward map[SnapshotStakeMapKey]*StakeEntry // The hash of the tip the view is currently referencing. Mainly used // for error-checking when doing a bulk operation on the view. @@ -268,7 +267,7 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { bav.SnapshotLeaderSchedule = make(map[SnapshotLeaderScheduleMapKey]*PKID) // SnapshotStakesToReward - bav.SnapshotStakesToReward = make(map[SnapshotStakeMapKey]*SnapshotStakeEntry) + bav.SnapshotStakesToReward = make(map[SnapshotStakeMapKey]*StakeEntry) } func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { diff --git a/lib/db_utils.go b/lib/db_utils.go index 567226cd5..3ba9e5304 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -553,10 +553,11 @@ type DBPrefixes struct { // Prefix, , -> ValidatorPKID PrefixSnapshotLeaderSchedule []byte `prefix_id:"[89]" is_state:"true"` - // PrefixSnapshotStakeToRewardByStakeAmount: Retrieves the top N StakeEntries that can receive staking rewards for an epoch. - // Prefix, , , , -> nil - // Note, we parse the ValidatorPKID and StakerPKID from the key and the value is nil to save space. - PrefixSnapshotStakeToRewardByStakeAmount []byte `prefix_id:"[90]" is_state:"true"` + // PrefixSnapshotStakeToRewardByValidatorAndStaker: Retrieves snapshotted StakeEntries that are eligible to + // receive staking rewards for an epoch. StakeEntries can be retrieved by ValidatorPKID and StakerPKID. + // Prefix, , , -> *StakeEntry + // Note, we parse the ValidatorPKID and StakerPKID from the key. 
+ PrefixSnapshotStakeToRewardByValidatorAndStaker []byte `prefix_id:"[90]" is_state:"true"` // NEXT_TAG: 91 } @@ -796,9 +797,9 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotLeaderSchedule) { // prefix_id:"[89]" return true, &PKID{} - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotStakeToRewardByStakeAmount) { + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotStakeToRewardByValidatorAndStaker) { // prefix_id:"[90]" - return false, nil + return true, &StakeEntry{} } return true, nil diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 46bf3e1e7..d07e1e732 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -222,13 +222,7 @@ func (bav *UtxoView) generateAndSnapshotStakesToReward(epochNumber uint64, valid // Snapshot only the top m stake entries that are in the validator set. for _, stakeEntry := range topStakesInValidatorSet { - snapshotStakeEntry := SnapshotStakeEntry{ - SnapshotAtEpochNumber: epochNumber, - ValidatorPKID: stakeEntry.ValidatorPKID, - StakerPKID: stakeEntry.StakerPKID, - StakeAmountNanos: stakeEntry.StakeAmountNanos, - } - bav._setSnapshotStakeToReward(&snapshotStakeEntry) + bav._setSnapshotStakeToReward(stakeEntry.Copy(), epochNumber) } return nil diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index a6649a6af..8d301d5c7 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -4,6 +4,7 @@ package lib import ( "fmt" + "sort" "testing" "github.com/holiman/uint256" @@ -247,13 +248,14 @@ func TestRunEpochCompleteHook(t *testing.T) { } // Test GetSnapshotStakesToRewardByStakeAmount is populated. 
- snapshotStakeEntries, err := _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(10) + stakeEntries, err := _newUtxoView(testMeta).GetAllSnapshotStakesToReward() require.NoError(t, err) - require.Len(t, snapshotStakeEntries, 7) - require.Equal(t, snapshotStakeEntries[0].StakerPKID, m6PKID) - require.Equal(t, snapshotStakeEntries[6].StakerPKID, m0PKID) - require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(700)) - require.Equal(t, snapshotStakeEntries[6].StakeAmountNanos, uint256.NewInt().SetUint64(100)) + _sortStakeEntriesByStakeAmount(stakeEntries) + require.Len(t, stakeEntries, 7) + require.Equal(t, stakeEntries[0].StakerPKID, m6PKID) + require.Equal(t, stakeEntries[6].StakerPKID, m0PKID) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(700)) + require.Equal(t, stakeEntries[6].StakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // Test snapshotting changing stake. @@ -282,11 +284,12 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(600)) - snapshotStakeEntries, err := _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(10) + stakeEntries, err := _newUtxoView(testMeta).GetAllSnapshotStakesToReward() require.NoError(t, err) - require.Len(t, snapshotStakeEntries, 7) - require.Equal(t, snapshotStakeEntries[1].StakerPKID, m5PKID) - require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(600)) + _sortStakeEntriesByStakeAmount(stakeEntries) + require.Len(t, stakeEntries, 7) + require.Equal(t, stakeEntries[1].StakerPKID, m5PKID) + require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(600)) // Run OnEpochCompleteHook(). 
_runOnEpochCompleteHook(testMeta, incrBlockHeight()) @@ -297,11 +300,12 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(800)) - snapshotStakeEntries, err = _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(10) + stakeEntries, err = _newUtxoView(testMeta).GetAllSnapshotStakesToReward() require.NoError(t, err) - require.Len(t, snapshotStakeEntries, 7) - require.Equal(t, snapshotStakeEntries[0].StakerPKID, m5PKID) - require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(800)) + _sortStakeEntriesByStakeAmount(stakeEntries) + require.Len(t, stakeEntries, 7) + require.Equal(t, stakeEntries[0].StakerPKID, m5PKID) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(800)) } { // Test snapshotting changing GlobalParams. @@ -356,7 +360,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Len(t, snapshotValidatorSet, 7) - snapshotStakeEntries, err := _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(10) + snapshotStakeEntries, err := _newUtxoView(testMeta).GetAllSnapshotStakesToReward() require.NoError(t, err) require.Len(t, snapshotStakeEntries, 7) @@ -368,7 +372,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Len(t, snapshotValidatorSet, 6) - snapshotStakeEntries, err = _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(10) + snapshotStakeEntries, err = _newUtxoView(testMeta).GetAllSnapshotStakesToReward() require.NoError(t, err) require.Len(t, snapshotStakeEntries, 6) } @@ -424,7 +428,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } getNumSnapshotStakes := func() int { - snapshotStakeEntries, err := _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(10) + snapshotStakeEntries, err := _newUtxoView(testMeta).GetAllSnapshotStakesToReward() require.NoError(t, err) return len(snapshotStakeEntries) } @@ 
-575,7 +579,13 @@ func _assertEmptyValidatorSnapshots(testMeta *TestMeta) { func _assertEmptyStakeSnapshots(testMeta *TestMeta) { // Test GetSnapshotStakesToRewardByStakeAmount is empty. - stakeEntries, err := _newUtxoView(testMeta).GetSnapshotStakesToRewardByStakeAmount(100) + stakeEntries, err := _newUtxoView(testMeta).GetAllSnapshotStakesToReward() require.NoError(testMeta.t, err) require.Empty(testMeta.t, stakeEntries) } + +func _sortStakeEntriesByStakeAmount(stakeEntries []*StakeEntry) { + sort.Slice(stakeEntries, func(ii, jj int) bool { + return stakeEntries[ii].StakeAmountNanos.Cmp(stakeEntries[jj].StakeAmountNanos) > 0 + }) +} diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 9c193f078..13c526cb4 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -688,188 +688,168 @@ type SnapshotStakeMapKey struct { StakerPKID PKID } -// This is a bare bones in-memory only construct used to capture the ValidatorPKID, -// StakerPKID, and StakeAmountNanos from a StakeEntry that has been snapshotted. We -// define a new type here rather than re-using the StakeEntry type to reduce the risk -// of bugs. The StakeEntry type has additional fields (ex: RewardMethod, ExtraData) -// that are not snapshotted. 
-type SnapshotStakeEntry struct { - SnapshotAtEpochNumber uint64 - StakerPKID *PKID - ValidatorPKID *PKID - StakeAmountNanos *uint256.Int +func NewSnapshotStakeMapKey(stakeEntry *StakeEntry, snapshotAtEpochNumber uint64) SnapshotStakeMapKey { + return SnapshotStakeMapKey{ + SnapshotAtEpochNumber: snapshotAtEpochNumber, + ValidatorPKID: *stakeEntry.ValidatorPKID, + StakerPKID: *stakeEntry.StakerPKID, + } } -func (s *SnapshotStakeEntry) Copy() *SnapshotStakeEntry { - return &SnapshotStakeEntry{ - SnapshotAtEpochNumber: s.SnapshotAtEpochNumber, - StakerPKID: s.StakerPKID.NewPKID(), - ValidatorPKID: s.ValidatorPKID.NewPKID(), - StakeAmountNanos: s.StakeAmountNanos.Clone(), +func (bav *UtxoView) _setSnapshotStakeToReward(stakeEntry *StakeEntry, snapshotAtEpochNumber uint64) { + if stakeEntry == nil { + glog.Errorf("_setSnapshotStakeToReward: called with nil stakeEntry") + return } + bav.SnapshotStakesToReward[NewSnapshotStakeMapKey(stakeEntry, snapshotAtEpochNumber)] = stakeEntry.Copy() } -func (s *SnapshotStakeEntry) ToMapKey() *SnapshotStakeMapKey { - return &SnapshotStakeMapKey{ - SnapshotAtEpochNumber: s.SnapshotAtEpochNumber, - ValidatorPKID: *s.ValidatorPKID, - StakerPKID: *s.StakerPKID, +// GetSnapshotStakesToReward returns all snapshotted StakeEntries that are eligible to receive staking +// rewards for the current snapshot epoch. The order of the returned entries is arbitrary. +func (bav *UtxoView) GetAllSnapshotStakesToReward() ([]*StakeEntry, error) { + snapshotGlobalParams, err := bav.GetSnapshotGlobalParamsEntry() + if err != nil { + return nil, errors.Wrapf(err, "GetAllSnapshotStakesToReward: problem calculating SnapshotEpochNumber: ") } -} -func (bav *UtxoView) _setSnapshotStakeToReward(snapshotStakeEntry *SnapshotStakeEntry) { - if snapshotStakeEntry == nil { - glog.Errorf("_setSnapshotStakeToReward: called with nil snapshotStakeEntry") - return + // If the max number of snapshot stakes is 0, then we don't need to do anything. 
+ maxNumSnapshotStakes := snapshotGlobalParams.StakingRewardsMaxNumStakes + if maxNumSnapshotStakes == 0 { + return nil, nil } - bav.SnapshotStakesToReward[*snapshotStakeEntry.ToMapKey()] = snapshotStakeEntry.Copy() -} -// GetSnapshotStakesToRewardByStakeAmount returns the top N SnapshotStakeEntries that are eligible -// to receive block rewards for the current snapshot epoch. The entries are sorted by stake amount -// in descending order. -func (bav *UtxoView) GetSnapshotStakesToRewardByStakeAmount( - limit uint64, -) ([]*SnapshotStakeEntry, error) { // Calculate the SnapshotEpochNumber. snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotStakesToRewardByStakeAmount: problem calculating SnapshotEpochNumber: ") + return nil, errors.Wrapf(err, "GetAllSnapshotStakesToReward: problem calculating SnapshotEpochNumber: ") } - // Create a slice of all UtxoView SnapshotStakeEntries to prevent pulling them from the db. - var utxoViewSnapshotStakeEntries []*SnapshotStakeEntry + // Create a slice of all UtxoView snapshot StakeEntries to prevent pulling them from the db. + var utxoViewStakeEntries []*StakeEntry for mapKey, stakeEntry := range bav.SnapshotStakesToReward { if mapKey.SnapshotAtEpochNumber == snapshotAtEpochNumber { - utxoViewSnapshotStakeEntries = append(utxoViewSnapshotStakeEntries, stakeEntry) + utxoViewStakeEntries = append(utxoViewStakeEntries, stakeEntry) } } - // Pull top N SnapshotStakeEntries from the database (not present in the UtxoView). - dbSnapshotStakeEntries, err := DBGetSnapshotStakesToRewardByStakeAmount( - bav.Handle, bav.Snapshot, limit, snapshotAtEpochNumber, utxoViewSnapshotStakeEntries, + // Pull top N snapshot StakeEntries from the database (not present in the UtxoView). 
+ dbStakeEntries, err := DBGetSnapshotStakesToReward( + bav.Handle, bav.Snapshot, maxNumSnapshotStakes, snapshotAtEpochNumber, utxoViewStakeEntries, ) if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotStakesToRewardByStakeAmount: error retrieving entries from db: ") + return nil, errors.Wrapf(err, "GetAllSnapshotStakesToReward: error retrieving entries from db: ") } - // Cache the SnapshotStakeEntries from the db in the UtxoView. - for _, snapshotStakeEntry := range dbSnapshotStakeEntries { - mapKey := snapshotStakeEntry.ToMapKey() - if _, exists := bav.SnapshotStakesToReward[*mapKey]; exists { + // Cache the snapshot StakeEntries from the db in the UtxoView. + for _, stakeEntry := range dbStakeEntries { + mapKey := NewSnapshotStakeMapKey(stakeEntry, snapshotAtEpochNumber) + if _, exists := bav.SnapshotStakesToReward[mapKey]; exists { // We should never see duplicate entries from the db that are already in the UtxoView. This is a - // sign of a bug and that the utxoViewSnapshotStakeEntries isn't being used correctly. - return nil, fmt.Errorf("GetSnapshotStakesToRewardByStakeAmount: db returned a SnapshotStakeEntry" + + // sign of a bug and that the utxoViewStakeEntries isn't being used correctly. + return nil, fmt.Errorf("GetAllSnapshotStakesToReward: db returned a snapshot StakeEntry" + " that already exists in the UtxoView") } - bav._setSnapshotStakeToReward(snapshotStakeEntry) + bav._setSnapshotStakeToReward(stakeEntry, snapshotAtEpochNumber) } - // Pull SnapshotStakeEntries from the UtxoView with stake > 0. All entries should have > 0 stake to begin + // Pull snapshot StakeEntries from the UtxoView with stake > 0. All entries should have > 0 stake to begin // with, but we filter here again just in case. 
- var mergedSnapshotStakeEntries []*SnapshotStakeEntry - for mapKey, snapshotStakeEntry := range bav.SnapshotStakesToReward { - if mapKey.SnapshotAtEpochNumber == snapshotAtEpochNumber && - !snapshotStakeEntry.StakeAmountNanos.IsZero() { - mergedSnapshotStakeEntries = append(mergedSnapshotStakeEntries, snapshotStakeEntry) + var mergedStakeEntries []*StakeEntry + for mapKey, stakeEntry := range bav.SnapshotStakesToReward { + if mapKey.SnapshotAtEpochNumber == snapshotAtEpochNumber && !stakeEntry.StakeAmountNanos.IsZero() { + mergedStakeEntries = append(mergedStakeEntries, stakeEntry) } } - // Sort the SnapshotStakeEntries DESC by StakeAmountNanos. - sort.Slice(mergedSnapshotStakeEntries, func(ii, jj int) bool { - stakeAmountCmp := mergedSnapshotStakeEntries[ii].StakeAmountNanos.Cmp( - mergedSnapshotStakeEntries[jj].StakeAmountNanos, - ) - if stakeAmountCmp != 0 { - return stakeAmountCmp > 0 - } - - validatorPKIDCmp := bytes.Compare( - mergedSnapshotStakeEntries[ii].ValidatorPKID.ToBytes(), - mergedSnapshotStakeEntries[jj].ValidatorPKID.ToBytes(), - ) - if validatorPKIDCmp != 0 { - return validatorPKIDCmp > 0 - } - - return bytes.Compare( - mergedSnapshotStakeEntries[ii].StakerPKID.ToBytes(), - mergedSnapshotStakeEntries[jj].StakerPKID.ToBytes(), - ) > 0 - }) - // Return top N. 
- upperBound := limit - if uint64(len(mergedSnapshotStakeEntries)) < upperBound { - upperBound = uint64(len(mergedSnapshotStakeEntries)) + upperBound := maxNumSnapshotStakes + if uint64(len(mergedStakeEntries)) < upperBound { + upperBound = uint64(len(mergedStakeEntries)) } - return mergedSnapshotStakeEntries[0:upperBound], nil + return mergedStakeEntries[0:upperBound], nil } -func DBGetSnapshotStakesToRewardByStakeAmount( +func DBGetSnapshotStakesToReward( handle *badger.DB, snap *Snapshot, limit uint64, snapshotAtEpochNumber uint64, - snapshotStakeEntriesToSkip []*SnapshotStakeEntry, -) ([]*SnapshotStakeEntry, error) { - var snapshotStakeEntries []*SnapshotStakeEntry - - // Convert SnapshotStakeEntriesToSkip to the StakeMapKey we need to skip. - snapshotStakeKeysToSkip := NewSet([]string{}) - for _, snapshotStakeEntryToSkip := range snapshotStakeEntriesToSkip { - snapshotStakeKeysToSkip.Add( - string(DBKeyForSnapshotStakeToRewardByStakeAmount(snapshotStakeEntryToSkip)), + stakeEntriesToSkip []*StakeEntry, +) ([]*StakeEntry, error) { + // Convert StakeEntriesToSkip to the SnapshotStakeMapKeys we need to skip. + snapshotStakeDBKeysToSkip := NewSet([]string{}) + for _, stakeEntryToSkip := range stakeEntriesToSkip { + snapshotStakeDBKeysToSkip.Add( + string(DBKeyForSnapshotStakeToRewardByValidatorAndStaker( + snapshotAtEpochNumber, + stakeEntryToSkip.ValidatorPKID, + stakeEntryToSkip.StakerPKID, + )), ) } - // Retrieve top N SnapshotStakeEntry keys by stake amount. - key := DBKeyForSnapshotStakeToRewardAtEpochNumber(snapshotAtEpochNumber) - keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrder( - handle, key, int(limit), nil, true, snapshotStakeKeysToSkip, + // Retrieve the snapshot StakeEntries from the DB. 
+ prefix := DBKeyForSnapshotStakeToRewardAtEpochNumber(snapshotAtEpochNumber) + _, valsFound, err := EnumerateKeysForPrefixWithLimitOffsetOrder( + handle, prefix, int(limit), nil, true, snapshotStakeDBKeysToSkip, ) if err != nil { - return nil, errors.Wrapf(err, "DBGetSnapshotStakesToRewardByStakeAmount:"+ + return nil, errors.Wrapf(err, "DBGetSnapshotStakesToReward:"+ " problem retrieving top stakes: ") } - // For each key found, parse the SnapshotStakeEntry from the key. - for _, keyFound := range keysFound { - snapshotStakeEntry, err := DecodeSnapshotStakeFromDBKey(keyFound) + // Decode StakeEntries from bytes. + var stakeEntries []*StakeEntry + for _, stakeEntryBytes := range valsFound { + rr := bytes.NewReader(stakeEntryBytes) + stakeEntry, err := DecodeDeSoEncoder(&StakeEntry{}, rr) if err != nil { - return nil, errors.Wrapf(err, "DBGetSnapshotStakesToRewardByStakeAmount:"+ - " problem reading SnapshotStakeEntry: ") + return nil, errors.Wrapf(err, "DBGetSnapshotStakesToReward: problem decoding StakeEntry: ") } - - snapshotStakeEntries = append(snapshotStakeEntries, snapshotStakeEntry) + stakeEntries = append(stakeEntries, stakeEntry) } - - return snapshotStakeEntries, nil + return stakeEntries, nil } func (bav *UtxoView) _flushSnapshotStakesToRewardToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { - // Note: the typical DELETE -> PUT pattern we follow in other _flush functions to update re-indexed keys - // will not work here because the SnapshotStakeToRewardByStakeAmount index has all of the information about - // the snapshot stake entry in the key. We would need to know the DB stakerPKID, validatorPKID, and stakeAmount - // values for snapshotStakeEntry in order to delete it from the DB. If we want to follow the DELETE -> PUT, we'll - // need to seek through ALL keys in the index and delete them first. This is not ideal. 
- // - // Omitting the DELETE here is fine in practice because the snapshotStakeEntries that are written when committing - // the final block of an epoch will always be identical, and are never overwritten. - - for mapKey, snapshotStakeEntry := range bav.SnapshotStakesToReward { - if snapshotStakeEntry == nil { + // Delete all snapshot StakeEntries in the UtxoView map. + for mapKeyIter, stakeEntryIter := range bav.SnapshotStakesToReward { + // Make a copy of the iterators since we make references to them below. + mapKey := mapKeyIter + stakeEntry := *stakeEntryIter + + // Sanity-check that the entry matches the map key. + mapKeyFromEntry := NewSnapshotStakeMapKey(&stakeEntry, mapKey.SnapshotAtEpochNumber) + if mapKeyFromEntry != mapKey { return fmt.Errorf( - "_flushSnapshotStakesToRewardToDbWithTxn: found nil snapshotStakeEntry for"+ - " EpochNumber %d, this should never happen", - mapKey.SnapshotAtEpochNumber, + "_flushSnapshotStakesToRewardToDbWithTxn: snapshot StakeEntry key %v doesn't match MapKey %v", + &mapKeyFromEntry, + &mapKey, ) } - if err := DBPutSnapshotStakeToRewardWithTxn(txn, bav.Snapshot, snapshotStakeEntry, blockHeight); err != nil { + + // Delete the existing mappings in the db for this map key. They will be + // re-added if the corresponding entry in-memory has isDeleted=false. + if err := DBDeleteSnapshotStakeToRewardWithTxn( + txn, bav.Snapshot, stakeEntry.ValidatorPKID, stakeEntry.StakerPKID, mapKey.SnapshotAtEpochNumber, blockHeight, + ); err != nil { + return errors.Wrapf(err, "_flushSnapshotStakesToRewardToDbWithTxn: ") + } + } + + for mapKey, stakeEntry := range bav.SnapshotStakesToReward { + if stakeEntry.isDeleted { + // Skip any deleted StakeEntries. 
+ continue + } + + if err := DBPutSnapshotStakeToRewardWithTxn( + txn, bav.Snapshot, stakeEntry, mapKey.SnapshotAtEpochNumber, blockHeight, + ); err != nil { return errors.Wrapf( err, - "_flushSnapshotStakesToRewardToDbWithTxn: problem setting snapshotStakeEntry"+ + "_flushSnapshotStakesToRewardToDbWithTxn: problem setting snapshot stakeEntry"+ " for SnapshotAtEpochNumber %d: ", mapKey.SnapshotAtEpochNumber, ) @@ -878,77 +858,65 @@ func (bav *UtxoView) _flushSnapshotStakesToRewardToDbWithTxn(txn *badger.Txn, bl return nil } +func DBDeleteSnapshotStakeToRewardWithTxn( + txn *badger.Txn, + snap *Snapshot, + validatorPKID *PKID, + stakerPKID *PKID, + snapshotAtEpochNumber uint64, + blockHeight uint64, +) error { + if validatorPKID == nil || stakerPKID == nil { + return nil + } + + // Delete the snapshot StakeEntry from PrefixSnapshotStakeToRewardByValidatorByStaker. + stakeByValidatorAndStakerKey := DBKeyForSnapshotStakeToRewardByValidatorAndStaker(snapshotAtEpochNumber, validatorPKID, stakerPKID) + if err := DBDeleteWithTxn(txn, snap, stakeByValidatorAndStakerKey); err != nil { + return errors.Wrapf( + err, "DBDeleteSnapshotStakeToRewardWithTxn: problem deleting snapshot StakeEntry from index PrefixSnapshotStakeToRewardByValidatorByStaker: ", + ) + } + + return nil +} + func DBPutSnapshotStakeToRewardWithTxn( txn *badger.Txn, snap *Snapshot, - snapshotStakeEntry *SnapshotStakeEntry, + stakeEntry *StakeEntry, + snapshotAtEpochNumber uint64, blockHeight uint64, ) error { - if snapshotStakeEntry == nil { + if stakeEntry == nil { // This should never happen but is a sanity check. 
- glog.Errorf("DBPutSnapshotStakeToRewardWithTxn: called with nil snapshotStakeEntry") + glog.Errorf("DBPutSnapshotStakeToRewardWithTxn: called with nil stakeEntry") return nil } - key := DBKeyForSnapshotStakeToRewardByStakeAmount(snapshotStakeEntry) - if err := DBSetWithTxn(txn, snap, key, nil); err != nil { + + dbKey := DBKeyForSnapshotStakeToRewardByValidatorAndStaker(snapshotAtEpochNumber, stakeEntry.ValidatorPKID, stakeEntry.StakerPKID) + if err := DBSetWithTxn(txn, snap, dbKey, EncodeToBytes(blockHeight, stakeEntry)); err != nil { return errors.Wrapf( err, - "DBPutSnapshotStakeToRewardWithTxn: problem putting snapshotStakeEntry in the"+ - " SnapshotLeaderSchedule index: ", + "DBPutSnapshotStakeToRewardWithTxn: problem putting snapshot stakeEntry in the SnapshotStakeToRewardByValidatorAndStaker index: ", ) } return nil } -func DBKeyForSnapshotStakeToRewardByStakeAmount(snapshotStakeEntry *SnapshotStakeEntry) []byte { - data := DBKeyForSnapshotStakeToRewardAtEpochNumber(snapshotStakeEntry.SnapshotAtEpochNumber) - data = append(data, FixedWidthEncodeUint256(snapshotStakeEntry.StakeAmountNanos)...) - data = append(data, snapshotStakeEntry.ValidatorPKID.ToBytes()...) - data = append(data, snapshotStakeEntry.StakerPKID.ToBytes()...) +func DBKeyForSnapshotStakeToRewardByValidatorAndStaker(snapshotAtEpochNumber uint64, validatorPKID *PKID, stakerPKID *PKID) []byte { + data := DBKeyForSnapshotStakeToRewardAtEpochNumber(snapshotAtEpochNumber) + data = append(data, validatorPKID.ToBytes()...) + data = append(data, stakerPKID.ToBytes()...) return data } func DBKeyForSnapshotStakeToRewardAtEpochNumber(snapshotAtEpochNumber uint64) []byte { - data := append([]byte{}, Prefixes.PrefixSnapshotStakeToRewardByStakeAmount...) + data := append([]byte{}, Prefixes.PrefixSnapshotStakeToRewardByValidatorAndStaker...) data = append(data, EncodeUint64(snapshotAtEpochNumber)...) 
return data } -func DecodeSnapshotStakeFromDBKey(stakeToRewardByStakeAmountDBKey []byte) (*SnapshotStakeEntry, error) { - var err error - rr := bytes.NewReader(stakeToRewardByStakeAmountDBKey) - - // Seek past the prefix. - if _, err := rr.Seek(int64(len(Prefixes.PrefixSnapshotStakeToRewardByStakeAmount)), 0); err != nil { - return nil, errors.Wrapf(err, "DecodeSnapshotStakeFromDBKey: Unable to skip past the prefix") - } - - decodedOutput := &SnapshotStakeEntry{} - - // The next 8 bytes are guaranteed to be the snapshotAtEpochNumber, since they are fixed-width. - snapshotAtEpochNumberBytes := make([]byte, 8) - if _, err := rr.Read(snapshotAtEpochNumberBytes); err != nil { - return nil, errors.Wrapf(err, "DecodeSnapshotStakeFromDBKey: Unable to read SnapshotAtEpochNumber") - } - decodedOutput.SnapshotAtEpochNumber = DecodeUint64(snapshotAtEpochNumberBytes) - - if decodedOutput.StakeAmountNanos, err = FixedWidthDecodeUint256(rr); err != nil { - return nil, errors.Wrapf(err, "DecodeSnapshotStakeFromDBKey: Unable to read StakeAmountNanos") - } - - decodedOutput.ValidatorPKID = &PKID{} - if err := decodedOutput.ValidatorPKID.FromBytes(rr); err != nil { - return nil, errors.Wrapf(err, "DecodeSnapshotStakeFromDBKey: unable to read ValidatorPKID") - } - - decodedOutput.StakerPKID = &PKID{} - if err := decodedOutput.StakerPKID.FromBytes(rr); err != nil { - return nil, errors.Wrapf(err, "DecodeSnapshotStakeFromDBKey: unable to read StakerPKID") - } - - return decodedOutput, nil -} - // // SnapshotLeaderScheduleValidator // From 0f3c3c7924ba4f9db3929fc658673cc9b117c863 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 27 Jul 2023 14:00:44 -0400 Subject: [PATCH 168/762] Use current global params outside of the PoS consensus (#624) * Use current global params outside of the PoS consensus * Beef up comments * Comment cleanup * Update comment for leader schedule generation --- lib/block_view_stake.go | 11 +++++------ 
lib/block_view_validator.go | 29 +++++++++++++++-------------- lib/pos_epoch_complete_hook.go | 5 ++++- lib/pos_leader_schedule.go | 5 +++-- lib/pos_snapshot_entries.go | 15 +++++++++++++++ 5 files changed, 42 insertions(+), 23 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 1a9b5a866..69607d52d 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -2085,15 +2085,14 @@ func (bav *UtxoView) IsValidUnlockStakeMetadata(transactorPkBytes []byte, metada return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error retrieving CurrentEpochNumber: ") } - // Retrieve the SnapshotGlobalParamsEntry.StakeLockupEpochDuration. - snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() - if err != nil { - return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error retrieving SnapshotGlobalParamsEntry: ") - } + // Retrieve the StakeLockupEpochDuration from the current global params. It's safe to use the current global + // params here because the changes made to locked stake do not affect the PoS consensus until they are + // snapshotted. + currentGlobalParamsEntry := bav.GetCurrentGlobalParamsEntry() // Calculate UnlockableAtEpochNumber. 
unlockableAtEpochNumber, err := SafeUint64().Add( - metadata.EndEpochNumber, snapshotGlobalParamsEntry.StakeLockupEpochDuration, + metadata.EndEpochNumber, currentGlobalParamsEntry.StakeLockupEpochDuration, ) if err != nil { return errors.Wrapf(err, "UtxoView.IsValidUnlockStakeMetadata: error calculating UnlockableAtEpochNumber: ") diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 387910639..8ee6d768b 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1683,15 +1683,14 @@ func (bav *UtxoView) IsValidUnjailValidatorMetadata(transactorPublicKey []byte) return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error retrieving CurrentEpochNumber: ") } - // Retrieve the SnapshotGlobalParamsEntry.ValidatorJailEpochDuration. - snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() - if err != nil { - return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error retrieving SnapshotGlobalParamsEntry: ") - } + // Retrieve the ValidatorJailEpochDuration from the current global params. It's safe to use the current global + // params here because the changes made to locked stake do not affect the PoS consensus until they are + // snapshotted. + currentGlobalParamsEntry := bav.GetCurrentGlobalParamsEntry() // Calculate UnjailableAtEpochNumber. unjailableAtEpochNumber, err := SafeUint64().Add( - validatorEntry.JailedAtEpochNumber, snapshotGlobalParamsEntry.ValidatorJailEpochDuration, + validatorEntry.JailedAtEpochNumber, currentGlobalParamsEntry.ValidatorJailEpochDuration, ) if err != nil { return errors.Wrapf(err, "UtxoView.IsValidUnjailValidatorMetadata: error calculating UnjailableAtEpochNumber: ") @@ -1865,6 +1864,9 @@ func (bav *UtxoView) JailAllInactiveSnapshotValidators(blockHeight uint64) error return nil } + // Fetch the ValidatorSetMaxNumValidators from the snapshot global params. 
We use the snapshot global + // params here because the value used to snapshot the size of the validator set was snapshotted along + // with the validator set. snapshotGlobalParams, err := bav.GetSnapshotGlobalParamsEntry() if err != nil { return errors.Wrapf(err, "UtxoView.JailAllInactiveSnapshotValidators: error retrieving SnapshotGlobalParamsEntry: ") @@ -1919,13 +1921,12 @@ func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry, blockHe return false, nil } - // Retrieve the SnapshotGlobalParamsEntry: + // Retrieve the current GlobalParamsEntry. It's safe to use the current global params here because the + // jailing operations made here does not affect the PoS consensus until they are snapshotted and used in + // consensus n epochs later. The two params we care about here are: // - JailInactiveValidatorGracePeriodEpochs // - EpochDurationNumBlocks - snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() - if err != nil { - return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error retrieving SnapshotGlobalParamsEntry: ") - } + currentGlobalParamsEntry := bav.GetCurrentGlobalParamsEntry() // Calculate if enough blocks have passed since cutting over to PoS to start jailing validators. // We want to allow a buffer after we cut-over to PoS to allow validators enough time to vote. 
@@ -1934,8 +1935,8 @@ func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry, blockHe // // StartJailingBlockHeight = ConsensusCutoverBlockHeight + (JailInactiveValidatorGracePeriodEpochs * EpochDurationNumBlocks) startJailingGracePeriodBlocks, err := SafeUint64().Mul( - snapshotGlobalParamsEntry.JailInactiveValidatorGracePeriodEpochs, - snapshotGlobalParamsEntry.EpochDurationNumBlocks, + currentGlobalParamsEntry.JailInactiveValidatorGracePeriodEpochs, + currentGlobalParamsEntry.EpochDurationNumBlocks, ) if err != nil { return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error calculating StartJailingGracePeriod: ") @@ -1958,7 +1959,7 @@ func (bav *UtxoView) ShouldJailValidator(validatorEntry *ValidatorEntry, blockHe // Calculate the JailAtEpochNumber. jailAtEpochNumber, err := SafeUint64().Add( - validatorEntry.LastActiveAtEpochNumber, snapshotGlobalParamsEntry.JailInactiveValidatorGracePeriodEpochs, + validatorEntry.LastActiveAtEpochNumber, currentGlobalParamsEntry.JailInactiveValidatorGracePeriodEpochs, ) if err != nil { return false, errors.Wrapf(err, "UtxoView.ShouldJailValidator: error calculating JailAtEpochNumber: ") diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index d07e1e732..2f55362fe 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -139,7 +139,10 @@ func (bav *UtxoView) runEpochCompleteSnapshotGeneration(epochNumber uint64) erro // Updates the currentEpochEntry to the next epoch's. func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeight uint64, blockTimestampNanoSecs uint64) error { - // Retrieve the SnapshotGlobalParamsEntry. + // Retrieve the SnapshotGlobalParamsEntry to determine the next epoch's final block height. We use the + // snapshot global params here because the next epoch begin immediately, and its length is used in the PoS + // consensus. 
The validator set for the next epoch needs to be in agreement on the length of the epoch + // before the epoch begins. snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() if err != nil { return errors.Wrapf(err, "runEpochCompleteEpochRollover: problem retrieving SnapshotGlobalParamsEntry: ") diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 703f85941..30617d627 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -14,8 +14,9 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) { return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving CurrentRandomSeedHash: ") } - // Retrieve the SnapshotGlobalParamsEntry.LeaderScheduleMaxNumValidators. Note, we are safe to use the - // currentGlobalParamsEntry.LeaderScheduleMaxNumValidators here since we are creating a new snapshot. + // Retrieve the LeaderScheduleMaxNumValidators from the current GlobalParams. We are safe to use the current + // global params because this generates a new leader schedule from the current validator entries, in preparation + // to snapshot the leader schedule. currentGlobalParamsEntry := bav.GetCurrentGlobalParamsEntry() // Retrieve top, active validators ordered by stake. diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 13c526cb4..3a1016731 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -52,6 +52,21 @@ func (bav *UtxoView) GetCurrentGlobalParamsEntry() *GlobalParamsEntry { return _mergeGlobalParamEntryDefaults(bav, bav.GlobalParamsEntry) } +// GetSnapshotGlobalParamsEntry retrieves a snapshot of the GlobalParamsEntry from n epochs ago. If a snapshot +// does not exist for that epoch, it will return the default values. We snapshot GlobalParams to make sure that +// the validator set in the PoS consensus is in agreement ahead of time on the params used for an epoch long +// before that epoch begins. 
Snapshot GlobalParams are only appropriate to use in two scenarios: +// - In the PoS consensus logic run by validators for block proposal, voting, and timeouts; validators need to +// be in agreement on the size of the validator set, leader schedule, stakes to reward, and epoch duration. +// - When transitioning to a new epoch, we use the snapshot GlobalParams to determine the length of the next +// epoch. All validators need to be in agreement ahead of time on what length of the next epoch will be before +// the epoch begins. +// +// For all other uses, only the CurrentGlobalParamsEntry is appropriate to use. This includes all transaction connect +// logic and end of epoch operations that mutate the validator entries and stake entries BEFORE they are +// snapshotted. This approach ensures that whenever we create a snapshot of the validator set, leader schedule, +// and stakes to reward... the GlobalParams used to create the snapshots are snapshotted along with that data, and +// live alongside them. func (bav *UtxoView) GetSnapshotGlobalParamsEntry() (*GlobalParamsEntry, error) { // Calculate the SnapshotEpochNumber. 
snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() From 06880f6879a30ace0012169e495c697963b10a7d Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 31 Jul 2023 12:11:23 -0400 Subject: [PATCH 169/762] Implement Staking Rewards APY Computations (#615) * Implement Stake Rewards Interest Rate * Finish first pass implementation * Adapt restaking logic to use APY * Add unit tests * Fix typo * Comments * Address Nina's comments * Rebase * Formatting and clearer variable naming * Remove redundant filter from snapshot stake entries query * Address PR comments --- lib/deso_math.go | 6 +- lib/errors.go | 3 + lib/pos_epoch_complete_hook.go | 10 +- lib/pos_epoch_complete_hook_test.go | 317 ++++++++++++++++++++++++++-- lib/pos_snapshot_entries.go | 24 ++- lib/pos_staking_rewards.go | 279 +++++++++++++++++++++++- 6 files changed, 602 insertions(+), 37 deletions(-) diff --git a/lib/deso_math.go b/lib/deso_math.go index b7c164bf7..994ebfe0b 100644 --- a/lib/deso_math.go +++ b/lib/deso_math.go @@ -2,10 +2,11 @@ package lib import ( "fmt" - "github.com/btcsuite/btcd/btcec" - "github.com/holiman/uint256" "math" "math/big" + + "github.com/btcsuite/btcd/btcec" + "github.com/holiman/uint256" ) // This library implements basic float functions using big.Float objects. 
@@ -99,6 +100,7 @@ var ( bigOne = NewFloat().SetUint64(1) bigTwo = NewFloat().SetUint64(2) bigSqrt2Over2 = NewFloat().Quo(bigSqrt2, bigTwo) + bigE = NewFloat().SetFloat64(2.71828182845904523536028747135266249775724709369995957496696763) // Constants for BigFloatExpMulti bigP1 = NewFloat().SetFloat64(1.66666666666666657415e-01) /* 0x3FC55555; 0x55555555 */ diff --git a/lib/errors.go b/lib/errors.go index 5a9a2a773..73a61aae9 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -482,6 +482,9 @@ const ( RuleErrorNonceExpired RuleError = "RuleErrorNonceExpired" RuleErrorBalanceChangeGreaterThanZero RuleError = "RuleErrorBalanceChangeGreaterThanZero" + // EpochCompleteHook + RuleErrorBlockTimestampBeforeEpochStartTimestamp RuleError = "RuleErrorBlockTimestampBeforeEpochStartTimestamp" + HeaderErrorDuplicateHeader RuleError = "HeaderErrorDuplicateHeader" HeaderErrorNilPrevHash RuleError = "HeaderErrorNilPrevHash" HeaderErrorInvalidParent RuleError = "HeaderErrorInvalidParent" diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 2f55362fe..d95efa3ad 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -72,7 +72,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, blockTimestampNano } // Step 1: Run All State Mutating Operations - if err := bav.runEpochCompleteStateMutations(blockHeight); err != nil { + if err := bav.runEpochCompleteStateTransition(blockHeight, blockTimestampNanoSecs); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: ") } @@ -92,22 +92,22 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, blockTimestampNano } // Runs all state-mutating operations required when completing an epoch. -func (bav *UtxoView) runEpochCompleteStateMutations(blockHeight uint64) error { +func (bav *UtxoView) runEpochCompleteStateTransition(blockHeight uint64, blockTimestampNanoSecs uint64) error { // Jail all inactive validators from the current snapshot validator set. 
This is an O(n) operation // that loops through all active unjailed validators from current epoch's snapshot validator set // and jails them if they have been inactive. // // Note, this this will only run if we are past the ProofOfStake2ConsensusCutoverBlockHeight fork height. if err := bav.JailAllInactiveSnapshotValidators(blockHeight); err != nil { - return errors.Wrapf(err, "runEpochCompleteStateMutations: problem jailing all inactive validators: ") + return errors.Wrapf(err, "runEpochCompleteStateTransition: problem jailing all inactive validators: ") } // Reward all snapshotted stakes from the current snapshot validator set. This is an O(n) operation // that loops through all of the snapshotted stakes and rewards them. // // Note, this this will only run if we are past the ProofOfStake2ConsensusCutoverBlockHeight fork height. - if err := bav.DistributeStakingRewardsToSnapshotStakes(blockHeight); err != nil { - return errors.Wrapf(err, "runEpochCompleteStateMutations: problem rewarding snapshot stakes: ") + if err := bav.DistributeStakingRewardsToSnapshotStakes(blockHeight, blockTimestampNanoSecs); err != nil { + return errors.Wrapf(err, "runEpochCompleteStateTransition: problem rewarding snapshot stakes: ") } return nil diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 8d301d5c7..993cbd262 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -54,7 +54,6 @@ func TestIsLastBlockInCurrentEpoch(t *testing.T) { } func TestRunEpochCompleteHook(t *testing.T) { - // Initialize test chain, miner, and testMeta testMeta := _setUpMinerAndTestMetaForEpochCompleteTest(t) @@ -91,9 +90,6 @@ func TestRunEpochCompleteHook(t *testing.T) { // For these tests, we set each epoch duration to only one block. 
testMeta.params.DefaultEpochDurationNumBlocks = uint64(1) - // We set the default staking rewards APY to 10% - testMeta.params.DefaultStakingRewardsAPYBasisPoints = uint64(1000) - { // ParamUpdater set MinFeeRateNanos, ValidatorJailEpochDuration, // and JailInactiveValidatorGracePeriodEpochs. @@ -165,13 +161,13 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // All validators register + stake to themselves. - _registerValidatorAndStake(testMeta, m0Pub, m0Priv, 100, false) - _registerValidatorAndStake(testMeta, m1Pub, m1Priv, 200, false) - _registerValidatorAndStake(testMeta, m2Pub, m2Priv, 300, false) - _registerValidatorAndStake(testMeta, m3Pub, m3Priv, 400, false) - _registerValidatorAndStake(testMeta, m4Pub, m4Priv, 500, false) - _registerValidatorAndStake(testMeta, m5Pub, m5Priv, 600, false) - _registerValidatorAndStake(testMeta, m6Pub, m6Priv, 700, false) + _registerValidatorAndStake(testMeta, m0Pub, m0Priv, 0, 100, false) + _registerValidatorAndStake(testMeta, m1Pub, m1Priv, 0, 200, false) + _registerValidatorAndStake(testMeta, m2Pub, m2Priv, 0, 300, false) + _registerValidatorAndStake(testMeta, m3Pub, m3Priv, 0, 400, false) + _registerValidatorAndStake(testMeta, m4Pub, m4Priv, 0, 500, false) + _registerValidatorAndStake(testMeta, m5Pub, m5Priv, 0, 600, false) + _registerValidatorAndStake(testMeta, m6Pub, m6Priv, 0, 700, false) validatorEntries, err := _newUtxoView(testMeta).GetTopActiveValidatorsByStakeAmount(10) require.NoError(t, err) @@ -267,7 +263,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(600)) // m5 stakes another 200. - _registerValidatorAndStake(testMeta, m5Pub, m5Priv, 200, false) + _registerValidatorAndStake(testMeta, m5Pub, m5Priv, 0, 200, false) // m5 has 800 staked. 
validatorEntry, err = _newUtxoView(testMeta).GetValidatorByPKID(m5PKID) @@ -479,6 +475,268 @@ func TestRunEpochCompleteHook(t *testing.T) { } } +func TestStakingRewardDistribution(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForEpochCompleteTest(t) + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e3) + + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m0PkBytes).PKID + m1PKID := DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m1PkBytes).PKID + m2PKID := DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m2PkBytes).PKID + m3PKID := DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m3PkBytes).PKID + + blockHeight := uint64(testMeta.chain.blockTip().Height) + 1 + incrBlockHeight := func() uint64 { + blockHeight += 1 + return blockHeight + } + + // Seed a CurrentEpochEntry. + tmpUtxoView := _newUtxoView(testMeta) + tmpUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 2, FinalBlockHeight: blockHeight + 1}) + require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) + + // For these tests, we set each epoch duration to only one block. + testMeta.params.DefaultEpochDurationNumBlocks = uint64(1) + + // We set the default staking rewards APY to 10% + testMeta.params.DefaultStakingRewardsAPYBasisPoints = uint64(1000) + + // Two validators register + stake to themselves. 
+ _registerValidatorAndStake(testMeta, m0Pub, m0Priv, 2000, 400, true) // 20% commission rate, 400 nano stake + _registerValidatorAndStake(testMeta, m1Pub, m1Priv, 2000, 200, false) // 20% commission rate, 200 nano stake + + // Two stakers delegate their stake to the validators. + _stakeToValidator(testMeta, m2Pub, m2Priv, m0Pub, 100, true) // 100 nano stake + _stakeToValidator(testMeta, m3Pub, m3Priv, m1Pub, 50, false) // 50 nano stake + + { + // Verify the validators and their total stakes. + validatorEntries, err := _newUtxoView(testMeta).GetTopActiveValidatorsByStakeAmount(10) + require.NoError(t, err) + require.Len(t, validatorEntries, 2) + + // Validator m0 has 500 nanos staked in total: 400 staked by itself and 100 delegated by m2. + require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(500)) + + // Validator m1 has 250 nanos staked in total: 200 staked by itself and 50 delegated by m3. + require.Equal(t, validatorEntries[1].ValidatorPKID, m1PKID) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(250)) + } + + { + // Verify the stakers' stakes. + stakeEntries, err := _newUtxoView(testMeta).GetTopStakesByStakeAmount(10) + require.NoError(t, err) + require.Len(t, stakeEntries, 4) + + require.Equal(t, stakeEntries[0].StakerPKID, m0PKID) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(400)) + require.Equal(t, stakeEntries[1].StakerPKID, m1PKID) + require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, stakeEntries[2].StakerPKID, m2PKID) + require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, stakeEntries[3].StakerPKID, m3PKID) + require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) + } + + { + // Run OnEpochCompleteHook(). 
+ _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + } + + { + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + } + + { + // Test that the stakes are unchanged. + stakeEntries, err := _newUtxoView(testMeta).GetTopStakesByStakeAmount(10) + require.NoError(t, err) + require.Len(t, stakeEntries, 4) + require.Equal(t, stakeEntries[0].StakerPKID, m0PKID) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(400)) + require.Equal(t, stakeEntries[1].StakerPKID, m1PKID) + require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, stakeEntries[2].StakerPKID, m2PKID) + require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, stakeEntries[3].StakerPKID, m3PKID) + require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) + + // Test that DESO wallet balances are unchanged. + m0Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m0PkBytes) + require.NoError(t, err) + require.Equal(t, m0Balance, uint64(546)) + m1Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m1PkBytes) + require.NoError(t, err) + require.Equal(t, m1Balance, uint64(746)) + m2Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m2PkBytes) + require.NoError(t, err) + require.Equal(t, m2Balance, uint64(882)) + m3Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m3PkBytes) + require.NoError(t, err) + require.Equal(t, m3Balance, uint64(932)) + + // Test that snapshot stakes have been created. 
+ snapshotStakeEntries, err := _newUtxoView(testMeta).GetAllSnapshotStakesToReward() + require.NoError(t, err) + _sortStakeEntriesByStakeAmount(snapshotStakeEntries) + require.Len(t, snapshotStakeEntries, 4) + require.Equal(t, snapshotStakeEntries[0].StakerPKID, m0PKID) + require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(400)) + require.Equal(t, snapshotStakeEntries[1].StakerPKID, m1PKID) + require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, snapshotStakeEntries[2].StakerPKID, m2PKID) + require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, snapshotStakeEntries[3].StakerPKID, m3PKID) + require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) + } + + { + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + } + + { + // This is the first epoch where staking rewards have been distributed. The nominal interest + // rate for staking rewards is 10% APY. Exactly 1 year's worth of time has passed since the + // previous epoch. + + // Test that the number of stakes is unchanged. 
+ stakeEntries, err := _newUtxoView(testMeta).GetTopStakesByStakeAmount(10) + require.NoError(t, err) + require.Len(t, stakeEntries, 4) + + // Test reward computation and restaking for m0: + // - m0's original stake was 400 nanos + // - m0 had 100 nanos delegated to it + // - m0's commission rate is 20% + // - all rewards for m0 will be restaked + // + // Reward Computations: + // - m0's reward from its own stake is: 400 * [e^0.1 - 1] = 42 nanos + // - m0's reward from delegated stake is: 100 * [e^0.1 - 1] * 0.2 = 2 nanos + // + // Final stake amount: + // - m0's final stake is: 400 + 42 + 2 = 444 nanos + require.Equal(t, stakeEntries[0].StakerPKID, m0PKID) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(444)) + + // Test that m0's DESO wallet balance is unchanged. + m0Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m0PkBytes) + require.NoError(t, err) + require.Equal(t, m0Balance, uint64(546)) + + // Test reward computation for m1: + // - m1's original stake was 200 nanos + // - m1 had 50 nanos delegated to it + // - m1's original DESO wallet balance was 746 nanos + // - m1's commission rate is 20% + // - all rewards for m1 will be paid out to its DESO wallet + // + // Reward Computations: + // - m1's reward from its own stake is: 200 * [e^0.1 - 1] = 21 nanos + // - m1's reward from delegated stake is: 50 * [e^0.1 - 1] * 0.2 = 1 nano + // + // Final DESO wallet balance: + // - m1's final DESO wallet balance is: 746 + 21 + 1 = 768 nanos + m1Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m1PkBytes) + require.NoError(t, err) + require.Equal(t, m1Balance, uint64(768)) + + // Test that m1's stake is unchanged. 
+ require.Equal(t, stakeEntries[1].StakerPKID, m1PKID) + require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) + + // Test reward computation and restaking for m2: + // - m2's original stake was 100 nanos + // - m2's validator m0 has a commission rate of 20% + // - m2's rewards will be restaked + // + // Reward Computations: + // - m2's total reward for its stake is: 100 * [e^0.1 - 1] = 10 nanos + // - m2's reward lost to m0's commission is: 10 nanos * 0.2 = 2 nanos + // + // Final stake amount: + // - m2's final stake is: 100 + 10 - 2 = 108 nanos + require.Equal(t, stakeEntries[2].StakerPKID, m2PKID) + require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(108)) + + // Test that m2's DESO wallet balance is unchanged. + m2Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m2PkBytes) + require.NoError(t, err) + require.Equal(t, m2Balance, uint64(882)) + + // Test reward computation for m3: + // - m3's original stake was 50 nanos + // - m3's validator m1 has a commission rate of 20% + // - m3's original DESO wallet balance was 932 nanos + // - m3's rewards will be paid out to its DESO wallet + // + // Reward Computations: + // - m3's total reward for its stake is 50 * [e^0.1 - 1] = 5 nanos + // - m3's reward lost to m1's commission is: 5 nanos * 0.2 = 1 nano + // + // Final DESO wallet balance: + // - m3's final DESO wallet balance is: 932 + 5 - 1 = 936 nanos + m3Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m3PkBytes) + require.NoError(t, err) + require.Equal(t, m3Balance, uint64(936)) + + // Test that m3's stake is unchanged. + require.Equal(t, stakeEntries[3].StakerPKID, m3PKID) + require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) + } + + { + // Test that snapshot stakes have not changed. 
+ snapshotStakeEntries, err := _newUtxoView(testMeta).GetAllSnapshotStakesToReward() + require.NoError(t, err) + _sortStakeEntriesByStakeAmount(snapshotStakeEntries) + require.Len(t, snapshotStakeEntries, 4) + require.Equal(t, snapshotStakeEntries[0].StakerPKID, m0PKID) + require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(400)) + require.Equal(t, snapshotStakeEntries[1].StakerPKID, m1PKID) + require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, snapshotStakeEntries[2].StakerPKID, m2PKID) + require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, snapshotStakeEntries[3].StakerPKID, m3PKID) + require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) + } + + { + // Run OnEpochCompleteHook(). + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + } + + { + // Test that the current epoch's snapshot stakes now reflect the rewards that were + // restaked at the end of epoch n-2. 
+ + snapshotStakeEntries, err := _newUtxoView(testMeta).GetAllSnapshotStakesToReward() + require.NoError(t, err) + _sortStakeEntriesByStakeAmount(snapshotStakeEntries) + require.Len(t, snapshotStakeEntries, 4) + require.Equal(t, snapshotStakeEntries[0].StakerPKID, m0PKID) + require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(444)) + require.Equal(t, snapshotStakeEntries[1].StakerPKID, m1PKID) + require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, snapshotStakeEntries[2].StakerPKID, m2PKID) + require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(108)) + require.Equal(t, snapshotStakeEntries[3].StakerPKID, m3PKID) + require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) + } + +} + func _setUpMinerAndTestMetaForEpochCompleteTest(t *testing.T) *TestMeta { // Initialize balance model fork heights. setBalanceModelBlockHeights(t) @@ -514,7 +772,14 @@ func _setUpMinerAndTestMetaForEpochCompleteTest(t *testing.T) *TestMeta { } } -func _registerValidatorAndStake(testMeta *TestMeta, publicKey string, privateKey string, stakeAmountNanos uint64, restakeRewards bool) { +func _registerValidatorAndStake( + testMeta *TestMeta, + publicKey string, + privateKey string, + commissionBasisPoints uint64, + stakeAmountNanos uint64, + restakeRewards bool, +) { // Convert PublicKeyBase58Check to PublicKeyBytes. pkBytes, _, err := Base58CheckDecode(publicKey) require.NoError(testMeta.t, err) @@ -522,24 +787,40 @@ func _registerValidatorAndStake(testMeta *TestMeta, publicKey string, privateKey // Validator registers. 
votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(testMeta.t, pkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, - VotingPublicKey: votingPublicKey, - VotingAuthorization: votingAuthorization, + Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, + VotingPublicKey: votingPublicKey, + DelegatedStakeCommissionBasisPoints: commissionBasisPoints, + VotingAuthorization: votingAuthorization, } _, err = _submitRegisterAsValidatorTxn(testMeta, publicKey, privateKey, registerMetadata, nil, true) require.NoError(testMeta.t, err) + _stakeToValidator(testMeta, publicKey, privateKey, publicKey, stakeAmountNanos, restakeRewards) +} + +func _stakeToValidator( + testMeta *TestMeta, + stakerPubKey string, + stakerPrivKey string, + validatorPubKey string, + stakeAmountNanos uint64, + restakeRewards bool, +) { + // Convert ValidatorPublicKeyBase58Check to ValidatorPublicKeyBytes. + validatorPkBytes, _, err := Base58CheckDecode(validatorPubKey) + require.NoError(testMeta.t, err) + rewardMethod := StakingRewardMethodPayToBalance if restakeRewards { rewardMethod = StakingRewardMethodRestake } stakeMetadata := &StakeMetadata{ - ValidatorPublicKey: NewPublicKey(pkBytes), + ValidatorPublicKey: NewPublicKey(validatorPkBytes), RewardMethod: rewardMethod, StakeAmountNanos: uint256.NewInt().SetUint64(stakeAmountNanos), } - _, err = _submitStakeTxn(testMeta, publicKey, privateKey, stakeMetadata, nil, true) + _, err = _submitStakeTxn(testMeta, stakerPubKey, stakerPrivKey, stakeMetadata, nil, true) require.NoError(testMeta.t, err) } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 3a1016731..de18d64e3 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -719,7 +719,7 @@ func (bav *UtxoView) _setSnapshotStakeToReward(stakeEntry *StakeEntry, snapshotA bav.SnapshotStakesToReward[NewSnapshotStakeMapKey(stakeEntry, 
snapshotAtEpochNumber)] = stakeEntry.Copy() } -// GetSnapshotStakesToReward returns all snapshotted StakeEntries that are eligible to receive staking +// GetAllSnapshotStakesToReward returns all snapshotted StakeEntries that are eligible to receive staking // rewards for the current snapshot epoch. The order of the returned entries is arbitrary. func (bav *UtxoView) GetAllSnapshotStakesToReward() ([]*StakeEntry, error) { snapshotGlobalParams, err := bav.GetSnapshotGlobalParamsEntry() @@ -768,21 +768,23 @@ func (bav *UtxoView) GetAllSnapshotStakesToReward() ([]*StakeEntry, error) { bav._setSnapshotStakeToReward(stakeEntry, snapshotAtEpochNumber) } - // Pull snapshot StakeEntries from the UtxoView with stake > 0. All entries should have > 0 stake to begin - // with, but we filter here again just in case. + // Pull all non-deleted snapshot StakeEntries from the UtxoView with stake > 0. var mergedStakeEntries []*StakeEntry for mapKey, stakeEntry := range bav.SnapshotStakesToReward { - if mapKey.SnapshotAtEpochNumber == snapshotAtEpochNumber && !stakeEntry.StakeAmountNanos.IsZero() { - mergedStakeEntries = append(mergedStakeEntries, stakeEntry) + if stakeEntry.isDeleted { + // Skip any deleted StakeEntries. + continue } - } - // Return top N. - upperBound := maxNumSnapshotStakes - if uint64(len(mergedStakeEntries)) < upperBound { - upperBound = uint64(len(mergedStakeEntries)) + // All entries should have > 0 stake to begin with, but we filter here again just in case. 
+ if mapKey.SnapshotAtEpochNumber != snapshotAtEpochNumber || stakeEntry.StakeAmountNanos.IsZero() { + continue + } + + mergedStakeEntries = append(mergedStakeEntries, stakeEntry) } - return mergedStakeEntries[0:upperBound], nil + + return mergedStakeEntries, nil } func DBGetSnapshotStakesToReward( diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index 633613c22..915d51078 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -1,5 +1,282 @@ package lib -func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64) error { +import ( + "math/big" + + "github.com/holiman/uint256" + "github.com/pkg/errors" +) + +func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64, blockTimestampNanoSecs uint64) error { + // Check if we have switched from PoW to PoS yet. If we have not, then the PoS consensus + // has not started yet. We don't want to distribute any staking rewards until the PoS consensus begins. + if blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + return nil + } + + // Retrieve the current EpochEntry. + currentEpochEntry, err := bav.GetCurrentEpochEntry() + if err != nil { + return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving current EpochEntry: ") + } + + // Check if the current epoch's timestamp is somehow greater than the block timestamp. This should never happen as long + // as timestamps are moving forward when connecting each block. + if currentEpochEntry.CreatedAtBlockTimestampNanoSecs >= blockTimestampNanoSecs { + return errors.Wrapf(RuleErrorBlockTimestampBeforeEpochStartTimestamp, "DistributeStakingRewardsToSnapshotStakes: ") + } + + // Compute the amount of time that has elapsed since the current epoch started. As long as the elapsed time is > 0, + // the fraction of the year will be > 0 as well. 
+ elapsedTimeNanoSecs := blockTimestampNanoSecs - currentEpochEntry.CreatedAtBlockTimestampNanoSecs + elapsedFractionOfYear := computeFractionOfYearAsFloat(elapsedTimeNanoSecs) + + // Fetch the staking rewards APY. It is safe to use the APY from the current global params because the staking + // distribution made here do not affect the PoS consensus until they are snapshotted. + apyBasisPoints := bav.GetCurrentGlobalParamsEntry().StakingRewardsAPYBasisPoints + if apyBasisPoints == 0 { + // If the APY is zero or not yet defined, then there are no staking rewards to distribute. + return nil + } + + // Convert the APY from a scaled integer to a float. During the conversion, the interest rate + // is scaled down. Examples: + // - a APY basis points value of 526 is converted to a float of 0.0526 + // As long as the scaled interest rate is > 0, the converted float is guaranteed to be non-zero as well. + apy := convertAPYBasisPointsToFloat(apyBasisPoints) + + // We reward all snapshotted stakes from the current snapshot validator set. This is an O(n) operation + // that loops through all of the snapshotted stakes and rewards them one by one. + snapshotStakesToReward, err := bav.GetAllSnapshotStakesToReward() + if err != nil { + return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving snapshot stakes to reward: ") + } + + // If there are no stakes to reward, then there's nothing to be done. Exit early here. + if len(snapshotStakesToReward) == 0 { + return nil + } + + // Loop through all of the snapshot stakes; distribute staking rewards to the staker and commissions to + // their validator. + for _, snapshotStakeEntry := range snapshotStakesToReward { + if snapshotStakeEntry == nil { + // This should never happen. If we encounter a nil entry, then the setter for UtxoView.SnapshotStakesToReward + // is unexpectedly setting nil values. We just skip such values here. 
+ continue + } + + // Compute the staker's portion of the staking reward, and the validator's commission. + stakerRewardNanos, validatorCommissionNanos, err := bav.computeStakerRewardAndValidatorCommission( + snapshotStakeEntry, elapsedFractionOfYear, apy, + ) + if err != nil { + return errors.Wrapf( + err, + "DistributeStakingRewardsToSnapshotStakes: problem computing staker reward and validator commission: ", + ) + } + + // If both the staker reward and the validator commission are zero, then there's nothing to be done. + // Move on to the next staker. + if stakerRewardNanos == 0 && validatorCommissionNanos == 0 { + continue + } + + // Reward the staker their portion of the staking reward. + if stakerRewardNanos > 0 { + if err = bav.distributeStakingReward(snapshotStakeEntry.ValidatorPKID, snapshotStakeEntry.StakerPKID, stakerRewardNanos); err != nil { + return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem distributing staker reward: ") + } + } + + // Reward the validator their commission from the staking reward. + if validatorCommissionNanos > 0 { + if err = bav.distributeValidatorCommission(snapshotStakeEntry.ValidatorPKID, validatorCommissionNanos); err != nil { + return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem distributing validator commission reward: ") + } + } + } + return nil } + +func (bav *UtxoView) computeStakerRewardAndValidatorCommission( + snapshotStakeEntry *StakeEntry, + elapsedFractionOfYear *big.Float, + apy *big.Float, +) ( + _stakerRewardNanos uint64, + _validatorCommissionNanos uint64, + _err error, +) { + // Compute the staker's reward amount using a big float math, and immediately convert it to big int + // so we can do the remainder of the math using integer operations. This is the only operation where + // we need float math. 
+ stakerRewardNanos := convertBigFloatToBigInt( + computeStakingReward(snapshotStakeEntry.StakeAmountNanos, elapsedFractionOfYear, apy), + ) + + // If the reward is 0, then there's nothing to be done. In practice, the reward should never be < 0 + // either, but we check for it here in case it resulted from a rounding error. Either way, we're + // safe to exit early here. + if stakerRewardNanos == nil || stakerRewardNanos.Sign() <= 0 { + return 0, 0, nil + } + + // At this point, we know that the staker has non-zero rewards. We need to determine how to + // distribute the rewards to them, and how to distribute the validator's commissions. + + // Compute the validator's commission and deduct it from the staker's reward. + validatorCommissionNanos := big.NewInt(0) + + // We only compute validator commission if the staker had delegated stake to another validator. If the staker + // staked to themselves, then there's no reason to compute the validator commission. + if !snapshotStakeEntry.StakerPKID.Eq(snapshotStakeEntry.ValidatorPKID) { + // Fetch the ValidatorEntry that the stake is delegated to. The validator is guaranteed to be in the + // snapshot validator set, because only stakes from the snapshot validator set are eligible to receive + // rewards. + validatorEntry, err := bav.GetSnapshotValidatorSetEntryByPKID(snapshotStakeEntry.ValidatorPKID) + if err != nil { + return 0, 0, errors.Wrapf(err, "computeStakerRewardAndValidatorCommission: problem fetching validator entry: ") + } + if validatorEntry == nil { + // This should never happen. If we can't find the validator, then something is wrong. It's safest to error + // and return early here. + return 0, 0, errors.Errorf("computeStakerRewardAndValidatorCommission: validator entry should never be nil") + } + + if validatorEntry.DelegatedStakeCommissionBasisPoints > 0 { + // We use integer math to compute the validator's commission. 
The commission is computed as: + // floor(stakerReward * validatorCommissionBasisPoints / 10000) + validatorCommissionNanos = computeValidatorCommission( + stakerRewardNanos, validatorEntry.DelegatedStakeCommissionBasisPoints, + ) + + if validatorCommissionNanos.Cmp(stakerRewardNanos) > 0 { + // This should never happen. If the validator's commission is greater than the total staker reward amount, + // then something has gone wrong. + return 0, 0, errors.Errorf( + "computeStakerRewardAndValidatorCommission: validator commission is greater than staker reward amount", + ) + } + + // Deduct the validator commission from the staker's reward. + stakerRewardNanos.Sub(stakerRewardNanos, validatorCommissionNanos) + } + } + + // At this point, we have the staker's reward and the validator's commission. We need to convert them + // to uint64s and return them. + if !stakerRewardNanos.IsUint64() || !validatorCommissionNanos.IsUint64() { + return 0, 0, errors.Errorf( + "computeStakerRewardAndValidatorCommission: staker reward or validator commission is not a uint64", + ) + } + + return stakerRewardNanos.Uint64(), validatorCommissionNanos.Uint64(), nil +} + +func (bav *UtxoView) distributeStakingReward(validatorPKID *PKID, stakerPKID *PKID, rewardNanos uint64) error { + // Fetch the staker's latest StakeEntry. + stakeEntry, err := bav.GetStakeEntry(validatorPKID, stakerPKID) + if err != nil { + return errors.Wrapf(err, "distributeStakingReward: problem fetching staker's StakeEntry: ") + } + + // At this point, there are three possible cases: + // 1. The stake entry still exists and wants to restake their rewards. + // 2. The stake entry still exists and does not want to restake their rewards. + // 3. The stake entry has unstaked since the snapshot was taken. + + // For case 1, we distribute the rewards by adding them to the staker's staked amount. 
+ if stakeEntry != nil && stakeEntry.RewardMethod == StakingRewardMethodRestake { + stakeEntry.StakeAmountNanos.Add(stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(rewardNanos)) + bav._setStakeEntryMappings(stakeEntry) + + return nil + } + + // For cases 2 and 3, the staker does not want their rewards restaked. The staker is still + // eligible to receive rewards because their stake was used to secure the network. So we pay out + // the rewards directly to the staker's wallet. + + stakerPublicKey := bav.GetPublicKeyForPKID(stakerPKID) + if _, err = bav._addBalance(rewardNanos, stakerPublicKey); err != nil { + return errors.Wrapf(err, "distributeStakingReward: problem adding rewards to staker's DESO balance: ") + } + + return nil +} + +func (bav *UtxoView) distributeValidatorCommission(validatorPKID *PKID, commissionNanos uint64) error { + // Here, we treat the validator's commission identically to staking rewards. We view commissions as another source of staking + // rewards that validators receive at the end of each epoch. And these commissions are eligible to be restaked if the validator + // desires. To determine whether to re-stake commissions or pay out the commissions to the validator's wallet, we rely on the + // validators own StakeEntry where they have staked to themselves, and the RewardMethod flag on the entry. The logic works as follows: + // - If the validator has staked to themselves, and they have reward restaking enabled, then their commissions are restaked. + // - If the validator has not staked to themselves, or they have reward restaking disabled, then their commissions are paid out + // to their wallet. + // + // This approach has a few advantages: + // 1. It gives validators an easy opt-in feature to restake their commissions. This is useful for validators that want to maximize + // their staking rewards over the long run. Validators can opt out of it by disabling reward restaking on their own StakeEntry. + // 2. 
It simplifies the validator commission distribution code by re-using the same code path for distributing staking + // rewards. By requiring the validator to already have a StakeEntry for themselves if they want to restake their commissions, + // this approach allows us to avoid manually creating new StakeEntries for the validator specifically for restaking commissions. + // + // TODO: The downside of the above is that it couples the restaking behavior for validator commissions and the validator's own + // staking reward. This is fine though, because if the validator wants to restake their own rewards but not their commissions, then + // they can stake to themselves using a separate wallet and only enable reward restaking for that StakeEntry. + // + // If the above isn't desired the behavior, then we can alternatively always pay out validator's commission directly to their wallet. + return bav.distributeStakingReward(validatorPKID, validatorPKID, commissionNanos) +} + +const ( + _basisPoints = uint64(10000) // 1e4 + _nanoSecsPerYear = uint64(365) * 24 * 60 * 60 * 1e9 // 365 days * 24 hours * 60 minutes * 60 seconds * 1e9 nanoseconds +) + +var ( + _basisPointsAsInt = big.NewInt(int64(_basisPoints)) + _basisPointsAsFloat = NewFloat().SetUint64(_basisPoints) + _nanoSecsPerYearAsFloat = NewFloat().SetUint64(_nanoSecsPerYear) +) + +func convertBigFloatToBigInt(float *big.Float) *big.Int { + floatAsInt, _ := float.Int(nil) + return floatAsInt +} + +func convertAPYBasisPointsToFloat(apyBasisPoints uint64) *big.Float { + apyBasisPointsAsFloat := NewFloat().SetUint64(apyBasisPoints) + return apyBasisPointsAsFloat.Quo(apyBasisPointsAsFloat, _basisPointsAsFloat) +} + +func computeFractionOfYearAsFloat(nanoSecs uint64) *big.Float { + nanoSecsAsFloat := NewFloat().SetUint64(nanoSecs) + return nanoSecsAsFloat.Quo(nanoSecsAsFloat, _nanoSecsPerYearAsFloat) +} + +// computeStakingReward uses float math to compute the compound interest on the stake amounts based on the +// elapsed 
time since the last staking reward distribution and the APY. +// +// It produces the result for: stakeAmount * [e ^ (apy * elapsedTime / 1 year) - 1] +func computeStakingReward(stakeAmountNanos *uint256.Int, elapsedFractionOfYear *big.Float, apy *big.Float) *big.Float { + stakeAmountFloat := NewFloat().SetInt(stakeAmountNanos.ToBig()) + growthExponent := NewFloat().Mul(elapsedFractionOfYear, apy) // apy * elapsedTime / 1 year + growthMultiplier := BigFloatPow(bigE, growthExponent) // e ^ (apy * elapsedTime / 1 year) + finalStakeAmountNanos := NewFloat().Mul(stakeAmountFloat, growthMultiplier) // stakeAmount * [e ^ (apy * elapsedTime / 1 year)] + return finalStakeAmountNanos.Sub(finalStakeAmountNanos, stakeAmountFloat) // stakeAmount * [e ^ (apy * elapsedTime / 1 year) - 1] +} + +// computeValidatorCommission uses integer math to compute the validator's commission amount based on the staker's +// reward amount and the validator's commission rate. Wherever possible, we rely on integer math so that rounding +// errors are simpler to reason through. 
+// +// It produces the integer result for: floor[(stakerReward * validatorCommissionBasisPoints) / 1e4] +func computeValidatorCommission(stakerRewardNanos *big.Int, validatorCommissionBasisPoints uint64) *big.Int { + scaledStakerReward := big.NewInt(0).Mul(stakerRewardNanos, big.NewInt(int64(validatorCommissionBasisPoints))) + return scaledStakerReward.Div(scaledStakerReward, _basisPointsAsInt) +} From 66867aab65a2b57b5ad810da22ebaf480f67af7c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 31 Jul 2023 12:41:34 -0400 Subject: [PATCH 170/762] Cross validator size of validator set and leader schedule (#625) --- lib/block_view.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/block_view.go b/lib/block_view.go index 98debd288..89f10f7de 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3042,6 +3042,13 @@ func (bav *UtxoView) _connectUpdateGlobalParams( return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: unable to decode ValidatorSetMaxNumValidators as uint64") } } + + // Cross-validate the new LeaderScheduleMaxNumValidators and ValidatorSetMaxNumValidators values. The size of the + // leader schedule must be less than or equal to the size of the validator set. 
+ if newGlobalParamsEntry.ValidatorSetMaxNumValidators < newGlobalParamsEntry.LeaderScheduleMaxNumValidators { + return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: ValidatorSetMaxNumValidators must be >= LeaderScheduleMaxNumValidators") + } + if len(extraData[StakingRewardsMaxNumStakesKey]) > 0 { newGlobalParamsEntry.StakingRewardsMaxNumStakes, bytesRead = Uvarint(extraData[StakingRewardsMaxNumStakesKey]) if bytesRead <= 0 { From f9c0587de733cc20a49c04bd41d8633d72a24b86 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 1 Aug 2023 15:31:49 -0400 Subject: [PATCH 171/762] Create new rule error for leader schedule size validation (#626) * Create new rule error for leader schedule size validation * New name for rule error * Clean up naming * Clean up naming (#627) --- lib/block_view.go | 2 +- lib/errors.go | 33 +++++++++++++++++---------------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 89f10f7de..41e524f57 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3046,7 +3046,7 @@ func (bav *UtxoView) _connectUpdateGlobalParams( // Cross-validate the new LeaderScheduleMaxNumValidators and ValidatorSetMaxNumValidators values. The size of the // leader schedule must be less than or equal to the size of the validator set. 
if newGlobalParamsEntry.ValidatorSetMaxNumValidators < newGlobalParamsEntry.LeaderScheduleMaxNumValidators { - return 0, 0, nil, fmt.Errorf("_connectUpdateGlobalParams: ValidatorSetMaxNumValidators must be >= LeaderScheduleMaxNumValidators") + return 0, 0, nil, RuleErrorLeaderScheduleExceedsValidatorSetMaxNumValidators } if len(extraData[StakingRewardsMaxNumStakesKey]) > 0 { diff --git a/lib/errors.go b/lib/errors.go index 73a61aae9..70d33128f 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -208,22 +208,23 @@ const ( RuleErrorProfileForPostDoesNotExist RuleError = "RuleErrorProfileForPostDoesNotExist" // Global Params - RuleErrorExchangeRateTooLow RuleError = "RuleErrorExchangeRateTooLow" - RuleErrorExchangeRateTooHigh RuleError = "RuleErrorExchangeRateTooHigh" - RuleErrorMinNetworkFeeTooLow RuleError = "RuleErrorMinNetworkFeeTooLow" - RuleErrorMinNetworkFeeTooHigh RuleError = "RuleErrorMinNetworkFeeTooHigh" - RuleErrorCreateProfileFeeTooLow RuleError = "RuleErrorCreateProfileFeeTooLow" - RuleErrorCreateProfileTooHigh RuleError = "RuleErrorCreateProfileTooHigh" - RuleErrorCreateNFTFeeTooLow RuleError = "RuleErrorCreateNFTFeeTooLow" - RuleErrorCreateNFTFeeTooHigh RuleError = "RuleErrorCreateNFTFeeTooHigh" - RuleErrorMaxCopiesPerNFTTooLow RuleError = "RuleErrorMaxCopiesPerNFTTooLow" - RuleErrorMaxCopiesPerNFTTooHigh RuleError = "RuleErrorMaxCopiesPerNFTTooHigh" - RuleErrorMaxNonceExpirationBlockHeightOffsetTooLow RuleError = "RuleErrorMaxNonceExpirationBlockHeightOffsetTooLow" - RuleErrorMaxNonceExpirationBlockHeightOffsetTooHigh RuleError = "RuleErrorMaxNonceExpirationBlockHeightOffsetTooHigh" - RuleErrorForbiddenPubKeyLength RuleError = "RuleErrorForbiddenPubKeyLength" - RuleErrorUserNotAuthorizedToUpdateExchangeRate RuleError = "RuleErrorUserNotAuthorizedToUpdateExchangeRate" - RuleErrorUserNotAuthorizedToUpdateGlobalParams RuleError = "RuleErrorUserNotAuthorizedToUpdateGlobalParams" - RuleErrorUserOutputMustBeNonzero RuleError = 
"RuleErrorUserOutputMustBeNonzero" + RuleErrorExchangeRateTooLow RuleError = "RuleErrorExchangeRateTooLow" + RuleErrorExchangeRateTooHigh RuleError = "RuleErrorExchangeRateTooHigh" + RuleErrorMinNetworkFeeTooLow RuleError = "RuleErrorMinNetworkFeeTooLow" + RuleErrorMinNetworkFeeTooHigh RuleError = "RuleErrorMinNetworkFeeTooHigh" + RuleErrorCreateProfileFeeTooLow RuleError = "RuleErrorCreateProfileFeeTooLow" + RuleErrorCreateProfileTooHigh RuleError = "RuleErrorCreateProfileTooHigh" + RuleErrorCreateNFTFeeTooLow RuleError = "RuleErrorCreateNFTFeeTooLow" + RuleErrorCreateNFTFeeTooHigh RuleError = "RuleErrorCreateNFTFeeTooHigh" + RuleErrorMaxCopiesPerNFTTooLow RuleError = "RuleErrorMaxCopiesPerNFTTooLow" + RuleErrorMaxCopiesPerNFTTooHigh RuleError = "RuleErrorMaxCopiesPerNFTTooHigh" + RuleErrorMaxNonceExpirationBlockHeightOffsetTooLow RuleError = "RuleErrorMaxNonceExpirationBlockHeightOffsetTooLow" + RuleErrorMaxNonceExpirationBlockHeightOffsetTooHigh RuleError = "RuleErrorMaxNonceExpirationBlockHeightOffsetTooHigh" + RuleErrorForbiddenPubKeyLength RuleError = "RuleErrorForbiddenPubKeyLength" + RuleErrorUserNotAuthorizedToUpdateExchangeRate RuleError = "RuleErrorUserNotAuthorizedToUpdateExchangeRate" + RuleErrorUserNotAuthorizedToUpdateGlobalParams RuleError = "RuleErrorUserNotAuthorizedToUpdateGlobalParams" + RuleErrorUserOutputMustBeNonzero RuleError = "RuleErrorUserOutputMustBeNonzero" + RuleErrorLeaderScheduleExceedsValidatorSetMaxNumValidators RuleError = "RuleErrorLeaderScheduleExceedsValidatorSetMaxNumValidators" // DeSo Diamonds RuleErrorBasicTransferHasDiamondPostHashWithoutDiamondLevel RuleError = "RuleErrorBasicTransferHasDiamondPostHashWithoutDiamondLevel" From b3050dc0ff2a0de8c827d360313c8ad36334544c Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Wed, 2 Aug 2023 17:59:27 -0400 Subject: [PATCH 172/762] Add TstampNanoSecs to block header --- lib/constants.go | 5 +++++ lib/network.go | 19 ++++++++++++------- lib/network_test.go | 3 ++- 3 files changed, 19 
insertions(+), 8 deletions(-) diff --git a/lib/constants.go b/lib/constants.go index 7d5abb260..dfbc9f3ff 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -50,6 +50,11 @@ const ( NodeErase ) +// Time constants +const ( + NanoSecondsPerSecond = uint64(1000000000) +) + // Snapshot constants const ( // GetSnapshotTimeout is used in Peer when we fetch a snapshot chunk, and we need to retry. diff --git a/lib/network.go b/lib/network.go index 6954ecbee..451cac5c9 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1886,10 +1886,14 @@ type MsgDeSoHeader struct { // The merkle root of all the transactions contained within the block. TransactionMerkleRoot *BlockHash - // The unix timestamp (in seconds) specifying when this block was - // mined. + // The unix timestamp (in seconds) specifying when this block was produced. This + // field is deprecated after header beginning with MsgDeSoHeaderVersion2, but + // will temporarily remain populated for backwards compatibility. TstampSecs uint64 + // The unix timestamp (in nano-seconds) specifying when this block was produced. + TstampNanoSecs uint64 + // The height of the block this header corresponds to. Height uint64 @@ -2102,9 +2106,9 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error } retBytes = append(retBytes, transactionMerkleRoot[:]...) - // TstampSecs: this field can be encoded to take up to the full 64 bits now + // TstampNanosSecs: this field can be encoded to take up to the full 64 bits now // that MsgDeSoHeader version 2 does not need to be backwards compatible. - retBytes = append(retBytes, UintToBuf(msg.TstampSecs)...) + retBytes = append(retBytes, UintToBuf(msg.TstampNanoSecs)...) 
// Height: similar to the field above, this field can be encoded to take // up to the full 64 bits now that MsgDeSoHeader version 2 does not need to @@ -2305,11 +2309,12 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TransactionMerkleRoot") } - // TstampSecs - retHeader.TstampSecs, err = ReadUvarint(rr) + // TstampNanoSecs + retHeader.TstampNanoSecs, err = ReadUvarint(rr) if err != nil { - return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TstampSecs") + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TstampNanoSecs") } + retHeader.TstampSecs = retHeader.TstampNanoSecs / NanoSecondsPerSecond // Height retHeader.Height, err = ReadUvarint(rr) diff --git a/lib/network_test.go b/lib/network_test.go index f56b318ef..4008668ff 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -130,6 +130,7 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { PrevBlockHash: &testBlockHash, TransactionMerkleRoot: &testMerkleRoot, TstampSecs: uint64(1678943210), + TstampNanoSecs: uint64(1678943210) * NanoSecondsPerSecond, Height: uint64(1321012345), // Nonce and ExtraNonce are unused and set to 0 starting in version 2. Nonce: uint64(0), @@ -213,7 +214,7 @@ func TestHeaderConversionAndReadWriteMessage(t *testing.T) { assert.NoError(err) assert.Equal(hdrPayload, data) - assert.Equalf(13, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), + assert.Equalf(14, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), "Number of fields in HEADER message is different from expected. "+ "Did you add a new field? 
If so, make sure the serialization code "+ "works, add the new field to the test case, and fix this error.") From 6287c184afd18c76aa0307ab76fffff8ddd2e148 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Thu, 3 Aug 2023 15:31:49 -0400 Subject: [PATCH 173/762] Address Nina's feedback --- lib/block_producer.go | 15 ++++++++------- lib/block_view_bitcoin_test.go | 17 +++++++++-------- lib/blockchain.go | 19 ++++++++++--------- lib/blockchain_test.go | 23 ++++++++++++----------- lib/constants.go | 18 +++++++++++++----- lib/network.go | 29 ++++++++++++++++++----------- lib/network_test.go | 19 +++++++++---------- lib/postgres.go | 17 ++++++++++------- lib/server.go | 2 +- 9 files changed, 90 insertions(+), 69 deletions(-) diff --git a/lib/block_producer.go b/lib/block_producer.go index b4433124e..63f24cde5 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -3,13 +3,14 @@ package lib import ( "encoding/hex" "fmt" - "github.com/btcsuite/btcd/wire" - "github.com/tyler-smith/go-bip39" "math" "sync" "sync/atomic" "time" + "github.com/btcsuite/btcd/wire" + "github.com/tyler-smith/go-bip39" + "github.com/deso-protocol/go-deadlock" "github.com/btcsuite/btcd/btcec" @@ -114,10 +115,10 @@ func (desoBlockProducer *DeSoBlockProducer) _updateBlockTimestamp(blk *MsgDeSoBl // block's timestamp instead. We do this because consensus rules require a // monotonically increasing timestamp. 
blockTstamp := uint32(desoBlockProducer.chain.timeSource.AdjustedTime().Unix()) - if blockTstamp <= uint32(lastNode.Header.TstampSecs) { - blockTstamp = uint32(lastNode.Header.TstampSecs) + 1 + if blockTstamp <= uint32(lastNode.Header.GetTstampSecs()) { + blockTstamp = uint32(lastNode.Header.GetTstampSecs()) + 1 } - blk.Header.TstampSecs = uint64(blockTstamp) + blk.Header.SetTstampSecs(uint64(blockTstamp)) } func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) ( @@ -425,8 +426,8 @@ func (desoBlockProducer *DeSoBlockProducer) AddBlockTemplate(block *MsgDeSoBlock minTstamp := uint32(math.MaxUint32) var oldestBlockHash *BlockHash for _, cachedBlock := range desoBlockProducer.recentBlockTemplatesProduced { - if uint32(cachedBlock.Header.TstampSecs) < minTstamp { - minTstamp = uint32(cachedBlock.Header.TstampSecs) + if uint32(cachedBlock.Header.GetTstampSecs()) < minTstamp { + minTstamp = uint32(cachedBlock.Header.GetTstampSecs()) oldestBlockHash, _ = cachedBlock.Header.Hash() } } diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index 0fc04547b..6a422dec5 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -4,18 +4,19 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/btcsuite/btcd/btcec" - "github.com/btcsuite/btcd/wire" - merkletree "github.com/deso-protocol/go-merkle-tree" - "github.com/dgraph-io/badger/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "io/ioutil" "math/big" "os" "strconv" "strings" "testing" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/wire" + merkletree "github.com/deso-protocol/go-merkle-tree" + "github.com/dgraph-io/badger/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const ( @@ -58,8 +59,8 @@ func GetTestParamsCopy( big.NewInt(0), // We are bastardizing the DeSo header to store Bitcoin information here. 
&MsgDeSoHeader{ - TstampSecs: uint64(startHeader.Timestamp.Unix()), - Height: 0, + TstampNanoSecs: SecondsToNanoSeconds(uint64(startHeader.Timestamp.Unix())), + Height: 0, }, StatusBitcoinHeaderValidated, ) diff --git a/lib/blockchain.go b/lib/blockchain.go index 7c2586229..2c83da490 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5,7 +5,6 @@ import ( "container/list" "encoding/hex" "fmt" - "github.com/holiman/uint256" "math" "math/big" "reflect" @@ -14,6 +13,8 @@ import ( "strings" "time" + "github.com/holiman/uint256" + btcdchain "github.com/btcsuite/btcd/blockchain" chainlib "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec" @@ -278,7 +279,7 @@ func (nn *BlockNode) String() string { } tstamp := uint32(0) if nn.Header != nil { - tstamp = uint32(nn.Header.TstampSecs) + tstamp = uint32(nn.Header.GetTstampSecs()) } return fmt.Sprintf("< TstampSecs: %d, Height: %d, Hash: %s, ParentHash %s, Status: %s, CumWork: %v>", tstamp, nn.Header.Height, nn.Hash, parentHash, nn.Status, nn.CumWork) @@ -373,7 +374,7 @@ func CalcNextDifficultyTarget( firstNodeHeight, lastNode.Height) } - actualTimeDiffSecs := int64(lastNode.Header.TstampSecs - firstNode.Header.TstampSecs) + actualTimeDiffSecs := int64(lastNode.Header.GetTstampSecs() - firstNode.Header.GetTstampSecs()) clippedTimeDiffSecs := actualTimeDiffSecs if actualTimeDiffSecs < minRetargetTimeSecs { clippedTimeDiffSecs = minRetargetTimeSecs @@ -1063,7 +1064,7 @@ func (bc *Blockchain) isTipCurrent(tip *BlockNode) bool { // Not current if the tip has a timestamp older than the maximum // tip age. - tipTime := time.Unix(int64(tip.Header.TstampSecs), 0) + tipTime := time.Unix(int64(tip.Header.GetTstampSecs()), 0) oldestAllowedTipTime := bc.timeSource.AdjustedTime().Add(-1 * bc.params.MaxTipAge) return !tipTime.Before(oldestAllowedTipTime) @@ -1618,11 +1619,11 @@ func (bc *Blockchain) processHeader(blockHeader *MsgDeSoHeader, headerHash *Bloc // seen before. 
// Reject the header if it is more than N seconds in the future. - tstampDiff := int64(blockHeader.TstampSecs) - bc.timeSource.AdjustedTime().Unix() + tstampDiff := int64(blockHeader.GetTstampSecs()) - bc.timeSource.AdjustedTime().Unix() if tstampDiff > int64(bc.params.MaxTstampOffsetSeconds) { glog.V(1).Infof("HeaderErrorBlockTooFarInTheFuture: tstampDiff %d > "+ "MaxTstampOffsetSeconds %d. blockHeader.TstampSecs=%d; adjustedTime=%d", - tstampDiff, bc.params.MaxTstampOffsetSeconds, blockHeader.TstampSecs, + tstampDiff, bc.params.MaxTstampOffsetSeconds, blockHeader.GetTstampSecs(), bc.timeSource.AdjustedTime().Unix()) return false, false, HeaderErrorBlockTooFarInTheFuture } @@ -1684,11 +1685,11 @@ func (bc *Blockchain) processHeader(blockHeader *MsgDeSoHeader, headerHash *Bloc // TODO: Consider a per-block difficulty adjustment scheme like Ethereum has. // This commentary is useful to consider with regard to that: // https://github.com/zawy12/difficulty-algorithms/issues/45 - if blockHeader.TstampSecs <= parentHeader.TstampSecs { + if blockHeader.GetTstampSecs() <= parentHeader.GetTstampSecs() { glog.Warningf("processHeader: Rejecting header because timestamp %v is "+ "before timestamp of previous block %v", - time.Unix(int64(blockHeader.TstampSecs), 0), - time.Unix(int64(parentHeader.TstampSecs), 0)) + time.Unix(int64(blockHeader.GetTstampSecs()), 0), + time.Unix(int64(parentHeader.GetTstampSecs()), 0)) return false, false, HeaderErrorTimestampTooEarly } diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index c3d182898..c0589a715 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -4,8 +4,6 @@ import ( "encoding/hex" "flag" "fmt" - embeddedpostgres "github.com/fergusstrange/embedded-postgres" - "github.com/go-pg/pg/v10" "log" "math/big" "math/rand" @@ -14,6 +12,9 @@ import ( "testing" "time" + embeddedpostgres "github.com/fergusstrange/embedded-postgres" + "github.com/go-pg/pg/v10" + chainlib "github.com/btcsuite/btcd/blockchain" 
"github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v3" @@ -286,7 +287,7 @@ func NewTestParams(inputParams *DeSoParams) DeSoParams { Version: 0, PrevBlockHash: mustDecodeHexBlockHash("0000000000000000000000000000000000000000000000000000000000000000"), TransactionMerkleRoot: mustDecodeHexBlockHash("097158f0d27e6d10565c4dc696c784652c3380e0ff8382d3599a4d18b782e965"), - TstampSecs: uint64(1560735050), + TstampNanoSecs: SecondsToNanoSeconds(1560735050), Height: uint64(0), Nonce: uint64(0), // No ExtraNonce is set in the genesis block @@ -1240,7 +1241,7 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 1 second, which is 2x too fast. - TstampSecs: uint64(ii), + TstampNanoSecs: SecondsToNanoSeconds(uint64(ii)), }, StatusNone, )) @@ -1277,7 +1278,7 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 4 second, which is 2x too slow. - TstampSecs: uint64(ii * 4), + TstampNanoSecs: SecondsToNanoSeconds(uint64(ii * 4)), }, StatusNone, )) @@ -1336,7 +1337,7 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 1 second, which is 2x too fast. - TstampSecs: uint64(ii), + TstampNanoSecs: SecondsToNanoSeconds(uint64(ii)), }, StatusNone, )) @@ -1373,7 +1374,7 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 8 second, which is >2x too slow. - TstampSecs: uint64(ii * 4), + TstampNanoSecs: SecondsToNanoSeconds(uint64(ii * 4)), }, StatusNone, )) @@ -1432,7 +1433,7 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating all at once. 
- TstampSecs: uint64(0), + TstampNanoSecs: SecondsToNanoSeconds(0), }, StatusNone, )) @@ -1487,7 +1488,7 @@ func TestCalcNextDifficultyTargetJustRight(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 2 second, which is under the limit. - TstampSecs: uint64(ii * 2), + TstampNanoSecs: SecondsToNanoSeconds(uint64(ii * 2)), }, StatusNone, )) @@ -1542,7 +1543,7 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 1 second, which is 2x too fast. - TstampSecs: uint64(ii), + TstampNanoSecs: SecondsToNanoSeconds(uint64(ii)), }, StatusNone, )) @@ -1579,7 +1580,7 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 3 seconds, which is slow but under the limit. - TstampSecs: uint64(float32(ii) * 3), + TstampNanoSecs: SecondsToNanoSeconds(uint64(float32(ii) * 3)), }, StatusNone, )) diff --git a/lib/constants.go b/lib/constants.go index dfbc9f3ff..b1551af74 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -55,6 +55,14 @@ const ( NanoSecondsPerSecond = uint64(1000000000) ) +func SecondsToNanoSeconds(secs uint64) uint64 { + return secs * NanoSecondsPerSecond +} + +func NanoSecondsToSeconds(nanos uint64) uint64 { + return nanos / NanoSecondsPerSecond +} + // Snapshot constants const ( // GetSnapshotTimeout is used in Peer when we fetch a snapshot chunk, and we need to retry. @@ -767,7 +775,7 @@ var ( Version: 0, PrevBlockHash: &BlockHash{}, TransactionMerkleRoot: mustDecodeHexBlockHash("4b71d103dd6fff1bd6110bc8ed0a2f3118bbe29a67e45c6c7d97546ad126906f"), - TstampSecs: uint64(1610948544), + TstampNanoSecs: SecondsToNanoSeconds(1610948544), Height: uint64(0), Nonce: uint64(0), }, @@ -951,8 +959,8 @@ var DeSoMainnetParams = DeSoParams{ big.NewInt(0), // We are bastardizing the DeSo header to store Bitcoin information here. 
&MsgDeSoHeader{ - TstampSecs: 1602950620, - Height: 0, + TstampNanoSecs: SecondsToNanoSeconds(1602950620), + Height: 0, }, StatusBitcoinHeaderValidated, ), @@ -1199,8 +1207,8 @@ var DeSoTestnetParams = DeSoParams{ big.NewInt(0), // We are bastardizing the DeSo header to store Bitcoin information here. &MsgDeSoHeader{ - TstampSecs: 1607659152, - Height: 0, + TstampNanoSecs: SecondsToNanoSeconds(1607659152), + Height: 0, }, StatusBitcoinHeaderValidated, ), diff --git a/lib/network.go b/lib/network.go index 451cac5c9..1c977f569 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1886,12 +1886,12 @@ type MsgDeSoHeader struct { // The merkle root of all the transactions contained within the block. TransactionMerkleRoot *BlockHash - // The unix timestamp (in seconds) specifying when this block was produced. This - // field is deprecated after header beginning with MsgDeSoHeaderVersion2, but - // will temporarily remain populated for backwards compatibility. - TstampSecs uint64 + // The original TstampSecs struct field is deprecated and replaced by the higher resolution + // TstampNanoSecs field. The deprecation is backwards compatible for all existing header + // versions and byte encodings. To read or write timestamps with the old 1-second resolution, + // use the SetTstampSecs() and GetTstampSecs() public methods. - // The unix timestamp (in nano-seconds) specifying when this block was produced. + // The unix timestamp (in nanoseconds) specifying when this block was produced. TstampNanoSecs uint64 // The height of the block this header corresponds to. 
@@ -1962,6 +1962,14 @@ func HeaderSizeBytes() int { return len(headerBytes) } +func (msg *MsgDeSoHeader) SetTstampSecs(tstampSecs uint64) { + msg.TstampNanoSecs = SecondsToNanoSeconds(tstampSecs) +} + +func (msg *MsgDeSoHeader) GetTstampSecs() uint64 { + return NanoSecondsToSeconds(msg.TstampNanoSecs) +} + func (msg *MsgDeSoHeader) EncodeHeaderVersion0(preSignature bool) ([]byte, error) { retBytes := []byte{} @@ -1989,7 +1997,7 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion0(preSignature bool) ([]byte, error // TstampSecs { scratchBytes := [4]byte{} - binary.LittleEndian.PutUint32(scratchBytes[:], uint32(msg.TstampSecs)) + binary.LittleEndian.PutUint32(scratchBytes[:], uint32(msg.GetTstampSecs())) retBytes = append(retBytes, scratchBytes[:]...) } @@ -2038,13 +2046,13 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion1(preSignature bool) ([]byte, error // TstampSecs { scratchBytes := [8]byte{} - binary.BigEndian.PutUint64(scratchBytes[:], msg.TstampSecs) + binary.BigEndian.PutUint64(scratchBytes[:], msg.GetTstampSecs()) retBytes = append(retBytes, scratchBytes[:]...) // TODO: Don't allow this field to exceed 32-bits for now. This will // adjust once other parts of the code are fixed to handle the wider // type. - if msg.TstampSecs > math.MaxUint32 { + if msg.GetTstampSecs() > math.MaxUint32 { return nil, fmt.Errorf("EncodeHeaderVersion1: TstampSecs not yet allowed " + "to exceed max uint32. 
This will be fixed in the future") } @@ -2210,7 +2218,7 @@ func DecodeHeaderVersion0(rr io.Reader) (*MsgDeSoHeader, error) { if err != nil { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TstampSecs") } - retHeader.TstampSecs = uint64(binary.LittleEndian.Uint32(scratchBytes[:])) + retHeader.SetTstampSecs(uint64(binary.LittleEndian.Uint32(scratchBytes[:]))) } // Height @@ -2258,7 +2266,7 @@ func DecodeHeaderVersion1(rr io.Reader) (*MsgDeSoHeader, error) { if err != nil { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TstampSecs") } - retHeader.TstampSecs = binary.BigEndian.Uint64(scratchBytes[:]) + retHeader.SetTstampSecs(binary.BigEndian.Uint64(scratchBytes[:])) } // Height @@ -2314,7 +2322,6 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { if err != nil { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TstampNanoSecs") } - retHeader.TstampSecs = retHeader.TstampNanoSecs / NanoSecondsPerSecond // Height retHeader.Height, err = ReadUvarint(rr) diff --git a/lib/network_test.go b/lib/network_test.go index 4008668ff..900ef2c92 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -100,10 +100,10 @@ var expectedBlockHeaderVersion1 = &MsgDeSoHeader{ 0x64, 0x65, }, // Use full uint64 values to make sure serialization and de-serialization work - TstampSecs: uint64(1678943210), - Height: uint64(1321012345), - Nonce: uint64(12345678901234), - ExtraNonce: uint64(101234123456789), + TstampNanoSecs: SecondsToNanoSeconds(1678943210), + Height: uint64(1321012345), + Nonce: uint64(12345678901234), + ExtraNonce: uint64(101234123456789), } // Creates fully formatted a PoS block header with random signatures @@ -129,8 +129,7 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { Version: 2, PrevBlockHash: &testBlockHash, TransactionMerkleRoot: &testMerkleRoot, - TstampSecs: uint64(1678943210), - TstampNanoSecs: uint64(1678943210) * NanoSecondsPerSecond, + 
TstampNanoSecs: SecondsToNanoSeconds(1678943210), Height: uint64(1321012345), // Nonce and ExtraNonce are unused and set to 0 starting in version 2. Nonce: uint64(0), @@ -214,7 +213,7 @@ func TestHeaderConversionAndReadWriteMessage(t *testing.T) { assert.NoError(err) assert.Equal(hdrPayload, data) - assert.Equalf(14, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), + assert.Equalf(13, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), "Number of fields in HEADER message is different from expected. "+ "Did you add a new field? If so, make sure the serialization code "+ "works, add the new field to the test case, and fix this error.") @@ -539,9 +538,9 @@ var expectedV0Header = &MsgDeSoHeader{ 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, }, - TstampSecs: uint64(0x70717273), - Height: uint64(99999), - Nonce: uint64(123456), + TstampNanoSecs: SecondsToNanoSeconds(0x70717273), + Height: uint64(99999), + Nonce: uint64(123456), } func TestBlockSerialize(t *testing.T) { diff --git a/lib/postgres.go b/lib/postgres.go index e77faf7e7..517161553 100644 --- a/lib/postgres.go +++ b/lib/postgres.go @@ -6,14 +6,15 @@ import ( "encoding/hex" "encoding/json" "fmt" + "net/url" + "regexp" + "strings" + "github.com/dgraph-io/badger/v3" "github.com/go-pg/pg/v10" "github.com/go-pg/pg/v10/orm" "github.com/golang/glog" "github.com/holiman/uint256" - "net/url" - "regexp" - "strings" ) type Postgres struct { @@ -1288,9 +1289,11 @@ func (postgres *Postgres) UpsertBlockTx(tx *pg.Tx, blockNode *BlockNode) error { TxMerkleRoot: blockNode.Header.TransactionMerkleRoot, Version: blockNode.Header.Version, - Timestamp: blockNode.Header.TstampSecs, - Nonce: blockNode.Header.Nonce, - ExtraNonce: blockNode.Header.ExtraNonce, + // TODO: Does it matter that the postgres block timestamps are now in nanoseconds? + // Postgres in its current form should be deprecated by the time this matters. 
+ Timestamp: blockNode.Header.TstampNanoSecs, + Nonce: blockNode.Header.Nonce, + ExtraNonce: blockNode.Header.ExtraNonce, } // The genesis block has a nil parent @@ -1321,7 +1324,7 @@ func (postgres *Postgres) GetBlockIndex() (map[BlockHash]*BlockNode, error) { Version: block.Version, PrevBlockHash: block.ParentHash, TransactionMerkleRoot: block.TxMerkleRoot, - TstampSecs: block.Timestamp, + TstampNanoSecs: block.Timestamp, Height: block.Height, Nonce: block.Nonce, ExtraNonce: block.ExtraNonce, diff --git a/lib/server.go b/lib/server.go index ade962f21..57a79291e 100644 --- a/lib/server.go +++ b/lib/server.go @@ -858,7 +858,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { glog.V(1).Infof("Server._handleHeaderBundle: Disconnecting from peer %v because "+ "we have exhausted their headers but our tip is still only "+ "at time=%v height=%d", pp, - time.Unix(int64(srv.blockchain.headerTip().Header.TstampSecs), 0), + time.Unix(int64(srv.blockchain.headerTip().Header.GetTstampSecs()), 0), srv.blockchain.headerTip().Header.Height) pp.Disconnect() return From c3b133ae0a9ee64dd4a23629664a102c32d46771 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Thu, 3 Aug 2023 15:45:52 -0400 Subject: [PATCH 174/762] Update postgres timestamp to nanosecond resolution --- lib/postgres.go | 8 +++--- ...20230803154200_scale_timestamp_to_nanos.go | 25 +++++++++++++++++++ 2 files changed, 28 insertions(+), 5 deletions(-) create mode 100644 migrate/20230803154200_scale_timestamp_to_nanos.go diff --git a/lib/postgres.go b/lib/postgres.go index 517161553..1b38f820f 100644 --- a/lib/postgres.go +++ b/lib/postgres.go @@ -1289,11 +1289,9 @@ func (postgres *Postgres) UpsertBlockTx(tx *pg.Tx, blockNode *BlockNode) error { TxMerkleRoot: blockNode.Header.TransactionMerkleRoot, Version: blockNode.Header.Version, - // TODO: Does it matter that the postgres block timestamps are now in nanoseconds? 
- // Postgres in its current form should be deprecated by the time this matters. - Timestamp: blockNode.Header.TstampNanoSecs, - Nonce: blockNode.Header.Nonce, - ExtraNonce: blockNode.Header.ExtraNonce, + Timestamp: blockNode.Header.TstampNanoSecs, + Nonce: blockNode.Header.Nonce, + ExtraNonce: blockNode.Header.ExtraNonce, } // The genesis block has a nil parent diff --git a/migrate/20230803154200_scale_timestamp_to_nanos.go b/migrate/20230803154200_scale_timestamp_to_nanos.go new file mode 100644 index 000000000..b3ca39620 --- /dev/null +++ b/migrate/20230803154200_scale_timestamp_to_nanos.go @@ -0,0 +1,25 @@ +package migrate + +import ( + "github.com/go-pg/pg/v10/orm" + migrations "github.com/robinjoseph08/go-pg-migrations/v3" +) + +func init() { + up := func(db orm.DB) error { + _, err := db.Exec(` + UPDATE pg_blocks SET timestamp = timestamp * 1000000000; + `) + return err + } + + down := func(db orm.DB) error { + _, err := db.Exec(` + UPDATE pg_blocks SET timestamp = timestamp / 1000000000; + `) + return err + } + + opts := migrations.MigrationOptions{} + migrations.Register("20230803154200_scale_timestamp_to_nanos", up, down, opts) +} From 2427b9e780d9a6b62c233903d656d4276c11e725 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Wed, 2 Aug 2023 18:33:03 -0400 Subject: [PATCH 175/762] Add ProposerRandomSeedHash to block header --- lib/network.go | 18 ++++++++++++++++++ lib/network_test.go | 9 ++++++++- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/lib/network.go b/lib/network.go index 1c977f569..f38fbbc31 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1926,6 +1926,12 @@ type MsgDeSoHeader struct { // The BLS public key of the validator who proposed this block. ProposerVotingPublicKey *bls.PublicKey + // ProposerRandomSeedHash is only used for Proof of Stake blocks, starting with + // MsgDeSoHeader version 2. For all earlier versions, this field will default to nil. 
+ // + // The current block's randomness seed provided by the block's proposer. + ProposerRandomSeedHash *RandomSeedHash + // ProposedInView is only used for Proof of Stake blocks, starting with MsgDeSoHeader // version 2. For all earlier versions, this field will default to nil. // @@ -2138,6 +2144,12 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error } retBytes = append(retBytes, EncodeBLSPublicKey(msg.ProposerVotingPublicKey)...) + // ProposerRandomSeedHash + if msg.ProposerRandomSeedHash == nil { + return nil, fmt.Errorf("EncodeHeaderVersion2: ProposerRandomSeedHash must be non-nil") + } + retBytes = append(retBytes, EncodeRandomSeedHash(msg.ProposerRandomSeedHash)...) + // ProposedInView retBytes = append(retBytes, UintToBuf(msg.ProposedInView)...) @@ -2346,6 +2358,12 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposerVotingPublicKey") } + // ProposerRandomSeedHash + retHeader.ProposerRandomSeedHash, err = DecodeRandomSeedHash(rr) + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposerRandomSeedHash") + } + // ProposedInView retHeader.ProposedInView, err = ReadUvarint(rr) if err != nil { diff --git a/lib/network_test.go b/lib/network_test.go index 900ef2c92..73d11683c 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -121,6 +121,12 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, } + testRandomSeedHash := RandomSeedHash{ + 0x00, 0x36, 0x36, 0x37, 0x38, 0x39, 0x40, 0x41, 0x42, 0x43, + 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x50, 0x51, 0x52, 0x53, + 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x60, 0x61, 0x62, 0x63, + 0x64, 0x65, + } testBitset := bitset.NewBitset().Set(0, true).Set(3, true) testBLSPublicKey, testBLSSignature := _generateValidatorVotingPublicKeyAndSignature(t) @@ -136,6 +142,7 @@ 
func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { ExtraNonce: uint64(0), ProposerPublicKey: NewPublicKey(pkForTesting1), ProposerVotingPublicKey: testBLSPublicKey, + ProposerRandomSeedHash: &testRandomSeedHash, ProposedInView: uint64(1432101234), // Use real signatures and public keys for the PoS fields ValidatorsVoteQC: &QuorumCertificate{ @@ -213,7 +220,7 @@ func TestHeaderConversionAndReadWriteMessage(t *testing.T) { assert.NoError(err) assert.Equal(hdrPayload, data) - assert.Equalf(13, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), + assert.Equalf(14, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), "Number of fields in HEADER message is different from expected. "+ "Did you add a new field? If so, make sure the serialization code "+ "works, add the new field to the test case, and fix this error.") From 18268a2de747d77b84ee8f67dd6b3e0a085eace1 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 7 Aug 2023 13:58:24 -0700 Subject: [PATCH 176/762] PoS Database Interface (#601) * Revert "Split transaction validation" This reverts commit 0d402aeb6fff9cfd460bc239dde2948bac678fe9. 
* Split Database * Better interface; better tests * Remove BoltDB * Remove BoltDB id * Documentation * Remove nested context * gofmt * Use Storage for BadgerDB Params * gofmt again * Storage nit * Fix docker * iterator fix * Remove context and simplify tests * Review * Update dockerfile * Remove all --- lib/db_entry.go | 75 +++++++++++++++++++++++++++++++++++++++++++++++++ lib/snapshot.go | 65 ------------------------------------------ 2 files changed, 75 insertions(+), 65 deletions(-) create mode 100644 lib/db_entry.go diff --git a/lib/db_entry.go b/lib/db_entry.go new file mode 100644 index 000000000..b42278300 --- /dev/null +++ b/lib/db_entry.go @@ -0,0 +1,75 @@ +package lib + +import "bytes" + +// ------------------------------------------------------------------------------------- +// DBEntry +// ------------------------------------------------------------------------------------- + +// DBEntry is used to represent a database record. It's more convenient than passing +// everywhere. +type DBEntry struct { + Key []byte + Value []byte +} + +func NewDBEntry(key []byte, value []byte) *DBEntry { + return &DBEntry{ + Key: key, + Value: value, + } +} + +func (entry *DBEntry) ToBytes() []byte { + data := []byte{} + + data = append(data, EncodeByteArray(entry.Key)...) + data = append(data, EncodeByteArray(entry.Value)...) + return data +} + +func (entry *DBEntry) FromBytes(rr *bytes.Reader) error { + var err error + + // Decode key. + entry.Key, err = DecodeByteArray(rr) + if err != nil { + return err + } + + // Decode value. + entry.Value, err = DecodeByteArray(rr) + if err != nil { + return err + } + + return nil +} + +// KeyValueToDBEntry is used to instantiate db entry from a pair. +func KeyValueToDBEntry(key []byte, value []byte) *DBEntry { + dbEntry := &DBEntry{} + // Encode the key. + dbEntry.Key = make([]byte, len(key)) + copy(dbEntry.Key, key) + + // Encode the value. 
+ dbEntry.Value = make([]byte, len(value)) + copy(dbEntry.Value, value) + + return dbEntry +} + +// EmptyDBEntry indicates an empty DB entry. It's used for convenience. +func EmptyDBEntry() *DBEntry { + // We do not use prefix 0 for state so we can use it in the empty DBEntry. + return &DBEntry{ + Key: []byte{0}, + Value: []byte{}, + } +} + +// IsEmpty return true if the DBEntry is empty, false otherwise. +func (entry *DBEntry) IsEmpty() bool { + return bytes.Equal(entry.Key, []byte{0}) +} diff --git a/lib/snapshot.go b/lib/snapshot.go index 0e01231cf..628e2dbab 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -1642,71 +1642,6 @@ func (sc *StateChecksum) FromBytes(checksumBytes []byte) error { return nil } -// ------------------------------------------------------------------------------------- -// DBEntry -// ------------------------------------------------------------------------------------- - -// DBEntry is used to represent a database record. It's more convenient than passing -// everywhere. -type DBEntry struct { - Key []byte - Value []byte -} - -func (entry *DBEntry) ToBytes() []byte { - data := []byte{} - - data = append(data, EncodeByteArray(entry.Key)...) - data = append(data, EncodeByteArray(entry.Value)...) - return data -} - -func (entry *DBEntry) FromBytes(rr *bytes.Reader) error { - var err error - - // Decode key. - entry.Key, err = DecodeByteArray(rr) - if err != nil { - return err - } - - // Decode value. - entry.Value, err = DecodeByteArray(rr) - if err != nil { - return err - } - - return nil -} - -// KeyValueToDBEntry is used to instantiate db entry from a pair. -func KeyValueToDBEntry(key []byte, value []byte) *DBEntry { - dbEntry := &DBEntry{} - // Encode the key. - dbEntry.Key = make([]byte, len(key)) - copy(dbEntry.Key, key) - - // Encode the value. - dbEntry.Value = make([]byte, len(value)) - copy(dbEntry.Value, value) - - return dbEntry -} - -// EmptyDBEntry indicates an empty DB entry. It's used for convenience. 
-func EmptyDBEntry() *DBEntry { - // We do not use prefix 0 for state so we can use it in the empty DBEntry. - return &DBEntry{ - Key: []byte{0}, - Value: []byte{}, - } -} - -// IsEmpty return true if the DBEntry is empty, false otherwise. -func (entry *DBEntry) IsEmpty() bool { - return bytes.Equal(entry.Key, []byte{0}) -} - // ------------------------------------------------------------------------------------- // SnapshotEpochMetadata // ------------------------------------------------------------------------------------- From eecec612e4966e29432c545cc21b45dbf8f97278 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 7 Aug 2023 23:16:27 -0700 Subject: [PATCH 177/762] PoS Mempool Persister (#602) * Revert "Split Database" This reverts commit 07e9f4d0119ed13692793eddd439dcf43d4fd64f. * Mempool persister & mempool transaction split * Revert "Mempool persister & mempool transaction split" This reverts commit 8041631db55344621a27d8f9a583ffe298d93eaa. * Revert "Revert "Mempool persister & mempool transaction split"" This reverts commit 1989880b7ac6290b25f5704a8d57cdfdc529f012. 
* Storage fix * Add tests * revert db changes * fix * Fix status transition * Comments and nits * Fix badger and review * Database update; remove event handler --- lib/errors.go | 3 + lib/mempool.go | 36 ---- lib/pos_mempool_persister.go | 245 +++++++++++++++++++++++++++ lib/pos_mempool_persister_test.go | 181 ++++++++++++++++++++ lib/pos_mempool_transaction.go | 124 ++++++++++++++ lib/pos_transaction_register_test.go | 19 ++- 6 files changed, 571 insertions(+), 37 deletions(-) create mode 100644 lib/pos_mempool_persister.go create mode 100644 lib/pos_mempool_persister_test.go create mode 100644 lib/pos_mempool_transaction.go diff --git a/lib/errors.go b/lib/errors.go index 5a9a2a773..434bbd7ba 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -501,6 +501,9 @@ const ( TxErrorNonceExpired RuleError = "TxErrorNonceExpired" TxErrorNonceExpirationBlockHeightOffsetExceeded RuleError = "TxErrorNonceExpirationBlockHeightOffsetExceeded" TxErrorNoNonceAfterBalanceModelBlockHeight RuleError = "TxErrorNoNonceAfterBalanceModelBlockHeight" + + // Mempool + MempoolErrorNotRunning RuleError = "MempoolErrorNotRunning" ) func (e RuleError) Error() string { diff --git a/lib/mempool.go b/lib/mempool.go index 748b5977c..69687b2e1 100644 --- a/lib/mempool.go +++ b/lib/mempool.go @@ -61,38 +61,6 @@ var ( LowFeeTxLimitBytesPerTenMinutes = 150000 // Allow 150KB per minute in low-fee txns. ) -// MempoolTx contains a transaction along with additional metadata like the -// fee and time added. -type MempoolTx struct { - Tx *MsgDeSoTxn - - // TxMeta is the transaction metadata - TxMeta *TransactionMetadata - - // Hash is a hash of the transaction so we don't have to recompute - // it all the time. - Hash *BlockHash - - // TxSizeBytes is the cached size of the transaction. - TxSizeBytes uint64 - - // The time when the txn was added to the pool - Added time.Time - - // The block height when the txn was added to the pool. It's generally set - // to tip+1. 
- Height uint32 - - // The total fee the txn pays. Cached for efficiency reasons. - Fee uint64 - - // The fee rate of the transaction in nanos per KB. - FeePerKB uint64 - - // index is used by the heap logic to allow for modification in-place. - index int -} - // Summary stats for a set of transactions of a specific type in the mempool. type SummaryStats struct { // Number of transactions of this type in the mempool. @@ -102,10 +70,6 @@ type SummaryStats struct { TotalBytes uint64 } -func (mempoolTx *MempoolTx) String() string { - return fmt.Sprintf("< Added: %v, index: %d, Fee: %d, Type: %v, Hash: %v", mempoolTx.Added, mempoolTx.index, mempoolTx.Fee, mempoolTx.Tx.TxnMeta.GetTxnType(), mempoolTx.Hash) -} - // MempoolTxFeeMinHeap is a priority queue based on transaction fee rate type MempoolTxFeeMinHeap []*MempoolTx diff --git a/lib/pos_mempool_persister.go b/lib/pos_mempool_persister.go new file mode 100644 index 000000000..33434226b --- /dev/null +++ b/lib/pos_mempool_persister.go @@ -0,0 +1,245 @@ +package lib + +import ( + "bytes" + "github.com/dgraph-io/badger/v3" + "github.com/golang/glog" + "github.com/pkg/errors" + "sync" + "time" +) + +const ( + DbMempoolContextId = "transactions" + eventQueueSize = 10000 +) + +type MempoolEventType int + +const ( + MempoolEventAdd MempoolEventType = iota + MempoolEventRemove + MempoolEventExit +) + +type MempoolPersisterStatus int + +const ( + MempoolPersisterStatusRunning MempoolPersisterStatus = iota + MempoolPersisterStatusNotRunning +) + +type MempoolEvent struct { + Txn *MempoolTx + Type MempoolEventType +} + +// MempoolPersister is responsible for persisting transactions in the mempool to the database. Whenever a transaction is +// added or removed from the mempool, the MempoolPersister is notified via the EnqueueEvent callback. The Persister +// will then add the event to a queue. Periodically, the transaction queue is flushed to the database and all the cached +// transactions are persisted. 
To achieve this, the persister runs its own goroutine. +type MempoolPersister struct { + sync.Mutex + status MempoolPersisterStatus + + // db is the database that the persister will write transactions to. + db *badger.DB + + // stopGroup and startGroup are used to manage the synchronization of the run loop. + stopGroup sync.WaitGroup + startGroup sync.WaitGroup + + // batchPersistFrequencyMilliseconds is the time frequency at which the persister will flush the transaction queue to the database. + batchPersistFrequencyMilliseconds int + // eventQueue is used to queue up transactions to be persisted. The queue receives events from the EnqueueEvent, + // which is called whenever a transaction is added or removed from the mempool. + eventQueue chan *MempoolEvent + // updateBatch is used to cache transactions that need to be persisted to the database. The batch is flushed to the + // database periodically based on the batchPersistFrequencyMilliseconds. + updateBatch []*MempoolEvent +} + +func NewMempoolPersister(db *badger.DB, batchPersistFrequencyMilliseconds int) *MempoolPersister { + return &MempoolPersister{ + batchPersistFrequencyMilliseconds: batchPersistFrequencyMilliseconds, + status: MempoolPersisterStatusNotRunning, + db: db, + eventQueue: make(chan *MempoolEvent, eventQueueSize), + } +} + +// Start is the entry point for the MempoolPersister. It starts the run loop and begins persisting transactions to the database. +func (mp *MempoolPersister) Start() { + if mp.status == MempoolPersisterStatusRunning { + return + } + + // Make sure the persister is cleared before starting. + mp.reset() + // We use syncGroups to synchronize the persister thread. The stop group is used to wait for the persister to stop + // the run loop. The start group is used to wait for the persister goroutine allocation. Note that the startGroup is + // used to ensure that the run() goroutine has been allocated a thread before returning from Start(). 
+ mp.stopGroup.Add(1) + mp.startGroup.Add(1) + // Run the persister in a goroutine. + go mp.run() + // Wait for the persister goroutine to start. + mp.startGroup.Wait() + mp.status = MempoolPersisterStatusRunning +} + +// run is the main even loop for the MempoolPersister thread. +func (mp *MempoolPersister) run() { + mp.startGroup.Done() + // The run loop will run until a MempoolEventExit is received. The main actions of the run loop are to either + // add a new transaction event to the updateBatch or to flush the updateBatch to the database. + for { + select { + case event := <-mp.eventQueue: + switch event.Type { + case MempoolEventAdd, MempoolEventRemove: + mp.Lock() + mp.updateBatch = append(mp.updateBatch, event) + mp.Unlock() + + case MempoolEventExit: + close(mp.eventQueue) + mp.stopGroup.Done() + return + } + continue + + case <-time.After(time.Duration(mp.batchPersistFrequencyMilliseconds) * time.Millisecond): + if err := mp.persistBatch(); err != nil { + glog.Errorf("MempoolPersister: Error persisting batch: %v", err) + } + continue + } + } +} + +// Stop is used to stop the persister thread and reset the persister state. It will wait for the persister thread to +// flush the outstanding updateBatch to the database before returning. +func (mp *MempoolPersister) Stop() error { + if mp.status == MempoolPersisterStatusNotRunning { + return nil + } + // Enqueue the exit event and wait for the persister thread to stop. + mp.eventQueue <- &MempoolEvent{Type: MempoolEventExit} + mp.stopGroup.Wait() + // Persist any outstanding transactions. + if err := mp.persistBatch(); err != nil { + return errors.Wrapf(err, "MempoolPersister: Error persisting batch") + } + // Reset the persister state. + mp.reset() + mp.status = MempoolPersisterStatusNotRunning + return nil +} + +// persistBatch is used to flush the updateBatch to the database. It will iterate through the updateBatch and add or remove +// transactions from the database based on the event type. 
Error is returned if the persister is not running or if there +// is an error persisting the batch. +func (mp *MempoolPersister) persistBatch() error { + if mp.status == MempoolPersisterStatusNotRunning { + return nil + } + + mp.Lock() + defer mp.Unlock() + + // If there are no transactions to persist, return. + if len(mp.updateBatch) == 0 { + return nil + } + + wb := mp.db.NewWriteBatch() + defer wb.Cancel() + + for _, event := range mp.updateBatch { + if event.Txn == nil || event.Txn.Hash == nil { + continue + } + // The transactions are stored in a KV database. The key is the transaction hash and the value is the + // serialized transaction. + key := event.Txn.Hash.ToBytes() + value, err := event.Txn.ToBytes() + if err != nil { + continue + } + + // Set or delete a record based on the event type. + switch event.Type { + case MempoolEventAdd: + if err := wb.Set(key, value); err != nil { + glog.Errorf("MempoolPersister: Error setting key: %v", err) + } + case MempoolEventRemove: + if err := wb.Delete(key); err != nil { + glog.Errorf("MempoolPersister: Error deleting key: %v", err) + } + } + } + err := wb.Flush() + if err != nil { + return errors.Wrapf(err, "MempoolPersister: Error persisting batch") + } + + mp.updateBatch = nil + + return nil +} + +// GetPersistedTransactions is used to retrieve all transactions from the database. It will return an error if the persister +// is not currently running or if there was an issue retrieving the transactions. +func (mp *MempoolPersister) GetPersistedTransactions() ([]*MempoolTx, error) { + if mp.status == MempoolPersisterStatusNotRunning { + return nil, errors.Wrapf(MempoolErrorNotRunning, "MempoolPersister: Cannot retrieve transactions while running") + } + + mp.Lock() + defer mp.Unlock() + + var mempoolTxns []*MempoolTx + err := mp.db.View(func(txn *badger.Txn) error { + // Iterate through the transaction records in the database. 
+ iter := txn.NewIterator(badger.DefaultIteratorOptions) + defer iter.Close() + for iter.Seek([]byte{}); iter.Valid(); iter.Next() { + item := iter.Item() + txnBytes, err := item.ValueCopy(nil) + if err != nil { + return errors.Wrapf(err, "MempoolPersister: Error retrieving value") + } + + rr := bytes.NewReader(txnBytes) + mempoolTx := &MempoolTx{} + if err := mempoolTx.FromBytes(rr); err != nil { + return errors.Wrapf(err, "MempoolPersister: Error retrieving txn") + } + mempoolTxns = append(mempoolTxns, mempoolTx) + } + return nil + }) + if err != nil { + return nil, errors.Wrapf(err, "MempoolPersister: Error retrieving transactions") + } + return mempoolTxns, nil +} + +// EnqueueEvent is used to add a transaction event to the eventQueue. +func (mp *MempoolPersister) EnqueueEvent(event *MempoolEvent) { + if mp.eventQueue == nil { + return + } + mp.eventQueue <- event +} + +// reset is used to clear the persister state. +func (mp *MempoolPersister) reset() { + mp.Lock() + defer mp.Unlock() + + mp.updateBatch = nil + mp.eventQueue = make(chan *MempoolEvent, eventQueueSize) +} diff --git a/lib/pos_mempool_persister_test.go b/lib/pos_mempool_persister_test.go new file mode 100644 index 000000000..3159745c5 --- /dev/null +++ b/lib/pos_mempool_persister_test.go @@ -0,0 +1,181 @@ +package lib + +import ( + "github.com/dgraph-io/badger/v3" + "github.com/stretchr/testify/require" + "math/rand" + "os" + "testing" + "time" +) + +func TestMempoolPersister(t *testing.T) { + require := require.New(t) + + seed := int64(177) + testCases := 1000 + addRemoveCases := 100 + feeRange := uint64(10000) + timestampRange := uint64(10000) + rand := rand.New(rand.NewSource(seed)) + globalParams := _testGetDefaultGlobalParams() + + // Generate a set of random mempool transactions. + txnPool := _testGetRandomMempoolTxns(rand, globalParams.MinimumNetworkFeeNanosPerKB, feeRange, 1000, timestampRange, testCases) + + // Create a new mempool persister. 
+ dir, err := os.MkdirTemp("", "badgerdb-persister") + t.Logf("BadgerDB directory: %s\nIt should be automatically removed at the end of the test", dir) + require.NoError(err) + + opts := DefaultBadgerOptions(dir) + db, err := badger.Open(opts) + require.NoError(err) + defer os.RemoveAll(dir) + defer db.Close() + + mempoolPersister := NewMempoolPersister(db, 100) + + // Start the mempool persister. + mempoolPersister.Start() + + defer mempoolPersister.Stop() + + // Add all the transactions to the mempool. + for _, txn := range txnPool { + event := &MempoolEvent{ + Txn: txn, + Type: MempoolEventAdd, + } + mempoolPersister.EnqueueEvent(event) + } + + waitForPersisterToProcessEventQueue(mempoolPersister) + retrievedTxns, err := mempoolPersister.GetPersistedTransactions() + require.NoError(err) + require.Equal(len(txnPool), len(retrievedTxns)) + + // Add the same transactions again and ensure there are no overlaps. + for ii := 0; ii < addRemoveCases; ii++ { + event := &MempoolEvent{ + Txn: txnPool[ii], + Type: MempoolEventAdd, + } + mempoolPersister.EnqueueEvent(event) + } + + waitForPersisterToProcessEventQueue(mempoolPersister) + retrievedTxns, err = mempoolPersister.GetPersistedTransactions() + require.NoError(err) + require.Equal(len(txnPool), len(retrievedTxns)) + + // Remove couple transactions from the mempool + for ii := 0; ii < addRemoveCases; ii++ { + event := &MempoolEvent{ + Txn: txnPool[ii], + Type: MempoolEventRemove, + } + mempoolPersister.EnqueueEvent(event) + } + + waitForPersisterToProcessEventQueue(mempoolPersister) + retrievedTxns, err = mempoolPersister.GetPersistedTransactions() + require.NoError(err) + require.Equal(len(txnPool)-addRemoveCases, len(retrievedTxns)) + + // Add & Remove some transactions again and ensure transactions were really removed. 
+ for ii := 0; ii < addRemoveCases; ii++ { + event := &MempoolEvent{ + Txn: txnPool[ii], + Type: MempoolEventAdd, + } + mempoolPersister.EnqueueEvent(event) + + event2 := &MempoolEvent{ + Txn: txnPool[ii], + Type: MempoolEventRemove, + } + mempoolPersister.EnqueueEvent(event2) + } + + waitForPersisterToProcessEventQueue(mempoolPersister) + retrievedTxns, err = mempoolPersister.GetPersistedTransactions() + require.NoError(err) + require.Equal(len(txnPool)-addRemoveCases, len(retrievedTxns)) +} + +func TestMempoolPersisterRestart(t *testing.T) { + require := require.New(t) + + seed := int64(178) + feeRange := uint64(10000) + testCases := 100 + timestampRange := uint64(10000) + rand := rand.New(rand.NewSource(seed)) + globalParams := _testGetDefaultGlobalParams() + + // Generate a set of random mempool transactions. + txnPool := _testGetRandomMempoolTxns(rand, globalParams.MinimumNetworkFeeNanosPerKB, feeRange, 1000, timestampRange, testCases) + + // Create a new mempool persister. + dir, err := os.MkdirTemp("", "badgerdb-persister") + t.Logf("BadgerDB directory: %s\nIt should be automatically removed at the end of the test", dir) + require.NoError(err) + + opts := DefaultBadgerOptions(dir) + db, err := badger.Open(opts) + require.NoError(err) + + mempoolPersister := NewMempoolPersister(db, 100) + + // Start the mempool persister. + mempoolPersister.Start() + + // Add all the transactions to the mempool. + for _, txn := range txnPool { + event := &MempoolEvent{ + Txn: txn, + Type: MempoolEventAdd, + } + mempoolPersister.EnqueueEvent(event) + } + + waitForPersisterToProcessEventQueue(mempoolPersister) + retrievedTxns, err := mempoolPersister.GetPersistedTransactions() + require.NoError(err) + require.Equal(len(txnPool), len(retrievedTxns)) + + // Stop the mempool persister. + require.NoError(mempoolPersister.Stop()) + + // Make sure we get an error retrieving transactions on stopped persister. 
+ _, err = mempoolPersister.GetPersistedTransactions() + require.Contains(err.Error(), MempoolErrorNotRunning.Error()) + + // Reopen the db. + require.NoError(db.Close()) + db, err = badger.Open(opts) + require.NoError(err) + mempoolPersister = NewMempoolPersister(db, 100) + + // Restart the mempool persister. + mempoolPersister.Start() + + // Make sure we can retrieve the transactions again. + retrievedTxns, err = mempoolPersister.GetPersistedTransactions() + require.NoError(err) + require.Equal(len(txnPool), len(retrievedTxns)) + + require.NoError(db.Close()) + require.NoError(os.RemoveAll(dir)) +} + +func waitForPersisterToProcessEventQueue(mempoolPersister *MempoolPersister) { + mempoolPersister.Lock() + for len(mempoolPersister.eventQueue) > 0 || len(mempoolPersister.updateBatch) > 0 { + mempoolPersister.Unlock() + time.Sleep(100 * time.Millisecond) + mempoolPersister.Lock() + } + mempoolPersister.Unlock() +} diff --git a/lib/pos_mempool_transaction.go b/lib/pos_mempool_transaction.go new file mode 100644 index 000000000..4163c0a69 --- /dev/null +++ b/lib/pos_mempool_transaction.go @@ -0,0 +1,124 @@ +package lib + +import ( + "bytes" + "fmt" + "github.com/pkg/errors" + "time" +) + +// MempoolTx contains a transaction along with additional metadata like the +// fee and time added. +type MempoolTx struct { + Tx *MsgDeSoTxn + + // TxMeta is the transaction metadata + TxMeta *TransactionMetadata + + // Hash is a hash of the transaction so we don't have to recompute + // it all the time. + Hash *BlockHash + + // TxSizeBytes is the cached size of the transaction. + TxSizeBytes uint64 + + // The time when the txn was added to the pool + Added time.Time + + // The block height when the txn was added to the pool. It's generally set + // to tip+1. + Height uint32 + + // The total fee the txn pays. Cached for efficiency reasons. + Fee uint64 + + // The fee rate of the transaction in nanos per KB. 
+ FeePerKB uint64 + + // index is used by the heap logic to allow for modification in-place. + index int +} + +func NewMempoolTx(txn *MsgDeSoTxn, blockHeight uint64) (*MempoolTx, error) { + txnBytes, err := txn.ToBytes(false) + if err != nil { + return nil, errors.Wrapf(err, "PosMempool.GetMempoolTx: Problem serializing txn") + } + serializedLen := uint64(len(txnBytes)) + + txnHash := txn.Hash() + if txnHash == nil { + return nil, errors.Errorf("PosMempool.GetMempoolTx: Problem hashing txn") + } + feePerKb, err := txn.ComputeFeeRatePerKBNanos() + if err != nil { + return nil, errors.Wrapf(err, "PosMempool.GetMempoolTx: Problem computing fee per KB") + } + + return &MempoolTx{ + Tx: txn, + Hash: txnHash, + TxSizeBytes: serializedLen, + Added: time.Now(), + Height: uint32(blockHeight), + Fee: txn.TxnFeeNanos, + FeePerKB: feePerKb, + }, nil +} + +func (mempoolTx *MempoolTx) String() string { + return fmt.Sprintf("< Added: %v, index: %d, Fee: %d, Type: %v, Hash: %v", mempoolTx.Added, mempoolTx.index, mempoolTx.Fee, mempoolTx.Tx.TxnMeta.GetTxnType(), mempoolTx.Hash) +} + +func (mempoolTx *MempoolTx) GetTimestamp() uint64 { + return uint64(mempoolTx.Added.UnixMicro()) +} + +func (mempoolTx *MempoolTx) ToBytes() ([]byte, error) { + var data []byte + + txnBytes, err := mempoolTx.Tx.ToBytes(false) + if err != nil { + return nil, errors.Wrapf(err, "MempoolTx.Encode: Problem serializing txn") + } + data = append(data, EncodeByteArray(txnBytes)...) + data = append(data, UintToBuf(uint64(mempoolTx.Height))...) + data = append(data, UintToBuf(mempoolTx.GetTimestamp())...) 
+ return data, nil +} + +func (mempoolTx *MempoolTx) FromBytes(rr *bytes.Reader) error { + if mempoolTx == nil { + return errors.New("MempoolTx.Decode: mempoolTx is nil") + } + + // Decode the transaction + txn := &MsgDeSoTxn{} + txnBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "MempoolTx.Decode: Problem reading txnBytes") + } + err = txn.FromBytes(txnBytes) + if err != nil { + return errors.Wrapf(err, "MempoolTx.Decode: Problem deserializing txn") + } + + // Decode the height + height, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MempoolTx.Decode: Problem reading height") + } + + // Create a new MempoolTx + newTxn, err := NewMempoolTx(txn, height) + *mempoolTx = *newTxn + + // Decode the timestamp + timestamp, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MempoolTx.Decode: Problem reading timestamp") + } + mempoolTx.Added = time.UnixMicro(int64(timestamp)) + + return nil +} diff --git a/lib/pos_transaction_register_test.go b/lib/pos_transaction_register_test.go index 0a55512bc..b5e223f05 100644 --- a/lib/pos_transaction_register_test.go +++ b/lib/pos_transaction_register_test.go @@ -2,6 +2,7 @@ package lib import ( "bytes" + "github.com/btcsuite/btcd/btcec" "github.com/stretchr/testify/require" "math" "math/rand" @@ -369,8 +370,24 @@ func _testGetDefaultGlobalParams() *GlobalParamsEntry { func _testGetRandomMempoolTxns(rand *rand.Rand, feeMin uint64, feeMax uint64, sizeMax uint64, timestampRange uint64, numTxns int) []*MempoolTx { txnPool := []*MempoolTx{} for ii := 0; ii < numTxns; ii++ { + randPriv, _ := btcec.NewPrivateKey(btcec.S256()) + randMsg := RandomBytes(32) + randSig, _ := randPriv.Sign(randMsg) + fee := rand.Uint64()%(feeMax-feeMin) + feeMin + txnPool = append(txnPool, &MempoolTx{ - FeePerKB: rand.Uint64()%(feeMax-feeMin) + feeMin, + Tx: &MsgDeSoTxn{ + TxnVersion: DeSoTxnVersion1, + TxnMeta: &BasicTransferMetadata{}, + Signature: DeSoSignature{ + Sign: randSig, + 
RecoveryId: 0, + IsRecoverable: false, + }, + TxnFeeNanos: fee, + TxnNonce: &DeSoNonce{}, + }, + FeePerKB: fee, Added: time.UnixMicro(int64(rand.Uint64() % timestampRange)), Hash: NewBlockHash(RandomBytes(32)), TxSizeBytes: 1 + rand.Uint64()%sizeMax, From e63396a4b992e088d224489f97bcb3bc98a8aef0 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 8 Aug 2023 15:48:34 -0400 Subject: [PATCH 178/762] Address Staker Snapshotting PR Feedback (#633) --- lib/block_view_stake.go | 2 +- lib/deso_math.go | 1 - lib/pos_epoch.go | 7 +------ lib/pos_epoch_test.go | 3 +-- lib/pos_staking_rewards.go | 24 ++++++++++++++---------- 5 files changed, 17 insertions(+), 20 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 69607d52d..0089b9c82 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -35,7 +35,7 @@ import ( type StakingRewardMethod = uint8 const ( - StakingRewardMethodPayToBalance StakingRewardMethod = iota + StakingRewardMethodPayToBalance StakingRewardMethod = 0 StakingRewardMethodRestake StakingRewardMethod = 1 ) diff --git a/lib/deso_math.go b/lib/deso_math.go index 994ebfe0b..b9fe6bcd6 100644 --- a/lib/deso_math.go +++ b/lib/deso_math.go @@ -100,7 +100,6 @@ var ( bigOne = NewFloat().SetUint64(1) bigTwo = NewFloat().SetUint64(2) bigSqrt2Over2 = NewFloat().Quo(bigSqrt2, bigTwo) - bigE = NewFloat().SetFloat64(2.71828182845904523536028747135266249775724709369995957496696763) // Constants for BigFloatExpMulti bigP1 = NewFloat().SetFloat64(1.66666666666666657415e-01) /* 0x3FC55555; 0x55555555 */ diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index 9d558b703..2a7f8439f 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -2,7 +2,6 @@ package lib import ( "bytes" - "math" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" @@ -98,13 +97,9 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { // If still not found, return the GenesisEpochEntry. 
This will be the // case prior to the first execution of the OnEpochCompleteHook. - // - // TODO: Should FinalBlockHeight be ProofOfStake1StateSetupBlockHeight for epoch 0? - // The ProofOfStake1StateSetupBlockHeight fork height is exactly when epoch 0 ends. - // Epoch 1 begins at the next height. genesisEpochEntry := &EpochEntry{ EpochNumber: 0, - FinalBlockHeight: math.MaxUint64, + FinalBlockHeight: uint64(bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight), CreatedAtBlockTimestampNanoSecs: 0, } return genesisEpochEntry, nil diff --git a/lib/pos_epoch_test.go b/lib/pos_epoch_test.go index c73a5fca3..aaf16b138 100644 --- a/lib/pos_epoch_test.go +++ b/lib/pos_epoch_test.go @@ -1,7 +1,6 @@ package lib import ( - "math" "testing" "github.com/stretchr/testify/require" @@ -31,7 +30,7 @@ func TestCurrentEpoch(t *testing.T) { require.NoError(t, err) require.NotNil(t, epochEntry) require.Equal(t, epochEntry.EpochNumber, uint64(0)) - require.Equal(t, epochEntry.FinalBlockHeight, uint64(math.MaxUint64)) + require.Equal(t, epochEntry.FinalBlockHeight, uint64(utxoView.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight)) // Set the CurrentEpoch. epochEntry = &EpochEntry{ diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index 915d51078..e4ed376a1 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -140,7 +140,7 @@ func (bav *UtxoView) computeStakerRewardAndValidatorCommission( if err != nil { return 0, 0, errors.Wrapf(err, "computeStakerRewardAndValidatorCommission: problem fetching validator entry: ") } - if validatorEntry == nil { + if validatorEntry == nil || validatorEntry.isDeleted { // This should never happen. If we can't find the validator, then something is wrong. It's safest to error // and return early here. 
return 0, 0, errors.Errorf("computeStakerRewardAndValidatorCommission: validator entry should never be nil") @@ -162,7 +162,7 @@ func (bav *UtxoView) computeStakerRewardAndValidatorCommission( } // Deduct the validator commission from the staker's reward. - stakerRewardNanos.Sub(stakerRewardNanos, validatorCommissionNanos) + stakerRewardNanos = big.NewInt(0).Sub(stakerRewardNanos, validatorCommissionNanos) } } @@ -191,7 +191,7 @@ func (bav *UtxoView) distributeStakingReward(validatorPKID *PKID, stakerPKID *PK // For case 1, we distribute the rewards by adding them to the staker's staked amount. if stakeEntry != nil && stakeEntry.RewardMethod == StakingRewardMethodRestake { - stakeEntry.StakeAmountNanos.Add(stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(rewardNanos)) + stakeEntry.StakeAmountNanos = uint256.NewInt().Add(stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(rewardNanos)) bav._setStakeEntryMappings(stakeEntry) return nil @@ -251,12 +251,12 @@ func convertBigFloatToBigInt(float *big.Float) *big.Int { func convertAPYBasisPointsToFloat(apyBasisPoints uint64) *big.Float { apyBasisPointsAsFloat := NewFloat().SetUint64(apyBasisPoints) - return apyBasisPointsAsFloat.Quo(apyBasisPointsAsFloat, _basisPointsAsFloat) + return NewFloat().Quo(apyBasisPointsAsFloat, _basisPointsAsFloat) } func computeFractionOfYearAsFloat(nanoSecs uint64) *big.Float { nanoSecsAsFloat := NewFloat().SetUint64(nanoSecs) - return nanoSecsAsFloat.Quo(nanoSecsAsFloat, _nanoSecsPerYearAsFloat) + return NewFloat().Quo(nanoSecsAsFloat, _nanoSecsPerYearAsFloat) } // computeStakingReward uses float math to compute the compound interest on the stake amounts based on the @@ -265,10 +265,14 @@ func computeFractionOfYearAsFloat(nanoSecs uint64) *big.Float { // It produces the result for: stakeAmount * [e ^ (apy * elapsedTime / 1 year) - 1] func computeStakingReward(stakeAmountNanos *uint256.Int, elapsedFractionOfYear *big.Float, apy *big.Float) *big.Float { stakeAmountFloat := 
NewFloat().SetInt(stakeAmountNanos.ToBig()) - growthExponent := NewFloat().Mul(elapsedFractionOfYear, apy) // apy * elapsedTime / 1 year - growthMultiplier := BigFloatPow(bigE, growthExponent) // e ^ (apy * elapsedTime / 1 year) - finalStakeAmountNanos := NewFloat().Mul(stakeAmountFloat, growthMultiplier) // stakeAmount * [e ^ (apy * elapsedTime / 1 year)] - return finalStakeAmountNanos.Sub(finalStakeAmountNanos, stakeAmountFloat) // stakeAmount * [e ^ (apy * elapsedTime / 1 year) - 1] + growthExponent := NewFloat().Mul(elapsedFractionOfYear, apy) // apy * elapsedTime / 1 year + growthMultiplier := BigFloatExp(growthExponent) // e ^ (apy * elapsedTime / 1 year) + finalStakeAmountNanos := NewFloat().Mul(stakeAmountFloat, growthMultiplier) // stakeAmount * [e ^ (apy * elapsedTime / 1 year)] + rewardAmountNanos := NewFloat().Sub(finalStakeAmountNanos, stakeAmountFloat) // stakeAmount * [e ^ (apy * elapsedTime / 1 year) - 1] + if rewardAmountNanos.Sign() < 0 { + return NewFloat() // This should not be possible, but we clamp the result to zero just in case. 
+ } + return rewardAmountNanos } // computeValidatorCommission uses integer math to compute the validator's commission amount based on the staker's @@ -278,5 +282,5 @@ func computeStakingReward(stakeAmountNanos *uint256.Int, elapsedFractionOfYear * // It produces the integer result for: floor[(stakerReward * validatorCommissionBasisPoints) / 1e4] func computeValidatorCommission(stakerRewardNanos *big.Int, validatorCommissionBasisPoints uint64) *big.Int { scaledStakerReward := big.NewInt(0).Mul(stakerRewardNanos, big.NewInt(int64(validatorCommissionBasisPoints))) - return scaledStakerReward.Div(scaledStakerReward, _basisPointsAsInt) + return big.NewInt(0).Div(scaledStakerReward, _basisPointsAsInt) } From 1ac4696427ae12145ba31ee018e053ac504c0407 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 9 Aug 2023 09:49:28 -0400 Subject: [PATCH 179/762] Guarantee top n staker snapshotting (#632) --- collections/slice.go | 8 ++-- lib/block_view_stake.go | 58 +++++++++++++++++++++-------- lib/block_view_stake_test.go | 9 +++-- lib/pos_epoch_complete_hook.go | 29 ++++++--------- lib/pos_epoch_complete_hook_test.go | 13 ++++--- 5 files changed, 72 insertions(+), 45 deletions(-) diff --git a/collections/slice.go b/collections/slice.go index eb242c064..b96cc41d9 100644 --- a/collections/slice.go +++ b/collections/slice.go @@ -1,11 +1,9 @@ package collections -func SliceFilter[T any](slice []T, filterFn func(T) bool) []T { - var result []T +func TransformSlice[TInput any, TOutput any](slice []TInput, transformFn func(TInput) TOutput) []TOutput { + var result []TOutput for _, val := range slice { - if filterFn(val) { - result = append(result, val) - } + result = append(result, transformFn(val)) } return result } diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 0089b9c82..18fa50eba 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -669,10 +669,11 @@ func DBGetStakeEntriesForValidatorPKID(handle 
*badger.DB, snap *Snapshot, valida return stakeEntries, nil } -func DBGetTopStakesByStakeAmount( +func DBGetTopStakesForValidatorsByStakeAmount( handle *badger.DB, snap *Snapshot, limit uint64, + validatorPKIDsToInclude *Set[PKID], stakeEntriesToSkip []*StakeEntry, ) ([]*StakeEntry, error) { var stakeEntries []*StakeEntry @@ -697,6 +698,11 @@ func DBGetTopStakesByStakeAmount( if err != nil { return false } + + if !validatorPKIDsToInclude.Includes(*validatorPKID) { + return true + } + stakerPKID, err := GetStakerPKIDFromDBKeyForStakeByStakeAmount(badgerKey) if err != nil { return false @@ -714,7 +720,7 @@ func DBGetTopStakesByStakeAmount( handle, key, int(limit), nil, true, canSkipValidatorPKIDAndStakerPKIDInBadgerSeek, ) if err != nil { - return nil, errors.Wrapf(err, "DBGetTopStakesByStakeAmount: problem retrieving top stakes: ") + return nil, errors.Wrapf(err, "DBGetTopStakesForValidatorsByStakeAmount: problem retrieving top stakes: ") } // For each key found, parse the staker PKID and validator PKID from the key, then retrieve the StakeEntry.:len(keyFound)-PublicKeyLenCompressed @@ -722,19 +728,19 @@ func DBGetTopStakesByStakeAmount( // Extract the validator PKID from the key. validatorPKID, err := GetValidatorPKIDFromDBKeyForStakeByStakeAmount(keyFound) if err != nil { - return nil, errors.Wrapf(err, "DBGetTopStakesByStakeAmount: problem reading ValidatorPKID: ") + return nil, errors.Wrapf(err, "DBGetTopStakesForValidatorsByStakeAmount: problem reading ValidatorPKID: ") } // Extract the staker PKID from the key. stakerPKID, err := GetStakerPKIDFromDBKeyForStakeByStakeAmount(keyFound) if err != nil { - return nil, errors.Wrapf(err, "DBGetTopStakesByStakeAmount: problem reading StakerPKID: ") + return nil, errors.Wrapf(err, "DBGetTopStakesForValidatorsByStakeAmount: problem reading StakerPKID: ") } // Retrieve StakeEntry from db. 
stakeEntry, err := DBGetStakeEntry(handle, snap, validatorPKID, stakerPKID) if err != nil { - return nil, errors.Wrapf(err, "DBGetTopStakesByStakeAmount: problem retrieving stake entry: ") + return nil, errors.Wrapf(err, "DBGetTopStakesForValidatorsByStakeAmount: problem retrieving stake entry: ") } stakeEntries = append(stakeEntries, stakeEntry) } @@ -2388,9 +2394,15 @@ func (bav *UtxoView) GetStakeEntriesForValidatorPKID(validatorPKID *PKID) ([]*St return stakeEntries, nil } -func (bav *UtxoView) GetTopStakesByStakeAmount(limit uint64) ([]*StakeEntry, error) { - // Validate limit param. - if limit == uint64(0) { +// GetTopStakesForValidatorsByStakeAmount fetches the top n StakeEntries sorted by stake amount for +// the given validators. The validatorPKIDs and limit parameters are strictly respected. If either has +// 0 size, then no StakeEntries are returned. +func (bav *UtxoView) GetTopStakesForValidatorsByStakeAmount( + validatorPKIDs []*PKID, + limit uint64, +) ([]*StakeEntry, error) { + // Validate validator PKIDs and limit params. + if len(validatorPKIDs) == 0 || limit == uint64(0) { return []*StakeEntry{}, nil } @@ -2406,15 +2418,27 @@ func (bav *UtxoView) GetTopStakesByStakeAmount(limit uint64) ([]*StakeEntry, err utxoViewStakeEntries = append(utxoViewStakeEntries, stakeEntry) } + // Convert the validatorPKIDs to a set for easy lookup. + validatorPKIDsToInclude := NewSet([]PKID{}) + for _, validatorPKID := range validatorPKIDs { + validatorPKIDsToInclude.Add(*validatorPKID) + } + // Pull top N StakeEntries from the database (not present in the UtxoView). // Note that we will skip stakers that are present in the view because we pass // utxoViewStakeEntries to the function. 
- dbStakeEntries, err := DBGetTopStakesByStakeAmount(bav.Handle, bav.Snapshot, limit, utxoViewStakeEntries) + dbStakeEntries, err := DBGetTopStakesForValidatorsByStakeAmount( + bav.Handle, + bav.Snapshot, + limit, + validatorPKIDsToInclude, + utxoViewStakeEntries, + ) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetTopStakesByStakeAmount: error retrieving stake entries from db: ") + return nil, errors.Wrapf(err, "UtxoView.GetTopStakesForValidatorsByStakeAmount: error retrieving stake entries from db: ") } - // Cache top N StakeEntries from the db in the UtxoView. + // Cache StakeEntries from the db in the UtxoView. for _, stakeEntry := range dbStakeEntries { stakeMapKey := stakeEntry.ToMapKey() // If the utxoViewStakeEntries have been properly skipped when doing the DB seek, then there @@ -2422,18 +2446,22 @@ func (bav *UtxoView) GetTopStakesByStakeAmount(limit uint64) ([]*StakeEntry, err // find duplicates here, then something is wrong. It would unsafe to continue as it may result // in an invalid ordering of stakes. if _, exists := bav.StakeMapKeyToStakeEntry[stakeMapKey]; exists { - return nil, fmt.Errorf("UtxoView.GetTopStakesByStakeAmount: duplicate StakeEntry returned from the DB: %v", stakeEntry) + return nil, fmt.Errorf("UtxoView.GetTopStakesForValidatorsByStakeAmount: duplicate StakeEntry returned from the DB: %v", stakeEntry) } bav._setStakeEntryMappings(stakeEntry) } - // Pull !isDeleted StakeEntries with non-zero stake from the UtxoView. + // Pull !isDeleted StakeEntries that have staked to the desired validators and have non-zero stake. 
var stakeEntries []*StakeEntry for _, stakeEntry := range bav.StakeMapKeyToStakeEntry { - if !stakeEntry.isDeleted && !stakeEntry.StakeAmountNanos.IsZero() { - stakeEntries = append(stakeEntries, stakeEntry) + if stakeEntry.isDeleted || stakeEntry.StakeAmountNanos.IsZero() { + continue } + if !validatorPKIDsToInclude.Includes(*stakeEntry.ValidatorPKID) { + continue + } + stakeEntries = append(stakeEntries, stakeEntry) } // Sort the StakeEntries by StakeAmountNanos DESC. diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index d44208285..83f1692a4 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -1753,6 +1753,9 @@ func _testGetTopStakesByStakeAmount(t *testing.T, flushToDB bool) { constructAndSubmitRegisterValidatorTxn(m1Pub, m1Priv, m1PkBytes, "https://example2.com") } + // Cache the validator set for easy access. + validatorPKIDs := []*PKID{m0PKID, m1PKID} + { // m0 stakes 100 nanos to themselves. constructAndSubmitStakeTxn(m0Pub, m0Priv, m0PkBytes, 100) @@ -1783,7 +1786,7 @@ func _testGetTopStakesByStakeAmount(t *testing.T, flushToDB bool) { { // Verify when query limit 3 is lower than number of stake entries 10. - topStakeEntries, err := utxoView().GetTopStakesByStakeAmount(3) + topStakeEntries, err := utxoView().GetTopStakesForValidatorsByStakeAmount(validatorPKIDs, 3) require.NoError(t, err) require.Equal(t, 3, len(topStakeEntries)) } @@ -1791,7 +1794,7 @@ func _testGetTopStakesByStakeAmount(t *testing.T, flushToDB bool) { { // Verify when query limit 1000 is higher than number of stake entries 10. - topStakeEntries, err := utxoView().GetTopStakesByStakeAmount(1000) + topStakeEntries, err := utxoView().GetTopStakesForValidatorsByStakeAmount(validatorPKIDs, 1000) require.NoError(t, err) require.Equal(t, 10, len(topStakeEntries)) } @@ -1799,7 +1802,7 @@ func _testGetTopStakesByStakeAmount(t *testing.T, flushToDB bool) { { // Verify ordering of top 5 stake entries, which includes breaking ties. 
- topStakeEntries, err := utxoView().GetTopStakesByStakeAmount(6) + topStakeEntries, err := utxoView().GetTopStakesForValidatorsByStakeAmount(validatorPKIDs, 6) require.NoError(t, err) require.Equal(t, 6, len(topStakeEntries)) diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index d95efa3ad..daa6e53b8 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -203,28 +203,23 @@ func (bav *UtxoView) generateAndSnapshotLeaderSchedule(epochNumber uint64) error } func (bav *UtxoView) generateAndSnapshotStakesToReward(epochNumber uint64, validatorSet []*ValidatorEntry) error { - // Fetch the current top n stake entries. Note, this query will return the top n stake entries regardless of whether - // or not they are in the active validator set of the epoch. By not filtering the stake entries while seeking through - // the DB, we guarantee that we will at most seek through n stake entries. This avoids the edge cases where the global - // set of stake entries is very large, and as a result of additional filters in the seek, we end up seeking through - // the entire global set. - topStakeEntries, err := bav.GetTopStakesByStakeAmount(bav.GetCurrentGlobalParamsEntry().StakingRewardsMaxNumStakes) + // Fetch the validator set's PKIDs so we can filter the top stakes by the current validator set. + validatorSetPKIDs := collections.TransformSlice(validatorSet, func(validatorEntry *ValidatorEntry) *PKID { + return validatorEntry.ValidatorPKID + }) + + // Fetch the current top n stake entries. This query is guaranteed to return the top n stake entries that have + // staked to the validator set. + topStakeEntries, err := bav.GetTopStakesForValidatorsByStakeAmount( + validatorSetPKIDs, + bav.GetCurrentGlobalParamsEntry().StakingRewardsMaxNumStakes, + ) if err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: error retrieving top StakeEntries: ") } - // Filter the top n stake entries by the current validator set. 
We do not want to reward stakes that are - // not in the current validator set, so we pre-filter them here before snapshotting them. - validatorSetPKIDs := NewSet([]PKID{}) - for _, validatorEntry := range validatorSet { - validatorSetPKIDs.Add(*validatorEntry.ValidatorPKID) - } - topStakesInValidatorSet := collections.SliceFilter(topStakeEntries, func(s *StakeEntry) bool { - return validatorSetPKIDs.Includes(*s.ValidatorPKID) - }) - // Snapshot only the top m stake entries that are in the validator set. - for _, stakeEntry := range topStakesInValidatorSet { + for _, stakeEntry := range topStakeEntries { bav._setSnapshotStakeToReward(stakeEntry.Copy(), epochNumber) } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 993cbd262..70f5c9368 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -173,7 +173,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Len(t, validatorEntries, 7) - stakeEntries, err := _newUtxoView(testMeta).GetTopStakesByStakeAmount(10) + stakeEntries, err := _newUtxoView(testMeta).GetTopStakesForValidatorsByStakeAmount(validatorPKIDs, 10) require.NoError(t, err) require.Len(t, stakeEntries, 7) } @@ -418,7 +418,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } getNumStakes := func() int { - stakeEntries, err := _newUtxoView(testMeta).GetTopStakesByStakeAmount(10) + stakeEntries, err := _newUtxoView(testMeta).GetTopStakesForValidatorsByStakeAmount(validatorPKIDs, 10) require.NoError(t, err) return len(stakeEntries) } @@ -512,6 +512,9 @@ func TestStakingRewardDistribution(t *testing.T) { _registerValidatorAndStake(testMeta, m0Pub, m0Priv, 2000, 400, true) // 20% commission rate, 400 nano stake _registerValidatorAndStake(testMeta, m1Pub, m1Priv, 2000, 200, false) // 20% commission rate, 200 nano stake + // Cache the validator PKIDs. + validatorPKIDs := []*PKID{m0PKID, m1PKID} + // Two stakers delegate their stake to the validators. 
_stakeToValidator(testMeta, m2Pub, m2Priv, m0Pub, 100, true) // 100 nano stake _stakeToValidator(testMeta, m3Pub, m3Priv, m1Pub, 50, false) // 50 nano stake @@ -533,7 +536,7 @@ func TestStakingRewardDistribution(t *testing.T) { { // Verify the stakers' stakes. - stakeEntries, err := _newUtxoView(testMeta).GetTopStakesByStakeAmount(10) + stakeEntries, err := _newUtxoView(testMeta).GetTopStakesForValidatorsByStakeAmount(validatorPKIDs, 10) require.NoError(t, err) require.Len(t, stakeEntries, 4) @@ -559,7 +562,7 @@ func TestStakingRewardDistribution(t *testing.T) { { // Test that the stakes are unchanged. - stakeEntries, err := _newUtxoView(testMeta).GetTopStakesByStakeAmount(10) + stakeEntries, err := _newUtxoView(testMeta).GetTopStakesForValidatorsByStakeAmount(validatorPKIDs, 10) require.NoError(t, err) require.Len(t, stakeEntries, 4) require.Equal(t, stakeEntries[0].StakerPKID, m0PKID) @@ -611,7 +614,7 @@ func TestStakingRewardDistribution(t *testing.T) { // previous epoch. // Test that the number of stakes is unchanged. - stakeEntries, err := _newUtxoView(testMeta).GetTopStakesByStakeAmount(10) + stakeEntries, err := _newUtxoView(testMeta).GetTopStakesForValidatorsByStakeAmount(validatorPKIDs, 10) require.NoError(t, err) require.Len(t, stakeEntries, 4) From eb4105dc0fe63af7250d3a1fe9875f4aa80a0f95 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 9 Aug 2023 11:00:08 -0400 Subject: [PATCH 180/762] Merge Block Nanosecond Timestamp Change with OnEpochComplete Hook Changes (#634) --- lib/network.go | 5 ----- lib/pos_epoch_test.go | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/lib/network.go b/lib/network.go index 2f5f09b18..f38fbbc31 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1894,11 +1894,6 @@ type MsgDeSoHeader struct { // The unix timestamp (in nanoseconds) specifying when this block was produced. 
TstampNanoSecs uint64 - // TODO: Add a new TstampNanoSecs field that will have nanosecond resolution. - // For backwards compatibility with existing backends and frontends, we will keep - // the existing TstampSecs field and populate it as TstampNanoSecs / 1e9 when decoding - // block headers from bytes. - // The height of the block this header corresponds to. Height uint64 diff --git a/lib/pos_epoch_test.go b/lib/pos_epoch_test.go index aaf16b138..7ea66a52a 100644 --- a/lib/pos_epoch_test.go +++ b/lib/pos_epoch_test.go @@ -13,7 +13,7 @@ func TestCurrentEpoch(t *testing.T) { // Initialize blockchain. chain, params, db := NewLowDifficultyBlockchain(t) blockHeight := uint64(chain.blockTip().Height) + 1 - blockTimestampNanoSecs := uint64(chain.blockTip().Header.TstampSecs)*1e9 + 1e9 + blockTimestampNanoSecs := uint64(chain.blockTip().Header.TstampNanoSecs) + 1e9 utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) From a9f782751b11e8aa3a0977fd2b2535b1ef4d7242 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Tue, 22 Aug 2023 12:04:27 -0700 Subject: [PATCH 181/762] PoS Mempool Struct (#603) * Revert "Revert "Revert "Mempool persister & mempool transaction split""" This reverts commit 1da7cda3f7bd70d2138dea95f5364ea6b2b77bf8. * Update TransactionRegister Prune; Update BalanceLedger CheckBalanceIncrease * Initial add transaction and restart tests * Comments and nits * gofmt * Add mempool interface and support for GetTransaction * Update persister * Initial review * Revert "Revert "Revert "Mempool persister & mempool transaction split""" This reverts commit 1da7cda3f7bd70d2138dea95f5364ea6b2b77bf8. 
* Initial add transaction and restart tests * Comment fix * Tests for prune, getTransaction, getIterator, updateGlobalParams * Review round * Nit * Txn hash check before lock * More review stuff --- cmd/config.go | 6 + cmd/node.go | 3 + cmd/run.go | 5 + lib/block_view.go | 4 + lib/constants.go | 9 +- lib/pos_mempool.go | 418 ++++++++++++++++++++++++++++++++ lib/pos_mempool_ledger.go | 55 ++--- lib/pos_mempool_ledger_test.go | 54 ++--- lib/pos_mempool_persister.go | 18 +- lib/pos_mempool_test.go | 245 +++++++++++++++++++ lib/pos_transaction_register.go | 29 ++- 11 files changed, 762 insertions(+), 84 deletions(-) create mode 100644 lib/pos_mempool.go create mode 100644 lib/pos_mempool_test.go diff --git a/cmd/config.go b/cmd/config.go index 64b2e8c00..a4a274382 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -41,6 +41,9 @@ type Config struct { SnapshotBlockHeightPeriod uint64 DisableEncoderMigrations bool + // Mempool + MempoolBackupTimeMilliseconds uint64 + // Mining MinerPublicKeys []string NumMiningThreads uint64 @@ -102,6 +105,9 @@ func LoadConfig() *Config { config.SnapshotBlockHeightPeriod = viper.GetUint64("snapshot-block-height-period") config.DisableEncoderMigrations = viper.GetBool("disable-encoder-migrations") + // Mempool + config.MempoolBackupTimeMilliseconds = viper.GetUint64("mempool-backup-time-milliseconds") + // Peers config.ConnectIPs = viper.GetStringSlice("connect-ips") config.AddIPs = viper.GetStringSlice("add-ips") diff --git a/cmd/node.go b/cmd/node.go index cd6a79c0f..2cf161eed 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -85,6 +85,9 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Params.EnableRegtest() } + // Set the frequency of mempool's database backup. + node.Params.MempoolBackupTimeMilliseconds = node.Config.MempoolBackupTimeMilliseconds + // Validate params validateParams(node.Params) // This is a bit of a hack, and we should deprecate this. 
We rely on GlobalDeSoParams static variable in only one diff --git a/cmd/run.go b/cmd/run.go index 57a03c8a3..a9cecfe66 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -79,6 +79,11 @@ func SetupRunFlags(cmd *cobra.Command) { - hypersync: Will sync by downloading historical state, and will NOT download historical blocks. Can only be set if HyperSync is true.`) + // Mempool + cmd.PersistentFlags().Uint64("batch-persist-frequency-milliseconds", 30000, + "The frequency in milliseconds with which the mempool will persist its state to disk. "+ + "The default value is 30 seconds, or 30,000 milliseconds.") + // Peers cmd.PersistentFlags().StringSlice("connect-ips", []string{}, "A comma-separated list of ip:port addresses that we should connect to on startup. "+ diff --git a/lib/block_view.go b/lib/block_view.go index 41e524f57..d41592456 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -1608,6 +1608,10 @@ func _isEntryImmatureBlockReward(utxoEntry *UtxoEntry, blockHeight uint32, param return false } +func (bav *UtxoView) VerifySignature(txn *MsgDeSoTxn, blockHeight uint32) (_derivedPkBytes []byte, _err error) { + return bav._verifySignature(txn, blockHeight) +} + func (bav *UtxoView) _verifySignature(txn *MsgDeSoTxn, blockHeight uint32) (_derivedPkBytes []byte, _err error) { if txn.Signature.Sign == nil { return nil, fmt.Errorf("_verifySignature: Transaction signature is empty") diff --git a/lib/constants.go b/lib/constants.go index 465c43761..7b69371ba 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -706,6 +706,9 @@ type DeSoParams struct { // The maximum aggregate number of bytes of transactions included in the PoS mempool. MaxMempoolPosSizeBytes uint64 + + // MempoolBackupTimeMilliseconds is the frequency with which pos mempool persists transactions to storage. 
+ MempoolBackupTimeMilliseconds uint64 } var RegtestForkHeights = ForkHeights{ @@ -1110,7 +1113,8 @@ var DeSoMainnetParams = DeSoParams{ EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), - MaxMempoolPosSizeBytes: 3 << 30, // 3GB + MaxMempoolPosSizeBytes: 3 << 30, // 3Gb + MempoolBackupTimeMilliseconds: 30000, } func mustDecodeHexBlockHashBitcoin(ss string) *BlockHash { @@ -1370,7 +1374,8 @@ var DeSoTestnetParams = DeSoParams{ EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), - MaxMempoolPosSizeBytes: 3 << 30, // 3GB + MaxMempoolPosSizeBytes: 3 << 30, // 3Gb + MempoolBackupTimeMilliseconds: 30000, } // GetDataDir gets the user data directory where we store files diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go new file mode 100644 index 000000000..e6a953edb --- /dev/null +++ b/lib/pos_mempool.go @@ -0,0 +1,418 @@ +package lib + +import ( + "github.com/dgraph-io/badger/v3" + "github.com/golang/glog" + "github.com/pkg/errors" + "path/filepath" + "strings" + "sync" +) + +type PosMempoolStatus int + +const ( + PosMempoolStatusRunning PosMempoolStatus = iota + PosMempoolStatusNotRunning +) + +type Mempool interface { + Start() error + Stop() + IsRunning() bool + AddTransaction(txn *MsgDeSoTxn) error + RemoveTransaction(txnHash *BlockHash) error + GetTransaction(txnHash *BlockHash) *MsgDeSoTxn + GetTransactions() []*MsgDeSoTxn + GetIterator() MempoolIterator + UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) + UpdateGlobalParams(globalParams *GlobalParamsEntry) +} + +type MempoolIterator interface { + Next() bool + Value() (*MsgDeSoTxn, bool) + Initialized() bool +} + +// PosMempool is used by the node to keep track of uncommitted transactions. 
The main responsibilities of the PosMempool +// include addition/removal of transactions, back up of transaction to database, and retrieval of transactions ordered +// by Fee-Time algorithm. More on the Fee-Time algorithm can be found in the documentation of TransactionRegister. +type PosMempool struct { + sync.RWMutex + status PosMempoolStatus + // params of the blockchain + params *DeSoParams + // globalParams are used to track the latest GlobalParamsEntry. In case the GlobalParamsEntry changes, the PosMempool + // is equipped with UpdateGlobalParams method to handle upgrading GlobalParamsEntry. + globalParams *GlobalParamsEntry + // dir of the directory where the database should be stored. + dir string + // db is the database that the mempool will use to persist transactions. + db *badger.DB + + // txnRegister is the in-memory data structure keeping track of the transactions in the mempool. The TransactionRegister + // is responsible for ordering transactions by the Fee-Time algorithm. + txnRegister *TransactionRegister + // ledger is a simple in-memory data structure that keeps track of cumulative transaction fees in the mempool. + // The ledger keeps track of how much each user would have spent in fees across all their transactions in the mempool. + ledger *BalanceLedger + // persister is responsible for interfacing with the database. The persister backs up mempool transactions so not to + // lose them when node reboots. The persister also retrieves transactions from the database when the node starts up. + // The persister runs on its dedicated thread and events are used to notify the persister thread whenever + // transactions are added/removed from the mempool. The persister thread then updates the database accordingly. + persister *MempoolPersister + + // latestBlockView is used to check if a transaction is valid before being added to the mempool. 
The latestBlockView + // checks if the transaction has a valid signature and if the transaction's sender has enough funds to cover the fee. + // The latestBlockView should be updated whenever a new block is added to the blockchain via UpdateLatestBlock. + latestBlockView *UtxoView + // latestBlockNode is used to infer the latest block height. The latestBlockNode should be updated whenever a new + // block is added to the blockchain via UpdateLatestBlock. + latestBlockHeight uint64 +} + +// PosMempoolIterator is a wrapper around FeeTimeIterator, modified to return MsgDeSoTxn instead of MempoolTx. +type PosMempoolIterator struct { + it *FeeTimeIterator +} + +func (it *PosMempoolIterator) Next() bool { + return it.it.Next() +} + +func (it *PosMempoolIterator) Value() (*MsgDeSoTxn, bool) { + txn, ok := it.it.Value() + if txn == nil || txn.Tx == nil { + return nil, ok + } + return txn.Tx, ok +} + +func (it *PosMempoolIterator) Initialized() bool { + return it.it.Initialized() +} + +func NewPosMempoolIterator(it *FeeTimeIterator) *PosMempoolIterator { + return &PosMempoolIterator{it: it} +} + +func NewPosMempool(params *DeSoParams, globalParams *GlobalParamsEntry, latestBlockView *UtxoView, + latestBlockHeight uint64, dir string) *PosMempool { + return &PosMempool{ + status: PosMempoolStatusNotRunning, + params: params, + globalParams: globalParams, + dir: dir, + latestBlockView: latestBlockView, + latestBlockHeight: latestBlockHeight, + } +} + +func (dmp *PosMempool) Start() error { + if dmp.IsRunning() { + return nil + } + + // Setup the database. 
+ mempoolDirectory := filepath.Join(dmp.dir, "mempool") + opts := DefaultBadgerOptions(mempoolDirectory) + db, err := badger.Open(opts) + if err != nil { + return errors.Wrapf(err, "PosMempool.Start: Problem setting up database") + } + dmp.db = db + + // Create the transaction register and ledger + dmp.txnRegister = NewTransactionRegister(dmp.globalParams) + dmp.ledger = NewBalanceLedger() + + // Create the persister + dmp.persister = NewMempoolPersister(dmp.db, int(dmp.params.MempoolBackupTimeMilliseconds)) + + // Start the persister and retrieve transactions from the database. + dmp.persister.Start() + txns, err := dmp.persister.GetPersistedTransactions() + if err != nil { + return errors.Wrapf(err, "PosMempool.Start: Problem retrieving transactions from persister") + } + // We set the persistToDb flag to false so that persister doesn't try to save the transactions. + for _, txn := range txns { + if err := dmp.addTransactionNoLock(txn, false); err != nil { + glog.Errorf("PosMempool.Start: Problem adding transaction with hash (%v) from persister: %v", + txn.Hash, err) + } + } + dmp.status = PosMempoolStatusRunning + + return nil +} + +func (dmp *PosMempool) Stop() { + if !dmp.IsRunning() { + return + } + + // Close the persister and stop the database. + if err := dmp.persister.Stop(); err != nil { + glog.Errorf("PosMempool.Stop: Problem stopping persister: %v", err) + } + if err := dmp.db.Close(); err != nil { + glog.Errorf("PosMempool.Stop: Problem closing database: %v", err) + } + // Reset the transaction register and the ledger. + dmp.txnRegister.Reset() + dmp.ledger.Reset() + + dmp.status = PosMempoolStatusNotRunning +} + +func (dmp *PosMempool) IsRunning() bool { + return dmp.status == PosMempoolStatusRunning +} + +// AddTransaction validates a MsgDeSoTxn transaction and adds it to the mempool if it is valid. +// If the mempool overflows as a result of adding the transaction, the mempool is pruned. 
+func (dmp *PosMempool) AddTransaction(txn *MsgDeSoTxn) error { + if !dmp.IsRunning() { + return errors.Wrapf(MempoolErrorNotRunning, "PosMempool.AddTransaction: ") + } + + // First, validate that the transaction is properly formatted according to BalanceModel. + if err := ValidateDeSoTxnSanityBalanceModel(txn, dmp.latestBlockHeight, dmp.params, dmp.globalParams); err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Problem validating transaction sanity") + } + + // Construct the MempoolTx from the MsgDeSoTxn. + mempoolTx, err := NewMempoolTx(txn, dmp.latestBlockHeight) + if err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Problem constructing MempoolTx") + } + + // Check transaction signature + if _, err := dmp.latestBlockView.VerifySignature(txn, uint32(dmp.latestBlockHeight)); err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Signature validation failed") + } + + // If we get this far, it means that the transaction is valid. We can now add it to the mempool. + // We lock the mempool to ensure that no other thread is modifying it while we add the transaction. + dmp.Lock() + defer dmp.Unlock() + + // Add the transaction to the mempool and then prune if needed. + if err := dmp.addTransactionNoLock(mempoolTx, true); err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Problem adding transaction to mempool") + } + + if err := dmp.pruneNoLock(); err != nil { + glog.Errorf("PosMempool.AddTransaction: Problem pruning mempool: %v", err) + } + + return nil +} + +func (dmp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) error { + userPk := NewPublicKey(txn.Tx.PublicKey) + txnFee := txn.Tx.TxnFeeNanos + + // Validate that the user has enough balance to cover the transaction fees. 
+ spendableBalanceNanos, err := dmp.latestBlockView.GetSpendableDeSoBalanceNanosForPublicKey(userPk.ToBytes(), + uint32(dmp.latestBlockHeight)) + if err != nil { + return errors.Wrapf(err, "PosMempool.addTransactionNoLock: Problem getting spendable balance") + } + if err := dmp.ledger.CanIncreaseEntryWithLimit(*userPk, txnFee, spendableBalanceNanos); err != nil { + return errors.Wrapf(err, "PosMempool.addTransactionNoLock: Problem checking balance increase for transaction with"+ + "hash %v, fee %v", txn.Tx.Hash(), txnFee) + } + + // If we get here, it means that the transaction's sender has enough balance to cover transaction fees. We can now + // add the transaction to mempool. + if err := dmp.txnRegister.AddTransaction(txn); err != nil { + return errors.Wrapf(err, "PosMempool.addTransactionNoLock: Problem adding txn to register") + } + + // We update the reserved balance to include the newly added transaction's fee. + dmp.ledger.IncreaseEntry(*userPk, txnFee) + + // Emit an event for the newly added transaction. + if persistToDb { + event := &MempoolEvent{ + Txn: txn, + Type: MempoolEventAdd, + } + dmp.persister.EnqueueEvent(event) + } + + return nil +} + +// RemoveTransaction is the main function for removing a transaction from the mempool. +func (dmp *PosMempool) RemoveTransaction(txnHash *BlockHash) error { + if !dmp.IsRunning() { + return errors.Wrapf(MempoolErrorNotRunning, "PosMempool.RemoveTransaction: ") + } + + dmp.Lock() + defer dmp.Unlock() + + // Get the transaction from the register. + txn := dmp.txnRegister.GetTransaction(txnHash) + if txn == nil { + return nil + } + + return dmp.removeTransactionNoLock(txn, true) +} + +func (dmp *PosMempool) removeTransactionNoLock(txn *MempoolTx, persistToDb bool) error { + // First, sanity check our reserved balance. + userPk := NewPublicKey(txn.Tx.PublicKey) + + // Remove the transaction from the register. 
+ if err := dmp.txnRegister.RemoveTransaction(txn); err != nil { + return errors.Wrapf(err, "PosMempool.removeTransactionNoLock: Problem removing txn from register") + } + // Decrease the appropriate ledger's balance by the transaction fee. + dmp.ledger.DecreaseEntry(*userPk, txn.Fee) + + // Emit an event for the removed transaction. + if persistToDb { + event := &MempoolEvent{ + Txn: txn, + Type: MempoolEventRemove, + } + dmp.persister.EnqueueEvent(event) + } + + return nil +} + +// GetTransaction returns the transaction with the given hash if it exists in the mempool. This function is thread-safe. +func (dmp *PosMempool) GetTransaction(txnHash *BlockHash) *MsgDeSoTxn { + if !dmp.IsRunning() { + return nil + } + + dmp.RLock() + defer dmp.RUnlock() + + txn := dmp.txnRegister.GetTransaction(txnHash) + if txn == nil || txn.Tx == nil { + return nil + } + + return txn.Tx +} + +// GetTransactions returns all transactions in the mempool ordered by the Fee-Time algorithm. This function is thread-safe. +func (dmp *PosMempool) GetTransactions() []*MsgDeSoTxn { + if !dmp.IsRunning() { + return nil + } + + dmp.RLock() + defer dmp.RUnlock() + + var desoTxns []*MsgDeSoTxn + poolTxns := dmp.txnRegister.GetFeeTimeTransactions() + for _, txn := range poolTxns { + if txn == nil || txn.Tx == nil { + continue + } + desoTxns = append(desoTxns, txn.Tx) + } + return desoTxns +} + +// GetIterator returns an iterator for the mempool transactions. The iterator can be used to peek transactions in the +// mempool ordered by the Fee-Time algorithm. Transactions can be fetched with the following pattern: +// +// for it.Next() { +// if txn, ok := it.Value(); ok { +// // Do something with txn. +// } +// } +// +// Note that the iteration pattern is not thread-safe. Another lock should be used to ensure thread-safety. 
+func (dmp *PosMempool) GetIterator() MempoolIterator { + if !dmp.IsRunning() { + return nil + } + + dmp.RLock() + defer dmp.RUnlock() + + return NewPosMempoolIterator(dmp.txnRegister.GetFeeTimeIterator()) +} + +// pruneNoLock removes transactions from the mempool until the mempool size is below the maximum allowed size. The transactions +// are removed in lowest to highest Fee-Time priority, i.e. opposite way that transactions are ordered in +// GetTransactions(). +func (dmp *PosMempool) pruneNoLock() error { + if dmp.txnRegister.Size() < dmp.params.MaxMempoolPosSizeBytes { + return nil + } + + prunedTxns, err := dmp.txnRegister.PruneToSize(dmp.params.MaxMempoolPosSizeBytes) + if err != nil { + return errors.Wrapf(err, "PosMempool.pruneNoLock: Problem pruning mempool") + } + for _, prunedTxn := range prunedTxns { + if err := dmp.removeTransactionNoLock(prunedTxn, true); err != nil { + // We should never get to here since the transaction was already pruned from the TransactionRegister. + glog.Errorf("PosMempool.pruneNoLock: Problem removing transaction from mempool: %v", err) + } + } + return nil +} + +// UpdateLatestBlock updates the latest block view and latest block node in the mempool. +func (dmp *PosMempool) UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) { + if !dmp.IsRunning() { + return + } + + dmp.Lock() + defer dmp.Unlock() + + dmp.latestBlockView = blockView + dmp.latestBlockHeight = blockHeight +} + +// UpdateGlobalParams updates the global params in the mempool. Changing GlobalParamsEntry can impact the validity of +// transactions in the mempool. For example, if the minimum network fee is increased, transactions with a fee below the +// new minimum will be removed from the mempool. To safely handle this, this method re-creates the TransactionRegister +// with the new global params and re-adds all transactions in the mempool to the new register. 
+func (dmp *PosMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { + if !dmp.IsRunning() { + return + } + + dmp.Lock() + defer dmp.Unlock() + + dmp.globalParams = globalParams + mempoolTxns := dmp.txnRegister.GetFeeTimeTransactions() + newRegister := NewTransactionRegister(dmp.globalParams) + removedTxnHashes := []string{} + + for _, mempoolTx := range mempoolTxns { + if err := newRegister.AddTransaction(mempoolTx); err == nil { + continue + } + // If we get here, it means that the transaction is no longer valid. We remove it from the mempool. + removedTxnHashes = append(removedTxnHashes, mempoolTx.Hash.String()) + if err := dmp.removeTransactionNoLock(mempoolTx, true); err != nil { + glog.Errorf("PosMempool.UpdateGlobalParams: Problem removing txn with hash %v from register: %v", + mempoolTx.Hash.String(), err) + } + } + + if len(removedTxnHashes) > 0 { + glog.Infof("PosMempool.UpdateGlobalParams: Transactions with the following hashes were removed: %v", + strings.Join(removedTxnHashes, ",")) + } + dmp.txnRegister.Reset() + dmp.txnRegister = newRegister +} diff --git a/lib/pos_mempool_ledger.go b/lib/pos_mempool_ledger.go index 45d9ea1f9..e44096cd4 100644 --- a/lib/pos_mempool_ledger.go +++ b/lib/pos_mempool_ledger.go @@ -21,62 +21,45 @@ func NewBalanceLedger() *BalanceLedger { } } -// CanIncreaseBalance checks if the user's balance can be increased by the given amount. If the user's balance + amount -// is less or equal than the provided maxBalance, the increase is allowed. Otherwise, an error is returned. -func (bl *BalanceLedger) CanIncreaseBalance(publicKey PublicKey, amount uint64, maxBalance uint64) error { +// CanIncreaseEntryWithLimit checks if the user's ledger entry can be increased by delta. If the user's +// balance + delta is less or equal than the balanceLimit, the increase is allowed. Otherwise, an error is returned. 
+func (bl *BalanceLedger) CanIncreaseEntryWithLimit(publicKey PublicKey, delta uint64, balanceLimit uint64) error { bl.RLock() defer bl.RUnlock() balance, exists := bl.balances[publicKey] // Check for balance overflow. - if exists && amount > math.MaxUint64-balance { - return errors.Errorf("CanIncreaseBalance: balance overflow") + if exists && delta > math.MaxUint64-balance { + return errors.Errorf("CanIncreaseEntryWithLimit: balance overflow") } - newBalance := balance + amount - if newBalance > maxBalance { - return errors.Errorf("CanIncreaseBalance: Not enough balance to cover txn fees "+ - "(newBalance: %d, maxBalance: %d)", newBalance, maxBalance) + newBalance := balance + delta + if newBalance > balanceLimit { + return errors.Errorf("CanIncreaseEntryWithLimit: Balance + delta exceeds balance limit "+ + "(balance: %d, delta %v, balanceLimit: %d)", balance, delta, balanceLimit) } return nil } -// CanDecreaseBalance checks if the user's balance can be decreased by the given amount. If the user's balance is -// greater or equal to the amount, the decrease is allowed. Otherwise, an error is returned. -func (bl *BalanceLedger) CanDecreaseBalance(publicKey PublicKey, amountNanos uint64) error { - bl.RLock() - defer bl.RUnlock() - - balance, exists := bl.balances[publicKey] - if !exists { - return errors.Errorf("CanDecreaseBalance: No balance for public key") - } - if amountNanos > balance { - return errors.Errorf("CanDecreaseBalance: Amount exceeds current balance") - } - return nil -} - -// IncreaseBalance increases the user's balance by the given amount. CanIncreaseBalance should be called before +// IncreaseEntry increases the user's ledger entry by delta. CanIncreaseEntryWithLimit should be called before // calling this function to ensure the increase is allowed. 
-func (bl *BalanceLedger) IncreaseBalance(publicKey PublicKey, amount uint64) { +func (bl *BalanceLedger) IncreaseEntry(publicKey PublicKey, delta uint64) { bl.Lock() defer bl.Unlock() balance, _ := bl.balances[publicKey] // Check for balance overflow. - if amount > math.MaxUint64-balance { + if delta > math.MaxUint64-balance { bl.balances[publicKey] = math.MaxUint64 return } - bl.balances[publicKey] = balance + amount + bl.balances[publicKey] = balance + delta } -// DecreaseBalance decreases the user's balance by the given amount. CanDecreaseBalance should be called before -// calling this function to ensure the decrease is allowed. -func (bl *BalanceLedger) DecreaseBalance(publicKey PublicKey, amount uint64) { +// DecreaseEntry decreases the user's ledger entry by delta. +func (bl *BalanceLedger) DecreaseEntry(publicKey PublicKey, delta uint64) { bl.Lock() defer bl.Unlock() @@ -85,16 +68,16 @@ func (bl *BalanceLedger) DecreaseBalance(publicKey PublicKey, amount uint64) { return } // Check for balance underflow. - if amount > balance { + if delta > balance { delete(bl.balances, publicKey) return } - bl.balances[publicKey] = balance - amount + bl.balances[publicKey] = balance - delta } -// GetBalance returns the user's balance. -func (bl *BalanceLedger) GetBalance(publicKey PublicKey) uint64 { +// GetEntry returns the user's ledger entry. 
+func (bl *BalanceLedger) GetEntry(publicKey PublicKey) uint64 { bl.RLock() defer bl.RUnlock() diff --git a/lib/pos_mempool_ledger_test.go b/lib/pos_mempool_ledger_test.go index f11ab8f11..b909ce44b 100644 --- a/lib/pos_mempool_ledger_test.go +++ b/lib/pos_mempool_ledger_test.go @@ -15,38 +15,34 @@ func TestBalanceLedger(t *testing.T) { // Sanity-check some balance increase and decreases for pk0 balanceLedger := NewBalanceLedger() - require.NoError(balanceLedger.CanIncreaseBalance(pk0, 100, 100)) - require.NoError(balanceLedger.CanIncreaseBalance(pk0, 0, 100)) - balanceLedger.IncreaseBalance(pk0, 100) - require.Equal(uint64(100), balanceLedger.GetBalance(pk0)) - require.NoError(balanceLedger.CanIncreaseBalance(pk0, 0, 100)) - require.Error(balanceLedger.CanIncreaseBalance(pk0, 1, 100)) - require.Error(balanceLedger.CanIncreaseBalance(pk0, 0, 99)) - require.Error(balanceLedger.CanIncreaseBalance(pk0, math.MaxUint64, math.MaxUint64)) - require.NoError(balanceLedger.CanDecreaseBalance(pk0, 100)) - require.NoError(balanceLedger.CanDecreaseBalance(pk0, 0)) - require.Error(balanceLedger.CanDecreaseBalance(pk0, 101)) - require.Error(balanceLedger.CanDecreaseBalance(pk0, math.MaxUint64)) - balanceLedger.DecreaseBalance(pk0, 100) - require.Equal(uint64(0), balanceLedger.GetBalance(pk0)) - balanceLedger.IncreaseBalance(pk0, 10) - require.Equal(uint64(10), balanceLedger.GetBalance(pk0)) - balanceLedger.DecreaseBalance(pk0, 100) - require.Equal(uint64(0), balanceLedger.GetBalance(pk0)) - balanceLedger.IncreaseBalance(pk0, 100) + require.NoError(balanceLedger.CanIncreaseEntryWithLimit(pk0, 100, 100)) + require.NoError(balanceLedger.CanIncreaseEntryWithLimit(pk0, 0, 100)) + balanceLedger.IncreaseEntry(pk0, 100) + require.Equal(uint64(100), balanceLedger.GetEntry(pk0)) + require.NoError(balanceLedger.CanIncreaseEntryWithLimit(pk0, 0, 100)) + require.Error(balanceLedger.CanIncreaseEntryWithLimit(pk0, 1, 100)) + require.Error(balanceLedger.CanIncreaseEntryWithLimit(pk0, 0, 99)) + 
require.Error(balanceLedger.CanIncreaseEntryWithLimit(pk0, math.MaxUint64, math.MaxUint64)) + balanceLedger.DecreaseEntry(pk0, 100) + require.Equal(uint64(0), balanceLedger.GetEntry(pk0)) + balanceLedger.IncreaseEntry(pk0, 10) + require.Equal(uint64(10), balanceLedger.GetEntry(pk0)) + balanceLedger.DecreaseEntry(pk0, 100) + require.Equal(uint64(0), balanceLedger.GetEntry(pk0)) + balanceLedger.IncreaseEntry(pk0, 100) // Increase balance for pk1 and pk2 a couple of times - balanceLedger.IncreaseBalance(pk1, 100) - balanceLedger.IncreaseBalance(pk2, 100) - balanceLedger.DecreaseBalance(pk1, 40) - balanceLedger.IncreaseBalance(pk2, 40) - require.Equal(uint64(100), balanceLedger.GetBalance(pk0)) - require.Equal(uint64(60), balanceLedger.GetBalance(pk1)) - require.Equal(uint64(140), balanceLedger.GetBalance(pk2)) + balanceLedger.IncreaseEntry(pk1, 100) + balanceLedger.IncreaseEntry(pk2, 100) + balanceLedger.DecreaseEntry(pk1, 40) + balanceLedger.IncreaseEntry(pk2, 40) + require.Equal(uint64(100), balanceLedger.GetEntry(pk0)) + require.Equal(uint64(60), balanceLedger.GetEntry(pk1)) + require.Equal(uint64(140), balanceLedger.GetEntry(pk2)) // Test clearing balance ledger balanceLedger.Reset() - require.Equal(uint64(0), balanceLedger.GetBalance(pk0)) - require.Equal(uint64(0), balanceLedger.GetBalance(pk1)) - require.Equal(uint64(0), balanceLedger.GetBalance(pk2)) + require.Equal(uint64(0), balanceLedger.GetEntry(pk0)) + require.Equal(uint64(0), balanceLedger.GetEntry(pk1)) + require.Equal(uint64(0), balanceLedger.GetEntry(pk2)) } diff --git a/lib/pos_mempool_persister.go b/lib/pos_mempool_persister.go index 33434226b..55d1b735d 100644 --- a/lib/pos_mempool_persister.go +++ b/lib/pos_mempool_persister.go @@ -49,22 +49,22 @@ type MempoolPersister struct { stopGroup sync.WaitGroup startGroup sync.WaitGroup - // batchPersistFrequencyMilliseconds is the time frequency at which the persister will flush the transaction queue to the database. 
- batchPersistFrequencyMilliseconds int + // mempoolBackupTimeMilliseconds is the time frequency at which the persister will flush the transaction queue to the database. + mempoolBackupTimeMilliseconds int // eventQueue is used to queue up transactions to be persisted. The queue receives events from the EnqueueEvent, // which is called whenever a transaction is added or removed from the mempool. eventQueue chan *MempoolEvent // updateBatch is used to cache transactions that need to be persisted to the database. The batch is flushed to the - // database periodically based on the batchPersistFrequencyMilliseconds. + // database periodically based on the mempoolBackupTimeMilliseconds. updateBatch []*MempoolEvent } -func NewMempoolPersister(db *badger.DB, batchPersistFrequencyMilliseconds int) *MempoolPersister { +func NewMempoolPersister(db *badger.DB, mempoolBackupTimeMilliseconds int) *MempoolPersister { return &MempoolPersister{ - batchPersistFrequencyMilliseconds: batchPersistFrequencyMilliseconds, - status: MempoolPersisterStatusNotRunning, - db: db, - eventQueue: make(chan *MempoolEvent, eventQueueSize), + mempoolBackupTimeMilliseconds: mempoolBackupTimeMilliseconds, + status: MempoolPersisterStatusNotRunning, + db: db, + eventQueue: make(chan *MempoolEvent, eventQueueSize), } } @@ -109,7 +109,7 @@ func (mp *MempoolPersister) run() { } continue - case <-time.After(time.Duration(mp.batchPersistFrequencyMilliseconds) * time.Millisecond): + case <-time.After(time.Duration(mp.mempoolBackupTimeMilliseconds) * time.Millisecond): if err := mp.persistBatch(); err != nil { glog.Errorf("MempoolPersister: Error persisting batch: %v", err) } diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go new file mode 100644 index 000000000..c8a689500 --- /dev/null +++ b/lib/pos_mempool_test.go @@ -0,0 +1,245 @@ +package lib + +import ( + "bytes" + "github.com/dgraph-io/badger/v3" + "github.com/stretchr/testify/require" + "math/rand" + "os" + "testing" +) + +func 
TestPosMempoolStart(t *testing.T) { + require := require.New(t) + + params := DeSoTestnetParams + globalParams := _testGetDefaultGlobalParams() + dir := _dbDirSetup(t) + + mempool := NewPosMempool(&params, globalParams, nil, 0, dir) + require.NoError(mempool.Start()) + require.Equal(PosMempoolStatusRunning, mempool.status) + mempool.Stop() +} + +func TestPosMempoolRestartWithTransactions(t *testing.T) { + require := require.New(t) + seed := int64(991) + rand := rand.New(rand.NewSource(seed)) + + globalParams := _testGetDefaultGlobalParams() + feeMin := globalParams.MinimumNetworkFeeNanosPerKB + feeMax := uint64(10000) + + params, db := _blockchainSetup(t) + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + m1PubBytes, _, _ := Base58CheckDecode(m1Pub) + + latestBlockView, err := NewUtxoView(db, params, nil, nil) + require.NoError(err) + dir := _dbDirSetup(t) + + mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir) + require.NoError(mempool.Start()) + require.Equal(PosMempoolStatusRunning, mempool.status) + + txn1 := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) + txn2 := _generateTestTxn(t, rand, feeMin, feeMax, m1PubBytes, m1Priv, 100, 0) + require.NoError(mempool.AddTransaction(txn1)) + require.NoError(mempool.AddTransaction(txn2)) + + poolTxns := mempool.GetTransactions() + require.Equal(2, len(poolTxns)) + mempool.Stop() + + newPool := NewPosMempool(params, globalParams, latestBlockView, 2, dir) + require.NoError(newPool.Start()) + require.Equal(PosMempoolStatusRunning, newPool.status) + newPoolTxns := newPool.GetTransactions() + require.Equal(2, len(newPoolTxns)) + newPool.Stop() +} + +func TestPosMempoolPrune(t *testing.T) { + require := require.New(t) + seed := int64(993) + rand := rand.New(rand.NewSource(seed)) + + globalParams := _testGetDefaultGlobalParams() + feeMin := globalParams.MinimumNetworkFeeNanosPerKB + feeMax := uint64(2000) + + params, db := _blockchainSetup(t) + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + 
m1PubBytes, _, _ := Base58CheckDecode(m1Pub) + + latestBlockView, err := NewUtxoView(db, params, nil, nil) + require.NoError(err) + dir := _dbDirSetup(t) + + params.MaxMempoolPosSizeBytes = 500 + mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir) + require.NoError(mempool.Start()) + require.Equal(PosMempoolStatusRunning, mempool.status) + + var txns []*MsgDeSoTxn + for ii := 0; ii < 10; ii++ { + pk := m0PubBytes + priv := m0Priv + if ii%2 == 1 { + pk = m1PubBytes + priv = m1Priv + } + txn := _generateTestTxn(t, rand, feeMin, feeMax, pk, priv, 100, 25) + txns = append(txns, txn) + require.NoError(mempool.AddTransaction(txn)) + } + + fetchedTxns := mempool.GetTransactions() + require.Equal(3, len(fetchedTxns)) + require.Equal(uint64(1974), fetchedTxns[0].TxnFeeNanos) + require.Equal(uint64(1931), fetchedTxns[1].TxnFeeNanos) + require.Equal(uint64(1776), fetchedTxns[2].TxnFeeNanos) + require.Equal(uint64(1974), mempool.GetTransaction(fetchedTxns[0].Hash()).TxnFeeNanos) + require.Equal(uint64(1931), mempool.GetTransaction(fetchedTxns[1].Hash()).TxnFeeNanos) + require.Equal(uint64(1776), mempool.GetTransaction(fetchedTxns[2].Hash()).TxnFeeNanos) + + // Remove one transaction. + require.NoError(mempool.RemoveTransaction(fetchedTxns[0].Hash())) + mempool.Stop() + + newPool := NewPosMempool(params, globalParams, latestBlockView, 2, dir) + require.NoError(newPool.Start()) + require.Equal(PosMempoolStatusRunning, newPool.status) + require.Equal(2, len(newPool.GetTransactions())) + + // Remove the other transactions. + require.NoError(newPool.RemoveTransaction(fetchedTxns[1].Hash())) + require.NoError(newPool.RemoveTransaction(fetchedTxns[2].Hash())) + // Remove the same transaction twice + require.NoError(newPool.RemoveTransaction(fetchedTxns[1].Hash())) + require.Equal(0, len(newPool.GetTransactions())) + + // Add the transactions back. 
+ for _, txn := range fetchedTxns { + require.NoError(newPool.AddTransaction(txn)) + } + require.Equal(3, len(newPool.GetTransactions())) + + // Iterate through the transactions. + it := newPool.GetIterator() + index := 0 + for it.Next() { + tx, ok := it.Value() + require.True(ok) + require.True(bytes.Equal(tx.Hash().ToBytes(), fetchedTxns[index].Hash().ToBytes())) + index++ + } + newPool.Stop() +} + +func TestPosMempoolUpdateGlobalParams(t *testing.T) { + require := require.New(t) + seed := int64(995) + rand := rand.New(rand.NewSource(seed)) + + globalParams := _testGetDefaultGlobalParams() + feeMin := globalParams.MinimumNetworkFeeNanosPerKB + feeMax := uint64(2000) + + params, db := _blockchainSetup(t) + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + m1PubBytes, _, _ := Base58CheckDecode(m1Pub) + + latestBlockView, err := NewUtxoView(db, params, nil, nil) + require.NoError(err) + dir := _dbDirSetup(t) + + mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir) + require.NoError(mempool.Start()) + require.Equal(PosMempoolStatusRunning, mempool.status) + + var txns []*MsgDeSoTxn + for ii := 0; ii < 100; ii++ { + pk := m0PubBytes + priv := m0Priv + if ii%2 == 1 { + pk = m1PubBytes + priv = m1Priv + } + txn := _generateTestTxn(t, rand, feeMin, feeMax, pk, priv, 100, 25) + txns = append(txns, txn) + require.NoError(mempool.AddTransaction(txn)) + } + + require.Equal(100, len(mempool.GetTransactions())) + newGlobalParams := _testGetDefaultGlobalParams() + newGlobalParams.MinimumNetworkFeeNanosPerKB = 20000 + mempool.UpdateGlobalParams(newGlobalParams) + require.Equal(0, len(mempool.GetTransactions())) + mempool.Stop() + + newPool := NewPosMempool(params, newGlobalParams, latestBlockView, 2, dir) + require.NoError(newPool.Start()) + require.Equal(PosMempoolStatusRunning, newPool.status) + newPoolTxns := newPool.GetTransactions() + require.Equal(0, len(newPoolTxns)) + newPool.Stop() +} + +func _blockchainSetup(t *testing.T) (_params *DeSoParams, _db 
*badger.DB) { + require := require.New(t) + + chain, params, db := NewLowDifficultyBlockchain(t) + oldPool, miner := NewTestMiner(t, chain, params, true) + // Mine a few blocks to give the senderPkString some money. + _, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, oldPool) + require.NoError(err) + _, err = miner.MineAndProcessSingleBlock(0 /*threadIndex*/, oldPool) + require.NoError(err) + + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + m0PublicKeyBase58Check := Base58CheckEncode(m0PubBytes, false, params) + m1PubBytes, _, _ := Base58CheckDecode(m1Pub) + m1PublicKeyBase58Check := Base58CheckEncode(m1PubBytes, false, params) + + _, _, _ = _doBasicTransferWithViewFlush( + t, chain, db, params, senderPkString, m0PublicKeyBase58Check, + senderPrivString, 200000, 11) + _, _, _ = _doBasicTransferWithViewFlush( + t, chain, db, params, senderPkString, m1PublicKeyBase58Check, + senderPrivString, 200000, 11) + + return params, db +} + +func _dbDirSetup(t *testing.T) (_dir string) { + require := require.New(t) + + dir, err := os.MkdirTemp("", "badgerdb-mempool") + require.NoError(err) + t.Logf("BadgerDB directory: %s\nIt should be automatically removed at the end of the test", dir) + t.Cleanup(func() { + os.RemoveAll(dir) + }) + return dir +} + +func _generateTestTxn(t *testing.T, rand *rand.Rand, feeMin uint64, feeMax uint64, pk []byte, priv string, expirationHeight uint64, + extraDataBytes int32) *MsgDeSoTxn { + + extraData := make(map[string][]byte) + extraData["key"] = RandomBytes(extraDataBytes) + txn := &MsgDeSoTxn{ + TxnVersion: DeSoTxnVersion1, + PublicKey: pk, + TxnMeta: &BasicTransferMetadata{}, + TxnFeeNanos: rand.Uint64()%(feeMax-feeMin) + feeMin, + TxnNonce: &DeSoNonce{ + ExpirationBlockHeight: expirationHeight, + }, + ExtraData: extraData, + } + _signTxn(t, txn, priv) + return txn +} diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index bfe2f6ed4..e42ec5982 100644 --- a/lib/pos_transaction_register.go +++ 
b/lib/pos_transaction_register.go @@ -28,7 +28,7 @@ type TransactionRegister struct { // a FeeTimeBucket given its min fee. feeTimeBucketsByMinFeeMap map[uint64]*FeeTimeBucket // txnMembership is a set of transaction hashes. It is used to determine existence of a transaction in the register. - txnMembership *Set[BlockHash] + txnMembership map[BlockHash]*MempoolTx // totalTxnsSizeBytes is the total size of all transactions in the register. totalTxnsSizeBytes uint64 // minimumNetworkFeeNanosPerKB is the base fee rate for the lowest fee FeeTimeBucket. This value corresponds to @@ -45,7 +45,7 @@ func NewTransactionRegister(globalParams *GlobalParamsEntry) *TransactionRegiste return &TransactionRegister{ feeTimeBucketSet: feeTimeBucketSet, feeTimeBucketsByMinFeeMap: make(map[uint64]*FeeTimeBucket), - txnMembership: NewSet([]BlockHash{}), + txnMembership: make(map[BlockHash]*MempoolTx), totalTxnsSizeBytes: 0, minimumNetworkFeeNanosPerKB: minNetworkFee, feeBucketRateMultiplierBasisPoints: bucketMultiplier, @@ -87,7 +87,7 @@ func (tr *TransactionRegister) addTransactionNoLock(txn *MempoolTx) error { return fmt.Errorf("TransactionRegister.AddTransaction: Transaction or transaction hash is nil") } - if tr.txnMembership.Includes(*txn.Hash) { + if _, ok := tr.txnMembership[*txn.Hash]; ok { return nil } @@ -118,7 +118,7 @@ func (tr *TransactionRegister) addTransactionNoLock(txn *MempoolTx) error { } tr.totalTxnsSizeBytes += txn.TxSizeBytes - tr.txnMembership.Add(*txn.Hash) + tr.txnMembership[*txn.Hash] = txn return nil } @@ -137,7 +137,7 @@ func (tr *TransactionRegister) removeTransactionNoLock(txn *MempoolTx) error { return fmt.Errorf("TransactionRegister.RemoveTransaction: Transaction or transaction hash is nil") } - if !tr.txnMembership.Includes(*txn.Hash) { + if _, ok := tr.txnMembership[*txn.Hash]; !ok { return nil } @@ -163,7 +163,7 @@ func (tr *TransactionRegister) removeTransactionNoLock(txn *MempoolTx) error { } } - tr.txnMembership.Remove(*txn.Hash) + 
delete(tr.txnMembership, *txn.Hash) tr.totalTxnsSizeBytes -= txn.TxSizeBytes return nil } @@ -209,7 +209,8 @@ func (tr *TransactionRegister) Includes(txn *MempoolTx) bool { return false } - return tr.txnMembership.Includes(*txn.Hash) + _, ok := tr.txnMembership[*txn.Hash] + return ok } func (tr *TransactionRegister) Reset() { @@ -218,7 +219,7 @@ func (tr *TransactionRegister) Reset() { tr.feeTimeBucketSet.Clear() tr.feeTimeBucketsByMinFeeMap = make(map[uint64]*FeeTimeBucket) - tr.txnMembership = NewSet([]BlockHash{}) + tr.txnMembership = make(map[BlockHash]*MempoolTx) tr.totalTxnsSizeBytes = 0 } @@ -249,6 +250,18 @@ func (tr *TransactionRegister) GetFeeTimeTransactions() []*MempoolTx { return txns } +// GetTransaction returns the transaction with the given hash if it exists in the register, or nil otherwise. +func (tr *TransactionRegister) GetTransaction(hash *BlockHash) *MempoolTx { + if hash == nil { + return nil + } + + tr.RLock() + defer tr.RUnlock() + + return tr.txnMembership[*hash] +} + // PruneToSize removes transactions from the end of the register until the size of the register shrinks to the desired // number of bytes. The returned transactions, _prunedTxns, are ordered by lowest-to-highest priority, i.e. first // transaction will have the smallest fee, last transaction will have the highest fee. 
Returns _err = nil if no From 9b79bed01e852788cd1f684febca6e83d9ed4cbd Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Thu, 10 Aug 2023 11:33:39 -0400 Subject: [PATCH 182/762] Create Skeleton for PoS Consensus Components --- Dockerfile | 1 + consensus/consensus.go | 65 ++++++++++++++++ consensus/consensus_test.go | 1 + consensus/types.go | 53 +++++++++++++ lib/blockchain.go | 75 +++++++++++++++--- lib/server.go | 147 +++++++++++++++++++++++++++++------- test.Dockerfile | 3 +- 7 files changed, 304 insertions(+), 41 deletions(-) create mode 100644 consensus/consensus.go create mode 100644 consensus/consensus_test.go create mode 100644 consensus/types.go diff --git a/Dockerfile b/Dockerfile index 6aac58b66..56e466d8e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,6 +16,7 @@ RUN go mod download COPY bls bls COPY cmd cmd COPY collections collections +COPY consensus consensus COPY desohash desohash COPY lib lib COPY migrate migrate diff --git a/consensus/consensus.go b/consensus/consensus.go new file mode 100644 index 000000000..16136d3d7 --- /dev/null +++ b/consensus/consensus.go @@ -0,0 +1,65 @@ +package consensus + +import ( + "time" + + "github.com/deso-protocol/core/bls" +) + +func NewFastHotStuffConsensus() *FastHotStuffConsensus { + return &FastHotStuffConsensus{ + internalTimersUpdated: make(chan interface{}), + votesSeen: make(map[BlockHash]map[bls.PublicKey]*bls.Signature), + timeoutsSeen: make(map[BlockHash]map[bls.PublicKey]*bls.Signature), + ConsensusEvents: make(chan *ConsensusEvent), + } +} + +func (fc *FastHotStuffConsensus) Init( /*TODO */ ) { + // TODO +} + +func (fc *FastHotStuffConsensus) HandleAcceptedBlock( /* TODO */ ) { + // TODO +} + +func (fc *FastHotStuffConsensus) HandleVoteMessage( /* TODO */ ) { + // TODO +} + +func (pc *FastHotStuffConsensus) HandleTimeoutMessage( /* TODO */ ) { + // TODO +} + +func (fc *FastHotStuffConsensus) HandleBlockProposal( /* TODO */ ) { + // TODO +} + +func (fc *FastHotStuffConsensus) Start() { + for { + select { 
+ case <-time.After(time.Until(fc.nextBlockProposalTime)): + { + // TODO + } + case <-time.After(time.Until(fc.nextTimeoutTime)): + { + // TODO + } + case <-fc.internalTimersUpdated: + { + // TODO + } + case <-fc.quit: + { + // TODO + close(fc.quit) + return + } + } + } +} + +func (fc *FastHotStuffConsensus) Stop() { + fc.quit <- struct{}{} +} diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go new file mode 100644 index 000000000..4bbf8296e --- /dev/null +++ b/consensus/consensus_test.go @@ -0,0 +1 @@ +package consensus diff --git a/consensus/types.go b/consensus/types.go new file mode 100644 index 000000000..0f2cd3280 --- /dev/null +++ b/consensus/types.go @@ -0,0 +1,53 @@ +package consensus + +import ( + "time" + + "github.com/deso-protocol/core/bls" +) + +// ConsensusEvent is a way for FastHotStuffConsensus to send messages back to +// the Server. There are three types of events that can be sent: +// - Vote: The consensus is ready to vote on a block +// - Timeout: The consensus has timed out on a view +// - BlockProposal: The consensus has a QC for a block and is ready to propose the next +// block + +type ConsensusEventType byte + +const ( + ConsensusEventTypeTimeout ConsensusEventType = iota + ConsensusEventTypeBlockProposal + ConsensusEventTypeVote +) + +type ConsensusEvent struct { + EventType ConsensusEventType + BlockHeight uint64 + View uint64 +} + +// BlockHash is a 32-byte hash of a block used to uniquely identify a block. It's re-defined here +// to match the exact structure of the BlockHash type in core, so the two packages are decoupled and +// the Fast HotStuff consensus can be tested end-to-end independently. When using the Fast HotStuff, +// the lib package can convert its own BlockHash type to and from this type trivially. 
+type BlockHash [32]byte + +// NewFastHotStuffConsensus creates a new persistent FastHotStuffConsensus object that internally +// runs the Fast HotStuff consensus protocol, and signals the Server whenever it's ready to perform +// an action. + +type FastHotStuffConsensus struct { + nextBlockProposalTime time.Time + nextTimeoutTime time.Time + + internalTimersUpdated chan interface{} + quit chan interface{} + + // votesSeen and timeoutsSeen are in-memory maps of all the votes and timeouts we've + // seen so far, organized by their block hash and then by the public key of the voter. + votesSeen map[BlockHash]map[bls.PublicKey]*bls.Signature // TODO: this should be a vote message + timeoutsSeen map[BlockHash]map[bls.PublicKey]*bls.Signature // TODO: this should be a timeout message + + ConsensusEvents chan *ConsensusEvent +} diff --git a/lib/blockchain.go b/lib/blockchain.go index 2c83da490..0a8ba1077 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1605,10 +1605,10 @@ func updateBestChainInMemory(mainChainList []*BlockNode, mainChainMap map[BlockH } // Caller must acquire the ChainLock for writing prior to calling this. -func (bc *Blockchain) processHeader(blockHeader *MsgDeSoHeader, headerHash *BlockHash) (_isMainChain bool, _isOrphan bool, _err error) { +func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *BlockHash) (_isMainChain bool, _isOrphan bool, _err error) { // Start by checking if the header already exists in our node // index. If it does, then return an error. We should generally - // expect that processHeader will only be called on headers we + // expect that processHeaderPoW will only be called on headers we // haven't seen before. _, nodeExists := bc.blockIndex[*headerHash] if nodeExists { @@ -1657,7 +1657,7 @@ func (bc *Blockchain) processHeader(blockHeader *MsgDeSoHeader, headerHash *Bloc // Verify that the height is one greater than the parent. 
prevHeight := parentHeader.Height if blockHeader.Height != prevHeight+1 { - glog.Errorf("processHeader: Height of block (=%d) is not equal to one greater "+ + glog.Errorf("processHeaderPoW: Height of block (=%d) is not equal to one greater "+ "than the parent height (=%d)", blockHeader.Height, prevHeight) return false, false, HeaderErrorHeightInvalid } @@ -1686,7 +1686,7 @@ func (bc *Blockchain) processHeader(blockHeader *MsgDeSoHeader, headerHash *Bloc // This commentary is useful to consider with regard to that: // https://github.com/zawy12/difficulty-algorithms/issues/45 if blockHeader.GetTstampSecs() <= parentHeader.GetTstampSecs() { - glog.Warningf("processHeader: Rejecting header because timestamp %v is "+ + glog.Warningf("processHeaderPoW: Rejecting header because timestamp %v is "+ "before timestamp of previous block %v", time.Unix(int64(blockHeader.GetTstampSecs()), 0), time.Unix(int64(parentHeader.GetTstampSecs()), 0)) @@ -1776,27 +1776,61 @@ func (bc *Blockchain) processHeader(blockHeader *MsgDeSoHeader, headerHash *Bloc return isMainChain, false, nil } -// ProcessHeader is a wrapper around processHeader, which does the leg-work, that -// acquires the ChainLock first. +// processHeaderPoS validates and stores an incoming block header as follows: +// 1. Validating the block header's structure and timestamp +// 2. Connect the block header to the header chain's tip +// 3. Store the block header in the db and the in-memory block index +func (bc *Blockchain) processHeaderPoS(blockHeader *MsgDeSoHeader, headerHash *BlockHash) (_isMainChain bool, _isOrphan bool, _err error) { + // TODO + return false, false, fmt.Errorf("processHeaderPoS: Not implemented") +} + +// ProcessHeader is a wrapper around processHeaderPoW and processHeaderPoS, which do the leg-work.
func (bc *Blockchain) ProcessHeader(blockHeader *MsgDeSoHeader, headerHash *BlockHash) (_isMainChain bool, _isOrphan bool, _err error) { bc.ChainLock.Lock() defer bc.ChainLock.Unlock() - return bc.processHeader(blockHeader, headerHash) + if blockHeader == nil { + // If the header is nil then we return an error. Nothing we can do here. + return false, false, fmt.Errorf("ProcessHeader: Header is nil") + } + + // If the header's height is after the PoS cut-over fork height, then we use the PoS header processing logic. Otherwise, fall back + // to the PoW logic. + if blockHeader.Height >= uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + // TODO: call bc.processHeaderPoS(blockHeader, headerHash) instead + return bc.processHeaderPoW(blockHeader, headerHash) + } + + return bc.processHeaderPoW(blockHeader, headerHash) } func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, _err error) { - // TODO: Move this to be more isolated. bc.ChainLock.Lock() defer bc.ChainLock.Unlock() - blockHeight := uint64(bc.BlockTip().Height + 1) - - bc.timer.Start("Blockchain.ProcessBlock: Initial") if desoBlock == nil { + // If the block is nil then we return an error. Nothing we can do here. return false, false, fmt.Errorf("ProcessBlock: Block is nil") } + // If the block's height is after the PoS cut-over fork height, then we use the PoS block processing logic. Otherwise, fall back + // to the PoW logic. + if desoBlock.Header.Height >= uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + // TODO: call bc.processBlockPoS(desoBlock, verifySignatures) instead + return bc.processBlockPoW(desoBlock, verifySignatures) + } + + return bc.processBlockPoW(desoBlock, verifySignatures) +} + +func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, err error) { + // TODO: Move this to be more isolated. 
+ + blockHeight := uint64(bc.BlockTip().Height + 1) + + bc.timer.Start("Blockchain.ProcessBlock: Initial") + // Start by getting and validating the block's header. blockHeader := desoBlock.Header if blockHeader == nil { @@ -1875,7 +1909,7 @@ func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures boo // first before we do anything. This should create a node and set // the header validation status for it. if !nodeExists { - _, isOrphan, err := bc.processHeader(blockHeader, blockHash) + _, isOrphan, err := bc.processHeaderPoW(blockHeader, blockHash) if err != nil { // If an error occurred processing the header, then the header // should be marked as invalid, which should be sufficient. @@ -2554,6 +2588,23 @@ func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures boo return isMainChain, false, nil } +// processBlockPoS runs the Fast-Hotstuff block connect and commit rule as follows: +// 1. Validate on an incoming block and its header +// 2. Store the block in the db +// 3. Resolves forks within the last two blocks +// 4. Connect the block to the blockchain's tip +// 5. If applicable, flush the incoming block's grandparent to the DB +// 6. Notify the block proposer, pacemaker, and voting logic that the incoming block has been accepted +func (bc *Blockchain) processBlockPoS(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, err error) { + // TODO: Implement me + return false, false, fmt.Errorf("ProcessBlockPoS: Not implemented yet") +} + +func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { + // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks + panic("GetUncommittedTipView: Not implemented yet") +} + // DisconnectBlocksToHeight will rollback blocks from the db and blockchain structs until block tip reaches the provided // blockHeight parameter. 
func (bc *Blockchain) DisconnectBlocksToHeight(blockHeight uint64, snap *Snapshot) error { diff --git a/lib/server.go b/lib/server.go index 916da4a09..5eeb6b0c7 100644 --- a/lib/server.go +++ b/lib/server.go @@ -19,6 +19,7 @@ import ( chainlib "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/deso-protocol/core/consensus" "github.com/deso-protocol/go-deadlock" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" @@ -60,6 +61,9 @@ type Server struct { eventManager *EventManager TxIndex *TXIndex + fastHotStuffConsensus *consensus.FastHotStuffConsensus + // posMempool *PosMemPool TODO: Add the mempool later + // All messages received from peers get sent from the ConnectionManager to the // Server through this channel. // @@ -1717,6 +1721,11 @@ func (srv *Server) _handleBlockAccepted(event *BlockEvent) { return } + // Notify the consensus that a block was accepted. + if srv.fastHotStuffConsensus != nil { + srv.fastHotStuffConsensus.HandleAcceptedBlock() + } + // Construct an inventory vector to relay to peers. blockHash, _ := blk.Header.Hash() invVect := &InvVect{ @@ -2159,41 +2168,111 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { } } -// Note that messageHandler is single-threaded and so all of the handle* functions -// it calls can assume they can access the Server's variables without concurrency -// issues. -func (srv *Server) messageHandler() { +func (srv *Server) _handleFastHostStuffBlockProposal(event *consensus.ConsensusEvent) { + // The consensus module has signaled that we can propose a block at a certain block + // height. We construct the block and broadcast it here: + // 1. Verify that the block height we want to propose at is valid + // 2. Get a QC from the consensus module + // 3. Iterate over the top n transactions from the mempool + // 4. Construct a block with the QC and the top n transactions from the mempool + // 5. Sign the block + // 6. 
Process the block locally + // - This will connect the block to the blockchain, remove the transactions from the + // - mempool, and process the vote in the consensus module + // 7. Broadcast the block to the network +} + +func (srv *Server) _handleFastHostStuffVote(event *consensus.ConsensusEvent) { + // The consensus module has signaled that we can vote on a block. We construct and + // broadcast the vote here: + // 1. Verify that the block height we want to vote on is valid + // 2. Construct the vote message + // 3. Process the vote in the consensus module + // 4. Broadcast the timeout msg to the network +} + +func (srv *Server) _handleFastHostStuffTimeout(event *consensus.ConsensusEvent) { + // The consensus module has signaled that we have timed out for a view. We construct and + // broadcast the timeout here: + // 1. Verify the block height and view we want to timeout on are valid + // 2. Construct the timeout message + // 3. Process the timeout in the consensus module + // 4. Broadcast the timeout msg to the network +} + +func (srv *Server) _handleFastHostStuffConsensusEvent(event *consensus.ConsensusEvent) { + switch event.EventType { + case consensus.ConsensusEventTypeBlockProposal: + srv._handleFastHostStuffBlockProposal(event) + case consensus.ConsensusEventTypeVote: + srv._handleFastHostStuffVote(event) + case consensus.ConsensusEventTypeTimeout: + srv._handleFastHostStuffTimeout(event) + } +} + +// _startConsensusEventLoop contains the top-level event loop to run both the PoW and PoS consensus. It is +// single-threaded to ensure that concurrent event do not conflict with each other. It's role is to guarantee +// single threaded processing and act as an entry point for consensus events. It does minimal validation on its +// own. +// +// For the PoW consensus: +// - It listens to all peer messages from the network and handles them as they come in. 
This includes +// control messages from peer, proposed blocks from peers, votes/timeouts, block requests, mempool +// requests from syncing peers +// +// For the PoS consensus: +// - It listens to all peer messages from the network and handles them as they come in. This includes +// control messages from peer, proposed blocks from peers, votes/timeouts, block requests, mempool +// requests from syncing peers +// - It listens to consensus events from the Fast HostStuff consensus engine. The consensus signals when +// it's ready to vote, timeout, or propose a block. +func (srv *Server) _startConsensus() { for { // This is used instead of the shouldQuit control message exist mechanism below. shouldQuit will be true only // when all incoming messages have been processed, on the other hand this shutdown will quit immediately. if atomic.LoadInt32(&srv.shutdown) >= 1 { break } - serverMessage := <-srv.incomingMessages - glog.V(2).Infof("Server.messageHandler: Handling message of type %v from Peer %v", - serverMessage.Msg.GetMsgType(), serverMessage.Peer) - - // If the message is an addr message we handle it independent of whether or - // not the BitcoinManager is synced. - if serverMessage.Msg.GetMsgType() == MsgTypeAddr { - srv._handleAddrMessage(serverMessage.Peer, serverMessage.Msg.(*MsgDeSoAddr)) - continue - } - // If the message is a GetAddr message we handle it independent of whether or - // not the BitcoinManager is synced. 
- if serverMessage.Msg.GetMsgType() == MsgTypeGetAddr { - srv._handleGetAddrMessage(serverMessage.Peer, serverMessage.Msg.(*MsgDeSoGetAddr)) - continue - } - srv._handlePeerMessages(serverMessage) + select { + case consensusEvent := <-srv.fastHotStuffConsensus.ConsensusEvents: + { + glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.BlockHeight) + srv._handleFastHostStuffConsensusEvent(consensusEvent) + } + + case serverMessage := <-srv.incomingMessages: + { + // There is an incoming network message from a peer. + + glog.V(2).Infof("Server._startConsensus: Handling message of type %v from Peer %v", + serverMessage.Msg.GetMsgType(), serverMessage.Peer) + + // If the message is an addr message we handle it independent of whether or + // not the BitcoinManager is synced. + if serverMessage.Msg.GetMsgType() == MsgTypeAddr { + srv._handleAddrMessage(serverMessage.Peer, serverMessage.Msg.(*MsgDeSoAddr)) + continue + } + // If the message is a GetAddr message we handle it independent of whether or + // not the BitcoinManager is synced. + if serverMessage.Msg.GetMsgType() == MsgTypeGetAddr { + srv._handleGetAddrMessage(serverMessage.Peer, serverMessage.Msg.(*MsgDeSoGetAddr)) + continue + } + + srv._handlePeerMessages(serverMessage) + + // Always check for and handle control messages regardless of whether the + // BitcoinManager is synced. Note that we filter control messages out in a + // Peer's inHandler so any control message we get at this point should be bona fide. + shouldQuit := srv._handleControlMessages(serverMessage) + if shouldQuit { + break + } + } - // Always check for and handle control messages regardless of whether the - // BitcoinManager is synced. Note that we filter control messages out in a - // Peer's inHandler so any control message we get at this point should be bona fide. 
- shouldQuit := srv._handleControlMessages(serverMessage) - if shouldQuit { - break } } @@ -2322,6 +2401,14 @@ func (srv *Server) Stop() { glog.Infof(CLog(Yellow, "Server.Stop: Closed the Miner")) } + // Stop the PoS block proposer if we have one running. + if srv.fastHotStuffConsensus != nil { + srv.fastHotStuffConsensus.Stop() + glog.Infof(CLog(Yellow, "Server.Stop: Closed the FastHotStuffConsensus")) + } + + // TODO: Stop the PoS mempool if we have one running. + if srv.mempool != nil { // Before the node shuts down, write all the mempool txns to disk // if the flag is set. @@ -2374,7 +2461,8 @@ func (srv *Server) Start() { // finds some Peers. glog.Info("Server.Start: Starting Server") srv.waitGroup.Add(1) - go srv.messageHandler() + + go srv._startConsensus() go srv._startAddressRelayer() @@ -2389,6 +2477,9 @@ func (srv *Server) Start() { if srv.miner != nil && len(srv.miner.PublicKeys) > 0 { go srv.miner.Start() } + + // TODO: Gate these behind a PoS consensus flag. + go srv.fastHotStuffConsensus.Start() } // SyncPrefixProgress keeps track of sync progress on an individual prefix. 
It is used in diff --git a/test.Dockerfile b/test.Dockerfile index c6e5bc6ce..3a2d8aa9d 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -17,6 +17,7 @@ RUN go mod download COPY bls bls COPY cmd cmd COPY collections collections +COPY consensus consensus COPY desohash desohash COPY lib lib COPY migrate migrate @@ -29,4 +30,4 @@ RUN ./scripts/install-relic.sh # build backend RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core main.go -ENTRYPOINT ["go", "test", "-tags", "relic", "-v", "github.com/deso-protocol/core/bls", "github.com/deso-protocol/core/lib"] +ENTRYPOINT ["go", "test", "-tags", "relic", "-v", "github.com/deso-protocol/core/bls", "github.com/deso-protocol/core/collections", "github.com/deso-protocol/core/consensus", "github.com/deso-protocol/core/lib"] From b8a66b33bad4a5b8187b45045870a3d6513674da Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 29 Aug 2023 14:04:38 -0400 Subject: [PATCH 183/762] Implement PoS Consensus Interfaces and Initialization (#640) * Implement PoS Consensus Interfaces and Initialization * Separate Init and Start functions' roles * Address Nina's feedback * Revert votesSeen and timeoutsSeen map keys * Address Nina's 2nd round comments * Use pointers for bls types * Add uninitalized status * Add stubbed function for updating the consensus view * Rename ProcessXMsg functions * Clean up status naming * Address Piotr's feedback * Fix duplicate words * Better comment * Increment start and stop group counters --- consensus/consensus.go | 169 ++++++++++++++++++++++++++++++++++++----- consensus/types.go | 160 ++++++++++++++++++++++++++++++++------ lib/network.go | 2 +- lib/pos_network.go | 4 +- lib/server.go | 24 ++++-- 5 files changed, 308 insertions(+), 51 deletions(-) diff --git a/consensus/consensus.go b/consensus/consensus.go index 16136d3d7..30635ced6 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -1,6 +1,8 @@ package consensus import ( + 
"errors" + "sync" "time" "github.com/deso-protocol/core/bls" @@ -8,58 +10,191 @@ import ( func NewFastHotStuffConsensus() *FastHotStuffConsensus { return &FastHotStuffConsensus{ - internalTimersUpdated: make(chan interface{}), - votesSeen: make(map[BlockHash]map[bls.PublicKey]*bls.Signature), - timeoutsSeen: make(map[BlockHash]map[bls.PublicKey]*bls.Signature), - ConsensusEvents: make(chan *ConsensusEvent), + status: consensusStatusNotInitialized, + startGroup: sync.WaitGroup{}, + stopGroup: sync.WaitGroup{}, } } -func (fc *FastHotStuffConsensus) Init( /*TODO */ ) { +// Initializes the consensus instance with the latest known valid block in the blockchain, and +// the validator set for the next block height. The functions expects the following for the input +// params: +// - blockConstructionCadence: block construction duration must be > 0 +// - timeoutBaseDuration: timeout base duration must be > 0 +// - chainTip: the input block must have a valid block hash, block height, view, and QC +// - validators: the validators must be sorted in decreasing order of stake, with a +// consistent tie breaking scheme. The validator set is expected to be valid for +// validating votes and timeouts for the next block height. +// +// Given the above, This function updates the chain tip internally, and re-initializes all internal +// data structures that are used to track incoming votes and timeout messages for QC construction. 
+func (fc *FastHotStuffConsensus) Init( + blockConstructionCadence time.Duration, + timeoutBaseDuration time.Duration, + chainTip Block, + validators []Validator, +) error { + // Grab the consensus instance's lock + fc.lock.Lock() + defer fc.lock.Unlock() + + // Ensure the consensus instance is not already running + if fc.status == consensusStatusRunning { + return errors.New("Consensus instance is already running") + } + + // Validate the inputs + if blockConstructionCadence <= 0 { + return errors.New("Block construction duration must be > 0") + } + if timeoutBaseDuration <= 0 { + return errors.New("Timeout base duration must be > 0") + } + + // Update the chain tip and validator set + fc.chainTip = chainTip + fc.currentView = chainTip.GetView() + 1 + fc.validators = validators + + // Reset all internal data structures for votes and timeouts + fc.votesSeen = make(map[BlockHash]map[bls.PublicKey]VoteMessage) + fc.timeoutsSeen = make(map[uint64]map[bls.PublicKey]TimeoutMessage) + + // Reset all internal and external channels used for signaling + fc.internalTimersUpdatedSignal = make(chan interface{}) + fc.stopSignal = make(chan interface{}) + fc.ConsensusEvents = make(chan *ConsensusEvent) + + // Set the block construction and timeout base durations + fc.blockConstructionCadence = blockConstructionCadence + fc.timeoutBaseDuration = timeoutBaseDuration + + // Update the consensus status + fc.status = consensusStatusInitialized + + return nil +} + +func (fc *FastHotStuffConsensus) UpdateChainTip( /* TODO */ ) { + // TODO +} + +func (fc *FastHotStuffConsensus) UpdateView( /* TODO */ ) { // TODO } -func (fc *FastHotStuffConsensus) HandleAcceptedBlock( /* TODO */ ) { +func (fc *FastHotStuffConsensus) ProcessVoteMsg( /* TODO */ ) { // TODO } -func (fc *FastHotStuffConsensus) HandleVoteMessage( /* TODO */ ) { +func (pc *FastHotStuffConsensus) ProcessTimeoutMsg( /* TODO */ ) { // TODO } -func (pc *FastHotStuffConsensus) HandleTimeoutMessage( /* TODO */ ) { +func (fc 
*FastHotStuffConsensus) ConstructVoteQC( /* TODO */ ) { // TODO } -func (fc *FastHotStuffConsensus) HandleBlockProposal( /* TODO */ ) { +func (fc *FastHotStuffConsensus) ConstructTimeoutQC( /* TODO */ ) { // TODO } +// Sets the initial times for the block construction and timeout timers and starts +// the event loop building off of the current chain tip. func (fc *FastHotStuffConsensus) Start() { + fc.lock.Lock() + defer fc.lock.Unlock() + + // Check if the consensus instance is either running or uninitialized. + // If it's running or uninitialized, then there's nothing to do here. + if fc.status != consensusStatusInitialized { + return + } + + // Set the initial times for the block construction and timeout timers + fc.nextBlockConstructionTimeStamp = time.Now().Add(fc.blockConstructionCadence) + fc.nextTimeoutTimeStamp = time.Now().Add(fc.timeoutBaseDuration) + + // Kick off the event loop in a separate goroutine + fc.startGroup.Add(1) + go fc.runEventLoop() + + // Wait for the event loop to start + fc.startGroup.Wait() + + // Update the consensus status to mark it as running. + fc.status = consensusStatusRunning +} + +func (fc *FastHotStuffConsensus) Stop() { + fc.lock.Lock() + defer fc.lock.Unlock() + + // Check if the consensus instance is no longer running. If it's not running + // we can simply return here. + if fc.status != consensusStatusRunning { + return + } + + // Signal the event loop to stop + fc.stopGroup.Add(1) + fc.stopSignal <- struct{}{} + + // Wait for the event loop to stop + fc.stopGroup.Wait() + + // Update the consensus status + fc.status = consensusStatusInitialized + + // Close all internal and external channels used for signaling + close(fc.internalTimersUpdatedSignal) + close(fc.stopSignal) +} + +// Runs the internal event loop that waits for all internal or external signals. If the +// event loop is running, the consensus instance status must be set to consensusStatusRunning. 
+// Note, this function does not directly update the consensus status. To simplify the inner +// implementation of the loop, the caller who starts and stops should always be responsible +// for updating the status as it starts and stop the loop. +func (fc *FastHotStuffConsensus) runEventLoop() { + // Signal that the event loop has started + fc.startGroup.Done() + + // Start the event loop for { select { - case <-time.After(time.Until(fc.nextBlockProposalTime)): + case <-time.After(time.Until(fc.nextBlockConstructionTimeStamp)): { // TODO } - case <-time.After(time.Until(fc.nextTimeoutTime)): + case <-time.After(time.Until(fc.nextTimeoutTimeStamp)): { // TODO } - case <-fc.internalTimersUpdated: + case <-fc.internalTimersUpdatedSignal: { // TODO } - case <-fc.quit: + case <-fc.stopSignal: { - // TODO - close(fc.quit) + // Signal that the event loop has stopped + fc.stopGroup.Done() return } } } } -func (fc *FastHotStuffConsensus) Stop() { - fc.quit <- struct{}{} +func (fc *FastHotStuffConsensus) IsInitialized() bool { + fc.lock.RLock() + defer fc.lock.RUnlock() + + return fc.status != consensusStatusNotInitialized +} + +func (fc *FastHotStuffConsensus) IsRunning() bool { + fc.lock.RLock() + defer fc.lock.RUnlock() + + return fc.status == consensusStatusRunning } diff --git a/consensus/types.go b/consensus/types.go index 0f2cd3280..f9306b5b3 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -1,53 +1,165 @@ package consensus import ( + "sync" "time" "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections/bitset" + "github.com/holiman/uint256" ) -// ConsensusEvent is a way for FastHotStuffConsensus to send messages back to -// the Server. 
There are three types of events that can be sent: -// - Vote: The consensus is ready to vote on a block -// - Timeout: The consensus has timed out on a view -// - BlockProposal: The consensus has a QC for a block and is ready to propose the next -// block +// ConsensusEvent is a way for FastHotStuffConsensus to send messages back to the Server. +// There are four types of events that can be sent: +// - ConsensusEventTypeVote: The consensus is ready to vote on a block at a given block height and view +// - ConsensusEventTypeTimeout: The consensus has timed out on a view +// - ConsensusEventTypeConstructVoteQC: The consensus has a QC for a block and is ready to construct the +// next block at the next block height and the current view +// - ConsensusEventTypeConstructTimeoutQC: The consensus has a timeout QC for a view and is ready to +// construct an empty block with the timeout QC at the next block height and the current view type ConsensusEventType byte const ( - ConsensusEventTypeTimeout ConsensusEventType = iota - ConsensusEventTypeBlockProposal - ConsensusEventTypeVote + ConsensusEventTypeVote ConsensusEventType = 0 + ConsensusEventTypeTimeout ConsensusEventType = 1 + ConsensusEventTypeConstructVoteQC ConsensusEventType = 2 + ConsensusEventTypeConstructTimeoutQC ConsensusEventType = 3 ) type ConsensusEvent struct { EventType ConsensusEventType + BlockHash BlockHash BlockHeight uint64 View uint64 } // BlockHash is a 32-byte hash of a block used to uniquely identify a block. It's re-defined here -// to match the exact structure of the BlockHash type in core, so the two packages are decoupled and -// the Fast HotStuff consensus can be tested end-to-end independently. When using the Fast HotStuff, -// the lib package can convert its own BlockHash type to and from this type trivially. 
-type BlockHash [32]byte +// as an interface that matches the exact structure of the BlockHash type in core, so that the two +// packages are decoupled and the Fast HotStuff consensus can be tested end-to-end independently. +// When using the Fast HotStuff, the lib package can convert its own BlockHash type to and from this +// type trivially. +type BlockHash interface { + GetValue() [32]byte +} + +type Validator interface { + GetPublicKey() *bls.PublicKey + GetStakeAmount() *uint256.Int +} -// NewFastHotStuffConsensus creates a new persistent FastHotStuffConsensus object that internally -// runs the Fast HotStuff consensus protocol, and signals the Server whenever it's ready to perform -// an action. +type QuorumCertificate interface { + GetBlockHash() BlockHash + GetView() uint64 + GetSignersList() *bitset.Bitset + GetAggregatedSignature() *bls.Signature +} + +type VoteMessage interface { + GetView() + GetBlockHash() BlockHash + GetPublicKey() *bls.PublicKey + GetSignature() *bls.Signature +} + +type TimeoutMessage interface { + GetView() uint64 + GetHighQC() QuorumCertificate + + GetPublicKey() *bls.PublicKey + GetSignature() *bls.Signature +} + +type Block interface { + GetBlockHash() BlockHash + GetHeight() uint64 + GetView() uint64 + // This is a hybrid function that returns the QC from the block. + // - If the block is a normal block, this returns the QC from validators' votes for the previous block + // - If the block contains a timeout QC, this returns the validator high QC aggregated from + // validators' timeout messages + // We are able to simplify the GetQC() to this behavior because this QC is only needed to construct + // a timeout QC for the next block in the event of a timeout. So, this QC will always be the latest QC + // at the current chain's tip that subsequent blocks will build on top of. 
+ GetQC() QuorumCertificate +} + +// An instance of FastHotStuffConsensus is a self-contained module that represents a single node running the +// Fast HotStuff consensus protocol. The module is initialized at the current chain's tip, with a given +// block hash, block height, view number, and validator set. The module is simplified and does not know +// whether its role is that of a block proposer or a validator. +// +// Given a block that's at the tip of the current chain, this module maintains its own internal data structures +// and runs internal timers that handles all of the following: +// - Tracking of the current view, incrementing the view during timeouts, and computing exponential +// back-off durations during consecutive timeouts +// - Aggregation of votes and QC construction for the current block +// - Aggregation of timeout messages for the current view +// - Signaling its caller when it can vote on the current chain tip +// - Signaling its caller when it has timed out the current view +// - Signaling its caller when it has a QC for the current block +// - Signaling its caller when it has a timeout QC for the current view +// +// When a new block is connected to the chain, the caller is expected to update the chain tip. The module +// resets all internal data structures and timers to handle all of the above based on the new chain tip. +// +// This module is very simple and only houses the logic that decides what action to perform next given the +// current chain tip. The module does not track the history of blocks, and instead needs its caller to +// update the block at the current chain tip. It expects its caller to maintain the block chain, +// the index of all past blocks, to perform QC validations for incoming blocks, to handle the commit rule, +// and only then to pass the validated chain tip. Note: this module takes the provided chain tip as a +// trusted input and does NOT validate any incoming blocks. 
This also mean the module expects its caller to +// track historical vote and timeout messages it has sent so as to not vote more than once at a given view +// or block height. type FastHotStuffConsensus struct { - nextBlockProposalTime time.Time - nextTimeoutTime time.Time + lock sync.RWMutex + + blockConstructionCadence time.Duration + timeoutBaseDuration time.Duration + + nextBlockConstructionTimeStamp time.Time + nextTimeoutTimeStamp time.Time - internalTimersUpdated chan interface{} - quit chan interface{} + // The latest block accepted by the caller. We only keep track of and build on top of the chain + // tip here. In the event of a fork, we expect the new tip to be resolved and provided by the caller + chainTip Block + // The current view at which we expect to see or propose the next block. In the event of a timeout, + // the timeout signal will be triggered for this view. + currentView uint64 + // The validator set sorted in decreasing order of stake amount, with a consistent tie-breaking + // scheme. This validator set is expected to be valid for validating votes and timeouts for the + // next block height. + validators []Validator - // votesSeen and timeoutsSeen are in-memory maps of all the votes and timeouts we've - // seen so far, organized by their block hash and then by the public key of the voter. - votesSeen map[BlockHash]map[bls.PublicKey]*bls.Signature // TODO: this should be a vote message - timeoutsSeen map[BlockHash]map[bls.PublicKey]*bls.Signature // TODO: this should be a timeout message + // votesSeen is an in-memory map of all the votes we've seen so far, organized by the block hash + // that was voted on and the public key of the sender. We use a nested map because we want to be + // able to fetch all votes by block hash. + votesSeen map[BlockHash]map[bls.PublicKey]VoteMessage + // timeoutsSeen is an in-memory map of all the timeout messages we've seen so far, organized by + // the timed out view and the public key of the sender. 
We use a nested map because we want to + // be able to fetch all timeout messages by view. + timeoutsSeen map[uint64]map[bls.PublicKey]TimeoutMessage + + // Externally accessible channel for signals sent to the Server. ConsensusEvents chan *ConsensusEvent + + // Internal channels used by this module to coordinate the event loop + internalTimersUpdatedSignal chan interface{} + stopSignal chan interface{} + + // Internal statuses and wait groups used to coordinate the start and stop operations for + // the event loop. + status consensusStatus + startGroup sync.WaitGroup + stopGroup sync.WaitGroup } + +type consensusStatus byte + +const ( + consensusStatusNotInitialized consensusStatus = 0 // Not initialized and the event loop is not running + consensusStatusInitialized consensusStatus = 1 // Initialized but the event loop is not running + consensusStatusRunning consensusStatus = 2 // Initialized and the event loop is running +) diff --git a/lib/network.go b/lib/network.go index f38fbbc31..8077fa5e0 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1949,7 +1949,7 @@ type MsgDeSoHeader struct { // // In the event of a timeout, this field will contain the aggregate QC constructed from // timeout messages from 2/3 of validators weighted by stake, and proves that they have - // timed out. This value is set to null in normal cases where a regular block vote has + // timed out. This value is set to nil in normal cases where a regular block vote has // taken place. ValidatorsTimeoutAggregateQC *TimeoutAggregateQuorumCertificate diff --git a/lib/pos_network.go b/lib/pos_network.go index 775281ee7..d09697009 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -38,7 +38,7 @@ type MsgDeSoValidatorVote struct { // The block hash corresponding to the block that this vote is for. BlockHash *BlockHash - // The view number when the the block was proposed. + // The view number when the block was proposed. 
ProposedInView uint64 // The validator's partial BLS signature of the (ProposedInView, BlockHash) pair @@ -280,7 +280,7 @@ type QuorumCertificate struct { // The block hash corresponding to the block that this QC authorizes. BlockHash *BlockHash - // The view number when the the block was proposed. + // The view number when the block was proposed. ProposedInView uint64 // This BLS signature is aggregated from all of the partial BLS signatures for diff --git a/lib/server.go b/lib/server.go index 5eeb6b0c7..3a680aa59 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1723,7 +1723,7 @@ func (srv *Server) _handleBlockAccepted(event *BlockEvent) { // Notify the consensus that a block was accepted. if srv.fastHotStuffConsensus != nil { - srv.fastHotStuffConsensus.HandleAcceptedBlock() + srv.fastHotStuffConsensus.UpdateChainTip() } // Construct an inventory vector to relay to peers. @@ -2182,6 +2182,17 @@ func (srv *Server) _handleFastHostStuffBlockProposal(event *consensus.ConsensusE // 7. Broadcast the block to the network } +func (srv *Server) _handleFastHostStuffEmptyTimeoutBlockProposal(event *consensus.ConsensusEvent) { + // The consensus module has signaled that we have a timeout QC and can propose one at a certain + // block height. We construct an empty block with a timeout QC and broadcast it here: + // 1. Verify that the block height and view we want to propose at is valid + // 2. Get a timeout QC from the consensus module + // 3. Construct a block with the timeout QC + // 4. Sign the block + // 5. Process the block locally + // 6. Broadcast the block to the network +} + func (srv *Server) _handleFastHostStuffVote(event *consensus.ConsensusEvent) { // The consensus module has signaled that we can vote on a block. 
We construct and // broadcast the vote here: @@ -2202,12 +2213,14 @@ func (srv *Server) _handleFastHostStuffTimeout(event *consensus.ConsensusEvent) func (srv *Server) _handleFastHostStuffConsensusEvent(event *consensus.ConsensusEvent) { switch event.EventType { - case consensus.ConsensusEventTypeBlockProposal: - srv._handleFastHostStuffBlockProposal(event) case consensus.ConsensusEventTypeVote: srv._handleFastHostStuffVote(event) case consensus.ConsensusEventTypeTimeout: srv._handleFastHostStuffTimeout(event) + case consensus.ConsensusEventTypeConstructVoteQC: + srv._handleFastHostStuffBlockProposal(event) + case consensus.ConsensusEventTypeConstructTimeoutQC: + srv._handleFastHostStuffEmptyTimeoutBlockProposal(event) } } @@ -2226,7 +2239,7 @@ func (srv *Server) _handleFastHostStuffConsensusEvent(event *consensus.Consensus // control messages from peer, proposed blocks from peers, votes/timeouts, block requests, mempool // requests from syncing peers // - It listens to consensus events from the Fast HostStuff consensus engine. The consensus signals when -// it's ready to vote, timeout, or propose a block. +// it's ready to vote, timeout, propose a block, or propose an empty block with a timeout QC. func (srv *Server) _startConsensus() { for { // This is used instead of the shouldQuit control message exist mechanism below. shouldQuit will be true only @@ -2477,9 +2490,6 @@ func (srv *Server) Start() { if srv.miner != nil && len(srv.miner.PublicKeys) > 0 { go srv.miner.Start() } - - // TODO: Gate these behind a PoS consensus flag. - go srv.fastHotStuffConsensus.Start() } // SyncPrefixProgress keeps track of sync progress on an individual prefix. 
It is used in From ffac62884c52b319aab8e96b293596d89b2de282 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 29 Aug 2023 14:32:17 -0400 Subject: [PATCH 184/762] Add Basic Unit Tests for Fast HotStuff Consensus Initialization (#641) * Start Unit Tests for Fast HotStuff Consensus * Implement simple tests * Add test for start and stop * Move test utils to utils_test.go file * Remove busy waits --- consensus/consensus_test.go | 91 +++++++++++++++++++++++++++++++++++++ consensus/types_test.go | 90 ++++++++++++++++++++++++++++++++++++ consensus/utils_test.go | 66 +++++++++++++++++++++++++++ 3 files changed, 247 insertions(+) create mode 100644 consensus/types_test.go create mode 100644 consensus/utils_test.go diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go index 4bbf8296e..6b019c199 100644 --- a/consensus/consensus_test.go +++ b/consensus/consensus_test.go @@ -1 +1,92 @@ +//go:build relic + package consensus + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestFastHotStuffInitialization(t *testing.T) { + + // Test initial status for newly constructed instance + { + fc := NewFastHotStuffConsensus() + require.Equal(t, consensusStatusNotInitialized, fc.status) + require.Equal(t, fc.IsInitialized(), false) + require.Equal(t, fc.IsRunning(), false) + require.NotPanics(t, fc.Stop) // Calling Stop() on an uninitialized instance should be a no-op + } + + // Test Init() function with invalid block construction cadence + { + fc := NewFastHotStuffConsensus() + err := fc.Init(0, 1, createDummyBlock(), createDummyValidatorSet()) + require.Error(t, err) + } + + // Test Init() function with invalid timeout duration + { + fc := NewFastHotStuffConsensus() + err := fc.Init(1, 0, createDummyBlock(), createDummyValidatorSet()) + require.Error(t, err) + } + + // Test Init() function with valid parameters + { + fc := NewFastHotStuffConsensus() + err := fc.Init(100, 101, 
createDummyBlock(), createDummyValidatorSet()) + require.NoError(t, err) + + require.Equal(t, consensusStatusInitialized, fc.status) + require.Equal(t, fc.IsInitialized(), true) + require.Equal(t, fc.IsRunning(), false) + + require.NotPanics(t, fc.Stop) // Calling Stop() on an initialized instance should be a no-op + require.Equal(t, fc.status, consensusStatusInitialized) + + require.Equal(t, fc.chainTip.GetBlockHash().GetValue(), createDummyBlockHash().GetValue()) + require.Equal(t, fc.chainTip.GetView(), uint64(0)) + require.Equal(t, fc.chainTip.GetHeight(), uint64(0)) + + require.Equal(t, fc.blockConstructionCadence, time.Duration(100)) + require.Equal(t, fc.timeoutBaseDuration, time.Duration(101)) + + require.Equal(t, fc.currentView, uint64(1)) + require.Equal(t, len(fc.validators), len(createDummyValidatorSet())) + } +} + +func TestFastHotStuffEventLoopStartStop(t *testing.T) { + oneHourInNanoSecs := time.Duration(3600000000000) + tenSecondsInNanoSecs := time.Duration(10000000000) + + fc := NewFastHotStuffConsensus() + err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) + require.NoError(t, err) + + // Start the event loop + fc.Start() + + // Confirm the consensus instance status has changed to running + require.Equal(t, consensusStatusRunning, fc.status) + + // Confirm that the ETAs for the block construction and timeout timers have been set + require.Greater(t, fc.nextBlockConstructionTimeStamp, time.Now().Add( + oneHourInNanoSecs-tenSecondsInNanoSecs, // Subtract 10 second buffer so this test ins't flaky + )) + require.Greater(t, fc.nextTimeoutTimeStamp, time.Now().Add( + 2*oneHourInNanoSecs-tenSecondsInNanoSecs, // Subtract 10 second buffer so this test ins't flaky + )) + + // Stop the event loop + fc.Stop() + + // Confirm the consensus instance status has reverted to initialized + require.Equal(t, consensusStatusInitialized, fc.status) + + // Confirm that calling fc.Stop() again doesn't panic + 
require.NotPanics(t, fc.Stop) +} diff --git a/consensus/types_test.go b/consensus/types_test.go new file mode 100644 index 000000000..d588d14ab --- /dev/null +++ b/consensus/types_test.go @@ -0,0 +1,90 @@ +package consensus + +import ( + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections/bitset" + "github.com/holiman/uint256" +) + +////////////////////////////////////////////////////////// +// BlockHash interface implementation for testing +////////////////////////////////////////////////////////// + +type blockHash struct { + value [32]byte +} + +func (bh *blockHash) GetValue() [32]byte { + return bh.value +} + +////////////////////////////////////////////////////////// +// Validator interface implementation for testing +////////////////////////////////////////////////////////// + +type validator struct { + publicKey *bls.PublicKey + stakeAmount *uint256.Int +} + +func (v *validator) GetPublicKey() *bls.PublicKey { + return v.publicKey +} + +func (v *validator) GetStakeAmount() *uint256.Int { + return v.stakeAmount +} + +////////////////////////////////////////////////////////// +// QuorumCertificate interface implementation for testing +////////////////////////////////////////////////////////// + +type quorumCertificate struct { + blockHash *blockHash + view uint64 + signersList *bitset.Bitset + aggregatedSignature *bls.Signature +} + +func (qc *quorumCertificate) GetBlockHash() BlockHash { + return qc.blockHash +} + +func (qc *quorumCertificate) GetView() uint64 { + return qc.view +} + +func (qc *quorumCertificate) GetSignersList() *bitset.Bitset { + return qc.signersList +} + +func (qc *quorumCertificate) GetAggregatedSignature() *bls.Signature { + return qc.aggregatedSignature +} + +////////////////////////////////////////////////////////// +// Block interface implementation for testing +////////////////////////////////////////////////////////// + +type block struct { + blockHash *blockHash + height uint64 + view uint64 + qc 
*quorumCertificate +} + +func (b *block) GetBlockHash() BlockHash { + return b.blockHash +} + +func (b *block) GetHeight() uint64 { + return b.height +} + +func (b *block) GetView() uint64 { + return b.view +} + +func (b *block) GetQC() QuorumCertificate { + return b.qc +} diff --git a/consensus/utils_test.go b/consensus/utils_test.go new file mode 100644 index 000000000..a011ab460 --- /dev/null +++ b/consensus/utils_test.go @@ -0,0 +1,66 @@ +package consensus + +import ( + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/collections/bitset" + "github.com/holiman/uint256" +) + +func createDummyValidatorSet() []Validator { + validators := []*validator{ + { + publicKey: createDummyBLSPublicKey(), + stakeAmount: uint256.NewInt().SetUint64(100), + }, + { + publicKey: createDummyBLSPublicKey(), + stakeAmount: uint256.NewInt().SetUint64(50), + }, + } + // Cast the slice of concrete structs []*validators to a slice of interfaces []Validator + return collections.TransformSlice(validators, func(v *validator) Validator { + return v + }) +} + +func createDummyBlock() *block { + return &block{ + blockHash: createDummyBlockHash(), + view: 0, + height: 0, + qc: createDummyQC(), + } +} + +func createDummyQC() *quorumCertificate { + return &quorumCertificate{ + blockHash: createDummyBlockHash(), + view: 0, + signersList: bitset.NewBitset().FromBytes([]byte{0x3}), + aggregatedSignature: createDummyBLSSignature(), + } +} + +func createDummyBLSSignature() *bls.Signature { + blsPrivateKey, _ := bls.NewPrivateKey() + blockHashValue := createDummyBlockHash().GetValue() + blsSignature, _ := blsPrivateKey.Sign(blockHashValue[:]) + return blsSignature +} + +func createDummyBLSPublicKey() *bls.PublicKey { + blsPrivateKey, _ := bls.NewPrivateKey() + return blsPrivateKey.PublicKey() +} + +func createDummyBlockHash() *blockHash { + return &blockHash{ + value: [32]byte{ + 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0xf, + 0x8, 0x9, 0xa, 
0xb, 0xc, 0xd, 0xe, 0xf, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, + }, + } +} From 584756a50c75929aae1c55b30f1e833f5815919f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 29 Aug 2023 17:57:28 -0400 Subject: [PATCH 185/762] Add Basic Block And Validator Set Integrity Checks (#642) * Add Basic Block And Validator Set Validation * Add better nil check * Require relic build tag * Address Piotr's feedback * Rename isProperlyFormedValidatorSet --- collections/slice.go | 11 ++++- consensus/consensus.go | 18 +++++-- consensus/consensus_test.go | 20 ++++++-- consensus/utils.go | 63 +++++++++++++++++++++++++ consensus/utils_test.go | 85 ++++++++++++++++++++++++++++++++-- lib/pos_epoch_complete_hook.go | 2 +- 6 files changed, 186 insertions(+), 13 deletions(-) create mode 100644 consensus/utils.go diff --git a/collections/slice.go b/collections/slice.go index b96cc41d9..112b28d1a 100644 --- a/collections/slice.go +++ b/collections/slice.go @@ -1,6 +1,15 @@ package collections -func TransformSlice[TInput any, TOutput any](slice []TInput, transformFn func(TInput) TOutput) []TOutput { +func Any[T any](slice []T, predicate func(T) bool) bool { + for _, val := range slice { + if predicate(val) { + return true + } + } + return false +} + +func Transform[TInput any, TOutput any](slice []TInput, transformFn func(TInput) TOutput) []TOutput { var result []TOutput for _, val := range slice { result = append(result, transformFn(val)) diff --git a/consensus/consensus.go b/consensus/consensus.go index 30635ced6..e8b0ff88c 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -40,15 +40,25 @@ func (fc *FastHotStuffConsensus) Init( // Ensure the consensus instance is not already running if fc.status == consensusStatusRunning { - return errors.New("Consensus instance is already running") + return errors.New("FastHotStuffConsensus.Init: Consensus instance is already running") } - 
// Validate the inputs + // Validate the timer durations if blockConstructionCadence <= 0 { - return errors.New("Block construction duration must be > 0") + return errors.New("FastHotStuffConsensus.Init: Block construction duration must be > 0") } if timeoutBaseDuration <= 0 { - return errors.New("Timeout base duration must be > 0") + return errors.New("FastHotStuffConsensus.Init: Timeout base duration must be > 0") + } + + // Validate the integrity of the block + if !isProperlyFormedBlock(chainTip) { + return errors.New("FastHotStuffConsensus.Init: Invalid block") + } + + // Validate the integrity of the validator set + if !isProperlyFormedValidatorSet(validators) { + return errors.New("FastHotStuffConsensus.Init: Invalid validator set") } // Update the chain tip and validator set diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go index 6b019c199..f56da6665 100644 --- a/consensus/consensus_test.go +++ b/consensus/consensus_test.go @@ -34,6 +34,20 @@ func TestFastHotStuffInitialization(t *testing.T) { require.Error(t, err) } + // Test Init() function with malformed block + { + fc := NewFastHotStuffConsensus() + err := fc.Init(1, 1, nil, createDummyValidatorSet()) + require.Error(t, err) + } + + // Test Init() function with malformed validator set + { + fc := NewFastHotStuffConsensus() + err := fc.Init(1, 1, createDummyBlock(), nil) + require.Error(t, err) + } + // Test Init() function with valid parameters { fc := NewFastHotStuffConsensus() @@ -48,13 +62,13 @@ func TestFastHotStuffInitialization(t *testing.T) { require.Equal(t, fc.status, consensusStatusInitialized) require.Equal(t, fc.chainTip.GetBlockHash().GetValue(), createDummyBlockHash().GetValue()) - require.Equal(t, fc.chainTip.GetView(), uint64(0)) - require.Equal(t, fc.chainTip.GetHeight(), uint64(0)) + require.Equal(t, fc.chainTip.GetView(), uint64(1)) + require.Equal(t, fc.chainTip.GetHeight(), uint64(1)) require.Equal(t, fc.blockConstructionCadence, time.Duration(100)) 
require.Equal(t, fc.timeoutBaseDuration, time.Duration(101)) - require.Equal(t, fc.currentView, uint64(1)) + require.Equal(t, fc.currentView, uint64(2)) require.Equal(t, len(fc.validators), len(createDummyValidatorSet())) } } diff --git a/consensus/utils.go b/consensus/utils.go new file mode 100644 index 000000000..3614a26e8 --- /dev/null +++ b/consensus/utils.go @@ -0,0 +1,63 @@ +package consensus + +import ( + "reflect" + + "github.com/deso-protocol/core/collections" +) + +// This function checks if the block is properly formed. These are all surface level checks that +// ensure that critical fields in the block are not nil so that the code in this package does not +// panic. +func isProperlyFormedBlock(block Block) bool { + // The block must be non-nil + if block == nil { + return false + } + + // The block height and view must be non-zero + if block.GetHeight() == 0 || block.GetView() == 0 { + return false + } + + // The block hash and QC must be non-nil + if isInterfaceNil(block.GetBlockHash()) || isInterfaceNil(block.GetQC()) { + return false + } + + qc := block.GetQC() + + // The QC fields must be non-nil and the view non-zero + if isInterfaceNil(qc.GetAggregatedSignature()) || + isInterfaceNil(qc.GetBlockHash()) || + qc.GetSignersList() == nil || + qc.GetView() == 0 { + return false + } + + return true +} + +func isProperlyFormedValidatorSet(validators []Validator) bool { + // The validator set must be non-empty + if len(validators) == 0 { + return false + } + + // If any validator in the slice has an invalid property, then something is wrong. + return !collections.Any(validators, func(v Validator) bool { + return isInterfaceNil(v) || v.GetPublicKey() == nil || v.GetStakeAmount() == nil || v.GetStakeAmount().IsZero() + }) +} + +// golang interface types are stored as a tuple of (type, value). A single i==nil check is not enough to +// determine if a pointer that implements an interface is nil. 
This function checks if the interface is nil +// by checking if the pointer itself is nil. +func isInterfaceNil(i interface{}) bool { + if i == nil { + return true + } + + value := reflect.ValueOf(i) + return value.Kind() == reflect.Ptr && value.IsNil() +} diff --git a/consensus/utils_test.go b/consensus/utils_test.go index a011ab460..4667d908e 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -1,12 +1,89 @@ +//go:build relic + package consensus import ( + "testing" + "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/collections/bitset" "github.com/holiman/uint256" + "github.com/stretchr/testify/require" ) +func TestIsProperlyFormedBlock(t *testing.T) { + // Test nil block + { + require.False(t, isProperlyFormedBlock(nil)) + } + + // Test zero height + { + block := block{height: 0, view: 1, blockHash: createDummyBlockHash(), qc: createDummyQC()} + require.False(t, isProperlyFormedBlock(&block)) + } + + // Test zero view + { + block := block{height: 1, view: 0, blockHash: createDummyBlockHash(), qc: createDummyQC()} + require.False(t, isProperlyFormedBlock(&block)) + } + + // Test nil block hash + { + block := block{height: 1, view: 1, blockHash: nil, qc: createDummyQC()} + require.False(t, isProperlyFormedBlock(&block)) + } + + // Test nil QC + { + block := block{height: 1, view: 1, blockHash: createDummyBlockHash(), qc: nil} + require.False(t, isProperlyFormedBlock(&block)) + } + + // Test valid block + { + require.True(t, isProperlyFormedBlock(createDummyBlock())) + } +} + +func TestIsProperlyFormedValidatorSet(t *testing.T) { + // Test empty slice + { + require.False(t, isProperlyFormedValidatorSet([]Validator{})) + } + + // Test nil validator + { + require.False(t, isProperlyFormedValidatorSet([]Validator{nil})) + } + + // Test nil public key + { + validator := validator{publicKey: nil, stakeAmount: uint256.NewInt().SetUint64(1)} + require.False(t, 
isProperlyFormedValidatorSet([]Validator{&validator})) + } + + // Test nil stake amount + { + validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: nil} + require.False(t, isProperlyFormedValidatorSet([]Validator{&validator})) + } + + // Test zero stake amount + { + validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt()} + require.False(t, isProperlyFormedValidatorSet([]Validator{&validator})) + } + + // Test valid validator + { + validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt().SetUint64(1)} + require.True(t, isProperlyFormedValidatorSet([]Validator{&validator})) + } +} + func createDummyValidatorSet() []Validator { validators := []*validator{ { @@ -19,7 +96,7 @@ func createDummyValidatorSet() []Validator { }, } // Cast the slice of concrete structs []*validators to a slice of interfaces []Validator - return collections.TransformSlice(validators, func(v *validator) Validator { + return collections.Transform(validators, func(v *validator) Validator { return v }) } @@ -27,8 +104,8 @@ func createDummyValidatorSet() []Validator { func createDummyBlock() *block { return &block{ blockHash: createDummyBlockHash(), - view: 0, - height: 0, + view: 1, + height: 1, qc: createDummyQC(), } } @@ -36,7 +113,7 @@ func createDummyBlock() *block { func createDummyQC() *quorumCertificate { return &quorumCertificate{ blockHash: createDummyBlockHash(), - view: 0, + view: 1, signersList: bitset.NewBitset().FromBytes([]byte{0x3}), aggregatedSignature: createDummyBLSSignature(), } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index daa6e53b8..9ecb90636 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -204,7 +204,7 @@ func (bav *UtxoView) generateAndSnapshotLeaderSchedule(epochNumber uint64) error func (bav *UtxoView) generateAndSnapshotStakesToReward(epochNumber uint64, validatorSet []*ValidatorEntry) error { // Fetch the 
validator set's PKIDs so we can filter the top stakes by the current validator set. - validatorSetPKIDs := collections.TransformSlice(validatorSet, func(validatorEntry *ValidatorEntry) *PKID { + validatorSetPKIDs := collections.Transform(validatorSet, func(validatorEntry *ValidatorEntry) *PKID { return validatorEntry.ValidatorPKID }) From 970c7da5b9eb514bbeff1df1e92e5fafe6b457c9 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 6 Sep 2023 11:17:26 -0400 Subject: [PATCH 186/762] Rename FastHotStuffConsensus to FastHotStuffEventLoop (#651) --- consensus/consensus.go | 38 ++++++++++++++++++------------------- consensus/consensus_test.go | 14 +++++++------- consensus/types.go | 12 ++++++------ lib/server.go | 14 +++++++------- 4 files changed, 39 insertions(+), 39 deletions(-) diff --git a/consensus/consensus.go b/consensus/consensus.go index e8b0ff88c..a35122112 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -8,8 +8,8 @@ import ( "github.com/deso-protocol/core/bls" ) -func NewFastHotStuffConsensus() *FastHotStuffConsensus { - return &FastHotStuffConsensus{ +func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { + return &FastHotStuffEventLoop{ status: consensusStatusNotInitialized, startGroup: sync.WaitGroup{}, stopGroup: sync.WaitGroup{}, @@ -28,7 +28,7 @@ func NewFastHotStuffConsensus() *FastHotStuffConsensus { // // Given the above, This function updates the chain tip internally, and re-initializes all internal // data structures that are used to track incoming votes and timeout messages for QC construction. 
-func (fc *FastHotStuffConsensus) Init( +func (fc *FastHotStuffEventLoop) Init( blockConstructionCadence time.Duration, timeoutBaseDuration time.Duration, chainTip Block, @@ -40,25 +40,25 @@ func (fc *FastHotStuffConsensus) Init( // Ensure the consensus instance is not already running if fc.status == consensusStatusRunning { - return errors.New("FastHotStuffConsensus.Init: Consensus instance is already running") + return errors.New("FastHotStuffEventLoop.Init: Consensus instance is already running") } // Validate the timer durations if blockConstructionCadence <= 0 { - return errors.New("FastHotStuffConsensus.Init: Block construction duration must be > 0") + return errors.New("FastHotStuffEventLoop.Init: Block construction duration must be > 0") } if timeoutBaseDuration <= 0 { - return errors.New("FastHotStuffConsensus.Init: Timeout base duration must be > 0") + return errors.New("FastHotStuffEventLoop.Init: Timeout base duration must be > 0") } // Validate the integrity of the block if !isProperlyFormedBlock(chainTip) { - return errors.New("FastHotStuffConsensus.Init: Invalid block") + return errors.New("FastHotStuffEventLoop.Init: Invalid block") } // Validate the integrity of the validator set if !isProperlyFormedValidatorSet(validators) { - return errors.New("FastHotStuffConsensus.Init: Invalid validator set") + return errors.New("FastHotStuffEventLoop.Init: Invalid validator set") } // Update the chain tip and validator set @@ -85,33 +85,33 @@ func (fc *FastHotStuffConsensus) Init( return nil } -func (fc *FastHotStuffConsensus) UpdateChainTip( /* TODO */ ) { +func (fc *FastHotStuffEventLoop) UpdateChainTip( /* TODO */ ) { // TODO } -func (fc *FastHotStuffConsensus) UpdateView( /* TODO */ ) { +func (fc *FastHotStuffEventLoop) UpdateView( /* TODO */ ) { // TODO } -func (fc *FastHotStuffConsensus) ProcessVoteMsg( /* TODO */ ) { +func (fc *FastHotStuffEventLoop) ProcessVoteMsg( /* TODO */ ) { // TODO } -func (pc *FastHotStuffConsensus) ProcessTimeoutMsg( /* TODO 
*/ ) { +func (pc *FastHotStuffEventLoop) ProcessTimeoutMsg( /* TODO */ ) { // TODO } -func (fc *FastHotStuffConsensus) ConstructVoteQC( /* TODO */ ) { +func (fc *FastHotStuffEventLoop) ConstructVoteQC( /* TODO */ ) { // TODO } -func (fc *FastHotStuffConsensus) ConstructTimeoutQC( /* TODO */ ) { +func (fc *FastHotStuffEventLoop) ConstructTimeoutQC( /* TODO */ ) { // TODO } // Sets the initial times for the block construction and timeout timers and starts // the event loop building off of the current chain tip. -func (fc *FastHotStuffConsensus) Start() { +func (fc *FastHotStuffEventLoop) Start() { fc.lock.Lock() defer fc.lock.Unlock() @@ -136,7 +136,7 @@ func (fc *FastHotStuffConsensus) Start() { fc.status = consensusStatusRunning } -func (fc *FastHotStuffConsensus) Stop() { +func (fc *FastHotStuffEventLoop) Stop() { fc.lock.Lock() defer fc.lock.Unlock() @@ -166,7 +166,7 @@ func (fc *FastHotStuffConsensus) Stop() { // Note, this function does not directly update the consensus status. To simplify the inner // implementation of the loop, the caller who starts and stops should always be responsible // for updating the status as it starts and stop the loop. 
-func (fc *FastHotStuffConsensus) runEventLoop() { +func (fc *FastHotStuffEventLoop) runEventLoop() { // Signal that the event loop has started fc.startGroup.Done() @@ -195,14 +195,14 @@ func (fc *FastHotStuffConsensus) runEventLoop() { } } -func (fc *FastHotStuffConsensus) IsInitialized() bool { +func (fc *FastHotStuffEventLoop) IsInitialized() bool { fc.lock.RLock() defer fc.lock.RUnlock() return fc.status != consensusStatusNotInitialized } -func (fc *FastHotStuffConsensus) IsRunning() bool { +func (fc *FastHotStuffEventLoop) IsRunning() bool { fc.lock.RLock() defer fc.lock.RUnlock() diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go index f56da6665..d9a2e86ac 100644 --- a/consensus/consensus_test.go +++ b/consensus/consensus_test.go @@ -13,7 +13,7 @@ func TestFastHotStuffInitialization(t *testing.T) { // Test initial status for newly constructed instance { - fc := NewFastHotStuffConsensus() + fc := NewFastHotStuffEventLoop() require.Equal(t, consensusStatusNotInitialized, fc.status) require.Equal(t, fc.IsInitialized(), false) require.Equal(t, fc.IsRunning(), false) @@ -22,35 +22,35 @@ func TestFastHotStuffInitialization(t *testing.T) { // Test Init() function with invalid block construction cadence { - fc := NewFastHotStuffConsensus() + fc := NewFastHotStuffEventLoop() err := fc.Init(0, 1, createDummyBlock(), createDummyValidatorSet()) require.Error(t, err) } // Test Init() function with invalid timeout duration { - fc := NewFastHotStuffConsensus() + fc := NewFastHotStuffEventLoop() err := fc.Init(1, 0, createDummyBlock(), createDummyValidatorSet()) require.Error(t, err) } // Test Init() function with malformed block { - fc := NewFastHotStuffConsensus() + fc := NewFastHotStuffEventLoop() err := fc.Init(1, 1, nil, createDummyValidatorSet()) require.Error(t, err) } // Test Init() function with malformed validator set { - fc := NewFastHotStuffConsensus() + fc := NewFastHotStuffEventLoop() err := fc.Init(1, 1, createDummyBlock(), nil) 
require.Error(t, err) } // Test Init() function with valid parameters { - fc := NewFastHotStuffConsensus() + fc := NewFastHotStuffEventLoop() err := fc.Init(100, 101, createDummyBlock(), createDummyValidatorSet()) require.NoError(t, err) @@ -77,7 +77,7 @@ func TestFastHotStuffEventLoopStartStop(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) tenSecondsInNanoSecs := time.Duration(10000000000) - fc := NewFastHotStuffConsensus() + fc := NewFastHotStuffEventLoop() err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) require.NoError(t, err) diff --git a/consensus/types.go b/consensus/types.go index f9306b5b3..c17260ee9 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -9,7 +9,7 @@ import ( "github.com/holiman/uint256" ) -// ConsensusEvent is a way for FastHotStuffConsensus to send messages back to the Server. +// ConsensusEvent is a way for FastHotStuffEventLoop to send messages back to the Server. // There are four types of events that can be sent: // - ConsensusEventTypeVote: The consensus is ready to vote on a block at a given block height and view // - ConsensusEventTypeTimeout: The consensus has timed out on a view @@ -85,10 +85,10 @@ type Block interface { GetQC() QuorumCertificate } -// An instance of FastHotStuffConsensus is a self-contained module that represents a single node running the -// Fast HotStuff consensus protocol. The module is initialized at the current chain's tip, with a given -// block hash, block height, view number, and validator set. The module is simplified and does not know -// whether its role is that of a block proposer or a validator. +// An instance of FastHotStuffEventLoop is a self-contained module that represents a single node running +// the event loop for the Fast HotStuff consensus protocol. The module is initialized at the current chain's +// tip, with a given block hash, block height, view number, and validator set. 
The module is simplified and +// does not know whether its role is that of a block proposer or a validator. // // Given a block that's at the tip of the current chain, this module maintains its own internal data structures // and runs internal timers that handles all of the following: @@ -112,7 +112,7 @@ type Block interface { // trusted input and does NOT validate any incoming blocks. This also mean the module expects its caller to // track historical vote and timeout messages it has sent so as to not vote more than once at a given view // or block height. -type FastHotStuffConsensus struct { +type FastHotStuffEventLoop struct { lock sync.RWMutex blockConstructionCadence time.Duration diff --git a/lib/server.go b/lib/server.go index 3a680aa59..3db2e19dd 100644 --- a/lib/server.go +++ b/lib/server.go @@ -61,7 +61,7 @@ type Server struct { eventManager *EventManager TxIndex *TXIndex - fastHotStuffConsensus *consensus.FastHotStuffConsensus + fastHotStuffEventLoop *consensus.FastHotStuffEventLoop // posMempool *PosMemPool TODO: Add the mempool later // All messages received from peers get sent from the ConnectionManager to the @@ -1722,8 +1722,8 @@ func (srv *Server) _handleBlockAccepted(event *BlockEvent) { } // Notify the consensus that a block was accepted. - if srv.fastHotStuffConsensus != nil { - srv.fastHotStuffConsensus.UpdateChainTip() + if srv.fastHotStuffEventLoop != nil { + srv.fastHotStuffEventLoop.UpdateChainTip() } // Construct an inventory vector to relay to peers. @@ -2249,7 +2249,7 @@ func (srv *Server) _startConsensus() { } select { - case consensusEvent := <-srv.fastHotStuffConsensus.ConsensusEvents: + case consensusEvent := <-srv.fastHotStuffEventLoop.ConsensusEvents: { glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.BlockHeight) srv._handleFastHostStuffConsensusEvent(consensusEvent) @@ -2415,9 +2415,9 @@ func (srv *Server) Stop() { } // Stop the PoS block proposer if we have one running. 
- if srv.fastHotStuffConsensus != nil { - srv.fastHotStuffConsensus.Stop() - glog.Infof(CLog(Yellow, "Server.Stop: Closed the FastHotStuffConsensus")) + if srv.fastHotStuffEventLoop != nil { + srv.fastHotStuffEventLoop.Stop() + glog.Infof(CLog(Yellow, "Server.Stop: Closed the fastHotStuffEventLoop")) } // TODO: Stop the PoS mempool if we have one running. From d2f4b57710792bc15c4e472d1990b626d4dc7b66 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 8 Sep 2023 08:30:25 -0400 Subject: [PATCH 187/762] Rename consensus.go to event_loop.go (#655) --- consensus/{consensus.go => event_loop.go} | 0 consensus/{consensus_test.go => event_loop_test.go} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename consensus/{consensus.go => event_loop.go} (100%) rename consensus/{consensus_test.go => event_loop_test.go} (100%) diff --git a/consensus/consensus.go b/consensus/event_loop.go similarity index 100% rename from consensus/consensus.go rename to consensus/event_loop.go diff --git a/consensus/consensus_test.go b/consensus/event_loop_test.go similarity index 100% rename from consensus/consensus_test.go rename to consensus/event_loop_test.go From fde4fa3dfca3085639856092f7b0c91d781f54af Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 8 Sep 2023 14:29:55 -0400 Subject: [PATCH 188/762] Implement FastHotStuffEventLoop.ProcessSafeBlock(...) 
(#643) * Implement FastHotStuffEventLoop.ProcessSafeBlock * Clean up comments --- consensus/event_loop.go | 117 ++++++++++++++++++++++++++++++----- consensus/event_loop_test.go | 106 +++++++++++++++++++++++++++++-- consensus/types.go | 31 ++++++---- consensus/types_test.go | 54 ++++++++++++++++ consensus/utils_test.go | 18 ++++++ lib/server.go | 4 +- 6 files changed, 297 insertions(+), 33 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index a35122112..48c7ea907 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -4,8 +4,6 @@ import ( "errors" "sync" "time" - - "github.com/deso-protocol/core/bls" ) func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { @@ -61,19 +59,19 @@ func (fc *FastHotStuffEventLoop) Init( return errors.New("FastHotStuffEventLoop.Init: Invalid validator set") } - // Update the chain tip and validator set + // Update the latest safe block and validator set fc.chainTip = chainTip fc.currentView = chainTip.GetView() + 1 - fc.validators = validators + fc.validatorsAtChainTip = validators // Reset all internal data structures for votes and timeouts - fc.votesSeen = make(map[BlockHash]map[bls.PublicKey]VoteMessage) - fc.timeoutsSeen = make(map[uint64]map[bls.PublicKey]TimeoutMessage) + fc.votesSeen = make(map[[32]byte]map[string]VoteMessage) + fc.timeoutsSeen = make(map[uint64]map[string]TimeoutMessage) // Reset all internal and external channels used for signaling - fc.internalTimersUpdatedSignal = make(chan interface{}) - fc.stopSignal = make(chan interface{}) - fc.ConsensusEvents = make(chan *ConsensusEvent) + fc.resetEventLoopSignal = make(chan interface{}, signalChannelBufferSize) + fc.stopSignal = make(chan interface{}, signalChannelBufferSize) + fc.ConsensusEvents = make(chan *ConsensusEvent, signalChannelBufferSize) // Set the block construction and timeout base durations fc.blockConstructionCadence = blockConstructionCadence @@ -85,8 +83,58 @@ func (fc *FastHotStuffEventLoop) Init( return nil } 
-func (fc *FastHotStuffEventLoop) UpdateChainTip( /* TODO */ ) { - // TODO +// ProcessSafeBlock must only be called when the caller has accepted a new block, connected it +// to the tip of the blockchain, and determined that the block is safe to vote on. Given such a +// block, this function resets the internal timers and state of the Fast HotStuff consensus that +// determine the next action. The functions expects the following for the input params: +// - block: the input block that was safely added to the blockchain and is safe to vote on +// - validators: the validator set for the next block height +func (fc *FastHotStuffEventLoop) ProcessSafeBlock(block Block, validators []Validator) error { + // Grab the consensus instance's lock + fc.lock.Lock() + defer fc.lock.Unlock() + + // Ensure the consensus instance is running + if fc.status != consensusStatusRunning { + return errors.New("FastHotStuffEventLoop.ProcessSafeBlock: Consensus instance is not running") + } + + // Do a basic integrity check on the block + if !isProperlyFormedBlock(block) { + return errors.New("FastHotStuffEventLoop.ProcessSafeBlock: Invalid block") + } + + // Do a basic integrity check on the validator set + if !isProperlyFormedValidatorSet(validators) { + return errors.New("FastHotStuffEventLoop.ProcessSafeBlock: Invalid validator set") + } + + // Update the chain tip and validator set + fc.chainTip = block + + // We track the current view here so we know which view to start the timeout timer for. + fc.currentView = block.GetView() + 1 + + // Update the validator set so we know when we have a QC from votes at the next block height + // and view. + fc.validatorsAtChainTip = validators + + // Evict all stale votes and timeouts + fc.evictStaleVotesAndTimeouts() + + // Signal the caller that we can vote for the block. The caller will decide whether to construct and + // broadcast the vote. 
+ fc.ConsensusEvents <- &ConsensusEvent{ + EventType: ConsensusEventTypeVote, + BlockHash: fc.chainTip.GetBlockHash(), + BlockHeight: fc.chainTip.GetHeight(), + View: fc.chainTip.GetView(), + } + + // Signal the event loop to reset the internal timers + fc.resetEventLoopSignal <- struct{}{} + + return nil } func (fc *FastHotStuffEventLoop) UpdateView( /* TODO */ ) { @@ -156,8 +204,8 @@ func (fc *FastHotStuffEventLoop) Stop() { // Update the consensus status fc.status = consensusStatusInitialized - // Close all internal and external channels used for signaling - close(fc.internalTimersUpdatedSignal) + // Close all internal channels used for signaling + close(fc.resetEventLoopSignal) close(fc.stopSignal) } @@ -181,7 +229,7 @@ func (fc *FastHotStuffEventLoop) runEventLoop() { { // TODO } - case <-fc.internalTimersUpdatedSignal: + case <-fc.resetEventLoopSignal: { // TODO } @@ -208,3 +256,44 @@ func (fc *FastHotStuffEventLoop) IsRunning() bool { return fc.status == consensusStatusRunning } + +// Evict all locally stored votes and timeout messages with stale views. We can safely use the current +// view to determine what is stale. The consensus mechanism will never construct a block with a view +// that's lower than its current view. Consider the following: +// - In the event the event update the chain tip, we will vote for that block and the view it was proposed in +// - In the event we locally time out a view locally, we will send a timeout message for that view +// +// In both cases, we will never roll back the chain tip, or decrement the current view to construct a +// conflicting block at that lower view that we have previously voted or timed out on. So we are safe to evict +// locally stored votes and timeout messages with stale views because we expect to never use them for +// block construction. 
+//
+// The eviction works as follows:
+// - Votes: if the next block were to be a regular block with a QC aggregated from votes, then it must
+// satisfy nextBlock.GetView() = chainTip.GetView() + 1, which means that currentView = chainTip.GetView() + 1.
+// We can safely evict all votes where vote.GetView() < currentView - 1.
+// - Timeouts: if the next block were to be an empty block with a timeout QC aggregated from timeout messages,
+// then it must satisfy nextBlock.GetView() = timeout.GetView() + 1. We can safely evict all timeout messages with
+// currentView > timeout.GetView() + 1.
+func (fc *FastHotStuffEventLoop) evictStaleVotesAndTimeouts() {
+	// Evict stale vote messages
+	for blockHash, voters := range fc.votesSeen {
+		for _, vote := range voters {
+			if fc.currentView > vote.GetView()+1 {
+				// Each block is proposed at a known view, and has an immutable block hash. Votes are signed on the
+				// tuple (blockhash, view). So, if any vote message for the blockhash has a view that satisfies this
+				// condition, then it's guaranteed that all votes for the same block hash satisfy this condition.
+				// We can safely evict all votes for this block hash.
+ delete(fc.votesSeen, blockHash) + break + } + } + } + + // Evict stale timeout messages + for view := range fc.timeoutsSeen { + if fc.currentView > view+1 { + delete(fc.timeoutsSeen, view) + } + } +} diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index d9a2e86ac..866749468 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -15,8 +15,6 @@ func TestFastHotStuffInitialization(t *testing.T) { { fc := NewFastHotStuffEventLoop() require.Equal(t, consensusStatusNotInitialized, fc.status) - require.Equal(t, fc.IsInitialized(), false) - require.Equal(t, fc.IsRunning(), false) require.NotPanics(t, fc.Stop) // Calling Stop() on an uninitialized instance should be a no-op } @@ -55,8 +53,6 @@ func TestFastHotStuffInitialization(t *testing.T) { require.NoError(t, err) require.Equal(t, consensusStatusInitialized, fc.status) - require.Equal(t, fc.IsInitialized(), true) - require.Equal(t, fc.IsRunning(), false) require.NotPanics(t, fc.Stop) // Calling Stop() on an initialized instance should be a no-op require.Equal(t, fc.status, consensusStatusInitialized) @@ -69,10 +65,110 @@ func TestFastHotStuffInitialization(t *testing.T) { require.Equal(t, fc.timeoutBaseDuration, time.Duration(101)) require.Equal(t, fc.currentView, uint64(2)) - require.Equal(t, len(fc.validators), len(createDummyValidatorSet())) + require.Equal(t, len(fc.validatorsAtChainTip), 2) } } +func TestFastHotStuffProcessSafeBlock(t *testing.T) { + oneHourInNanoSecs := time.Duration(3600000000000) + + fc := NewFastHotStuffEventLoop() + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) + require.NoError(t, err) + + // Test ProcessSafeBlock() function when consensus event loop is not running + { + err := fc.ProcessSafeBlock(createDummyBlock(), createDummyValidatorSet()) + require.Error(t, err) + } + + // Start the consensus event loop + fc.Start() + + // Test ProcessSafeBlock() function with malformed block + { + 
err := fc.ProcessSafeBlock(nil, createDummyValidatorSet()) + require.Error(t, err) + } + + // Test ProcessSafeBlock() function with malformed validator set + { + err := fc.ProcessSafeBlock(createDummyBlock(), nil) + require.Error(t, err) + } + + // Populate the votesSeen and timeoutsSeen maps with dummy data + { + fc.votesSeen = map[[32]byte]map[string]VoteMessage{ + {0}: { // blockHash = 0 + "pubKeyA": createDummyVoteMessage(0), + }, + {1}: { // blockHash = 1 + "pubKeyB": createDummyVoteMessage(1), + }, + {2}: { // blockHash = 2 + "pubKeyC": createDummyVoteMessage(2), + }, + {3}: { // blockHash = 3 + "pubKeyD": createDummyVoteMessage(3), + }, + {4}: { // blockHash = 4 + "pubKeyE": createDummyVoteMessage(4), + }, + } + + fc.timeoutsSeen = map[uint64]map[string]TimeoutMessage{ + 0: { // view = 0 + "pubKeyA": createDummyTimeoutMessage(0), + }, + 1: { // view = 1 + "pubKeyB": createDummyTimeoutMessage(1), + }, + 2: { // view = 2 + "pubKeyC": createDummyTimeoutMessage(2), + }, + 3: { // view = 3 + "pubKeyD": createDummyTimeoutMessage(3), + }, + 4: { // view = 4 + "pubKeyE": createDummyTimeoutMessage(4), + }, + } + } + + // Verify the sizes of the votesSeen and timeoutsSeen maps + { + require.Equal(t, len(fc.votesSeen), 5) + require.Equal(t, len(fc.timeoutsSeen), 5) + } + + // Test ProcessSafeBlock() function with valid parameters + { + nextBlock := createDummyBlock() + nextBlock.height = 2 + nextBlock.view = 3 + + err := fc.ProcessSafeBlock(nextBlock, createDummyValidatorSet()) + require.NoError(t, err) + + require.Equal(t, createDummyBlockHash().GetValue(), fc.chainTip.GetBlockHash().GetValue()) + require.Equal(t, uint64(3), fc.chainTip.GetView()) + require.Equal(t, uint64(2), fc.chainTip.GetHeight()) + + require.Equal(t, uint64(4), fc.currentView) + require.Equal(t, 2, len(fc.validatorsAtChainTip)) + } + + // Verify that stale votes and timeouts have been evicted + { + require.Equal(t, 2, len(fc.votesSeen)) + require.Equal(t, 2, len(fc.timeoutsSeen)) + } + + // Stop 
the event loop + fc.Stop() +} + func TestFastHotStuffEventLoopStartStop(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) tenSecondsInNanoSecs := time.Duration(10000000000) diff --git a/consensus/types.go b/consensus/types.go index c17260ee9..e6c41b942 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -56,7 +56,7 @@ type QuorumCertificate interface { } type VoteMessage interface { - GetView() + GetView() uint64 GetBlockHash() BlockHash GetPublicKey() *bls.PublicKey @@ -85,6 +85,13 @@ type Block interface { GetQC() QuorumCertificate } +// We want a large buffer for the signal channels to ensure threads don't block when trying to push new +// signals. +// +// TODO: is a size of 100 enough? If we want to bullet-proof this, we could back it by a slice as a +// secondary buffer. +const signalChannelBufferSize = 100 + // An instance of FastHotStuffEventLoop is a self-contained module that represents a single node running // the event loop for the Fast HotStuff consensus protocol. The module is initialized at the current chain's // tip, with a given block hash, block height, view number, and validator set. The module is simplified and @@ -121,8 +128,8 @@ type FastHotStuffEventLoop struct { nextBlockConstructionTimeStamp time.Time nextTimeoutTimeStamp time.Time - // The latest block accepted by the caller. We only keep track of and build on top of the chain - // tip here. In the event of a fork, we expect the new tip to be resolved and provided by the caller + // The latest block accepted by the caller. We only keep track of the latest safe block here because + // it's the block we vote on, and construct a vote QC for. chainTip Block // The current view at which we expect to see or propose the next block. In the event of a timeout, // the timeout signal will be triggered for this view. @@ -130,24 +137,24 @@ type FastHotStuffEventLoop struct { // The validator set sorted in decreasing order of stake amount, with a consistent tie-breaking // scheme. 
This validator set is expected to be valid for validating votes and timeouts for the // next block height. - validators []Validator + validatorsAtChainTip []Validator // votesSeen is an in-memory map of all the votes we've seen so far, organized by the block hash - // that was voted on and the public key of the sender. We use a nested map because we want to be - // able to fetch all votes by block hash. - votesSeen map[BlockHash]map[bls.PublicKey]VoteMessage + // that was voted on and the BLS public key string of the sender. We use a nested map because we + // want to be able to fetch all votes by block hash. + votesSeen map[[32]byte]map[string]VoteMessage // timeoutsSeen is an in-memory map of all the timeout messages we've seen so far, organized by - // the timed out view and the public key of the sender. We use a nested map because we want to - // be able to fetch all timeout messages by view. - timeoutsSeen map[uint64]map[bls.PublicKey]TimeoutMessage + // the timed out view and the BLS public key string of the sender. We use a nested map because + // we want to be able to fetch all timeout messages by view. + timeoutsSeen map[uint64]map[string]TimeoutMessage // Externally accessible channel for signals sent to the Server. ConsensusEvents chan *ConsensusEvent // Internal channels used by this module to coordinate the event loop - internalTimersUpdatedSignal chan interface{} - stopSignal chan interface{} + resetEventLoopSignal chan interface{} + stopSignal chan interface{} // Internal statuses and wait groups used to coordinate the start and stop operations for // the event loop. 
diff --git a/consensus/types_test.go b/consensus/types_test.go index d588d14ab..952745e60 100644 --- a/consensus/types_test.go +++ b/consensus/types_test.go @@ -88,3 +88,57 @@ func (b *block) GetView() uint64 { func (b *block) GetQC() QuorumCertificate { return b.qc } + +////////////////////////////////////////////////////////// +// VoteMessage interface implementation for testing +////////////////////////////////////////////////////////// + +type voteMessage struct { + view uint64 + blockHash *blockHash + publicKey *bls.PublicKey + signature *bls.Signature +} + +func (vm *voteMessage) GetView() uint64 { + return vm.view +} + +func (vm *voteMessage) GetBlockHash() BlockHash { + return vm.blockHash +} + +func (vm *voteMessage) GetPublicKey() *bls.PublicKey { + return vm.publicKey +} + +func (vm *voteMessage) GetSignature() *bls.Signature { + return vm.signature +} + +////////////////////////////////////////////////////////// +// TimeoutMessage interface implementation for testing +////////////////////////////////////////////////////////// + +type timeoutMessage struct { + view uint64 + highQC *quorumCertificate + publicKey *bls.PublicKey + signature *bls.Signature +} + +func (tm *timeoutMessage) GetView() uint64 { + return tm.view +} + +func (tm *timeoutMessage) GetHighQC() QuorumCertificate { + return tm.highQC +} + +func (tm *timeoutMessage) GetPublicKey() *bls.PublicKey { + return tm.publicKey +} + +func (tm *timeoutMessage) GetSignature() *bls.Signature { + return tm.signature +} diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 4667d908e..d324dd5a4 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -110,6 +110,24 @@ func createDummyBlock() *block { } } +func createDummyVoteMessage(view uint64) *voteMessage { + return &voteMessage{ + blockHash: createDummyBlockHash(), + view: view, + publicKey: createDummyBLSPublicKey(), + signature: createDummyBLSSignature(), + } +} + +func createDummyTimeoutMessage(view uint64) 
*timeoutMessage { + return &timeoutMessage{ + highQC: createDummyQC(), + view: view, + publicKey: createDummyBLSPublicKey(), + signature: createDummyBLSSignature(), + } +} + func createDummyQC() *quorumCertificate { return &quorumCertificate{ blockHash: createDummyBlockHash(), diff --git a/lib/server.go b/lib/server.go index 3db2e19dd..4184c7d74 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1721,9 +1721,9 @@ func (srv *Server) _handleBlockAccepted(event *BlockEvent) { return } - // Notify the consensus that a block was accepted. + // Notify the consensus event loop that a block was accepted. if srv.fastHotStuffEventLoop != nil { - srv.fastHotStuffEventLoop.UpdateChainTip() + srv.fastHotStuffEventLoop.ProcessSafeBlock(nil, nil) // TODO: fill these out } // Construct an inventory vector to relay to peers. From 250d37c717778497bd679fbadb927c4f415d64d6 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 8 Sep 2023 14:46:44 -0400 Subject: [PATCH 189/762] Implement FastHotStuffEventLoop.AdvanceView(...) (#645) * Implement AdvanceView() function for Fast-HotStuff Consensus * Clean up error messages --- consensus/event_loop.go | 30 +++++++++++-- consensus/event_loop_test.go | 87 ++++++++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+), 4 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 48c7ea907..0e6557694 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -83,6 +83,32 @@ func (fc *FastHotStuffEventLoop) Init( return nil } +// AdvanceView is called when the chain tip has not changed but the consensus instance has signaled a +// timeout, and can advance to the next view. This function resets the timeout timer and crank timer +// for the next view. +func (fc *FastHotStuffEventLoop) AdvanceView() (uint64, error) { + // Grab the consensus instance's lock + fc.lock.Lock() + defer fc.lock.Unlock() + + // Ensure the consensus instance is running. 
This guarantees that the chain tip and validator set + // have already been set. + if fc.status != consensusStatusRunning { + return 0, errors.New("FastHotStuffEventLoop.AdvanceView: Consensus instance is not running") + } + + // Advance the view + fc.currentView++ + + // Evict all stale votes and timeouts + fc.evictStaleVotesAndTimeouts() + + // Signal the event loop to reset the internal timers + fc.resetEventLoopSignal <- struct{}{} + + return fc.currentView, nil +} + // ProcessSafeBlock must only be called when the caller has accepted a new block, connected it // to the tip of the blockchain, and determined that the block is safe to vote on. Given such a // block, this function resets the internal timers and state of the Fast HotStuff consensus that @@ -137,10 +163,6 @@ func (fc *FastHotStuffEventLoop) ProcessSafeBlock(block Block, validators []Vali return nil } -func (fc *FastHotStuffEventLoop) UpdateView( /* TODO */ ) { - // TODO -} - func (fc *FastHotStuffEventLoop) ProcessVoteMsg( /* TODO */ ) { // TODO } diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 866749468..6da3d5674 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -169,6 +169,93 @@ func TestFastHotStuffProcessSafeBlock(t *testing.T) { fc.Stop() } +func TestAdvanceView(t *testing.T) { + oneHourInNanoSecs := time.Duration(3600000000000) + + fc := NewFastHotStuffEventLoop() + + // BlockHeight = 1, Current View = 2 + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) + require.NoError(t, err) + + // Running AdvanceView() should fail because the consensus event loop is not running + { + _, err := fc.AdvanceView() + require.Error(t, err) + } + + // Start the consensus event loop + fc.Start() + + // Populate the votesSeen and timeoutsSeen maps with dummy data + { + fc.votesSeen = map[[32]byte]map[string]VoteMessage{ + {0}: { // blockHash = 0 + "pubKeyA": createDummyVoteMessage(0), + }, + {1}: { // 
blockHash = 1 + "pubKeyB": createDummyVoteMessage(1), + }, + {2}: { // blockHash = 2 + "pubKeyC": createDummyVoteMessage(2), + }, + {3}: { // blockHash = 3 + "pubKeyD": createDummyVoteMessage(3), + }, + {4}: { // blockHash = 4 + "pubKeyE": createDummyVoteMessage(4), + }, + } + + fc.timeoutsSeen = map[uint64]map[string]TimeoutMessage{ + 0: { // view = 0 + "pubKeyA": createDummyTimeoutMessage(0), + }, + 1: { // view = 1 + "pubKeyB": createDummyTimeoutMessage(1), + }, + 2: { // view = 2 + "pubKeyC": createDummyTimeoutMessage(2), + }, + 3: { // view = 3 + "pubKeyD": createDummyTimeoutMessage(3), + }, + 4: { // view = 4 + "pubKeyE": createDummyTimeoutMessage(4), + }, + } + } + + // Run AdvanceView() to view 3 + { + newView, err := fc.AdvanceView() + require.NoError(t, err) + require.Equal(t, uint64(3), newView) + } + + // Verify that vote and timeout messages haven't changed + { + require.Equal(t, len(fc.votesSeen), 3) + require.Equal(t, len(fc.timeoutsSeen), 3) + } + + // Run AdvanceView() to view 4 + { + newView, err := fc.AdvanceView() + require.NoError(t, err) + require.Equal(t, uint64(4), newView) + } + + // Verify that stale votes and timeouts have been evicted + { + require.Equal(t, len(fc.votesSeen), 2) + require.Equal(t, len(fc.timeoutsSeen), 2) + } + + // Stop the event loop + fc.Stop() +} + func TestFastHotStuffEventLoopStartStop(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) tenSecondsInNanoSecs := time.Duration(10000000000) From 4998cfd0d163df5d5d6934528b5feea4be1cdab0 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 11 Sep 2023 02:05:17 -0700 Subject: [PATCH 190/762] Mempool Status and Lock updates, and small refactors (#647) * Create Skeleton for PoS Consensus Components * Status updates; lock updates; mempool nits * Persister tests nits * mutex fix * Review round * Fix merge * Fix comment * Fix --------- Co-authored-by: iamsofonias Co-authored-by: tholonious 
<99746187+tholonious@users.noreply.github.com> --- lib/pos_mempool.go | 85 +++++++++++++++++++------------ lib/pos_mempool_persister.go | 50 +++++++++++------- lib/pos_mempool_persister_test.go | 13 ++++- lib/pos_mempool_test.go | 21 +++++--- 4 files changed, 111 insertions(+), 58 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index e6a953edb..ef8918666 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -110,6 +110,9 @@ func NewPosMempool(params *DeSoParams, globalParams *GlobalParamsEntry, latestBl } func (dmp *PosMempool) Start() error { + dmp.Lock() + defer dmp.Unlock() + if dmp.IsRunning() { return nil } @@ -132,23 +135,19 @@ func (dmp *PosMempool) Start() error { // Start the persister and retrieve transactions from the database. dmp.persister.Start() - txns, err := dmp.persister.GetPersistedTransactions() + err = dmp.loadPersistedTransactions() if err != nil { - return errors.Wrapf(err, "PosMempool.Start: Problem retrieving transactions from persister") + return errors.Wrapf(err, "PosMempool.Start: Problem loading persisted transactions") } - // We set the persistToDb flag to false so that persister doesn't try to save the transactions. - for _, txn := range txns { - if err := dmp.addTransactionNoLock(txn, false); err != nil { - glog.Errorf("PosMempool.Start: Problem adding transaction with hash (%v) from persister: %v", - txn.Hash, err) - } - } - dmp.status = PosMempoolStatusRunning + dmp.status = PosMempoolStatusRunning return nil } func (dmp *PosMempool) Stop() { + dmp.Lock() + defer dmp.Unlock() + if !dmp.IsRunning() { return } @@ -174,11 +173,11 @@ func (dmp *PosMempool) IsRunning() bool { // AddTransaction validates a MsgDeSoTxn transaction and adds it to the mempool if it is valid. // If the mempool overflows as a result of adding the transaction, the mempool is pruned. 
func (dmp *PosMempool) AddTransaction(txn *MsgDeSoTxn) error { - if !dmp.IsRunning() { - return errors.Wrapf(MempoolErrorNotRunning, "PosMempool.AddTransaction: ") - } + // First, validate that the transaction is properly formatted according to BalanceModel. We acquire a read lock on + // the mempool. This allows multiple goroutines to safely perform transaction validation concurrently. In particular, + // transaction signature verification can be parallelized. + dmp.RLock() - // First, validate that the transaction is properly formatted according to BalanceModel. if err := ValidateDeSoTxnSanityBalanceModel(txn, dmp.latestBlockHeight, dmp.params, dmp.globalParams); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem validating transaction sanity") } @@ -193,12 +192,17 @@ func (dmp *PosMempool) AddTransaction(txn *MsgDeSoTxn) error { if _, err := dmp.latestBlockView.VerifySignature(txn, uint32(dmp.latestBlockHeight)); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Signature validation failed") } + dmp.RUnlock() // If we get this far, it means that the transaction is valid. We can now add it to the mempool. // We lock the mempool to ensure that no other thread is modifying it while we add the transaction. dmp.Lock() defer dmp.Unlock() + if !dmp.IsRunning() { + return errors.Wrapf(MempoolErrorNotRunning, "PosMempool.AddTransaction: ") + } + // Add the transaction to the mempool and then prune if needed. if err := dmp.addTransactionNoLock(mempoolTx, true); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem adding transaction to mempool") @@ -247,15 +251,32 @@ func (dmp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) er return nil } +// loadPersistedTransactions fetches transactions from the persister's storage and adds the transactions to the mempool. +// No lock is held and (persistToDb = false) flag is used when adding transactions internally. 
+func (dmp *PosMempool) loadPersistedTransactions() error { + txns, err := dmp.persister.GetPersistedTransactions() + if err != nil { + return errors.Wrapf(err, "PosMempool.Start: Problem retrieving transactions from persister") + } + // We set the persistToDb flag to false so that persister doesn't try to save the transactions. + for _, txn := range txns { + if err := dmp.addTransactionNoLock(txn, false); err != nil { + glog.Errorf("PosMempool.Start: Problem adding transaction with hash (%v) from persister: %v", + txn.Hash, err) + } + } + return nil +} + // RemoveTransaction is the main function for removing a transaction from the mempool. func (dmp *PosMempool) RemoveTransaction(txnHash *BlockHash) error { + dmp.Lock() + defer dmp.Unlock() + if !dmp.IsRunning() { return errors.Wrapf(MempoolErrorNotRunning, "PosMempool.RemoveTransaction: ") } - dmp.Lock() - defer dmp.Unlock() - // Get the transaction from the register. txn := dmp.txnRegister.GetTransaction(txnHash) if txn == nil { @@ -290,13 +311,13 @@ func (dmp *PosMempool) removeTransactionNoLock(txn *MempoolTx, persistToDb bool) // GetTransaction returns the transaction with the given hash if it exists in the mempool. This function is thread-safe. func (dmp *PosMempool) GetTransaction(txnHash *BlockHash) *MsgDeSoTxn { + dmp.RLock() + defer dmp.RUnlock() + if !dmp.IsRunning() { return nil } - dmp.RLock() - defer dmp.RUnlock() - txn := dmp.txnRegister.GetTransaction(txnHash) if txn == nil || txn.Tx == nil { return nil @@ -307,13 +328,13 @@ func (dmp *PosMempool) GetTransaction(txnHash *BlockHash) *MsgDeSoTxn { // GetTransactions returns all transactions in the mempool ordered by the Fee-Time algorithm. This function is thread-safe. 
func (dmp *PosMempool) GetTransactions() []*MsgDeSoTxn { + dmp.RLock() + defer dmp.RUnlock() + if !dmp.IsRunning() { return nil } - dmp.RLock() - defer dmp.RUnlock() - var desoTxns []*MsgDeSoTxn poolTxns := dmp.txnRegister.GetFeeTimeTransactions() for _, txn := range poolTxns { @@ -336,13 +357,13 @@ func (dmp *PosMempool) GetTransactions() []*MsgDeSoTxn { // // Note that the iteration pattern is not thread-safe. Another lock should be used to ensure thread-safety. func (dmp *PosMempool) GetIterator() MempoolIterator { + dmp.RLock() + defer dmp.RUnlock() + if !dmp.IsRunning() { return nil } - dmp.RLock() - defer dmp.RUnlock() - return NewPosMempoolIterator(dmp.txnRegister.GetFeeTimeIterator()) } @@ -369,13 +390,13 @@ func (dmp *PosMempool) pruneNoLock() error { // UpdateLatestBlock updates the latest block view and latest block node in the mempool. func (dmp *PosMempool) UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) { + dmp.Lock() + defer dmp.Unlock() + if !dmp.IsRunning() { return } - dmp.Lock() - defer dmp.Unlock() - dmp.latestBlockView = blockView dmp.latestBlockHeight = blockHeight } @@ -385,13 +406,13 @@ func (dmp *PosMempool) UpdateLatestBlock(blockView *UtxoView, blockHeight uint64 // new minimum will be removed from the mempool. To safely handle this, this method re-creates the TransactionRegister // with the new global params and re-adds all transactions in the mempool to the new register. 
func (dmp *PosMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { + dmp.Lock() + defer dmp.Unlock() + if !dmp.IsRunning() { return } - dmp.Lock() - defer dmp.Unlock() - dmp.globalParams = globalParams mempoolTxns := dmp.txnRegister.GetFeeTimeTransactions() newRegister := NewTransactionRegister(dmp.globalParams) diff --git a/lib/pos_mempool_persister.go b/lib/pos_mempool_persister.go index 55d1b735d..25f6480f2 100644 --- a/lib/pos_mempool_persister.go +++ b/lib/pos_mempool_persister.go @@ -10,8 +10,7 @@ import ( ) const ( - DbMempoolContextId = "transactions" - eventQueueSize = 10000 + eventQueueSize = 10000 ) type MempoolEventType int @@ -70,7 +69,10 @@ func NewMempoolPersister(db *badger.DB, mempoolBackupTimeMilliseconds int) *Memp // Start is the entry point for the MempoolPersister. It starts the run loop and begins persisting transactions to the database. func (mp *MempoolPersister) Start() { - if mp.status == MempoolPersisterStatusRunning { + mp.Lock() + defer mp.Unlock() + + if mp.IsRunning() { return } @@ -119,16 +121,21 @@ func (mp *MempoolPersister) run() { } // Stop is used to stop the persister thread and reset the persister state. It will wait for the persister thread to -// flush the outstanding updateBatch to the database before returning. +// flush the outstanding updateBatch to the database before returning. Stop should not be called in concurrent threads. func (mp *MempoolPersister) Stop() error { - if mp.status == MempoolPersisterStatusNotRunning { + mp.Lock() + if !mp.IsRunning() { return nil } + mp.Unlock() + // Enqueue the exit event and wait for the persister thread to stop. - mp.eventQueue <- &MempoolEvent{Type: MempoolEventExit} + event := &MempoolEvent{Type: MempoolEventExit} + mp.EnqueueEvent(event) mp.stopGroup.Wait() + // Persist any outstanding transactions. 
- if err := mp.persistBatch(); err != nil { + if err := mp.persistBatchNoLock(); err != nil { return errors.Wrapf(err, "MempoolPersister: Error persisting batch") } // Reset the persister state. @@ -137,16 +144,28 @@ func (mp *MempoolPersister) Stop() error { return nil } +func (mp *MempoolPersister) IsRunning() bool { + return mp.status == MempoolPersisterStatusRunning +} + // persistBatch is used to flush the updateBatch to the database. It will iterate through the updateBatch and add or remove // transactions from the database based on the event type. Error is returned if the persister is not running or if there // is an error persisting the batch. func (mp *MempoolPersister) persistBatch() error { - if mp.status == MempoolPersisterStatusNotRunning { + mp.Lock() + defer mp.Unlock() + + if !mp.IsRunning() { return nil } - mp.Lock() - defer mp.Unlock() + return mp.persistBatchNoLock() +} + +func (mp *MempoolPersister) persistBatchNoLock() error { + if !mp.IsRunning() { + return nil + } // If there are no transactions to persist, return. if len(mp.updateBatch) == 0 { @@ -193,13 +212,13 @@ func (mp *MempoolPersister) persistBatch() error { // GetPersistedTransactions is used to retrieve all transactions from the database. It will return an error if the persister // is not currently running or if there was an issue retrieving the transactions. func (mp *MempoolPersister) GetPersistedTransactions() ([]*MempoolTx, error) { - if mp.status == MempoolPersisterStatusNotRunning { - return nil, errors.Wrapf(MempoolErrorNotRunning, "MempoolPersister: Cannot retrieve transactions while running") - } - mp.Lock() defer mp.Unlock() + if !mp.IsRunning() { + return nil, errors.Wrapf(MempoolErrorNotRunning, "MempoolPersister: Cannot retrieve transactions while not running") + } + var mempoolTxns []*MempoolTx err := mp.db.View(func(txn *badger.Txn) error { // Iterate through the transaction records in the database. 
@@ -237,9 +256,6 @@ func (mp *MempoolPersister) EnqueueEvent(event *MempoolEvent) { // reset is used to clear the persister state. func (mp *MempoolPersister) reset() { - mp.Lock() - defer mp.Unlock() - mp.updateBatch = nil mp.eventQueue = make(chan *MempoolEvent, eventQueueSize) } diff --git a/lib/pos_mempool_persister_test.go b/lib/pos_mempool_persister_test.go index 3159745c5..6050456c4 100644 --- a/lib/pos_mempool_persister_test.go +++ b/lib/pos_mempool_persister_test.go @@ -38,8 +38,7 @@ func TestMempoolPersister(t *testing.T) { // Start the mempool persister. mempoolPersister.Start() - - defer mempoolPersister.Stop() + require.True(mempoolPersister.IsRunning()) // Add all the transactions to the mempool. for _, txn := range txnPool { @@ -102,6 +101,9 @@ func TestMempoolPersister(t *testing.T) { retrievedTxns, err = mempoolPersister.GetPersistedTransactions() require.NoError(err) require.Equal(len(txnPool)-addRemoveCases, len(retrievedTxns)) + + require.NoError(mempoolPersister.Stop()) + require.False(mempoolPersister.IsRunning()) } func TestMempoolPersisterRestart(t *testing.T) { @@ -130,6 +132,7 @@ func TestMempoolPersisterRestart(t *testing.T) { // Start the mempool persister. mempoolPersister.Start() + require.True(mempoolPersister.IsRunning()) // Add all the transactions to the mempool. for _, txn := range txnPool { @@ -147,6 +150,7 @@ func TestMempoolPersisterRestart(t *testing.T) { // Stop the mempool persister. require.NoError(mempoolPersister.Stop()) + require.False(mempoolPersister.IsRunning()) // Make sure we get an error retrieving transactions on stopped persister. _, err = mempoolPersister.GetPersistedTransactions() @@ -160,12 +164,17 @@ func TestMempoolPersisterRestart(t *testing.T) { // Restart the mempool persister. mempoolPersister.Start() + require.True(mempoolPersister.IsRunning()) // Make sure we can retrieve the transactions again. 
retrievedTxns, err = mempoolPersister.GetPersistedTransactions() require.NoError(err) require.Equal(len(txnPool), len(retrievedTxns)) + // Stop the mempool persister. + require.NoError(mempoolPersister.Stop()) + require.False(mempoolPersister.IsRunning()) + require.NoError(db.Close()) require.NoError(os.RemoveAll(dir)) } diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index c8a689500..bbf3462af 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -18,8 +18,9 @@ func TestPosMempoolStart(t *testing.T) { mempool := NewPosMempool(¶ms, globalParams, nil, 0, dir) require.NoError(mempool.Start()) - require.Equal(PosMempoolStatusRunning, mempool.status) + require.True(mempool.IsRunning()) mempool.Stop() + require.False(mempool.IsRunning()) } func TestPosMempoolRestartWithTransactions(t *testing.T) { @@ -41,7 +42,7 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir) require.NoError(mempool.Start()) - require.Equal(PosMempoolStatusRunning, mempool.status) + require.True(mempool.IsRunning()) txn1 := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) txn2 := _generateTestTxn(t, rand, feeMin, feeMax, m1PubBytes, m1Priv, 100, 0) @@ -51,13 +52,15 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { poolTxns := mempool.GetTransactions() require.Equal(2, len(poolTxns)) mempool.Stop() + require.False(mempool.IsRunning()) newPool := NewPosMempool(params, globalParams, latestBlockView, 2, dir) require.NoError(newPool.Start()) - require.Equal(PosMempoolStatusRunning, newPool.status) + require.True(newPool.IsRunning()) newPoolTxns := newPool.GetTransactions() require.Equal(2, len(newPoolTxns)) newPool.Stop() + require.False(newPool.IsRunning()) } func TestPosMempoolPrune(t *testing.T) { @@ -80,7 +83,7 @@ func TestPosMempoolPrune(t *testing.T) { params.MaxMempoolPosSizeBytes = 500 mempool := NewPosMempool(params, globalParams, latestBlockView, 2, 
dir) require.NoError(mempool.Start()) - require.Equal(PosMempoolStatusRunning, mempool.status) + require.True(mempool.IsRunning()) var txns []*MsgDeSoTxn for ii := 0; ii < 10; ii++ { @@ -107,10 +110,11 @@ func TestPosMempoolPrune(t *testing.T) { // Remove one transaction. require.NoError(mempool.RemoveTransaction(fetchedTxns[0].Hash())) mempool.Stop() + require.False(mempool.IsRunning()) newPool := NewPosMempool(params, globalParams, latestBlockView, 2, dir) require.NoError(newPool.Start()) - require.Equal(PosMempoolStatusRunning, newPool.status) + require.True(newPool.IsRunning()) require.Equal(2, len(newPool.GetTransactions())) // Remove the other transactions. @@ -136,6 +140,7 @@ func TestPosMempoolPrune(t *testing.T) { index++ } newPool.Stop() + require.False(newPool.IsRunning()) } func TestPosMempoolUpdateGlobalParams(t *testing.T) { @@ -157,7 +162,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir) require.NoError(mempool.Start()) - require.Equal(PosMempoolStatusRunning, mempool.status) + require.True(mempool.IsRunning()) var txns []*MsgDeSoTxn for ii := 0; ii < 100; ii++ { @@ -178,13 +183,15 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { mempool.UpdateGlobalParams(newGlobalParams) require.Equal(0, len(mempool.GetTransactions())) mempool.Stop() + require.False(mempool.IsRunning()) newPool := NewPosMempool(params, newGlobalParams, latestBlockView, 2, dir) require.NoError(newPool.Start()) - require.Equal(PosMempoolStatusRunning, newPool.status) + require.True(newPool.IsRunning()) newPoolTxns := newPool.GetTransactions() require.Equal(0, len(newPoolTxns)) newPool.Stop() + require.False(newPool.IsRunning()) } func _blockchainSetup(t *testing.T) (_params *DeSoParams, _db *badger.DB) { From 53e5e50ba561882683362acc892c43aeef7c2c66 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 11 Sep 2023 11:19:38 -0700 Subject: [PATCH 
191/762] Mempool Status and Lock updates (#659) * Fix comment * Fix From b0f9fdd8fec0615104b8f1c37216ac3f55a7e3df Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 11 Sep 2023 11:53:34 -0700 Subject: [PATCH 192/762] Fix status (#662) * Fix comment * Fix From b2024db4e788cb58aa4e3f7b1c936fa360a2d606 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 11 Sep 2023 11:53:56 -0700 Subject: [PATCH 193/762] Mempool Fix Read Lock (#660) * Fix comment * Fix * Fix read-lock deadlock * rename --- lib/pos_mempool.go | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index ef8918666..445910cd5 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -176,24 +176,10 @@ func (dmp *PosMempool) AddTransaction(txn *MsgDeSoTxn) error { // First, validate that the transaction is properly formatted according to BalanceModel. We acquire a read lock on // the mempool. This allows multiple goroutines to safely perform transaction validation concurrently. In particular, // transaction signature verification can be parallelized. - dmp.RLock() - - if err := ValidateDeSoTxnSanityBalanceModel(txn, dmp.latestBlockHeight, dmp.params, dmp.globalParams); err != nil { - return errors.Wrapf(err, "PosMempool.AddTransaction: Problem validating transaction sanity") - } - - // Construct the MempoolTx from the MsgDeSoTxn. 
- mempoolTx, err := NewMempoolTx(txn, dmp.latestBlockHeight) - if err != nil { - return errors.Wrapf(err, "PosMempool.AddTransaction: Problem constructing MempoolTx") + if err := dmp.validateTransaction(txn); err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Problem verifying transaction") } - // Check transaction signature - if _, err := dmp.latestBlockView.VerifySignature(txn, uint32(dmp.latestBlockHeight)); err != nil { - return errors.Wrapf(err, "PosMempool.AddTransaction: Signature validation failed") - } - dmp.RUnlock() - // If we get this far, it means that the transaction is valid. We can now add it to the mempool. // We lock the mempool to ensure that no other thread is modifying it while we add the transaction. dmp.Lock() @@ -203,6 +189,12 @@ func (dmp *PosMempool) AddTransaction(txn *MsgDeSoTxn) error { return errors.Wrapf(MempoolErrorNotRunning, "PosMempool.AddTransaction: ") } + // Construct the MempoolTx from the MsgDeSoTxn. + mempoolTx, err := NewMempoolTx(txn, dmp.latestBlockHeight) + if err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Problem constructing MempoolTx") + } + // Add the transaction to the mempool and then prune if needed. 
if err := dmp.addTransactionNoLock(mempoolTx, true); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem adding transaction to mempool") @@ -215,6 +207,22 @@ func (dmp *PosMempool) AddTransaction(txn *MsgDeSoTxn) error { return nil } +func (dmp *PosMempool) validateTransaction(txn *MsgDeSoTxn) error { + dmp.RLock() + defer dmp.RUnlock() + + if err := ValidateDeSoTxnSanityBalanceModel(txn, dmp.latestBlockHeight, dmp.params, dmp.globalParams); err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Problem validating transaction sanity") + } + + // Check transaction signature + if _, err := dmp.latestBlockView.VerifySignature(txn, uint32(dmp.latestBlockHeight)); err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Signature validation failed") + } + + return nil +} + func (dmp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) error { userPk := NewPublicKey(txn.Tx.PublicKey) txnFee := txn.Tx.TxnFeeNanos From 82bff164f6eeb53f7e8ad0beb46bf25ee70e9e4f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 11 Sep 2023 19:02:28 -0400 Subject: [PATCH 194/762] Fast HotStuff Vote Msg Storage (#648) * Fast HotStuff Vote Msg Storage * Cleanup * Error message cleanup * Addres Piotr's comments --- consensus/event_loop.go | 126 +++++++++++++++++++++++++++++++++-- consensus/event_loop_test.go | 81 ++++++++++++++++++++++ consensus/types.go | 17 +++-- consensus/utils.go | 44 +++++++++++- consensus/utils_test.go | 52 ++++++++++++++- 5 files changed, 307 insertions(+), 13 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 0e6557694..248270bfd 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -1,9 +1,12 @@ package consensus import ( - "errors" "sync" "time" + + "github.com/pkg/errors" + + "github.com/deso-protocol/core/bls" ) func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { @@ -163,8 +166,75 @@ func (fc 
*FastHotStuffEventLoop) ProcessSafeBlock(block Block, validators []Vali return nil } -func (fc *FastHotStuffEventLoop) ProcessVoteMsg( /* TODO */ ) { - // TODO +// ProcessValidatorVote captures an incoming vote message from a validator. This module has no knowledge +// of who the leader is for a given view, so it is up to the caller to decide whether to process the vote +// message or not. If a vote message is passed here, then the consensus instance will store it until +// it can construct a QC with it or until the vote's view has gone stale. +// +// This function does not directly check if the vote results in a stake weighted super majority vote +// for the target block. Instead, it stores the vote locally and waits for the crank timer to determine +// when to run the super majority vote check, and to signal the caller that we can construct a QC. +// +// Reference implementation: +// https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L756 +func (fc *FastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { + // Grab the consensus instance's lock + fc.lock.Lock() + defer fc.lock.Unlock() + + // Ensure the consensus instance is running. This guarantees that the chain tip and validator set + // have already been set. + if fc.status != consensusStatusRunning { + return errors.New("FastHotStuffEventLoop.ProcessValidatorVote: Consensus instance is not running") + } + + // Do a basic integrity check on the vote message + if !isProperlyFormedVote(vote) { + return errors.New("FastHotStuffEventLoop.ProcessValidatorVote: Malformed vote message") + } + + // Check if the vote is stale + if isStaleVote(fc.currentView, vote) { + return errors.Errorf("FastHotStuffEventLoop.ProcessValidatorVote: Vote has a stale view %d", vote.GetView()) + } + + // Check if the public key has already voted for this view. The protocol does not allow + // a validator to vote for more than one block in a given view. 
+ if fc.hasVotedForView(vote.GetPublicKey(), vote.GetView()) { + return errors.Errorf( + "FastHotStuffEventLoop.ProcessValidatorVote: validator %s has already voted for view %d", + vote.GetPublicKey().ToString(), + vote.GetView(), + ) + } + + // Check if the public key has already timed out for this view. The protocol does not allow + // for a validator to vote for a block in a view that it has already timed out for. + if fc.hasTimedOutForView(vote.GetPublicKey(), vote.GetView()) { + return errors.Errorf( + "FastHotStuffEventLoop.ProcessValidatorVote: validator %s has already timed out for view %d", + vote.GetPublicKey().ToString(), + vote.GetView(), + ) + } + + // Compute the value sha3-256(vote.View, vote.BlockHash) + voteSignaturePayload := GetVoteSignaturePayload(vote.GetView(), vote.GetBlockHash()) + + // Verify the vote signature + if !isValidSignature(vote.GetPublicKey(), vote.GetSignature(), voteSignaturePayload[:]) { + return errors.New("FastHotStuffEventLoop.ProcessValidatorVote: Invalid signature") + } + + // Note: we do not check if the vote is for the current chain tip's blockhash. During leader changes + // where we will be the next block proposer, it is possible for us to receive a vote for a block that + // we haven't seen yet, but we will need to construct the QC for the block as we are the next leader. + // To make this code resilient to these race conditions during leader changes, we simply store the vote + // as long as it's properly formed and not stale. + + fc.storeVote(voteSignaturePayload, vote) + + return nil } func (pc *FastHotStuffEventLoop) ProcessTimeoutMsg( /* TODO */ ) { @@ -301,7 +371,7 @@ func (fc *FastHotStuffEventLoop) evictStaleVotesAndTimeouts() { // Evict stale vote messages for blockHash, voters := range fc.votesSeen { for _, vote := range voters { - if fc.currentView > vote.GetView()+1 { + if isStaleVote(fc.currentView, vote) { // Each block is proposed at a known view, and has an immutable block hash. 
Votes are signed on the // tuple (blockhash, view). So, if any vote message for the blockhash has a view that satisfies this // condition, then it's guaranteed that all votes for the same block hash have satisfy this condition. @@ -319,3 +389,51 @@ func (fc *FastHotStuffEventLoop) evictStaleVotesAndTimeouts() { } } } + +func (fc *FastHotStuffEventLoop) storeVote(signaturePayload [32]byte, vote VoteMessage) { + votesForBlockHash, ok := fc.votesSeen[signaturePayload] + if !ok { + votesForBlockHash = make(map[string]VoteMessage) + fc.votesSeen[signaturePayload] = votesForBlockHash + } + + votesForBlockHash[vote.GetPublicKey().ToString()] = vote +} + +func (fc *FastHotStuffEventLoop) hasVotedForView(publicKey *bls.PublicKey, view uint64) bool { + // This is an O(n) operation that scales with the number of block hashes that we have stored + // votes for. In practice, n will be very small because we evict stale votes, and server.go + // will be smart about not processing votes for views we won't be the block proposer for. + // + // TODO: We can further optimize this by adding a second map[view][publicKey]VoteMessage, but + // this is unnecessary for the forseeable future. + + // Compute the string encoding for the public key + publicKeyString := publicKey.ToString() + + // Search for the public key's votes across all existing block hashes + for _, votesForBlock := range fc.votesSeen { + vote, ok := votesForBlock[publicKeyString] + if ok && vote.GetView() == view { + return true + } + } + + return false +} + +func (fc *FastHotStuffEventLoop) hasTimedOutForView(publicKey *bls.PublicKey, view uint64) bool { + timeoutsForView, ok := fc.timeoutsSeen[view] + if !ok { + return false + } + + // If the public key exists for the view, then we know the validator has sent a valid + // timeout message for the view. 
+ _, ok = timeoutsForView[publicKey.ToString()] + return ok +} + +func isStaleVote(currentView uint64, vote VoteMessage) bool { + return currentView > vote.GetView()+1 +} diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 6da3d5674..8380b6d79 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -256,6 +256,87 @@ func TestAdvanceView(t *testing.T) { fc.Stop() } +func TestProcessValidatorVote(t *testing.T) { + oneHourInNanoSecs := time.Duration(3600000000000) + + fc := NewFastHotStuffEventLoop() + + // BlockHeight = 1, Current View = 2 + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) + require.NoError(t, err) + + // Start the event loop + fc.Start() + + // Current View = 3 + { + currentView, err := fc.AdvanceView() + require.NoError(t, err) + require.Equal(t, uint64(3), currentView) + } + + // Test with malformed vote + { + err := fc.ProcessValidatorVote(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "Malformed vote message") + } + + // Test invalid signature + { + vote := createDummyVoteMessage(3) + vote.signature = createDummyBLSSignature() + err := fc.ProcessValidatorVote(vote) + require.Error(t, err) + require.Contains(t, err.Error(), "Invalid signature") + } + + // Test with stale view + { + vote := createDummyVoteMessage(1) + err := fc.ProcessValidatorVote(vote) + require.Error(t, err) + require.Contains(t, err.Error(), "Vote has a stale view") + } + + // Test when we've already seen a vote from the validator for the same view + { + vote := createDummyVoteMessage(3) + fc.votesSeen[GetVoteSignaturePayload(vote.GetView(), vote.GetBlockHash())] = map[string]VoteMessage{ + vote.publicKey.ToString(): vote, + } + + err := fc.ProcessValidatorVote(vote) + require.Error(t, err) + require.Contains(t, err.Error(), "has already voted for view") + } + + // Test when we've already seen a timeout from the validator for the same view + { + vote := 
createDummyVoteMessage(4) + timeout := createDummyTimeoutMessage(4) + timeout.publicKey = vote.publicKey + + fc.timeoutsSeen[timeout.GetView()] = map[string]TimeoutMessage{ + timeout.publicKey.ToString(): timeout, + } + + err := fc.ProcessValidatorVote(vote) + require.Error(t, err) + require.Contains(t, err.Error(), "has already timed out for view") + } + + // Test happy path + { + vote := createDummyVoteMessage(3) + err := fc.ProcessValidatorVote(vote) + require.NoError(t, err) + } + + // Stop the event loop + fc.Stop() +} + func TestFastHotStuffEventLoopStartStop(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) tenSecondsInNanoSecs := time.Duration(10000000000) diff --git a/consensus/types.go b/consensus/types.go index e6c41b942..682d6a70d 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -60,6 +60,9 @@ type VoteMessage interface { GetBlockHash() BlockHash GetPublicKey() *bls.PublicKey + + // The validator's BLS signature of the (View, BlockHash) pair. This represents the validator's + // vote for this block. The block height is implicitly captured in the block hash. GetSignature() *bls.Signature } @@ -89,13 +92,14 @@ type Block interface { // signals. // // TODO: is a size of 100 enough? If we want to bullet-proof this, we could back it by a slice as a -// secondary buffer. +// secondary buffer. That seems unnecessary since every channel will only have signals pushed by a single +// producer thread. const signalChannelBufferSize = 100 // An instance of FastHotStuffEventLoop is a self-contained module that represents a single node running // the event loop for the Fast HotStuff consensus protocol. The module is initialized at the current chain's // tip, with a given block hash, block height, view number, and validator set. The module is simplified and -// does not know whether its role is that of a block proposer or a validator. +// does not know whether its role is that of a block proposer or a replica validator. 
// // Given a block that's at the tip of the current chain, this module maintains its own internal data structures // and runs internal timers that handles all of the following: @@ -139,9 +143,12 @@ type FastHotStuffEventLoop struct { // next block height. validatorsAtChainTip []Validator - // votesSeen is an in-memory map of all the votes we've seen so far, organized by the block hash - // that was voted on and the BLS public key string of the sender. We use a nested map because we - // want to be able to fetch all votes by block hash. + // votesSeen is an in-memory map of all the votes we've seen so far. It's a nested map with the + // following nested key structure: + // + // sha256(vote.View, vote.BlockHash) - > string(vote.PublicKey) -> VoteMessage + // + // We use a nested map as above because we want to be able to efficiently fetch all votes by block hash. votesSeen map[[32]byte]map[string]VoteMessage // timeoutsSeen is an in-memory map of all the timeout messages we've seen so far, organized by diff --git a/consensus/utils.go b/consensus/utils.go index 3614a26e8..9e915fbc5 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -1,17 +1,35 @@ package consensus import ( + "encoding/binary" "reflect" + "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" + "golang.org/x/crypto/sha3" ) +// When voting on a block, validators sign the payload sha3-256(View, BlockHash) with their BLS +// private key. This hash guarantees that the view and block hash fields in a VoteMessage +// have not been tampered with, while maintaining all existing guarantees that the validator +// has voted for a given block. 
+//
+// Reference Implementation:
+// https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L294
+func GetVoteSignaturePayload(view uint64, blockHash BlockHash) [32]byte {
+	viewBytes := make([]byte, 8)
+	binary.BigEndian.PutUint64(viewBytes, view)
+	blockHashBytes := blockHash.GetValue()
+
+	return sha3.Sum256(append(viewBytes, blockHashBytes[:]...))
+}
+
 // This function checks if the block is properly formed. These are all surface level checks that
 // ensure that critical fields in the block are not nil so that the code in this package does not
 // panic.
 func isProperlyFormedBlock(block Block) bool {
 	// The block must be non-nil
-	if block == nil {
+	if isInterfaceNil(block) {
 		return false
 	}
 
@@ -50,6 +68,25 @@ func isProperlyFormedValidatorSet(validators []Validator) bool {
 	})
 }
 
+func isProperlyFormedVote(vote VoteMessage) bool {
+	// The vote must be non-nil
+	if vote == nil {
+		return false
+	}
+
+	// The view must be non-zero and block hash non-nil
+	if vote.GetView() == 0 || isInterfaceNil(vote.GetBlockHash()) {
+		return false
+	}
+
+	// The signature and public key must be non-nil
+	if vote.GetSignature() == nil || vote.GetPublicKey() == nil {
+		return false
+	}
+
+	return true
+}
+
 // golang interface types are stored as a tuple of (type, value). A single i==nil check is not enough to
 // determine if a pointer that implements an interface is nil. This function checks if the interface is nil
 // by checking if the pointer itself is nil.
@@ -61,3 +98,8 @@ func isInterfaceNil(i interface{}) bool { value := reflect.ValueOf(i) return value.Kind() == reflect.Ptr && value.IsNil() } + +func isValidSignature(publicKey *bls.PublicKey, signature *bls.Signature, payload []byte) bool { + isValid, err := bls.VerifyAggregateSignatureSinglePayload([]*bls.PublicKey{publicKey}, signature, payload) + return err == nil && isValid +} diff --git a/consensus/utils_test.go b/consensus/utils_test.go index d324dd5a4..c94994654 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -84,6 +84,46 @@ func TestIsProperlyFormedValidatorSet(t *testing.T) { } } +func TestIsProperlyFormedVote(t *testing.T) { + // Test nil value + { + require.False(t, isProperlyFormedVote(nil)) + } + + // Test zero-value view + { + vote := createDummyVoteMessage(0) + require.False(t, isProperlyFormedVote(vote)) + } + + // Test nil block hash + { + vote := createDummyVoteMessage(1) + vote.blockHash = nil + require.False(t, isProperlyFormedVote(vote)) + } + + // Test nil public key + { + vote := createDummyVoteMessage(1) + vote.publicKey = nil + require.False(t, isProperlyFormedVote(vote)) + } + + // Test nil signature + { + vote := createDummyVoteMessage(1) + vote.signature = nil + require.False(t, isProperlyFormedVote(vote)) + } + + // Test happy path + { + vote := createDummyVoteMessage(1) + require.True(t, isProperlyFormedVote(vote)) + } +} + func createDummyValidatorSet() []Validator { validators := []*validator{ { @@ -111,11 +151,17 @@ func createDummyBlock() *block { } func createDummyVoteMessage(view uint64) *voteMessage { + blockHash := createDummyBlockHash() + signaturePayload := GetVoteSignaturePayload(view, blockHash) + + blsPrivateKey, _ := bls.NewPrivateKey() + blsSignature, _ := blsPrivateKey.Sign(signaturePayload[:]) + return &voteMessage{ - blockHash: createDummyBlockHash(), + blockHash: blockHash, view: view, - publicKey: createDummyBLSPublicKey(), - signature: createDummyBLSSignature(), + publicKey: 
blsPrivateKey.PublicKey(), + signature: blsSignature, } } From 52b89625e38a68b57578d791fd2d10cd4e8b9e98 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 11 Sep 2023 19:06:24 -0400 Subject: [PATCH 195/762] Fast HotStuff Timeout Msg Storage (#650) * Fast HotStuff Timeout Msg Storage * Address Piotr's comments * Store the timeout message --- consensus/event_loop.go | 90 +++++++++++++++++++++++++++++++++--- consensus/event_loop_test.go | 81 ++++++++++++++++++++++++++++++++ consensus/types.go | 1 + consensus/utils.go | 63 ++++++++++++++++++++----- consensus/utils_test.go | 53 +++++++++++++++++++-- 5 files changed, 267 insertions(+), 21 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 248270bfd..6327f7c79 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -194,7 +194,7 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { } // Check if the vote is stale - if isStaleVote(fc.currentView, vote) { + if isStaleView(fc.currentView, vote.GetView()) { return errors.Errorf("FastHotStuffEventLoop.ProcessValidatorVote: Vote has a stale view %d", vote.GetView()) } @@ -237,8 +237,74 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { return nil } -func (pc *FastHotStuffEventLoop) ProcessTimeoutMsg( /* TODO */ ) { - // TODO +// ProcessValidatorTimeout captures an incoming timeout message from a validator. This module has no knowledge +// of who the leader is for a given view, so it is up to the caller to decide whether to process the timeout +// message or not. If a timeout message is passed here, then the consensus instance will store it until +// it can construct a QC with it or until the timeout's view has gone stale. +// +// This function does not directly check if the timeout results in a stake weighted super majority to build +// a timeout QC. 
Instead, it stores the timeout locally and waits for the crank timer to determine +// when to run the super majority timeout check, and to signal the caller that we can construct a timeout QC. +// +// Reference implementation: +// https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L958 +func (fc *FastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) error { + // Grab the consensus instance's lock + fc.lock.Lock() + defer fc.lock.Unlock() + + // Ensure the consensus instance is running. This guarantees that the chain tip and validator set + // have already been set. + if fc.status != consensusStatusRunning { + return errors.New("FastHotStuffEventLoop.ProcessValidatorTimeout: Consensus instance is not running") + } + + // Do a basic integrity check on the timeout message + if !isProperlyFormedTimeout(timeout) { + return errors.New("FastHotStuffEventLoop.ProcessValidatorTimeout: Malformed timeout message") + } + + // Check if the timeout is stale + if isStaleView(fc.currentView, timeout.GetView()) { + return errors.Errorf("FastHotStuffEventLoop.ProcessValidatorTimeout: Timeout has a stale view %d", timeout.GetView()) + } + + // Check if the public key has already voted for this view. The protocol does not allow + // a validator to time out for a view it has already voted on. + if fc.hasVotedForView(timeout.GetPublicKey(), timeout.GetView()) { + return errors.Errorf( + "FastHotStuffEventLoop.ProcessValidatorTimeout: validator %s has already voted for view %d", + timeout.GetPublicKey().ToString(), + timeout.GetView(), + ) + } + + // Check if the public key has already timed out for this view. The protocol does not allow + // for a validator to time out more than once for the same view. 
+	if fc.hasTimedOutForView(timeout.GetPublicKey(), timeout.GetView()) {
+		return errors.Errorf(
+			"FastHotStuffEventLoop.ProcessValidatorTimeout: validator %s has already timed out for view %d",
+			timeout.GetPublicKey().ToString(),
+			timeout.GetView(),
+		)
+	}
+
+	// Compute the value sha3-256(timeout.View, timeout.HighQC.View)
+	timeoutSignaturePayload := GetTimeoutSignaturePayload(timeout.GetView(), timeout.GetHighQC().GetView())
+
+	// Verify the timeout signature
+	if !isValidSignature(timeout.GetPublicKey(), timeout.GetSignature(), timeoutSignaturePayload[:]) {
+		return errors.New("FastHotStuffEventLoop.ProcessValidatorTimeout: Invalid signature")
+	}
+
+	// Note: we do not check if the timeout is for the current view. Nodes in the network are expected to have
+	// slightly different timings and may be at different views. To make this code resilient to timing
+	// differences between nodes, we simply store the timeout as long as it's properly formed and not stale.
+	// Stored timeouts will be evicted once we advance beyond them.
+
+	fc.storeTimeout(timeout)
+
+	return nil
 }
 
 func (fc *FastHotStuffEventLoop) ConstructVoteQC( /* TODO */ ) {
@@ -371,7 +437,7 @@ func (fc *FastHotStuffEventLoop) evictStaleVotesAndTimeouts() {
 	// Evict stale vote messages
 	for blockHash, voters := range fc.votesSeen {
 		for _, vote := range voters {
-			if isStaleVote(fc.currentView, vote) {
+			if isStaleView(fc.currentView, vote.GetView()) {
 				// Each block is proposed at a known view, and has an immutable block hash. Votes are signed on the
@@ -384,7 +450,7 @@ func (fc *FastHotStuffEventLoop) evictStaleVotesAndTimeouts() { // Evict stale timeout messages for view := range fc.timeoutsSeen { - if fc.currentView > view+1 { + if isStaleView(fc.currentView, view) { delete(fc.timeoutsSeen, view) } } @@ -422,6 +488,16 @@ func (fc *FastHotStuffEventLoop) hasVotedForView(publicKey *bls.PublicKey, view return false } +func (fc *FastHotStuffEventLoop) storeTimeout(timeout TimeoutMessage) { + timeoutsForView, ok := fc.timeoutsSeen[timeout.GetView()] + if !ok { + timeoutsForView = make(map[string]TimeoutMessage) + fc.timeoutsSeen[timeout.GetView()] = timeoutsForView + } + + timeoutsForView[timeout.GetPublicKey().ToString()] = timeout +} + func (fc *FastHotStuffEventLoop) hasTimedOutForView(publicKey *bls.PublicKey, view uint64) bool { timeoutsForView, ok := fc.timeoutsSeen[view] if !ok { @@ -434,6 +510,6 @@ func (fc *FastHotStuffEventLoop) hasTimedOutForView(publicKey *bls.PublicKey, vi return ok } -func isStaleVote(currentView uint64, vote VoteMessage) bool { - return currentView > vote.GetView()+1 +func isStaleView(currentView uint64, testView uint64) bool { + return currentView > testView+1 } diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 8380b6d79..b8c029b45 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -337,6 +337,87 @@ func TestProcessValidatorVote(t *testing.T) { fc.Stop() } +func TestProcessValidatorTimeout(t *testing.T) { + oneHourInNanoSecs := time.Duration(3600000000000) + + fc := NewFastHotStuffEventLoop() + + // BlockHeight = 1, Current View = 2 + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) + require.NoError(t, err) + + // Start the event loop + fc.Start() + + // Current View = 3 + { + currentView, err := fc.AdvanceView() + require.NoError(t, err) + require.Equal(t, uint64(3), currentView) + } + + // Test with malformed timeout + { + err := fc.ProcessValidatorTimeout(nil) + 
require.Error(t, err) + require.Contains(t, err.Error(), "Malformed timeout message") + } + + // Test invalid signature + { + timeout := createDummyTimeoutMessage(3) + timeout.signature = createDummyBLSSignature() + err := fc.ProcessValidatorTimeout(timeout) + require.Error(t, err) + require.Contains(t, err.Error(), "Invalid signature") + } + + // Test with stale view + { + timeout := createDummyTimeoutMessage(1) + err := fc.ProcessValidatorTimeout(timeout) + require.Error(t, err) + require.Contains(t, err.Error(), "Timeout has a stale view") + } + + // Test when we've already seen a vote from the validator for the same view + { + timeout := createDummyTimeoutMessage(3) + vote := createDummyVoteMessage(3) + + fc.votesSeen[GetVoteSignaturePayload(vote.GetView(), vote.GetBlockHash())] = map[string]VoteMessage{ + timeout.publicKey.ToString(): vote, + } + + err = fc.ProcessValidatorTimeout(timeout) + require.Error(t, err) + require.Contains(t, err.Error(), "has already voted for view") + } + + // Test when we've already seen a timeout from the validator for the same view + { + timeout := createDummyTimeoutMessage(3) + + fc.timeoutsSeen[timeout.view] = map[string]TimeoutMessage{ + timeout.publicKey.ToString(): timeout, + } + + err = fc.ProcessValidatorTimeout(timeout) + require.Error(t, err) + require.Contains(t, err.Error(), "has already timed out for view") + } + + // Test happy path + { + timeout := createDummyTimeoutMessage(3) + err := fc.ProcessValidatorTimeout(timeout) + require.NoError(t, err) + } + + // Stop the event loop + fc.Stop() +} + func TestFastHotStuffEventLoopStartStop(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) tenSecondsInNanoSecs := time.Duration(10000000000) diff --git a/consensus/types.go b/consensus/types.go index 682d6a70d..59ab76aca 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -71,6 +71,7 @@ type TimeoutMessage interface { GetHighQC() QuorumCertificate GetPublicKey() *bls.PublicKey + // The validator's BLS 
signature of the (View, HighQC.View) pair. GetSignature() *bls.Signature } diff --git a/consensus/utils.go b/consensus/utils.go index 9e915fbc5..0bcfd0465 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -24,6 +24,19 @@ func GetVoteSignaturePayload(view uint64, blockHash BlockHash) [32]byte { return sha3.Sum256(append(viewBytes, blockHashBytes[:]...)) } +// When timing out for a view, validators sign the payload sha3-256(View, HighQCView) with their BLS +// private key. This hash guarantees that the view and high QC view fields in a TimeoutMessage +// have not been tampered with. +func GetTimeoutSignaturePayload(view uint64, highQCView uint64) [32]byte { + viewBytes := make([]byte, 8) + binary.BigEndian.PutUint64(viewBytes, view) + + highQCViewBytes := make([]byte, 8) + binary.BigEndian.PutUint64(highQCViewBytes, highQCView) + + return sha3.Sum256(append(viewBytes, highQCViewBytes...)) +} + // This function checks if the block is properly formed. These are all surface level checks that // ensure that critical fields in the block are not nil so that the code in this package does not // panic. 
@@ -39,17 +52,7 @@ func isProperlyFormedBlock(block Block) bool { } // The block hash and QC must be non-nil - if isInterfaceNil(block.GetBlockHash()) || isInterfaceNil(block.GetQC()) { - return false - } - - qc := block.GetQC() - - // The QC fields must be non-nil and the view non-zero - if isInterfaceNil(qc.GetAggregatedSignature()) || - isInterfaceNil(qc.GetBlockHash()) || - qc.GetSignersList() == nil || - qc.GetView() == 0 { + if isInterfaceNil(block.GetBlockHash()) || !isProperlyFormedQC(block.GetQC()) { return false } @@ -87,6 +90,44 @@ func isProperlyFormedVote(vote VoteMessage) bool { return true } +func isProperlyFormedTimeout(timeout TimeoutMessage) bool { + // The timeout must be non-nil + if isInterfaceNil(timeout) { + return false + } + + // The view must be non-zero and the high QC non-nil + if timeout.GetView() == 0 || isInterfaceNil(timeout.GetHighQC()) { + return false + } + + // The signature and public key must be non-nil + if timeout.GetSignature() == nil || timeout.GetPublicKey() == nil { + return false + } + + return true +} + +func isProperlyFormedQC(qc QuorumCertificate) bool { + // The QC must be non-nil + if isInterfaceNil(qc) { + return false + } + + // The view must be non-zero and the aggregated signature non-nil + if qc.GetView() == 0 || isInterfaceNil(qc.GetAggregatedSignature()) { + return false + } + + // The signers list must be non-nil + if qc.GetSignersList() == nil { + return false + } + + return true +} + // golang interface types are stored as a tuple of (type, value). A single i==nil check is not enough to // determine if a pointer that implements an interface is nil. This function checks if the interface is nil // by checking if the pointer itself is nil. 
diff --git a/consensus/utils_test.go b/consensus/utils_test.go index c94994654..207f996b5 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -124,6 +124,46 @@ func TestIsProperlyFormedVote(t *testing.T) { } } +func TestIsProperlyFormedTimeout(t *testing.T) { + // Test nil value + { + require.False(t, isProperlyFormedTimeout(nil)) + } + + // Test zero-value view + { + timeout := createDummyTimeoutMessage(0) + require.False(t, isProperlyFormedTimeout(timeout)) + } + + // Test nil high QC + { + timeout := createDummyTimeoutMessage(1) + timeout.highQC = nil + require.False(t, isProperlyFormedTimeout(timeout)) + } + + // Test nil public key + { + timeout := createDummyTimeoutMessage(1) + timeout.publicKey = nil + require.False(t, isProperlyFormedTimeout(timeout)) + } + + // Test nil signature + { + timeout := createDummyTimeoutMessage(1) + timeout.signature = nil + require.False(t, isProperlyFormedTimeout(timeout)) + } + + // Test happy path + { + timeout := createDummyTimeoutMessage(1) + require.True(t, isProperlyFormedTimeout(timeout)) + } +} + func createDummyValidatorSet() []Validator { validators := []*validator{ { @@ -166,11 +206,18 @@ func createDummyVoteMessage(view uint64) *voteMessage { } func createDummyTimeoutMessage(view uint64) *timeoutMessage { + highQC := createDummyQC() + + signaturePayload := GetTimeoutSignaturePayload(view, highQC.view) + + blsPrivateKey, _ := bls.NewPrivateKey() + blsSignature, _ := blsPrivateKey.Sign(signaturePayload[:]) + return &timeoutMessage{ - highQC: createDummyQC(), + highQC: highQC, view: view, - publicKey: createDummyBLSPublicKey(), - signature: createDummyBLSSignature(), + publicKey: blsPrivateKey.PublicKey(), + signature: blsSignature, } } From bc29cf041b577814d22d167c64d3e348aa0b521f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 11 Sep 2023 19:09:39 -0400 Subject: [PATCH 196/762] Fast HotStuff Timeout Exponential Backoff (#654) * Fast HotStuff 
Timeout Exponential Backoff * Fix typo --- consensus/event_loop.go | 43 +++++++++++++++++++--- consensus/event_loop_test.go | 71 ++++++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+), 5 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 6327f7c79..8312d9262 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -106,8 +106,8 @@ func (fc *FastHotStuffEventLoop) AdvanceView() (uint64, error) { // Evict all stale votes and timeouts fc.evictStaleVotesAndTimeouts() - // Signal the event loop to reset the internal timers - fc.resetEventLoopSignal <- struct{}{} + // Recompute the event loop's next ETAs + fc.resetEventLoopTimers() return fc.currentView, nil } @@ -160,8 +160,8 @@ func (fc *FastHotStuffEventLoop) ProcessSafeBlock(block Block, validators []Vali View: fc.chainTip.GetView(), } - // Signal the event loop to reset the internal timers - fc.resetEventLoopSignal <- struct{}{} + // Recompute the event loop's next ETAs + fc.resetEventLoopTimers() return nil } @@ -389,7 +389,8 @@ func (fc *FastHotStuffEventLoop) runEventLoop() { } case <-fc.resetEventLoopSignal: { - // TODO + // Do nothing. We use this signal purely to refresh the timers above + // and rerun another iteration of the event loop. } case <-fc.stopSignal: { @@ -415,6 +416,38 @@ func (fc *FastHotStuffEventLoop) IsRunning() bool { return fc.status == consensusStatusRunning } +// resetEventLoopTimers recomputes the nextBlockConstructionTimeStamp and nextTimeoutTimeStamp +// values and signals the event loop to rerun. +func (fc *FastHotStuffEventLoop) resetEventLoopTimers() { + // Compute the next block construction ETA + fc.nextBlockConstructionTimeStamp = time.Now().Add(fc.blockConstructionCadence) + + // Compute the next timeout ETA. We use exponential back-off for timeouts when there are + // multiple consecutive timeouts. We use the difference between the current view and the + // chain tip's view to determine this. 
The current view can only drift from the chain tip's + // view as a result of timeouts. This guarantees that the number of consecutive timeouts is + // always: max(currentView - chainTip.GetView() - 1, 0). + + timeoutDuration := fc.timeoutBaseDuration + + // Check if we have timed out at all for the last n view. If so, we apply exponential + // back-off to the timeout base duration. + if fc.chainTip.GetView() < fc.currentView-1 { + // Note, there is no risk of underflow here because the following is guaranteed: + // currentView > chainTip.GetView() + 1. + numTimeouts := fc.currentView - fc.chainTip.GetView() - 1 + + // Compute the exponential back-off: nextTimeoutDuration * 2^numTimeouts + timeoutDuration = fc.timeoutBaseDuration << numTimeouts + } + + // Compute the next timeout ETA + fc.nextTimeoutTimeStamp = time.Now().Add(timeoutDuration) + + // Signal the event loop to rerun + fc.resetEventLoopSignal <- struct{}{} +} + // Evict all locally stored votes and timeout messages with stale views. We can safely use the current // view to determine what is stale. The consensus mechanism will never construct a block with a view // that's lower than its current view. 
Consider the following: diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index b8c029b45..dda639616 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -418,6 +418,77 @@ func TestProcessValidatorTimeout(t *testing.T) { fc.Stop() } +func TestResetEventLoopSignal(t *testing.T) { + oneHourInNanoSecs := time.Duration(3600000000000) + tenSecondsInNanoSecs := time.Duration(10000000000) + + fc := NewFastHotStuffEventLoop() + err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) + require.NoError(t, err) + + // Start the event loop + fc.Start() + + // Confirm the ETAs for the block construction and timeout timers + require.Greater(t, fc.nextBlockConstructionTimeStamp, time.Now().Add( + oneHourInNanoSecs-tenSecondsInNanoSecs, // 1 hour away + )) + require.Greater(t, fc.nextTimeoutTimeStamp, time.Now().Add( + 2*oneHourInNanoSecs-tenSecondsInNanoSecs, // 2 hours away + )) + require.Less(t, fc.nextTimeoutTimeStamp, time.Now().Add( + 2*oneHourInNanoSecs+tenSecondsInNanoSecs, // 2 hours away + )) + + // Advance the view to simulate a timeout + _, err = fc.AdvanceView() + require.NoError(t, err) + + // Confirm the ETAs for the block construction and timeout timers + require.Greater(t, fc.nextBlockConstructionTimeStamp, time.Now().Add( + oneHourInNanoSecs-tenSecondsInNanoSecs, // 1 hour away + )) + require.Greater(t, fc.nextTimeoutTimeStamp, time.Now().Add( + 4*oneHourInNanoSecs-tenSecondsInNanoSecs, // 2 hours * 2 = 4 hours away + )) + require.Less(t, fc.nextTimeoutTimeStamp, time.Now().Add( + 4*oneHourInNanoSecs+tenSecondsInNanoSecs, // 2 hours * 2 = 4 hours away + )) + + // Advance the view to simulate a 2nd timeout + _, err = fc.AdvanceView() + require.NoError(t, err) + + // Confirm the ETAs for the block construction and timeout timers + require.Greater(t, fc.nextBlockConstructionTimeStamp, time.Now().Add( + oneHourInNanoSecs-tenSecondsInNanoSecs, // 1 hour away + )) + 
require.Greater(t, fc.nextTimeoutTimeStamp, time.Now().Add( + 8*oneHourInNanoSecs-tenSecondsInNanoSecs, // 2 hours * 2^2 = 8 hours away + )) + require.Less(t, fc.nextTimeoutTimeStamp, time.Now().Add( + 8*oneHourInNanoSecs+tenSecondsInNanoSecs, // 2 hours * 2 = 8 hours away + )) + + // Advance the view to simulate a 3nd timeout + _, err = fc.AdvanceView() + require.NoError(t, err) + + // Confirm the ETAs for the block construction and timeout timers + require.Greater(t, fc.nextBlockConstructionTimeStamp, time.Now().Add( + oneHourInNanoSecs-tenSecondsInNanoSecs, // 1 hour away + )) + require.Greater(t, fc.nextTimeoutTimeStamp, time.Now().Add( + 16*oneHourInNanoSecs-tenSecondsInNanoSecs, // 2 hours * 2^3 = 16 hours away + )) + require.Less(t, fc.nextTimeoutTimeStamp, time.Now().Add( + 16*oneHourInNanoSecs+tenSecondsInNanoSecs, // 2 hours * 2^3 = 16 hours away + )) + + // Stop the event loop + fc.Stop() +} + func TestFastHotStuffEventLoopStartStop(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) tenSecondsInNanoSecs := time.Duration(10000000000) From 57cf5b7b5442750829b501fded00e801a05fba0b Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 12 Sep 2023 08:51:01 -0400 Subject: [PATCH 197/762] Implement ScheduledTask Pattern for FastHotStuffEventLoop Timers (#656) * Implement scheduled task pattern for FastHotStuffEventLoop timers * Add GetDuration function * Clean up comments * Address Nina's comments --- consensus/scheduled_task.go | 62 ++++++++++++++++++++++++++++++++ consensus/scheduled_task_test.go | 47 ++++++++++++++++++++++++ 2 files changed, 109 insertions(+) create mode 100644 consensus/scheduled_task.go create mode 100644 consensus/scheduled_task_test.go diff --git a/consensus/scheduled_task.go b/consensus/scheduled_task.go new file mode 100644 index 000000000..eab855634 --- /dev/null +++ b/consensus/scheduled_task.go @@ -0,0 +1,62 @@ +package consensus + +import ( + "sync" + "time" +) + +// 
ScheduledTask is a thread-safe wrapper around time.Timer that allows for creating tasks that +// can be scheduled to execute at a later time with pre-specified params. Both the params and the +// task are fully defined at the time of scheduling. Once a task has been scheduled, it cannot +// be modified. However, tasks can be cancelled and rescheduled. +// +// This pattern is useful for spawning off tasks that we want to run after some specified amount +// of time, but still want to have the ability to cancel. +type ScheduledTask[TaskParam any] struct { + lock sync.RWMutex + timer *time.Timer + duration time.Duration +} + +func NewScheduledTask[TaskParam any]() *ScheduledTask[TaskParam] { + return &ScheduledTask[TaskParam]{ + lock: sync.RWMutex{}, + timer: nil, + } +} + +// Schedule a new task to be executed after the countdown duration. If there is an existing scheduled +// task, it will be cancelled and replaced with the new task. +func (t *ScheduledTask[TaskParam]) Schedule(duration time.Duration, param TaskParam, task func(TaskParam)) { + t.lock.Lock() + defer t.lock.Unlock() + + if t.timer != nil { + t.timer.Stop() + } + + // Update the duration struct field so it's available to external callers. This struct + // field has no other purpose. + t.duration = duration + + // Replacing the timer results in it being garbage collected, so this is entirely safe. 
+ t.timer = time.AfterFunc(duration, func() { + task(param) + }) +} + +func (t *ScheduledTask[TaskParam]) Cancel() { + t.lock.Lock() + defer t.lock.Unlock() + + if t.timer != nil { + t.timer.Stop() + } +} + +func (t *ScheduledTask[TaskParam]) GetDuration() time.Duration { + t.lock.RLock() + defer t.lock.RUnlock() + + return t.duration +} diff --git a/consensus/scheduled_task_test.go b/consensus/scheduled_task_test.go new file mode 100644 index 000000000..3639357f5 --- /dev/null +++ b/consensus/scheduled_task_test.go @@ -0,0 +1,47 @@ +package consensus + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestScheduledTask(t *testing.T) { + + // Test short scheduled task + { + task := NewScheduledTask[uint64]() + + executedTaskParam := uint64(0) + task.Schedule(time.Microsecond, 100, func(param uint64) { + executedTaskParam = param + }) + time.Sleep(time.Second / 2) + + // The task should have executed so this value will now be 100. + require.Equal(t, uint64(100), executedTaskParam) + + // Confirm the last duration of the task was 1 microsecond. + require.Equal(t, time.Microsecond, task.GetDuration()) + } + + // Test long scheduled task that's not expected to execute + { + task := NewScheduledTask[uint64]() + + executedTaskParam := uint64(0) + task.Schedule(time.Hour, 100, func(param uint64) { + executedTaskParam = param + }) + + // The task should not have executed so this value will remain 0. + require.Equal(t, uint64(0), executedTaskParam) + + // Confirm the last duration of the task was 1 hour. + require.Equal(t, time.Hour, task.GetDuration()) + + // Cancel the task. 
+ task.Cancel() + } +} From f1155c86030c25c7d1095a1be5e1be20624be7e7 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 12 Sep 2023 08:54:48 -0400 Subject: [PATCH 198/762] Migrate Fast-HotStuff Event Loop to Use ScheduledTasks (#657) * Migrate event loop to ScheduledTask pattern * Address Nina's comments --- consensus/event_loop.go | 123 +++++++++++------------------------ consensus/event_loop_test.go | 54 +++------------ consensus/types.go | 16 ++--- 3 files changed, 52 insertions(+), 141 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 8312d9262..fb294f50b 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -1,7 +1,6 @@ package consensus import ( - "sync" "time" "github.com/pkg/errors" @@ -11,9 +10,9 @@ import ( func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { return &FastHotStuffEventLoop{ - status: consensusStatusNotInitialized, - startGroup: sync.WaitGroup{}, - stopGroup: sync.WaitGroup{}, + status: consensusStatusNotInitialized, + nextBlockConstructionTask: NewScheduledTask[uint64](), + nextTimeoutTask: NewScheduledTask[uint64](), } } @@ -44,7 +43,7 @@ func (fc *FastHotStuffEventLoop) Init( return errors.New("FastHotStuffEventLoop.Init: Consensus instance is already running") } - // Validate the timer durations + // Validate the scheduled task durations if blockConstructionCadence <= 0 { return errors.New("FastHotStuffEventLoop.Init: Block construction duration must be > 0") } @@ -71,9 +70,7 @@ func (fc *FastHotStuffEventLoop) Init( fc.votesSeen = make(map[[32]byte]map[string]VoteMessage) fc.timeoutsSeen = make(map[uint64]map[string]TimeoutMessage) - // Reset all internal and external channels used for signaling - fc.resetEventLoopSignal = make(chan interface{}, signalChannelBufferSize) - fc.stopSignal = make(chan interface{}, signalChannelBufferSize) + // Reset the external channel used for signaling fc.ConsensusEvents = make(chan *ConsensusEvent, 
signalChannelBufferSize) // Set the block construction and timeout base durations @@ -87,8 +84,8 @@ func (fc *FastHotStuffEventLoop) Init( } // AdvanceView is called when the chain tip has not changed but the consensus instance has signaled a -// timeout, and can advance to the next view. This function resets the timeout timer and crank timer -// for the next view. +// timeout, and can advance to the next view. This function resets the timeout scheduled task and block +// production scheduled task for the next view. func (fc *FastHotStuffEventLoop) AdvanceView() (uint64, error) { // Grab the consensus instance's lock fc.lock.Lock() @@ -106,15 +103,15 @@ func (fc *FastHotStuffEventLoop) AdvanceView() (uint64, error) { // Evict all stale votes and timeouts fc.evictStaleVotesAndTimeouts() - // Recompute the event loop's next ETAs - fc.resetEventLoopTimers() + // Schedule the next block construction and timeout scheduled tasks + fc.resetScheduledTasks() return fc.currentView, nil } // ProcessSafeBlock must only be called when the caller has accepted a new block, connected it // to the tip of the blockchain, and determined that the block is safe to vote on. Given such a -// block, this function resets the internal timers and state of the Fast HotStuff consensus that +// block, this function resets internal state and schedules the next block construction and timeout // determine the next action. The functions expects the following for the input params: // - block: the input block that was safely added to the blockchain and is safe to vote on // - validators: the validator set for the next block height @@ -141,7 +138,7 @@ func (fc *FastHotStuffEventLoop) ProcessSafeBlock(block Block, validators []Vali // Update the chain tip and validator set fc.chainTip = block - // We track the current view here so we know which view to start the timeout timer for. + // We track the current view here so we know which view to time out on later on. 
fc.currentView = block.GetView() + 1 // Update the validator set so we know when we have a QC from votes at the next block height @@ -160,8 +157,8 @@ func (fc *FastHotStuffEventLoop) ProcessSafeBlock(block Block, validators []Vali View: fc.chainTip.GetView(), } - // Recompute the event loop's next ETAs - fc.resetEventLoopTimers() + // Schedule the next block construction and timeout scheduled tasks + fc.resetScheduledTasks() return nil } @@ -243,7 +240,7 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { // it can construct a QC with it or until the timeout's view has gone stale. // // This function does not directly check if the timeout results in a stake weighted super majority to build -// a timeout QC. Instead, it stores the timeout locally and waits for the crank timer to determine +// a timeout QC. Instead, it stores the timeout locally and waits for the block production scheduled task to determine // when to run the super majority timeout check, and to signal the caller that we can construct a timeout QC. // // Reference implementation: @@ -315,8 +312,7 @@ func (fc *FastHotStuffEventLoop) ConstructTimeoutQC( /* TODO */ ) { // TODO } -// Sets the initial times for the block construction and timeout timers and starts -// the event loop building off of the current chain tip. +// Sets the initial times for the block construction and timeouts and starts scheduled tasks. 
func (fc *FastHotStuffEventLoop) Start() { fc.lock.Lock() defer fc.lock.Unlock() @@ -327,19 +323,11 @@ func (fc *FastHotStuffEventLoop) Start() { return } - // Set the initial times for the block construction and timeout timers - fc.nextBlockConstructionTimeStamp = time.Now().Add(fc.blockConstructionCadence) - fc.nextTimeoutTimeStamp = time.Now().Add(fc.timeoutBaseDuration) - - // Kick off the event loop in a separate goroutine - fc.startGroup.Add(1) - go fc.runEventLoop() - - // Wait for the event loop to start - fc.startGroup.Wait() - // Update the consensus status to mark it as running. fc.status = consensusStatusRunning + + // Set the initial block construction and timeout scheduled tasks + fc.resetScheduledTasks() } func (fc *FastHotStuffEventLoop) Stop() { @@ -352,54 +340,12 @@ func (fc *FastHotStuffEventLoop) Stop() { return } - // Signal the event loop to stop - fc.stopGroup.Add(1) - fc.stopSignal <- struct{}{} - - // Wait for the event loop to stop - fc.stopGroup.Wait() + // Cancel the next block construction and timeout scheduled tasks, if any. + fc.nextBlockConstructionTask.Cancel() + fc.nextTimeoutTask.Cancel() - // Update the consensus status + // Update the consensus status so it is no longer marked as running. fc.status = consensusStatusInitialized - - // Close all internal channels used for signaling - close(fc.resetEventLoopSignal) - close(fc.stopSignal) -} - -// Runs the internal event loop that waits for all internal or external signals. If the -// event loop is running, the consensus instance status must be set to consensusStatusRunning. -// Note, this function does not directly update the consensus status. To simplify the inner -// implementation of the loop, the caller who starts and stops should always be responsible -// for updating the status as it starts and stop the loop. 
-func (fc *FastHotStuffEventLoop) runEventLoop() { - // Signal that the event loop has started - fc.startGroup.Done() - - // Start the event loop - for { - select { - case <-time.After(time.Until(fc.nextBlockConstructionTimeStamp)): - { - // TODO - } - case <-time.After(time.Until(fc.nextTimeoutTimeStamp)): - { - // TODO - } - case <-fc.resetEventLoopSignal: - { - // Do nothing. We use this signal purely to refresh the timers above - // and rerun another iteration of the event loop. - } - case <-fc.stopSignal: - { - // Signal that the event loop has stopped - fc.stopGroup.Done() - return - } - } - } } func (fc *FastHotStuffEventLoop) IsInitialized() bool { @@ -416,12 +362,9 @@ func (fc *FastHotStuffEventLoop) IsRunning() bool { return fc.status == consensusStatusRunning } -// resetEventLoopTimers recomputes the nextBlockConstructionTimeStamp and nextTimeoutTimeStamp -// values and signals the event loop to rerun. -func (fc *FastHotStuffEventLoop) resetEventLoopTimers() { - // Compute the next block construction ETA - fc.nextBlockConstructionTimeStamp = time.Now().Add(fc.blockConstructionCadence) - +// resetScheduledTasks recomputes the nextBlockConstructionTimeStamp and nextTimeoutTimeStamp +// values, and reschedules the next block construction and timeout tasks. +func (fc *FastHotStuffEventLoop) resetScheduledTasks() { // Compute the next timeout ETA. We use exponential back-off for timeouts when there are // multiple consecutive timeouts. We use the difference between the current view and the // chain tip's view to determine this. The current view can only drift from the chain tip's @@ -441,11 +384,19 @@ func (fc *FastHotStuffEventLoop) resetEventLoopTimers() { timeoutDuration = fc.timeoutBaseDuration << numTimeouts } - // Compute the next timeout ETA - fc.nextTimeoutTimeStamp = time.Now().Add(timeoutDuration) + // Schedule the next block construction task. This will run with currentView param. 
+ fc.nextBlockConstructionTask.Schedule(fc.blockConstructionCadence, fc.currentView, fc.onBlockConstructionScheduledTask) - // Signal the event loop to rerun - fc.resetEventLoopSignal <- struct{}{} + // Schedule the next timeout task. This will run with currentView param. + fc.nextTimeoutTask.Schedule(timeoutDuration, fc.currentView, fc.onTimeoutScheduledTask) +} + +func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTask(blockConstructionView uint64) { + // TODO +} + +func (fc *FastHotStuffEventLoop) onTimeoutScheduledTask(timedOutView uint64) { + // TODO } // Evict all locally stored votes and timeout messages with stale views. We can safely use the current diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index dda639616..661620ac3 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -420,7 +420,6 @@ func TestProcessValidatorTimeout(t *testing.T) { func TestResetEventLoopSignal(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) - tenSecondsInNanoSecs := time.Duration(10000000000) fc := NewFastHotStuffEventLoop() err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) @@ -430,60 +429,32 @@ func TestResetEventLoopSignal(t *testing.T) { fc.Start() // Confirm the ETAs for the block construction and timeout timers - require.Greater(t, fc.nextBlockConstructionTimeStamp, time.Now().Add( - oneHourInNanoSecs-tenSecondsInNanoSecs, // 1 hour away - )) - require.Greater(t, fc.nextTimeoutTimeStamp, time.Now().Add( - 2*oneHourInNanoSecs-tenSecondsInNanoSecs, // 2 hours away - )) - require.Less(t, fc.nextTimeoutTimeStamp, time.Now().Add( - 2*oneHourInNanoSecs+tenSecondsInNanoSecs, // 2 hours away - )) + require.Equal(t, fc.nextBlockConstructionTask.GetDuration(), oneHourInNanoSecs) // 1 hour away + require.Equal(t, fc.nextTimeoutTask.GetDuration(), 2*oneHourInNanoSecs) // 2 hours away // Advance the view to simulate a timeout _, err = fc.AdvanceView() 
require.NoError(t, err) // Confirm the ETAs for the block construction and timeout timers - require.Greater(t, fc.nextBlockConstructionTimeStamp, time.Now().Add( - oneHourInNanoSecs-tenSecondsInNanoSecs, // 1 hour away - )) - require.Greater(t, fc.nextTimeoutTimeStamp, time.Now().Add( - 4*oneHourInNanoSecs-tenSecondsInNanoSecs, // 2 hours * 2 = 4 hours away - )) - require.Less(t, fc.nextTimeoutTimeStamp, time.Now().Add( - 4*oneHourInNanoSecs+tenSecondsInNanoSecs, // 2 hours * 2 = 4 hours away - )) + require.Equal(t, fc.nextBlockConstructionTask.GetDuration(), oneHourInNanoSecs) // 1 hour away + require.Equal(t, fc.nextTimeoutTask.GetDuration(), 4*oneHourInNanoSecs) // 2 hours * 2 = 4 hours away // Advance the view to simulate a 2nd timeout _, err = fc.AdvanceView() require.NoError(t, err) // Confirm the ETAs for the block construction and timeout timers - require.Greater(t, fc.nextBlockConstructionTimeStamp, time.Now().Add( - oneHourInNanoSecs-tenSecondsInNanoSecs, // 1 hour away - )) - require.Greater(t, fc.nextTimeoutTimeStamp, time.Now().Add( - 8*oneHourInNanoSecs-tenSecondsInNanoSecs, // 2 hours * 2^2 = 8 hours away - )) - require.Less(t, fc.nextTimeoutTimeStamp, time.Now().Add( - 8*oneHourInNanoSecs+tenSecondsInNanoSecs, // 2 hours * 2 = 8 hours away - )) + require.Equal(t, fc.nextBlockConstructionTask.GetDuration(), oneHourInNanoSecs) // 1 hour away + require.Equal(t, fc.nextTimeoutTask.GetDuration(), 8*oneHourInNanoSecs) // 2 hours * 2^2 = 8 hours away // Advance the view to simulate a 3nd timeout _, err = fc.AdvanceView() require.NoError(t, err) // Confirm the ETAs for the block construction and timeout timers - require.Greater(t, fc.nextBlockConstructionTimeStamp, time.Now().Add( - oneHourInNanoSecs-tenSecondsInNanoSecs, // 1 hour away - )) - require.Greater(t, fc.nextTimeoutTimeStamp, time.Now().Add( - 16*oneHourInNanoSecs-tenSecondsInNanoSecs, // 2 hours * 2^3 = 16 hours away - )) - require.Less(t, fc.nextTimeoutTimeStamp, time.Now().Add( - 
16*oneHourInNanoSecs+tenSecondsInNanoSecs, // 2 hours * 2^3 = 16 hours away - )) + require.Equal(t, fc.nextBlockConstructionTask.GetDuration(), oneHourInNanoSecs) // 1 hour away + require.Equal(t, fc.nextTimeoutTask.GetDuration(), 16*oneHourInNanoSecs) // 2 hours * 2^3 = 16 hours away // Stop the event loop fc.Stop() @@ -491,7 +462,6 @@ func TestResetEventLoopSignal(t *testing.T) { func TestFastHotStuffEventLoopStartStop(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) - tenSecondsInNanoSecs := time.Duration(10000000000) fc := NewFastHotStuffEventLoop() err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) @@ -504,12 +474,8 @@ func TestFastHotStuffEventLoopStartStop(t *testing.T) { require.Equal(t, consensusStatusRunning, fc.status) // Confirm that the ETAs for the block construction and timeout timers have been set - require.Greater(t, fc.nextBlockConstructionTimeStamp, time.Now().Add( - oneHourInNanoSecs-tenSecondsInNanoSecs, // Subtract 10 second buffer so this test ins't flaky - )) - require.Greater(t, fc.nextTimeoutTimeStamp, time.Now().Add( - 2*oneHourInNanoSecs-tenSecondsInNanoSecs, // Subtract 10 second buffer so this test ins't flaky - )) + require.Equal(t, fc.nextBlockConstructionTask.GetDuration(), oneHourInNanoSecs) + require.Equal(t, fc.nextTimeoutTask.GetDuration(), 2*oneHourInNanoSecs) // Stop the event loop fc.Stop() diff --git a/consensus/types.go b/consensus/types.go index 59ab76aca..3a4bd7031 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -103,7 +103,7 @@ const signalChannelBufferSize = 100 // does not know whether its role is that of a block proposer or a replica validator. 
// // Given a block that's at the tip of the current chain, this module maintains its own internal data structures -// and runs internal timers that handles all of the following: +// and runs an internal event loop that handles all of the following: // - Tracking of the current view, incrementing the view during timeouts, and computing exponential // back-off durations during consecutive timeouts // - Aggregation of votes and QC construction for the current block @@ -114,7 +114,7 @@ const signalChannelBufferSize = 100 // - Signaling its caller when it has a timeout QC for the current view // // When a new block is connected to the chain, the caller is expected to update the chain tip. The module -// resets all internal data structures and timers to handle all of the above based on the new chain tip. +// resets all internal data structures and scheduled tasks to handle all of the above based on the new chain tip. // // This module is very simple and only houses the logic that decides what action to perform next given the // current chain tip. The module does not track the history of blocks, and instead needs its caller to @@ -130,8 +130,8 @@ type FastHotStuffEventLoop struct { blockConstructionCadence time.Duration timeoutBaseDuration time.Duration - nextBlockConstructionTimeStamp time.Time - nextTimeoutTimeStamp time.Time + nextBlockConstructionTask *ScheduledTask[uint64] + nextTimeoutTask *ScheduledTask[uint64] // The latest block accepted by the caller. We only keep track of the latest safe block here because // it's the block we vote on, and construct a vote QC for. @@ -160,15 +160,9 @@ type FastHotStuffEventLoop struct { // Externally accessible channel for signals sent to the Server. 
ConsensusEvents chan *ConsensusEvent - // Internal channels used by this module to coordinate the event loop - resetEventLoopSignal chan interface{} - stopSignal chan interface{} - // Internal statuses and wait groups used to coordinate the start and stop operations for // the event loop. - status consensusStatus - startGroup sync.WaitGroup - stopGroup sync.WaitGroup + status consensusStatus } type consensusStatus byte From 04666cbda3a96328a872d9691c558b3c1340c490 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 13 Sep 2023 09:55:16 -0400 Subject: [PATCH 199/762] Implement Fast-HotStuff Timeout Signal (#658) --- consensus/event_loop.go | 33 +++++++++++++++++++++++++--- consensus/event_loop_test.go | 42 ++++++++++++++++++++++++++++++++++++ consensus/scheduled_task.go | 9 ++++++++ 3 files changed, 81 insertions(+), 3 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index fb294f50b..916994a45 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -388,15 +388,42 @@ func (fc *FastHotStuffEventLoop) resetScheduledTasks() { fc.nextBlockConstructionTask.Schedule(fc.blockConstructionCadence, fc.currentView, fc.onBlockConstructionScheduledTask) // Schedule the next timeout task. This will run with currentView param. - fc.nextTimeoutTask.Schedule(timeoutDuration, fc.currentView, fc.onTimeoutScheduledTask) + fc.nextTimeoutTask.Schedule(timeoutDuration, fc.currentView, fc.onTimeoutScheduledTaskExecuted) } func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTask(blockConstructionView uint64) { // TODO } -func (fc *FastHotStuffEventLoop) onTimeoutScheduledTask(timedOutView uint64) { - // TODO +// When this function is triggered, it means that we have reached out the timeout ETA for the +// timedOutView. In the event of a timeout, we signal the server that we are ready to time out +// and cancel the timeout task. 
+func (fc *FastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uint64) { + fc.lock.Lock() + defer fc.lock.Unlock() + + // Check if the consensus instance is running. If it's not running, then there's nothing + // to do here. + if fc.status != consensusStatusRunning { + return + } + + // Check if the timed out view is stale. If it's stale, then there's nothing to do here. + // The view may be stale in the race condition where the view advanced at the exact moment + // this task began to execute and wait for the event loop's lock at the top of this function. + if fc.currentView != timedOutView { + return + } + + // Signal the server that we are ready to time out + fc.ConsensusEvents <- &ConsensusEvent{ + EventType: ConsensusEventTypeTimeout, // The timeout event type + View: timedOutView, // The view we timed out + BlockHash: fc.chainTip.GetBlockHash(), // The last block we saw + } + + // Cancel the timeout task. The server will reschedule it when it advances the view. + fc.nextTimeoutTask.Cancel() } // Evict all locally stored votes and timeout messages with stale views. 
We can safely use the current diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 661620ac3..8af97e8e2 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -418,6 +418,48 @@ func TestProcessValidatorTimeout(t *testing.T) { fc.Stop() } +func TestTimeoutScheduledTaskExecuted(t *testing.T) { + oneHourInNanoSecs := time.Duration(3600000000000) + oneMilliSecondInNanoSeconds := time.Duration(1000000) + + dummyBlock := createDummyBlock() + + fc := NewFastHotStuffEventLoop() + err := fc.Init(oneHourInNanoSecs, oneMilliSecondInNanoSeconds, dummyBlock, createDummyValidatorSet()) + require.NoError(t, err) + + // Start the event loop + fc.Start() + + // Wait for the timeout signal to be sent + timeoutSignal := <-fc.ConsensusEvents + + // Confirm that the timeout signal is for the the expected view + require.Equal(t, timeoutSignal.EventType, ConsensusEventTypeTimeout) + require.Equal(t, timeoutSignal.View, dummyBlock.GetView()+1) + require.Equal(t, timeoutSignal.BlockHash.GetValue(), dummyBlock.GetBlockHash().GetValue()) + + // Confirm that the timeout is no longer running + require.False(t, fc.nextTimeoutTask.IsScheduled()) + + // Advance the view, which should reset the timeout scheduled task + fc.AdvanceView() + + // Wait for the timeout signal to be sent + timeoutSignal = <-fc.ConsensusEvents + + // Confirm that the timeout signal is for the the expected view + require.Equal(t, timeoutSignal.EventType, ConsensusEventTypeTimeout) + require.Equal(t, timeoutSignal.View, dummyBlock.GetView()+2) + require.Equal(t, timeoutSignal.BlockHash.GetValue(), dummyBlock.GetBlockHash().GetValue()) + + // Confirm that the timeout is no longer running + require.False(t, fc.nextTimeoutTask.IsScheduled()) + + // Stop the event loop + fc.Stop() +} + func TestResetEventLoopSignal(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) diff --git a/consensus/scheduled_task.go b/consensus/scheduled_task.go index eab855634..d971656ce 
100644 --- a/consensus/scheduled_task.go +++ b/consensus/scheduled_task.go @@ -52,6 +52,8 @@ func (t *ScheduledTask[TaskParam]) Cancel() { if t.timer != nil { t.timer.Stop() } + + t.timer = nil } func (t *ScheduledTask[TaskParam]) GetDuration() time.Duration { @@ -60,3 +62,10 @@ func (t *ScheduledTask[TaskParam]) GetDuration() time.Duration { return t.duration } + +func (t *ScheduledTask[TaskParam]) IsScheduled() bool { + t.lock.RLock() + defer t.lock.RUnlock() + + return t.timer != nil +} From 48754729847db48529a5826d69d0c73dc012486f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 13 Sep 2023 10:03:46 -0400 Subject: [PATCH 200/762] Cross-validate View and QC View in Block and Timeout Msgs (#664) --- consensus/event_loop_test.go | 102 +++++++++++++++++------------------ consensus/utils.go | 10 ++++ consensus/utils_test.go | 20 +++---- 3 files changed, 71 insertions(+), 61 deletions(-) diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 8af97e8e2..25d475f24 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -21,14 +21,14 @@ func TestFastHotStuffInitialization(t *testing.T) { // Test Init() function with invalid block construction cadence { fc := NewFastHotStuffEventLoop() - err := fc.Init(0, 1, createDummyBlock(), createDummyValidatorSet()) + err := fc.Init(0, 1, createDummyBlock(2), createDummyValidatorSet()) require.Error(t, err) } // Test Init() function with invalid timeout duration { fc := NewFastHotStuffEventLoop() - err := fc.Init(1, 0, createDummyBlock(), createDummyValidatorSet()) + err := fc.Init(1, 0, createDummyBlock(2), createDummyValidatorSet()) require.Error(t, err) } @@ -42,14 +42,14 @@ func TestFastHotStuffInitialization(t *testing.T) { // Test Init() function with malformed validator set { fc := NewFastHotStuffEventLoop() - err := fc.Init(1, 1, createDummyBlock(), nil) + err := fc.Init(1, 1, createDummyBlock(2), nil) require.Error(t, err) } 
// Test Init() function with valid parameters { fc := NewFastHotStuffEventLoop() - err := fc.Init(100, 101, createDummyBlock(), createDummyValidatorSet()) + err := fc.Init(100, 101, createDummyBlock(2), createDummyValidatorSet()) require.NoError(t, err) require.Equal(t, consensusStatusInitialized, fc.status) @@ -58,13 +58,13 @@ func TestFastHotStuffInitialization(t *testing.T) { require.Equal(t, fc.status, consensusStatusInitialized) require.Equal(t, fc.chainTip.GetBlockHash().GetValue(), createDummyBlockHash().GetValue()) - require.Equal(t, fc.chainTip.GetView(), uint64(1)) + require.Equal(t, fc.chainTip.GetView(), uint64(2)) require.Equal(t, fc.chainTip.GetHeight(), uint64(1)) require.Equal(t, fc.blockConstructionCadence, time.Duration(100)) require.Equal(t, fc.timeoutBaseDuration, time.Duration(101)) - require.Equal(t, fc.currentView, uint64(2)) + require.Equal(t, fc.currentView, uint64(3)) require.Equal(t, len(fc.validatorsAtChainTip), 2) } } @@ -73,12 +73,12 @@ func TestFastHotStuffProcessSafeBlock(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) fc := NewFastHotStuffEventLoop() - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(2), createDummyValidatorSet()) require.NoError(t, err) // Test ProcessSafeBlock() function when consensus event loop is not running { - err := fc.ProcessSafeBlock(createDummyBlock(), createDummyValidatorSet()) + err := fc.ProcessSafeBlock(createDummyBlock(2), createDummyValidatorSet()) require.Error(t, err) } @@ -93,7 +93,7 @@ func TestFastHotStuffProcessSafeBlock(t *testing.T) { // Test ProcessSafeBlock() function with malformed validator set { - err := fc.ProcessSafeBlock(createDummyBlock(), nil) + err := fc.ProcessSafeBlock(createDummyBlock(2), nil) require.Error(t, err) } @@ -144,7 +144,7 @@ func TestFastHotStuffProcessSafeBlock(t *testing.T) { // Test ProcessSafeBlock() function with valid 
parameters { - nextBlock := createDummyBlock() + nextBlock := createDummyBlock(2) nextBlock.height = 2 nextBlock.view = 3 @@ -174,8 +174,8 @@ func TestAdvanceView(t *testing.T) { fc := NewFastHotStuffEventLoop() - // BlockHeight = 1, Current View = 2 - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) + // BlockHeight = 1, Current View = 3 + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(2), createDummyValidatorSet()) require.NoError(t, err) // Running AdvanceView() should fail because the consensus event loop is not running @@ -190,47 +190,47 @@ func TestAdvanceView(t *testing.T) { // Populate the votesSeen and timeoutsSeen maps with dummy data { fc.votesSeen = map[[32]byte]map[string]VoteMessage{ - {0}: { // blockHash = 0 - "pubKeyA": createDummyVoteMessage(0), - }, {1}: { // blockHash = 1 - "pubKeyB": createDummyVoteMessage(1), + "pubKeyA": createDummyVoteMessage(1), }, {2}: { // blockHash = 2 - "pubKeyC": createDummyVoteMessage(2), + "pubKeyB": createDummyVoteMessage(2), }, {3}: { // blockHash = 3 - "pubKeyD": createDummyVoteMessage(3), + "pubKeyC": createDummyVoteMessage(3), }, {4}: { // blockHash = 4 - "pubKeyE": createDummyVoteMessage(4), + "pubKeyD": createDummyVoteMessage(4), + }, + {5}: { // blockHash = 5 + "pubKeyE": createDummyVoteMessage(5), }, } fc.timeoutsSeen = map[uint64]map[string]TimeoutMessage{ - 0: { // view = 0 - "pubKeyA": createDummyTimeoutMessage(0), - }, 1: { // view = 1 - "pubKeyB": createDummyTimeoutMessage(1), + "pubKeyA": createDummyTimeoutMessage(1), }, 2: { // view = 2 - "pubKeyC": createDummyTimeoutMessage(2), + "pubKeyB": createDummyTimeoutMessage(2), }, 3: { // view = 3 - "pubKeyD": createDummyTimeoutMessage(3), + "pubKeyC": createDummyTimeoutMessage(3), }, 4: { // view = 4 - "pubKeyE": createDummyTimeoutMessage(4), + "pubKeyD": createDummyTimeoutMessage(4), + }, + 5: { // view = 5 + "pubKeyE": createDummyTimeoutMessage(5), }, } } - // Run AdvanceView() to 
view 3 + // Run AdvanceView() to view 4 { newView, err := fc.AdvanceView() require.NoError(t, err) - require.Equal(t, uint64(3), newView) + require.Equal(t, uint64(4), newView) } // Verify that vote and timeout messages haven't changed @@ -239,11 +239,11 @@ func TestAdvanceView(t *testing.T) { require.Equal(t, len(fc.timeoutsSeen), 3) } - // Run AdvanceView() to view 4 + // Run AdvanceView() to view 5 { newView, err := fc.AdvanceView() require.NoError(t, err) - require.Equal(t, uint64(4), newView) + require.Equal(t, uint64(5), newView) } // Verify that stale votes and timeouts have been evicted @@ -261,18 +261,18 @@ func TestProcessValidatorVote(t *testing.T) { fc := NewFastHotStuffEventLoop() - // BlockHeight = 1, Current View = 2 - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) + // BlockHeight = 1, Current View = 3 + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(2), createDummyValidatorSet()) require.NoError(t, err) // Start the event loop fc.Start() - // Current View = 3 + // Current View = 4 { currentView, err := fc.AdvanceView() require.NoError(t, err) - require.Equal(t, uint64(3), currentView) + require.Equal(t, uint64(4), currentView) } // Test with malformed vote @@ -284,7 +284,7 @@ func TestProcessValidatorVote(t *testing.T) { // Test invalid signature { - vote := createDummyVoteMessage(3) + vote := createDummyVoteMessage(4) vote.signature = createDummyBLSSignature() err := fc.ProcessValidatorVote(vote) require.Error(t, err) @@ -301,7 +301,7 @@ func TestProcessValidatorVote(t *testing.T) { // Test when we've already seen a vote from the validator for the same view { - vote := createDummyVoteMessage(3) + vote := createDummyVoteMessage(4) fc.votesSeen[GetVoteSignaturePayload(vote.GetView(), vote.GetBlockHash())] = map[string]VoteMessage{ vote.publicKey.ToString(): vote, } @@ -313,8 +313,8 @@ func TestProcessValidatorVote(t *testing.T) { // Test when we've already seen a timeout 
from the validator for the same view { - vote := createDummyVoteMessage(4) - timeout := createDummyTimeoutMessage(4) + vote := createDummyVoteMessage(5) + timeout := createDummyTimeoutMessage(5) timeout.publicKey = vote.publicKey fc.timeoutsSeen[timeout.GetView()] = map[string]TimeoutMessage{ @@ -328,7 +328,7 @@ func TestProcessValidatorVote(t *testing.T) { // Test happy path { - vote := createDummyVoteMessage(3) + vote := createDummyVoteMessage(4) err := fc.ProcessValidatorVote(vote) require.NoError(t, err) } @@ -342,18 +342,18 @@ func TestProcessValidatorTimeout(t *testing.T) { fc := NewFastHotStuffEventLoop() - // BlockHeight = 1, Current View = 2 - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) + // BlockHeight = 1, Current View = 3 + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(2), createDummyValidatorSet()) require.NoError(t, err) // Start the event loop fc.Start() - // Current View = 3 + // Current View = 4 { currentView, err := fc.AdvanceView() require.NoError(t, err) - require.Equal(t, uint64(3), currentView) + require.Equal(t, uint64(4), currentView) } // Test with malformed timeout @@ -365,7 +365,7 @@ func TestProcessValidatorTimeout(t *testing.T) { // Test invalid signature { - timeout := createDummyTimeoutMessage(3) + timeout := createDummyTimeoutMessage(4) timeout.signature = createDummyBLSSignature() err := fc.ProcessValidatorTimeout(timeout) require.Error(t, err) @@ -382,8 +382,8 @@ func TestProcessValidatorTimeout(t *testing.T) { // Test when we've already seen a vote from the validator for the same view { - timeout := createDummyTimeoutMessage(3) - vote := createDummyVoteMessage(3) + timeout := createDummyTimeoutMessage(4) + vote := createDummyVoteMessage(4) fc.votesSeen[GetVoteSignaturePayload(vote.GetView(), vote.GetBlockHash())] = map[string]VoteMessage{ timeout.publicKey.ToString(): vote, @@ -396,7 +396,7 @@ func TestProcessValidatorTimeout(t *testing.T) { // Test 
when we've already seen a timeout from the validator for the same view { - timeout := createDummyTimeoutMessage(3) + timeout := createDummyTimeoutMessage(4) fc.timeoutsSeen[timeout.view] = map[string]TimeoutMessage{ timeout.publicKey.ToString(): timeout, @@ -409,7 +409,7 @@ func TestProcessValidatorTimeout(t *testing.T) { // Test happy path { - timeout := createDummyTimeoutMessage(3) + timeout := createDummyTimeoutMessage(4) err := fc.ProcessValidatorTimeout(timeout) require.NoError(t, err) } @@ -422,7 +422,7 @@ func TestTimeoutScheduledTaskExecuted(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) oneMilliSecondInNanoSeconds := time.Duration(1000000) - dummyBlock := createDummyBlock() + dummyBlock := createDummyBlock(2) fc := NewFastHotStuffEventLoop() err := fc.Init(oneHourInNanoSecs, oneMilliSecondInNanoSeconds, dummyBlock, createDummyValidatorSet()) @@ -464,7 +464,7 @@ func TestResetEventLoopSignal(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) fc := NewFastHotStuffEventLoop() - err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) + err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, createDummyBlock(2), createDummyValidatorSet()) require.NoError(t, err) // Start the event loop @@ -506,7 +506,7 @@ func TestFastHotStuffEventLoopStartStop(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) fc := NewFastHotStuffEventLoop() - err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, createDummyBlock(), createDummyValidatorSet()) + err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, createDummyBlock(2), createDummyValidatorSet()) require.NoError(t, err) // Start the event loop diff --git a/consensus/utils.go b/consensus/utils.go index 0bcfd0465..a364d36a0 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -56,6 +56,11 @@ func isProperlyFormedBlock(block Block) bool { return false } + // The QC's view must be less than the block's view + if 
block.GetQC().GetView() >= block.GetView() { + return false + } + return true } @@ -106,6 +111,11 @@ func isProperlyFormedTimeout(timeout TimeoutMessage) bool { return false } + // The QC's view must be less than the timed out view + if timeout.GetHighQC().GetView() >= timeout.GetView() { + return false + } + return true } diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 207f996b5..db21e0e31 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -20,19 +20,19 @@ func TestIsProperlyFormedBlock(t *testing.T) { // Test zero height { - block := block{height: 0, view: 1, blockHash: createDummyBlockHash(), qc: createDummyQC()} + block := block{height: 0, view: 2, blockHash: createDummyBlockHash(), qc: createDummyQC(1)} require.False(t, isProperlyFormedBlock(&block)) } // Test zero view { - block := block{height: 1, view: 0, blockHash: createDummyBlockHash(), qc: createDummyQC()} + block := block{height: 1, view: 0, blockHash: createDummyBlockHash(), qc: createDummyQC(0)} require.False(t, isProperlyFormedBlock(&block)) } // Test nil block hash { - block := block{height: 1, view: 1, blockHash: nil, qc: createDummyQC()} + block := block{height: 1, view: 1, blockHash: nil, qc: createDummyQC(0)} require.False(t, isProperlyFormedBlock(&block)) } @@ -44,7 +44,7 @@ func TestIsProperlyFormedBlock(t *testing.T) { // Test valid block { - require.True(t, isProperlyFormedBlock(createDummyBlock())) + require.True(t, isProperlyFormedBlock(createDummyBlock(2))) } } @@ -181,12 +181,12 @@ func createDummyValidatorSet() []Validator { }) } -func createDummyBlock() *block { +func createDummyBlock(view uint64) *block { return &block{ blockHash: createDummyBlockHash(), - view: 1, + view: view, height: 1, - qc: createDummyQC(), + qc: createDummyQC(view - 1), } } @@ -206,7 +206,7 @@ func createDummyVoteMessage(view uint64) *voteMessage { } func createDummyTimeoutMessage(view uint64) *timeoutMessage { - highQC := createDummyQC() + highQC := createDummyQC(view - 
1) signaturePayload := GetTimeoutSignaturePayload(view, highQC.view) @@ -221,10 +221,10 @@ func createDummyTimeoutMessage(view uint64) *timeoutMessage { } } -func createDummyQC() *quorumCertificate { +func createDummyQC(view uint64) *quorumCertificate { return &quorumCertificate{ blockHash: createDummyBlockHash(), - view: 1, + view: view, signersList: bitset.NewBitset().FromBytes([]byte{0x3}), aggregatedSignature: createDummyBLSSignature(), } From 57c210a27e293a555526a9a4352131bd949bd6ac Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Thu, 14 Sep 2023 14:18:56 -0400 Subject: [PATCH 201/762] Add race condition comments to ScheduledTask --- consensus/scheduled_task.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/consensus/scheduled_task.go b/consensus/scheduled_task.go index d971656ce..baf970ebd 100644 --- a/consensus/scheduled_task.go +++ b/consensus/scheduled_task.go @@ -27,6 +27,14 @@ func NewScheduledTask[TaskParam any]() *ScheduledTask[TaskParam] { // Schedule a new task to be executed after the countdown duration. If there is an existing scheduled // task, it will be cancelled and replaced with the new task. +// +// Note: There is a race condition in which a scheduled task is rescheduled at the same moment the +// previously scheduled task begins to execute. This gives the appearance from the caller's POV that +// the previous task executed, despite being replaced. There is no simple internal-only solution to +// prevent, in a way that guarantees no risk of deadlock with outside code's mutexes. If such race +// conditions are a concern, the caller must internally validate the provided param to ensure its +// attached task is no longer stale. See FastHotStuffEventLoop.onTimeoutScheduledTaskExecuted(view uint64) +// for a simple example that exits task execution early when the view param is stale. 
func (t *ScheduledTask[TaskParam]) Schedule(duration time.Duration, param TaskParam, task func(TaskParam)) { t.lock.Lock() defer t.lock.Unlock() @@ -45,6 +53,9 @@ func (t *ScheduledTask[TaskParam]) Schedule(duration time.Duration, param TaskPa }) } +// Note: The same race condition as above exists, in which a scheduled task is cancelled at the same moment +// it is begins to execute. If such race conditions are a concern, the caller must internally validate the +// provided param to ensure its attached task is no longer stale, similar to the above. func (t *ScheduledTask[TaskParam]) Cancel() { t.lock.Lock() defer t.lock.Unlock() From 1998f979aef081c82aad0dcea5327f36bb791478 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 25 Sep 2023 12:59:40 -0400 Subject: [PATCH 202/762] Mitgate Transient Failures from Encoder Test and Balance Model Height Setters (#690) * Cleanup + add sub-tests * Fix nested block height changes --- lib/block_view_access_group_members_test.go | 8 +-- lib/block_view_access_group_test.go | 4 +- lib/block_view_association_test.go | 17 +++-- lib/block_view_creator_coin_test.go | 45 ++++++------- lib/block_view_dao_coin_limit_order_test.go | 13 ++-- lib/block_view_derived_key_test.go | 23 ++++--- lib/block_view_message_test.go | 15 ++--- lib/block_view_nft_test.go | 37 +++++------ lib/block_view_post_test.go | 28 +++++---- lib/block_view_profile_test.go | 16 ++--- lib/block_view_stake_test.go | 42 ++++++++----- lib/block_view_test.go | 62 +++++++++--------- lib/block_view_validator_test.go | 70 +++++++++++++-------- lib/blockchain_test.go | 34 +++++----- lib/pos_epoch_complete_hook_test.go | 9 ++- test.Dockerfile | 2 +- 16 files changed, 244 insertions(+), 181 deletions(-) diff --git a/lib/block_view_access_group_members_test.go b/lib/block_view_access_group_members_test.go index fd357236f..3ef10a0f3 100644 --- a/lib/block_view_access_group_members_test.go +++ 
b/lib/block_view_access_group_members_test.go @@ -51,10 +51,10 @@ func (data *accessGroupMembersTestData) GetInputType() transactionTestInputType func TestBalanceModelAccessGroupMembers(t *testing.T) { setBalanceModelBlockHeights(t) - TestAccessGroupMembersAdd(t) - TestAccessGroupMembersUpdate(t) - TestAccessGroupMembersRemove(t) - TestAccessGroupMembersTxnWithDerivedKey(t) + t.Run("TestAccessGroupMembersAdd", TestAccessGroupMembersAdd) + t.Run("TestAccessGroupMembersUpdate", TestAccessGroupMembersUpdate) + t.Run("TestAccessGroupMembersRemove", TestAccessGroupMembersRemove) + t.Run("TestAccessGroupMembersTxnWithDerivedKey", TestAccessGroupMembersTxnWithDerivedKey) } func TestAccessGroupMembersAdd(t *testing.T) { diff --git a/lib/block_view_access_group_test.go b/lib/block_view_access_group_test.go index 0518acbd8..aa4b4c7fa 100644 --- a/lib/block_view_access_group_test.go +++ b/lib/block_view_access_group_test.go @@ -3,10 +3,11 @@ package lib import ( "bytes" "fmt" + "testing" + "github.com/btcsuite/btcd/btcec" "github.com/pkg/errors" "github.com/stretchr/testify/require" - "testing" ) type AccessGroupTestData struct { @@ -35,7 +36,6 @@ func TestBalanceModelAccessGroups(t *testing.T) { setBalanceModelBlockHeights(t) TestAccessGroup(t) - TestAccessGroupTxnWithDerivedKey(t) } func TestAccessGroup(t *testing.T) { diff --git a/lib/block_view_association_test.go b/lib/block_view_association_test.go index 43b5f9b65..fa8b5c281 100644 --- a/lib/block_view_association_test.go +++ b/lib/block_view_association_test.go @@ -3,11 +3,12 @@ package lib import ( "bytes" "errors" - "github.com/btcsuite/btcd/btcec" - "github.com/stretchr/testify/require" "math" "sort" "testing" + + "github.com/btcsuite/btcd/btcec" + "github.com/stretchr/testify/require" ) func TestBalanceModelAssociations(t *testing.T) { @@ -19,9 +20,15 @@ func TestBalanceModelAssociations(t *testing.T) { func TestAssociations(t *testing.T) { // Run all tests twice: once flushing all txns to the // db, and once just 
keeping all txns in the mempool. - _testAssociations(t, true) - _testAssociations(t, false) - _testAssociationsWithDerivedKey(t) + t.Run("flushToDB=true", func(t *testing.T) { + _testAssociations(t, true) + }) + t.Run("flushToDB=false", func(t *testing.T) { + _testAssociations(t, false) + }) + t.Run("_testAssociationsWithDerivedKey", func(t *testing.T) { + _testAssociationsWithDerivedKey(t) + }) } func _testAssociations(t *testing.T, flushToDB bool) { diff --git a/lib/block_view_creator_coin_test.go b/lib/block_view_creator_coin_test.go index 62f32bc7e..01395fa63 100644 --- a/lib/block_view_creator_coin_test.go +++ b/lib/block_view_creator_coin_test.go @@ -2,11 +2,12 @@ package lib import ( "fmt" + "strconv" + "testing" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "strconv" - "testing" ) type _CreatorCoinTestData struct { @@ -794,41 +795,41 @@ func _helpTestCreatorCoinBuySell( func TestBalanceModelCreatorCoins(t *testing.T) { setBalanceModelBlockHeights(t) - TestCreatorCoinWithDiamonds(t) - TestCreatorCoinWithDiamondsFailureCases(t) - TestCreatorCoinDiamondAfterDeSoDiamondsBlockHeight(t) - TestCreatorCoinTransferSimple_CreatorCoinFounderReward(t) - TestCreatorCoinTransferSimple_DeSoFounderReward(t) + t.Run("TestCreatorCoinWithDiamonds", TestCreatorCoinWithDiamonds) + t.Run("TestCreatorCoinWithDiamondsFailureCases", TestCreatorCoinWithDiamondsFailureCases) + t.Run("TestCreatorCoinDiamondAfterDeSoDiamondsBlockHeight", TestCreatorCoinDiamondAfterDeSoDiamondsBlockHeight) + t.Run("TestCreatorCoinTransferSimple_CreatorCoinFounderReward", TestCreatorCoinTransferSimple_CreatorCoinFounderReward) + t.Run("TestCreatorCoinTransferSimple_DeSoFounderReward", TestCreatorCoinTransferSimple_DeSoFounderReward) } func TestBalanceModelCreatorCoins2(t *testing.T) { setBalanceModelBlockHeights(t) - TestCreatorCoinTransferWithSwapIdentity(t) - TestCreatorCoinTransferWithSmallBalancesLeftOver(t) - 
TestCreatorCoinTransferWithMaxTransfers(t) - TestCreatorCoinTransferBelowMinThreshold(t) - TestCreatorCoinBuySellSimple_CreatorCoinFounderReward(t) + t.Run("TestCreatorCoinTransferWithSwapIdentity", TestCreatorCoinTransferWithSwapIdentity) + t.Run("TestCreatorCoinTransferWithSmallBalancesLeftOver", TestCreatorCoinTransferWithSmallBalancesLeftOver) + t.Run("TestCreatorCoinTransferWithMaxTransfers", TestCreatorCoinTransferWithMaxTransfers) + t.Run("TestCreatorCoinTransferBelowMinThreshold", TestCreatorCoinTransferBelowMinThreshold) + t.Run("TestCreatorCoinBuySellSimple_CreatorCoinFounderReward", TestCreatorCoinBuySellSimple_CreatorCoinFounderReward) } func TestBalanceModelCreatorCoins3(t *testing.T) { setBalanceModelBlockHeights(t) - TestCreatorCoinBuySellSimple_DeSoFounderReward(t) - TestCreatorCoinSelfBuying_DeSoAndCreatorCoinFounderReward(t) - TestCreatorCoinTinyFounderRewardBuySellAmounts_CreatorCoinFounderReward(t) - TestCreatorCoinTinyFounderRewardBuySellAmounts_DeSoFounderReward(t) - TestCreatorCoinFullFounderRewardBuySellAmounts_CreatorCoinFounderReward(t) + t.Run("TestCreatorCoinBuySellSimple_DeSoFounderReward", TestCreatorCoinBuySellSimple_DeSoFounderReward) + t.Run("TestCreatorCoinSelfBuying_DeSoAndCreatorCoinFounderReward", TestCreatorCoinSelfBuying_DeSoAndCreatorCoinFounderReward) + t.Run("TestCreatorCoinTinyFounderRewardBuySellAmounts_CreatorCoinFounderReward", TestCreatorCoinTinyFounderRewardBuySellAmounts_CreatorCoinFounderReward) + t.Run("TestCreatorCoinTinyFounderRewardBuySellAmounts_DeSoFounderReward", TestCreatorCoinTinyFounderRewardBuySellAmounts_DeSoFounderReward) + t.Run("TestCreatorCoinFullFounderRewardBuySellAmounts_CreatorCoinFounderReward", TestCreatorCoinFullFounderRewardBuySellAmounts_CreatorCoinFounderReward) } func TestBalanceModelCreatorCoins4(t *testing.T) { setBalanceModelBlockHeights(t) - TestCreatorCoinLargeFounderRewardBuySellAmounts(t) - TestCreatorCoinAroundThresholdBuySellAmounts(t) - TestSalomonSequence(t) - 
TestCreatorCoinBigBuyAfterSmallBuy(t) - TestCreatorCoinBigBigBuyBigSell(t) + t.Run("TestCreatorCoinLargeFounderRewardBuySellAmounts", TestCreatorCoinLargeFounderRewardBuySellAmounts) + t.Run("TestCreatorCoinAroundThresholdBuySellAmounts", TestCreatorCoinAroundThresholdBuySellAmounts) + t.Run("TestSalomonSequence", TestSalomonSequence) + t.Run("TestCreatorCoinBigBuyAfterSmallBuy", TestCreatorCoinBigBuyAfterSmallBuy) + t.Run("TestCreatorCoinBigBigBuyBigSell", TestCreatorCoinBigBigBuyBigSell) } func TestCreatorCoinWithDiamonds(t *testing.T) { diff --git a/lib/block_view_dao_coin_limit_order_test.go b/lib/block_view_dao_coin_limit_order_test.go index e50030b8d..7e847218a 100644 --- a/lib/block_view_dao_coin_limit_order_test.go +++ b/lib/block_view_dao_coin_limit_order_test.go @@ -3,21 +3,22 @@ package lib import ( "bytes" "fmt" - "github.com/dgraph-io/badger/v3" - "github.com/holiman/uint256" - "github.com/stretchr/testify/require" "math" "math/big" "testing" "time" + + "github.com/dgraph-io/badger/v3" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" ) func TestBalanceModelDAOCoinLimitOrders(t *testing.T) { setBalanceModelBlockHeights(t) - TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t) - TestDAOCoinLimitOrder(t) - TestFlushingDAOCoinLimitOrders(t) + t.Run("TestZeroCostOrderEdgeCaseDAOCoinLimitOrder", TestZeroCostOrderEdgeCaseDAOCoinLimitOrder) + t.Run("TestDAOCoinLimitOrder", TestDAOCoinLimitOrder) + t.Run("TestFlushingDAOCoinLimitOrders", TestFlushingDAOCoinLimitOrders) } func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { // ----------------------- diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index 97ba47eb4..a21d0a4d0 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -5,14 +5,15 @@ import ( "encoding/hex" "encoding/json" "fmt" + "math/rand" + "testing" + "time" + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v3" 
"github.com/holiman/uint256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "math/rand" - "testing" - "time" ) const ( @@ -871,13 +872,15 @@ func _doAuthorizeTxnWithExtraDataAndSpendingLimits(testMeta *TestMeta, utxoView func TestBalanceModelAuthorizeDerivedKey(t *testing.T) { setBalanceModelBlockHeights(t) - TestAuthorizeDerivedKeyBasic(t) - TestAuthorizeDerivedKeyBasicWithTransactionLimits(t) - TestAuthorizedDerivedKeyWithTransactionLimitsHardcore(t) - // We need to set the block height here to 7 so that encoder migrations have the proper version and heights. - // Otherwise, the access groups and associations migrations do not run when encoding Utxo Operations. - DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = 7 - TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t) + t.Run("TestAuthorizeDerivedKeyBasic", TestAuthorizeDerivedKeyBasic) + t.Run("TestAuthorizeDerivedKeyBasicWithTransactionLimits", TestAuthorizeDerivedKeyBasicWithTransactionLimits) + t.Run("TestAuthorizedDerivedKeyWithTransactionLimitsHardcore", TestAuthorizedDerivedKeyWithTransactionLimitsHardcore) + t.Run("TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups", func(t *testing.T) { + // We need to set the block height here to 7 so that encoder migrations have the proper version and heights. + // Otherwise, the access groups and associations migrations do not run when encoding Utxo Operations. 
+ DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = 7 + TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t) + }) } func TestAuthorizeDerivedKeyBasic(t *testing.T) { diff --git a/lib/block_view_message_test.go b/lib/block_view_message_test.go index 8e3fcc836..6371ddf3e 100644 --- a/lib/block_view_message_test.go +++ b/lib/block_view_message_test.go @@ -4,14 +4,15 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "reflect" "strings" "testing" "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/dgraph-io/badger/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestBasePointSignature(t *testing.T) { @@ -116,9 +117,9 @@ func _privateMessageWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB func TestBalanceModelPrivateMessages(t *testing.T) { setBalanceModelBlockHeights(t) - TestPrivateMessages(t) - TestMessagingKeys(t) - TestGroupMessages(t) + t.Run("TestPrivateMessages", TestPrivateMessages) + t.Run("TestMessagingKeys", TestMessagingKeys) + t.Run("TestGroupMessages", TestGroupMessages) } func TestPrivateMessage(t *testing.T) { diff --git a/lib/block_view_nft_test.go b/lib/block_view_nft_test.go index 126109900..477487896 100644 --- a/lib/block_view_nft_test.go +++ b/lib/block_view_nft_test.go @@ -1,13 +1,14 @@ package lib import ( - "github.com/dgraph-io/badger/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "math" "reflect" "testing" "time" + + "github.com/dgraph-io/badger/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func _createNFTWithAdditionalRoyalties(t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoParams, @@ -831,32 +832,32 @@ func _burnNFTWithTestMeta( func TestBalanceModelNFTs(t *testing.T) { setBalanceModelBlockHeights(t) - TestNFTBasic(t) - 
TestNFTRoyaltiesAndSpendingOfBidderUTXOs(t) - TestNFTSerialNumberZeroBid(t) - TestNFTMinimumBidAmount(t) - TestNFTCreatedIsNotForSale(t) + t.Run("TestNFTBasic", TestNFTBasic) + t.Run("TestNFTRoyaltiesAndSpendingOfBidderUTXOs", TestNFTRoyaltiesAndSpendingOfBidderUTXOs) + t.Run("TestNFTSerialNumberZeroBid", TestNFTSerialNumberZeroBid) + t.Run("TestNFTMinimumBidAmount", TestNFTMinimumBidAmount) + t.Run("TestNFTCreatedIsNotForSale", TestNFTCreatedIsNotForSale) } // Break up into multiple tests to keep memory footprint lower func TestBalanceModelNFTs2(t *testing.T) { setBalanceModelBlockHeights(t) - TestNFTMoreErrorCases(t) - TestNFTBidsAreCanceledAfterAccept(t) - TestNFTDifferentMinBidAmountSerialNumbers(t) - TestNFTMaxCopiesGlobalParam(t) - TestNFTPreviousOwnersCantAcceptBids(t) + t.Run("TestNFTMoreErrorCases", TestNFTMoreErrorCases) + t.Run("TestNFTBidsAreCanceledAfterAccept", TestNFTBidsAreCanceledAfterAccept) + t.Run("TestNFTDifferentMinBidAmountSerialNumbers", TestNFTDifferentMinBidAmountSerialNumbers) + t.Run("TestNFTMaxCopiesGlobalParam", TestNFTMaxCopiesGlobalParam) + t.Run("TestNFTPreviousOwnersCantAcceptBids", TestNFTPreviousOwnersCantAcceptBids) } func TestBalanceModelNFTs3(t *testing.T) { setBalanceModelBlockHeights(t) - TestNFTTransfersAndBurns(t) - TestBidAmountZero(t) - TestNFTBuyNow(t) - TestNFTSplits(t) - TestNFTSplitsHardcorePKIDBug(t) + t.Run("TestNFTTransfersAndBurns", TestNFTTransfersAndBurns) + t.Run("TestBidAmountZero", TestBidAmountZero) + t.Run("TestNFTBuyNow", TestNFTBuyNow) + t.Run("TestNFTSplits", TestNFTSplits) + t.Run("TestNFTSplitsHardcorePKIDBug", TestNFTSplitsHardcorePKIDBug) } func TestNFTBasic(t *testing.T) { diff --git a/lib/block_view_post_test.go b/lib/block_view_post_test.go index a630822e6..2e03531f9 100644 --- a/lib/block_view_post_test.go +++ b/lib/block_view_post_test.go @@ -4,14 +4,15 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/dgraph-io/badger/v3" - "github.com/pkg/errors" - 
"github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "reflect" "sort" "testing" "time" + + "github.com/dgraph-io/badger/v3" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func _submitPost(t *testing.T, chain *Blockchain, db *badger.DB, @@ -294,10 +295,10 @@ func _doSubmitPostTxn(t *testing.T, chain *Blockchain, db *badger.DB, func TestBalanceModelSubmitPost(t *testing.T) { setBalanceModelBlockHeights(t) - TestSubmitPost(t) - TestDeSoDiamonds(t) - TestDeSoDiamondErrorCases(t) - TestFreezingPosts(t) + t.Run("TestSubmitPost", TestSubmitPost) + t.Run("TestDeSoDiamonds", TestDeSoDiamonds) + t.Run("TestDeSoDiamondErrorCases", TestDeSoDiamondErrorCases) + t.Run("TestFreezingPosts", TestFreezingPosts) } func TestSubmitPost(t *testing.T) { @@ -2105,6 +2106,12 @@ func TestDeSoDiamondErrorCases(t *testing.T) { } func TestFreezingPosts(t *testing.T) { + // Set up block heights + DeSoTestnetParams.ForkHeights.AssociationsAndAccessGroupsBlockHeight = 1 + DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) + DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) + GlobalDeSoParams = DeSoTestnetParams + // Initialize blockchain. chain, params, db := NewLowDifficultyBlockchain(t) defer func() { @@ -2112,10 +2119,7 @@ func TestFreezingPosts(t *testing.T) { require.NoError(t, ResetPostgres(chain.postgres)) } }() - params.ForkHeights.AssociationsAndAccessGroupsBlockHeight = 1 - params.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) - params.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - GlobalDeSoParams = *params + mempool, miner := NewTestMiner(t, chain, params, true) // Mine a few blocks to give the senderPkString some money. 
diff --git a/lib/block_view_profile_test.go b/lib/block_view_profile_test.go index 18651989a..eda99235e 100644 --- a/lib/block_view_profile_test.go +++ b/lib/block_view_profile_test.go @@ -220,19 +220,19 @@ const ( func TestBalanceModelUpdateProfile(t *testing.T) { setBalanceModelBlockHeights(t) - TestUpdateProfile(t) - TestSpamUpdateProfile(t) - TestUpdateProfileChangeBack(t) + t.Run("TestUpdateProfile", TestUpdateProfile) + t.Run("TestSpamUpdateProfile", TestSpamUpdateProfile) + t.Run("TestUpdateProfileChangeBack", TestUpdateProfileChangeBack) } func TestBalanceModelSwapIdentity(t *testing.T) { setBalanceModelBlockHeights(t) - TestSwapIdentityNOOPCreatorCoinBuySimple(t) - TestSwapIdentityCreatorCoinBuySimple(t) - TestSwapIdentityFailureCases(t) - TestSwapIdentityMain(t) - TestSwapIdentityWithFollows(t) + t.Run("TestSwapIdentityNOOPCreatorCoinBuySimple", TestSwapIdentityNOOPCreatorCoinBuySimple) + t.Run("TestSwapIdentityCreatorCoinBuySimple", TestSwapIdentityCreatorCoinBuySimple) + t.Run("TestSwapIdentityFailureCases", TestSwapIdentityFailureCases) + t.Run("TestSwapIdentityMain", TestSwapIdentityMain) + t.Run("TestSwapIdentityWithFollows", TestSwapIdentityWithFollows) } func TestUpdateProfile(t *testing.T) { diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 83f1692a4..374922960 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -14,17 +14,21 @@ import ( ) func TestStaking(t *testing.T) { - _testStaking(t, false) - _testStaking(t, true) + // Initialize balance model fork heights. + setBalanceModelBlockHeights(t) + + t.Run("flushToDB=false", func(t *testing.T) { + _testStaking(t, false) + }) + t.Run("flushToDB=true", func(t *testing.T) { + _testStaking(t, true) + }) } func _testStaking(t *testing.T, flushToDB bool) { // Local variables var err error - // Initialize balance model fork heights. - setBalanceModelBlockHeights(t) - // Initialize test chain and miner. 
chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) @@ -1670,14 +1674,18 @@ func TestStakingWithDerivedKey(t *testing.T) { } func TestGetTopStakesByStakeAmount(t *testing.T) { - _testGetTopStakesByStakeAmount(t, false) - _testGetTopStakesByStakeAmount(t, true) -} - -func _testGetTopStakesByStakeAmount(t *testing.T, flushToDB bool) { // Initialize balance model fork heights. setBalanceModelBlockHeights(t) + t.Run("flushToDB=false", func(t *testing.T) { + _testGetTopStakesByStakeAmount(t, false) + }) + t.Run("flushToDB=true", func(t *testing.T) { + _testGetTopStakesByStakeAmount(t, true) + }) +} + +func _testGetTopStakesByStakeAmount(t *testing.T, flushToDB bool) { // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) @@ -2107,16 +2115,20 @@ func TestStakeLockupEpochDuration(t *testing.T) { } func TestStakingToJailedValidator(t *testing.T) { - testStakingToJailedValidator(t, false) - testStakingToJailedValidator(t, true) + // Initialize balance model fork heights. + setBalanceModelBlockHeights(t) + + t.Run("flushToDB=false", func(t *testing.T) { + testStakingToJailedValidator(t, false) + }) + t.Run("flushToDB=true", func(t *testing.T) { + testStakingToJailedValidator(t, true) + }) } func testStakingToJailedValidator(t *testing.T, flushToDB bool) { var err error - // Initialize balance model fork heights. - setBalanceModelBlockHeights(t) - // Initialize test chain and miner. 
chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 470c29d33..a32cf82e5 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -4,6 +4,11 @@ import ( "bytes" "encoding/hex" "fmt" + _ "net/http/pprof" + "reflect" + "sort" + "testing" + "github.com/btcsuite/btcd/btcec" "github.com/decred/dcrd/lru" "github.com/dgraph-io/badger/v3" @@ -11,10 +16,6 @@ import ( "github.com/golang/glog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - _ "net/http/pprof" - "reflect" - "sort" - "testing" ) func _strToPk(t *testing.T, pkStr string) []byte { @@ -82,13 +83,13 @@ func setBalanceModelBlockHeights(t *testing.T) { } func resetBalanceModelBlockHeights() { - DeSoTestnetParams.ForkHeights.NFTTransferOrBurnAndDerivedKeysBlockHeight = 1000000 - DeSoTestnetParams.ForkHeights.DerivedKeySetSpendingLimitsBlockHeight = 1000000 - DeSoTestnetParams.ForkHeights.DerivedKeyTrackSpendingLimitsBlockHeight = 1000000 - DeSoTestnetParams.ForkHeights.DerivedKeyEthSignatureCompatibilityBlockHeight = 1000000 - DeSoTestnetParams.ForkHeights.ExtraDataOnEntriesBlockHeight = 1000000 - DeSoTestnetParams.ForkHeights.AssociationsAndAccessGroupsBlockHeight = 1000000 - DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = 1000000 + DeSoTestnetParams.ForkHeights.NFTTransferOrBurnAndDerivedKeysBlockHeight = uint32(60743) + DeSoTestnetParams.ForkHeights.DerivedKeySetSpendingLimitsBlockHeight = uint32(304087) + DeSoTestnetParams.ForkHeights.DerivedKeyTrackSpendingLimitsBlockHeight = uint32(304087 + 18*60) + DeSoTestnetParams.ForkHeights.DerivedKeyEthSignatureCompatibilityBlockHeight = uint32(360584) + DeSoTestnetParams.ForkHeights.ExtraDataOnEntriesBlockHeight = uint32(304087) + DeSoTestnetParams.ForkHeights.AssociationsAndAccessGroupsBlockHeight = uint32(596555) + DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = uint32(683058) 
DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) GlobalDeSoParams = DeSoTestnetParams @@ -303,16 +304,16 @@ func (tes *transactionTestSuite) Run() { } } -const TestDeSoEncoderRetries = 3 +const testDeSoEncoderRetries = 3 -func TestDeSoEncoderSetup(t *testing.T) { +func setupTestDeSoEncoder(t *testing.T) { EncodeToBytesImpl = func(blockHeight uint64, encoder DeSoEncoder, skipMetadata ...bool) []byte { versionByte := encoder.GetVersionByte(blockHeight) encodingBytes := encodeToBytes(blockHeight, encoder, skipMetadata...) // Check for deterministic encoding, try re-encoding the same encoder a couple of times and compare it with // the original bytes. { - for ii := 0; ii < TestDeSoEncoderRetries; ii++ { + for ii := 0; ii < testDeSoEncoderRetries; ii++ { newVersionByte := encoder.GetVersionByte(blockHeight) reEncodingBytes := encodeToBytes(blockHeight, encoder, skipMetadata...) 
if !bytes.Equal(encodingBytes, reEncodingBytes) { @@ -329,7 +330,7 @@ func TestDeSoEncoderSetup(t *testing.T) { } } -func TestDeSoEncoderShutdown(t *testing.T) { +func resetTestDeSoEncoder(t *testing.T) { EncodeToBytesImpl = encodeToBytes } @@ -1464,9 +1465,9 @@ func TestUpdateGlobalParams(t *testing.T) { func TestBalanceModelBasicTransfers(t *testing.T) { setBalanceModelBlockHeights(t) - TestBasicTransfer(t) - TestBasicTransferSignatures(t) - TestBlockRewardPatch(t) + t.Run("TestBasicTransfer", TestBasicTransfer) + t.Run("TestBasicTransferSignatures", TestBasicTransferSignatures) + t.Run("TestBlockRewardPatch", TestBlockRewardPatch) } func TestBasicTransfer(t *testing.T) { @@ -1746,21 +1747,24 @@ func TestBasicTransferSignatures(t *testing.T) { require := require.New(t) _ = require - chain, params, db := NewLowDifficultyBlockchain(t) - postgres := chain.postgres - params.ForkHeights.NFTTransferOrBurnAndDerivedKeysBlockHeight = uint32(0) - params.ForkHeights.DerivedKeySetSpendingLimitsBlockHeight = uint32(0) - params.ForkHeights.DerivedKeyTrackSpendingLimitsBlockHeight = uint32(0) - // Make sure encoder migrations are not triggered yet. 
- GlobalDeSoParams = *params - GlobalDeSoParams.ForkHeights.DeSoUnlimitedDerivedKeysBlockHeight = uint32(100) - for ii := range GlobalDeSoParams.EncoderMigrationHeightsList { - if GlobalDeSoParams.EncoderMigrationHeightsList[ii].Version == 0 { + // Set up block heights + DeSoTestnetParams.ForkHeights.NFTTransferOrBurnAndDerivedKeysBlockHeight = uint32(0) + DeSoTestnetParams.ForkHeights.DerivedKeySetSpendingLimitsBlockHeight = uint32(0) + DeSoTestnetParams.ForkHeights.DerivedKeyTrackSpendingLimitsBlockHeight = uint32(0) + DeSoTestnetParams.ForkHeights.DeSoUnlimitedDerivedKeysBlockHeight = uint32(100) + for ii := range DeSoTestnetParams.EncoderMigrationHeightsList { + if DeSoTestnetParams.EncoderMigrationHeightsList[ii].Version == 0 { continue } - GlobalDeSoParams.EncoderMigrationHeightsList[ii].Height = 100 + DeSoTestnetParams.EncoderMigrationHeightsList[ii].Height = 100 } + // Make sure encoder migrations are not triggered yet. + GlobalDeSoParams = DeSoTestnetParams + + chain, params, db := NewLowDifficultyBlockchain(t) + postgres := chain.postgres + _ = db mempool, miner := NewTestMiner(t, chain, params, true /*isSender*/) // Mine two blocks to give the sender some DeSo. diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 8daeb056a..0f3f8c70b 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -15,8 +15,15 @@ import ( ) func TestValidatorRegistration(t *testing.T) { - _testValidatorRegistration(t, false) - _testValidatorRegistration(t, true) + // Initialize balance model fork heights. 
+ setBalanceModelBlockHeights(t) + + t.Run("flushToDB=false", func(t *testing.T) { + _testValidatorRegistration(t, false) + }) + t.Run("flushToDB=true", func(t *testing.T) { + _testValidatorRegistration(t, true) + }) } func _testValidatorRegistration(t *testing.T, flushToDB bool) { @@ -26,9 +33,6 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { var validatorEntries []*ValidatorEntry var err error - // Initialize balance model fork heights. - setBalanceModelBlockHeights(t) - // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) @@ -676,17 +680,21 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { } func TestGetTopActiveValidatorsByStakeAmount(t *testing.T) { - _testGetTopActiveValidatorsByStakeAmount(t, false) - _testGetTopActiveValidatorsByStakeAmount(t, true) + // Initialize balance model fork heights. + setBalanceModelBlockHeights(t) + + t.Run("flushToDB=false", func(t *testing.T) { + _testGetTopActiveValidatorsByStakeAmount(t, false) + }) + t.Run("flushToDB=true", func(t *testing.T) { + _testGetTopActiveValidatorsByStakeAmount(t, true) + }) } func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { var validatorEntries []*ValidatorEntry var err error - // Initialize balance model fork heights. - setBalanceModelBlockHeights(t) - // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) @@ -1107,8 +1115,15 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { } func TestUpdatingValidatorDisableDelegatedStake(t *testing.T) { - _testUpdatingValidatorDisableDelegatedStake(t, false) - _testUpdatingValidatorDisableDelegatedStake(t, true) + // Initialize balance model fork heights. 
+ setBalanceModelBlockHeights(t) + + t.Run("flushToDB=false", func(t *testing.T) { + _testUpdatingValidatorDisableDelegatedStake(t, false) + }) + t.Run("flushToDB=true", func(t *testing.T) { + _testUpdatingValidatorDisableDelegatedStake(t, true) + }) } func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { @@ -1116,9 +1131,6 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { var stakeEntries []*StakeEntry var err error - // Initialize balance model fork heights. - setBalanceModelBlockHeights(t) - // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) @@ -1288,8 +1300,15 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { } func TestUnregisterAsValidator(t *testing.T) { - _testUnregisterAsValidator(t, false) - _testUnregisterAsValidator(t, true) + // Initialize balance model fork heights. + setBalanceModelBlockHeights(t) + + t.Run("flushToDB=false", func(t *testing.T) { + _testUnregisterAsValidator(t, false) + }) + t.Run("flushToDB=true", func(t *testing.T) { + _testUnregisterAsValidator(t, true) + }) } func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { @@ -1299,9 +1318,6 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { _ = lockedStakeEntry var err error - // Initialize balance model fork heights. - setBalanceModelBlockHeights(t) - // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) @@ -1465,17 +1481,21 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { } func TestUnjailValidator(t *testing.T) { - _testUnjailValidator(t, false) - _testUnjailValidator(t, true) + // Initialize balance model fork heights. 
+ setBalanceModelBlockHeights(t) + + t.Run("flushToDB=false", func(t *testing.T) { + _testUnjailValidator(t, false) + }) + t.Run("flushToDB=true", func(t *testing.T) { + _testUnjailValidator(t, true) + }) } func _testUnjailValidator(t *testing.T, flushToDB bool) { var validatorEntry *ValidatorEntry var err error - // Initialize balance model fork heights. - setBalanceModelBlockHeights(t) - // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index 9bbd34231..c444425df 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -212,7 +212,7 @@ func NewLowDifficultyBlockchainWithParams(t *testing.T, params *DeSoParams) ( func NewLowDifficultyBlockchainWithParamsAndDb(t *testing.T, params *DeSoParams, usePostgres bool, postgresPort uint32, useProvidedParams bool) ( *Blockchain, *DeSoParams, *embeddedpostgres.EmbeddedPostgres) { - TestDeSoEncoderSetup(t) + setupTestDeSoEncoder(t) AppendToMemLog(t, "START") // Set the number of txns per view regeneration to one while creating the txns @@ -260,7 +260,7 @@ func NewLowDifficultyBlockchainWithParamsAndDb(t *testing.T, params *DeSoParams, t.Cleanup(func() { AppendToMemLog(t, "CLEANUP_START") - TestDeSoEncoderShutdown(t) + resetTestDeSoEncoder(t) if snap != nil { snap.Stop() CleanUpBadger(snap.SnapshotDb) @@ -358,6 +358,11 @@ func NewTestMiner(t *testing.T, chain *Blockchain, params *DeSoParams, isSender if !mempool.stopped { mempool.Stop() } + // The above Stop() calls are non-blocking so we need to wait a bit + // for them to finish. The alternative is to make them blocking but + // that would require a reasonable amount of refactoring that changes + // production behavior. 
+ time.Sleep(100 * time.Millisecond) }) return mempool, newMiner } @@ -447,31 +452,32 @@ func _getBalanceWithView(t *testing.T, chain *Blockchain, utxoView *UtxoView, pk func TestBalanceModelBlockTests(t *testing.T) { setBalanceModelBlockHeights(t) - TestBasicTransferReorg(t) - TestProcessBlockConnectBlocks(t) - TestProcessHeaderskReorgBlocks(t) + t.Run("TestBasicTransferReorg", TestBasicTransferReorg) + t.Run("TestProcessBlockConnectBlocks", TestProcessBlockConnectBlocks) + t.Run("TestProcessHeaderskReorgBlocks", TestProcessHeaderskReorgBlocks) + t.Run("TestValidateBasicTransfer", TestValidateBasicTransfer) + // The below two tests check utxos and need to be updated for balance model //TestProcessBlockReorgBlocks(t) //TestAddInputsAndChangeToTransaction(t) - TestValidateBasicTransfer(t) } func TestBalanceModelBlockTests2(t *testing.T) { setBalanceModelBlockHeights(t) - TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t) - TestCalcNextDifficultyTargetHittingLimitsSlow(t) - TestCalcNextDifficultyTargetHittingLimitsFast(t) - TestCalcNextDifficultyTargetJustRight(t) + t.Run("TestCalcNextDifficultyTargetHalvingDoublingHitLimit", TestCalcNextDifficultyTargetHalvingDoublingHitLimit) + t.Run("TestCalcNextDifficultyTargetHittingLimitsSlow", TestCalcNextDifficultyTargetHittingLimitsSlow) + t.Run("TestCalcNextDifficultyTargetHittingLimitsFast", TestCalcNextDifficultyTargetHittingLimitsFast) + t.Run("TestCalcNextDifficultyTargetJustRight", TestCalcNextDifficultyTargetJustRight) } func TestBalanceModelBlockTests3(t *testing.T) { setBalanceModelBlockHeights(t) - TestCalcNextDifficultyTargetSlightlyOff(t) - TestBadMerkleRoot(t) - TestBadBlockSignature(t) - TestForbiddenBlockSignaturePubKey(t) + t.Run("TestCalcNextDifficultyTargetSlightlyOff", TestCalcNextDifficultyTargetSlightlyOff) + t.Run("TestBadMerkleRoot", TestBadMerkleRoot) + t.Run("TestBadBlockSignature", TestBadBlockSignature) + t.Run("TestForbiddenBlockSignaturePubKey", TestForbiddenBlockSignaturePubKey) } func 
TestBasicTransferReorg(t *testing.T) { diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 70f5c9368..269c25486 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -54,6 +54,9 @@ func TestIsLastBlockInCurrentEpoch(t *testing.T) { } func TestRunEpochCompleteHook(t *testing.T) { + // Initialize balance model fork heights. + setBalanceModelBlockHeights(t) + // Initialize test chain, miner, and testMeta testMeta := _setUpMinerAndTestMetaForEpochCompleteTest(t) @@ -476,6 +479,9 @@ func TestRunEpochCompleteHook(t *testing.T) { } func TestStakingRewardDistribution(t *testing.T) { + // Initialize balance model fork heights. + setBalanceModelBlockHeights(t) + // Initialize test chain, miner, and testMeta testMeta := _setUpMinerAndTestMetaForEpochCompleteTest(t) @@ -741,9 +747,6 @@ func TestStakingRewardDistribution(t *testing.T) { } func _setUpMinerAndTestMetaForEpochCompleteTest(t *testing.T) *TestMeta { - // Initialize balance model fork heights. - setBalanceModelBlockHeights(t) - // Initialize test chain and miner. 
chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) diff --git a/test.Dockerfile b/test.Dockerfile index 3a2d8aa9d..92dd66a55 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -30,4 +30,4 @@ RUN ./scripts/install-relic.sh # build backend RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core main.go -ENTRYPOINT ["go", "test", "-tags", "relic", "-v", "github.com/deso-protocol/core/bls", "github.com/deso-protocol/core/collections", "github.com/deso-protocol/core/consensus", "github.com/deso-protocol/core/lib"] +ENTRYPOINT ["go", "test", "-tags", "relic", "-v", "-failfast", "-p", "1", "github.com/deso-protocol/core/bls", "github.com/deso-protocol/core/collections", "github.com/deso-protocol/core/consensus", "github.com/deso-protocol/core/lib"] From fae304910761e2db18d573f363d5935ecdcb2c72 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 25 Sep 2023 14:42:08 -0400 Subject: [PATCH 203/762] introduce-committed-status-to-block-node (#677) * introduce-committed-status-to-block-node * update serialization/deserialization of block node, remove unnecessary fields from NewPoSBlockNode * fix syntax error * don't use encoder type for block node * dont treat block node serialization changes as encoder migrations * cleanup + remove migration name * address SA feedback --------- Co-authored-by: Lazy Nina <> --- lib/block_view_bitcoin_test.go | 2 +- lib/blockchain.go | 50 +++++++++++++++++++++-- lib/blockchain_test.go | 16 ++++---- lib/constants.go | 4 +- lib/db_utils.go | 72 +++++++++++++++++++--------------- lib/postgres.go | 2 +- 6 files changed, 98 insertions(+), 48 deletions(-) diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index 6a422dec5..79d27e5d1 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -49,7 +49,7 @@ func GetTestParamsCopy( // Set the BitcoinExchange-related params to canned values. 
paramsCopy := *paramss headerHash := (BlockHash)(startHeader.BlockHash()) - paramsCopy.BitcoinStartBlockNode = NewBlockNode( + paramsCopy.BitcoinStartBlockNode = NewPoWBlockNode( nil, /*ParentNode*/ &headerHash, /*Hash*/ startHeight, diff --git a/lib/blockchain.go b/lib/blockchain.go index 0a8ba1077..6322c8c10 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -75,6 +75,13 @@ const ( StatusBitcoinHeaderValidateFailed // Deprecated ) +type CommittedBlockStatus uint8 + +const ( + COMMITTED CommittedBlockStatus = 0 + UNCOMMITTED CommittedBlockStatus = 1 +) + // IsFullyProcessed determines if the BlockStatus corresponds to a fully processed and stored block. func (blockStatus BlockStatus) IsFullyProcessed() bool { return blockStatus&StatusHeaderValidated != 0 && @@ -149,6 +156,16 @@ type BlockNode struct { // Status holds the validation state for the block and whether or not // it's stored in the database. Status BlockStatus + + // CommittedStatus is either COMMITTED or UNCOMMITTED. If it's UNCOMMITTED, then + // the block is not yet committed to the blockchain. If it's COMMITTED, then the + // block is committed to the blockchain. + // In PoW consensus, all blocks will have CommittedStatus = COMMITTED. + // In PoS consensus, the chain tip and its parent will have CommittedStatus = UNCOMMITTED and + // all other blocks will have CommittedStatus = COMMITTED. When a new block is added to the tip, + // its CommittedStatus will be set to UNCOMMITTED and its grandparent's CommittedStatus will be + // updated to COMMITTED. 
+ CommittedStatus CommittedBlockStatus } func _difficultyBitsToHash(diffBits uint32) (_diffHash *BlockHash) { @@ -281,12 +298,15 @@ func (nn *BlockNode) String() string { if nn.Header != nil { tstamp = uint32(nn.Header.GetTstampSecs()) } - return fmt.Sprintf("< TstampSecs: %d, Height: %d, Hash: %s, ParentHash %s, Status: %s, CumWork: %v>", - tstamp, nn.Header.Height, nn.Hash, parentHash, nn.Status, nn.CumWork) + return fmt.Sprintf("< TstampSecs: %d, Height: %d, Hash: %s, ParentHash %s, Status: %s, CumWork: %v, CommittedStatus: %v>", + tstamp, nn.Header.Height, nn.Hash, parentHash, nn.Status, nn.CumWork, nn.CommittedStatus) } +// NewPoWBlockNode is a helper function to create a BlockNode +// when running PoW consensus. All blocks in the PoW consensus +// have a committed status of COMMITTED. // TODO: Height not needed in this since it's in the header. -func NewBlockNode( +func NewPoWBlockNode( parent *BlockNode, hash *BlockHash, height uint32, @@ -303,6 +323,28 @@ func NewBlockNode( CumWork: cumWork, Header: header, Status: status, + // All blocks have a committed status in PoW. + CommittedStatus: COMMITTED, + } +} + +// NewPoSBlockNode is a new helper function to create a block node +// as we need to control the value of the CommittedStatus field. +func NewPoSBlockNode( + parent *BlockNode, + hash *BlockHash, + height uint32, + header *MsgDeSoHeader, + status BlockStatus, + committedStatus CommittedBlockStatus) *BlockNode { + + return &BlockNode{ + Parent: parent, + Hash: hash, + Height: height, + Header: header, + Status: status, + CommittedStatus: committedStatus, } } @@ -1724,7 +1766,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B // and try to mine on top of it before revealing it to everyone. 
newWork := BytesToBigint(ExpectedWorkForBlockHash(diffTarget)[:]) cumWork := newWork.Add(newWork, parentNode.CumWork) - newNode := NewBlockNode( + newNode := NewPoWBlockNode( parentNode, headerHash, uint32(blockHeader.Height), diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index c444425df..26ae76d5d 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -1239,7 +1239,7 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { } nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewBlockNode( + nodes = append(nodes, NewPoWBlockNode( lastNode, nil, uint32(ii), @@ -1276,7 +1276,7 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { lastNode := nodes[ii-1] nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewBlockNode( + nodes = append(nodes, NewPoWBlockNode( lastNode, nil, uint32(ii), @@ -1335,7 +1335,7 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { } nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewBlockNode( + nodes = append(nodes, NewPoWBlockNode( lastNode, nil, uint32(ii), @@ -1372,7 +1372,7 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { lastNode := nodes[ii-1] nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewBlockNode( + nodes = append(nodes, NewPoWBlockNode( lastNode, nil, uint32(ii), @@ -1431,7 +1431,7 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { } nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewBlockNode( + nodes = 
append(nodes, NewPoWBlockNode( lastNode, nil, uint32(ii), @@ -1486,7 +1486,7 @@ func TestCalcNextDifficultyTargetJustRight(t *testing.T) { } nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewBlockNode( + nodes = append(nodes, NewPoWBlockNode( lastNode, nil, uint32(ii), @@ -1541,7 +1541,7 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { } nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewBlockNode( + nodes = append(nodes, NewPoWBlockNode( lastNode, nil, uint32(ii), @@ -1578,7 +1578,7 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { lastNode := nodes[ii-1] nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewBlockNode( + nodes = append(nodes, NewPoWBlockNode( lastNode, nil, uint32(ii), diff --git a/lib/constants.go b/lib/constants.go index fe5618213..2302aff93 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -961,7 +961,7 @@ var DeSoMainnetParams = DeSoParams{ // value should equal the amount of work it takes to get from whatever start node you // choose and the tip. This is done by running once, letting it fail, and then rerunning // with the value it outputs. - BitcoinStartBlockNode: NewBlockNode( + BitcoinStartBlockNode: NewPoWBlockNode( nil, mustDecodeHexBlockHashBitcoin("000000000000000000092d577cc673bede24b6d7199ee69c67eeb46c18fc978c"), // Note the height is always one greater than the parent node. @@ -1217,7 +1217,7 @@ var DeSoTestnetParams = DeSoParams{ DeSoNanosPurchasedAtGenesis: uint64(6000000000000000), // See comment in mainnet config. 
- BitcoinStartBlockNode: NewBlockNode( + BitcoinStartBlockNode: NewPoWBlockNode( nil, mustDecodeHexBlockHashBitcoin("000000000000003aae8fb976056413aa1d863eb5bee381ff16c9642283b1da1a"), 1897056, diff --git a/lib/db_utils.go b/lib/db_utils.go index eaedb4b20..2e35e6fd3 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -4670,44 +4670,44 @@ func DeleteUtxoOperationsForBlockWithTxn(txn *badger.Txn, snap *Snapshot, blockH return DBDeleteWithTxn(txn, snap, _DbKeyForUtxoOps(blockHash)) } +func blockNodeProofOfStakeCutoverMigrationTriggered(height uint32) bool { + return height >= GlobalDeSoParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight +} + func SerializeBlockNode(blockNode *BlockNode) ([]byte, error) { data := []byte{} - // Hash if blockNode.Hash == nil { return nil, fmt.Errorf("SerializeBlockNode: Hash cannot be nil") } data = append(data, blockNode.Hash[:]...) - - // Height data = append(data, UintToBuf(uint64(blockNode.Height))...) + if !blockNodeProofOfStakeCutoverMigrationTriggered(blockNode.Height) { + // DifficultyTarget + if blockNode.DifficultyTarget == nil { + return nil, fmt.Errorf("SerializeBlockNode: DifficultyTarget cannot be nil") + } + data = append(data, blockNode.DifficultyTarget[:]...) - // DifficultyTarget - if blockNode.DifficultyTarget == nil { - return nil, fmt.Errorf("SerializeBlockNode: DifficultyTarget cannot be nil") + // CumWork + data = append(data, BigintToHash(blockNode.CumWork)[:]...) } - data = append(data, blockNode.DifficultyTarget[:]...) - - // CumWork - data = append(data, BigintToHash(blockNode.CumWork)[:]...) - - // Header serializedHeader, err := blockNode.Header.ToBytes(false) if err != nil { - return nil, errors.Wrapf(err, "SerializeBlockNode: Problem serializing header") + return nil, fmt.Errorf("serializePoSBlockNode: Problem serializing header: %v", err) } data = append(data, IntToBuf(int64(len(serializedHeader)))...) data = append(data, serializedHeader...) 
- // Status - // It's assumed this field is one byte long. data = append(data, UintToBuf(uint64(blockNode.Status))...) - + if blockNodeProofOfStakeCutoverMigrationTriggered(blockNode.Height) { + data = append(data, UintToBuf(uint64(blockNode.CommittedStatus))...) + } return data, nil } func DeserializeBlockNode(data []byte) (*BlockNode, error) { - blockNode := NewBlockNode( + blockNode := NewPoWBlockNode( nil, // Parent &BlockHash{}, // Hash 0, // Height @@ -4719,7 +4719,6 @@ func DeserializeBlockNode(data []byte) (*BlockNode, error) { ) rr := bytes.NewReader(data) - // Hash _, err := io.ReadFull(rr, blockNode.Hash[:]) if err != nil { @@ -4733,19 +4732,21 @@ func DeserializeBlockNode(data []byte) (*BlockNode, error) { } blockNode.Height = uint32(height) - // DifficultyTarget - _, err = io.ReadFull(rr, blockNode.DifficultyTarget[:]) - if err != nil { - return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding DifficultyTarget") - } + if !blockNodeProofOfStakeCutoverMigrationTriggered(blockNode.Height) { + // DifficultyTarget + _, err = io.ReadFull(rr, blockNode.DifficultyTarget[:]) + if err != nil { + return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding DifficultyTarget") + } - // CumWork - tmp := BlockHash{} - _, err = io.ReadFull(rr, tmp[:]) - if err != nil { - return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding CumWork") + // CumWork + tmp := BlockHash{} + _, err = io.ReadFull(rr, tmp[:]) + if err != nil { + return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding CumWork") + } + blockNode.CumWork = HashToBigint(&tmp) } - blockNode.CumWork = HashToBigint(&tmp) // Header payloadLen, err := ReadVarint(rr) @@ -4773,6 +4774,14 @@ func DeserializeBlockNode(data []byte) (*BlockNode, error) { } blockNode.Status = BlockStatus(uint32(status)) + // CommittedStatus + if blockNodeProofOfStakeCutoverMigrationTriggered(blockNode.Height) { + committedStatus, err := ReadUvarint(rr) + if err != nil { + return nil, 
errors.Wrapf(err, "DeserializeBlockNode: Problem decoding CommittedStatus") + } + blockNode.CommittedStatus = CommittedBlockStatus(committedStatus) + } return blockNode, nil } @@ -5034,8 +5043,7 @@ func GetHeightHashToNodeInfoWithTxn(txn *badger.Txn, snap *Snapshot, return nil } - var blockNode *BlockNode - blockNode, err = DeserializeBlockNode(nodeBytes) + blockNode, err := DeserializeBlockNode(nodeBytes) if err != nil { return nil } @@ -5116,7 +5124,7 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, genesisBlock := params.GenesisBlock diffTarget := MustDecodeHexBlockHash(params.MinDifficultyTargetHex) blockHash := MustDecodeHexBlockHash(params.GenesisBlockHashHex) - genesisNode := NewBlockNode( + genesisNode := NewPoWBlockNode( nil, // Parent blockHash, 0, // Height diff --git a/lib/postgres.go b/lib/postgres.go index 1b38f820f..772641007 100644 --- a/lib/postgres.go +++ b/lib/postgres.go @@ -4083,7 +4083,7 @@ func (postgres *Postgres) InitGenesisBlock(params *DeSoParams, db *badger.DB) er genesisBlock := params.GenesisBlock diffTarget := MustDecodeHexBlockHash(params.MinDifficultyTargetHex) blockHash := MustDecodeHexBlockHash(params.GenesisBlockHashHex) - genesisNode := NewBlockNode( + genesisNode := NewPoWBlockNode( nil, blockHash, 0, From f3645a80e9347300b65df313266c1282329e8427 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 25 Sep 2023 15:13:24 -0400 Subject: [PATCH 204/762] Rename blockConstructionCadence to blockConstructionInterval (#694) --- consensus/event_loop.go | 10 +++++----- consensus/event_loop_test.go | 2 +- consensus/types.go | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 916994a45..1609a9de0 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -19,7 +19,7 @@ func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { // Initializes the consensus instance with the latest known 
valid block in the blockchain, and // the validator set for the next block height. The functions expects the following for the input // params: -// - blockConstructionCadence: block construction duration must be > 0 +// - blockConstructionInterval: block construction duration must be > 0 // - timeoutBaseDuration: timeout base duration must be > 0 // - chainTip: the input block must have a valid block hash, block height, view, and QC // - validators: the validators must be sorted in decreasing order of stake, with a @@ -29,7 +29,7 @@ func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { // Given the above, This function updates the chain tip internally, and re-initializes all internal // data structures that are used to track incoming votes and timeout messages for QC construction. func (fc *FastHotStuffEventLoop) Init( - blockConstructionCadence time.Duration, + blockConstructionInterval time.Duration, timeoutBaseDuration time.Duration, chainTip Block, validators []Validator, @@ -44,7 +44,7 @@ func (fc *FastHotStuffEventLoop) Init( } // Validate the scheduled task durations - if blockConstructionCadence <= 0 { + if blockConstructionInterval <= 0 { return errors.New("FastHotStuffEventLoop.Init: Block construction duration must be > 0") } if timeoutBaseDuration <= 0 { @@ -74,7 +74,7 @@ func (fc *FastHotStuffEventLoop) Init( fc.ConsensusEvents = make(chan *ConsensusEvent, signalChannelBufferSize) // Set the block construction and timeout base durations - fc.blockConstructionCadence = blockConstructionCadence + fc.blockConstructionInterval = blockConstructionInterval fc.timeoutBaseDuration = timeoutBaseDuration // Update the consensus status @@ -385,7 +385,7 @@ func (fc *FastHotStuffEventLoop) resetScheduledTasks() { } // Schedule the next block construction task. This will run with currentView param. 
- fc.nextBlockConstructionTask.Schedule(fc.blockConstructionCadence, fc.currentView, fc.onBlockConstructionScheduledTask) + fc.nextBlockConstructionTask.Schedule(fc.blockConstructionInterval, fc.currentView, fc.onBlockConstructionScheduledTask) // Schedule the next timeout task. This will run with currentView param. fc.nextTimeoutTask.Schedule(timeoutDuration, fc.currentView, fc.onTimeoutScheduledTaskExecuted) diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 25d475f24..a7c2eb703 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -61,7 +61,7 @@ func TestFastHotStuffInitialization(t *testing.T) { require.Equal(t, fc.chainTip.GetView(), uint64(2)) require.Equal(t, fc.chainTip.GetHeight(), uint64(1)) - require.Equal(t, fc.blockConstructionCadence, time.Duration(100)) + require.Equal(t, fc.blockConstructionInterval, time.Duration(100)) require.Equal(t, fc.timeoutBaseDuration, time.Duration(101)) require.Equal(t, fc.currentView, uint64(3)) diff --git a/consensus/types.go b/consensus/types.go index 3a4bd7031..141e55acd 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -127,8 +127,8 @@ const signalChannelBufferSize = 100 type FastHotStuffEventLoop struct { lock sync.RWMutex - blockConstructionCadence time.Duration - timeoutBaseDuration time.Duration + blockConstructionInterval time.Duration + timeoutBaseDuration time.Duration nextBlockConstructionTask *ScheduledTask[uint64] nextTimeoutTask *ScheduledTask[uint64] From 9ba10bfa38b29e65d18913c9c99c128eb047aa9c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 26 Sep 2023 12:05:48 -0400 Subject: [PATCH 205/762] Implement QC Validation and Super-majority Stake Check (#695) * Implement QC Validation and Super-majority Stake Check * Address Piotr's feedback * Update comments for isSuperMajorityStake --- consensus/event_loop.go | 4 +- consensus/utils.go | 98 +++++++++++++++++++++++++++- consensus/utils_test.go | 
140 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 239 insertions(+), 3 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 1609a9de0..09e00333a 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -219,7 +219,7 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { voteSignaturePayload := GetVoteSignaturePayload(vote.GetView(), vote.GetBlockHash()) // Verify the vote signature - if !isValidSignature(vote.GetPublicKey(), vote.GetSignature(), voteSignaturePayload[:]) { + if !isValidSignatureSinglePublicKey(vote.GetPublicKey(), vote.GetSignature(), voteSignaturePayload[:]) { return errors.New("FastHotStuffEventLoop.ProcessValidatorVote: Invalid signature") } @@ -290,7 +290,7 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) timeoutSignaturePayload := GetTimeoutSignaturePayload(timeout.GetView(), timeout.GetHighQC().GetView()) // Verify the vote signature - if !isValidSignature(timeout.GetPublicKey(), timeout.GetSignature(), timeoutSignaturePayload[:]) { + if !isValidSignatureSinglePublicKey(timeout.GetPublicKey(), timeout.GetSignature(), timeoutSignaturePayload[:]) { return errors.New("FastHotStuffEventLoop.ProcessValidatorTimeout: Invalid signature") } diff --git a/consensus/utils.go b/consensus/utils.go index a364d36a0..1645ff439 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -6,9 +6,45 @@ import ( "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" + "github.com/holiman/uint256" "golang.org/x/crypto/sha3" ) +// Given a QC and a sorted validator set, this function returns true if the QC contains a valid +// super-majority of signatures from the validator set for the QC's (View, BlockHash) pair. 
+func IsValidSuperMajorityQuorumCertificate(qc QuorumCertificate, validators []Validator) bool { + if !isProperlyFormedQC(qc) || !isProperlyFormedValidatorSet(validators) { + return false + } + + // Compute the signature that validators in the QC would have signed + signaturePayload := GetVoteSignaturePayload(qc.GetView(), qc.GetBlockHash()) + + // Compute the total stake in the QC and the total stake in the network + stakeInQC := uint256.NewInt() + totalStake := uint256.NewInt() + + // Fetch the validators in the QC + validatorPublicKeysInQC := []*bls.PublicKey{} + + // Fetch the validators in the QC, and compute the sum of stake in the QC and in the network + for ii := range validators { + if qc.GetSignersList().Get(ii) { + stakeInQC.Add(stakeInQC, validators[ii].GetStakeAmount()) + validatorPublicKeysInQC = append(validatorPublicKeysInQC, validators[ii].GetPublicKey()) + } + totalStake.Add(totalStake, validators[ii].GetStakeAmount()) + } + + // Check if the QC contains a super-majority of stake + if !isSuperMajorityStake(stakeInQC, totalStake) { + return false + } + + // Finally, validate the signature + return isValidSignatureManyPublicKeys(validatorPublicKeysInQC, qc.GetAggregatedSignature(), signaturePayload[:]) +} + // When voting on a block, validators sign the payload sha3-256(View, BlockHash) with their BLS // private key. 
This hash guarantees that the view and block hash fields in a VoteMessage // have not been tampered with, while maintaining all existing guarantees that the validator @@ -125,6 +161,11 @@ func isProperlyFormedQC(qc QuorumCertificate) bool { return false } + // The block hash must be non-nil + if isInterfaceNil(qc.GetBlockHash()) { + return false + } + // The view must be non-zero and the aggregated signature non-nil if qc.GetView() == 0 || isInterfaceNil(qc.GetAggregatedSignature()) { return false @@ -150,7 +191,62 @@ func isInterfaceNil(i interface{}) bool { return value.Kind() == reflect.Ptr && value.IsNil() } -func isValidSignature(publicKey *bls.PublicKey, signature *bls.Signature, payload []byte) bool { +func isValidSignatureSinglePublicKey(publicKey *bls.PublicKey, signature *bls.Signature, payload []byte) bool { isValid, err := bls.VerifyAggregateSignatureSinglePayload([]*bls.PublicKey{publicKey}, signature, payload) return err == nil && isValid } + +func isValidSignatureManyPublicKeys(publicKeys []*bls.PublicKey, signature *bls.Signature, payload []byte) bool { + isValid, err := bls.VerifyAggregateSignatureSinglePayload(publicKeys, signature, payload) + return err == nil && isValid +} + +// This function uses integer math to verify if the provided stake amount represents a +// super-majority 2f+1 Byzantine Quorum. First we need the following context: +// - Assume N = total stake in the network +// - Assume f = faulty stake in the network +// - Assume C = honest stake in the network +// - We have N = C + f. +// +// As our security assumptions, we need C >= 2f+1. If we consider worst-case scenario (C=2f+1), we have N = 3f + 1. +// - We want to determine if we have a super-majority Quorum containing the majority of C +// - The minimal size of such Quorum is f + [floor(C/2) + 1] +// - For a fixed N, this function grows larger as C gets smaller relative to f. 
+// - We would need the largest Quorum for C = 2f+1, and it's size would also be 2f+1 = f + floor((2f+1)/2) + 1. +// +// So, for a given N, we check for a super-majority Quorum, containing at least 2f+1 votes, where f is defined +// in worst-case scenario of N = 3f+1. +// +// Given the above, let's say Cq := stake that is provided to this function. We can derive the following +// super-majority check: +// - Cq >= 2f + 1 +// - 3Cq >= 6f + 3 +// - 3Cq >= 2(3f + 1) + 1 +// - 3Cq >= 2N + 1 +// - Finally, this gives us the condition: 3Cq - 2N - 1 >= 0. Which is what we will verify in this function. +func isSuperMajorityStake(stake *uint256.Int, totalStake *uint256.Int) bool { + // Both values must be > 0 + if stake == nil || totalStake == nil || stake.IsZero() || totalStake.IsZero() { + return false + } + + // The stake must be less than or equal to the total stake + if stake.Cmp(totalStake) > 0 { + return false + } + + // Compute 3Cq + honestStakeComponent := uint256.NewInt().Mul(stake, uint256.NewInt().SetUint64(3)) + + // Compute 2N + totalStakeComponent := uint256.NewInt().Mul(totalStake, uint256.NewInt().SetUint64(2)) + + // Compute 3Cq - 2N - 1 + superMajorityConditionSum := uint256.NewInt().Sub( + uint256.NewInt().Sub(honestStakeComponent, totalStakeComponent), + uint256.NewInt().SetOne(), + ) + + // Check if 3Cq - 2N - 1 >= 0 + return superMajorityConditionSum.Sign() >= 0 +} diff --git a/consensus/utils_test.go b/consensus/utils_test.go index db21e0e31..10e769775 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -12,6 +12,83 @@ import ( "github.com/stretchr/testify/require" ) +func TestIsValidSuperMajorityQuorumCertificate(t *testing.T) { + // Test malformed QC + { + require.False(t, IsValidSuperMajorityQuorumCertificate(nil, createDummyValidatorSet())) + } + + // Test malformed validator set + { + require.False(t, IsValidSuperMajorityQuorumCertificate(createDummyQC(1), nil)) + } + + // Set up test validator data + validatorPrivateKey1 := 
createDummyBLSPrivateKey() + validatorPrivateKey2 := createDummyBLSPrivateKey() + validatorPrivateKey3 := createDummyBLSPrivateKey() + + validator1 := validator{ + publicKey: validatorPrivateKey1.PublicKey(), + stakeAmount: uint256.NewInt().SetUint64(3), + } + + validator2 := validator{ + publicKey: validatorPrivateKey2.PublicKey(), + stakeAmount: uint256.NewInt().SetUint64(2), + } + + validator3 := validator{ + publicKey: validatorPrivateKey3.PublicKey(), + stakeAmount: uint256.NewInt().SetUint64(1), + } + + validators := []Validator{&validator1, &validator2, &validator3} + + // Set up the block hash and view + blockHash := createDummyBlockHash() + view := uint64(10) + + // Compute the signature payload + signaturePayload := GetVoteSignaturePayload(view, blockHash) + + // Test with no super-majority stake + { + validator1Signature, err := validatorPrivateKey1.Sign(signaturePayload[:]) + require.NoError(t, err) + + qc := quorumCertificate{ + blockHash: blockHash, + view: view, + signersList: bitset.NewBitset().FromBytes([]byte{0x1}), // 0b0001, which represents validator 1 + aggregatedSignature: validator1Signature, + } + + require.False(t, IsValidSuperMajorityQuorumCertificate(&qc, validators)) + } + + // Test with 5/6 super-majority stake + { + validator1Signature, err := validatorPrivateKey1.Sign(signaturePayload[:]) + require.NoError(t, err) + + validator2Signature, err := validatorPrivateKey2.Sign(signaturePayload[:]) + require.NoError(t, err) + + aggregatedSignature, err := bls.AggregateSignatures([]*bls.Signature{validator1Signature, validator2Signature}) + require.NoError(t, err) + + qc := quorumCertificate{ + blockHash: blockHash, + view: view, + signersList: bitset.NewBitset().FromBytes([]byte{0x3}), // 0b0011, which represents validators 1 and 2 + aggregatedSignature: aggregatedSignature, + } + + require.True(t, IsValidSuperMajorityQuorumCertificate(&qc, validators)) + } +} + func TestIsProperlyFormedBlock(t *testing.T) { // Test nil block { @@ -164,6 
+241,64 @@ func TestIsProperlyFormedTimeout(t *testing.T) { } } +func TestIsSuperMajorityStake(t *testing.T) { + // Test nil values + { + require.False(t, isSuperMajorityStake(nil, nil)) + } + + // Test zero values + { + require.False(t, isSuperMajorityStake(uint256.NewInt(), uint256.NewInt())) + } + + // Test stake amount greater than total stake + { + require.False(t, isSuperMajorityStake(uint256.NewInt().SetUint64(2), uint256.NewInt().SetUint64(1))) + } + + // Test stake amount much less than super majority + { + stake := uint256.NewInt().SetUint64(1) + totalStake := uint256.NewInt().SetUint64(1000) + require.False(t, isSuperMajorityStake(stake, totalStake)) + } + + // Test stake amount less than super majority + { + stake := uint256.NewInt().SetUint64(666) + totalStake := uint256.NewInt().SetUint64(1000) + require.False(t, isSuperMajorityStake(stake, totalStake)) + } + + // Test stake amount equal to super majority + { + stake := uint256.NewInt().SetUint64(667) + totalStake := uint256.NewInt().SetUint64(1000) + require.True(t, isSuperMajorityStake(stake, totalStake)) + } + + // Test stake amount greater than super majority + { + stake := uint256.NewInt().SetUint64(668) + totalStake := uint256.NewInt().SetUint64(1000) + require.True(t, isSuperMajorityStake(stake, totalStake)) + } + + // Test stake amount much greater than super majority + { + stake := uint256.NewInt().SetUint64(999) + totalStake := uint256.NewInt().SetUint64(1000) + require.True(t, isSuperMajorityStake(stake, totalStake)) + } + + // Test stake amount equal to total stake + { + totalStake := uint256.NewInt().SetUint64(1000) + require.True(t, isSuperMajorityStake(totalStake, totalStake)) + } +} + func createDummyValidatorSet() []Validator { validators := []*validator{ { @@ -242,6 +377,11 @@ func createDummyBLSPublicKey() *bls.PublicKey { return blsPrivateKey.PublicKey() } +func createDummyBLSPrivateKey() *bls.PrivateKey { + blsPrivateKey, _ := bls.NewPrivateKey() + return blsPrivateKey +} + func 
createDummyBlockHash() *blockHash { return &blockHash{ value: [32]byte{ From 69b3da8b5f0843a98f58b75676dfb37101d4a3f6 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 26 Sep 2023 13:17:57 -0400 Subject: [PATCH 206/762] update-process-block-return-values (#687) --- lib/blockchain.go | 10 ++++++---- lib/blockchain_test.go | 16 ++++++++-------- lib/load_test.go | 4 ++-- lib/miner.go | 2 +- lib/server.go | 4 ++-- lib/txindex.go | 2 +- 6 files changed, 20 insertions(+), 18 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 6322c8c10..76786930e 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1847,23 +1847,25 @@ func (bc *Blockchain) ProcessHeader(blockHeader *MsgDeSoHeader, headerHash *Bloc return bc.processHeaderPoW(blockHeader, headerHash) } -func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, _err error) { +func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { bc.ChainLock.Lock() defer bc.ChainLock.Unlock() if desoBlock == nil { // If the block is nil then we return an error. Nothing we can do here. - return false, false, fmt.Errorf("ProcessBlock: Block is nil") + return false, false, nil, fmt.Errorf("ProcessBlock: Block is nil") } // If the block's height is after the PoS cut-over fork height, then we use the PoS block processing logic. Otherwise, fall back // to the PoW logic. 
if desoBlock.Header.Height >= uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { // TODO: call bc.processBlockPoS(desoBlock, verifySignatures) instead - return bc.processBlockPoW(desoBlock, verifySignatures) + isMainChain, isOrphan, err := bc.processBlockPoW(desoBlock, verifySignatures) + return isMainChain, isOrphan, nil, err } - return bc.processBlockPoW(desoBlock, verifySignatures) + isMainChain, isOrphan, err := bc.processBlockPoW(desoBlock, verifySignatures) + return isMainChain, isOrphan, nil, err } func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, err error) { diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index 26ae76d5d..4f85996ba 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -616,7 +616,7 @@ func TestBasicTransferReorg(t *testing.T) { // Process all of the fork blocks on the original chain to make it // experience a reorg. for _, forkBlock := range forkBlocks { - _, _, err := chain1.ProcessBlock(forkBlock, true /*verifySignatures*/) + _, _, _, err := chain1.ProcessBlock(forkBlock, true /*verifySignatures*/) require.NoError(err) } @@ -659,7 +659,7 @@ func _shouldConnectBlock(blk *MsgDeSoBlock, t *testing.T, chain *Blockchain) { blockHash, _ := blk.Hash() verifySignatures := true - isMainChain, isOrphan, err := chain.ProcessBlock(blk, verifySignatures) + isMainChain, isOrphan, _, err := chain.ProcessBlock(blk, verifySignatures) require.NoError(err) require.Falsef(isOrphan, "Block %v should not be an orphan", blockHash) require.Truef(isMainChain, "Block %v should be on the main chain", blockHash) @@ -824,7 +824,7 @@ func TestProcessBlockReorgBlocks(t *testing.T) { // Block b1 fmt.Println("Connecting block b1") require.Equal(uint64(3), GetUtxoNumEntries(db, chain.snapshot)) - isMainChain, isOrphan, err := chain.ProcessBlock(blockB1, verifySignatures) + isMainChain, isOrphan, _, err := chain.ProcessBlock(blockB1, verifySignatures) 
require.NoError(err) require.Falsef(isOrphan, "Block b1 should not be an orphan") require.Falsef(isMainChain, "Block b1 should not be on the main chain") @@ -840,7 +840,7 @@ func TestProcessBlockReorgBlocks(t *testing.T) { // Block b2 fmt.Println("Connecting block b2") require.Equal(uint64(3), GetUtxoNumEntries(db, chain.snapshot)) - isMainChain, isOrphan, err := chain.ProcessBlock(blockB2, verifySignatures) + isMainChain, isOrphan, _, err := chain.ProcessBlock(blockB2, verifySignatures) require.NoError(err) require.Falsef(isOrphan, "Block b2 should not be an orphan") require.Falsef(isMainChain, "Block b2 should not be on the main chain") @@ -1673,7 +1673,7 @@ func TestBadBlockSignature(t *testing.T) { // A bad signature with the right public key should fail. finalBlock1.BlockProducerInfo.PublicKey = senderPkBytes - _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, _, err = chain.ProcessBlock(finalBlock1, true) require.Error(err) require.Contains(err.Error(), RuleErrorInvalidBlockProducerSIgnature) @@ -1682,20 +1682,20 @@ func TestBadBlockSignature(t *testing.T) { require.NoError(err) finalBlock1.BlockProducerInfo.PublicKey = blockSignerPkBytes finalBlock1.BlockProducerInfo.Signature = nil - _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, _, err = chain.ProcessBlock(finalBlock1, true) require.Error(err) require.Contains(err.Error(), RuleErrorMissingBlockProducerSignature) // If all the BlockProducerInfo is missing, things should fail finalBlock1.BlockProducerInfo = nil - _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, _, err = chain.ProcessBlock(finalBlock1, true) require.Error(err) require.Contains(err.Error(), RuleErrorMissingBlockProducerSignature) // Now let's add blockSignerPK to the map of trusted keys and confirm that the block processes. 
chain.trustedBlockProducerPublicKeys[MakePkMapKey(blockSignerPkBytes)] = true finalBlock1.BlockProducerInfo = blockProducerInfoCopy - _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, _, err = chain.ProcessBlock(finalBlock1, true) require.NoError(err) _, _ = finalBlock1, db diff --git a/lib/load_test.go b/lib/load_test.go index af413511c..a8afc8718 100644 --- a/lib/load_test.go +++ b/lib/load_test.go @@ -190,7 +190,7 @@ func TestComputeMaxTPS(t *testing.T) { _, _ = newParams, newDB timeStart := time.Now() for _, blockToConnect := range blocksMined { - _, _, err := newChain.ProcessBlock(blockToConnect, true /*verifySignatures*/) + _, _, _, err := newChain.ProcessBlock(blockToConnect, true /*verifySignatures*/) require.NoError(err) } elapsedSecs := (time.Since(timeStart)).Seconds() @@ -237,7 +237,7 @@ func TestConnectBlocksLoadTest(t *testing.T) { pprof.StartCPUProfile(ff) timeStart := time.Now() for _, blockToConnect := range blocksMined { - _, _, err := newChain.ProcessBlock(blockToConnect, false /*verifySignatures*/) + _, _, _, err := newChain.ProcessBlock(blockToConnect, false /*verifySignatures*/) require.NoError(err) } elapsedSecs := (time.Since(timeStart)).Seconds() diff --git a/lib/miner.go b/lib/miner.go index 26fcf436b..6a2453486 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -239,7 +239,7 @@ func (desoMiner *DeSoMiner) MineAndProcessSingleBlock(threadIndex uint32, mempoo // will be informed about it. This will cause it to be relayed appropriately. verifySignatures := true // TODO(miner): Replace with a call to SubmitBlock. 
- isMainChain, isOrphan, err := desoMiner.BlockProducer.chain.ProcessBlock( + isMainChain, isOrphan, _, err := desoMiner.BlockProducer.chain.ProcessBlock( blockToMine, verifySignatures) glog.V(2).Infof("Called ProcessBlock: isMainChain=(%v), isOrphan=(%v), err=(%v)", isMainChain, isOrphan, err) diff --git a/lib/server.go b/lib/server.go index 8ea318fb1..9eda7b279 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1875,7 +1875,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { glog.V(1).Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlock: Processing block %v WITHOUT "+ "signature checking because SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp))) - _, isOrphan, err = srv.blockchain.ProcessBlock(blk, false) + _, isOrphan, _, err = srv.blockchain.ProcessBlock(blk, false) } else { // TODO: Signature checking slows things down because it acquires the ChainLock. @@ -1884,7 +1884,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { glog.V(1).Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlock: Processing block %v WITH "+ "signature checking because SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp))) - _, isOrphan, err = srv.blockchain.ProcessBlock(blk, true) + _, isOrphan, _, err = srv.blockchain.ProcessBlock(blk, true) } // If we hit an error then abort mission entirely. We should generally never diff --git a/lib/txindex.go b/lib/txindex.go index 3db429a83..0189d5a45 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -438,7 +438,7 @@ func (txi *TXIndex) Update() error { // Now that we have added all the txns to our TxIndex db, attach the block // to update our chain. 
- _, _, err = txi.TXIndexChain.ProcessBlock(blockMsg, false /*verifySignatures*/) + _, _, _, err = txi.TXIndexChain.ProcessBlock(blockMsg, false /*verifySignatures*/) if err != nil { return fmt.Errorf("Update: Problem attaching block %v: %v", blockToAttach, err) From 710f55864da1bacefc94a1843a5f7e38d6007ac5 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 26 Sep 2023 13:18:18 -0400 Subject: [PATCH 207/762] add-start-height-and-view-to-epoch-entry (#688) --- lib/pos_epoch.go | 24 +++++++++- lib/pos_epoch_complete_hook.go | 9 ++-- lib/pos_epoch_complete_hook_test.go | 71 +++++++++++++++++++++-------- lib/pos_epoch_test.go | 8 ++++ 4 files changed, 87 insertions(+), 25 deletions(-) diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index 2a7f8439f..de7034abd 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -13,8 +13,10 @@ import ( // type EpochEntry struct { - EpochNumber uint64 - FinalBlockHeight uint64 + EpochNumber uint64 + InitialBlockHeight uint64 + InitialView uint64 + FinalBlockHeight uint64 // This captures the on-chain timestamp when this epoch entry was created. This does not // represent the timestamp for first block of the epoch, but rather when this epoch entry @@ -25,6 +27,8 @@ type EpochEntry struct { func (epochEntry *EpochEntry) Copy() *EpochEntry { return &EpochEntry{ EpochNumber: epochEntry.EpochNumber, + InitialBlockHeight: epochEntry.InitialBlockHeight, + InitialView: epochEntry.InitialView, FinalBlockHeight: epochEntry.FinalBlockHeight, CreatedAtBlockTimestampNanoSecs: epochEntry.CreatedAtBlockTimestampNanoSecs, } @@ -33,6 +37,8 @@ func (epochEntry *EpochEntry) Copy() *EpochEntry { func (epochEntry *EpochEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte data = append(data, UintToBuf(epochEntry.EpochNumber)...) + data = append(data, UintToBuf(epochEntry.InitialBlockHeight)...) + data = append(data, UintToBuf(epochEntry.InitialView)...) 
data = append(data, UintToBuf(epochEntry.FinalBlockHeight)...) data = append(data, UintToBuf(epochEntry.CreatedAtBlockTimestampNanoSecs)...) return data @@ -47,6 +53,18 @@ func (epochEntry *EpochEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *b return errors.Wrapf(err, "EpochEntry.Decode: Problem reading EpochNumber: ") } + // InitialBlockHeight + epochEntry.InitialBlockHeight, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "EpochEntry.Decode: Problem reading InitialBlockHeight: ") + } + + // InitialView + epochEntry.InitialView, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "EpochEntry.Decode: Problem reading InitialView: ") + } + // FinalBlockHeight epochEntry.FinalBlockHeight, err = ReadUvarint(rr) if err != nil { @@ -99,6 +117,8 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { // case prior to the first execution of the OnEpochCompleteHook. genesisEpochEntry := &EpochEntry{ EpochNumber: 0, + InitialBlockHeight: 0, + InitialView: 0, FinalBlockHeight: uint64(bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight), CreatedAtBlockTimestampNanoSecs: 0, } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 9ecb90636..9a3a3f5dd 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -47,9 +47,10 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) // - Snapshot the current top N stake entries, who will receive staking rewards. // // Step 3: Roll over to the next epoch. +// - Compute the start block height and view number for the next epoch. // - Compute the final block height for the next epoch. // - Update CurrentEpochEntry to the next epoch's. 
-func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, blockTimestampNanoSecs uint64) error { +func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, view uint64, blockTimestampNanoSecs uint64) error { // Sanity-check that the current block is the last block in the current epoch. // // Note that this will also return true if we're currently at the ProofOfStake1StateSetupBlockHeight @@ -84,7 +85,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, blockTimestampNano // TODO: Evict old snapshots when safe to do so. // Step 3: Roll Over to The Next Epoch - if err := bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight, blockTimestampNanoSecs); err != nil { + if err := bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight, view, blockTimestampNanoSecs); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: ") } @@ -138,7 +139,7 @@ func (bav *UtxoView) runEpochCompleteSnapshotGeneration(epochNumber uint64) erro } // Updates the currentEpochEntry to the next epoch's. -func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeight uint64, blockTimestampNanoSecs uint64) error { +func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeight uint64, view uint64, blockTimestampNanoSecs uint64) error { // Retrieve the SnapshotGlobalParamsEntry to determine the next epoch's final block height. We use the // snapshot global params here because the next epoch begin immediately, and its length is used in the PoS // consensus. The validator set for the next epoch needs to be in agreement on the length of the epoch @@ -157,6 +158,8 @@ func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeig // Roll-over a new epoch by setting a new CurrentEpochEntry. 
nextEpochEntry := &EpochEntry{ EpochNumber: epochNumber + 1, + InitialBlockHeight: blockHeight + 1, + InitialView: view + 1, FinalBlockHeight: nextEpochFinalBlockHeight, CreatedAtBlockTimestampNanoSecs: blockTimestampNanoSecs, } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 269c25486..16fe99fa2 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -84,6 +84,11 @@ func TestRunEpochCompleteHook(t *testing.T) { blockHeight += 1 return blockHeight } + viewNumber := uint64(0) + incrViewNumber := func() uint64 { + viewNumber += 1 + return viewNumber + } // Seed a CurrentEpochEntry. tmpUtxoView := _newUtxoView(testMeta) @@ -140,7 +145,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook() with no validators or stakers. - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { // Test the state of the snapshots after running our first OnEpochCompleteHook @@ -151,6 +156,13 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(1)) + // Test CurrentEpochEntry + currentEpoch, err := _newUtxoView(testMeta).GetCurrentEpochEntry() + require.NoError(t, err) + require.NotNil(t, currentEpoch) + require.Equal(t, currentEpoch.InitialBlockHeight, uint64(13)) + require.Equal(t, currentEpoch.InitialView, uint64(2)) + // Test SnapshotGlobalParamsEntry is nil. snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() require.NoError(t, err) @@ -182,7 +194,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { // Test CurrentEpochNumber. 
@@ -190,6 +202,13 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(2)) + // Test CurrentEpochEntry + currentEpoch, err := _newUtxoView(testMeta).GetCurrentEpochEntry() + require.NoError(t, err) + require.NotNil(t, currentEpoch) + require.Equal(t, currentEpoch.InitialBlockHeight, uint64(14)) + require.Equal(t, currentEpoch.InitialView, uint64(3)) + // Test SnapshotGlobalParamsEntry is populated. snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() require.NoError(t, err) @@ -203,7 +222,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { // Test CurrentEpochNumber. @@ -211,6 +230,13 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(3)) + // Test CurrentEpochEntry + currentEpoch, err := _newUtxoView(testMeta).GetCurrentEpochEntry() + require.NoError(t, err) + require.NotNil(t, currentEpoch) + require.Equal(t, currentEpoch.InitialBlockHeight, uint64(15)) + require.Equal(t, currentEpoch.InitialView, uint64(4)) + // Test SnapshotGlobalParamsEntry is populated. snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() require.NoError(t, err) @@ -275,7 +301,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(800)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // Snapshot m5 still has 600 staked. 
validatorEntry, err = _newUtxoView(testMeta).GetSnapshotValidatorSetEntryByPKID(m5PKID) @@ -291,7 +317,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(600)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // Snapshot m5 now has 800 staked. validatorEntry, err = _newUtxoView(testMeta).GetSnapshotValidatorSetEntryByPKID(m5PKID) @@ -325,7 +351,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, _newUtxoView(testMeta).GlobalParamsEntry.StakeLockupEpochDuration, uint64(2)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // Snapshot StakeLockupEpochDuration is still 3. snapshotGlobalsParamsEntry, err = _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() @@ -333,7 +359,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, snapshotGlobalsParamsEntry.StakeLockupEpochDuration, uint64(3)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // Snapshot StakeLockupEpochDuration is updated to 2. snapshotGlobalsParamsEntry, err = _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() @@ -352,7 +378,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // m0 is still in the snapshot validator set. snapshotValidatorSet, err = _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(10) @@ -364,7 +390,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Len(t, snapshotStakeEntries, 7) // Run OnEpochCompleteHook(). 
- _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // m0 is dropped from the snapshot validator set. snapshotValidatorSet, err = _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(10) @@ -377,11 +403,11 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { // Run OnEpochCompleteHook() - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { // Test jailing inactive validators. @@ -440,7 +466,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getNumSnapshotStakes(), 6) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // In epoch 12, all current registered validators have Status = Jailed. // In snapshot 10, all snapshot validators have Status = Active. @@ -454,7 +480,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getCurrentValidator(m6PKID).JailedAtEpochNumber, uint64(11)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // In epoch 13, all current registered validators have Status = Jailed. // In snapshot 11, the validator set is empty because all validators have Status = Jailed. @@ -465,7 +491,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Empty(t, getNumSnapshotStakes()) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // In epoch 14, all current registered validators have Status = Jailed. // In snapshot 12, the validator set is empty because all validators have Status = Jailed. 
@@ -502,6 +528,11 @@ func TestStakingRewardDistribution(t *testing.T) { blockHeight += 1 return blockHeight } + viewNumber := uint64(0) + incrViewNumber := func() uint64 { + viewNumber += 1 + return viewNumber + } // Seed a CurrentEpochEntry. tmpUtxoView := _newUtxoView(testMeta) @@ -558,12 +589,12 @@ func TestStakingRewardDistribution(t *testing.T) { { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { @@ -611,7 +642,7 @@ func TestStakingRewardDistribution(t *testing.T) { { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { @@ -723,7 +754,7 @@ func TestStakingRewardDistribution(t *testing.T) { { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { @@ -836,12 +867,12 @@ func _newUtxoView(testMeta *TestMeta) *UtxoView { return newUtxoView } -func _runOnEpochCompleteHook(testMeta *TestMeta, blockHeight uint64) { +func _runOnEpochCompleteHook(testMeta *TestMeta, blockHeight uint64, viewNumber uint64) { tmpUtxoView := _newUtxoView(testMeta) // Set blockTimestampNanoSecs to 1 year * block height. 
Every time the block height increments, // the timestamp increases by 1 year blockTimestampNanoSecs := blockHeight * 365 * 24 * 3600 * 1e9 - require.NoError(testMeta.t, tmpUtxoView.RunEpochCompleteHook(blockHeight, blockTimestampNanoSecs)) + require.NoError(testMeta.t, tmpUtxoView.RunEpochCompleteHook(blockHeight, viewNumber, blockTimestampNanoSecs)) require.NoError(testMeta.t, tmpUtxoView.FlushToDb(blockHeight)) } diff --git a/lib/pos_epoch_test.go b/lib/pos_epoch_test.go index 7ea66a52a..0c9f85d18 100644 --- a/lib/pos_epoch_test.go +++ b/lib/pos_epoch_test.go @@ -35,6 +35,8 @@ func TestCurrentEpoch(t *testing.T) { // Set the CurrentEpoch. epochEntry = &EpochEntry{ EpochNumber: 1, + InitialBlockHeight: blockHeight + 1, + InitialView: 1, FinalBlockHeight: blockHeight + 5, CreatedAtBlockTimestampNanoSecs: blockTimestampNanoSecs + 5*1e9, } @@ -46,6 +48,8 @@ func TestCurrentEpoch(t *testing.T) { require.NoError(t, err) require.NotNil(t, epochEntry) require.Equal(t, epochEntry.EpochNumber, uint64(1)) + require.Equal(t, epochEntry.InitialBlockHeight, blockHeight+1) + require.Equal(t, epochEntry.InitialView, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) @@ -57,6 +61,8 @@ func TestCurrentEpoch(t *testing.T) { require.NoError(t, err) require.NotNil(t, epochEntry) require.Equal(t, epochEntry.EpochNumber, uint64(1)) + require.Equal(t, epochEntry.InitialBlockHeight, blockHeight+1) + require.Equal(t, epochEntry.InitialView, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) @@ -64,6 +70,8 @@ func TestCurrentEpoch(t *testing.T) { epochEntry = utxoView.CurrentEpochEntry require.NotNil(t, epochEntry) require.Equal(t, epochEntry.EpochNumber, uint64(1)) + require.Equal(t, epochEntry.InitialBlockHeight, blockHeight+1) + require.Equal(t, 
epochEntry.InitialView, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) From 603362667bfc79c34da5e729ba3e401b76cc1b3f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 27 Sep 2023 18:38:30 -0400 Subject: [PATCH 208/762] Create AggregateQuorumCertificate interface type (#697) * Create AggregatedQuorumCertificate type * aggregateQuorumCertificate * Rename file to types_internal.go --- consensus/types.go | 13 ++++- .../{types_test.go => types_internal.go} | 51 ++++++++++++++++--- consensus/utils.go | 24 +++++---- consensus/utils_test.go | 33 +++++++----- 4 files changed, 90 insertions(+), 31 deletions(-) rename consensus/{types_test.go => types_internal.go} (69%) diff --git a/consensus/types.go b/consensus/types.go index 141e55acd..969991813 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -48,11 +48,22 @@ type Validator interface { GetStakeAmount() *uint256.Int } +type AggregateQuorumCertificate interface { + GetView() uint64 + GetHighQC() QuorumCertificate + GetHighQCViews() []uint64 + GetAggregatedSignature() AggregatedSignature +} + type QuorumCertificate interface { GetBlockHash() BlockHash GetView() uint64 + GetAggregatedSignature() AggregatedSignature +} + +type AggregatedSignature interface { GetSignersList() *bitset.Bitset - GetAggregatedSignature() *bls.Signature + GetSignature() *bls.Signature } type VoteMessage interface { diff --git a/consensus/types_test.go b/consensus/types_internal.go similarity index 69% rename from consensus/types_test.go rename to consensus/types_internal.go index 952745e60..260227317 100644 --- a/consensus/types_test.go +++ b/consensus/types_internal.go @@ -35,6 +35,33 @@ func (v *validator) GetStakeAmount() *uint256.Int { return v.stakeAmount } +//////////////////////////////////////////////////////////////////////// +// AggregateQuorumCertificate interface 
implementation for internal use +//////////////////////////////////////////////////////////////////////// + +type aggregateQuorumCertificate struct { + view uint64 + highQC QuorumCertificate + highQCViews []uint64 + aggregatedSignature AggregatedSignature +} + +func (qc *aggregateQuorumCertificate) GetView() uint64 { + return qc.view +} + +func (qc *aggregateQuorumCertificate) GetHighQC() QuorumCertificate { + return qc.highQC +} + +func (qc *aggregateQuorumCertificate) GetHighQCViews() []uint64 { + return qc.highQCViews +} + +func (qc *aggregateQuorumCertificate) GetAggregatedSignature() AggregatedSignature { + return qc.aggregatedSignature +} + ////////////////////////////////////////////////////////// // QuorumCertificate interface implementation for testing ////////////////////////////////////////////////////////// @@ -42,8 +69,7 @@ func (v *validator) GetStakeAmount() *uint256.Int { type quorumCertificate struct { blockHash *blockHash view uint64 - signersList *bitset.Bitset - aggregatedSignature *bls.Signature + aggregatedSignature AggregatedSignature } func (qc *quorumCertificate) GetBlockHash() BlockHash { @@ -54,12 +80,25 @@ func (qc *quorumCertificate) GetView() uint64 { return qc.view } -func (qc *quorumCertificate) GetSignersList() *bitset.Bitset { - return qc.signersList +func (qc *quorumCertificate) GetAggregatedSignature() AggregatedSignature { + return qc.aggregatedSignature } -func (qc *quorumCertificate) GetAggregatedSignature() *bls.Signature { - return qc.aggregatedSignature +////////////////////////////////////////////////////////// +// AggregatedSignature interface implementation for testing +////////////////////////////////////////////////////////// + +type aggregatedSignature struct { + signersList *bitset.Bitset + signature *bls.Signature +} + +func (as *aggregatedSignature) GetSignersList() *bitset.Bitset { + return as.signersList +} + +func (as *aggregatedSignature) GetSignature() *bls.Signature { + return as.signature } 
////////////////////////////////////////////////////////// diff --git a/consensus/utils.go b/consensus/utils.go index 1645ff439..3c69ba801 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -27,9 +27,12 @@ func IsValidSuperMajorityQuorumCertificate(qc QuorumCertificate, validators []Va // Fetch the validators in the QC validatorPublicKeysInQC := []*bls.PublicKey{} + // Fetch the aggregated signature in the QC + aggregatedSignature := qc.GetAggregatedSignature() + // Fetch the validators in the QC, and compute the sum of stake in the QC and in the network for ii := range validators { - if qc.GetSignersList().Get(ii) { + if aggregatedSignature.GetSignersList().Get(ii) { stakeInQC.Add(stakeInQC, validators[ii].GetStakeAmount()) validatorPublicKeysInQC = append(validatorPublicKeysInQC, validators[ii].GetPublicKey()) } @@ -42,7 +45,7 @@ func IsValidSuperMajorityQuorumCertificate(qc QuorumCertificate, validators []Va } // Finally, validate the signature - return isValidSignatureManyPublicKeys(validatorPublicKeysInQC, qc.GetAggregatedSignature(), signaturePayload[:]) + return isValidSignatureManyPublicKeys(validatorPublicKeysInQC, aggregatedSignature.GetSignature(), signaturePayload[:]) } // When voting on a block, validators sign the payload sha3-256(View, BlockHash) with their BLS @@ -161,22 +164,21 @@ func isProperlyFormedQC(qc QuorumCertificate) bool { return false } - // The block hash must be non-nil - if isInterfaceNil(qc.GetBlockHash()) { + // The view must be non-zero and the block hash must be non-nil + if qc.GetView() == 0 || isInterfaceNil(qc.GetBlockHash()) { return false } - // The view must be non-zero and the aggregated signature non-nil - if qc.GetView() == 0 || isInterfaceNil(qc.GetAggregatedSignature()) { - return false - } + return isProperlyFormedAggregateSignature(qc.GetAggregatedSignature()) +} - // The signers list must be non-nil - if qc.GetSignersList() == nil { +func isProperlyFormedAggregateSignature(agg AggregatedSignature) bool { + 
// The signature must be non-nil + if isInterfaceNil(agg) { return false } - return true + return agg.GetSignersList() != nil && agg.GetSignature() != nil } // golang interface types are stored as a tuple of (type, value). A single i==nil check is not enough to diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 10e769775..0995a750e 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -58,10 +58,12 @@ func TestIsValidSuperMajorityQuorumCertificate(t *testing.T) { require.NoError(t, err) qc := quorumCertificate{ - blockHash: blockHash, - view: view, - signersList: bitset.NewBitset().FromBytes([]byte{0x1}), // 0b0001, which represents validator 1 - aggregatedSignature: validator1Signature, + blockHash: blockHash, + view: view, + aggregatedSignature: &aggregatedSignature{ + signersList: bitset.NewBitset().FromBytes([]byte{0x1}), // 0b0001, which represents validator 1 + signature: validator1Signature, + }, } require.False(t, IsValidSuperMajorityQuorumCertificate(&qc, validators)) @@ -75,14 +77,17 @@ func TestIsValidSuperMajorityQuorumCertificate(t *testing.T) { validator2Signature, err := validatorPrivateKey2.Sign(signaturePayload[:]) require.NoError(t, err) - aggregatedSignature, err := bls.AggregateSignatures([]*bls.Signature{validator1Signature, validator2Signature}) + // Aggregate the two validators' signatures + signature, err := bls.AggregateSignatures([]*bls.Signature{validator1Signature, validator2Signature}) require.NoError(t, err) qc := quorumCertificate{ - blockHash: blockHash, - view: view, - signersList: bitset.NewBitset().FromBytes([]byte{0x3}), // 0b0011, which represents validators 1 and 2 - aggregatedSignature: aggregatedSignature, + blockHash: blockHash, + view: view, + aggregatedSignature: &aggregatedSignature{ + signersList: bitset.NewBitset().FromBytes([]byte{0x3}), // 0b0011, which represents validators 1 and 2 + signature: signature, + }, } require.True(t, IsValidSuperMajorityQuorumCertificate(&qc, validators)) 
@@ -358,10 +363,12 @@ func createDummyTimeoutMessage(view uint64) *timeoutMessage { func createDummyQC(view uint64) *quorumCertificate { return &quorumCertificate{ - blockHash: createDummyBlockHash(), - view: view, - signersList: bitset.NewBitset().FromBytes([]byte{0x3}), - aggregatedSignature: createDummyBLSSignature(), + blockHash: createDummyBlockHash(), + view: view, + aggregatedSignature: &aggregatedSignature{ + signersList: bitset.NewBitset().FromBytes([]byte{0x3}), + signature: createDummyBLSSignature(), + }, } } From 30752737349e54577bf4b059f576bca6af92fd36 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 28 Sep 2023 10:02:28 -0400 Subject: [PATCH 209/762] Generate random bytes for test block hash (#698) --- consensus/event_loop_test.go | 8 +++++--- consensus/utils_test.go | 17 +++++++++++------ 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index a7c2eb703..064a9a488 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -48,8 +48,10 @@ func TestFastHotStuffInitialization(t *testing.T) { // Test Init() function with valid parameters { + block := createDummyBlock(2) + fc := NewFastHotStuffEventLoop() - err := fc.Init(100, 101, createDummyBlock(2), createDummyValidatorSet()) + err := fc.Init(100, 101, block, createDummyValidatorSet()) require.NoError(t, err) require.Equal(t, consensusStatusInitialized, fc.status) @@ -57,7 +59,7 @@ func TestFastHotStuffInitialization(t *testing.T) { require.NotPanics(t, fc.Stop) // Calling Stop() on an initialized instance should be a no-op require.Equal(t, fc.status, consensusStatusInitialized) - require.Equal(t, fc.chainTip.GetBlockHash().GetValue(), createDummyBlockHash().GetValue()) + require.Equal(t, fc.chainTip.GetBlockHash().GetValue(), block.GetBlockHash().GetValue()) require.Equal(t, fc.chainTip.GetView(), uint64(2)) require.Equal(t, fc.chainTip.GetHeight(), 
uint64(1)) @@ -151,7 +153,7 @@ func TestFastHotStuffProcessSafeBlock(t *testing.T) { err := fc.ProcessSafeBlock(nextBlock, createDummyValidatorSet()) require.NoError(t, err) - require.Equal(t, createDummyBlockHash().GetValue(), fc.chainTip.GetBlockHash().GetValue()) + require.Equal(t, nextBlock.GetBlockHash().GetValue(), fc.chainTip.GetBlockHash().GetValue()) require.Equal(t, uint64(3), fc.chainTip.GetView()) require.Equal(t, uint64(2), fc.chainTip.GetHeight()) diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 0995a750e..47c467d55 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -3,6 +3,7 @@ package consensus import ( + "crypto/rand" "testing" "github.com/deso-protocol/core/bls" @@ -390,12 +391,16 @@ func createDummyBLSPrivateKey() *bls.PrivateKey { } func createDummyBlockHash() *blockHash { + byteArray := [32]byte{} + copy(byteArray[:], generateRandomBytes(32)) + return &blockHash{ - value: [32]byte{ - 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0xf, - 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, - 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, - }, + value: byteArray, } } + +func generateRandomBytes(numBytes int) []byte { + randomBytes := make([]byte, numBytes) + rand.Read(randomBytes) + return randomBytes +} From 4f262b64f756cbc26f2acec80959eb2e6c8c89e3 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 28 Sep 2023 17:03:33 -0400 Subject: [PATCH 210/762] Pass In All Safe Blocks to Fast-HotStuff Event Loop (#685) * Pass In All Safe Blocks to Fast-HotStuff Event Loop * Address Nina's feedback * Address Piotr's feedback --- collections/slice.go | 8 ++ consensus/event_loop.go | 161 ++++++++++++++++++++++------------- consensus/event_loop_test.go | 160 +++++++++++++++++++++++++++------- consensus/types.go | 22 +++-- consensus/types_internal.go | 10 +++ lib/server.go | 5 -- 6 files changed, 264 insertions(+), 102 deletions(-) diff --git 
a/collections/slice.go b/collections/slice.go index 112b28d1a..4bd6cc133 100644 --- a/collections/slice.go +++ b/collections/slice.go @@ -16,3 +16,11 @@ func Transform[TInput any, TOutput any](slice []TInput, transformFn func(TInput) } return result } + +func ToMap[TKey comparable, TValue any](slice []TValue, keyFn func(TValue) TKey) map[TKey]TValue { + result := make(map[TKey]TValue) + for _, val := range slice { + result[keyFn(val)] = val + } + return result +} diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 09e00333a..3f4627229 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -6,6 +6,7 @@ import ( "github.com/pkg/errors" "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections" ) func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { @@ -21,18 +22,20 @@ func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { // params: // - blockConstructionInterval: block construction duration must be > 0 // - timeoutBaseDuration: timeout base duration must be > 0 -// - chainTip: the input block must have a valid block hash, block height, view, and QC -// - validators: the validators must be sorted in decreasing order of stake, with a -// consistent tie breaking scheme. The validator set is expected to be valid for -// validating votes and timeouts for the next block height. +// - tip: the current tip of the blockchain, with the validator set at that block height. This may +// be a committed or uncommitted block. +// - safeBlocks: an unordered slice of blocks including the committed tip, the uncommitted tip, +// all ancestors of the uncommitted tip that are safe to extend from, and all blocks from forks +// that are safe to extend from. This function does not validate the collection of blocks. It +// expects caller to know and decide what blocks are safe to extend from. 
// -// Given the above, This function updates the chain tip internally, and re-initializes all internal -// data structures that are used to track incoming votes and timeout messages for QC construction. +// Given the above, This function updates the tip internally, stores the safe blocks, and re-initializes +// all internal data structures that are used to track incoming votes and timeout messages for QC construction. func (fc *FastHotStuffEventLoop) Init( blockConstructionInterval time.Duration, timeoutBaseDuration time.Duration, - chainTip Block, - validators []Validator, + tip BlockWithValidators, + safeBlocks []BlockWithValidators, ) error { // Grab the consensus instance's lock fc.lock.Lock() @@ -51,20 +54,13 @@ func (fc *FastHotStuffEventLoop) Init( return errors.New("FastHotStuffEventLoop.Init: Timeout base duration must be > 0") } - // Validate the integrity of the block - if !isProperlyFormedBlock(chainTip) { - return errors.New("FastHotStuffEventLoop.Init: Invalid block") + // Validate the safe blocks and validator sets, and store them + if err := fc.storeBlocks(tip, safeBlocks); err != nil { + return errors.Wrap(err, "FastHotStuffEventLoop.Init: ") } - // Validate the integrity of the validator set - if !isProperlyFormedValidatorSet(validators) { - return errors.New("FastHotStuffEventLoop.Init: Invalid validator set") - } - - // Update the latest safe block and validator set - fc.chainTip = chainTip - fc.currentView = chainTip.GetView() + 1 - fc.validatorsAtChainTip = validators + // We track the current view here so we know which view to time out on later on. + fc.currentView = tip.Block.GetView() + 1 // Reset all internal data structures for votes and timeouts fc.votesSeen = make(map[[32]byte]map[string]VoteMessage) @@ -83,9 +79,9 @@ func (fc *FastHotStuffEventLoop) Init( return nil } -// AdvanceView is called when the chain tip has not changed but the consensus instance has signaled a -// timeout, and can advance to the next view. 
This function resets the timeout scheduled task and block -// production scheduled task for the next view. +// AdvanceView is called when the tip has not changed but the consensus instance has timed out. This +// function advances the view and resets the timeout scheduled task and block production scheduled +// tasks. func (fc *FastHotStuffEventLoop) AdvanceView() (uint64, error) { // Grab the consensus instance's lock fc.lock.Lock() @@ -109,41 +105,34 @@ func (fc *FastHotStuffEventLoop) AdvanceView() (uint64, error) { return fc.currentView, nil } -// ProcessSafeBlock must only be called when the caller has accepted a new block, connected it +// ProcessTipBlock must only be called when the caller has accepted a new block, connected it // to the tip of the blockchain, and determined that the block is safe to vote on. Given such a // block, this function resets internal state and schedules the next block construction and timeout -// determine the next action. The functions expects the following for the input params: -// - block: the input block that was safely added to the blockchain and is safe to vote on -// - validators: the validator set for the next block height -func (fc *FastHotStuffEventLoop) ProcessSafeBlock(block Block, validators []Validator) error { +// timers. +// +// Expected params: +// - tip: the current uncommitted tip of the blockchain, with the validator set at that block height +// - safeBlocks: an unordered slice of blocks including the committed tip, the uncommitted tip, +// all ancestors of the uncommitted tip that are safe to extend from, and all blocks from forks +// that are safe to extend from. This function does not validate the collection of blocks. It +// expects the caller to know and decide what blocks are safe to extend from. 
+func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidators, safeBlocks []BlockWithValidators) error { // Grab the consensus instance's lock fc.lock.Lock() defer fc.lock.Unlock() // Ensure the consensus instance is running if fc.status != consensusStatusRunning { - return errors.New("FastHotStuffEventLoop.ProcessSafeBlock: Consensus instance is not running") + return errors.New("FastHotStuffEventLoop.ProcessTipBlock: Consensus instance is not running") } - // Do a basic integrity check on the block - if !isProperlyFormedBlock(block) { - return errors.New("FastHotStuffEventLoop.ProcessSafeBlock: Invalid block") + // Validate the safe blocks and validator sets, and store them + if err := fc.storeBlocks(tip, safeBlocks); err != nil { + return errors.Wrap(err, "FastHotStuffEventLoop.ProcessTipBlock: ") } - // Do a basic integrity check on the validator set - if !isProperlyFormedValidatorSet(validators) { - return errors.New("FastHotStuffEventLoop.ProcessSafeBlock: Invalid validator set") - } - - // Update the chain tip and validator set - fc.chainTip = block - // We track the current view here so we know which view to time out on later on. - fc.currentView = block.GetView() + 1 - - // Update the validator set so we know when we have a QC from votes at the next block height - // and view. - fc.validatorsAtChainTip = validators + fc.currentView = fc.tip.block.GetView() + 1 // Evict all stale votes and timeouts fc.evictStaleVotesAndTimeouts() @@ -152,9 +141,9 @@ func (fc *FastHotStuffEventLoop) ProcessSafeBlock(block Block, validators []Vali // broadcast the vote. 
fc.ConsensusEvents <- &ConsensusEvent{ EventType: ConsensusEventTypeVote, - BlockHash: fc.chainTip.GetBlockHash(), - BlockHeight: fc.chainTip.GetHeight(), - View: fc.chainTip.GetView(), + BlockHash: fc.tip.block.GetBlockHash(), + BlockHeight: fc.tip.block.GetHeight(), + View: fc.tip.block.GetView(), } // Schedule the next block construction and timeout scheduled tasks @@ -163,6 +152,47 @@ func (fc *FastHotStuffEventLoop) ProcessSafeBlock(block Block, validators []Vali return nil } +// storeBlocks is a helper function that validates the provided blocks, validator sets, and stores them. +// It must be called while holding the consensus instance's lock. +func (fc *FastHotStuffEventLoop) storeBlocks(tip BlockWithValidators, safeBlocks []BlockWithValidators) error { + // Do a basic integrity check on the tip block and validator set + if !isProperlyFormedBlock(tip.Block) || !isProperlyFormedValidatorSet(tip.Validators) { + return errors.New("Invalid tip block or validator set") + } + + // Do a basic integrity check on the blocks and validator sets + hasMalformedInput := collections.Any(safeBlocks, func(block BlockWithValidators) bool { + return !isProperlyFormedBlock(block.Block) || !isProperlyFormedValidatorSet(block.Validators) + }) + + // There must be at least one block + if len(safeBlocks) == 0 || hasMalformedInput { + return errors.New("Invalid safe blocks or validator sets") + } + + // Store the tip block and validator set + fc.tip = blockWithValidatorLookup{ + block: tip.Block, + validatorSet: tip.Validators, + validatorLookup: collections.ToMap(tip.Validators, func(validator Validator) string { + return validator.GetPublicKey().ToString() + }), + } + + // Store the blocks and validator sets + fc.safeBlocks = collections.Transform(safeBlocks, func(block BlockWithValidators) blockWithValidatorLookup { + return blockWithValidatorLookup{ + block: block.Block, + validatorSet: block.Validators, + validatorLookup: collections.ToMap(block.Validators, func(validator 
Validator) string { + return validator.GetPublicKey().ToString() + }), + } + }) + + return nil +} + // ProcessValidatorVote captures an incoming vote message from a validator. This module has no knowledge // of who the leader is for a given view, so it is up to the caller to decide whether to process the vote // message or not. If a vote message is passed here, then the consensus instance will store it until @@ -369,16 +399,16 @@ func (fc *FastHotStuffEventLoop) resetScheduledTasks() { // multiple consecutive timeouts. We use the difference between the current view and the // chain tip's view to determine this. The current view can only drift from the chain tip's // view as a result of timeouts. This guarantees that the number of consecutive timeouts is - // always: max(currentView - chainTip.GetView() - 1, 0). + // always: max(currentView - tip.block.GetView() - 1, 0). timeoutDuration := fc.timeoutBaseDuration - // Check if we have timed out at all for the last n view. If so, we apply exponential + // Check if we have timed out at for the last n views. If so, we apply exponential // back-off to the timeout base duration. - if fc.chainTip.GetView() < fc.currentView-1 { + if fc.tip.block.GetView() < fc.currentView-1 { // Note, there is no risk of underflow here because the following is guaranteed: - // currentView > chainTip.GetView() + 1. - numTimeouts := fc.currentView - fc.chainTip.GetView() - 1 + // currentView > tip.block.GetView() + 1. 
+ numTimeouts := fc.currentView - fc.tip.block.GetView() - 1 // Compute the exponential back-off: nextTimeoutDuration * 2^numTimeouts timeoutDuration = fc.timeoutBaseDuration << numTimeouts @@ -417,9 +447,9 @@ func (fc *FastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uin // Signal the server that we are ready to time out fc.ConsensusEvents <- &ConsensusEvent{ - EventType: ConsensusEventTypeTimeout, // The timeout event type - View: timedOutView, // The view we timed out - BlockHash: fc.chainTip.GetBlockHash(), // The last block we saw + EventType: ConsensusEventTypeTimeout, // The timeout event type + View: timedOutView, // The view we timed out + BlockHash: fc.tip.block.GetBlockHash(), // The last block we saw } // Cancel the timeout task. The server will reschedule it when it advances the view. @@ -439,7 +469,7 @@ func (fc *FastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uin // // The eviction works as follows: // - Votes: if the next block were to be a regular block with a QC aggregated from votes, then the it must -// satisfy nextBlock.GetView() = chainTip.GetView() + 1, which means that currentView = chainTip.GetView() + 1. +// satisfy nextBlock.GetView() = tip.block.GetView() + 1, which means that currentView = tip.block.GetView() + 1. // We can safely evict all votes where vote.GetView() < currentView - 1. // - Timeouts: if the next block were be an empty block with a timeout QC aggregated from timeout messages, // then it must satisfy nextBlock.GetView() = timeout.GetView() + 1. We can safely evict all timeout messages with @@ -521,6 +551,23 @@ func (fc *FastHotStuffEventLoop) hasTimedOutForView(publicKey *bls.PublicKey, vi return ok } +func (fc *FastHotStuffEventLoop) getBlockAndValidatorSetByHash(blockHash BlockHash) ( + bool, Block, []Validator, map[string]Validator, +) { + // A linear search here is fine. 
The safeBlocks slice is expected to be extremely small as it represents the + // number of uncommitted blocks in the blockchain. During steady state, it will have a size of 3 blocks + // (one committed, two uncommitted). In the worst case, where the network has an unlucky series of + // timeout -> block -> timeout -> block,... it can still be expected to have < 10 blocks. + blockHashValue := blockHash.GetValue() + for _, block := range fc.safeBlocks { + if block.block.GetBlockHash().GetValue() == blockHashValue { + return true, block.block, block.validatorSet, block.validatorLookup + } + } + + return false, nil, nil, nil +} + func isStaleView(currentView uint64, testView uint64) bool { return currentView > testView+1 } diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 064a9a488..fb237dfb0 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestFastHotStuffInitialization(t *testing.T) { +func TestInit(t *testing.T) { // Test initial status for newly constructed instance { @@ -18,31 +18,63 @@ func TestFastHotStuffInitialization(t *testing.T) { require.NotPanics(t, fc.Stop) // Calling Stop() on an uninitialized instance should be a no-op } - // Test Init() function with invalid block construction cadence + // Test Init() function with invalid block construction interval { fc := NewFastHotStuffEventLoop() - err := fc.Init(0, 1, createDummyBlock(2), createDummyValidatorSet()) + err := fc.Init(0, 1, + BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip + []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + ) require.Error(t, err) } // Test Init() function with invalid timeout duration { fc := NewFastHotStuffEventLoop() - err := fc.Init(1, 0, createDummyBlock(2), createDummyValidatorSet()) + err := fc.Init(1, 0, + BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip + 
[]BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + ) require.Error(t, err) } - // Test Init() function with malformed block + // Test Init() function with malformed tip block { fc := NewFastHotStuffEventLoop() - err := fc.Init(1, 1, nil, createDummyValidatorSet()) + err := fc.Init(1, 1, + BlockWithValidators{nil, createDummyValidatorSet()}, // tip + []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + ) require.Error(t, err) } - // Test Init() function with malformed validator set + // Test Init() function with malformed validator set for tip block { fc := NewFastHotStuffEventLoop() - err := fc.Init(1, 1, createDummyBlock(2), nil) + err := fc.Init(1, 1, + BlockWithValidators{createDummyBlock(2), nil}, // tip + []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + ) + require.Error(t, err) + } + + // Test Init() function with malformed safe block + { + fc := NewFastHotStuffEventLoop() + err := fc.Init(1, 1, + BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip + []BlockWithValidators{{nil, createDummyValidatorSet()}}, // safeBlocks + ) + require.Error(t, err) + } + + // Test Init() function with malformed validator set for safe block + { + fc := NewFastHotStuffEventLoop() + err := fc.Init(1, 1, + BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip + []BlockWithValidators{{createDummyBlock(2), nil}}, // safeBlocks + ) require.Error(t, err) } @@ -51,7 +83,10 @@ func TestFastHotStuffInitialization(t *testing.T) { block := createDummyBlock(2) fc := NewFastHotStuffEventLoop() - err := fc.Init(100, 101, block, createDummyValidatorSet()) + err := fc.Init(100, 101, + BlockWithValidators{block, createDummyValidatorSet()}, // tip + []BlockWithValidators{{block, createDummyValidatorSet()}}, // safeBlocks + ) require.NoError(t, err) require.Equal(t, consensusStatusInitialized, fc.status) @@ -59,43 +94,81 @@ func 
TestFastHotStuffInitialization(t *testing.T) { require.NotPanics(t, fc.Stop) // Calling Stop() on an initialized instance should be a no-op require.Equal(t, fc.status, consensusStatusInitialized) - require.Equal(t, fc.chainTip.GetBlockHash().GetValue(), block.GetBlockHash().GetValue()) - require.Equal(t, fc.chainTip.GetView(), uint64(2)) - require.Equal(t, fc.chainTip.GetHeight(), uint64(1)) + require.Equal(t, fc.tip.block.GetBlockHash().GetValue(), block.GetBlockHash().GetValue()) + require.Equal(t, fc.tip.block.GetView(), uint64(2)) + require.Equal(t, fc.tip.block.GetHeight(), uint64(1)) require.Equal(t, fc.blockConstructionInterval, time.Duration(100)) require.Equal(t, fc.timeoutBaseDuration, time.Duration(101)) require.Equal(t, fc.currentView, uint64(3)) - require.Equal(t, len(fc.validatorsAtChainTip), 2) + require.Equal(t, len(fc.tip.validatorSet), 2) + require.Equal(t, len(fc.tip.validatorLookup), 2) + + require.Equal(t, len(fc.safeBlocks), 1) + require.Equal(t, fc.safeBlocks[0].block.GetBlockHash().GetValue(), block.GetBlockHash().GetValue()) + require.Equal(t, fc.safeBlocks[0].block.GetView(), uint64(2)) + require.Equal(t, fc.safeBlocks[0].block.GetHeight(), uint64(1)) + require.Equal(t, len(fc.safeBlocks[0].validatorSet), 2) + require.Equal(t, len(fc.safeBlocks[0].validatorLookup), 2) } } -func TestFastHotStuffProcessSafeBlock(t *testing.T) { +func TestProcessTipBlock(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) fc := NewFastHotStuffEventLoop() - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(2), createDummyValidatorSet()) + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, + BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip + []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + ) require.NoError(t, err) - // Test ProcessSafeBlock() function when consensus event loop is not running + // Test ProcessTipBlock() function when consensus event loop is 
not running { - err := fc.ProcessSafeBlock(createDummyBlock(2), createDummyValidatorSet()) + err := fc.ProcessTipBlock( + BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip + []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + ) require.Error(t, err) } // Start the consensus event loop fc.Start() - // Test ProcessSafeBlock() function with malformed block + // Test ProcessTipBlock() function with malformed tip block + { + err := fc.ProcessTipBlock( + BlockWithValidators{nil, createDummyValidatorSet()}, // tip + []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + ) + require.Error(t, err) + } + + // Test ProcessTipBlock() function with malformed tip validator set + { + err := fc.ProcessTipBlock( + BlockWithValidators{createDummyBlock(2), nil}, // tip + []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + ) + require.Error(t, err) + } + + // Test ProcessTipBlock() function with malformed safe block { - err := fc.ProcessSafeBlock(nil, createDummyValidatorSet()) + err := fc.ProcessTipBlock( + BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip + []BlockWithValidators{{nil, createDummyValidatorSet()}}, // safeBlocks + ) require.Error(t, err) } - // Test ProcessSafeBlock() function with malformed validator set + // Test ProcessTipBlock() function with malformed safe block's validator set { - err := fc.ProcessSafeBlock(createDummyBlock(2), nil) + err := fc.ProcessTipBlock( + BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip + []BlockWithValidators{{createDummyBlock(2), nil}}, // safeBlocks + ) require.Error(t, err) } @@ -144,21 +217,24 @@ func TestFastHotStuffProcessSafeBlock(t *testing.T) { require.Equal(t, len(fc.timeoutsSeen), 5) } - // Test ProcessSafeBlock() function with valid parameters + // Test ProcessTipBlock() function with valid parameters { nextBlock := createDummyBlock(2) 
nextBlock.height = 2 nextBlock.view = 3 - err := fc.ProcessSafeBlock(nextBlock, createDummyValidatorSet()) + err := fc.ProcessTipBlock( + BlockWithValidators{nextBlock, createDummyValidatorSet()}, // tip + []BlockWithValidators{{nextBlock, createDummyValidatorSet()}}, // safeBlocks + ) require.NoError(t, err) - require.Equal(t, nextBlock.GetBlockHash().GetValue(), fc.chainTip.GetBlockHash().GetValue()) - require.Equal(t, uint64(3), fc.chainTip.GetView()) - require.Equal(t, uint64(2), fc.chainTip.GetHeight()) + require.Equal(t, nextBlock.GetBlockHash().GetValue(), fc.tip.block.GetBlockHash().GetValue()) + require.Equal(t, uint64(3), fc.tip.block.GetView()) + require.Equal(t, uint64(2), fc.tip.block.GetHeight()) require.Equal(t, uint64(4), fc.currentView) - require.Equal(t, 2, len(fc.validatorsAtChainTip)) + require.Equal(t, 2, len(fc.tip.validatorSet)) } // Verify that stale votes and timeouts have been evicted @@ -177,7 +253,10 @@ func TestAdvanceView(t *testing.T) { fc := NewFastHotStuffEventLoop() // BlockHeight = 1, Current View = 3 - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(2), createDummyValidatorSet()) + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, + BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip + []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + ) require.NoError(t, err) // Running AdvanceView() should fail because the consensus event loop is not running @@ -264,7 +343,10 @@ func TestProcessValidatorVote(t *testing.T) { fc := NewFastHotStuffEventLoop() // BlockHeight = 1, Current View = 3 - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(2), createDummyValidatorSet()) + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, + BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip + []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + ) require.NoError(t, err) // Start the event 
loop @@ -345,7 +427,10 @@ func TestProcessValidatorTimeout(t *testing.T) { fc := NewFastHotStuffEventLoop() // BlockHeight = 1, Current View = 3 - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, createDummyBlock(2), createDummyValidatorSet()) + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, + BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip + []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + ) require.NoError(t, err) // Start the event loop @@ -427,7 +512,10 @@ func TestTimeoutScheduledTaskExecuted(t *testing.T) { dummyBlock := createDummyBlock(2) fc := NewFastHotStuffEventLoop() - err := fc.Init(oneHourInNanoSecs, oneMilliSecondInNanoSeconds, dummyBlock, createDummyValidatorSet()) + err := fc.Init(oneHourInNanoSecs, oneMilliSecondInNanoSeconds, + BlockWithValidators{dummyBlock, createDummyValidatorSet()}, // tip + []BlockWithValidators{{dummyBlock, createDummyValidatorSet()}}, // safeBlocks + ) require.NoError(t, err) // Start the event loop @@ -466,7 +554,10 @@ func TestResetEventLoopSignal(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) fc := NewFastHotStuffEventLoop() - err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, createDummyBlock(2), createDummyValidatorSet()) + err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, + BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip + []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + ) require.NoError(t, err) // Start the event loop @@ -508,7 +599,10 @@ func TestFastHotStuffEventLoopStartStop(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) fc := NewFastHotStuffEventLoop() - err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, createDummyBlock(2), createDummyValidatorSet()) + err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, + BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip + 
[]BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + ) require.NoError(t, err) // Start the event loop diff --git a/consensus/types.go b/consensus/types.go index 969991813..8247bc8d4 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -100,6 +100,14 @@ type Block interface { GetQC() QuorumCertificate } +type BlockWithValidators struct { + Block Block + // The validator set for the next block height after the block. This validator set can be used to validate + // votes and timeouts used to build a QC that extends from the block. The validator set must be sorted + // in descending order of stake amount with a consistent tie breaking scheme. + Validators []Validator +} + // We want a large buffer for the signal channels to ensure threads don't block when trying to push new // signals. // @@ -144,16 +152,16 @@ type FastHotStuffEventLoop struct { nextBlockConstructionTask *ScheduledTask[uint64] nextTimeoutTask *ScheduledTask[uint64] - // The latest block accepted by the caller. We only keep track of the latest safe block here because - // it's the block we vote on, and construct a vote QC for. - chainTip Block // The current view at which we expect to see or propose the next block. In the event of a timeout, // the timeout signal will be triggered for this view. currentView uint64 - // The validator set sorted in decreasing order of stake amount, with a consistent tie-breaking - // scheme. This validator set is expected to be valid for validating votes and timeouts for the - // next block height. - validatorsAtChainTip []Validator + + // The current tip of the blockchain, along with its validator set. + tip blockWithValidatorLookup + + // All blocks that are safe to extend from. This will include the committed tip and all uncommitted + // descendants that are safe to extend from. This slice also includes the tip block itself. + safeBlocks []blockWithValidatorLookup // votesSeen is an in-memory map of all the votes we've seen so far. 
It's a nested map with the // following nested key structure: diff --git a/consensus/types_internal.go b/consensus/types_internal.go index 260227317..3b919fc8d 100644 --- a/consensus/types_internal.go +++ b/consensus/types_internal.go @@ -181,3 +181,13 @@ func (tm *timeoutMessage) GetPublicKey() *bls.PublicKey { func (tm *timeoutMessage) GetSignature() *bls.Signature { return tm.signature } + +////////////////////////////////////////////////////////// +// Internal blockWithValidatorLookup type +////////////////////////////////////////////////////////// + +type blockWithValidatorLookup struct { + block Block + validatorSet []Validator // Ordered slice of validators + validatorLookup map[string]Validator // Lookup of validators by validator public key string +} diff --git a/lib/server.go b/lib/server.go index 9eda7b279..e28334ff7 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1774,11 +1774,6 @@ func (srv *Server) _handleBlockAccepted(event *BlockEvent) { return } - // Notify the consensus event loop that a block was accepted. - if srv.fastHotStuffEventLoop != nil { - srv.fastHotStuffEventLoop.ProcessSafeBlock(nil, nil) // TODO: fill these out - } - // Construct an inventory vector to relay to peers. 
blockHash, _ := blk.Header.Hash() invVect := &InvVect{ From 5ed6b8f8bec2e2724affde918b25f167dd597272 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 28 Sep 2023 18:15:46 -0400 Subject: [PATCH 211/762] Implement Fast-HotStuff Block Construction Signal (#663) * Implement Fast-HotStuff Block Construction Signal * Address Nina's comments * Address Piotr's feedback * Fix unit tests --- consensus/event_loop.go | 140 ++++++++++++++++++++++++++++++++--- consensus/event_loop_test.go | 119 ++++++++++++++++++++++++++++- consensus/types.go | 17 +++-- consensus/types_internal.go | 10 +-- consensus/utils_test.go | 19 ++++- lib/server.go | 2 +- 6 files changed, 278 insertions(+), 29 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 3f4627229..ade5328a0 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -3,10 +3,12 @@ package consensus import ( "time" + "github.com/holiman/uint256" "github.com/pkg/errors" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/collections/bitset" ) func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { @@ -140,10 +142,10 @@ func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidators, safeBl // Signal the caller that we can vote for the block. The caller will decide whether to construct and // broadcast the vote. fc.ConsensusEvents <- &ConsensusEvent{ - EventType: ConsensusEventTypeVote, - BlockHash: fc.tip.block.GetBlockHash(), - BlockHeight: fc.tip.block.GetHeight(), - View: fc.tip.block.GetView(), + EventType: ConsensusEventTypeVote, + TipBlockHash: fc.tip.block.GetBlockHash(), + TipBlockHeight: fc.tip.block.GetHeight(), + View: fc.tip.block.GetView(), } // Schedule the next block construction and timeout scheduled tasks @@ -415,14 +417,131 @@ func (fc *FastHotStuffEventLoop) resetScheduledTasks() { } // Schedule the next block construction task. 
This will run with currentView param. - fc.nextBlockConstructionTask.Schedule(fc.blockConstructionInterval, fc.currentView, fc.onBlockConstructionScheduledTask) + fc.nextBlockConstructionTask.Schedule(fc.blockConstructionInterval, fc.currentView, fc.onBlockConstructionScheduledTaskExecuted) // Schedule the next timeout task. This will run with currentView param. fc.nextTimeoutTask.Schedule(timeoutDuration, fc.currentView, fc.onTimeoutScheduledTaskExecuted) } -func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTask(blockConstructionView uint64) { - // TODO +// When this function is triggered, it means that we have reached the block construction +// time ETA for blockConstructionView. If we have a QC or timeout QC for the view, then we +// signal the server. +func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockConstructionView uint64) { + fc.lock.Lock() + defer fc.lock.Unlock() + + // Check if the consensus instance is running. If it's not running, then there's nothing + // to do here. + if fc.status != consensusStatusRunning { + return + } + + // Check for race conditions where the view advanced at the exact moment this task began + // or we have already signaled for this view. If so, then there's nothing to do here. + if fc.currentView != blockConstructionView { + return + } + + // Check if the conditions are met to construct a QC from votes for the chain tip. If so, + // we send a signal to the server and cancel the block construction task. The server will + // reschedule the task when it advances the view. 
+ if canConstructQC, signersList, signature := fc.tryConstructVoteQCInCurrentView(); canConstructQC { + // Signal the server that we can construct a QC for the chain tip + fc.ConsensusEvents <- &ConsensusEvent{ + EventType: ConsensusEventTypeConstructVoteQC, // The event type + View: fc.currentView, // The current view in which we can construct a block + TipBlockHash: fc.tip.block.GetBlockHash(), // Block hash for the tip, which we are extending from + TipBlockHeight: fc.tip.block.GetHeight(), // Block height for the tip, which we are extending from + QC: &quorumCertificate{ + blockHash: fc.tip.block.GetBlockHash(), // Block hash for the tip, which we are extending from + view: fc.tip.block.GetView(), // The view from the tip block. This is always fc.currentView - 1 + aggregatedSignature: &aggregatedSignature{ + signersList: signersList, // The signers list who voted on the tip block + signature: signature, // Aggregated signature from votes on the tip block + }, + }, + } + + return + } + + // TODO: Check if we can construct a timeout QC for the current view, and send the signal to the + // server + + return +} + +// tryConstructVoteQCInCurrentView is a helper function that attempts to construct a QC for the tip block +// so that it can be proposed in a block in the current view. The function internally performs all view and vote +// validations to ensure that the resulting QC is valid. If a QC can be constructed, the function returns +// the signers list and aggregate signature that can be used to construct the QC. +// +// This function must be called while holding the consensus instance's lock. 
+func (fc *FastHotStuffEventLoop) tryConstructVoteQCInCurrentView() ( + _success bool, // true if and only if we are able to construct a vote QC for the tip block in the current view + _signersList *bitset.Bitset, // bitset of signers for the aggregated signature for the tip block + _aggregateSignature *bls.Signature, // aggregated signature for the tip block +) { + // If currentView != tipBlock.View + 1, then we have timed out at some point, and can no longer + // construct a block with a QC of votes for the tip block. + tipBlock := fc.tip.block + if fc.currentView != tipBlock.GetView()+1 { + return false, nil, nil + } + + // Fetch the validator set at the tip. + validatorSet := fc.tip.validatorSet + + // Compute the chain tip's signature payload. + voteSignaturePayload := GetVoteSignaturePayload(tipBlock.GetView(), tipBlock.GetBlockHash()) + + // Fetch the validator votes for the tip block. + votesByValidator := fc.votesSeen[voteSignaturePayload] + + // Compute the total stake and total stake with votes + totalStake := uint256.NewInt() + totalVotingStake := uint256.NewInt() + + // Track the signatures and signers list for the chain tip + signersList := bitset.NewBitset() + signatures := []*bls.Signature{} + + // Iterate through the entire validator set and check if each one has voted for the tip block. Track + // all voters and their stakes. 
+ for ii, validator := range validatorSet { + totalStake = uint256.NewInt().Add(totalStake, validator.GetStakeAmount()) + + // Skip the validator if it hasn't voted for the the block + vote, hasVoted := votesByValidator[validator.GetPublicKey().ToString()] + if !hasVoted { + continue + } + + // Verify the vote signature + if !isValidSignatureSinglePublicKey(vote.GetPublicKey(), vote.GetSignature(), voteSignaturePayload[:]) { + continue + } + + // Track the vote's signature, stake, and place in the validator set + totalVotingStake = uint256.NewInt().Add(totalVotingStake, validator.GetStakeAmount()) + signersList.Set(ii, true) + signatures = append(signatures, vote.GetSignature()) + } + + // If we don't have a super-majority vote for the chain tip, then we can't build a QC. + if !isSuperMajorityStake(totalVotingStake, totalStake) { + return false, nil, nil + } + + // If we reach this point, then we have enough signatures to build a QC for the tip block. Try to + // aggregate the signatures. This should never fail. + aggregateSignature, err := bls.AggregateSignatures(signatures) + if err != nil { + return false, nil, nil + } + + // Happy path + return true, signersList, aggregateSignature } // When this function is triggered, it means that we have reached out the timeout ETA for the @@ -447,9 +566,10 @@ func (fc *FastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uin // Signal the server that we are ready to time out fc.ConsensusEvents <- &ConsensusEvent{ - EventType: ConsensusEventTypeTimeout, // The timeout event type - View: timedOutView, // The view we timed out - BlockHash: fc.tip.block.GetBlockHash(), // The last block we saw + EventType: ConsensusEventTypeTimeout, // The timeout event type + View: timedOutView, // The view we timed out + TipBlockHash: fc.tip.block.GetBlockHash(), // The last block we saw + TipBlockHeight: fc.tip.block.GetHeight(), // The last block we saw } // Cancel the timeout task. 
The server will reschedule it when it advances the view. diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index fb237dfb0..d5eb49ac8 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -6,6 +6,9 @@ import ( "testing" "time" + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections/bitset" + "github.com/holiman/uint256" "github.com/stretchr/testify/require" ) @@ -527,7 +530,7 @@ func TestTimeoutScheduledTaskExecuted(t *testing.T) { // Confirm that the timeout signal is for the the expected view require.Equal(t, timeoutSignal.EventType, ConsensusEventTypeTimeout) require.Equal(t, timeoutSignal.View, dummyBlock.GetView()+1) - require.Equal(t, timeoutSignal.BlockHash.GetValue(), dummyBlock.GetBlockHash().GetValue()) + require.Equal(t, timeoutSignal.TipBlockHash.GetValue(), dummyBlock.GetBlockHash().GetValue()) // Confirm that the timeout is no longer running require.False(t, fc.nextTimeoutTask.IsScheduled()) @@ -541,7 +544,7 @@ func TestTimeoutScheduledTaskExecuted(t *testing.T) { // Confirm that the timeout signal is for the the expected view require.Equal(t, timeoutSignal.EventType, ConsensusEventTypeTimeout) require.Equal(t, timeoutSignal.View, dummyBlock.GetView()+2) - require.Equal(t, timeoutSignal.BlockHash.GetValue(), dummyBlock.GetBlockHash().GetValue()) + require.Equal(t, timeoutSignal.TipBlockHash.GetValue(), dummyBlock.GetBlockHash().GetValue()) // Confirm that the timeout is no longer running require.False(t, fc.nextTimeoutTask.IsScheduled()) @@ -595,6 +598,118 @@ func TestResetEventLoopSignal(t *testing.T) { fc.Stop() } +func TestVoteQCConstructionSignal(t *testing.T) { + + // Create a valid dummy block at view 2 + block := createDummyBlock(2) + + // Create a valid validator set + validatorPrivateKey1, _ := bls.NewPrivateKey() + validatorPrivateKey2, _ := bls.NewPrivateKey() + + validatorSet := []Validator{ + &validator{ + publicKey: validatorPrivateKey1.PublicKey(), + 
stakeAmount: uint256.NewInt().SetUint64(70), + }, + &validator{ + publicKey: validatorPrivateKey2.PublicKey(), + stakeAmount: uint256.NewInt().SetUint64(30), + }, + } + + voteSignaturePayload := GetVoteSignaturePayload(2, block.GetBlockHash()) + + validator1Vote, _ := validatorPrivateKey1.Sign(voteSignaturePayload[:]) + validator2Vote, _ := validatorPrivateKey2.Sign(voteSignaturePayload[:]) + + // Sad path, not enough votes to construct a QC + { + fc := NewFastHotStuffEventLoop() + err := fc.Init(time.Microsecond, time.Hour, + BlockWithValidators{block, validatorSet}, // tip + []BlockWithValidators{{block, validatorSet}}, // safeBlocks + ) + require.NoError(t, err) + + // Create a vote from validator 2 + vote := voteMessage{ + view: 2, // The view the block was proposed in + blockHash: block.GetBlockHash(), // Block hash is the same as the block hash of the dummy block + publicKey: validatorPrivateKey2.PublicKey(), // Validator 2 with 30/100 stake votes + signature: validator2Vote, // Validator 2's vote + } + + // Store the vote in the event loop's votesSeen map + fc.votesSeen[voteSignaturePayload] = map[string]VoteMessage{ + vote.publicKey.ToString(): &vote, + } + + // Start the event loop + fc.Start() + + // Wait up to 100 milliseconds for a block construction signal to be sent + select { + case <-fc.ConsensusEvents: + require.Fail(t, "Received a block construction signal when there were not enough votes to construct a QC") + case <-time.After(100 * time.Millisecond): + // Do nothing + } + + // Stop the event loop + fc.Stop() + } + + // Happy path, there are votes with a super-majority of stake to construct a QC + { + fc := NewFastHotStuffEventLoop() + err := fc.Init(time.Microsecond, time.Hour, + BlockWithValidators{block, validatorSet}, // tip + []BlockWithValidators{{block, validatorSet}}, // safeBlocks + ) + require.NoError(t, err) + + // Create a vote from validator 1 + vote := voteMessage{ + view: 2, // The view the block was proposed in + blockHash: 
block.GetBlockHash(), // Block hash is the same as the block hash of the dummy block + publicKey: validatorPrivateKey1.PublicKey(), // Validator 1 with 70/100 stake votes + signature: validator1Vote, // Validator 1's vote + } + + // Store the vote in the event loop's votesSeen map + fc.votesSeen[voteSignaturePayload] = map[string]VoteMessage{ + vote.publicKey.ToString(): &vote, + } + + // Start the event loop + fc.Start() + + var blockConstructionSignal *ConsensusEvent + + // Wait up to 100 milliseconds for a block construction signal to be sent + select { + case blockConstructionSignal = <-fc.ConsensusEvents: + // Do nothing + case <-time.After(100 * time.Millisecond): + require.Fail(t, "Did not receive a block construction signal when there were enough votes to construct a QC") + } + + // Stop the event loop + fc.Stop() + + // Confirm that the block construction signal has the expected parameters + require.Equal(t, blockConstructionSignal.EventType, ConsensusEventTypeConstructVoteQC) + require.Equal(t, blockConstructionSignal.View, block.GetView()+1) + require.Equal(t, blockConstructionSignal.TipBlockHash.GetValue(), block.GetBlockHash().GetValue()) + require.Equal(t, blockConstructionSignal.TipBlockHeight, block.GetHeight()) + require.Equal(t, blockConstructionSignal.QC.GetView(), block.GetView()) + require.Equal(t, blockConstructionSignal.QC.GetBlockHash().GetValue(), block.GetBlockHash().GetValue()) + require.Equal(t, blockConstructionSignal.QC.GetAggregatedSignature().GetSignersList().ToBytes(), bitset.NewBitset().Set(0, true).ToBytes()) + require.Equal(t, blockConstructionSignal.QC.GetAggregatedSignature().GetSignature().ToBytes(), validator1Vote.ToBytes()) + } +} + func TestFastHotStuffEventLoopStartStop(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) diff --git a/consensus/types.go b/consensus/types.go index 8247bc8d4..078c6d6db 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -28,17 +28,18 @@ const ( ) type ConsensusEvent 
struct { - EventType ConsensusEventType - BlockHash BlockHash - BlockHeight uint64 - View uint64 + EventType ConsensusEventType + TipBlockHash BlockHash + TipBlockHeight uint64 + View uint64 + QC QuorumCertificate } // BlockHash is a 32-byte hash of a block used to uniquely identify a block. It's re-defined here // as an interface that matches the exact structure of the BlockHash type in core, so that the two -// packages are decoupled and the Fast HotStuff consensus can be tested end-to-end independently. -// When using the Fast HotStuff, the lib package can convert its own BlockHash type to and from this -// type trivially. +// packages are decoupled and the Fast HotStuff event loop can be tested end-to-end independently. +// When using the Fast HotStuff event loop, the lib package can convert its own BlockHash type to +// and from this type trivially. type BlockHash interface { GetValue() [32]byte } @@ -166,7 +167,7 @@ type FastHotStuffEventLoop struct { // votesSeen is an in-memory map of all the votes we've seen so far. It's a nested map with the // following nested key structure: // - // sha256(vote.View, vote.BlockHash) - > string(vote.PublicKey) -> VoteMessage + // sha3-256(vote.View, vote.BlockHash) - > string(vote.PublicKey) -> VoteMessage // // We use a nested map as above because we want to be able to efficiently fetch all votes by block hash. 
votesSeen map[[32]byte]map[string]VoteMessage diff --git a/consensus/types_internal.go b/consensus/types_internal.go index 3b919fc8d..f73941c96 100644 --- a/consensus/types_internal.go +++ b/consensus/types_internal.go @@ -67,7 +67,7 @@ func (qc *aggregateQuorumCertificate) GetAggregatedSignature() AggregatedSignatu ////////////////////////////////////////////////////////// type quorumCertificate struct { - blockHash *blockHash + blockHash BlockHash view uint64 aggregatedSignature AggregatedSignature } @@ -106,10 +106,10 @@ func (as *aggregatedSignature) GetSignature() *bls.Signature { ////////////////////////////////////////////////////////// type block struct { - blockHash *blockHash + blockHash BlockHash height uint64 view uint64 - qc *quorumCertificate + qc QuorumCertificate } func (b *block) GetBlockHash() BlockHash { @@ -134,7 +134,7 @@ func (b *block) GetQC() QuorumCertificate { type voteMessage struct { view uint64 - blockHash *blockHash + blockHash BlockHash publicKey *bls.PublicKey signature *bls.Signature } @@ -161,7 +161,7 @@ func (vm *voteMessage) GetSignature() *bls.Signature { type timeoutMessage struct { view uint64 - highQC *quorumCertificate + highQC QuorumCertificate publicKey *bls.PublicKey signature *bls.Signature } diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 47c467d55..43c3aff1a 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -363,12 +363,25 @@ func createDummyTimeoutMessage(view uint64) *timeoutMessage { } func createDummyQC(view uint64) *quorumCertificate { + blockHash := createDummyBlockHash() + + signaturePayload := GetVoteSignaturePayload(view, blockHash) + + blsPrivateKey1, _ := bls.NewPrivateKey() + blsSignature1, _ := blsPrivateKey1.Sign(signaturePayload[:]) + + blsPrivateKey2, _ := bls.NewPrivateKey() + blsSignature2, _ := blsPrivateKey2.Sign(signaturePayload[:]) + + signersList := bitset.NewBitset().Set(0, true).Set(1, true) + aggregateSignature, _ := 
bls.AggregateSignatures([]*bls.Signature{blsSignature1, blsSignature2}) + return &quorumCertificate{ - blockHash: createDummyBlockHash(), + blockHash: blockHash, view: view, aggregatedSignature: &aggregatedSignature{ - signersList: bitset.NewBitset().FromBytes([]byte{0x3}), - signature: createDummyBLSSignature(), + signersList: signersList, + signature: aggregateSignature, }, } } diff --git a/lib/server.go b/lib/server.go index e28334ff7..9d6291ddb 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2299,7 +2299,7 @@ func (srv *Server) _startConsensus() { select { case consensusEvent := <-srv.fastHotStuffEventLoop.ConsensusEvents: { - glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.BlockHeight) + glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.TipBlockHeight) srv._handleFastHostStuffConsensusEvent(consensusEvent) } From b69d16b0972daaace013eb7e6c835a784c46070a Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 3 Oct 2023 10:13:11 -0700 Subject: [PATCH 212/762] Implement Fast-HotStuff Timeout Block Construction Signal (#696) * Implement Fast-HotStuff Timeout Block Construction Signal * Cleanup * Address Nina's feedback * Rename TipBlockHeight and TipBlockHash * Fix unit tests * Address Piotr's comments --- bls/signature.go | 4 +- consensus/event_loop.go | 158 +++++++++++++++++++++++++++++++++-- consensus/event_loop_test.go | 157 ++++++++++++++++++++++++++++++++++ consensus/types.go | 1 + consensus/utils_test.go | 16 ++-- 5 files changed, 320 insertions(+), 16 deletions(-) diff --git a/bls/signature.go b/bls/signature.go index 6a83d1079..9bcfdb4a9 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -29,8 +29,8 @@ const signingAlgorithm = flowCrypto.BLSBLS12381 var hashingAlgorithm = flowCrypto.NewExpandMsgXOFKMAC128("deso-V1-CS01-with-") // AggregateSignatures takes in an input slice of bls.Signatures and aggregates 
them -// into a single bls.Signature. The assumption is that each of the input bls.Signatures -// were generated by different bls.PrivateKeys signing the same payload. +// into a single bls.Signature. This signature aggregation supports signatures on a +// single or different payloads. func AggregateSignatures(signatures []*Signature) (*Signature, error) { var flowSignatures []flowCrypto.Signature for _, signature := range signatures { diff --git a/consensus/event_loop.go b/consensus/event_loop.go index ade5328a0..24f6d2e36 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -445,7 +445,7 @@ func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockC // Check if the conditions are met to construct a QC from votes for the chain tip. If so, // we send a signal to the server and cancel the block construction task. The server will // reschedule the task when it advances the view. - if canConstructQC, signersList, signature := fc.tryConstructVoteQCInCurrentView(); canConstructQC { + if success, signersList, signature := fc.tryConstructVoteQCInCurrentView(); success { // Signal the server that we can construct a QC for the chain tip fc.ConsensusEvents <- &ConsensusEvent{ EventType: ConsensusEventTypeConstructVoteQC, // The event type @@ -465,8 +465,38 @@ func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockC return } - // TODO: Check if we can construct a timeout QC for the current view, and send the signal to the - // server + // Check if we have enough timeouts to build an aggregate QC for the previous view. If so, + // we send a signal to the server and cancel all scheduled tasks. 
+ if success, safeBlock, highQC, highQCViews, signersList, signature := fc.tryConstructTimeoutQCInCurrentView(); success { + // Signal the server that we can construct a timeout QC for the current view + fc.ConsensusEvents <- &ConsensusEvent{ + EventType: ConsensusEventTypeConstructTimeoutQC, // The event type + View: fc.currentView, // The view that we have a timeout QC for + TipBlockHash: highQC.GetBlockHash(), // The block hash that we extend from + TipBlockHeight: safeBlock.GetHeight(), // The block height that we extend from + AggregateQC: &aggregateQuorumCertificate{ + view: fc.currentView - 1, // The timed out view is always the previous view + highQC: highQC, // The high QC aggregated from the timeout messages + highQCViews: highQCViews, // The high view for each validator who timed out + aggregatedSignature: &aggregatedSignature{ + signersList: signersList, // The signers list of validators who timed out + signature: signature, // The aggregated signature from validators who timed out + }, + }, + } + + // Cancel the block construction task since we know we can construct a timeout QC in the current view. + // It will be rescheduled when we advance view. + fc.nextBlockConstructionTask.Cancel() + return + } + + // We have not found a super majority of votes or timeouts. We can schedule the task to check again later. + fc.nextBlockConstructionTask.Schedule( + fc.blockConstructionInterval, + fc.currentView, + fc.onBlockConstructionScheduledTaskExecuted, + ) return } @@ -544,6 +574,121 @@ func (fc *FastHotStuffEventLoop) tryConstructVoteQCInCurrentView() ( return true, signersList, aggregateSignature } +// tryConstructTimeoutQCInCurrentView is a helper function that attempts to construct a timeout QC for the +// previous view, so that it can be proposed in an empty block in the current view. The function internally performs +// all view and timeout message validations to ensure that the resulting timeout QC is valid and extends from a +// known safe block. 
If a timeout QC can be constructed, the function returns the safe block that it extends +// from, the highQC, highQC views from validator timeout messages, signers list, and aggregate signature needed +// to construct the timeout QC. +// +// This function must be called while holding the consensus instance's lock. +func (fc *FastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() ( + _success bool, // true if and only if we are able to construct a timeout QC in the current view + _safeBlock Block, // the safe block that the high QC is from; the timeout QC proposed will extend from this block + _highQC QuorumCertificate, // high QC aggregated from validators who timed out + _highQCViews []uint64, // high QC views for each validator who timed out + _signersList *bitset.Bitset, // bitset of signers for the aggregated signature from the timeout messages + _aggregatedSignature *bls.Signature, // aggregated signature from the validators' timeout messages +) { + + // Fetch all timeouts for the previous view. All timeout messages for a view are aggregated and + // proposed in the next view. So if we want to propose a timeout QC in the current view, we need + // to aggregate timeouts from the previous one. + timeoutsByValidator := fc.timeoutsSeen[fc.currentView-1] + + // Tracks the highQC from validators as we go along. + var validatorsHighQC QuorumCertificate + + // Iterate through all timeouts for the previous view to find the highQC + for _, timeout := range timeoutsByValidator { + // Check if the high QC from the timeout messages is for a block in our safeBlocks slice. If not, + // then we have no knowledge of the block, or the block is not safe to extend from. This should never + // happen, but may be possible in the event we receive a timeout message at the same time the block + // becomes unsafe to extend from (ex: it's part of an stale reorg). We check for the edge case here to + // be 100% safe. 
+ isSafeBlock, _, _, validatorLookup := fc.fetchSafeBlockInfo(timeout.GetHighQC().GetBlockHash()) + if !isSafeBlock { + continue + } + + // Make sure the timeout message was sent by a validator registered at the block height of the extracted QC. + if _, ok := validatorLookup[timeout.GetPublicKey().ToString()]; !ok { + continue + } + + // Update the highQC if the timeout message has a higher QC view than the current highQC's view + if isInterfaceNil(validatorsHighQC) || timeout.GetHighQC().GetView() > validatorsHighQC.GetView() { + validatorsHighQC = timeout.GetHighQC() + } + } + + // If we didn't find a high QC or didn't find any valid timeout messages, then we can't build a timeout QC. + if isInterfaceNil(validatorsHighQC) { + return false, nil, nil, nil, nil, nil + } + + // Fetch the validator set for the block height of the high QC. This lookup is guaranteed to succeed + // because it succeeded above. + ok, safeBlock, validatorSet, _ := fc.fetchSafeBlockInfo(validatorsHighQC.GetBlockHash()) + if !ok { + return false, nil, nil, nil, nil, nil + } + + // Compute the total stake and total stake with timeouts + totalStake := uint256.NewInt() + totalTimedOutStake := uint256.NewInt() + + // Track the high QC view for each validator + highQCViews := make([]uint64, len(validatorSet)) + + // Track the signatures and signers list for validators who timed out + signersList := bitset.NewBitset() + signatures := []*bls.Signature{} + + // Iterate through the entire validator set and check if each one has timed out for the previous + // view. Track all validators who timed out and their stakes. We iterate through the validator set + // here rather than the timeoutsByValidator map because we want to preserve the order of the validator + // for the signersList bitset. In practice, the validator set is expected to be <= 1000 in size, so + // this loop will be fast. 
+ for ii, validator := range validatorSet { + totalStake = uint256.NewInt().Add(totalStake, validator.GetStakeAmount()) + + // Skip the validator if it hasn't timed out for the previous view + timeout, hasTimedOut := timeoutsByValidator[validator.GetPublicKey().ToString()] + if !hasTimedOut { + continue + } + + // Compute the signature payload that the validator should have signed + signaturePayload := GetTimeoutSignaturePayload(timeout.GetView(), timeout.GetHighQC().GetView()) + + // Verify the timeout signature + if !isValidSignatureSinglePublicKey(timeout.GetPublicKey(), timeout.GetSignature(), signaturePayload[:]) { + continue + } + + // Track the signatures, timed out stake, and high QC views for the validator + totalTimedOutStake = uint256.NewInt().Add(totalTimedOutStake, validator.GetStakeAmount()) + signersList.Set(ii, true) + signatures = append(signatures, timeout.GetSignature()) + highQCViews[ii] = timeout.GetHighQC().GetView() + } + + // Check if we have a super majority of stake that has timed out + if !isSuperMajorityStake(totalTimedOutStake, totalStake) { + return false, nil, nil, nil, nil, nil + } + + // Finally aggregate the signatures from the timeouts + aggregateSignature, err := bls.AggregateSignatures(signatures) + if err != nil { + return false, nil, nil, nil, nil, nil + } + + // Happy path + return true, safeBlock, validatorsHighQC, highQCViews, signersList, aggregateSignature +} + // When this function is triggered, it means that we have reached out the timeout ETA for the // timedOutView. In the event of a timeout, we signal the server that we are ready to time out // and cancel the timeout task. 
@@ -671,8 +816,11 @@ func (fc *FastHotStuffEventLoop) hasTimedOutForView(publicKey *bls.PublicKey, vi return ok } -func (fc *FastHotStuffEventLoop) getBlockAndValidatorSetByHash(blockHash BlockHash) ( - bool, Block, []Validator, map[string]Validator, +func (fc *FastHotStuffEventLoop) fetchSafeBlockInfo(blockHash BlockHash) ( + _isSafeBlock bool, + _safeBlock Block, + _validatorSet []Validator, + _validatorLookup map[string]Validator, ) { // A linear search here is fine. The safeBlocks slice is expected to be extremely small as it represents the // number of uncommitted blocks in the blockchain. During steady stake, it will have a size of 3 blocks diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index d5eb49ac8..23eafcaf5 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -710,6 +710,163 @@ func TestVoteQCConstructionSignal(t *testing.T) { } } +func TestTimeoutQCConstructionSignal(t *testing.T) { + // Create a valid dummy block at view 2 + block1 := createDummyBlock(2) + + // Create a valid dummy block that extends from the above block at view 3 + block2 := &block{ + blockHash: createDummyBlockHash(), + view: 3, + height: 2, + qc: createDummyQC(2, block1.GetBlockHash()), + } + + // Create a valid validator set + validatorPrivateKey1, _ := bls.NewPrivateKey() + validatorPrivateKey2, _ := bls.NewPrivateKey() + + validatorSet := []Validator{ + &validator{ + publicKey: validatorPrivateKey1.PublicKey(), + stakeAmount: uint256.NewInt().SetUint64(70), + }, + &validator{ + publicKey: validatorPrivateKey2.PublicKey(), + stakeAmount: uint256.NewInt().SetUint64(30), + }, + } + + // Both validators will timeout for view 4. 
Validator 1 will timeout with a highQC from view 2, and + // validator 2 will timeout with a highQC from view 3 + timeoutSignaturePayload1 := GetTimeoutSignaturePayload(4, 1) + timeoutSignaturePayload2 := GetTimeoutSignaturePayload(4, 2) + + validator1TimeoutSignature, _ := validatorPrivateKey1.Sign(timeoutSignaturePayload1[:]) + validator2TimeoutSignature, _ := validatorPrivateKey2.Sign(timeoutSignaturePayload2[:]) + + // Sad path, not enough timeouts to construct a timeout QC + { + fc := NewFastHotStuffEventLoop() + err := fc.Init(time.Microsecond, time.Hour, + BlockWithValidators{block2, validatorSet}, // tip + []BlockWithValidators{ // safeBlocks + {block1, validatorSet}, + {block2, validatorSet}, + }, + ) + require.NoError(t, err) + + // Manually set the view to view 5 to simulate a timeout at view 4 + fc.currentView = 5 + + // Create a timeout message from validator 2 + timeout := timeoutMessage{ + view: 4, // The view which the validator is timing out for + highQC: block2.GetQC(), // The highest QC the validator has seen + publicKey: validatorPrivateKey2.PublicKey(), // Validator 2 with 30/100 stake + signature: validator2TimeoutSignature, // Validator 2's timeout signature on payload (view 4, highQCview 2) + } + + // Store the timeout in the event loop's timeoutsSeen map + fc.timeoutsSeen[4] = map[string]TimeoutMessage{ + timeout.publicKey.ToString(): &timeout, + } + + // Start the event loop + fc.Start() + + // Wait up to 100 milliseconds for a block construction signal to be sent + select { + case <-fc.ConsensusEvents: + require.Fail(t, "Received a block construction signal when there were not enough timeouts to construct a timeout QC") + case <-time.After(100 * time.Millisecond): + // Do nothing + } + + // Stop the event loop + fc.Stop() + } + + // Happy path, there are enough timeouts with a super-majority of stake to construct a timeout QC + { + fc := NewFastHotStuffEventLoop() + err := fc.Init(time.Microsecond, time.Hour, + BlockWithValidators{block2, 
validatorSet}, // tip + []BlockWithValidators{ // safeBlocks + {block1, validatorSet}, + {block2, validatorSet}, + }, + ) + require.NoError(t, err) + + // Manually set the currentView to 5 to simulate a timeout on view 4 + fc.currentView = 5 + + // Create a timeout message from validator 1 with highQC from block 1 + timeout1 := timeoutMessage{ + view: 4, // The view which the validator is timing out for + highQC: block1.GetQC(), // The highest QC this validator has seen + publicKey: validatorPrivateKey1.PublicKey(), // Validator 1 with 70/100 stake + signature: validator1TimeoutSignature, // Validator 1's timeout signature on payload (view 4, highQCview 1) + } + + // Create a timeout message from validator 2 with highQC from block 2 + timeout2 := timeoutMessage{ + view: 4, // The view which the validator is timing out for + highQC: block2.GetQC(), // The highest QC the validator has seen + publicKey: validatorPrivateKey2.PublicKey(), // Validator 2 with 30/100 stake + signature: validator2TimeoutSignature, // Validator 2's timeout signature on payload (view 4, highQCview 2) + } + + // Store the timeout in the event loop's timeoutsSeen map + fc.timeoutsSeen[4] = map[string]TimeoutMessage{ + timeout1.publicKey.ToString(): &timeout1, + timeout2.publicKey.ToString(): &timeout2, + } + + // Start the event loop + fc.Start() + + var signal *ConsensusEvent + + // Wait up to 100 milliseconds for a block construction signal to be sent + select { + case signal = <-fc.ConsensusEvents: + // Do nothing + case <-time.After(100 * time.Second): + require.Fail(t, "Did not receive a block construction signal when there were enough timeouts to construct a timeout QC") + } + + // Stop the event loop + fc.Stop() + + // Confirm that the block construction signal has the expected parameters + require.Equal(t, signal.EventType, ConsensusEventTypeConstructTimeoutQC) + require.Equal(t, signal.View, uint64(5)) // The timeout QC will be proposed in view 5 + require.Equal(t, 
signal.TipBlockHash.GetValue(), block1.GetBlockHash().GetValue()) // The timeout QC will be proposed in a block that extends from block 1 + require.Equal(t, signal.TipBlockHeight, block1.GetHeight()) // The timeout QC will be proposed at the block height after block 1 + require.Equal(t, signal.AggregateQC.GetView(), uint64(4)) // The timed out view is 4 + require.Equal(t, signal.AggregateQC.GetHighQCViews(), []uint64{1, 2}) // The high QC view is 1 for validator 1 and 2 for validator 2 + require.Equal(t, + signal.AggregateQC.GetAggregatedSignature().GetSignersList().ToBytes(), + bitset.NewBitset().Set(0, true).Set(1, true).ToBytes(), // Both validators have timed out, so both validators are in the timeout QC + ) + + // Verify that the high QC is the QC from block 2. It should be unchanged. + require.Equal(t, signal.AggregateQC.GetHighQC().GetBlockHash(), block2.GetQC().GetBlockHash()) + require.Equal(t, signal.AggregateQC.GetHighQC().GetView(), block2.GetQC().GetView()) + require.Equal(t, + signal.AggregateQC.GetHighQC().GetAggregatedSignature().GetSignersList().ToBytes(), + block2.GetQC().GetAggregatedSignature().GetSignersList().ToBytes(), + ) + require.Equal(t, + signal.AggregateQC.GetHighQC().GetAggregatedSignature().GetSignature().ToBytes(), + block2.GetQC().GetAggregatedSignature().GetSignature().ToBytes(), + ) + } +} + func TestFastHotStuffEventLoopStartStop(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) diff --git a/consensus/types.go b/consensus/types.go index 078c6d6db..614a2bc88 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -33,6 +33,7 @@ type ConsensusEvent struct { TipBlockHeight uint64 View uint64 QC QuorumCertificate + AggregateQC AggregateQuorumCertificate } // BlockHash is a 32-byte hash of a block used to uniquely identify a block. 
It's re-defined here diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 43c3aff1a..e4522681e 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -21,7 +21,7 @@ func TestIsValidSuperMajorityQuorumCertificate(t *testing.T) { // Test malformed validator set { - require.False(t, IsValidSuperMajorityQuorumCertificate(createDummyQC(1), nil)) + require.False(t, IsValidSuperMajorityQuorumCertificate(createDummyQC(1, createDummyBlockHash()), nil)) } // Set up test validator data @@ -103,19 +103,19 @@ func TestIsProperlyFormedBlock(t *testing.T) { // Test zero height { - block := block{height: 0, view: 2, blockHash: createDummyBlockHash(), qc: createDummyQC(1)} + block := block{height: 0, view: 2, blockHash: createDummyBlockHash(), qc: createDummyQC(1, createDummyBlockHash())} require.False(t, isProperlyFormedBlock(&block)) } // Test zero view { - block := block{height: 1, view: 0, blockHash: createDummyBlockHash(), qc: createDummyQC(0)} + block := block{height: 1, view: 0, blockHash: createDummyBlockHash(), qc: createDummyQC(0, createDummyBlockHash())} require.False(t, isProperlyFormedBlock(&block)) } // Test nil block hash { - block := block{height: 1, view: 1, blockHash: nil, qc: createDummyQC(0)} + block := block{height: 1, view: 1, blockHash: nil, qc: createDummyQC(0, createDummyBlockHash())} require.False(t, isProperlyFormedBlock(&block)) } @@ -327,7 +327,7 @@ func createDummyBlock(view uint64) *block { blockHash: createDummyBlockHash(), view: view, height: 1, - qc: createDummyQC(view - 1), + qc: createDummyQC(view-1, createDummyBlockHash()), } } @@ -347,7 +347,7 @@ func createDummyVoteMessage(view uint64) *voteMessage { } func createDummyTimeoutMessage(view uint64) *timeoutMessage { - highQC := createDummyQC(view - 1) + highQC := createDummyQC(view-1, createDummyBlockHash()) signaturePayload := GetTimeoutSignaturePayload(view, highQC.view) @@ -362,9 +362,7 @@ func createDummyTimeoutMessage(view uint64) *timeoutMessage { } } -func 
createDummyQC(view uint64) *quorumCertificate { - blockHash := createDummyBlockHash() - +func createDummyQC(view uint64, blockHash BlockHash) *quorumCertificate { signaturePayload := GetVoteSignaturePayload(view, blockHash) blsPrivateKey1, _ := bls.NewPrivateKey() From 8e7315b002a5024f1ea49b2a2d45268963bf4335 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 3 Oct 2023 10:19:57 -0700 Subject: [PATCH 213/762] Easy Fast-HotStuff Event Loop Code Cleanup (#699) * Easy Fast-HotStuff cleanup * Fix server.go * Rename AdvanceViewOnTimeout() --- consensus/event_loop.go | 148 +++++++++++++++++------------------ consensus/event_loop_test.go | 70 ++++++++--------- consensus/types.go | 67 ++++++++-------- lib/server.go | 20 ++--- 4 files changed, 147 insertions(+), 158 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 24f6d2e36..9b38cebd6 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -13,13 +13,13 @@ import ( func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { return &FastHotStuffEventLoop{ - status: consensusStatusNotInitialized, + status: eventLoopStatusNotInitialized, nextBlockConstructionTask: NewScheduledTask[uint64](), nextTimeoutTask: NewScheduledTask[uint64](), } } -// Initializes the consensus instance with the latest known valid block in the blockchain, and +// Initializes the consensus event loop with the latest known valid block in the blockchain, and // the validator set for the next block height. The functions expects the following for the input // params: // - blockConstructionInterval: block construction duration must be > 0 @@ -29,7 +29,7 @@ func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { // - safeBlocks: an unordered slice of blocks including the committed tip, the uncommitted tip, // all ancestors of the uncommitted tip that are safe to extend from, and all blocks from forks // that are safe to extend from. 
This function does not validate the collection of blocks. It -// expects caller to know and decide what blocks are safe to extend from. +// expects the server to know and decide what blocks are safe to extend from. // // Given the above, This function updates the tip internally, stores the safe blocks, and re-initializes // all internal data structures that are used to track incoming votes and timeout messages for QC construction. @@ -39,13 +39,13 @@ func (fc *FastHotStuffEventLoop) Init( tip BlockWithValidators, safeBlocks []BlockWithValidators, ) error { - // Grab the consensus instance's lock + // Grab the event loop's lock fc.lock.Lock() defer fc.lock.Unlock() - // Ensure the consensus instance is not already running - if fc.status == consensusStatusRunning { - return errors.New("FastHotStuffEventLoop.Init: Consensus instance is already running") + // Ensure the event loop is not already running + if fc.status == eventLoopStatusRunning { + return errors.New("FastHotStuffEventLoop.Init: event loop is already running") } // Validate the scheduled task durations @@ -69,30 +69,30 @@ func (fc *FastHotStuffEventLoop) Init( fc.timeoutsSeen = make(map[uint64]map[string]TimeoutMessage) // Reset the external channel used for signaling - fc.ConsensusEvents = make(chan *ConsensusEvent, signalChannelBufferSize) + fc.Events = make(chan *FastHotStuffEvent, signalChannelBufferSize) // Set the block construction and timeout base durations fc.blockConstructionInterval = blockConstructionInterval fc.timeoutBaseDuration = timeoutBaseDuration - // Update the consensus status - fc.status = consensusStatusInitialized + // Update the event loop's status + fc.status = eventLoopStatusInitialized return nil } -// AdvanceView is called when the tip has not changed but the consensus instance has timed out. This +// AdvanceViewOnTimeout is called when the tip has not changed but the event loop has timed out. 
This // function advances the view and resets the timeout scheduled task and block production scheduled // tasks. -func (fc *FastHotStuffEventLoop) AdvanceView() (uint64, error) { - // Grab the consensus instance's lock +func (fc *FastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { + // Grab the event loop's lock fc.lock.Lock() defer fc.lock.Unlock() - // Ensure the consensus instance is running. This guarantees that the chain tip and validator set + // Ensure the event loop is running. This guarantees that the chain tip and validator set // have already been set. - if fc.status != consensusStatusRunning { - return 0, errors.New("FastHotStuffEventLoop.AdvanceView: Consensus instance is not running") + if fc.status != eventLoopStatusRunning { + return 0, errors.New("FastHotStuffEventLoop.AdvanceViewOnTimeout: Event loop is not running") } // Advance the view @@ -107,7 +107,7 @@ func (fc *FastHotStuffEventLoop) AdvanceView() (uint64, error) { return fc.currentView, nil } -// ProcessTipBlock must only be called when the caller has accepted a new block, connected it +// ProcessTipBlock must only be called when the server has accepted a new block, connected it // to the tip of the blockchain, and determined that the block is safe to vote on. Given such a // block, this function resets internal state and schedules the next block construction and timeout // timers. @@ -117,15 +117,15 @@ func (fc *FastHotStuffEventLoop) AdvanceView() (uint64, error) { // - safeBlocks: an unordered slice of blocks including the committed tip, the uncommitted tip, // all ancestors of the uncommitted tip that are safe to extend from, and all blocks from forks // that are safe to extend from. This function does not validate the collection of blocks. It -// expects the caller to know and decide what blocks are safe to extend from. +// expects the server to know and decide what blocks are safe to extend from. 
func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidators, safeBlocks []BlockWithValidators) error { - // Grab the consensus instance's lock + // Grab the event loop's lock fc.lock.Lock() defer fc.lock.Unlock() - // Ensure the consensus instance is running - if fc.status != consensusStatusRunning { - return errors.New("FastHotStuffEventLoop.ProcessTipBlock: Consensus instance is not running") + // Ensure the event loop is running + if fc.status != eventLoopStatusRunning { + return errors.New("FastHotStuffEventLoop.ProcessTipBlock: Event loop is not running") } // Validate the safe blocks and validator sets, and store them @@ -139,10 +139,10 @@ func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidators, safeBl // Evict all stale votes and timeouts fc.evictStaleVotesAndTimeouts() - // Signal the caller that we can vote for the block. The caller will decide whether to construct and + // Signal the server that we can vote for the block. The server will decide whether to construct and // broadcast the vote. - fc.ConsensusEvents <- &ConsensusEvent{ - EventType: ConsensusEventTypeVote, + fc.Events <- &FastHotStuffEvent{ + EventType: FastHotStuffEventTypeVote, TipBlockHash: fc.tip.block.GetBlockHash(), TipBlockHeight: fc.tip.block.GetHeight(), View: fc.tip.block.GetView(), @@ -155,7 +155,7 @@ func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidators, safeBl } // setSafeBlocks is a helper function that validates the provided blocks, validator sets, and stores them. -// It must be called while holding the consensus instance's lock. +// It must be called while holding the event loop's lock. 
func (fc *FastHotStuffEventLoop) storeBlocks(tip BlockWithValidators, safeBlocks []BlockWithValidators) error { // Do a basic integrity check on the tip block and validator set if !isProperlyFormedBlock(tip.Block) || !isProperlyFormedValidatorSet(tip.Validators) { @@ -196,25 +196,25 @@ func (fc *FastHotStuffEventLoop) storeBlocks(tip BlockWithValidators, safeBlocks } // ProcessValidatorVote captures an incoming vote message from a validator. This module has no knowledge -// of who the leader is for a given view, so it is up to the caller to decide whether to process the vote -// message or not. If a vote message is passed here, then the consensus instance will store it until +// of who the leader is for a given view, so it is up to the server to decide whether to process the vote +// message or not. If a vote message is passed here, then the event loop will store it until // it can construct a QC with it or until the vote's view has gone stale. // // This function does not directly check if the vote results in a stake weighted super majority vote // for the target block. Instead, it stores the vote locally and waits for the crank timer to determine -// when to run the super majority vote check, and to signal the caller that we can construct a QC. +// when to run the super majority vote check, and to signal the server that we can construct a QC. // // Reference implementation: // https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L756 func (fc *FastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { - // Grab the consensus instance's lock + // Grab the event loop's lock fc.lock.Lock() defer fc.lock.Unlock() - // Ensure the consensus instance is running. This guarantees that the chain tip and validator set + // Ensure the event loop is running. This guarantees that the chain tip and validator set // have already been set. 
- if fc.status != consensusStatusRunning { - return errors.New("FastHotStuffEventLoop.ProcessValidatorVote: Consensus instance is not running") + if fc.status != eventLoopStatusRunning { + return errors.New("FastHotStuffEventLoop.ProcessValidatorVote: Event loop is not running") } // Do a basic integrity check on the vote message @@ -267,25 +267,25 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { } // ProcessValidatorTimeout captures an incoming timeout message from a validator. This module has no knowledge -// of who the leader is for a given view, so it is up to the caller to decide whether to process the timeout -// message or not. If a timeout message is passed here, then the consensus instance will store it until +// of who the leader is for a given view, so it is up to the server to decide whether to process the timeout +// message or not. If a timeout message is passed here, then the event loop will store it until // it can construct a QC with it or until the timeout's view has gone stale. // // This function does not directly check if the timeout results in a stake weighted super majority to build // a timeout QC. Instead, it stores the timeout locally and waits for the block production scheduled task to determine -// when to run the super majority timeout check, and to signal the caller that we can construct a timeout QC. +// when to run the super majority timeout check, and to signal the server that we can construct a timeout QC. // // Reference implementation: // https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L958 func (fc *FastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) error { - // Grab the consensus instance's lock + // Grab the event loop's lock fc.lock.Lock() defer fc.lock.Unlock() - // Ensure the consensus instance is running. This guarantees that the chain tip and validator set + // Ensure the event loop is running. 
This guarantees that the chain tip and validator set // have already been set. - if fc.status != consensusStatusRunning { - return errors.New("FastHotStuffEventLoop.ProcessValidatorTimeout: Consensus instance is not running") + if fc.status != eventLoopStatusRunning { + return errors.New("FastHotStuffEventLoop.ProcessValidatorTimeout: Event loop is not running") } // Do a basic integrity check on the timeout message @@ -336,27 +336,19 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) return nil } -func (fc *FastHotStuffEventLoop) ConstructVoteQC( /* TODO */ ) { - // TODO -} - -func (fc *FastHotStuffEventLoop) ConstructTimeoutQC( /* TODO */ ) { - // TODO -} - // Sets the initial times for the block construction and timeouts and starts scheduled tasks. func (fc *FastHotStuffEventLoop) Start() { fc.lock.Lock() defer fc.lock.Unlock() - // Check if the consensus instance is either running or uninitialized. + // Check if the event loop is either running or uninitialized. // If it's running or uninitialized, then there's nothing to do here. - if fc.status != consensusStatusInitialized { + if fc.status != eventLoopStatusInitialized { return } - // Update the consensus status to mark it as running. - fc.status = consensusStatusRunning + // Update the event loop's status to mark it as running. + fc.status = eventLoopStatusRunning // Set the initial block construction and timeout scheduled tasks fc.resetScheduledTasks() @@ -366,9 +358,9 @@ func (fc *FastHotStuffEventLoop) Stop() { fc.lock.Lock() defer fc.lock.Unlock() - // Check if the consensus instance is no longer running. If it's not running + // Check if the event loop is no longer running. If it's not running // we can simply return here. 
- if fc.status != consensusStatusRunning { + if fc.status != eventLoopStatusRunning { return } @@ -376,22 +368,22 @@ func (fc *FastHotStuffEventLoop) Stop() { fc.nextBlockConstructionTask.Cancel() fc.nextTimeoutTask.Cancel() - // Update the consensus status so it is no longer marked as running. - fc.status = consensusStatusInitialized + // Update the event loop's status so it is no longer marked as running. + fc.status = eventLoopStatusInitialized } func (fc *FastHotStuffEventLoop) IsInitialized() bool { fc.lock.RLock() defer fc.lock.RUnlock() - return fc.status != consensusStatusNotInitialized + return fc.status != eventLoopStatusNotInitialized } func (fc *FastHotStuffEventLoop) IsRunning() bool { fc.lock.RLock() defer fc.lock.RUnlock() - return fc.status == consensusStatusRunning + return fc.status == eventLoopStatusRunning } // resetScheduledTasks recomputes the nextBlockConstructionTimeStamp and nextTimeoutTimeStamp @@ -430,9 +422,9 @@ func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockC fc.lock.Lock() defer fc.lock.Unlock() - // Check if the consensus instance is running. If it's not running, then there's nothing + // Check if the event loop is running. If it's not running, then there's nothing // to do here. - if fc.status != consensusStatusRunning { + if fc.status != eventLoopStatusRunning { return } @@ -447,11 +439,11 @@ func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockC // reschedule the task when it advances the view. 
if success, signersList, signature := fc.tryConstructVoteQCInCurrentView(); success { // Signal the server that we can construct a QC for the chain tip - fc.ConsensusEvents <- &ConsensusEvent{ - EventType: ConsensusEventTypeConstructVoteQC, // The event type - View: fc.currentView, // The current view in which we can construct a block - TipBlockHash: fc.tip.block.GetBlockHash(), // Block hash for the tip, which we are extending from - TipBlockHeight: fc.tip.block.GetHeight(), // Block height for the tip, which we are extending from + fc.Events <- &FastHotStuffEvent{ + EventType: FastHotStuffEventTypeConstructVoteQC, // The event type + View: fc.currentView, // The current view in which we can construct a block + TipBlockHash: fc.tip.block.GetBlockHash(), // Block hash for the tip, which we are extending from + TipBlockHeight: fc.tip.block.GetHeight(), // Block height for the tip, which we are extending from QC: &quorumCertificate{ blockHash: fc.tip.block.GetBlockHash(), // Block hash for the tip, which we are extending from view: fc.tip.block.GetView(), // The view from the tip block. This is always fc.currentView - 1 @@ -469,11 +461,11 @@ func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockC // we send a signal to the server and cancel all scheduled tasks. 
if success, safeBlock, highQC, highQCViews, signersList, signature := fc.tryConstructTimeoutQCInCurrentView(); success { // Signal the server that we can construct a timeout QC for the current view - fc.ConsensusEvents <- &ConsensusEvent{ - EventType: ConsensusEventTypeConstructTimeoutQC, // The event type - View: fc.currentView, // The view that we have a timeout QC for - TipBlockHash: highQC.GetBlockHash(), // The block hash that we extend from - TipBlockHeight: safeBlock.GetHeight(), // The block height that we extend from + fc.Events <- &FastHotStuffEvent{ + EventType: FastHotStuffEventTypeConstructTimeoutQC, // The event type + View: fc.currentView, // The view that we have a timeout QC for + TipBlockHash: highQC.GetBlockHash(), // The block hash that we extend from + TipBlockHeight: safeBlock.GetHeight(), // The block height that we extend from AggregateQC: &aggregateQuorumCertificate{ view: fc.currentView - 1, // The timed out view is always the previous view highQC: highQC, // The high QC aggregated from the timeout messages @@ -506,7 +498,7 @@ func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockC // validations to ensure that the resulting QC is valid. If a QC can be constructed, the function returns // the signers list and aggregate signature that can be used to construct the QC. // -// This function must be called while holding the consensus instance's lock. +// This function must be called while holding the event loop's lock. func (fc *FastHotStuffEventLoop) tryConstructVoteQCInCurrentView() ( _success bool, // true if and only if we are able to construct a vote QC for the tip block in the current view _signersList *bitset.Bitset, // bitset of signers for the aggregated signature for the tip block @@ -696,9 +688,9 @@ func (fc *FastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uin fc.lock.Lock() defer fc.lock.Unlock() - // Check if the consensus instance is running. 
If it's not running, then there's nothing + // Check if the event loop is running. If it's not running, then there's nothing // to do here. - if fc.status != consensusStatusRunning { + if fc.status != eventLoopStatusRunning { return } @@ -710,11 +702,11 @@ func (fc *FastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uin } // Signal the server that we are ready to time out - fc.ConsensusEvents <- &ConsensusEvent{ - EventType: ConsensusEventTypeTimeout, // The timeout event type - View: timedOutView, // The view we timed out - TipBlockHash: fc.tip.block.GetBlockHash(), // The last block we saw - TipBlockHeight: fc.tip.block.GetHeight(), // The last block we saw + fc.Events <- &FastHotStuffEvent{ + EventType: FastHotStuffEventTypeTimeout, // The timeout event type + View: timedOutView, // The view we timed out + TipBlockHash: fc.tip.block.GetBlockHash(), // The last block we saw + TipBlockHeight: fc.tip.block.GetHeight(), // The last block we saw } // Cancel the timeout task. The server will reschedule it when it advances the view. 
diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 23eafcaf5..3eb9e15ec 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -17,7 +17,7 @@ func TestInit(t *testing.T) { // Test initial status for newly constructed instance { fc := NewFastHotStuffEventLoop() - require.Equal(t, consensusStatusNotInitialized, fc.status) + require.Equal(t, eventLoopStatusNotInitialized, fc.status) require.NotPanics(t, fc.Stop) // Calling Stop() on an uninitialized instance should be a no-op } @@ -92,10 +92,10 @@ func TestInit(t *testing.T) { ) require.NoError(t, err) - require.Equal(t, consensusStatusInitialized, fc.status) + require.Equal(t, eventLoopStatusInitialized, fc.status) require.NotPanics(t, fc.Stop) // Calling Stop() on an initialized instance should be a no-op - require.Equal(t, fc.status, consensusStatusInitialized) + require.Equal(t, fc.status, eventLoopStatusInitialized) require.Equal(t, fc.tip.block.GetBlockHash().GetValue(), block.GetBlockHash().GetValue()) require.Equal(t, fc.tip.block.GetView(), uint64(2)) @@ -127,7 +127,7 @@ func TestProcessTipBlock(t *testing.T) { ) require.NoError(t, err) - // Test ProcessTipBlock() function when consensus event loop is not running + // Test ProcessTipBlock() function when event loop is not running { err := fc.ProcessTipBlock( BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip @@ -136,7 +136,7 @@ func TestProcessTipBlock(t *testing.T) { require.Error(t, err) } - // Start the consensus event loop + // Start the event loop fc.Start() // Test ProcessTipBlock() function with malformed tip block @@ -250,7 +250,7 @@ func TestProcessTipBlock(t *testing.T) { fc.Stop() } -func TestAdvanceView(t *testing.T) { +func TestAdvanceViewOnTimeout(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) fc := NewFastHotStuffEventLoop() @@ -262,13 +262,13 @@ func TestAdvanceView(t *testing.T) { ) require.NoError(t, err) - // Running AdvanceView() should fail 
because the consensus event loop is not running + // Running AdvanceViewOnTimeout() should fail because the event loop is not running { - _, err := fc.AdvanceView() + _, err := fc.AdvanceViewOnTimeout() require.Error(t, err) } - // Start the consensus event loop + // Start the event loop fc.Start() // Populate the votesSeen and timeoutsSeen maps with dummy data @@ -310,9 +310,9 @@ func TestAdvanceView(t *testing.T) { } } - // Run AdvanceView() to view 4 + // Run AdvanceViewOnTimeout() to view 4 { - newView, err := fc.AdvanceView() + newView, err := fc.AdvanceViewOnTimeout() require.NoError(t, err) require.Equal(t, uint64(4), newView) } @@ -323,9 +323,9 @@ func TestAdvanceView(t *testing.T) { require.Equal(t, len(fc.timeoutsSeen), 3) } - // Run AdvanceView() to view 5 + // Run AdvanceViewOnTimeout() to view 5 { - newView, err := fc.AdvanceView() + newView, err := fc.AdvanceViewOnTimeout() require.NoError(t, err) require.Equal(t, uint64(5), newView) } @@ -357,7 +357,7 @@ func TestProcessValidatorVote(t *testing.T) { // Current View = 4 { - currentView, err := fc.AdvanceView() + currentView, err := fc.AdvanceViewOnTimeout() require.NoError(t, err) require.Equal(t, uint64(4), currentView) } @@ -441,7 +441,7 @@ func TestProcessValidatorTimeout(t *testing.T) { // Current View = 4 { - currentView, err := fc.AdvanceView() + currentView, err := fc.AdvanceViewOnTimeout() require.NoError(t, err) require.Equal(t, uint64(4), currentView) } @@ -525,10 +525,10 @@ func TestTimeoutScheduledTaskExecuted(t *testing.T) { fc.Start() // Wait for the timeout signal to be sent - timeoutSignal := <-fc.ConsensusEvents + timeoutSignal := <-fc.Events // Confirm that the timeout signal is for the the expected view - require.Equal(t, timeoutSignal.EventType, ConsensusEventTypeTimeout) + require.Equal(t, timeoutSignal.EventType, FastHotStuffEventTypeTimeout) require.Equal(t, timeoutSignal.View, dummyBlock.GetView()+1) require.Equal(t, timeoutSignal.TipBlockHash.GetValue(), 
dummyBlock.GetBlockHash().GetValue()) @@ -536,13 +536,13 @@ func TestTimeoutScheduledTaskExecuted(t *testing.T) { require.False(t, fc.nextTimeoutTask.IsScheduled()) // Advance the view, which should reset the timeout scheduled task - fc.AdvanceView() + fc.AdvanceViewOnTimeout() // Wait for the timeout signal to be sent - timeoutSignal = <-fc.ConsensusEvents + timeoutSignal = <-fc.Events // Confirm that the timeout signal is for the the expected view - require.Equal(t, timeoutSignal.EventType, ConsensusEventTypeTimeout) + require.Equal(t, timeoutSignal.EventType, FastHotStuffEventTypeTimeout) require.Equal(t, timeoutSignal.View, dummyBlock.GetView()+2) require.Equal(t, timeoutSignal.TipBlockHash.GetValue(), dummyBlock.GetBlockHash().GetValue()) @@ -571,7 +571,7 @@ func TestResetEventLoopSignal(t *testing.T) { require.Equal(t, fc.nextTimeoutTask.GetDuration(), 2*oneHourInNanoSecs) // 2 hours away // Advance the view to simulate a timeout - _, err = fc.AdvanceView() + _, err = fc.AdvanceViewOnTimeout() require.NoError(t, err) // Confirm the ETAs for the block construction and timeout timers @@ -579,7 +579,7 @@ func TestResetEventLoopSignal(t *testing.T) { require.Equal(t, fc.nextTimeoutTask.GetDuration(), 4*oneHourInNanoSecs) // 2 hours * 2 = 4 hours away // Advance the view to simulate a 2nd timeout - _, err = fc.AdvanceView() + _, err = fc.AdvanceViewOnTimeout() require.NoError(t, err) // Confirm the ETAs for the block construction and timeout timers @@ -587,7 +587,7 @@ func TestResetEventLoopSignal(t *testing.T) { require.Equal(t, fc.nextTimeoutTask.GetDuration(), 8*oneHourInNanoSecs) // 2 hours * 2^2 = 8 hours away // Advance the view to simulate a 3nd timeout - _, err = fc.AdvanceView() + _, err = fc.AdvanceViewOnTimeout() require.NoError(t, err) // Confirm the ETAs for the block construction and timeout timers @@ -650,7 +650,7 @@ func TestVoteQCConstructionSignal(t *testing.T) { // Wait up to 100 milliseconds for a block construction signal to be sent select { - 
case <-fc.ConsensusEvents: + case <-fc.Events: require.Fail(t, "Received a block construction signal when there were not enough votes to construct a QC") case <-time.After(100 * time.Millisecond): // Do nothing @@ -685,11 +685,11 @@ func TestVoteQCConstructionSignal(t *testing.T) { // Start the event loop fc.Start() - var blockConstructionSignal *ConsensusEvent + var blockConstructionSignal *FastHotStuffEvent // Wait up to 100 milliseconds for a block construction signal to be sent select { - case blockConstructionSignal = <-fc.ConsensusEvents: + case blockConstructionSignal = <-fc.Events: // Do nothing case <-time.After(100 * time.Millisecond): require.Fail(t, "Did not receive a block construction signal when there were enough votes to construct a QC") @@ -699,7 +699,7 @@ func TestVoteQCConstructionSignal(t *testing.T) { fc.Stop() // Confirm that the block construction signal has the expected parameters - require.Equal(t, blockConstructionSignal.EventType, ConsensusEventTypeConstructVoteQC) + require.Equal(t, blockConstructionSignal.EventType, FastHotStuffEventTypeConstructVoteQC) require.Equal(t, blockConstructionSignal.View, block.GetView()+1) require.Equal(t, blockConstructionSignal.TipBlockHash.GetValue(), block.GetBlockHash().GetValue()) require.Equal(t, blockConstructionSignal.TipBlockHeight, block.GetHeight()) @@ -778,7 +778,7 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { // Wait up to 100 milliseconds for a block construction signal to be sent select { - case <-fc.ConsensusEvents: + case <-fc.Events: require.Fail(t, "Received a block construction signal when there were not enough timeouts to construct a timeout QC") case <-time.After(100 * time.Millisecond): // Do nothing @@ -828,11 +828,11 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { // Start the event loop fc.Start() - var signal *ConsensusEvent + var signal *FastHotStuffEvent // Wait up to 100 milliseconds for a block construction signal to be sent select { - case signal = 
<-fc.ConsensusEvents: + case signal = <-fc.Events: // Do nothing case <-time.After(100 * time.Second): require.Fail(t, "Did not receive a block construction signal when there were enough timeouts to construct a timeout QC") @@ -842,7 +842,7 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { fc.Stop() // Confirm that the block construction signal has the expected parameters - require.Equal(t, signal.EventType, ConsensusEventTypeConstructTimeoutQC) + require.Equal(t, signal.EventType, FastHotStuffEventTypeConstructTimeoutQC) require.Equal(t, signal.View, uint64(5)) // The timeout QC will be proposed in view 5 require.Equal(t, signal.TipBlockHash.GetValue(), block1.GetBlockHash().GetValue()) // The timeout QC will be proposed in a block that extends from block 1 require.Equal(t, signal.TipBlockHeight, block1.GetHeight()) // The timeout QC will be proposed at the block height after block 1 @@ -880,8 +880,8 @@ func TestFastHotStuffEventLoopStartStop(t *testing.T) { // Start the event loop fc.Start() - // Confirm the consensus instance status has changed to running - require.Equal(t, consensusStatusRunning, fc.status) + // Confirm the event loop status has changed to running + require.Equal(t, eventLoopStatusRunning, fc.status) // Confirm that the ETAs for the block construction and timeout timers have been set require.Equal(t, fc.nextBlockConstructionTask.GetDuration(), oneHourInNanoSecs) @@ -890,8 +890,8 @@ func TestFastHotStuffEventLoopStartStop(t *testing.T) { // Stop the event loop fc.Stop() - // Confirm the consensus instance status has reverted to initialized - require.Equal(t, consensusStatusInitialized, fc.status) + // Confirm the event loop status has reverted to initialized + require.Equal(t, eventLoopStatusInitialized, fc.status) // Confirm that calling fc.Stop() again doesn't panic require.NotPanics(t, fc.Stop) diff --git a/consensus/types.go b/consensus/types.go index 614a2bc88..0e71cfb48 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ 
-9,26 +9,26 @@ import ( "github.com/holiman/uint256" ) -// ConsensusEvent is a way for FastHotStuffEventLoop to send messages back to the Server. +// FastHotStuffEventType is a way for FastHotStuffEventLoop to send messages back to the Server. // There are four types of events that can be sent: -// - ConsensusEventTypeVote: The consensus is ready to vote on a block at a given block height and view -// - ConsensusEventTypeTimeout: The consensus has timed out on a view -// - ConsensusEventTypeConstructVoteQC: The consensus has a QC for a block and is ready to construct the +// - FastHotStuffEventTypeVote: The event loop is ready to vote on a block at a given block height and view +// - FastHotStuffEventTypeTimeout: The event loop has timed out on a view +// - FastHotStuffEventTypeConstructVoteQC: The event loop has a QC for a block and is ready to construct the // next block at the next block height and the current view -// - ConsensusEventTypeConstructTimeoutQC: The consensus has a timeout QC for a view and is ready to +// - FastHotStuffEventTypeConstructTimeoutQC: The event loop has a timeout QC for a view and is ready to // construct an empty block with the timeout QC at the next block height and the current view -type ConsensusEventType byte +type FastHotStuffEventType byte const ( - ConsensusEventTypeVote ConsensusEventType = 0 - ConsensusEventTypeTimeout ConsensusEventType = 1 - ConsensusEventTypeConstructVoteQC ConsensusEventType = 2 - ConsensusEventTypeConstructTimeoutQC ConsensusEventType = 3 + FastHotStuffEventTypeVote FastHotStuffEventType = 0 + FastHotStuffEventTypeTimeout FastHotStuffEventType = 1 + FastHotStuffEventTypeConstructVoteQC FastHotStuffEventType = 2 + FastHotStuffEventTypeConstructTimeoutQC FastHotStuffEventType = 3 ) -type ConsensusEvent struct { - EventType ConsensusEventType +type FastHotStuffEvent struct { + EventType FastHotStuffEventType TipBlockHash BlockHash TipBlockHeight uint64 View uint64 @@ -119,32 +119,29 @@ type 
BlockWithValidators struct { const signalChannelBufferSize = 100 // An instance of FastHotStuffEventLoop is a self-contained module that represents a single node running -// the event loop for the Fast HotStuff consensus protocol. The module is initialized at the current chain's -// tip, with a given block hash, block height, view number, and validator set. The module is simplified and +// the event loop for the Fast HotStuff consensus protocol. The event loop is initialized at the current chain's +// tip, with a given block hash, block height, view number, and validator set. The event loop is simplified and // does not know whether its role is that of a block proposer or a replica validator. // -// Given a block that's at the tip of the current chain, this module maintains its own internal data structures +// Given a block that's at the tip of the current chain, the event loop maintains its own internal data structures // and runs an internal event loop that handles all of the following: // - Tracking of the current view, incrementing the view during timeouts, and computing exponential // back-off durations during consecutive timeouts // - Aggregation of votes and QC construction for the current block // - Aggregation of timeout messages for the current view -// - Signaling its caller when it can vote on the current chain tip -// - Signaling its caller when it has timed out the current view -// - Signaling its caller when it has a QC for the current block -// - Signaling its caller when it has a timeout QC for the current view +// - Signaling the server when it can vote on the current tip block +// - Signaling the server when it has timed out the current view +// - Signaling the server when it has a QC for the current tip block +// - Signaling the server when it has a timeout QC for the current view // -// When a new block is connected to the chain, the caller is expected to update the chain tip. 
The module -// resets all internal data structures and scheduled tasks to handle all of the above based on the new chain tip. +// When a new block is connected to the chain, the server is expected to update the tip block. The event loop +// resets all internal data structures and scheduled tasks to handle all of the above based on the new tip. // -// This module is very simple and only houses the logic that decides what action to perform next given the -// current chain tip. The module does not track the history of blocks, and instead needs its caller to -// update the block at the current chain tip. It expects its caller to maintain the block chain, -// the index of all past blocks, to perform QC validations for incoming blocks, to handle the commit rule, -// and only then to pass the validated chain tip. Note: this module takes the provided chain tip as a -// trusted input and does NOT validate any incoming blocks. This also mean the module expects its caller to -// track historical vote and timeout messages it has sent so as to not vote more than once at a given view -// or block height. +// This event loop is simple and only houses the logic that decides what action to perform next given the +// current tip block. The event loop does not track the full history of blocks, and instead needs the server +// to pass in the tip block and safe extendable blocks. It expects the server to maintain the block chain, +// the index of all past blocks, to perform QC validations for incoming blocks, to handle the commit rule, to +// handle reorgs, and to only then to pass the the new validated tip. type FastHotStuffEventLoop struct { lock sync.RWMutex @@ -179,17 +176,17 @@ type FastHotStuffEventLoop struct { timeoutsSeen map[uint64]map[string]TimeoutMessage // Externally accessible channel for signals sent to the Server. 
- ConsensusEvents chan *ConsensusEvent + Events chan *FastHotStuffEvent // Internal statuses and wait groups used to coordinate the start and stop operations for // the event loop. - status consensusStatus + status eventLoopStatus } -type consensusStatus byte +type eventLoopStatus byte const ( - consensusStatusNotInitialized consensusStatus = 0 // Not initialized and the event loop is not running - consensusStatusInitialized consensusStatus = 1 // Initialized but the event loop is not running - consensusStatusRunning consensusStatus = 2 // Initialized and the event loop is running + eventLoopStatusNotInitialized eventLoopStatus = 0 // Not initialized and the event loop is not running + eventLoopStatusInitialized eventLoopStatus = 1 // Initialized but the event loop is not running + eventLoopStatusRunning eventLoopStatus = 2 // Initialized and the event loop is running ) diff --git a/lib/server.go b/lib/server.go index 9d6291ddb..d455e08d0 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2216,7 +2216,7 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { } } -func (srv *Server) _handleFastHostStuffBlockProposal(event *consensus.ConsensusEvent) { +func (srv *Server) _handleFastHostStuffBlockProposal(event *consensus.FastHotStuffEvent) { // The consensus module has signaled that we can propose a block at a certain block // height. We construct the block and broadcast it here: // 1. Verify that the block height we want to propose at is valid @@ -2230,7 +2230,7 @@ func (srv *Server) _handleFastHostStuffBlockProposal(event *consensus.ConsensusE // 7. Broadcast the block to the network } -func (srv *Server) _handleFastHostStuffEmptyTimeoutBlockProposal(event *consensus.ConsensusEvent) { +func (srv *Server) _handleFastHostStuffEmptyTimeoutBlockProposal(event *consensus.FastHotStuffEvent) { // The consensus module has signaled that we have a timeout QC and can propose one at a certain // block height. 
We construct an empty block with a timeout QC and broadcast it here: // 1. Verify that the block height and view we want to propose at is valid @@ -2241,7 +2241,7 @@ func (srv *Server) _handleFastHostStuffEmptyTimeoutBlockProposal(event *consensu // 6. Broadcast the block to the network } -func (srv *Server) _handleFastHostStuffVote(event *consensus.ConsensusEvent) { +func (srv *Server) _handleFastHostStuffVote(event *consensus.FastHotStuffEvent) { // The consensus module has signaled that we can vote on a block. We construct and // broadcast the vote here: // 1. Verify that the block height we want to vote on is valid @@ -2250,7 +2250,7 @@ func (srv *Server) _handleFastHostStuffVote(event *consensus.ConsensusEvent) { // 4. Broadcast the timeout msg to the network } -func (srv *Server) _handleFastHostStuffTimeout(event *consensus.ConsensusEvent) { +func (srv *Server) _handleFastHostStuffTimeout(event *consensus.FastHotStuffEvent) { // The consensus module has signaled that we have timed out for a view. We construct and // broadcast the timeout here: // 1. Verify the block height and view we want to timeout on are valid @@ -2259,15 +2259,15 @@ func (srv *Server) _handleFastHostStuffTimeout(event *consensus.ConsensusEvent) // 4. 
Broadcast the timeout msg to the network } -func (srv *Server) _handleFastHostStuffConsensusEvent(event *consensus.ConsensusEvent) { +func (srv *Server) _handleFastHostStuffConsensusEvent(event *consensus.FastHotStuffEvent) { switch event.EventType { - case consensus.ConsensusEventTypeVote: + case consensus.FastHotStuffEventTypeVote: srv._handleFastHostStuffVote(event) - case consensus.ConsensusEventTypeTimeout: + case consensus.FastHotStuffEventTypeTimeout: srv._handleFastHostStuffTimeout(event) - case consensus.ConsensusEventTypeConstructVoteQC: + case consensus.FastHotStuffEventTypeConstructVoteQC: srv._handleFastHostStuffBlockProposal(event) - case consensus.ConsensusEventTypeConstructTimeoutQC: + case consensus.FastHotStuffEventTypeConstructTimeoutQC: srv._handleFastHostStuffEmptyTimeoutBlockProposal(event) } } @@ -2297,7 +2297,7 @@ func (srv *Server) _startConsensus() { } select { - case consensusEvent := <-srv.fastHotStuffEventLoop.ConsensusEvents: + case consensusEvent := <-srv.fastHotStuffEventLoop.Events: { glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.TipBlockHeight) srv._handleFastHostStuffConsensusEvent(consensusEvent) From ba66d8ace2544572e2d01eb9374825af9a47d87e Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 3 Oct 2023 10:48:18 -0700 Subject: [PATCH 214/762] Refactor QC event construction into helper functions (#709) --- consensus/event_loop.go | 104 +++++++++++++++++----------------------- 1 file changed, 44 insertions(+), 60 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 9b38cebd6..46c006505 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -437,49 +437,17 @@ func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockC // Check if the conditions are met to construct a QC from votes for the chain tip. 
If so, // we send a signal to the server and cancel the block construction task. The server will // reschedule the task when it advances the view. - if success, signersList, signature := fc.tryConstructVoteQCInCurrentView(); success { + if voteQCEvent := fc.tryConstructVoteQCInCurrentView(); voteQCEvent != nil { // Signal the server that we can construct a QC for the chain tip - fc.Events <- &FastHotStuffEvent{ - EventType: FastHotStuffEventTypeConstructVoteQC, // The event type - View: fc.currentView, // The current view in which we can construct a block - TipBlockHash: fc.tip.block.GetBlockHash(), // Block hash for the tip, which we are extending from - TipBlockHeight: fc.tip.block.GetHeight(), // Block height for the tip, which we are extending from - QC: &quorumCertificate{ - blockHash: fc.tip.block.GetBlockHash(), // Block hash for the tip, which we are extending from - view: fc.tip.block.GetView(), // The view from the tip block. This is always fc.currentView - 1 - aggregatedSignature: &aggregatedSignature{ - signersList: signersList, // The signers list who voted on the tip block - signature: signature, // Aggregated signature from votes on the tip block - }, - }, - } - + fc.Events <- voteQCEvent return } // Check if we have enough timeouts to build an aggregate QC for the previous view. If so, // we send a signal to the server and cancel all scheduled tasks. 
- if success, safeBlock, highQC, highQCViews, signersList, signature := fc.tryConstructTimeoutQCInCurrentView(); success { + if timeoutQCEvent := fc.tryConstructTimeoutQCInCurrentView(); timeoutQCEvent != nil { // Signal the server that we can construct a timeout QC for the current view - fc.Events <- &FastHotStuffEvent{ - EventType: FastHotStuffEventTypeConstructTimeoutQC, // The event type - View: fc.currentView, // The view that we have a timeout QC for - TipBlockHash: highQC.GetBlockHash(), // The block hash that we extend from - TipBlockHeight: safeBlock.GetHeight(), // The block height that we extend from - AggregateQC: &aggregateQuorumCertificate{ - view: fc.currentView - 1, // The timed out view is always the previous view - highQC: highQC, // The high QC aggregated from the timeout messages - highQCViews: highQCViews, // The high view for each validator who timed out - aggregatedSignature: &aggregatedSignature{ - signersList: signersList, // The signers list of validators who timed out - signature: signature, // The aggregated signature from validators who timed out - }, - }, - } - - // Cancel the block construction task since we know we can construct a timeout QC in the current view. - // It will be rescheduled when we advance view. - fc.nextBlockConstructionTask.Cancel() + fc.Events <- timeoutQCEvent return } @@ -499,16 +467,12 @@ func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockC // the signers list and aggregate signature that can be used to construct the QC. // // This function must be called while holding the event loop's lock. 
-func (fc *FastHotStuffEventLoop) tryConstructVoteQCInCurrentView() ( - _success bool, // true if and only if we are able to construct a vote QC for the tip block in the current view - _signersList *bitset.Bitset, // bitset of signers for the aggregated signature for the tip block - _aggregateSignature *bls.Signature, // aggregated signature for the tip block -) { +func (fc *FastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuffEvent { // If currentView != tipBlock.View + 1, then we have timed out at some point, and can no longer // construct a block with a QC of votes for the tip block. tipBlock := fc.tip.block if fc.currentView != tipBlock.GetView()+1 { - return false, nil, nil + return nil } // Fetch the validator set at the tip. @@ -552,18 +516,31 @@ func (fc *FastHotStuffEventLoop) tryConstructVoteQCInCurrentView() ( // If we don't have a super-majority vote for the chain tip, then we can't build a QC. if !isSuperMajorityStake(totalVotingStake, totalStake) { - return false, nil, nil + return nil } // If we reach this point, then we have enough signatures to build a QC for the tip block. Try to // aggregate the signatures. This should never fail. aggregateSignature, err := bls.AggregateSignatures(signatures) if err != nil { - return false, nil, nil + return nil + } + + // Happy path. Construct the QC and return as an event to signal to the server. + return &FastHotStuffEvent{ + EventType: FastHotStuffEventTypeConstructVoteQC, // The event type + View: fc.currentView, // The current view in which we can construct a block + TipBlockHash: fc.tip.block.GetBlockHash(), // Block hash for the tip, which we are extending from + TipBlockHeight: fc.tip.block.GetHeight(), // Block height for the tip, which we are extending from + QC: &quorumCertificate{ + blockHash: fc.tip.block.GetBlockHash(), // Block hash for the tip, which we are extending from + view: fc.tip.block.GetView(), // The view from the tip block. 
This is always fc.currentView - 1 + aggregatedSignature: &aggregatedSignature{ + signersList: signersList, // The signers list who voted on the tip block + signature: aggregateSignature, // Aggregated signature from votes on the tip block + }, + }, } - - // Happy path - return true, signersList, aggregateSignature } // tryConstructTimeoutQCInCurrentView is a helper function that attempts to construct a timeout QC for the @@ -574,14 +551,7 @@ func (fc *FastHotStuffEventLoop) tryConstructVoteQCInCurrentView() ( // to construct the timeout QC. // // This function must be called while holding the consensus instance's lock. -func (fc *FastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() ( - _success bool, // true if and only if we are able to construct a timeout QC in the current view - _safeBlock Block, // the safe block that the high QC is from; the timeout QC proposed will extend from this block - _highQC QuorumCertificate, // high QC aggregated from validators who timed out - _highQCViews []uint64, // high QC views for each validator who timed out - _signersList *bitset.Bitset, // bitset of signers for the aggregated signature from the timeout messages - _aggregatedSignature *bls.Signature, // aggregated signature from the validators' timeout messages -) { +func (fc *FastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotStuffEvent { // Fetch all timeouts for the previous view. All timeout messages for a view are aggregated and // proposed in the next view. So if we want to propose a timeout QC in the current view, we need @@ -616,14 +586,14 @@ func (fc *FastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() ( // If we didn't find a high QC or didn't find any valid timeout messages, then we can't build a timeout QC. if isInterfaceNil(validatorsHighQC) { - return false, nil, nil, nil, nil, nil + return nil } // Fetch the validator set for the block height of the high QC. This lookup is guaranteed to succeed // because it succeeded above. 
ok, safeBlock, validatorSet, _ := fc.fetchSafeBlockInfo(validatorsHighQC.GetBlockHash()) if !ok { - return false, nil, nil, nil, nil, nil + return nil } // Compute the total stake and total stake with timeouts @@ -668,17 +638,31 @@ func (fc *FastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() ( // Check if we have a super majority of stake that has timed out if !isSuperMajorityStake(totalTimedOutStake, totalStake) { - return false, nil, nil, nil, nil, nil + return nil } // Finally aggregate the signatures from the timeouts aggregateSignature, err := bls.AggregateSignatures(signatures) if err != nil { - return false, nil, nil, nil, nil, nil + return nil } // Happy path - return true, safeBlock, validatorsHighQC, highQCViews, signersList, aggregateSignature + return &FastHotStuffEvent{ + EventType: FastHotStuffEventTypeConstructTimeoutQC, // The event type + View: fc.currentView, // The view that we have a timeout QC for + TipBlockHash: validatorsHighQC.GetBlockHash(), // The block hash that we extend from + TipBlockHeight: safeBlock.GetHeight(), // The block height that we extend from + AggregateQC: &aggregateQuorumCertificate{ + view: fc.currentView - 1, // The timed out view is always the previous view + highQC: validatorsHighQC, // The high QC aggregated from the timeout messages + highQCViews: highQCViews, // The high view for each validator who timed out + aggregatedSignature: &aggregatedSignature{ + signersList: signersList, // The signers list of validators who timed out + signature: aggregateSignature, // The aggregated signature from validators who timed out + }, + }, + } } // When this function is triggered, it means that we have reached out the timeout ETA for the From 53b33c552e0d8b1535c3e35034070b5f2f7b87c9 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 3 Oct 2023 13:58:55 -0400 Subject: [PATCH 215/762] process-pos-block-skeleton (#686) * process-pos-block-skeleton * update return type of 
ProcessBlockPoS * update the skeleton after looking at reference implementation more * refactor * address SA feedback --------- Co-authored-by: Lazy Nina <> --- lib/blockchain.go | 21 +--- lib/pos_blockchain.go | 250 ++++++++++++++++++++++++++++++++++++++++++ lib/pos_network.go | 10 ++ 3 files changed, 264 insertions(+), 17 deletions(-) create mode 100644 lib/pos_blockchain.go diff --git a/lib/blockchain.go b/lib/blockchain.go index 76786930e..d4fb4662a 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -482,6 +482,10 @@ type Blockchain struct { bestHeaderChain []*BlockNode bestHeaderChainMap map[BlockHash]*BlockNode + // Tracks all uncommitted blocks in memory. This includes blocks that are not part + // of the best chain. + uncommittedBlocksMap map[BlockHash]*MsgDeSoBlock + // We keep track of orphan blocks with the following data structures. Orphans // are not written to disk and are only cached in memory. Moreover we only keep // up to MaxOrphansInMemory of them in order to prevent memory exhaustion. @@ -2632,23 +2636,6 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures return isMainChain, false, nil } -// processBlockPoS runs the Fast-Hotstuff block connect and commit rule as follows: -// 1. Validate on an incoming block and its header -// 2. Store the block in the db -// 3. Resolves forks within the last two blocks -// 4. Connect the block to the blockchain's tip -// 5. If applicable, flush the incoming block's grandparent to the DB -// 6. 
Notify the block proposer, pacemaker, and voting logic that the incoming block has been accepted -func (bc *Blockchain) processBlockPoS(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, err error) { - // TODO: Implement me - return false, false, fmt.Errorf("ProcessBlockPoS: Not implemented yet") -} - -func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { - // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks - panic("GetUncommittedTipView: Not implemented yet") -} - // DisconnectBlocksToHeight will rollback blocks from the db and blockchain structs until block tip reaches the provided // blockHeight parameter. func (bc *Blockchain) DisconnectBlocksToHeight(blockHeight uint64, snap *Snapshot) error { diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go new file mode 100644 index 000000000..217faa767 --- /dev/null +++ b/lib/pos_blockchain.go @@ -0,0 +1,250 @@ +package lib + +import ( + "github.com/golang/glog" + "github.com/pkg/errors" +) + +// processBlockPoS runs the Fast-Hotstuff block connect and commit rule as follows: +// 1. Validate on an incoming block, its header, its block height, the leader, and its QCs (vote or timeout) +// 2. Store the block in the block index and uncommitted blocks map. +// 3. Determine if we're missing a parent block of this block and any of its parents from the block index. +// If so, return the hash of the missing block. +// 4. Resolves forks within the last two blocks +// 5. Connect the block to the blockchain's tip +// 6. Run the commit rule - If applicable, flushes the incoming block's grandparent to the DB +// 7. Prune in-memory struct holding uncommitted block. +// 8. Update the currentView to this new block's view + 1 +func (bc *Blockchain) processBlockPoS(desoBlock *MsgDeSoBlock, verifySignatures bool) (_success bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { + // TODO: Implement me + // 1. 
Start with all sanity checks of the block. + // TODO: Check if err is for view > latest committed block view and <= latest uncommitted block. + // If so, we need to perform the rest of the validations and then add to our block index. + if err := bc.validateDeSoBlockPoS(desoBlock); err != nil { + + } + // TODO: Get validator set for current block height. Alternatively, we could do this in + // validateQC, but we may need the validator set elsewhere in this function anyway. + var validatorSet []*ValidatorEntry + // 1e. Validate QC + if err := bc.validateQC(desoBlock, validatorSet); err != nil { + return false, false, nil, err + } + + // @sofonias @piotr - should we move this to + // If the block doesn’t contain a ValidatorsTimeoutAggregateQC, then that indicates that we + // did NOT timeout in the previous view, which means we should just check that + // the QC corresponds to the previous view. + if desoBlock.Header.ValidatorsTimeoutAggregateQC.isEmpty() { + // The block is safe to vote on if it is a direct child of the previous + // block. This means that the parent and child blocks have consecutive + // views. We use the current block’s QC to find the view of the parent. + // TODO: Any processing related to the block's vote QC. + } else { + // If we have a ValidatorsTimeoutAggregateQC set on the block, it means the nodes decided + // to skip a view by sending TimeoutMessages to the leader, so we process + // the block accordingly. + // 1f. If timeout QC, validate that block hash isn't too far back from the latest. + if err := bc.validateTimeoutQC(desoBlock, validatorSet); err != nil { + return false, false, nil, err + } + // TODO: Get highest timeout QC from the block. + // We find the QC with the highest view among the QCs contained in the + // AggregateQC. + var highestTimeoutQC *QuorumCertificate + // TODO: Check if our local highestQC has a smaller view than the highestTimeoutQC. 
+ // If our local highestQC has a smaller view than the highestTimeoutQC, + // we update our local highestQC. + _ = highestTimeoutQC + } + + // 2. We can now add this block to the block index since we have performed + // all basic validations. We can also add it to the uncommittedBlocksMap + if err := bc.addBlockToBlockIndex(desoBlock); err != nil { + return false, false, nil, err + } + + // 3. Determine if we're missing a parent block of this block and any of its parents from the block index. + // If so, add block to block index and return the hash of the missing block. + missingBlockHash, err := bc.validateAncestorsExist(desoBlock) + if err != nil { + return false, false, nil, err + } + if missingBlockHash != nil { + return false, true, []*BlockHash{missingBlockHash}, nil + } + + // 4. Handle reorgs if necessary + if bc.shouldReorg(desoBlock) { + if err = bc.handleReorg(desoBlock); err != nil { + return false, false, nil, err + } + } + + // Happy path + // 5. Add block to best chain. + if err = bc.addBlockToBestChain(desoBlock); err != nil { + return false, false, nil, err + } + + // 6. Commit grandparent if possible. + if err = bc.commitGrandparents(desoBlock); err != nil { + return false, false, nil, err + } + + // 7. Update in-memory struct holding uncommitted blocks. + if err = bc.pruneUncommittedBlocks(desoBlock); err != nil { + // We glog and continue here as failing to prune the uncommitted blocks map is not a + // critical error. + glog.Errorf("processBlockPoS: Error pruning uncommitted blocks: %v", err) + } + + // 8. Update current view to block's view + 1 + bc.updateCurrentView(desoBlock) + + return true, false, nil, nil +} + +// validateDeSoBlockPoS performs all basic validations on a block as it relates to +// the Blockchain struct. 
+func (bc *Blockchain) validateDeSoBlockPoS(desoBlock *MsgDeSoBlock) error { + // Surface Level validation of the block + if err := bc.validateBlockGeneral(desoBlock); err != nil { + return err + } + // Validate Block Height + if err := bc.validateBlockHeight(desoBlock); err != nil { + return err + } + // Validate View + if err := bc.validateBlockView(desoBlock); err != nil { + // Check if err is for view > latest committed block view and <= latest uncommitted block. + // If so, we need to perform the rest of the validations and then add to our block index. + // TODO: implement check on error described above. Caller will handle this. + return err + } + // Validate Leader + if err := bc.validateBlockLeader(desoBlock); err != nil { + return err + } + return nil +} + +// validateBlockGeneral validates the block at a surface level. It checks +// that the timestamp is valid, that the version of the header is valid, +// and other general integrity checks (such as not malformed). +func (bc *Blockchain) validateBlockGeneral(desoBlock *MsgDeSoBlock) error { + // TODO: Implement me + return errors.New("IMPLEMENT ME") +} + +// validateBlockHeight validates the block height for a given block. It checks +// that this block height is exactly one greater than the current block height. +// TODO: Are we sure that's the correct validation here? +func (bc *Blockchain) validateBlockHeight(desoBlock *MsgDeSoBlock) error { + // TODO: Implement me + return errors.New("IMPLEMENT ME") +} + +// validateBlockView validates the view for a given block. First, it checks that +// the view is greater than the latest committed block view. If not, +// we return an error indicating that we'll never accept this block. Next, +// it checks that the view is less than or equal to the latest uncommitted block. +// If not, we return an error indicating that we'll want to add this block as an +// orphan. 
Then it will check that the view is exactly one greater than the
+// latest uncommitted block if we have a regular vote QC. If this block has a
+// timeout QC, it will check that the view is at least greater than the latest
+// uncommitted block's view + 1.
+func (bc *Blockchain) validateBlockView(desoBlock *MsgDeSoBlock) error {
+	// TODO: Implement me
+	return errors.New("IMPLEMENT ME")
+}
+
+// validateBlockLeader validates that the proposer is the expected proposer for the
+// block height + view number pair.
+func (bc *Blockchain) validateBlockLeader(desoBlock *MsgDeSoBlock) error {
+	// TODO: Implement me
+	return errors.New("IMPLEMENT ME")
+}
+
+// validateQC validates that the QC of this block is valid, meaning a super majority
+// of the validator set has voted (or timed out). Assumes ValidatorEntry list is sorted.
+func (bc *Blockchain) validateQC(desoBlock *MsgDeSoBlock, validatorSet []*ValidatorEntry) error {
+	// TODO: Implement me
+	return errors.New("IMPLEMENT ME")
+}
+
+// validateTimeoutQC validates that the parent block hash is not too far back from the latest.
+// Specifically, it checks that the parent block hash is at least the latest committed block.
+func (bc *Blockchain) validateTimeoutQC(desoBlock *MsgDeSoBlock, validatorSet []*ValidatorEntry) error {
+	// TODO: Implement me
+	return errors.New("IMPLEMENT ME")
+}
+
+// validateAncestorsExist checks that all ancestors of this block exist in the block index.
+// If an ancestor is not found, we'll return the block hash of the missing ancestor so the
+// caller can request this block.
+func (bc *Blockchain) validateAncestorsExist(desoBlock *MsgDeSoBlock) (_missingBlockHash *BlockHash, _err error) {
+	// Notes: starting from the block passed in, we'll look for the parent in the block index.
+	// 1. If the parent does not appear in the block index, we'll return the parent's hash.
+	// 2. 
If the parent exists in the block index AND is in the best chain, we can safely assume + // that all ancestors exist in the block index. + // 3. If the parent exists in the block index but is not in the best chain, we repeat from + // step 1 with the parent as the block passed in. + // TODO: Implement me + return nil, errors.New("IMPLEMENT ME") +} + +// addBlockToBlockIndex adds the block to the block index. +func (bc *Blockchain) addBlockToBlockIndex(desoBlock *MsgDeSoBlock) error { + // TODO: Implement me. + return errors.New("IMPLEMENT ME") +} + +// shouldReorg determines if we should reorg to the block provided. We should reorg if +// this block has a higher QC than our current tip and extends from either the committed +// tip OR any uncommitted safe block in our block index. +func (bc *Blockchain) shouldReorg(desoBlock *MsgDeSoBlock) bool { + return false +} + +// handleReorg handles a reorg to the block provided. It does not check whether or not we should +// perform a reorg, so this should be called after shouldReorg. It will do the following: +// 1. Update the bestChain and bestChainMap by removing blocks that are not uncommitted ancestor of this block. +// 2. Update the bestChain and bestChainMap by adding blocks that are uncommitted ancestors of this block. +// Note: addBlockToBestChain will be called after this to handle adding THIS block to the best chain. +func (bc *Blockchain) handleReorg(desoBlock *MsgDeSoBlock) error { + // TODO: Implement me. + return errors.New("IMPLEMENT ME") +} + +// addBlockToBestChain adds the block to the best chain. +func (bc *Blockchain) addBlockToBestChain(desoBlock *MsgDeSoBlock) error { + // TODO: Implement me. + return errors.New("IMPLEMENT ME") +} + +// pruneUncommittedBlocks prunes the in-memory struct holding uncommitted blocks. +func (bc *Blockchain) pruneUncommittedBlocks(desoBlock *MsgDeSoBlock) error { + // TODO: Implement me. 
+ return errors.New("IMPLEMENT ME") +} + +// commitGrandparents commits the grandparent of the block if possible. +// Specifically, this updates the CommittedBlockStatus of its grandparent +// and flushes the view after connecting the grandparent block to the DB. +func (bc *Blockchain) commitGrandparents(desoBlock *MsgDeSoBlock) error { + // TODO: Implement me. + return errors.New("IMPLEMENT ME") +} + +// updateCurrentView updates the current view to the block's view + 1. +func (bc *Blockchain) updateCurrentView(desoBlock *MsgDeSoBlock) { + // TODO: Implement me. + panic(errors.New("IMPLEMENT ME")) +} + +func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { + // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks + panic("GetUncommittedTipView: Not implemented yet") +} diff --git a/lib/pos_network.go b/lib/pos_network.go index d09697009..9017e95e7 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -548,6 +548,16 @@ func (aggQC *TimeoutAggregateQuorumCertificate) FromBytes(rr io.Reader) error { return nil } +// isEmpty returns true if the TimeoutAggregateQuorumCertificate is nil or if it +// contains no data. 
+// Reference implementation: https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L119 +func (aggQC *TimeoutAggregateQuorumCertificate) isEmpty() bool { + return aggQC == nil || + aggQC.TimedOutView == 0 || + aggQC.ValidatorsTimeoutAggregatedSignature == nil || + aggQC.ValidatorsTimeoutAggregatedSignature.Signature == nil +} + // ================================================================== // Bitset Utils // ================================================================== From bf2a7179441ebd02f7bcab709c9a41880526cb55 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 3 Oct 2023 13:15:13 -0700 Subject: [PATCH 216/762] Rename block construction task to crank timer (#710) * Rename block construction task to crank timer * Fix straggler comment --- consensus/event_loop.go | 51 ++++++++++++++++++------------------ consensus/event_loop_test.go | 28 ++++++++++---------- consensus/types.go | 8 +++--- 3 files changed, 43 insertions(+), 44 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 46c006505..567992226 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -13,16 +13,16 @@ import ( func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { return &FastHotStuffEventLoop{ - status: eventLoopStatusNotInitialized, - nextBlockConstructionTask: NewScheduledTask[uint64](), - nextTimeoutTask: NewScheduledTask[uint64](), + status: eventLoopStatusNotInitialized, + crankTimerTask: NewScheduledTask[uint64](), + nextTimeoutTask: NewScheduledTask[uint64](), } } // Initializes the consensus event loop with the latest known valid block in the blockchain, and // the validator set for the next block height. 
The functions expects the following for the input // params: -// - blockConstructionInterval: block construction duration must be > 0 +// - crankTimerInterval: crank timer interval duration must be > 0 // - timeoutBaseDuration: timeout base duration must be > 0 // - tip: the current tip of the blockchain, with the validator set at that block height. This may // be a committed or uncommitted block. @@ -34,7 +34,7 @@ func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { // Given the above, This function updates the tip internally, stores the safe blocks, and re-initializes // all internal data structures that are used to track incoming votes and timeout messages for QC construction. func (fc *FastHotStuffEventLoop) Init( - blockConstructionInterval time.Duration, + crankTimerInterval time.Duration, timeoutBaseDuration time.Duration, tip BlockWithValidators, safeBlocks []BlockWithValidators, @@ -49,8 +49,8 @@ func (fc *FastHotStuffEventLoop) Init( } // Validate the scheduled task durations - if blockConstructionInterval <= 0 { - return errors.New("FastHotStuffEventLoop.Init: Block construction duration must be > 0") + if crankTimerInterval <= 0 { + return errors.New("FastHotStuffEventLoop.Init: Crank timer interval must be > 0") } if timeoutBaseDuration <= 0 { return errors.New("FastHotStuffEventLoop.Init: Timeout base duration must be > 0") @@ -71,8 +71,8 @@ func (fc *FastHotStuffEventLoop) Init( // Reset the external channel used for signaling fc.Events = make(chan *FastHotStuffEvent, signalChannelBufferSize) - // Set the block construction and timeout base durations - fc.blockConstructionInterval = blockConstructionInterval + // Set the crank timer interval and timeout base duration + fc.crankTimerInterval = crankTimerInterval fc.timeoutBaseDuration = timeoutBaseDuration // Update the event loop's status @@ -82,8 +82,7 @@ func (fc *FastHotStuffEventLoop) Init( } // AdvanceViewOnTimeout is called when the tip has not changed but the event loop has timed out. 
This -// function advances the view and resets the timeout scheduled task and block production scheduled -// tasks. +// function advances the view and resets the crank timer and timeout scheduled tasks. func (fc *FastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { // Grab the event loop's lock fc.lock.Lock() @@ -101,7 +100,7 @@ func (fc *FastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { // Evict all stale votes and timeouts fc.evictStaleVotesAndTimeouts() - // Schedule the next block construction and timeout scheduled tasks + // Schedule the next crank timer and timeout scheduled tasks fc.resetScheduledTasks() return fc.currentView, nil @@ -109,8 +108,8 @@ func (fc *FastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { // ProcessTipBlock must only be called when the server has accepted a new block, connected it // to the tip of the blockchain, and determined that the block is safe to vote on. Given such a -// block, this function resets internal state and schedules the next block construction and timeout -// timers. +// block, this function resets internal state and schedules the next crank timer and timeout +// timer. // // Expected params: // - tip: the current uncommitted tip of the blockchain, with the validator set at that block height @@ -148,7 +147,7 @@ func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidators, safeBl View: fc.tip.block.GetView(), } - // Schedule the next block construction and timeout scheduled tasks + // Schedule the next crank timer and timeout scheduled tasks fc.resetScheduledTasks() return nil @@ -336,7 +335,7 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) return nil } -// Sets the initial times for the block construction and timeouts and starts scheduled tasks. +// Sets the initial times for the crank timer and timeouts and starts scheduled tasks. 
func (fc *FastHotStuffEventLoop) Start() { fc.lock.Lock() defer fc.lock.Unlock() @@ -350,7 +349,7 @@ func (fc *FastHotStuffEventLoop) Start() { // Update the event loop's status to mark it as running. fc.status = eventLoopStatusRunning - // Set the initial block construction and timeout scheduled tasks + // Set the initial crank timer and timeout scheduled tasks fc.resetScheduledTasks() } @@ -364,8 +363,8 @@ func (fc *FastHotStuffEventLoop) Stop() { return } - // Cancel the next block construction and timeout scheduled tasks, if any. - fc.nextBlockConstructionTask.Cancel() + // Cancel the crank timer and timeout scheduled tasks, if any. + fc.crankTimerTask.Cancel() fc.nextTimeoutTask.Cancel() // Update the event loop's status so it is no longer marked as running. @@ -387,7 +386,7 @@ func (fc *FastHotStuffEventLoop) IsRunning() bool { } // resetScheduledTasks recomputes the nextBlockConstructionTimeStamp and nextTimeoutTimeStamp -// values, and reschedules the next block construction and timeout tasks. +// values, and reschedules the crank timer and timeout tasks. func (fc *FastHotStuffEventLoop) resetScheduledTasks() { // Compute the next timeout ETA. We use exponential back-off for timeouts when there are // multiple consecutive timeouts. We use the difference between the current view and the @@ -408,14 +407,14 @@ func (fc *FastHotStuffEventLoop) resetScheduledTasks() { timeoutDuration = fc.timeoutBaseDuration << numTimeouts } - // Schedule the next block construction task. This will run with currentView param. - fc.nextBlockConstructionTask.Schedule(fc.blockConstructionInterval, fc.currentView, fc.onBlockConstructionScheduledTaskExecuted) + // Schedule the next crank timer task. This will run with currentView param. + fc.crankTimerTask.Schedule(fc.crankTimerInterval, fc.currentView, fc.onBlockConstructionScheduledTaskExecuted) // Schedule the next timeout task. This will run with currentView param. 
fc.nextTimeoutTask.Schedule(timeoutDuration, fc.currentView, fc.onTimeoutScheduledTaskExecuted) } -// When this function is triggered, it means that we have reached the block construction +// When this function is triggered, it means that we have reached the crank timer // time ETA for blockConstructionView. If we have a QC or timeout QC for the view, then we // signal the server. func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockConstructionView uint64) { @@ -435,7 +434,7 @@ func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockC } // Check if the conditions are met to construct a QC from votes for the chain tip. If so, - // we send a signal to the server and cancel the block construction task. The server will + // we send a signal to the server and cancel the crank timer task. The server will // reschedule the task when it advances the view. if voteQCEvent := fc.tryConstructVoteQCInCurrentView(); voteQCEvent != nil { // Signal the server that we can construct a QC for the chain tip @@ -452,8 +451,8 @@ func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockC } // We have not found a super majority of votes or timeouts. We can schedule the task to check again later. 
- fc.nextBlockConstructionTask.Schedule( - fc.blockConstructionInterval, + fc.crankTimerTask.Schedule( + fc.crankTimerInterval, fc.currentView, fc.onBlockConstructionScheduledTaskExecuted, ) diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 3eb9e15ec..677968cb2 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -101,7 +101,7 @@ func TestInit(t *testing.T) { require.Equal(t, fc.tip.block.GetView(), uint64(2)) require.Equal(t, fc.tip.block.GetHeight(), uint64(1)) - require.Equal(t, fc.blockConstructionInterval, time.Duration(100)) + require.Equal(t, fc.crankTimerInterval, time.Duration(100)) require.Equal(t, fc.timeoutBaseDuration, time.Duration(101)) require.Equal(t, fc.currentView, uint64(3)) @@ -566,33 +566,33 @@ func TestResetEventLoopSignal(t *testing.T) { // Start the event loop fc.Start() - // Confirm the ETAs for the block construction and timeout timers - require.Equal(t, fc.nextBlockConstructionTask.GetDuration(), oneHourInNanoSecs) // 1 hour away - require.Equal(t, fc.nextTimeoutTask.GetDuration(), 2*oneHourInNanoSecs) // 2 hours away + // Confirm the ETAs for the crank timer and timeout timer + require.Equal(t, fc.crankTimerTask.GetDuration(), oneHourInNanoSecs) // 1 hour away + require.Equal(t, fc.nextTimeoutTask.GetDuration(), 2*oneHourInNanoSecs) // 2 hours away // Advance the view to simulate a timeout _, err = fc.AdvanceViewOnTimeout() require.NoError(t, err) - // Confirm the ETAs for the block construction and timeout timers - require.Equal(t, fc.nextBlockConstructionTask.GetDuration(), oneHourInNanoSecs) // 1 hour away - require.Equal(t, fc.nextTimeoutTask.GetDuration(), 4*oneHourInNanoSecs) // 2 hours * 2 = 4 hours away + // Confirm the ETAs for the crank timer and timeout timer + require.Equal(t, fc.crankTimerTask.GetDuration(), oneHourInNanoSecs) // 1 hour away + require.Equal(t, fc.nextTimeoutTask.GetDuration(), 4*oneHourInNanoSecs) // 2 hours * 2 = 4 hours away // Advance the view to 
simulate a 2nd timeout _, err = fc.AdvanceViewOnTimeout() require.NoError(t, err) - // Confirm the ETAs for the block construction and timeout timers - require.Equal(t, fc.nextBlockConstructionTask.GetDuration(), oneHourInNanoSecs) // 1 hour away - require.Equal(t, fc.nextTimeoutTask.GetDuration(), 8*oneHourInNanoSecs) // 2 hours * 2^2 = 8 hours away + // Confirm the ETAs for the crank timer and timeout timer + require.Equal(t, fc.crankTimerTask.GetDuration(), oneHourInNanoSecs) // 1 hour away + require.Equal(t, fc.nextTimeoutTask.GetDuration(), 8*oneHourInNanoSecs) // 2 hours * 2^2 = 8 hours away // Advance the view to simulate a 3nd timeout _, err = fc.AdvanceViewOnTimeout() require.NoError(t, err) - // Confirm the ETAs for the block construction and timeout timers - require.Equal(t, fc.nextBlockConstructionTask.GetDuration(), oneHourInNanoSecs) // 1 hour away - require.Equal(t, fc.nextTimeoutTask.GetDuration(), 16*oneHourInNanoSecs) // 2 hours * 2^3 = 16 hours away + // Confirm the ETAs for the crank timer and timeout timer + require.Equal(t, fc.crankTimerTask.GetDuration(), oneHourInNanoSecs) // 1 hour away + require.Equal(t, fc.nextTimeoutTask.GetDuration(), 16*oneHourInNanoSecs) // 2 hours * 2^3 = 16 hours away // Stop the event loop fc.Stop() @@ -884,7 +884,7 @@ func TestFastHotStuffEventLoopStartStop(t *testing.T) { require.Equal(t, eventLoopStatusRunning, fc.status) // Confirm that the ETAs for the block construction and timeout timers have been set - require.Equal(t, fc.nextBlockConstructionTask.GetDuration(), oneHourInNanoSecs) + require.Equal(t, fc.crankTimerTask.GetDuration(), oneHourInNanoSecs) require.Equal(t, fc.nextTimeoutTask.GetDuration(), 2*oneHourInNanoSecs) // Stop the event loop diff --git a/consensus/types.go b/consensus/types.go index 0e71cfb48..6119847fd 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -145,11 +145,11 @@ const signalChannelBufferSize = 100 type FastHotStuffEventLoop struct { lock sync.RWMutex - 
blockConstructionInterval time.Duration - timeoutBaseDuration time.Duration + crankTimerInterval time.Duration + timeoutBaseDuration time.Duration - nextBlockConstructionTask *ScheduledTask[uint64] - nextTimeoutTask *ScheduledTask[uint64] + crankTimerTask *ScheduledTask[uint64] + nextTimeoutTask *ScheduledTask[uint64] // The current view at which we expect to see or propose the next block. In the event of a timeout, // the timeout signal will be triggered for this view. From 11dab57ec129337329e8fb157171667ac78354e1 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Tue, 3 Oct 2023 13:28:44 -0700 Subject: [PATCH 217/762] Rename onCrankTimerTaskExecuted --- consensus/event_loop.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 567992226..831a3a39b 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -408,7 +408,7 @@ func (fc *FastHotStuffEventLoop) resetScheduledTasks() { } // Schedule the next crank timer task. This will run with currentView param. - fc.crankTimerTask.Schedule(fc.crankTimerInterval, fc.currentView, fc.onBlockConstructionScheduledTaskExecuted) + fc.crankTimerTask.Schedule(fc.crankTimerInterval, fc.currentView, fc.onCrankTimerTaskExecuted) // Schedule the next timeout task. This will run with currentView param. fc.nextTimeoutTask.Schedule(timeoutDuration, fc.currentView, fc.onTimeoutScheduledTaskExecuted) @@ -417,7 +417,7 @@ func (fc *FastHotStuffEventLoop) resetScheduledTasks() { // When this function is triggered, it means that we have reached the crank timer // time ETA for blockConstructionView. If we have a QC or timeout QC for the view, then we // signal the server. 
-func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockConstructionView uint64) { +func (fc *FastHotStuffEventLoop) onCrankTimerTaskExecuted(blockConstructionView uint64) { fc.lock.Lock() defer fc.lock.Unlock() @@ -451,11 +451,7 @@ func (fc *FastHotStuffEventLoop) onBlockConstructionScheduledTaskExecuted(blockC } // We have not found a super majority of votes or timeouts. We can schedule the task to check again later. - fc.crankTimerTask.Schedule( - fc.crankTimerInterval, - fc.currentView, - fc.onBlockConstructionScheduledTaskExecuted, - ) + fc.crankTimerTask.Schedule(fc.crankTimerInterval, fc.currentView, fc.onCrankTimerTaskExecuted) return } From 5a29673450e8e8348a5f9e135b1f29875b265836 Mon Sep 17 00:00:00 2001 From: Jon Pollock <135658176+poolcoke@users.noreply.github.com> Date: Wed, 4 Oct 2023 15:49:22 -0700 Subject: [PATCH 218/762] Add block_view_lockups.go file for lockup transactions and types. (#678) --- lib/block_view_lockups.go | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 lib/block_view_lockups.go diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go new file mode 100644 index 000000000..e69de29bb From b78874c68d6a4c1e95e9d721fcd70c42bc3fc5de Mon Sep 17 00:00:00 2001 From: Jon Pollock <135658176+poolcoke@users.noreply.github.com> Date: Wed, 4 Oct 2023 16:02:57 -0700 Subject: [PATCH 219/762] Coin Lockup Structs, Types, and DB Prefixes (#679) --- lib/block_view_lockups.go | 477 ++++++++++++++++++++++++++++++++++++++ lib/block_view_types.go | 59 ++++- lib/db_utils.go | 22 +- lib/network.go | 36 ++- 4 files changed, 582 insertions(+), 12 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index e69de29bb..66ce35a3c 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -0,0 +1,477 @@ +package lib + +import ( + "bytes" + "github.com/golang/glog" + "github.com/holiman/uint256" + "github.com/pkg/errors" +) + +// +// TYPES: LockedBalanceEntry +// + +type 
LockedBalanceEntry struct { + HODLerPKID *PKID + ProfilePKID *PKID + ExpirationTimestampNanoSecs int64 + BalanceBaseUnits uint256.Int + isDeleted bool +} + +type LockedBalanceEntryKey struct { + HODLerPKID PKID + ProfilePKID PKID + ExpirationTimestampUnixNanoSecs int64 +} + +func (lockedBalanceEntry *LockedBalanceEntry) Copy() *LockedBalanceEntry { + return &LockedBalanceEntry{ + HODLerPKID: lockedBalanceEntry.HODLerPKID.NewPKID(), + ProfilePKID: lockedBalanceEntry.ProfilePKID.NewPKID(), + ExpirationTimestampNanoSecs: lockedBalanceEntry.ExpirationTimestampNanoSecs, + BalanceBaseUnits: lockedBalanceEntry.BalanceBaseUnits, + isDeleted: lockedBalanceEntry.isDeleted, + } +} + +func (lockedBalanceEntry *LockedBalanceEntry) Eq(other *LockedBalanceEntry) bool { + return lockedBalanceEntry.ToMapKey() == other.ToMapKey() +} + +func (lockedBalanceEntry *LockedBalanceEntry) ToMapKey() LockedBalanceEntryKey { + return LockedBalanceEntryKey{ + HODLerPKID: *lockedBalanceEntry.HODLerPKID, + ProfilePKID: *lockedBalanceEntry.ProfilePKID, + ExpirationTimestampUnixNanoSecs: lockedBalanceEntry.ExpirationTimestampNanoSecs, + } +} + +// DeSoEncoder Interface Implementation for LockedBalanceEntry + +func (lockedBalanceEntry *LockedBalanceEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, EncodeToBytes(blockHeight, lockedBalanceEntry.HODLerPKID, skipMetadata...)...) + data = append(data, EncodeToBytes(blockHeight, lockedBalanceEntry.ProfilePKID, skipMetadata...)...) + data = append(data, UintToBuf(uint64(lockedBalanceEntry.ExpirationTimestampNanoSecs))...) + data = append(data, VariableEncodeUint256(&lockedBalanceEntry.BalanceBaseUnits)...) 
+ return data +} + +func (lockedBalanceEntry *LockedBalanceEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + + // HODLerPKID + lockedBalanceEntry.HODLerPKID, err = DecodeDeSoEncoder(&PKID{}, rr) + if err != nil { + return errors.Wrapf(err, "LockedBalanceEntry.Decode: Problem reading HODLerPKID") + } + + // ProfilePKID + lockedBalanceEntry.ProfilePKID, err = DecodeDeSoEncoder(&PKID{}, rr) + if err != nil { + return errors.Wrapf(err, "LockedBalanceEntry.Decode: Problem reading ProfilePKID") + } + + // ExpirationTimestampNanoSecs + uint64ExpirationTimestampUnixNanoSecs, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "LockedBalanceEntry.Decode: Problem reading ExpirationTimestampNanoSecs") + } + lockedBalanceEntry.ExpirationTimestampNanoSecs = int64(uint64ExpirationTimestampUnixNanoSecs) + + // BalanceBaseUnits + balanceBaseUnits, err := VariableDecodeUint256(rr) + if err != nil { + return errors.Wrapf(err, "LockedBalanceEntry.Decode: Problem reading BalanceBaseUnits") + } + lockedBalanceEntry.BalanceBaseUnits = *balanceBaseUnits + + return err +} + +func (lockedBalanceEntry *LockedBalanceEntry) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (lockedBalanceEntry *LockedBalanceEntry) GetEncoderType() EncoderType { + return EncoderTypeLockedBalanceEntry +} + +// Set and Delete Functions for LockedBalanceEntry + +func (bav *UtxoView) _setLockedBalanceEntry(lockedBalanceEntry *LockedBalanceEntry) { + // This function shouldn't be called with nil. + if lockedBalanceEntry == nil { + glog.Errorf("_setLockedBalanceEntryMappingsWithPKIDsTimestampType: Called with nil LockedBalanceEntry; " + + "this should never happen.") + return + } + + // Add a mapping for the LockedBalanceEntry in the view. 
+ bav.LockedBalanceEntryKeyToLockedBalanceEntry[lockedBalanceEntry.ToMapKey()] = lockedBalanceEntry +} + +func (bav *UtxoView) _deleteLockedBalanceEntry(lockedBalanceEntry *LockedBalanceEntry) { + // Create a tombstone entry. + tombstoneLockedBalanceEntry := *lockedBalanceEntry + tombstoneLockedBalanceEntry.isDeleted = true + + // Set the LockupYieldCurvePoint as deleted in the view. + bav._setLockedBalanceEntry(&tombstoneLockedBalanceEntry) +} + +// TODO: Get Helper Functions for LockedBalanceEntry + +// +// TYPES: LockupYieldCurvePoint +// + +type LockupYieldCurvePoint struct { + ProfilePKID *PKID + LockupDurationNanoSecs int64 + LockupYieldAPYBasisPoints uint64 + isDeleted bool +} + +type LockupYieldCurvePointKey struct { + ProfilePKID *PKID + LockupDurationNanoSecs int64 +} + +func (lockupYieldCurvePoint *LockupYieldCurvePoint) Copy() *LockupYieldCurvePoint { + return &LockupYieldCurvePoint{ + ProfilePKID: lockupYieldCurvePoint.ProfilePKID.NewPKID(), + LockupDurationNanoSecs: lockupYieldCurvePoint.LockupDurationNanoSecs, + LockupYieldAPYBasisPoints: lockupYieldCurvePoint.LockupYieldAPYBasisPoints, + } +} + +func (lockupYieldCurvePoint *LockupYieldCurvePoint) Eq(other *LockupYieldCurvePoint) bool { + return lockupYieldCurvePoint.ToMapKey() == other.ToMapKey() +} + +func (lockupYieldCurvePoint *LockupYieldCurvePoint) ToMapKey() LockupYieldCurvePointKey { + return LockupYieldCurvePointKey{ + ProfilePKID: lockupYieldCurvePoint.ProfilePKID, + LockupDurationNanoSecs: lockupYieldCurvePoint.LockupDurationNanoSecs, + } +} + +// DeSoEncoder Interface Implementation for LockupYieldCurvePoint + +func (lockupYieldCurvePoint *LockupYieldCurvePoint) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, EncodeToBytes(blockHeight, lockupYieldCurvePoint.ProfilePKID, skipMetadata...)...) + data = append(data, UintToBuf(uint64(lockupYieldCurvePoint.LockupDurationNanoSecs))...) 
+ data = append(data, UintToBuf(lockupYieldCurvePoint.LockupYieldAPYBasisPoints)...) + return data +} + +func (lockupYieldCurvePoint *LockupYieldCurvePoint) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + + // ProfilePKID + lockupYieldCurvePoint.ProfilePKID, err = DecodeDeSoEncoder(&PKID{}, rr) + if err != nil { + return errors.Wrapf(err, "LockupYieldCurvePoint.Decode: Problem reading ProfilePKID") + } + + // LockupDurationNanoSecs + uint64LockupDurationNanoSecs, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "LockupYieldCurvePoint.Decode: Problem reading LockupDurationNanoSecs") + } + lockupYieldCurvePoint.LockupDurationNanoSecs = int64(uint64LockupDurationNanoSecs) + + // LockupYieldAPYBasisPoints + lockupYieldAPYBasisPoints, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "LockupYieldCurvePoint.Decode: Problem reading LockupYieldAPYBasisPoints") + } + lockupYieldCurvePoint.LockupYieldAPYBasisPoints = lockupYieldAPYBasisPoints + + return err +} + +func (lockupYieldCurvePoint *LockupYieldCurvePoint) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (lockupYieldCurvePoint *LockupYieldCurvePoint) GetEncoderType() EncoderType { + return EncoderTypeLockupYieldCurvePoint +} + +// Set and Delete Functions for LockupYieldCurvePoints + +func (bav *UtxoView) _setLockupYieldCurvePoint(point *LockupYieldCurvePoint) { + // This function shouldn't be called with nil. + if point == nil { + glog.Errorf("_setLockupYieldCurvePoint: Called with nil LockupYieldCurvePoint; " + + "this should never happen.") + return + } + + // Check if the PKID needs a map added to the view. + if _, mapExists := bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*point.ProfilePKID]; !mapExists { + bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*point.ProfilePKID] = + make(map[LockupYieldCurvePointKey]*LockupYieldCurvePoint) + } + + // Set the LockupYieldCurvePoint in the view. 
+ bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*point.ProfilePKID][point.ToMapKey()] = point +} + +func (bav *UtxoView) _deleteLockupYieldCurvePoint(point *LockupYieldCurvePoint) { + // Create a tombstone entry. + tombstoneLockupYieldCurvePoint := *point + tombstoneLockupYieldCurvePoint.isDeleted = true + + // Set the LockupYieldCurvePoint as deleted in the view. + bav._setLockupYieldCurvePoint(&tombstoneLockupYieldCurvePoint) +} + +// TODO: Get Helper Functions for LockupYieldCurvePoint + +// +// TYPES: CoinLockupMetadata +// + +type CoinLockupMetadata struct { + ProfilePublicKey *PublicKey + UnlockTimestampNanoSecs int64 + LockupAmountBaseUnits *uint256.Int +} + +func (txnData *CoinLockupMetadata) GetTxnType() TxnType { + return TxnTypeCoinLockup +} + +func (txnData *CoinLockupMetadata) ToBytes(preSignature bool) ([]byte, error) { + var data []byte + data = append(data, EncodeByteArray(txnData.ProfilePublicKey.ToBytes())...) + data = append(data, UintToBuf(uint64(txnData.UnlockTimestampNanoSecs))...) + data = append(data, VariableEncodeUint256(txnData.LockupAmountBaseUnits)...) 
+ return data, nil +} + +func (txnData *CoinLockupMetadata) FromBytes(data []byte) error { + rr := bytes.NewReader(data) + + // ProfilePublicKey + profilePublicKeyBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "CoinLockupMetadata.FromBytes: Problem reading ProfilePublicKey") + } + txnData.ProfilePublicKey = NewPublicKey(profilePublicKeyBytes) + + // UnlockTimestampNanoSecs + uint64UnlockTimestampNanoSecs, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "CoinLockupMetadata.FromBytes: Problem reading UnlockTimestampNanoSecs") + } + txnData.UnlockTimestampNanoSecs = int64(uint64UnlockTimestampNanoSecs) + + // LockupAmountBaseUnits + txnData.LockupAmountBaseUnits, err = VariableDecodeUint256(rr) + if err != nil { + return errors.Wrapf(err, "CoinLockupMetadata.FromBytes: Problem reading LockupAmountBaseUnits") + } + + return nil +} + +func (txnData *CoinLockupMetadata) New() DeSoTxnMetadata { + return &CoinLockupMetadata{} +} + +// +// TYPES: UpdateDAOCoinLockupParamsMetadata +// + +type UpdateCoinLockupParamsMetadata struct { + // LockupYieldDurationNanoSecs and LockupYieldAPYBasisPoints describe a coordinate pair + // of (duration, APY yield) on a yield curve. + // + // A yield curve consists of a series of (duration, APY yield) points. For example, + // the following points describe a simple yield curve: + // {(6mo, 3%), (12mo, 3.5%), (18mo, 4%), (24mo, 4.5%)} + // + // Assuming RemoveYieldCurvePoint is false: + // The point (LockupYieldDurationNanoSecs, LockupYieldAPYBasisPoints) + // is added to the profile's yield curve. If a point with the same duration already exists + // on the profile's yield curve, it will be updated with the new yield. + // Note if LockupYieldDurationNanoSecs=0, nothing is modified or added at t=0. + // Assuming RemoveYieldCurvePoint is true: + // The point (LockupYieldDurationNanoSecs, XXX) is removed from the profile's yield curve. 
+ // Note that LockupYieldAPYBasisPoints is ignored in this transaction. + // + // By setting LockupYieldDurationNanoSecs to zero, the yield curve attached to the profile + // is left unmodified. In any UpdateDAOCoinLockupParams transaction looking to modify only + // LockupTransferRestrictions, LockupYieldDurationNanoSecs would be set to zero. + LockupYieldDurationNanoSecs int64 + LockupYieldAPYBasisPoints uint64 + RemoveYieldCurvePoint bool + + // When NewLockupTransferRestrictions is set true, the TransferRestrictionStatus specified + // in the transaction is updated in the transactor's profile for locked coins. + // Any subsequent transfers utilizing the transactor's locked coins are validated against + // the updated locked transfer restriction status. + NewLockupTransferRestrictions bool + LockupTransferRestrictionStatus TransferRestrictionStatus +} + +func (txnData *UpdateCoinLockupParamsMetadata) GetTxnType() TxnType { + return TxnTypeUpdateCoinLockupParams +} + +func (txnData *UpdateCoinLockupParamsMetadata) ToBytes(preSignature bool) ([]byte, error) { + var data []byte + data = append(data, UintToBuf(uint64(txnData.LockupYieldDurationNanoSecs))...) + data = append(data, UintToBuf(txnData.LockupYieldAPYBasisPoints)...) 
+ data = append(data, BoolToByte(txnData.RemoveYieldCurvePoint)) + data = append(data, BoolToByte(txnData.NewLockupTransferRestrictions)) + data = append(data, byte(txnData.LockupTransferRestrictionStatus)) + return data, nil +} + +func (txnData *UpdateCoinLockupParamsMetadata) FromBytes(data []byte) error { + rr := bytes.NewReader(data) + + lockupYieldDurationNanoSecs, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "UpdateDAOCoinLockupParams.FromBytes: Problem reading LockupYieldDurationNanoSecs") + } + txnData.LockupYieldDurationNanoSecs = int64(lockupYieldDurationNanoSecs) + + txnData.LockupYieldAPYBasisPoints, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "UpdateDAOCoinLockupParams.FromBytes: Problem reading LockupYieldAPYBasisPoints") + } + + txnData.RemoveYieldCurvePoint, err = ReadBoolByte(rr) + if err != nil { + return errors.Wrapf(err, "UpdateDAOCoinLockupParams.FromBytes: Problem reading RemoveYieldCurvePoint") + } + + txnData.NewLockupTransferRestrictions, err = ReadBoolByte(rr) + if err != nil { + return errors.Wrapf(err, "UpdateDAOCoinLockupParams.FromBytes: Problem reading NewLockupTransferRestrictions") + } + + lockedStatusByte, err := rr.ReadByte() + if err != nil { + return errors.Wrapf(err, "UpdateDAOCoinLockupParams.FromBytes: Problem reading LockupTransferRestrictionStatus") + } + txnData.LockupTransferRestrictionStatus = TransferRestrictionStatus(lockedStatusByte) + + return nil +} + +func (txnData *UpdateCoinLockupParamsMetadata) New() DeSoTxnMetadata { + return &UpdateCoinLockupParamsMetadata{} +} + +// +// TYPES: DAOCoinLockupTransferMetadata +// + +type CoinLockupTransferMetadata struct { + RecipientPublicKey *PublicKey + ProfilePublicKey *PublicKey + ExpirationTimestampUnixNanoSecs int64 + LockedCoinsToTransferBaseUnits *uint256.Int +} + +func (txnData *CoinLockupTransferMetadata) GetTxnType() TxnType { + return TxnTypeCoinLockupTransfer +} + +func (txnData *CoinLockupTransferMetadata) 
ToBytes(preSignature bool) ([]byte, error) { + var data []byte + data = append(data, EncodeByteArray(txnData.RecipientPublicKey.ToBytes())...) + data = append(data, EncodeByteArray(txnData.ProfilePublicKey.ToBytes())...) + data = append(data, UintToBuf(uint64(txnData.ExpirationTimestampUnixNanoSecs))...) + data = append(data, VariableEncodeUint256(txnData.LockedCoinsToTransferBaseUnits)...) + return data, nil +} + +func (txnData *CoinLockupTransferMetadata) FromBytes(data []byte) error { + rr := bytes.NewReader(data) + + // RecipientPublicKey + recipientPublicKeyBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "DAOCoinLockupTransferMetadata.FromBytes: Problem reading RecipientPublicKey") + } + txnData.RecipientPublicKey = NewPublicKey(recipientPublicKeyBytes) + + // ProfilePublicKey + profilePublicKeyBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "DAOCoinLockupTransferMetadata.FromBytes: Problem reading ProfilePublicKey") + } + txnData.ProfilePublicKey = NewPublicKey(profilePublicKeyBytes) + + // ExpirationTimestampNanoSecs + uint64ExpirationTimestampUnixNanoSecs, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "DAOCoinLockupTransferMetadata.FromBytes: Problem reading ExpirationTimestampNanoSecs") + } + txnData.ExpirationTimestampUnixNanoSecs = int64(uint64ExpirationTimestampUnixNanoSecs) + + // LockedDAOCoinToTransferBaseUnits + txnData.LockedCoinsToTransferBaseUnits, err = VariableDecodeUint256(rr) + if err != nil { + return errors.Wrapf(err, "DAOCoinLockupTransferMetadata.FromBytes: Problem reading LockedDAOCoinToTransferBaseUnits") + } + + return nil +} + +func (txnData *CoinLockupTransferMetadata) New() DeSoTxnMetadata { + return &CoinLockupTransferMetadata{} +} + +// +// TYPES: CoinUnlockMetadata +// + +type CoinUnlockMetadata struct { + ProfilePublicKey *PublicKey + CoinsToUnlockBaseUnits *uint256.Int +} + +func (txnData *CoinUnlockMetadata) GetTxnType() TxnType { + return 
TxnTypeCoinUnlock +} + +func (txnData *CoinUnlockMetadata) ToBytes(preSignature bool) ([]byte, error) { + var data []byte + data = append(data, EncodeByteArray(txnData.ProfilePublicKey.ToBytes())...) + data = append(data, VariableEncodeUint256(txnData.CoinsToUnlockBaseUnits)...) + return data, nil +} + +func (txnData *CoinUnlockMetadata) FromBytes(data []byte) error { + rr := bytes.NewReader(data) + + // ProfilePublicKey + profilePublicKeyBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "CoinUnlockMetadata.FromBytes: Problem reading ProfilePublicKey") + } + txnData.ProfilePublicKey = NewPublicKey(profilePublicKeyBytes) + + // CoinToUnlockBaseUnits + txnData.CoinsToUnlockBaseUnits, err = VariableDecodeUint256(rr) + if err != nil { + return errors.Wrapf(err, "CoinUnlockMetadata.FromBytes: Problem reading DAOCoinToUnlockBaseUnits") + } + + return nil +} + +func (txnData *CoinUnlockMetadata) New() DeSoTxnMetadata { + return &CoinUnlockMetadata{} +} diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 91c9aa3bb..ac627693c 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -114,9 +114,11 @@ const ( EncoderTypeStakeEntry EncoderType = 41 EncoderTypeLockedStakeEntry EncoderType = 42 EncoderTypeEpochEntry EncoderType = 43 + EncoderTypeLockedBalanceEntry EncoderType = 44 + EncoderTypeLockupYieldCurvePoint EncoderType = 45 // EncoderTypeEndBlockView encoder type should be at the end and is used for automated tests. - EncoderTypeEndBlockView EncoderType = 44 + EncoderTypeEndBlockView EncoderType = 46 ) // Txindex encoder types. 
@@ -254,6 +256,10 @@ func (encoderType EncoderType) New() DeSoEncoder { return &LockedStakeEntry{} case EncoderTypeEpochEntry: return &EpochEntry{} + case EncoderTypeLockedBalanceEntry: + return &LockedBalanceEntry{} + case EncoderTypeLockupYieldCurvePoint: + return &LockupYieldCurvePoint{} } // Txindex encoder types @@ -629,8 +635,12 @@ const ( OperationTypeUnstake OperationType = 42 OperationTypeUnlockStake OperationType = 43 OperationTypeUnjailValidator OperationType = 44 + OperationTypeCoinLockup OperationType = 45 + OperationTypeCoinLockupTransfer OperationType = 46 + OperationTypeCoinUnlock OperationType = 47 + OperationTypeUpdateCoinLockupParams OperationType = 48 - // NEXT_TAG = 45 + // NEXT_TAG = 49 ) func (op OperationType) String() string { @@ -723,6 +733,14 @@ func (op OperationType) String() string { return "OperationTypeUnlockStake" case OperationTypeUnjailValidator: return "OperationTypeUnjailValidator" + case OperationTypeCoinLockup: + return "OperationTypeCoinLockup" + case OperationTypeUpdateCoinLockupParams: + return "OperationTypeUpdateCoinLockupParams" + case OperationTypeCoinLockupTransfer: + return "OperationTypeCoinLockupTransfer" + case OperationTypeCoinUnlock: + return "OperationTypeCoinUnlock" } return "OperationTypeUNKNOWN" } @@ -4417,17 +4435,23 @@ type CoinEntry struct { MintingDisabled bool TransferRestrictionStatus TransferRestrictionStatus + + // ===== ENCODER MIGRATION ProofOfStake1StateSetupMigration ===== + // LockupTransferRestrictionStatus specifies transfer restrictions + // for only those DAO coins actively locked up. 
+ LockupTransferRestrictionStatus TransferRestrictionStatus } func (ce *CoinEntry) Copy() *CoinEntry { return &CoinEntry{ - CreatorBasisPoints: ce.CreatorBasisPoints, - DeSoLockedNanos: ce.DeSoLockedNanos, - NumberOfHolders: ce.NumberOfHolders, - CoinsInCirculationNanos: *uint256.NewInt().Set(&ce.CoinsInCirculationNanos), - CoinWatermarkNanos: ce.CoinWatermarkNanos, - MintingDisabled: ce.MintingDisabled, - TransferRestrictionStatus: ce.TransferRestrictionStatus, + CreatorBasisPoints: ce.CreatorBasisPoints, + DeSoLockedNanos: ce.DeSoLockedNanos, + NumberOfHolders: ce.NumberOfHolders, + CoinsInCirculationNanos: *uint256.NewInt().Set(&ce.CoinsInCirculationNanos), + CoinWatermarkNanos: ce.CoinWatermarkNanos, + MintingDisabled: ce.MintingDisabled, + TransferRestrictionStatus: ce.TransferRestrictionStatus, + LockupTransferRestrictionStatus: ce.LockupTransferRestrictionStatus, } } @@ -4443,6 +4467,10 @@ func (ce *CoinEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata . data = append(data, BoolToByte(ce.MintingDisabled)) data = append(data, byte(ce.TransferRestrictionStatus)) + if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { + data = append(data, byte(ce.LockupTransferRestrictionStatus)) + } + return data } @@ -4484,11 +4512,22 @@ func (ce *CoinEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Read } ce.TransferRestrictionStatus = TransferRestrictionStatus(statusByte) + if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { + lockedStatusByte, err := rr.ReadByte() + if err != nil { + return errors.Wrapf(err, "CoinEntry.Decode: Problem reading LockupTransferRestrictionStatus") + } + ce.LockupTransferRestrictionStatus = TransferRestrictionStatus(lockedStatusByte) + } + return nil } func (ce *CoinEntry) GetVersionByte(blockHeight uint64) byte { - return 0 + return GetMigrationVersion( + blockHeight, + ProofOfStake1StateSetupMigration, + ) } func (ce *CoinEntry) GetEncoderType() EncoderType { diff --git 
a/lib/db_utils.go b/lib/db_utils.go index eaedb4b20..9d26501f0 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -559,7 +559,21 @@ type DBPrefixes struct { // Note, we parse the ValidatorPKID and StakerPKID from the key. PrefixSnapshotStakeToRewardByValidatorAndStaker []byte `prefix_id:"[90]" is_state:"true"` - // NEXT_TAG: 91 + // PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDExpirationTimestampNanoSecs: + // Retrieves LockedBalanceEntries that may or may not be claimable for unlock. + // LockedBalanceEntries can be retrieved by HodlerPKID and CreatorPKID are have their + // corresponding unlock timestamp appended to sort by timestamp. + // Prefix, , , -> + PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDExpirationTimestampNanoSecs []byte `prefix_id:"[91]" is_state:"true"` + + // PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs: + // Retrieves a LockupYieldCurvePoint. + // The structure of the key enables quick lookups for a (ProfilePKID, Duration) pair as well + // as quick construction of yield curve plots over time. + // Prefix, , -> + PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs []byte `prefix_id:"[92]" is_state:"true"` + + // NEXT_TAG: 93 } // StatePrefixToDeSoEncoder maps each state prefix to a DeSoEncoder type that is stored under that prefix. 
@@ -800,6 +814,12 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotStakeToRewardByValidatorAndStaker) { // prefix_id:"[90]" return true, &StakeEntry{} + } else if bytes.Equal(prefix, Prefixes.PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDExpirationTimestampNanoSecs) { + // prefix_id:"[91]" + return true, &LockedBalanceEntry{} + } else if bytes.Equal(prefix, Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs) { + // prefix_id:"[92]" + return true, &LockupYieldCurvePoint{} } return true, nil diff --git a/lib/network.go b/lib/network.go index f38fbbc31..723d3fae2 100644 --- a/lib/network.go +++ b/lib/network.go @@ -255,8 +255,12 @@ const ( TxnTypeUnstake TxnType = 37 TxnTypeUnlockStake TxnType = 38 TxnTypeUnjailValidator TxnType = 39 + TxnTypeCoinLockup TxnType = 40 + TxnTypeUpdateCoinLockupParams TxnType = 41 + TxnTypeCoinLockupTransfer TxnType = 42 + TxnTypeCoinUnlock TxnType = 43 - // NEXT_ID = 40 + // NEXT_ID = 44 ) type TxnString string @@ -302,6 +306,10 @@ const ( TxnStringUnstake TxnString = "UNSTAKE" TxnStringUnlockStake TxnString = "UNLOCK_STAKE" TxnStringUnjailValidator TxnString = "UNJAIL_VALIDATOR" + TxnStringCoinLockup TxnString = "COIN_LOCKUP" + TxnStringUpdateCoinLockupParams TxnString = "UPDATE_COIN_LOCKUP_PARAMS" + TxnStringCoinLockupTransfer TxnString = "COIN_LOCKUP_TRANSFER" + TxnStringCoinUnlock TxnString = "COIN_UNLOCK" ) var ( @@ -315,6 +323,7 @@ var ( TxnTypeDeleteUserAssociation, TxnTypeCreatePostAssociation, TxnTypeDeletePostAssociation, TxnTypeAccessGroup, TxnTypeAccessGroupMembers, TxnTypeNewMessage, TxnTypeRegisterAsValidator, TxnTypeUnregisterAsValidator, TxnTypeStake, TxnTypeUnstake, TxnTypeUnlockStake, TxnTypeUnjailValidator, + TxnTypeCoinLockup, TxnTypeUpdateCoinLockupParams, TxnTypeCoinLockupTransfer, TxnTypeCoinUnlock, } AllTxnString = []TxnString{ TxnStringUnset, TxnStringBlockReward, TxnStringBasicTransfer, 
TxnStringBitcoinExchange, TxnStringPrivateMessage, @@ -326,6 +335,7 @@ var ( TxnStringDeleteUserAssociation, TxnStringCreatePostAssociation, TxnStringDeletePostAssociation, TxnStringAccessGroup, TxnStringAccessGroupMembers, TxnStringNewMessage, TxnStringRegisterAsValidator, TxnStringUnregisterAsValidator, TxnStringStake, TxnStringUnstake, TxnStringUnlockStake, TxnStringUnjailValidator, + TxnStringCoinLockup, TxnStringUpdateCoinLockupParams, TxnStringCoinLockupTransfer, TxnStringCoinUnlock, } ) @@ -417,6 +427,14 @@ func (txnType TxnType) GetTxnString() TxnString { return TxnStringUnlockStake case TxnTypeUnjailValidator: return TxnStringUnjailValidator + case TxnTypeCoinLockup: + return TxnStringCoinLockup + case TxnTypeUpdateCoinLockupParams: + return TxnStringUpdateCoinLockupParams + case TxnTypeCoinLockupTransfer: + return TxnStringCoinLockupTransfer + case TxnTypeCoinUnlock: + return TxnStringCoinUnlock default: return TxnStringUndefined } @@ -502,6 +520,14 @@ func GetTxnTypeFromString(txnString TxnString) TxnType { return TxnTypeUnlockStake case TxnStringUnjailValidator: return TxnTypeUnjailValidator + case TxnStringCoinLockup: + return TxnTypeCoinLockup + case TxnStringUpdateCoinLockupParams: + return TxnTypeUpdateCoinLockupParams + case TxnStringCoinLockupTransfer: + return TxnTypeCoinLockupTransfer + case TxnStringCoinUnlock: + return TxnTypeCoinUnlock default: // TxnTypeUnset means we couldn't find a matching txn type return TxnTypeUnset @@ -595,6 +621,14 @@ func NewTxnMetadata(txType TxnType) (DeSoTxnMetadata, error) { return (&UnlockStakeMetadata{}).New(), nil case TxnTypeUnjailValidator: return (&UnjailValidatorMetadata{}).New(), nil + case TxnTypeCoinLockup: + return (&CoinLockupMetadata{}).New(), nil + case TxnTypeUpdateCoinLockupParams: + return (&UpdateCoinLockupParamsMetadata{}).New(), nil + case TxnTypeCoinLockupTransfer: + return (&CoinLockupTransferMetadata{}).New(), nil + case TxnTypeCoinUnlock: + return (&CoinUnlockMetadata{}).New(), nil 
default: return nil, fmt.Errorf("NewTxnMetadata: Unrecognized TxnType: %v; make sure you add the new type of transaction to NewTxnMetadata", txType) } From 05b3af7fda4c779fd05315bbf62affd4fa1aff61 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Thu, 21 Sep 2023 10:11:39 -0700 Subject: [PATCH 220/762] Coin Lockup Scaffolding in ConnectTransaction, DB Utils, and UTXO View --- lib/block_view.go | 58 ++++++ lib/block_view_flush.go | 6 + lib/block_view_lockups.go | 255 +++++++++++++++++++++++- lib/block_view_types.go | 84 ++++++++ lib/db_utils.go | 403 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 804 insertions(+), 2 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 41e524f57..0c1abb4f6 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -124,6 +124,12 @@ type UtxoView struct { // Locked stake mappings LockedStakeMapKeyToLockedStakeEntry map[LockedStakeMapKey]*LockedStakeEntry + // Locked DAO coin balance entry mapping. + LockedBalanceEntryKeyToLockedBalanceEntry map[LockedBalanceEntryKey]*LockedBalanceEntry + + // Lockup yield curve points. 
+ PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints map[PKID]map[LockupYieldCurvePointKey]*LockupYieldCurvePoint + // Current EpochEntry CurrentEpochEntry *EpochEntry @@ -242,6 +248,12 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // Transaction nonce map bav.TransactorNonceMapKeyToTransactorNonceEntry = make(map[TransactorNonceMapKey]*TransactorNonceEntry) + // Locked Balance Entries Map + bav.LockedBalanceEntryKeyToLockedBalanceEntry = make(map[LockedBalanceEntryKey]*LockedBalanceEntry) + + // Lockup Yield Curve Points Map + bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints = make(map[PKID]map[LockupYieldCurvePointKey]*LockupYieldCurvePoint) + // ValidatorEntries bav.ValidatorPKIDToValidatorEntry = make(map[PKID]*ValidatorEntry) @@ -520,6 +532,29 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.TransactorNonceMapKeyToTransactorNonceEntry[entryKey] = &newEntry } + // Copy the LockedBalanceEntries + newView.LockedBalanceEntryKeyToLockedBalanceEntry = make(map[LockedBalanceEntryKey]*LockedBalanceEntry, + len(bav.LockedBalanceEntryKeyToLockedBalanceEntry)) + for entryKey, entry := range bav.LockedBalanceEntryKeyToLockedBalanceEntry { + newEntry := *entry + newView.LockedBalanceEntryKeyToLockedBalanceEntry[entryKey] = &newEntry + } + + // Copy the LockupYieldCurvePoints + newView.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints = + make(map[PKID]map[LockupYieldCurvePointKey]*LockupYieldCurvePoint, len(bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints)) + for pkid, lockupYieldCurvePointMap := range bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints { + // Copy the map for the given PKID + newView.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[pkid] = + make(map[LockupYieldCurvePointKey]*LockupYieldCurvePoint, len(lockupYieldCurvePointMap)) + + // Go through all LockupYieldCurvePoints in the LockupYieldCurvePoint map. 
+ for entryKey, entry := range lockupYieldCurvePointMap { + newLockupYieldCurvePoint := *entry + newView.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[pkid][entryKey] = &newLockupYieldCurvePoint + } + } + // Copy the ValidatorEntries newView.ValidatorPKIDToValidatorEntry = make(map[PKID]*ValidatorEntry, len(bav.ValidatorPKIDToValidatorEntry)) for entryKey, entry := range bav.ValidatorPKIDToValidatorEntry { @@ -1439,6 +1474,18 @@ func (bav *UtxoView) DisconnectTransaction(currentTxn *MsgDeSoTxn, txnHash *Bloc case TxnTypeUnjailValidator: return bav._disconnectUnjailValidator( OperationTypeUnjailValidator, currentTxn, txnHash, utxoOpsForTxn, blockHeight) + + case TxnTypeCoinLockup: + return bav._disconnectCoinLockup(OperationTypeCoinLockup, currentTxn, txnHash, utxoOpsForTxn, blockHeight) + case TxnTypeUpdateCoinLockupParams: + return bav._disconnectUpdateCoinLockupParams( + OperationTypeUpdateCoinLockupParams, currentTxn, txnHash, utxoOpsForTxn, blockHeight) + case TxnTypeCoinLockupTransfer: + return bav._disconnectCoinLockupTransfer( + OperationTypeCoinLockupTransfer, currentTxn, txnHash, utxoOpsForTxn, blockHeight) + case TxnTypeCoinUnlock: + return bav._disconnectCoinUnlock(OperationTypeCoinUnlock, currentTxn, txnHash, utxoOpsForTxn, blockHeight) + } return fmt.Errorf("DisconnectBlock: Unimplemented txn type %v", currentTxn.TxnMeta.GetTxnType().String()) @@ -3443,6 +3490,17 @@ func (bav *UtxoView) _connectTransaction(txn *MsgDeSoTxn, txHash *BlockHash, case TxnTypeUnjailValidator: totalInput, totalOutput, utxoOpsForTxn, err = bav._connectUnjailValidator(txn, txHash, blockHeight, verifySignatures) + case TxnTypeCoinLockup: + // TODO: Once merged, update blockTimestamp parameter + totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinLockup(txn, txHash, blockHeight, 0, verifySignatures) + case TxnTypeUpdateCoinLockupParams: + totalInput, totalOutput, utxoOpsForTxn, err = bav._connectUpdateCoinLockupParams(txn, txHash, blockHeight, verifySignatures) 
+ case TxnTypeCoinLockupTransfer: + totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinLockupTransfer(txn, txHash, blockHeight, verifySignatures) + case TxnTypeCoinUnlock: + // TODO: Once merged, update blockTimestamp parameter + totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinUnlock(txn, txHash, blockHeight, 0, verifySignatures) + default: err = fmt.Errorf("ConnectTransaction: Unimplemented txn type %v", txn.TxnMeta.GetTxnType().String()) } diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 5d85dd0c5..bebfc21f8 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -141,6 +141,12 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushNonceEntriesToDbWithTxn(txn); err != nil { return err } + if err := bav._flushLockedBalanceEntriesToDbWithTxn(txn, blockHeight); err != nil { + return err + } + if err := bav._flushLockupYieldCurvePointEntriesToDbWithTxn(txn, blockHeight); err != nil { + return err + } if err := bav._flushValidatorEntriesToDbWithTxn(txn, blockHeight); err != nil { return err } diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 66ce35a3c..6a89b3b8f 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -2,9 +2,12 @@ package lib import ( "bytes" + "fmt" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" + "sort" ) // @@ -121,7 +124,82 @@ func (bav *UtxoView) _deleteLockedBalanceEntry(lockedBalanceEntry *LockedBalance bav._setLockedBalanceEntry(&tombstoneLockedBalanceEntry) } -// TODO: Get Helper Functions for LockedBalanceEntry +// Get Helper Functions for LockedBalanceEntry + +func (bav *UtxoView) GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + hodlerPKID *PKID, profilePKID *PKID, expirationTimestampNanoSecs int64) (_lockedBalanceEntry *LockedBalanceEntry) { + // Create a key associated with the LockedBalanceEntry. 
+ lockedBalanceEntryKey := (&LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + ExpirationTimestampNanoSecs: expirationTimestampNanoSecs, + }).ToMapKey() + + // Check if the key exists in the view. + if viewEntry, viewEntryExists := + bav.LockedBalanceEntryKeyToLockedBalanceEntry[lockedBalanceEntryKey]; viewEntryExists { + return viewEntry + } + + // No mapping exists in the view, check for an entry in the DB. + lockedBalanceEntry := DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + bav.Handle, bav.Snapshot, hodlerPKID, profilePKID, expirationTimestampNanoSecs) + + // Cache the DB entry in the in-memory map. + if lockedBalanceEntry != nil { + bav._setLockedBalanceEntry(lockedBalanceEntry) + } + + return lockedBalanceEntry +} + +func (bav *UtxoView) GetUnlockableLockedBalanceEntries( + hodlerPKID *PKID, + profilePKID *PKID, + currentTimestampNanoSecs int64, +) ([]*LockedBalanceEntry, error) { + // Validate inputs. + if hodlerPKID == nil { + return nil, errors.New("UtxoView.GetUnlockableLockedBalanceEntries: nil hodlerPKID provided as input") + } + if profilePKID == nil { + return nil, errors.New("UtxoView.GetUnlockableLockedBalanceEntries: nil profilePKID provided as input") + } + + // First, pull unlockable LockedBalanceEntries from the db and cache them in the UtxoView. + dbUnlockableLockedBalanceEntries, err := DBGetUnlockableLockedBalanceEntries( + bav.Handle, bav.Snapshot, hodlerPKID, profilePKID, currentTimestampNanoSecs) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetUnlockableLockedBalanceEntries") + } + for _, lockedBalanceEntry := range dbUnlockableLockedBalanceEntries { + // Cache results in the UtxoView. + if _, exists := bav.LockedBalanceEntryKeyToLockedBalanceEntry[lockedBalanceEntry.ToMapKey()]; !exists { + bav._setLockedBalanceEntry(lockedBalanceEntry) + } + } + + // Then, pull unlockable LockedBalanceEntries from the UtxoView. 
+ var unlockableLockedBalanceEntries []*LockedBalanceEntry + for _, lockedBalanceEntry := range bav.LockedBalanceEntryKeyToLockedBalanceEntry { + // Filter to matching LockedBalanceEntries. + if !lockedBalanceEntry.HODLerPKID.Eq(hodlerPKID) || + !lockedBalanceEntry.ProfilePKID.Eq(profilePKID) || + lockedBalanceEntry.ExpirationTimestampNanoSecs > currentTimestampNanoSecs || + lockedBalanceEntry.BalanceBaseUnits.IsZero() || + lockedBalanceEntry.isDeleted { + continue + } + unlockableLockedBalanceEntries = append(unlockableLockedBalanceEntries, lockedBalanceEntry) + } + + // Sort UnlockableLockedBalanceEntries by timestamp ASC. + sort.Slice(unlockableLockedBalanceEntries, func(ii, jj int) bool { + return unlockableLockedBalanceEntries[ii].ExpirationTimestampNanoSecs < + unlockableLockedBalanceEntries[jj].ExpirationTimestampNanoSecs + }) + return unlockableLockedBalanceEntries, nil +} // // TYPES: LockupYieldCurvePoint @@ -231,7 +309,96 @@ func (bav *UtxoView) _deleteLockupYieldCurvePoint(point *LockupYieldCurvePoint) bav._setLockupYieldCurvePoint(&tombstoneLockupYieldCurvePoint) } -// TODO: Get Helper Functions for LockupYieldCurvePoint +// Get Helper Functions for LockupYieldCurvePoint + +func (bav *UtxoView) GetYieldCurvePointByProfilePKIDAndDurationNanoSecs(profilePKID *PKID, + lockupDurationNanoSecs int64) (_lockupYieldCurvePoint *LockupYieldCurvePoint) { + var lockupYieldCurvePoint *LockupYieldCurvePoint + + // Check the view for a yield curve point. 
+ if _, pointsInView := bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID]; pointsInView { + lockupYieldCurvePointKey := (&LockupYieldCurvePoint{ + ProfilePKID: profilePKID, + LockupDurationNanoSecs: lockupDurationNanoSecs, + }).ToMapKey() + if inMemoryYieldCurvePoint, pointExists := + bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID][lockupYieldCurvePointKey]; pointExists { + return inMemoryYieldCurvePoint + } + } + + // No mapping exists in the view, check for an entry in the DB. + lockupYieldCurvePoint = DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecs(bav.GetDbAdapter().badgerDb, + bav.Snapshot, profilePKID, lockupDurationNanoSecs) + + // Cache the DB entry in the in-memory map. + if lockupYieldCurvePoint != nil { + bav._setLockupYieldCurvePoint(lockupYieldCurvePoint) + } + + return lockupYieldCurvePoint +} + +func (bav *UtxoView) GetLocalYieldCurvePoints(profilePKID *PKID, lockupDuration int64) ( + _leftLockupPoint *LockupYieldCurvePoint, _rightLockupPoint *LockupYieldCurvePoint) { + var leftLockupPoint *LockupYieldCurvePoint + var rightLockupPoint *LockupYieldCurvePoint + + // Check the view for yield curve points. + if _, pointsInView := bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID]; pointsInView { + for _, lockupYieldCurvePoint := range bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID] { + // Check for nil pointer cases. + if lockupYieldCurvePoint.LockupDurationNanoSecs < lockupDuration && leftLockupPoint == nil { + leftLockupPoint = lockupYieldCurvePoint + } + if lockupYieldCurvePoint.LockupDurationNanoSecs >= lockupDuration && rightLockupPoint == nil { + rightLockupPoint = lockupYieldCurvePoint + } + + // Check if the point is "more left" than the current left point. 
+ if lockupYieldCurvePoint.LockupDurationNanoSecs < lockupDuration && + lockupYieldCurvePoint.LockupDurationNanoSecs > leftLockupPoint.LockupDurationNanoSecs { + leftLockupPoint = lockupYieldCurvePoint.Copy() + } + + // Check if the point is "more right" than the current right point. + if lockupYieldCurvePoint.LockupDurationNanoSecs >= lockupDuration && + lockupYieldCurvePoint.LockupDurationNanoSecs < rightLockupPoint.LockupDurationNanoSecs { + rightLockupPoint = lockupYieldCurvePoint.Copy() + } + } + } + + // Now we quickly fetch left and right local yield curve points from the DB using careful seek operations. + leftDBLockupPoint, rightDBLockupPoint := DBGetLocalYieldCurvePoints( + bav.GetDbAdapter().badgerDb, bav.Snapshot, profilePKID, lockupDuration) + + // Check for nil pointer cases. + if leftLockupPoint == nil && leftDBLockupPoint != nil && + leftDBLockupPoint.LockupDurationNanoSecs < lockupDuration { + leftLockupPoint = leftDBLockupPoint + } + if rightLockupPoint == nil && rightDBLockupPoint != nil && + rightDBLockupPoint.LockupDurationNanoSecs >= lockupDuration { + rightLockupPoint = rightDBLockupPoint + } + + // Check for an updated left and right yield curve point from the DB.
+ if leftDBLockupPoint != nil && + leftDBLockupPoint.ProfilePKID.Eq(profilePKID) && + leftDBLockupPoint.LockupDurationNanoSecs < lockupDuration && + leftDBLockupPoint.LockupDurationNanoSecs > leftLockupPoint.LockupDurationNanoSecs { + leftLockupPoint = leftDBLockupPoint + } + if rightDBLockupPoint != nil && + rightDBLockupPoint.ProfilePKID.Eq(profilePKID) && + rightDBLockupPoint.LockupDurationNanoSecs >= lockupDuration && + rightDBLockupPoint.LockupDurationNanoSecs < rightLockupPoint.LockupDurationNanoSecs { + rightLockupPoint = rightDBLockupPoint + } + + return leftLockupPoint, rightLockupPoint +} // // TYPES: CoinLockupMetadata @@ -475,3 +642,87 @@ func (txnData *CoinUnlockMetadata) FromBytes(data []byte) error { func (txnData *CoinUnlockMetadata) New() DeSoTxnMetadata { return &CoinUnlockMetadata{} } + +// +// DB FLUSHES +// + +func (bav *UtxoView) _flushLockedBalanceEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + // Go through all entries in the LockedBalanceEntryMapKeyToLockedBalanceEntry map. + for lockedBalanceEntryMapKeyIter, lockedBalanceEntry := range bav.LockedBalanceEntryKeyToLockedBalanceEntry { + lockedBalanceEntryKey := lockedBalanceEntryMapKeyIter + + // Sanity check the key computed from the lockedBalanceEntry is equal + // to the lockedBalanceEntryKey that maps to that entry. + lockedBalanceEntryKeyInEntry := lockedBalanceEntry.ToMapKey() + if lockedBalanceEntryKeyInEntry != lockedBalanceEntryKey { + return fmt.Errorf("_flushLockedBalanceEntriesToDbWithTxn: LockedBalanceEntry has "+ + "LockedBalanceEntryKey: %v, which doesn't match the LockedBalanceEntryMapKeyToLockedBalanceEntry map key %v", + &lockedBalanceEntryKeyInEntry, &lockedBalanceEntry) + } + + // Delete the existing mappings in the db for this LockedBalanceEntry. + // They will be re-added if the corresponding entry in memory has isDeleted=false. 
+ if err := DbDeleteLockedBalanceEntryWithTxn(txn, bav.Snapshot, *lockedBalanceEntry); err != nil { + return errors.Wrapf( + err, "_flushLockedBalanceEntriesToDbWithTxn: Problem deleting mappings "+ + "for LockedBalanceEntry: %v", &lockedBalanceEntryKey) + } + } + for _, lockedBalanceEntry := range bav.LockedBalanceEntryKeyToLockedBalanceEntry { + if lockedBalanceEntry.isDeleted || lockedBalanceEntry.BalanceBaseUnits.IsZero() { + // We do nothing as we've already deleted the entry above or the balance is zero. + } else { + if err := DbPutLockedBalanceEntryMappingsWithTxn(txn, bav.Snapshot, blockHeight, + *lockedBalanceEntry); err != nil { + return errors.Wrapf(err, "_flushLockedBalanceEntriesToDbWithTxn") + } + } + } + + // By here the LockedBalanceEntry mappings in the db should be up-to-date. + return nil +} + +func (bav *UtxoView) _flushLockupYieldCurvePointEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + // Go through all PKIDs with changes to their yield curves. + for _, LockupYieldCurvePointMap := range bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints { + // Go through all LockupYieldCurvePoints in the LockupYieldCurvePoint map. + for lockupYieldCurvePointKey, lockupYieldCurvePoint := range LockupYieldCurvePointMap { + + // Sanity check the key computed from the lockupYieldCurvePoint is equal + // to the lockupYieldCurvePointKey that maps to that entry. + lockupYieldCurvePointKeyInEntry := lockupYieldCurvePoint.ToMapKey() + if lockupYieldCurvePointKeyInEntry != lockupYieldCurvePointKey { + return fmt.Errorf("_flushYieldCurveEntriesToDbWithTxn: LockupYieldCurvePoint has "+ + "LockupYieldCurvePoint: %v, which doesn't match the LockupYieldCurvePoint map key %v", + &lockupYieldCurvePointKeyInEntry, &lockupYieldCurvePointKey) + } + + // Delete the existing mappings in the db for this LockupYieldCurvePoint. + // They will be re-added if the corresponding entry in memory has isDeleted=false. 
+ if err := DbDeleteLockupYieldCurvePointWithTxn(txn, bav.Snapshot, *lockupYieldCurvePoint); err != nil { + return errors.Wrapf( + err, "_flushYieldCurveEntriesToDbWithTxn: Problem deleting mappings "+ + "for LockupYieldCurvePoint: %v", &lockupYieldCurvePoint) + } + } + } + // Go through all PKIDs with changes to their yield curves. + for _, LockupYieldCurvePointMap := range bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints { + // Go through all LockupYieldCurvePoints in the LockupYieldCurvePoint map. + for _, lockupYieldCurvePoint := range LockupYieldCurvePointMap { + if lockupYieldCurvePoint.isDeleted { + // We do nothing as we've already deleted the entry above. + } else { + if err := DbPutLockupYieldCurvePointMappingsWithTxn(txn, bav.Snapshot, blockHeight, + *lockupYieldCurvePoint); err != nil { + return errors.Wrapf(err, "_flushYieldCurveEntriesToDbWithTxn") + } + } + } + } + + // By here the LockupYieldCurvePoint mappings in the db should be up-to-date. + return nil +} diff --git a/lib/block_view_types.go b/lib/block_view_types.go index ac627693c..fe44860ee 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -937,6 +937,28 @@ type UtxoOperation struct { // PrevLockedStakeEntries is a slice of LockedStakeEntries // prior to a unstake or unlock stake txn. PrevLockedStakeEntries []*LockedStakeEntry + + // + // Coin Lockup fields + // + + // PrevLockedBalanceEntry is the previous LockedBalanceEntry prior + // to a DAO coin lockup. + PrevLockedBalanceEntry *LockedBalanceEntry + + // PrevSenderLockedBalanceEntry and PrevReceiverLockedBalanceEntry are the previous LockedBalanceEntry + // for both the sender and receiver in the coin lockup transfer operation. + PrevSenderLockedBalanceEntry *LockedBalanceEntry + PrevReceiverLockedBalanceEntry *LockedBalanceEntry + + // PrevLockedBalanceEntries is a slice of LockedBalanceEntry prior to a coin unlock. 
+ PrevLockedBalanceEntries []*LockedBalanceEntry + + // PrevLockupYieldCurvePoint and PrevLockupTransferRestriction are + // the previous yield curve and transfer restrictions associated + // with an UpdateCoinLockupParams transaction. + PrevLockupYieldCurvePoint *LockupYieldCurvePoint + PrevLockupTransferRestriction TransferRestrictionStatus } func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -1263,6 +1285,26 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada // PrevLockedStakeEntries data = append(data, EncodeDeSoEncoderSlice(op.PrevLockedStakeEntries, blockHeight, skipMetadata...)...) + + // Lockup Fields + + // PrevLockedBalanceEntry + data = append(data, EncodeToBytes(blockHeight, op.PrevLockedBalanceEntry, skipMetadata...)...) + + // PrevLockupYieldCurvePoint, PrevLockupTransferRestrictions + data = append(data, EncodeToBytes(blockHeight, op.PrevLockupYieldCurvePoint, skipMetadata...)...) + data = append(data, byte(op.PrevLockupTransferRestriction)) + + // PrevSenderLockedBalanceEntry, PrevReceiverLockedBalanceEntry + data = append(data, EncodeToBytes(blockHeight, op.PrevSenderLockedBalanceEntry, skipMetadata...)...) + data = append(data, EncodeToBytes(blockHeight, op.PrevReceiverLockedBalanceEntry, skipMetadata...)...) + + // PrevTransactorBalanceEntry, PrevLockedBalanceEntries + data = append(data, EncodeToBytes(blockHeight, op.PrevTransactorBalanceEntry, skipMetadata...)...) + data = append(data, UintToBuf(uint64(len(op.PrevLockedBalanceEntries)))...) + for _, entry := range op.PrevLockedBalanceEntries { + data = append(data, EncodeToBytes(blockHeight, entry, skipMetadata...)...) + } } return data @@ -1895,6 +1937,48 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. 
if op.PrevLockedStakeEntries, err = DecodeDeSoEncoderSlice[*LockedStakeEntry](rr); err != nil { return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockedStakeEntries: ") } + + // Lockup Fields + + // PrevLockedBalanceEntry + if op.PrevLockedBalanceEntry, err = DecodeDeSoEncoder(&LockedBalanceEntry{}, rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockedBalanceEntry") + } + + // PrevLockupYieldCurvePoint, PrevLockupTransferRestriction + if op.PrevLockupYieldCurvePoint, err = DecodeDeSoEncoder(&LockupYieldCurvePoint{}, rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockupYieldCurvePoint") + } + lockupTransferRestriction, err := rr.ReadByte() + if err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockupTransferRestriction") + } + op.PrevLockupTransferRestriction = TransferRestrictionStatus(lockupTransferRestriction) + + // PrevSenderLockedBalanceEntry, PrevReceiverLockedBalanceEntry + if op.PrevSenderLockedBalanceEntry, err = DecodeDeSoEncoder(&LockedBalanceEntry{}, rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevSenderLockedBalanceEntry") + } + if op.PrevReceiverLockedBalanceEntry, err = DecodeDeSoEncoder(&LockedBalanceEntry{}, rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem Reading PrevReceiverLockedBalanceEntry") + } + + // PrevTransactorBalanceEntry, PrevLockedBalanceEntries + if op.PrevTransactorBalanceEntry, err = DecodeDeSoEncoder(&BalanceEntry{}, rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevTransactorBalanceEntry") + } + var numPrevLockedBalanceEntries uint64 + numPrevLockedBalanceEntries, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading numPrevLockedBalanceEntries") + } + for ; numPrevLockedBalanceEntries > 0; numPrevLockedBalanceEntries-- { + 
prevLockedBalanceEntry := &LockedBalanceEntry{} + if _, err = DecodeFromBytes(prevLockedBalanceEntry, rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading prevLockedBalanceEntry") + } + op.PrevLockedBalanceEntries = append(op.PrevLockedBalanceEntries, prevLockedBalanceEntry) + } } return nil diff --git a/lib/db_utils.go b/lib/db_utils.go index 9d26501f0..348790ea5 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -10681,6 +10681,409 @@ func DBDeletePostAssociationWithTxn(txn *badger.Txn, snap *Snapshot, association return nil } +// ------------------------------------------------------------------------------------- +// Lockup DB Operations +// ------------------------------------------------------------------------------------- + +// LockedBalanceEntry DB Key Operations + +func _dbKeyForLockedBalanceEntry(lockedBalanceEntry LockedBalanceEntry) []byte { + key := append([]byte{}, Prefixes.PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDExpirationTimestampNanoSecs...) + key = append(key, lockedBalanceEntry.HODLerPKID[:]...) + key = append(key, lockedBalanceEntry.ProfilePKID[:]...) + return append(key, EncodeUint64(uint64(lockedBalanceEntry.ExpirationTimestampNanoSecs))...) +} + +func DBPrefixKeyForLockedBalanceEntryByHODLerPKIDandProfilePKID(lockedBalanceEntry *LockedBalanceEntry) []byte { + data := append([]byte{}, Prefixes.PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDExpirationTimestampNanoSecs...) + data = append(data, lockedBalanceEntry.HODLerPKID.ToBytes()...) + data = append(data, lockedBalanceEntry.ProfilePKID.ToBytes()...) + return data +} + +// LockedBalanceEntry Put/Delete Operations (Badger Writes) + +func DbPutLockedBalanceEntryMappingsWithTxn(txn *badger.Txn, snap *Snapshot, blockHeight uint64, + lockedBalanceEntry LockedBalanceEntry) error { + // Sanity check the fields in the LockedBalanceEntry used in constructing the key. 
+ if len(lockedBalanceEntry.HODLerPKID) != btcec.PubKeyBytesLenCompressed { + return fmt.Errorf("DbPutLockedBalanceEntryMappingsWithTxn: HODLer PKID "+ + "length %d != %d", len(lockedBalanceEntry.HODLerPKID), btcec.PubKeyBytesLenCompressed) + } + if len(lockedBalanceEntry.ProfilePKID) != btcec.PubKeyBytesLenCompressed { + return fmt.Errorf("DbPutLockedBalanceEntryMappingsWithTxn: Profile PKID "+ + "length %d != %d", len(lockedBalanceEntry.ProfilePKID), btcec.PubKeyBytesLenCompressed) + } + + if err := DBSetWithTxn(txn, snap, _dbKeyForLockedBalanceEntry(lockedBalanceEntry), + EncodeToBytes(blockHeight, &lockedBalanceEntry)); err != nil { + return errors.Wrapf(err, "DbPutLockedBalanceEntryMappingsWithTxn: "+ + "Problem adding locked balance entry to db") + } + return nil +} + +func DbDeleteLockedBalanceEntryWithTxn(txn *badger.Txn, snap *Snapshot, lockedBalanceEntry LockedBalanceEntry) error { + // First check that a mapping exists. If one doesn't then there's nothing to do. + _, err := DBGetWithTxn(txn, snap, _dbKeyForLockedBalanceEntry(lockedBalanceEntry)) + if err != nil { + return nil + } + + // When a locked balance entry exists, delete the locked balance entry mapping. 
+ if err := DBDeleteWithTxn(txn, snap, _dbKeyForLockedBalanceEntry(lockedBalanceEntry)); err != nil { + return errors.Wrapf(err, "DbDeleteRepostMappingsWithTxn: Deleting "+ + "locked balance entry for HODLer PKID %s, Profile PKID %s, expiration timestamp %d", + lockedBalanceEntry.HODLerPKID.ToString(), lockedBalanceEntry.ProfilePKID.ToString(), + lockedBalanceEntry.ExpirationTimestampNanoSecs) + } + return nil +} + +// LockedBalanceEntry Get Operations (Badger Reads) + +func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs(handle *badger.DB, snap *Snapshot, + hodlerPKID *PKID, profilePKID *PKID, expirationTimestamp int64) *LockedBalanceEntry { + + var ret *LockedBalanceEntry + handle.View(func(txn *badger.Txn) error { + ret = DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecsWithTxn( + txn, snap, hodlerPKID, profilePKID, expirationTimestamp) + return nil + }) + return ret +} + +func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecsWithTxn(txn *badger.Txn, snap *Snapshot, + hodlerPKID *PKID, profilePKID *PKID, expirationTimestamp int64) *LockedBalanceEntry { + + key := _dbKeyForLockedBalanceEntry(LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + ExpirationTimestampNanoSecs: expirationTimestamp, + }) + lockedBalanceEntryBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + return &LockedBalanceEntry{ + HODLerPKID: hodlerPKID.NewPKID(), + ProfilePKID: profilePKID.NewPKID(), + ExpirationTimestampNanoSecs: expirationTimestamp, + BalanceBaseUnits: *uint256.NewInt(), + } + } + lockedBalanceEntryObj := &LockedBalanceEntry{} + rr := bytes.NewReader(lockedBalanceEntryBytes) + DecodeFromBytes(lockedBalanceEntryObj, rr) + return lockedBalanceEntryObj +} + +func DBGetUnlockableLockedBalanceEntries( + handle *badger.DB, + snap *Snapshot, + hodlerPKID *PKID, + profilePKID *PKID, + currentTimestampUnixNanoSecs int64, +) ([]*LockedBalanceEntry, error) { + var ret 
[]*LockedBalanceEntry + var err error + handle.View(func(txn *badger.Txn) error { + ret, err = DBGetUnlockableLockedBalanceEntriesWithTxn( + txn, snap, hodlerPKID, profilePKID, currentTimestampUnixNanoSecs) + return nil + }) + return ret, err +} + +func DBGetUnlockableLockedBalanceEntriesWithTxn( + txn *badger.Txn, + snap *Snapshot, + hodlerPKID *PKID, + profilePKID *PKID, + currentTimestampUnixNanoSecs int64, +) ([]*LockedBalanceEntry, error) { + // Retrieve all LockedBalanceEntries from db matching hodlerPKID, profilePKID, and + // ExpirationTimestampNanoSecs <= currentTimestampUnixNanoSecs. + // NOTE: While ideally we would start with and + // seek till , + // Badger does not support this functionality as the ValidForPrefix() function + // stops when a mismatched prefix occurs, not a "lexicographically less than" prefix. + // For this reason, we start with + // and iterate backwards while we're valid for the prefix . + + if currentTimestampUnixNanoSecs < 0 || currentTimestampUnixNanoSecs == math.MaxInt64-1 { + return nil, fmt.Errorf("DBGetUnlockableLockedBalanceEntriesWithTxn: invalid " + + "block timestamp; this shouldn't be possible") + } + + // Start at + startKey := _dbKeyForLockedBalanceEntry(LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + ExpirationTimestampNanoSecs: currentTimestampUnixNanoSecs, + }) + + // Valid for prefix + prefixKey := DBPrefixKeyForLockedBalanceEntryByHODLerPKIDandProfilePKID(&LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + }) + + // Create an iterator. We set the iterator to reverse in o + opts := badger.DefaultIteratorOptions + opts.Reverse = true + iterator := txn.NewIterator(opts) + defer iterator.Close() + + // Store matching LockedBalanceEntries to return + var lockedBalanceEntries []*LockedBalanceEntry + + // Loop. + for iterator.Seek(startKey); iterator.ValidForPrefix(prefixKey); iterator.Next() { + // Retrieve the LockedBalanceEntryBytes. 
+		lockedBalanceEntryBytes, err := iterator.Item().ValueCopy(nil)
+		if err != nil {
+			return nil, errors.Wrapf(err, "DBGetUnlockableLockedBalanceEntriesWithTxn: "+
+				"error retrieving LockedBalanceEntry: ")
+		}
+
+		// Convert LockedBalanceEntryBytes to LockedBalanceEntry.
+		rr := bytes.NewReader(lockedBalanceEntryBytes)
+		lockedBalanceEntry, err := DecodeDeSoEncoder(&LockedBalanceEntry{}, rr)
+		if err != nil {
+			return nil, errors.Wrapf(err, "DBGetUnlockableLockedBalanceEntriesWithTxn: "+
+				"error decoding LockedBalanceEntry: ")
+		}
+
+		if lockedBalanceEntry.ExpirationTimestampNanoSecs <= currentTimestampUnixNanoSecs {
+			lockedBalanceEntries = append(lockedBalanceEntries, lockedBalanceEntry)
+		}
+	}
+
+	return lockedBalanceEntries, nil
+}
+
+// LockupYieldCurvePoint DB Key Operations
+
+func _dbKeyForLockupYieldCurvePoint(lockupYieldCurvePoint LockupYieldCurvePoint) []byte {
+	// Make a copy to avoid multiple calls to this function re-using the same slice.
+	prefixCopy := append([]byte{}, Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs...)
+	key := append(prefixCopy, lockupYieldCurvePoint.ProfilePKID[:]...)
+
+	// Note that while we typically use UintToBuf to encode int64 and uint64 data,
+	// we cannot use that here. The variable length encoding of the int64 LockupDuration
+	// would make unpredictable badgerDB seeks. We must ensure the int64 and uint64
+	// encodings to be fixed length (i.e. 8-bytes) to ensure proper BadgerDB seeks.
+	// Hence, we use the encoding/binary library in place of the lib/varint package.
+	//
+	// Also note we explicitly use BigEndian formatting for encoding the lockup duration.
+	// BigEndian means for the uint64 0x0123456789ABCDEF, the resulting byte slice will be:
+	// []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}. For comparison, LittleEndian would result in
+	// a byte slice of: []byte{0xEF, 0xCD, 0xAB, 0x89, 0x67, 0x45, 0x23, 0x01}
+	// This is crucial for badgerDB seeks as badger lexicographically seeks to nearest keys and
+	// BigEndian formatting ensures the lexicographic seeks function properly.
+
+	lockupDurationBytes := make([]byte, 8)
+	binary.BigEndian.PutUint64(lockupDurationBytes, uint64(lockupYieldCurvePoint.LockupDurationNanoSecs))
+	key = append(key, lockupDurationBytes...)
+
+	return key
+}
+
+// LockupYieldCurvePoint Put/Delete Operations (Badger Writes)
+
+func DbPutLockupYieldCurvePointMappingsWithTxn(txn *badger.Txn, snap *Snapshot, blockHeight uint64,
+	lockupYieldCurvePoint LockupYieldCurvePoint) error {
+	// Sanity check the fields in the LockupYieldCurvePoint used in constructing the key.
+	if len(lockupYieldCurvePoint.ProfilePKID) != btcec.PubKeyBytesLenCompressed {
+		return fmt.Errorf("DbPutLockupYieldCurvePointMappingsWithTxn: Profile PKID "+
+			"length %d != %d", len(lockupYieldCurvePoint.ProfilePKID), btcec.PubKeyBytesLenCompressed)
+	}
+	if lockupYieldCurvePoint.LockupDurationNanoSecs <= 0 {
+		return fmt.Errorf("DbPutLockupYieldCurvePointMappingsWithTxn: Trying to put "+
+			"lockup yield curve point with negative duration: %d", lockupYieldCurvePoint.LockupDurationNanoSecs)
+	}
+
+	if err := DBSetWithTxn(txn, snap, _dbKeyForLockupYieldCurvePoint(lockupYieldCurvePoint),
+		EncodeToBytes(blockHeight, &lockupYieldCurvePoint)); err != nil {
+		return errors.Wrapf(err, "DbPutLockupYieldCurvePointMappingsWithTxn: "+
+			"Problem adding locked balance entry to db")
+	}
+	return nil
+}
+
+func DbDeleteLockupYieldCurvePointWithTxn(txn *badger.Txn, snap *Snapshot,
+	lockupYieldCurvePoint LockupYieldCurvePoint) error {
+	// First check that a mapping exists. If one doesn't then there's nothing to do.
+ _, err := DBGetWithTxn(txn, snap, _dbKeyForLockupYieldCurvePoint(lockupYieldCurvePoint)) + if err != nil { + return nil + } + + // When a locked balance entry exists, delete the locked balance entry mapping. + if err := DBDeleteWithTxn(txn, snap, _dbKeyForLockupYieldCurvePoint(lockupYieldCurvePoint)); err != nil { + return errors.Wrapf(err, "DbDeleteRepostMappingsWithTxn: Deleting "+ + "locked balance entry for Profile PKID %s, Duration %d, APY Yield Basis Points %d", + lockupYieldCurvePoint.ProfilePKID.ToString(), lockupYieldCurvePoint.LockupDurationNanoSecs, + lockupYieldCurvePoint.LockupYieldAPYBasisPoints) + } + return nil +} + +// LockupYieldCurvePoint Get Operations (Badger Reads) + +func DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecs(handle *badger.DB, snap *Snapshot, profilePKID *PKID, + lockupDurationNanoSecs int64) (_lockupYieldCurvePoint *LockupYieldCurvePoint) { + var lockupYieldCurvePoint *LockupYieldCurvePoint + handle.View(func(txn *badger.Txn) error { + lockupYieldCurvePoint = DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecsWithTxn( + txn, snap, profilePKID, lockupDurationNanoSecs) + return nil + }) + return lockupYieldCurvePoint +} + +func DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecsWithTxn(txn *badger.Txn, snap *Snapshot, + profilePKID *PKID, lockupDurationNanoSecs int64) (_lockupYieldCurvePoint *LockupYieldCurvePoint) { + // Construct the key. + key := _dbKeyForLockupYieldCurvePoint(LockupYieldCurvePoint{ + ProfilePKID: profilePKID, + LockupDurationNanoSecs: lockupDurationNanoSecs, + }) + + // Fetch the point from the database. + lockupYieldCurvePointBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + return nil + } + + // Parse the bytes beneath the key. 
+ lockupYieldCurvePointObj := &LockupYieldCurvePoint{} + rr := bytes.NewReader(lockupYieldCurvePointBytes) + DecodeFromBytes(lockupYieldCurvePointObj, rr) + return lockupYieldCurvePointObj +} + +func DBGetLocalYieldCurvePoints(handle *badger.DB, snap *Snapshot, profilePKID *PKID, lockupDurationNanoSecs int64) ( + _leftLockupYieldCurvePoint *LockupYieldCurvePoint, _rightLockupYieldCurvePoint *LockupYieldCurvePoint) { + var leftLockupYieldCurvePoint *LockupYieldCurvePoint + handle.View(func(txn *badger.Txn) error { + leftLockupYieldCurvePoint = DBGetLeftLockupYieldCurvePointWithTxn( + txn, snap, profilePKID, lockupDurationNanoSecs) + return nil + }) + var rightLockupYieldCurvePoint *LockupYieldCurvePoint + handle.View(func(txn *badger.Txn) error { + rightLockupYieldCurvePoint = DBGetRightLockupYieldCurvePointWithTxn( + txn, snap, profilePKID, lockupDurationNanoSecs) + return nil + }) + return leftLockupYieldCurvePoint, rightLockupYieldCurvePoint +} + +func DBGetLeftLockupYieldCurvePointWithTxn(txn *badger.Txn, snap *Snapshot, profilePKID *PKID, + lockupDurationNanoSecs int64) (_leftLockupYieldCurvePoint *LockupYieldCurvePoint) { + key := _dbKeyForLockupYieldCurvePoint(LockupYieldCurvePoint{ + ProfilePKID: profilePKID, + LockupDurationNanoSecs: lockupDurationNanoSecs, + }) + + // Seek left of the yield curve point. + iterLeftOpts := badger.DefaultIteratorOptions + iterLeftOpts.Reverse = true + iterLeft := txn.NewIterator(iterLeftOpts) + iterLeft.Seek(key) + iterLeftKey := iterLeft.Item().Key() + + // There's a chance our seek yield a key in a different prefix (i.e. not a yield curve point). + // In this case, we know _dbKeyToLockupYieldCurvePoint will fail in parsing the key. + // We can return early in this case as there's no relevant yield points in the DB. 
+	if len(iterLeftKey) < len(Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs) {
+		return nil
+	}
+	if !bytes.Equal(iterLeftKey[:len(Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs)],
+		Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs) {
+		return nil
+	}
+
+	// Fetch the LockupYieldCurvePoint beneath the key.
+	leftLockupYieldCurvePointBytes, err := DBGetWithTxn(txn, snap, iterLeftKey)
+	if err != nil {
+		return nil
+	}
+
+	// Parse the bytes beneath the key.
+	leftLockupYieldCurvePointObj := &LockupYieldCurvePoint{}
+	rr := bytes.NewReader(leftLockupYieldCurvePointBytes)
+	DecodeFromBytes(leftLockupYieldCurvePointObj, rr)
+	return leftLockupYieldCurvePointObj
+}
+
+func DBGetRightLockupYieldCurvePointWithTxn(txn *badger.Txn, snap *Snapshot, profilePKID *PKID,
+	lockupDurationNanoSecs int64) (_rightLockupYieldCurvePoint *LockupYieldCurvePoint) {
+	key := _dbKeyForLockupYieldCurvePoint(LockupYieldCurvePoint{
+		ProfilePKID:            profilePKID,
+		LockupDurationNanoSecs: lockupDurationNanoSecs,
+	})
+
+	// Seek right of the yield curve point.
+	iterRightOpts := badger.DefaultIteratorOptions
+	iterRight := txn.NewIterator(iterRightOpts)
+	iterRight.Seek(key)
+	iterRightKey := iterRight.Item().Key()
+
+	// There's a chance our seek yields a key in a different prefix (i.e. not a yield curve point).
+	// In this case, we know _dbKeyToLockupYieldCurvePoint will fail in parsing the key.
+	// We can return early in this case as there's no relevant yield points in the DB.
+	if len(iterRightKey) < len(Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs) {
+		return nil
+	}
+	if !bytes.Equal(iterRightKey[:len(Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs)],
+		Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs) {
+		return nil
+	}
+
+	// Fetch the LockupYieldCurvePoint beneath the key.
+ rightLockupYieldCurvePointBytes, err := DBGetWithTxn(txn, snap, iterRightKey) + if err != nil { + return nil + } + + // Parse the bytes beneath the key. + rightLockupYieldCurvePointObj := &LockupYieldCurvePoint{} + rr := bytes.NewReader(rightLockupYieldCurvePointBytes) + DecodeFromBytes(rightLockupYieldCurvePointObj, rr) + return rightLockupYieldCurvePointObj +} + +func DBGetLockupYieldCurvePointForProfilePKIDAndLockupDurationNanoSecs(handle *badger.DB, snap *Snapshot, profilePKID *PKID, lockupDurationNanoSecs int64) *LockupYieldCurvePoint { + + var ret *LockupYieldCurvePoint + handle.View(func(txn *badger.Txn) error { + ret = DBGetLockupYieldCurvePointForProfilePKIDAndLockupDurationNanoSecsWithTxn( + txn, snap, profilePKID, lockupDurationNanoSecs) + return nil + }) + return ret +} + +func DBGetLockupYieldCurvePointForProfilePKIDAndLockupDurationNanoSecsWithTxn(txn *badger.Txn, snap *Snapshot, + profilePKID *PKID, lockupDurationNanoSecs int64) *LockupYieldCurvePoint { + + key := _dbKeyForLockupYieldCurvePoint(LockupYieldCurvePoint{ + ProfilePKID: profilePKID, + LockupDurationNanoSecs: lockupDurationNanoSecs, + }) + lockupYieldCurvePointBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + return nil + } + + lockupYieldCurvePointObj := &LockupYieldCurvePoint{} + rr := bytes.NewReader(lockupYieldCurvePointBytes) + DecodeFromBytes(lockupYieldCurvePointObj, rr) + return lockupYieldCurvePointObj +} + // ------------------------------------------------------------------------------------- // DeSo nonce mapping functions // ------------------------------------------------------------------------------------- From 7fb99fd4cbc55a900ec9b2a13d8b124a91318c1a Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Thu, 21 Sep 2023 10:14:00 -0700 Subject: [PATCH 221/762] Coin Lockup Connect/Disconnect Logic --- lib/block_view_lockups.go | 368 ++++++++++++++++++++++++++++++++++++++ lib/errors.go | 15 ++ 2 files changed, 383 insertions(+) diff --git a/lib/block_view_lockups.go 
b/lib/block_view_lockups.go index 6a89b3b8f..882b7ec55 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -3,10 +3,13 @@ package lib import ( "bytes" "fmt" + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" + "math" + "math/big" "sort" ) @@ -643,6 +646,371 @@ func (txnData *CoinUnlockMetadata) New() DeSoTxnMetadata { return &CoinUnlockMetadata{} } +// +// CoinLockup Transaction Logic +// + +func (bav *UtxoView) _connectCoinLockup( + txn *MsgDeSoTxn, txHash *BlockHash, + blockHeight uint32, blockTimestamp int64, + verifySignatures bool) (_totalInput uint64, + _totalOutput uint64, _utxoOps []*UtxoOperation, _err error) { + var utxoOpsForTxn []*UtxoOperation + + // Validate the starting block height. + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || + blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { + return 0, 0, nil, + errors.Wrapf(RuleErrorLockupTxnBeforeBlockHeight, "_connectCoinLockup") + } + + // Validate the txn TxnType. + if txn.TxnMeta.GetTxnType() != TxnTypeCoinLockup { + return 0, 0, nil, fmt.Errorf( + "_connectCoinLockup: called with bad TxnType %s", txn.TxnMeta.GetTxnType().String(), + ) + } + + // Try connecting the basic transfer without considering transaction metadata. + _, _, utxoOpsForBasicTransfer, err := bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectCoinLockup") + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOpsForBasicTransfer...) + + // Grab the txn metadata. + txMeta := txn.TxnMeta.(*CoinLockupMetadata) + + // Check that the target profile public key is valid and that a profile corresponding to that public key exists. 
+ var profileEntry *ProfileEntry + if len(txMeta.ProfilePublicKey) != btcec.PubKeyBytesLenCompressed { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupInvalidProfilePubKey, "_connectCoinLockup") + } + if !txMeta.ProfilePublicKey.IsZeroPublicKey() { + profileEntry = bav.GetProfileEntryForPublicKey(txMeta.ProfilePublicKey.ToBytes()) + if profileEntry == nil || profileEntry.isDeleted { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupOnNonExistentProfile, "_connectCoinLockup") + } + } + + // Validate the lockup amount as non-zero. This is meant to prevent wasteful "no-op" transactions. + if txMeta.LockupAmountBaseUnits.IsZero() { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupOfAmountZero, "_connectCoinLockup") + } + + // If this is a DeSo lockup, ensure the amount is less than 2**64 (maximum DeSo balance). + if txMeta.ProfilePublicKey.IsZeroPublicKey() && !txMeta.LockupAmountBaseUnits.IsUint64() { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupExcessiveDeSoLockup, "_connectCoinLockup") + } + + // Validate the lockup expires in the future. + if txMeta.UnlockTimestampNanoSecs <= blockTimestamp { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupInvalidLockupDuration, "_connectCoinLockup") + } + + // Compute the lockup duration in nanoseconds. + lockupDurationNanoSeconds := txMeta.UnlockTimestampNanoSecs - blockTimestamp + + // Determine the hodler PKID to use. + transactorPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupInvalidTransactorPKID, "_connectCoinLockup") + } + hodlerPKID := transactorPKIDEntry.PKID + + // Determine which profile PKID to use. 
+ var profilePKID *PKID + if txMeta.ProfilePublicKey.IsZeroPublicKey() { + profilePKID = ZeroPKID.NewPKID() + } else { + profilePKIDEntry := bav.GetPKIDForPublicKey(txMeta.ProfilePublicKey.ToBytes()) + if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupNonExistentProfile, "_connectCoinLockup") + } + profilePKID = profilePKIDEntry.PKID.NewPKID() + } + + // Validate the transactor as having sufficient DAO Coin or DESO balance for the transaction. + var transactorBalanceNanos256 *uint256.Int + var prevTransactorBalanceEntry *BalanceEntry + if profilePKID.IsZeroPKID() { + // Check the DeSo balance of the user. + transactorBalanceNanos, err := bav.GetDeSoBalanceNanosForPublicKey(txn.PublicKey) + if err != nil { + return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup") + } + + // Construct a uint256 balance and validate the transactor as having sufficient DeSo. + transactorBalanceNanos256, _ = uint256.FromBig(big.NewInt(0).SetUint64(transactorBalanceNanos)) + if txMeta.LockupAmountBaseUnits.Gt(transactorBalanceNanos256) { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupInsufficientDeSo, "_connectCoinLockup") + } + + // Spend the transactor's DeSo balance. + lockupAmount64 := txMeta.LockupAmountBaseUnits.Uint64() + newUtxoOp, err := bav._spendBalance(lockupAmount64, txn.PublicKey, blockHeight) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectCoinLockup") + } + utxoOpsForTxn = append(utxoOpsForTxn, newUtxoOp) + } else { + // Check the BalanceEntry of the user. + transactorBalanceEntry, _, _ := bav.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( + txn.PublicKey, + txMeta.ProfilePublicKey.ToBytes(), + true) + if transactorBalanceEntry == nil || transactorBalanceEntry.isDeleted { + return 0, 0, nil, + errors.Wrapf(RuleErrorCoinLockupBalanceEntryDoesNotExist, "_connectCoinLockup") + } + + // Validate the balance entry as having sufficient funds. 
+ transactorBalanceNanos256 = transactorBalanceEntry.BalanceNanos.Clone() + if txMeta.LockupAmountBaseUnits.Gt(transactorBalanceNanos256) { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupInsufficientCoins, "_connectCoinLockup") + } + + // We store the previous transactor balance entry in the event we need to revert the transaction. + prevTransactorBalanceEntry = transactorBalanceEntry + + // Spend the transactor's DAO coin balance. + transactorBalanceEntry.BalanceNanos = + *uint256.NewInt().Sub(&transactorBalanceEntry.BalanceNanos, txMeta.LockupAmountBaseUnits) + bav._setDAOCoinBalanceEntryMappings(transactorBalanceEntry) + } + + // By now we know the transaction to be valid. We now source yield information from either + // the profile's yield curve or the raw DeSo yield curve. Because there's some choice in how + // to determine the yield when the lockup duration falls between two profile specified yield curve + // points, we return here the two local points and choose/interpolate between them below. + leftYieldCurvePoint, rightYieldCurvePoint := bav.GetLocalYieldCurvePoints(profilePKID, lockupDurationNanoSeconds) + + // Here we interpolate (choose) the yield between the two returned local yield curve points. + // + // If we fall between two points, we choose the left yield curve point (i.e. the one with lesser lockup duration). + // The transactor earns yield only for the lockup duration specified by the left yield curve point but will + // be unable to unlock the coins until the transaction specified lockup duration expires. 
+	txnYieldBasisPoints := uint64(0)
+	txnYieldEarningDurationNanoSecs := int64(0)
+	if leftYieldCurvePoint != nil && leftYieldCurvePoint.LockupDurationNanoSecs < lockupDurationNanoSeconds {
+		txnYieldBasisPoints = leftYieldCurvePoint.LockupYieldAPYBasisPoints
+		txnYieldEarningDurationNanoSecs = leftYieldCurvePoint.LockupDurationNanoSecs
+	}
+	if rightYieldCurvePoint != nil && rightYieldCurvePoint.LockupDurationNanoSecs == lockupDurationNanoSeconds {
+		txnYieldBasisPoints = rightYieldCurvePoint.LockupYieldAPYBasisPoints
+		txnYieldEarningDurationNanoSecs = rightYieldCurvePoint.LockupDurationNanoSecs
+	}
+
+	// Convert variables to a consistent uint256 representation. This is to use them in SafeUint256 math.
+	txnYieldBasisPoints256 := uint256.NewInt().SetUint64(txnYieldBasisPoints)
+	txnYieldEarningDurationNanoSecs256 := uint256.NewInt().SetUint64(uint64(txnYieldEarningDurationNanoSecs))
+
+	// Compute the yield associated with this operation, checking to ensure there's no overflow.
+	yieldFromTxn, err :=
+		CalculateLockupYield(txMeta.LockupAmountBaseUnits, txnYieldBasisPoints256, txnYieldEarningDurationNanoSecs256)
+	if err != nil {
+		return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup")
+	}
+
+	// We check that the minted yield does not cause an overflow when added to the locked-up principal.
+	// In the case of DeSo being locked up, we must check that the resulting amount is less than 2**64.
+	if uint256.NewInt().Sub(MaxUint256, yieldFromTxn).Lt(txMeta.LockupAmountBaseUnits) {
+		return 0, 0, nil,
+			errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup")
+	}
+
+	// Compute the amount to be added to the locked balance entry: the locked-up principal plus its yield.
+	lockupValue := *uint256.NewInt().Add(txMeta.LockupAmountBaseUnits, yieldFromTxn)
+
+	// In the case of DeSo being locked up, we ensure that the resulting amount is less than 2**64.
+ if profilePKID.IsZeroPKID() && !lockupValue.IsUint64() { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup") + } + + // NOTE: While we could check for "global" overflow here, we let this occur on the unlock transaction instead. + // Global overflow is where the yield causes fields like CoinEntry.CoinsInCirculationNanos to overflow. + // Performing the check here would be redundant and may lead to worse UX in the case of coins being + // burned in the future making current lockups no longer an overflow. Checking here would also + // create a DoS attack vector where a malicious entity takes out an extremely long-dated lockup + // with the sole intent of saturating the CoinsInCirculationNanos field preventing others from locking up. + + // For consolidation, we fetch equivalent LockedBalanceEntries. + lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + hodlerPKID, profilePKID, txMeta.UnlockTimestampNanoSecs) + if lockedBalanceEntry == nil || lockedBalanceEntry.isDeleted { + lockedBalanceEntry = &LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + ExpirationTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), + } + } + previousLockedBalanceEntry := *lockedBalanceEntry + + // Check for overflow within the locked balance entry itself. + if uint256.NewInt().Sub(MaxUint256, yieldFromTxn).Lt(transactorBalanceNanos256) { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup") + } + if profilePKID.IsZeroPKID() { + // Check if DeSo minted would overflow 2**64 in the transactor balance. + if uint256.NewInt().Sub(uint256.NewInt().SetUint64(math.MaxUint64), yieldFromTxn).Lt(transactorBalanceNanos256) { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup") + } + } + + // Increment the lockedBalanceEntry and update the view. 
+	lockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt().Add(&lockedBalanceEntry.BalanceBaseUnits, &lockupValue)
+	bav._setLockedBalanceEntry(lockedBalanceEntry)
+
+	// Add a UtxoOperation for easy reversion during disconnect.
+	utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{
+		Type:                       OperationTypeCoinLockup,
+		PrevTransactorBalanceEntry: prevTransactorBalanceEntry,
+		PrevLockedBalanceEntry:     &previousLockedBalanceEntry,
+	})
+
+	// Construct UtxoOps in the event this transaction is reverted.
+	return 0, 0, utxoOpsForTxn, nil
+}
+
+func CalculateLockupYield(
+	principal *uint256.Int,
+	apyYieldBasisPoints *uint256.Int,
+	durationNanoSecs *uint256.Int,
+) (*uint256.Int, error) {
+	// Note: We could compute either simple or compounding interest. While compounding interest is ideal from an
+	// application perspective, it becomes incredibly difficult to implement from a numerical perspective.
+	// This is because compound interest requires fractional exponents for computing the yield.
+	// Determining overflow and preventing excessive money-printers becomes tricky in the compound interest case.
+	// For this reason, we opt to use simple interest.
+	//
+	// Simple interest formula:
+	// yield = principal * apy_yield * time_in_years
+	//
+	// Notice this formula makes detecting computational overflow trivial by utilizing the DeSo SafeUint256 library.
+
+	// Compute the denominators from the nanosecond to year conversion and the basis point computation.
+	denominators, err := SafeUint256().Mul(
+		uint256.NewInt().SetUint64(_nanoSecsPerYear),
+		uint256.NewInt().SetUint64(10000))
+	if err != nil {
+		return nil,
+			errors.Wrap(RuleErrorCoinLockupCoinYieldOverflow, "CalculateLockupYield (nanoSecsPerYear * 10000)")
+	}
+
+	// Compute the numerators from the principal, apy yield, and time in nanoseconds.
+ numerators, err := SafeUint256().Mul(principal, apyYieldBasisPoints) + if err != nil { + return nil, + errors.Wrap(RuleErrorCoinLockupCoinYieldOverflow, "CalculateLockupYield (principal * yield)") + } + numerators, err = SafeUint256().Mul(numerators, durationNanoSecs) + if err != nil { + return nil, + errors.Wrap(RuleErrorCoinLockupCoinYieldOverflow, "CalculateLockupYield ((principal * yield) * duration)") + } + + // Compute the yield for the transaction. + yield, err := SafeUint256().Div(numerators, denominators) + if err != nil { + return nil, + errors.Wrap(err, "CalculateLockupYield (numerator / denominator)") + } + + return yield, nil +} + +func (bav *UtxoView) _disconnectCoinLockup( + operationType OperationType, + currentTxn *MsgDeSoTxn, + txnHash *BlockHash, + utxoOpsForTxn []*UtxoOperation, + blockHeight uint32) error { + + if len(utxoOpsForTxn) == 0 { + return fmt.Errorf("_disconnectCoinLockup: utxoOperations are missing") + } + operationIndex := len(utxoOpsForTxn) - 1 + + // Verify the last operation as being a CoinLockup operation. + if utxoOpsForTxn[operationIndex].Type != OperationTypeCoinLockup { + return fmt.Errorf("_disconnectCoinLockup: Trying to revert "+ + "OperationTypeCoinLockup but found type %v", utxoOpsForTxn[operationIndex].Type) + } + + // Sanity check the CoinLockup operation exists. + operationData := utxoOpsForTxn[operationIndex] + if operationData.PrevLockedBalanceEntry == nil || operationData.PrevLockedBalanceEntry.isDeleted { + return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeCoinLockup " + + "but found nil or deleted previous locked balance entry") + } + operationIndex-- + if operationIndex < 0 { + return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeCoinLockup " + + "but malformed utxoOpsForTxn") + } + + // Sanity check the data within the CoinLockup. Reverting a lockup should not result in more coins. 
+ lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + operationData.PrevLockedBalanceEntry.HODLerPKID, operationData.PrevLockedBalanceEntry.ProfilePKID, + operationData.PrevLockedBalanceEntry.ExpirationTimestampNanoSecs) + if lockedBalanceEntry.BalanceBaseUnits.Lt(&operationData.PrevLockedBalanceEntry.BalanceBaseUnits) { + return fmt.Errorf("_disconnectCoinLockup: Reversion of coin lockup would result in " + + "more coins in the lockup") + } + + // Reset the transactor's LockedBalanceEntry to what it was previously. + bav._setLockedBalanceEntry(operationData.PrevLockedBalanceEntry) + + // Depending on whether the lockup dealt with DeSo, we should have either a UtxoOp or a PrevTransactorBalanceEntry. + isDeSoLockup := operationData.PrevLockedBalanceEntry.ProfilePKID.IsZeroPKID() + if isDeSoLockup { + // Revert the spent DeSo. + operationData = utxoOpsForTxn[operationIndex] + if operationData.Type != OperationTypeSpendBalance { + return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeSpendBalance "+ + "but found type %v", operationData.Type) + } + if !bytes.Equal(operationData.BalancePublicKey, currentTxn.PublicKey) { + return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeSpendBalance but found " + + "mismatched public keys") + } + err := bav._unSpendBalance(operationData.BalanceAmountNanos, currentTxn.PublicKey) + if err != nil { + return errors.Wrapf(err, "_disconnectCoinLockup: Problem unSpending balance of %v "+ + "for the transactor", operationData.BalanceAmountNanos) + } + operationIndex-- + if operationIndex < 0 { + return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeDAOCoinLockup " + + "but malformed utxoOpsForTxn") + } + } else { + // Revert the transactor's DAO coin balance. + bav._setBalanceEntryMappings(operationData.PrevTransactorBalanceEntry, true) + } + + // By here we only need to disconnect the basic transfer associated with the transaction. 
+ basicTransferOps := utxoOpsForTxn[:operationIndex] + err := bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) + if err != nil { + return errors.Wrapf(err, "_disconnectCoinLockup") + } + return nil +} + // // DB FLUSHES // diff --git a/lib/errors.go b/lib/errors.go index 70d33128f..01904b921 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -486,6 +486,21 @@ const ( // EpochCompleteHook RuleErrorBlockTimestampBeforeEpochStartTimestamp RuleError = "RuleErrorBlockTimestampBeforeEpochStartTimestamp" + // Lockups + RuleErrorLockupTxnBeforeBlockHeight RuleError = "RuleErrorLockupTxnBeforeBlockHeight" + RuleErrorCoinLockupInvalidTransactorPKID RuleError = "RuleErrorCoinLockupInvalidTransactorPKID" + RuleErrorCoinLockupInvalidLockupDuration RuleError = "RuleErrorCoinLockupInvalidLockupDuration" + RuleErrorCoinLockupBalanceEntryDoesNotExist RuleError = "RuleErrorCoinLockupBalanceEntryDoesNotExist" + RuleErrorCoinLockupInsufficientCoins RuleError = "RuleErrorCoinLockupInsufficientCoins" + RuleErrorCoinLockupInsufficientDeSo RuleError = "RuleErrorCoinLockupInsufficientDeSo" + RuleErrorCoinLockupInvalidProfilePubKey RuleError = "RuleErrorCoinLockupInvalidProfilePubKey" + RuleErrorCoinLockupOnNonExistentProfile RuleError = "RuleErrorCoinLockupOnNonExistentProfile" + RuleErrorCoinLockupOfAmountZero RuleError = "RuleErrorCoinLockupOfAmountZero" + RuleErrorCoinLockupExcessiveDeSoLockup RuleError = "RuleErrorCoinLockupExcessiveDeSoLockup" + RuleErrorCoinLockupNonExistentProfile RuleError = "RuleErrorCoinLockupNonExistentProfile" + RuleErrorCoinLockupCoinYieldOverflow RuleError = "RuleErrorCoinLockupCoinYieldOverflow" + RuleErrorCoinLockupYieldCausesOverflow RuleError = "RuleErrorCoinLockupYieldCausesOverflow" + HeaderErrorDuplicateHeader RuleError = "HeaderErrorDuplicateHeader" HeaderErrorNilPrevHash RuleError = "HeaderErrorNilPrevHash" HeaderErrorInvalidParent RuleError = "HeaderErrorInvalidParent" From cb0b7c5a6e8c1fb255e8c80790310dc84bd2423f Mon 
Sep 17 00:00:00 2001 From: Jon Pollock Date: Thu, 21 Sep 2023 10:15:45 -0700 Subject: [PATCH 222/762] Update Coin Lockup Params Connect/Disconnect Logic --- lib/block_view_lockups.go | 235 ++++++++++++++++++++++++++++++++++++++ lib/errors.go | 32 +++--- 2 files changed, 254 insertions(+), 13 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 882b7ec55..22e1bf176 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1011,6 +1011,241 @@ func (bav *UtxoView) _disconnectCoinLockup( return nil } +// +// UpdateCoinLockupParams Transaction Logic +// + +func (bav *UtxoView) _connectUpdateCoinLockupParams( + txn *MsgDeSoTxn, + txHash *BlockHash, + blockHeight uint32, + verifySignatures bool, +) (_totalInput uint64, + _totalOutput uint64, + _utxoOps []*UtxoOperation, + _err error) { + var utxoOpsForTxn []*UtxoOperation + + // Validate the starting block height. + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || + blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { + return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectDAOCoinLockup") + } + + // Validate the txn TxnType. + if txn.TxnMeta.GetTxnType() != TxnTypeUpdateCoinLockupParams { + return 0, 0, nil, fmt.Errorf("_connectUpdateCoinLockupParams: "+ + "called with bad TxnType %s", txn.TxnMeta.GetTxnType().String()) + } + + // Try connecting the basic transfer without considering transaction metadata. + _, _, utxoOpsForBasicTransfer, err := bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectUpdateCoinLockupParams") + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOpsForBasicTransfer...) + + // Grab the txn metadata. + txMeta := txn.TxnMeta.(*UpdateCoinLockupParamsMetadata) + + // Get the profilePKID from the transactor public key. 
+	var profilePKID *PKID
+	_, updaterIsParamUpdater := GetParamUpdaterPublicKeys(blockHeight, bav.Params)[MakePkMapKey(txn.PublicKey)]
+	if updaterIsParamUpdater {
+		profilePKID = ZeroPKID.NewPKID()
+	} else {
+		profilePKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey)
+		if profilePKIDEntry == nil || profilePKIDEntry.isDeleted {
+			return 0, 0, nil, errors.Wrap(RuleErrorUpdateCoinLockupParamsOnInvalidPKID,
+				"_connectUpdateCoinLockupParams")
+		}
+		profilePKID = profilePKIDEntry.PKID
+	}
+
+	// Sanity check the lockup duration as valid.
+	if txMeta.LockupYieldDurationNanoSecs < 0 {
+		return 0, 0, nil, errors.Wrapf(RuleErrorUpdateCoinLockupParamsNegativeDuration,
+			"_connectUpdateCoinLockupParams")
+	}
+
+	// Fetch the previous yield curve point associated with this <ProfilePKID, LockupDurationNanoSecs> pair.
+	prevLockupYieldCurvePoint :=
+		bav.GetYieldCurvePointByProfilePKIDAndDurationNanoSecs(profilePKID, txMeta.LockupYieldDurationNanoSecs)
+
+	// Check if a yield curve point is being added.
+	if !txMeta.RemoveYieldCurvePoint && txMeta.LockupYieldDurationNanoSecs > 0 {
+		// NOTE: During the view flush, any comparable LockupYieldCurvePoint with the unique
+		// <ProfilePKID, LockupDurationNanoSecs> pair will be deleted prior to this new
+		// point being added. Above we saved the previous LockupYieldCurvePoint
+		// in the event this is reverted.
+		bav._setLockupYieldCurvePoint(&LockupYieldCurvePoint{
+			ProfilePKID:               profilePKID,
+			LockupDurationNanoSecs:    txMeta.LockupYieldDurationNanoSecs,
+			LockupYieldAPYBasisPoints: txMeta.LockupYieldAPYBasisPoints,
+		})
+	}
+
+	// Check if a yield curve point is being removed.
+	if txMeta.RemoveYieldCurvePoint && txMeta.LockupYieldDurationNanoSecs > 0 {
+		// Check that we're not deleting a point which doesn't exist. This ensures that disconnects function properly,
+		// as well as ensures there's no wasteful "no-ops" executed.
+ if prevLockupYieldCurvePoint == nil { + return 0, 0, nil, + errors.Wrapf(RuleErrorUpdateCoinLockupParamsDeletingNonExistentPoint, "_connectUpdateCoinLockupParams") + } + + // NOTE: The "LockupYieldAPYBasisPoints" field is effectively irrelevant here. + // The DB operations will seek to the unique + // pair and delete it during the view flush. The "isDeleted" field ensures + // nothing else is put in its place. + bav._deleteLockupYieldCurvePoint(&LockupYieldCurvePoint{ + ProfilePKID: profilePKID, + LockupDurationNanoSecs: txMeta.LockupYieldDurationNanoSecs, + }) + } + + // Check if we're updating transfer restriction. + var prevLockupTransferRestriction TransferRestrictionStatus + if txMeta.NewLockupTransferRestrictions { + // Fetch the profile entry and LockupTransferRestriction status. + profileEntry := bav.GetProfileEntryForPKID(profilePKID) + if profileEntry == nil || profileEntry.isDeleted { + return 0, 0, nil, + errors.Wrapf(RuleErrorUpdateCoinLockupParamsUpdatingNonExistentProfile, "_connectUpdateCoinLockupParams") + } + + // Store a copy of the previous LockupTransferRestrictionStatus for easy transaction disconnect. + prevLockupTransferRestriction = profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus + + // Ensure we're not updating a permanent transfer restriction. + if prevLockupTransferRestriction == TransferRestrictionStatusPermanentlyUnrestricted { + return 0, 0, nil, errors.Wrapf( + RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction, "_connectUpdateCoinLockupParams") + } + + // Check that the new transfer restrictions are valid. 
+ if !(txMeta.LockupTransferRestrictionStatus == TransferRestrictionStatusUnrestricted) && + !(txMeta.LockupTransferRestrictionStatus == TransferRestrictionStatusProfileOwnerOnly) && + !(txMeta.LockupTransferRestrictionStatus == TransferRestrictionStatusDAOMembersOnly) && + !(txMeta.LockupTransferRestrictionStatus == TransferRestrictionStatusPermanentlyUnrestricted) { + return 0, 0, nil, + errors.Wrapf(RuleErrorUpdateCoinLockupParamsInvalidRestrictions, "_connectUpdateCoinLockupParams") + } + + // Update the transfer restrictions. + profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus = txMeta.LockupTransferRestrictionStatus + bav._setProfileEntryMappings(profileEntry) + } + + // Add a UtxoOperation for easy reversion during disconnect. + utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + Type: OperationTypeUpdateCoinLockupParams, + PrevLockupYieldCurvePoint: prevLockupYieldCurvePoint, + PrevLockupTransferRestriction: prevLockupTransferRestriction, + }) + + return 0, 0, utxoOpsForTxn, nil +} + +func (bav *UtxoView) _disconnectUpdateCoinLockupParams( + operationType OperationType, + currentTxn *MsgDeSoTxn, + txnHash *BlockHash, + utxoOpsForTxn []*UtxoOperation, + blockHeight uint32) error { + + if len(utxoOpsForTxn) == 0 { + return fmt.Errorf("_disconnectUpdateCoinLockupParams: utxoOperations are missing") + } + operationIndex := len(utxoOpsForTxn) - 1 + + // Verify the last operation as being a UpdateCoinLockupParams operation. + if utxoOpsForTxn[operationIndex].Type != OperationTypeUpdateCoinLockupParams { + return fmt.Errorf("_disconnectUpdateCoinLockupParams: Trying to revert "+ + "OperationTypeUpdateCoinLockupParams but found type %v", utxoOpsForTxn[operationIndex].Type) + } + + // Fetch the UpdateCoinLockupParams operation. + operationData := utxoOpsForTxn[operationIndex] + + // Grab the txn metadata. + txMeta := currentTxn.TxnMeta.(*UpdateCoinLockupParamsMetadata) + + // Fetch the profilePKID for the transactor. 
+ var profilePKID *PKID + _, updaterIsParamUpdater := GetParamUpdaterPublicKeys(blockHeight, bav.Params)[MakePkMapKey(currentTxn.PublicKey)] + if updaterIsParamUpdater { + profilePKID = ZeroPKID.NewPKID() + } else { + profilePKIDEntry := bav.GetPKIDForPublicKey(currentTxn.PublicKey) + if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { + return errors.Wrap(RuleErrorUpdateCoinLockupParamsOnInvalidPKID, + "_connectUpdateCoinLockupParams") + } + profilePKID = profilePKIDEntry.PKID + } + + // Check if the transaction added a yield curve point. If it did, we restore the previous point. + // If the previous point is nil meaning this point didn't have a previous, then we simply delete the current point. + if !txMeta.RemoveYieldCurvePoint && txMeta.LockupYieldDurationNanoSecs > 0 { + if operationData.PrevLockupYieldCurvePoint == nil { + bav._deleteLockupYieldCurvePoint(&LockupYieldCurvePoint{ + ProfilePKID: profilePKID, + LockupDurationNanoSecs: txMeta.LockupYieldDurationNanoSecs, + }) + } else { + bav._setLockupYieldCurvePoint(&LockupYieldCurvePoint{ + ProfilePKID: profilePKID, + LockupDurationNanoSecs: operationData.PrevLockupYieldCurvePoint.LockupDurationNanoSecs, + LockupYieldAPYBasisPoints: operationData.PrevLockupYieldCurvePoint.LockupYieldAPYBasisPoints, + }) + } + } + + // Check if the transaction deleted a yield curve point. If it did, we add back the previous point. + // If the previous point is nil, we throw an error. This shouldn't be possible. 
+ if txMeta.RemoveYieldCurvePoint && txMeta.LockupYieldDurationNanoSecs > 0 { + if operationData.PrevLockupYieldCurvePoint == nil { + return fmt.Errorf("_connectUpdateCoinLockupParams: trying to revert point deletion " + + "but found nil previous yield curve point; this shouldn't be possible") + } + bav._setLockupYieldCurvePoint(&LockupYieldCurvePoint{ + ProfilePKID: profilePKID, + LockupDurationNanoSecs: operationData.PrevLockupYieldCurvePoint.LockupDurationNanoSecs, + LockupYieldAPYBasisPoints: operationData.PrevLockupYieldCurvePoint.LockupYieldAPYBasisPoints, + }) + } + + // Check if the transaction updated transfer restrictions. If it did, we reset the previous transfer restrictions. + if txMeta.NewLockupTransferRestrictions { + // Fetch the profile entry and LockupTransferRestriction status. + profileEntry := bav.GetProfileEntryForPKID(profilePKID) + if profileEntry == nil || profileEntry.isDeleted { + return fmt.Errorf("_connectUpdateCoinLockupParams: Trying to revert lockup transfer restriction " + + "update but found nil profile entry; this shouldn't be possible") + } + + // Update the transfer restrictions. + profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus = operationData.PrevLockupTransferRestriction + bav._setProfileEntryMappings(profileEntry) + } + + // Decrement the operationIndex. We expect to find the basic transfer UtxoOps next. + operationIndex-- + if operationIndex < 0 { + return fmt.Errorf("_disconnectUpdateCoinLockupParams: Trying to revert OperationTypeUpdateCoinLockupParams " + + "but found malformed utxoOpsForTxn") + } + + // By here we only need to disconnect the basic transfer associated with the transaction. 
+ basicTransferOps := utxoOpsForTxn[:operationIndex] + err := bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) + if err != nil { + return errors.Wrapf(err, "_disconnectUpdateCoinLockupParams") + } + return nil +} + // // DB FLUSHES // diff --git a/lib/errors.go b/lib/errors.go index 01904b921..e433ba529 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -487,19 +487,25 @@ const ( RuleErrorBlockTimestampBeforeEpochStartTimestamp RuleError = "RuleErrorBlockTimestampBeforeEpochStartTimestamp" // Lockups - RuleErrorLockupTxnBeforeBlockHeight RuleError = "RuleErrorLockupTxnBeforeBlockHeight" - RuleErrorCoinLockupInvalidTransactorPKID RuleError = "RuleErrorCoinLockupInvalidTransactorPKID" - RuleErrorCoinLockupInvalidLockupDuration RuleError = "RuleErrorCoinLockupInvalidLockupDuration" - RuleErrorCoinLockupBalanceEntryDoesNotExist RuleError = "RuleErrorCoinLockupBalanceEntryDoesNotExist" - RuleErrorCoinLockupInsufficientCoins RuleError = "RuleErrorCoinLockupInsufficientCoins" - RuleErrorCoinLockupInsufficientDeSo RuleError = "RuleErrorCoinLockupInsufficientDeSo" - RuleErrorCoinLockupInvalidProfilePubKey RuleError = "RuleErrorCoinLockupInvalidProfilePubKey" - RuleErrorCoinLockupOnNonExistentProfile RuleError = "RuleErrorCoinLockupOnNonExistentProfile" - RuleErrorCoinLockupOfAmountZero RuleError = "RuleErrorCoinLockupOfAmountZero" - RuleErrorCoinLockupExcessiveDeSoLockup RuleError = "RuleErrorCoinLockupExcessiveDeSoLockup" - RuleErrorCoinLockupNonExistentProfile RuleError = "RuleErrorCoinLockupNonExistentProfile" - RuleErrorCoinLockupCoinYieldOverflow RuleError = "RuleErrorCoinLockupCoinYieldOverflow" - RuleErrorCoinLockupYieldCausesOverflow RuleError = "RuleErrorCoinLockupYieldCausesOverflow" + RuleErrorLockupTxnBeforeBlockHeight RuleError = "RuleErrorLockupTxnBeforeBlockHeight" + RuleErrorCoinLockupInvalidTransactorPKID RuleError = "RuleErrorCoinLockupInvalidTransactorPKID" + RuleErrorCoinLockupInvalidLockupDuration RuleError = 
"RuleErrorCoinLockupInvalidLockupDuration" + RuleErrorCoinLockupBalanceEntryDoesNotExist RuleError = "RuleErrorCoinLockupBalanceEntryDoesNotExist" + RuleErrorCoinLockupInsufficientCoins RuleError = "RuleErrorCoinLockupInsufficientCoins" + RuleErrorCoinLockupInsufficientDeSo RuleError = "RuleErrorCoinLockupInsufficientDeSo" + RuleErrorCoinLockupInvalidProfilePubKey RuleError = "RuleErrorCoinLockupInvalidProfilePubKey" + RuleErrorCoinLockupOnNonExistentProfile RuleError = "RuleErrorCoinLockupOnNonExistentProfile" + RuleErrorCoinLockupOfAmountZero RuleError = "RuleErrorCoinLockupOfAmountZero" + RuleErrorCoinLockupExcessiveDeSoLockup RuleError = "RuleErrorCoinLockupExcessiveDeSoLockup" + RuleErrorCoinLockupNonExistentProfile RuleError = "RuleErrorCoinLockupNonExistentProfile" + RuleErrorCoinLockupCoinYieldOverflow RuleError = "RuleErrorCoinLockupCoinYieldOverflow" + RuleErrorCoinLockupYieldCausesOverflow RuleError = "RuleErrorCoinLockupYieldCausesOverflow" + RuleErrorUpdateCoinLockupParamsOnInvalidPKID RuleError = "RuleErrorUpdateCoinLockupParamsOnInvalidPKID" + RuleErrorUpdateCoinLockupParamsInvalidRestrictions RuleError = "RuleErrorUpdateCoinLockupParamsInvalidRestrictions" + RuleErrorUpdateCoinLockupParamsNegativeDuration RuleError = "RuleErrorUpdateCoinLockupParamsInvalidRestrictions" + RuleErrorUpdateCoinLockupParamsDeletingNonExistentPoint RuleError = "RuleErrorUpdateCoinLockupParamsDeletingNonExistentPoint" + RuleErrorUpdateCoinLockupParamsUpdatingNonExistentProfile RuleError = "RuleErrorUpdateCoinLockupParamsUpdatingNonExistentProfile" + RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction RuleError = "RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction" HeaderErrorDuplicateHeader RuleError = "HeaderErrorDuplicateHeader" HeaderErrorNilPrevHash RuleError = "HeaderErrorNilPrevHash" From 78b40ebbb8421d0c68914c40bd1885c6ebd07707 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Thu, 21 Sep 2023 10:17:09 -0700 Subject: [PATCH 223/762] 
Coin Lockup Transfer Connect/Disconnect Logic --- lib/block_view_lockups.go | 195 ++++++++++++++++++++++++++++++++++++++ lib/errors.go | 7 ++ 2 files changed, 202 insertions(+) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 22e1bf176..ccc866b5a 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1246,6 +1246,201 @@ func (bav *UtxoView) _disconnectUpdateCoinLockupParams( return nil } +// +// CoinLockupTransfer Transaction Logic +// + +func (bav *UtxoView) _connectCoinLockupTransfer( + txn *MsgDeSoTxn, + txHash *BlockHash, + blockHeight uint32, + verifySignatures bool, +) (_totalInput uint64, + _totalOutput uint64, + _utxoOps []*UtxoOperation, + _err error) { + var utxoOpsForTxn []*UtxoOperation + + // Validate the starting block height. + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || + blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { + return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectCoinLockupTransfer") + } + + // Validate the txn TxnType. + if txn.TxnMeta.GetTxnType() != TxnTypeCoinLockupTransfer { + return 0, 0, nil, fmt.Errorf( + "_connectCoinLockupTransfer: called with bad TxnType: %s", txn.TxnMeta.GetTxnType().String()) + } + + // Try connecting the basic transfer without considering transaction metadata. + _, _, utxoOpsForBasicTransfer, err := bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) + if err != nil { + return 0, 0, nil, errors.Wrap(err, "_connectCoinLockupTransfer") + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOpsForBasicTransfer...) + + // Grab the txn metadata. + txMeta := txn.TxnMeta.(*CoinLockupTransferMetadata) + + // Validate the transfer amount as non-zero. + if txMeta.LockedCoinsToTransferBaseUnits.IsZero() { + return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupTransferOfAmountZero, + "_connectCoinLockupTransfer") + } + + // If this is a DeSo lockup, ensure the amount is less than 2**64. 
+ if txMeta.ProfilePublicKey.IsZeroPublicKey() { + maxUint64, _ := uint256.FromBig(big.NewInt(0).SetUint64(math.MaxUint64)) + if txMeta.LockedCoinsToTransferBaseUnits.Gt(maxUint64) { + return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupTransferOfDeSoCausesOverflow, + "_connectCoinLockupTransfer") + } + } + + // Fetch PKIDs for the recipient and sender. + senderPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) + senderPKID := senderPKIDEntry.PKID + receiverPKIDEntry := bav.GetPKIDForPublicKey(txMeta.RecipientPublicKey.ToBytes()) + receiverPKID := receiverPKIDEntry.PKID + profilePKIDEntry := bav.GetPKIDForPublicKey(txMeta.ProfilePublicKey.ToBytes()) + profilePKID := profilePKIDEntry.PKID + + // Ensure the sender and receiver are different. + if senderPKID.Eq(receiverPKID) { + return 0, 0, nil, errors.Wrapf(RuleErrorCoinLockupTransferSenderEqualsReceiver, + "_connectCoinLockupTransfer") + } + + // Verify the transfer restrictions attached to the transfer. + profileEntry := bav.GetProfileEntryForPKID(profilePKID) + if profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus == TransferRestrictionStatusProfileOwnerOnly && + !profilePKID.Eq(senderPKID) { + return 0, 0, nil, errors.Wrapf(RuleErrorCoinLockupTransferRestrictedToProfileOwner, + "_connectCoinLockupTransfer") + } + if profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus == TransferRestrictionStatusDAOMembersOnly { + // TODO: Determine if this is desired behavior. We assume the sender must be part of the DAO to have + // transferable coins. It seems weird to tie locked DAO coin transfers to unlocked DAO coin balances. + // An alternative approach is not allow the "TransferRestrictionStatusDAOMembersOnly" restriction. 
+ receiverBalanceEntry := bav.GetBalanceEntry(receiverPKID, profilePKID, true) + if receiverBalanceEntry.BalanceNanos.IsZero() { + return 0, 0, nil, + errors.Wrapf(RuleErrorCoinLockupTransferRestrictedToDAOMembers, "_connectCoinLockupTransfer") + } + } + + // Fetch the sender's balance entries. + senderLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + senderPKID, profilePKID, txMeta.ExpirationTimestampUnixNanoSecs) + prevSenderLockedBalanceEntry := senderLockedBalanceEntry.Copy() + + // Check that the sender's balance entry has sufficient balance. + if txMeta.LockedCoinsToTransferBaseUnits.Gt(&senderLockedBalanceEntry.BalanceBaseUnits) { + return 0, 0, nil, errors.Wrapf(RuleErrorCoinLockupTransferInsufficientBalance, + "_connectCoinLockupTransfer") + } + + // Credit the sender's balance entry. + senderLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt().Sub( + &senderLockedBalanceEntry.BalanceBaseUnits, txMeta.LockedCoinsToTransferBaseUnits) + + // Fetch the recipient's balance entry. + receiverLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + receiverPKID, profilePKID, txMeta.ExpirationTimestampUnixNanoSecs) + prevReceiverLockedBalanceEntry := receiverLockedBalanceEntry + + // Add to the recipient's balance entry, checking for overflow. + newRecipientBalanceBaseUnits, err := SafeUint256().Add(&receiverLockedBalanceEntry.BalanceBaseUnits, + txMeta.LockedCoinsToTransferBaseUnits) + if err != nil { + return 0, 0, nil, errors.Wrapf(RuleErrorCoinLockupTransferBalanceOverflowAtReceiver, + "_connectCoinLockupTransfer") + } + receiverLockedBalanceEntry.BalanceBaseUnits = *newRecipientBalanceBaseUnits + + // Update the balances in the view. + bav._setLockedBalanceEntry(senderLockedBalanceEntry) + bav._setLockedBalanceEntry(receiverLockedBalanceEntry) + + // Create a UtxoOperation for easily disconnecting the transaction. 
+ utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + Type: OperationTypeCoinLockupTransfer, + PrevSenderLockedBalanceEntry: prevSenderLockedBalanceEntry, + PrevReceiverLockedBalanceEntry: prevReceiverLockedBalanceEntry, + }) + + return 0, 0, utxoOpsForTxn, nil +} + +func (bav *UtxoView) _disconnectCoinLockupTransfer( + operationType OperationType, + currentTxn *MsgDeSoTxn, + txnHash *BlockHash, + utxoOpsForTxn []*UtxoOperation, + blockHeight uint32) error { + if len(utxoOpsForTxn) == 0 { + return fmt.Errorf("_disconnectCoinLockupTransfer: utxoOperations are missing") + } + operationIndex := len(utxoOpsForTxn) - 1 + + // Verify the last operation as being a CoinLockupTransfer operation. + if utxoOpsForTxn[operationIndex].Type != OperationTypeCoinLockupTransfer { + return fmt.Errorf("_disconnectDAOCoinLockup: Trying to revert "+ + "OperationTypeCoinLockupTransfer but found type %v", utxoOpsForTxn[operationIndex].Type) + } + + // Sanity check the OperationTypeCoinLockupTransfer exists. + operationData := utxoOpsForTxn[operationIndex] + operationIndex-- + if operationIndex < 0 { + return fmt.Errorf("_disconnectCoinLockupTransfer: Trying to revert OperationTypeCoinLockupTransfer " + + "but malformed utxoOpsForTxn") + } + if operationData.PrevSenderLockedBalanceEntry == nil || operationData.PrevSenderLockedBalanceEntry.isDeleted { + return fmt.Errorf("_disconnectCoinLockupTransfer: Trying to revert OperationTypeCoinLockupTransfer " + + "but found nil or deleted PrevSenderLockedBalanceEntry") + } + if operationData.PrevReceiverLockedBalanceEntry == nil || operationData.PrevReceiverLockedBalanceEntry.isDeleted { + return fmt.Errorf("_disconnectCoinLockupTransfer: Trying to revert OperationTypeCoinLockupTransfer " + + "but found nil or deleted PrevReceiverLockedBalanceEntry") + } + + // Fetch the LockedBalanceEntries in the view. 
+ senderLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + operationData.PrevSenderLockedBalanceEntry.HODLerPKID, + operationData.PrevSenderLockedBalanceEntry.ProfilePKID, + operationData.PrevSenderLockedBalanceEntry.ExpirationTimestampNanoSecs) + receiverLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + operationData.PrevReceiverLockedBalanceEntry.HODLerPKID, + operationData.PrevReceiverLockedBalanceEntry.ProfilePKID, + operationData.PrevReceiverLockedBalanceEntry.ExpirationTimestampNanoSecs) + + // Ensure reverting the transaction won't cause the recipients balances to increase + // or cause the senders balances to decrease. + if operationData.PrevSenderLockedBalanceEntry.BalanceBaseUnits.Lt(&senderLockedBalanceEntry.BalanceBaseUnits) { + return fmt.Errorf("_disconnectCoinLockupTransfer: Reversion of coin lockup transfer would " + + "result in less coins for sender") + } + if operationData.PrevReceiverLockedBalanceEntry.BalanceBaseUnits.Gt(&receiverLockedBalanceEntry.BalanceBaseUnits) { + return fmt.Errorf("_disconnectCoinLockupTransfer: Reversion of coin lockup transfer would " + + "result in more coins for receiver") + } + + // Set the balance entry mappings. + bav._setLockedBalanceEntry(operationData.PrevSenderLockedBalanceEntry) + bav._setLockedBalanceEntry(operationData.PrevReceiverLockedBalanceEntry) + + // By here we only need to disconnect the basic transfer associated with the transaction. 
+ basicTransferOps := utxoOpsForTxn[:operationIndex] + err := bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) + if err != nil { + return errors.Wrapf(err, "_disconnectCoinLockupTransfer") + } + + return nil +} + // // DB FLUSHES // diff --git a/lib/errors.go b/lib/errors.go index e433ba529..16e28c71e 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -500,6 +500,13 @@ const ( RuleErrorCoinLockupNonExistentProfile RuleError = "RuleErrorCoinLockupNonExistentProfile" RuleErrorCoinLockupCoinYieldOverflow RuleError = "RuleErrorCoinLockupCoinYieldOverflow" RuleErrorCoinLockupYieldCausesOverflow RuleError = "RuleErrorCoinLockupYieldCausesOverflow" + RuleErrorCoinLockupTransferOfAmountZero RuleError = "RuleErrorCoinLockupTransferOfAmountZero" + RuleErrorCoinLockupTransferOfDeSoCausesOverflow RuleError = "RuleErrorCoinLockupTransferOfDeSoCausesOverflow" + RuleErrorCoinLockupTransferRestrictedToProfileOwner RuleError = "RuleErrorCoinLockupTransferRestrictedToCreator" + RuleErrorCoinLockupTransferRestrictedToDAOMembers RuleError = "RuleErrorCoinLockupTransferRestrictedToDAOMembers" + RuleErrorCoinLockupTransferSenderEqualsReceiver RuleError = "RuleErrorCoinLockupTransferSenderEqualsReceiver" + RuleErrorCoinLockupTransferInsufficientBalance RuleError = "RuleErrorCoinLockupTransferInsufficientBalance" + RuleErrorCoinLockupTransferBalanceOverflowAtReceiver RuleError = "RuleErrorCoinLockupTransferBalanceOverflowAtReceiver" RuleErrorUpdateCoinLockupParamsOnInvalidPKID RuleError = "RuleErrorUpdateCoinLockupParamsOnInvalidPKID" RuleErrorUpdateCoinLockupParamsInvalidRestrictions RuleError = "RuleErrorUpdateCoinLockupParamsInvalidRestrictions" RuleErrorUpdateCoinLockupParamsNegativeDuration RuleError = "RuleErrorUpdateCoinLockupParamsInvalidRestrictions" From d86e2a1819f2c12ed28e185935a5dd4f1ab3f9e5 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Thu, 21 Sep 2023 10:17:45 -0700 Subject: [PATCH 224/762] Coin Unlock Connect/Disconnect Logic --- 
lib/block_view_lockups.go | 258 ++++++++++++++++++++++++++++++++++++++ lib/errors.go | 7 ++ 2 files changed, 265 insertions(+) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index ccc866b5a..264b21d5e 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1441,6 +1441,264 @@ func (bav *UtxoView) _disconnectCoinLockupTransfer( return nil } +// +// CoinUnlock Transaction Logic +// + +func (bav *UtxoView) _connectCoinUnlock( + txn *MsgDeSoTxn, txHash *BlockHash, + blockHeight uint32, blockTimestamp int64, + verifySignatures bool) (_totalInput uint64, _totalOutput uint64, _utxoOps []*UtxoOperation, _err error) { + var utxoOpsForTxn []*UtxoOperation + + // Validate the starting block height. + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || + blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { + return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectCoinLockup") + } + + // Validate the txn TxnType. + if txn.TxnMeta.GetTxnType() != TxnTypeCoinUnlock { + return 0, 0, nil, fmt.Errorf( + "_connectCoinUnlock: called with bad TxnType %s", txn.TxnMeta.GetTxnType().String(), + ) + } + + // Try connecting the basic transfer without considering transaction metadata. + _, _, utxoOpsForBasicTransfer, err := bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectCoinUnlock") + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOpsForBasicTransfer...) + + // Grab the txn metadata. + txMeta := txn.TxnMeta.(*CoinUnlockMetadata) + + // Check for a valid profile public key. 
+ if len(txMeta.ProfilePublicKey) != btcec.PubKeyBytesLenCompressed { + return 0, 0, nil, errors.Wrap(RuleErrorDAOCoinInvalidPubKey, + "_connectCoinUnlock") + } + if !txMeta.ProfilePublicKey.IsZeroPublicKey() { + profileEntry := bav.GetProfileEntryForPublicKey(txMeta.ProfilePublicKey.ToBytes()) + if profileEntry == nil || profileEntry.isDeleted { + return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockOnNonExistentProfile, + "_connectCoinUnlock") + } + } + + // Validate the unlock amount as non-zero. This is meant to prevent wasteful "no-op" transactions. + if txMeta.CoinsToUnlockBaseUnits.IsZero() { + return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockOfAmountZero, + "_connectCoinUnlock") + } + + // Ensure the DeSo unlock amount is less than 2**64 (maximum DeSo balance). + if txMeta.ProfilePublicKey.IsZeroPublicKey() && !txMeta.CoinsToUnlockBaseUnits.IsUint64() { + return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockOfAmountZero, + "_connectCoinUnlock") + } + + // Convert the TransactorPublicKey to HODLerPKID + transactorPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) + if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + return 0, 0, nil, errors.Wrapf(RuleErrorCoinUnlockInvalidHODLerPKID, + "_connectCoinUnlock") + } + hodlerPKID := transactorPKIDEntry.PKID + + // Convert the ProfilePublicKey to ProfilePKID. + var profilePKID *PKID + if txMeta.ProfilePublicKey.IsZeroPublicKey() { + profilePKID = ZeroPKID.NewPKID() + } else { + profilePKIDEntry := bav.GetPKIDForPublicKey(txMeta.ProfilePublicKey.ToBytes()) + if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { + return 0, 0, nil, errors.Wrapf(RuleErrorCoinUnlockInvalidProfilePKID, + "_connectCoinUnlock") + } + profilePKID = profilePKIDEntry.PKID + } + + // Retrieve unlockable locked balance entries. 
+ unlockableLockedBalanceEntries, err := bav.GetUnlockableLockedBalanceEntries( + hodlerPKID, profilePKID, blockTimestamp) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectCoinUnlock") + } + if len(unlockableLockedBalanceEntries) == 0 { + return 0, 0, nil, errors.Wrapf(RuleErrorCoinUnlockNoUnlockableCoinsFound, + "_connectCoinUnlock") + } + + // Unlock coins until the amount specified by the transaction is deducted. + var newLockedBalanceEntries []*LockedBalanceEntry + var prevLockedBalanceEntries []*LockedBalanceEntry + remainingUnlockBalance := txMeta.CoinsToUnlockBaseUnits.Clone() + for _, unlockableLockedBalanceEntry := range unlockableLockedBalanceEntries { + newLockedBalanceEntry := unlockableLockedBalanceEntry.Copy() + if newLockedBalanceEntry.BalanceBaseUnits.Gt(remainingUnlockBalance) || + newLockedBalanceEntry.BalanceBaseUnits.Eq(remainingUnlockBalance) { + remainingUnlockBalance = uint256.NewInt() + newLockedBalanceEntry.BalanceBaseUnits = + *uint256.NewInt().Sub(&newLockedBalanceEntry.BalanceBaseUnits, remainingUnlockBalance) + } else { + remainingUnlockBalance = + uint256.NewInt().Sub(remainingUnlockBalance, &newLockedBalanceEntry.BalanceBaseUnits) + newLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt() + } + + // Append the new LockedBalanceEntry and prev in the event we rollback the transaction. + newLockedBalanceEntries = append(newLockedBalanceEntries, newLockedBalanceEntry) + prevLockedBalanceEntries = append(prevLockedBalanceEntries, unlockableLockedBalanceEntry) + + // Break if we've satisfied the unlock amount. + if remainingUnlockBalance.IsZero() { + break + } + } + if !remainingUnlockBalance.IsZero() { + return 0, 0, nil, errors.Wrapf(RuleErrorCoinUnlockInsufficientUnlockableCoins, + "_connectCoinUnlock") + } + + // Update the LockedBalanceEntries. 
+ for _, lockedBalanceEntry := range newLockedBalanceEntries { + bav._setLockedBalanceEntry(lockedBalanceEntry) + } + + // Credit the transactor with either DAO coins or DeSo for this unlock. + var prevTransactorBalanceEntry *BalanceEntry + if profilePKID.IsZeroPKID() { + utxoOp, err := bav._addBalance(txMeta.CoinsToUnlockBaseUnits.Uint64(), txn.PublicKey) + if err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectCoinUnlock: error"+ + "adding CoinToUnlockBaseUnits to the transactor balance: ") + } + utxoOpsForTxn = append(utxoOpsForTxn, utxoOp) + } else { + prevTransactorBalanceEntry = bav.GetBalanceEntry(hodlerPKID, profilePKID, true) + + // Credit the transactor with the unlock amount. + newTransactorBalanceEntry := prevTransactorBalanceEntry.Copy() + newTransactorBalanceNanos, err := SafeUint256().Add(&newTransactorBalanceEntry.BalanceNanos, txMeta.CoinsToUnlockBaseUnits) + if err != nil { + return 0, 0, nil, errors.Wrapf(RuleErrorCoinUnlockCausesBalanceOverflow, + "_connectCoinUnlock") + } + newTransactorBalanceEntry.BalanceNanos = *newTransactorBalanceNanos + bav._setBalanceEntryMappings(newTransactorBalanceEntry, true) + } + + // Create a UtxoOp for the operation. + utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + Type: OperationTypeCoinUnlock, + PrevTransactorBalanceEntry: prevTransactorBalanceEntry, + PrevLockedBalanceEntries: prevLockedBalanceEntries, + }) + + return 0, 0, utxoOpsForTxn, nil +} + +func (bav *UtxoView) _disconnectCoinUnlock( + operationType OperationType, + currentTxn *MsgDeSoTxn, + txnHash *BlockHash, + utxoOpsForTxn []*UtxoOperation, + blockHeight uint32) error { + + if len(utxoOpsForTxn) == 0 { + return fmt.Errorf("_disconnectCoinUnlock: utxoOperations are missing") + } + operationIndex := len(utxoOpsForTxn) - 1 + + // Verify the last operation as being a CoinUnlock operation. 
+ if utxoOpsForTxn[operationIndex].Type != OperationTypeCoinUnlock { + return fmt.Errorf("_disconnectCoinUnlock: Trying to revert "+ + "OperationTypeCoinUnlock but found type %v", utxoOpsForTxn[operationIndex].Type) + } + + // Sanity check the CoinUnlock operation exists. + operationData := utxoOpsForTxn[operationIndex] + if operationData.PrevLockedBalanceEntries == nil || len(operationData.PrevLockedBalanceEntries) == 0 { + return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + + "but found nil or empty previous locked balance entries slice") + } + for _, prevLockedBalanceEntry := range operationData.PrevLockedBalanceEntries { + if prevLockedBalanceEntry == nil || prevLockedBalanceEntry.isDeleted { + return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + + "but found nil or deleted previous locked balance entry") + } + } + operationIndex-- + if operationIndex < 0 { + return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + + "but found malformed utxoOpsForTxn") + } + + // Sanity check the data within the CoinUnlock. + // Reverting an unlock of LockedBalanceEntry should not result in less coins. + for _, prevLockedBalanceEntry := range operationData.PrevLockedBalanceEntries { + lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + prevLockedBalanceEntry.HODLerPKID, + prevLockedBalanceEntry.ProfilePKID, + prevLockedBalanceEntry.ExpirationTimestampNanoSecs) + if prevLockedBalanceEntry.BalanceBaseUnits.Lt(&lockedBalanceEntry.BalanceBaseUnits) { + return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + + "would cause locked balance entry balance to decrease") + } + bav._setLockedBalanceEntry(prevLockedBalanceEntry) + } + + // Reverting the BalanceEntry (if applicable) should not result in more coins. 
+ profilePKID := operationData.PrevLockedBalanceEntries[0].ProfilePKID + hodlerPKID := operationData.PrevLockedBalanceEntries[0].HODLerPKID + if !profilePKID.IsZeroPKID() { + balanceEntry := bav.GetBalanceEntry(hodlerPKID, profilePKID, true) + if operationData.PrevTransactorBalanceEntry == nil || operationData.PrevTransactorBalanceEntry.isDeleted { + return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + + "but found nil or deleted previous balance entry") + } + if operationData.PrevTransactorBalanceEntry.BalanceNanos.Gt(&balanceEntry.BalanceNanos) { + return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + + "would cause balance entry balance to increase") + } + bav._setBalanceEntryMappings(operationData.PrevTransactorBalanceEntry, true) + } + + // Reverting the DeSo addition should not result in more coins. + if profilePKID.IsZeroPKID() { + // Revert the DeSo add. + operationData = utxoOpsForTxn[operationIndex] + if operationData.Type != OperationTypeAddBalance { + return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeAddBalance "+ + "but found type %v", operationData.Type) + } + if !bytes.Equal(operationData.BalancePublicKey, currentTxn.PublicKey) { + return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeAddBalance " + + "but found mismatched public keys") + } + err := bav._unAddBalance(operationData.BalanceAmountNanos, operationData.BalancePublicKey) + if err != nil { + return errors.Wrapf(err, "_disconnectCoinLockup: Problem unAdding balance of %v for the "+ + "transactor", operationData.BalanceAmountNanos) + } + operationIndex-- + if operationIndex < 0 { + return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeCoinUnlock " + + "but malformed utxoOpsForTxn") + } + } + + // By here we only need to disconnect the basic transfer associated with the transaction. 
+ basicTransferOps := utxoOpsForTxn[:operationIndex] + err := bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) + if err != nil { + return errors.Wrapf(err, "_disconnectCoinLockup") + } + return nil +} + // // DB FLUSHES // diff --git a/lib/errors.go b/lib/errors.go index 16e28c71e..0a9794b7b 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -507,6 +507,13 @@ const ( RuleErrorCoinLockupTransferSenderEqualsReceiver RuleError = "RuleErrorCoinLockupTransferSenderEqualsReceiver" RuleErrorCoinLockupTransferInsufficientBalance RuleError = "RuleErrorCoinLockupTransferInsufficientBalance" RuleErrorCoinLockupTransferBalanceOverflowAtReceiver RuleError = "RuleErrorCoinLockupTransferBalanceOverflowAtReceiver" + RuleErrorCoinUnlockOnNonExistentProfile RuleError = "RuleErrorCoinUnlockOnNonExistentProfile" + RuleErrorCoinUnlockOfAmountZero RuleError = "RuleErrorCoinUnlockOfAmountZero" + RuleErrorCoinUnlockInvalidHODLerPKID RuleError = "RuleErrorCoinUnlockInvalidHODLerPKID" + RuleErrorCoinUnlockInvalidProfilePKID RuleError = "RuleErrorCoinUnlockInvalidProfilePKID" + RuleErrorCoinUnlockNoUnlockableCoinsFound RuleError = "RuleErrorCoinUnlockNoUnlockableCoinsFound" + RuleErrorCoinUnlockInsufficientUnlockableCoins RuleError = "RuleErrorCoinUnlockInsufficientUnlockableCoins" + RuleErrorCoinUnlockCausesBalanceOverflow RuleError = "RuleErrorCoinUnlockCausesBalanceOverflow" RuleErrorUpdateCoinLockupParamsOnInvalidPKID RuleError = "RuleErrorUpdateCoinLockupParamsOnInvalidPKID" RuleErrorUpdateCoinLockupParamsInvalidRestrictions RuleError = "RuleErrorUpdateCoinLockupParamsInvalidRestrictions" RuleErrorUpdateCoinLockupParamsNegativeDuration RuleError = "RuleErrorUpdateCoinLockupParamsInvalidRestrictions" From a90867ef6e919a7fd9cea9689bbe60d650fb67a2 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 29 Sep 2023 14:04:24 -0700 Subject: [PATCH 225/762] Move from ExpirationTimestamp... to UnlockTimestamp... 
--- lib/block_view.go | 4 +-- lib/block_view_lockups.go | 72 +++++++++++++++++++-------------------- lib/db_utils.go | 28 +++++++-------- 3 files changed, 52 insertions(+), 52 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 0c1abb4f6..14738dade 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3491,14 +3491,14 @@ func (bav *UtxoView) _connectTransaction(txn *MsgDeSoTxn, txHash *BlockHash, totalInput, totalOutput, utxoOpsForTxn, err = bav._connectUnjailValidator(txn, txHash, blockHeight, verifySignatures) case TxnTypeCoinLockup: - // TODO: Once merged, update blockTimestamp parameter + // FIXME: Once merged, update blockTimestamp parameter totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinLockup(txn, txHash, blockHeight, 0, verifySignatures) case TxnTypeUpdateCoinLockupParams: totalInput, totalOutput, utxoOpsForTxn, err = bav._connectUpdateCoinLockupParams(txn, txHash, blockHeight, verifySignatures) case TxnTypeCoinLockupTransfer: totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinLockupTransfer(txn, txHash, blockHeight, verifySignatures) case TxnTypeCoinUnlock: - // TODO: Once merged, update blockTimestamp parameter + // FIXME: Once merged, update blockTimestamp parameter totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinUnlock(txn, txHash, blockHeight, 0, verifySignatures) default: diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 264b21d5e..087130088 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -18,26 +18,26 @@ import ( // type LockedBalanceEntry struct { - HODLerPKID *PKID - ProfilePKID *PKID - ExpirationTimestampNanoSecs int64 - BalanceBaseUnits uint256.Int - isDeleted bool + HODLerPKID *PKID + ProfilePKID *PKID + UnlockTimestampNanoSecs int64 + BalanceBaseUnits uint256.Int + isDeleted bool } type LockedBalanceEntryKey struct { - HODLerPKID PKID - ProfilePKID PKID - ExpirationTimestampUnixNanoSecs int64 + HODLerPKID PKID + ProfilePKID PKID + 
UnlockTimestampNanoSecs int64 } func (lockedBalanceEntry *LockedBalanceEntry) Copy() *LockedBalanceEntry { return &LockedBalanceEntry{ - HODLerPKID: lockedBalanceEntry.HODLerPKID.NewPKID(), - ProfilePKID: lockedBalanceEntry.ProfilePKID.NewPKID(), - ExpirationTimestampNanoSecs: lockedBalanceEntry.ExpirationTimestampNanoSecs, - BalanceBaseUnits: lockedBalanceEntry.BalanceBaseUnits, - isDeleted: lockedBalanceEntry.isDeleted, + HODLerPKID: lockedBalanceEntry.HODLerPKID.NewPKID(), + ProfilePKID: lockedBalanceEntry.ProfilePKID.NewPKID(), + UnlockTimestampNanoSecs: lockedBalanceEntry.UnlockTimestampNanoSecs, + BalanceBaseUnits: lockedBalanceEntry.BalanceBaseUnits, + isDeleted: lockedBalanceEntry.isDeleted, } } @@ -47,9 +47,9 @@ func (lockedBalanceEntry *LockedBalanceEntry) Eq(other *LockedBalanceEntry) bool func (lockedBalanceEntry *LockedBalanceEntry) ToMapKey() LockedBalanceEntryKey { return LockedBalanceEntryKey{ - HODLerPKID: *lockedBalanceEntry.HODLerPKID, - ProfilePKID: *lockedBalanceEntry.ProfilePKID, - ExpirationTimestampUnixNanoSecs: lockedBalanceEntry.ExpirationTimestampNanoSecs, + HODLerPKID: *lockedBalanceEntry.HODLerPKID, + ProfilePKID: *lockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: lockedBalanceEntry.UnlockTimestampNanoSecs, } } @@ -59,7 +59,7 @@ func (lockedBalanceEntry *LockedBalanceEntry) RawEncodeWithoutMetadata(blockHeig var data []byte data = append(data, EncodeToBytes(blockHeight, lockedBalanceEntry.HODLerPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, lockedBalanceEntry.ProfilePKID, skipMetadata...)...) - data = append(data, UintToBuf(uint64(lockedBalanceEntry.ExpirationTimestampNanoSecs))...) + data = append(data, UintToBuf(uint64(lockedBalanceEntry.UnlockTimestampNanoSecs))...) data = append(data, VariableEncodeUint256(&lockedBalanceEntry.BalanceBaseUnits)...) 
return data } @@ -79,12 +79,12 @@ func (lockedBalanceEntry *LockedBalanceEntry) RawDecodeWithoutMetadata(blockHeig return errors.Wrapf(err, "LockedBalanceEntry.Decode: Problem reading ProfilePKID") } - // ExpirationTimestampNanoSecs + // UnlockTimestampNanoSecs uint64ExpirationTimestampUnixNanoSecs, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "LockedBalanceEntry.Decode: Problem reading ExpirationTimestampNanoSecs") + return errors.Wrapf(err, "LockedBalanceEntry.Decode: Problem reading UnlockTimestampNanoSecs") } - lockedBalanceEntry.ExpirationTimestampNanoSecs = int64(uint64ExpirationTimestampUnixNanoSecs) + lockedBalanceEntry.UnlockTimestampNanoSecs = int64(uint64ExpirationTimestampUnixNanoSecs) // BalanceBaseUnits balanceBaseUnits, err := VariableDecodeUint256(rr) @@ -133,9 +133,9 @@ func (bav *UtxoView) GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTime hodlerPKID *PKID, profilePKID *PKID, expirationTimestampNanoSecs int64) (_lockedBalanceEntry *LockedBalanceEntry) { // Create a key associated with the LockedBalanceEntry. lockedBalanceEntryKey := (&LockedBalanceEntry{ - HODLerPKID: hodlerPKID, - ProfilePKID: profilePKID, - ExpirationTimestampNanoSecs: expirationTimestampNanoSecs, + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: expirationTimestampNanoSecs, }).ToMapKey() // Check if the key exists in the view. @@ -188,7 +188,7 @@ func (bav *UtxoView) GetUnlockableLockedBalanceEntries( // Filter to matching LockedBalanceEntries. if !lockedBalanceEntry.HODLerPKID.Eq(hodlerPKID) || !lockedBalanceEntry.ProfilePKID.Eq(profilePKID) || - lockedBalanceEntry.ExpirationTimestampNanoSecs > currentTimestampNanoSecs || + lockedBalanceEntry.UnlockTimestampNanoSecs > currentTimestampNanoSecs || lockedBalanceEntry.BalanceBaseUnits.IsZero() || lockedBalanceEntry.isDeleted { continue @@ -198,8 +198,8 @@ func (bav *UtxoView) GetUnlockableLockedBalanceEntries( // Sort UnlockableLockedBalanceEntries by timestamp ASC. 
sort.Slice(unlockableLockedBalanceEntries, func(ii, jj int) bool { - return unlockableLockedBalanceEntries[ii].ExpirationTimestampNanoSecs < - unlockableLockedBalanceEntries[jj].ExpirationTimestampNanoSecs + return unlockableLockedBalanceEntries[ii].UnlockTimestampNanoSecs < + unlockableLockedBalanceEntries[jj].UnlockTimestampNanoSecs }) return unlockableLockedBalanceEntries, nil } @@ -583,10 +583,10 @@ func (txnData *CoinLockupTransferMetadata) FromBytes(data []byte) error { } txnData.ProfilePublicKey = NewPublicKey(profilePublicKeyBytes) - // ExpirationTimestampNanoSecs + // UnlockTimestampNanoSecs uint64ExpirationTimestampUnixNanoSecs, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "DAOCoinLockupTransferMetadata.FromBytes: Problem reading ExpirationTimestampNanoSecs") + return errors.Wrapf(err, "DAOCoinLockupTransferMetadata.FromBytes: Problem reading UnlockTimestampNanoSecs") } txnData.ExpirationTimestampUnixNanoSecs = int64(uint64ExpirationTimestampUnixNanoSecs) @@ -849,10 +849,10 @@ func (bav *UtxoView) _connectCoinLockup( hodlerPKID, profilePKID, txMeta.UnlockTimestampNanoSecs) if lockedBalanceEntry == nil || lockedBalanceEntry.isDeleted { lockedBalanceEntry = &LockedBalanceEntry{ - HODLerPKID: hodlerPKID, - ProfilePKID: profilePKID, - ExpirationTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), } } previousLockedBalanceEntry := *lockedBalanceEntry @@ -965,7 +965,7 @@ func (bav *UtxoView) _disconnectCoinLockup( // Sanity check the data within the CoinLockup. Reverting a lockup should not result in more coins. 
lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( operationData.PrevLockedBalanceEntry.HODLerPKID, operationData.PrevLockedBalanceEntry.ProfilePKID, - operationData.PrevLockedBalanceEntry.ExpirationTimestampNanoSecs) + operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs) if lockedBalanceEntry.BalanceBaseUnits.Lt(&operationData.PrevLockedBalanceEntry.BalanceBaseUnits) { return fmt.Errorf("_disconnectCoinLockup: Reversion of coin lockup would result in " + "more coins in the lockup") @@ -1410,11 +1410,11 @@ func (bav *UtxoView) _disconnectCoinLockupTransfer( senderLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( operationData.PrevSenderLockedBalanceEntry.HODLerPKID, operationData.PrevSenderLockedBalanceEntry.ProfilePKID, - operationData.PrevSenderLockedBalanceEntry.ExpirationTimestampNanoSecs) + operationData.PrevSenderLockedBalanceEntry.UnlockTimestampNanoSecs) receiverLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( operationData.PrevReceiverLockedBalanceEntry.HODLerPKID, operationData.PrevReceiverLockedBalanceEntry.ProfilePKID, - operationData.PrevReceiverLockedBalanceEntry.ExpirationTimestampNanoSecs) + operationData.PrevReceiverLockedBalanceEntry.UnlockTimestampNanoSecs) // Ensure reverting the transaction won't cause the recipients balances to increase // or cause the senders balances to decrease. 
@@ -1642,7 +1642,7 @@ func (bav *UtxoView) _disconnectCoinUnlock( lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( prevLockedBalanceEntry.HODLerPKID, prevLockedBalanceEntry.ProfilePKID, - prevLockedBalanceEntry.ExpirationTimestampNanoSecs) + prevLockedBalanceEntry.UnlockTimestampNanoSecs) if prevLockedBalanceEntry.BalanceBaseUnits.Lt(&lockedBalanceEntry.BalanceBaseUnits) { return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + "would cause locked balance entry balance to decrease") diff --git a/lib/db_utils.go b/lib/db_utils.go index 348790ea5..646bdb17c 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -10691,7 +10691,7 @@ func _dbKeyForLockedBalanceEntry(lockedBalanceEntry LockedBalanceEntry) []byte { key := append([]byte{}, Prefixes.PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDExpirationTimestampNanoSecs...) key = append(key, lockedBalanceEntry.HODLerPKID[:]...) key = append(key, lockedBalanceEntry.ProfilePKID[:]...) - return append(key, EncodeUint64(uint64(lockedBalanceEntry.ExpirationTimestampNanoSecs))...) + return append(key, EncodeUint64(uint64(lockedBalanceEntry.UnlockTimestampNanoSecs))...) 
} func DBPrefixKeyForLockedBalanceEntryByHODLerPKIDandProfilePKID(lockedBalanceEntry *LockedBalanceEntry) []byte { @@ -10735,7 +10735,7 @@ func DbDeleteLockedBalanceEntryWithTxn(txn *badger.Txn, snap *Snapshot, lockedBa return errors.Wrapf(err, "DbDeleteRepostMappingsWithTxn: Deleting "+ "locked balance entry for HODLer PKID %s, Profile PKID %s, expiration timestamp %d", lockedBalanceEntry.HODLerPKID.ToString(), lockedBalanceEntry.ProfilePKID.ToString(), - lockedBalanceEntry.ExpirationTimestampNanoSecs) + lockedBalanceEntry.UnlockTimestampNanoSecs) } return nil } @@ -10758,17 +10758,17 @@ func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecsW hodlerPKID *PKID, profilePKID *PKID, expirationTimestamp int64) *LockedBalanceEntry { key := _dbKeyForLockedBalanceEntry(LockedBalanceEntry{ - HODLerPKID: hodlerPKID, - ProfilePKID: profilePKID, - ExpirationTimestampNanoSecs: expirationTimestamp, + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: expirationTimestamp, }) lockedBalanceEntryBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { return &LockedBalanceEntry{ - HODLerPKID: hodlerPKID.NewPKID(), - ProfilePKID: profilePKID.NewPKID(), - ExpirationTimestampNanoSecs: expirationTimestamp, - BalanceBaseUnits: *uint256.NewInt(), + HODLerPKID: hodlerPKID.NewPKID(), + ProfilePKID: profilePKID.NewPKID(), + UnlockTimestampNanoSecs: expirationTimestamp, + BalanceBaseUnits: *uint256.NewInt(), } } lockedBalanceEntryObj := &LockedBalanceEntry{} @@ -10802,7 +10802,7 @@ func DBGetUnlockableLockedBalanceEntriesWithTxn( currentTimestampUnixNanoSecs int64, ) ([]*LockedBalanceEntry, error) { // Retrieve all LockedBalanceEntries from db matching hodlerPKID, profilePKID, and - // ExpirationTimestampNanoSecs <= currentTimestampUnixNanoSecs. + // UnlockTimestampNanoSecs <= currentTimestampUnixNanoSecs. 
// NOTE: While ideally we would start with and // seek till , // Badger does not support this functionality as the ValidForPrefix() function @@ -10817,9 +10817,9 @@ func DBGetUnlockableLockedBalanceEntriesWithTxn( // Start at startKey := _dbKeyForLockedBalanceEntry(LockedBalanceEntry{ - HODLerPKID: hodlerPKID, - ProfilePKID: profilePKID, - ExpirationTimestampNanoSecs: currentTimestampUnixNanoSecs, + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: currentTimestampUnixNanoSecs, }) // Valid for prefix @@ -10854,7 +10854,7 @@ func DBGetUnlockableLockedBalanceEntriesWithTxn( "error decoding LockedBalanceEntry: ") } - if lockedBalanceEntry.ExpirationTimestampNanoSecs < currentTimestampUnixNanoSecs { + if lockedBalanceEntry.UnlockTimestampNanoSecs < currentTimestampUnixNanoSecs { lockedBalanceEntries = append(lockedBalanceEntries) } } From a3fc73f7fee5471888cdf3368f558e5244dd251a Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 29 Sep 2023 14:27:05 -0700 Subject: [PATCH 226/762] Change function signatures from ExpirationTimestamp to UnlockTimestamp --- lib/block_view_lockups.go | 63 +++++++++++++++++++++------------------ lib/db_utils.go | 26 ++++++++-------- 2 files changed, 47 insertions(+), 42 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 087130088..37a157618 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -80,11 +80,11 @@ func (lockedBalanceEntry *LockedBalanceEntry) RawDecodeWithoutMetadata(blockHeig } // UnlockTimestampNanoSecs - uint64ExpirationTimestampUnixNanoSecs, err := ReadUvarint(rr) + uint64UnlockTimestampUnixNanoSecs, err := ReadUvarint(rr) if err != nil { return errors.Wrapf(err, "LockedBalanceEntry.Decode: Problem reading UnlockTimestampNanoSecs") } - lockedBalanceEntry.UnlockTimestampNanoSecs = int64(uint64ExpirationTimestampUnixNanoSecs) + lockedBalanceEntry.UnlockTimestampNanoSecs = int64(uint64UnlockTimestampUnixNanoSecs) // BalanceBaseUnits 
balanceBaseUnits, err := VariableDecodeUint256(rr) @@ -120,22 +120,22 @@ func (bav *UtxoView) _setLockedBalanceEntry(lockedBalanceEntry *LockedBalanceEnt func (bav *UtxoView) _deleteLockedBalanceEntry(lockedBalanceEntry *LockedBalanceEntry) { // Create a tombstone entry. - tombstoneLockedBalanceEntry := *lockedBalanceEntry + tombstoneLockedBalanceEntry := lockedBalanceEntry.Copy() tombstoneLockedBalanceEntry.isDeleted = true // Set the LockupYieldCurvePoint as deleted in the view. - bav._setLockedBalanceEntry(&tombstoneLockedBalanceEntry) + bav._setLockedBalanceEntry(tombstoneLockedBalanceEntry) } // Get Helper Functions for LockedBalanceEntry -func (bav *UtxoView) GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( - hodlerPKID *PKID, profilePKID *PKID, expirationTimestampNanoSecs int64) (_lockedBalanceEntry *LockedBalanceEntry) { +func (bav *UtxoView) GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + hodlerPKID *PKID, profilePKID *PKID, unlockTimestampNanoSecs int64) (_lockedBalanceEntry *LockedBalanceEntry) { // Create a key associated with the LockedBalanceEntry. lockedBalanceEntryKey := (&LockedBalanceEntry{ HODLerPKID: hodlerPKID, ProfilePKID: profilePKID, - UnlockTimestampNanoSecs: expirationTimestampNanoSecs, + UnlockTimestampNanoSecs: unlockTimestampNanoSecs, }).ToMapKey() // Check if the key exists in the view. @@ -145,8 +145,8 @@ func (bav *UtxoView) GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTime } // No mapping exists in the view, check for an entry in the DB. - lockedBalanceEntry := DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( - bav.Handle, bav.Snapshot, hodlerPKID, profilePKID, expirationTimestampNanoSecs) + lockedBalanceEntry := DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + bav.Handle, bav.Snapshot, hodlerPKID, profilePKID, unlockTimestampNanoSecs) // Cache the DB entry in the in-memory map. 
if lockedBalanceEntry != nil { @@ -305,11 +305,11 @@ func (bav *UtxoView) _setLockupYieldCurvePoint(point *LockupYieldCurvePoint) { func (bav *UtxoView) _deleteLockupYieldCurvePoint(point *LockupYieldCurvePoint) { // Create a tombstone entry. - tombstoneLockupYieldCurvePoint := *point + tombstoneLockupYieldCurvePoint := point.Copy() tombstoneLockupYieldCurvePoint.isDeleted = true // Set the LockupYieldCurvePoint as deleted in the view. - bav._setLockupYieldCurvePoint(&tombstoneLockupYieldCurvePoint) + bav._setLockupYieldCurvePoint(tombstoneLockupYieldCurvePoint) } // Get Helper Functions for LockupYieldCurvePoint @@ -350,6 +350,11 @@ func (bav *UtxoView) GetLocalYieldCurvePoints(profilePKID *PKID, lockupDuration // Check the view for yield curve points. if _, pointsInView := bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID]; pointsInView { for _, lockupYieldCurvePoint := range bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID] { + // Ensure the point is not deleted. + if lockupYieldCurvePoint.isDeleted { + continue + } + // Check for nil pointer cases. if lockupYieldCurvePoint.LockupDurationNanoSecs < lockupDuration && leftLockupPoint == nil { leftLockupPoint = lockupYieldCurvePoint @@ -366,7 +371,7 @@ func (bav *UtxoView) GetLocalYieldCurvePoints(profilePKID *PKID, lockupDuration // Check if the point is "more right" than the current right point. 
if lockupYieldCurvePoint.LockupDurationNanoSecs >= lockupDuration && - lockupYieldCurvePoint.LockupDurationNanoSecs < leftLockupPoint.LockupDurationNanoSecs { + lockupYieldCurvePoint.LockupDurationNanoSecs < rightLockupPoint.LockupDurationNanoSecs { rightLockupPoint = lockupYieldCurvePoint.Copy() } } @@ -547,10 +552,10 @@ func (txnData *UpdateCoinLockupParamsMetadata) New() DeSoTxnMetadata { // type CoinLockupTransferMetadata struct { - RecipientPublicKey *PublicKey - ProfilePublicKey *PublicKey - ExpirationTimestampUnixNanoSecs int64 - LockedCoinsToTransferBaseUnits *uint256.Int + RecipientPublicKey *PublicKey + ProfilePublicKey *PublicKey + UnlockTimestampUnixNanoSecs int64 + LockedCoinsToTransferBaseUnits *uint256.Int } func (txnData *CoinLockupTransferMetadata) GetTxnType() TxnType { @@ -561,7 +566,7 @@ func (txnData *CoinLockupTransferMetadata) ToBytes(preSignature bool) ([]byte, e var data []byte data = append(data, EncodeByteArray(txnData.RecipientPublicKey.ToBytes())...) data = append(data, EncodeByteArray(txnData.ProfilePublicKey.ToBytes())...) - data = append(data, UintToBuf(uint64(txnData.ExpirationTimestampUnixNanoSecs))...) + data = append(data, UintToBuf(uint64(txnData.UnlockTimestampUnixNanoSecs))...) data = append(data, VariableEncodeUint256(txnData.LockedCoinsToTransferBaseUnits)...) 
return data, nil } @@ -584,11 +589,11 @@ func (txnData *CoinLockupTransferMetadata) FromBytes(data []byte) error { txnData.ProfilePublicKey = NewPublicKey(profilePublicKeyBytes) // UnlockTimestampNanoSecs - uint64ExpirationTimestampUnixNanoSecs, err := ReadUvarint(rr) + uint64UnlockTimestampUnixNanoSecs, err := ReadUvarint(rr) if err != nil { return errors.Wrapf(err, "DAOCoinLockupTransferMetadata.FromBytes: Problem reading UnlockTimestampNanoSecs") } - txnData.ExpirationTimestampUnixNanoSecs = int64(uint64ExpirationTimestampUnixNanoSecs) + txnData.UnlockTimestampUnixNanoSecs = int64(uint64UnlockTimestampUnixNanoSecs) // LockedDAOCoinToTransferBaseUnits txnData.LockedCoinsToTransferBaseUnits, err = VariableDecodeUint256(rr) @@ -845,7 +850,7 @@ func (bav *UtxoView) _connectCoinLockup( // with the sole intent of saturating the CoinsInCirculationNanos field preventing others from locking up. // For consolidation, we fetch equivalent LockedBalanceEntries. - lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( hodlerPKID, profilePKID, txMeta.UnlockTimestampNanoSecs) if lockedBalanceEntry == nil || lockedBalanceEntry.isDeleted { lockedBalanceEntry = &LockedBalanceEntry{ @@ -890,7 +895,7 @@ func CalculateLockupYield( apyYieldBasisPoints *uint256.Int, durationNanoSecs *uint256.Int, ) (*uint256.Int, error) { - // Note: We could compute either simple of compounding interest. While compounding interest is ideal from an + // Note: We could compute either simple or compounding interest. While compounding interest is ideal from an // application perspective, it becomes incredibly difficult to implement from a numerical perspective. // This is because compound interest requires fractional exponents rather for computing the yield. // Determining overflow and preventing excessive money-printers becomes tricky in the compound interest case. 
@@ -963,7 +968,7 @@ func (bav *UtxoView) _disconnectCoinLockup( } // Sanity check the data within the CoinLockup. Reverting a lockup should not result in more coins. - lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( operationData.PrevLockedBalanceEntry.HODLerPKID, operationData.PrevLockedBalanceEntry.ProfilePKID, operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs) if lockedBalanceEntry.BalanceBaseUnits.Lt(&operationData.PrevLockedBalanceEntry.BalanceBaseUnits) { @@ -1331,8 +1336,8 @@ func (bav *UtxoView) _connectCoinLockupTransfer( } // Fetch the sender's balance entries. - senderLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( - senderPKID, profilePKID, txMeta.ExpirationTimestampUnixNanoSecs) + senderLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + senderPKID, profilePKID, txMeta.UnlockTimestampUnixNanoSecs) prevSenderLockedBalanceEntry := senderLockedBalanceEntry.Copy() // Check that the sender's balance entry has sufficient balance. @@ -1346,8 +1351,8 @@ func (bav *UtxoView) _connectCoinLockupTransfer( &senderLockedBalanceEntry.BalanceBaseUnits, txMeta.LockedCoinsToTransferBaseUnits) // Fetch the recipient's balance entry. - receiverLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( - receiverPKID, profilePKID, txMeta.ExpirationTimestampUnixNanoSecs) + receiverLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + receiverPKID, profilePKID, txMeta.UnlockTimestampUnixNanoSecs) prevReceiverLockedBalanceEntry := receiverLockedBalanceEntry // Add to the recipient's balance entry, checking for overflow. 
@@ -1407,11 +1412,11 @@ func (bav *UtxoView) _disconnectCoinLockupTransfer( } // Fetch the LockedBalanceEntries in the view. - senderLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + senderLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( operationData.PrevSenderLockedBalanceEntry.HODLerPKID, operationData.PrevSenderLockedBalanceEntry.ProfilePKID, operationData.PrevSenderLockedBalanceEntry.UnlockTimestampNanoSecs) - receiverLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + receiverLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( operationData.PrevReceiverLockedBalanceEntry.HODLerPKID, operationData.PrevReceiverLockedBalanceEntry.ProfilePKID, operationData.PrevReceiverLockedBalanceEntry.UnlockTimestampNanoSecs) @@ -1639,7 +1644,7 @@ func (bav *UtxoView) _disconnectCoinUnlock( // Sanity check the data within the CoinUnlock. // Reverting an unlock of LockedBalanceEntry should not result in less coins. for _, prevLockedBalanceEntry := range operationData.PrevLockedBalanceEntries { - lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs( + lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( prevLockedBalanceEntry.HODLerPKID, prevLockedBalanceEntry.ProfilePKID, prevLockedBalanceEntry.UnlockTimestampNanoSecs) diff --git a/lib/db_utils.go b/lib/db_utils.go index 646bdb17c..9039bb3ba 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -559,12 +559,12 @@ type DBPrefixes struct { // Note, we parse the ValidatorPKID and StakerPKID from the key. 
PrefixSnapshotStakeToRewardByValidatorAndStaker []byte `prefix_id:"[90]" is_state:"true"` - // PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDExpirationTimestampNanoSecs: + // PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDUnlockTimestampNanoSecs: // Retrieves LockedBalanceEntries that may or may not be claimable for unlock. // LockedBalanceEntries can be retrieved by HodlerPKID and CreatorPKID are have their // corresponding unlock timestamp appended to sort by timestamp. // Prefix, , , -> - PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDExpirationTimestampNanoSecs []byte `prefix_id:"[91]" is_state:"true"` + PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDUnlockTimestampNanoSecs []byte `prefix_id:"[91]" is_state:"true"` // PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs: // Retrieves a LockupYieldCurvePoint. @@ -814,7 +814,7 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotStakeToRewardByValidatorAndStaker) { // prefix_id:"[90]" return true, &StakeEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDExpirationTimestampNanoSecs) { + } else if bytes.Equal(prefix, Prefixes.PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDUnlockTimestampNanoSecs) { // prefix_id:"[91]" return true, &LockedBalanceEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs) { @@ -10688,14 +10688,14 @@ func DBDeletePostAssociationWithTxn(txn *badger.Txn, snap *Snapshot, association // LockedBalanceEntry DB Key Operations func _dbKeyForLockedBalanceEntry(lockedBalanceEntry LockedBalanceEntry) []byte { - key := append([]byte{}, Prefixes.PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDExpirationTimestampNanoSecs...) + key := append([]byte{}, Prefixes.PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDUnlockTimestampNanoSecs...) key = append(key, lockedBalanceEntry.HODLerPKID[:]...) 
key = append(key, lockedBalanceEntry.ProfilePKID[:]...) return append(key, EncodeUint64(uint64(lockedBalanceEntry.UnlockTimestampNanoSecs))...) } func DBPrefixKeyForLockedBalanceEntryByHODLerPKIDandProfilePKID(lockedBalanceEntry *LockedBalanceEntry) []byte { - data := append([]byte{}, Prefixes.PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDExpirationTimestampNanoSecs...) + data := append([]byte{}, Prefixes.PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDUnlockTimestampNanoSecs...) data = append(data, lockedBalanceEntry.HODLerPKID.ToBytes()...) data = append(data, lockedBalanceEntry.ProfilePKID.ToBytes()...) return data @@ -10742,32 +10742,32 @@ func DbDeleteLockedBalanceEntryWithTxn(txn *badger.Txn, snap *Snapshot, lockedBa // LockedBalanceEntry Get Operations (Badger Reads) -func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecs(handle *badger.DB, snap *Snapshot, - hodlerPKID *PKID, profilePKID *PKID, expirationTimestamp int64) *LockedBalanceEntry { +func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + handle *badger.DB, snap *Snapshot, hodlerPKID *PKID, profilePKID *PKID, unlockTimestamp int64) *LockedBalanceEntry { var ret *LockedBalanceEntry handle.View(func(txn *badger.Txn) error { - ret = DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecsWithTxn( - txn, snap, hodlerPKID, profilePKID, expirationTimestamp) + ret = DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsWithTxn( + txn, snap, hodlerPKID, profilePKID, unlockTimestamp) return nil }) return ret } -func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDExpirationTimestampNanoSecsWithTxn(txn *badger.Txn, snap *Snapshot, - hodlerPKID *PKID, profilePKID *PKID, expirationTimestamp int64) *LockedBalanceEntry { +func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsWithTxn( + txn *badger.Txn, snap *Snapshot, hodlerPKID *PKID, profilePKID *PKID, unlockTimestamp int64) *LockedBalanceEntry { key := 
_dbKeyForLockedBalanceEntry(LockedBalanceEntry{ HODLerPKID: hodlerPKID, ProfilePKID: profilePKID, - UnlockTimestampNanoSecs: expirationTimestamp, + UnlockTimestampNanoSecs: unlockTimestamp, }) lockedBalanceEntryBytes, err := DBGetWithTxn(txn, snap, key) if err != nil { return &LockedBalanceEntry{ HODLerPKID: hodlerPKID.NewPKID(), ProfilePKID: profilePKID.NewPKID(), - UnlockTimestampNanoSecs: expirationTimestamp, + UnlockTimestampNanoSecs: unlockTimestamp, BalanceBaseUnits: *uint256.NewInt(), } } From b527904c1b8f3644c5099dbef70794371c425cd7 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 29 Sep 2023 16:11:13 -0700 Subject: [PATCH 227/762] Have unlocks unlock all outstanding balance rather than a user specificed amount. --- lib/block_view_lockups.go | 71 ++++++++++++--------------------------- lib/errors.go | 4 +-- 2 files changed, 23 insertions(+), 52 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 37a157618..fdb5bf59f 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -613,8 +613,7 @@ func (txnData *CoinLockupTransferMetadata) New() DeSoTxnMetadata { // type CoinUnlockMetadata struct { - ProfilePublicKey *PublicKey - CoinsToUnlockBaseUnits *uint256.Int + ProfilePublicKey *PublicKey } func (txnData *CoinUnlockMetadata) GetTxnType() TxnType { @@ -624,7 +623,6 @@ func (txnData *CoinUnlockMetadata) GetTxnType() TxnType { func (txnData *CoinUnlockMetadata) ToBytes(preSignature bool) ([]byte, error) { var data []byte data = append(data, EncodeByteArray(txnData.ProfilePublicKey.ToBytes())...) - data = append(data, VariableEncodeUint256(txnData.CoinsToUnlockBaseUnits)...) 
return data, nil } @@ -638,12 +636,6 @@ func (txnData *CoinUnlockMetadata) FromBytes(data []byte) error { } txnData.ProfilePublicKey = NewPublicKey(profilePublicKeyBytes) - // CoinToUnlockBaseUnits - txnData.CoinsToUnlockBaseUnits, err = VariableDecodeUint256(rr) - if err != nil { - return errors.Wrapf(err, "CoinUnlockMetadata.FromBytes: Problem reading DAOCoinToUnlockBaseUnits") - } - return nil } @@ -1492,18 +1484,6 @@ func (bav *UtxoView) _connectCoinUnlock( } } - // Validate the unlock amount as non-zero. This is meant to prevent wasteful "no-op" transactions. - if txMeta.CoinsToUnlockBaseUnits.IsZero() { - return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockOfAmountZero, - "_connectCoinUnlock") - } - - // Ensure the DeSo unlock amount is less than 2**64 (maximum DeSo balance). - if txMeta.ProfilePublicKey.IsZeroPublicKey() && !txMeta.CoinsToUnlockBaseUnits.IsUint64() { - return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockOfAmountZero, - "_connectCoinUnlock") - } - // Convert the TransactorPublicKey to HODLerPKID transactorPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { @@ -1537,45 +1517,36 @@ func (bav *UtxoView) _connectCoinUnlock( } // Unlock coins until the amount specified by the transaction is deducted. 
- var newLockedBalanceEntries []*LockedBalanceEntry var prevLockedBalanceEntries []*LockedBalanceEntry - remainingUnlockBalance := txMeta.CoinsToUnlockBaseUnits.Clone() + var unlockedBalance *uint256.Int for _, unlockableLockedBalanceEntry := range unlockableLockedBalanceEntries { - newLockedBalanceEntry := unlockableLockedBalanceEntry.Copy() - if newLockedBalanceEntry.BalanceBaseUnits.Gt(remainingUnlockBalance) || - newLockedBalanceEntry.BalanceBaseUnits.Eq(remainingUnlockBalance) { - remainingUnlockBalance = uint256.NewInt() - newLockedBalanceEntry.BalanceBaseUnits = - *uint256.NewInt().Sub(&newLockedBalanceEntry.BalanceBaseUnits, remainingUnlockBalance) - } else { - remainingUnlockBalance = - uint256.NewInt().Sub(remainingUnlockBalance, &newLockedBalanceEntry.BalanceBaseUnits) - newLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt() + unlockedBalance, err = + SafeUint256().Add(unlockedBalance, &unlockableLockedBalanceEntry.BalanceBaseUnits) + if err != nil { + return 0, 0, nil, + errors.Wrapf(RuleErrorCoinUnlockUnlockableCoinsOverflow, "_connectCoinUnlock") } - // Append the new LockedBalanceEntry and prev in the event we rollback the transaction. - newLockedBalanceEntries = append(newLockedBalanceEntries, newLockedBalanceEntry) + // Append the LockedBalanceEntry in the event we rollback the transaction. prevLockedBalanceEntries = append(prevLockedBalanceEntries, unlockableLockedBalanceEntry) - // Break if we've satisfied the unlock amount. - if remainingUnlockBalance.IsZero() { - break - } - } - if !remainingUnlockBalance.IsZero() { - return 0, 0, nil, errors.Wrapf(RuleErrorCoinUnlockInsufficientUnlockableCoins, - "_connectCoinUnlock") - } - - // Update the LockedBalanceEntries. - for _, lockedBalanceEntry := range newLockedBalanceEntries { - bav._setLockedBalanceEntry(lockedBalanceEntry) + // Update the LockedBalanceEntry and delete the record. 
+ unlockableLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt() + bav._deleteLockedBalanceEntry(unlockableLockedBalanceEntry) } // Credit the transactor with either DAO coins or DeSo for this unlock. var prevTransactorBalanceEntry *BalanceEntry if profilePKID.IsZeroPKID() { - utxoOp, err := bav._addBalance(txMeta.CoinsToUnlockBaseUnits.Uint64(), txn.PublicKey) + // Ensure the uint256 can be properly represented as a uint64. + if !unlockedBalance.IsUint64() { + return 0, 0, nil, + errors.Wrapf(RuleErrorCoinUnlockUnlockableDeSoOverflow, "_connectCoinUnlock") + } + + // Add the unlockedBalance to the transactors DeSo balance. + // NOTE: _addBalance checks for balance overflow. + utxoOp, err := bav._addBalance(unlockedBalance.Uint64(), txn.PublicKey) if err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectCoinUnlock: error"+ "adding CoinToUnlockBaseUnits to the transactor balance: ") @@ -1586,7 +1557,7 @@ func (bav *UtxoView) _connectCoinUnlock( // Credit the transactor with the unlock amount. 
newTransactorBalanceEntry := prevTransactorBalanceEntry.Copy() - newTransactorBalanceNanos, err := SafeUint256().Add(&newTransactorBalanceEntry.BalanceNanos, txMeta.CoinsToUnlockBaseUnits) + newTransactorBalanceNanos, err := SafeUint256().Add(&newTransactorBalanceEntry.BalanceNanos, unlockedBalance) if err != nil { return 0, 0, nil, errors.Wrapf(RuleErrorCoinUnlockCausesBalanceOverflow, "_connectCoinUnlock") diff --git a/lib/errors.go b/lib/errors.go index 0a9794b7b..5d84ee471 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -508,11 +508,11 @@ const ( RuleErrorCoinLockupTransferInsufficientBalance RuleError = "RuleErrorCoinLockupTransferInsufficientBalance" RuleErrorCoinLockupTransferBalanceOverflowAtReceiver RuleError = "RuleErrorCoinLockupTransferBalanceOverflowAtReceiver" RuleErrorCoinUnlockOnNonExistentProfile RuleError = "RuleErrorCoinUnlockOnNonExistentProfile" - RuleErrorCoinUnlockOfAmountZero RuleError = "RuleErrorCoinUnlockOfAmountZero" RuleErrorCoinUnlockInvalidHODLerPKID RuleError = "RuleErrorCoinUnlockInvalidHODLerPKID" RuleErrorCoinUnlockInvalidProfilePKID RuleError = "RuleErrorCoinUnlockInvalidProfilePKID" RuleErrorCoinUnlockNoUnlockableCoinsFound RuleError = "RuleErrorCoinUnlockNoUnlockableCoinsFound" - RuleErrorCoinUnlockInsufficientUnlockableCoins RuleError = "RuleErrorCoinUnlockInsufficientUnlockableCoins" + RuleErrorCoinUnlockUnlockableCoinsOverflow RuleError = "RuleErrorCoinUnlockUnlockableCoinsOverflow" + RuleErrorCoinUnlockUnlockableDeSoOverflow RuleError = "RuleErrorCoinUnlockUnlockableCoinsOverflow" RuleErrorCoinUnlockCausesBalanceOverflow RuleError = "RuleErrorCoinUnlockCausesBalanceOverflow" RuleErrorUpdateCoinLockupParamsOnInvalidPKID RuleError = "RuleErrorUpdateCoinLockupParamsOnInvalidPKID" RuleErrorUpdateCoinLockupParamsInvalidRestrictions RuleError = "RuleErrorUpdateCoinLockupParamsInvalidRestrictions" From 57c52c7416e11deaa53c90133434b9440a041239 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 2 Oct 2023 11:52:10 -0700 
Subject: [PATCH 228/762] Cache DB points in the view before computing GetLocalYieldCurvePoints. --- lib/block_view_lockups.go | 63 ++++++++++++++++++++------------------- lib/db_utils.go | 59 +++++++++++++++++++++++++++++++++++- 2 files changed, 90 insertions(+), 32 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index fdb5bf59f..32c723567 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -343,10 +343,35 @@ func (bav *UtxoView) GetYieldCurvePointByProfilePKIDAndDurationNanoSecs(profileP } func (bav *UtxoView) GetLocalYieldCurvePoints(profilePKID *PKID, lockupDuration int64) ( - _leftLockupPoint *LockupYieldCurvePoint, _rightLockupPoint *LockupYieldCurvePoint) { + _leftLockupPoint *LockupYieldCurvePoint, _rightLockupPoint *LockupYieldCurvePoint, _err error) { var leftLockupPoint *LockupYieldCurvePoint var rightLockupPoint *LockupYieldCurvePoint + // Fetch all yield curve points in the db. + dbYieldCurvePoints, err := DBGetAllYieldCurvePointsByProfilePKID( + bav.GetDbAdapter().badgerDb, bav.Snapshot, profilePKID) + if err != nil { + return nil, nil, errors.Wrap(err, "GetLocalYieldCurvePoints") + } + + // Cache the db points in the view. + // While there's more efficient ways to do this with specialized badger seek operations, this is sufficient for now. + if len(dbYieldCurvePoints) > 0 { + // Check if there's a yield curve in the view for the associated profile. + if _, mapInView := bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID]; !mapInView { + bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID] = + make(map[LockupYieldCurvePointKey]*LockupYieldCurvePoint) + } + + // Check if any of the points needs to be cached in the view. 
+ for _, yieldCurvePoint := range dbYieldCurvePoints { + if _, pointInView := + bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID][yieldCurvePoint.ToMapKey()]; !pointInView { + bav._setLockupYieldCurvePoint(yieldCurvePoint) + } + } + } + // Check the view for yield curve points. if _, pointsInView := bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID]; pointsInView { for _, lockupYieldCurvePoint := range bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID] { @@ -377,35 +402,7 @@ func (bav *UtxoView) GetLocalYieldCurvePoints(profilePKID *PKID, lockupDuration } } - // Now we quickly fetch left and right local yield curve points from the DB using careful seek operations. - leftDBLockupPoint, rightDBLockupPoint := DBGetLocalYieldCurvePoints( - bav.GetDbAdapter().badgerDb, bav.Snapshot, profilePKID, lockupDuration) - - // Check for nil pointer cases. - if leftDBLockupPoint != nil && - leftDBLockupPoint.LockupDurationNanoSecs < lockupDuration { - leftLockupPoint = leftDBLockupPoint - } - if rightDBLockupPoint != nil && - rightDBLockupPoint.LockupDurationNanoSecs >= lockupDuration { - rightLockupPoint = rightDBLockupPoint - } - - // Check for an updated left and right yield curve point from the DB. 
- if leftDBLockupPoint != nil && - leftDBLockupPoint.ProfilePKID.Eq(profilePKID) && - leftDBLockupPoint.LockupDurationNanoSecs < lockupDuration && - leftDBLockupPoint.LockupDurationNanoSecs > leftLockupPoint.LockupDurationNanoSecs { - leftLockupPoint = leftDBLockupPoint - } - if rightDBLockupPoint != nil && - rightDBLockupPoint.ProfilePKID.Eq(profilePKID) && - rightDBLockupPoint.LockupDurationNanoSecs >= lockupDuration && - rightDBLockupPoint.LockupDurationNanoSecs < rightLockupPoint.LockupDurationNanoSecs { - rightLockupPoint = rightDBLockupPoint - } - - return leftLockupPoint, rightLockupPoint + return leftLockupPoint, rightLockupPoint, nil } // @@ -789,7 +786,11 @@ func (bav *UtxoView) _connectCoinLockup( // the profile's yield curve or the raw DeSo yield curve. Because there's some choice in how // to determine the yield when the lockup duration falls between two profile specified yield curve // points, we return here the two local points and choose/interpolate between them below. - leftYieldCurvePoint, rightYieldCurvePoint := bav.GetLocalYieldCurvePoints(profilePKID, lockupDurationNanoSeconds) + leftYieldCurvePoint, rightYieldCurvePoint, err := bav.GetLocalYieldCurvePoints(profilePKID, lockupDurationNanoSeconds) + if err != nil { + return 0, 0, nil, + errors.Wrap(err, "_connectCoinLockup failed to fetch yield curve points") + } // Here we interpolate (choose) the yield between the two returned local yield curve points. // diff --git a/lib/db_utils.go b/lib/db_utils.go index 9039bb3ba..c4c36a7eb 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -10828,7 +10828,7 @@ func DBGetUnlockableLockedBalanceEntriesWithTxn( ProfilePKID: profilePKID, }) - // Create an iterator. We set the iterator to reverse in o + // Create an iterator. We set the iterator to reverse as per the comment at the top of this function. 
opts := badger.DefaultIteratorOptions opts.Reverse = true iterator := txn.NewIterator(opts) @@ -10854,6 +10854,7 @@ func DBGetUnlockableLockedBalanceEntriesWithTxn( "error decoding LockedBalanceEntry: ") } + // This check is redundant. It's included to be extra safe only unlockable locked balance entries are included. if lockedBalanceEntry.UnlockTimestampNanoSecs < currentTimestampUnixNanoSecs { lockedBalanceEntries = append(lockedBalanceEntries) } @@ -10889,6 +10890,13 @@ func _dbKeyForLockupYieldCurvePoint(lockupYieldCurvePoint LockupYieldCurvePoint) return key } +func DBPrefixKeyForLockupYieldCurvePointsByProfilePKID(profilePKID *PKID) []byte { + // Make a copy to avoid multiple calls to this function re-using the same slice. + prefixCopy := append([]byte{}, Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs...) + key := append(prefixCopy, profilePKID[:]...) + return key +} + // LockupYieldCurvePoint Put/Delete Operations (Badger Writes) func DbPutLockupYieldCurvePointMappingsWithTxn(txn *badger.Txn, snap *Snapshot, blockHeight uint64, @@ -10931,6 +10939,55 @@ func DbDeleteLockupYieldCurvePointWithTxn(txn *badger.Txn, snap *Snapshot, // LockupYieldCurvePoint Get Operations (Badger Reads) +func DBGetAllYieldCurvePointsByProfilePKID(handle *badger.DB, snap *Snapshot, + profilePKID *PKID) (_lockupYieldCurvePoints []*LockupYieldCurvePoint, _err error) { + var lockupYieldCurvePoints []*LockupYieldCurvePoint + err := handle.View(func(txn *badger.Txn) error { + var err error + lockupYieldCurvePoints, err = DBGetAllYieldCurvePointsByProfilePKIDWithTxn( + txn, snap, profilePKID) + return err + }) + return lockupYieldCurvePoints, err +} + +func DBGetAllYieldCurvePointsByProfilePKIDWithTxn(txn *badger.Txn, snap *Snapshot, + profilePKID *PKID) (_lockupYieldCurvePoints []*LockupYieldCurvePoint, _err error) { + // Construct the key prefix. + startKey := DBPrefixKeyForLockupYieldCurvePointsByProfilePKID(profilePKID) + + // Create an iterator. 
+ opts := badger.DefaultIteratorOptions + iterator := txn.NewIterator(opts) + defer iterator.Close() + + // Store matching LockupYieldCurvePoints to return. + var lockupYieldCurvePoints []*LockupYieldCurvePoint + + // Loop. + for iterator.Seek(startKey); iterator.ValidForPrefix(startKey); iterator.Next() { + // Retrieve the LockupYieldCurvePointBytes. + lockupYieldCurvePointBytes, err := iterator.Item().ValueCopy(nil) + if err != nil { + return nil, errors.Wrapf(err, "DBGetAllYieldCurvePointsByProfilePKIDWithTxn: "+ + "error retrieveing LockupYieldCurvePoint: ") + } + + // Convert LockedBalanceEntryBytes to LockedBalanceEntry. + rr := bytes.NewReader(lockupYieldCurvePointBytes) + lockupYieldCurvePoint, err := DecodeDeSoEncoder(&LockupYieldCurvePoint{}, rr) + if err != nil { + return nil, errors.Wrapf(err, "DBGetAllYieldCurvePointsByProfilePKIDWithTxn: "+ + "error decoding LockupYieldCurvePoint: ") + } + + // Append to the array to return. + lockupYieldCurvePoints = append(lockupYieldCurvePoints, lockupYieldCurvePoint) + } + + return lockupYieldCurvePoints, nil +} + func DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecs(handle *badger.DB, snap *Snapshot, profilePKID *PKID, lockupDurationNanoSecs int64) (_lockupYieldCurvePoint *LockupYieldCurvePoint) { var lockupYieldCurvePoint *LockupYieldCurvePoint From 59dd8d7bdfd11428f820944ed8acbe793149b003 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 2 Oct 2023 13:41:52 -0700 Subject: [PATCH 229/762] Bubble errors out of GetLockedBalanceEntry... to prevent eating DB errors. 
--- lib/block_view_lockups.go | 104 +++++++++++++++++++++++++++++++++----- lib/db_utils.go | 31 +++++++----- 2 files changed, 108 insertions(+), 27 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 32c723567..a7ef4c45c 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -130,7 +130,7 @@ func (bav *UtxoView) _deleteLockedBalanceEntry(lockedBalanceEntry *LockedBalance // Get Helper Functions for LockedBalanceEntry func (bav *UtxoView) GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - hodlerPKID *PKID, profilePKID *PKID, unlockTimestampNanoSecs int64) (_lockedBalanceEntry *LockedBalanceEntry) { + hodlerPKID *PKID, profilePKID *PKID, unlockTimestampNanoSecs int64) (_lockedBalanceEntry *LockedBalanceEntry, _err error) { // Create a key associated with the LockedBalanceEntry. lockedBalanceEntryKey := (&LockedBalanceEntry{ HODLerPKID: hodlerPKID, @@ -141,19 +141,23 @@ func (bav *UtxoView) GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestam // Check if the key exists in the view. if viewEntry, viewEntryExists := bav.LockedBalanceEntryKeyToLockedBalanceEntry[lockedBalanceEntryKey]; viewEntryExists { - return viewEntry + return viewEntry, nil } // No mapping exists in the view, check for an entry in the DB. - lockedBalanceEntry := DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + lockedBalanceEntry, err := DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( bav.Handle, bav.Snapshot, hodlerPKID, profilePKID, unlockTimestampNanoSecs) + if err != nil { + return nil, + errors.Wrap(err, "GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs") + } // Cache the DB entry in the in-memory map. 
if lockedBalanceEntry != nil { bav._setLockedBalanceEntry(lockedBalanceEntry) } - return lockedBalanceEntry + return lockedBalanceEntry, nil } func (bav *UtxoView) GetUnlockableLockedBalanceEntries( @@ -843,8 +847,12 @@ func (bav *UtxoView) _connectCoinLockup( // with the sole intent of saturating the CoinsInCirculationNanos field preventing others from locking up. // For consolidation, we fetch equivalent LockedBalanceEntries. - lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + lockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( hodlerPKID, profilePKID, txMeta.UnlockTimestampNanoSecs) + if err != nil { + return 0, 0, nil, + errors.Wrap(err, "_connectCoinLockup failed to fetch lockedBalanceEntry") + } if lockedBalanceEntry == nil || lockedBalanceEntry.isDeleted { lockedBalanceEntry = &LockedBalanceEntry{ HODLerPKID: hodlerPKID, @@ -961,9 +969,20 @@ func (bav *UtxoView) _disconnectCoinLockup( } // Sanity check the data within the CoinLockup. Reverting a lockup should not result in more coins. 
- lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + lockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( operationData.PrevLockedBalanceEntry.HODLerPKID, operationData.PrevLockedBalanceEntry.ProfilePKID, operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs) + if err != nil { + return errors.Wrap(err, "_disconnectCoinLockup failed to fetch current lockedBalanceEntry") + } + if lockedBalanceEntry == nil || lockedBalanceEntry.isDeleted { + lockedBalanceEntry = &LockedBalanceEntry{ + HODLerPKID: operationData.PrevLockedBalanceEntry.HODLerPKID, + ProfilePKID: operationData.PrevLockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), + } + } if lockedBalanceEntry.BalanceBaseUnits.Lt(&operationData.PrevLockedBalanceEntry.BalanceBaseUnits) { return fmt.Errorf("_disconnectCoinLockup: Reversion of coin lockup would result in " + "more coins in the lockup") @@ -1002,7 +1021,7 @@ func (bav *UtxoView) _disconnectCoinLockup( // By here we only need to disconnect the basic transfer associated with the transaction. basicTransferOps := utxoOpsForTxn[:operationIndex] - err := bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) + err = bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) if err != nil { return errors.Wrapf(err, "_disconnectCoinLockup") } @@ -1329,8 +1348,20 @@ func (bav *UtxoView) _connectCoinLockupTransfer( } // Fetch the sender's balance entries. 
- senderLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + senderLockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( senderPKID, profilePKID, txMeta.UnlockTimestampUnixNanoSecs) + if err != nil { + return 0, 0, nil, + errors.Wrap(err, "connectCoinLockupTransfer failed to fetch senderLockedBalanceEntry:w") + } + if senderLockedBalanceEntry == nil || senderLockedBalanceEntry.isDeleted { + senderLockedBalanceEntry = &LockedBalanceEntry{ + HODLerPKID: senderPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: txMeta.UnlockTimestampUnixNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), + } + } prevSenderLockedBalanceEntry := senderLockedBalanceEntry.Copy() // Check that the sender's balance entry has sufficient balance. @@ -1344,9 +1375,21 @@ func (bav *UtxoView) _connectCoinLockupTransfer( &senderLockedBalanceEntry.BalanceBaseUnits, txMeta.LockedCoinsToTransferBaseUnits) // Fetch the recipient's balance entry. - receiverLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + receiverLockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( receiverPKID, profilePKID, txMeta.UnlockTimestampUnixNanoSecs) - prevReceiverLockedBalanceEntry := receiverLockedBalanceEntry + if err != nil { + return 0, 0, nil, + errors.Wrap(err, "connectCoinLockupTransfer failed to fetch receiverLockedBalanceEntry") + } + if receiverLockedBalanceEntry == nil || receiverLockedBalanceEntry.isDeleted { + receiverLockedBalanceEntry = &LockedBalanceEntry{ + HODLerPKID: receiverPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: txMeta.UnlockTimestampUnixNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), + } + } + prevReceiverLockedBalanceEntry := receiverLockedBalanceEntry.Copy() // Add to the recipient's balance entry, checking for overflow. 
newRecipientBalanceBaseUnits, err := SafeUint256().Add(&receiverLockedBalanceEntry.BalanceBaseUnits, @@ -1405,14 +1448,36 @@ func (bav *UtxoView) _disconnectCoinLockupTransfer( } // Fetch the LockedBalanceEntries in the view. - senderLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + senderLockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( operationData.PrevSenderLockedBalanceEntry.HODLerPKID, operationData.PrevSenderLockedBalanceEntry.ProfilePKID, operationData.PrevSenderLockedBalanceEntry.UnlockTimestampNanoSecs) - receiverLockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + if err != nil { + return errors.Wrap(err, "_disconnectCoinLockupTransfer failed to fetch senderLockedBalanceEntry") + } + if senderLockedBalanceEntry == nil || senderLockedBalanceEntry.isDeleted { + senderLockedBalanceEntry = &LockedBalanceEntry{ + HODLerPKID: operationData.PrevSenderLockedBalanceEntry.HODLerPKID, + ProfilePKID: operationData.PrevSenderLockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: operationData.PrevSenderLockedBalanceEntry.UnlockTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), + } + } + receiverLockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( operationData.PrevReceiverLockedBalanceEntry.HODLerPKID, operationData.PrevReceiverLockedBalanceEntry.ProfilePKID, operationData.PrevReceiverLockedBalanceEntry.UnlockTimestampNanoSecs) + if err != nil { + return errors.Wrap(err, "_disconnectCoinLockupTransfer failed to fetch receiverLockedBalanceEntry") + } + if receiverLockedBalanceEntry == nil || receiverLockedBalanceEntry.isDeleted { + receiverLockedBalanceEntry = &LockedBalanceEntry{ + HODLerPKID: operationData.PrevReceiverLockedBalanceEntry.HODLerPKID, + ProfilePKID: operationData.PrevReceiverLockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: 
operationData.PrevReceiverLockedBalanceEntry.UnlockTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), + } + } // Ensure reverting the transaction won't cause the recipients balances to increase // or cause the senders balances to decrease. @@ -1431,7 +1496,7 @@ func (bav *UtxoView) _disconnectCoinLockupTransfer( // By here we only need to disconnect the basic transfer associated with the transaction. basicTransferOps := utxoOpsForTxn[:operationIndex] - err := bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) + err = bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) if err != nil { return errors.Wrapf(err, "_disconnectCoinLockupTransfer") } @@ -1616,10 +1681,21 @@ func (bav *UtxoView) _disconnectCoinUnlock( // Sanity check the data within the CoinUnlock. // Reverting an unlock of LockedBalanceEntry should not result in less coins. for _, prevLockedBalanceEntry := range operationData.PrevLockedBalanceEntries { - lockedBalanceEntry := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + lockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( prevLockedBalanceEntry.HODLerPKID, prevLockedBalanceEntry.ProfilePKID, prevLockedBalanceEntry.UnlockTimestampNanoSecs) + if err != nil { + return errors.Wrap(err, "_disconnectCoinUnlock failed to fetch lockedBalanceEntry") + } + if lockedBalanceEntry == nil || lockedBalanceEntry.isDeleted { + lockedBalanceEntry = &LockedBalanceEntry{ + HODLerPKID: operationData.PrevLockedBalanceEntry.HODLerPKID, + ProfilePKID: operationData.PrevLockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), + } + } if prevLockedBalanceEntry.BalanceBaseUnits.Lt(&lockedBalanceEntry.BalanceBaseUnits) { return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + "would cause locked balance entry 
balance to decrease") diff --git a/lib/db_utils.go b/lib/db_utils.go index c4c36a7eb..c8fadda14 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -10743,38 +10743,43 @@ func DbDeleteLockedBalanceEntryWithTxn(txn *badger.Txn, snap *Snapshot, lockedBa // LockedBalanceEntry Get Operations (Badger Reads) func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - handle *badger.DB, snap *Snapshot, hodlerPKID *PKID, profilePKID *PKID, unlockTimestamp int64) *LockedBalanceEntry { + handle *badger.DB, snap *Snapshot, hodlerPKID *PKID, profilePKID *PKID, + unlockTimestamp int64) (_lockedBalanceEntry *LockedBalanceEntry, _err error) { var ret *LockedBalanceEntry - handle.View(func(txn *badger.Txn) error { - ret = DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsWithTxn( + err := handle.View(func(txn *badger.Txn) error { + var err error + ret, err = DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsWithTxn( txn, snap, hodlerPKID, profilePKID, unlockTimestamp) - return nil + return err }) - return ret + return ret, err } func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsWithTxn( - txn *badger.Txn, snap *Snapshot, hodlerPKID *PKID, profilePKID *PKID, unlockTimestamp int64) *LockedBalanceEntry { + txn *badger.Txn, snap *Snapshot, hodlerPKID *PKID, profilePKID *PKID, + unlockTimestamp int64) (_lockedBalanceEntry *LockedBalanceEntry, _err error) { key := _dbKeyForLockedBalanceEntry(LockedBalanceEntry{ HODLerPKID: hodlerPKID, ProfilePKID: profilePKID, UnlockTimestampNanoSecs: unlockTimestamp, }) + + // Get the key from the db. 
lockedBalanceEntryBytes, err := DBGetWithTxn(txn, snap, key) + if err == badger.ErrKeyNotFound { + return nil, nil + } if err != nil { - return &LockedBalanceEntry{ - HODLerPKID: hodlerPKID.NewPKID(), - ProfilePKID: profilePKID.NewPKID(), - UnlockTimestampNanoSecs: unlockTimestamp, - BalanceBaseUnits: *uint256.NewInt(), - } + return nil, + errors.Wrap(err, "DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsWithTxn") } + lockedBalanceEntryObj := &LockedBalanceEntry{} rr := bytes.NewReader(lockedBalanceEntryBytes) DecodeFromBytes(lockedBalanceEntryObj, rr) - return lockedBalanceEntryObj + return lockedBalanceEntryObj, nil } func DBGetUnlockableLockedBalanceEntries( From 5b5eadba3ad824a15a78aca80ef27d24a3a234e1 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 2 Oct 2023 13:46:40 -0700 Subject: [PATCH 230/762] Remove Unix from timestamp variables and comments. --- lib/block_view_lockups.go | 20 ++++++++++---------- lib/db_utils.go | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index a7ef4c45c..de82b8767 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -80,11 +80,11 @@ func (lockedBalanceEntry *LockedBalanceEntry) RawDecodeWithoutMetadata(blockHeig } // UnlockTimestampNanoSecs - uint64UnlockTimestampUnixNanoSecs, err := ReadUvarint(rr) + uint64UnlockTimestampNanoSecs, err := ReadUvarint(rr) if err != nil { return errors.Wrapf(err, "LockedBalanceEntry.Decode: Problem reading UnlockTimestampNanoSecs") } - lockedBalanceEntry.UnlockTimestampNanoSecs = int64(uint64UnlockTimestampUnixNanoSecs) + lockedBalanceEntry.UnlockTimestampNanoSecs = int64(uint64UnlockTimestampNanoSecs) // BalanceBaseUnits balanceBaseUnits, err := VariableDecodeUint256(rr) @@ -555,7 +555,7 @@ func (txnData *UpdateCoinLockupParamsMetadata) New() DeSoTxnMetadata { type CoinLockupTransferMetadata struct { RecipientPublicKey *PublicKey ProfilePublicKey *PublicKey - 
UnlockTimestampUnixNanoSecs int64 + UnlockTimestampNanoSecs int64 LockedCoinsToTransferBaseUnits *uint256.Int } @@ -567,7 +567,7 @@ func (txnData *CoinLockupTransferMetadata) ToBytes(preSignature bool) ([]byte, e var data []byte data = append(data, EncodeByteArray(txnData.RecipientPublicKey.ToBytes())...) data = append(data, EncodeByteArray(txnData.ProfilePublicKey.ToBytes())...) - data = append(data, UintToBuf(uint64(txnData.UnlockTimestampUnixNanoSecs))...) + data = append(data, UintToBuf(uint64(txnData.UnlockTimestampNanoSecs))...) data = append(data, VariableEncodeUint256(txnData.LockedCoinsToTransferBaseUnits)...) return data, nil } @@ -590,11 +590,11 @@ func (txnData *CoinLockupTransferMetadata) FromBytes(data []byte) error { txnData.ProfilePublicKey = NewPublicKey(profilePublicKeyBytes) // UnlockTimestampNanoSecs - uint64UnlockTimestampUnixNanoSecs, err := ReadUvarint(rr) + uint64UnlockTimestampNanoSecs, err := ReadUvarint(rr) if err != nil { return errors.Wrapf(err, "DAOCoinLockupTransferMetadata.FromBytes: Problem reading UnlockTimestampNanoSecs") } - txnData.UnlockTimestampUnixNanoSecs = int64(uint64UnlockTimestampUnixNanoSecs) + txnData.UnlockTimestampNanoSecs = int64(uint64UnlockTimestampNanoSecs) // LockedDAOCoinToTransferBaseUnits txnData.LockedCoinsToTransferBaseUnits, err = VariableDecodeUint256(rr) @@ -1349,7 +1349,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( // Fetch the sender's balance entries. 
senderLockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - senderPKID, profilePKID, txMeta.UnlockTimestampUnixNanoSecs) + senderPKID, profilePKID, txMeta.UnlockTimestampNanoSecs) if err != nil { return 0, 0, nil, errors.Wrap(err, "connectCoinLockupTransfer failed to fetch senderLockedBalanceEntry:w") @@ -1358,7 +1358,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( senderLockedBalanceEntry = &LockedBalanceEntry{ HODLerPKID: senderPKID, ProfilePKID: profilePKID, - UnlockTimestampNanoSecs: txMeta.UnlockTimestampUnixNanoSecs, + UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, BalanceBaseUnits: *uint256.NewInt(), } } @@ -1376,7 +1376,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( // Fetch the recipient's balance entry. receiverLockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - receiverPKID, profilePKID, txMeta.UnlockTimestampUnixNanoSecs) + receiverPKID, profilePKID, txMeta.UnlockTimestampNanoSecs) if err != nil { return 0, 0, nil, errors.Wrap(err, "connectCoinLockupTransfer failed to fetch receiverLockedBalanceEntry") @@ -1385,7 +1385,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( receiverLockedBalanceEntry = &LockedBalanceEntry{ HODLerPKID: receiverPKID, ProfilePKID: profilePKID, - UnlockTimestampNanoSecs: txMeta.UnlockTimestampUnixNanoSecs, + UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, BalanceBaseUnits: *uint256.NewInt(), } } diff --git a/lib/db_utils.go b/lib/db_utils.go index c8fadda14..8ba979003 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -563,7 +563,7 @@ type DBPrefixes struct { // Retrieves LockedBalanceEntries that may or may not be claimable for unlock. // LockedBalanceEntries can be retrieved by HodlerPKID and CreatorPKID are have their // corresponding unlock timestamp appended to sort by timestamp. 
- // Prefix, , , -> + // Prefix, , , -> PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDUnlockTimestampNanoSecs []byte `prefix_id:"[91]" is_state:"true"` // PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs: From 4f13a0f25ab3ff2ff7db840e767067be47410262 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 2 Oct 2023 13:50:55 -0700 Subject: [PATCH 231/762] Add check for profile match in DBGetRight/LeftLockupYieldCurvePointWithTxn. --- lib/db_utils.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/db_utils.go b/lib/db_utils.go index 8ba979003..e2e28dbea 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -11077,6 +11077,10 @@ func DBGetLeftLockupYieldCurvePointWithTxn(txn *badger.Txn, snap *Snapshot, prof leftLockupYieldCurvePointObj := &LockupYieldCurvePoint{} rr := bytes.NewReader(leftLockupYieldCurvePointBytes) DecodeFromBytes(leftLockupYieldCurvePointObj, rr) + if !leftLockupYieldCurvePointObj.ProfilePKID.Eq(profilePKID) { + return nil + } + return leftLockupYieldCurvePointObj } @@ -11114,6 +11118,10 @@ func DBGetRightLockupYieldCurvePointWithTxn(txn *badger.Txn, snap *Snapshot, pro rightLockupYieldCurvePointObj := &LockupYieldCurvePoint{} rr := bytes.NewReader(rightLockupYieldCurvePointBytes) DecodeFromBytes(rightLockupYieldCurvePointObj, rr) + if !rightLockupYieldCurvePointObj.ProfilePKID.Eq(profilePKID) { + return nil + } + return rightLockupYieldCurvePointObj } From 3dc989aefb162853f75dac73f0bdeaa0598026f7 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 2 Oct 2023 14:28:38 -0700 Subject: [PATCH 232/762] Cleanup lockup transfer restrictions and add public key check. 
--- lib/block_view_lockups.go | 82 ++++++++++++++++++++++++++------------- lib/block_view_types.go | 4 ++ lib/errors.go | 3 ++ 3 files changed, 62 insertions(+), 27 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index de82b8767..6940fc904 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1281,7 +1281,8 @@ func (bav *UtxoView) _connectCoinLockupTransfer( // Validate the starting block height. if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { - return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectCoinLockupTransfer") + return 0, 0, nil, + errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectCoinLockupTransfer") } // Validate the txn TxnType. @@ -1307,17 +1308,38 @@ func (bav *UtxoView) _connectCoinLockupTransfer( } // If this is a DeSo lockup, ensure the amount is less than 2**64. - if txMeta.ProfilePublicKey.IsZeroPublicKey() { - maxUint64, _ := uint256.FromBig(big.NewInt(0).SetUint64(math.MaxUint64)) - if txMeta.LockedCoinsToTransferBaseUnits.Gt(maxUint64) { - return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupTransferOfDeSoCausesOverflow, - "_connectCoinLockupTransfer") + if txMeta.ProfilePublicKey.IsZeroPublicKey() && !txMeta.LockedCoinsToTransferBaseUnits.IsUint64() { + return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupTransferOfDeSoCausesOverflow, + "_connectCoinLockupTransfer") + } + + // Validate recipient and profile public keys as valid. 
+ var profileEntry *ProfileEntry + if len(txMeta.RecipientPublicKey) != btcec.PubKeyBytesLenCompressed { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupTransferInvalidRecipientPubKey, "_connectCoinLockupTransfer") + } + if len(txMeta.ProfilePublicKey) != btcec.PubKeyBytesLenCompressed { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupTransferInvalidProfilePubKey, "_connectCoinLockupTransfer") + } + if !txMeta.ProfilePublicKey.IsZeroPublicKey() { + profileEntry = bav.GetProfileEntryForPublicKey(txMeta.ProfilePublicKey.ToBytes()) + if profileEntry == nil || profileEntry.isDeleted { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupOnNonExistentProfile, "_connectCoinLockupTransfer") } } - // Fetch PKIDs for the recipient and sender. - senderPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) - senderPKID := senderPKIDEntry.PKID + // Fetch PKIDs for the recipient, sender, and profile. + var senderPKID *PKID + if _, updaterIsParamUpdater := + GetParamUpdaterPublicKeys(blockHeight, bav.Params)[MakePkMapKey(txn.PublicKey)]; !updaterIsParamUpdater { + senderPKID = ZeroPKID.NewPKID() + } else { + senderPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) + senderPKID = senderPKIDEntry.PKID + } receiverPKIDEntry := bav.GetPKIDForPublicKey(txMeta.RecipientPublicKey.ToBytes()) receiverPKID := receiverPKIDEntry.PKID profilePKIDEntry := bav.GetPKIDForPublicKey(txMeta.ProfilePublicKey.ToBytes()) @@ -1329,24 +1351,6 @@ func (bav *UtxoView) _connectCoinLockupTransfer( "_connectCoinLockupTransfer") } - // Verify the transfer restrictions attached to the transfer. 
- profileEntry := bav.GetProfileEntryForPKID(profilePKID) - if profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus == TransferRestrictionStatusProfileOwnerOnly && - !profilePKID.Eq(senderPKID) { - return 0, 0, nil, errors.Wrapf(RuleErrorCoinLockupTransferRestrictedToProfileOwner, - "_connectCoinLockupTransfer") - } - if profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus == TransferRestrictionStatusDAOMembersOnly { - // TODO: Determine if this is desired behavior. We assume the sender must be part of the DAO to have - // transferable coins. It seems weird to tie locked DAO coin transfers to unlocked DAO coin balances. - // An alternative approach is not allow the "TransferRestrictionStatusDAOMembersOnly" restriction. - receiverBalanceEntry := bav.GetBalanceEntry(receiverPKID, profilePKID, true) - if receiverBalanceEntry.BalanceNanos.IsZero() { - return 0, 0, nil, - errors.Wrapf(RuleErrorCoinLockupTransferRestrictedToDAOMembers, "_connectCoinLockupTransfer") - } - } - // Fetch the sender's balance entries. senderLockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( senderPKID, profilePKID, txMeta.UnlockTimestampNanoSecs) @@ -1391,6 +1395,30 @@ func (bav *UtxoView) _connectCoinLockupTransfer( } prevReceiverLockedBalanceEntry := receiverLockedBalanceEntry.Copy() + // Fetch the transfer restrictions attached to the transfer. + var transferRestrictionStatus TransferRestrictionStatus + if profilePKID.IsZeroPKID() { + transferRestrictionStatus = bav.GlobalParamsEntry.LockedDESOTransferRestrictions + } else { + transferRestrictionStatus = profileEntry.DAOCoinEntry.TransferRestrictionStatus + } + + // Check if transfers are limited to profile owner only. 
+ if transferRestrictionStatus == TransferRestrictionStatusProfileOwnerOnly && !profilePKID.Eq(senderPKID) { + return 0, 0, nil, + errors.Wrapf(RuleErrorCoinLockupTransferRestrictedToProfileOwner, "_connectCoinLockupTransfer") + } + + // Check if the transfers are limited to DAO members only. + // Here, a "DAO member" is anyone who holds either unlocked or locked DAO coins associated with the profile. + if transferRestrictionStatus == TransferRestrictionStatusDAOMembersOnly { + receiverBalanceEntry := bav.GetBalanceEntry(receiverPKID, profilePKID, true) + if receiverBalanceEntry.BalanceNanos.IsZero() && receiverLockedBalanceEntry.BalanceBaseUnits.IsZero() { + return 0, 0, nil, + errors.Wrapf(RuleErrorCoinLockupTransferRestrictedToDAOMembers, "_connectCoinLockupTransfer") + } + } + // Add to the recipient's balance entry, checking for overflow. newRecipientBalanceBaseUnits, err := SafeUint256().Add(&receiverLockedBalanceEntry.BalanceBaseUnits, txMeta.LockedCoinsToTransferBaseUnits) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index fe44860ee..b32413adf 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3898,6 +3898,10 @@ type GlobalParamsEntry struct { // allow a validator to be inactive for (neither voting nor proposing // blocks) before they are jailed. JailInactiveValidatorGracePeriodEpochs uint64 + + // LockedDESOTransferRestrictions is the transfer restrictions on Locked raw DESO. + // We place it here to prevent the creation of a ZeroPKID profile entry. 
+ LockedDESOTransferRestrictions TransferRestrictionStatus } func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { diff --git a/lib/errors.go b/lib/errors.go index 5d84ee471..77a2bfce9 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -507,6 +507,9 @@ const ( RuleErrorCoinLockupTransferSenderEqualsReceiver RuleError = "RuleErrorCoinLockupTransferSenderEqualsReceiver" RuleErrorCoinLockupTransferInsufficientBalance RuleError = "RuleErrorCoinLockupTransferInsufficientBalance" RuleErrorCoinLockupTransferBalanceOverflowAtReceiver RuleError = "RuleErrorCoinLockupTransferBalanceOverflowAtReceiver" + RuleErrorCoinLockupTransferInvalidRecipientPubKey RuleError = "RuleErrorCoinLockupTransferInvalidRecipientPubKey" + RuleErrorCoinLockupTransferInvalidProfilePubKey RuleError = "RuleErrorCoinLockupTransferInvalidProfilePubKey" + RuleErrorCoinLockupTransferOnNonExistentProfile RuleError = "RuleErrorCoinLockupTransferOnNonExistentProfile" RuleErrorCoinUnlockOnNonExistentProfile RuleError = "RuleErrorCoinUnlockOnNonExistentProfile" RuleErrorCoinUnlockInvalidHODLerPKID RuleError = "RuleErrorCoinUnlockInvalidHODLerPKID" RuleErrorCoinUnlockInvalidProfilePKID RuleError = "RuleErrorCoinUnlockInvalidProfilePKID" From 0235ed0cc1c35c40aafac53fe0f446a3faa653d9 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 2 Oct 2023 14:47:09 -0700 Subject: [PATCH 233/762] Move LockupTransferRestrictions to GlobalParamsEntry for raw DESO. --- lib/block_view_lockups.go | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 6940fc904..a57cec989 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1123,7 +1123,7 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( // Check if we're updating transfer restriction. 
var prevLockupTransferRestriction TransferRestrictionStatus - if txMeta.NewLockupTransferRestrictions { + if txMeta.NewLockupTransferRestrictions && !profilePKID.IsZeroPKID() { // Fetch the profile entry and LockupTransferRestriction status. profileEntry := bav.GetProfileEntryForPKID(profilePKID) if profileEntry == nil || profileEntry.isDeleted { @@ -1134,6 +1134,20 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( // Store a copy of the previous LockupTransferRestrictionStatus for easy transaction disconnect. prevLockupTransferRestriction = profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus + // Update the transfer restrictions. + profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus = txMeta.LockupTransferRestrictionStatus + bav._setProfileEntryMappings(profileEntry) + } + if txMeta.NewLockupTransferRestrictions && profilePKID.IsZeroPKID() { + // Store a copy of the previous TransferRestrictionStatus. + prevLockupTransferRestriction = bav.GlobalParamsEntry.LockedDESOTransferRestrictions + + // Update the transfer restrictions in global params. + bav.GlobalParamsEntry.LockedDESOTransferRestrictions = txMeta.LockupTransferRestrictionStatus + } + + // Check that the new transfer restriction is valid. + if txMeta.NewLockupTransferRestrictions { // Ensure we're not updating a permanent transfer restriction. if prevLockupTransferRestriction == TransferRestrictionStatusPermanentlyUnrestricted { return 0, 0, nil, errors.Wrapf( @@ -1148,10 +1162,6 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( return 0, 0, nil, errors.Wrapf(RuleErrorUpdateCoinLockupParamsInvalidRestrictions, "_connectUpdateCoinLockupParams") } - - // Update the transfer restrictions. - profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus = txMeta.LockupTransferRestrictionStatus - bav._setProfileEntryMappings(profileEntry) } // Add a UtxoOperation for easy reversion during disconnect. 
@@ -1234,7 +1244,7 @@ func (bav *UtxoView) _disconnectUpdateCoinLockupParams( } // Check if the transaction updated transfer restrictions. If it did, we reset the previous transfer restrictions. - if txMeta.NewLockupTransferRestrictions { + if txMeta.NewLockupTransferRestrictions && !profilePKID.IsZeroPKID() { // Fetch the profile entry and LockupTransferRestriction status. profileEntry := bav.GetProfileEntryForPKID(profilePKID) if profileEntry == nil || profileEntry.isDeleted { @@ -1246,6 +1256,9 @@ func (bav *UtxoView) _disconnectUpdateCoinLockupParams( profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus = operationData.PrevLockupTransferRestriction bav._setProfileEntryMappings(profileEntry) } + if txMeta.NewLockupTransferRestrictions && profilePKID.IsZeroPKID() { + bav.GlobalParamsEntry.LockedDESOTransferRestrictions = operationData.PrevLockupTransferRestriction + } // Decrement the operationIndex. We expect to find the basic transfer UtxoOps next. operationIndex-- From 2d0d7ac24acaf3ac873c1d488e183ff3db49c79f Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 2 Oct 2023 14:59:40 -0700 Subject: [PATCH 234/762] Remove bug where GetBalanceEntry is used in place of _getBalanceEntryForHODLerPKIDAndCreatorPKID. --- lib/block_view_lockups.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index a57cec989..ed6b60db8 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1425,7 +1425,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( // Check if the transfers are limited to DAO members only. // Here, a "DAO member" is anyone who holds either unlocked or locked DAO coins associated with the profile. 
if transferRestrictionStatus == TransferRestrictionStatusDAOMembersOnly { - receiverBalanceEntry := bav.GetBalanceEntry(receiverPKID, profilePKID, true) + receiverBalanceEntry := bav._getBalanceEntryForHODLerPKIDAndCreatorPKID(receiverPKID, profilePKID, true) if receiverBalanceEntry.BalanceNanos.IsZero() && receiverLockedBalanceEntry.BalanceBaseUnits.IsZero() { return 0, 0, nil, errors.Wrapf(RuleErrorCoinLockupTransferRestrictedToDAOMembers, "_connectCoinLockupTransfer") @@ -1660,7 +1660,7 @@ func (bav *UtxoView) _connectCoinUnlock( } utxoOpsForTxn = append(utxoOpsForTxn, utxoOp) } else { - prevTransactorBalanceEntry = bav.GetBalanceEntry(hodlerPKID, profilePKID, true) + prevTransactorBalanceEntry = bav._getBalanceEntryForHODLerPKIDAndCreatorPKID(hodlerPKID, profilePKID, true) // Credit the transactor with the unlock amount. newTransactorBalanceEntry := prevTransactorBalanceEntry.Copy() @@ -1748,7 +1748,7 @@ func (bav *UtxoView) _disconnectCoinUnlock( profilePKID := operationData.PrevLockedBalanceEntries[0].ProfilePKID hodlerPKID := operationData.PrevLockedBalanceEntries[0].HODLerPKID if !profilePKID.IsZeroPKID() { - balanceEntry := bav.GetBalanceEntry(hodlerPKID, profilePKID, true) + balanceEntry := bav._getBalanceEntryForHODLerPKIDAndCreatorPKID(hodlerPKID, profilePKID, true) if operationData.PrevTransactorBalanceEntry == nil || operationData.PrevTransactorBalanceEntry.isDeleted { return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + "but found nil or deleted previous balance entry") From b8103d341177c856a029ef6cd9e7c903e37fd340 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 4 Oct 2023 15:28:50 -0700 Subject: [PATCH 235/762] Lockups derived key spending limit implementation. 
--- lib/block_view.go | 34 +++++ lib/block_view_derived_key.go | 16 +++ lib/block_view_lockups.go | 231 +++++++++++++++++++++++++++++++++- lib/errors.go | 20 +-- lib/network.go | 83 ++++++++++++ 5 files changed, 374 insertions(+), 10 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 14738dade..7812081eb 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -2433,6 +2433,40 @@ func (bav *UtxoView) _checkAndUpdateDerivedKeySpendingLimit( derivedKeyEntry, txnMeta); err != nil { return utxoOpsForTxn, err } + case TxnTypeCoinLockup: + txnMeta := txn.TxnMeta.(*CoinLockupMetadata) + if derivedKeyEntry, err = bav._checkLockupTxnSpendingLimitAndUpdateDerivedKey( + derivedKeyEntry, txnMeta.ProfilePublicKey, CoinLockupOperation); err != nil { + return utxoOpsForTxn, err + } + case TxnTypeUpdateCoinLockupParams: + txnMeta := txn.TxnMeta.(*UpdateCoinLockupParamsMetadata) + // Check if we're updating the transactor's yield curve. + if txnMeta.LockupYieldDurationNanoSecs > 0 { + if derivedKeyEntry, err = bav._checkLockupTxnSpendingLimitAndUpdateDerivedKey( + derivedKeyEntry, NewPublicKey(txn.PublicKey), UpdateCoinLockupYieldCurveOperation); err != nil { + return utxoOpsForTxn, err + } + } + // Check if we're updating the transactor's transfer restrictions. 
+ if txnMeta.NewLockupTransferRestrictions { + if derivedKeyEntry, err = bav._checkLockupTxnSpendingLimitAndUpdateDerivedKey( + derivedKeyEntry, NewPublicKey(txn.PublicKey), UpdateCoinLockupTransferRestrictionsOperation); err != nil { + return utxoOpsForTxn, err + } + } + case TxnTypeCoinLockupTransfer: + txnMeta := txn.TxnMeta.(*CoinLockupTransferMetadata) + if derivedKeyEntry, err = bav._checkLockupTxnSpendingLimitAndUpdateDerivedKey( + derivedKeyEntry, txnMeta.ProfilePublicKey, CoinLockupTransferOperation); err != nil { + return utxoOpsForTxn, err + } + case TxnTypeCoinUnlock: + txnMeta := txn.TxnMeta.(*CoinUnlockMetadata) + if derivedKeyEntry, err = bav._checkLockupTxnSpendingLimitAndUpdateDerivedKey( + derivedKeyEntry, txnMeta.ProfilePublicKey, CoinLockupUnlockOperation); err != nil { + return utxoOpsForTxn, err + } case TxnTypeStake: txnMeta := txn.TxnMeta.(*StakeMetadata) if derivedKeyEntry, err = bav._checkStakeTxnSpendingLimitAndUpdateDerivedKey( diff --git a/lib/block_view_derived_key.go b/lib/block_view_derived_key.go index 0ed063d47..29bb43546 100644 --- a/lib/block_view_derived_key.go +++ b/lib/block_view_derived_key.go @@ -189,6 +189,7 @@ func (bav *UtxoView) _connectAuthorizeDerivedKey( AssociationLimitMap: make(map[AssociationLimitKey]uint64), AccessGroupMap: make(map[AccessGroupLimitKey]uint64), AccessGroupMemberMap: make(map[AccessGroupMemberLimitKey]uint64), + LockupLimitMap: make(map[LockupLimitKey]uint64), StakeLimitMap: make(map[StakeLimitKey]*uint256.Int), UnstakeLimitMap: make(map[StakeLimitKey]*uint256.Int), UnlockStakeLimitMap: make(map[StakeLimitKey]uint64), @@ -312,6 +313,21 @@ func (bav *UtxoView) _connectAuthorizeDerivedKey( // ====== Proof of Stake State Setup Fork ====== if blockHeight >= bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { + // LockupLimitMap + for lockupLimitKey, lockupLimit := range transactionSpendingLimit.LockupLimitMap { + // Check for key validity + if lockupLimitKey.ScopeType == 
LockupLimitScopeTypeAnyCoins && + !lockupLimitKey.ProfilePKID.IsZeroPKID() { + return 0, 0, nil, + errors.New("error creating Lockups spending limit: cannot " + + "specify a lockup profile PKID if ScopeType is Any") + } + if lockupLimit == 0 { + delete(transactionSpendingLimit.LockupLimitMap, lockupLimitKey) + } else { + transactionSpendingLimit.LockupLimitMap[lockupLimitKey] = lockupLimit + } + } // StakeLimitMap for stakeLimitKey, stakingLimit := range transactionSpendingLimit.StakeLimitMap { if err = bav.IsValidStakeLimitKey(txn.PublicKey, stakeLimitKey); err != nil { diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index ed6b60db8..e32fdb9f1 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -549,7 +549,7 @@ func (txnData *UpdateCoinLockupParamsMetadata) New() DeSoTxnMetadata { } // -// TYPES: DAOCoinLockupTransferMetadata +// TYPES: CoinLockupTransferMetadata // type CoinLockupTransferMetadata struct { @@ -1876,3 +1876,232 @@ func (bav *UtxoView) _flushLockupYieldCurvePointEntriesToDbWithTxn(txn *badger.T // By here the LockupYieldCurvePoint mappings in the db should be up-to-date. return nil } + +// +// Derived Key Transactional Limits +// + +type LockupLimitKey struct { + ProfilePKID PKID + ScopeType LockupLimitScopeType + Operation LockupLimitOperation +} + +func MakeLockupLimitKey(profilePKID *PKID, scopeType LockupLimitScopeType, operation LockupLimitOperation) LockupLimitKey { + return LockupLimitKey{ + ProfilePKID: *profilePKID, + ScopeType: scopeType, + Operation: operation, + } +} + +func (lockupLimitKey *LockupLimitKey) Encode() []byte { + var data []byte + data = append(data, lockupLimitKey.ProfilePKID.ToBytes()...) 
+ data = append(data, byte(lockupLimitKey.ScopeType)) + data = append(data, byte(lockupLimitKey.Operation)) + return data +} + +func (lockupLimitKey *LockupLimitKey) Decode(rr *bytes.Reader) error { + var err error + + // ProfilePKID + profilePKID := &PKID{} + if err = profilePKID.FromBytes(rr); err != nil { + return errors.Wrap(err, "LockupLimitKey.Decode: Problem reading ProfilePKID: ") + } + lockupLimitKey.ProfilePKID = *profilePKID + + // ScopeType + var scopeTypeByte byte + if scopeTypeByte, err = rr.ReadByte(); err != nil { + return errors.Wrap(err, "LockupLimitKey.Decode: Problem reading ScopeType: ") + } + lockupLimitKey.ScopeType = LockupLimitScopeType(scopeTypeByte) + + // Operation + var operationByte byte + if operationByte, err = rr.ReadByte(); err != nil { + return errors.Wrap(err, "LockupLimitKey.Decode: Problem reading Operation: ") + } + lockupLimitKey.Operation = LockupLimitOperation(operationByte) + + return nil +} + +type LockupLimitOperation uint8 +type LockupLimitOperationString string + +const ( + AnyLockupOperation LockupLimitOperation = 0 + CoinLockupOperation LockupLimitOperation = 1 + UpdateCoinLockupYieldCurveOperation LockupLimitOperation = 2 + UpdateCoinLockupTransferRestrictionsOperation LockupLimitOperation = 3 + CoinLockupTransferOperation LockupLimitOperation = 4 + CoinLockupUnlockOperation LockupLimitOperation = 5 + UndefinedCoinLockupOperation LockupLimitOperation = 6 +) + +const ( + AnyLockupOperationString LockupLimitOperationString = "Any" + CoinLockupOperationString LockupLimitOperationString = "CoinLockup" + UpdateCoinLockupYieldCurveOperationString LockupLimitOperationString = "UpdateCoinLockupYieldCurve" + UpdateCoinLockupTransferRestrictionsOperationString LockupLimitOperationString = "UpdateCoinLockupTransferRestrictions" + CoinLockupTransferOperationString LockupLimitOperationString = "CoinLockupTransferOperationString" + CoinLockupUnlockOperationString LockupLimitOperationString = "CoinLockupUnlock" + 
UndefinedCoinLockupOperationString LockupLimitOperationString = "Undefined" +) + +func (lockupLimitOperation LockupLimitOperation) ToString() string { + return string(lockupLimitOperation.ToOperationString()) +} + +func (lockupLimitOperation LockupLimitOperation) ToOperationString() LockupLimitOperationString { + switch lockupLimitOperation { + case AnyLockupOperation: + return AnyLockupOperationString + case CoinLockupOperation: + return CoinLockupOperationString + case UpdateCoinLockupYieldCurveOperation: + return UpdateCoinLockupYieldCurveOperationString + case UpdateCoinLockupTransferRestrictionsOperation: + return UpdateCoinLockupTransferRestrictionsOperationString + case CoinLockupTransferOperation: + return CoinLockupTransferOperationString + case CoinLockupUnlockOperation: + return CoinLockupUnlockOperationString + default: + return UndefinedCoinLockupOperationString + } +} + +func (lockupLimitOperationString LockupLimitOperationString) ToOperationType() LockupLimitOperation { + switch lockupLimitOperationString { + case AnyLockupOperationString: + return AnyLockupOperation + case CoinLockupOperationString: + return CoinLockupOperation + case UpdateCoinLockupYieldCurveOperationString: + return UpdateCoinLockupYieldCurveOperation + case UpdateCoinLockupTransferRestrictionsOperationString: + return UpdateCoinLockupTransferRestrictionsOperation + case CoinLockupTransferOperationString: + return CoinLockupTransferOperation + case CoinLockupUnlockOperationString: + return CoinLockupUnlockOperation + default: + return UndefinedCoinLockupOperation + } +} + +type LockupLimitScopeType uint8 +type LockupLimitScopeTypeString string + +const ( + LockupLimitScopeTypeUndefined LockupLimitScopeType = 0 + LockupLimitScopeTypeAnyCoins LockupLimitScopeType = 1 + LockupLimitScopeTypeScopedCoins LockupLimitScopeType = 2 +) + +const ( + LockupLimitScopeTypeUndefinedString LockupLimitScopeTypeString = "Undefined" + LockupLimitScopeTypeAnyCoinsString LockupLimitScopeTypeString = 
"AnyCoins" + LockupLimitScopeTypeScopedCoinsString LockupLimitScopeTypeString = "ScopedCoins" +) + +func (lockupLimitScopeType LockupLimitScopeType) ToString() string { + return string(lockupLimitScopeType.ToScopeString()) +} + +func (lockupLimitScopeType LockupLimitScopeType) ToScopeString() LockupLimitScopeTypeString { + switch lockupLimitScopeType { + case LockupLimitScopeTypeAnyCoins: + return LockupLimitScopeTypeAnyCoinsString + case LockupLimitScopeTypeScopedCoins: + return LockupLimitScopeTypeScopedCoinsString + default: + return LockupLimitScopeTypeUndefinedString + } +} + +func (lockupLimitScopeType LockupLimitScopeTypeString) ToScopeType() LockupLimitScopeType { + switch lockupLimitScopeType { + case LockupLimitScopeTypeAnyCoinsString: + return LockupLimitScopeTypeAnyCoins + case LockupLimitScopeTypeScopedCoinsString: + return LockupLimitScopeTypeScopedCoins + default: + return LockupLimitScopeTypeUndefined + } +} + +func (bav *UtxoView) _checkLockupTxnSpendingLimitAndUpdateDerivedKey( + derivedKeyEntry DerivedKeyEntry, + profilePublicKey *PublicKey, + lockupOperation LockupLimitOperation, +) (DerivedKeyEntry, error) { + // Convert profile public key to PKID. 
+ var profilePKID *PKID + if profilePublicKey.IsZeroPublicKey() { + profilePKID = ZeroPKID.NewPKID() + } else { + profilePKIDEntry := bav.GetPKIDForPublicKey(profilePublicKey.ToBytes()) + if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { + return derivedKeyEntry, + errors.Wrap(RuleErrorDerivedKeyCoinLockupOperationInvalidProfilePKID, + "_checkCoinLockupTxnSpendingLimitAndUpdateDerivedKey") + } + profilePKID = profilePKIDEntry.PKID.NewPKID() + } + + // Start by checking (specific profile PKID || specific operation) key + profilePKIDOperationKey := MakeLockupLimitKey(profilePKID, LockupLimitScopeTypeScopedCoins, lockupOperation) + if _checkLimitKeyAndUpdateDerivedKeyEntry(profilePKIDOperationKey, derivedKeyEntry) { + return derivedKeyEntry, nil + } + + // Next check (specific profile PKID || any operation) key + profilePKIDAnyOperationKey := MakeLockupLimitKey(profilePKID, LockupLimitScopeTypeScopedCoins, AnyLockupOperation) + if _checkLimitKeyAndUpdateDerivedKeyEntry(profilePKIDAnyOperationKey, derivedKeyEntry) { + return derivedKeyEntry, nil + } + + // Next check (any creator PKID || specific operation) key + anyProfilePKIDOperationKey := MakeLockupLimitKey(profilePKID, LockupLimitScopeTypeAnyCoins, lockupOperation) + if _checkLimitKeyAndUpdateDerivedKeyEntry(anyProfilePKIDOperationKey, derivedKeyEntry) { + return derivedKeyEntry, nil + } + + // Next check (any creator PKID || any operation) key + anyProfilePKIDAnyOperationKey := MakeLockupLimitKey(profilePKID, LockupLimitScopeTypeAnyCoins, AnyLockupOperation) + if _checkLimitKeyAndUpdateDerivedKeyEntry(anyProfilePKIDAnyOperationKey, derivedKeyEntry) { + return derivedKeyEntry, nil + } + + return derivedKeyEntry, errors.Wrapf(RuleErrorDerivedKeyCoinLockupOperationNotAuthorized, ""+ + "_checkCoinLockupTxnSpendingLimitAndUpdateDerivedKey: coin lockup operation (type %s) not authorized: ", + lockupOperation.ToString()) +} + +func _checkLimitKeyAndUpdateDerivedKeyEntry(key LockupLimitKey, derivedKeyEntry 
DerivedKeyEntry) bool { + if derivedKeyEntry.TransactionSpendingLimitTracker == nil || + derivedKeyEntry.TransactionSpendingLimitTracker.LockupLimitMap == nil { + return false + } + // If the key is present in the LockupLimitMap... + lockupOperationLimit, lockupOperationLimitExists := + derivedKeyEntry.TransactionSpendingLimitTracker.LockupLimitMap[key] + if !lockupOperationLimitExists || lockupOperationLimit <= 0 { + return false + } + // If this is the last operation allowed for this key, we delete the key from the map. + if lockupOperationLimit == 1 { + delete(derivedKeyEntry.TransactionSpendingLimitTracker.LockupLimitMap, key) + } else { + // Otherwise we decrement the number of operations remaining for this key + derivedKeyEntry.TransactionSpendingLimitTracker.LockupLimitMap[key]-- + } + // Return true because we found the key and decremented the remaining operations + return true +} diff --git a/lib/errors.go b/lib/errors.go index 77a2bfce9..0f7829ed0 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -452,15 +452,17 @@ const ( RuleErrorOldToPublicKeyHasDeletedPKID RuleError = "RuleErrorOldToPublicKeyHasDeletedPKID" // Derived Key Transaction Spending Limits - RuleErrorDerivedKeyTxnTypeNotAuthorized RuleError = "RuleErrorDerivedKeyTxnTypeNotAuthorized" - RuleErrorDerivedKeyTxnSpendsMoreThanGlobalDESOLimit RuleError = "RuleErrorDerivedKeyTxnSpendsMoreThanGlobalDESOLimit" - RuleErrorDerivedKeyInvalidCreatorCoinLimitOperation RuleError = "RuleErrorInvalidCreatorCoinLimitOperation" - RuleErrorDerivedKeyInvalidDAOCoinLimitOperation RuleError = "RuleErrorInvalidDAOCoinLimitOperation" - RuleErrorDerivedKeyNFTOperationNotAuthorized RuleError = "RuleErrorDerivedKeyNFTOperationNotAuthorized" - RuleErrorDerivedKeyCreatorCoinOperationNotAuthorized RuleError = "RuleErrorDerivedKeyCreatorCoinOperationNotAuthorized" - RuleErrorDerivedKeyDAOCoinOperationNotAuthorized RuleError = "RuleErrorDerivedKeyDAOCoinOperationNotAuthorized" - 
RuleErrorDerivedKeyInvalidDAOCoinLimitOrderOrderID RuleError = "RuleErrorDerivedKeyInvalidDAOCoinLimitOrderOrderID" - RuleErrorDerivedKeyDAOCoinLimitOrderNotAuthorized RuleError = "RuleErrorDerivedKeyDAOCoinLimitOrderNotAuthorized" + RuleErrorDerivedKeyTxnTypeNotAuthorized RuleError = "RuleErrorDerivedKeyTxnTypeNotAuthorized" + RuleErrorDerivedKeyTxnSpendsMoreThanGlobalDESOLimit RuleError = "RuleErrorDerivedKeyTxnSpendsMoreThanGlobalDESOLimit" + RuleErrorDerivedKeyInvalidCreatorCoinLimitOperation RuleError = "RuleErrorInvalidCreatorCoinLimitOperation" + RuleErrorDerivedKeyInvalidDAOCoinLimitOperation RuleError = "RuleErrorInvalidDAOCoinLimitOperation" + RuleErrorDerivedKeyNFTOperationNotAuthorized RuleError = "RuleErrorDerivedKeyNFTOperationNotAuthorized" + RuleErrorDerivedKeyCreatorCoinOperationNotAuthorized RuleError = "RuleErrorDerivedKeyCreatorCoinOperationNotAuthorized" + RuleErrorDerivedKeyDAOCoinOperationNotAuthorized RuleError = "RuleErrorDerivedKeyDAOCoinOperationNotAuthorized" + RuleErrorDerivedKeyInvalidDAOCoinLimitOrderOrderID RuleError = "RuleErrorDerivedKeyInvalidDAOCoinLimitOrderOrderID" + RuleErrorDerivedKeyDAOCoinLimitOrderNotAuthorized RuleError = "RuleErrorDerivedKeyDAOCoinLimitOrderNotAuthorized" + RuleErrorDerivedKeyCoinLockupOperationNotAuthorized RuleError = "RuleErrorDerivedKeyCoinLockupOperationNotAuthorized" + RuleErrorDerivedKeyCoinLockupOperationInvalidProfilePKID RuleError = "RuleErrorDerivedKeyCoinLockupOperationInvalidProfilePKID" // Association Errors RuleErrorAssociationBeforeBlockHeight RuleError = "RuleErrorAssociationBeforeBlockHeight" diff --git a/lib/network.go b/lib/network.go index 723d3fae2..ba068c2b7 100644 --- a/lib/network.go +++ b/lib/network.go @@ -5699,6 +5699,11 @@ type TransactionSpendingLimit struct { AssociationLimitMap map[AssociationLimitKey]uint64 // ===== ENCODER MIGRATION ProofOfStake1StateSetupMigration ===== + // ProfilePKID || LockupLimitOperation || LockupLimitScopeType to number of transactions. 
+ // - ProfilePKID: A PKID to scope transactions by. + // If using the "Any" scope, then ProfilePKID has to be the ZeroPKID. + // - LockupLimitOperationType: One of {Any, Scoped} + LockupLimitMap map[LockupLimitKey]uint64 // ValidatorPKID || StakerPKID to amount of stake-able $DESO. // Note that this is not a limit on the number of Stake txns that // this derived key can perform but instead a limit on the amount @@ -5955,6 +5960,33 @@ func (tsl *TransactionSpendingLimit) ToMetamaskString(params *DeSoParams) string indentationCounter-- } + // LockupLimitMap + if len(tsl.LockupLimitMap) > 0 { + var lockupLimitStr []string + str += _indt(indentationCounter) + "Lockup Restrictions:\n" + indentationCounter++ + for limitKey, limit := range tsl.LockupLimitMap { + opString := _indt(indentationCounter) + "[\n" + + indentationCounter++ + opString += _indt(indentationCounter) + "Lockup Profile PKID: " + + Base58CheckEncode(limitKey.ProfilePKID.ToBytes(), false, params) + "\n" + opString += _indt(indentationCounter) + "Lockup Scope: " + + limitKey.ScopeType.ToString() + "\n" + opString += _indt(indentationCounter) + "Lockup Operation: " + + limitKey.Operation.ToString() + "\n" + opString += _indt(indentationCounter) + "Transaction Count: " + + strconv.FormatUint(limit, 10) + "\n" + indentationCounter-- + + opString += _indt(indentationCounter) + "]\n" + lockupLimitStr = append(lockupLimitStr, opString) + } + // Ensure deterministic ordering of the transaction count limit strings by doing a lexicographical sort. + sortStringsAndAddToLimitStr(lockupLimitStr) + indentationCounter-- + } + // StakeLimitMap if len(tsl.StakeLimitMap) > 0 { var stakeLimitStr []string @@ -6266,6 +6298,27 @@ func (tsl *TransactionSpendingLimit) ToBytes(blockHeight uint64) ([]byte, error) // StakeLimitMap, UnstakeLimitMap, and UnlockStakeLimitMap, gated by the encoder migration. 
if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { + // LockupLimitMap + lockupLimitMapLength := uint64(len(tsl.LockupLimitMap)) + data = append(data, UintToBuf(lockupLimitMapLength)...) + if lockupLimitMapLength > 0 { + keys, err := SafeMakeSliceWithLengthAndCapacity[LockupLimitKey](0, lockupLimitMapLength) + if err != nil { + return nil, err + } + for key := range tsl.LockupLimitMap { + keys = append(keys, key) + } + // Sort the keys to ensure deterministic ordering. + sort.Slice(keys, func(ii, jj int) bool { + return hex.EncodeToString(keys[ii].Encode()) < hex.EncodeToString(keys[jj].Encode()) + }) + for _, key := range keys { + data = append(data, key.Encode()...) + data = append(data, UintToBuf(tsl.LockupLimitMap[key])...) + } + } + // StakeLimitMap stakeLimitMapLength := uint64(len(tsl.StakeLimitMap)) data = append(data, UintToBuf(stakeLimitMapLength)...) @@ -6541,6 +6594,30 @@ func (tsl *TransactionSpendingLimit) FromBytes(blockHeight uint64, rr *bytes.Rea // StakeLimitMap, UnstakeLimitMap, and UnlockStakeLimitMap, gated by the encoder migration. 
if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { + // LockupLimitMap + lockupLimitMapLen, err := ReadUvarint(rr) + if err != nil { + return err + } + tsl.LockupLimitMap = make(map[LockupLimitKey]uint64) + if lockupLimitMapLen > 0 { + for ii := uint64(0); ii < lockupLimitMapLen; ii++ { + lockupLimitKey := &LockupLimitKey{} + if err = lockupLimitKey.Decode(rr); err != nil { + return errors.Wrap(err, "Error decoding LockupLimitKey: ") + } + var operationCount uint64 + operationCount, err = ReadUvarint(rr) + if err != nil { + return errors.Wrap(err, "Error decoding LockupLimitKey: ") + } + if _, keyExists := tsl.LockupLimitMap[*lockupLimitKey]; keyExists { + return fmt.Errorf("LockupLimitKey already exists") + } + tsl.LockupLimitMap[*lockupLimitKey] = operationCount + } + } + // StakeLimitMap stakeLimitMapLen, err := ReadUvarint(rr) if err != nil { @@ -6670,6 +6747,7 @@ func (tsl *TransactionSpendingLimit) Copy() *TransactionSpendingLimit { DAOCoinLimitOrderLimitMap: make(map[DAOCoinLimitOrderLimitKey]uint64), AccessGroupMap: make(map[AccessGroupLimitKey]uint64), AccessGroupMemberMap: make(map[AccessGroupMemberLimitKey]uint64), + LockupLimitMap: make(map[LockupLimitKey]uint64), StakeLimitMap: make(map[StakeLimitKey]*uint256.Int), UnstakeLimitMap: make(map[StakeLimitKey]*uint256.Int), UnlockStakeLimitMap: make(map[StakeLimitKey]uint64), @@ -6713,6 +6791,10 @@ func (tsl *TransactionSpendingLimit) Copy() *TransactionSpendingLimit { copyTSL.AccessGroupMemberMap[accessGroupMemberLimitKey] = accessGroupMemberCount } + for lockupLimitKey, lockupLimit := range tsl.LockupLimitMap { + copyTSL.LockupLimitMap[lockupLimitKey] = lockupLimit + } + for stakeLimitKey, stakeLimitDESONanos := range tsl.StakeLimitMap { copyTSL.StakeLimitMap[stakeLimitKey] = stakeLimitDESONanos.Clone() } @@ -6746,6 +6828,7 @@ func (bav *UtxoView) CheckIfValidUnlimitedSpendingLimit(tsl *TransactionSpending len(tsl.AssociationLimitMap) > 0 || len(tsl.AccessGroupMap) > 0 || 
len(tsl.AccessGroupMemberMap) > 0 || + len(tsl.LockupLimitMap) > 0 || len(tsl.StakeLimitMap) > 0 || len(tsl.UnstakeLimitMap) > 0 || len(tsl.UnlockStakeLimitMap) > 0) { From 701f0d052aee6ae7a8540501c8e729a1ccbb9af3 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 6 Oct 2023 10:09:46 -0700 Subject: [PATCH 236/762] Encode only QC or AggregateQC in PoS blocks (#715) --- lib/network.go | 21 ++++++------ lib/network_test.go | 73 +++++++++++++++++++++++----------------- lib/pos_network.go | 81 +++++++++++++++++++++++++++++++++++++++------ 3 files changed, 125 insertions(+), 50 deletions(-) diff --git a/lib/network.go b/lib/network.go index f38fbbc31..93a04f34b 100644 --- a/lib/network.go +++ b/lib/network.go @@ -2153,21 +2153,22 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error // ProposedInView retBytes = append(retBytes, UintToBuf(msg.ProposedInView)...) - // ValidatorsVoteQC - if msg.ValidatorsVoteQC == nil { - return nil, fmt.Errorf("EncodeHeaderVersion2: ValidatorsVoteQC must be non-nil") + // Only one of ValidatorsVoteQC or ValidatorsTimeoutAggregateQC must be defined. + if (msg.ValidatorsVoteQC == nil) == (msg.ValidatorsTimeoutAggregateQC == nil) { + return nil, fmt.Errorf( + "EncodeHeaderVersion2: Exactly one of ValidatorsVoteQC or ValidatorsTimeoutAggregateQC must be non-nil", + ) } - encodedValidatorsVoteQC, err := msg.ValidatorsVoteQC.ToBytes() + + // ValidatorsVoteQC + encodedValidatorsVoteQC, err := EncodeQuorumCertificate(msg.ValidatorsVoteQC) if err != nil { return nil, errors.Wrapf(err, "EncodeHeaderVersion2: error encoding ValidatorsVoteQC") } retBytes = append(retBytes, encodedValidatorsVoteQC...) 
// ValidatorsTimeoutAggregateQC - if msg.ValidatorsTimeoutAggregateQC == nil { - return nil, fmt.Errorf("EncodeHeaderVersion2: ValidatorsTimeoutAggregateQC must be non-nil") - } - encodedValidatorsTimeoutAggregateQC, err := msg.ValidatorsTimeoutAggregateQC.ToBytes() + encodedValidatorsTimeoutAggregateQC, err := EncodeTimeoutAggregateQuorumCertificate(msg.ValidatorsTimeoutAggregateQC) if err != nil { return nil, errors.Wrapf(err, "EncodeHeaderVersion2: error encoding ValidatorsTimeoutAggregateQC") } @@ -2377,8 +2378,8 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { } // ValidatorsTimeoutAggregateQC - retHeader.ValidatorsTimeoutAggregateQC = &TimeoutAggregateQuorumCertificate{} - if err = retHeader.ValidatorsTimeoutAggregateQC.FromBytes(rr); err != nil { + retHeader.ValidatorsTimeoutAggregateQC, err = DecodeTimeoutAggregateQuorumCertificate(rr) + if err != nil { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ValidatorsTimeoutAggregateQC") } diff --git a/lib/network_test.go b/lib/network_test.go index 73d11683c..43d40d4d5 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -108,7 +108,7 @@ var expectedBlockHeaderVersion1 = &MsgDeSoHeader{ // Creates fully formatted a PoS block header with random signatures // and block hashes -func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { +func createTestBlockHeaderVersion2(t *testing.T, includeTimeoutQC bool) *MsgDeSoHeader { testBlockHash := BlockHash{ 0x00, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x20, 0x21, @@ -131,7 +131,33 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { testBitset := bitset.NewBitset().Set(0, true).Set(3, true) testBLSPublicKey, testBLSSignature := _generateValidatorVotingPublicKeyAndSignature(t) - return &MsgDeSoHeader{ + validatorsVoteQC := &QuorumCertificate{ + BlockHash: &testBlockHash, + ProposedInView: uint64(123456789123), + 
ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + SignersList: testBitset, + Signature: testBLSSignature, + }, + } + + validatorsTimeoutAggregateQC := &TimeoutAggregateQuorumCertificate{ + TimedOutView: uint64(234567891234), + ValidatorsHighQC: &QuorumCertificate{ + BlockHash: &testBlockHash, + ProposedInView: uint64(345678912345), + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + SignersList: testBitset, + Signature: testBLSSignature, + }, + }, + ValidatorsTimeoutHighQCViews: []uint64{456789123456}, + ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ + SignersList: testBitset, + Signature: testBLSSignature, + }, + } + + header := &MsgDeSoHeader{ Version: 2, PrevBlockHash: &testBlockHash, TransactionMerkleRoot: &testMerkleRoot, @@ -145,32 +171,17 @@ func createTestBlockHeaderVersion2(t *testing.T) *MsgDeSoHeader { ProposerRandomSeedHash: &testRandomSeedHash, ProposedInView: uint64(1432101234), // Use real signatures and public keys for the PoS fields - ValidatorsVoteQC: &QuorumCertificate{ - BlockHash: &testBlockHash, - ProposedInView: uint64(123456789123), - ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ - SignersList: testBitset, - Signature: testBLSSignature, - }, - }, - ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ - TimedOutView: uint64(234567891234), - ValidatorsHighQC: &QuorumCertificate{ - BlockHash: &testBlockHash, - ProposedInView: uint64(345678912345), - ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ - SignersList: testBitset, - Signature: testBLSSignature, - }, - }, - ValidatorsTimeoutHighQCViews: []uint64{456789123456}, - ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ - SignersList: testBitset, - Signature: testBLSSignature, - }, - }, ProposerVotePartialSignature: testBLSSignature, } + + // Only set one of the two fields. 
+ if includeTimeoutQC { + header.ValidatorsTimeoutAggregateQC = validatorsTimeoutAggregateQC + } else { + header.ValidatorsVoteQC = validatorsVoteQC + } + + return header } func TestHeaderConversionAndReadWriteMessage(t *testing.T) { @@ -182,7 +193,8 @@ func TestHeaderConversionAndReadWriteMessage(t *testing.T) { expectedBlockHeadersToTest := []*MsgDeSoHeader{ expectedBlockHeaderVersion1, - createTestBlockHeaderVersion2(t), + createTestBlockHeaderVersion2(t, true), + createTestBlockHeaderVersion2(t, false), } // Performs a full E2E byte encode and decode of all the block header @@ -233,7 +245,7 @@ func TestHeaderVersion2SignatureByteEncoding(t *testing.T) { _ = assert _ = require - expectedBlockHeader := createTestBlockHeaderVersion2(t) + expectedBlockHeader := createTestBlockHeaderVersion2(t, true) preSignatureBytes, err := expectedBlockHeader.ToBytes(true) require.NoError(err) @@ -264,7 +276,7 @@ func TestHeaderVersion2Hash(t *testing.T) { _ = assert _ = require - expectedBlockHeader := createTestBlockHeaderVersion2(t) + expectedBlockHeader := createTestBlockHeaderVersion2(t, true) headerHash, err := expectedBlockHeader.Hash() require.NoError(err) @@ -313,7 +325,8 @@ func TestHeaderBundleSerialization(t *testing.T) { headerBundle := &MsgDeSoHeaderBundle{ Headers: []*MsgDeSoHeader{ expectedBlockHeaderVersion1, - createTestBlockHeaderVersion2(t), + createTestBlockHeaderVersion2(t, true), + createTestBlockHeaderVersion2(t, false), }, TipHash: hash1, TipHeight: 12345, diff --git a/lib/pos_network.go b/lib/pos_network.go index 775281ee7..7abe7c2c4 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -256,8 +256,8 @@ func (msg *MsgDeSoValidatorTimeout) FromBytes(data []byte) error { } // HighQC - msg.HighQC, err = DecodeQuorumCertificate(rr) - if err != nil { + msg.HighQC = &QuorumCertificate{} + if msg.HighQC.FromBytes(rr); err != nil { return errors.Wrapf(err, "MsgDeSoValidatorTimeout.FromBytes: Error decoding HighQC") } @@ -340,26 +340,56 @@ func (qc 
*QuorumCertificate) ToBytes() ([]byte, error) { return retBytes, nil } -func DecodeQuorumCertificate(rr io.Reader) (*QuorumCertificate, error) { - var qc QuorumCertificate +func (qc *QuorumCertificate) FromBytes(rr io.Reader) error { var err error qc.BlockHash, err = ReadBlockHash(rr) if err != nil { - return nil, errors.Wrapf(err, "DecodeQuorumCertificate: Error decoding BlockHash") + return errors.Wrapf(err, "QuorumCertificate.FromBytes: Error decoding BlockHash") } qc.ProposedInView, err = ReadUvarint(rr) if err != nil { - return nil, errors.Wrapf(err, "DecodeQuorumCertificate: Error decoding ProposedInView") + return errors.Wrapf(err, "QuorumCertificate.FromBytes: Error decoding ProposedInView") } qc.ValidatorsVoteAggregatedSignature = &AggregatedBLSSignature{} if err = qc.ValidatorsVoteAggregatedSignature.FromBytes(rr); err != nil { - return nil, errors.Wrapf(err, "DecodeQuorumCertificate: Error decoding ValidatorsVoteAggregatedSignature") + return errors.Wrapf(err, "QuorumCertificate.FromBytes: Error decoding ValidatorsVoteAggregatedSignature") + } + + return nil +} + +func EncodeQuorumCertificate(qc *QuorumCertificate) ([]byte, error) { + if qc == nil { + return EncodeByteArray(nil), nil + } + + encodedBytes, err := qc.ToBytes() + if err != nil { + return nil, errors.Wrapf(err, "EncodeQuorumCertificate: Error encoding qc") } - return &qc, nil + return EncodeByteArray(encodedBytes), nil +} + +func DecodeQuorumCertificate(rr io.Reader) (*QuorumCertificate, error) { + encodedBytes, err := DecodeByteArray(rr) + if err != nil { + return nil, errors.Wrapf(err, "DecodeQuorumCertificate: Error decoding encodedBytes") + } + + if len(encodedBytes) == 0 { + return nil, nil + } + + qc := &QuorumCertificate{} + if err := qc.FromBytes(bytes.NewReader(encodedBytes)); err != nil { + return nil, errors.Wrapf(err, "DecodeQuorumCertificate: Error decoding qc") + } + + return qc, nil } // This is an aggregated BLS signature from a set of validators. 
Each validator's @@ -530,8 +560,8 @@ func (aggQC *TimeoutAggregateQuorumCertificate) FromBytes(rr io.Reader) error { return errors.Wrapf(err, "TimeoutAggregateQuorumCertificate.FromBytes: Error decoding TimedOutView") } - aggQC.ValidatorsHighQC, err = DecodeQuorumCertificate(rr) - if err != nil { + aggQC.ValidatorsHighQC = &QuorumCertificate{} + if aggQC.ValidatorsHighQC.FromBytes(rr); err != nil { return errors.Wrapf(err, "TimeoutAggregateQuorumCertificate.FromBytes: Error decoding ValidatorsHighQC") } @@ -548,6 +578,37 @@ func (aggQC *TimeoutAggregateQuorumCertificate) FromBytes(rr io.Reader) error { return nil } +func EncodeTimeoutAggregateQuorumCertificate(aggQC *TimeoutAggregateQuorumCertificate) ([]byte, error) { + if aggQC == nil { + return EncodeByteArray(nil), nil + } + + encodedBytes, err := aggQC.ToBytes() + if err != nil { + return nil, errors.Wrapf(err, "EncodeTimeoutAggregateQuorumCertificate: Error encoding aggQC") + } + + return EncodeByteArray(encodedBytes), nil +} + +func DecodeTimeoutAggregateQuorumCertificate(rr io.Reader) (*TimeoutAggregateQuorumCertificate, error) { + encodedBytes, err := DecodeByteArray(rr) + if err != nil { + return nil, errors.Wrapf(err, "DecodeTimeoutAggregateQuorumCertificate: Error decoding encodedBytes") + } + + if len(encodedBytes) == 0 { + return nil, nil + } + + aggQC := &TimeoutAggregateQuorumCertificate{} + if err := aggQC.FromBytes(bytes.NewReader(encodedBytes)); err != nil { + return nil, errors.Wrapf(err, "DecodeTimeoutAggregateQuorumCertificate: Error decoding aggQC") + } + + return aggQC, nil +} + // ================================================================== // Bitset Utils // ================================================================== From 01989ddc9985cdef3b03b952667d66657e1847e4 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 9 Oct 2023 20:35:14 -0700 Subject: [PATCH 237/762] Merge --- consensus/consensus.go | 10 +++++----- 
lib/block_view_derived_key_test.go | 4 ---- lib/block_view_test.go | 7 ++----- 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/consensus/consensus.go b/consensus/consensus.go index 16136d3d7..d72ca8eba 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -15,23 +15,23 @@ func NewFastHotStuffConsensus() *FastHotStuffConsensus { } } -func (fc *FastHotStuffConsensus) Init( /*TODO */ ) { +func (fc *FastHotStuffConsensus) Init( /*TODO */) { // TODO } -func (fc *FastHotStuffConsensus) HandleAcceptedBlock( /* TODO */ ) { +func (fc *FastHotStuffConsensus) HandleAcceptedBlock( /* TODO */) { // TODO } -func (fc *FastHotStuffConsensus) HandleVoteMessage( /* TODO */ ) { +func (fc *FastHotStuffConsensus) HandleVoteMessage( /* TODO */) { // TODO } -func (pc *FastHotStuffConsensus) HandleTimeoutMessage( /* TODO */ ) { +func (pc *FastHotStuffConsensus) HandleTimeoutMessage( /* TODO */) { // TODO } -func (fc *FastHotStuffConsensus) HandleBlockProposal( /* TODO */ ) { +func (fc *FastHotStuffConsensus) HandleBlockProposal( /* TODO */) { // TODO } diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index 5dee4f164..2e6d5d7a3 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -5,10 +5,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "math/rand" - "testing" - "time" - "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v3" "github.com/holiman/uint256" diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 6a8a082a0..fa69e25a2 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" + "math" _ "net/http/pprof" "reflect" "sort" @@ -16,10 +17,6 @@ import ( "github.com/golang/glog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - _ "net/http/pprof" - "reflect" - "sort" - "testing" ) func _strToPk(t *testing.T, pkStr string) []byte { @@ -95,7 +92,7 @@ func 
resetBalanceModelBlockHeights() { DeSoTestnetParams.ForkHeights.ExtraDataOnEntriesBlockHeight = uint32(304087) DeSoTestnetParams.ForkHeights.AssociationsAndAccessGroupsBlockHeight = uint32(596555) DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = uint32(683058) - DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = 1000000 + DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(math.MaxUint32) DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) GlobalDeSoParams = DeSoTestnetParams From 208225b19eae03237b447513474b1103d390450c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 10 Oct 2023 11:09:50 -0400 Subject: [PATCH 238/762] Update QC construction to trigger on vote and timeout receipt (#711) * Update QC construction to trigger on vote and timeout receipt * Cleanup * Remove redundant elapsed flag * Add flag to prevent duplicated QC construction per view --- consensus/event_loop.go | 72 ++++++++++++++++++++++++++++-------- consensus/event_loop_test.go | 4 +- consensus/types.go | 5 +++ 3 files changed, 63 insertions(+), 18 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 831a3a39b..3f3f55f35 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -64,6 +64,9 @@ func (fc *FastHotStuffEventLoop) Init( // We track the current view here so we know which view to time out on later on. 
fc.currentView = tip.Block.GetView() + 1 + // Reset QC construction status for the current view + fc.hasConstructedQCInCurrentView = false + // Reset all internal data structures for votes and timeouts fc.votesSeen = make(map[[32]byte]map[string]VoteMessage) fc.timeoutsSeen = make(map[uint64]map[string]TimeoutMessage) @@ -97,6 +100,9 @@ func (fc *FastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { // Advance the view fc.currentView++ + // Reset QC construction status for the current view + fc.hasConstructedQCInCurrentView = false + // Evict all stale votes and timeouts fc.evictStaleVotesAndTimeouts() @@ -135,6 +141,9 @@ func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidators, safeBl // We track the current view here so we know which view to time out on later on. fc.currentView = fc.tip.block.GetView() + 1 + // Reset QC construction status for the current view + fc.hasConstructedQCInCurrentView = false + // Evict all stale votes and timeouts fc.evictStaleVotesAndTimeouts() @@ -254,14 +263,28 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { return errors.New("FastHotStuffEventLoop.ProcessValidatorVote: Invalid signature") } - // Note: we do not check if the vote is for the current chain tip's blockhash. During leader changes - // where we will be the next block proposer, it is possible for us to receive a vote for a block that - // we haven't seen yet, but we will need to construct the QC for the block as we are the next leader. - // To make this code resilient to these race conditions during leader changes, we simply store the vote - // as long as it's properly formed and not stale. - + // Cache the vote in case we need it for later fc.storeVote(voteSignaturePayload, vote) + // Check if the crank timer has elapsed or the event loop has constructed a QC in the current view. + // If so, then there's nothing more to do. 
+ if fc.crankTimerTask.IsScheduled() || fc.hasConstructedQCInCurrentView { + return nil + } + + // Check if the vote is for the chain tip. If not, then there's nothing more to do. + if vote.GetBlockHash() != fc.tip.block.GetBlockHash() { + return nil + } + + // Check if we have a super-majority vote for the chain tip. + if voteQCEvent := fc.tryConstructVoteQCInCurrentView(); voteQCEvent != nil { + // Signal the server that we can construct a QC for the chain tip, and mark that we have + // constructed a QC for the current view. + fc.hasConstructedQCInCurrentView = true + fc.Events <- voteQCEvent + } + return nil } @@ -325,13 +348,29 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) return errors.New("FastHotStuffEventLoop.ProcessValidatorTimeout: Invalid signature") } - // Note: we do not check if the timeout is for the current view. Nodes in the network are expected to have - // slightly different timings and may be at different views. To make this code resilient to timing - // differences between nodes, we simply store the timeout as long as it's properly formed and not stale. - // Stored timeouts will be evicted once we advance beyond them. - + // Cache the timeout message in case we need it for later fc.storeTimeout(timeout) + // Check if the crank timer has elapsed or the event loop has constructed a QC in the current view. + // If so, then there's nothing more to do. + if fc.crankTimerTask.IsScheduled() || fc.hasConstructedQCInCurrentView { + return nil + } + + // Check if the timeout is not for the previous view. If not, then there's nothing more to do. + if timeout.GetView() != fc.currentView-1 { + return nil + } + + // Check if we have a super-majority of stake has timed out of the previous view. If so, we signal + // the server that we can construct a timeoutQC in the current view. 
+ if timeoutQCEvent := fc.tryConstructTimeoutQCInCurrentView(); timeoutQCEvent != nil { + // Signal the server that we can construct a timeout QC for the current view, and mark + // that we have constructed a QC for the current view. + fc.hasConstructedQCInCurrentView = true + fc.Events <- timeoutQCEvent + } + return nil } @@ -433,11 +472,16 @@ func (fc *FastHotStuffEventLoop) onCrankTimerTaskExecuted(blockConstructionView return } + // Mark the crank timer task as not scheduled, so we can verify from the rest of the + // event loop when the timer has elapsed. + defer fc.crankTimerTask.Cancel() + // Check if the conditions are met to construct a QC from votes for the chain tip. If so, // we send a signal to the server and cancel the crank timer task. The server will // reschedule the task when it advances the view. if voteQCEvent := fc.tryConstructVoteQCInCurrentView(); voteQCEvent != nil { // Signal the server that we can construct a QC for the chain tip + fc.hasConstructedQCInCurrentView = true fc.Events <- voteQCEvent return } @@ -446,14 +490,10 @@ func (fc *FastHotStuffEventLoop) onCrankTimerTaskExecuted(blockConstructionView // we send a signal to the server and cancel all scheduled tasks. if timeoutQCEvent := fc.tryConstructTimeoutQCInCurrentView(); timeoutQCEvent != nil { // Signal the server that we can construct a timeout QC for the current view + fc.hasConstructedQCInCurrentView = true fc.Events <- timeoutQCEvent return } - - // We have not found a super majority of votes or timeouts. We can schedule the task to check again later. 
- fc.crankTimerTask.Schedule(fc.crankTimerInterval, fc.currentView, fc.onCrankTimerTaskExecuted) - - return } // tryConstructVoteQCInCurrentView is a helper function that attempts to construct a QC for the tip block diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 677968cb2..a64413966 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -691,7 +691,7 @@ func TestVoteQCConstructionSignal(t *testing.T) { select { case blockConstructionSignal = <-fc.Events: // Do nothing - case <-time.After(100 * time.Millisecond): + case <-time.After(10 * time.Millisecond): require.Fail(t, "Did not receive a block construction signal when there were enough votes to construct a QC") } @@ -834,7 +834,7 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { select { case signal = <-fc.Events: // Do nothing - case <-time.After(100 * time.Second): + case <-time.After(10 * time.Second): require.Fail(t, "Did not receive a block construction signal when there were enough timeouts to construct a timeout QC") } diff --git a/consensus/types.go b/consensus/types.go index 6119847fd..a6a885619 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -155,6 +155,11 @@ type FastHotStuffEventLoop struct { // the timeout signal will be triggered for this view. currentView uint64 + // Signifies whether or not the event loop has constructed a QC or timeout QC for the current view. + // This is an optimization that is useful to prevent the event loop from signaling the server multiple + // times for the same view. + hasConstructedQCInCurrentView bool + // Block hash of the current tip of the block-chain. 
tip blockWithValidatorLookup From 328053447ec81b7694fd48b671a509293a208bdb Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Tue, 10 Oct 2023 11:24:35 -0400 Subject: [PATCH 239/762] gofmt --- consensus/consensus.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/consensus/consensus.go b/consensus/consensus.go index d72ca8eba..16136d3d7 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -15,23 +15,23 @@ func NewFastHotStuffConsensus() *FastHotStuffConsensus { } } -func (fc *FastHotStuffConsensus) Init( /*TODO */) { +func (fc *FastHotStuffConsensus) Init( /*TODO */ ) { // TODO } -func (fc *FastHotStuffConsensus) HandleAcceptedBlock( /* TODO */) { +func (fc *FastHotStuffConsensus) HandleAcceptedBlock( /* TODO */ ) { // TODO } -func (fc *FastHotStuffConsensus) HandleVoteMessage( /* TODO */) { +func (fc *FastHotStuffConsensus) HandleVoteMessage( /* TODO */ ) { // TODO } -func (pc *FastHotStuffConsensus) HandleTimeoutMessage( /* TODO */) { +func (pc *FastHotStuffConsensus) HandleTimeoutMessage( /* TODO */ ) { // TODO } -func (fc *FastHotStuffConsensus) HandleBlockProposal( /* TODO */) { +func (fc *FastHotStuffConsensus) HandleBlockProposal( /* TODO */ ) { // TODO } From 0545dd7df9293d21240a8e7e10844241d5f828c3 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 10 Oct 2023 10:19:42 -0700 Subject: [PATCH 240/762] implement interfaces from consensus module for QuorumCertificate, AggregatedBLSSignature, TimeoutAggregateQuorumCertificate, ValidatorEntry, MsgDeSoHeader, and BlockHash (#712) * implement interfaces from consensus module for QuorumCertificate, AggregatedBLSSignature, TimeoutAggregateQuorumCertificate, and ValidatorEntry * implement block hash and block interfaces --------- Co-authored-by: Lazy Nina <> --- lib/block_view_validator.go | 8 ++++++++ lib/network.go | 27 +++++++++++++++++++++++++++ lib/pos_network.go | 37 +++++++++++++++++++++++++++++++++++++ lib/types.go | 4 
++++ 4 files changed, 76 insertions(+) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 8ee6d768b..94d7b20ff 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -84,6 +84,14 @@ type ValidatorEntry struct { isDeleted bool } +func (validatorEntry *ValidatorEntry) GetPublicKey() *bls.PublicKey { + return validatorEntry.VotingPublicKey +} + +func (validatorEntry *ValidatorEntry) GetStakeAmount() *uint256.Int { + return validatorEntry.TotalStakeAmountNanos +} + func (validatorEntry *ValidatorEntry) Status() ValidatorStatus { // ValidatorEntry.Status() is a virtual/derived field that is not stored in // the database, but instead constructed from other ValidatorEntry fields. diff --git a/lib/network.go b/lib/network.go index ab77e70ab..0a70711de 100644 --- a/lib/network.go +++ b/lib/network.go @@ -8,6 +8,8 @@ import ( "encoding/hex" "encoding/json" "fmt" + "github.com/deso-protocol/core/consensus" + "github.com/golang/glog" "io" "math" "math/big" @@ -1962,6 +1964,31 @@ type MsgDeSoHeader struct { ProposerVotePartialSignature *bls.Signature } +func (msg *MsgDeSoHeader) GetBlockHash() consensus.BlockHash { + hash, err := msg.Hash() + if err != nil { + glog.Errorf("MsgDeSoHeader.GetBlockHash: Problem hashing header: %v", err) + // TODO: Should we return nil? 
+ return &BlockHash{} + } + return hash +} + +func (msg *MsgDeSoHeader) GetHeight() uint64 { + return msg.Height +} + +func (msg *MsgDeSoHeader) GetView() uint64 { + return msg.ProposedInView +} + +func (msg *MsgDeSoHeader) GetQC() consensus.QuorumCertificate { + if msg.ValidatorsTimeoutAggregateQC.isEmpty() { + return msg.ValidatorsVoteQC + } + return msg.ValidatorsTimeoutAggregateQC.ValidatorsHighQC +} + func HeaderSizeBytes() int { header := NewMessage(MsgTypeHeader) headerBytes, _ := header.ToBytes(false) diff --git a/lib/pos_network.go b/lib/pos_network.go index ded6d43fd..7a01d4463 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -3,6 +3,7 @@ package lib import ( "bytes" "fmt" + "github.com/deso-protocol/core/consensus" "io" "github.com/deso-protocol/core/bls" @@ -294,6 +295,18 @@ type QuorumCertificate struct { ValidatorsVoteAggregatedSignature *AggregatedBLSSignature } +func (qc *QuorumCertificate) GetBlockHash() consensus.BlockHash { + return qc.BlockHash +} + +func (qc *QuorumCertificate) GetView() uint64 { + return qc.ProposedInView +} + +func (qc *QuorumCertificate) GetAggregatedSignature() consensus.AggregatedSignature { + return qc.ValidatorsVoteAggregatedSignature +} + // Performs a deep equality check between two QuorumCertificates, and returns true // if the two are fully initialized and have identical values. In all other cases, // it return false. @@ -406,6 +419,14 @@ type AggregatedBLSSignature struct { Signature *bls.Signature } +func (sig *AggregatedBLSSignature) GetSignersList() *bitset.Bitset { + return sig.SignersList +} + +func (sig *AggregatedBLSSignature) GetSignature() *bls.Signature { + return sig.Signature +} + // Performs a deep equality check between two AggregatedBLSSignatures, and returns true // if the two are fully initialized and have identical values. In all other cases, // it return false. 
@@ -497,6 +518,22 @@ type TimeoutAggregateQuorumCertificate struct { ValidatorsTimeoutAggregatedSignature *AggregatedBLSSignature } +func (aggQC *TimeoutAggregateQuorumCertificate) GetView() uint64 { + return aggQC.TimedOutView +} + +func (aggQC *TimeoutAggregateQuorumCertificate) GetHighQC() *QuorumCertificate { + return aggQC.ValidatorsHighQC +} + +func (aggQC *TimeoutAggregateQuorumCertificate) GetHighQCViews() []uint64 { + return aggQC.ValidatorsTimeoutHighQCViews +} + +func (aggQC *TimeoutAggregateQuorumCertificate) GetAggregatedSignature() consensus.AggregatedSignature { + return aggQC.ValidatorsTimeoutAggregatedSignature +} + // Performs a deep equality check between two TimeoutAggregateQuorumCertificates, and // returns true if the two are fully initialized and have identical values. In all other // cases, it return false. diff --git a/lib/types.go b/lib/types.go index 3e531f2ca..9182883a7 100644 --- a/lib/types.go +++ b/lib/types.go @@ -200,6 +200,10 @@ func NewBlockHash(input []byte) *BlockHash { return blockHash } +func (bh *BlockHash) GetValue() [HashSizeBytes]byte { + return *bh +} + func (bh *BlockHash) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { return EncodeByteArray(bh[:]) } From f1d0e735058ea78732cac6f2dd22d716546ff9b9 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 10 Oct 2023 16:55:58 -0400 Subject: [PATCH 241/762] Bump timeouts to 100 milliseconds for all tests in event loop (#722) Co-authored-by: Lazy Nina <> --- consensus/event_loop_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index a64413966..c81d68a16 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -691,7 +691,7 @@ func TestVoteQCConstructionSignal(t *testing.T) { select { case blockConstructionSignal = <-fc.Events: // Do nothing - case <-time.After(10 * time.Millisecond): + case 
<-time.After(100 * time.Millisecond): require.Fail(t, "Did not receive a block construction signal when there were enough votes to construct a QC") } @@ -834,7 +834,7 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { select { case signal = <-fc.Events: // Do nothing - case <-time.After(10 * time.Second): + case <-time.After(100 * time.Millisecond): require.Fail(t, "Did not receive a block construction signal when there were enough timeouts to construct a timeout QC") } From 16ed9e42b69bbe4c099e6b0f53b2ce42511108e6 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 10 Oct 2023 19:27:11 -0400 Subject: [PATCH 242/762] Pass HighQC in Timeout Signal (#723) --- consensus/event_loop.go | 1 + consensus/event_loop_test.go | 1 + 2 files changed, 2 insertions(+) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 3f3f55f35..d50a7f482 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -726,6 +726,7 @@ func (fc *FastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uin View: timedOutView, // The view we timed out TipBlockHash: fc.tip.block.GetBlockHash(), // The last block we saw TipBlockHeight: fc.tip.block.GetHeight(), // The last block we saw + QC: fc.tip.block.GetQC(), // The highest QC we have } // Cancel the timeout task. The server will reschedule it when it advances the view. 
diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index c81d68a16..2dac72b73 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -545,6 +545,7 @@ func TestTimeoutScheduledTaskExecuted(t *testing.T) { require.Equal(t, timeoutSignal.EventType, FastHotStuffEventTypeTimeout) require.Equal(t, timeoutSignal.View, dummyBlock.GetView()+2) require.Equal(t, timeoutSignal.TipBlockHash.GetValue(), dummyBlock.GetBlockHash().GetValue()) + require.Equal(t, timeoutSignal.QC.GetBlockHash().GetValue(), dummyBlock.qc.GetBlockHash().GetValue()) // Confirm that the timeout is no longer running require.False(t, fc.nextTimeoutTask.IsScheduled()) From 65738f2456b430f93b6217951b90e7cdace0bdc2 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Tue, 10 Oct 2023 16:29:39 -0700 Subject: [PATCH 243/762] Fix encoding related testing errors for LockupLimitMap --- lib/network.go | 2 +- lib/network_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/network.go b/lib/network.go index ba068c2b7..6d6eca68d 100644 --- a/lib/network.go +++ b/lib/network.go @@ -6811,7 +6811,7 @@ func (tsl *TransactionSpendingLimit) Copy() *TransactionSpendingLimit { } func (bav *UtxoView) CheckIfValidUnlimitedSpendingLimit(tsl *TransactionSpendingLimit, blockHeight uint32) (_isUnlimited bool, _err error) { - AssertDependencyStructFieldNumbers(&TransactionSpendingLimit{}, 13) + AssertDependencyStructFieldNumbers(&TransactionSpendingLimit{}, 14) if tsl.IsUnlimited && blockHeight < bav.Params.ForkHeights.DeSoUnlimitedDerivedKeysBlockHeight { return false, RuleErrorUnlimitedDerivedKeyBeforeBlockHeight diff --git a/lib/network_test.go b/lib/network_test.go index 73d11683c..efd42ffde 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -1687,7 +1687,7 @@ func TestUnlimitedSpendingLimitMetamaskEncoding(t *testing.T) { // Test the spending limit encoding using the standard scheme. 
spendingLimitBytes, err := spendingLimit.ToBytes(1) require.NoError(err) - require.Equal(true, reflect.DeepEqual(spendingLimitBytes, []byte{0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0})) + require.Equal(true, reflect.DeepEqual(spendingLimitBytes, []byte{0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0})) // Test the spending limit encoding using the metamask scheme. require.Equal(true, reflect.DeepEqual( From 6be34a1bb2d43fa6e4cd68568ecedea280b72474 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Tue, 10 Oct 2023 16:57:40 -0700 Subject: [PATCH 244/762] Add comment discussing convention break in UpdateCoinLockupParams and derived key limits. --- lib/block_view.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/block_view.go b/lib/block_view.go index 7812081eb..0da2f0168 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -2440,8 +2440,17 @@ func (bav *UtxoView) _checkAndUpdateDerivedKeySpendingLimit( return utxoOpsForTxn, err } case TxnTypeUpdateCoinLockupParams: + // NOTE: While this breaks convention, we allow the UpdateCoinLockupParamsMetadata to decrement + // two different derived key limits independently for added flexibility. We could + // have a limit as to the number of UpdateCoinLockupParams transactions but given the + // importance of security regarding the lockup yield curve it makes more sense to break + // derived key limits for UpdateCoinLockupParams into multiple behavior specific limits. txnMeta := txn.TxnMeta.(*UpdateCoinLockupParamsMetadata) // Check if we're updating the transactor's yield curve. + // NOTE: It's described in a longer comment in UpdateCoinLockupParamsMetadata that if + // LockupYieldDurationNanoSecs is zero, the other fields associated with updating + // the yield curve are ignored. Hence, the check below checks that any update + // to the yield curve exists in the given transaction. 
if txnMeta.LockupYieldDurationNanoSecs > 0 { if derivedKeyEntry, err = bav._checkLockupTxnSpendingLimitAndUpdateDerivedKey( derivedKeyEntry, NewPublicKey(txn.PublicKey), UpdateCoinLockupYieldCurveOperation); err != nil { From 0a900b730b9116621fc46e8bb15700d9ab5ea47e Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 11 Oct 2023 09:59:48 -0700 Subject: [PATCH 245/762] Add check and throw error in derived keys for no-op transactions. --- lib/block_view.go | 9 +++++++++ lib/errors.go | 1 + 2 files changed, 10 insertions(+) diff --git a/lib/block_view.go b/lib/block_view.go index 0da2f0168..886b1618a 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -2440,6 +2440,8 @@ func (bav *UtxoView) _checkAndUpdateDerivedKeySpendingLimit( return utxoOpsForTxn, err } case TxnTypeUpdateCoinLockupParams: + txnUpdatesYieldCurve := false + txnUpdatesTransferRestrictions := false // NOTE: While this breaks convention, we allow the UpdateCoinLockupParamsMetadata to decrement // two different derived key limits independently for added flexibility. We could // have a limit as to the number of UpdateCoinLockupParams transactions but given the @@ -2452,6 +2454,7 @@ func (bav *UtxoView) _checkAndUpdateDerivedKeySpendingLimit( // the yield curve are ignored. Hence, the check below checks that any update // to the yield curve exists in the given transaction. if txnMeta.LockupYieldDurationNanoSecs > 0 { + txnUpdatesYieldCurve = true if derivedKeyEntry, err = bav._checkLockupTxnSpendingLimitAndUpdateDerivedKey( derivedKeyEntry, NewPublicKey(txn.PublicKey), UpdateCoinLockupYieldCurveOperation); err != nil { return utxoOpsForTxn, err @@ -2459,11 +2462,17 @@ func (bav *UtxoView) _checkAndUpdateDerivedKeySpendingLimit( } // Check if we're updating the transactor's transfer restrictions. 
if txnMeta.NewLockupTransferRestrictions { + txnUpdatesTransferRestrictions = true if derivedKeyEntry, err = bav._checkLockupTxnSpendingLimitAndUpdateDerivedKey( derivedKeyEntry, NewPublicKey(txn.PublicKey), UpdateCoinLockupTransferRestrictionsOperation); err != nil { return utxoOpsForTxn, err } } + // Throw an error if this transaction does nothing. A derived key transaction should decrement + // at least one limit as otherwise it's spending fees and accomplishing nothing. + if !txnUpdatesYieldCurve && !txnUpdatesTransferRestrictions { + return utxoOpsForTxn, RuleErrorDerivedKeyUpdateCoinLockupParamsIsNoOp + } case TxnTypeCoinLockupTransfer: txnMeta := txn.TxnMeta.(*CoinLockupTransferMetadata) if derivedKeyEntry, err = bav._checkLockupTxnSpendingLimitAndUpdateDerivedKey( diff --git a/lib/errors.go b/lib/errors.go index 0f7829ed0..829c90f44 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -463,6 +463,7 @@ const ( RuleErrorDerivedKeyDAOCoinLimitOrderNotAuthorized RuleError = "RuleErrorDerivedKeyDAOCoinLimitOrderNotAuthorized" RuleErrorDerivedKeyCoinLockupOperationNotAuthorized RuleError = "RuleErrorDerivedKeyCoinLockupOperationNotAuthorized" RuleErrorDerivedKeyCoinLockupOperationInvalidProfilePKID RuleError = "RuleErrorDerivedKeyCoinLockupOperationInvalidProfilePKID" + RuleErrorDerivedKeyUpdateCoinLockupParamsIsNoOp RuleError = "RuleErrorDerivedKeyUpdateCoinLockupParamsIsNoOp" // Association Errors RuleErrorAssociationBeforeBlockHeight RuleError = "RuleErrorAssociationBeforeBlockHeight" From 474353df2353819cf76cc3166616083fc78a3e19 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 11 Oct 2023 10:07:40 -0700 Subject: [PATCH 246/762] Pass profilePKID by value rather than reference to remove possible future nil pointer errors. 
--- lib/block_view_lockups.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index e32fdb9f1..40ed18224 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1887,9 +1887,9 @@ type LockupLimitKey struct { Operation LockupLimitOperation } -func MakeLockupLimitKey(profilePKID *PKID, scopeType LockupLimitScopeType, operation LockupLimitOperation) LockupLimitKey { +func MakeLockupLimitKey(profilePKID PKID, scopeType LockupLimitScopeType, operation LockupLimitOperation) LockupLimitKey { return LockupLimitKey{ - ProfilePKID: *profilePKID, + ProfilePKID: profilePKID, ScopeType: scopeType, Operation: operation, } @@ -2056,25 +2056,25 @@ func (bav *UtxoView) _checkLockupTxnSpendingLimitAndUpdateDerivedKey( } // Start by checking (specific profile PKID || specific operation) key - profilePKIDOperationKey := MakeLockupLimitKey(profilePKID, LockupLimitScopeTypeScopedCoins, lockupOperation) + profilePKIDOperationKey := MakeLockupLimitKey(*profilePKID, LockupLimitScopeTypeScopedCoins, lockupOperation) if _checkLimitKeyAndUpdateDerivedKeyEntry(profilePKIDOperationKey, derivedKeyEntry) { return derivedKeyEntry, nil } // Next check (specific profile PKID || any operation) key - profilePKIDAnyOperationKey := MakeLockupLimitKey(profilePKID, LockupLimitScopeTypeScopedCoins, AnyLockupOperation) + profilePKIDAnyOperationKey := MakeLockupLimitKey(*profilePKID, LockupLimitScopeTypeScopedCoins, AnyLockupOperation) if _checkLimitKeyAndUpdateDerivedKeyEntry(profilePKIDAnyOperationKey, derivedKeyEntry) { return derivedKeyEntry, nil } // Next check (any creator PKID || specific operation) key - anyProfilePKIDOperationKey := MakeLockupLimitKey(profilePKID, LockupLimitScopeTypeAnyCoins, lockupOperation) + anyProfilePKIDOperationKey := MakeLockupLimitKey(*profilePKID, LockupLimitScopeTypeAnyCoins, lockupOperation) if _checkLimitKeyAndUpdateDerivedKeyEntry(anyProfilePKIDOperationKey, 
derivedKeyEntry) { return derivedKeyEntry, nil } // Next check (any creator PKID || any operation) key - anyProfilePKIDAnyOperationKey := MakeLockupLimitKey(profilePKID, LockupLimitScopeTypeAnyCoins, AnyLockupOperation) + anyProfilePKIDAnyOperationKey := MakeLockupLimitKey(*profilePKID, LockupLimitScopeTypeAnyCoins, AnyLockupOperation) if _checkLimitKeyAndUpdateDerivedKeyEntry(anyProfilePKIDAnyOperationKey, derivedKeyEntry) { return derivedKeyEntry, nil } From 383d7b01eaa37972d3a283efb30070691a11cad9 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:11:00 -0400 Subject: [PATCH 247/762] Validate Block Integrity (#705) * Bump timeouts to 100 milliseconds for all tests in event loop * ln/validate-block-general-pos * add tests and finish initial surface level validation function * validate num transactions along w/ type of QC * address SA feedback - add more validations and check for ancestors earlier * address SA feedback round 2 --------- Co-authored-by: Lazy Nina <> --- bls/signature.go | 4 + bls/signature_no_relic.go | 4 + lib/blockchain.go | 1 + lib/constants.go | 11 ++ lib/pos_blockchain.go | 160 +++++++++++++++++++++++---- lib/pos_blockchain_test.go | 221 +++++++++++++++++++++++++++++++++++++ lib/pos_network.go | 10 ++ lib/pos_random_seed.go | 4 + 8 files changed, 393 insertions(+), 22 deletions(-) create mode 100644 lib/pos_blockchain_test.go diff --git a/bls/signature.go b/bls/signature.go index 9bcfdb4a9..a3fe19282 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -251,6 +251,10 @@ func (publicKey *PublicKey) Copy() *PublicKey { } } +func (publicKey *PublicKey) IsEmpty() bool { + return publicKey == nil || publicKey.flowPublicKey == nil +} + // // TYPES: Signature // diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go index 093a06331..d264b0449 100644 --- a/bls/signature_no_relic.go +++ b/bls/signature_no_relic.go @@ -96,6 +96,10 @@ func (publicKey *PublicKey) Copy() *PublicKey 
{ panic(BLSNoRelicError) } +func (publicKey *PublicKey) IsEmpty() bool { + panic(BLSNoRelicError) +} + // // TYPES: Signature // diff --git a/lib/blockchain.go b/lib/blockchain.go index d4fb4662a..d3b6243b2 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1345,6 +1345,7 @@ func (bc *Blockchain) SetBestChainMap(bestChain []*BlockNode, bestChainMap map[B bc.blockIndex = blockIndex } +// TODO: update to support validating orphan PoS Blocks func (bc *Blockchain) _validateOrphanBlock(desoBlock *MsgDeSoBlock) error { // Error if the block is missing a parent hash or header. if desoBlock.Header == nil { diff --git a/lib/constants.go b/lib/constants.go index 2302aff93..abcada148 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -699,6 +699,11 @@ type DeSoParams struct { // before they are jailed. DefaultJailInactiveValidatorGracePeriodEpochs uint64 + // TODO: add support for putting the drift in global params. + // DefaultBlockTimestampDriftNanoSecs is the default number of nanoseconds + // from the current timestamp that we will allow a PoS block to be submitted. + DefaultBlockTimestampDriftNanoSecs uint64 + ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -1103,6 +1108,9 @@ var DeSoMainnetParams = DeSoParams{ // The number of epochs before an inactive validator is jailed DefaultJailInactiveValidatorGracePeriodEpochs: uint64(48), + // The number of nanoseconds from the current timestamp that we will allow a PoS block to be submitted. 
+ DefaultBlockTimestampDriftNanoSecs: uint64((time.Minute * 10).Nanoseconds()), + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1361,6 +1369,9 @@ var DeSoTestnetParams = DeSoParams{ // The number of epochs before an inactive validator is jailed DefaultJailInactiveValidatorGracePeriodEpochs: uint64(48), + // The number of nanoseconds from the current timestamp that we will allow a PoS block to be submitted. + DefaultBlockTimestampDriftNanoSecs: uint64((time.Minute * 10).Nanoseconds()), + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 217faa767..5bdb19ab2 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -3,13 +3,14 @@ package lib import ( "github.com/golang/glog" "github.com/pkg/errors" + "time" ) // processBlockPoS runs the Fast-Hotstuff block connect and commit rule as follows: -// 1. Validate on an incoming block, its header, its block height, the leader, and its QCs (vote or timeout) -// 2. Store the block in the block index and uncommitted blocks map. -// 3. Determine if we're missing a parent block of this block and any of its parents from the block index. -// If so, return the hash of the missing block. +// 1. Determine if we're missing a parent block of this block and any of its parents from the block index. +// If so, return the hash of the missing block and add this block to the orphans list. +// 2. Validate on an incoming block, its header, its block height, the leader, and its QCs (vote or timeout) +// 3. Store the block in the block index and uncommitted blocks map. // 4. Resolves forks within the last two blocks // 5. Connect the block to the blockchain's tip // 6. 
Run the commit rule - If applicable, flushes the incoming block's grandparent to the DB @@ -17,17 +18,40 @@ import ( // 8. Update the currentView to this new block's view + 1 func (bc *Blockchain) processBlockPoS(desoBlock *MsgDeSoBlock, verifySignatures bool) (_success bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { // TODO: Implement me - // 1. Start with all sanity checks of the block. + // 1. Determine if we're missing a parent block of this block and any of its parents from the block index. + // If so, process the orphan, but don't add to the block index or uncommitted block map. + missingBlockHash, err := bc.validateAncestorsExist(desoBlock) + if err != nil { + return false, false, nil, err + } + if missingBlockHash != nil { + missingBlockHashes := []*BlockHash{missingBlockHash} + var blockHash *BlockHash + blockHash, err = desoBlock.Header.Hash() + // If we fail to get the block hash, this block isn't valid at all, so we + // don't need to worry about adding it to the orphan list or block index. + if err != nil { + return false, true, missingBlockHashes, err + } + // ProcessOrphanBlock validates the block and adds it to the orphan list. + // TODO: update _validateOrphanBlock to perform additional validation required. + if err = bc.ProcessOrphanBlock(desoBlock, blockHash); err != nil { + return false, true, missingBlockHashes, err + } + return false, true, missingBlockHashes, nil + } + + // 2. Start with all sanity checks of the block. // TODO: Check if err is for view > latest committed block view and <= latest uncommitted block. // If so, we need to perform the rest of the validations and then add to our block index. - if err := bc.validateDeSoBlockPoS(desoBlock); err != nil { + if err = bc.validateDeSoBlockPoS(desoBlock); err != nil { } // TODO: Get validator set for current block height. Alternatively, we could do this in // validateQC, but we may need the validator set elsewhere in this function anyway. 
var validatorSet []*ValidatorEntry // 1e. Validate QC - if err := bc.validateQC(desoBlock, validatorSet); err != nil { + if err = bc.validateQC(desoBlock, validatorSet); err != nil { return false, false, nil, err } @@ -64,16 +88,6 @@ func (bc *Blockchain) processBlockPoS(desoBlock *MsgDeSoBlock, verifySignatures return false, false, nil, err } - // 3. Determine if we're missing a parent block of this block and any of its parents from the block index. - // If so, add block to block index and return the hash of the missing block. - missingBlockHash, err := bc.validateAncestorsExist(desoBlock) - if err != nil { - return false, false, nil, err - } - if missingBlockHash != nil { - return false, true, []*BlockHash{missingBlockHash}, nil - } - // 4. Handle reorgs if necessary if bc.shouldReorg(desoBlock) { if err = bc.handleReorg(desoBlock); err != nil { @@ -109,7 +123,7 @@ func (bc *Blockchain) processBlockPoS(desoBlock *MsgDeSoBlock, verifySignatures // the Blockchain struct. func (bc *Blockchain) validateDeSoBlockPoS(desoBlock *MsgDeSoBlock) error { // Surface Level validation of the block - if err := bc.validateBlockGeneral(desoBlock); err != nil { + if err := bc.validateBlockIntegrity(desoBlock); err != nil { return err } // Validate Block Height @@ -130,12 +144,92 @@ func (bc *Blockchain) validateDeSoBlockPoS(desoBlock *MsgDeSoBlock) error { return nil } -// validateBlockGeneral validates the block at a surface level. It checks +// validateBlockIntegrity validates the block at a surface level. It checks // that the timestamp is valid, that the version of the header is valid, // and other general integrity checks (such as not malformed). 
-func (bc *Blockchain) validateBlockGeneral(desoBlock *MsgDeSoBlock) error { - // TODO: Implement me - return errors.New("IMPLEMENT ME") +func (bc *Blockchain) validateBlockIntegrity(desoBlock *MsgDeSoBlock) error { + // First make sure we have a non-nil header + if desoBlock.Header == nil { + return RuleErrorNilBlockHeader + } + + // Make sure we have a prevBlockHash + if desoBlock.Header.PrevBlockHash == nil { + return RuleErrorNilPrevBlockHash + } + + // Timestamp validation + + // Validate that the timestamp is not less than its parent. + parentBlock, exists := bc.blockIndex[*desoBlock.Header.PrevBlockHash] + if !exists { + // Note: this should never happen as we only call this function after + // we've validated that all ancestors exist in the block index. + return RuleErrorMissingParentBlock + } + if desoBlock.Header.TstampNanoSecs < parentBlock.Header.TstampNanoSecs { + return RuleErrorPoSBlockTstampNanoSecsTooOld + } + // TODO: Add support for putting the drift into global params. 
+ if desoBlock.Header.TstampNanoSecs > uint64(time.Now().UnixNano())+bc.params.DefaultBlockTimestampDriftNanoSecs { + return RuleErrorPoSBlockTstampNanoSecsInFuture + } + + // Header validation + if desoBlock.Header.Version != HeaderVersion2 { + return RuleErrorInvalidPoSBlockHeaderVersion + } + + // Malformed block checks + // Require header to have either vote or timeout QC + isTimeoutQCEmpty := desoBlock.Header.ValidatorsTimeoutAggregateQC.isEmpty() + isVoteQCEmpty := desoBlock.Header.ValidatorsVoteQC.isEmpty() + if isTimeoutQCEmpty && isVoteQCEmpty { + return RuleErrorNoTimeoutOrVoteQC + } + + if !isTimeoutQCEmpty && !isVoteQCEmpty { + return RuleErrorBothTimeoutAndVoteQC + } + + if !isTimeoutQCEmpty && len(desoBlock.Txns) != 0 { + return RuleErrorTimeoutQCWithTransactions + } + + if desoBlock.Header.ProposerVotingPublicKey.IsEmpty() { + return RuleErrorInvalidProposerVotingPublicKey + } + + if desoBlock.Header.ProposerPublicKey == nil || desoBlock.Header.ProposerPublicKey.IsZeroPublicKey() { + return RuleErrorInvalidProposerPublicKey + } + + if desoBlock.Header.ProposerRandomSeedHash.isEmpty() { + return RuleErrorInvalidRandomSeedHash + } + + merkleRoot := desoBlock.Header.TransactionMerkleRoot + + // We only want to check the merkle root if we have more than 0 transactions. + if len(desoBlock.Txns) > 0 { + if merkleRoot == nil { + return RuleErrorNilMerkleRoot + } + computedMerkleRoot, _, err := ComputeMerkleRoot(desoBlock.Txns) + if err != nil { + return errors.Wrapf(err, "validateBlockIntegrity: Problem computing merkle root") + } + if !merkleRoot.IsEqual(computedMerkleRoot) { + return RuleErrorInvalidMerkleRoot + } + } else { + if merkleRoot != nil { + return RuleErrorNoTxnsWithMerkleRoot + } + } + + // TODO: What other checks do we need to do here? + return nil } // validateBlockHeight validates the block height for a given block. 
It checks @@ -248,3 +342,25 @@ func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks panic("GetUncommittedTipView: Not implemented yet") } + +func (bc *Blockchain) GetBestChainTip() *BlockNode { + return bc.bestChain[len(bc.bestChain)-1] +} + +const ( + RuleErrorNilBlockHeader RuleError = "RuleErrorNilBlockHeader" + RuleErrorNilPrevBlockHash RuleError = "RuleErrorNilPrevBlockHash" + RuleErrorPoSBlockTstampNanoSecsTooOld RuleError = "RuleErrorPoSBlockTstampNanoSecsTooOld" + RuleErrorPoSBlockTstampNanoSecsInFuture RuleError = "RuleErrorPoSBlockTstampNanoSecsInFuture" + RuleErrorInvalidPoSBlockHeaderVersion RuleError = "RuleErrorInvalidPoSBlockHeaderVersion" + RuleErrorNoTimeoutOrVoteQC RuleError = "RuleErrorNoTimeoutOrVoteQC" + RuleErrorBothTimeoutAndVoteQC RuleError = "RuleErrorBothTimeoutAndVoteQC" + RuleErrorTimeoutQCWithTransactions RuleError = "RuleErrorTimeoutQCWithTransactions" + RuleErrorMissingParentBlock RuleError = "RuleErrorMissingParentBlock" + RuleErrorNilMerkleRoot RuleError = "RuleErrorNilMerkleRoot" + RuleErrorInvalidMerkleRoot RuleError = "RuleErrorInvalidMerkleRoot" + RuleErrorNoTxnsWithMerkleRoot RuleError = "RuleErrorNoTxnsWithMerkleRoot" + RuleErrorInvalidProposerVotingPublicKey RuleError = "RuleErrorInvalidProposerVotingPublicKey" + RuleErrorInvalidProposerPublicKey RuleError = "RuleErrorInvalidProposerPublicKey" + RuleErrorInvalidRandomSeedHash RuleError = "RuleErrorInvalidRandomSeedHash" +) diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go new file mode 100644 index 000000000..89ef98d23 --- /dev/null +++ b/lib/pos_blockchain_test.go @@ -0,0 +1,221 @@ +//go:build relic + +package lib + +import ( + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections/bitset" + "github.com/stretchr/testify/require" + "testing" + "time" +) + +func TestValidateBlockIntegrity(t *testing.T) { + bc, params, _ := 
NewTestBlockchain(t) + // TODO: update for PoS + mempool, miner := NewTestMiner(t, bc, params, true) + + // Mine a few blocks to give the senderPkString some money. + var err error + for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + // Create a block with a valid header. + randomPayload := RandomBytes(256) + randomSeedHashBytes := RandomBytes(32) + randomSeedHash := &RandomSeedHash{} + _, err = randomSeedHash.FromBytes(randomSeedHashBytes) + require.NoError(t, err) + randomBLSPrivateKey := _generateRandomBLSPrivateKey(t) + signature, err := randomBLSPrivateKey.Sign(randomPayload) + require.NoError(t, err) + block := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + Version: 2, + TstampNanoSecs: bc.GetBestChainTip().Header.TstampNanoSecs + 10, + Height: 2, + ProposedInView: 2, + PrevBlockHash: bc.GetBestChainTip().Hash, + ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ + TimedOutView: 2, + ValidatorsHighQC: &QuorumCertificate{ + BlockHash: bc.GetBestChainTip().Hash, + ProposedInView: 1, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + }, + ValidatorsTimeoutHighQCViews: []uint64{28934}, + ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + }, + ProposerRandomSeedHash: randomSeedHash, + ProposerPublicKey: NewPublicKey(RandomBytes(33)), + ProposerVotingPublicKey: randomBLSPrivateKey.PublicKey(), + }, + Txns: nil, + } + + // Validate the block with a valid timeout QC and header. + err = bc.validateBlockIntegrity(block) + // There should be no error. + require.Nil(t, err) + + // Timeout QC shouldn't have any transactions + block.Txns = []*MsgDeSoTxn{ + { // The validation just checks the length of transactions. + // Connecting the block elsewhere will ensure that the transactions themselves are valid. 
+ TxInputs: nil, + }, + } + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorTimeoutQCWithTransactions) + + // Timeout QC shouldn't have a merkle root + block.Txns = nil + block.Header.TransactionMerkleRoot = &ZeroBlockHash + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorNoTxnsWithMerkleRoot) + + // Make sure block can't have both timeout and vote QC. + validatorVoteQC := &QuorumCertificate{ + BlockHash: bc.GetBestChainTip().Hash, + ProposedInView: 1, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + } + block.Header.ValidatorsVoteQC = validatorVoteQC + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorBothTimeoutAndVoteQC) + + // Make sure block has either timeout or vote QC. + block.Header.ValidatorsTimeoutAggregateQC = nil + block.Header.ValidatorsVoteQC = nil + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorNoTimeoutOrVoteQC) + + // Reset validator vote QC. + block.Header.ValidatorsVoteQC = validatorVoteQC + + // Validate the block with a valid vote QC and header. Vote QCs must have at least 1 transaction. + txn := _assembleBasicTransferTxnFullySigned(t, bc, 100, 1000, + senderPkString, recipientPkString, senderPrivString, nil) + block.Txns = []*MsgDeSoTxn{ + // The validation just checks the length of transactions. + // Connecting the block elsewhere will ensure that the transactions themselves are valid. + txn, + } + merkleRoot, _, err := ComputeMerkleRoot(block.Txns) + require.NoError(t, err) + block.Header.TransactionMerkleRoot = merkleRoot + // There should be no error. 
+ err = bc.validateBlockIntegrity(block) + require.Nil(t, err) + + // Block must have non-nil Merkle root iff we have non-zero transactions + block.Header.TransactionMerkleRoot = nil + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorNilMerkleRoot) + + // Block must have a matching merkle root + block.Header.TransactionMerkleRoot = &ZeroBlockHash + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorInvalidMerkleRoot) + + // Vote QC with no transactions and no merkle root is valid + block.Header.TransactionMerkleRoot = nil + block.Txns = nil + err = bc.validateBlockIntegrity(block) + require.Nil(t, err) + + // Vote QC with no transactions but includes a merkle is invalid + block.Header.TransactionMerkleRoot = merkleRoot + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorNoTxnsWithMerkleRoot) + + // Reset transactions + block.Txns = []*MsgDeSoTxn{txn} + + // Block must have valid proposer voting public key + block.Header.ProposerVotingPublicKey = nil + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorInvalidProposerVotingPublicKey) + + block.Header.ProposerVotingPublicKey = &bls.PublicKey{} + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorInvalidProposerVotingPublicKey) + + // Reset proposer voting public key + block.Header.ProposerVotingPublicKey = randomBLSPrivateKey.PublicKey() + + // Block must have valid proposer public key + block.Header.ProposerPublicKey = nil + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorInvalidProposerPublicKey) + + block.Header.ProposerPublicKey = &ZeroPublicKey + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorInvalidProposerPublicKey) + + block.Header.ProposerPublicKey = NewPublicKey(RandomBytes(33)) + + // Block must have valid proposer random seed hash + block.Header.ProposerRandomSeedHash = nil + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, 
RuleErrorInvalidRandomSeedHash) + + block.Header.ProposerRandomSeedHash = &RandomSeedHash{} + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorInvalidRandomSeedHash) + + block.Header.ProposerRandomSeedHash = randomSeedHash + + // Timestamp validations + // Block timestamp must be greater than the previous block timestamp + block.Header.TstampNanoSecs = bc.GetBestChainTip().Header.GetTstampSecs() - 1 + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorPoSBlockTstampNanoSecsTooOld) + + // Block timestamps can't be in the future. + block.Header.TstampNanoSecs = uint64(time.Now().UnixNano() + (11 * time.Minute).Nanoseconds()) + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorPoSBlockTstampNanoSecsInFuture) + + // Revert the Header's timestamp + block.Header.TstampNanoSecs = bc.GetBestChainTip().Header.TstampNanoSecs + 10 + + // Block Header version must be 2 + block.Header.Version = 1 + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorInvalidPoSBlockHeaderVersion) + + // Revert block header version + block.Header.Version = 2 + + // Nil prev block hash not allowed + block.Header.PrevBlockHash = nil + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorNilPrevBlockHash) + + // Parent must exist in the block index. 
+ block.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorMissingParentBlock) + + // Nil block header not allowed + block.Header = nil + err = bc.validateBlockIntegrity(block) + require.Equal(t, err, RuleErrorNilBlockHeader) +} + +func _generateRandomBLSPrivateKey(t *testing.T) *bls.PrivateKey { + privateKey, err := bls.NewPrivateKey() + require.NoError(t, err) + return privateKey +} diff --git a/lib/pos_network.go b/lib/pos_network.go index 7a01d4463..a3e4ed688 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -328,6 +328,14 @@ func (qc *QuorumCertificate) Eq(other *QuorumCertificate) bool { return bytes.Equal(qcEncodedBytes, otherEncodedBytes) } +func (qc *QuorumCertificate) isEmpty() bool { + return qc == nil || + qc.BlockHash == nil || + qc.ProposedInView == 0 || + qc.ValidatorsVoteAggregatedSignature == nil || + qc.ValidatorsVoteAggregatedSignature.Signature == nil +} + func (qc *QuorumCertificate) ToBytes() ([]byte, error) { retBytes := []byte{} @@ -620,6 +628,8 @@ func (aggQC *TimeoutAggregateQuorumCertificate) FromBytes(rr io.Reader) error { func (aggQC *TimeoutAggregateQuorumCertificate) isEmpty() bool { return aggQC == nil || aggQC.TimedOutView == 0 || + aggQC.ValidatorsHighQC.isEmpty() || + len(aggQC.ValidatorsTimeoutHighQCViews) == 0 || aggQC.ValidatorsTimeoutAggregatedSignature == nil || aggQC.ValidatorsTimeoutAggregatedSignature.Signature == nil } diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index 567025199..1b8eb03fc 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -55,6 +55,10 @@ func DecodeRandomSeedHash(rr io.Reader) (*RandomSeedHash, error) { return (&RandomSeedHash{}).FromBytes(randomSeedHashBytes) } +func (randomSeedHash *RandomSeedHash) isEmpty() bool { + return randomSeedHash == nil || randomSeedHash.Eq(&RandomSeedHash{}) +} + // // UTXO VIEW UTILS // From 86e982c4b67e9a1e059a1512f72f85ea58d2369e Mon Sep 17 00:00:00 2001 
From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:13:57 -0400 Subject: [PATCH 248/762] add validateBlockHeight function and tests (#706) * add validateBlockHeight function and tests * address SA feedback --------- Co-authored-by: Lazy Nina <> --- lib/pos_blockchain.go | 26 ++++++++++++--- lib/pos_blockchain_test.go | 67 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 5 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 5bdb19ab2..c186935f7 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -232,12 +232,25 @@ func (bc *Blockchain) validateBlockIntegrity(desoBlock *MsgDeSoBlock) error { return nil } -// validateBlockHeight validates the block height for a given block. It checks -// that this block height is exactly one greater than the current block height. -// TODO: Are we sure that's the correct validation here? +// validateBlockHeight validates the block height for a given block. First, +// it checks that we've passed the PoS cutover fork height. Then it checks +// that this block height is exactly one greater than its parent's block height. func (bc *Blockchain) validateBlockHeight(desoBlock *MsgDeSoBlock) error { - // TODO: Implement me - return errors.New("IMPLEMENT ME") + blockHeight := desoBlock.Header.Height + if blockHeight < uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + return RuleErrorPoSBlockBeforeCutoverHeight + } + // Validate that the block height is exactly one greater than its parent. + parentBlock, exists := bc.blockIndex[*desoBlock.Header.PrevBlockHash] + if !exists { + // Note: this should never happen as we only call this function after + // we've validated that all ancestors exist in the block index. 
+ return RuleErrorMissingParentBlock + } + if desoBlock.Header.Height != parentBlock.Header.Height+1 { + return RuleErrorInvalidPoSBlockHeight + } + return nil } // validateBlockView validates the view for a given block. First, it checks that @@ -363,4 +376,7 @@ const ( RuleErrorInvalidProposerVotingPublicKey RuleError = "RuleErrorInvalidProposerVotingPublicKey" RuleErrorInvalidProposerPublicKey RuleError = "RuleErrorInvalidProposerPublicKey" RuleErrorInvalidRandomSeedHash RuleError = "RuleErrorInvalidRandomSeedHash" + + RuleErrorInvalidPoSBlockHeight RuleError = "RuleErrorInvalidPoSBlockHeight" + RuleErrorPoSBlockBeforeCutoverHeight RuleError = "RuleErrorPoSBlockBeforeCutoverHeight" ) diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 89ef98d23..d7ba6bcc4 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -214,6 +214,73 @@ func TestValidateBlockIntegrity(t *testing.T) { require.Equal(t, err, RuleErrorNilBlockHeader) } +func TestValidateBlockHeight(t *testing.T) { + bc, _, _ := NewTestBlockchain(t) + hash := NewBlockHash(RandomBytes(32)) + nowTimestamp := uint64(time.Now().UnixNano()) + genesisBlock := NewPoSBlockNode(nil, hash, 1, &MsgDeSoHeader{ + Version: 2, + TstampNanoSecs: nowTimestamp - uint64(time.Minute.Nanoseconds()), + Height: 1, + ProposedInView: 1, + ValidatorsVoteQC: nil, + ValidatorsTimeoutAggregateQC: nil, + }, StatusBlockValidated, UNCOMMITTED) + bc.bestChain = []*BlockNode{genesisBlock} + bc.blockIndex[*genesisBlock.Hash] = genesisBlock + // Create a block with a valid header. 
+ randomPayload := RandomBytes(256) + randomBLSPrivateKey := _generateRandomBLSPrivateKey(t) + signature, err := randomBLSPrivateKey.Sign(randomPayload) + require.NoError(t, err) + block := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + PrevBlockHash: genesisBlock.Hash, + Version: 2, + TstampNanoSecs: uint64(time.Now().UnixNano()) - 10, + Height: 2, + ProposedInView: 1, + ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ + TimedOutView: 2, + ValidatorsHighQC: &QuorumCertificate{ + BlockHash: bc.GetBestChainTip().Hash, + ProposedInView: bc.GetBestChainTip().Header.ProposedInView, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + }, + ValidatorsTimeoutHighQCViews: []uint64{28934}, + ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + }, + }, + Txns: nil, + } + + // validate that we've cutover to PoS + bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 3 + err = bc.validateBlockHeight(block) + require.Equal(t, err, RuleErrorPoSBlockBeforeCutoverHeight) + + // Update the fork height + bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 0 + + err = bc.validateBlockHeight(block) + require.Nil(t, err) + + block.Header.Height = 1 + err = bc.validateBlockHeight(block) + require.Equal(t, err, RuleErrorInvalidPoSBlockHeight) + + block.Header.Height = 2 + bc.blockIndex = map[BlockHash]*BlockNode{} + err = bc.validateBlockHeight(block) + require.Equal(t, err, RuleErrorMissingParentBlock) +} + func _generateRandomBLSPrivateKey(t *testing.T) *bls.PrivateKey { privateKey, err := bls.NewPrivateKey() require.NoError(t, err) From 91dc8dcc4ec7210f2e3b63d4f607d3b3820d6907 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:18:16 -0400 Subject: [PATCH 249/762] implement add block to block index and get uncommitted tip view (#707) * 
implement add block to block index and get uncommitted tip view * fix error string * add test * address SA feedback * fix test * address SA feedback --------- Co-authored-by: Lazy Nina <> --- lib/blockchain.go | 5 +-- lib/pos_blockchain.go | 55 +++++++++++++++++++++++++++--- lib/pos_blockchain_test.go | 68 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 122 insertions(+), 6 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index d3b6243b2..fe1cc5de7 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -696,8 +696,9 @@ func NewBlockchain( eventManager: eventManager, archivalMode: archivalMode, - blockIndex: make(map[BlockHash]*BlockNode), - bestChainMap: make(map[BlockHash]*BlockNode), + blockIndex: make(map[BlockHash]*BlockNode), + uncommittedBlocksMap: make(map[BlockHash]*MsgDeSoBlock), + bestChainMap: make(map[BlockHash]*BlockNode), bestHeaderChainMap: make(map[BlockHash]*BlockNode), diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index c186935f7..b7bf88b8f 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1,6 +1,7 @@ package lib import ( + "github.com/deso-protocol/core/collections" "github.com/golang/glog" "github.com/pkg/errors" "time" @@ -302,10 +303,19 @@ func (bc *Blockchain) validateAncestorsExist(desoBlock *MsgDeSoBlock) (_missingB return nil, errors.New("IMPLEMENT ME") } -// addBlockToBlockIndex adds the block to the block index. +// addBlockToBlockIndex adds the block to the block index and uncommitted blocks map. func (bc *Blockchain) addBlockToBlockIndex(desoBlock *MsgDeSoBlock) error { - // TODO: Implement me. - return errors.New("IMPLEMENT ME") + hash, err := desoBlock.Hash() + if err != nil { + return errors.Wrapf(err, "addBlockToBlockIndex: Problem hashing block %v", desoBlock) + } + // Need to get parent block node from block index + prevBlock := bc.blockIndex[*desoBlock.Header.PrevBlockHash] + // TODO: What should the block status be here? Validated? What combo is correct? 
Need to check in with Diamondhands. + bc.blockIndex[*hash] = NewPoSBlockNode(prevBlock, hash, uint32(desoBlock.Header.Height), desoBlock.Header, StatusHeaderValidated|StatusBlockValidated, UNCOMMITTED) + + bc.uncommittedBlocksMap[*hash] = desoBlock + return nil } // shouldReorg determines if we should reorg to the block provided. We should reorg if @@ -353,13 +363,50 @@ func (bc *Blockchain) updateCurrentView(desoBlock *MsgDeSoBlock) { func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks - panic("GetUncommittedTipView: Not implemented yet") + highestCommittedBlock, committedBlockIndex := bc.getHighestCommittedBlock() + if highestCommittedBlock == nil { + // This is an edge case we'll never hit in practice since all the PoW blocks + // are committed. + return nil, errors.New("GetUncommittedTipView: No committed blocks found") + } + utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) + if err != nil { + return nil, errors.Wrapf(err, "GetUncommittedTipView: Problem initializing UtxoView") + } + if committedBlockIndex == len(bc.bestChain)-1 { + return utxoView, nil + } + for ii := committedBlockIndex + 1; ii < len(bc.bestChain); ii++ { + // We need to get these blocks from the uncommitted blocks map + fullBlock, exists := bc.uncommittedBlocksMap[*bc.bestChain[ii].Hash] + if !exists { + return nil, errors.Errorf("GetUncommittedTipView: Block %v not found in block index", bc.bestChain[ii].Hash) + } + txnHashes := collections.Transform(fullBlock.Txns, func(txn *MsgDeSoTxn) *BlockHash { + return txn.Hash() + }) + _, err = utxoView.ConnectBlock(fullBlock, txnHashes, false, nil, fullBlock.Header.Height) + if err != nil { + hash, _ := fullBlock.Hash() + return nil, errors.Wrapf(err, "GetUncommittedTipView: Problem connecting block hash %v", hash.String()) + } + } + return utxoView, nil } func (bc *Blockchain) GetBestChainTip() *BlockNode { return 
bc.bestChain[len(bc.bestChain)-1] } +func (bc *Blockchain) getHighestCommittedBlock() (*BlockNode, int) { + for ii := len(bc.bestChain) - 1; ii >= 0; ii-- { + if bc.bestChain[ii].CommittedStatus == COMMITTED { + return bc.bestChain[ii], ii + } + } + return nil, 0 +} + const ( RuleErrorNilBlockHeader RuleError = "RuleErrorNilBlockHeader" RuleErrorNilPrevBlockHash RuleError = "RuleErrorNilPrevBlockHash" diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index d7ba6bcc4..89e2ef6b8 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -3,6 +3,7 @@ package lib import ( + "bytes" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections/bitset" "github.com/stretchr/testify/require" @@ -281,6 +282,73 @@ func TestValidateBlockHeight(t *testing.T) { require.Equal(t, err, RuleErrorMissingParentBlock) } +func TestAddBlockToBlockIndex(t *testing.T) { + bc, _, _ := NewTestBlockchain(t) + hash := NewBlockHash(RandomBytes(32)) + genesisBlockNode := NewPoSBlockNode(nil, hash, 1, &MsgDeSoHeader{ + Version: 2, + Height: 1, + ProposedInView: 1, + ValidatorsVoteQC: nil, + ValidatorsTimeoutAggregateQC: nil, + }, StatusBlockValidated, COMMITTED) + _ = genesisBlockNode + derefedHash := *hash + bc.blockIndex = map[BlockHash]*BlockNode{ + derefedHash: genesisBlockNode, + } + proposerVotingPublicKey := _generateRandomBLSPrivateKey(t) + dummySig, err := proposerVotingPublicKey.Sign(RandomBytes(32)) + require.NoError(t, err) + block := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + Version: 2, + PrevBlockHash: hash, + TstampNanoSecs: uint64(time.Now().UnixNano()), + Height: 2, + ProposerPublicKey: NewPublicKey(RandomBytes(33)), + ProposerVotingPublicKey: proposerVotingPublicKey.PublicKey(), + ProposerRandomSeedHash: &RandomSeedHash{}, + ProposedInView: 1, + ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ + TimedOutView: 2, + ValidatorsHighQC: &QuorumCertificate{ + BlockHash: NewBlockHash(RandomBytes(32)), + 
ProposedInView: 1, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + SignersList: bitset.NewBitset(), + Signature: dummySig, + }, + }, + ValidatorsTimeoutHighQCViews: []uint64{28934}, + ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ + SignersList: bitset.NewBitset(), + Signature: dummySig, + }, + }, + ProposerVotePartialSignature: dummySig, + }, + Txns: nil, + } + err = bc.addBlockToBlockIndex(block) + require.Nil(t, err) + newHash, err := block.Hash() + require.NoError(t, err) + // Check the block index + blockNode, exists := bc.blockIndex[*newHash] + require.True(t, exists) + require.True(t, bytes.Equal(blockNode.Hash[:], newHash[:])) + + // Check the uncommitted blocks map + uncommittedBlock, uncommittedExists := bc.uncommittedBlocksMap[*newHash] + require.True(t, uncommittedExists) + uncommittedBytes, err := uncommittedBlock.ToBytes(false) + require.NoError(t, err) + origBlockBytes, err := block.ToBytes(false) + require.NoError(t, err) + require.True(t, bytes.Equal(uncommittedBytes, origBlockBytes)) +} + func _generateRandomBLSPrivateKey(t *testing.T) *bls.PrivateKey { privateKey, err := bls.NewPrivateKey() require.NoError(t, err) From 3a151d5750ca704df56b3558461efd3eccb60421 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:23:47 -0400 Subject: [PATCH 250/762] ln/validate-block-view-pos (#713) * ln/validate-block-view-pos * add test for validateBlockView * address SA feedback * address SA feedback part 1 --------- Co-authored-by: Lazy Nina <> --- lib/pos_blockchain.go | 28 +++++++++++-- lib/pos_blockchain_test.go | 85 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+), 3 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index b7bf88b8f..36eb7139e 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -257,15 +257,34 @@ func (bc *Blockchain) validateBlockHeight(desoBlock *MsgDeSoBlock) error { // validateBlockView 
validates the view for a given block. First, it checks that // the view is greater than the latest committed block view. If not, // we return an error indicating that we'll never accept this block. Next, -// it checks that the view is less than or equal to the latest uncommitted block. +// it checks that the view is less than or equal to its parent. // If not, we return an error indicating that we'll want to add this block as an // orphan. Then it will check that the view is exactly one greater than the // latest uncommitted block if we have a regular vote QC. If this block has a // timeout QC, it will check that the view is at least greater than the latest // uncommitted block's view + 1. func (bc *Blockchain) validateBlockView(desoBlock *MsgDeSoBlock) error { - // TODO: Implement me - return errors.New("IMPLEMENT ME") + // Validate that the view is greater than the latest uncommitted block. + parentBlock, exists := bc.blockIndex[*desoBlock.Header.PrevBlockHash] + if !exists { + // Note: this should never happen as we only call this function after + // we've validated that all ancestors exist in the block index. + return RuleErrorMissingParentBlock + } + // If our current block has a vote QC, then we need to validate that the + // view is exactly one greater than the latest uncommitted block. + if desoBlock.Header.ValidatorsTimeoutAggregateQC.isEmpty() { + if desoBlock.Header.ProposedInView != parentBlock.Header.ProposedInView+1 { + return RuleErrorPoSVoteBlockViewNotOneGreaterThanParent + } + } else { + // If our current block has a timeout QC, then we need to validate that the + // view is strictly greater than the latest uncommitted block's view. 
+ if desoBlock.Header.ProposedInView <= parentBlock.Header.ProposedInView { + return RuleErrorPoSTimeoutBlockViewNotGreaterThanParent + } + } + return nil } // validateBlockLeader validates that the proposer is the expected proposer for the @@ -426,4 +445,7 @@ const ( RuleErrorInvalidPoSBlockHeight RuleError = "RuleErrorInvalidPoSBlockHeight" RuleErrorPoSBlockBeforeCutoverHeight RuleError = "RuleErrorPoSBlockBeforeCutoverHeight" + + RuleErrorPoSVoteBlockViewNotOneGreaterThanParent RuleError = "RuleErrorPoSVoteBlockViewNotOneGreaterThanParent" + RuleErrorPoSTimeoutBlockViewNotGreaterThanParent RuleError = "RuleErrorPoSTimeoutBlockViewNotGreaterThanParent" ) diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 89e2ef6b8..0d9d8ee14 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -349,6 +349,91 @@ func TestAddBlockToBlockIndex(t *testing.T) { require.True(t, bytes.Equal(uncommittedBytes, origBlockBytes)) } +func TestValidateBlockView(t *testing.T) { + bc, _, _ := NewTestBlockchain(t) + hash1 := NewBlockHash(RandomBytes(32)) + hash2 := NewBlockHash(RandomBytes(32)) + genesisNode := NewPoSBlockNode(nil, hash1, 1, &MsgDeSoHeader{ + Version: 2, + Height: 1, + ProposedInView: 1, + }, StatusBlockValidated, COMMITTED) + block2 := NewPoSBlockNode(genesisNode, hash2, 2, &MsgDeSoHeader{ + Version: 2, + Height: 2, + ProposedInView: 2, + ValidatorsVoteQC: nil, + ValidatorsTimeoutAggregateQC: nil, + }, StatusBlockValidated, UNCOMMITTED) + bc.bestChain = []*BlockNode{ + genesisNode, + block2, + } + bc.blockIndex = map[BlockHash]*BlockNode{ + *hash1: genesisNode, + *hash2: block2, + } + randomPayload := RandomBytes(256) + randomBLSPrivateKey := _generateRandomBLSPrivateKey(t) + signature, err := randomBLSPrivateKey.Sign(randomPayload) + voteQC := &QuorumCertificate{ + BlockHash: bc.GetBestChainTip().Hash, + ProposedInView: 1, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: 
bitset.NewBitset(), + }, + } + require.NoError(t, err) + block := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + PrevBlockHash: hash2, + Version: 2, + TstampNanoSecs: uint64(time.Now().UnixNano()) - 10, + Height: 2, + ProposedInView: 1, + ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ + TimedOutView: 2, + ValidatorsHighQC: voteQC, + ValidatorsTimeoutHighQCViews: []uint64{28934}, + ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + }, + }, + Txns: nil, + } + + block.Header.ProposedInView = 2 + + // Blocks with timeout QCs must have a view strictly greater than the parent. + err = bc.validateBlockView(block) + require.Equal(t, err, RuleErrorPoSTimeoutBlockViewNotGreaterThanParent) + + // Any arbitrary number GREATER than the parent's view is valid. + block.Header.ProposedInView = 10 + err = bc.validateBlockView(block) + require.Nil(t, err) + + // Now we set the timeout QC to nil and provide a vote QC, with height = 2 + block.Header.ValidatorsTimeoutAggregateQC = nil + block.Header.ValidatorsVoteQC = voteQC + block.Header.ProposedInView = 2 + err = bc.validateBlockView(block) + require.Equal(t, err, RuleErrorPoSVoteBlockViewNotOneGreaterThanParent) + + // An arbitrary number greater than its parent's view should fail. + block.Header.ProposedInView = 10 + err = bc.validateBlockView(block) + require.Equal(t, err, RuleErrorPoSVoteBlockViewNotOneGreaterThanParent) + + // Exactly one greater w/ vote QC should pass. 
+ block.Header.ProposedInView = 3 + err = bc.validateBlockView(block) + require.Nil(t, err) +} + func _generateRandomBLSPrivateKey(t *testing.T) *bls.PrivateKey { privateKey, err := bls.NewPrivateKey() require.NoError(t, err) From da86ddb60e910668cb2b17b0071c3c8ffafedfa3 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:25:38 -0400 Subject: [PATCH 251/762] add currentView in function sig (#726) Co-authored-by: Lazy Nina <> --- lib/pos_blockchain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 36eb7139e..5c96e2967 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -17,7 +17,7 @@ import ( // 6. Run the commit rule - If applicable, flushes the incoming block's grandparent to the DB // 7. Prune in-memory struct holding uncommitted block. // 8. Update the currentView to this new block's view + 1 -func (bc *Blockchain) processBlockPoS(desoBlock *MsgDeSoBlock, verifySignatures bool) (_success bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { +func (bc *Blockchain) processBlockPoS(desoBlock *MsgDeSoBlock, currentView uint64, verifySignatures bool) (_success bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { // TODO: Implement me // 1. Determine if we're missing a parent block of this block and any of its parents from the block index. // If so, process the orphan, but don't add to the block index or uncommitted block map. 
From e557d12015714a5edfb48fde681aeedbbe869edf Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 11 Oct 2023 21:27:11 -0400 Subject: [PATCH 252/762] Use Explicit Flag To Track When Crank Timer Has Elapsed (#724) * Use Explicit Flag To Track When Crank Timer Has Elapsed * Address Nina's review --- consensus/event_loop.go | 12 +++++++----- consensus/types.go | 9 +++++---- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index d50a7f482..ec3a4ee8f 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -65,6 +65,7 @@ func (fc *FastHotStuffEventLoop) Init( fc.currentView = tip.Block.GetView() + 1 // Reset QC construction status for the current view + fc.hasCrankTimerRunForCurrentView = false fc.hasConstructedQCInCurrentView = false // Reset all internal data structures for votes and timeouts @@ -101,6 +102,7 @@ func (fc *FastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { fc.currentView++ // Reset QC construction status for the current view + fc.hasCrankTimerRunForCurrentView = false fc.hasConstructedQCInCurrentView = false // Evict all stale votes and timeouts @@ -142,6 +144,7 @@ func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidators, safeBl fc.currentView = fc.tip.block.GetView() + 1 // Reset QC construction status for the current view + fc.hasCrankTimerRunForCurrentView = false fc.hasConstructedQCInCurrentView = false // Evict all stale votes and timeouts @@ -268,7 +271,7 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { // Check if the crank timer has elapsed or the event loop has constructed a QC in the current view. // If so, then there's nothing more to do. 
- if fc.crankTimerTask.IsScheduled() || fc.hasConstructedQCInCurrentView { + if !fc.hasCrankTimerRunForCurrentView || fc.hasConstructedQCInCurrentView { return nil } @@ -353,7 +356,7 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) // Check if the crank timer has elapsed or the event loop has constructed a QC in the current view. // If so, then there's nothing more to do. - if fc.crankTimerTask.IsScheduled() || fc.hasConstructedQCInCurrentView { + if !fc.hasCrankTimerRunForCurrentView || fc.hasConstructedQCInCurrentView { return nil } @@ -472,9 +475,8 @@ func (fc *FastHotStuffEventLoop) onCrankTimerTaskExecuted(blockConstructionView return } - // Mark the crank timer task as not scheduled, so we can verify from the rest of the - // event loop when the timer has elapsed. - defer fc.crankTimerTask.Cancel() + // Mark that the crank timer has elapsed + fc.hasCrankTimerRunForCurrentView = true // Check if the conditions are met to construct a QC from votes for the chain tip. If so, // we send a signal to the server and cancel the crank timer task. The server will diff --git a/consensus/types.go b/consensus/types.go index a6a885619..c0c841899 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -155,10 +155,11 @@ type FastHotStuffEventLoop struct { // the timeout signal will be triggered for this view. currentView uint64 - // Signifies whether or not the event loop has constructed a QC or timeout QC for the current view. - // This is an optimization that is useful to prevent the event loop from signaling the server multiple - // times for the same view. - hasConstructedQCInCurrentView bool + // These track whether the event loop has already run the crank timer for the current view and + // constructed a QC for the current view. They ensure that we only attempt to construct a QC once + // the crank timer has elapsed, and only signal for QC construction once per view. 
+ hasCrankTimerRunForCurrentView bool + hasConstructedQCInCurrentView bool // Block hash of the current tip of the block-chain. tip blockWithValidatorLookup From 83078223c8f2bd32fae83c9e974d7184df69a30a Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 11 Oct 2023 21:49:45 -0400 Subject: [PATCH 253/762] Add Byte Comparison Utility for BlockHash type (#725) --- consensus/event_loop.go | 3 +-- consensus/utils.go | 8 ++++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index ec3a4ee8f..3fe7e6504 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -840,9 +840,8 @@ func (fc *FastHotStuffEventLoop) fetchSafeBlockInfo(blockHash BlockHash) ( // number of uncommitted blocks in the blockchain. During steady stake, it will have a size of 3 blocks // (one committed, two uncommitted). In the worse case, where the network has an unlucky series of // timeout -> block -> timeout -> block,... it can still be expected to have < 10 blocks. 
- blockHashValue := blockHash.GetValue() for _, block := range fc.safeBlocks { - if block.block.GetBlockHash().GetValue() == blockHashValue { + if isEqualBlockHashes(block.block.GetBlockHash(), blockHash) { return true, block.block, block.validatorSet, block.validatorLookup } } diff --git a/consensus/utils.go b/consensus/utils.go index 3c69ba801..1c6dbeb5f 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -1,6 +1,7 @@ package consensus import ( + "bytes" "encoding/binary" "reflect" @@ -252,3 +253,10 @@ func isSuperMajorityStake(stake *uint256.Int, totalStake *uint256.Int) bool { // Check if 3Cq - 2N - 1 >= 0 return superMajorityConditionSum.Sign() >= 0 } + +func isEqualBlockHashes(hash1 BlockHash, hash2 BlockHash) bool { + hash1Value := hash1.GetValue() + hash2Value := hash2.GetValue() + + return bytes.Equal(hash1Value[:], hash2Value[:]) +} From 2381c89c3ca7ab0db9b68c3398e30b483b798ac7 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 11 Oct 2023 21:54:09 -0400 Subject: [PATCH 254/762] Shuffle Consensus Event Loop Utils for Integration Testing (#720) * Shuffle Consensus Event Loop Utils for Integraiton Testing Adapt reference implemetation * Fix unit tests --- collections/map.go | 9 +++ consensus/types_internal.go | 12 ++-- consensus/utils.go | 113 ++++++++++++++++++++++++++++++++++++ consensus/utils_test.go | 113 ------------------------------------ 4 files changed, 130 insertions(+), 117 deletions(-) create mode 100644 collections/map.go diff --git a/collections/map.go b/collections/map.go new file mode 100644 index 000000000..bf445215f --- /dev/null +++ b/collections/map.go @@ -0,0 +1,9 @@ +package collections + +func MapValues[TKey comparable, TValue any](m map[TKey]TValue) []TValue { + var result []TValue + for _, val := range m { + result = append(result, val) + } + return result +} diff --git a/consensus/types_internal.go b/consensus/types_internal.go index f73941c96..508c321dc 100644 --- 
a/consensus/types_internal.go +++ b/consensus/types_internal.go @@ -106,10 +106,11 @@ func (as *aggregatedSignature) GetSignature() *bls.Signature { ////////////////////////////////////////////////////////// type block struct { - blockHash BlockHash - height uint64 - view uint64 - qc QuorumCertificate + blockHash BlockHash + height uint64 + view uint64 + qc QuorumCertificate + aggregateQC AggregateQuorumCertificate } func (b *block) GetBlockHash() BlockHash { @@ -125,6 +126,9 @@ func (b *block) GetView() uint64 { } func (b *block) GetQC() QuorumCertificate { + if !isInterfaceNil(b.aggregateQC) { + return b.aggregateQC.GetHighQC() + } return b.qc } diff --git a/consensus/utils.go b/consensus/utils.go index 1c6dbeb5f..4805a8e10 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -2,11 +2,13 @@ package consensus import ( "bytes" + "crypto/rand" "encoding/binary" "reflect" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/collections/bitset" "github.com/holiman/uint256" "golang.org/x/crypto/sha3" ) @@ -260,3 +262,114 @@ func isEqualBlockHashes(hash1 BlockHash, hash2 BlockHash) bool { return bytes.Equal(hash1Value[:], hash2Value[:]) } + +func createDummyValidatorSet() []Validator { + validators := []*validator{ + { + publicKey: createDummyBLSPublicKey(), + stakeAmount: uint256.NewInt().SetUint64(100), + }, + { + publicKey: createDummyBLSPublicKey(), + stakeAmount: uint256.NewInt().SetUint64(50), + }, + } + // Cast the slice of concrete structs []*validators to a slice of interfaces []Validator + return collections.Transform(validators, func(v *validator) Validator { + return v + }) +} + +func createDummyBlock(view uint64) *block { + return &block{ + blockHash: createDummyBlockHash(), + view: view, + height: 1, + qc: createDummyQC(view-1, createDummyBlockHash()), + } +} + +func createDummyVoteMessage(view uint64) *voteMessage { + blockHash := createDummyBlockHash() + signaturePayload := 
GetVoteSignaturePayload(view, blockHash) + + blsPrivateKey, _ := bls.NewPrivateKey() + blsSignature, _ := blsPrivateKey.Sign(signaturePayload[:]) + + return &voteMessage{ + blockHash: blockHash, + view: view, + publicKey: blsPrivateKey.PublicKey(), + signature: blsSignature, + } +} + +func createDummyTimeoutMessage(view uint64) *timeoutMessage { + highQC := createDummyQC(view-1, createDummyBlockHash()) + + signaturePayload := GetTimeoutSignaturePayload(view, highQC.view) + + blsPrivateKey, _ := bls.NewPrivateKey() + blsSignature, _ := blsPrivateKey.Sign(signaturePayload[:]) + + return &timeoutMessage{ + highQC: highQC, + view: view, + publicKey: blsPrivateKey.PublicKey(), + signature: blsSignature, + } +} + +func createDummyQC(view uint64, blockHash BlockHash) *quorumCertificate { + signaturePayload := GetVoteSignaturePayload(view, blockHash) + + blsPrivateKey1, _ := bls.NewPrivateKey() + blsSignature1, _ := blsPrivateKey1.Sign(signaturePayload[:]) + + blsPrivateKey2, _ := bls.NewPrivateKey() + blsSignature2, _ := blsPrivateKey2.Sign(signaturePayload[:]) + + signersList := bitset.NewBitset().Set(0, true).Set(1, true) + aggregateSignature, _ := bls.AggregateSignatures([]*bls.Signature{blsSignature1, blsSignature2}) + + return &quorumCertificate{ + blockHash: blockHash, + view: view, + aggregatedSignature: &aggregatedSignature{ + signersList: signersList, + signature: aggregateSignature, + }, + } +} + +func createDummyBLSSignature() *bls.Signature { + blsPrivateKey, _ := bls.NewPrivateKey() + blockHashValue := createDummyBlockHash().GetValue() + blsSignature, _ := blsPrivateKey.Sign(blockHashValue[:]) + return blsSignature +} + +func createDummyBLSPublicKey() *bls.PublicKey { + blsPrivateKey, _ := bls.NewPrivateKey() + return blsPrivateKey.PublicKey() +} + +func createDummyBLSPrivateKey() *bls.PrivateKey { + blsPrivateKey, _ := bls.NewPrivateKey() + return blsPrivateKey +} + +func createDummyBlockHash() *blockHash { + byteArray := [32]byte{} + copy(byteArray[:], 
generateRandomBytes(32)) + + return &blockHash{ + value: byteArray, + } +} + +func generateRandomBytes(numBytes int) []byte { + randomBytes := make([]byte, numBytes) + rand.Read(randomBytes) + return randomBytes +} diff --git a/consensus/utils_test.go b/consensus/utils_test.go index e4522681e..e896c9610 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -3,11 +3,9 @@ package consensus import ( - "crypto/rand" "testing" "github.com/deso-protocol/core/bls" - "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/collections/bitset" "github.com/holiman/uint256" "github.com/stretchr/testify/require" @@ -304,114 +302,3 @@ func TestIsSuperMajorityStake(t *testing.T) { require.True(t, isSuperMajorityStake(totalStake, totalStake)) } } - -func createDummyValidatorSet() []Validator { - validators := []*validator{ - { - publicKey: createDummyBLSPublicKey(), - stakeAmount: uint256.NewInt().SetUint64(100), - }, - { - publicKey: createDummyBLSPublicKey(), - stakeAmount: uint256.NewInt().SetUint64(50), - }, - } - // Cast the slice of concrete structs []*validators to a slice of interfaces []Validator - return collections.Transform(validators, func(v *validator) Validator { - return v - }) -} - -func createDummyBlock(view uint64) *block { - return &block{ - blockHash: createDummyBlockHash(), - view: view, - height: 1, - qc: createDummyQC(view-1, createDummyBlockHash()), - } -} - -func createDummyVoteMessage(view uint64) *voteMessage { - blockHash := createDummyBlockHash() - signaturePayload := GetVoteSignaturePayload(view, blockHash) - - blsPrivateKey, _ := bls.NewPrivateKey() - blsSignature, _ := blsPrivateKey.Sign(signaturePayload[:]) - - return &voteMessage{ - blockHash: blockHash, - view: view, - publicKey: blsPrivateKey.PublicKey(), - signature: blsSignature, - } -} - -func createDummyTimeoutMessage(view uint64) *timeoutMessage { - highQC := createDummyQC(view-1, createDummyBlockHash()) - - signaturePayload := 
GetTimeoutSignaturePayload(view, highQC.view) - - blsPrivateKey, _ := bls.NewPrivateKey() - blsSignature, _ := blsPrivateKey.Sign(signaturePayload[:]) - - return &timeoutMessage{ - highQC: highQC, - view: view, - publicKey: blsPrivateKey.PublicKey(), - signature: blsSignature, - } -} - -func createDummyQC(view uint64, blockHash BlockHash) *quorumCertificate { - signaturePayload := GetVoteSignaturePayload(view, blockHash) - - blsPrivateKey1, _ := bls.NewPrivateKey() - blsSignature1, _ := blsPrivateKey1.Sign(signaturePayload[:]) - - blsPrivateKey2, _ := bls.NewPrivateKey() - blsSignature2, _ := blsPrivateKey2.Sign(signaturePayload[:]) - - signersList := bitset.NewBitset().Set(0, true).Set(1, true) - aggregateSignature, _ := bls.AggregateSignatures([]*bls.Signature{blsSignature1, blsSignature2}) - - return &quorumCertificate{ - blockHash: blockHash, - view: view, - aggregatedSignature: &aggregatedSignature{ - signersList: signersList, - signature: aggregateSignature, - }, - } -} - -func createDummyBLSSignature() *bls.Signature { - blsPrivateKey, _ := bls.NewPrivateKey() - blockHashValue := createDummyBlockHash().GetValue() - blsSignature, _ := blsPrivateKey.Sign(blockHashValue[:]) - return blsSignature -} - -func createDummyBLSPublicKey() *bls.PublicKey { - blsPrivateKey, _ := bls.NewPrivateKey() - return blsPrivateKey.PublicKey() -} - -func createDummyBLSPrivateKey() *bls.PrivateKey { - blsPrivateKey, _ := bls.NewPrivateKey() - return blsPrivateKey -} - -func createDummyBlockHash() *blockHash { - byteArray := [32]byte{} - copy(byteArray[:], generateRandomBytes(32)) - - return &blockHash{ - value: byteArray, - } -} - -func generateRandomBytes(numBytes int) []byte { - randomBytes := make([]byte, numBytes) - rand.Read(randomBytes) - return randomBytes -} From 2a74a3bc2c054648bd5cf57f2ccb1a46d93d3537 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 16 Oct 2023 08:35:40 -0700 Subject: [PATCH 255/762] Coin lockup tests. 
--- lib/block_producer.go | 12 +- lib/block_view.go | 33 +- lib/block_view_access_group_members_test.go | 9 +- lib/block_view_access_group_test.go | 9 +- lib/block_view_association_test.go | 18 +- lib/block_view_bitcoin_test.go | 38 +- lib/block_view_creator_coin_test.go | 29 +- lib/block_view_dao_coin_limit_order_test.go | 5 +- lib/block_view_dao_coin_test.go | 4 +- lib/block_view_derived_key_test.go | 13 +- lib/block_view_follow_test.go | 4 +- lib/block_view_like_test.go | 5 +- lib/block_view_lockups.go | 98 +- lib/block_view_lockups_test.go | 939 ++++++++++++++++++++ lib/block_view_message_test.go | 9 +- lib/block_view_nft_test.go | 14 +- lib/block_view_post_test.go | 11 +- lib/block_view_profile_test.go | 8 +- lib/block_view_stake_test.go | 36 +- lib/block_view_test.go | 27 +- lib/block_view_types_test.go | 6 +- lib/block_view_validator_test.go | 45 +- lib/blockchain.go | 208 ++++- lib/db_utils.go | 3 +- lib/load_test.go | 2 +- lib/mempool.go | 15 +- 26 files changed, 1300 insertions(+), 300 deletions(-) create mode 100644 lib/block_view_lockups_test.go diff --git a/lib/block_producer.go b/lib/block_producer.go index 63f24cde5..415ea9f16 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -225,9 +225,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) if err != nil { return nil, nil, nil, errors.Wrapf(err, "Error copying UtxoView: ") } - _, _, _, _, err = utxoViewCopy._connectTransaction( - mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), uint32(blockRet.Header.Height), true, - false /*ignoreUtxos*/) + _, _, _, _, err = utxoViewCopy._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), uint32(blockRet.Header.Height), 0, true, false) if err != nil { // Skip failing txns. This should happen super rarely. 
txnErrorString := fmt.Sprintf( @@ -237,9 +235,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) continue } // At this point, we know the transaction isn't going to break our view so attach it. - _, _, _, _, err = utxoView._connectTransaction( - mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), uint32(blockRet.Header.Height), true, - false /*ignoreUtxos*/) + _, _, _, _, err = utxoView._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), uint32(blockRet.Header.Height), 0, true, false) if err != nil { // We should never get an error here since we just attached a txn to an indentical // view. @@ -295,9 +291,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) // Skip the block reward, which is the first txn in the block. for _, txnInBlock := range blockRet.Txns[1:] { var feeNanos uint64 - _, _, _, feeNanos, err = feesUtxoView._connectTransaction( - txnInBlock, txnInBlock.Hash(), 0, uint32(blockRet.Header.Height), false, /*verifySignatures*/ - false /*ignoreUtxos*/) + _, _, _, feeNanos, err = feesUtxoView._connectTransaction(txnInBlock, txnInBlock.Hash(), 0, uint32(blockRet.Header.Height), 0, false, false) if err != nil { return nil, nil, nil, fmt.Errorf( "DeSoBlockProducer._getBlockTemplate: Error attaching txn to UtxoView for computed block: %v", err) diff --git a/lib/block_view.go b/lib/block_view.go index 886b1618a..a3089ba2d 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3286,23 +3286,21 @@ func (bav *UtxoView) ValidateDiamondsAndGetNumDeSoNanos( return desoToTransferNanos, netNewDiamonds, nil } -func (bav *UtxoView) ConnectTransaction(txn *MsgDeSoTxn, txHash *BlockHash, - txnSizeBytes int64, - blockHeight uint32, verifySignatures bool, ignoreUtxos bool) ( - _utxoOps []*UtxoOperation, _totalInput uint64, _totalOutput uint64, - _fees uint64, _err error) { +func (bav *UtxoView) ConnectTransaction( + txn *MsgDeSoTxn, txHash *BlockHash, txnSizeBytes 
int64, + blockHeight uint32, blockTimestamp int64, verifySignatures bool, + ignoreUtxos bool) (_utxoOps []*UtxoOperation, _totalInput uint64, + _totalOutput uint64, _fees uint64, _err error) { - return bav._connectTransaction(txn, txHash, - txnSizeBytes, - blockHeight, verifySignatures, - ignoreUtxos) + return bav._connectTransaction(txn, txHash, txnSizeBytes, blockHeight, 0, verifySignatures, ignoreUtxos) } -func (bav *UtxoView) _connectTransaction(txn *MsgDeSoTxn, txHash *BlockHash, - txnSizeBytes int64, blockHeight uint32, verifySignatures bool, ignoreUtxos bool) ( - _utxoOps []*UtxoOperation, _totalInput uint64, _totalOutput uint64, - _fees uint64, _err error) { +func (bav *UtxoView) _connectTransaction( + txn *MsgDeSoTxn, txHash *BlockHash, txnSizeBytes int64, + blockHeight uint32, blockTimestamp int64, verifySignatures bool, + ignoreUtxos bool) (_utxoOps []*UtxoOperation, _totalInput uint64, + _totalOutput uint64, _fees uint64, _err error) { // Do a quick sanity check before trying to connect. 
if err := CheckTransactionSanity(txn, blockHeight, bav.Params); err != nil { @@ -3543,15 +3541,13 @@ func (bav *UtxoView) _connectTransaction(txn *MsgDeSoTxn, txHash *BlockHash, totalInput, totalOutput, utxoOpsForTxn, err = bav._connectUnjailValidator(txn, txHash, blockHeight, verifySignatures) case TxnTypeCoinLockup: - // FIXME: Once merged, update blockTimestamp parameter - totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinLockup(txn, txHash, blockHeight, 0, verifySignatures) + totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinLockup(txn, txHash, blockHeight, blockTimestamp, verifySignatures) case TxnTypeUpdateCoinLockupParams: totalInput, totalOutput, utxoOpsForTxn, err = bav._connectUpdateCoinLockupParams(txn, txHash, blockHeight, verifySignatures) case TxnTypeCoinLockupTransfer: totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinLockupTransfer(txn, txHash, blockHeight, verifySignatures) case TxnTypeCoinUnlock: - // FIXME: Once merged, update blockTimestamp parameter - totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinUnlock(txn, txHash, blockHeight, 0, verifySignatures) + totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinUnlock(txn, txHash, blockHeight, blockTimestamp, verifySignatures) default: err = fmt.Errorf("ConnectTransaction: Unimplemented txn type %v", txn.TxnMeta.GetTxnType().String()) @@ -3775,8 +3771,7 @@ func (bav *UtxoView) ConnectBlock( // would slow down block processing significantly. We should figure out a way to // enforce this check in the future, but for now the only attack vector is one in // which a miner is trying to spam the network, which should generally never happen. 
- utxoOpsForTxn, totalInput, totalOutput, currentFees, err := bav.ConnectTransaction( - txn, txHash, 0, uint32(blockHeader.Height), verifySignatures, false /*ignoreUtxos*/) + utxoOpsForTxn, totalInput, totalOutput, currentFees, err := bav.ConnectTransaction(txn, txHash, 0, uint32(blockHeader.Height), 0, verifySignatures, false) _, _ = totalInput, totalOutput // A bit surprising we don't use these if err != nil { return nil, errors.Wrapf(err, "ConnectBlock: error connecting txn #%d", txIndex) diff --git a/lib/block_view_access_group_members_test.go b/lib/block_view_access_group_members_test.go index fd357236f..d89e6b641 100644 --- a/lib/block_view_access_group_members_test.go +++ b/lib/block_view_access_group_members_test.go @@ -1764,14 +1764,7 @@ func TestAccessGroupMembersTxnWithDerivedKey(t *testing.T) { // Sign txn. _signTxnWithDerivedKey(t, txn, derivedKeyPrivBase58Check) // Connect txn. - utxoOps, _, _, _, err := utxoView.ConnectTransaction( - txn, - txn.Hash(), - getTxnSize(*txn), - testMeta.savedHeight, - true, - false, - ) + utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return err } diff --git a/lib/block_view_access_group_test.go b/lib/block_view_access_group_test.go index 0518acbd8..986f45f66 100644 --- a/lib/block_view_access_group_test.go +++ b/lib/block_view_access_group_test.go @@ -613,14 +613,7 @@ func TestAccessGroupTxnWithDerivedKey(t *testing.T) { // Sign txn. _signTxnWithDerivedKey(t, txn, derivedKeyPrivBase58Check) // Connect txn. 
- utxoOps, _, _, _, err := utxoView.ConnectTransaction( - txn, - txn.Hash(), - getTxnSize(*txn), - testMeta.savedHeight, - true, - false, - ) + utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return err } diff --git a/lib/block_view_association_test.go b/lib/block_view_association_test.go index 43b5f9b65..4d6a97d91 100644 --- a/lib/block_view_association_test.go +++ b/lib/block_view_association_test.go @@ -2165,14 +2165,7 @@ func _submitAssociationTxn( _signTxn(testMeta.t, txn, TransactorPrivateKeyBase58Check) // Connect the transaction. - utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( - txn, - txn.Hash(), - getTxnSize(*txn), - testMeta.savedHeight, - true, - false, - ) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -2339,14 +2332,7 @@ func _testAssociationsWithDerivedKey(t *testing.T) { // Sign txn. _signTxnWithDerivedKey(t, txn, derivedKeyPrivBase58Check) // Connect txn. - utxoOps, _, _, _, err := utxoView.ConnectTransaction( - txn, - txn.Hash(), - getTxnSize(*txn), - testMeta.savedHeight, - true, - false, - ) + utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return err } diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index 6a422dec5..eeed76ac8 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -202,7 +202,7 @@ func _updateUSDCentsPerBitcoinExchangeRate(t *testing.T, chain *Blockchain, db * // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -482,8 +482,7 @@ func TestBitcoinExchange(t *testing.T) { burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction( - burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) _, _, _ = totalInput, totalOutput, fees require.NoError(err) utxoOpsList = append(utxoOpsList, utxoOps) @@ -494,8 +493,7 @@ func TestBitcoinExchange(t *testing.T) { burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction( - burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) require.NoError(err) require.Equal(2, len(utxoOps)) @@ -647,7 +645,7 @@ func TestBitcoinExchange(t *testing.T) { blockHeight := chain.blockTip().Height + 1 burnTxnSize := getTxnSize(*burnTxn) utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) require.NoError(err) if ii < len(expectedBitcoinBurnAmounts) { @@ -1164,13 +1162,13 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { { utxoView, _ := NewUtxoView(db, paramsCopy, nil, 
chain.snapshot) blockHeight := chain.blockTip().Height + 1 - utxoView.ConnectTransaction(burnTxn1, txHash1, burnTxn1Size, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn1, txHash1, burnTxn1Size, blockHeight, 0, true, false) } { utxoView, _ := NewUtxoView(db, paramsCopy, nil, chain.snapshot) blockHeight := chain.blockTip().Height + 1 - utxoView.ConnectTransaction(burnTxn1, txHash1, burnTxn1Size, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn1, txHash1, burnTxn1Size, blockHeight, 0, true, false) } // The transaction should pass now @@ -1178,7 +1176,7 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { utxoView, _ := NewUtxoView(db, paramsCopy, nil, chain.snapshot) blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := - utxoView.ConnectTransaction(burnTxn1, txHash1, burnTxn1Size, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn1, txHash1, burnTxn1Size, blockHeight, 0, true, false) require.NoError(err) } @@ -1216,8 +1214,7 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction( - burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) _, _, _ = totalInput, totalOutput, fees require.NoError(err) utxoOpsList = append(utxoOpsList, utxoOps) @@ -1228,8 +1225,7 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction( - burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn, 
burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) require.NoError(err) require.Equal(2, len(utxoOps)) @@ -1381,7 +1377,7 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { blockHeight := chain.blockTip().Height + 1 burnTxnSize := getTxnSize(*burnTxn) utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) require.NoError(err) if ii < len(expectedBitcoinBurnAmounts) { @@ -2062,7 +2058,7 @@ func TestSpendOffOfUnminedTxnsBitcoinExchange(t *testing.T) { blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := - utxoView.ConnectTransaction(burnTxn1, txHash1, burnTxn1Size, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn1, txHash1, burnTxn1Size, blockHeight, 0, true, false) require.NoError(err) } @@ -2517,8 +2513,7 @@ func TestBitcoinExchangeWithAmountNanosNonZeroAtGenesis(t *testing.T) { burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction( - burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) _, _, _ = totalInput, totalOutput, fees require.NoError(err) utxoOpsList = append(utxoOpsList, utxoOps) @@ -2529,8 +2524,7 @@ func TestBitcoinExchangeWithAmountNanosNonZeroAtGenesis(t *testing.T) { burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction( - burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) 
require.NoError(err) require.Equal(2, len(utxoOps)) @@ -2682,7 +2676,7 @@ func TestBitcoinExchangeWithAmountNanosNonZeroAtGenesis(t *testing.T) { blockHeight := chain.blockTip().Height + 1 burnTxnSize := getTxnSize(*burnTxn) utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) require.NoError(err) if ii < len(expectedBitcoinBurnAmounts) { @@ -2988,9 +2982,7 @@ func TestUpdateExchangeRate(t *testing.T) { txnSize := getTxnSize(*updateExchangeRateTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(updateExchangeRateTxn, - updateExchangeRateTxn.Hash(), txnSize, blockHeight, true, /*verifySignature*/ - false /*ignoreUtxos*/) + utxoView.ConnectTransaction(updateExchangeRateTxn, updateExchangeRateTxn.Hash(), txnSize, blockHeight, 0, true, false) require.NoError(err) _, _, _, _ = utxoOps, totalInput, totalOutput, fees require.NoError(utxoView.FlushToDb(0)) diff --git a/lib/block_view_creator_coin_test.go b/lib/block_view_creator_coin_test.go index 62f32bc7e..bd2e59628 100644 --- a/lib/block_view_creator_coin_test.go +++ b/lib/block_view_creator_coin_test.go @@ -609,8 +609,7 @@ func _helpTestCreatorCoinBuySell( blockHeight := chain.blockTip().Height + 1 txnSize := getTxnSize(*txn) _, _, _, _, err := - utxoView.ConnectTransaction( - txn, txn.Hash(), txnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txn.Hash(), txnSize, blockHeight, 0, true, false) require.NoError(err) _checkTestData(testData, fmt.Sprintf("SimpleConnect: Index: %v", testIndex), utxoView, nil) } @@ -1140,7 +1139,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCreatorCoinTransferHasDiamondPostHashWithoutDiamondLevel) } @@ -1174,7 +1173,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), "level 15 not allowed") } @@ -1207,7 +1206,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), "level 0 not allowed") } @@ -1240,7 +1239,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCreatorCoinTransferCantSendDiamondsForOtherProfiles) } @@ -1273,7 +1272,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCoinTransferCannotTransferToSelf) } @@ -1307,7 +1306,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCreatorCoinTransferDiamondPostEntryDoesNotExist) } @@ -1340,7 +1339,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCreatorCoinTransferInsufficientCreatorCoinsForDiamondLevel) } @@ -1373,7 +1372,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.NoError(err) _, err = mempool.processTransaction(txn, false, false, 0, false) require.NoError(err) @@ -1404,7 +1403,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCreatorCoinTransferPostAlreadyHasSufficientDiamonds) } @@ -1535,7 +1534,7 @@ func TestCreatorCoinDiamondAfterDeSoDiamondsBlockHeight(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCreatorCoinTransferHasDiamondsAfterDeSoBlockHeight) } @@ -4350,7 +4349,7 @@ func _creatorCoinTxn(t *testing.T, chain *Blockchain, db *badger.DB, txHash := txn.Hash() utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -4454,7 +4453,7 @@ func _doCreatorCoinTransferTxnWithDiamonds(t *testing.T, chain *Blockchain, db * // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. 
if err != nil { @@ -4527,7 +4526,7 @@ func _doCreatorCoinTransferTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { diff --git a/lib/block_view_dao_coin_limit_order_test.go b/lib/block_view_dao_coin_limit_order_test.go index e50030b8d..5d15b58bc 100644 --- a/lib/block_view_dao_coin_limit_order_test.go +++ b/lib/block_view_dao_coin_limit_order_test.go @@ -4093,8 +4093,7 @@ func _connectDAOCoinLimitOrderTxn( _signTxn(testMeta.t, txn, privateKey) // Always use savedHeight (blockHeight+1) for validation since it's // assumed the transaction will get mined into the next block. - utxoOps, totalInput, totalOutput, fees, err := currentUtxoView.ConnectTransaction( - txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, true, false) + utxoOps, totalInput, totalOutput, fees, err := currentUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { // If error, remove most-recent expected sender balance added for this txn. testMeta.expectedSenderBalances = testMeta.expectedSenderBalances[:len(testMeta.expectedSenderBalances)-1] @@ -4167,7 +4166,7 @@ func _doDAOCoinLimitOrderTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } diff --git a/lib/block_view_dao_coin_test.go b/lib/block_view_dao_coin_test.go index 0d22658ac..26de58f7a 100644 --- a/lib/block_view_dao_coin_test.go +++ b/lib/block_view_dao_coin_test.go @@ -51,7 +51,7 @@ func _daoCoinTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -132,7 +132,7 @@ func _daoCoinTransferTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index 97ba47eb4..994da58ed 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -91,8 +91,7 @@ func _derivedKeyBasicTransfer(t *testing.T, db *badger.DB, chain *Blockchain, pa txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 utxoOps, _, _, _, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, - true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) return utxoOps, txn, err } @@ -519,7 +518,7 @@ func _doTxnWithBlockHeight( txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -837,7 +836,7 @@ func _doAuthorizeTxnWithExtraDataAndSpendingLimits(testMeta *TestMeta, utxoView txHash := txn.Hash() utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. 
if err != nil { @@ -1147,8 +1146,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { blockHeight := chain.blockTip().Height + 1 txnSize := getTxnSize(*txn) _, _, _, _, err := - utxoView.ConnectTransaction( - txn, txn.Hash(), txnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txn.Hash(), txnSize, blockHeight, 0, true, false) require.NoError(err) } @@ -2001,8 +1999,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { blockHeight := chain.blockTip().Height + 1 txnSize := getTxnSize(*txn) _, _, _, _, err := - utxoView.ConnectTransaction( - txn, txn.Hash(), txnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txn.Hash(), txnSize, blockHeight, 0, true, false) require.NoError(err) } diff --git a/lib/block_view_follow_test.go b/lib/block_view_follow_test.go index fb819796d..4b70402f3 100644 --- a/lib/block_view_follow_test.go +++ b/lib/block_view_follow_test.go @@ -43,7 +43,7 @@ func _doFollowTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -653,7 +653,7 @@ func TestFollowTxns(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.NoError(err) } // Flush the utxoView after having added all the transactions. 
diff --git a/lib/block_view_like_test.go b/lib/block_view_like_test.go index a59bee193..ab4d99ff8 100644 --- a/lib/block_view_like_test.go +++ b/lib/block_view_like_test.go @@ -40,8 +40,7 @@ func _doLikeTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true, /*verifySignature*/ - false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -577,7 +576,7 @@ func TestLikeTxns(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.NoError(err) } // Flush the utxoView after having added all the transactions. diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 40ed18224..6d304978a 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -8,7 +8,6 @@ import ( "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" - "math" "math/big" "sort" ) @@ -670,7 +669,10 @@ func (bav *UtxoView) _connectCoinLockup( } // Try connecting the basic transfer without considering transaction metadata. - _, _, utxoOpsForBasicTransfer, err := bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) + // NOTE: Even in the balance model era, we require totalInput and totalOutput + // to test that fees are being paid accurately. 
+ totalInput, totalOutput, utxoOpsForBasicTransfer, err := + bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) if err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectCoinLockup") } @@ -778,7 +780,7 @@ func (bav *UtxoView) _connectCoinLockup( } // We store the previous transactor balance entry in the event we need to revert the transaction. - prevTransactorBalanceEntry = transactorBalanceEntry + prevTransactorBalanceEntry = transactorBalanceEntry.Copy() // Spend the transactor's DAO coin balance. transactorBalanceEntry.BalanceNanos = @@ -803,11 +805,13 @@ func (bav *UtxoView) _connectCoinLockup( // be unable to unlock the coins until the transaction specified lockup duration expires. txnYieldBasisPoints := uint64(0) txnYieldEarningDurationNanoSecs := int64(0) - if leftYieldCurvePoint.LockupDurationNanoSecs < lockupDurationNanoSeconds { + if leftYieldCurvePoint != nil && + leftYieldCurvePoint.LockupDurationNanoSecs < lockupDurationNanoSeconds { txnYieldBasisPoints = leftYieldCurvePoint.LockupYieldAPYBasisPoints txnYieldEarningDurationNanoSecs = leftYieldCurvePoint.LockupDurationNanoSecs } - if rightYieldCurvePoint.LockupDurationNanoSecs == lockupDurationNanoSeconds { + if rightYieldCurvePoint != nil && + rightYieldCurvePoint.LockupDurationNanoSecs == lockupDurationNanoSeconds { txnYieldBasisPoints = rightYieldCurvePoint.LockupYieldAPYBasisPoints txnYieldEarningDurationNanoSecs = rightYieldCurvePoint.LockupDurationNanoSecs } @@ -823,30 +827,15 @@ func (bav *UtxoView) _connectCoinLockup( return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup") } - // We check that the minted yield does not cause an overflow in the transactor's balance. - // In the case of DeSo being locked up, we must check that the resulting amount is less than 2**64. 
- if uint256.NewInt().Sub(MaxUint256, yieldFromTxn).Lt(transactorBalanceNanos256) { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup") - } - - // Compute the amount to be added to the locked balance entry. - lockupValue := *uint256.NewInt().Add(transactorBalanceNanos256, yieldFromTxn) - - // In the case of DeSo being locked up, we ensure that the resulting amount is less than 2**64. - if profilePKID.IsZeroPKID() && !lockupValue.IsUint64() { + // Compute the amount awarded to the HODLer on unlock. + lockupValue, err := SafeUint256().Add(txMeta.LockupAmountBaseUnits, yieldFromTxn) + if err != nil { return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup") + errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup: lockupValue") } - // NOTE: While we could check for "global" overflow here, we let this occur on the unlock transaction instead. - // Global overflow is where the yield causes fields like CoinEntry.CoinsInCirculationNanos to overflow. - // Performing the check here would be redundant and may lead to worse UX in the case of coins being - // burned in the future making current lockups no longer an overflow. Checking here would also - // create a DoS attack vector where a malicious entity takes out an extremely long-dated lockup - // with the sole intent of saturating the CoinsInCirculationNanos field preventing others from locking up. - // For consolidation, we fetch equivalent LockedBalanceEntries. + // An equivalent LockedBalanceEntry has the same unlock timestamp and the same profile PKID. lockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( hodlerPKID, profilePKID, txMeta.UnlockTimestampNanoSecs) if err != nil { @@ -863,21 +852,30 @@ func (bav *UtxoView) _connectCoinLockup( } previousLockedBalanceEntry := *lockedBalanceEntry - // Check for overflow within the locked balance entry itself. 
- if uint256.NewInt().Sub(MaxUint256, yieldFromTxn).Lt(transactorBalanceNanos256) { + // Attempt to add the value of this lockup transaction to the locked balance entry. + newLockedBalanceEntryBalance, err := SafeUint256().Add(&lockedBalanceEntry.BalanceBaseUnits, lockupValue) + if err != nil { return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup") + errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, + "_connectCoinLockup: New Locked Balance Entry Balance") } - if profilePKID.IsZeroPKID() { - // Check if DeSo minted would overflow 2**64 in the transactor balance. - if uint256.NewInt().Sub(uint256.NewInt().SetUint64(math.MaxUint64), yieldFromTxn).Lt(transactorBalanceNanos256) { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup") - } + + // Ensure in the case of DESO the resulting locked amount is less than 2**64. + if profilePKID.IsZeroPKID() && !newLockedBalanceEntryBalance.IsUint64() { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup: "+ + "New DESO Locked Balance Entry Balance") } - // Increment the lockedBalanceEntry and update the view. - lockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt().Add(&lockedBalanceEntry.BalanceBaseUnits, &lockupValue) + // NOTE: While we could check for "global" overflow here, we let this occur on the unlock transaction instead. + // Global overflow is where the yield causes fields like CoinEntry.CoinsInCirculationNanos to overflow. + // Performing the check here would be redundant and may lead to worse UX in the case of coins being + // burned in the future making current lockups no longer an overflow. Checking here would also + // create a DoS attack vector where a malicious entity takes out an extremely long-dated lockup + // with the sole intent of saturating the CoinsInCirculationNanos field preventing others from locking up. + + // Update the lockedBalanceEntry and update the view. 
+ lockedBalanceEntry.BalanceBaseUnits = *newLockedBalanceEntryBalance bav._setLockedBalanceEntry(lockedBalanceEntry) // Add a UtxoOperation for easy reversion during disconnect. @@ -888,7 +886,7 @@ func (bav *UtxoView) _connectCoinLockup( }) // Construct UtxoOps in the event this transaction is reverted. - return 0, 0, utxoOpsForTxn, nil + return totalInput, totalOutput, utxoOpsForTxn, nil } func CalculateLockupYield( @@ -907,6 +905,12 @@ func CalculateLockupYield( // // Notice this formula makes detecting computational overflow trivial by utilizing the DeSo SafeUint256 library. + // The SafeUint256 Library uses division to ensure there's no overflow. This leads to possible + // unnecessary false overflows in the event the duration or the yield is 0. Hence, we do a separate check here. + if apyYieldBasisPoints.IsZero() || durationNanoSecs.IsZero() { + return uint256.NewInt(), nil + } + // Compute the denominators from the nanosecond to year conversion and the basis point computation. denominators, err := SafeUint256().Mul( uint256.NewInt().SetUint64(_nanoSecsPerYear), @@ -1056,7 +1060,8 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( } // Try connecting the basic transfer without considering transaction metadata. 
- _, _, utxoOpsForBasicTransfer, err := bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) + totalInput, totalOutput, utxoOpsForBasicTransfer, err := + bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) if err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUpdateCoinLockupParams") } @@ -1171,7 +1176,7 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( PrevLockupTransferRestriction: prevLockupTransferRestriction, }) - return 0, 0, utxoOpsForTxn, nil + return totalInput, totalOutput, utxoOpsForTxn, nil } func (bav *UtxoView) _disconnectUpdateCoinLockupParams( @@ -1305,7 +1310,8 @@ func (bav *UtxoView) _connectCoinLockupTransfer( } // Try connecting the basic transfer without considering transaction metadata. - _, _, utxoOpsForBasicTransfer, err := bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) + totalInput, totalOutput, utxoOpsForBasicTransfer, err := + bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) if err != nil { return 0, 0, nil, errors.Wrap(err, "_connectCoinLockupTransfer") } @@ -1340,14 +1346,14 @@ func (bav *UtxoView) _connectCoinLockupTransfer( profileEntry = bav.GetProfileEntryForPublicKey(txMeta.ProfilePublicKey.ToBytes()) if profileEntry == nil || profileEntry.isDeleted { return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupOnNonExistentProfile, "_connectCoinLockupTransfer") + errors.Wrap(RuleErrorCoinLockupTransferOnNonExistentProfile, "_connectCoinLockupTransfer") } } // Fetch PKIDs for the recipient, sender, and profile. 
var senderPKID *PKID if _, updaterIsParamUpdater := - GetParamUpdaterPublicKeys(blockHeight, bav.Params)[MakePkMapKey(txn.PublicKey)]; !updaterIsParamUpdater { + GetParamUpdaterPublicKeys(blockHeight, bav.Params)[MakePkMapKey(txn.PublicKey)]; updaterIsParamUpdater { senderPKID = ZeroPKID.NewPKID() } else { senderPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) @@ -1413,7 +1419,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( if profilePKID.IsZeroPKID() { transferRestrictionStatus = bav.GlobalParamsEntry.LockedDESOTransferRestrictions } else { - transferRestrictionStatus = profileEntry.DAOCoinEntry.TransferRestrictionStatus + transferRestrictionStatus = profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus } // Check if transfers are limited to profile owner only. @@ -1452,7 +1458,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( PrevReceiverLockedBalanceEntry: prevReceiverLockedBalanceEntry, }) - return 0, 0, utxoOpsForTxn, nil + return totalInput, totalOutput, utxoOpsForTxn, nil } func (bav *UtxoView) _disconnectCoinLockupTransfer( @@ -1569,7 +1575,7 @@ func (bav *UtxoView) _connectCoinUnlock( } // Try connecting the basic transfer without considering transaction metadata. 
- _, _, utxoOpsForBasicTransfer, err := bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) + totalInput, totalOutput, utxoOpsForBasicTransfer, err := bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) if err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectCoinUnlock") } @@ -1680,7 +1686,7 @@ func (bav *UtxoView) _connectCoinUnlock( PrevLockedBalanceEntries: prevLockedBalanceEntries, }) - return 0, 0, utxoOpsForTxn, nil + return totalInput, totalOutput, utxoOpsForTxn, nil } func (bav *UtxoView) _disconnectCoinUnlock( diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go new file mode 100644 index 000000000..3d3fcf1c5 --- /dev/null +++ b/lib/block_view_lockups_test.go @@ -0,0 +1,939 @@ +package lib + +import ( + "github.com/dgraph-io/badger/v3" + "github.com/holiman/uint256" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" +) + +func TestCoinLockupTxnRuleErrors(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Attempt to perform a lockup of amount zero. + // (This should fail -- RuleErrorCoinLockupOfAmountZero) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + m0Pub, + 0, + uint256.NewInt(), + 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupOfAmountZero) + } + + // Attempt to perform a lockup on a non-existent profile. 
+ // (This should fail -- RuleErrorCoinLockupOnNonExistentProfile) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + m2Pub, + 0, + uint256.NewInt().SetUint64(1), + 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupOnNonExistentProfile) + } + + // Attempt to perform an excessive DESO lockup (more than 2**64 DESO). + // (This should fail -- RuleErrorCoinLockupExcessiveDeSoLockup) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + 0, + MaxUint256, + 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupExcessiveDeSoLockup) + } + + // Attempt to perform a lockup with zero lockup duration. + // (This should fail -- RuleErrorCoinLockupInvalidLockupDuration) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + 0, + uint256.NewInt().SetUint64(1), + 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidLockupDuration) + } + + // Attempt to perform a lockup with negative lockup duration. + // (This should fail -- RuleErrorCoinLockupInvalidLockupDuration) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + 0, + uint256.NewInt().SetUint64(1), + 1) + require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidLockupDuration) + } + + // Attempt to perform a lockup in excess of the user's DESO balance. 
+ // (This should fail -- RuleErrorCoinLockupInsufficientDeSo) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + 1, + uint256.NewInt().SetUint64(1e10), + 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupInsufficientDeSo) + } + + // Attempt to perform a lockup in excess of the user's coin balance. + // (This should fail -- RuleErrorCoinLockupInsufficientCoins) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + m0Pub, + 1, + uint256.NewInt().SetUint64(1e10), + 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupInsufficientCoins) + } + + // NOTE: The only other rule errors for coin lockup txns are related to yield curve overflows. + // This is tested separately and more comprehensively in a different test. + + // Attempt to perform a valid and simple coin lockup transaction. + // This should succeed :) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + m0Pub, + 1000, + uint256.NewInt().SetUint64(1000), + 0) + require.NoError(t, err) + } +} + +func TestUpdateCoinLockupParamsTxnRuleErrors(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Attempt to create a lockup yield point with negative duration. 
+ // (This should fail -- RuleErrorUpdateCoinLockupParamsNegativeDuration) + { + _, _, _, err := _updateCoinLockupParams( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + int64(-1), + 0, + false, + false, + TransferRestrictionStatusUnrestricted, + ) + require.Contains(t, err.Error(), RuleErrorUpdateCoinLockupParamsNegativeDuration) + } + + // Attempt to delete a non-existent yield curve point. + // (This should fail -- RuleErrorUpdateCoinLockupParamsDeletingNonExistentPoint) + { + _, _, _, err := _updateCoinLockupParams( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + int64(1e9), + 0, + true, + false, + TransferRestrictionStatusUnrestricted, + ) + require.Contains(t, err.Error(), RuleErrorUpdateCoinLockupParamsDeletingNonExistentPoint) + } + + // Attempt to update the transfer restrictions of a non-existent profile. + // (This should fail -- RuleErrorUpdateCoinLockupParamsUpdatingNonExistentProfile) + { + _, _, _, err := _updateCoinLockupParams( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + int64(1e9), + 0, + false, + true, + TransferRestrictionStatusUnrestricted, + ) + require.Contains(t, err.Error(), RuleErrorUpdateCoinLockupParamsUpdatingNonExistentProfile) + } + + // Attempt to update PermanentlyUnrestricted transfer restrictions. + // (This should fail -- RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction) + { + // First update transfer restrictions to TransferRestrictionStatusPermanentlyUnrestricted. + // This should be a valid transaction. + _updateCoinLockupParamsWithTestMeta(testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + 0, + 0, + false, + true, + TransferRestrictionStatusPermanentlyUnrestricted, + ) + + // Now attempt to further update the transfer restrictions. 
+ // (This should fail -- RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction) + _, _, _, err := _updateCoinLockupParams( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + 0, + 0, + false, + true, + TransferRestrictionStatusUnrestricted, + ) + require.Contains(t, err.Error(), RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction) + } + + // Attempt to update transfer restrictions to an invalid transfer restriction status. + // (This should fail -- RuleErrorUpdateCoinLockupParamsInvalidRestrictions) + { + _, _, _, err := _updateCoinLockupParams( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + int64(1e9), + 0, + false, + true, + TransferRestrictionStatus(uint8(255)), + ) + require.Contains(t, err.Error(), RuleErrorUpdateCoinLockupParamsInvalidRestrictions) + } + + // Attempt to perform a valid and simple update coin lockup param transaction. + // This should succeed :) + { + _, _, _, err := _updateCoinLockupParams( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + 365*25*60*60*1e9, + 500, + false, + true, + TransferRestrictionStatusProfileOwnerOnly, + ) + require.NoError(t, err) + } +} + +func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Attempt to perform a lockup transfer of zero coins. 
+ // (This should fail -- RuleErrorCoinLockupTransferOfAmountZero) + { + _, _, _, err := _coinLockupTransfer( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + NewPublicKey(m3PkBytes), + NewPublicKey(m0PkBytes), + 0, + uint256.NewInt()) + require.Contains(t, err.Error(), RuleErrorCoinLockupTransferOfAmountZero) + } + + // Attempt to perform a locked DESO transfer in excess of 2**64. + // (This should fail -- RuleErrorCoinLockupTransferOfDeSoCausesOverflow) + { + _, _, _, err := _coinLockupTransfer( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + NewPublicKey(m3PkBytes), + &ZeroPublicKey, + 0, + MaxUint256) + require.Contains(t, err.Error(), RuleErrorCoinLockupTransferOfDeSoCausesOverflow) + } + + // Attempt to perform a coin lockup transfer on coins from a non-existent profile. + // (This should fail -- RuleErrorCoinLockupTransferOnNonExistentProfile) + { + _, _, _, err := _coinLockupTransfer( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + NewPublicKey(m3PkBytes), + NewPublicKey(m3PkBytes), + 0, + MaxUint256) + require.Contains(t, err.Error(), RuleErrorCoinLockupTransferOnNonExistentProfile) + } + + // Attempt to perform a coin lockup transfer where the sender is the receiver. + // (This should fail -- RuleErrorCoinLockupTransferSenderEqualsReceiver) + { + _, _, _, err := _coinLockupTransfer( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + NewPublicKey(m0PkBytes), + NewPublicKey(m0PkBytes), + 0, + MaxUint256) + require.Contains(t, err.Error(), RuleErrorCoinLockupTransferSenderEqualsReceiver) + } + + // Attempt to perform an excessive coin lockup transfer. 
+ // (This should fail -- RuleErrorCoinLockupTransferInsufficientBalance) + { + _, _, _, err := _coinLockupTransfer( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + NewPublicKey(m3PkBytes), + NewPublicKey(m0PkBytes), + 0, + MaxUint256) + require.Contains(t, err.Error(), RuleErrorCoinLockupTransferInsufficientBalance) + } + + // Attempt to violate profile owner only transfer restrictions. + // (This should fail -- RuleErrorCoinLockupTransferRestrictedToProfileOwner) + { + // Set M0 locked transfer restrictions to profile owner only. + _updateCoinLockupParamsWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + 0, + 0, + false, + true, + TransferRestrictionStatusProfileOwnerOnly, + ) + + // Lockup 1000 M0 coins. + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + m0Pub, + 1, + uint256.NewInt().SetUint64(1e6), + 0, + ) + + // Send 1000 locked M0 coins to M2. + _coinLockupTransferWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + NewPublicKey(m2PkBytes), + NewPublicKey(m0PkBytes), + 1, + uint256.NewInt().SetUint64(1e6), + ) + + // Attempt to have M2 send locked M0 coins to M3. + // (This should fail -- RuleErrorCoinLockupTransferRestrictedToProfileOwner) + _, _, _, err := _coinLockupTransfer( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + NewPublicKey(m3PkBytes), + NewPublicKey(m0PkBytes), + 1, + uint256.NewInt().SetUint64(1)) + require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToProfileOwner) + } + + // Attempt to violate DAO member only transfer restrictions. + // (This should fail -- RuleErrorCoinLockupTransferRestrictedToDAOMembers) + { + // Set M0 locked transfer restrictions to dao members owner only. 
+ _updateCoinLockupParamsWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + 0, + 0, + false, + true, + TransferRestrictionStatusDAOMembersOnly, + ) + + // Attempt to have M2 send locked M0 coins to M3. + // (This should fail -- RuleErrorCoinLockupTransferRestrictedToDAOMembers) + _, _, _, err := _coinLockupTransfer( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + NewPublicKey(m3PkBytes), + NewPublicKey(m0PkBytes), + 1, + uint256.NewInt().SetUint64(1)) + require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToDAOMembers) + } + + // NOTE: There's one more rule error: RuleErrorCoinLockupTransferBalanceOverflowAtReceiver + // This one is particularly challenging to trigger as it's yield based. + // For this reason we deal with it in other more specified yield focused tests. +} + +func TestCoinUnlockTxnRuleErrors(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Attempt to unlock coins associated with a non-existent profile. + // (This should fail -- RuleErrorCoinUnlockOnNonExistentProfile) + { + _, _, _, err := _coinUnlockWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + m2Pub, + 0) + require.Contains(t, err.Error(), RuleErrorCoinUnlockOnNonExistentProfile) + } + + // Attempt to unlock locked coins which do not exist. + // (This should fail -- RuleErrorCoinUnlockNoUnlockableCoinsFound) + { + _, _, _, err := _coinUnlockWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + m0Pub, + 0) + require.Contains(t, err.Error(), RuleErrorCoinUnlockNoUnlockableCoinsFound) + } + + // TODO: Cause unlockable balance overflow test. Tricky as it relies on yield. 
+} + +//---------------------------------------------------------- +// (Testing) Lockup Setup Helper Functions +//---------------------------------------------------------- + +// _setUpProfilesAndMintM0M1DAOCoins is a simple helper function that takes as input +// a TestMeta struct with the following fields: +// +// miner - 10 blocks worth of DESO mined in their balance. Assumed to be senderPkString. +// (no other state) +// +// After running the _setUpProfilesAndMintM0M1DAOCoins, the following state is expected: +// +// miner - ~10 blocks worth of DESO mined in their balance +// m0Pub - ~10,000 nDESO, m0 profile, and 1,000,000 m0 DAO coins minted and held +// m1Pub - ~10,000 nDESO, m1 profile, and 1,000,000,000 m1 DAO coins minted and held +// m2Pub - 10,000 nDESO +// m3Pub - 10,000 nDESO +// m4Pub - 10,000 nDESO +// paramUpdaterPub - 10,000 nDESO +func _setUpProfilesAndMintM0M1DAOCoins(testMeta *TestMeta) { + // Create on-chain public keys with DESO sent from miner + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 10000) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 10000) + _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 10000) + _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 10000) + _registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 10000) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 10000) + + // Create on-chain profile for m0 + { + _updateProfileWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + []byte{}, + "m0", + "i am the m0", + shortPic, + 10*100, + 1.25*100*100, + false, + ) + } + + // Create on-chain profile for m1 + { + _updateProfileWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + []byte{}, + "m1", + "i am the m1", + shortPic, + 10*100, + 
1.25*100*100, + false, + ) + } + + // Create 1,000,000 m0 dao coins held by m0 + { + _daoCoinTxnWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + DAOCoinMetadata{ + ProfilePublicKey: m0PkBytes, + OperationType: DAOCoinOperationTypeMint, + CoinsToMintNanos: *uint256.NewInt().SetUint64(1e6), + CoinsToBurnNanos: uint256.Int{}, + TransferRestrictionStatus: 0, + }) + } + + // Create 1,000,000,000 m1 dao coins held by m1 + { + _daoCoinTxnWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + DAOCoinMetadata{ + ProfilePublicKey: m1PkBytes, + OperationType: DAOCoinOperationTypeMint, + CoinsToMintNanos: *uint256.NewInt().SetUint64(1e9), + CoinsToBurnNanos: uint256.Int{}, + TransferRestrictionStatus: 0, + }) + } +} + +func _setUpMinerAndTestMetaForTimestampBasedLockupTests(t *testing.T) *TestMeta { + // Initialize balance model fork heights. + setBalanceModelBlockHeights(t) + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Ensure DAO coins are enabled (a pre-requisite for lockups) + params.ForkHeights.DAOCoinBlockHeight = uint32(0) + + // Initialize PoS fork heights. + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) + params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = uint32(1) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err := miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. 
+ blockHeight := uint64(chain.blockTip().Height) + 1 + + return &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } +} + +//---------------------------------------------------------- +// (Testing) Lockup Transaction Connection Helper Functions +//---------------------------------------------------------- + +func _coinLockupWithTestMetaAndConnectTimestamp( + testMeta *TestMeta, + feeRateNanosPerKB uint64, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + profilePublicKeyBase58Check string, + unlockTimestampNanoSecs int64, + lockupAmountBaseUnits *uint256.Int, + connectTimestamp int64) { + + testMeta.expectedSenderBalances = + append(testMeta.expectedSenderBalances, + _getBalance(testMeta.t, testMeta.chain, nil, transactorPublicKeyBase58Check)) + + currentOps, currentTxn, _, err := _coinLockupWithConnectTimestamp( + testMeta.t, testMeta.chain, testMeta.db, testMeta.params, + feeRateNanosPerKB, transactorPublicKeyBase58Check, transactorPrivateKeyBase58Check, + profilePublicKeyBase58Check, unlockTimestampNanoSecs, lockupAmountBaseUnits, connectTimestamp) + require.NoError(testMeta.t, err) + + testMeta.txnOps = append(testMeta.txnOps, currentOps) + testMeta.txns = append(testMeta.txns, currentTxn) +} + +func _coinLockupWithConnectTimestamp(t *testing.T, chain *Blockchain, db *badger.DB, + params *DeSoParams, feeRateNanosPerKB uint64, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + profilePublicKeyBase58Check string, + unlockTimestampNanoSecs int64, + lockupAmountBaseUnits *uint256.Int, + connectTimestamp int64) ( + _utxoOps []*UtxoOperation, _txn *MsgDeSoTxn, _height uint32, _err error) { + + assert := assert.New(t) + require := require.New(t) + _ = assert + _ = require + + transactorPkBytes, _, err := Base58CheckDecode(transactorPublicKeyBase58Check) + require.NoError(err) + + 
profilePkBytes, _, err := Base58CheckDecode(profilePublicKeyBase58Check) + require.NoError(err) + + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(err) + + // Create the coin lockup transaction. + txn, totalInputMake, _, feesMake, err := chain.CreateCoinLockupTxn( + transactorPkBytes, profilePkBytes, unlockTimestampNanoSecs, + lockupAmountBaseUnits, feeRateNanosPerKB, nil, []*DeSoOutput{}) + if err != nil { + return nil, nil, 0, err + } + require.Equal(totalInputMake, feesMake) + + // Sign the transaction now that its inputs are set up. + _signTxn(t, txn, transactorPrivateKeyBase58Check) + + // Connect the transaction. + txHash := txn.Hash() + blockHeight := chain.BlockTip().Height + 1 + utxoOps, totalInput, totalOutput, fees, err := + utxoView.ConnectTransaction( + txn, txHash, getTxnSize(*txn), blockHeight, connectTimestamp, true, false) + if err != nil { + return nil, nil, 0, err + } + + // Ensure that DESO is flowing in the correct amount. 
+ require.Equal(totalInput, totalOutput+fees) + + // Check that UtxoOps following connection have the correct type + require.Equal(OperationTypeSpendBalance, utxoOps[0].Type) + if NewPublicKey(profilePkBytes).IsZeroPublicKey() { + require.Equal(OperationTypeSpendBalance, utxoOps[1].Type) + require.Equal(OperationTypeCoinLockup, utxoOps[2].Type) + } else { + require.Equal(OperationTypeCoinLockup, utxoOps[1].Type) + } + + // Ensure the transaction can be flushed without issue before returning + require.NoError(utxoView.FlushToDb(uint64(blockHeight))) + return utxoOps, txn, blockHeight, nil +} + +func _updateCoinLockupParamsWithTestMeta( + testMeta *TestMeta, + feeRateNanosPerKB uint64, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + lockupYieldDurationNanoSecs int64, + lockupYieldAPYBasisPoints uint64, + removeYieldCurvePoint bool, + newLockupTransferRestrictions bool, + lockupTransferRestrictionStatus TransferRestrictionStatus) { + + testMeta.expectedSenderBalances = + append(testMeta.expectedSenderBalances, + _getBalance(testMeta.t, testMeta.chain, nil, transactorPublicKeyBase58Check)) + + currentOps, currentTxn, _, err := _updateCoinLockupParams( + testMeta.t, testMeta.chain, testMeta.db, testMeta.params, + feeRateNanosPerKB, transactorPublicKeyBase58Check, transactorPrivateKeyBase58Check, + lockupYieldDurationNanoSecs, lockupYieldAPYBasisPoints, removeYieldCurvePoint, + newLockupTransferRestrictions, lockupTransferRestrictionStatus) + require.NoError(testMeta.t, err) + + testMeta.txnOps = append(testMeta.txnOps, currentOps) + testMeta.txns = append(testMeta.txns, currentTxn) +} + +func _updateCoinLockupParams(t *testing.T, chain *Blockchain, db *badger.DB, + params *DeSoParams, feeRateNanosPerKB uint64, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + lockupYieldDurationNanoSecs int64, + lockupYieldAPYBasisPoints uint64, + removeYieldCurvePoint bool, + newLockupTransferRestrictions bool, + 
lockupTransferRestrictionStatus TransferRestrictionStatus) ( + _utxoOps []*UtxoOperation, _txn *MsgDeSoTxn, _height uint32, _err error) { + + assert := assert.New(t) + require := require.New(t) + _ = assert + _ = require + + transactorPkBytes, _, err := Base58CheckDecode(transactorPublicKeyBase58Check) + require.NoError(err) + + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(err) + + // Create the update coin lockup params transaction. + txn, totalInputMake, _, feesMake, err := chain.CreateUpdateCoinLockupParamsTxn( + transactorPkBytes, lockupYieldDurationNanoSecs, lockupYieldAPYBasisPoints, removeYieldCurvePoint, + newLockupTransferRestrictions, lockupTransferRestrictionStatus, feeRateNanosPerKB, nil, []*DeSoOutput{}) + if err != nil { + return nil, nil, 0, err + } + require.Equal(totalInputMake, feesMake) + + // Sign the transaction now that its inputs are set up. + _signTxn(t, txn, transactorPrivateKeyBase58Check) + + // Connect the transaction. + txHash := txn.Hash() + blockHeight := chain.BlockTip().Height + 1 + utxoOps, totalInput, totalOutput, fees, err := + utxoView.ConnectTransaction( + txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + if err != nil { + return nil, nil, 0, err + } + + // Ensure that DESO is flowing in the correct amount. 
+ require.Equal(totalInput, totalOutput+fees) + + // Check that UtxoOps following connection have the correct type + require.Equal(OperationTypeSpendBalance, utxoOps[0].Type) + require.Equal(OperationTypeUpdateCoinLockupParams, utxoOps[1].Type) + + // Ensure the transaction can be flushed without issue before returning + require.NoError(utxoView.FlushToDb(uint64(blockHeight))) + return utxoOps, txn, blockHeight, nil +} + +func _coinLockupTransferWithTestMeta( + testMeta *TestMeta, + feeRateNanosPerKB uint64, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + recipientPublicKey *PublicKey, + profilePublicKey *PublicKey, + unlockTimestampNanoSecs int64, + lockedCoinsToTransferBaseUnits *uint256.Int) { + + testMeta.expectedSenderBalances = + append(testMeta.expectedSenderBalances, + _getBalance(testMeta.t, testMeta.chain, nil, transactorPublicKeyBase58Check)) + + currentOps, currentTxn, _, err := _coinLockupTransfer( + testMeta.t, testMeta.chain, testMeta.db, testMeta.params, + feeRateNanosPerKB, transactorPublicKeyBase58Check, transactorPrivateKeyBase58Check, + recipientPublicKey, profilePublicKey, unlockTimestampNanoSecs, lockedCoinsToTransferBaseUnits) + require.NoError(testMeta.t, err) + + testMeta.txnOps = append(testMeta.txnOps, currentOps) + testMeta.txns = append(testMeta.txns, currentTxn) +} + +func _coinLockupTransfer(t *testing.T, chain *Blockchain, db *badger.DB, + params *DeSoParams, feeRateNanosPerKB uint64, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + recipientPublicKey *PublicKey, + profilePublicKey *PublicKey, + unlockTimestampNanoSecs int64, + lockedCoinsToTransferBaseUnits *uint256.Int) ( + _utxoOps []*UtxoOperation, _txn *MsgDeSoTxn, _height uint32, _err error) { + + assert := assert.New(t) + require := require.New(t) + _ = assert + _ = require + + transactorPkBytes, _, err := Base58CheckDecode(transactorPublicKeyBase58Check) + require.NoError(err) + + utxoView, err := 
NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(err) + + // Create the update coin lockup params transaction. + txn, totalInputMake, _, feesMake, err := chain.CreateCoinLockupTransferTxn( + transactorPkBytes, recipientPublicKey.ToBytes(), profilePublicKey.ToBytes(), unlockTimestampNanoSecs, + lockedCoinsToTransferBaseUnits, feeRateNanosPerKB, nil, []*DeSoOutput{}) + if err != nil { + return nil, nil, 0, err + } + require.Equal(totalInputMake, feesMake) + + // Sign the transaction now that its inputs are set up. + _signTxn(t, txn, transactorPrivateKeyBase58Check) + + // Connect the transaction. + txHash := txn.Hash() + blockHeight := chain.BlockTip().Height + 1 + utxoOps, totalInput, totalOutput, fees, err := + utxoView.ConnectTransaction( + txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + if err != nil { + return nil, nil, 0, err + } + + // Ensure that DESO is flowing in the correct amount. + require.Equal(totalInput, totalOutput+fees) + + // Check that UtxoOps following connection have the correct type + require.Equal(OperationTypeSpendBalance, utxoOps[0].Type) + require.Equal(OperationTypeCoinLockupTransfer, utxoOps[1].Type) + + // Ensure the transaction can be flushed without issue before returning + require.NoError(utxoView.FlushToDb(uint64(blockHeight))) + return utxoOps, txn, blockHeight, nil +} + +func _coinUnlockWithTestMetaAndConnectTimestamp( + testMeta *TestMeta, + feeRateNanosPerKB uint64, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + profilePublicKeyBase58Check string, + connectTimestamp int64) { + + testMeta.expectedSenderBalances = + append(testMeta.expectedSenderBalances, + _getBalance(testMeta.t, testMeta.chain, nil, transactorPublicKeyBase58Check)) + + currentOps, currentTxn, _, err := _coinUnlockWithConnectTimestamp( + testMeta.t, testMeta.chain, testMeta.db, testMeta.params, + feeRateNanosPerKB, transactorPublicKeyBase58Check, transactorPrivateKeyBase58Check, + 
profilePublicKeyBase58Check, connectTimestamp) + require.NoError(testMeta.t, err) + + testMeta.txnOps = append(testMeta.txnOps, currentOps) + testMeta.txns = append(testMeta.txns, currentTxn) +} + +func _coinUnlockWithConnectTimestamp(t *testing.T, chain *Blockchain, db *badger.DB, + params *DeSoParams, feeRateNanosPerKB uint64, + transactorPublicKeyBase58Check string, + transactorPrivateKeyBase58Check string, + profilePublicKeyBase58Check string, + connectTimestamp int64) ( + _utxoOps []*UtxoOperation, _txn *MsgDeSoTxn, _height uint32, _err error) { + + assert := assert.New(t) + require := require.New(t) + _ = assert + _ = require + + transactorPkBytes, _, err := Base58CheckDecode(transactorPublicKeyBase58Check) + require.NoError(err) + + profilePkBytes, _, err := Base58CheckDecode(profilePublicKeyBase58Check) + require.NoError(err) + + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + require.NoError(err) + + // Create the coin unlock transaction. + txn, totalInputMake, _, feesMake, err := chain.CreateCoinUnlockTxn( + transactorPkBytes, profilePkBytes, feeRateNanosPerKB, nil, []*DeSoOutput{}) + if err != nil { + return nil, nil, 0, err + } + require.Equal(totalInputMake, feesMake) + + // Sign the transaction now that its inputs are set up. + _signTxn(t, txn, transactorPrivateKeyBase58Check) + + // Connect the transaction. + txHash := txn.Hash() + blockHeight := chain.BlockTip().Height + 1 + utxoOps, totalInput, totalOutput, fees, err := + utxoView.ConnectTransaction( + txn, txHash, getTxnSize(*txn), blockHeight, connectTimestamp, true, false) + if err != nil { + return nil, nil, 0, err + } + + // Ensure that DESO is flowing in the correct amount. 
+ require.Equal(totalInput, totalOutput+fees) + + // Check that UtxoOps following connection have the correct type + require.Equal(OperationTypeSpendBalance, utxoOps[0].Type) + if NewPublicKey(profilePkBytes).IsZeroPublicKey() { + require.Equal(OperationTypeAddBalance, utxoOps[1].Type) + require.Equal(OperationTypeCoinUnlock, utxoOps[2].Type) + } else { + require.Equal(OperationTypeCoinUnlock, utxoOps[1].Type) + } + + // Ensure the transaction can be flushed without issue before returning + require.NoError(utxoView.FlushToDb(uint64(blockHeight))) + return utxoOps, txn, blockHeight, nil +} diff --git a/lib/block_view_message_test.go b/lib/block_view_message_test.go index 8e3fcc836..420de5799 100644 --- a/lib/block_view_message_test.go +++ b/lib/block_view_message_test.go @@ -87,7 +87,7 @@ func _privateMessageWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -475,7 +475,7 @@ func TestPrivateMessage(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.NoError(err) } // Flush the utxoView after having added all the transactions. 
@@ -658,8 +658,7 @@ func _messagingKeyWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB, txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, - true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return nil, nil, err } @@ -1688,7 +1687,7 @@ func _connectPrivateMessageWithPartyWithExtraData(testMeta *TestMeta, senderPkBy utxoView, err := NewUtxoView(testMeta.db, testMeta.params, nil, testMeta.chain.snapshot) blockHeight := testMeta.chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the output. if expectedError != nil { assert.Equal(true, strings.Contains(err.Error(), expectedError.Error())) diff --git a/lib/block_view_nft_test.go b/lib/block_view_nft_test.go index 126109900..38b7763bd 100644 --- a/lib/block_view_nft_test.go +++ b/lib/block_view_nft_test.go @@ -91,7 +91,7 @@ func _createNFTWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB, par // Always use height+1 for validation since it's assumed the transaction will // get mined into the next block. utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -308,7 +308,7 @@ func _createNFTBid(t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoP // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -402,7 +402,7 @@ func _acceptNFTBid(t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoP // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -516,7 +516,7 @@ func _updateNFT(t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoPara // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -612,7 +612,7 @@ func _transferNFT(t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoPa // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -699,7 +699,7 @@ func _acceptNFTTransfer(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -782,7 +782,7 @@ func _burnNFT(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } diff --git a/lib/block_view_post_test.go b/lib/block_view_post_test.go index a630822e6..15994a011 100644 --- a/lib/block_view_post_test.go +++ b/lib/block_view_post_test.go @@ -71,7 +71,7 @@ func _submitPost(t *testing.T, chain *Blockchain, db *badger.DB, blockHeight := chain.blockTip().Height + 1 utxoView.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB = 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -168,7 +168,7 @@ func _giveDeSoDiamonds(t *testing.T, chain *Blockchain, db *badger.DB, params *D // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -264,7 +264,7 @@ func _doSubmitPostTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -1550,7 +1550,7 @@ func TestSubmitPost(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.NoError(err) // Assert "after" comment counts are correct at a few different spots @@ -1981,8 +1981,7 @@ func TestDeSoDiamondErrorCases(t *testing.T) { // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction( - txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) if err != nil { return err } diff --git a/lib/block_view_profile_test.go b/lib/block_view_profile_test.go index 18651989a..1611b45f8 100644 --- a/lib/block_view_profile_test.go +++ b/lib/block_view_profile_test.go @@ -70,7 +70,7 @@ func _swapIdentity(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -153,7 +153,7 @@ func _updateProfileWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. 
if err != nil { @@ -1147,7 +1147,7 @@ func TestUpdateProfile(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.NoError(err) } // Flush the utxoView after having added all the transactions. @@ -3411,7 +3411,7 @@ func TestUpdateProfileChangeBack(t *testing.T) { utxoViewCopy, err := mempool.universalUtxoView.CopyUtxoView() require.NoError(err) txnSize := getTxnSize(*txn) - _, _, _, _, err = utxoViewCopy.ConnectTransaction(txn, txn.Hash(), txnSize, chain.blockTip().Height+1, false, false) + _, _, _, _, err = utxoViewCopy.ConnectTransaction(txn, txn.Hash(), txnSize, chain.blockTip().Height+1, 0, false, false) require.NoError(err) mempoolTxsAdded, err := mempool.processTransaction( diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 83f1692a4..bbe4eed03 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -636,14 +636,7 @@ func _submitStakeTxn( _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) // Connect the transaction. - utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( - txn, - txn.Hash(), - getTxnSize(*txn), - testMeta.savedHeight, - true, - false, - ) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } @@ -695,14 +688,7 @@ func _submitUnstakeTxn( _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) // Connect the transaction. 
- utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( - txn, - txn.Hash(), - getTxnSize(*txn), - testMeta.savedHeight, - true, - false, - ) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } @@ -754,14 +740,7 @@ func _submitUnlockStakeTxn( _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) // Connect the transaction. - utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( - txn, - txn.Hash(), - getTxnSize(*txn), - testMeta.savedHeight, - true, - false, - ) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } @@ -935,14 +914,7 @@ func TestStakingWithDerivedKey(t *testing.T) { transactorPublicKeyBase58Check := Base58CheckEncode(transactorPkBytes, false, params) prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) // Connect txn. 
- utxoOps, _, _, fees, err := utxoView.ConnectTransaction( - txn, - txn.Hash(), - getTxnSize(*txn), - testMeta.savedHeight, - true, - false, - ) + utxoOps, _, _, fees, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 470c29d33..8c2edbd82 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -871,7 +871,7 @@ func _doBasicTransferWithViewFlush(t *testing.T, chain *Blockchain, db *badger.D txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.NoError(err) require.GreaterOrEqual(totalOutput, amountNanos) require.Equal(totalInput, totalOutput+fees) @@ -980,7 +980,7 @@ func _updateGlobalParamsEntryWithMempool(t *testing.T, chain *Blockchain, db *ba // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. 
if err != nil { @@ -1186,7 +1186,7 @@ func _applyTestMetaTxnsToViewAndFlush(testMeta *TestMeta) { txHash := txn.Hash() blockHeight := testMeta.chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.NoError(testMeta.t, err) } // Flush the utxoView after having added all the transactions. @@ -1321,9 +1321,7 @@ func TestUpdateGlobalParams(t *testing.T) { txnSize := getTxnSize(*updateGlobalParamsTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(updateGlobalParamsTxn, - updateGlobalParamsTxn.Hash(), txnSize, blockHeight, true, /*verifySignature*/ - false /*ignoreUtxos*/) + utxoView.ConnectTransaction(updateGlobalParamsTxn, updateGlobalParamsTxn.Hash(), txnSize, blockHeight, 0, true, false) require.NoError(err) _, _, _, _ = utxoOps, totalInput, totalOutput, fees require.NoError(utxoView.FlushToDb(0)) @@ -1521,8 +1519,7 @@ func TestBasicTransfer(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, - true /*verifySignatures*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.Error(err) if blockHeight < params.ForkHeights.BalanceModelBlockHeight { require.Contains(err.Error(), RuleErrorInputWithPublicKeyDifferentFromTxnPublicKey) @@ -1564,8 +1561,7 @@ func TestBasicTransfer(t *testing.T) { _signTxn(t, txn, recipientPrivString) txHash := txn.Hash() _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, - true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.Error(err) 
require.Contains(err.Error(), RuleErrorInvalidTransactionSignature) } @@ -1591,8 +1587,7 @@ func TestBasicTransfer(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, - true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorBlockRewardTxnNotAllowedToHaveSignature) } @@ -1626,8 +1621,7 @@ func TestBasicTransfer(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, - true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.Error(err) if blockHeight < params.ForkHeights.BalanceModelBlockHeight { require.Contains(err.Error(), RuleErrorBlockRewardTxnNotAllowedToHaveInputs) @@ -1716,8 +1710,7 @@ func TestBasicTransfer(t *testing.T) { utxoView, _ := NewUtxoView(db, params, chain.postgres, chain.snapshot) txHash := txn.Hash() _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, - true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) require.NoError(err) senderBalance, _ := utxoView.GetDeSoBalanceNanosForPublicKey(senderPkBytes) @@ -2169,7 +2162,7 @@ func TestBlockRewardPatch(t *testing.T) { _signTxn(t, txn, senderPrivString) utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) require.NoError(t, err) - _, _, _, fees, err := utxoView._connectTransaction(txn, txn.Hash(), getTxnSize(*txn), chain.blockTip().Height+1, true, false) + _, _, _, fees, err := utxoView._connectTransaction(txn, txn.Hash(), getTxnSize(*txn), chain.blockTip().Height+1, 0, true, false) 
require.NoError(t, err) blkToMine, _, _, err := miner._getBlockToMine(0) require.NoError(t, err) diff --git a/lib/block_view_types_test.go b/lib/block_view_types_test.go index 46bf612ad..05b1341fb 100644 --- a/lib/block_view_types_test.go +++ b/lib/block_view_types_test.go @@ -353,8 +353,7 @@ func TestUtxoEntryEncodeDecode(t *testing.T) { burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction( - burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) _, _, _ = totalInput, totalOutput, fees require.NoError(err) utxoOpsList = append(utxoOpsList, utxoOps) @@ -365,8 +364,7 @@ func TestUtxoEntryEncodeDecode(t *testing.T) { burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction( - burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, true /*verifySignature*/, false /*ignoreUtxos*/) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) require.NoError(err) require.Equal(2, len(utxoOps)) diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 8daeb056a..e53bd1342 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -332,14 +332,7 @@ func _submitRegisterAsValidatorTxn( _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) // Connect the transaction. 
- utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( - txn, - txn.Hash(), - getTxnSize(*txn), - testMeta.savedHeight, - true, - false, - ) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } @@ -389,14 +382,7 @@ func _submitUnregisterAsValidatorTxn( _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) // Connect the transaction. - utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( - txn, - txn.Hash(), - getTxnSize(*txn), - testMeta.savedHeight, - true, - false, - ) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } @@ -549,14 +535,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { transactorPublicKeyBase58Check := Base58CheckEncode(transactorPkBytes, false, params) prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) // Connect txn. - utxoOps, _, _, _, err := utxoView.ConnectTransaction( - txn, - txn.Hash(), - getTxnSize(*txn), - testMeta.savedHeight, - true, - false, - ) + utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return err } @@ -1799,14 +1778,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { transactorPublicKeyBase58Check := Base58CheckEncode(transactorPkBytes, false, params) prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) // Connect txn. 
- utxoOps, _, _, _, err := utxoView.ConnectTransaction( - txn, - txn.Hash(), - getTxnSize(*txn), - testMeta.savedHeight, - true, - false, - ) + utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return err } @@ -1958,14 +1930,7 @@ func _submitUnjailValidatorTxn( _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) // Connect the transaction. - utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( - txn, - txn.Hash(), - getTxnSize(*txn), - testMeta.savedHeight, - true, - false, - ) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } diff --git a/lib/blockchain.go b/lib/blockchain.go index 0a8ba1077..484f47687 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -2777,14 +2777,7 @@ func (bc *Blockchain) ValidateTransaction( } txnSize := int64(len(txnBytes)) // We don't care about the utxoOps or the fee it returns. 
- _, _, _, _, err = utxoView._connectTransaction( - txnMsg, - txHash, - txnSize, - blockHeight, - verifySignatures, - false, /*ignoreUtxos*/ - ) + _, _, _, _, err = utxoView._connectTransaction(txnMsg, txHash, txnSize, blockHeight, 0, verifySignatures, false) if err != nil { return errors.Wrapf(err, "ValidateTransaction: Problem validating transaction: ") } @@ -5146,8 +5139,7 @@ func (bc *Blockchain) EstimateDefaultFeeRateNanosPerKB( return minFeeRateNanosPerKB } numBytesInTxn := len(txnBytes) - _, _, _, fees, err := utxoView.ConnectTransaction( - txn, txn.Hash(), int64(numBytesInTxn), tipNode.Height, false /*verifySignatures*/, false /*ignoreUtxos*/) + _, _, _, fees, err := utxoView.ConnectTransaction(txn, txn.Hash(), int64(numBytesInTxn), tipNode.Height, 0, false, false) if err != nil { return minFeeRateNanosPerKB } @@ -5366,3 +5358,199 @@ func (bc *Blockchain) _createAssociationTxn( } return txn, totalInput, changeAmount, fees, nil } + +// ------------------------------------------------- +// Lockup Transaction Creation Function +// ------------------------------------------------- + +func (bc *Blockchain) CreateCoinLockupTxn( + TransactorPublicKey []byte, + ProfilePublicKey []byte, + UnlockTimestampNanoSecs int64, + LockupAmountBaseUnits *uint256.Int, + // Standard transaction fields + minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { + + // NOTE: TxInputs is a remnant of the UTXO transaction model. + // It's assumed that lockup transactions follow balance model. + // For this reason, we ignore the TxInputs field in the MsgDeSoTxn struct. + + // Create a transaction containing the coin lockup fields. 
+ txn := &MsgDeSoTxn{ + PublicKey: TransactorPublicKey, + TxnMeta: &CoinLockupMetadata{ + ProfilePublicKey: NewPublicKey(ProfilePublicKey), + UnlockTimestampNanoSecs: UnlockTimestampNanoSecs, + LockupAmountBaseUnits: LockupAmountBaseUnits, + }, + TxOutputs: additionalOutputs, + // The signature will be added once other transaction fields are finalized. + } + + // NOTE: AddInputsAndChangeToTransaction returns four fields. + // Some of these are no longer relevant following the move to balance model. + // We list each of these fields and whether they're relevant below: + // (Still relevant) _totalInputAdded - Equals inputSubsidy + sum(txn.TxOutputs) + additionalFees + txn.TxnFees + // (Still relevant) _spendAmount - Equals sum(txn.TxOutputs) + additionalFees + // (No longer relevant) _totalChangeAdded - Always returns a zero in the balance model era. + // (Still relevant) _fee - Returns the computed fees based on the size of the transaction. + // (Still relevant) _err - Necessary for error checking. For obvious reasons. + totalInput, spendAmount, _, fees, err := + bc.AddInputsAndChangeToTransaction(txn, minFeeRateNanosPerKB, mempool) + if err != nil { + return nil, 0, 0, 0, + errors.Wrapf(err, "CreateCoinLockupTxn: Problem adding inputs: ") + } + _ = spendAmount + + // NOTE: Normally by convention here we check for the transaction to have at least one input in TxInputs. + // This is assumed to be no longer necessary given the requirement of + // lockup transactions to be after the transition to balance model. 
+ + return txn, totalInput, 0, fees, nil +} + +func (bc *Blockchain) CreateCoinLockupTransferTxn( + TransactorPublicKey []byte, + RecipientPublicKey []byte, + ProfilePublicKey []byte, + UnlockTimestampNanoSecs int64, + LockedCoinsToTransferBaseUnits *uint256.Int, + // Standard transaction fields + minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { + + // NOTE: TxInputs is a remnant of the UTXO transaction model. + // It's assumed that lockup transactions follow balance model. + // For this reason, we ignore the TxInputs field in the MsgDeSoTxn struct. + + // Create a transaction containing the coin lockup transfer fields. + txn := &MsgDeSoTxn{ + PublicKey: TransactorPublicKey, + TxnMeta: &CoinLockupTransferMetadata{ + RecipientPublicKey: NewPublicKey(RecipientPublicKey), + ProfilePublicKey: NewPublicKey(ProfilePublicKey), + UnlockTimestampNanoSecs: UnlockTimestampNanoSecs, + LockedCoinsToTransferBaseUnits: LockedCoinsToTransferBaseUnits, + }, + TxOutputs: additionalOutputs, + // The signature will be added once other transaction fields are finalized. + } + + // NOTE: AddInputsAndChangeToTransaction returns four fields. + // Some of these are no longer relevant following the move to balance model. + // We list each of these fields and whether they're relevant below: + // (Still relevant) _totalInputAdded - Equals inputSubsidy + sum(txn.TxOutputs) + additionalFees + txn.TxnFees + // (Still relevant) _spendAmount - Equals sum(txn.TxOutputs) + additionalFees + // (No longer relevant) _totalChangeAdded - Always returns a zero in the balance model era. + // (Still relevant) _fee - Returns the computed fees based on the size of the transaction. + // (Still relevant) _err - Necessary for error checking. For obvious reasons. 
+ totalInput, spendAmount, _, fees, err := + bc.AddInputsAndChangeToTransaction(txn, minFeeRateNanosPerKB, mempool) + if err != nil { + return nil, 0, 0, 0, + errors.Wrapf(err, "CreateCoinLockupTransferTxn: Problem adding inputs: ") + } + _ = spendAmount + + // NOTE: Normally by convention here we check for the transaction to have at least one input in TxInputs. + // This is assumed to be no longer necessary given the requirement of + // lockup transactions to be after the transition to balance model. + + return txn, totalInput, 0, fees, nil +} + +func (bc *Blockchain) CreateUpdateCoinLockupParamsTxn( + TransactorPublicKey []byte, + LockupYieldDurationNanoSecs int64, + LockupYieldAPYBasisPoints uint64, + RemoveYieldCurvePoint bool, + NewLockupTransferRestrictions bool, + LockupTransferRestrictionStatus TransferRestrictionStatus, + // Standard transaction fields + minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { + + // NOTE: TxInputs is a remnant of the UTXO transaction model. + // It's assumed that lockup transactions follow balance model. + // For this reason, we ignore the TxInputs field in the MsgDeSoTxn struct. + + // Create a transaction containing the update coin lockup params fields. + txn := &MsgDeSoTxn{ + PublicKey: TransactorPublicKey, + TxnMeta: &UpdateCoinLockupParamsMetadata{ + LockupYieldDurationNanoSecs: LockupYieldDurationNanoSecs, + LockupYieldAPYBasisPoints: LockupYieldAPYBasisPoints, + RemoveYieldCurvePoint: RemoveYieldCurvePoint, + NewLockupTransferRestrictions: NewLockupTransferRestrictions, + LockupTransferRestrictionStatus: LockupTransferRestrictionStatus, + }, + TxOutputs: additionalOutputs, + // The signature will be added once other transaction fields are finalized. + } + + // NOTE: AddInputsAndChangeToTransaction returns four fields. + // Some of these are no longer relevant following the move to balance model. 
+ // We list each of these fields and whether they're relevant below: + // (Still relevant) _totalInputAdded - Equals inputSubsidy + sum(txn.TxOutputs) + additionalFees + txn.TxnFees + // (Still relevant) _spendAmount - Equals sum(txn.TxOutputs) + additionalFees + // (No longer relevant) _totalChangeAdded - Always returns a zero in the balance model era. + // (Still relevant) _fee - Returns the computed fees based on the size of the transaction. + // (Still relevant) _err - Necessary for error checking. For obvious reasons. + totalInput, spendAmount, _, fees, err := + bc.AddInputsAndChangeToTransaction(txn, minFeeRateNanosPerKB, mempool) + if err != nil { + return nil, 0, 0, 0, + errors.Wrapf(err, "CreateCoinLockupTransferTxn: Problem adding inputs: ") + } + _ = spendAmount + + // NOTE: Normally by convention here we check for the transaction to have at least one input in TxInputs. + // This is assumed to be no longer necessary given the requirement of + // lockup transactions to be after the transition to balance model. + + return txn, totalInput, 0, fees, nil +} + +func (bc *Blockchain) CreateCoinUnlockTxn( + TransactorPublicKey []byte, + ProfilePublicKey []byte, + // Standard transaction fields + minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + _txn *MsgDeSoTxn, _totalInput uint64, _chainAmount uint64, _fees uint64, _err error) { + + // NOTE: TxInputs is a remnant of the UTXO transaction model. + // It's assumed that lockup transactions follow balance model. + // For this reason, we ignore the TxInputs field in the MsgDeSoTxn struct. + + // Create a transaction containing the coin unlock fields. + txn := &MsgDeSoTxn{ + PublicKey: TransactorPublicKey, + TxnMeta: &CoinUnlockMetadata{ProfilePublicKey: NewPublicKey(ProfilePublicKey)}, + TxOutputs: additionalOutputs, + // The signature will be added once other transaction fields are finalized. + } + + // NOTE: AddInputsAndChangeToTransaction returns four fields. 
+ // Some of these are no longer relevant following the move to balance model. + // We list each of these fields and whether they're relevant below: + // (Still relevant) _totalInputAdded - Equals inputSubsidy + sum(txn.TxOutputs) + additionalFees + txn.TxnFees + // (Still relevant) _spendAmount - Equals sum(txn.TxOutputs) + additionalFees + // (No longer relevant) _totalChangeAdded - Always returns a zero in the balance model era. + // (Still relevant) _fee - Returns the computed fees based on the size of the transaction. + // (Still relevant) _err - Necessary for error checking. For obvious reasons. + totalInput, spendAmount, _, fees, err := + bc.AddInputsAndChangeToTransaction(txn, minFeeRateNanosPerKB, mempool) + if err != nil { + return nil, 0, 0, 0, + errors.Wrapf(err, "CreateCoinLockupTransferTxn: Problem adding inputs: ") + } + _ = spendAmount + + // NOTE: Normally by convention here we check for the transaction to have at least one input in TxInputs. + // This is assumed to be no longer necessary given the requirement of + // lockup transactions to be after the transition to balance model. + + return txn, totalInput, 0, fees, nil +} diff --git a/lib/db_utils.go b/lib/db_utils.go index e2e28dbea..4b6061dc3 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -5236,8 +5236,7 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, // Set txnSizeBytes to 0 here as the minimum network fee is 0 at genesis block, so there is no need to serialize // these transactions to check if they meet the minimum network fee requirement. 
var utxoOpsForTxn []*UtxoOperation - utxoOpsForTxn, _, _, _, err = utxoView.ConnectTransaction( - txn, txn.Hash(), 0, 0 /*blockHeight*/, false /*verifySignatures*/, true /*ignoreUtxos*/) + utxoOpsForTxn, _, _, _, err = utxoView.ConnectTransaction(txn, txn.Hash(), 0, 0, 0, false, true) if err != nil { return fmt.Errorf( "InitDbWithDeSoGenesisBlock: Error connecting transaction: %v, "+ diff --git a/lib/load_test.go b/lib/load_test.go index af413511c..0d2973e8f 100644 --- a/lib/load_test.go +++ b/lib/load_test.go @@ -134,7 +134,7 @@ func TestComputeMaxTPS(t *testing.T) { timeStart := time.Now() for _, tx := range txns { - _, _, _, _, err := utxoView.ConnectTransaction(tx, tx.Hash(), 0, 1, false /*verifySignature*/, false /*ignoreUtxos*/) + _, _, _, _, err := utxoView.ConnectTransaction(tx, tx.Hash(), 0, 1, 0, false, false) require.NoError(err) } //require.NoError(utxoView.FlushToDb()) diff --git a/lib/mempool.go b/lib/mempool.go index 748b5977c..b859be11f 100644 --- a/lib/mempool.go +++ b/lib/mempool.go @@ -875,8 +875,7 @@ func (mp *DeSoMempool) addTransaction( // Add it to the universal view. We assume the txn was already added to the // backup view. 
- _, _, _, _, err = mp.universalUtxoView._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), height, - false /*verifySignatures*/, false /*ignoreUtxos*/) + _, _, _, _, err = mp.universalUtxoView._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), height, 0, false, false) if err != nil { return nil, errors.Wrap(err, "ERROR addTransaction: _connectTransaction "+ "failed on universalUtxoView; this is a HUGE problem and should never happen") @@ -884,8 +883,7 @@ func (mp *DeSoMempool) addTransaction( // Add it to the universalTransactionList if it made it through the view mp.universalTransactionList = append(mp.universalTransactionList, mempoolTx) if updateBackupView { - _, _, _, _, err = mp.backupUniversalUtxoView._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), height, - false /*verifySignatures*/, false /*ignoreUtxos*/) + _, _, _, _, err = mp.backupUniversalUtxoView._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), height, 0, false, false) if err != nil { return nil, errors.Wrap(err, "ERROR addTransaction: _connectTransaction "+ "failed on backupUniversalUtxoView; this is a HUGE problem and should never happen") @@ -965,8 +963,7 @@ func (mp *DeSoMempool) _quickCheckBitcoinExchangeTxn( // transaction will only get this far once we are positive the BitcoinManager // has the block corresponding to the transaction. // We skip verifying txn size for bitcoin exchange transactions. - _, _, _, txFee, err := utxoView._connectTransaction( - tx, txHash, 0, bestHeight, false, false) + _, _, _, txFee, err := utxoView._connectTransaction(tx, txHash, 0, bestHeight, 0, false, false) if err != nil { // Note this can happen in odd cases where a transaction's dependency was removed // but the transaction depending on it was not. 
See the comment on @@ -1058,8 +1055,7 @@ func (mp *DeSoMempool) tryAcceptTransaction( usdCentsPerBitcoinBefore := mp.backupUniversalUtxoView.GetCurrentUSDCentsPerBitcoin() bestHeight := uint32(mp.bc.blockTip().Height + 1) // We can skip verifying the transaction size as related to the minimum fee here. - utxoOps, totalInput, totalOutput, txFee, err := mp.backupUniversalUtxoView._connectTransaction( - tx, txHash, 0, bestHeight, verifySignatures, false) + utxoOps, totalInput, totalOutput, txFee, err := mp.backupUniversalUtxoView._connectTransaction(tx, txHash, 0, bestHeight, 0, verifySignatures, false) if err != nil { mp.rebuildBackupView() return nil, nil, errors.Wrapf(err, "tryAcceptTransaction: Problem "+ @@ -2073,8 +2069,7 @@ func ConnectTxnAndComputeTransactionMetadata( totalNanosPurchasedBefore := utxoView.NanosPurchased usdCentsPerBitcoinBefore := utxoView.GetCurrentUSDCentsPerBitcoin() - utxoOps, totalInput, totalOutput, fees, err := utxoView._connectTransaction( - txn, txn.Hash(), 0, blockHeight, false, false) + utxoOps, totalInput, totalOutput, fees, err := utxoView._connectTransaction(txn, txn.Hash(), 0, blockHeight, 0, false, false) if err != nil { return nil, fmt.Errorf( "UpdateTxindex: Error connecting txn to UtxoView: %v", err) From ea5ea906225b7c5f937ad126c007d6a0b4fe1b39 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Tue, 17 Oct 2023 14:47:31 -0700 Subject: [PATCH 256/762] Add basic logic for updating CoinEntry Circulation + Holders --- lib/block_view_lockups.go | 54 ++++++++++++++++++++++++++++++++++++--- lib/block_view_types.go | 3 ++- lib/errors.go | 1 + 3 files changed, 54 insertions(+), 4 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 6d304978a..2050d94eb 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -740,6 +740,7 @@ func (bav *UtxoView) _connectCoinLockup( // Validate the transactor as having sufficient DAO Coin or DESO balance for the transaction. 
var transactorBalanceNanos256 *uint256.Int var prevTransactorBalanceEntry *BalanceEntry + var prevCoinEntry *CoinEntry if profilePKID.IsZeroPKID() { // Check the DeSo balance of the user. transactorBalanceNanos, err := bav.GetDeSoBalanceNanosForPublicKey(txn.PublicKey) @@ -786,6 +787,18 @@ func (bav *UtxoView) _connectCoinLockup( transactorBalanceEntry.BalanceNanos = *uint256.NewInt().Sub(&transactorBalanceEntry.BalanceNanos, txMeta.LockupAmountBaseUnits) bav._setDAOCoinBalanceEntryMappings(transactorBalanceEntry) + + // Create a copy of the associated CoinEntry in the event we must roll back the transaction. + prevCoinEntry = profileEntry.DAOCoinEntry.Copy() + + // Update CoinsInCirculation and NumberOfHolders associated with the DAO coin balance. + profileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().Sub( + &profileEntry.DAOCoinEntry.CoinsInCirculationNanos, + txMeta.LockupAmountBaseUnits) + if transactorBalanceEntry.BalanceNanos.IsZero() { + profileEntry.DAOCoinEntry.NumberOfHolders-- + } + bav._setProfileEntryMappings(profileEntry) } // By now we know the transaction to be valid. We now source yield information from either @@ -883,6 +896,7 @@ func (bav *UtxoView) _connectCoinLockup( Type: OperationTypeCoinLockup, PrevTransactorBalanceEntry: prevTransactorBalanceEntry, PrevLockedBalanceEntry: &previousLockedBalanceEntry, + PrevCoinEntry: prevCoinEntry, }) // Construct UtxoOps in the event this transaction is reverted. @@ -1021,6 +1035,23 @@ func (bav *UtxoView) _disconnectCoinLockup( } else { // Revert the transactor's DAO coin balance. bav._setBalanceEntryMappings(operationData.PrevTransactorBalanceEntry, true) + + // Fetch the profile entry associated with the lockup. 
+ profileEntry := bav.GetProfileEntryForPKID(operationData.PrevLockedBalanceEntry.ProfilePKID) + if profileEntry == nil || profileEntry.isDeleted { + return fmt.Errorf("_disconnectCoinLockup: Trying to revert coin entry " + + "update but found nil profile entry; this shouldn't be possible") + } + + // Ensure the PrevCoinEntry is not nil. This shouldn't be possible. + if operationData.PrevCoinEntry == nil { + return fmt.Errorf("_disconnectCoinLockup: Trying to revert coin entry " + + "update but found nil prev coin entry; this shouldn't be possible") + } + + // Revert the coin entry. + profileEntry.DAOCoinEntry = *operationData.PrevCoinEntry + bav._setProfileEntryMappings(profileEntry) } // By here we only need to disconnect the basic transfer associated with the transaction. @@ -1238,7 +1269,7 @@ func (bav *UtxoView) _disconnectUpdateCoinLockupParams( // If the previous point is nil, we throw an error. This shouldn't be possible. if txMeta.RemoveYieldCurvePoint && txMeta.LockupYieldDurationNanoSecs > 0 { if operationData.PrevLockupYieldCurvePoint == nil { - return fmt.Errorf("_connectUpdateCoinLockupParams: trying to revert point deletion " + + return fmt.Errorf("_disconnectUpdateCoinLockupParams: trying to revert point deletion " + "but found nil previous yield curve point; this shouldn't be possible") } bav._setLockupYieldCurvePoint(&LockupYieldCurvePoint{ @@ -1253,7 +1284,7 @@ func (bav *UtxoView) _disconnectUpdateCoinLockupParams( // Fetch the profile entry and LockupTransferRestriction status. 
profileEntry := bav.GetProfileEntryForPKID(profilePKID) if profileEntry == nil || profileEntry.isDeleted { - return fmt.Errorf("_connectUpdateCoinLockupParams: Trying to revert lockup transfer restriction " + + return fmt.Errorf("_disconnectUpdateCoinLockupParams: Trying to revert lockup transfer restriction " + "update but found nil profile entry; this shouldn't be possible") } @@ -1585,12 +1616,13 @@ func (bav *UtxoView) _connectCoinUnlock( txMeta := txn.TxnMeta.(*CoinUnlockMetadata) // Check for a valid profile public key. + var profileEntry *ProfileEntry if len(txMeta.ProfilePublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, errors.Wrap(RuleErrorDAOCoinInvalidPubKey, "_connectCoinUnlock") } if !txMeta.ProfilePublicKey.IsZeroPublicKey() { - profileEntry := bav.GetProfileEntryForPublicKey(txMeta.ProfilePublicKey.ToBytes()) + profileEntry = bav.GetProfileEntryForPublicKey(txMeta.ProfilePublicKey.ToBytes()) if profileEntry == nil || profileEntry.isDeleted { return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockOnNonExistentProfile, "_connectCoinUnlock") @@ -1650,6 +1682,7 @@ func (bav *UtxoView) _connectCoinUnlock( // Credit the transactor with either DAO coins or DeSo for this unlock. var prevTransactorBalanceEntry *BalanceEntry + var prevCoinEntry *CoinEntry if profilePKID.IsZeroPKID() { // Ensure the uint256 can be properly represented as a uint64. if !unlockedBalance.IsUint64() { @@ -1677,6 +1710,21 @@ func (bav *UtxoView) _connectCoinUnlock( } newTransactorBalanceEntry.BalanceNanos = *newTransactorBalanceNanos bav._setBalanceEntryMappings(newTransactorBalanceEntry, true) + + // Update CoinsInCirculation and NumberOfHolders to accurately reflect the changing balance. 
+ prevCoinEntry = profileEntry.DAOCoinEntry.Copy() + newCoinsInCirculationNanos, err := SafeUint256().Add( + &profileEntry.DAOCoinEntry.CoinsInCirculationNanos, + unlockedBalance) + if err != nil { + return 0, 0, nil, errors.Wrapf(RuleErrorCoinUnlockCausesCoinsInCirculationOverflow, + "_connectCoinUnlock") + } + profileEntry.DAOCoinEntry.CoinsInCirculationNanos = *newCoinsInCirculationNanos + if prevTransactorBalanceEntry.BalanceNanos.IsZero() { + profileEntry.DAOCoinEntry.NumberOfHolders++ + } + bav._setProfileEntryMappings(profileEntry) } // Create a UtxoOp for the operation. diff --git a/lib/block_view_types.go b/lib/block_view_types.go index b32413adf..7b91e0f43 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -943,7 +943,8 @@ type UtxoOperation struct { // // PrevLockedBalanceEntry is the previous LockedBalanceEntry prior - // to a DAO coin lockup. + // to a DAO coin lockup. PrevCoinEntry defined above stores the + // CoinsInCirculation and NumberOfHolders prior to a lockup transaction. 
PrevLockedBalanceEntry *LockedBalanceEntry // PrevSenderLockedBalanceEntry and PrevReceiverLockedBalanceEntry are the previous LockedBalanceEntry diff --git a/lib/errors.go b/lib/errors.go index 829c90f44..e198b51ff 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -520,6 +520,7 @@ const ( RuleErrorCoinUnlockUnlockableCoinsOverflow RuleError = "RuleErrorCoinUnlockUnlockableCoinsOverflow" RuleErrorCoinUnlockUnlockableDeSoOverflow RuleError = "RuleErrorCoinUnlockUnlockableCoinsOverflow" RuleErrorCoinUnlockCausesBalanceOverflow RuleError = "RuleErrorCoinUnlockCausesBalanceOverflow" + RuleErrorCoinUnlockCausesCoinsInCirculationOverflow RuleError = "RuleErrorCoinUnlockCausesCoinsInCirculationOverflow" RuleErrorUpdateCoinLockupParamsOnInvalidPKID RuleError = "RuleErrorUpdateCoinLockupParamsOnInvalidPKID" RuleErrorUpdateCoinLockupParamsInvalidRestrictions RuleError = "RuleErrorUpdateCoinLockupParamsInvalidRestrictions" RuleErrorUpdateCoinLockupParamsNegativeDuration RuleError = "RuleErrorUpdateCoinLockupParamsInvalidRestrictions" From d7e776301f6db5840e33f520510d3d9898328236 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 18 Oct 2023 21:54:50 -0400 Subject: [PATCH 257/762] Adapt Fast-HotStuff Reference Implementation Node For Integration Testing (#721) Adapt reference implemetation --- consensus/event_loop.go | 2 +- consensus/integration_test_types.go | 443 ++++++++++++++++++++++++++++ 2 files changed, 444 insertions(+), 1 deletion(-) create mode 100644 consensus/integration_test_types.go diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 3fe7e6504..4b70b10d0 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -687,7 +687,7 @@ func (fc *FastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt // Happy path return &FastHotStuffEvent{ EventType: FastHotStuffEventTypeConstructTimeoutQC, // The event type - View: fc.currentView, // The view that we have a timeout QC for + 
View: fc.currentView, // The view that the timeout QC is proposed in TipBlockHash: validatorsHighQC.GetBlockHash(), // The block hash that we extend from TipBlockHeight: safeBlock.GetHeight(), // The block height that we extend from AggregateQC: &aggregateQuorumCertificate{ diff --git a/consensus/integration_test_types.go b/consensus/integration_test_types.go new file mode 100644 index 000000000..3255b1d6d --- /dev/null +++ b/consensus/integration_test_types.go @@ -0,0 +1,443 @@ +package consensus + +import ( + "sync" + "time" + + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections" + "github.com/holiman/uint256" +) + +// validatorNode is a simplified implementation of a Fast-HotStuff node that runs the Fast-HotStuff +// event loop. It is adapted into a working state from the reference implementation at: +// https://github.com/deso-protocol/hotstuff_pseudocode/blob/main/fast_hotstuff_bls.go +// +// This node implementation implementation has no networking, and no node syncing. It's a bare-bones +// implementation that runs the the Fast-HotStuff protocol for a single node. It is used to test the +// correctness of the Fast-HotStuff event loop in integration testing. 
+type validatorNode struct { + lock *sync.Mutex + + privateKey *bls.PrivateKey + stake *uint256.Int + + eventLoop *FastHotStuffEventLoop + + isBlockProposer bool + validatorNodes []*validatorNode + + latestCommittedBlock Block + + safeBlocks map[[32]byte]*block + quit chan struct{} +} + +func newValidatorNode(stake *uint256.Int, isBlockProposer bool) *validatorNode { + return &validatorNode{ + lock: &sync.Mutex{}, + privateKey: createDummyBLSPrivateKey(), + stake: stake, + eventLoop: NewFastHotStuffEventLoop(), + isBlockProposer: isBlockProposer, + + quit: make(chan struct{}), + } +} + +func (node *validatorNode) Init( + crankTimerInterval time.Duration, + timeoutBaseDuration time.Duration, + genesisBlock *block, + validatorNodes []*validatorNode, +) error { + node.lock.Lock() + defer node.lock.Unlock() + + node.validatorNodes = validatorNodes + node.safeBlocks = map[[32]byte]*block{ + genesisBlock.GetBlockHash().GetValue(): genesisBlock, + } + + return node.eventLoop.Init( + crankTimerInterval, + timeoutBaseDuration, + BlockWithValidators{genesisBlock, node.getValidators()}, + []BlockWithValidators{ + {genesisBlock, node.getValidators()}, + }, + ) +} + +func (node *validatorNode) Resync(tipBlock *block, safeBlocks []*block) error { + node.lock.Lock() + defer node.lock.Unlock() + + safeBlocks = append(safeBlocks, tipBlock) + node.safeBlocks = collections.ToMap(safeBlocks, func(bb *block) [32]byte { + return bb.GetBlockHash().GetValue() + }) + + return node.eventLoop.Init( + node.eventLoop.crankTimerInterval, + node.eventLoop.timeoutBaseDuration, + BlockWithValidators{tipBlock, node.getValidators()}, + collections.Transform(safeBlocks, func(bb *block) BlockWithValidators { + return BlockWithValidators{bb, node.getValidators()} + }), + ) +} + +func (node *validatorNode) getValidators() []Validator { + return collections.Transform(node.validatorNodes, func(validator *validatorNode) Validator { + return validator + }) +} + +func (node *validatorNode) GetPublicKey() 
*bls.PublicKey { + return node.privateKey.PublicKey() +} + +func (node *validatorNode) GetStakeAmount() *uint256.Int { + return node.stake +} + +func (node *validatorNode) ProcessBlock(incomingBlock *block) { + node.lock.Lock() + defer node.lock.Unlock() + + if node.eventLoop.status != eventLoopStatusRunning { + return + } + + // Make sure that the block contains a valid QC, signature, transactions, + // and that it's for the current view. + if !node.sanityCheckBlock(incomingBlock) { + return + } + + // The safeVote variable will tell us if we can accept this block and vote on it. + safeVote := false + + // If the block doesn’t contain an AggregateQC, then that indicates that we + // did NOT timeout in the previous view, which means we should just check that + // the QC corresponds to the previous view. + if isInterfaceNil(incomingBlock.aggregateQC) { + // The block is safe to vote on if it is a direct child of the previous + // block. This means that the parent and child blocks have consecutive + // views. We use the current block’s QC to find the view of the parent. + safeVote = incomingBlock.GetView() == incomingBlock.GetQC().GetView()+1 + } else { + // If we have an AggregateQC set on the block, it means the nodes decided + // to skip a view by sending TimeoutMessages to the leader, so we process + // the block accordingly. + + // We find the QC with the highest view among the QCs contained in the + // AggregateQC. + highestTimeoutQC := incomingBlock.aggregateQC.GetHighQC() + + // We make sure that the block’s QC matches the view of the highest QC that we’re aware of. + safeVote = incomingBlock.aggregateQC.GetHighQC().GetView() == highestTimeoutQC.GetView() && + incomingBlock.aggregateQC.GetView()+1 == node.eventLoop.currentView + } + + // If the block isn't safe to process locally, then there's nothing else to do. + if !safeVote { + return + } + + // Store the block locally. 
+ node.safeBlocks[incomingBlock.blockHash.GetValue()] = incomingBlock + + // Run the commit rule. + node.commitChainFromGrandParent(incomingBlock) + + // Update the event loop with the new block as the chain tip. + node.eventLoop.ProcessTipBlock( + BlockWithValidators{incomingBlock, node.getValidators()}, + collections.Transform( + collections.MapValues(node.safeBlocks), + func(bb *block) BlockWithValidators { + return BlockWithValidators{bb, node.getValidators()} + }, + ), + ) +} + +// sanityCheckBlock is used to verify that the block contains valid information. +func (node *validatorNode) sanityCheckBlock(block *block) bool { + // We ensure the currently observed block is either for the current view, or for a future view. + if block.GetView() < node.eventLoop.currentView { + return false + } + + // The block's should be properly formatted. + if !isProperlyFormedBlock(block) { + return false + } + + // We make sure the QC contains valid signatures from 2/3rds of validators, weighted by stake. And that the + // combined signature is valid. + if !IsValidSuperMajorityQuorumCertificate(block.GetQC(), node.getValidators()) { + return false + } + + // If the block doesn't contain a timeout QC, then we're done. + if !isInterfaceNil(block.qc) && isInterfaceNil(block.aggregateQC) { + return true + } + + // If the block contains a timeout QC, then we make sure the it is a valid aggregate QC. + if !node.validateTimeoutProof(block.aggregateQC) { + return false + } + + return true +} + +// validateTimeoutProof is used to verify that the validators included in the QC collectively own at least 2/3rds +// of the stake. Also make sure there are no repeated public keys. Note the bitset in the signature allows us to +// determine how much stake the validators had. +func (node *validatorNode) validateTimeoutProof(aggregateQC AggregateQuorumCertificate) bool { + // Extract the highest QC view from the AggregateQC. 
+ highestQCView := uint64(0) + for _, view := range aggregateQC.GetHighQCViews() { + if view > highestQCView { + highestQCView = view + } + } + + // The highest QC view found in the signatures should match the highest view + // of the HighestQC included in the AggregateQC. + if highestQCView != aggregateQC.GetHighQC().GetView() { + return false + } + + // Verify the HighQC included in the AggregateQC. + if !IsValidSuperMajorityQuorumCertificate(aggregateQC.GetHighQC(), node.getValidators()) { + return false + } + + // Extract the payload that every validator would have signed for the aggregate QC. + signedPayloads := [][]byte{} + for _, highQCView := range aggregateQC.GetHighQCViews() { + payload := GetTimeoutSignaturePayload(aggregateQC.GetView(), highQCView) + signedPayloads = append(signedPayloads, payload[:]) + } + + // Extract the public keys of the validators that signed the aggregate QC. + signersList := aggregateQC.GetAggregatedSignature().GetSignersList() + signerPublicKeys := []*bls.PublicKey{} + + for ii := 0; ii < signersList.Size(); ii++ { + signerPublicKeys = append(signerPublicKeys, node.validatorNodes[ii].GetPublicKey()) + } + + // Validate the signers' aggregate signatures. + isValidSignature, err := bls.VerifyAggregateSignatureMultiplePayloads( + signerPublicKeys, + aggregateQC.GetAggregatedSignature().GetSignature(), + signedPayloads, + ) + if err != nil || !isValidSignature { + return false + } + + return true +} + +// commitChainFromGrandParent represents our commit rule. It is called whenever we receive a new block to determine +// if we can commit any blocks that we've previously received. The Fast-HotStuff commit rule finalizes blocks once +// we observe a two-chain involving a direct one-chain. In other words, we must observe a sequence of three blocks: +// +// B1 - B2 - B3 +// +// such that B1 is the parent of B2, and B2 is an ancestor of B3. 
The ancestor-descendant relationship is established +// whenever a block contains the QC for another block. We say that this block is the descendant of the other block. +// In particular, if the two blocks were proposed with consecutive views, we say these blocks are in a parent-child +// relationship. So, when we observe the aforementioned configuration of B1, B2, and B3, we finalize all ancestors of +// B1 as well as B1. To see why this is safe, one is referred to read the Fast-HotStuff paper. +func (node *validatorNode) commitChainFromGrandParent(block *block) { + // In accordance to the above comment, B3 = block, B2 = parent, and B1 = grandParent. + parent := node.safeBlocks[block.GetQC().GetBlockHash().GetValue()] + if parent == nil { + return + } + + // We verify that B2 is the parent of B3. + if block.GetView() != (parent.GetView() + 1) { + return + } + + grandParent := node.safeBlocks[parent.GetQC().GetBlockHash().GetValue()] + if grandParent == nil { + return + } + + // We verify that B1 is the parent of B2. + if parent.GetView() != (grandParent.GetView() + 1) { + return + } + + // We have successfully observed a committing configuration, we will now commit all ancestors of B1 as well as B1. 
+ node.latestCommittedBlock = grandParent +} + +func (node *validatorNode) ProcessVote(vote VoteMessage) { + node.lock.Lock() + defer node.lock.Unlock() + + if node.eventLoop.status != eventLoopStatusRunning { + return + } + + node.eventLoop.ProcessValidatorVote(vote) +} + +func (node *validatorNode) ProcessTimeout(timeout TimeoutMessage) { + node.lock.Lock() + defer node.lock.Unlock() + + if node.eventLoop.status != eventLoopStatusRunning { + return + } + + node.eventLoop.ProcessValidatorTimeout(timeout) +} + +func (node *validatorNode) Start() { + node.eventLoop.Start() + go node.runEventSignalLoop() +} + +func (node *validatorNode) runEventSignalLoop() { + for { + select { + case event := <-node.eventLoop.Events: + switch event.EventType { + case FastHotStuffEventTypeVote: + node.handleVoteEvent(event) + break + case FastHotStuffEventTypeTimeout: + node.handleTimeoutEvent(event) + break + case FastHotStuffEventTypeConstructVoteQC: + node.handleVoteQCConstructionEvent(event) + break + case FastHotStuffEventTypeConstructTimeoutQC: + node.handleTimeoutQCConstructionEvent(event) + break + } + case <-node.quit: + return + } + } +} + +func (node *validatorNode) handleVoteEvent(event *FastHotStuffEvent) { + payload := GetVoteSignaturePayload(event.View, event.TipBlockHash) + signature, err := node.privateKey.Sign(payload[:]) + if err != nil { + panic(err) + } + + vote := &voteMessage{ + view: event.View, + blockHash: event.TipBlockHash, + publicKey: node.privateKey.PublicKey(), + signature: signature, + } + + for _, validator := range node.validatorNodes { + go validator.ProcessVote(vote) + } +} + +func (node *validatorNode) handleTimeoutEvent(event *FastHotStuffEvent) { + node.lock.Lock() + defer node.lock.Unlock() + + // Skip if the timed out view from the event is stale + if node.eventLoop.currentView != event.View { + return + } + + // Skip if the node can't advance to the next view for some reason. + // This should never happen. 
+ if _, err := node.eventLoop.AdvanceViewOnTimeout(); err == nil { + node.broadcastTimeout(event) + } +} + +func (node *validatorNode) broadcastTimeout(event *FastHotStuffEvent) { + payload := GetTimeoutSignaturePayload(event.View, event.QC.GetView()) + signature, err := node.privateKey.Sign(payload[:]) + if err != nil { + panic(err) + } + + timeout := &timeoutMessage{ + view: event.View, + highQC: event.QC, + publicKey: node.privateKey.PublicKey(), + signature: signature, + } + + // Broadcast the block to all validators. + for _, validator := range node.validatorNodes { + go validator.ProcessTimeout(timeout) + } +} + +func (node *validatorNode) handleVoteQCConstructionEvent(event *FastHotStuffEvent) { + if !node.isBlockProposer { + return + } + + // Skip if the view from the event is stale. This can happen if the node + // has advances to the next view before the block is processed. + if node.eventLoop.currentView != event.View { + return + } + + block := &block{ + view: event.View, + blockHash: createDummyBlockHash(), + height: event.TipBlockHeight + 1, + qc: event.QC, + } + + // Broadcast the block to all validators. + for _, validator := range node.validatorNodes { + go validator.ProcessBlock(block) + } +} + +func (node *validatorNode) handleTimeoutQCConstructionEvent(event *FastHotStuffEvent) { + if !node.isBlockProposer { + return + } + + // Skip if the view from the event is stale. This can happen if the node + // has advances to the next view before the block is processed. + if node.eventLoop.currentView != event.View { + return + } + + block := &block{ + view: event.View, + blockHash: createDummyBlockHash(), + height: event.TipBlockHeight + 1, + aggregateQC: event.AggregateQC, + } + + // Broadcast the block to all validators. 
+ for _, validator := range node.validatorNodes { + go validator.ProcessBlock(block) + } +} + +func (node *validatorNode) Stop() { + node.eventLoop.Stop() + node.quit <- struct{}{} +} From 6c40be73ba4271c09a7dc33f8e4d40c7e16e9074 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 18 Oct 2023 22:01:52 -0400 Subject: [PATCH 258/762] Initial commit (#731) --- consensus/integration_test.go | 247 ++++++++++++++++++++++++++++ consensus/integration_test_utils.go | 118 +++++++++++++ 2 files changed, 365 insertions(+) create mode 100644 consensus/integration_test.go create mode 100644 consensus/integration_test_utils.go diff --git a/consensus/integration_test.go b/consensus/integration_test.go new file mode 100644 index 000000000..43a4e31e5 --- /dev/null +++ b/consensus/integration_test.go @@ -0,0 +1,247 @@ +package consensus + +import ( + "testing" + "time" + + "github.com/deso-protocol/core/collections" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" +) + +// This integration test simulates a network of 4 nodes with equal stake. It tests the +// a scenario in which a super-majority of stake is always online. Nodes sporadically +// go offline and come back online. The network should continue to produce blocks +// as long as a super-majority of stake is online. +func TestNetworkWithOfflineValidators(t *testing.T) { + // Create 4 nodes with equal stake. The network has super-majority of stake online + // as long as 3 out of 4 nodes are online. + node1 := newValidatorNode(uint256.NewInt().SetUint64(50), true) // block proposer + node2 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator + node3 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator + node4 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator + + allNodes := []*validatorNode{node1, node2, node3, node4} + + // Create the genesis block signed by all four nodes. 
+ genesisBlock := createDummyBlockWithVoteQC(createDummyBlockHash(), 2, 2, allNodes) + + // Set the crank and timeout durations + crankTimer := time.Millisecond * 100 // Produce a block every 100ms + timeoutTimer := time.Millisecond * 1000 // Time out if there is no block broadcast within 1000ms + + // Initialize all nodes and and connect them to each other + for _, validator := range allNodes { + require.NoError(t, validator.Init(crankTimer, timeoutTimer, genesisBlock, allNodes)) + } + + // Start all nodes' consensus event loops. + for _, node := range allNodes { + node.Start() + } + + // Broadcast a block with a valid QC to all nodes. This kicks off the steady state flow + // of the Fast-HotStuff consensus. All nodes will begin voting starting from this block. + initialBlockToBroadcast := createDummyBlockWithVoteQC(genesisBlock.GetBlockHash(), 3, 3, allNodes) + for _, node := range allNodes { + node.ProcessBlock(initialBlockToBroadcast) + } + + // Let all nodes run for 0.5 seconds so that the network produces at least one block. + time.Sleep(time.Millisecond * 500) + + // Stop node 2 to simulate it going offline. The network has enough stake online + // to continue producing blocks. + node2.Stop() + + // Let all nodes run for 0.5 seconds so that the network produces at least one block. + time.Sleep(time.Millisecond * 500) + + // Restart node 2 to simulate it going back online, and stop node 3 to simulate it + // going offline. The network has enough stake online to continue producing blocks. + node2.Start() + node3.Stop() + + // Let all nodes run for 0.5 seconds so that the network produces at least one block. + time.Sleep(time.Millisecond * 500) + + // Restart node 3 to simulate it going back online, and stop node 4 to simulate it + // going offline. The network has enough stake online to continue producing blocks. + node3.Start() + node4.Stop() + + // Let all nodes run for 0.5 seconds so that the network produces at least one block. 
+ time.Sleep(time.Millisecond * 500) + + // Stop all remaining nodes + node1.Stop() + node2.Stop() + node3.Stop() + + // Validate the resulting chain of blocks stored by all nodes + validateAndPrintBlockChain(t, node1, node1.safeBlocks) + validateAndPrintBlockChain(t, node2, node2.safeBlocks) + validateAndPrintBlockChain(t, node3, node3.safeBlocks) + validateAndPrintBlockChain(t, node4, node4.safeBlocks) + + // Verify that the network has produced more blocks after the node 4 stopped + require.Greater(t, node3.eventLoop.tip.block.GetView(), node4.eventLoop.tip.block.GetView()) + require.Greater(t, node3.eventLoop.tip.block.GetHeight(), node4.eventLoop.tip.block.GetHeight()) +} + +// This integration test simulates a network of 4 nodes with equal stake. It tests the +// a scenario in which a super-majority of stake is always online, but the block proposer +// goes offline causing other nodes to timeout. The block proposer comes back online and +// the network gracefully recovers. +func TestNetworkWithOfflineBlockProposer(t *testing.T) { + // Create 4 nodes with equal stake. The network has super-majority of stake online + // as long as 3 out of 4 nodes are online. + node1 := newValidatorNode(uint256.NewInt().SetUint64(50), true) // block proposer + node2 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator + node3 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator + node4 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator + + allNodes := []*validatorNode{node1, node2, node3, node4} + + // Create the genesis block signed by all four nodes. 
+ genesisBlock := createDummyBlockWithVoteQC(createDummyBlockHash(), 2, 2, allNodes) + + // Set the crank and timeout durations + crankTimer := time.Millisecond * 500 // Produce a block every 500ms + timeoutTimer := time.Millisecond * 1000 // Time out if there is no block broadcast within 1000ms + + // Initialize all nodes and and connect them to each other + for _, validator := range allNodes { + require.NoError(t, validator.Init(crankTimer, timeoutTimer, genesisBlock, allNodes)) + } + + // Start all nodes' consensus event loops. + for _, node := range allNodes { + node.Start() + } + + // Broadcast a block with a valid QC to all nodes. This kicks off the steady state flow + // of the Fast-HotStuff consensus. All nodes will begin voting starting from this block. + initialBlockToBroadcast := createDummyBlockWithVoteQC(genesisBlock.GetBlockHash(), 3, 3, allNodes) + for _, node := range allNodes { + node.ProcessBlock(initialBlockToBroadcast) + } + + // Let all nodes run for 1 second so that the network produces at least one block. + time.Sleep(time.Millisecond * 1000) + + // Stop node 1 to simulate the block proposer going offline. All other validators will + // begin to time out. The network does not switch leaders during timeouts and instead + // waits for the block proposer to come back online. + node1.Stop() + + // Cache node 1's tip during the network halt + node1TipDuringNetworkHalt := node1.eventLoop.tip.block.(*block) + + // Let all online nodes run for 2 seconds so they time out at least once. + time.Sleep(time.Millisecond * 2000) + + // Restart node 2 to simulate a block proposer coming online. + node1.Start() + + // Let all nodes run for 10 seconds. The block proposer's view start off lower than the + // all other validators. All nodes' timeout have exponential backoff so they should all + // converge on the same view eventually. 
+ time.Sleep(time.Millisecond * 10000) + + // Stop remaining nodes + for _, node := range allNodes { + node.Stop() + } + + // Validate the resulting chain of blocks stored by all nodes + validateAndPrintBlockChain(t, node1, node1.safeBlocks) + validateAndPrintBlockChain(t, node2, node2.safeBlocks) + validateAndPrintBlockChain(t, node3, node3.safeBlocks) + validateAndPrintBlockChain(t, node4, node4.safeBlocks) + + // Verify that the network has produced at least one block since the block proposer returned + require.Greater(t, node3.eventLoop.tip.block.GetView(), node1TipDuringNetworkHalt.GetView()) + require.Greater(t, node3.eventLoop.tip.block.GetHeight(), node1TipDuringNetworkHalt.GetHeight()) +} + +// This integration test simulates a network of 3 nodes, where a super-majority of stake is +// concentrated on node. It tests the a scenario in which the super-majority of stake goes +// offline, killing liveness. The network can recover as long as the super-majority of stake +// can sync to the same starting block height once they come back online. It simulates recovery +// from a catastrophic network failure. +func TestNetworkRecoveryAfterCatastrophicFailure(t *testing.T) { + // Create 3 nodes with equal stake. Node 3 has a super-majority of the the stake + // and needs to stay online for the network to remain live. + node1 := newValidatorNode(uint256.NewInt().SetUint64(10), true) // block proposer + node2 := newValidatorNode(uint256.NewInt().SetUint64(10), false) // validator + node3 := newValidatorNode(uint256.NewInt().SetUint64(80), false) // validator + + allNodes := []*validatorNode{node1, node2, node3} + + // Create the genesis block signed by all three nodes. 
+ genesisBlock := createDummyBlockWithVoteQC(createDummyBlockHash(), 2, 2, allNodes) + + // Set the crank and timeout durations + crankTimer := time.Millisecond * 500 // Produce a block every 500ms + timeoutTimer := time.Millisecond * 1000 // Time out if there is no block broadcast within 1000ms + + // Initialize all nodes and and connect them to each other + for _, validator := range allNodes { + require.NoError(t, validator.Init(crankTimer, timeoutTimer, genesisBlock, allNodes)) + } + + // Start all nodes' consensus event loops. + for _, node := range allNodes { + node.Start() + } + + // Broadcast a block with a valid QC to all nodes. This kicks off the steady state flow + // of the Fast-HotStuff consensus. All nodes will begin voting starting from this block. + initialBlockToBroadcast := createDummyBlockWithVoteQC(genesisBlock.GetBlockHash(), 3, 3, allNodes) + for _, node := range allNodes { + node.ProcessBlock(initialBlockToBroadcast) + } + + // Let all nodes run for 1 second so that the network produces at least one block. + time.Sleep(time.Millisecond * 1000) + + // Stop node 3 to simulate the network going down catastrophically. All nodes will begin to + // time out. No blocks will be proposed during this time. + node3.Stop() + + // Let all online nodes run for 2 seconds so they time out at least once. The block proposer + // may proposer at most one block during this time if it had enough votes stored for its current + // tip to build a QC. The QC will not reach node 3. + // + // The network has halted during this period. + time.Sleep(time.Millisecond * 2000) + + // Cache node 3's tip during the network halt. Nodes internal clocks and view will during this + // period. + node3TipDuringNetworkHalt := node3.eventLoop.tip.block.(*block) + + // After a catastrophic network failure, nodes with a super-majority of stake need to somehow + // agree on a starting state of the chain. 
As long as they are able to sync from or to any peer + // that eventually becomes a block proposer, the network will recover. + node3.Resync(node1.eventLoop.tip.block.(*block), collections.MapValues(node1.safeBlocks)) + node3.Start() + + // Let all nodes run for 10 seconds. Eventually all nodes will converge on a single view and + // the network will start producing blocks again. + time.Sleep(time.Millisecond * 10000) + + // Stop remaining nodes + for _, node := range allNodes { + node.Stop() + } + + // Validate and print resulting chain of blocks stored by all nodes + validateAndPrintBlockChain(t, node1, node1.safeBlocks) + validateAndPrintBlockChain(t, node2, node2.safeBlocks) + validateAndPrintBlockChain(t, node3, node3.safeBlocks) + + // Verify that the network has produced at least one block since it recovered + require.Greater(t, node3.eventLoop.tip.block.GetView(), node3TipDuringNetworkHalt.GetView()) + require.Greater(t, node3.eventLoop.tip.block.GetHeight(), node3TipDuringNetworkHalt.GetHeight()) +} diff --git a/consensus/integration_test_utils.go b/consensus/integration_test_utils.go new file mode 100644 index 000000000..d6b625472 --- /dev/null +++ b/consensus/integration_test_utils.go @@ -0,0 +1,118 @@ +package consensus + +import ( + "encoding/hex" + "fmt" + "testing" + + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections/bitset" + "github.com/stretchr/testify/require" +) + +func createDummyBlockWithVoteQC(prevBlockHash BlockHash, view uint64, blockHeight uint64, validators []*validatorNode) *block { + signersList := bitset.NewBitset() + signatures := []*bls.Signature{} + + // All signers will vote on the previous block has and previous view to build the QC. 
+ for ii, validator := range validators { + signaturePayload := GetVoteSignaturePayload(view-1, prevBlockHash) + signature, _ := validator.privateKey.Sign(signaturePayload[:]) + signatures = append(signatures, signature) + signersList.Set(ii, true) + } + + aggregateSignatures, _ := bls.AggregateSignatures(signatures) + + result := block{ + blockHash: createDummyBlockHash(), + view: view, + height: blockHeight, + qc: &quorumCertificate{ + view: view - 1, + blockHash: prevBlockHash, + aggregatedSignature: &aggregatedSignature{ + signature: aggregateSignatures, + signersList: signersList, + }, + }, + } + return &result +} + +func validateAndPrintBlockChain(t *testing.T, node *validatorNode, allBlocks map[[32]byte]*block) { + blockChainString := "" + + // Fetch the validator's tip block + tipBlock, ok := allBlocks[node.eventLoop.tip.block.GetBlockHash().GetValue()] + require.True(t, ok) + + require.Greater(t, tipBlock.GetView(), uint64(4)) // the network must have advanced more than two views + require.Greater(t, tipBlock.GetHeight(), uint64(4)) // the network must have produced at least two blocks + + // Format string that represents the chain of blocks. + blockChainString = "" + + // Validate the chain of blocks starting from the tip block and ending at the first block within + // the chain. + currentBlock := tipBlock + parentBlock, hasParentBlock := allBlocks[currentBlock.GetQC().GetBlockHash().GetValue()] + for hasParentBlock { + // Updated formatted string that represents the chain of blocks. 
+ blockHashValue := currentBlock.GetBlockHash().GetValue() + blockChainString = fmt.Sprintf( + "->(view=%d,height=%d,hash=%s)%s", + currentBlock.view, + currentBlock.height, + hex.EncodeToString(blockHashValue[:2]), + blockChainString, + ) + + // Verify that the current block exists in in the allBlocks map + _, ok := allBlocks[currentBlock.GetBlockHash().GetValue()] + require.True(t, ok) + + // Cross-validate the current block's and the parent block's views + if isInterfaceNil(currentBlock.aggregateQC) { + // The current block contains a QC of votes + require.Equal(t, currentBlock.GetView(), currentBlock.GetQC().GetView()+1) + } else { + // The current block contains a timeout QC + require.Equal(t, currentBlock.GetView(), currentBlock.aggregateQC.GetView()+1) + require.Greater(t, currentBlock.GetView(), currentBlock.aggregateQC.GetHighQC().GetView()+1) + + // The difference in view between the current block and its parent should be equal to the number of + // timeout blocks between the current block and its parent. + for ii := currentBlock.view - 1; ii > parentBlock.view; ii-- { + blockChainString = fmt.Sprintf("->(view=%d,timeout)%s", ii, blockChainString) + } + } + + // Verify that the current block's height is one more than the parent + require.Equal(t, currentBlock.GetHeight(), parentBlock.GetHeight()+1) + + // Move on to the parent block + currentBlock = parentBlock + parentBlock, hasParentBlock = allBlocks[currentBlock.GetQC().GetBlockHash().GetValue()] + } + + // If we get here, we've validated the chain of blocks and reached the first block within the + // node's chain. The first block may or may not be the genesis block of the full blockchain, + // as the node may have left the network and rejoined at a later time. + + // Format the first block and the node's info. 
+ blockHashValue := currentBlock.GetBlockHash().GetValue() + blockChainString = fmt.Sprintf( + "\nnode=[publicKey=%s, currentView=%d, stake=%s]\nblockchain=[(view=%d,height=%d,hash=%s)%s]\n", + node.privateKey.PublicKey().ToString()[:10], + node.eventLoop.currentView, + node.stake.ToBig().String(), + currentBlock.view, + currentBlock.height, + hex.EncodeToString(blockHashValue[:2]), + blockChainString, + ) + + // Log the formatted string + t.Log(blockChainString) +} From aad69d4e4490072aa45ab6bec70084813e3a4849 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Thu, 19 Oct 2023 11:32:30 -0700 Subject: [PATCH 259/762] Updated lockup tests to include happy path simulation. --- lib/block_view.go | 34 +- lib/block_view_lockups.go | 34 +- lib/block_view_lockups_test.go | 921 ++++++++++++++++++++++++++++++++- lib/block_view_types.go | 7 + lib/db_utils.go | 10 +- lib/errors.go | 1 + 6 files changed, 995 insertions(+), 12 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index a3089ba2d..67e9b5f77 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3292,7 +3292,7 @@ func (bav *UtxoView) ConnectTransaction( ignoreUtxos bool) (_utxoOps []*UtxoOperation, _totalInput uint64, _totalOutput uint64, _fees uint64, _err error) { - return bav._connectTransaction(txn, txHash, txnSizeBytes, blockHeight, 0, verifySignatures, ignoreUtxos) + return bav._connectTransaction(txn, txHash, txnSizeBytes, blockHeight, blockTimestamp, verifySignatures, ignoreUtxos) } @@ -3654,6 +3654,38 @@ func (bav *UtxoView) _connectTransaction( } desoLockedDelta = big.NewInt(0).Neg(totalLockedAmountNanos.ToBig()) } + if txn.TxnMeta.GetTxnType() == TxnTypeCoinUnlock { + if len(utxoOpsForTxn) == 0 { + return nil, 0, 0, 0, errors.New( + "ConnectTransaction: TxnTypeCoinUnlock must return UtxoOpsForTxn", + ) + } + coinUnlockMeta := txn.TxnMeta.(*CoinUnlockMetadata) + + // We only count DESO added if coin unlock was a locked DESO unlock. 
+ if coinUnlockMeta.ProfilePublicKey.IsZeroPublicKey() { + utxoOp := utxoOpsForTxn[len(utxoOpsForTxn)-1] + if utxoOp == nil || utxoOp.Type != OperationTypeCoinUnlock { + return nil, 0, 0, 0, errors.New( + "ConnectTransaction: TxnTypeCoinUnlock must correspond to OperationTypeCoinUnlock", + ) + } + totalLockedDESOAmountNanos := uint256.NewInt() + for _, prevLockedBalanceEntry := range utxoOp.PrevLockedBalanceEntries { + totalLockedDESOAmountNanos, err = SafeUint256().Add( + totalLockedDESOAmountNanos, &prevLockedBalanceEntry.BalanceBaseUnits) + if err != nil { + return nil, 0, 0, 0, + errors.Wrapf(err, "ConnectTransaction: error computing TotalLockedCoinsAmountNanos: ") + } + if !totalLockedDESOAmountNanos.IsUint64() { + return nil, 0, 0, 0, + errors.Errorf("ConnectTransaction: totalLockedDESOAmountNanos overflows uint64") + } + } + desoLockedDelta = big.NewInt(0).Neg(totalLockedDESOAmountNanos.ToBig()) + } + } if big.NewInt(0).Add(balanceDelta, desoLockedDelta).Sign() > 0 { return nil, 0, 0, 0, RuleErrorBalanceChangeGreaterThanZero } diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 2050d94eb..4af43ea3f 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -219,7 +219,7 @@ type LockupYieldCurvePoint struct { } type LockupYieldCurvePointKey struct { - ProfilePKID *PKID + ProfilePKID PKID LockupDurationNanoSecs int64 } @@ -237,7 +237,7 @@ func (lockupYieldCurvePoint *LockupYieldCurvePoint) Eq(other *LockupYieldCurvePo func (lockupYieldCurvePoint *LockupYieldCurvePoint) ToMapKey() LockupYieldCurvePointKey { return LockupYieldCurvePointKey{ - ProfilePKID: lockupYieldCurvePoint.ProfilePKID, + ProfilePKID: *lockupYieldCurvePoint.ProfilePKID, LockupDurationNanoSecs: lockupYieldCurvePoint.LockupDurationNanoSecs, } } @@ -742,8 +742,12 @@ func (bav *UtxoView) _connectCoinLockup( var prevTransactorBalanceEntry *BalanceEntry var prevCoinEntry *CoinEntry if profilePKID.IsZeroPKID() { + // NOTE: When spending balances, we need to check 
for immature block rewards. Since we don't have + // the block rewards yet for the current block, we subtract one from the current block height + // when spending balances. + // Check the DeSo balance of the user. - transactorBalanceNanos, err := bav.GetDeSoBalanceNanosForPublicKey(txn.PublicKey) + transactorBalanceNanos, err := bav.GetSpendableDeSoBalanceNanosForPublicKey(txn.PublicKey, blockHeight-1) if err != nil { return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup") } @@ -757,7 +761,7 @@ func (bav *UtxoView) _connectCoinLockup( // Spend the transactor's DeSo balance. lockupAmount64 := txMeta.LockupAmountBaseUnits.Uint64() - newUtxoOp, err := bav._spendBalance(lockupAmount64, txn.PublicKey, blockHeight) + newUtxoOp, err := bav._spendBalance(lockupAmount64, txn.PublicKey, blockHeight-1) if err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectCoinLockup") } @@ -869,7 +873,7 @@ func (bav *UtxoView) _connectCoinLockup( newLockedBalanceEntryBalance, err := SafeUint256().Add(&lockedBalanceEntry.BalanceBaseUnits, lockupValue) if err != nil { return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, + errors.Wrap(RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry, "_connectCoinLockup: New Locked Balance Entry Balance") } @@ -1663,7 +1667,7 @@ func (bav *UtxoView) _connectCoinUnlock( // Unlock coins until the amount specified by the transaction is deducted. var prevLockedBalanceEntries []*LockedBalanceEntry - var unlockedBalance *uint256.Int + unlockedBalance := uint256.NewInt() for _, unlockableLockedBalanceEntry := range unlockableLockedBalanceEntries { unlockedBalance, err = SafeUint256().Add(unlockedBalance, &unlockableLockedBalanceEntry.BalanceBaseUnits) @@ -1673,7 +1677,7 @@ func (bav *UtxoView) _connectCoinUnlock( } // Append the LockedBalanceEntry in the event we rollback the transaction. 
- prevLockedBalanceEntries = append(prevLockedBalanceEntries, unlockableLockedBalanceEntry) + prevLockedBalanceEntries = append(prevLockedBalanceEntries, unlockableLockedBalanceEntry.Copy()) // Update the LockedBalanceEntry and delete the record. unlockableLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt() @@ -1732,6 +1736,7 @@ func (bav *UtxoView) _connectCoinUnlock( Type: OperationTypeCoinUnlock, PrevTransactorBalanceEntry: prevTransactorBalanceEntry, PrevLockedBalanceEntries: prevLockedBalanceEntries, + PrevCoinEntry: prevCoinEntry, }) return totalInput, totalOutput, utxoOpsForTxn, nil @@ -1814,6 +1819,21 @@ func (bav *UtxoView) _disconnectCoinUnlock( bav._setBalanceEntryMappings(operationData.PrevTransactorBalanceEntry, true) } + // Reverting the CoinEntry (if applicable) should not result in more coins in circulation. + if !profilePKID.IsZeroPKID() { + profileEntry := bav.GetProfileEntryForPKID(profilePKID) + if profileEntry == nil || profileEntry.isDeleted { + return fmt.Errorf("_disconnectCoinUnlock: Trying to revert coin unlock " + + "but found nil profile entry; this shouldn't be possible") + } + if operationData.PrevCoinEntry.CoinsInCirculationNanos.Gt(&profileEntry.DAOCoinEntry.CoinsInCirculationNanos) { + return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + + "would cause profile entry coin entry balance to increase") + } + profileEntry.DAOCoinEntry = *operationData.PrevCoinEntry + bav._setProfileEntryMappings(profileEntry) + } + // Reverting the DeSo addition should not result in more coins. if profilePKID.IsZeroPKID() { // Revert the DeSo add. diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 3d3fcf1c5..a723934a5 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -8,6 +8,127 @@ import ( "testing" ) +func TestForkHeightAndInitialState(t *testing.T) { + // TODO: Create test for forks. + + // TODO: Create test for initial creator state. 
+ + // TODO: Create test for initial deso state. +} + +func TestCalculateLockupYield(t *testing.T) { + var yield *uint256.Int + var err error + + // Ensure that a lockup with zero duration has zero yield. + yield, err = CalculateLockupYield( + MaxUint256, + uint256.NewInt(), + uint256.NewInt().SetUint64(1)) + require.NoError(t, err) + require.Equal(t, *yield, *uint256.NewInt()) + + // Ensure that a lockup with zero apyYieldBasisPoints has zero yield. + yield, err = CalculateLockupYield( + MaxUint256, + uint256.NewInt().SetUint64(1), + uint256.NewInt()) + require.NoError(t, err) + require.Equal(t, *yield, *uint256.NewInt()) + + // Ensure that when principal is MaxUint256 and the apy yield is 2bp, + // the operation fails due to lack of precision. + _, err = CalculateLockupYield( + MaxUint256, + uint256.NewInt().SetUint64(2), + uint256.NewInt().SetUint64(1)) + require.Contains(t, err.Error(), RuleErrorCoinLockupCoinYieldOverflow) + + // Ensure that when principal is MaxUint256 and the duration is 2ns, + // the operation fails due to lack of precision. + _, err = CalculateLockupYield( + MaxUint256, + uint256.NewInt().SetUint64(1), + uint256.NewInt().SetUint64(2)) + require.Contains(t, err.Error(), RuleErrorCoinLockupCoinYieldOverflow) + + // Ensure that the CalculateLockupYield operation acts as a floor of + // the true infinite precision CalculateLockupYield operation. + // + // To do this, we note that the operation is numerically as follows: + // (principal * yield_bp * duration_ns) / (365 * 24 * 60 * 60 * 1e9 * 10000) + // + // Numerically we start by computing the denominator and numerator separately. + // To test that division rounding is functioning correctly, we set the following values: + // principal = 365 * 24 * 10000 + // yield_bp = 60 * 60 + // duration = 1e9 + // + // In theory, this should return a yield of 1 without any overflow in the operation. 
+ // We test this below: + yield, err = CalculateLockupYield( + uint256.NewInt().SetUint64(365*24*10000), + uint256.NewInt().SetUint64(60*60), + uint256.NewInt().SetUint64(1e9)) + require.NoError(t, err) + require.Equal(t, *yield, *uint256.NewInt().SetUint64(1)) + + // Knowing this, we can now check to ensure the edges of the CalculateLockupYield + // operation are behaving correctly and never minting more coins than expected. + // We start by reducing the numerator. Any decrease to the numerator should return a yield of zero. + // To test this, we set duration = 1e9 - 1. + // (This decreases only the largest factor, leading to the smallest decrease possible in the numerator) + yield, err = CalculateLockupYield( + uint256.NewInt().SetUint64(365*24*10000), + uint256.NewInt().SetUint64(60*60), + uint256.NewInt().SetUint64(1e9-1)) + require.NoError(t, err) + require.Equal(t, *yield, *uint256.NewInt().SetUint64(0)) + + // If we only slightly increase the numerator, we should expect to see the yield remain the same. + // To test this, we set duration = 1e9 + 1 + // (This increases only the largest factor, leading to the smallest increase possible in the numerator) + yield, err = CalculateLockupYield( + uint256.NewInt().SetUint64(365*24*10000), + uint256.NewInt().SetUint64(60*60), + uint256.NewInt().SetUint64(1e9+1)) + require.NoError(t, err) + require.Equal(t, *yield, *uint256.NewInt().SetUint64(1)) + + // We should only see an increase to the output yield if the numerator is scaled by a constant. + // To do this, we can iterate through various constants and see if the output yield matches. + // These operations are quick and cheap, so we test all values between 0 and 100000. + // We also ensure that slight deviations do not alter the output. 
+ for ii := uint64(0); ii < 100000; ii++ { + yield, err = CalculateLockupYield( + uint256.NewInt().SetUint64(ii*365*24*10000), + uint256.NewInt().SetUint64(60*60), + uint256.NewInt().SetUint64(1e9)) + require.NoError(t, err) + require.Equal(t, *yield, *uint256.NewInt().SetUint64(ii)) + + // Slight increase to the numerator. Ensure we don't create more yield than expected. + yield, err = CalculateLockupYield( + uint256.NewInt().SetUint64(ii*365*24*10000), + uint256.NewInt().SetUint64(60*60), + uint256.NewInt().SetUint64(1e9+1)) + require.NoError(t, err) + require.Equal(t, *yield, *uint256.NewInt().SetUint64(ii)) + + // Slight decrease to the numerator. Ensure we create strictly less yield. + expectedValue := ii - 1 + if ii == 0 { + expectedValue = 0 + } + yield, err = CalculateLockupYield( + uint256.NewInt().SetUint64(ii*365*24*10000), + uint256.NewInt().SetUint64(60*60), + uint256.NewInt().SetUint64(1e9-1)) + require.NoError(t, err) + require.Equal(t, *yield, *uint256.NewInt().SetUint64(expectedValue)) + } +} + func TestCoinLockupTxnRuleErrors(t *testing.T) { // Initialize test chain, miner, and testMeta testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) @@ -47,6 +168,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { // Attempt to perform an excessive DESO lockup (more than 2**64 DESO). // (This should fail -- RuleErrorCoinLockupExcessiveDeSoLockup) + // NOTE: This also checks that DESO lockups do not require an associated profile. { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, @@ -468,7 +590,804 @@ func TestCoinUnlockTxnRuleErrors(t *testing.T) { require.Contains(t, err.Error(), RuleErrorCoinUnlockNoUnlockableCoinsFound) } - // TODO: Cause unlockable balance overflow test. Tricky as it relies on yield. + // NOTE: We test overflow errors based on unlocks in tests below. + // In general, they're tricky to trigger. Because of this we have special tests for overflows. 
+} + +func TestLockupBasedOverflowsOnProfiles(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Create an on-chain profile for m2 + { + _updateProfileWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + []byte{}, + "m2", + "i am the m2", + shortPic, + 10*100, + 1.25*100*100, + false, + ) + } + + // Create MaxUint256 m2 DAO coins held by m2 + { + _daoCoinTxnWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + DAOCoinMetadata{ + ProfilePublicKey: m2PkBytes, + OperationType: DAOCoinOperationTypeMint, + CoinsToMintNanos: *MaxUint256, + CoinsToBurnNanos: uint256.Int{}, + TransferRestrictionStatus: 0, + }) + } + + // Try and lockup MaxUint256 m2 coins and ensure CoinsInCirculation and NumberOfHolders decreases + { + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + m2Pub, + 1000, + MaxUint256, + 0) + + // Ensure CoinsInCirculationNanos and NumberOfHolders are now zero + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + profileEntry := utxoView.GetProfileEntryForPublicKey(m2PkBytes) + require.Equal(t, *uint256.NewInt(), profileEntry.DAOCoinEntry.CoinsInCirculationNanos) + require.Equal(t, uint64(0), profileEntry.DAOCoinEntry.NumberOfHolders) + } + + // Create MaxUint256 m2 DAO coins held by m2. This should succeed as we locked all tokens previously. 
+ { + _daoCoinTxnWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + DAOCoinMetadata{ + ProfilePublicKey: m2PkBytes, + OperationType: DAOCoinOperationTypeMint, + CoinsToMintNanos: *MaxUint256, + CoinsToBurnNanos: uint256.Int{}, + TransferRestrictionStatus: 0, + }) + } + + // Try and do a subsequent lockup of one base unit of m2 coin at the same timestamp as above. + // (This should fail -- RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + m2Pub, + 1000, + uint256.NewInt().SetUint64(1), + 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry) + } + + // Try and do a MaxUint256 lockup for one year at 1% yield. + // (This should fail in CalculateLockupYield -- RuleErrorCoinLockupCoinYieldOverflow) + { + _updateCoinLockupParamsWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + 365*24*60*60*1e9, + 100, + false, + false, + TransferRestrictionStatusUnrestricted, + ) + + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + m2Pub, + 365*24*60*60*1e9, + MaxUint256, + 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupCoinYieldOverflow) + } + + // Try and do a MaxUint256 lockup with 1bp yield and 1ns duration. + // NOTE: This fails because principal + interest > MaxUint256, but the + // CalculateLockupYield operation does not overflow (unlike the above check). 
+ // (This should fail -- RuleErrorCoinLockupYieldCausesOverflow) + { + _updateCoinLockupParamsWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + 1, + 1, + false, + false, + TransferRestrictionStatusUnrestricted, + ) + + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + m2Pub, + 1, + MaxUint256, + 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupYieldCausesOverflow) + + // Remove the yield curve point. + _updateCoinLockupParamsWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + 1, + 1, + true, + false, + TransferRestrictionStatusUnrestricted, + ) + } + + // Try and perform a lockup transfer to someone with an existing MaxUint256 balance. + // (This should fail -- RuleErrorCoinLockupTransferBalanceOverflowAtReceiver) + { + // Transfer MaxUint256 locked m2 tokens to m3. These were locked in an above test. + _coinLockupTransferWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + NewPublicKey(m3PkBytes), + NewPublicKey(m2PkBytes), + 1000, + MaxUint256, + ) + + // Lockup MaxUint256 m2 tokens. + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + m2Pub, + 1000, + MaxUint256, + 0, + ) + + // Try and perform another transfer. This should fail. + _, _, _, err := _coinLockupTransfer( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + NewPublicKey(m3PkBytes), + NewPublicKey(m2PkBytes), + 1000, + MaxUint256, + ) + require.Contains(t, err.Error(), RuleErrorCoinLockupTransferBalanceOverflowAtReceiver) + } + + // Try and perform a lockup unlock on multiple locked balance entries such that the unlock balance overflows. + // (This should fail -- RuleErrorCoinUnlockUnlockableCoinsOverflow) + { + // Mint MaxUint256 m2 tokens. 
+ _daoCoinTxnWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + DAOCoinMetadata{ + ProfilePublicKey: m2PkBytes, + OperationType: DAOCoinOperationTypeMint, + CoinsToMintNanos: *MaxUint256, + CoinsToBurnNanos: uint256.Int{}, + TransferRestrictionStatus: 0, + }) + + // Lockup MaxUint256 m2 tokens at a different timestamp. + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + m2Pub, + 1001, + MaxUint256, + 0, + ) + + // Try and unlock all locked balance entries simultaneously. + // This should cause an overflow. + _, _, _, err := _coinUnlockWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + m2Pub, + 1002) + require.Contains(t, err.Error(), RuleErrorCoinUnlockUnlockableCoinsOverflow) + } + + // Try and perform a lockup unlock such that the transactor's balance entry overflows. + // (This should fail -- RuleErrorCoinUnlockCausesBalanceOverflow) + { + // Mint MaxUint256 m2 tokens. + _daoCoinTxnWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + DAOCoinMetadata{ + ProfilePublicKey: m2PkBytes, + OperationType: DAOCoinOperationTypeMint, + CoinsToMintNanos: *uint256.NewInt().SetUint64(1), + CoinsToBurnNanos: uint256.Int{}, + TransferRestrictionStatus: 0, + }) + + // Try and unlock one of the MaxUint256 locked balance entries. + // This should cause an overflow in the balance entry. + _, _, _, err := _coinUnlockWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + m2Pub, + 1001) + require.Contains(t, err.Error(), RuleErrorCoinUnlockCausesBalanceOverflow) + } + + // Try and perform a lockup unlock such that the CoinsInCirculation overflows. 
+ // (This should fail -- RuleErrorCoinUnlockCausesCoinsInCirculationOverflow)
+ {
+ _, _, _, err := _coinUnlockWithConnectTimestamp(
+ t, testMeta.chain, testMeta.db, testMeta.params,
+ testMeta.feeRateNanosPerKb,
+ m3Pub,
+ m3Priv,
+ m2Pub,
+ 1001)
+ require.Contains(t, err.Error(), RuleErrorCoinUnlockCausesCoinsInCirculationOverflow)
+ }
+}
+
+func TestLockupStandardProfileFlows(t *testing.T) {
+ // Initialize test chain, miner, and testMeta
+ testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t)
+
+ // Initialize m0, m1, m2, m3, m4, and paramUpdater
+ _setUpProfilesAndMintM0M1DAOCoins(testMeta)
+
+ // Have m1 create a yield curve which consists of:
+ // 1 year @ 5% yield
+ // 2 years @ 10% yield
+ // Remove the yield curve point.
+ {
+ _updateCoinLockupParamsWithTestMeta(
+ testMeta,
+ testMeta.feeRateNanosPerKb,
+ m1Pub,
+ m1Priv,
+ 365*24*60*60*1e9,
+ 500,
+ false,
+ false,
+ TransferRestrictionStatusUnrestricted,
+ )
+ _updateCoinLockupParamsWithTestMeta(
+ testMeta,
+ testMeta.feeRateNanosPerKb,
+ m1Pub,
+ m1Priv,
+ 2*365*24*60*60*1e9,
+ 1000,
+ false,
+ true,
+ TransferRestrictionStatusProfileOwnerOnly,
+ )
+ }
+
+ // Have m1 lockup 10000 m1 DAO tokens for half of a year.
+ // We set the connecting block timestamp to 1 year from UNIX start to give it a non-zero value.
+ // We expect this to create a locked balance entry with 10000 base units locked inside.
+ {
+ // Get the PKID associated with m1.
+ utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot)
+ require.NoError(t, err)
+ m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes)
+ m1PKID := m1PKIDEntry.PKID
+
+ // Get the original BalanceEntry for the associated DAO coins. 
+ originalBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( + m1PkBytes, m1PkBytes, true) + + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + m1Pub, + 365*24*60*60*1e9+365*12*60*60*1e9, + uint256.NewInt().SetUint64(10000), + 365*24*60*60*1e9, + ) + + // Check to ensure the resulting locked balance entry has 10000 base units. + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m1PKID, m1PKID, 365*24*60*60*1e9+365*12*60*60*1e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(10000), lockedBalanceEntry.BalanceBaseUnits) + + // Check to ensure that the BalanceEntry has decreased by exactly 10000. + newBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( + m1PkBytes, m1PkBytes, true) + require.True(t, originalBalanceEntry.BalanceNanos.Gt(&newBalanceEntry.BalanceNanos)) + require.Equal(t, + *uint256.NewInt().Sub(&originalBalanceEntry.BalanceNanos, &newBalanceEntry.BalanceNanos), + *uint256.NewInt().SetUint64(10000)) + } + + // Have m1 lockup 10000 m1 DAO tokens for one year. + // We set the connecting block timestamp to 1 year from UNIX start to give it a non-zero value. + // We expect this to create a locked balance entry with 10500 base units locked inside. + { + // Get the PKID associated with m1. + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) + m1PKID := m1PKIDEntry.PKID + + // Get the original BalanceEntry for the associated DAO coins. 
+ originalBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( + m1PkBytes, m1PkBytes, true) + + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + m1Pub, + 2*365*24*60*60*1e9, + uint256.NewInt().SetUint64(10000), + 365*24*60*60*1e9, + ) + + // Check to ensure the resulting locked balance entry has 10500 base units. + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m1PKID, m1PKID, 2*365*24*60*60*1e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(10500), lockedBalanceEntry.BalanceBaseUnits) + + // Check to ensure that the BalanceEntry has decreased by exactly 10000. + newBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( + m1PkBytes, m1PkBytes, true) + require.True(t, originalBalanceEntry.BalanceNanos.Gt(&newBalanceEntry.BalanceNanos)) + require.Equal(t, + *uint256.NewInt().Sub(&originalBalanceEntry.BalanceNanos, &newBalanceEntry.BalanceNanos), + *uint256.NewInt().SetUint64(10000)) + } + + // Have m1 lockup 10000 m1 DAO tokens for one and a half year. + // We set the connecting block timestamp to 1 year from UNIX start to give it a non-zero value. + // We expect this to create a locked balance entry with 10500 base units locked inside. + // NOTE: This is testing the interpolation algorithm for lockups in the middle of two yield curve points. + { + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + m1Pub, + 2*365*24*60*60*1e9+365*12*60*60*1e9, + uint256.NewInt().SetUint64(10000), + 365*24*60*60*1e9, + ) + + // Check to ensure the resulting locked balance entry has 10500 base units. 
+ utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) + m1PKID := m1PKIDEntry.PKID + lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m1PKID, m1PKID, 2*365*24*60*60*1e9+365*12*60*60*1e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(10500), lockedBalanceEntry.BalanceBaseUnits) + } + + // Have m1 lockup 10000 m1 DAO tokens for two years. + // We set the connecting block timestamp to 1 year from UNIX start to give it a non-zero value. + // We expect this to create a locked balance entry with 12000 base units locked inside. + { + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + m1Pub, + 3*365*24*60*60*1e9, + uint256.NewInt().SetUint64(10000), + 365*24*60*60*1e9, + ) + + // Check to ensure the resulting locked balance entry has 12000 base units. + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) + m1PKID := m1PKIDEntry.PKID + lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m1PKID, m1PKID, 3*365*24*60*60*1e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(12000), lockedBalanceEntry.BalanceBaseUnits) + } + + // Have m1 distribute 1 year locked tokens. 
+ { + _coinLockupTransferWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + NewPublicKey(m2PkBytes), + NewPublicKey(m1PkBytes), + 2*365*24*60*60*1e9, + uint256.NewInt().SetUint64(500), + ) + _coinLockupTransferWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + NewPublicKey(m3PkBytes), + NewPublicKey(m1PkBytes), + 2*365*24*60*60*1e9, + uint256.NewInt().SetUint64(500), + ) + _coinLockupTransferWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + NewPublicKey(m4PkBytes), + NewPublicKey(m1PkBytes), + 2*365*24*60*60*1e9, + uint256.NewInt().SetUint64(500), + ) + + // Check to ensure the resulting locked balance entry for m1 has 9000 base units. + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) + m1PKID := m1PKIDEntry.PKID + lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m1PKID, m1PKID, 2*365*24*60*60*1e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(9000), lockedBalanceEntry.BalanceBaseUnits) + } + + // Check to make sure locked tokens are not liquid. + { + _, _, _, err := _coinLockupTransfer( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + NewPublicKey(m3PkBytes), + NewPublicKey(m1PkBytes), + 2*365*24*60*60*1e9, + uint256.NewInt().SetUint64(500), + ) + require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToProfileOwner) + } + + // Check to make sure tokens can be unlocked following a year. + // Ensure that the associated balance entry increases by 500 on unlock. + // 500 base units of m1 DAO coins was given by m2 during the distribution phase. + { + // Get the original BalanceEntry for the associated DAO coins. 
+ utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + originalBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( + m2PkBytes, m1PkBytes, true) + + _, _, _, err = _coinUnlockWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + m1Pub, + 2*365*24*60*60*1e9+1, + ) + require.NoError(t, err) + + // Get the updated BalanceEntry for the associated DAO coins. + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + newBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( + m2PkBytes, m1PkBytes, true) + require.True(t, newBalanceEntry.BalanceNanos.Gt(&originalBalanceEntry.BalanceNanos)) + require.Equal(t, *uint256.NewInt().SetUint64(500), *uint256.NewInt().Sub( + &newBalanceEntry.BalanceNanos, &originalBalanceEntry.BalanceNanos)) + } +} + +func TestLockupStandardDeSoFlows(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Ensure that paramUpdater is set in the testMeta + testMeta.params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + + // Have paramUpdater create a yield curve which consists of: + // 1 year @ 5% yield + // 2 years @ 10% yield + // Remove the yield curve point. 
+ {
+ _updateCoinLockupParamsWithTestMeta(
+ testMeta,
+ testMeta.feeRateNanosPerKb,
+ paramUpdaterPub,
+ paramUpdaterPriv,
+ 365*24*60*60*1e9,
+ 500,
+ false,
+ false,
+ TransferRestrictionStatusUnrestricted,
+ )
+ _updateCoinLockupParamsWithTestMeta(
+ testMeta,
+ testMeta.feeRateNanosPerKb,
+ paramUpdaterPub,
+ paramUpdaterPriv,
+ 2*365*24*60*60*1e9,
+ 1000,
+ false,
+ true,
+ TransferRestrictionStatusProfileOwnerOnly,
+ )
+ }
+
+ // Have m1 lockup 500 nDESO for half of a year.
+ // We set the connecting block timestamp to 1 year from UNIX start to give it a non-zero value.
+ // We expect this to create a locked balance entry with 500 base units locked inside.
+ {
+ // Get m1's DESO balance.
+ utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot)
+ require.NoError(t, err)
+ m1OriginalBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes)
+ require.NoError(t, err)
+
+ _coinLockupWithTestMetaAndConnectTimestamp(
+ testMeta,
+ testMeta.feeRateNanosPerKb,
+ m1Pub,
+ m1Priv,
+ Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params),
+ 365*24*60*60*1e9+365*12*60*60*1e9,
+ uint256.NewInt().SetUint64(500),
+ 365*24*60*60*1e9,
+ )
+
+ // Check to ensure the resulting locked balance entry has 500 base units.
+ utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot)
+ require.NoError(t, err)
+ m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes)
+ m1PKID := m1PKIDEntry.PKID
+ lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs(
+ m1PKID, ZeroPKID.NewPKID(), 365*24*60*60*1e9+365*12*60*60*1e9)
+ require.NoError(t, err)
+ require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits)
+
+ // Check that m1's DESO balance has decreased by more than 500 nDESO (some extra was spent on fees). 
+ m1NewBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) + require.NoError(t, err) + require.Greater(t, m1OriginalBalance, m1NewBalance) + require.Greater(t, m1OriginalBalance-m1NewBalance, uint64(500)) + } + + // Have m1 lockup 500 nDESO for one year. + // We set the connecting block timestamp to 1 year from UNIX start to give it a non-zero value. + // We expect this to create a locked balance entry with 525 base units locked inside. + { + // Get m1's DESO balance. + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m1OriginalBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) + require.NoError(t, err) + + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + 2*365*24*60*60*1e9, + uint256.NewInt().SetUint64(500), + 365*24*60*60*1e9, + ) + + // Check to ensure the resulting locked balance entry has 525 base units. + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) + m1PKID := m1PKIDEntry.PKID + lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m1PKID, ZeroPKID.NewPKID(), 2*365*24*60*60*1e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(525), lockedBalanceEntry.BalanceBaseUnits) + + // Check that m1's DESO balance has decreased by more than 500 nDESO (some extra was spent on fees). + m1NewBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) + require.NoError(t, err) + require.Greater(t, m1OriginalBalance, m1NewBalance) + require.Greater(t, m1OriginalBalance-m1NewBalance, uint64(500)) + } + + // Have m1 lockup 500 nDESO for one and a half years. 
+ // We set the connecting block timestamp to 1 year from UNIX start to give it a non-zero value. + // We expect this to create a locked balance entry with 525 base units locked inside. + { + // Get m1's DESO balance. + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m1OriginalBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) + require.NoError(t, err) + + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + 2*365*24*60*60*1e9+365*12*60*60*1e9, + uint256.NewInt().SetUint64(500), + 365*24*60*60*1e9, + ) + + // Check to ensure the resulting locked balance entry has 525 base units. + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) + m1PKID := m1PKIDEntry.PKID + lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m1PKID, ZeroPKID.NewPKID(), 2*365*24*60*60*1e9+365*12*60*60*1e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(525), lockedBalanceEntry.BalanceBaseUnits) + + // Check that m1's DESO balance has decreased by more than 500 nDESO (some extra was spent on fees). + m1NewBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) + require.NoError(t, err) + require.Greater(t, m1OriginalBalance, m1NewBalance) + require.Greater(t, m1OriginalBalance-m1NewBalance, uint64(500)) + } + + // Have m1 lockup 500 nDESO for two years. + // We set the connecting block timestamp to 1 year from UNIX start to give it a non-zero value. + // We expect this to create a locked balance entry with 600 base units locked inside. + { + // Get m1's DESO balance. 
+ utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot)
+ require.NoError(t, err)
+ m1OriginalBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes)
+ require.NoError(t, err)
+
+ _coinLockupWithTestMetaAndConnectTimestamp(
+ testMeta,
+ testMeta.feeRateNanosPerKb,
+ m1Pub,
+ m1Priv,
+ Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params),
+ 3*365*24*60*60*1e9,
+ uint256.NewInt().SetUint64(500),
+ 365*24*60*60*1e9,
+ )
+
+ // Check to ensure the resulting locked balance entry has 600 base units.
+ utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot)
+ require.NoError(t, err)
+ m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes)
+ m1PKID := m1PKIDEntry.PKID
+ lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs(
+ m1PKID, ZeroPKID.NewPKID(), 3*365*24*60*60*1e9)
+ require.NoError(t, err)
+ require.Equal(t, *uint256.NewInt().SetUint64(600), lockedBalanceEntry.BalanceBaseUnits)
+
+ // Check that m1's DESO balance has decreased by more than 500 nDESO (some extra was spent on fees).
+ m1NewBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes)
+ require.NoError(t, err)
+ require.Greater(t, m1OriginalBalance, m1NewBalance)
+ require.Greater(t, m1OriginalBalance-m1NewBalance, uint64(500))
+ }
+
+ // Check to make sure locked DESO is not liquid.
+ {
+ _, _, _, err := _coinLockupTransfer(
+ t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb,
+ m1Pub,
+ m1Priv,
+ NewPublicKey(m3PkBytes),
+ &ZeroPublicKey,
+ 2*365*24*60*60*1e9,
+ uint256.NewInt().SetUint64(100),
+ )
+ require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToProfileOwner)
+ }
+
+ // Check to make sure tokens can be unlocked following a year.
+ // Ensure that the associated balance entry increases by less than 1025 on unlock. 
+ // One locked balance entry exists at half a year and has 500 nDESO, the other at one year + // and has 525 nDESO. + { + // Get m1's DESO balance. + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m1OriginalBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) + require.NoError(t, err) + + _, _, _, err = _coinUnlockWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + 2*365*24*60*60*1e9+1, + ) + require.NoError(t, err) + + // Check to ensure the resulting locked balance entry is deleted. + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) + m1PKID := m1PKIDEntry.PKID + lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m1PKID, ZeroPKID.NewPKID(), 2*365*24*60*60*1e9) + require.True(t, lockedBalanceEntry == nil) + + // Check that m1's DESO balance has increased by less than 1025 nDESO (some extra was spent on fees). 
+ m1NewBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) + require.NoError(t, err) + require.Greater(t, m1NewBalance, m1OriginalBalance) + require.Less(t, m1NewBalance-m1OriginalBalance, uint64(1025)) + } +} + +func TestLockupDisconnects(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) } //---------------------------------------------------------- diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 7b91e0f43..de32be234 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3921,6 +3921,7 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { StakingRewardsAPYBasisPoints: gp.StakingRewardsAPYBasisPoints, EpochDurationNumBlocks: gp.EpochDurationNumBlocks, JailInactiveValidatorGracePeriodEpochs: gp.JailInactiveValidatorGracePeriodEpochs, + LockedDESOTransferRestrictions: gp.LockedDESOTransferRestrictions, } } @@ -3944,6 +3945,7 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.StakingRewardsAPYBasisPoints)...) data = append(data, UintToBuf(gp.EpochDurationNumBlocks)...) data = append(data, UintToBuf(gp.JailInactiveValidatorGracePeriodEpochs)...) 
+ data = append(data, byte(gp.LockedDESOTransferRestrictions)) } return data } @@ -4010,6 +4012,11 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading JailInactiveValidatorGracePeriodEpochs: ") } + statusByte, err := rr.ReadByte() + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading LockedDESOTransferRestrictions") + } + gp.LockedDESOTransferRestrictions = TransferRestrictionStatus(statusByte) } return nil } diff --git a/lib/db_utils.go b/lib/db_utils.go index 4b6061dc3..32df1f5b9 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -10860,7 +10860,7 @@ func DBGetUnlockableLockedBalanceEntriesWithTxn( // This check is redundant. It's included to be extra safe only unlockable locked balance entries are included. if lockedBalanceEntry.UnlockTimestampNanoSecs < currentTimestampUnixNanoSecs { - lockedBalanceEntries = append(lockedBalanceEntries) + lockedBalanceEntries = append(lockedBalanceEntries, lockedBalanceEntry) } } @@ -10958,7 +10958,11 @@ func DBGetAllYieldCurvePointsByProfilePKID(handle *badger.DB, snap *Snapshot, func DBGetAllYieldCurvePointsByProfilePKIDWithTxn(txn *badger.Txn, snap *Snapshot, profilePKID *PKID) (_lockupYieldCurvePoints []*LockupYieldCurvePoint, _err error) { // Construct the key prefix. - startKey := DBPrefixKeyForLockupYieldCurvePointsByProfilePKID(profilePKID) + startKey := _dbKeyForLockupYieldCurvePoint(LockupYieldCurvePoint{ + ProfilePKID: profilePKID, + LockupDurationNanoSecs: 0, + }) + validKey := DBPrefixKeyForLockupYieldCurvePointsByProfilePKID(profilePKID) // Create an iterator. opts := badger.DefaultIteratorOptions @@ -10969,7 +10973,7 @@ func DBGetAllYieldCurvePointsByProfilePKIDWithTxn(txn *badger.Txn, snap *Snapsho var lockupYieldCurvePoints []*LockupYieldCurvePoint // Loop. 
- for iterator.Seek(startKey); iterator.ValidForPrefix(startKey); iterator.Next() { + for iterator.Seek(startKey); iterator.ValidForPrefix(validKey); iterator.Next() { // Retrieve the LockupYieldCurvePointBytes. lockupYieldCurvePointBytes, err := iterator.Item().ValueCopy(nil) if err != nil { diff --git a/lib/errors.go b/lib/errors.go index e198b51ff..2bfc7bfde 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -503,6 +503,7 @@ const ( RuleErrorCoinLockupNonExistentProfile RuleError = "RuleErrorCoinLockupNonExistentProfile" RuleErrorCoinLockupCoinYieldOverflow RuleError = "RuleErrorCoinLockupCoinYieldOverflow" RuleErrorCoinLockupYieldCausesOverflow RuleError = "RuleErrorCoinLockupYieldCausesOverflow" + RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry RuleError = "RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry" RuleErrorCoinLockupTransferOfAmountZero RuleError = "RuleErrorCoinLockupTransferOfAmountZero" RuleErrorCoinLockupTransferOfDeSoCausesOverflow RuleError = "RuleErrorCoinLockupTransferOfDeSoCausesOverflow" RuleErrorCoinLockupTransferRestrictedToProfileOwner RuleError = "RuleErrorCoinLockupTransferRestrictedToCreator" From a97e442bfb4ab054d39e94316603257ea356c07b Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Thu, 19 Oct 2023 17:48:19 -0700 Subject: [PATCH 260/762] Add disconnect tests for all lockup txn types. 
--- lib/block_view_lockups.go | 16 +- lib/block_view_lockups_test.go | 512 +++++++++++++++++++++++++++++++++ 2 files changed, 515 insertions(+), 13 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 4af43ea3f..77a768464 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1031,11 +1031,6 @@ func (bav *UtxoView) _disconnectCoinLockup( return errors.Wrapf(err, "_disconnectCoinLockup: Problem unSpending balance of %v "+ "for the transactor", operationData.BalanceAmountNanos) } - operationIndex-- - if operationIndex < 0 { - return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeDAOCoinLockup " + - "but malformed utxoOpsForTxn") - } } else { // Revert the transactor's DAO coin balance. bav._setBalanceEntryMappings(operationData.PrevTransactorBalanceEntry, true) @@ -1790,9 +1785,9 @@ func (bav *UtxoView) _disconnectCoinUnlock( } if lockedBalanceEntry == nil || lockedBalanceEntry.isDeleted { lockedBalanceEntry = &LockedBalanceEntry{ - HODLerPKID: operationData.PrevLockedBalanceEntry.HODLerPKID, - ProfilePKID: operationData.PrevLockedBalanceEntry.ProfilePKID, - UnlockTimestampNanoSecs: operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs, + HODLerPKID: prevLockedBalanceEntry.HODLerPKID, + ProfilePKID: prevLockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: prevLockedBalanceEntry.UnlockTimestampNanoSecs, BalanceBaseUnits: *uint256.NewInt(), } } @@ -1851,11 +1846,6 @@ func (bav *UtxoView) _disconnectCoinUnlock( return errors.Wrapf(err, "_disconnectCoinLockup: Problem unAdding balance of %v for the "+ "transactor", operationData.BalanceAmountNanos) } - operationIndex-- - if operationIndex < 0 { - return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeCoinUnlock " + - "but malformed utxoOpsForTxn") - } } // By here we only need to disconnect the basic transfer associated with the transaction. 
diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index a723934a5..d61b77f0c 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -1388,6 +1388,518 @@ func TestLockupDisconnects(t *testing.T) { // Initialize m0, m1, m2, m3, m4, and paramUpdater _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Ensure that paramUpdater is set in the testMeta + testMeta.params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + + // + // Test Coin Lockup for Profiles + // + utxoOps1, txn1, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + m0Pub, + 2*365*24*60*60*1e9, + uint256.NewInt().SetUint64(1000), + 365*24*60*60*1e9) + require.NoError(t, err) + utxoOps2, txn2, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + m0Pub, + 2*365*24*60*60*1e9, + uint256.NewInt().SetUint64(1000), + 365*24*60*60*1e9) + require.NoError(t, err) + txHash := txn2.Hash() + blockHeight := testMeta.chain.BlockTip().Height + 1 + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + err = utxoView.DisconnectTransaction(txn2, txHash, utxoOps2, blockHeight) + require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m0PKID := utxoView.GetPKIDForPublicKey(m0PkBytes).PKID + lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m0PKID, m0PKID, 2*365*24*60*60*1e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(1000), lockedBalanceEntry.BalanceBaseUnits) + balanceEntry, _, _ := 
utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) + require.Equal(t, *uint256.NewInt().SetUint64(999000), balanceEntry.BalanceNanos) + err = utxoView.DisconnectTransaction(txn1, txn1.Hash(), utxoOps1, blockHeight) + require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m0PKID, m0PKID, 2*365*24*60*60*1e9) + require.True(t, lockedBalanceEntry == nil) + balanceEntry, _, _ = utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) + require.Equal(t, *uint256.NewInt().SetUint64(1000000), balanceEntry.BalanceNanos) + + // + // Test Coin Lockup for DESO + // + + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + originalBalance, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( + m2PkBytes, testMeta.chain.BlockTip().Height) + require.NoError(t, err) + utxoOps1, txn1, _, err = _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + 2*365*24*60*60*1e9, + uint256.NewInt().SetUint64(500), + 365*24*60*60*1e9) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + intermediateBalance, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( + m2PkBytes, testMeta.chain.BlockTip().Height) + require.NoError(t, err) + utxoOps2, txn2, _, err = _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m2Pub, + m2Priv, + 
Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + 2*365*24*60*60*1e9, + uint256.NewInt().SetUint64(500), + 365*24*60*60*1e9) + require.NoError(t, err) + txHash = txn2.Hash() + blockHeight = testMeta.chain.BlockTip().Height + 1 + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + err = utxoView.DisconnectTransaction(txn2, txHash, utxoOps2, blockHeight) + require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m2PKID := utxoView.GetPKIDForPublicKey(m2PkBytes).PKID + lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m2PKID, &ZeroPKID, 2*365*24*60*60*1e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits) + currentBalance, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( + m2PkBytes, testMeta.chain.BlockTip().Height) + require.NoError(t, err) + require.Equal(t, currentBalance, intermediateBalance) + err = utxoView.DisconnectTransaction(txn1, txn1.Hash(), utxoOps1, blockHeight) + require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m2PKID, &ZeroPKID, 2*365*24*60*60*1e9) + require.True(t, lockedBalanceEntry == nil) + currentBalance, err = utxoView.GetSpendableDeSoBalanceNanosForPublicKey( + m2PkBytes, testMeta.chain.BlockTip().Height) + require.NoError(t, err) + require.Equal(t, currentBalance, originalBalance) + + // + // Test Update Coin Lockup Params for Profiles + // + + // Test adding a lockup 
curve point and modifying lockup transfer restrictions. + // Ensure upon disconnect the original point and restrictions remain. + _, _, _, err = _updateCoinLockupParams( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + 365*24*60*60*1e9, + 1000, + false, + true, + TransferRestrictionStatusProfileOwnerOnly, + ) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + utxoOps, txn, _, err := _updateCoinLockupParams( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + 365*24*60*60*1e9, + 2500, + false, + true, + TransferRestrictionStatusPermanentlyUnrestricted, + ) + require.NoError(t, err) + txHash = txn.Hash() + blockHeight = testMeta.chain.BlockTip().Height + 1 + err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) + require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m1PKID := utxoView.GetPKIDForPublicKey(m1PkBytes).PKID + leftYieldCurvePoint, rightYieldCurvePoint, err := + utxoView.GetLocalYieldCurvePoints(m1PKID, 365*24*60*60*1e9) + require.NoError(t, err) + require.True(t, leftYieldCurvePoint == nil) + require.Equal(t, rightYieldCurvePoint.LockupYieldAPYBasisPoints, uint64(1000)) + require.Equal(t, rightYieldCurvePoint.LockupDurationNanoSecs, int64(365*24*60*60*1e9)) + profileEntry := utxoView.GetProfileEntryForPKID(m1PKID) + require.Equal(t, profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus, TransferRestrictionStatusProfileOwnerOnly) + + // Test Deleting a Yield Curve Point and Reverting Said Transaction + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + utxoOps, txn, _, err 
= _updateCoinLockupParams( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + 365*24*60*60*1e9, + 0, + true, + false, + TransferRestrictionStatusUnrestricted, + ) + require.NoError(t, err) + leftYieldCurvePoint, rightYieldCurvePoint, err = + utxoView.GetLocalYieldCurvePoints(m1PKID, 365*24*60*60*1e9) + require.NoError(t, err) + require.True(t, leftYieldCurvePoint == nil) + require.True(t, rightYieldCurvePoint == nil) + txHash = txn.Hash() + blockHeight = testMeta.chain.BlockTip().Height + 1 + err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) + require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + leftYieldCurvePoint, rightYieldCurvePoint, err = + utxoView.GetLocalYieldCurvePoints(m1PKID, 365*24*60*60*1e9) + require.NoError(t, err) + require.True(t, leftYieldCurvePoint == nil) + require.Equal(t, rightYieldCurvePoint.LockupYieldAPYBasisPoints, uint64(1000)) + require.Equal(t, rightYieldCurvePoint.LockupDurationNanoSecs, int64(365*24*60*60*1e9)) + profileEntry = utxoView.GetProfileEntryForPKID(m1PKID) + require.Equal(t, profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus, TransferRestrictionStatusProfileOwnerOnly) + + // + // Test Update Coin Lockup Params for DESO + // + + // Test adding a lockup curve point and modifying lockup transfer restrictions. + // Ensure upon disconnect the original point and restrictions remain. 
+ _, _, _, err = _updateCoinLockupParams( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + 365*24*60*60*1e9, + 1000, + false, + true, + TransferRestrictionStatusProfileOwnerOnly, + ) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + utxoOps, txn, _, err = _updateCoinLockupParams( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + 365*24*60*60*1e9, + 2500, + false, + true, + TransferRestrictionStatusPermanentlyUnrestricted, + ) + require.NoError(t, err) + txHash = txn.Hash() + blockHeight = testMeta.chain.BlockTip().Height + 1 + err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) + require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + leftYieldCurvePoint, rightYieldCurvePoint, err = + utxoView.GetLocalYieldCurvePoints(&ZeroPKID, 365*24*60*60*1e9) + require.NoError(t, err) + require.True(t, leftYieldCurvePoint == nil) + require.Equal(t, rightYieldCurvePoint.LockupYieldAPYBasisPoints, uint64(1000)) + require.Equal(t, rightYieldCurvePoint.LockupDurationNanoSecs, int64(365*24*60*60*1e9)) + require.Equal(t, utxoView.GlobalParamsEntry.LockedDESOTransferRestrictions, TransferRestrictionStatusProfileOwnerOnly) + + // Test Deleting a Yield Curve Point and Reverting Said Transaction + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + utxoOps, txn, _, err = _updateCoinLockupParams( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + paramUpdaterPub, + paramUpdaterPriv, + 365*24*60*60*1e9, + 0, + true, + false, + 
TransferRestrictionStatusUnrestricted, + ) + require.NoError(t, err) + leftYieldCurvePoint, rightYieldCurvePoint, err = + utxoView.GetLocalYieldCurvePoints(&ZeroPKID, 365*24*60*60*1e9) + require.NoError(t, err) + require.True(t, leftYieldCurvePoint == nil) + require.True(t, rightYieldCurvePoint == nil) + txHash = txn.Hash() + blockHeight = testMeta.chain.BlockTip().Height + 1 + err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) + require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + leftYieldCurvePoint, rightYieldCurvePoint, err = + utxoView.GetLocalYieldCurvePoints(&ZeroPKID, 365*24*60*60*1e9) + require.NoError(t, err) + require.True(t, leftYieldCurvePoint == nil) + require.Equal(t, rightYieldCurvePoint.LockupYieldAPYBasisPoints, uint64(1000)) + require.Equal(t, rightYieldCurvePoint.LockupDurationNanoSecs, int64(365*24*60*60*1e9)) + require.Equal(t, utxoView.GlobalParamsEntry.LockedDESOTransferRestrictions, TransferRestrictionStatusProfileOwnerOnly) + + // + // Test Coin Lockup Transfers + // + + // Create an on-chain profile for m3 with MaxUint256 Locked Tokens + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + _updateProfileWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m3Pub, + m3Priv, + []byte{}, + "m3", + "i am the m3", + shortPic, + 10*100, + 1.25*100*100, + false, + ) + _daoCoinTxnWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m3Pub, + m3Priv, + DAOCoinMetadata{ + ProfilePublicKey: m3PkBytes, + OperationType: DAOCoinOperationTypeMint, + CoinsToMintNanos: *MaxUint256, + CoinsToBurnNanos: uint256.Int{}, + TransferRestrictionStatus: 0, + }) + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m3Pub, + m3Priv, + m3Pub, + 1000, + 
MaxUint256, + 0) + utxoOps, txn, _, err = _coinLockupTransfer( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m3Pub, + m3Priv, + NewPublicKey(m4PkBytes), + NewPublicKey(m3PkBytes), + 1000, + MaxUint256) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m3PKID := utxoView.GetPKIDForPublicKey(m3PkBytes).PKID + m4PKID := utxoView.GetPKIDForPublicKey(m4PkBytes).PKID + m3BalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs(m3PKID, m3PKID, 1000) + m4BalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs(m4PKID, m3PKID, 1000) + require.True(t, nil == m3BalanceEntry) + require.Equal(t, *MaxUint256, m4BalanceEntry.BalanceBaseUnits) + txHash = txn.Hash() + blockHeight = testMeta.chain.BlockTip().Height + 1 + err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) + require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m3PKID = utxoView.GetPKIDForPublicKey(m3PkBytes).PKID + m4PKID = utxoView.GetPKIDForPublicKey(m4PkBytes).PKID + m3BalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs(m3PKID, m3PKID, 1000) + m4BalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs(m4PKID, m3PKID, 1000) + require.True(t, nil == m4BalanceEntry) + require.Equal(t, *MaxUint256, m3BalanceEntry.BalanceBaseUnits) + + // + // Test Coin Unlocks for Profiles + // + + // Create an on-chain profile for m4 with MaxUint256 Locked Tokens + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + _updateProfileWithTestMeta( + testMeta, + 
testMeta.feeRateNanosPerKb, + m4Pub, + m4Priv, + []byte{}, + "m4", + "i am the m4", + shortPic, + 10*100, + 1.25*100*100, + false, + ) + _daoCoinTxnWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m4Pub, + m4Priv, + DAOCoinMetadata{ + ProfilePublicKey: m4PkBytes, + OperationType: DAOCoinOperationTypeMint, + CoinsToMintNanos: *MaxUint256, + CoinsToBurnNanos: uint256.Int{}, + TransferRestrictionStatus: 0, + }) + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m4Pub, + m4Priv, + m4Pub, + 1000, + MaxUint256, + 0) + + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m4LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m4PKID, m4PKID, 1000) + m4be, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m4PkBytes, m4PkBytes) + require.NoError(t, err) + require.Equal(t, *MaxUint256, m4LockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt(), m4be.BalanceNanos) + + utxoOps, txn, _, err = _coinUnlockWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m4Pub, + m4Priv, + m4Pub, + 1001) + + // Ensure unlock functioned properly + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m4LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m4PKID, m4PKID, 1000) + require.NoError(t, err) + m4be, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m4PkBytes, m4PkBytes) + require.True(t, nil == m4LockedBalanceEntry) + require.Equal(t, *MaxUint256, m4be.BalanceNanos) + + // Execute the disconnect and ensure it functions correctly + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, 
err) + txHash = txn.Hash() + blockHeight = testMeta.chain.BlockTip().Height + 1 + err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) + require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m4LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m4PKID, m4PKID, 1000) + require.NoError(t, err) + m4be, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m4PkBytes, m4PkBytes) + require.Equal(t, *uint256.NewInt(), m4be.BalanceNanos) + require.Equal(t, *MaxUint256, m4LockedBalanceEntry.BalanceBaseUnits) + + // + // Test Coin Unlocks for DESO + // + + // Lockup 500 nDESO with m4. Check to ensure balances are accurately updated. + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + startingBalance, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( + m4PkBytes, testMeta.chain.BlockTip().Height) + require.NoError(t, err) + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m4Pub, + m4Priv, + Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + 1000, + uint256.NewInt().SetUint64(500), + 0) + + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m4LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m4PKID, &ZeroPKID, 1000) + preUnlockBalance, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( + m4PkBytes, testMeta.chain.BlockTip().Height) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(500), m4LockedBalanceEntry.BalanceBaseUnits) + require.Greater(t, startingBalance, preUnlockBalance) + require.Greater(t, 
startingBalance-preUnlockBalance, uint64(500)) + + utxoOps, txn, _, err = _coinUnlockWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m4Pub, + m4Priv, + Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + 1001) + + // Ensure unlock functioned properly + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m4LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m4PKID, &ZeroPKID, 1000) + currentBalance, err = utxoView.GetSpendableDeSoBalanceNanosForPublicKey( + m4PkBytes, testMeta.chain.BlockTip().Height) + require.NoError(t, err) + require.True(t, m4LockedBalanceEntry == nil) + require.Greater(t, startingBalance, currentBalance) + require.Less(t, startingBalance-currentBalance, uint64(500)) + + // Execute the disconnect and ensure it functions correctly + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + txHash = txn.Hash() + blockHeight = testMeta.chain.BlockTip().Height + 1 + err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) + require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m4LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m4PKID, &ZeroPKID, 1000) + currentBalance, err = utxoView.GetSpendableDeSoBalanceNanosForPublicKey( + m4PkBytes, testMeta.chain.BlockTip().Height) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(500), m4LockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, preUnlockBalance, currentBalance) } //---------------------------------------------------------- From 
c2054a2773aec83b60c903c332a68a15f2480e05 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 20 Oct 2023 15:27:13 -0700 Subject: [PATCH 261/762] Add fork test and block connect/disconnect test. --- lib/block_view.go | 2 +- lib/block_view_lockups.go | 19 +- lib/block_view_lockups_test.go | 338 ++++++++++++++++++++++++++++++++- lib/blockchain.go | 2 +- 4 files changed, 349 insertions(+), 12 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 67e9b5f77..559c7fc3c 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3803,7 +3803,7 @@ func (bav *UtxoView) ConnectBlock( // would slow down block processing significantly. We should figure out a way to // enforce this check in the future, but for now the only attack vector is one in // which a miner is trying to spam the network, which should generally never happen. - utxoOpsForTxn, totalInput, totalOutput, currentFees, err := bav.ConnectTransaction(txn, txHash, 0, uint32(blockHeader.Height), 0, verifySignatures, false) + utxoOpsForTxn, totalInput, totalOutput, currentFees, err := bav.ConnectTransaction(txn, txHash, 0, uint32(blockHeader.Height), int64(blockHeader.TstampNanoSecs), verifySignatures, false) _, _ = totalInput, totalOutput // A bit surprising we don't use these if err != nil { return nil, errors.Wrapf(err, "ConnectBlock: error connecting txn #%d", txIndex) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 77a768464..fe0a4a664 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1080,7 +1080,8 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( // Validate the starting block height. 
if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { - return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectDAOCoinLockup") + return 0, 0, nil, + errors.Wrapf(RuleErrorLockupTxnBeforeBlockHeight, "_connectUpdateCoinLockupParams") } // Validate the txn TxnType. @@ -1330,7 +1331,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return 0, 0, nil, - errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectCoinLockupTransfer") + errors.Wrapf(RuleErrorLockupTxnBeforeBlockHeight, "_connectCoinLockupTransfer") } // Validate the txn TxnType. @@ -1382,8 +1383,8 @@ func (bav *UtxoView) _connectCoinLockupTransfer( // Fetch PKIDs for the recipient, sender, and profile. var senderPKID *PKID - if _, updaterIsParamUpdater := - GetParamUpdaterPublicKeys(blockHeight, bav.Params)[MakePkMapKey(txn.PublicKey)]; updaterIsParamUpdater { + if _, senderIsParamUpdater := + GetParamUpdaterPublicKeys(blockHeight, bav.Params)[MakePkMapKey(txn.PublicKey)]; senderIsParamUpdater { senderPKID = ZeroPKID.NewPKID() } else { senderPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) @@ -1594,7 +1595,8 @@ func (bav *UtxoView) _connectCoinUnlock( // Validate the starting block height. if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { - return 0, 0, nil, errors.Wrapf(RuleErrorProofofStakeTxnBeforeBlockHeight, "_connectCoinLockup") + return 0, 0, nil, + errors.Wrapf(RuleErrorLockupTxnBeforeBlockHeight, "_connectCoinUnlock") } // Validate the txn TxnType. 
@@ -1811,7 +1813,12 @@ func (bav *UtxoView) _disconnectCoinUnlock( return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + "would cause balance entry balance to increase") } - bav._setBalanceEntryMappings(operationData.PrevTransactorBalanceEntry, true) + if operationData.PrevTransactorBalanceEntry.BalanceNanos.IsZero() { + bav._deleteBalanceEntryMappingsWithPKIDs(operationData.PrevTransactorBalanceEntry, + hodlerPKID, profilePKID, true) + } else { + bav._setBalanceEntryMappings(operationData.PrevTransactorBalanceEntry, true) + } } // Reverting the CoinEntry (if applicable) should not result in more coins in circulation. diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index d61b77f0c..2eaa76bb0 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -8,12 +8,101 @@ import ( "testing" ) -func TestForkHeightAndInitialState(t *testing.T) { - // TODO: Create test for forks. +func TestForkHeight(t *testing.T) { + // Test and ensure lockup transactions cannot trigger without: + // (a) ProofOfStake1StateSetupBlockHeight Fork - // TODO: Create test for initial creator state. + // Initialize balance model fork heights. + setBalanceModelBlockHeights(t) + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Ensure DAO coins and balance models are enabled (a pre-requisite for lockups) + params.ForkHeights.DAOCoinBlockHeight = uint32(1) + params.ForkHeights.BalanceModelBlockHeight = uint32(1) - // TODO: Create test for initial deso state. + // Initialize PoS fork heights. + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(25) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + // Mine a few blocks to give the senderPkString some money. 
+ for ii := 0; ii < 10; ii++ { + _, err := miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. + blockHeight := uint64(chain.blockTip().Height) + 1 + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + feeRateNanosPerKb := uint64(101) + _setUpProfilesAndMintM0M1DAOCoins(&TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + }) + + // Simulate blocks being mined up and to the fork and ensure lockup transactions cannot be triggered early. + for ii := 0; ; ii++ { + _, err := miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + currentBlockHeight := uint64(chain.blockTip().Height) + 1 + if currentBlockHeight == uint64(params.ForkHeights.ProofOfStake1StateSetupBlockHeight) { + break + } + + _, _, _, err1 := _coinLockupWithConnectTimestamp( + t, chain, db, params, + feeRateNanosPerKb, + m0Pub, + m0Priv, + m0Pub, + 1000, + uint256.NewInt().SetUint64(100), + 0) + _, _, _, err2 := _updateCoinLockupParams( + t, chain, db, params, + feeRateNanosPerKb, + m1Pub, + m1Priv, + 365*25*60*60*1e9, + 500, + false, + true, + TransferRestrictionStatusProfileOwnerOnly, + ) + _, _, _, err3 := _coinLockupTransfer( + t, chain, db, params, + feeRateNanosPerKb, + m0Pub, + m0Priv, + NewPublicKey(m3PkBytes), + NewPublicKey(m0PkBytes), + 1000, + uint256.NewInt().SetUint64(1)) + _, _, _, err4 := _coinUnlockWithConnectTimestamp( + t, chain, db, params, + feeRateNanosPerKb, + m0Pub, + m0Priv, + m0Pub, + 0) + + if currentBlockHeight < uint64(params.ForkHeights.ProofOfStake1StateSetupBlockHeight) { + require.Contains(t, err1.Error(), RuleErrorLockupTxnBeforeBlockHeight) + require.Contains(t, err2.Error(), RuleErrorLockupTxnBeforeBlockHeight) + require.Contains(t, err3.Error(), RuleErrorLockupTxnBeforeBlockHeight) + require.Contains(t, err4.Error(), 
RuleErrorLockupTxnBeforeBlockHeight) + } + } } func TestCalculateLockupYield(t *testing.T) { @@ -1902,6 +1991,247 @@ func TestLockupDisconnects(t *testing.T) { require.Equal(t, preUnlockBalance, currentBalance) } +func TestLockupBlockConnectsAndDisconnects(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Get chain tip header timestamp + tipTimestamp := int64(testMeta.chain.blockTip().Header.TstampNanoSecs) + + // Validate the starting state + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m0PKID := utxoView.GetPKIDForPublicKey(m0PkBytes).PKID + m3PKID := utxoView.GetPKIDForPublicKey(m3PkBytes).PKID + m0Profile := utxoView.GetProfileEntryForPKID(m0PKID) + require.Equal(t, TransferRestrictionStatusUnrestricted, m0Profile.DAOCoinEntry.LockupTransferRestrictionStatus) + m0LeftYieldCurvePoint, m0RightYieldCurvePoint, err := utxoView.GetLocalYieldCurvePoints(m0PKID, 365*24*60*60*1e9+1) + require.NoError(t, err) + require.True(t, m0RightYieldCurvePoint == nil) + require.True(t, m0LeftYieldCurvePoint == nil) + m0BalanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) + m3BalanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) + require.Equal(t, *uint256.NewInt().SetUint64(1000000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m0PKID, m0PKID, tipTimestamp+2e9) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry == nil) + m3LockedBalanceEntry, err := 
utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m3PKID, m0PKID, tipTimestamp+2e9) + require.NoError(t, err) + require.True(t, m3LockedBalanceEntry == nil) + + // + // Construct a block and test connect with a yield curve update, lockup, and transfer. + // + + // Construct transactions + updateTxn, _, _, _, err := testMeta.chain.CreateUpdateCoinLockupParamsTxn( + m0PkBytes, 365*24*60*60*1e9, 1000, false, + true, TransferRestrictionStatusProfileOwnerOnly, + testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + require.NoError(t, err) + _signTxn(t, updateTxn, m0Priv) + lockupTxn, _, _, _, err := testMeta.chain.CreateCoinLockupTxn( + m0PkBytes, m0PkBytes, tipTimestamp+2e9, uint256.NewInt().SetUint64(1000), + testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + require.NoError(t, err) + _signTxn(t, lockupTxn, m0Priv) + transferTxn, _, _, _, err := testMeta.chain.CreateCoinLockupTransferTxn( + m0PkBytes, m3PkBytes, m0PkBytes, tipTimestamp+2e9, + uint256.NewInt().SetUint64(1000), testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + require.NoError(t, err) + _signTxn(t, transferTxn, m0Priv) + + // Construct and attach the first block + senderPkBytes, _, _ := Base58CheckDecode(senderPkString) + blk1, _, _, err := testMeta.miner.BlockProducer._getBlockTemplate(senderPkBytes) + require.NoError(t, err) + blk1.Txns = append(blk1.Txns, updateTxn) + blk1.Txns = append(blk1.Txns, lockupTxn) + blk1.Txns = append(blk1.Txns, transferTxn) + blk1Root, _, err := ComputeMerkleRoot(blk1.Txns) + require.NoError(t, err) + blk1.Header.TransactionMerkleRoot = blk1Root + blk1.Header.TstampNanoSecs = uint64(tipTimestamp + 1e9) + + // Mine the first block to ensure the difficulty is sufficient for ProcessBlock + // NOTE: 10000 iterations is presumed sufficient for testing as seen in TestBasicTransfer. 
+ _, bestNonce, err := FindLowestHash(blk1.Header, 10000) + require.NoError(t, err) + blk1.Header.Nonce = bestNonce + + // Process the first block + err = testMeta.miner.BlockProducer.SignBlock(blk1) + require.NoError(t, err) + _, _, err = testMeta.chain.ProcessBlock(blk1, false) + require.NoError(t, err) + + // Validate state update + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m0PKID = utxoView.GetPKIDForPublicKey(m0PkBytes).PKID + m3PKID = utxoView.GetPKIDForPublicKey(m3PkBytes).PKID + m0Profile = utxoView.GetProfileEntryForPKID(m0PKID) + require.Equal(t, TransferRestrictionStatusProfileOwnerOnly, m0Profile.DAOCoinEntry.LockupTransferRestrictionStatus) + m0LeftYieldCurvePoint, m0RightYieldCurvePoint, err = utxoView.GetLocalYieldCurvePoints(m0PKID, 365*24*60*60*1e9+1) + require.NoError(t, err) + require.True(t, m0RightYieldCurvePoint == nil) + require.Equal(t, int64(365*24*60*60*1e9), m0LeftYieldCurvePoint.LockupDurationNanoSecs) + require.Equal(t, uint64(1000), m0LeftYieldCurvePoint.LockupYieldAPYBasisPoints) + m0BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) + m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) + require.Equal(t, *uint256.NewInt().SetUint64(999000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m0PKID, m0PKID, tipTimestamp+2e9) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry == nil) + m3LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m3PKID, m0PKID, tipTimestamp+2e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(1000), m3LockedBalanceEntry.BalanceBaseUnits) + + // + // Construct a 
subsequent second block and test unlock. + // + + // Construct transactions + unlockTxn, _, _, _, err := testMeta.chain.CreateCoinUnlockTxn( + m3PkBytes, m0PkBytes, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + require.NoError(t, err) + _signTxn(t, unlockTxn, m3Priv) + + // Construct the second block + blk2, _, _, err := testMeta.miner.BlockProducer._getBlockTemplate(senderPkBytes) + require.NoError(t, err) + blk2.Txns = append(blk2.Txns, unlockTxn) + blk2Root, _, err := ComputeMerkleRoot(blk2.Txns) + require.NoError(t, err) + blk2.Header.TransactionMerkleRoot = blk2Root + blk2.Header.TstampNanoSecs = uint64(tipTimestamp + 3e9) + + // Mine the second block to ensure the difficulty is sufficient for ProcessBlock + // NOTE: 10000 iterations is presumed sufficient for testing as seen in TestBasicTransfer. + _, bestNonce, err = FindLowestHash(blk2.Header, 10000) + require.NoError(t, err) + blk2.Header.Nonce = bestNonce + + // Process the second block + err = testMeta.miner.BlockProducer.SignBlock(blk2) + require.NoError(t, err) + _, _, err = testMeta.chain.ProcessBlock(blk2, false) + require.NoError(t, err) + + // Validate state update + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m0BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) + m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) + require.Equal(t, *uint256.NewInt().SetUint64(999000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt().SetUint64(1000), m3BalanceEntry.BalanceNanos) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m0PKID, m0PKID, tipTimestamp+2e9) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry == nil) + m3LockedBalanceEntry, err = 
utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m3PKID, m0PKID, tipTimestamp+2e9) + require.NoError(t, err) + require.True(t, m3LockedBalanceEntry == nil) + + // + // Disconnect the second block and ensure state is reverted. + // + + // Disconnect the second block + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + blk2Hash, err := blk2.Hash() + require.NoError(t, err) + utxoOps, err := GetUtxoOperationsForBlock(testMeta.db, nil, blk2Hash) + require.NoError(t, err) + txHashes, err := ComputeTransactionHashes(blk2.Txns) + require.NoError(t, err) + err = utxoView.DisconnectBlock(blk2, txHashes, utxoOps, blk2.Header.Height) + require.NoError(t, err) + require.NoError(t, utxoView.FlushToDb(blk2.Header.Height)) + + // Update the tip + testMeta.chain.bestChain = testMeta.chain.bestChain[:len(testMeta.chain.bestChain)-1] + + // Validate the state update + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m0PKID = utxoView.GetPKIDForPublicKey(m0PkBytes).PKID + m3PKID = utxoView.GetPKIDForPublicKey(m3PkBytes).PKID + m0Profile = utxoView.GetProfileEntryForPKID(m0PKID) + require.Equal(t, TransferRestrictionStatusProfileOwnerOnly, m0Profile.DAOCoinEntry.LockupTransferRestrictionStatus) + m0LeftYieldCurvePoint, m0RightYieldCurvePoint, err = utxoView.GetLocalYieldCurvePoints(m0PKID, 365*24*60*60*1e9+1) + require.NoError(t, err) + require.True(t, m0RightYieldCurvePoint == nil) + require.Equal(t, int64(365*24*60*60*1e9), m0LeftYieldCurvePoint.LockupDurationNanoSecs) + require.Equal(t, uint64(1000), m0LeftYieldCurvePoint.LockupYieldAPYBasisPoints) + m0BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) + m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) + 
require.Equal(t, *uint256.NewInt().SetUint64(999000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m0PKID, m0PKID, tipTimestamp+2e9) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry == nil) + m3LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m3PKID, m0PKID, tipTimestamp+2e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(1000), m3LockedBalanceEntry.BalanceBaseUnits) + + // + // Disconnect the first block and ensure state is reverted. + // + + // Disconnect the first block + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + blk1Hash, err := blk1.Hash() + require.NoError(t, err) + utxoView.TipHash = blk1Hash + require.NoError(t, err) + utxoOps, err = GetUtxoOperationsForBlock(testMeta.db, nil, blk1Hash) + require.NoError(t, err) + txHashes, err = ComputeTransactionHashes(blk1.Txns) + require.NoError(t, err) + err = utxoView.DisconnectBlock(blk1, txHashes, utxoOps, blk1.Header.Height) + require.NoError(t, err) + require.NoError(t, utxoView.FlushToDb(blk1.Header.Height)) + + // Update the tip + testMeta.chain.bestChain = testMeta.chain.bestChain[:len(testMeta.chain.bestChain)-1] + + // Verify we return back to the initial state + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m0Profile = utxoView.GetProfileEntryForPKID(m0PKID) + require.Equal(t, TransferRestrictionStatusUnrestricted, m0Profile.DAOCoinEntry.LockupTransferRestrictionStatus) + m0LeftYieldCurvePoint, m0RightYieldCurvePoint, err = utxoView.GetLocalYieldCurvePoints(m0PKID, 365*24*60*60*1e9+1) + require.NoError(t, err) + require.True(t, m0RightYieldCurvePoint == nil) + require.True(t, 
m0LeftYieldCurvePoint == nil) + m0BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) + m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) + require.Equal(t, *uint256.NewInt().SetUint64(1000000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m0PKID, m0PKID, tipTimestamp+2e9) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry == nil) + m3LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m3PKID, m0PKID, tipTimestamp+2e9) + require.NoError(t, err) + require.True(t, m3LockedBalanceEntry == nil) +} + //---------------------------------------------------------- // (Testing) Lockup Setup Helper Functions //---------------------------------------------------------- diff --git a/lib/blockchain.go b/lib/blockchain.go index 484f47687..757145f1d 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5139,7 +5139,7 @@ func (bc *Blockchain) EstimateDefaultFeeRateNanosPerKB( return minFeeRateNanosPerKB } numBytesInTxn := len(txnBytes) - _, _, _, fees, err := utxoView.ConnectTransaction(txn, txn.Hash(), int64(numBytesInTxn), tipNode.Height, 0, false, false) + _, _, _, fees, err := utxoView.ConnectTransaction(txn, txn.Hash(), int64(numBytesInTxn), tipNode.Height, int64(tipNode.Header.TstampNanoSecs), false, false) if err != nil { return minFeeRateNanosPerKB } From 31a82fa451e66e945489984cd1e16e889912ddc5 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 23 Oct 2023 19:38:49 -0700 Subject: [PATCH 262/762] Derived key tests and modifications. 
--- lib/block_view_derived_key.go | 4 +- lib/block_view_lockups.go | 4 +- lib/block_view_lockups_test.go | 633 +++++++++++++++++++++++++++++++++ 3 files changed, 637 insertions(+), 4 deletions(-) diff --git a/lib/block_view_derived_key.go b/lib/block_view_derived_key.go index 29bb43546..bf4793efe 100644 --- a/lib/block_view_derived_key.go +++ b/lib/block_view_derived_key.go @@ -323,9 +323,9 @@ func (bav *UtxoView) _connectAuthorizeDerivedKey( "specify a lockup profile PKID if ScopeType is Any") } if lockupLimit == 0 { - delete(transactionSpendingLimit.LockupLimitMap, lockupLimitKey) + delete(newTransactionSpendingLimit.LockupLimitMap, lockupLimitKey) } else { - transactionSpendingLimit.LockupLimitMap[lockupLimitKey] = lockupLimit + newTransactionSpendingLimit.LockupLimitMap[lockupLimitKey] = lockupLimit } } // StakeLimitMap diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index fe0a4a664..da22364f4 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -2139,13 +2139,13 @@ func (bav *UtxoView) _checkLockupTxnSpendingLimitAndUpdateDerivedKey( } // Next check (any creator PKID || specific operation) key - anyProfilePKIDOperationKey := MakeLockupLimitKey(*profilePKID, LockupLimitScopeTypeAnyCoins, lockupOperation) + anyProfilePKIDOperationKey := MakeLockupLimitKey(ZeroPKID, LockupLimitScopeTypeAnyCoins, lockupOperation) if _checkLimitKeyAndUpdateDerivedKeyEntry(anyProfilePKIDOperationKey, derivedKeyEntry) { return derivedKeyEntry, nil } // Next check (any creator PKID || any operation) key - anyProfilePKIDAnyOperationKey := MakeLockupLimitKey(*profilePKID, LockupLimitScopeTypeAnyCoins, AnyLockupOperation) + anyProfilePKIDAnyOperationKey := MakeLockupLimitKey(ZeroPKID, LockupLimitScopeTypeAnyCoins, AnyLockupOperation) if _checkLimitKeyAndUpdateDerivedKeyEntry(anyProfilePKIDAnyOperationKey, derivedKeyEntry) { return derivedKeyEntry, nil } diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 
2eaa76bb0..fcb4237e5 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -1,8 +1,10 @@ package lib import ( + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v3" "github.com/holiman/uint256" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "testing" @@ -1471,6 +1473,637 @@ func TestLockupStandardDeSoFlows(t *testing.T) { } } +func TestLockupWithDerivedKey(t *testing.T) { + var derivedKeyPriv string + var derivedKeyPub string + + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + blockHeight := uint64(testMeta.chain.BlockTip().Height) + 1 + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + m0PKID := utxoView.GetPKIDForPublicKey(m0PkBytes).PKID + m1PKID := utxoView.GetPKIDForPublicKey(m1PkBytes).PKID + //m2PKID := utxoView.GetPKIDForPublicKey(m2PkBytes).PKID + + senderPrivBytes, _, err := Base58CheckDecode(m0Priv) + require.NoError(t, err) + m0PrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + + // Setup helper functions for creating m0 derived keys + newUtxoView := func() *UtxoView { + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + return utxoView + } + _submitAuthorizeDerivedKeyTxn := func(txnSpendingLimit *TransactionSpendingLimit) (string, string, error) { + utxoView := newUtxoView() + derivedKeyMetadata, derivedKeyAuthPriv := _getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit( + t, m0PrivKey, blockHeight+5, txnSpendingLimit, false, blockHeight, + ) + derivedKeyAuthPrivBase58Check := Base58CheckEncode(derivedKeyAuthPriv.Serialize(), true, testMeta.params) + + prevBalance := _getBalance(testMeta.t, 
testMeta.chain, testMeta.mempool, m0Pub) + + utxoOps, txn, _, err := _doAuthorizeTxnWithExtraDataAndSpendingLimits( + testMeta, + utxoView, + testMeta.feeRateNanosPerKb, + m0PkBytes, + derivedKeyMetadata.DerivedPublicKey, + derivedKeyAuthPrivBase58Check, + derivedKeyMetadata.ExpirationBlock, + derivedKeyMetadata.AccessSignature, + false, + nil, + nil, + txnSpendingLimit, + ) + if err != nil { + return "", "", err + } + require.NoError(t, utxoView.FlushToDb(blockHeight)) + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + + err = utxoView.ValidateDerivedKey( + m0PkBytes, derivedKeyMetadata.DerivedPublicKey, blockHeight, + ) + require.NoError(t, err) + return derivedKeyAuthPrivBase58Check, + Base58CheckEncode(derivedKeyMetadata.DerivedPublicKey, false, testMeta.params), nil + } + _submitLockupTxnWithDerivedKeyAndTimestamp := func( + transactorPkBytes []byte, derivedKeyPrivBase58Check string, inputTxn MsgDeSoTxn, blockTimestamp int64, + ) (_fees uint64, _err error) { + utxoView := newUtxoView() + var txn *MsgDeSoTxn + + switch inputTxn.TxnMeta.GetTxnType() { + // Construct txn. 
+ case TxnTypeCoinLockup: + txMeta := inputTxn.TxnMeta.(*CoinLockupMetadata) + txn, _, _, _, err = testMeta.chain.CreateCoinLockupTxn( + transactorPkBytes, + txMeta.ProfilePublicKey.ToBytes(), + txMeta.UnlockTimestampNanoSecs, + txMeta.LockupAmountBaseUnits, + testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + require.NoError(t, err) + case TxnTypeUpdateCoinLockupParams: + txMeta := inputTxn.TxnMeta.(*UpdateCoinLockupParamsMetadata) + txn, _, _, _, err = testMeta.chain.CreateUpdateCoinLockupParamsTxn( + transactorPkBytes, + txMeta.LockupYieldDurationNanoSecs, + txMeta.LockupYieldAPYBasisPoints, + txMeta.RemoveYieldCurvePoint, + txMeta.NewLockupTransferRestrictions, + txMeta.LockupTransferRestrictionStatus, + testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + require.NoError(t, err) + case TxnTypeCoinLockupTransfer: + txMeta := inputTxn.TxnMeta.(*CoinLockupTransferMetadata) + txn, _, _, _, err = testMeta.chain.CreateCoinLockupTransferTxn( + transactorPkBytes, + txMeta.RecipientPublicKey.ToBytes(), + txMeta.ProfilePublicKey.ToBytes(), + txMeta.UnlockTimestampNanoSecs, + txMeta.LockedCoinsToTransferBaseUnits, + testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + require.NoError(t, err) + case TxnTypeCoinUnlock: + txMeta := inputTxn.TxnMeta.(*CoinUnlockMetadata) + txn, _, _, _, err = testMeta.chain.CreateCoinUnlockTxn( + transactorPkBytes, + txMeta.ProfilePublicKey.ToBytes(), + testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + require.NoError(t, err) + default: + return 0, errors.New("invalid txn type") + } + if err != nil { + return 0, err + } + // Sign txn. + _signTxnWithDerivedKeyAndType(t, txn, derivedKeyPrivBase58Check, 1) + + // Store the original transactor balance. + transactorPublicKeyBase58Check := Base58CheckEncode(transactorPkBytes, false, testMeta.params) + prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) + // Connect txn. 
+ utxoOps, _, _, fees, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), + testMeta.savedHeight, blockTimestamp, true, false) + if err != nil { + return 0, err + } + // Flush UTXO view to the db. + require.NoError(t, utxoView.FlushToDb(blockHeight)) + // Track txn for rolling back. + testMeta.expectedSenderBalances = append(testMeta.expectedSenderBalances, prevBalance) + testMeta.txnOps = append(testMeta.txnOps, utxoOps) + testMeta.txns = append(testMeta.txns, txn) + return fees, nil + } + + { + // Error creating spending limit: cannot specify a lockup profile PKID if scope type is Any + lockupLimitKey := MakeLockupLimitKey(*m0PKID, LockupLimitScopeTypeAnyCoins, AnyLockupOperation) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + // NOTE: We must include TxnTypeAuthorizeDerivedKey as the helper function + // _doAuthorizeTxnWithExtraDataAndSpendingLimits signs with the derived key, + // NOT the owner key. This transaction will decrement this one type AuthorizeDerivedKey limit. + TxnTypeAuthorizeDerivedKey: 1, + }, + LockupLimitMap: map[LockupLimitKey]uint64{lockupLimitKey: uint64(1)}, + } + derivedKeyPriv, derivedKeyPub, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.Error(t, err) + } + + { + // Try and create an UpdateCoinLockupParams transaction that does nothing. + // (This should fail -- RueErrorDerivedKeyUpdateCoinLockupParamsISNoOp) + + // Create the derived key + lockupLimitKey := MakeLockupLimitKey(*m0PKID, LockupLimitScopeTypeScopedCoins, AnyLockupOperation) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + // NOTE: We must include TxnTypeAuthorizeDerivedKey as the helper function + // _doAuthorizeTxnWithExtraDataAndSpendingLimits signs with the derived key, + // NOT the owner key. 
This transaction will decrement this one type AuthorizeDerivedKey limit. + TxnTypeAuthorizeDerivedKey: 1, + }, + LockupLimitMap: map[LockupLimitKey]uint64{lockupLimitKey: uint64(1)}, + } + derivedKeyPriv, derivedKeyPub, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.NoError(t, err) + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + derivedPubKeyBytes, _, err := Base58CheckDecode(derivedKeyPub) + require.NoError(t, err) + derivedKeyEntry := utxoView.GetDerivedKeyMappingForOwner(m0PkBytes, derivedPubKeyBytes) + require.Equal(t, uint64(1), derivedKeyEntry.TransactionSpendingLimitTracker.LockupLimitMap[lockupLimitKey]) + + // Submit the no-op transaction + updateCoinLockupParamsMetadata := &UpdateCoinLockupParamsMetadata{ + LockupYieldDurationNanoSecs: 0, + LockupYieldAPYBasisPoints: 0, + RemoveYieldCurvePoint: false, + NewLockupTransferRestrictions: false, + LockupTransferRestrictionStatus: TransferRestrictionStatusUnrestricted, + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: updateCoinLockupParamsMetadata}, 0, + ) + require.Contains(t, err.Error(), RuleErrorDerivedKeyUpdateCoinLockupParamsIsNoOp) + } + + // Testing (specific profile PKID || specific operation) limits + { + // Try and lockup tokens out-of-scope with the transactor's PKID. This should fail. + // To do this, we will try and have m0's derived key lockup m0's DeSo tokens while + // only allowing the DeSo token to do lockups on m1's PKID. + // This tests incorrect profile scope combined with the correct operation. + // + // Then, we will try and have m0 perform an unlock on locked m1 tokens. + // This tests correct profile scope combined with incorrect operation. + // + // After this, we try and have m0 lockup m1 tokens. + // This should succeed as it's the correct profile scope and correct operation. 
+ + // Create the derived key + lockupLimitKey := MakeLockupLimitKey(*m1PKID, LockupLimitScopeTypeScopedCoins, CoinLockupOperation) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + // NOTE: We must include TxnTypeAuthorizeDerivedKey as the helper function + // _doAuthorizeTxnWithExtraDataAndSpendingLimits signs with the derived key, + // NOT the owner key. This transaction will decrement this one type AuthorizeDerivedKey limit. + TxnTypeAuthorizeDerivedKey: 1, + }, + LockupLimitMap: map[LockupLimitKey]uint64{lockupLimitKey: uint64(1)}, + } + derivedKeyPriv, derivedKeyPub, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.NoError(t, err) + + // Have m0 try and lockup m0 tokens. (Incorrect profile + correct operation) + coinLockupMetadata := &CoinLockupMetadata{ + ProfilePublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, + ) + require.Contains(t, err.Error(), RuleErrorDerivedKeyCoinLockupOperationNotAuthorized) + + // Have m1 transfer over 1,000 LOCKED m1 tokens for m0 to unlock. 
(Correct profile + incorrect operation) + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + m1Pub, + 365*24*60*60*1e9, + uint256.NewInt().SetUint64(1000), + 0, + ) + _coinLockupTransferWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + NewPublicKey(m0PkBytes), + NewPublicKey(m1PkBytes), + 365*24*60*60*1e9, + uint256.NewInt().SetUint64(1000), + ) + coinUnlockMetadata := &CoinUnlockMetadata{ProfilePublicKey: NewPublicKey(m1PkBytes)} + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinUnlockMetadata}, 365*24*60*60*1e9+1, + ) + require.Contains(t, err.Error(), RuleErrorDerivedKeyCoinLockupOperationNotAuthorized) + + // Have m1 transfer over 1,000 unlocked m1 tokens to m0 and have m0 lock them up. + // (Correct profile + correct operation) + _daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, DAOCoinTransferMetadata{ + ProfilePublicKey: m1PkBytes, + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), + ReceiverPublicKey: m0PkBytes, + }) + coinLockupMetadata = &CoinLockupMetadata{ + ProfilePublicKey: NewPublicKey(m1PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, + ) + require.NoError(t, err) + + // Ensure the operation cannot be performed again as the transaction limit was set to 1. 
+ _daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, DAOCoinTransferMetadata{ + ProfilePublicKey: m1PkBytes, + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), + ReceiverPublicKey: m0PkBytes, + }) + coinLockupMetadata = &CoinLockupMetadata{ + ProfilePublicKey: NewPublicKey(m1PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, + ) + require.Contains(t, err.Error(), RuleErrorDerivedKeyCoinLockupOperationNotAuthorized) + } + + // Test (specific profile PKID || any operation) limits + { + // Create the derived key + lockupLimitKey := MakeLockupLimitKey(*m0PKID, LockupLimitScopeTypeScopedCoins, AnyLockupOperation) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + // NOTE: We must include TxnTypeAuthorizeDerivedKey as the helper function + // _doAuthorizeTxnWithExtraDataAndSpendingLimits signs with the derived key, + // NOT the owner key. This transaction will decrement this one type AuthorizeDerivedKey limit. + TxnTypeAuthorizeDerivedKey: 1, + }, + LockupLimitMap: map[LockupLimitKey]uint64{lockupLimitKey: uint64(2)}, + } + derivedKeyPriv, derivedKeyPub, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.NoError(t, err) + + // Have m1 transfer 1000 unlocked m1 coins to m0 + _daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, DAOCoinTransferMetadata{ + ProfilePublicKey: m1PkBytes, + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), + ReceiverPublicKey: m0PkBytes, + }) + + // Try to submit a transaction locking up 1000 m1 coins with m0's derived key. + // This should fail. 
(Incorrect Profile PKID + Correct Operation) + coinLockupMetadata := &CoinLockupMetadata{ + ProfilePublicKey: NewPublicKey(m1PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, + ) + require.Contains(t, err.Error(), RuleErrorDerivedKeyCoinLockupOperationNotAuthorized) + + // Try to submit a transaction locking up 1000 m0 coins with m0's derived key. + // This should succeed. (Correct Profile PKID + Correct Operation) + coinLockupMetadata = &CoinLockupMetadata{ + ProfilePublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, + ) + require.NoError(t, err) + + // Try to submit a transaction unlocking 1000 m0 coins with m0's derived key. + // This should succeed. (Correct Profile PKID + Correct Operation) + // This tests that AnyLockupOperation is truly ANY lockup operation. + coinUnlockMetadata := &CoinUnlockMetadata{ProfilePublicKey: NewPublicKey(m0PkBytes)} + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinUnlockMetadata}, 365*24*60*60*1e9+1, + ) + require.NoError(t, err) + + // Try to submit a subsequent lockup transaction. This should fail as we've exhausted the derived key. 
+ coinLockupMetadata = &CoinLockupMetadata{ + ProfilePublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 365*24*60*60*1e9+1, + ) + require.Contains(t, err.Error(), RuleErrorDerivedKeyCoinLockupOperationNotAuthorized) + } + + // Test (any creator PKID || specific operation) limits + { + // To test this, we create a derived key that can unlock ANY locked coins ONCE. + // We have m1 send locked tokens to m0, and ensure m0's derived key can unlock them properly. + + // Create the derived key + lockupLimitKey := MakeLockupLimitKey(ZeroPKID, LockupLimitScopeTypeAnyCoins, CoinLockupUnlockOperation) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + // NOTE: We must include TxnTypeAuthorizeDerivedKey as the helper function + // _doAuthorizeTxnWithExtraDataAndSpendingLimits signs with the derived key, + // NOT the owner key. This transaction will decrement this one type AuthorizeDerivedKey limit. + TxnTypeAuthorizeDerivedKey: 1, + }, + LockupLimitMap: map[LockupLimitKey]uint64{lockupLimitKey: uint64(1)}, + } + derivedKeyPriv, derivedKeyPub, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.NoError(t, err) + + // Have m0 lockup 1000 m0 tokens to be unlocked one year into the future. + // This should fail. 
(Correct PKID + Incorrect Operation Type) + coinLockupMetadata := &CoinLockupMetadata{ + ProfilePublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, + ) + require.Contains(t, err.Error(), RuleErrorDerivedKeyCoinLockupOperationNotAuthorized) + + // Have m1 transfer over 1,000 LOCKED m1 tokens for m0 to unlock. + _coinLockupWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + m1Pub, + 365*24*60*60*1e9, + uint256.NewInt().SetUint64(1000), + 0, + ) + _coinLockupTransferWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + NewPublicKey(m0PkBytes), + NewPublicKey(m1PkBytes), + 365*24*60*60*1e9, + uint256.NewInt().SetUint64(1000), + ) + + // Have m0 unlock the 1,000 locked m1 tokens. + // This should succeed. (Correct PKID + Correct Operation) + coinUnlockMetadata := &CoinUnlockMetadata{ProfilePublicKey: NewPublicKey(m1PkBytes)} + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinUnlockMetadata}, 365*24*60*60*1e9+1, + ) + require.NoError(t, err) + + // Try to submit a subsequent lockup transaction. This should fail as we've exhausted the derived key. 
+ coinLockupMetadata = &CoinLockupMetadata{ + ProfilePublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 365*24*60*60*1e9+1, + ) + require.Contains(t, err.Error(), RuleErrorDerivedKeyCoinLockupOperationNotAuthorized) + } + + // Test (any creator PKID || any operation) limits + { + // To test (any creator PKID || any operation) we attempt to do the following from a derived key: + // - 2x Update the owner's yield curve + // - 1x Update Lockup Transfer Restrictions + // - 2x Lockup the owner's tokens + // - 2x Transfer the owner's locked tokens + // - 2x Unlock the transfers locked tokens + + // Create the derived key + lockupLimitKey := MakeLockupLimitKey(ZeroPKID, LockupLimitScopeTypeAnyCoins, AnyLockupOperation) + txnSpendingLimit := &TransactionSpendingLimit{ + GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit + TransactionCountLimitMap: map[TxnType]uint64{ + // NOTE: We must include TxnTypeAuthorizeDerivedKey as the helper function + // _doAuthorizeTxnWithExtraDataAndSpendingLimits signs with the derived key, + // NOT the owner key. This transaction will decrement this one type AuthorizeDerivedKey limit. + TxnTypeAuthorizeDerivedKey: 1, + }, + LockupLimitMap: map[LockupLimitKey]uint64{lockupLimitKey: uint64(9)}, + } + derivedKeyPriv, derivedKeyPub, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) + require.NoError(t, err) + + // Perform the first update to the yield curve + // NOTE: This will count as two operations against the limit as it's both + // updating the yield curve AND updating transfer restrictions. 
+ updateCoinLockupParamsMetadata := &UpdateCoinLockupParamsMetadata{ + LockupYieldDurationNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupYieldAPYBasisPoints: 1000, + RemoveYieldCurvePoint: false, + NewLockupTransferRestrictions: true, + LockupTransferRestrictionStatus: TransferRestrictionStatusProfileOwnerOnly, + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: updateCoinLockupParamsMetadata}, 0, + ) + require.NoError(t, err) + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + profileEntry := utxoView.GetProfileEntryForPKID(m0PKID) + require.Equal(t, TransferRestrictionStatusProfileOwnerOnly, profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus) + leftYCP, rightYCP, err := utxoView.GetLocalYieldCurvePoints(m0PKID, 365*24*60*60*1e9) + require.NoError(t, err) + require.True(t, leftYCP == nil) + require.Equal(t, int64(365*24*60*60*1e9), rightYCP.LockupDurationNanoSecs) + require.Equal(t, uint64(1000), rightYCP.LockupYieldAPYBasisPoints) + + // Perform the second update to the yield curve (a delete operation) + updateCoinLockupParamsMetadata = &UpdateCoinLockupParamsMetadata{ + LockupYieldDurationNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupYieldAPYBasisPoints: 1000, + RemoveYieldCurvePoint: true, + NewLockupTransferRestrictions: false, + LockupTransferRestrictionStatus: TransferRestrictionStatusDAOMembersOnly, + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: updateCoinLockupParamsMetadata}, 0, + ) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + profileEntry = utxoView.GetProfileEntryForPKID(m0PKID) + require.Equal(t, TransferRestrictionStatusProfileOwnerOnly, profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus) + leftYCP, rightYCP, err = 
utxoView.GetLocalYieldCurvePoints(m0PKID, 365*24*60*60*1e9) + require.NoError(t, err) + require.True(t, leftYCP == nil) + require.True(t, rightYCP == nil) + + // Perform the first lockup operation of 1000 m0 coins at 1yr + coinLockupMetadata := &CoinLockupMetadata{ + ProfilePublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, + ) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m0PKID, m0PKID, 365*24*60*60*1e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(1000), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, int64(365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) + + // Perform the second lockup operation of 1000 m0 coins at 2yrs + coinLockupMetadata = &CoinLockupMetadata{ + ProfilePublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 2 * 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, + ) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m0PKID, m0PKID, 2*365*24*60*60*1e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(1000), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, int64(2*365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) + + // 
Perform the first transfer operation to m1 of 500 locked m0 coins @ 1yr + coinLockupTransferMetadata := &CoinLockupTransferMetadata{ + RecipientPublicKey: NewPublicKey(m1PkBytes), + ProfilePublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockedCoinsToTransferBaseUnits: uint256.NewInt().SetUint64(500), + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupTransferMetadata}, 0, + ) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m1PKID, m0PKID, 365*24*60*60*1e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, int64(365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) + + // Perform the second transfer operation to m1 of 500 locked m0 coins @ 2yrs + coinLockupTransferMetadata = &CoinLockupTransferMetadata{ + RecipientPublicKey: NewPublicKey(m1PkBytes), + ProfilePublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 2 * 365 * 24 * 60 * 60 * 1e9, + LockedCoinsToTransferBaseUnits: uint256.NewInt().SetUint64(500), + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupTransferMetadata}, 0, + ) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m1PKID, m0PKID, 2*365*24*60*60*1e9) + require.NoError(t, err) + require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, int64(2*365*24*60*60*1e9), 
lockedBalanceEntry.UnlockTimestampNanoSecs) + + // Perform the first unlock operation of 500 m1 tokens @ 1yr + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + balanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) + startingBalance := balanceEntry.BalanceNanos + coinUnlockMetadata := &CoinUnlockMetadata{ProfilePublicKey: NewPublicKey(m0PkBytes)} + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinUnlockMetadata}, 365*24*60*60*1e9+1, + ) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + balanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) + require.True(t, balanceEntry.BalanceNanos.Gt(&startingBalance)) + require.Equal(t, *uint256.NewInt().SetUint64(500), + *uint256.NewInt().Sub(&balanceEntry.BalanceNanos, &startingBalance)) + lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m0PKID, m0PKID, 365*24*60*60*1e9) + require.NoError(t, err) + require.True(t, lockedBalanceEntry == nil) + + // Perform the second unlock operation of 500 m1 tokens @ 2yrs + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinUnlockMetadata}, 2*365*24*60*60*1e9+1, + ) + require.NoError(t, err) + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + require.NoError(t, err) + balanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) + require.True(t, 
balanceEntry.BalanceNanos.Gt(&startingBalance)) + require.Equal(t, *uint256.NewInt().SetUint64(1000), + *uint256.NewInt().Sub(&balanceEntry.BalanceNanos, &startingBalance)) + lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( + m0PKID, m0PKID, 2*365*24*60*60*1e9) + require.NoError(t, err) + require.True(t, lockedBalanceEntry == nil) + + // Now we try and perform another operation. This should fail as we've depleted our lockup operations limit. + coinLockupMetadata = &CoinLockupMetadata{ + ProfilePublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 3 * 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + } + _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( + m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 2*365*24*60*60*1e9+2, + ) + require.Contains(t, err.Error(), RuleErrorDerivedKeyCoinLockupOperationNotAuthorized) + } +} + func TestLockupDisconnects(t *testing.T) { // Initialize test chain, miner, and testMeta testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) From 402346638f25f247593c7225a5f28517c30f7317 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Wed, 25 Oct 2023 23:08:27 -0700 Subject: [PATCH 263/762] PoS Block Producer (#671) * dummy commit * ln/add-transactor-as-affected-pub-key (#675) * ln/add-transactor-as-affected-pub-key * address DH feedback --------- Co-authored-by: Lazy Nina <> * PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. 
* Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * PoS Block Producer: _connectFailingTransaction (#673) * Revert "Revert connect failing transaction" This reverts commit e801101aefdf4ae6fe02b226f741bee0f7f1aa1c. 
* Revert * Tests * Lazy review * Simplify failing transaction fee math; add reused nonce validation * Review * PoS Block Producer: Mempool replace with higher fee (#676) * Add replace by higher fee feature to mempool * Add PKID and update map in NonceTracker * Fix nonce tracker on update global params; add more bulletproof tests * Change NonceTracker to use PublicKey * PoS Block Producer: Add Refresh Operation To PosMempool (#700) * Add Refresh; small test improvement * Rename receiver * Actually rename receiver * Remove block producer stuff * Fix Mempool interface * Fix test * PoS Block Producer: Safer ReadUvarint (#701) * Add Refresh; small test improvement * Rename receiver * Actually rename receiver * Remove block producer stuff * Fix Mempool interface * Fix test * Add RevolutionMetadata; Update varint encoding * Remove RevolutionMetadata * PoS Block Producer: MempoolTransaction Type (#703) * Revert "Remove Block Producer" This reverts commit 2f9fe64b34130f65a3a68f23835295c04d0444b7. * Add MempoolTransaction with timestamp * Remove BlockProducer * Fix tests * Simplify test * oops * PoS Block Producer: Data Structure (#704) * Revert "Remove Block Producer" This reverts commit 2f9fe64b34130f65a3a68f23835295c04d0444b7. * Add MempoolTransaction with timestamp * Remove BlockProducer * Fix tests * Simplify test * oops * Revert "Remove BlockProducer" This reverts commit 1c8bc9ded9838b33463b979b126ec519453cde4b. 
* Block producer code * Initial tests for block producer * More tests for block producer * Better comments and test * Move proposerRandomSeedHash to function signatures * Merge * Diamondhands review (#744) * Diamondhands review gofmt (#745) * Diamondhands review * gofmt * another gofmt * DH Pos Block Producer review (#754) --------- Co-authored-by: Lazy Nina <81658138+lazynina@users.noreply.github.com> Co-authored-by: iamsofonias --- cmd/config.go | 6 +- cmd/node.go | 3 - cmd/run.go | 4 +- lib/block_view.go | 268 +++++++++-- lib/block_view_test.go | 105 +++++ lib/block_view_types.go | 55 ++- lib/constants.go | 71 +-- lib/errors.go | 3 +- lib/{mempool.go => legacy_mempool.go} | 16 + ...mempool_test.go => legacy_mempool_test.go} | 0 lib/network.go | 48 +- lib/network_test.go | 158 ++++--- lib/pos_block_producer.go | 206 ++++++++ lib/pos_block_producer_test.go | 271 +++++++++++ lib/pos_mempool.go | 446 ++++++++++++------ lib/pos_mempool_nonce_tracker.go | 74 +++ lib/pos_mempool_persister.go | 24 +- lib/pos_mempool_test.go | 225 ++++++++- lib/pos_mempool_transaction.go | 15 +- lib/pos_network.go | 7 + lib/pos_snapshot_entries.go | 6 + lib/pos_staking_rewards.go | 6 +- lib/pos_transaction_register.go | 28 +- lib/pos_transaction_register_test.go | 2 +- lib/validation_deso_txn.go | 29 +- lib/varint.go | 6 +- 26 files changed, 1698 insertions(+), 384 deletions(-) rename lib/{mempool.go => legacy_mempool.go} (99%) rename lib/{mempool_test.go => legacy_mempool_test.go} (100%) create mode 100644 lib/pos_block_producer.go create mode 100644 lib/pos_block_producer_test.go create mode 100644 lib/pos_mempool_nonce_tracker.go diff --git a/cmd/config.go b/cmd/config.go index 52f74b083..42bfcc738 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -43,7 +43,8 @@ type Config struct { HypersyncMaxQueueSize uint32 // Mempool - MempoolBackupTimeMilliseconds uint64 + MempoolBackupIntervalMillis uint64 + MaxMempoolPosSizeBytes uint64 // Mining MinerPublicKeys []string @@ -108,7 +109,8 @@ 
func LoadConfig() *Config { config.HypersyncMaxQueueSize = viper.GetUint32("hypersync-max-queue-size") // Mempool - config.MempoolBackupTimeMilliseconds = viper.GetUint64("mempool-backup-time-milliseconds") + config.MempoolBackupIntervalMillis = viper.GetUint64("mempool-backup-time-millis") + config.MaxMempoolPosSizeBytes = viper.GetUint64("max-mempool-pos-size-bytes") // Peers config.ConnectIPs = viper.GetStringSlice("connect-ips") diff --git a/cmd/node.go b/cmd/node.go index 55704b83d..985df115b 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -85,9 +85,6 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Params.EnableRegtest() } - // Set the frequency of mempool's database backup. - node.Params.MempoolBackupTimeMilliseconds = node.Config.MempoolBackupTimeMilliseconds - // Validate params validateParams(node.Params) // This is a bit of a hack, and we should deprecate this. We rely on GlobalDeSoParams static variable in only one diff --git a/cmd/run.go b/cmd/run.go index 4dc424328..e6231fdd8 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -83,9 +83,11 @@ func SetupRunFlags(cmd *cobra.Command) { download historical blocks. Can only be set if HyperSync is true.`) // Mempool - cmd.PersistentFlags().Uint64("batch-persist-frequency-milliseconds", 30000, + cmd.PersistentFlags().Uint64("mempool-backup-time-millis", 30000, "The frequency in milliseconds with which the mempool will persist its state to disk. "+ "The default value is 30 seconds, or 30,000 milliseconds.") + cmd.PersistentFlags().Uint64("max-mempool-pos-size-bytes", 3000000000, + "The maximum size of the PoS mempool in bytes. The default value is 3GB.") // Peers cmd.PersistentFlags().StringSlice("connect-ips", []string{}, diff --git a/lib/block_view.go b/lib/block_view.go index d41592456..f4d83d6bb 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -2077,31 +2077,8 @@ func (bav *UtxoView) _connectBasicTransferWithExtraSpend( // If signature verification is requested then do that as well. 
if verifySignatures { - // When we looped through the inputs we verified that all of them belong - // to the public key specified in the transaction. So, as long as the transaction - // public key has signed the transaction as a whole, we can assume that - // all of the inputs are authorized to be spent. One signature to rule them - // all. - // - // UPDATE: Transaction can be signed by a different key, called a derived key. - // The derived key must be authorized through an AuthorizeDerivedKey transaction, - // and then passed along in ExtraData for evey transaction signed with it. - // - // We treat block rewards as a special case in that we actually require that they - // not have a transaction-level public key and that they not be signed. Doing this - // simplifies things operationally for miners because it means they can run their - // mining operation without having any private key material on any of the mining - // nodes. Block rewards are the only transactions that get a pass on this. They are - // also not allowed to have any inputs because they by construction cannot authorize - // the spending of any inputs. 
- if txn.TxnMeta.GetTxnType() == TxnTypeBlockReward { - if len(txn.PublicKey) != 0 || txn.Signature.Sign != nil { - return 0, 0, nil, RuleErrorBlockRewardTxnNotAllowedToHaveSignature - } - } else { - if _, err := bav._verifySignature(txn, blockHeight); err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectBasicTransferWithExtraSpend Problem verifying txn signature: ") - } + if err := bav._verifyTxnSignature(txn, blockHeight); err != nil { + return 0, 0, nil, errors.Wrapf(err, "_connectBasicTransferWithExtraSpend ") } } @@ -2128,6 +2105,36 @@ func (bav *UtxoView) _connectBasicTransferWithExtraSpend( return totalInput, totalOutput, utxoOpsForTxn, nil } +func (bav *UtxoView) _verifyTxnSignature(txn *MsgDeSoTxn, blockHeight uint32) error { + // When we looped through the inputs we verified that all of them belong + // to the public key specified in the transaction. So, as long as the transaction + // public key has signed the transaction as a whole, we can assume that + // all of the inputs are authorized to be spent. One signature to rule them + // all. + // + // UPDATE: Transaction can be signed by a different key, called a derived key. + // The derived key must be authorized through an AuthorizeDerivedKey transaction, + // and then passed along in ExtraData for every transaction signed with it. + // + // We treat block rewards as a special case in that we actually require that they + // not have a transaction-level public key and that they not be signed. Doing this + // simplifies things operationally for miners because it means they can run their + // mining operation without having any private key material on any of the mining + // nodes. Block rewards are the only transactions that get a pass on this. They are + // also not allowed to have any inputs because they by construction cannot authorize + // the spending of any inputs. 
+ if txn.TxnMeta.GetTxnType() == TxnTypeBlockReward { + if len(txn.PublicKey) != 0 || txn.Signature.Sign != nil { + return RuleErrorBlockRewardTxnNotAllowedToHaveSignature + } + } else { + if _, err := bav._verifySignature(txn, blockHeight); err != nil { + return errors.Wrapf(err, "_connectBasicTransferWithExtraSpend Problem verifying txn signature: ") + } + } + return nil +} + func (bav *UtxoView) _checkAndUpdateDerivedKeySpendingLimit( txn *MsgDeSoTxn, derivedPkBytes []byte, totalInput uint64, utxoOpsForTxn []*UtxoOperation, blockHeight uint32) ( _utxoOpsForTxn []*UtxoOperation, _err error) { @@ -3081,6 +3088,65 @@ func (bav *UtxoView) _connectUpdateGlobalParams( ) } } + if len(extraData[FeeBucketGrowthRateBasisPointsKey]) > 0 { + val, bytesRead := Uvarint( + extraData[FeeBucketGrowthRateBasisPointsKey], + ) + if val > _maxBasisPoints { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: FeeBucketGrowthRateBasisPoints must be <= %d", + _maxBasisPoints, + ) + } + newGlobalParamsEntry.FeeBucketGrowthRateBasisPoints = val + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode FeeBucketGrowthRateBasisPoints as uint64", + ) + } + } + if len(extraData[FailingTransactionBMFMultiplierBasisPointsKey]) > 0 { + val, bytesRead := Uvarint( + extraData[FailingTransactionBMFMultiplierBasisPointsKey], + ) + if val > _maxBasisPoints { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: FailingTransactionBMFMultiplierBasisPoints must be <= %d", + _maxBasisPoints, + ) + } + newGlobalParamsEntry.FailingTransactionBMFMultiplierBasisPoints = val + + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode FailingTransactionBMFMultiplierBasisPoints as uint64", + ) + } + } + } + + if blockHeight >= bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + var bytesRead int + if len(extraData[FeeBucketGrowthRateBasisPointsKey]) > 0 { + 
newGlobalParamsEntry.FeeBucketGrowthRateBasisPoints, bytesRead = Uvarint( + extraData[FeeBucketGrowthRateBasisPointsKey], + ) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode FeeBucketGrowthRateBasisPoints as uint64", + ) + } + } + if len(extraData[FailingTransactionBMFMultiplierBasisPointsKey]) > 0 { + newGlobalParamsEntry.FailingTransactionBMFMultiplierBasisPoints, bytesRead = Uvarint( + extraData[FailingTransactionBMFMultiplierBasisPointsKey], + ) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode FailingTransactionBMFMultiplierBasisPoints as uint64", + ) + } + } } var newForbiddenPubKeyEntry *ForbiddenPubKeyEntry @@ -3561,29 +3627,17 @@ func (bav *UtxoView) _connectTransaction(txn *MsgDeSoTxn, txHash *BlockHash, if blockHeight >= bav.Params.ForkHeights.BalanceModelBlockHeight && txn.TxnMeta.GetTxnType() != TxnTypeBlockReward { - if uint64(blockHeight) > txn.TxnNonce.ExpirationBlockHeight { - return nil, 0, 0, 0, errors.Wrapf(RuleErrorNonceExpired, - "ConnectTransaction: Nonce %s has expired for public key %v", - txn.TxnNonce.String(), PkToStringBoth(txn.PublicKey)) + if err := bav.ValidateTransactionNonce(txn, uint64(blockHeight)); err != nil { + return nil, 0, 0, 0, errors.Wrapf(err, + "ConnectTransaction: error validating transaction nonce") } pkidEntry := bav.GetPKIDForPublicKey(txn.PublicKey) if pkidEntry == nil || pkidEntry.isDeleted { return nil, 0, 0, 0, fmt.Errorf( - "DisconnectTransaction: PKID for public key %s does not exist", + "ConnectTransaction: PKID for public key %s does not exist", PkToString(txn.PublicKey, bav.Params)) } - nonce, err := bav.GetTransactorNonceEntry(txn.TxnNonce, pkidEntry.PKID) - if err != nil { - return nil, 0, 0, 0, errors.Wrapf(err, - "ConnectTransaction: Problem getting transaction nonce entry for nonce %s and PKID %v", - txn.TxnNonce.String(), pkidEntry.PKID) - } - if nonce != nil && !nonce.isDeleted { - return nil, 0, 
0, 0, errors.Wrapf(RuleErrorReusedNonce, - "ConnectTransaction: Nonce %s has already been used for PKID %v", - txn.TxnNonce.String(), pkidEntry.PKID) - } bav.SetTransactorNonceEntry(&TransactorNonceEntry{ Nonce: txn.TxnNonce, TransactorPKID: pkidEntry.PKID, @@ -3593,6 +3647,138 @@ func (bav *UtxoView) _connectTransaction(txn *MsgDeSoTxn, txHash *BlockHash, return utxoOpsForTxn, totalInput, totalOutput, fees, nil } +func (bav *UtxoView) ValidateTransactionNonce(txn *MsgDeSoTxn, blockHeight uint64) error { + if txn == nil || txn.TxnNonce == nil { + return fmt.Errorf("ValidateTransactionNonce: Nonce or txn is nil for public key %v", + PkToStringBoth(txn.PublicKey)) + } + + if blockHeight > txn.TxnNonce.ExpirationBlockHeight { + return errors.Wrapf(RuleErrorNonceExpired, + "ValidateTransactionNonce: Nonce %s has expired for public key %v", + txn.TxnNonce.String(), PkToStringBoth(txn.PublicKey)) + } + pkidEntry := bav.GetPKIDForPublicKey(txn.PublicKey) + if pkidEntry == nil || pkidEntry.isDeleted { + return fmt.Errorf( + "ValidateTransactionNonce: PKID for public key %s does not exist", + PkToString(txn.PublicKey, bav.Params)) + } + + nonce, err := bav.GetTransactorNonceEntry(txn.TxnNonce, pkidEntry.PKID) + if err != nil { + return errors.Wrapf(err, + "ValidateTransactionNonce: Problem getting transaction nonce entry for nonce %s and PKID %v", + txn.TxnNonce.String(), pkidEntry.PKID) + } + if nonce != nil && !nonce.isDeleted { + return errors.Wrapf(RuleErrorReusedNonce, + "ValidateTransactionNonce: Nonce %s has already been used for PKID %v", + txn.TxnNonce.String(), pkidEntry.PKID) + } + return nil +} + +// _connectFailingTransaction is used to process the fee and burn associated with the user submitting a failing transaction. +// A failing transaction is a txn that passes formatting validation, yet fails connecting to the UtxoView. This can happen for a +// number of reasons, such as insufficient DESO balance, wrong public key, etc. 
With Revolution's Fee-Time block ordering, these +// failing transactions are included in the blocks and their fees are burned. In addition, a major part of the effective +// fees of this transaction is burned with BMF. This makes spam attacks economically disadvantageous. Attacker's funds +// are burned, to the benefit of everyone else on the network. BMF algorithm also computes a utility fee, which is +// distributed to the block producer. +func (bav *UtxoView) _connectFailingTransaction(txn *MsgDeSoTxn, blockHeight uint32, verifySignatures bool) ( + _utxoOps []*UtxoOperation, _burnFee uint64, _utilityFee uint64, _err error) { + + // Failing transactions are only allowed after ProofOfStake2ConsensusCutoverBlockHeight. + if blockHeight <= bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + return nil, 0, 0, fmt.Errorf("_connectFailingTransaction: Failing transactions " + + "not allowed before ProofOfStake2ConsensusCutoverBlockHeight") + } + + // Sanity check the transaction to make sure it is properly formatted. + if err := CheckTransactionSanity(txn, blockHeight, bav.Params); err != nil { + return nil, 0, 0, errors.Wrapf(err, "_connectFailingTransaction: "+ + "Problem checking txn sanity") + } + + if err := ValidateDeSoTxnSanityBalanceModel(txn, uint64(blockHeight), bav.Params, bav.GlobalParamsEntry); err != nil { + return nil, 0, 0, errors.Wrapf(err, "_connectFailingTransaction: "+ + "Problem checking txn sanity under balance model") + } + + if err := bav.ValidateTransactionNonce(txn, uint64(blockHeight)); err != nil { + return nil, 0, 0, errors.Wrapf(err, "_connectFailingTransaction: "+ + "Problem validating transaction nonce") + } + + // Get the FailingTransactionBMFMultiplierBasisPoints from the global params entry. 
We then compute the effective fee + // as: effectiveFee = txn.TxnFeeNanos * FailingTransactionBMFMultiplierBasisPoints / 10000 + gp := bav.GetCurrentGlobalParamsEntry() + + failingTransactionRate := uint256.NewInt().SetUint64(gp.FailingTransactionBMFMultiplierBasisPoints) + failingTransactionFee := uint256.NewInt().SetUint64(txn.TxnFeeNanos) + basisPointsAsUint256 := uint256.NewInt().SetUint64(10000) + + effectiveFeeU256 := failingTransactionRate.Mul(failingTransactionRate, failingTransactionFee) + effectiveFeeU256.Div(effectiveFeeU256, basisPointsAsUint256) + // We should never overflow on the effective fee, since FailingTransactionBMFMultiplierBasisPoints is <= 10000. + // But if for some magical reason we do, we set the effective fee to the max uint64. We don't error, and + // instead let _spendBalance handle the overflow. + if !effectiveFeeU256.IsUint64() { + effectiveFeeU256.SetUint64(math.MaxUint64) + } + effectiveFee := effectiveFeeU256.Uint64() + // If the effective fee is less than the minimum network fee, we set it to the minimum network fee. + if effectiveFee < gp.MinimumNetworkFeeNanosPerKB { + effectiveFee = gp.MinimumNetworkFeeNanosPerKB + } + burnFee, utilityFee := computeBMF(effectiveFee) + + var utxoOps []*UtxoOperation + // When spending balances, we need to check for immature block rewards. Since we don't have + // the block rewards yet for the current block, we subtract one from the current block height + // when spending balances. + feeUtxoOp, err := bav._spendBalance(effectiveFee, txn.PublicKey, blockHeight-1) + if err != nil { + return nil, 0, 0, errors.Wrapf(err, "_connectFailingTransaction: Problem "+ + "spending balance") + } + utxoOps = append(utxoOps, feeUtxoOp) + + // If verifySignatures is passed, we check transaction signature. 
+ if verifySignatures { + if err := bav._verifyTxnSignature(txn, blockHeight); err != nil { + return nil, 0, 0, errors.Wrapf(err, "_connectFailingTransaction: Problem "+ + "verifying signature") + } + } + + return utxoOps, burnFee, utilityFee, nil +} + +// computeBMF computes the burn fee and the utility fee for a given fee. The acronym stands for Burn Maximizing Fee, which +// entails that the burn function is designed to maximize the amount of DESO burned, while providing the minimal viable +// utility fee to the block producer. This is so that block producers have no advantage over other network participants +// in accruing DESO. The utility fee that block producers get from transaction fees is just enough so that it's economically +// advantageous to include as many transactions as possible in blocks. Reliably doing so for an extended period of time +// could accumulate to some DESO from transaction fees. +// The utility fee is computed by taking a binary logarithm of the fee, and the remainder is burned. BMF(fee) computes: +// +// burnFee := fee - log_2(fee), utilityFee := log_2(fee). +func computeBMF(fee uint64) (_burnFee uint64, _utilityFee uint64) { + // Compute the utility fee as log_2(fee). We can find it by taking the bit length of fee. + // Alternatively: uint64(bits.Len64(fee)) + utilityFee, _ := BigFloatLog2(NewFloat().SetUint64(fee)).Uint64() + + // This should never happen but just in case make sure utilityFee is not greater than fee. + if utilityFee > fee { + utilityFee = fee + } + // Compute the burn as fee - log_2(fee). 
+ burnFee := fee - utilityFee + return burnFee, utilityFee +} + func (bav *UtxoView) _compareBalancesToSnapshot(balanceSnapshot map[PublicKey]uint64) ( *big.Int, map[PublicKey]*big.Int, error) { runningTotal := big.NewInt(0) diff --git a/lib/block_view_test.go b/lib/block_view_test.go index fa69e25a2..6e708e9fc 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -17,6 +17,7 @@ import ( "github.com/golang/glog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "math/rand" ) func _strToPk(t *testing.T, pkStr string) []byte { @@ -2202,3 +2203,107 @@ func TestBlockRewardPatch(t *testing.T) { require.NoError(t, err) } } + +func TestConnectFailingTransaction(t *testing.T) { + require := require.New(t) + seed := int64(1011) + rand := rand.New(rand.NewSource(seed)) + + globalParams := _testGetDefaultGlobalParams() + feeMin := globalParams.MinimumNetworkFeeNanosPerKB + feeMax := uint64(10000) + + chain, params, db := NewLowDifficultyBlockchain(t) + params.ForkHeights.BalanceModelBlockHeight = 1 + params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = 1 + params.EncoderMigrationHeights.ProofOfStake1StateSetupMigration.Height = 1 + params.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + oldParams := GlobalDeSoParams + GlobalDeSoParams = *params + mempool, miner := NewTestMiner(t, chain, params, true) + // Mine a few blocks to give the senderPkString some money. 
+ _, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, mempool) + require.NoError(err) + _, err = miner.MineAndProcessSingleBlock(0 /*threadIndex*/, mempool) + require.NoError(err) + + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + m0PublicKeyBase58Check := Base58CheckEncode(m0PubBytes, false, params) + + _, _, _ = _doBasicTransferWithViewFlush( + t, chain, db, params, senderPkString, m0PublicKeyBase58Check, + senderPrivString, 200000, 11) + + blockHeight := chain.BlockTip().Height + 1 + blockView, err := NewUtxoView(db, params, nil, nil) + require.NoError(err) + txn1 := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) + utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn1, blockHeight, true) + require.NoError(err) + require.Equal(1, len(utxoOps)) + expectedBurnFee, expectedUtilityFee := _getBMFForTxn(txn1, globalParams) + require.Equal(expectedBurnFee, burnFee) + require.Equal(expectedUtilityFee, utilityFee) + + err = blockView.FlushToDb(uint64(blockHeight)) + + // Also test updating the global params for FailingTransactionBMFMultiplierBasisPoints and FeeBucketGrowthRateBasisPoints. + testMeta := &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: blockHeight, + feeRateNanosPerKb: uint64(201), + } + // Allow m0 to update global params. + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(m0PubBytes)] = true + { + // Set FailingTransactionBMFMultiplierBasisPoints=7000 or 70%. + _updateGlobalParamsEntryWithExtraData( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + map[string][]byte{FailingTransactionBMFMultiplierBasisPointsKey: UintToBuf(7000)}, + ) + } + { + // Set FeeBucketGrowthRateBasisPoints=7000 or 70%. 
+ _updateGlobalParamsEntryWithExtraData( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + map[string][]byte{FeeBucketGrowthRateBasisPointsKey: UintToBuf(7000)}, + ) + } + blockView, err = NewUtxoView(db, params, nil, nil) + require.NoError(err) + newParams := blockView.GetCurrentGlobalParamsEntry() + require.Equal(uint64(7000), newParams.FailingTransactionBMFMultiplierBasisPoints) + require.Equal(uint64(7000), newParams.FeeBucketGrowthRateBasisPoints) + + // Try connecting another failing transaction, and make sure the burn and utility fees are computed accurately. + txn2 := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) + utxoOps, burnFee, utilityFee, err = blockView._connectFailingTransaction(txn2, blockHeight, true) + require.NoError(err) + require.Equal(1, len(utxoOps)) + expectedBurnFee, expectedUtilityFee = _getBMFForTxn(txn2, newParams) + require.Equal(expectedBurnFee, burnFee) + require.Equal(expectedUtilityFee, utilityFee) + + err = blockView.FlushToDb(uint64(blockHeight)) + + GlobalDeSoParams = oldParams +} + +func _getBMFForTxn(txn *MsgDeSoTxn, gp *GlobalParamsEntry) (_burnFee uint64, _utilityFee uint64) { + failingTransactionRate := NewFloat().SetUint64(gp.FailingTransactionBMFMultiplierBasisPoints) + failingTransactionRate.Quo(failingTransactionRate, NewFloat().SetUint64(10000)) + failingTransactionFee, _ := NewFloat().Mul(failingTransactionRate, NewFloat().SetUint64(txn.TxnFeeNanos)).Uint64() + return computeBMF(failingTransactionFee) +} diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 8158fff99..8171062c8 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3799,31 +3799,37 @@ type GlobalParamsEntry struct { // blocks) before they are jailed. JailInactiveValidatorGracePeriodEpochs uint64 - // FeeBucketRateMultiplierBasisPoints is the rate of growth of the fee bucket ranges. 
This is part of the new + // FeeBucketGrowthRateBasisPoints is the rate of growth of the fee bucket ranges. This is part of the new // PoS Mempool. The multiplier is given as basis points. For example a value of 1000 means that the fee bucket // ranges will grow by 10% each time. If, let's say, we start with MinimumNetworkFeeNanosPerKB of 1000 nanos, // then the first bucket will be [1000, 1099], the second bucket will be [1100, 1209], the third bucket will // be [1210, 1330], etc. - FeeBucketRateMultiplierBasisPoints uint64 + FeeBucketGrowthRateBasisPoints uint64 + + // FailingTransactionBMFMultiplierBasisPoints is the factor of the transaction fee that is used for the computation + // BMF. The value is expressed in basis points. For example a value of 2500 means that 25% of the fee will be + // failing transaction fee will be used in the BMF algorithm. + FailingTransactionBMFMultiplierBasisPoints uint64 } func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { return &GlobalParamsEntry{ - USDCentsPerBitcoin: gp.USDCentsPerBitcoin, - CreateProfileFeeNanos: gp.CreateProfileFeeNanos, - CreateNFTFeeNanos: gp.CreateNFTFeeNanos, - MaxCopiesPerNFT: gp.MaxCopiesPerNFT, - MinimumNetworkFeeNanosPerKB: gp.MinimumNetworkFeeNanosPerKB, - MaxNonceExpirationBlockHeightOffset: gp.MaxNonceExpirationBlockHeightOffset, - StakeLockupEpochDuration: gp.StakeLockupEpochDuration, - ValidatorJailEpochDuration: gp.ValidatorJailEpochDuration, - LeaderScheduleMaxNumValidators: gp.LeaderScheduleMaxNumValidators, - ValidatorSetMaxNumValidators: gp.ValidatorSetMaxNumValidators, - StakingRewardsMaxNumStakes: gp.StakingRewardsMaxNumStakes, - StakingRewardsAPYBasisPoints: gp.StakingRewardsAPYBasisPoints, - EpochDurationNumBlocks: gp.EpochDurationNumBlocks, - JailInactiveValidatorGracePeriodEpochs: gp.JailInactiveValidatorGracePeriodEpochs, - FeeBucketRateMultiplierBasisPoints: gp.FeeBucketRateMultiplierBasisPoints, + USDCentsPerBitcoin: gp.USDCentsPerBitcoin, + CreateProfileFeeNanos: 
gp.CreateProfileFeeNanos, + CreateNFTFeeNanos: gp.CreateNFTFeeNanos, + MaxCopiesPerNFT: gp.MaxCopiesPerNFT, + MinimumNetworkFeeNanosPerKB: gp.MinimumNetworkFeeNanosPerKB, + MaxNonceExpirationBlockHeightOffset: gp.MaxNonceExpirationBlockHeightOffset, + StakeLockupEpochDuration: gp.StakeLockupEpochDuration, + ValidatorJailEpochDuration: gp.ValidatorJailEpochDuration, + LeaderScheduleMaxNumValidators: gp.LeaderScheduleMaxNumValidators, + ValidatorSetMaxNumValidators: gp.ValidatorSetMaxNumValidators, + StakingRewardsMaxNumStakes: gp.StakingRewardsMaxNumStakes, + StakingRewardsAPYBasisPoints: gp.StakingRewardsAPYBasisPoints, + EpochDurationNumBlocks: gp.EpochDurationNumBlocks, + JailInactiveValidatorGracePeriodEpochs: gp.JailInactiveValidatorGracePeriodEpochs, + FeeBucketGrowthRateBasisPoints: gp.FeeBucketGrowthRateBasisPoints, + FailingTransactionBMFMultiplierBasisPoints: gp.FailingTransactionBMFMultiplierBasisPoints, } } @@ -3847,7 +3853,8 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.StakingRewardsAPYBasisPoints)...) data = append(data, UintToBuf(gp.EpochDurationNumBlocks)...) data = append(data, UintToBuf(gp.JailInactiveValidatorGracePeriodEpochs)...) - data = append(data, UintToBuf(gp.FeeBucketRateMultiplierBasisPoints)...) + data = append(data, UintToBuf(gp.FeeBucketGrowthRateBasisPoints)...) + data = append(data, UintToBuf(gp.FailingTransactionBMFMultiplierBasisPoints)...) 
} return data } @@ -3914,9 +3921,13 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading JailInactiveValidatorGracePeriodEpochs: ") } - gp.FeeBucketRateMultiplierBasisPoints, err = ReadUvarint(rr) + gp.FeeBucketGrowthRateBasisPoints, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading FeeBucketGrowthRateBasisPoints") + } + gp.FailingTransactionBMFMultiplierBasisPoints, err = ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading FeeBucketRateMultiplierBasisPoints") + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading FailingTransactionBMFMultiplierBasisPoints") } } return nil @@ -3930,13 +3941,13 @@ func (gp *GlobalParamsEntry) GetEncoderType() EncoderType { return EncoderTypeGlobalParamsEntry } -// ComputeFeeTimeBucketMinimumFeeAndMultiplier takes the MinimumNetworkFeeNanosPerKB and FeeBucketRateMultiplierBasisPoints for +// ComputeFeeTimeBucketMinimumFeeAndMultiplier takes the MinimumNetworkFeeNanosPerKB and FeeBucketGrowthRateBasisPoints for // the GlobalParamsEntry, and returns them as big.Floats. func (gp *GlobalParamsEntry) ComputeFeeTimeBucketMinimumFeeAndMultiplier() ( _minimumRate *big.Float, _bucketMultiplier *big.Float) { minimumNetworkFeeNanosPerKB := NewFloat().SetUint64(gp.MinimumNetworkFeeNanosPerKB) - feeBucketMultiplier := NewFloat().SetUint64(10000 + gp.FeeBucketRateMultiplierBasisPoints) + feeBucketMultiplier := NewFloat().SetUint64(10000 + gp.FeeBucketGrowthRateBasisPoints) feeBucketMultiplier.Quo(feeBucketMultiplier, NewFloat().SetUint64(10000)) return minimumNetworkFeeNanosPerKB, feeBucketMultiplier } diff --git a/lib/constants.go b/lib/constants.go index 7b69371ba..c4363db03 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -699,16 +699,19 @@ type DeSoParams struct { // before they are jailed. 
DefaultJailInactiveValidatorGracePeriodEpochs uint64 + // DefaultFeeBucketGrowthRateBasisPoints is the rate of growth of the fee bucket ranges. The multiplier is given + // as basis points. For example a value of 1000 means that the fee bucket ranges will grow by 10% each time. + DefaultFeeBucketGrowthRateBasisPoints uint64 + + // DefaultFailingTransactionBMFMultiplierBasisPoints is the default rate for failing transaction fees, in basis points, + // used in BMF calculations. E.g. a value of 2500 means that 25% of the failing transaction's fee is used + // in BMF calculations. + DefaultFailingTransactionBMFMultiplierBasisPoints uint64 + ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights EncoderMigrationHeightsList []*MigrationHeight - - // The maximum aggregate number of bytes of transactions included in the PoS mempool. - MaxMempoolPosSizeBytes uint64 - - // MempoolBackupTimeMilliseconds is the frequency with which pos mempool persists transactions to storage. - MempoolBackupTimeMilliseconds uint64 } var RegtestForkHeights = ForkHeights{ @@ -1109,12 +1112,15 @@ var DeSoMainnetParams = DeSoParams{ // The number of epochs before an inactive validator is jailed DefaultJailInactiveValidatorGracePeriodEpochs: uint64(48), + // The rate of growth of the fee bucket ranges. + DefaultFeeBucketGrowthRateBasisPoints: uint64(1000), + + // The rate of the failing transaction's fee used in BMF calculations. 
+ DefaultFailingTransactionBMFMultiplierBasisPoints: uint64(2500), + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), - - MaxMempoolPosSizeBytes: 3 << 30, // 3Gb - MempoolBackupTimeMilliseconds: 30000, } func mustDecodeHexBlockHashBitcoin(ss string) *BlockHash { @@ -1370,12 +1376,15 @@ var DeSoTestnetParams = DeSoParams{ // The number of epochs before an inactive validator is jailed DefaultJailInactiveValidatorGracePeriodEpochs: uint64(48), + // The rate of growth of the fee bucket ranges. + DefaultFeeBucketGrowthRateBasisPoints: uint64(1000), + + // The rate of the failing transaction's fee used in BMF calculations. + DefaultFailingTransactionBMFMultiplierBasisPoints: uint64(2500), + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), - - MaxMempoolPosSizeBytes: 3 << 30, // 3Gb - MempoolBackupTimeMilliseconds: 30000, } // GetDataDir gets the user data directory where we store files @@ -1410,21 +1419,23 @@ const ( IsFrozenKey = "IsFrozen" // Keys for a GlobalParamUpdate transaction's extra data map. 
- USDCentsPerBitcoinKey = "USDCentsPerBitcoin" - MinNetworkFeeNanosPerKBKey = "MinNetworkFeeNanosPerKB" - CreateProfileFeeNanosKey = "CreateProfileFeeNanos" - CreateNFTFeeNanosKey = "CreateNFTFeeNanos" - MaxCopiesPerNFTKey = "MaxCopiesPerNFT" - MaxNonceExpirationBlockHeightOffsetKey = "MaxNonceExpirationBlockHeightOffset" - ForbiddenBlockSignaturePubKeyKey = "ForbiddenBlockSignaturePubKey" - StakeLockupEpochDurationKey = "StakeLockupEpochDuration" - ValidatorJailEpochDurationKey = "ValidatorJailEpochDuration" - LeaderScheduleMaxNumValidatorsKey = "LeaderScheduleMaxNumValidators" - ValidatorSetMaxNumValidatorsKey = "ValidatorSetMaxNumValidators" - StakingRewardsMaxNumStakesKey = "StakingRewardsMaxNumStakes" - StakingRewardsAPYBasisPointsKey = "StakingRewardsAPYBasisPoints" - EpochDurationNumBlocksKey = "EpochDurationNumBlocks" - JailInactiveValidatorGracePeriodEpochsKey = "JailInactiveValidatorGracePeriodEpochs" + USDCentsPerBitcoinKey = "USDCentsPerBitcoin" + MinNetworkFeeNanosPerKBKey = "MinNetworkFeeNanosPerKB" + CreateProfileFeeNanosKey = "CreateProfileFeeNanos" + CreateNFTFeeNanosKey = "CreateNFTFeeNanos" + MaxCopiesPerNFTKey = "MaxCopiesPerNFT" + MaxNonceExpirationBlockHeightOffsetKey = "MaxNonceExpirationBlockHeightOffset" + ForbiddenBlockSignaturePubKeyKey = "ForbiddenBlockSignaturePubKey" + StakeLockupEpochDurationKey = "StakeLockupEpochDuration" + ValidatorJailEpochDurationKey = "ValidatorJailEpochDuration" + LeaderScheduleMaxNumValidatorsKey = "LeaderScheduleMaxNumValidators" + ValidatorSetMaxNumValidatorsKey = "ValidatorSetMaxNumValidators" + StakingRewardsMaxNumStakesKey = "StakingRewardsMaxNumStakes" + StakingRewardsAPYBasisPointsKey = "StakingRewardsAPYBasisPoints" + EpochDurationNumBlocksKey = "EpochDurationNumBlocks" + JailInactiveValidatorGracePeriodEpochsKey = "JailInactiveValidatorGracePeriodEpochs" + FeeBucketGrowthRateBasisPointsKey = "FeeBucketGrowthRateBasisPointsKey" + FailingTransactionBMFMultiplierBasisPointsKey = 
"FailingTransactionBMFMultiplierBasisPoints" DiamondLevelKey = "DiamondLevel" DiamondPostHashKey = "DiamondPostHash" @@ -1498,8 +1509,10 @@ var ( // We initialize the CreateNFTFeeNanos to 0 so we do not assess a fee to create an NFT until specified by ParamUpdater. CreateNFTFeeNanos: 0, MaxCopiesPerNFT: 0, - // We initialize the FeeBucketRateMultiplierBasisPoints to 1000, or equivalently, a multiplier of 1.1x. - FeeBucketRateMultiplierBasisPoints: 1000, + // We initialize the FeeBucketGrowthRateBasisPoints to 1000, or equivalently, a multiplier of 1.1x. + FeeBucketGrowthRateBasisPoints: 1000, + // We initialize the FailingTransactionBMFMultiplierBasisPoints to 2500, or equivalently, a rate of 0.25. + FailingTransactionBMFMultiplierBasisPoints: 2500, } ) diff --git a/lib/errors.go b/lib/errors.go index 8146e06fb..194f47d88 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -507,7 +507,8 @@ const ( TxErrorNoNonceAfterBalanceModelBlockHeight RuleError = "TxErrorNoNonceAfterBalanceModelBlockHeight" // Mempool - MempoolErrorNotRunning RuleError = "MempoolErrorNotRunning" + MempoolErrorNotRunning RuleError = "MempoolErrorNotRunning" + MempoolFailedReplaceByHigherFee RuleError = "MempoolFailedReplaceByHigherFee" ) func (e RuleError) Error() string { diff --git a/lib/mempool.go b/lib/legacy_mempool.go similarity index 99% rename from lib/mempool.go rename to lib/legacy_mempool.go index 69687b2e1..930c37658 100644 --- a/lib/mempool.go +++ b/lib/legacy_mempool.go @@ -1942,6 +1942,22 @@ func ComputeTransactionMetadata(txn *MsgDeSoTxn, utxoView *UtxoView, blockHash * txnMeta.UnjailValidatorTxindexMetadata = txindexMetadata txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, affectedPublicKeys...) } + // Check if the transactor is an affected public key. If not, add them. 
+ if txnMeta.TransactorPublicKeyBase58Check != "" { + transactorPublicKeyFound := false + for _, affectedPublicKey := range txnMeta.AffectedPublicKeys { + if affectedPublicKey.PublicKeyBase58Check == txnMeta.TransactorPublicKeyBase58Check { + transactorPublicKeyFound = true + break + } + } + if !transactorPublicKeyFound { + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, &AffectedPublicKey{ + PublicKeyBase58Check: txnMeta.TransactorPublicKeyBase58Check, + Metadata: "TransactorPublicKeyBase58Check", + }) + } + } return txnMeta } diff --git a/lib/mempool_test.go b/lib/legacy_mempool_test.go similarity index 100% rename from lib/mempool_test.go rename to lib/legacy_mempool_test.go diff --git a/lib/network.go b/lib/network.go index d4b693b6c..3c20a6f8e 100644 --- a/lib/network.go +++ b/lib/network.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "github.com/deso-protocol/core/collections/bitset" "io" "math" "math/big" @@ -1914,6 +1915,14 @@ type MsgDeSoHeader struct { // event that ASICs become powerful enough to have birthday problems in the future. ExtraNonce uint64 + // TransactionsConnectStatus is only used for Proof of Stake blocks, starting with + // MsgDeSoHeader version 2. For all earlier versions, this field will default to nil. + // + // The hash of the TxnConnectStatusByIndex field in MsgDeSoBlock. It is stored to ensure + // that the TxnConnectStatusByIndex is part of the header hash, which is signed by the + // proposer. The full index is stored in the block to offload space complexity. + TxnConnectStatusByIndexHash *BlockHash + // ProposerPublicKey is only used for Proof of Stake blocks, starting with MsgDeSoHeader // version 2. For all earlier versions, this field will default to nil. // @@ -2132,6 +2141,12 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error // The Nonce and ExtraNonce fields are unused in version 2. We skip them // during both encoding and decoding. 
+ // TxnConnectStatusByIndexHash + if msg.TxnConnectStatusByIndexHash == nil { + return nil, fmt.Errorf("EncodeHeaderVersion2: TxnConnectStatusByIndexHash must be non-nil") + } + retBytes = append(retBytes, msg.TxnConnectStatusByIndexHash[:]...) + // ProposerPublicKey if msg.ProposerPublicKey == nil { return nil, fmt.Errorf("EncodeHeaderVersion2: ProposerPublicKey must be non-nil") @@ -2347,6 +2362,13 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { retHeader.Nonce = 0 retHeader.ExtraNonce = 0 + // TxnConnectStatusByIndexHash + retHeader.TxnConnectStatusByIndexHash = &BlockHash{} + _, err = io.ReadFull(rr, retHeader.TxnConnectStatusByIndexHash[:]) + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TxnConnectStatusByIndexHash") + } + // ProposerPublicKey retHeader.ProposerPublicKey, err = ReadPublicKey(rr) if err != nil { @@ -2566,6 +2588,11 @@ type MsgDeSoBlock struct { // entity, which can be useful for nodes that want to restrict who they accept blocks // from. BlockProducerInfo *BlockProducerInfo + + // This bitset field stores information whether each transaction in the block passes + // or fails to connect. The bit at i-th position is set to 1 if the i-th transaction + // in the block passes connect, and 0 otherwise. 
+ TxnConnectStatusByIndex *bitset.Bitset } func (msg *MsgDeSoBlock) EncodeBlockCommmon(preSignature bool) ([]byte, error) { @@ -2619,7 +2646,18 @@ func (msg *MsgDeSoBlock) EncodeBlockVersion1(preSignature bool) ([]byte, error) } func (msg *MsgDeSoBlock) EncodeBlockVersion2(preSignature bool) ([]byte, error) { - return msg.EncodeBlockCommmon(preSignature) + data, err := msg.EncodeBlockCommmon(preSignature) + if err != nil { + return nil, err + } + + // TxnConnectStatusByIndex + if msg.TxnConnectStatusByIndex == nil { + return nil, fmt.Errorf("MsgDeSoBlock.EncodeBlockVersion2: TxnConnectStatusByIndex should not be nil") + } + data = append(data, EncodeBitset(msg.TxnConnectStatusByIndex)...) + + return data, nil } func (msg *MsgDeSoBlock) ToBytes(preSignature bool) ([]byte, error) { @@ -2722,6 +2760,14 @@ func (msg *MsgDeSoBlock) FromBytes(data []byte) error { } } + // Version 2 blocks have a TxnStatusConnectedIndex attached to them. + if ret.Header.Version == HeaderVersion2 { + ret.TxnConnectStatusByIndex, err = DecodeBitset(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoBlock.FromBytes: Error decoding TxnConnectStatusByIndex") + } + } + *msg = *ret return nil } diff --git a/lib/network_test.go b/lib/network_test.go index 43d40d4d5..7f47737a4 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -121,6 +121,12 @@ func createTestBlockHeaderVersion2(t *testing.T, includeTimeoutQC bool) *MsgDeSo 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, } + testTxnConnectStatusByIndex := BlockHash{ + 0x00, 0x03, 0x04, 0x21, 0x06, 0x07, 0x08, 0x09, 0x10, 0x19, + 0x12, 0x13, 0x14, 0x15, 0x44, 0x17, 0x18, 0x19, 0x20, 0x21, + 0x02, 0x23, 0x24, 0x25, 0x26, 0x27, 0x33, 0x29, 0x30, 0x31, + 0x32, 0x33, + } testRandomSeedHash := RandomSeedHash{ 0x00, 0x36, 0x36, 0x37, 0x38, 0x39, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x50, 0x51, 0x52, 0x53, @@ -164,12 +170,13 @@ func createTestBlockHeaderVersion2(t *testing.T, includeTimeoutQC 
bool) *MsgDeSo TstampNanoSecs: SecondsToNanoSeconds(1678943210), Height: uint64(1321012345), // Nonce and ExtraNonce are unused and set to 0 starting in version 2. - Nonce: uint64(0), - ExtraNonce: uint64(0), - ProposerPublicKey: NewPublicKey(pkForTesting1), - ProposerVotingPublicKey: testBLSPublicKey, - ProposerRandomSeedHash: &testRandomSeedHash, - ProposedInView: uint64(1432101234), + Nonce: uint64(0), + ExtraNonce: uint64(0), + TxnConnectStatusByIndexHash: &testTxnConnectStatusByIndex, + ProposerPublicKey: NewPublicKey(pkForTesting1), + ProposerVotingPublicKey: testBLSPublicKey, + ProposerRandomSeedHash: &testRandomSeedHash, + ProposedInView: uint64(1432101234), // Use real signatures and public keys for the PoS fields ProposerVotePartialSignature: testBLSSignature, } @@ -185,10 +192,7 @@ func createTestBlockHeaderVersion2(t *testing.T, includeTimeoutQC bool) *MsgDeSo } func TestHeaderConversionAndReadWriteMessage(t *testing.T) { - assert := assert.New(t) require := require.New(t) - _ = assert - _ = require networkType := NetworkType_MAINNET expectedBlockHeadersToTest := []*MsgDeSoHeader{ @@ -201,38 +205,38 @@ func TestHeaderConversionAndReadWriteMessage(t *testing.T) { // versions we want to test. for _, expectedBlockHeader := range expectedBlockHeadersToTest { data, err := expectedBlockHeader.ToBytes(false) - assert.NoError(err) + require.NoError(err) testHdr := NewMessage(MsgTypeHeader) err = testHdr.FromBytes(data) - assert.NoError(err) + require.NoError(err) - assert.Equal(expectedBlockHeader, testHdr) + require.Equal(expectedBlockHeader, testHdr) // Test read write. var buf bytes.Buffer payload, err := WriteMessage(&buf, expectedBlockHeader, networkType) - assert.NoError(err) + require.NoError(err) // Form the header from the payload and make sure it matches. 
hdrFromPayload := NewMessage(MsgTypeHeader).(*MsgDeSoHeader) - assert.NotNil(hdrFromPayload, "NewMessage(MsgTypeHeader) should not return nil.") - assert.Equal(uint64(0), hdrFromPayload.Nonce, "NewMessage(MsgTypeHeader) should initialize Nonce to empty byte slice.") + require.NotNil(hdrFromPayload, "NewMessage(MsgTypeHeader) should not return nil.") + require.Equal(uint64(0), hdrFromPayload.Nonce, "NewMessage(MsgTypeHeader) should initialize Nonce to empty byte slice.") err = hdrFromPayload.FromBytes(payload) - assert.NoError(err) - assert.Equal(expectedBlockHeader, hdrFromPayload) + require.NoError(err) + require.Equal(expectedBlockHeader, hdrFromPayload) hdrBytes := buf.Bytes() testMsg, data, err := ReadMessage(bytes.NewReader(hdrBytes), networkType) - assert.NoError(err) - assert.Equal(expectedBlockHeader, testMsg) + require.NoError(err) + require.Equal(expectedBlockHeader, testMsg) // Compute the header payload bytes so we can compare them. hdrPayload, err := expectedBlockHeader.ToBytes(false) - assert.NoError(err) - assert.Equal(hdrPayload, data) + require.NoError(err) + require.Equal(hdrPayload, data) - assert.Equalf(14, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), + require.Equalf(15, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), "Number of fields in HEADER message is different from expected. "+ "Did you add a new field? 
If so, make sure the serialization code "+ "works, add the new field to the test case, and fix this error.") @@ -415,6 +419,33 @@ var expectedBlock = &MsgDeSoBlock{ }, } +func createTestBlockVersion1(t *testing.T) *MsgDeSoBlock { + require := require.New(t) + + newBlockV1 := *expectedBlock + + // Add a signature to the block V1 + priv, err := btcec.NewPrivateKey(btcec.S256()) + require.NoError(err) + newBlockV1.BlockProducerInfo.Signature, err = priv.Sign([]byte{0x01, 0x02, 0x03}) + require.NoError(err) + return &newBlockV1 +} + +func createTestBlockVersion2(t *testing.T, includeTimeoutQC bool) *MsgDeSoBlock { + block := *expectedBlock + block.BlockProducerInfo = nil + + // Set V2 header. + block.Header = createTestBlockHeaderVersion2(t, includeTimeoutQC) + + // Set the block's TxnConnectStatusByIndex and update its hash in the header. + block.TxnConnectStatusByIndex = bitset.NewBitset().Set(0, true).Set(3, true) + block.Header.TxnConnectStatusByIndexHash = HashBitset(block.TxnConnectStatusByIndex) + + return &block +} + func expectedTransactions(includeV1Fields bool) []*MsgDeSoTxn { txns := []*MsgDeSoTxn{ { @@ -564,66 +595,67 @@ var expectedV0Header = &MsgDeSoHeader{ } func TestBlockSerialize(t *testing.T) { - assert := assert.New(t) require := require.New(t) - _ = assert - _ = require - // Add a signature to the block - priv, err := btcec.NewPrivateKey(btcec.S256()) - require.NoError(err) - expectedBlock.BlockProducerInfo.Signature, err = priv.Sign([]byte{0x01, 0x02, 0x03}) - require.NoError(err) + expectedBlocksToTest := []*MsgDeSoBlock{ + createTestBlockVersion1(t), + createTestBlockVersion2(t, false), + createTestBlockVersion2(t, true), + } - data, err := expectedBlock.ToBytes(false) - require.NoError(err) + for _, block := range expectedBlocksToTest { + data, err := block.ToBytes(false) + require.NoError(err) - testBlock := NewMessage(MsgTypeBlock).(*MsgDeSoBlock) - err = testBlock.FromBytes(data) - require.NoError(err) + testBlock := 
NewMessage(MsgTypeBlock).(*MsgDeSoBlock) + err = testBlock.FromBytes(data) + require.NoError(err) + + require.Equal(*block, *testBlock) + } - assert.Equal(*expectedBlock, *testBlock) } func TestBlockSerializeNoBlockProducerInfo(t *testing.T) { - assert := assert.New(t) require := require.New(t) - _ = assert - _ = require - - // Add a signature to the block - blockWithoutProducerInfo := *expectedBlock - blockWithoutProducerInfo.BlockProducerInfo = nil - data, err := blockWithoutProducerInfo.ToBytes(false) - require.NoError(err) + expectedBlocksToTest := []*MsgDeSoBlock{ + createTestBlockVersion1(t), + createTestBlockVersion2(t, false), + createTestBlockVersion2(t, true), + } + expectedBlocksToTest[0].BlockProducerInfo = nil + expectedBlocksToTest[1].BlockProducerInfo = nil - testBlock := NewMessage(MsgTypeBlock).(*MsgDeSoBlock) - err = testBlock.FromBytes(data) - require.NoError(err) + for _, block := range expectedBlocksToTest { + data, err := block.ToBytes(false) + require.NoError(err) - assert.Equal(blockWithoutProducerInfo, *testBlock) + testBlock := NewMessage(MsgTypeBlock).(*MsgDeSoBlock) + err = testBlock.FromBytes(data) + require.NoError(err) + require.Equal(*block, *testBlock) + } } func TestBlockRewardTransactionSerialize(t *testing.T) { - assert := assert.New(t) require := require.New(t) - _ = assert - _ = require - // Add a signature to the block - priv, err := btcec.NewPrivateKey(btcec.S256()) - require.NoError(err) - expectedBlock.BlockProducerInfo.Signature, err = priv.Sign([]byte{0x01, 0x02, 0x03}) - require.NoError(err) + expectedBlocksToTest := []*MsgDeSoBlock{ + createTestBlockVersion1(t), + createTestBlockVersion2(t, false), + createTestBlockVersion2(t, true), + } - data, err := expectedBlock.Txns[0].ToBytes(false) - require.NoError(err) + for _, block := range expectedBlocksToTest { + data, err := block.Txns[0].ToBytes(false) + require.NoError(err) - testTxn := NewMessage(MsgTypeTxn).(*MsgDeSoTxn) - err = testTxn.FromBytes(data) - 
require.NoError(err) - require.Equal(expectedBlock.Txns[0], testTxn) + testTxn := NewMessage(MsgTypeTxn).(*MsgDeSoTxn) + err = testTxn.FromBytes(data) + require.NoError(err) + require.Equal(block.Txns[0], testTxn) + } } func TestSerializeInv(t *testing.T) { diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go new file mode 100644 index 000000000..610a5528f --- /dev/null +++ b/lib/pos_block_producer.go @@ -0,0 +1,206 @@ +package lib + +import ( + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections/bitset" + "github.com/pkg/errors" + "math" + "time" +) + +// BlockTemplate is a dummy type that is used to label incomplete blocks. The only purpose of this type is to make it +// clear that the produced block is not yet ready to be processed, or sent to other nodes. Usually a BlockTemplate means +// that the block is missing the producer's signature. +type BlockTemplate *MsgDeSoBlock + +// PosBlockProducer is the new struct responsible for producing blocks in PoS. The PosBlockProducer struct is lightweight +// and does not maintain any new internal state. Instead, most of the information needed to produce a block is passed in +// as arguments. Both while instantiating the producer, or while creating a block to the CreateUnsignedBlock or +// CreateUnsignedTimeoutBlock methods. As such, PosBlockProducer exists primarily for the purpose of cleaner separation of +// concerns. Instantiating the PosBlockProducer can also be optional for nodes who do not wish to produce blocks. 
+type PosBlockProducer struct { + mp Mempool + params *DeSoParams + proposerPublicKey *PublicKey + proposerVotingPublicKey *bls.PublicKey +} + +func NewPosBlockProducer(mp Mempool, params *DeSoParams, proposerPublicKey *PublicKey, proposerVotingPublicKey *bls.PublicKey) *PosBlockProducer { + return &PosBlockProducer{ + mp: mp, + params: params, + proposerPublicKey: proposerPublicKey, + proposerVotingPublicKey: proposerVotingPublicKey, + } +} + +func (pbp *PosBlockProducer) SignBlock(blockTemplate BlockTemplate, signerPrivateKey *bls.PrivateKey) (*MsgDeSoBlock, error) { + // TODO + return nil, nil +} + +// CreateUnsignedBlock constructs an unsigned, PoS block with Fee-Time ordered transactions. This function should be used +// during happy path in consensus when a vote QC has been assembled. The block is unsigned, so to indicate its incompleteness, +// the block is returned as a BlockTemplate. +func (pbp *PosBlockProducer) CreateUnsignedBlock(latestBlockView *UtxoView, newBlockHeight uint64, view uint64, + proposerRandomSeedHash *RandomSeedHash, validatorsVoteQC *QuorumCertificate) (BlockTemplate, error) { + + // Create the block template. + block, err := pbp.createBlockTemplate(latestBlockView, newBlockHeight, view, proposerRandomSeedHash) + if err != nil { + return nil, errors.Wrapf(err, "PosBlockProducer.CreateUnsignedTimeoutBlock: Problem creating block template") + } + + // Fill out the validators vote QC field. + block.Header.ValidatorsVoteQC = validatorsVoteQC + return block, nil +} + +// CreateUnsignedTimeoutBlock constructs an unsigned, PoS block with Fee-Time ordered transactions. This function should be used +// during a timeout in consensus when a validators timeout aggregate QC has been assembled. The block is unsigned, +// and so is returned as a BlockTemplate. 
+func (pbp *PosBlockProducer) CreateUnsignedTimeoutBlock(latestBlockView *UtxoView, newBlockHeight uint64, view uint64, + proposerRandomSeedHash *RandomSeedHash, validatorsTimeoutAggregateQC *TimeoutAggregateQuorumCertificate) (BlockTemplate, error) { + + // Create the block template. + block, err := pbp.createBlockTemplate(latestBlockView, newBlockHeight, view, proposerRandomSeedHash) + if err != nil { + return nil, errors.Wrapf(err, "PosBlockProducer.CreateUnsignedTimeoutBlock: Problem creating block template") + } + + // Fill out the validators timeout aggregate QC field. + block.Header.ValidatorsTimeoutAggregateQC = validatorsTimeoutAggregateQC + return block, nil +} + +// createBlockTemplate is a helper function used by CreateUnsignedBlock and CreateUnsignedTimeoutBlock. It constructs +// a partially filled out block with Fee-Time ordered transactions. The returned block is complete except for +// the qc / aggregateQc fields, and the signature. +func (pbp *PosBlockProducer) createBlockTemplate(latestBlockView *UtxoView, newBlockHeight uint64, view uint64, + proposerRandomSeedHash *RandomSeedHash) (BlockTemplate, error) { + // First get the block without the header. + block, err := pbp.createBlockWithoutHeader(latestBlockView, newBlockHeight) + if err != nil { + return nil, errors.Wrapf(err, "PosBlockProducer.CreateBlockTemplate: Problem creating block without header") + } + + // Fill out what we can in the block header. This function won't fill out any of the consensus QC fields. + block.Header.Version = HeaderVersion2 + block.Header.PrevBlockHash = latestBlockView.TipHash + + // Compute the merkle root for the block now that all of the transactions have been added. + merkleRoot, _, err := ComputeMerkleRoot(block.Txns) + if err != nil { + return nil, err + } + block.Header.TransactionMerkleRoot = merkleRoot + // FIXME: Anything special that we should do with the timestamp? 
+ block.Header.TstampNanoSecs = uint64(time.Now().UnixNano()) + block.Header.Height = newBlockHeight + block.Header.ProposedInView = view + + // Set the proposer information. + block.Header.ProposerPublicKey = pbp.proposerPublicKey + block.Header.ProposerVotingPublicKey = pbp.proposerVotingPublicKey + block.Header.ProposerRandomSeedHash = proposerRandomSeedHash + return block, nil +} + +// createBlockWithoutHeader is a helper function used by createBlockTemplate. It constructs a partially filled out +// block with Fee-Time ordered transactions. The returned block all its contents filled, except for the header. +func (pbp *PosBlockProducer) createBlockWithoutHeader(latestBlockView *UtxoView, newBlockHeight uint64) (BlockTemplate, error) { + block := NewMessage(MsgTypeBlock).(*MsgDeSoBlock) + + // Create the block reward transaction. + blockRewardTxn := NewMessage(MsgTypeTxn).(*MsgDeSoTxn) + blockRewardOutput := &DeSoOutput{} + blockRewardOutput.AmountNanos = math.MaxUint64 + blockRewardTxn.TxOutputs = append(blockRewardTxn.TxOutputs, blockRewardOutput) + blockRewardTxn.TxnMeta = &BlockRewardMetadataa{} + blockRewardTxnSizeBytes, err := blockRewardTxn.ToBytes(true) + if err != nil { + return nil, errors.Wrapf(err, "Error computing block reward txn size: ") + } + + // Get block transactions from the mempool. + feeTimeTxns, txnConnectStatusByIndex, maxUtilityFee, err := pbp.getBlockTransactions( + latestBlockView, newBlockHeight, pbp.params.MinerMaxBlockSizeBytes-uint64(len(blockRewardTxnSizeBytes))) + if err != nil { + return nil, errors.Wrapf(err, "PosBlockProducer.createBlockWithoutHeader: Problem retrieving block transactions: ") + } + + // Update the block reward output and block transactions. + blockRewardOutput.AmountNanos = maxUtilityFee + block.Txns = append([]*MsgDeSoTxn{blockRewardTxn}, feeTimeTxns...) 
+ + // Set the RevolutionMetadata + block.TxnConnectStatusByIndex = txnConnectStatusByIndex + return block, nil +} + +// getBlockTransactions is used to retrieve fee-time ordered transactions from the mempool. +func (pbp *PosBlockProducer) getBlockTransactions(latestBlockView *UtxoView, newBlockHeight uint64, maxBlockSizeBytes uint64) ( + _txns []*MsgDeSoTxn, _txnConnectStatusByIndex *bitset.Bitset, _maxUtilityFee uint64, _err error) { + // Get Fee-Time ordered transactions from the mempool + feeTimeTxns := pbp.mp.GetTransactions() + + // Try to connect transactions one by one. + blocksTxns := []*MsgDeSoTxn{} + txnConnectStatusByIndex := bitset.NewBitset() + maxUtilityFee := uint64(0) + currentBlockSize := uint64(0) + blockUtxoView, err := latestBlockView.CopyUtxoView() + if err != nil { + return nil, nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") + } + for _, txn := range feeTimeTxns { + txnBytes, err := txn.ToBytes(false) + if err != nil { + return nil, nil, 0, errors.Wrapf(err, "Error getting transaction size: ") + } + // Skip over transactions that are too big. + if currentBlockSize+uint64(len(txnBytes)) > maxBlockSizeBytes { + continue + } + + blockUtxoViewCopy, err := blockUtxoView.CopyUtxoView() + if err != nil { + return nil, nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") + } + _, _, _, fees, err := blockUtxoViewCopy._connectTransaction( + txn.GetTxn(), txn.Hash(), int64(len(txnBytes)), uint32(newBlockHeight), true, false) + + // Check if the transaction connected. + if err == nil { + blockUtxoView = blockUtxoViewCopy + txnConnectStatusByIndex.Set(len(blocksTxns), true) + blocksTxns = append(blocksTxns, txn.GetTxn()) + currentBlockSize += uint64(len(txnBytes)) + // Compute BMF for the transaction. + _, utilityFee := computeBMF(fees) + maxUtilityFee += utilityFee + continue + } + // If the transaction didn't connect, we will try to add it as a failing transaction. 
+ blockUtxoViewCopy, err = blockUtxoView.CopyUtxoView() + if err != nil { + return nil, nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") + } + _, _, utilityFee, err := blockUtxoViewCopy._connectFailingTransaction(txn.GetTxn(), uint32(newBlockHeight), true) + if err != nil { + // If the transaction still doesn't connect, this means we encountered an invalid transaction. We will skip + // it and let some other process figure out what to do with it. Removing invalid transactions is a fast + // process, so we don't need to worry about it here. + continue + } + // If we get to this point, it means the transaction didn't connect but it was a valid transaction. We will + // add it to the block as a failing transaction. + blockUtxoView = blockUtxoViewCopy + txnConnectStatusByIndex.Set(len(blocksTxns), false) + blocksTxns = append(blocksTxns, txn.GetTxn()) + currentBlockSize += uint64(len(txnBytes)) + maxUtilityFee += utilityFee + } + + return blocksTxns, txnConnectStatusByIndex, maxUtilityFee, nil +} diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go new file mode 100644 index 000000000..fc615b7e2 --- /dev/null +++ b/lib/pos_block_producer_test.go @@ -0,0 +1,271 @@ +package lib + +import ( + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections/bitset" + "github.com/stretchr/testify/require" + "math" + "math/rand" + "testing" + "time" +) + +func TestCreateBlockTemplate(t *testing.T) { + require := require.New(t) + seed := int64(887) + rand := rand.New(rand.NewSource(seed)) + globalParams := _testGetDefaultGlobalParams() + feeMin := globalParams.MinimumNetworkFeeNanosPerKB + feeMax := uint64(2000) + passingTransactions := 50 + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + + // Set the frequency of mempool's database backup. 
+ maxMempoolPosSizeBytes := uint64(3000000000) + mempoolBackupIntervalMillis := uint64(30000) + + params, db := _posTestBlockchainSetupWithBalances(t, 200000, 200000) + params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 + latestBlockView, err := NewUtxoView(db, params, nil, nil) + require.NoError(err) + dir := _dbDirSetup(t) + + mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, + maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(mempool.Start()) + defer mempool.Stop() + require.True(mempool.IsRunning()) + + // Add a bunch of passing transactions to the mempool that we'll use to produce a block. + passingTxns := []*MsgDeSoTxn{} + totalUtilityFee := uint64(0) + for ii := 0; ii < passingTransactions; ii++ { + txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 20) + passingTxns = append(passingTxns, txn) + _, utilityFee := computeBMF(txn.TxnFeeNanos) + totalUtilityFee += utilityFee + _wrappedPosMempoolAddTransaction(t, mempool, txn) + } + + priv, err := bls.NewPrivateKey() + require.NoError(err) + pub := priv.PublicKey() + seedHash := &RandomSeedHash{} + _, err = seedHash.FromBytes(Sha256DoubleHash([]byte("seed")).ToBytes()) + require.NoError(err) + m0Pk := NewPublicKey(m0PubBytes) + pbp := NewPosBlockProducer(mempool, params, m0Pk, pub) + + blockTemplate, err := pbp.createBlockTemplate(latestBlockView, 3, 10, seedHash) + require.NoError(err) + require.NotNil(blockTemplate) + require.NotNil(blockTemplate.Header) + require.Equal(blockTemplate.Header.Version, HeaderVersion2) + require.Equal(blockTemplate.Header.PrevBlockHash, latestBlockView.TipHash) + root, _, err := ComputeMerkleRoot(blockTemplate.Txns) + require.NoError(err) + require.Equal(blockTemplate.Header.TransactionMerkleRoot, root) + require.Equal(true, blockTemplate.Header.TstampNanoSecs < uint64(time.Now().UnixNano())) + require.Equal(blockTemplate.Header.Height, uint64(3)) + 
require.Equal(blockTemplate.Header.ProposedInView, uint64(10)) + require.Equal(blockTemplate.Header.ProposerPublicKey, m0Pk) + require.Equal(blockTemplate.Header.ProposerVotingPublicKey, pub) + require.Equal(blockTemplate.Header.ProposerRandomSeedHash, seedHash) +} + +func TestCreateBlockWithoutHeader(t *testing.T) { + require := require.New(t) + seed := int64(881) + rand := rand.New(rand.NewSource(seed)) + globalParams := _testGetDefaultGlobalParams() + feeMin := globalParams.MinimumNetworkFeeNanosPerKB + feeMax := uint64(2000) + passingTransactions := 50 + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + params, db := _posTestBlockchainSetupWithBalances(t, 200000, 200000) + params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 + maxMempoolPosSizeBytes := uint64(3000000000) + mempoolBackupIntervalMillis := uint64(30000) + + latestBlockView, err := NewUtxoView(db, params, nil, nil) + require.NoError(err) + dir := _dbDirSetup(t) + + mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, + maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(mempool.Start()) + defer mempool.Stop() + require.True(mempool.IsRunning()) + + // Add a bunch of passing transactions to the mempool that we'll use to produce a block. 
+ passingTxns := []*MsgDeSoTxn{} + totalUtilityFee := uint64(0) + for ii := 0; ii < passingTransactions; ii++ { + txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 20) + passingTxns = append(passingTxns, txn) + _, utilityFee := computeBMF(txn.TxnFeeNanos) + totalUtilityFee += utilityFee + _wrappedPosMempoolAddTransaction(t, mempool, txn) + } + + pbp := NewPosBlockProducer(mempool, params, nil, nil) + txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions( + latestBlockView, 3, 50000) + require.NoError(err) + + blockTemplate, err := pbp.createBlockWithoutHeader(latestBlockView, 3) + require.NoError(err) + require.Equal(txns, blockTemplate.Txns[1:]) + require.Equal(txnConnectStatus, blockTemplate.TxnConnectStatusByIndex) + require.Equal(maxUtilityFee, blockTemplate.Txns[0].TxOutputs[0].AmountNanos) + require.Equal(NewMessage(MsgTypeHeader).(*MsgDeSoHeader), blockTemplate.Header) + require.Nil(blockTemplate.BlockProducerInfo) +} + +func TestGetBlockTransactions(t *testing.T) { + require := require.New(t) + seed := int64(381) + rand := rand.New(rand.NewSource(seed)) + passingTransactions := 50 + failingTransactions := 30 + invalidTransactions := 10 + m1InitialBalance := uint64(20000) + + globalParams := _testGetDefaultGlobalParams() + feeMin := globalParams.MinimumNetworkFeeNanosPerKB + feeMax := uint64(2000) + maxMempoolPosSizeBytes := uint64(3000000000) + mempoolBackupIntervalMillis := uint64(30000) + + params, db := _posTestBlockchainSetupWithBalances(t, 200000, m1InitialBalance) + params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + m1PubBytes, _, _ := Base58CheckDecode(m1Pub) + + latestBlockView, err := NewUtxoView(db, params, nil, nil) + require.NoError(err) + dir := _dbDirSetup(t) + + mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, + maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(mempool.Start()) + defer 
mempool.Stop() + require.True(mempool.IsRunning()) + + // First test happy path with a bunch of passing transactions. + passingTxns := []*MsgDeSoTxn{} + totalUtilityFee := uint64(0) + for ii := 0; ii < passingTransactions; ii++ { + txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 20) + passingTxns = append(passingTxns, txn) + _, utilityFee := computeBMF(txn.TxnFeeNanos) + totalUtilityFee += utilityFee + _wrappedPosMempoolAddTransaction(t, mempool, txn) + } + + pbp := NewPosBlockProducer(mempool, params, nil, nil) + _testProduceBlockNoSizeLimit(t, mempool, pbp, latestBlockView, 3, + len(passingTxns), 0, 0) + + // Now test the case where we have a bunch of transactions that don't pass. + // A failing transaction will try to send an excessive balance in a basic transfer. + failingTxns := []*MsgDeSoTxn{} + for ii := 0; ii < failingTransactions; ii++ { + failingTxn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 20) + failingTxn.TxOutputs = append(failingTxn.TxOutputs, &DeSoOutput{ + PublicKey: m1PubBytes, + AmountNanos: 1e10, + }) + _signTxn(t, failingTxn, m0Priv) + effectiveFee := failingTxn.TxnFeeNanos * globalParams.FailingTransactionBMFMultiplierBasisPoints / 10000 + _, utilityFee := computeBMF(effectiveFee) + totalUtilityFee += utilityFee + failingTxns = append(failingTxns, failingTxn) + _wrappedPosMempoolAddTransaction(t, mempool, failingTxn) + } + _testProduceBlockNoSizeLimit(t, mempool, pbp, latestBlockView, 3, + len(passingTxns), len(failingTxns), 0) + + // We will now test some invalid transactions, which make it in the mempool, yet will not connect to utxo view, + // nor as failing transactions. To do this, we will create a couple transactions with high spends compared to their + // fees. The spend will be high enough so that the public key won't have enough balance to cover the fees of + // the remaining transactions. 
+ invalidTxns := []*MsgDeSoTxn{} + for ii := 0; ii < invalidTransactions; ii++ { + invalidTxn := _generateTestTxn(t, rand, feeMin, feeMax, m1PubBytes, m1Priv, 100, 20) + if m1InitialBalance < invalidTxn.TxnFeeNanos+1 { + t.Fatalf("m1InitialBalance (%d) must be greater than txn fee (%d) + 1", m1InitialBalance, invalidTxn.TxnFeeNanos+1) + } + invalidTxn.TxOutputs = append(invalidTxn.TxOutputs, &DeSoOutput{ + PublicKey: m2PkBytes, + AmountNanos: m1InitialBalance - invalidTxn.TxnFeeNanos - 1, + }) + _signTxn(t, invalidTxn, m1Priv) + invalidTxns = append(invalidTxns, invalidTxn) + _wrappedPosMempoolAddTransaction(t, mempool, invalidTxn) + } + + _testProduceBlockNoSizeLimit(t, mempool, pbp, latestBlockView, 3, + len(passingTxns)+1, len(failingTxns), len(invalidTxns)-1) + // Now test the case where we have too many transactions in the mempool compared to the max block size. + // In this case, some transactions should not make it into the block, despite being valid. The transactions + // that are rejected should have the lowest Fee-Time priority. + + latestBlockViewCopy, err := latestBlockView.CopyUtxoView() + require.NoError(err) + txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions(latestBlockView, 3, 1000) + require.NoError(err) + require.Equal(latestBlockViewCopy, latestBlockView) + require.Equal(true, len(passingTxns) > len(txns)) + require.Equal(true, len(passingTxns) > txnConnectStatus.Size()) + totalUtilityFee = 0 + for _, txn := range txns { + _, utilityFee := computeBMF(txn.TxnFeeNanos) + totalUtilityFee += utilityFee + } + require.Equal(totalUtilityFee, maxUtilityFee) + + // Create an in-memory mempool instance and add the transactions to it. Each transaction will be added with a + // Simulated Transaction Timestamp and afterward, mempool will be queried for the transactions. The transactions should + // be returned in the same order as the transaction from getBlockTransactions. 
+ testMempool := NewPosMempool(params, globalParams, latestBlockView, 2, "", true, + maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(testMempool.Start()) + defer testMempool.Stop() + currentTime := uint64(time.Now().UnixMicro()) + for ii, txn := range txns { + // Use the Simulated Transaction Timestamp. + mtxn := NewMempoolTransaction(txn, currentTime+uint64(ii)) + require.NoError(testMempool.AddTransaction(mtxn, false)) + } + newTxns := testMempool.GetTransactions() + require.Equal(len(txns), len(newTxns)) + for ii := 0; ii < len(txns); ii++ { + require.Equal(txns[ii], newTxns[ii].GetTxn()) + } +} + +func _testProduceBlockNoSizeLimit(t *testing.T, mp *PosMempool, pbp *PosBlockProducer, latestBlockView *UtxoView, blockHeight uint64, + numPassing int, numFailing int, numInvalid int) (_txns []*MsgDeSoTxn, _txnConnectStatusByIndex *bitset.Bitset, _maxUtilityFee uint64) { + require := require.New(t) + + totalAcceptedTxns := numPassing + numFailing + totalTxns := numPassing + numFailing + numInvalid + require.Equal(totalTxns, len(mp.GetTransactions())) + + latestBlockViewCopy, err := latestBlockView.CopyUtxoView() + require.NoError(err) + txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions(latestBlockView, blockHeight, math.MaxUint64) + require.NoError(err) + require.Equal(latestBlockViewCopy, latestBlockView) + require.Equal(totalAcceptedTxns, len(txns)) + require.Equal(true, totalAcceptedTxns >= txnConnectStatus.Size()) + numConnected := 0 + for ii := range txns { + if txnConnectStatus.Get(ii) { + numConnected++ + } + } + require.Equal(numPassing, numConnected) + return txns, txnConnectStatus, maxUtilityFee +} diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 445910cd5..9f919ee83 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -1,6 +1,7 @@ package lib import ( + "fmt" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" @@ -20,21 +21,43 @@ type Mempool interface { Start() error 
Stop() IsRunning() bool - AddTransaction(txn *MsgDeSoTxn) error + AddTransaction(txn *MempoolTransaction, verifySignature bool) error RemoveTransaction(txnHash *BlockHash) error - GetTransaction(txnHash *BlockHash) *MsgDeSoTxn - GetTransactions() []*MsgDeSoTxn + GetTransaction(txnHash *BlockHash) *MempoolTransaction + GetTransactions() []*MempoolTransaction GetIterator() MempoolIterator + Refresh() error UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) UpdateGlobalParams(globalParams *GlobalParamsEntry) } type MempoolIterator interface { Next() bool - Value() (*MsgDeSoTxn, bool) + Value() (*MempoolTransaction, bool) Initialized() bool } +// MempoolTransaction is a simple wrapper around MsgDeSoTxn that adds a timestamp field. +type MempoolTransaction struct { + *MsgDeSoTxn + TimestampUnixMicro uint64 +} + +func NewMempoolTransaction(txn *MsgDeSoTxn, timestampUnixMicro uint64) *MempoolTransaction { + return &MempoolTransaction{ + MsgDeSoTxn: txn, + TimestampUnixMicro: timestampUnixMicro, + } +} + +func (mtxn *MempoolTransaction) GetTxn() *MsgDeSoTxn { + return mtxn.MsgDeSoTxn +} + +func (mtxn *MempoolTransaction) GetTimestampUnixMicro() uint64 { + return mtxn.TimestampUnixMicro +} + // PosMempool is used by the node to keep track of uncommitted transactions. The main responsibilities of the PosMempool // include addition/removal of transactions, back up of transaction to database, and retrieval of transactions ordered // by Fee-Time algorithm. More on the Fee-Time algorithm can be found in the documentation of TransactionRegister. @@ -46,6 +69,9 @@ type PosMempool struct { // globalParams are used to track the latest GlobalParamsEntry. In case the GlobalParamsEntry changes, the PosMempool // is equipped with UpdateGlobalParams method to handle upgrading GlobalParamsEntry. globalParams *GlobalParamsEntry + // inMemoryOnly is a setup flag that determines whether the mempool should be backed up to db or not. 
If set to true, + // the mempool will not open a db nor instantiate the persister. + inMemoryOnly bool // dir of the directory where the database should be stored. dir string // db is the database that the mempool will use to persist transactions. @@ -54,22 +80,32 @@ type PosMempool struct { // txnRegister is the in-memory data structure keeping track of the transactions in the mempool. The TransactionRegister // is responsible for ordering transactions by the Fee-Time algorithm. txnRegister *TransactionRegister - // ledger is a simple in-memory data structure that keeps track of cumulative transaction fees in the mempool. - // The ledger keeps track of how much each user would have spent in fees across all their transactions in the mempool. - ledger *BalanceLedger // persister is responsible for interfacing with the database. The persister backs up mempool transactions so not to // lose them when node reboots. The persister also retrieves transactions from the database when the node starts up. // The persister runs on its dedicated thread and events are used to notify the persister thread whenever // transactions are added/removed from the mempool. The persister thread then updates the database accordingly. persister *MempoolPersister + // ledger is a simple data structure that keeps track of cumulative transaction fees in the mempool. + // The ledger keeps track of how much each user would have spent in fees across all their transactions in the mempool. + ledger *BalanceLedger + // nonceTracker is responsible for keeping track of a (public key, nonce) -> Txn index. The index is useful in + // facilitating a "replace by higher fee" feature. This feature gives users the ability to replace their existing + // mempool transaction with a new transaction having the same nonce but higher fee. + nonceTracker *NonceTracker - // latestBlockView is used to check if a transaction is valid before being added to the mempool. 
The latestBlockView + // readOnlyLatestBlockView is used to check if a transaction is valid before being added to the mempool. The readOnlyLatestBlockView // checks if the transaction has a valid signature and if the transaction's sender has enough funds to cover the fee. - // The latestBlockView should be updated whenever a new block is added to the blockchain via UpdateLatestBlock. - latestBlockView *UtxoView + // The readOnlyLatestBlockView should be updated whenever a new block is added to the blockchain via UpdateLatestBlock. + // PosMempool only needs read-access to the block view. It isn't necessary to copy the block view before passing it + // to the mempool. + readOnlyLatestBlockView *UtxoView // latestBlockNode is used to infer the latest block height. The latestBlockNode should be updated whenever a new // block is added to the blockchain via UpdateLatestBlock. latestBlockHeight uint64 + // maxMempoolPosSizeBytes is the maximum aggregate number of bytes of transactions included in the PoS mempool. + maxMempoolPosSizeBytes uint64 + // mempoolBackupIntervalMillis is the frequency with which pos mempool persists transactions to storage. + mempoolBackupIntervalMillis uint64 } // PosMempoolIterator is a wrapper around FeeTimeIterator, modified to return MsgDeSoTxn instead of MempoolTx. 
@@ -81,12 +117,13 @@ func (it *PosMempoolIterator) Next() bool { return it.it.Next() } -func (it *PosMempoolIterator) Value() (*MsgDeSoTxn, bool) { +func (it *PosMempoolIterator) Value() (*MempoolTransaction, bool) { txn, ok := it.it.Value() if txn == nil || txn.Tx == nil { return nil, ok } - return txn.Tx, ok + added := uint64(txn.Added.UnixMicro()) + return NewMempoolTransaction(txn.Tx, added), ok } func (it *PosMempoolIterator) Initialized() bool { @@ -97,163 +134,207 @@ func NewPosMempoolIterator(it *FeeTimeIterator) *PosMempoolIterator { return &PosMempoolIterator{it: it} } -func NewPosMempool(params *DeSoParams, globalParams *GlobalParamsEntry, latestBlockView *UtxoView, - latestBlockHeight uint64, dir string) *PosMempool { +func NewPosMempool(params *DeSoParams, globalParams *GlobalParamsEntry, readOnlyLatestBlockView *UtxoView, + latestBlockHeight uint64, dir string, inMemoryOnly bool, maxMempoolPosSizeBytes uint64, + mempoolBackupIntervalMillis uint64) *PosMempool { return &PosMempool{ - status: PosMempoolStatusNotRunning, - params: params, - globalParams: globalParams, - dir: dir, - latestBlockView: latestBlockView, - latestBlockHeight: latestBlockHeight, + status: PosMempoolStatusNotRunning, + params: params, + globalParams: globalParams, + inMemoryOnly: inMemoryOnly, + dir: dir, + readOnlyLatestBlockView: readOnlyLatestBlockView, + latestBlockHeight: latestBlockHeight, + maxMempoolPosSizeBytes: maxMempoolPosSizeBytes, + mempoolBackupIntervalMillis: mempoolBackupIntervalMillis, } } -func (dmp *PosMempool) Start() error { - dmp.Lock() - defer dmp.Unlock() +func (mp *PosMempool) Start() error { + mp.Lock() + defer mp.Unlock() - if dmp.IsRunning() { + if mp.IsRunning() { return nil } - // Setup the database. 
- mempoolDirectory := filepath.Join(dmp.dir, "mempool") - opts := DefaultBadgerOptions(mempoolDirectory) - db, err := badger.Open(opts) - if err != nil { - return errors.Wrapf(err, "PosMempool.Start: Problem setting up database") - } - dmp.db = db - - // Create the transaction register and ledger - dmp.txnRegister = NewTransactionRegister(dmp.globalParams) - dmp.ledger = NewBalanceLedger() - - // Create the persister - dmp.persister = NewMempoolPersister(dmp.db, int(dmp.params.MempoolBackupTimeMilliseconds)) + // Create the transaction register, the ledger, and the nonce tracker, + mp.txnRegister = NewTransactionRegister(mp.globalParams) + mp.ledger = NewBalanceLedger() + mp.nonceTracker = NewNonceTracker() - // Start the persister and retrieve transactions from the database. - dmp.persister.Start() - err = dmp.loadPersistedTransactions() - if err != nil { - return errors.Wrapf(err, "PosMempool.Start: Problem loading persisted transactions") + // Setup the database and create the persister + if !mp.inMemoryOnly { + mempoolDirectory := filepath.Join(mp.dir, "mempool") + opts := DefaultBadgerOptions(mempoolDirectory) + db, err := badger.Open(opts) + if err != nil { + return errors.Wrapf(err, "PosMempool.Start: Problem setting up database") + } + mp.db = db + mp.persister = NewMempoolPersister(mp.db, int(mp.mempoolBackupIntervalMillis)) + + // Start the persister and retrieve transactions from the database. + mp.persister.Start() + err = mp.loadPersistedTransactions() + if err != nil { + return errors.Wrapf(err, "PosMempool.Start: Problem loading persisted transactions") + } } - dmp.status = PosMempoolStatusRunning + mp.status = PosMempoolStatusRunning return nil } -func (dmp *PosMempool) Stop() { - dmp.Lock() - defer dmp.Unlock() +func (mp *PosMempool) Stop() { + mp.Lock() + defer mp.Unlock() - if !dmp.IsRunning() { + if !mp.IsRunning() { return } // Close the persister and stop the database. 
- if err := dmp.persister.Stop(); err != nil { - glog.Errorf("PosMempool.Stop: Problem stopping persister: %v", err) - } - if err := dmp.db.Close(); err != nil { - glog.Errorf("PosMempool.Stop: Problem closing database: %v", err) + if !mp.inMemoryOnly { + if err := mp.persister.Stop(); err != nil { + glog.Errorf("PosMempool.Stop: Problem stopping persister: %v", err) + } + if err := mp.db.Close(); err != nil { + glog.Errorf("PosMempool.Stop: Problem closing database: %v", err) + } } - // Reset the transaction register and the ledger. - dmp.txnRegister.Reset() - dmp.ledger.Reset() - dmp.status = PosMempoolStatusNotRunning + // Reset the transaction register, the ledger, and the nonce tracker. + mp.txnRegister.Reset() + mp.ledger.Reset() + mp.nonceTracker.Reset() + + mp.status = PosMempoolStatusNotRunning } -func (dmp *PosMempool) IsRunning() bool { - return dmp.status == PosMempoolStatusRunning +func (mp *PosMempool) IsRunning() bool { + return mp.status == PosMempoolStatusRunning } // AddTransaction validates a MsgDeSoTxn transaction and adds it to the mempool if it is valid. -// If the mempool overflows as a result of adding the transaction, the mempool is pruned. -func (dmp *PosMempool) AddTransaction(txn *MsgDeSoTxn) error { +// If the mempool overflows as a result of adding the transaction, the mempool is pruned. The +// transaction signature verification can be skipped if verifySignature is passed as true. +func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction, verifySignature bool) error { + if mtxn == nil || mtxn.GetTxn() == nil { + return fmt.Errorf("PosMempool.AddTransaction: Cannot add a nil transaction") + } + // First, validate that the transaction is properly formatted according to BalanceModel. We acquire a read lock on // the mempool. This allows multiple goroutines to safely perform transaction validation concurrently. In particular, // transaction signature verification can be parallelized. 
- if err := dmp.validateTransaction(txn); err != nil { + if err := mp.validateTransaction(mtxn.GetTxn(), verifySignature); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem verifying transaction") } // If we get this far, it means that the transaction is valid. We can now add it to the mempool. // We lock the mempool to ensure that no other thread is modifying it while we add the transaction. - dmp.Lock() - defer dmp.Unlock() + mp.Lock() + defer mp.Unlock() - if !dmp.IsRunning() { + if !mp.IsRunning() { return errors.Wrapf(MempoolErrorNotRunning, "PosMempool.AddTransaction: ") } // Construct the MempoolTx from the MsgDeSoTxn. - mempoolTx, err := NewMempoolTx(txn, dmp.latestBlockHeight) + mempoolTx, err := NewMempoolTx(mtxn.GetTxn(), mtxn.GetTimestampUnixMicro(), mp.latestBlockHeight) if err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem constructing MempoolTx") } // Add the transaction to the mempool and then prune if needed. - if err := dmp.addTransactionNoLock(mempoolTx, true); err != nil { + if err := mp.addTransactionNoLock(mempoolTx, true); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem adding transaction to mempool") } - if err := dmp.pruneNoLock(); err != nil { + if err := mp.pruneNoLock(); err != nil { glog.Errorf("PosMempool.AddTransaction: Problem pruning mempool: %v", err) } return nil } -func (dmp *PosMempool) validateTransaction(txn *MsgDeSoTxn) error { - dmp.RLock() - defer dmp.RUnlock() +func (mp *PosMempool) validateTransaction(txn *MsgDeSoTxn, verifySignature bool) error { + mp.RLock() + defer mp.RUnlock() - if err := ValidateDeSoTxnSanityBalanceModel(txn, dmp.latestBlockHeight, dmp.params, dmp.globalParams); err != nil { + if err := CheckTransactionSanity(txn, uint32(mp.latestBlockHeight), mp.params); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem validating transaction sanity") } - // Check transaction signature - if _, err := 
dmp.latestBlockView.VerifySignature(txn, uint32(dmp.latestBlockHeight)); err != nil { + if err := ValidateDeSoTxnSanityBalanceModel(txn, mp.latestBlockHeight, mp.params, mp.globalParams); err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Problem validating transaction sanity") + } + + if err := mp.readOnlyLatestBlockView.ValidateTransactionNonce(txn, mp.latestBlockHeight); err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Problem validating transaction nonce") + } + + if !verifySignature { + return nil + } + + // Check transaction signature. + if _, err := mp.readOnlyLatestBlockView.VerifySignature(txn, uint32(mp.latestBlockHeight)); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Signature validation failed") } return nil } -func (dmp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) error { +func (mp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) error { userPk := NewPublicKey(txn.Tx.PublicKey) txnFee := txn.Tx.TxnFeeNanos // Validate that the user has enough balance to cover the transaction fees. - spendableBalanceNanos, err := dmp.latestBlockView.GetSpendableDeSoBalanceNanosForPublicKey(userPk.ToBytes(), - uint32(dmp.latestBlockHeight)) + spendableBalanceNanos, err := mp.readOnlyLatestBlockView.GetSpendableDeSoBalanceNanosForPublicKey(userPk.ToBytes(), + uint32(mp.latestBlockHeight)) if err != nil { return errors.Wrapf(err, "PosMempool.addTransactionNoLock: Problem getting spendable balance") } - if err := dmp.ledger.CanIncreaseEntryWithLimit(*userPk, txnFee, spendableBalanceNanos); err != nil { + if err := mp.ledger.CanIncreaseEntryWithLimit(*userPk, txnFee, spendableBalanceNanos); err != nil { return errors.Wrapf(err, "PosMempool.addTransactionNoLock: Problem checking balance increase for transaction with"+ "hash %v, fee %v", txn.Tx.Hash(), txnFee) } - // If we get here, it means that the transaction's sender has enough balance to cover transaction fees. 
We can now - // add the transaction to mempool. - if err := dmp.txnRegister.AddTransaction(txn); err != nil { + // Check the nonceTracker to see if this transaction is meant to replace an existing one. + existingTxn := mp.nonceTracker.GetTxnByPublicKeyNonce(*userPk, *txn.Tx.TxnNonce) + if existingTxn != nil && existingTxn.FeePerKB > txn.FeePerKB { + return errors.Wrapf(MempoolFailedReplaceByHigherFee, "PosMempool.AddTransaction: Problem replacing transaction "+ + "by higher fee failed. New transaction has lower fee.") + } + + // If we get here, it means that the transaction's sender has enough balance to cover transaction fees. Moreover, if + // this transaction is meant to replace an existing one, at this point we know the new txn has a sufficient fee to + // do so. We can now add the transaction to mempool. + if err := mp.txnRegister.AddTransaction(txn); err != nil { return errors.Wrapf(err, "PosMempool.addTransactionNoLock: Problem adding txn to register") } - // We update the reserved balance to include the newly added transaction's fee. - dmp.ledger.IncreaseEntry(*userPk, txnFee) + // If we've determined that this transaction is meant to replace an existing one, we remove the existing transaction now. + if existingTxn != nil { + if err := mp.removeTransactionNoLock(existingTxn, true); err != nil { + recoveryErr := mp.txnRegister.RemoveTransaction(txn) + return errors.Wrapf(err, "PosMempool.AddTransaction: Problem removing old transaction from mempool during "+ + "replacement with higher fee. Recovery error: %v", recoveryErr) + } + } + + // At this point the transaction is in the mempool. We can now update the ledger and nonce tracker. + mp.ledger.IncreaseEntry(*userPk, txnFee) + mp.nonceTracker.AddTxnByPublicKeyNonce(txn, *userPk, *txn.Tx.TxnNonce) // Emit an event for the newly added transaction. 
- if persistToDb { + if persistToDb && !mp.inMemoryOnly { event := &MempoolEvent{ Txn: txn, Type: MempoolEventAdd, } - dmp.persister.EnqueueEvent(event) + mp.persister.EnqueueEvent(event) } return nil @@ -261,14 +342,18 @@ func (dmp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) er // loadPersistedTransactions fetches transactions from the persister's storage and adds the transactions to the mempool. // No lock is held and (persistToDb = false) flag is used when adding transactions internally. -func (dmp *PosMempool) loadPersistedTransactions() error { - txns, err := dmp.persister.GetPersistedTransactions() +func (mp *PosMempool) loadPersistedTransactions() error { + if mp.inMemoryOnly { + return nil + } + + txns, err := mp.persister.GetPersistedTransactions() if err != nil { return errors.Wrapf(err, "PosMempool.Start: Problem retrieving transactions from persister") } // We set the persistToDb flag to false so that persister doesn't try to save the transactions. for _, txn := range txns { - if err := dmp.addTransactionNoLock(txn, false); err != nil { + if err := mp.addTransactionNoLock(txn, false); err != nil { glog.Errorf("PosMempool.Start: Problem adding transaction with hash (%v) from persister: %v", txn.Hash, err) } @@ -277,81 +362,89 @@ func (dmp *PosMempool) loadPersistedTransactions() error { } // RemoveTransaction is the main function for removing a transaction from the mempool. -func (dmp *PosMempool) RemoveTransaction(txnHash *BlockHash) error { - dmp.Lock() - defer dmp.Unlock() +func (mp *PosMempool) RemoveTransaction(txnHash *BlockHash) error { + mp.Lock() + defer mp.Unlock() - if !dmp.IsRunning() { + if !mp.IsRunning() { return errors.Wrapf(MempoolErrorNotRunning, "PosMempool.RemoveTransaction: ") } // Get the transaction from the register. 
- txn := dmp.txnRegister.GetTransaction(txnHash) + txn := mp.txnRegister.GetTransaction(txnHash) if txn == nil { return nil } - return dmp.removeTransactionNoLock(txn, true) + return mp.removeTransactionNoLock(txn, true) } -func (dmp *PosMempool) removeTransactionNoLock(txn *MempoolTx, persistToDb bool) error { +func (mp *PosMempool) removeTransactionNoLock(txn *MempoolTx, persistToDb bool) error { // First, sanity check our reserved balance. userPk := NewPublicKey(txn.Tx.PublicKey) // Remove the transaction from the register. - if err := dmp.txnRegister.RemoveTransaction(txn); err != nil { + if err := mp.txnRegister.RemoveTransaction(txn); err != nil { return errors.Wrapf(err, "PosMempool.removeTransactionNoLock: Problem removing txn from register") } - // Decrease the appropriate ledger's balance by the transaction fee. - dmp.ledger.DecreaseEntry(*userPk, txn.Fee) + + // Remove the txn from the balance ledger and the nonce tracker. + mp.ledger.DecreaseEntry(*userPk, txn.Fee) + mp.nonceTracker.RemoveTxnByPublicKeyNonce(*userPk, *txn.Tx.TxnNonce) // Emit an event for the removed transaction. - if persistToDb { + if persistToDb && !mp.inMemoryOnly { event := &MempoolEvent{ Txn: txn, Type: MempoolEventRemove, } - dmp.persister.EnqueueEvent(event) + mp.persister.EnqueueEvent(event) } return nil } // GetTransaction returns the transaction with the given hash if it exists in the mempool. This function is thread-safe. 
-func (dmp *PosMempool) GetTransaction(txnHash *BlockHash) *MsgDeSoTxn { - dmp.RLock() - defer dmp.RUnlock() +func (mp *PosMempool) GetTransaction(txnHash *BlockHash) *MempoolTransaction { + mp.RLock() + defer mp.RUnlock() - if !dmp.IsRunning() { + if !mp.IsRunning() { return nil } - txn := dmp.txnRegister.GetTransaction(txnHash) + txn := mp.txnRegister.GetTransaction(txnHash) if txn == nil || txn.Tx == nil { return nil } - return txn.Tx + return NewMempoolTransaction(txn.Tx, uint64(txn.Added.UnixMicro())) } // GetTransactions returns all transactions in the mempool ordered by the Fee-Time algorithm. This function is thread-safe. -func (dmp *PosMempool) GetTransactions() []*MsgDeSoTxn { - dmp.RLock() - defer dmp.RUnlock() +func (mp *PosMempool) GetTransactions() []*MempoolTransaction { + mp.RLock() + defer mp.RUnlock() - if !dmp.IsRunning() { + if !mp.IsRunning() { return nil } - var desoTxns []*MsgDeSoTxn - poolTxns := dmp.txnRegister.GetFeeTimeTransactions() + var mempoolTxns []*MempoolTransaction + poolTxns := mp.getTransactionsNoLock() for _, txn := range poolTxns { if txn == nil || txn.Tx == nil { continue } - desoTxns = append(desoTxns, txn.Tx) + + mtxn := NewMempoolTransaction(txn.Tx, uint64(txn.Added.UnixMicro())) + mempoolTxns = append(mempoolTxns, mtxn) } - return desoTxns + return mempoolTxns +} + +func (mp *PosMempool) getTransactionsNoLock() []*MempoolTx { + return mp.txnRegister.GetFeeTimeTransactions() } // GetIterator returns an iterator for the mempool transactions. The iterator can be used to peek transactions in the @@ -364,31 +457,91 @@ func (dmp *PosMempool) GetTransactions() []*MsgDeSoTxn { // } // // Note that the iteration pattern is not thread-safe. Another lock should be used to ensure thread-safety. 
-func (dmp *PosMempool) GetIterator() MempoolIterator { - dmp.RLock() - defer dmp.RUnlock() +func (mp *PosMempool) GetIterator() MempoolIterator { + mp.RLock() + defer mp.RUnlock() + + if !mp.IsRunning() { + return nil + } - if !dmp.IsRunning() { + return NewPosMempoolIterator(mp.txnRegister.GetFeeTimeIterator()) +} + +// Refresh can be used to evict stale transactions from the mempool. However, it is a bit expensive and should be used +// sparingly. Upon being called, Refresh will create an in-memory temp PosMempool and populate it with transactions from +// the main mempool. The temp mempool will have the most up-to-date readOnlyLatestBlockView, Height, and globalParams. Any +// transaction that fails to add to the temp mempool will be removed from the main mempool. +func (mp *PosMempool) Refresh() error { + mp.Lock() + defer mp.Unlock() + + if !mp.IsRunning() { return nil } - return NewPosMempoolIterator(dmp.txnRegister.GetFeeTimeIterator()) + if err := mp.refreshNoLock(); err != nil { + return errors.Wrapf(err, "PosMempool.Refresh: Problem refreshing mempool") + } + return nil +} + +func (mp *PosMempool) refreshNoLock() error { + // Create the temporary in-memory mempool with the most up-to-date readOnlyLatestBlockView, Height, and globalParams. + tempPool := NewPosMempool(mp.params, mp.globalParams, mp.readOnlyLatestBlockView, mp.latestBlockHeight, "", true, + mp.maxMempoolPosSizeBytes, mp.mempoolBackupIntervalMillis) + if err := tempPool.Start(); err != nil { + return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem starting temp pool") + } + defer tempPool.Stop() + + // Add all transactions from the main mempool to the temp mempool. Skip signature verification. 
+ var txnsToRemove []*MempoolTx + txns := mp.getTransactionsNoLock() + for _, txn := range txns { + mtxn := NewMempoolTransaction(txn.Tx, uint64(txn.Added.UnixMicro())) + err := tempPool.AddTransaction(mtxn, false) + if err == nil { + continue + } + + // If we've encountered an error while adding the transaction to the temp mempool, we add it to our txnsToRemove list. + txnsToRemove = append(txnsToRemove, txn) + } + + // Now remove all transactions from the txnsToRemove list from the main mempool. + for _, txn := range txnsToRemove { + if err := mp.removeTransactionNoLock(txn, true); err != nil { + glog.Errorf("PosMempool.refreshNoLock: Problem removing transaction with hash (%v): %v", txn.Hash, err) + } + } + + // Log the hashes for transactions that were removed. + if len(txnsToRemove) > 0 { + var removedTxnHashes []string + for _, txn := range txnsToRemove { + removedTxnHashes = append(removedTxnHashes, txn.Hash.String()) + } + glog.Infof("PosMempool.refreshNoLock: Transactions with the following hashes were removed: %v", + strings.Join(removedTxnHashes, ",")) + } + return nil } // pruneNoLock removes transactions from the mempool until the mempool size is below the maximum allowed size. The transactions // are removed in lowest to highest Fee-Time priority, i.e. opposite way that transactions are ordered in // GetTransactions(). 
-func (dmp *PosMempool) pruneNoLock() error { - if dmp.txnRegister.Size() < dmp.params.MaxMempoolPosSizeBytes { +func (mp *PosMempool) pruneNoLock() error { + if mp.txnRegister.Size() < mp.maxMempoolPosSizeBytes { return nil } - prunedTxns, err := dmp.txnRegister.PruneToSize(dmp.params.MaxMempoolPosSizeBytes) + prunedTxns, err := mp.txnRegister.PruneToSize(mp.maxMempoolPosSizeBytes) if err != nil { return errors.Wrapf(err, "PosMempool.pruneNoLock: Problem pruning mempool") } for _, prunedTxn := range prunedTxns { - if err := dmp.removeTransactionNoLock(prunedTxn, true); err != nil { + if err := mp.removeTransactionNoLock(prunedTxn, true); err != nil { // We should never get to here since the transaction was already pruned from the TransactionRegister. glog.Errorf("PosMempool.pruneNoLock: Problem removing transaction from mempool: %v", err) } @@ -397,51 +550,32 @@ func (dmp *PosMempool) pruneNoLock() error { } // UpdateLatestBlock updates the latest block view and latest block node in the mempool. -func (dmp *PosMempool) UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) { - dmp.Lock() - defer dmp.Unlock() +func (mp *PosMempool) UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) { + mp.Lock() + defer mp.Unlock() - if !dmp.IsRunning() { + if !mp.IsRunning() { return } - dmp.latestBlockView = blockView - dmp.latestBlockHeight = blockHeight + mp.readOnlyLatestBlockView = blockView + mp.latestBlockHeight = blockHeight } // UpdateGlobalParams updates the global params in the mempool. Changing GlobalParamsEntry can impact the validity of // transactions in the mempool. For example, if the minimum network fee is increased, transactions with a fee below the // new minimum will be removed from the mempool. To safely handle this, this method re-creates the TransactionRegister // with the new global params and re-adds all transactions in the mempool to the new register. 
-func (dmp *PosMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { - dmp.Lock() - defer dmp.Unlock() +func (mp *PosMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { + mp.Lock() + defer mp.Unlock() - if !dmp.IsRunning() { + if !mp.IsRunning() { return } - dmp.globalParams = globalParams - mempoolTxns := dmp.txnRegister.GetFeeTimeTransactions() - newRegister := NewTransactionRegister(dmp.globalParams) - removedTxnHashes := []string{} - - for _, mempoolTx := range mempoolTxns { - if err := newRegister.AddTransaction(mempoolTx); err == nil { - continue - } - // If we get here, it means that the transaction is no longer valid. We remove it from the mempool. - removedTxnHashes = append(removedTxnHashes, mempoolTx.Hash.String()) - if err := dmp.removeTransactionNoLock(mempoolTx, true); err != nil { - glog.Errorf("PosMempool.UpdateGlobalParams: Problem removing txn with hash %v from register: %v", - mempoolTx.Hash.String(), err) - } - } - - if len(removedTxnHashes) > 0 { - glog.Infof("PosMempool.UpdateGlobalParams: Transactions with the following hashes were removed: %v", - strings.Join(removedTxnHashes, ",")) + mp.globalParams = globalParams + if err := mp.refreshNoLock(); err != nil { + glog.Errorf("PosMempool.UpdateGlobalParams: Problem refreshing mempool: %v", err) } - dmp.txnRegister.Reset() - dmp.txnRegister = newRegister } diff --git a/lib/pos_mempool_nonce_tracker.go b/lib/pos_mempool_nonce_tracker.go new file mode 100644 index 000000000..68da0aaae --- /dev/null +++ b/lib/pos_mempool_nonce_tracker.go @@ -0,0 +1,74 @@ +package lib + +import ( + "sync" +) + +// nonceTrackerKey is a private type used by the NonceTracker to index transactions by (public key, nonce) pairs. +// While the chain tracks nonces by (PKID, nonce), or TransactorNonceMapKey, the mempool uses the public key instead. +// This is because the mempool does not update its state on SwapIdentity transactions, unlike the chain. 
So if NonceTracker +// used PKID to index transactions, there's a possibility some related SwapIdentity gets committed, impacting the current +// NonceTracker's state. This could result in some weirdness of overlapping or outdated (PKID, nonce) pairs. +type nonceTrackerKey struct { + publicKey PublicKey + nonce DeSoNonce +} + +func newNonceTrackerKey(publicKey PublicKey, nonce DeSoNonce) nonceTrackerKey { + return nonceTrackerKey{ + publicKey: publicKey, + nonce: nonce, + } +} + +// NonceTracker is a helper struct that is used to track (public key, nonce) pairs in pos mempool. +// It is used to facilitate the "replace by higher fee" feature, which allows users to resubmit transactions. +// If a user submits a transaction with an existing DeSoNonce.PartialID (among this user's transactions in the mempool), +// then the new transaction will replace the old one if the new transaction has a higher fee. +type NonceTracker struct { + sync.RWMutex + + // nonceMap indexes mempool transactions by (PKID, nonce) pairs. + nonceMap map[nonceTrackerKey]*MempoolTx +} + +func NewNonceTracker() *NonceTracker { + return &NonceTracker{ + nonceMap: make(map[nonceTrackerKey]*MempoolTx), + } +} + +// GetTxnByPublicKeyNonce returns the transaction with the given public key and nonce pair. +func (pmnt *NonceTracker) GetTxnByPublicKeyNonce(pk PublicKey, nonce DeSoNonce) *MempoolTx { + pmnt.RLock() + defer pmnt.RUnlock() + + key := newNonceTrackerKey(pk, nonce) + txn, _ := pmnt.nonceMap[key] + return txn +} + +// RemoveTxnByPublicKeyNonce removes a (pk, nonce) pair from the nonce tracker. +func (pmnt *NonceTracker) RemoveTxnByPublicKeyNonce(pk PublicKey, nonce DeSoNonce) { + pmnt.Lock() + defer pmnt.Unlock() + + key := newNonceTrackerKey(pk, nonce) + delete(pmnt.nonceMap, key) +} + +// AddTxnByPublicKeyNonce adds a new (pk, nonce) -> txn mapping to the nonce tracker. 
+func (pmnt *NonceTracker) AddTxnByPublicKeyNonce(txn *MempoolTx, pk PublicKey, nonce DeSoNonce) { + pmnt.Lock() + defer pmnt.Unlock() + + key := newNonceTrackerKey(pk, nonce) + pmnt.nonceMap[key] = txn +} + +func (pmnt *NonceTracker) Reset() { + pmnt.Lock() + defer pmnt.Unlock() + + pmnt.nonceMap = make(map[nonceTrackerKey]*MempoolTx) +} diff --git a/lib/pos_mempool_persister.go b/lib/pos_mempool_persister.go index 25f6480f2..25e0a3fc8 100644 --- a/lib/pos_mempool_persister.go +++ b/lib/pos_mempool_persister.go @@ -38,7 +38,7 @@ type MempoolEvent struct { // will then add the event to a queue. Periodically, the transaction queue is flushed to the database and all the cached // transactions are persisted. To achieve this, the persister runs its own goroutine. type MempoolPersister struct { - sync.Mutex + sync.RWMutex status MempoolPersisterStatus // db is the database that the persister will write transactions to. @@ -48,22 +48,22 @@ type MempoolPersister struct { stopGroup sync.WaitGroup startGroup sync.WaitGroup - // mempoolBackupTimeMilliseconds is the time frequency at which the persister will flush the transaction queue to the database. - mempoolBackupTimeMilliseconds int + // mempoolBackupIntervalMillis is the time frequency at which the persister will flush the transaction queue to the database. + mempoolBackupIntervalMillis int // eventQueue is used to queue up transactions to be persisted. The queue receives events from the EnqueueEvent, // which is called whenever a transaction is added or removed from the mempool. eventQueue chan *MempoolEvent // updateBatch is used to cache transactions that need to be persisted to the database. The batch is flushed to the - // database periodically based on the mempoolBackupTimeMilliseconds. + // database periodically based on the mempoolBackupIntervalMillis. 
updateBatch []*MempoolEvent } -func NewMempoolPersister(db *badger.DB, mempoolBackupTimeMilliseconds int) *MempoolPersister { +func NewMempoolPersister(db *badger.DB, mempoolBackupIntervalMillis int) *MempoolPersister { return &MempoolPersister{ - mempoolBackupTimeMilliseconds: mempoolBackupTimeMilliseconds, - status: MempoolPersisterStatusNotRunning, - db: db, - eventQueue: make(chan *MempoolEvent, eventQueueSize), + mempoolBackupIntervalMillis: mempoolBackupIntervalMillis, + status: MempoolPersisterStatusNotRunning, + db: db, + eventQueue: make(chan *MempoolEvent, eventQueueSize), } } @@ -111,7 +111,7 @@ func (mp *MempoolPersister) run() { } continue - case <-time.After(time.Duration(mp.mempoolBackupTimeMilliseconds) * time.Millisecond): + case <-time.After(time.Duration(mp.mempoolBackupIntervalMillis) * time.Millisecond): if err := mp.persistBatch(); err != nil { glog.Errorf("MempoolPersister: Error persisting batch: %v", err) } @@ -212,8 +212,8 @@ func (mp *MempoolPersister) persistBatchNoLock() error { // GetPersistedTransactions is used to retrieve all transactions from the database. It will return an error if the persister // is not currently running or if there was an issue retrieving the transactions. 
func (mp *MempoolPersister) GetPersistedTransactions() ([]*MempoolTx, error) { - mp.Lock() - defer mp.Unlock() + mp.RLock() + defer mp.RUnlock() if !mp.IsRunning() { return nil, errors.Wrapf(MempoolErrorNotRunning, "MempoolPersister: Cannot retrieve transactions while not running") diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index bbf3462af..e0ced9db0 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -3,10 +3,12 @@ package lib import ( "bytes" "github.com/dgraph-io/badger/v3" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "math/rand" "os" "testing" + "time" ) func TestPosMempoolStart(t *testing.T) { @@ -15,10 +17,14 @@ func TestPosMempoolStart(t *testing.T) { params := DeSoTestnetParams globalParams := _testGetDefaultGlobalParams() dir := _dbDirSetup(t) + maxMempoolPosSizeBytes := uint64(3000000000) + mempoolBackupIntervalMillis := uint64(30000) - mempool := NewPosMempool(¶ms, globalParams, nil, 0, dir) + mempool := NewPosMempool(¶ms, globalParams, nil, 0, dir, false, + maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) + require.NoError(mempool.Refresh()) mempool.Stop() require.False(mempool.IsRunning()) } @@ -31,34 +37,44 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { globalParams := _testGetDefaultGlobalParams() feeMin := globalParams.MinimumNetworkFeeNanosPerKB feeMax := uint64(10000) + maxMempoolPosSizeBytes := uint64(3000000000) + mempoolBackupIntervalMillis := uint64(30000) - params, db := _blockchainSetup(t) + params, db := _posTestBlockchainSetup(t) m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) - latestBlockView, err := NewUtxoView(db, params, nil, nil) require.NoError(err) dir := _dbDirSetup(t) - mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir) + mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, + 
maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) txn1 := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) txn2 := _generateTestTxn(t, rand, feeMin, feeMax, m1PubBytes, m1Priv, 100, 0) - require.NoError(mempool.AddTransaction(txn1)) - require.NoError(mempool.AddTransaction(txn2)) + _wrappedPosMempoolAddTransaction(t, mempool, txn1) + _wrappedPosMempoolAddTransaction(t, mempool, txn2) poolTxns := mempool.GetTransactions() require.Equal(2, len(poolTxns)) + require.NoError(mempool.Refresh()) + require.Equal(2, len(mempool.GetTransactions())) mempool.Stop() require.False(mempool.IsRunning()) - newPool := NewPosMempool(params, globalParams, latestBlockView, 2, dir) + newPool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, + maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) newPoolTxns := newPool.GetTransactions() require.Equal(2, len(newPoolTxns)) + require.Equal(len(newPool.GetTransactions()), len(newPool.nonceTracker.nonceMap)) + require.NoError(newPool.Refresh()) + require.Equal(2, len(newPool.GetTransactions())) + _wrappedPosMempoolRemoveTransaction(t, newPool, txn1.Hash()) + _wrappedPosMempoolRemoveTransaction(t, newPool, txn2.Hash()) newPool.Stop() require.False(newPool.IsRunning()) } @@ -71,8 +87,10 @@ func TestPosMempoolPrune(t *testing.T) { globalParams := _testGetDefaultGlobalParams() feeMin := globalParams.MinimumNetworkFeeNanosPerKB feeMax := uint64(2000) + maxMempoolPosSizeBytes := uint64(500) + mempoolBackupIntervalMillis := uint64(30000) - params, db := _blockchainSetup(t) + params, db := _posTestBlockchainSetup(t) m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) @@ -80,8 +98,8 @@ func TestPosMempoolPrune(t *testing.T) { require.NoError(err) dir := _dbDirSetup(t) - params.MaxMempoolPosSizeBytes = 500 - mempool := NewPosMempool(params, 
globalParams, latestBlockView, 2, dir) + mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, + maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -95,7 +113,7 @@ func TestPosMempoolPrune(t *testing.T) { } txn := _generateTestTxn(t, rand, feeMin, feeMax, pk, priv, 100, 25) txns = append(txns, txn) - require.NoError(mempool.AddTransaction(txn)) + _wrappedPosMempoolAddTransaction(t, mempool, txn) } fetchedTxns := mempool.GetTransactions() @@ -108,25 +126,28 @@ func TestPosMempoolPrune(t *testing.T) { require.Equal(uint64(1776), mempool.GetTransaction(fetchedTxns[2].Hash()).TxnFeeNanos) // Remove one transaction. - require.NoError(mempool.RemoveTransaction(fetchedTxns[0].Hash())) + _wrappedPosMempoolRemoveTransaction(t, mempool, fetchedTxns[0].Hash()) + require.NoError(mempool.Refresh()) + require.Equal(2, len(mempool.GetTransactions())) mempool.Stop() require.False(mempool.IsRunning()) - newPool := NewPosMempool(params, globalParams, latestBlockView, 2, dir) + newPool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, + maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) require.Equal(2, len(newPool.GetTransactions())) // Remove the other transactions. - require.NoError(newPool.RemoveTransaction(fetchedTxns[1].Hash())) - require.NoError(newPool.RemoveTransaction(fetchedTxns[2].Hash())) + _wrappedPosMempoolRemoveTransaction(t, newPool, fetchedTxns[1].Hash()) + _wrappedPosMempoolRemoveTransaction(t, newPool, fetchedTxns[2].Hash()) // Remove the same transaction twice - require.NoError(newPool.RemoveTransaction(fetchedTxns[1].Hash())) + _wrappedPosMempoolRemoveTransaction(t, newPool, fetchedTxns[1].Hash()) require.Equal(0, len(newPool.GetTransactions())) // Add the transactions back. 
for _, txn := range fetchedTxns { - require.NoError(newPool.AddTransaction(txn)) + _wrappedPosMempoolAddTransaction(t, newPool, txn.GetTxn()) } require.Equal(3, len(newPool.GetTransactions())) @@ -139,6 +160,13 @@ func TestPosMempoolPrune(t *testing.T) { require.True(bytes.Equal(tx.Hash().ToBytes(), fetchedTxns[index].Hash().ToBytes())) index++ } + require.Equal(len(newPool.GetTransactions()), len(newPool.nonceTracker.nonceMap)) + require.NoError(newPool.Refresh()) + newTxns := newPool.GetTransactions() + require.Equal(3, len(newTxns)) + for _, txn := range newTxns { + _wrappedPosMempoolRemoveTransaction(t, newPool, txn.Hash()) + } newPool.Stop() require.False(newPool.IsRunning()) } @@ -151,8 +179,10 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { globalParams := _testGetDefaultGlobalParams() feeMin := globalParams.MinimumNetworkFeeNanosPerKB feeMax := uint64(2000) + maxMempoolPosSizeBytes := uint64(3000000000) + mempoolBackupIntervalMillis := uint64(30000) - params, db := _blockchainSetup(t) + params, db := _posTestBlockchainSetup(t) m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) @@ -160,7 +190,8 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { require.NoError(err) dir := _dbDirSetup(t) - mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir) + mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, + maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -174,7 +205,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { } txn := _generateTestTxn(t, rand, feeMin, feeMax, pk, priv, 100, 25) txns = append(txns, txn) - require.NoError(mempool.AddTransaction(txn)) + _wrappedPosMempoolAddTransaction(t, mempool, txn) } require.Equal(100, len(mempool.GetTransactions())) @@ -185,19 +216,104 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { mempool.Stop() require.False(mempool.IsRunning()) - 
newPool := NewPosMempool(params, newGlobalParams, latestBlockView, 2, dir) + newPool := NewPosMempool(params, newGlobalParams, latestBlockView, 2, dir, false, + maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) newPoolTxns := newPool.GetTransactions() require.Equal(0, len(newPoolTxns)) + require.Equal(len(mempool.GetTransactions()), len(mempool.nonceTracker.nonceMap)) newPool.Stop() require.False(newPool.IsRunning()) } -func _blockchainSetup(t *testing.T) (_params *DeSoParams, _db *badger.DB) { +func TestPosMempoolReplaceWithHigherFee(t *testing.T) { + require := require.New(t) + seed := int64(1077) + rand := rand.New(rand.NewSource(seed)) + + globalParams := _testGetDefaultGlobalParams() + feeMin := globalParams.MinimumNetworkFeeNanosPerKB + feeMax := uint64(2000) + maxMempoolPosSizeBytes := uint64(3000000000) + mempoolBackupIntervalMillis := uint64(30000) + + params, db := _posTestBlockchainSetup(t) + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + m1PubBytes, _, _ := Base58CheckDecode(m1Pub) + + latestBlockView, err := NewUtxoView(db, params, nil, nil) + require.NoError(err) + dir := _dbDirSetup(t) + + mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, + maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(mempool.Start()) + require.True(mempool.IsRunning()) + + // Add a single transaction from m0 + txn1 := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 25) + _wrappedPosMempoolAddTransaction(t, mempool, txn1) + require.Equal(1, len(mempool.GetTransactions())) + + txns := mempool.GetTransactions() + require.Equal(1, len(txns)) + // Now generate another transaction from m0 with same nonce yet higher fee. 
+ txn1New := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 25) + txn1New.TxnFeeNanos = txn1.TxnFeeNanos + 1000 + *txn1New.TxnNonce = *txn1.TxnNonce + _signTxn(t, txn1New, m0Priv) + _wrappedPosMempoolAddTransaction(t, mempool, txn1New) + require.Equal(1, len(mempool.GetTransactions())) + require.Equal(txn1New.TxnNonce, mempool.GetTransactions()[0].TxnNonce) + + // Now generate a transaction coming from m1 + txn2 := _generateTestTxn(t, rand, feeMin, feeMax, m1PubBytes, m1Priv, 100, 25) + _wrappedPosMempoolAddTransaction(t, mempool, txn2) + require.Equal(2, len(mempool.GetTransactions())) + + // Generate a new transaction coming from m1, with same nonce, yet lower fee. This transaction should fail. + txn2Low := _generateTestTxn(t, rand, feeMin, feeMax, m1PubBytes, m1Priv, 100, 25) + txn2Low.TxnFeeNanos = txn2.TxnFeeNanos - 1000 + *txn2Low.TxnNonce = *txn2.TxnNonce + _signTxn(t, txn2Low, m1Priv) + added2Low := uint64(time.Now().UnixMicro()) + mtxn2Low := NewMempoolTransaction(txn2Low, added2Low) + err = mempool.AddTransaction(mtxn2Low, true) + require.Contains(err.Error(), MempoolFailedReplaceByHigherFee) + + // Now generate a proper new transaction for m1, with same nonce, and higher fee. + txn2New := _generateTestTxn(t, rand, feeMin, feeMax, m1PubBytes, m1Priv, 100, 25) + txn2New.TxnFeeNanos = txn2.TxnFeeNanos + 1000 + *txn2New.TxnNonce = *txn2.TxnNonce + _signTxn(t, txn2New, m1Priv) + _wrappedPosMempoolAddTransaction(t, mempool, txn2New) + require.Equal(2, len(mempool.GetTransactions())) + + // Verify that only the correct transactions are present in the mempool. Notice that on this seed, txn2 is positioned + // as first in the mempool's GetTransactions. 
+ require.NotEqual(txn2, mempool.GetTransactions()[0].GetTxn()) + require.NotEqual(txn2Low, mempool.GetTransactions()[0].GetTxn()) + require.Equal(txn2New, mempool.GetTransactions()[0].GetTxn()) + require.NotEqual(txn1, mempool.GetTransactions()[1].GetTxn()) + require.Equal(txn1New, mempool.GetTransactions()[1].GetTxn()) + + require.Equal(len(mempool.GetTransactions()), len(mempool.nonceTracker.nonceMap)) + require.NoError(mempool.Refresh()) + require.Equal(2, len(mempool.GetTransactions())) + mempool.Stop() + require.False(mempool.IsRunning()) +} + +func _posTestBlockchainSetup(t *testing.T) (_params *DeSoParams, _db *badger.DB) { + return _posTestBlockchainSetupWithBalances(t, 200000, 200000) +} + +func _posTestBlockchainSetupWithBalances(t *testing.T, m0Balance uint64, m1Balance uint64) (_params *DeSoParams, _db *badger.DB) { require := require.New(t) chain, params, db := NewLowDifficultyBlockchain(t) + params.ForkHeights.BalanceModelBlockHeight = 1 oldPool, miner := NewTestMiner(t, chain, params, true) // Mine a few blocks to give the senderPkString some money. 
_, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, oldPool) @@ -212,10 +328,10 @@ func _blockchainSetup(t *testing.T) (_params *DeSoParams, _db *badger.DB) { _, _, _ = _doBasicTransferWithViewFlush( t, chain, db, params, senderPkString, m0PublicKeyBase58Check, - senderPrivString, 200000, 11) + senderPrivString, m0Balance, 11) _, _, _ = _doBasicTransferWithViewFlush( t, chain, db, params, senderPkString, m1PublicKeyBase58Check, - senderPrivString, 200000, 11) + senderPrivString, m1Balance, 11) return params, db } @@ -244,9 +360,70 @@ func _generateTestTxn(t *testing.T, rand *rand.Rand, feeMin uint64, feeMax uint6 TxnFeeNanos: rand.Uint64()%(feeMax-feeMin) + feeMin, TxnNonce: &DeSoNonce{ ExpirationBlockHeight: expirationHeight, + PartialID: rand.Uint64() % 10000, }, ExtraData: extraData, } _signTxn(t, txn, priv) return txn } + +func _wrappedPosMempoolAddTransaction(t *testing.T, mp *PosMempool, txn *MsgDeSoTxn) { + added := uint64(time.Now().UnixMicro()) + mtxn := NewMempoolTransaction(txn, added) + require.NoError(t, mp.AddTransaction(mtxn, true)) + require.Equal(t, true, _checkPosMempoolIntegrity(t, mp)) +} + +func _wrappedPosMempoolRemoveTransaction(t *testing.T, mp *PosMempool, txnHash *BlockHash) { + require.NoError(t, mp.RemoveTransaction(txnHash)) + require.Equal(t, true, _checkPosMempoolIntegrity(t, mp)) +} + +func _checkPosMempoolIntegrity(t *testing.T, mp *PosMempool) bool { + if !mp.IsRunning() { + return true + } + + if len(mp.GetTransactions()) != len(mp.nonceTracker.nonceMap) { + t.Errorf("PosMempool transactions and nonceTracker are out of sync") + return false + } + + balances := make(map[PublicKey]uint64) + txns := mp.GetTransactions() + for _, txn := range txns { + if txn.TxnNonce == nil { + t.Errorf("PosMempool transaction has nil nonce") + return false + } + pk := NewPublicKey(txn.PublicKey) + if txnNt := mp.nonceTracker.GetTxnByPublicKeyNonce(*pk, *txn.TxnNonce); !assert.Equal(t, txn.GetTxn(), txnNt.Tx) { + t.Errorf("PosMempool 
nonceTracker and transactions are out of sync") + return false + } + balances[*pk] += txn.TxnFeeNanos + } + + if len(balances) > len(mp.ledger.balances) { + t.Errorf("PosMempool ledger is out of sync length balances (%v) > ledger (%v)", len(balances), len(mp.ledger.balances)) + return false + } + activeBalances := 0 + for pk, ledgerBalance := range mp.ledger.balances { + if ledgerBalance > 0 { + activeBalances++ + } else { + continue + } + if balance, exists := balances[pk]; !exists || ledgerBalance != balance { + t.Errorf("PosMempool ledger is out of sync pk %v", PkToStringTestnet(pk.ToBytes())) + return false + } + } + if len(balances) != activeBalances { + t.Errorf("PosMempool ledger is out of sync length") + return false + } + return true +} diff --git a/lib/pos_mempool_transaction.go b/lib/pos_mempool_transaction.go index 4163c0a69..7759a7ed0 100644 --- a/lib/pos_mempool_transaction.go +++ b/lib/pos_mempool_transaction.go @@ -39,7 +39,7 @@ type MempoolTx struct { index int } -func NewMempoolTx(txn *MsgDeSoTxn, blockHeight uint64) (*MempoolTx, error) { +func NewMempoolTx(txn *MsgDeSoTxn, addedUnixMicro uint64, blockHeight uint64) (*MempoolTx, error) { txnBytes, err := txn.ToBytes(false) if err != nil { return nil, errors.Wrapf(err, "PosMempool.GetMempoolTx: Problem serializing txn") @@ -54,12 +54,13 @@ func NewMempoolTx(txn *MsgDeSoTxn, blockHeight uint64) (*MempoolTx, error) { if err != nil { return nil, errors.Wrapf(err, "PosMempool.GetMempoolTx: Problem computing fee per KB") } + added := time.UnixMicro(int64(addedUnixMicro)) return &MempoolTx{ Tx: txn, Hash: txnHash, TxSizeBytes: serializedLen, - Added: time.Now(), + Added: added, Height: uint32(blockHeight), Fee: txn.TxnFeeNanos, FeePerKB: feePerKb, @@ -109,16 +110,14 @@ func (mempoolTx *MempoolTx) FromBytes(rr *bytes.Reader) error { return errors.Wrapf(err, "MempoolTx.Decode: Problem reading height") } - // Create a new MempoolTx - newTxn, err := NewMempoolTx(txn, height) - *mempoolTx = *newTxn - // 
Decode the timestamp - timestamp, err := ReadUvarint(rr) + timestampUnixMicro, err := ReadUvarint(rr) if err != nil { return errors.Wrapf(err, "MempoolTx.Decode: Problem reading timestamp") } - mempoolTx.Added = time.UnixMicro(int64(timestamp)) + // Create a new MempoolTx + newTxn, err := NewMempoolTx(txn, timestampUnixMicro, height) + *mempoolTx = *newTxn return nil } diff --git a/lib/pos_network.go b/lib/pos_network.go index 7abe7c2c4..91a0cafb9 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -3,6 +3,7 @@ package lib import ( "bytes" "fmt" + "golang.org/x/crypto/sha3" "io" "github.com/deso-protocol/core/bls" @@ -628,3 +629,9 @@ func DecodeBitset(rr io.Reader) (*bitset.Bitset, error) { } return (bitset.NewBitset()).FromBytes(encodedBytes), nil } + +func HashBitset(b *bitset.Bitset) *BlockHash { + encodedBytes := EncodeBitset(b) + hash := sha3.Sum256(encodedBytes) + return NewBlockHash(hash[:]) +} diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index de18d64e3..4c7ed3cce 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -128,6 +128,12 @@ func _mergeGlobalParamEntryDefaults(bav *UtxoView, globalParamsEntry *GlobalPara if globalParamsEntryCopy.JailInactiveValidatorGracePeriodEpochs == 0 { globalParamsEntryCopy.JailInactiveValidatorGracePeriodEpochs = bav.Params.DefaultJailInactiveValidatorGracePeriodEpochs } + if globalParamsEntryCopy.FeeBucketGrowthRateBasisPoints == 0 { + globalParamsEntryCopy.FeeBucketGrowthRateBasisPoints = bav.Params.DefaultFeeBucketGrowthRateBasisPoints + } + if globalParamsEntryCopy.FailingTransactionBMFMultiplierBasisPoints == 0 { + globalParamsEntryCopy.FailingTransactionBMFMultiplierBasisPoints = bav.Params.DefaultFailingTransactionBMFMultiplierBasisPoints + } // Return the merged result. 
return globalParamsEntryCopy diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index e4ed376a1..e40afb1d4 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -234,13 +234,13 @@ func (bav *UtxoView) distributeValidatorCommission(validatorPKID *PKID, commissi } const ( - _basisPoints = uint64(10000) // 1e4 + _maxBasisPoints = uint64(10000) // 1e4 _nanoSecsPerYear = uint64(365) * 24 * 60 * 60 * 1e9 // 365 days * 24 hours * 60 minutes * 60 seconds * 1e9 nanoseconds ) var ( - _basisPointsAsInt = big.NewInt(int64(_basisPoints)) - _basisPointsAsFloat = NewFloat().SetUint64(_basisPoints) + _basisPointsAsInt = big.NewInt(int64(_maxBasisPoints)) + _basisPointsAsFloat = NewFloat().SetUint64(_maxBasisPoints) _nanoSecsPerYearAsFloat = NewFloat().SetUint64(_nanoSecsPerYear) ) diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index e42ec5982..5cfdd2d8d 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -34,21 +34,21 @@ type TransactionRegister struct { // minimumNetworkFeeNanosPerKB is the base fee rate for the lowest fee FeeTimeBucket. This value corresponds to // GlobalParamsEntry's MinimumNetworkFeeNanosPerKB. minimumNetworkFeeNanosPerKB *big.Float - // feeBucketRateMultiplierBasisPoints is the fee rate multiplier for FeeTimeBucket objects. This value corresponds to - // GlobalParamsEntry's FeeBucketRateMultiplierBasisPoints. - feeBucketRateMultiplierBasisPoints *big.Float + // feeBucketGrowthRateBasisPoints is the fee rate multiplier for FeeTimeBucket objects. This value corresponds to + // GlobalParamsEntry's FeeBucketGrowthRateBasisPoints. 
+ feeBucketGrowthRateBasisPoints *big.Float } func NewTransactionRegister(globalParams *GlobalParamsEntry) *TransactionRegister { feeTimeBucketSet := treeset.NewWith(feeTimeBucketComparator) minNetworkFee, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() return &TransactionRegister{ - feeTimeBucketSet: feeTimeBucketSet, - feeTimeBucketsByMinFeeMap: make(map[uint64]*FeeTimeBucket), - txnMembership: make(map[BlockHash]*MempoolTx), - totalTxnsSizeBytes: 0, - minimumNetworkFeeNanosPerKB: minNetworkFee, - feeBucketRateMultiplierBasisPoints: bucketMultiplier, + feeTimeBucketSet: feeTimeBucketSet, + feeTimeBucketsByMinFeeMap: make(map[uint64]*FeeTimeBucket), + txnMembership: make(map[BlockHash]*MempoolTx), + totalTxnsSizeBytes: 0, + minimumNetworkFeeNanosPerKB: minNetworkFee, + feeBucketGrowthRateBasisPoints: bucketMultiplier, } } @@ -99,7 +99,7 @@ func (tr *TransactionRegister) addTransactionNoLock(txn *MempoolTx) error { // Determine the min fee of the bucket based on the transaction's fee rate. bucketMinFeeNanosPerKb, bucketMaxFeeNanosPerKB := computeFeeTimeBucketRangeFromFeeNanosPerKB(txn.FeePerKB, - tr.minimumNetworkFeeNanosPerKB, tr.feeBucketRateMultiplierBasisPoints) + tr.minimumNetworkFeeNanosPerKB, tr.feeBucketGrowthRateBasisPoints) // Lookup the bucket in the map. bucket, bucketExists := tr.feeTimeBucketsByMinFeeMap[bucketMinFeeNanosPerKb] if !bucketExists { @@ -141,6 +141,8 @@ func (tr *TransactionRegister) removeTransactionNoLock(txn *MempoolTx) error { return nil } + // Sanity-check that the size of the transaction doesn't exceed the current size of the TransactionRegister. + // This should never happen, unless somehow the underlying transaction was modified. Which won't happen. 
if tr.totalTxnsSizeBytes < txn.TxSizeBytes { return fmt.Errorf("TransactionRegister.RemoveTransaction: Transaction with transaction hash %v size %v "+ "exceeds total mempool size %v", txn.Hash.String(), txn.TxSizeBytes, tr.totalTxnsSizeBytes) @@ -148,7 +150,7 @@ func (tr *TransactionRegister) removeTransactionNoLock(txn *MempoolTx) error { // Determine the min fee of the bucket based on the transaction's fee rate. bucketMinFeeNanosPerKb, _ := computeFeeTimeBucketRangeFromFeeNanosPerKB(txn.FeePerKB, - tr.minimumNetworkFeeNanosPerKB, tr.feeBucketRateMultiplierBasisPoints) + tr.minimumNetworkFeeNanosPerKB, tr.feeBucketGrowthRateBasisPoints) // Remove the transaction from the bucket. if bucket, exists := tr.feeTimeBucketsByMinFeeMap[bucketMinFeeNanosPerKb]; exists { if bucket.minFeeNanosPerKB != bucketMinFeeNanosPerKb { @@ -161,6 +163,9 @@ func (tr *TransactionRegister) removeTransactionNoLock(txn *MempoolTx) error { if bucket.Empty() { tr.removeBucketNoLock(bucket) } + } else if !exists { + return fmt.Errorf("TransactionRegister.RemoveTransaction: Bucket with min fee %v does not exist", + bucketMinFeeNanosPerKb) } delete(tr.txnMembership, *txn.Hash) @@ -305,6 +310,7 @@ func (tr *TransactionRegister) getTransactionsToPrune(minPrunedBytes uint64) (_p prunedTxns := []*MempoolTx{} // Find the FeeTime bucket at the end of the Set. It'll have the smallest fee among the buckets in the register. + // We iterate in reverse order, starting from the end, so that we drop transactions ordered by least-to-highest priority. it := tr.feeTimeBucketSet.Iterator() it.End() // Iterate through the buckets in reverse order so that we drop transactions ordered by least-to-highest priority. 
diff --git a/lib/pos_transaction_register_test.go b/lib/pos_transaction_register_test.go index b5e223f05..d4f759368 100644 --- a/lib/pos_transaction_register_test.go +++ b/lib/pos_transaction_register_test.go @@ -362,7 +362,7 @@ func TestFeeTimeBucketBasic(t *testing.T) { func _testGetDefaultGlobalParams() *GlobalParamsEntry { globalParams := InitialGlobalParamsEntry globalParams.MinimumNetworkFeeNanosPerKB = 1000 - globalParams.FeeBucketRateMultiplierBasisPoints = 1000 + globalParams.FeeBucketGrowthRateBasisPoints = 1000 return &globalParams } diff --git a/lib/validation_deso_txn.go b/lib/validation_deso_txn.go index e7f2ad83d..1d230fd1e 100644 --- a/lib/validation_deso_txn.go +++ b/lib/validation_deso_txn.go @@ -12,6 +12,10 @@ import ( func ValidateDeSoTxnSanityBalanceModel(txn *MsgDeSoTxn, blockHeight uint64, params *DeSoParams, globalParams *GlobalParamsEntry) error { + if txn == nil || params == nil || globalParams == nil { + return fmt.Errorf("ValidateDeSoTxnSanityBalanceModel: Transaction, params, and globalParams cannot be nil") + } + // Validate encoding if err := ValidateDeSoTxnEncoding(txn, params); err != nil { return errors.Wrapf(err, "ValidateDeSoTxnSanityBalanceModel: ") @@ -41,6 +45,10 @@ func ValidateDeSoTxnSanityBalanceModel(txn *MsgDeSoTxn, blockHeight uint64, // ValidateDeSoTxnEncoding validates that the transaction encoding works as expected. func ValidateDeSoTxnEncoding(txn *MsgDeSoTxn, params *DeSoParams) error { + if txn == nil || params == nil { + return fmt.Errorf("ValidateDeSoTxnEncoding: Transaction and params cannot be nil") + } + // Validate transaction to/from bytes encoding txnBytes, err := txn.ToBytes(false) if err != nil { @@ -70,9 +78,8 @@ func ValidateDeSoTxnEncoding(txn *MsgDeSoTxn, params *DeSoParams) error { // ValidateDeSoTxnMetadata validates that the transaction metadata is correctly formatted. 
func ValidateDeSoTxnMetadata(txn *MsgDeSoTxn) error { - // Validate that the transaction has correct metadata - if txn.TxnMeta == nil { - return fmt.Errorf("ValidateDeSoTxnMetadata: Transaction is missing TxnMeta") + if txn == nil || txn.TxnMeta == nil { + return fmt.Errorf("ValidateDeSoTxnMetadata: Transaction is nil or is missing TxnMeta") } if _, err := NewTxnMetadata(txn.TxnMeta.GetTxnType()); err != nil { return errors.Wrapf(err, "ValidateDeSoTxnMetadata: Problem parsing TxnType") @@ -82,6 +89,10 @@ func ValidateDeSoTxnMetadata(txn *MsgDeSoTxn) error { // ValidateDeSoTxnHash validates that the transaction hash is correctly computed. func ValidateDeSoTxnHash(txn *MsgDeSoTxn) error { + if txn == nil { + return fmt.Errorf("ValidateDeSoTxnHash: Transaction cannot be nil") + } + // Validate transaction hash if txn.Hash() == nil { return fmt.Errorf("ValidateDeSoTxnHash: Problem computing tx hash") @@ -91,6 +102,10 @@ func ValidateDeSoTxnHash(txn *MsgDeSoTxn) error { // ValidateDeSoTxnPublicKey validates that the transaction public key is correctly formatted. 
func ValidateDeSoTxnPublicKey(txn *MsgDeSoTxn) error { + if txn == nil { + return fmt.Errorf("ValidateDeSoTxnPublicKey: Transaction cannot be nil") + } + // Validate public key if err := IsByteArrayValidPublicKey(txn.PublicKey); err != nil { return errors.Wrapf(err, "ValidateDeSoTxnPublicKey: Problem with public key") @@ -102,6 +117,10 @@ func ValidateDeSoTxnPublicKey(txn *MsgDeSoTxn) error { func ValidateDeSoTxnFormatBalanceModel(txn *MsgDeSoTxn, blockHeight uint64, globalParams *GlobalParamsEntry) error { var err error + if txn == nil || globalParams == nil || txn.TxnNonce == nil { + return fmt.Errorf("ValidateDeSoTxnFormatBalanceModel: Transaction, globalParams, and nonce cannot be nil") + } + // Validate transaction version if txn.TxnVersion == DeSoTxnVersion0 { return fmt.Errorf("ValidateDeSoTxnFormatBalanceModel: DeSoTxnVersion0 is outdated in balance model") @@ -153,6 +172,10 @@ func ValidateDeSoTxnFormatBalanceModel(txn *MsgDeSoTxn, blockHeight uint64, glob // ValidateDeSoTxnMinimalNetworkFee validates that the transaction is above the network's minimal fee. 
func ValidateDeSoTxnMinimalNetworkFee(txn *MsgDeSoTxn, globalParams *GlobalParamsEntry) error { + if txn == nil || globalParams == nil { + return fmt.Errorf("ValidateDeSoTxnMinimalNetworkFee: Transaction and globalParams cannot be nil") + } + // Verify the transaction fee feeNanosPerKb, err := txn.ComputeFeeRatePerKBNanos() if err != nil { diff --git a/lib/varint.go b/lib/varint.go index 063045b7f..a58a0f175 100644 --- a/lib/varint.go +++ b/lib/varint.go @@ -119,10 +119,10 @@ func ReadUvarint(r io.Reader) (uint64, error) { return x, err } b := buf[0] + if i == 9 && b > 1 { + return x, overflow + } if b < 0x80 { - if i > 9 || i == 9 && b > 1 { - return x, overflow - } return x | uint64(b)< Date: Mon, 30 Oct 2023 14:45:09 -0400 Subject: [PATCH 264/762] Revert LN ProcessBlockPoS changes from feature/pos-fast-hotstuff-consensus --- bls/signature.go | 4 - bls/signature_no_relic.go | 4 - lib/block_view_bitcoin_test.go | 2 +- lib/block_view_validator.go | 8 - lib/blockchain.go | 87 ++---- lib/blockchain_test.go | 32 +- lib/constants.go | 4 +- lib/db_utils.go | 72 ++--- lib/load_test.go | 4 +- lib/miner.go | 2 +- lib/network.go | 27 -- lib/pos_blockchain.go | 451 ---------------------------- lib/pos_blockchain_test.go | 441 --------------------------- lib/pos_epoch.go | 24 +- lib/pos_epoch_complete_hook.go | 9 +- lib/pos_epoch_complete_hook_test.go | 71 ++--- lib/pos_epoch_test.go | 8 - lib/pos_network.go | 47 --- lib/pos_random_seed.go | 4 - lib/postgres.go | 2 +- lib/server.go | 4 +- lib/txindex.go | 2 +- lib/types.go | 4 - 23 files changed, 110 insertions(+), 1203 deletions(-) delete mode 100644 lib/pos_blockchain.go delete mode 100644 lib/pos_blockchain_test.go diff --git a/bls/signature.go b/bls/signature.go index a3fe19282..9bcfdb4a9 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -251,10 +251,6 @@ func (publicKey *PublicKey) Copy() *PublicKey { } } -func (publicKey *PublicKey) IsEmpty() bool { - return publicKey == nil || publicKey.flowPublicKey == nil -} - 
// // TYPES: Signature // diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go index d264b0449..093a06331 100644 --- a/bls/signature_no_relic.go +++ b/bls/signature_no_relic.go @@ -96,10 +96,6 @@ func (publicKey *PublicKey) Copy() *PublicKey { panic(BLSNoRelicError) } -func (publicKey *PublicKey) IsEmpty() bool { - panic(BLSNoRelicError) -} - // // TYPES: Signature // diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index 79d27e5d1..6a422dec5 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -49,7 +49,7 @@ func GetTestParamsCopy( // Set the BitcoinExchange-related params to canned values. paramsCopy := *paramss headerHash := (BlockHash)(startHeader.BlockHash()) - paramsCopy.BitcoinStartBlockNode = NewPoWBlockNode( + paramsCopy.BitcoinStartBlockNode = NewBlockNode( nil, /*ParentNode*/ &headerHash, /*Hash*/ startHeight, diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 94d7b20ff..8ee6d768b 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -84,14 +84,6 @@ type ValidatorEntry struct { isDeleted bool } -func (validatorEntry *ValidatorEntry) GetPublicKey() *bls.PublicKey { - return validatorEntry.VotingPublicKey -} - -func (validatorEntry *ValidatorEntry) GetStakeAmount() *uint256.Int { - return validatorEntry.TotalStakeAmountNanos -} - func (validatorEntry *ValidatorEntry) Status() ValidatorStatus { // ValidatorEntry.Status() is a virtual/derived field that is not stored in // the database, but instead constructed from other ValidatorEntry fields. 
diff --git a/lib/blockchain.go b/lib/blockchain.go index fe1cc5de7..0a8ba1077 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -75,13 +75,6 @@ const ( StatusBitcoinHeaderValidateFailed // Deprecated ) -type CommittedBlockStatus uint8 - -const ( - COMMITTED CommittedBlockStatus = 0 - UNCOMMITTED CommittedBlockStatus = 1 -) - // IsFullyProcessed determines if the BlockStatus corresponds to a fully processed and stored block. func (blockStatus BlockStatus) IsFullyProcessed() bool { return blockStatus&StatusHeaderValidated != 0 && @@ -156,16 +149,6 @@ type BlockNode struct { // Status holds the validation state for the block and whether or not // it's stored in the database. Status BlockStatus - - // CommittedStatus is either COMMITTED or UNCOMMITTED. If it's UNCOMMITTED, then - // the block is not yet committed to the blockchain. If it's COMMITTED, then the - // block is committed to the blockchain. - // In PoW consensus, all blocks will have CommittedStatus = COMMITTED. - // In PoS consensus, the chain tip and its parent will have CommittedStatus = UNCOMMITTED and - // all other blocks will have CommittedStatus = COMMITTED. When a new block is added to the tip, - // its CommittedStatus will be set to UNCOMMITTED and its grandparent's CommittedStatus will be - // updated to COMMITTED. 
- CommittedStatus CommittedBlockStatus } func _difficultyBitsToHash(diffBits uint32) (_diffHash *BlockHash) { @@ -298,15 +281,12 @@ func (nn *BlockNode) String() string { if nn.Header != nil { tstamp = uint32(nn.Header.GetTstampSecs()) } - return fmt.Sprintf("< TstampSecs: %d, Height: %d, Hash: %s, ParentHash %s, Status: %s, CumWork: %v, CommittedStatus: %v>", - tstamp, nn.Header.Height, nn.Hash, parentHash, nn.Status, nn.CumWork, nn.CommittedStatus) + return fmt.Sprintf("< TstampSecs: %d, Height: %d, Hash: %s, ParentHash %s, Status: %s, CumWork: %v>", + tstamp, nn.Header.Height, nn.Hash, parentHash, nn.Status, nn.CumWork) } -// NewPoWBlockNode is a helper function to create a BlockNode -// when running PoW consensus. All blocks in the PoW consensus -// have a committed status of COMMITTED. // TODO: Height not needed in this since it's in the header. -func NewPoWBlockNode( +func NewBlockNode( parent *BlockNode, hash *BlockHash, height uint32, @@ -323,28 +303,6 @@ func NewPoWBlockNode( CumWork: cumWork, Header: header, Status: status, - // All blocks have a committed status in PoW. - CommittedStatus: COMMITTED, - } -} - -// NewPoSBlockNode is a new helper function to create a block node -// as we need to control the value of the CommittedStatus field. -func NewPoSBlockNode( - parent *BlockNode, - hash *BlockHash, - height uint32, - header *MsgDeSoHeader, - status BlockStatus, - committedStatus CommittedBlockStatus) *BlockNode { - - return &BlockNode{ - Parent: parent, - Hash: hash, - Height: height, - Header: header, - Status: status, - CommittedStatus: committedStatus, } } @@ -482,10 +440,6 @@ type Blockchain struct { bestHeaderChain []*BlockNode bestHeaderChainMap map[BlockHash]*BlockNode - // Tracks all uncommitted blocks in memory. This includes blocks that are not part - // of the best chain. - uncommittedBlocksMap map[BlockHash]*MsgDeSoBlock - // We keep track of orphan blocks with the following data structures. 
Orphans // are not written to disk and are only cached in memory. Moreover we only keep // up to MaxOrphansInMemory of them in order to prevent memory exhaustion. @@ -696,9 +650,8 @@ func NewBlockchain( eventManager: eventManager, archivalMode: archivalMode, - blockIndex: make(map[BlockHash]*BlockNode), - uncommittedBlocksMap: make(map[BlockHash]*MsgDeSoBlock), - bestChainMap: make(map[BlockHash]*BlockNode), + blockIndex: make(map[BlockHash]*BlockNode), + bestChainMap: make(map[BlockHash]*BlockNode), bestHeaderChainMap: make(map[BlockHash]*BlockNode), @@ -1346,7 +1299,6 @@ func (bc *Blockchain) SetBestChainMap(bestChain []*BlockNode, bestChainMap map[B bc.blockIndex = blockIndex } -// TODO: update to support validating orphan PoS Blocks func (bc *Blockchain) _validateOrphanBlock(desoBlock *MsgDeSoBlock) error { // Error if the block is missing a parent hash or header. if desoBlock.Header == nil { @@ -1772,7 +1724,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B // and try to mine on top of it before revealing it to everyone. newWork := BytesToBigint(ExpectedWorkForBlockHash(diffTarget)[:]) cumWork := newWork.Add(newWork, parentNode.CumWork) - newNode := NewPoWBlockNode( + newNode := NewBlockNode( parentNode, headerHash, uint32(blockHeader.Height), @@ -1853,25 +1805,23 @@ func (bc *Blockchain) ProcessHeader(blockHeader *MsgDeSoHeader, headerHash *Bloc return bc.processHeaderPoW(blockHeader, headerHash) } -func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { +func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, _err error) { bc.ChainLock.Lock() defer bc.ChainLock.Unlock() if desoBlock == nil { // If the block is nil then we return an error. Nothing we can do here. 
- return false, false, nil, fmt.Errorf("ProcessBlock: Block is nil") + return false, false, fmt.Errorf("ProcessBlock: Block is nil") } // If the block's height is after the PoS cut-over fork height, then we use the PoS block processing logic. Otherwise, fall back // to the PoW logic. if desoBlock.Header.Height >= uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { // TODO: call bc.processBlockPoS(desoBlock, verifySignatures) instead - isMainChain, isOrphan, err := bc.processBlockPoW(desoBlock, verifySignatures) - return isMainChain, isOrphan, nil, err + return bc.processBlockPoW(desoBlock, verifySignatures) } - isMainChain, isOrphan, err := bc.processBlockPoW(desoBlock, verifySignatures) - return isMainChain, isOrphan, nil, err + return bc.processBlockPoW(desoBlock, verifySignatures) } func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, err error) { @@ -2638,6 +2588,23 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures return isMainChain, false, nil } +// processBlockPoS runs the Fast-Hotstuff block connect and commit rule as follows: +// 1. Validate on an incoming block and its header +// 2. Store the block in the db +// 3. Resolves forks within the last two blocks +// 4. Connect the block to the blockchain's tip +// 5. If applicable, flush the incoming block's grandparent to the DB +// 6. 
Notify the block proposer, pacemaker, and voting logic that the incoming block has been accepted +func (bc *Blockchain) processBlockPoS(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, err error) { + // TODO: Implement me + return false, false, fmt.Errorf("ProcessBlockPoS: Not implemented yet") +} + +func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { + // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks + panic("GetUncommittedTipView: Not implemented yet") +} + // DisconnectBlocksToHeight will rollback blocks from the db and blockchain structs until block tip reaches the provided // blockHeight parameter. func (bc *Blockchain) DisconnectBlocksToHeight(blockHeight uint64, snap *Snapshot) error { diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index 4f85996ba..c444425df 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -616,7 +616,7 @@ func TestBasicTransferReorg(t *testing.T) { // Process all of the fork blocks on the original chain to make it // experience a reorg. 
for _, forkBlock := range forkBlocks { - _, _, _, err := chain1.ProcessBlock(forkBlock, true /*verifySignatures*/) + _, _, err := chain1.ProcessBlock(forkBlock, true /*verifySignatures*/) require.NoError(err) } @@ -659,7 +659,7 @@ func _shouldConnectBlock(blk *MsgDeSoBlock, t *testing.T, chain *Blockchain) { blockHash, _ := blk.Hash() verifySignatures := true - isMainChain, isOrphan, _, err := chain.ProcessBlock(blk, verifySignatures) + isMainChain, isOrphan, err := chain.ProcessBlock(blk, verifySignatures) require.NoError(err) require.Falsef(isOrphan, "Block %v should not be an orphan", blockHash) require.Truef(isMainChain, "Block %v should be on the main chain", blockHash) @@ -824,7 +824,7 @@ func TestProcessBlockReorgBlocks(t *testing.T) { // Block b1 fmt.Println("Connecting block b1") require.Equal(uint64(3), GetUtxoNumEntries(db, chain.snapshot)) - isMainChain, isOrphan, _, err := chain.ProcessBlock(blockB1, verifySignatures) + isMainChain, isOrphan, err := chain.ProcessBlock(blockB1, verifySignatures) require.NoError(err) require.Falsef(isOrphan, "Block b1 should not be an orphan") require.Falsef(isMainChain, "Block b1 should not be on the main chain") @@ -840,7 +840,7 @@ func TestProcessBlockReorgBlocks(t *testing.T) { // Block b2 fmt.Println("Connecting block b2") require.Equal(uint64(3), GetUtxoNumEntries(db, chain.snapshot)) - isMainChain, isOrphan, _, err := chain.ProcessBlock(blockB2, verifySignatures) + isMainChain, isOrphan, err := chain.ProcessBlock(blockB2, verifySignatures) require.NoError(err) require.Falsef(isOrphan, "Block b2 should not be an orphan") require.Falsef(isMainChain, "Block b2 should not be on the main chain") @@ -1239,7 +1239,7 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { } nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewPoWBlockNode( + nodes = append(nodes, NewBlockNode( lastNode, nil, uint32(ii), 
@@ -1276,7 +1276,7 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { lastNode := nodes[ii-1] nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewPoWBlockNode( + nodes = append(nodes, NewBlockNode( lastNode, nil, uint32(ii), @@ -1335,7 +1335,7 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { } nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewPoWBlockNode( + nodes = append(nodes, NewBlockNode( lastNode, nil, uint32(ii), @@ -1372,7 +1372,7 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { lastNode := nodes[ii-1] nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewPoWBlockNode( + nodes = append(nodes, NewBlockNode( lastNode, nil, uint32(ii), @@ -1431,7 +1431,7 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { } nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewPoWBlockNode( + nodes = append(nodes, NewBlockNode( lastNode, nil, uint32(ii), @@ -1486,7 +1486,7 @@ func TestCalcNextDifficultyTargetJustRight(t *testing.T) { } nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewPoWBlockNode( + nodes = append(nodes, NewBlockNode( lastNode, nil, uint32(ii), @@ -1541,7 +1541,7 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { } nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewPoWBlockNode( + nodes = append(nodes, NewBlockNode( lastNode, nil, uint32(ii), @@ -1578,7 +1578,7 @@ func 
TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { lastNode := nodes[ii-1] nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) require.NoErrorf(err, "Block index: %d", ii) - nodes = append(nodes, NewPoWBlockNode( + nodes = append(nodes, NewBlockNode( lastNode, nil, uint32(ii), @@ -1673,7 +1673,7 @@ func TestBadBlockSignature(t *testing.T) { // A bad signature with the right public key should fail. finalBlock1.BlockProducerInfo.PublicKey = senderPkBytes - _, _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, err = chain.ProcessBlock(finalBlock1, true) require.Error(err) require.Contains(err.Error(), RuleErrorInvalidBlockProducerSIgnature) @@ -1682,20 +1682,20 @@ func TestBadBlockSignature(t *testing.T) { require.NoError(err) finalBlock1.BlockProducerInfo.PublicKey = blockSignerPkBytes finalBlock1.BlockProducerInfo.Signature = nil - _, _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, err = chain.ProcessBlock(finalBlock1, true) require.Error(err) require.Contains(err.Error(), RuleErrorMissingBlockProducerSignature) // If all the BlockProducerInfo is missing, things should fail finalBlock1.BlockProducerInfo = nil - _, _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, err = chain.ProcessBlock(finalBlock1, true) require.Error(err) require.Contains(err.Error(), RuleErrorMissingBlockProducerSignature) // Now let's add blockSignerPK to the map of trusted keys and confirm that the block processes. 
chain.trustedBlockProducerPublicKeys[MakePkMapKey(blockSignerPkBytes)] = true finalBlock1.BlockProducerInfo = blockProducerInfoCopy - _, _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, err = chain.ProcessBlock(finalBlock1, true) require.NoError(err) _, _ = finalBlock1, db diff --git a/lib/constants.go b/lib/constants.go index bfd364e3a..308b08e27 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -975,7 +975,7 @@ var DeSoMainnetParams = DeSoParams{ // value should equal the amount of work it takes to get from whatever start node you // choose and the tip. This is done by running once, letting it fail, and then rerunning // with the value it outputs. - BitcoinStartBlockNode: NewPoWBlockNode( + BitcoinStartBlockNode: NewBlockNode( nil, mustDecodeHexBlockHashBitcoin("000000000000000000092d577cc673bede24b6d7199ee69c67eeb46c18fc978c"), // Note the height is always one greater than the parent node. @@ -1240,7 +1240,7 @@ var DeSoTestnetParams = DeSoParams{ DeSoNanosPurchasedAtGenesis: uint64(6000000000000000), // See comment in mainnet config. - BitcoinStartBlockNode: NewPoWBlockNode( + BitcoinStartBlockNode: NewBlockNode( nil, mustDecodeHexBlockHashBitcoin("000000000000003aae8fb976056413aa1d863eb5bee381ff16c9642283b1da1a"), 1897056, diff --git a/lib/db_utils.go b/lib/db_utils.go index 2e35e6fd3..eaedb4b20 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -4670,44 +4670,44 @@ func DeleteUtxoOperationsForBlockWithTxn(txn *badger.Txn, snap *Snapshot, blockH return DBDeleteWithTxn(txn, snap, _DbKeyForUtxoOps(blockHash)) } -func blockNodeProofOfStakeCutoverMigrationTriggered(height uint32) bool { - return height >= GlobalDeSoParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight -} - func SerializeBlockNode(blockNode *BlockNode) ([]byte, error) { data := []byte{} + // Hash if blockNode.Hash == nil { return nil, fmt.Errorf("SerializeBlockNode: Hash cannot be nil") } data = append(data, blockNode.Hash[:]...) 
+ + // Height data = append(data, UintToBuf(uint64(blockNode.Height))...) - if !blockNodeProofOfStakeCutoverMigrationTriggered(blockNode.Height) { - // DifficultyTarget - if blockNode.DifficultyTarget == nil { - return nil, fmt.Errorf("SerializeBlockNode: DifficultyTarget cannot be nil") - } - data = append(data, blockNode.DifficultyTarget[:]...) - // CumWork - data = append(data, BigintToHash(blockNode.CumWork)[:]...) + // DifficultyTarget + if blockNode.DifficultyTarget == nil { + return nil, fmt.Errorf("SerializeBlockNode: DifficultyTarget cannot be nil") } + data = append(data, blockNode.DifficultyTarget[:]...) + + // CumWork + data = append(data, BigintToHash(blockNode.CumWork)[:]...) + + // Header serializedHeader, err := blockNode.Header.ToBytes(false) if err != nil { - return nil, fmt.Errorf("serializePoSBlockNode: Problem serializing header: %v", err) + return nil, errors.Wrapf(err, "SerializeBlockNode: Problem serializing header") } data = append(data, IntToBuf(int64(len(serializedHeader)))...) data = append(data, serializedHeader...) + // Status + // It's assumed this field is one byte long. data = append(data, UintToBuf(uint64(blockNode.Status))...) - if blockNodeProofOfStakeCutoverMigrationTriggered(blockNode.Height) { - data = append(data, UintToBuf(uint64(blockNode.CommittedStatus))...) 
- } + return data, nil } func DeserializeBlockNode(data []byte) (*BlockNode, error) { - blockNode := NewPoWBlockNode( + blockNode := NewBlockNode( nil, // Parent &BlockHash{}, // Hash 0, // Height @@ -4719,6 +4719,7 @@ func DeserializeBlockNode(data []byte) (*BlockNode, error) { ) rr := bytes.NewReader(data) + // Hash _, err := io.ReadFull(rr, blockNode.Hash[:]) if err != nil { @@ -4732,21 +4733,19 @@ func DeserializeBlockNode(data []byte) (*BlockNode, error) { } blockNode.Height = uint32(height) - if !blockNodeProofOfStakeCutoverMigrationTriggered(blockNode.Height) { - // DifficultyTarget - _, err = io.ReadFull(rr, blockNode.DifficultyTarget[:]) - if err != nil { - return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding DifficultyTarget") - } + // DifficultyTarget + _, err = io.ReadFull(rr, blockNode.DifficultyTarget[:]) + if err != nil { + return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding DifficultyTarget") + } - // CumWork - tmp := BlockHash{} - _, err = io.ReadFull(rr, tmp[:]) - if err != nil { - return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding CumWork") - } - blockNode.CumWork = HashToBigint(&tmp) + // CumWork + tmp := BlockHash{} + _, err = io.ReadFull(rr, tmp[:]) + if err != nil { + return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding CumWork") } + blockNode.CumWork = HashToBigint(&tmp) // Header payloadLen, err := ReadVarint(rr) @@ -4774,14 +4773,6 @@ func DeserializeBlockNode(data []byte) (*BlockNode, error) { } blockNode.Status = BlockStatus(uint32(status)) - // CommittedStatus - if blockNodeProofOfStakeCutoverMigrationTriggered(blockNode.Height) { - committedStatus, err := ReadUvarint(rr) - if err != nil { - return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding CommittedStatus") - } - blockNode.CommittedStatus = CommittedBlockStatus(committedStatus) - } return blockNode, nil } @@ -5043,7 +5034,8 @@ func GetHeightHashToNodeInfoWithTxn(txn *badger.Txn, snap *Snapshot, 
return nil } - blockNode, err := DeserializeBlockNode(nodeBytes) + var blockNode *BlockNode + blockNode, err = DeserializeBlockNode(nodeBytes) if err != nil { return nil } @@ -5124,7 +5116,7 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, genesisBlock := params.GenesisBlock diffTarget := MustDecodeHexBlockHash(params.MinDifficultyTargetHex) blockHash := MustDecodeHexBlockHash(params.GenesisBlockHashHex) - genesisNode := NewPoWBlockNode( + genesisNode := NewBlockNode( nil, // Parent blockHash, 0, // Height diff --git a/lib/load_test.go b/lib/load_test.go index a8afc8718..af413511c 100644 --- a/lib/load_test.go +++ b/lib/load_test.go @@ -190,7 +190,7 @@ func TestComputeMaxTPS(t *testing.T) { _, _ = newParams, newDB timeStart := time.Now() for _, blockToConnect := range blocksMined { - _, _, _, err := newChain.ProcessBlock(blockToConnect, true /*verifySignatures*/) + _, _, err := newChain.ProcessBlock(blockToConnect, true /*verifySignatures*/) require.NoError(err) } elapsedSecs := (time.Since(timeStart)).Seconds() @@ -237,7 +237,7 @@ func TestConnectBlocksLoadTest(t *testing.T) { pprof.StartCPUProfile(ff) timeStart := time.Now() for _, blockToConnect := range blocksMined { - _, _, _, err := newChain.ProcessBlock(blockToConnect, false /*verifySignatures*/) + _, _, err := newChain.ProcessBlock(blockToConnect, false /*verifySignatures*/) require.NoError(err) } elapsedSecs := (time.Since(timeStart)).Seconds() diff --git a/lib/miner.go b/lib/miner.go index 6a2453486..26fcf436b 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -239,7 +239,7 @@ func (desoMiner *DeSoMiner) MineAndProcessSingleBlock(threadIndex uint32, mempoo // will be informed about it. This will cause it to be relayed appropriately. verifySignatures := true // TODO(miner): Replace with a call to SubmitBlock. 
- isMainChain, isOrphan, _, err := desoMiner.BlockProducer.chain.ProcessBlock( + isMainChain, isOrphan, err := desoMiner.BlockProducer.chain.ProcessBlock( blockToMine, verifySignatures) glog.V(2).Infof("Called ProcessBlock: isMainChain=(%v), isOrphan=(%v), err=(%v)", isMainChain, isOrphan, err) diff --git a/lib/network.go b/lib/network.go index 6c0ed4f09..c77f3ea0e 100644 --- a/lib/network.go +++ b/lib/network.go @@ -20,8 +20,6 @@ import ( "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/deso-protocol/core/collections/bitset" - "github.com/deso-protocol/core/consensus" - "github.com/golang/glog" "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" @@ -1974,31 +1972,6 @@ type MsgDeSoHeader struct { ProposerVotePartialSignature *bls.Signature } -func (msg *MsgDeSoHeader) GetBlockHash() consensus.BlockHash { - hash, err := msg.Hash() - if err != nil { - glog.Errorf("MsgDeSoHeader.GetBlockHash: Problem hashing header: %v", err) - // TODO: Should we return nil? - return &BlockHash{} - } - return hash -} - -func (msg *MsgDeSoHeader) GetHeight() uint64 { - return msg.Height -} - -func (msg *MsgDeSoHeader) GetView() uint64 { - return msg.ProposedInView -} - -func (msg *MsgDeSoHeader) GetQC() consensus.QuorumCertificate { - if msg.ValidatorsTimeoutAggregateQC.isEmpty() { - return msg.ValidatorsVoteQC - } - return msg.ValidatorsTimeoutAggregateQC.ValidatorsHighQC -} - func HeaderSizeBytes() int { header := NewMessage(MsgTypeHeader) headerBytes, _ := header.ToBytes(false) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go deleted file mode 100644 index 5c96e2967..000000000 --- a/lib/pos_blockchain.go +++ /dev/null @@ -1,451 +0,0 @@ -package lib - -import ( - "github.com/deso-protocol/core/collections" - "github.com/golang/glog" - "github.com/pkg/errors" - "time" -) - -// processBlockPoS runs the Fast-Hotstuff block connect and commit rule as follows: -// 1. 
Determine if we're missing a parent block of this block and any of its parents from the block index. -// If so, return the hash of the missing block and add this block to the orphans list. -// 2. Validate on an incoming block, its header, its block height, the leader, and its QCs (vote or timeout) -// 3. Store the block in the block index and uncommitted blocks map. -// 4. Resolves forks within the last two blocks -// 5. Connect the block to the blockchain's tip -// 6. Run the commit rule - If applicable, flushes the incoming block's grandparent to the DB -// 7. Prune in-memory struct holding uncommitted block. -// 8. Update the currentView to this new block's view + 1 -func (bc *Blockchain) processBlockPoS(desoBlock *MsgDeSoBlock, currentView uint64, verifySignatures bool) (_success bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { - // TODO: Implement me - // 1. Determine if we're missing a parent block of this block and any of its parents from the block index. - // If so, process the orphan, but don't add to the block index or uncommitted block map. - missingBlockHash, err := bc.validateAncestorsExist(desoBlock) - if err != nil { - return false, false, nil, err - } - if missingBlockHash != nil { - missingBlockHashes := []*BlockHash{missingBlockHash} - var blockHash *BlockHash - blockHash, err = desoBlock.Header.Hash() - // If we fail to get the block hash, this block isn't valid at all, so we - // don't need to worry about adding it to the orphan list or block index. - if err != nil { - return false, true, missingBlockHashes, err - } - // ProcessOrphanBlock validates the block and adds it to the orphan list. - // TODO: update _validateOrphanBlock to perform additional validation required. - if err = bc.ProcessOrphanBlock(desoBlock, blockHash); err != nil { - return false, true, missingBlockHashes, err - } - return false, true, missingBlockHashes, nil - } - - // 2. Start with all sanity checks of the block. 
- // TODO: Check if err is for view > latest committed block view and <= latest uncommitted block. - // If so, we need to perform the rest of the validations and then add to our block index. - if err = bc.validateDeSoBlockPoS(desoBlock); err != nil { - - } - // TODO: Get validator set for current block height. Alternatively, we could do this in - // validateQC, but we may need the validator set elsewhere in this function anyway. - var validatorSet []*ValidatorEntry - // 1e. Validate QC - if err = bc.validateQC(desoBlock, validatorSet); err != nil { - return false, false, nil, err - } - - // @sofonias @piotr - should we move this to - // If the block doesn’t contain a ValidatorsTimeoutAggregateQC, then that indicates that we - // did NOT timeout in the previous view, which means we should just check that - // the QC corresponds to the previous view. - if desoBlock.Header.ValidatorsTimeoutAggregateQC.isEmpty() { - // The block is safe to vote on if it is a direct child of the previous - // block. This means that the parent and child blocks have consecutive - // views. We use the current block’s QC to find the view of the parent. - // TODO: Any processing related to the block's vote QC. - } else { - // If we have a ValidatorsTimeoutAggregateQC set on the block, it means the nodes decided - // to skip a view by sending TimeoutMessages to the leader, so we process - // the block accordingly. - // 1f. If timeout QC, validate that block hash isn't too far back from the latest. - if err := bc.validateTimeoutQC(desoBlock, validatorSet); err != nil { - return false, false, nil, err - } - // TODO: Get highest timeout QC from the block. - // We find the QC with the highest view among the QCs contained in the - // AggregateQC. - var highestTimeoutQC *QuorumCertificate - // TODO: Check if our local highestQC has a smaller view than the highestTimeoutQC. - // If our local highestQC has a smaller view than the highestTimeoutQC, - // we update our local highestQC. 
- _ = highestTimeoutQC - } - - // 2. We can now add this block to the block index since we have performed - // all basic validations. We can also add it to the uncommittedBlocksMap - if err := bc.addBlockToBlockIndex(desoBlock); err != nil { - return false, false, nil, err - } - - // 4. Handle reorgs if necessary - if bc.shouldReorg(desoBlock) { - if err = bc.handleReorg(desoBlock); err != nil { - return false, false, nil, err - } - } - - // Happy path - // 5. Add block to best chain. - if err = bc.addBlockToBestChain(desoBlock); err != nil { - return false, false, nil, err - } - - // 6. Commit grandparent if possible. - if err = bc.commitGrandparents(desoBlock); err != nil { - return false, false, nil, err - } - - // 7. Update in-memory struct holding uncommitted blocks. - if err = bc.pruneUncommittedBlocks(desoBlock); err != nil { - // We glog and continue here as failing to prune the uncommitted blocks map is not a - // critical error. - glog.Errorf("processBlockPoS: Error pruning uncommitted blocks: %v", err) - } - - // 8. Update current view to block's view + 1 - bc.updateCurrentView(desoBlock) - - return true, false, nil, nil -} - -// validateDeSoBlockPoS performs all basic validations on a block as it relates to -// the Blockchain struct. -func (bc *Blockchain) validateDeSoBlockPoS(desoBlock *MsgDeSoBlock) error { - // Surface Level validation of the block - if err := bc.validateBlockIntegrity(desoBlock); err != nil { - return err - } - // Validate Block Height - if err := bc.validateBlockHeight(desoBlock); err != nil { - return err - } - // Validate View - if err := bc.validateBlockView(desoBlock); err != nil { - // Check if err is for view > latest committed block view and <= latest uncommitted block. - // If so, we need to perform the rest of the validations and then add to our block index. - // TODO: implement check on error described above. Caller will handle this. 
- return err - } - // Validate Leader - if err := bc.validateBlockLeader(desoBlock); err != nil { - return err - } - return nil -} - -// validateBlockIntegrity validates the block at a surface level. It checks -// that the timestamp is valid, that the version of the header is valid, -// and other general integrity checks (such as not malformed). -func (bc *Blockchain) validateBlockIntegrity(desoBlock *MsgDeSoBlock) error { - // First make sure we have a non-nil header - if desoBlock.Header == nil { - return RuleErrorNilBlockHeader - } - - // Make sure we have a prevBlockHash - if desoBlock.Header.PrevBlockHash == nil { - return RuleErrorNilPrevBlockHash - } - - // Timestamp validation - - // Validate that the timestamp is not less than its parent. - parentBlock, exists := bc.blockIndex[*desoBlock.Header.PrevBlockHash] - if !exists { - // Note: this should never happen as we only call this function after - // we've validated that all ancestors exist in the block index. - return RuleErrorMissingParentBlock - } - if desoBlock.Header.TstampNanoSecs < parentBlock.Header.TstampNanoSecs { - return RuleErrorPoSBlockTstampNanoSecsTooOld - } - // TODO: Add support for putting the drift into global params. 
- if desoBlock.Header.TstampNanoSecs > uint64(time.Now().UnixNano())+bc.params.DefaultBlockTimestampDriftNanoSecs { - return RuleErrorPoSBlockTstampNanoSecsInFuture - } - - // Header validation - if desoBlock.Header.Version != HeaderVersion2 { - return RuleErrorInvalidPoSBlockHeaderVersion - } - - // Malformed block checks - // Require header to have either vote or timeout QC - isTimeoutQCEmpty := desoBlock.Header.ValidatorsTimeoutAggregateQC.isEmpty() - isVoteQCEmpty := desoBlock.Header.ValidatorsVoteQC.isEmpty() - if isTimeoutQCEmpty && isVoteQCEmpty { - return RuleErrorNoTimeoutOrVoteQC - } - - if !isTimeoutQCEmpty && !isVoteQCEmpty { - return RuleErrorBothTimeoutAndVoteQC - } - - if !isTimeoutQCEmpty && len(desoBlock.Txns) != 0 { - return RuleErrorTimeoutQCWithTransactions - } - - if desoBlock.Header.ProposerVotingPublicKey.IsEmpty() { - return RuleErrorInvalidProposerVotingPublicKey - } - - if desoBlock.Header.ProposerPublicKey == nil || desoBlock.Header.ProposerPublicKey.IsZeroPublicKey() { - return RuleErrorInvalidProposerPublicKey - } - - if desoBlock.Header.ProposerRandomSeedHash.isEmpty() { - return RuleErrorInvalidRandomSeedHash - } - - merkleRoot := desoBlock.Header.TransactionMerkleRoot - - // We only want to check the merkle root if we have more than 0 transactions. - if len(desoBlock.Txns) > 0 { - if merkleRoot == nil { - return RuleErrorNilMerkleRoot - } - computedMerkleRoot, _, err := ComputeMerkleRoot(desoBlock.Txns) - if err != nil { - return errors.Wrapf(err, "validateBlockIntegrity: Problem computing merkle root") - } - if !merkleRoot.IsEqual(computedMerkleRoot) { - return RuleErrorInvalidMerkleRoot - } - } else { - if merkleRoot != nil { - return RuleErrorNoTxnsWithMerkleRoot - } - } - - // TODO: What other checks do we need to do here? - return nil -} - -// validateBlockHeight validates the block height for a given block. First, -// it checks that we've passed the PoS cutover fork height. 
Then it checks -// that this block height is exactly one greater than its parent's block height. -func (bc *Blockchain) validateBlockHeight(desoBlock *MsgDeSoBlock) error { - blockHeight := desoBlock.Header.Height - if blockHeight < uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { - return RuleErrorPoSBlockBeforeCutoverHeight - } - // Validate that the block height is exactly one greater than its parent. - parentBlock, exists := bc.blockIndex[*desoBlock.Header.PrevBlockHash] - if !exists { - // Note: this should never happen as we only call this function after - // we've validated that all ancestors exist in the block index. - return RuleErrorMissingParentBlock - } - if desoBlock.Header.Height != parentBlock.Header.Height+1 { - return RuleErrorInvalidPoSBlockHeight - } - return nil -} - -// validateBlockView validates the view for a given block. First, it checks that -// the view is greater than the latest committed block view. If not, -// we return an error indicating that we'll never accept this block. Next, -// it checks that the view is less than or equal to its parent. -// If not, we return an error indicating that we'll want to add this block as an -// orphan. Then it will check if that the view is exactly one greater than the -// latest uncommitted block if we have an regular vote QC. If this block has a -// timeout QC, it will check that the view is at least greater than the latest -// uncommitted block's view + 1. -func (bc *Blockchain) validateBlockView(desoBlock *MsgDeSoBlock) error { - // Validate that the view is greater than the latest uncommitted block. - parentBlock, exists := bc.blockIndex[*desoBlock.Header.PrevBlockHash] - if !exists { - // Note: this should never happen as we only call this function after - // we've validated that all ancestors exist in the block index. 
- return RuleErrorMissingParentBlock - } - // If our current block has a vote QC, then we need to validate that the - // view is exactly one greater than the latest uncommitted block. - if desoBlock.Header.ValidatorsTimeoutAggregateQC.isEmpty() { - if desoBlock.Header.ProposedInView != parentBlock.Header.ProposedInView+1 { - return RuleErrorPoSVoteBlockViewNotOneGreaterThanParent - } - } else { - // If our current block has a timeout QC, then we need to validate that the - // view is strictly greater than the latest uncommitted block's view. - if desoBlock.Header.ProposedInView <= parentBlock.Header.ProposedInView { - return RuleErrorPoSTimeoutBlockViewNotGreaterThanParent - } - } - return nil -} - -// validateBlockLeader validates that the proposer is the expected proposer for the -// block height + view number pair. -func (bc *Blockchain) validateBlockLeader(desoBlock *MsgDeSoBlock) error { - // TODO: Implement me - return errors.New("IMPLEMENT ME") -} - -// validateQC validates that the QC of this block is valid, meaning a super majority -// of the validator set has voted (or timed out). Assumes ValidatorEntry list is sorted. -func (bc *Blockchain) validateQC(desoBlock *MsgDeSoBlock, validatorSet []*ValidatorEntry) error { - // TODO: Implement me - return errors.New("IMPLEMENT ME") -} - -// validateTimeoutQC validates that the parent block hash is not too far back from the latest. -// Specifically, it checks that the parent block hash is at least the latest committed block. -func (bc *Blockchain) validateTimeoutQC(desoBlock *MsgDeSoBlock, validatorSet []*ValidatorEntry) error { - // TODO: Implement me - return errors.New("IMPLEMENT ME") -} - -// validateAncestorsExist checks that all ancestors of this block exist in the block index. -// If an ancestor is not found, we'll return the block hash of the missing ancestor so the -// caller can request this block. 
-func (bc *Blockchain) validateAncestorsExist(desoBlock *MsgDeSoBlock) (_missingBlockHash *BlockHash, _err error) { - // Notes: starting from the block passed in, we'll look for the parent in the block index. - // 1. If the parent does not appear in the block index, we'll return the parent's hash. - // 2. If the parent exists in the block index AND is in the best chain, we can safely assume - // that all ancestors exist in the block index. - // 3. If the parent exists in the block index but is not in the best chain, we repeat from - // step 1 with the parent as the block passed in. - // TODO: Implement me - return nil, errors.New("IMPLEMENT ME") -} - -// addBlockToBlockIndex adds the block to the block index and uncommitted blocks map. -func (bc *Blockchain) addBlockToBlockIndex(desoBlock *MsgDeSoBlock) error { - hash, err := desoBlock.Hash() - if err != nil { - return errors.Wrapf(err, "addBlockToBlockIndex: Problem hashing block %v", desoBlock) - } - // Need to get parent block node from block index - prevBlock := bc.blockIndex[*desoBlock.Header.PrevBlockHash] - // TODO: What should the block status be here? Validated? What combo is correct? Need to check in with Diamondhands. - bc.blockIndex[*hash] = NewPoSBlockNode(prevBlock, hash, uint32(desoBlock.Header.Height), desoBlock.Header, StatusHeaderValidated|StatusBlockValidated, UNCOMMITTED) - - bc.uncommittedBlocksMap[*hash] = desoBlock - return nil -} - -// shouldReorg determines if we should reorg to the block provided. We should reorg if -// this block has a higher QC than our current tip and extends from either the committed -// tip OR any uncommitted safe block in our block index. -func (bc *Blockchain) shouldReorg(desoBlock *MsgDeSoBlock) bool { - return false -} - -// handleReorg handles a reorg to the block provided. It does not check whether or not we should -// perform a reorg, so this should be called after shouldReorg. It will do the following: -// 1. 
Update the bestChain and bestChainMap by removing blocks that are not uncommitted ancestor of this block. -// 2. Update the bestChain and bestChainMap by adding blocks that are uncommitted ancestors of this block. -// Note: addBlockToBestChain will be called after this to handle adding THIS block to the best chain. -func (bc *Blockchain) handleReorg(desoBlock *MsgDeSoBlock) error { - // TODO: Implement me. - return errors.New("IMPLEMENT ME") -} - -// addBlockToBestChain adds the block to the best chain. -func (bc *Blockchain) addBlockToBestChain(desoBlock *MsgDeSoBlock) error { - // TODO: Implement me. - return errors.New("IMPLEMENT ME") -} - -// pruneUncommittedBlocks prunes the in-memory struct holding uncommitted blocks. -func (bc *Blockchain) pruneUncommittedBlocks(desoBlock *MsgDeSoBlock) error { - // TODO: Implement me. - return errors.New("IMPLEMENT ME") -} - -// commitGrandparents commits the grandparent of the block if possible. -// Specifically, this updates the CommittedBlockStatus of its grandparent -// and flushes the view after connecting the grandparent block to the DB. -func (bc *Blockchain) commitGrandparents(desoBlock *MsgDeSoBlock) error { - // TODO: Implement me. - return errors.New("IMPLEMENT ME") -} - -// updateCurrentView updates the current view to the block's view + 1. -func (bc *Blockchain) updateCurrentView(desoBlock *MsgDeSoBlock) { - // TODO: Implement me. - panic(errors.New("IMPLEMENT ME")) -} - -func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { - // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks - highestCommittedBlock, committedBlockIndex := bc.getHighestCommittedBlock() - if highestCommittedBlock == nil { - // This is an edge case we'll never hit in practice since all the PoW blocks - // are committed. 
- return nil, errors.New("GetUncommittedTipView: No committed blocks found") - } - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) - if err != nil { - return nil, errors.Wrapf(err, "GetUncommittedTipView: Problem initializing UtxoView") - } - if committedBlockIndex == len(bc.bestChain)-1 { - return utxoView, nil - } - for ii := committedBlockIndex + 1; ii < len(bc.bestChain); ii++ { - // We need to get these blocks from the uncommitted blocks map - fullBlock, exists := bc.uncommittedBlocksMap[*bc.bestChain[ii].Hash] - if !exists { - return nil, errors.Errorf("GetUncommittedTipView: Block %v not found in block index", bc.bestChain[ii].Hash) - } - txnHashes := collections.Transform(fullBlock.Txns, func(txn *MsgDeSoTxn) *BlockHash { - return txn.Hash() - }) - _, err = utxoView.ConnectBlock(fullBlock, txnHashes, false, nil, fullBlock.Header.Height) - if err != nil { - hash, _ := fullBlock.Hash() - return nil, errors.Wrapf(err, "GetUncommittedTipView: Problem connecting block hash %v", hash.String()) - } - } - return utxoView, nil -} - -func (bc *Blockchain) GetBestChainTip() *BlockNode { - return bc.bestChain[len(bc.bestChain)-1] -} - -func (bc *Blockchain) getHighestCommittedBlock() (*BlockNode, int) { - for ii := len(bc.bestChain) - 1; ii >= 0; ii-- { - if bc.bestChain[ii].CommittedStatus == COMMITTED { - return bc.bestChain[ii], ii - } - } - return nil, 0 -} - -const ( - RuleErrorNilBlockHeader RuleError = "RuleErrorNilBlockHeader" - RuleErrorNilPrevBlockHash RuleError = "RuleErrorNilPrevBlockHash" - RuleErrorPoSBlockTstampNanoSecsTooOld RuleError = "RuleErrorPoSBlockTstampNanoSecsTooOld" - RuleErrorPoSBlockTstampNanoSecsInFuture RuleError = "RuleErrorPoSBlockTstampNanoSecsInFuture" - RuleErrorInvalidPoSBlockHeaderVersion RuleError = "RuleErrorInvalidPoSBlockHeaderVersion" - RuleErrorNoTimeoutOrVoteQC RuleError = "RuleErrorNoTimeoutOrVoteQC" - RuleErrorBothTimeoutAndVoteQC RuleError = "RuleErrorBothTimeoutAndVoteQC" - 
RuleErrorTimeoutQCWithTransactions RuleError = "RuleErrorTimeoutQCWithTransactions" - RuleErrorMissingParentBlock RuleError = "RuleErrorMissingParentBlock" - RuleErrorNilMerkleRoot RuleError = "RuleErrorNilMerkleRoot" - RuleErrorInvalidMerkleRoot RuleError = "RuleErrorInvalidMerkleRoot" - RuleErrorNoTxnsWithMerkleRoot RuleError = "RuleErrorNoTxnsWithMerkleRoot" - RuleErrorInvalidProposerVotingPublicKey RuleError = "RuleErrorInvalidProposerVotingPublicKey" - RuleErrorInvalidProposerPublicKey RuleError = "RuleErrorInvalidProposerPublicKey" - RuleErrorInvalidRandomSeedHash RuleError = "RuleErrorInvalidRandomSeedHash" - - RuleErrorInvalidPoSBlockHeight RuleError = "RuleErrorInvalidPoSBlockHeight" - RuleErrorPoSBlockBeforeCutoverHeight RuleError = "RuleErrorPoSBlockBeforeCutoverHeight" - - RuleErrorPoSVoteBlockViewNotOneGreaterThanParent RuleError = "RuleErrorPoSVoteBlockViewNotOneGreaterThanParent" - RuleErrorPoSTimeoutBlockViewNotGreaterThanParent RuleError = "RuleErrorPoSTimeoutBlockViewNotGreaterThanParent" -) diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go deleted file mode 100644 index 0d9d8ee14..000000000 --- a/lib/pos_blockchain_test.go +++ /dev/null @@ -1,441 +0,0 @@ -//go:build relic - -package lib - -import ( - "bytes" - "github.com/deso-protocol/core/bls" - "github.com/deso-protocol/core/collections/bitset" - "github.com/stretchr/testify/require" - "testing" - "time" -) - -func TestValidateBlockIntegrity(t *testing.T) { - bc, params, _ := NewTestBlockchain(t) - // TODO: update for PoS - mempool, miner := NewTestMiner(t, bc, params, true) - - // Mine a few blocks to give the senderPkString some money. - var err error - for ii := 0; ii < 10; ii++ { - _, err = miner.MineAndProcessSingleBlock(0, mempool) - require.NoError(t, err) - } - // Create a block with a valid header. 
- randomPayload := RandomBytes(256) - randomSeedHashBytes := RandomBytes(32) - randomSeedHash := &RandomSeedHash{} - _, err = randomSeedHash.FromBytes(randomSeedHashBytes) - require.NoError(t, err) - randomBLSPrivateKey := _generateRandomBLSPrivateKey(t) - signature, err := randomBLSPrivateKey.Sign(randomPayload) - require.NoError(t, err) - block := &MsgDeSoBlock{ - Header: &MsgDeSoHeader{ - Version: 2, - TstampNanoSecs: bc.GetBestChainTip().Header.TstampNanoSecs + 10, - Height: 2, - ProposedInView: 2, - PrevBlockHash: bc.GetBestChainTip().Hash, - ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ - TimedOutView: 2, - ValidatorsHighQC: &QuorumCertificate{ - BlockHash: bc.GetBestChainTip().Hash, - ProposedInView: 1, - ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ - Signature: signature, - SignersList: bitset.NewBitset(), - }, - }, - ValidatorsTimeoutHighQCViews: []uint64{28934}, - ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ - Signature: signature, - SignersList: bitset.NewBitset(), - }, - }, - ProposerRandomSeedHash: randomSeedHash, - ProposerPublicKey: NewPublicKey(RandomBytes(33)), - ProposerVotingPublicKey: randomBLSPrivateKey.PublicKey(), - }, - Txns: nil, - } - - // Validate the block with a valid timeout QC and header. - err = bc.validateBlockIntegrity(block) - // There should be no error. - require.Nil(t, err) - - // Timeout QC shouldn't have any transactions - block.Txns = []*MsgDeSoTxn{ - { // The validation just checks the length of transactions. - // Connecting the block elsewhere will ensure that the transactions themselves are valid. 
- TxInputs: nil, - }, - } - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorTimeoutQCWithTransactions) - - // Timeout QC shouldn't have a merkle root - block.Txns = nil - block.Header.TransactionMerkleRoot = &ZeroBlockHash - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorNoTxnsWithMerkleRoot) - - // Make sure block can't have both timeout and vote QC. - validatorVoteQC := &QuorumCertificate{ - BlockHash: bc.GetBestChainTip().Hash, - ProposedInView: 1, - ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ - Signature: signature, - SignersList: bitset.NewBitset(), - }, - } - block.Header.ValidatorsVoteQC = validatorVoteQC - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorBothTimeoutAndVoteQC) - - // Make sure block has either timeout or vote QC. - block.Header.ValidatorsTimeoutAggregateQC = nil - block.Header.ValidatorsVoteQC = nil - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorNoTimeoutOrVoteQC) - - // Reset validator vote QC. - block.Header.ValidatorsVoteQC = validatorVoteQC - - // Validate the block with a valid vote QC and header. Vote QCs must have at least 1 transaction. - txn := _assembleBasicTransferTxnFullySigned(t, bc, 100, 1000, - senderPkString, recipientPkString, senderPrivString, nil) - block.Txns = []*MsgDeSoTxn{ - // The validation just checks the length of transactions. - // Connecting the block elsewhere will ensure that the transactions themselves are valid. - txn, - } - merkleRoot, _, err := ComputeMerkleRoot(block.Txns) - require.NoError(t, err) - block.Header.TransactionMerkleRoot = merkleRoot - // There should be no error. 
- err = bc.validateBlockIntegrity(block) - require.Nil(t, err) - - // Block must have non-nil Merkle root iff we have non-zero transactions - block.Header.TransactionMerkleRoot = nil - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorNilMerkleRoot) - - // Block must have a matching merkle root - block.Header.TransactionMerkleRoot = &ZeroBlockHash - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorInvalidMerkleRoot) - - // Vote QC with no transactions and no merkle root is valid - block.Header.TransactionMerkleRoot = nil - block.Txns = nil - err = bc.validateBlockIntegrity(block) - require.Nil(t, err) - - // Vote QC with no transactions but includes a merkle is invalid - block.Header.TransactionMerkleRoot = merkleRoot - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorNoTxnsWithMerkleRoot) - - // Reset transactions - block.Txns = []*MsgDeSoTxn{txn} - - // Block must have valid proposer voting public key - block.Header.ProposerVotingPublicKey = nil - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorInvalidProposerVotingPublicKey) - - block.Header.ProposerVotingPublicKey = &bls.PublicKey{} - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorInvalidProposerVotingPublicKey) - - // Reset proposer voting public key - block.Header.ProposerVotingPublicKey = randomBLSPrivateKey.PublicKey() - - // Block must have valid proposer public key - block.Header.ProposerPublicKey = nil - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorInvalidProposerPublicKey) - - block.Header.ProposerPublicKey = &ZeroPublicKey - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorInvalidProposerPublicKey) - - block.Header.ProposerPublicKey = NewPublicKey(RandomBytes(33)) - - // Block must have valid proposer random seed hash - block.Header.ProposerRandomSeedHash = nil - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, 
RuleErrorInvalidRandomSeedHash) - - block.Header.ProposerRandomSeedHash = &RandomSeedHash{} - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorInvalidRandomSeedHash) - - block.Header.ProposerRandomSeedHash = randomSeedHash - - // Timestamp validations - // Block timestamp must be greater than the previous block timestamp - block.Header.TstampNanoSecs = bc.GetBestChainTip().Header.GetTstampSecs() - 1 - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorPoSBlockTstampNanoSecsTooOld) - - // Block timestamps can't be in the future. - block.Header.TstampNanoSecs = uint64(time.Now().UnixNano() + (11 * time.Minute).Nanoseconds()) - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorPoSBlockTstampNanoSecsInFuture) - - // Revert the Header's timestamp - block.Header.TstampNanoSecs = bc.GetBestChainTip().Header.TstampNanoSecs + 10 - - // Block Header version must be 2 - block.Header.Version = 1 - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorInvalidPoSBlockHeaderVersion) - - // Revert block header version - block.Header.Version = 2 - - // Nil prev block hash not allowed - block.Header.PrevBlockHash = nil - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorNilPrevBlockHash) - - // Parent must exist in the block index. 
- block.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorMissingParentBlock) - - // Nil block header not allowed - block.Header = nil - err = bc.validateBlockIntegrity(block) - require.Equal(t, err, RuleErrorNilBlockHeader) -} - -func TestValidateBlockHeight(t *testing.T) { - bc, _, _ := NewTestBlockchain(t) - hash := NewBlockHash(RandomBytes(32)) - nowTimestamp := uint64(time.Now().UnixNano()) - genesisBlock := NewPoSBlockNode(nil, hash, 1, &MsgDeSoHeader{ - Version: 2, - TstampNanoSecs: nowTimestamp - uint64(time.Minute.Nanoseconds()), - Height: 1, - ProposedInView: 1, - ValidatorsVoteQC: nil, - ValidatorsTimeoutAggregateQC: nil, - }, StatusBlockValidated, UNCOMMITTED) - bc.bestChain = []*BlockNode{genesisBlock} - bc.blockIndex[*genesisBlock.Hash] = genesisBlock - // Create a block with a valid header. - randomPayload := RandomBytes(256) - randomBLSPrivateKey := _generateRandomBLSPrivateKey(t) - signature, err := randomBLSPrivateKey.Sign(randomPayload) - require.NoError(t, err) - block := &MsgDeSoBlock{ - Header: &MsgDeSoHeader{ - PrevBlockHash: genesisBlock.Hash, - Version: 2, - TstampNanoSecs: uint64(time.Now().UnixNano()) - 10, - Height: 2, - ProposedInView: 1, - ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ - TimedOutView: 2, - ValidatorsHighQC: &QuorumCertificate{ - BlockHash: bc.GetBestChainTip().Hash, - ProposedInView: bc.GetBestChainTip().Header.ProposedInView, - ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ - Signature: signature, - SignersList: bitset.NewBitset(), - }, - }, - ValidatorsTimeoutHighQCViews: []uint64{28934}, - ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ - Signature: signature, - SignersList: bitset.NewBitset(), - }, - }, - }, - Txns: nil, - } - - // validate that we've cutover to PoS - bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 3 - err = bc.validateBlockHeight(block) - require.Equal(t, 
err, RuleErrorPoSBlockBeforeCutoverHeight) - - // Update the fork height - bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 0 - - err = bc.validateBlockHeight(block) - require.Nil(t, err) - - block.Header.Height = 1 - err = bc.validateBlockHeight(block) - require.Equal(t, err, RuleErrorInvalidPoSBlockHeight) - - block.Header.Height = 2 - bc.blockIndex = map[BlockHash]*BlockNode{} - err = bc.validateBlockHeight(block) - require.Equal(t, err, RuleErrorMissingParentBlock) -} - -func TestAddBlockToBlockIndex(t *testing.T) { - bc, _, _ := NewTestBlockchain(t) - hash := NewBlockHash(RandomBytes(32)) - genesisBlockNode := NewPoSBlockNode(nil, hash, 1, &MsgDeSoHeader{ - Version: 2, - Height: 1, - ProposedInView: 1, - ValidatorsVoteQC: nil, - ValidatorsTimeoutAggregateQC: nil, - }, StatusBlockValidated, COMMITTED) - _ = genesisBlockNode - derefedHash := *hash - bc.blockIndex = map[BlockHash]*BlockNode{ - derefedHash: genesisBlockNode, - } - proposerVotingPublicKey := _generateRandomBLSPrivateKey(t) - dummySig, err := proposerVotingPublicKey.Sign(RandomBytes(32)) - require.NoError(t, err) - block := &MsgDeSoBlock{ - Header: &MsgDeSoHeader{ - Version: 2, - PrevBlockHash: hash, - TstampNanoSecs: uint64(time.Now().UnixNano()), - Height: 2, - ProposerPublicKey: NewPublicKey(RandomBytes(33)), - ProposerVotingPublicKey: proposerVotingPublicKey.PublicKey(), - ProposerRandomSeedHash: &RandomSeedHash{}, - ProposedInView: 1, - ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ - TimedOutView: 2, - ValidatorsHighQC: &QuorumCertificate{ - BlockHash: NewBlockHash(RandomBytes(32)), - ProposedInView: 1, - ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ - SignersList: bitset.NewBitset(), - Signature: dummySig, - }, - }, - ValidatorsTimeoutHighQCViews: []uint64{28934}, - ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ - SignersList: bitset.NewBitset(), - Signature: dummySig, - }, - }, - ProposerVotePartialSignature: dummySig, - }, - 
Txns: nil, - } - err = bc.addBlockToBlockIndex(block) - require.Nil(t, err) - newHash, err := block.Hash() - require.NoError(t, err) - // Check the block index - blockNode, exists := bc.blockIndex[*newHash] - require.True(t, exists) - require.True(t, bytes.Equal(blockNode.Hash[:], newHash[:])) - - // Check the uncommitted blocks map - uncommittedBlock, uncommittedExists := bc.uncommittedBlocksMap[*newHash] - require.True(t, uncommittedExists) - uncommittedBytes, err := uncommittedBlock.ToBytes(false) - require.NoError(t, err) - origBlockBytes, err := block.ToBytes(false) - require.NoError(t, err) - require.True(t, bytes.Equal(uncommittedBytes, origBlockBytes)) -} - -func TestValidateBlockView(t *testing.T) { - bc, _, _ := NewTestBlockchain(t) - hash1 := NewBlockHash(RandomBytes(32)) - hash2 := NewBlockHash(RandomBytes(32)) - genesisNode := NewPoSBlockNode(nil, hash1, 1, &MsgDeSoHeader{ - Version: 2, - Height: 1, - ProposedInView: 1, - }, StatusBlockValidated, COMMITTED) - block2 := NewPoSBlockNode(genesisNode, hash2, 2, &MsgDeSoHeader{ - Version: 2, - Height: 2, - ProposedInView: 2, - ValidatorsVoteQC: nil, - ValidatorsTimeoutAggregateQC: nil, - }, StatusBlockValidated, UNCOMMITTED) - bc.bestChain = []*BlockNode{ - genesisNode, - block2, - } - bc.blockIndex = map[BlockHash]*BlockNode{ - *hash1: genesisNode, - *hash2: block2, - } - randomPayload := RandomBytes(256) - randomBLSPrivateKey := _generateRandomBLSPrivateKey(t) - signature, err := randomBLSPrivateKey.Sign(randomPayload) - voteQC := &QuorumCertificate{ - BlockHash: bc.GetBestChainTip().Hash, - ProposedInView: 1, - ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ - Signature: signature, - SignersList: bitset.NewBitset(), - }, - } - require.NoError(t, err) - block := &MsgDeSoBlock{ - Header: &MsgDeSoHeader{ - PrevBlockHash: hash2, - Version: 2, - TstampNanoSecs: uint64(time.Now().UnixNano()) - 10, - Height: 2, - ProposedInView: 1, - ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ 
- TimedOutView: 2, - ValidatorsHighQC: voteQC, - ValidatorsTimeoutHighQCViews: []uint64{28934}, - ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ - Signature: signature, - SignersList: bitset.NewBitset(), - }, - }, - }, - Txns: nil, - } - - block.Header.ProposedInView = 2 - - // Blocks with timeout QCs must have a view strictly greater than the parent. - err = bc.validateBlockView(block) - require.Equal(t, err, RuleErrorPoSTimeoutBlockViewNotGreaterThanParent) - - // Any arbitrary number GREATER than the parent's view is valid. - block.Header.ProposedInView = 10 - err = bc.validateBlockView(block) - require.Nil(t, err) - - // Now we set the timeout QC to nil and provide a vote QC, with height = 2 - block.Header.ValidatorsTimeoutAggregateQC = nil - block.Header.ValidatorsVoteQC = voteQC - block.Header.ProposedInView = 2 - err = bc.validateBlockView(block) - require.Equal(t, err, RuleErrorPoSVoteBlockViewNotOneGreaterThanParent) - - // An arbitrary number greater than its parents should fail. - block.Header.ProposedInView = 10 - err = bc.validateBlockView(block) - require.Equal(t, err, RuleErrorPoSVoteBlockViewNotOneGreaterThanParent) - - // Exactly one great w/ vote QC should pass. - block.Header.ProposedInView = 3 - err = bc.validateBlockView(block) - require.Nil(t, err) -} - -func _generateRandomBLSPrivateKey(t *testing.T) *bls.PrivateKey { - privateKey, err := bls.NewPrivateKey() - require.NoError(t, err) - return privateKey -} diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index de7034abd..2a7f8439f 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -13,10 +13,8 @@ import ( // type EpochEntry struct { - EpochNumber uint64 - InitialBlockHeight uint64 - InitialView uint64 - FinalBlockHeight uint64 + EpochNumber uint64 + FinalBlockHeight uint64 // This captures the on-chain timestamp when this epoch entry was created. 
This does not // represent the timestamp for first block of the epoch, but rather when this epoch entry @@ -27,8 +25,6 @@ type EpochEntry struct { func (epochEntry *EpochEntry) Copy() *EpochEntry { return &EpochEntry{ EpochNumber: epochEntry.EpochNumber, - InitialBlockHeight: epochEntry.InitialBlockHeight, - InitialView: epochEntry.InitialView, FinalBlockHeight: epochEntry.FinalBlockHeight, CreatedAtBlockTimestampNanoSecs: epochEntry.CreatedAtBlockTimestampNanoSecs, } @@ -37,8 +33,6 @@ func (epochEntry *EpochEntry) Copy() *EpochEntry { func (epochEntry *EpochEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte data = append(data, UintToBuf(epochEntry.EpochNumber)...) - data = append(data, UintToBuf(epochEntry.InitialBlockHeight)...) - data = append(data, UintToBuf(epochEntry.InitialView)...) data = append(data, UintToBuf(epochEntry.FinalBlockHeight)...) data = append(data, UintToBuf(epochEntry.CreatedAtBlockTimestampNanoSecs)...) return data @@ -53,18 +47,6 @@ func (epochEntry *EpochEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *b return errors.Wrapf(err, "EpochEntry.Decode: Problem reading EpochNumber: ") } - // InitialBlockHeight - epochEntry.InitialBlockHeight, err = ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "EpochEntry.Decode: Problem reading InitialBlockHeight: ") - } - - // InitialView - epochEntry.InitialView, err = ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "EpochEntry.Decode: Problem reading InitialView: ") - } - // FinalBlockHeight epochEntry.FinalBlockHeight, err = ReadUvarint(rr) if err != nil { @@ -117,8 +99,6 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { // case prior to the first execution of the OnEpochCompleteHook. 
genesisEpochEntry := &EpochEntry{ EpochNumber: 0, - InitialBlockHeight: 0, - InitialView: 0, FinalBlockHeight: uint64(bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight), CreatedAtBlockTimestampNanoSecs: 0, } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 9a3a3f5dd..9ecb90636 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -47,10 +47,9 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) // - Snapshot the current top N stake entries, who will receive staking rewards. // // Step 3: Roll over to the next epoch. -// - Compute the start block height and view number for the next epoch. // - Compute the final block height for the next epoch. // - Update CurrentEpochEntry to the next epoch's. -func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, view uint64, blockTimestampNanoSecs uint64) error { +func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, blockTimestampNanoSecs uint64) error { // Sanity-check that the current block is the last block in the current epoch. // // Note that this will also return true if we're currently at the ProofOfStake1StateSetupBlockHeight @@ -85,7 +84,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, view uint64, block // TODO: Evict old snapshots when safe to do so. // Step 3: Roll Over to The Next Epoch - if err := bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight, view, blockTimestampNanoSecs); err != nil { + if err := bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight, blockTimestampNanoSecs); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: ") } @@ -139,7 +138,7 @@ func (bav *UtxoView) runEpochCompleteSnapshotGeneration(epochNumber uint64) erro } // Updates the currentEpochEntry to the next epoch's. 
-func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeight uint64, view uint64, blockTimestampNanoSecs uint64) error { +func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeight uint64, blockTimestampNanoSecs uint64) error { // Retrieve the SnapshotGlobalParamsEntry to determine the next epoch's final block height. We use the // snapshot global params here because the next epoch begin immediately, and its length is used in the PoS // consensus. The validator set for the next epoch needs to be in agreement on the length of the epoch @@ -158,8 +157,6 @@ func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeig // Roll-over a new epoch by setting a new CurrentEpochEntry. nextEpochEntry := &EpochEntry{ EpochNumber: epochNumber + 1, - InitialBlockHeight: blockHeight + 1, - InitialView: view + 1, FinalBlockHeight: nextEpochFinalBlockHeight, CreatedAtBlockTimestampNanoSecs: blockTimestampNanoSecs, } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 16fe99fa2..269c25486 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -84,11 +84,6 @@ func TestRunEpochCompleteHook(t *testing.T) { blockHeight += 1 return blockHeight } - viewNumber := uint64(0) - incrViewNumber := func() uint64 { - viewNumber += 1 - return viewNumber - } // Seed a CurrentEpochEntry. tmpUtxoView := _newUtxoView(testMeta) @@ -145,7 +140,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook() with no validators or stakers. 
- _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { // Test the state of the snapshots after running our first OnEpochCompleteHook @@ -156,13 +151,6 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(1)) - // Test CurrentEpochEntry - currentEpoch, err := _newUtxoView(testMeta).GetCurrentEpochEntry() - require.NoError(t, err) - require.NotNil(t, currentEpoch) - require.Equal(t, currentEpoch.InitialBlockHeight, uint64(13)) - require.Equal(t, currentEpoch.InitialView, uint64(2)) - // Test SnapshotGlobalParamsEntry is nil. snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() require.NoError(t, err) @@ -194,7 +182,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { // Test CurrentEpochNumber. @@ -202,13 +190,6 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(2)) - // Test CurrentEpochEntry - currentEpoch, err := _newUtxoView(testMeta).GetCurrentEpochEntry() - require.NoError(t, err) - require.NotNil(t, currentEpoch) - require.Equal(t, currentEpoch.InitialBlockHeight, uint64(14)) - require.Equal(t, currentEpoch.InitialView, uint64(3)) - // Test SnapshotGlobalParamsEntry is populated. snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() require.NoError(t, err) @@ -222,7 +203,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { // Test CurrentEpochNumber. 
@@ -230,13 +211,6 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(3)) - // Test CurrentEpochEntry - currentEpoch, err := _newUtxoView(testMeta).GetCurrentEpochEntry() - require.NoError(t, err) - require.NotNil(t, currentEpoch) - require.Equal(t, currentEpoch.InitialBlockHeight, uint64(15)) - require.Equal(t, currentEpoch.InitialView, uint64(4)) - // Test SnapshotGlobalParamsEntry is populated. snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() require.NoError(t, err) @@ -301,7 +275,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(800)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // Snapshot m5 still has 600 staked. validatorEntry, err = _newUtxoView(testMeta).GetSnapshotValidatorSetEntryByPKID(m5PKID) @@ -317,7 +291,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(600)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // Snapshot m5 now has 800 staked. validatorEntry, err = _newUtxoView(testMeta).GetSnapshotValidatorSetEntryByPKID(m5PKID) @@ -351,7 +325,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, _newUtxoView(testMeta).GlobalParamsEntry.StakeLockupEpochDuration, uint64(2)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // Snapshot StakeLockupEpochDuration is still 3. 
snapshotGlobalsParamsEntry, err = _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() @@ -359,7 +333,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, snapshotGlobalsParamsEntry.StakeLockupEpochDuration, uint64(3)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // Snapshot StakeLockupEpochDuration is updated to 2. snapshotGlobalsParamsEntry, err = _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() @@ -378,7 +352,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // m0 is still in the snapshot validator set. snapshotValidatorSet, err = _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(10) @@ -390,7 +364,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Len(t, snapshotStakeEntries, 7) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // m0 is dropped from the snapshot validator set. snapshotValidatorSet, err = _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(10) @@ -403,11 +377,11 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { // Run OnEpochCompleteHook() - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { // Test jailing inactive validators. @@ -466,7 +440,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getNumSnapshotStakes(), 6) // Run OnEpochCompleteHook(). 
- _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // In epoch 12, all current registered validators have Status = Jailed. // In snapshot 10, all snapshot validators have Status = Active. @@ -480,7 +454,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getCurrentValidator(m6PKID).JailedAtEpochNumber, uint64(11)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // In epoch 13, all current registered validators have Status = Jailed. // In snapshot 11, the validator set is empty because all validators have Status = Jailed. @@ -491,7 +465,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Empty(t, getNumSnapshotStakes()) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) // In epoch 14, all current registered validators have Status = Jailed. // In snapshot 12, the validator set is empty because all validators have Status = Jailed. @@ -528,11 +502,6 @@ func TestStakingRewardDistribution(t *testing.T) { blockHeight += 1 return blockHeight } - viewNumber := uint64(0) - incrViewNumber := func() uint64 { - viewNumber += 1 - return viewNumber - } // Seed a CurrentEpochEntry. tmpUtxoView := _newUtxoView(testMeta) @@ -589,12 +558,12 @@ func TestStakingRewardDistribution(t *testing.T) { { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { @@ -642,7 +611,7 @@ func TestStakingRewardDistribution(t *testing.T) { { // Run OnEpochCompleteHook(). 
- _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { @@ -754,7 +723,7 @@ func TestStakingRewardDistribution(t *testing.T) { { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight()) } { @@ -867,12 +836,12 @@ func _newUtxoView(testMeta *TestMeta) *UtxoView { return newUtxoView } -func _runOnEpochCompleteHook(testMeta *TestMeta, blockHeight uint64, viewNumber uint64) { +func _runOnEpochCompleteHook(testMeta *TestMeta, blockHeight uint64) { tmpUtxoView := _newUtxoView(testMeta) // Set blockTimestampNanoSecs to 1 year * block height. Every time the block height increments, // the timestamp increases by 1 year blockTimestampNanoSecs := blockHeight * 365 * 24 * 3600 * 1e9 - require.NoError(testMeta.t, tmpUtxoView.RunEpochCompleteHook(blockHeight, viewNumber, blockTimestampNanoSecs)) + require.NoError(testMeta.t, tmpUtxoView.RunEpochCompleteHook(blockHeight, blockTimestampNanoSecs)) require.NoError(testMeta.t, tmpUtxoView.FlushToDb(blockHeight)) } diff --git a/lib/pos_epoch_test.go b/lib/pos_epoch_test.go index 0c9f85d18..7ea66a52a 100644 --- a/lib/pos_epoch_test.go +++ b/lib/pos_epoch_test.go @@ -35,8 +35,6 @@ func TestCurrentEpoch(t *testing.T) { // Set the CurrentEpoch. 
epochEntry = &EpochEntry{ EpochNumber: 1, - InitialBlockHeight: blockHeight + 1, - InitialView: 1, FinalBlockHeight: blockHeight + 5, CreatedAtBlockTimestampNanoSecs: blockTimestampNanoSecs + 5*1e9, } @@ -48,8 +46,6 @@ func TestCurrentEpoch(t *testing.T) { require.NoError(t, err) require.NotNil(t, epochEntry) require.Equal(t, epochEntry.EpochNumber, uint64(1)) - require.Equal(t, epochEntry.InitialBlockHeight, blockHeight+1) - require.Equal(t, epochEntry.InitialView, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) @@ -61,8 +57,6 @@ func TestCurrentEpoch(t *testing.T) { require.NoError(t, err) require.NotNil(t, epochEntry) require.Equal(t, epochEntry.EpochNumber, uint64(1)) - require.Equal(t, epochEntry.InitialBlockHeight, blockHeight+1) - require.Equal(t, epochEntry.InitialView, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) @@ -70,8 +64,6 @@ func TestCurrentEpoch(t *testing.T) { epochEntry = utxoView.CurrentEpochEntry require.NotNil(t, epochEntry) require.Equal(t, epochEntry.EpochNumber, uint64(1)) - require.Equal(t, epochEntry.InitialBlockHeight, blockHeight+1) - require.Equal(t, epochEntry.InitialView, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) diff --git a/lib/pos_network.go b/lib/pos_network.go index 0b7003ba5..75efccf6d 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -5,7 +5,6 @@ import ( "fmt" "io" - "github.com/deso-protocol/core/consensus" "golang.org/x/crypto/sha3" "github.com/deso-protocol/core/bls" @@ -297,18 +296,6 @@ type QuorumCertificate struct { ValidatorsVoteAggregatedSignature *AggregatedBLSSignature } -func (qc *QuorumCertificate) GetBlockHash() consensus.BlockHash { - return qc.BlockHash 
-} - -func (qc *QuorumCertificate) GetView() uint64 { - return qc.ProposedInView -} - -func (qc *QuorumCertificate) GetAggregatedSignature() consensus.AggregatedSignature { - return qc.ValidatorsVoteAggregatedSignature -} - // Performs a deep equality check between two QuorumCertificates, and returns true // if the two are fully initialized and have identical values. In all other cases, // it return false. @@ -330,14 +317,6 @@ func (qc *QuorumCertificate) Eq(other *QuorumCertificate) bool { return bytes.Equal(qcEncodedBytes, otherEncodedBytes) } -func (qc *QuorumCertificate) isEmpty() bool { - return qc == nil || - qc.BlockHash == nil || - qc.ProposedInView == 0 || - qc.ValidatorsVoteAggregatedSignature == nil || - qc.ValidatorsVoteAggregatedSignature.Signature == nil -} - func (qc *QuorumCertificate) ToBytes() ([]byte, error) { retBytes := []byte{} @@ -429,14 +408,6 @@ type AggregatedBLSSignature struct { Signature *bls.Signature } -func (sig *AggregatedBLSSignature) GetSignersList() *bitset.Bitset { - return sig.SignersList -} - -func (sig *AggregatedBLSSignature) GetSignature() *bls.Signature { - return sig.Signature -} - // Performs a deep equality check between two AggregatedBLSSignatures, and returns true // if the two are fully initialized and have identical values. In all other cases, // it return false. 
@@ -528,22 +499,6 @@ type TimeoutAggregateQuorumCertificate struct { ValidatorsTimeoutAggregatedSignature *AggregatedBLSSignature } -func (aggQC *TimeoutAggregateQuorumCertificate) GetView() uint64 { - return aggQC.TimedOutView -} - -func (aggQC *TimeoutAggregateQuorumCertificate) GetHighQC() *QuorumCertificate { - return aggQC.ValidatorsHighQC -} - -func (aggQC *TimeoutAggregateQuorumCertificate) GetHighQCViews() []uint64 { - return aggQC.ValidatorsTimeoutHighQCViews -} - -func (aggQC *TimeoutAggregateQuorumCertificate) GetAggregatedSignature() consensus.AggregatedSignature { - return aggQC.ValidatorsTimeoutAggregatedSignature -} - // Performs a deep equality check between two TimeoutAggregateQuorumCertificates, and // returns true if the two are fully initialized and have identical values. In all other // cases, it return false. @@ -630,8 +585,6 @@ func (aggQC *TimeoutAggregateQuorumCertificate) FromBytes(rr io.Reader) error { func (aggQC *TimeoutAggregateQuorumCertificate) isEmpty() bool { return aggQC == nil || aggQC.TimedOutView == 0 || - aggQC.ValidatorsHighQC.isEmpty() || - len(aggQC.ValidatorsTimeoutHighQCViews) == 0 || aggQC.ValidatorsTimeoutAggregatedSignature == nil || aggQC.ValidatorsTimeoutAggregatedSignature.Signature == nil } diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index 1b8eb03fc..567025199 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -55,10 +55,6 @@ func DecodeRandomSeedHash(rr io.Reader) (*RandomSeedHash, error) { return (&RandomSeedHash{}).FromBytes(randomSeedHashBytes) } -func (randomSeedHash *RandomSeedHash) isEmpty() bool { - return randomSeedHash == nil || randomSeedHash.Eq(&RandomSeedHash{}) -} - // // UTXO VIEW UTILS // diff --git a/lib/postgres.go b/lib/postgres.go index 772641007..1b38f820f 100644 --- a/lib/postgres.go +++ b/lib/postgres.go @@ -4083,7 +4083,7 @@ func (postgres *Postgres) InitGenesisBlock(params *DeSoParams, db *badger.DB) er genesisBlock := params.GenesisBlock diffTarget := 
MustDecodeHexBlockHash(params.MinDifficultyTargetHex) blockHash := MustDecodeHexBlockHash(params.GenesisBlockHashHex) - genesisNode := NewPoWBlockNode( + genesisNode := NewBlockNode( nil, blockHash, 0, diff --git a/lib/server.go b/lib/server.go index d455e08d0..57be274f4 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1870,7 +1870,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { glog.V(1).Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlock: Processing block %v WITHOUT "+ "signature checking because SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp))) - _, isOrphan, _, err = srv.blockchain.ProcessBlock(blk, false) + _, isOrphan, err = srv.blockchain.ProcessBlock(blk, false) } else { // TODO: Signature checking slows things down because it acquires the ChainLock. @@ -1879,7 +1879,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { glog.V(1).Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlock: Processing block %v WITH "+ "signature checking because SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp))) - _, isOrphan, _, err = srv.blockchain.ProcessBlock(blk, true) + _, isOrphan, err = srv.blockchain.ProcessBlock(blk, true) } // If we hit an error then abort mission entirely. We should generally never diff --git a/lib/txindex.go b/lib/txindex.go index 0189d5a45..3db429a83 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -438,7 +438,7 @@ func (txi *TXIndex) Update() error { // Now that we have added all the txns to our TxIndex db, attach the block // to update our chain. 
- _, _, _, err = txi.TXIndexChain.ProcessBlock(blockMsg, false /*verifySignatures*/) + _, _, err = txi.TXIndexChain.ProcessBlock(blockMsg, false /*verifySignatures*/) if err != nil { return fmt.Errorf("Update: Problem attaching block %v: %v", blockToAttach, err) diff --git a/lib/types.go b/lib/types.go index 9182883a7..3e531f2ca 100644 --- a/lib/types.go +++ b/lib/types.go @@ -200,10 +200,6 @@ func NewBlockHash(input []byte) *BlockHash { return blockHash } -func (bh *BlockHash) GetValue() [HashSizeBytes]byte { - return *bh -} - func (bh *BlockHash) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { return EncodeByteArray(bh[:]) } From fba1bd0e7f88c854c7a6bb7acc362a70af1d582b Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 31 Oct 2023 13:18:24 -0400 Subject: [PATCH 265/762] Address DH's feedback on FastHotStuffEventLoop structure (#767) * Address DH's feedback on FastHotStuffEventLoop structure * Fix bit shifting in timeout exponential backoff * Address Nina's feedback --- collections/slice.go | 7 ++ collections/slice_test.go | 57 +++++++++ consensus/event_loop.go | 127 ++++++++++---------- consensus/event_loop_test.go | 180 ++++++++++++++-------------- consensus/integration_test_types.go | 16 +-- consensus/types.go | 30 +++-- consensus/types_internal.go | 2 +- consensus/utils.go | 24 +++- consensus/utils_test.go | 4 +- 9 files changed, 264 insertions(+), 183 deletions(-) create mode 100644 collections/slice_test.go diff --git a/collections/slice.go b/collections/slice.go index 4bd6cc133..95c552b80 100644 --- a/collections/slice.go +++ b/collections/slice.go @@ -1,5 +1,12 @@ package collections +func All[T any](slice []T, predicate func(T) bool) bool { + negatedPredicate := func(val T) bool { + return !predicate(val) + } + return !Any(slice, negatedPredicate) +} + func Any[T any](slice []T, predicate func(T) bool) bool { for _, val := range slice { if predicate(val) { diff --git 
a/collections/slice_test.go b/collections/slice_test.go new file mode 100644 index 000000000..7932133e2 --- /dev/null +++ b/collections/slice_test.go @@ -0,0 +1,57 @@ +package collections + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSliceAll(t *testing.T) { + // Predicate: all values > 0 + predicate := func(val int) bool { + return val > 0 + } + + // Test sad path where no values are > 0 + { + slice := []int{-1, -2, -3, -4, -5} + require.False(t, All(slice, predicate)) + } + + // Test sad path where some values are > 0 + { + slice := []int{-1, 2, 3, 4, 5} + require.False(t, All(slice, predicate)) + } + + // Test happy path where all values are > 0 + { + slice := []int{1, 2, 3, 4, 5} + require.True(t, All(slice, predicate)) + } +} + +func TestSliceAny(t *testing.T) { + // Predicate: all values > 0 + predicate := func(val int) bool { + return val > 0 + } + + // Test sad path where no values are > 0 + { + slice := []int{-1, -2, -3, -4, -5} + require.False(t, Any(slice, predicate)) + } + + // Test happy path where some values are > 0 + { + slice := []int{-1, 2, 3, 4, 5} + require.True(t, Any(slice, predicate)) + } + + // Test happy path where all values are > 0 + { + slice := []int{1, 2, 3, 4, 5} + require.True(t, Any(slice, predicate)) + } +} diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 4b70b10d0..9c94e4cd5 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -3,6 +3,7 @@ package consensus import ( "time" + "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" @@ -20,11 +21,11 @@ func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { } // Initializes the consensus event loop with the latest known valid block in the blockchain, and -// the validator set for the next block height. The functions expects the following for the input +// the validator list for the next block height. 
The functions expects the following for the input // params: // - crankTimerInterval: crank timer interval duration must be > 0 // - timeoutBaseDuration: timeout base duration must be > 0 -// - tip: the current tip of the blockchain, with the validator set at that block height. This may +// - tip: the current tip of the blockchain, with the validator list at that block height. This may // be a committed or uncommitted block. // - safeBlocks: an unordered slice of blocks including the committed tip, the uncommitted tip, // all ancestors of the uncommitted tip that are safe to extend from, and all blocks from forks @@ -36,8 +37,8 @@ func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { func (fc *FastHotStuffEventLoop) Init( crankTimerInterval time.Duration, timeoutBaseDuration time.Duration, - tip BlockWithValidators, - safeBlocks []BlockWithValidators, + tip BlockWithValidatorList, + safeBlocks []BlockWithValidatorList, ) error { // Grab the event loop's lock fc.lock.Lock() @@ -56,7 +57,7 @@ func (fc *FastHotStuffEventLoop) Init( return errors.New("FastHotStuffEventLoop.Init: Timeout base duration must be > 0") } - // Validate the safe blocks and validator sets, and store them + // Validate the safe blocks and validator lists, and store them if err := fc.storeBlocks(tip, safeBlocks); err != nil { return errors.Wrap(err, "FastHotStuffEventLoop.Init: ") } @@ -69,8 +70,8 @@ func (fc *FastHotStuffEventLoop) Init( fc.hasConstructedQCInCurrentView = false // Reset all internal data structures for votes and timeouts - fc.votesSeen = make(map[[32]byte]map[string]VoteMessage) - fc.timeoutsSeen = make(map[uint64]map[string]TimeoutMessage) + fc.votesSeenByBlockHash = make(map[BlockHashValue]map[string]VoteMessage) + fc.timeoutsSeenByView = make(map[uint64]map[string]TimeoutMessage) // Reset the external channel used for signaling fc.Events = make(chan *FastHotStuffEvent, signalChannelBufferSize) @@ -92,7 +93,7 @@ func (fc *FastHotStuffEventLoop) AdvanceViewOnTimeout() 
(uint64, error) { fc.lock.Lock() defer fc.lock.Unlock() - // Ensure the event loop is running. This guarantees that the chain tip and validator set + // Ensure the event loop is running. This guarantees that the chain tip and validator list // have already been set. if fc.status != eventLoopStatusRunning { return 0, errors.New("FastHotStuffEventLoop.AdvanceViewOnTimeout: Event loop is not running") @@ -120,12 +121,12 @@ func (fc *FastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { // timer. // // Expected params: -// - tip: the current uncommitted tip of the blockchain, with the validator set at that block height +// - tip: the current uncommitted tip of the blockchain, with the validator list at that block height // - safeBlocks: an unordered slice of blocks including the committed tip, the uncommitted tip, // all ancestors of the uncommitted tip that are safe to extend from, and all blocks from forks // that are safe to extend from. This function does not validate the collection of blocks. It // expects the server to know and decide what blocks are safe to extend from. 
-func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidators, safeBlocks []BlockWithValidators) error { +func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { // Grab the event loop's lock fc.lock.Lock() defer fc.lock.Unlock() @@ -135,7 +136,7 @@ func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidators, safeBl return errors.New("FastHotStuffEventLoop.ProcessTipBlock: Event loop is not running") } - // Validate the safe blocks and validator sets, and store them + // Validate the safe blocks and validator lists, and store them if err := fc.storeBlocks(tip, safeBlocks); err != nil { return errors.Wrap(err, "FastHotStuffEventLoop.ProcessTipBlock: ") } @@ -165,41 +166,35 @@ func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidators, safeBl return nil } -// setSafeBlocks is a helper function that validates the provided blocks, validator sets, and stores them. +// storeBlocks is a helper function that validates the provided blocks, validator lists, and stores them. // It must be called while holding the event loop's lock. 
-func (fc *FastHotStuffEventLoop) storeBlocks(tip BlockWithValidators, safeBlocks []BlockWithValidators) error { - // Do a basic integrity check on the tip block and validator set - if !isProperlyFormedBlock(tip.Block) || !isProperlyFormedValidatorSet(tip.Validators) { - return errors.New("Invalid tip block or validator set") +func (fc *FastHotStuffEventLoop) storeBlocks(tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { + // Do a basic integrity check on the tip block and validator list + if !isProperlyFormedBlockWithValidatorList(tip) { + return errors.New("Invalid tip block or validator list") } - // Do a basic integrity check on the blocks and validator sets - hasMalformedInput := collections.Any(safeBlocks, func(block BlockWithValidators) bool { - return !isProperlyFormedBlock(block.Block) || !isProperlyFormedValidatorSet(block.Validators) - }) + // Do a basic integrity check on the safe blocks and validator lists + hasProperlyFormedSafeBlocksAndValidatorLists := collections.All(safeBlocks, isProperlyFormedBlockWithValidatorList) // There must be at least one block - if len(safeBlocks) == 0 || hasMalformedInput { - return errors.New("Invalid safe blocks or validator sets") + if len(safeBlocks) == 0 || !hasProperlyFormedSafeBlocksAndValidatorLists { + return errors.New("Invalid safe blocks or validator lists") } - // Store the tip block and validator set + // Store the tip block and validator list fc.tip = blockWithValidatorLookup{ - block: tip.Block, - validatorSet: tip.Validators, - validatorLookup: collections.ToMap(tip.Validators, func(validator Validator) string { - return validator.GetPublicKey().ToString() - }), + block: tip.Block, + validatorList: tip.ValidatorList, + validatorLookup: collections.ToMap(tip.ValidatorList, validatorToPublicKeyString), } - // Store the blocks and validator sets - fc.safeBlocks = collections.Transform(safeBlocks, func(block BlockWithValidators) blockWithValidatorLookup { + // Store the blocks and 
validator lists + fc.safeBlocks = collections.Transform(safeBlocks, func(block BlockWithValidatorList) blockWithValidatorLookup { return blockWithValidatorLookup{ - block: block.Block, - validatorSet: block.Validators, - validatorLookup: collections.ToMap(block.Validators, func(validator Validator) string { - return validator.GetPublicKey().ToString() - }), + block: block.Block, + validatorList: block.ValidatorList, + validatorLookup: collections.ToMap(block.ValidatorList, validatorToPublicKeyString), } }) @@ -222,7 +217,7 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { fc.lock.Lock() defer fc.lock.Unlock() - // Ensure the event loop is running. This guarantees that the chain tip and validator set + // Ensure the event loop is running. This guarantees that the chain tip and validator list // have already been set. if fc.status != eventLoopStatusRunning { return errors.New("FastHotStuffEventLoop.ProcessValidatorVote: Event loop is not running") @@ -307,7 +302,7 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) fc.lock.Lock() defer fc.lock.Unlock() - // Ensure the event loop is running. This guarantees that the chain tip and validator set + // Ensure the event loop is running. This guarantees that the chain tip and validator list // have already been set. if fc.status != eventLoopStatusRunning { return errors.New("FastHotStuffEventLoop.ProcessValidatorTimeout: Event loop is not running") @@ -446,7 +441,7 @@ func (fc *FastHotStuffEventLoop) resetScheduledTasks() { numTimeouts := fc.currentView - fc.tip.block.GetView() - 1 // Compute the exponential back-off: nextTimeoutDuration * 2^numTimeouts - timeoutDuration = fc.timeoutBaseDuration << numTimeouts + timeoutDuration = fc.timeoutBaseDuration * time.Duration(powerOfTwo(numTimeouts)) } // Schedule the next crank timer task. This will run with currentView param. 
@@ -512,14 +507,14 @@ func (fc *FastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff return nil } - // Fetch the validator set at the tip. - validatorSet := fc.tip.validatorSet + // Fetch the validator list at the tip. + validatorList := fc.tip.validatorList // Compute the chain tip's signature payload. voteSignaturePayload := GetVoteSignaturePayload(tipBlock.GetView(), tipBlock.GetBlockHash()) // Fetch the validator votes for the tip block. - votesByValidator := fc.votesSeen[voteSignaturePayload] + votesByValidator := fc.votesSeenByBlockHash[voteSignaturePayload] // Compute the total stake and total stake with votes totalStake := uint256.NewInt() @@ -529,9 +524,9 @@ func (fc *FastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff signersList := bitset.NewBitset() signatures := []*bls.Signature{} - // Iterate through the entire validator set and check if each one has voted for the tip block. Track + // Iterate through the entire validator list and check if each one has voted for the tip block. Track // all voters and their stakes. - for ii, validator := range validatorSet { + for ii, validator := range validatorList { totalStake = uint256.NewInt().Add(totalStake, validator.GetStakeAmount()) // Skip the validator if it hasn't voted for the the block @@ -545,7 +540,7 @@ func (fc *FastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff continue } - // Track the vote's signature, stake, and place in the validator set + // Track the vote's signature, stake, and place in the validator list totalVotingStake = uint256.NewInt().Add(totalVotingStake, validator.GetStakeAmount()) signersList.Set(ii, true) signatures = append(signatures, vote.GetSignature()) @@ -560,6 +555,8 @@ func (fc *FastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff // aggregate the signatures. This should never fail. aggregateSignature, err := bls.AggregateSignatures(signatures) if err != nil { + // This should never happen. 
If it does, then we log an error and return. + glog.Errorf("FastHotStuffEventLoop.tryConstructVoteQCInCurrentView: Failed to aggregate signatures: %v", err) return nil } @@ -593,7 +590,7 @@ func (fc *FastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt // Fetch all timeouts for the previous view. All timeout messages for a view are aggregated and // proposed in the next view. So if we want to propose a timeout QC in the current view, we need // to aggregate timeouts from the previous one. - timeoutsByValidator := fc.timeoutsSeen[fc.currentView-1] + timeoutsByValidator := fc.timeoutsSeenByView[fc.currentView-1] // Tracks the highQC from validators as we go along. var validatorsHighQC QuorumCertificate @@ -626,9 +623,9 @@ func (fc *FastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt return nil } - // Fetch the validator set for the block height of the high QC. This lookup is guaranteed to succeed + // Fetch the validator list for the block height of the high QC. This lookup is guaranteed to succeed // because it succeeded above. - ok, safeBlock, validatorSet, _ := fc.fetchSafeBlockInfo(validatorsHighQC.GetBlockHash()) + ok, safeBlock, validatorList, _ := fc.fetchSafeBlockInfo(validatorsHighQC.GetBlockHash()) if !ok { return nil } @@ -638,18 +635,18 @@ func (fc *FastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt totalTimedOutStake := uint256.NewInt() // Track the high QC view for each validator - highQCViews := make([]uint64, len(validatorSet)) + highQCViews := make([]uint64, len(validatorList)) // Track the signatures and signers list for validators who timed out signersList := bitset.NewBitset() signatures := []*bls.Signature{} - // Iterate through the entire validator set and check if each one has timed out for the previous - // view. Track all validators who timed out and their stakes. 
We iterate through the validator set + // Iterate through the entire validator list and check if each one has timed out for the previous + // view. Track all validators who timed out and their stakes. We iterate through the validator list // here rather than the timeoutsByValidator map because we want to preserve the order of the validator - // for the signersList bitset. In practice, the validator set is expected to be <= 1000 in size, so + // for the signersList bitset. In practice, the validator list is expected to be <= 1000 in size, so // this loop will be fast. - for ii, validator := range validatorSet { + for ii, validator := range validatorList { totalStake = uint256.NewInt().Add(totalStake, validator.GetStakeAmount()) // Skip the validator if it hasn't timed out for the previous view @@ -739,7 +736,7 @@ func (fc *FastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uin // view to determine what is stale. The consensus mechanism will never construct a block with a view // that's lower than its current view. Consider the following: // - In the event the event update the chain tip, we will vote for that block and the view it was proposed in -// - In the event we locally time out a view locally, we will send a timeout message for that view +// - In the event we locally time out a view, we will send a timeout message for that view // // In both cases, we will never roll back the chain tip, or decrement the current view to construct a // conflicting block at that lower view that we have previously voted or timed out on. So we are safe to evict @@ -755,32 +752,32 @@ func (fc *FastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uin // currentView > timeout.GetView() + 1. 
func (fc *FastHotStuffEventLoop) evictStaleVotesAndTimeouts() { // Evict stale vote messages - for blockHash, voters := range fc.votesSeen { + for blockHash, voters := range fc.votesSeenByBlockHash { for _, vote := range voters { if isStaleView(fc.currentView, vote.GetView()) { // Each block is proposed at a known view, and has an immutable block hash. Votes are signed on the // tuple (blockhash, view). So, if any vote message for the blockhash has a view that satisfies this // condition, then it's guaranteed that all votes for the same block hash have satisfy this condition. // We can safely evict all votes for this block hash. - delete(fc.votesSeen, blockHash) + delete(fc.votesSeenByBlockHash, blockHash) break } } } // Evict stale timeout messages - for view := range fc.timeoutsSeen { + for view := range fc.timeoutsSeenByView { if isStaleView(fc.currentView, view) { - delete(fc.timeoutsSeen, view) + delete(fc.timeoutsSeenByView, view) } } } func (fc *FastHotStuffEventLoop) storeVote(signaturePayload [32]byte, vote VoteMessage) { - votesForBlockHash, ok := fc.votesSeen[signaturePayload] + votesForBlockHash, ok := fc.votesSeenByBlockHash[signaturePayload] if !ok { votesForBlockHash = make(map[string]VoteMessage) - fc.votesSeen[signaturePayload] = votesForBlockHash + fc.votesSeenByBlockHash[signaturePayload] = votesForBlockHash } votesForBlockHash[vote.GetPublicKey().ToString()] = vote @@ -798,7 +795,7 @@ func (fc *FastHotStuffEventLoop) hasVotedForView(publicKey *bls.PublicKey, view publicKeyString := publicKey.ToString() // Search for the public key's votes across all existing block hashes - for _, votesForBlock := range fc.votesSeen { + for _, votesForBlock := range fc.votesSeenByBlockHash { vote, ok := votesForBlock[publicKeyString] if ok && vote.GetView() == view { return true @@ -809,17 +806,17 @@ func (fc *FastHotStuffEventLoop) hasVotedForView(publicKey *bls.PublicKey, view } func (fc *FastHotStuffEventLoop) storeTimeout(timeout TimeoutMessage) { - 
timeoutsForView, ok := fc.timeoutsSeen[timeout.GetView()] + timeoutsForView, ok := fc.timeoutsSeenByView[timeout.GetView()] if !ok { timeoutsForView = make(map[string]TimeoutMessage) - fc.timeoutsSeen[timeout.GetView()] = timeoutsForView + fc.timeoutsSeenByView[timeout.GetView()] = timeoutsForView } timeoutsForView[timeout.GetPublicKey().ToString()] = timeout } func (fc *FastHotStuffEventLoop) hasTimedOutForView(publicKey *bls.PublicKey, view uint64) bool { - timeoutsForView, ok := fc.timeoutsSeen[view] + timeoutsForView, ok := fc.timeoutsSeenByView[view] if !ok { return false } @@ -833,7 +830,7 @@ func (fc *FastHotStuffEventLoop) hasTimedOutForView(publicKey *bls.PublicKey, vi func (fc *FastHotStuffEventLoop) fetchSafeBlockInfo(blockHash BlockHash) ( _isSafeBlock bool, _safeBlock Block, - _validatorSet []Validator, + _validatorList []Validator, _validatorLookup map[string]Validator, ) { // A linear search here is fine. The safeBlocks slice is expected to be extremely small as it represents the @@ -842,7 +839,7 @@ func (fc *FastHotStuffEventLoop) fetchSafeBlockInfo(blockHash BlockHash) ( // timeout -> block -> timeout -> block,... it can still be expected to have < 10 blocks. 
for _, block := range fc.safeBlocks { if isEqualBlockHashes(block.block.GetBlockHash(), blockHash) { - return true, block.block, block.validatorSet, block.validatorLookup + return true, block.block, block.validatorList, block.validatorLookup } } diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 2dac72b73..d933e394b 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -25,8 +25,8 @@ func TestInit(t *testing.T) { { fc := NewFastHotStuffEventLoop() err := fc.Init(0, 1, - BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip - []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } @@ -35,8 +35,8 @@ func TestInit(t *testing.T) { { fc := NewFastHotStuffEventLoop() err := fc.Init(1, 0, - BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip - []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } @@ -45,18 +45,18 @@ func TestInit(t *testing.T) { { fc := NewFastHotStuffEventLoop() err := fc.Init(1, 1, - BlockWithValidators{nil, createDummyValidatorSet()}, // tip - []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{nil, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } - // Test Init() function with malformed validator set for tip block + // Test Init() function with malformed validator list for tip block { fc := NewFastHotStuffEventLoop() err := 
fc.Init(1, 1, - BlockWithValidators{createDummyBlock(2), nil}, // tip - []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), nil}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } @@ -65,18 +65,18 @@ func TestInit(t *testing.T) { { fc := NewFastHotStuffEventLoop() err := fc.Init(1, 1, - BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip - []BlockWithValidators{{nil, createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip + []BlockWithValidatorList{{nil, createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } - // Test Init() function with malformed validator set for safe block + // Test Init() function with malformed validator list for safe block { fc := NewFastHotStuffEventLoop() err := fc.Init(1, 1, - BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip - []BlockWithValidators{{createDummyBlock(2), nil}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), nil}}, // safeBlocks ) require.Error(t, err) } @@ -87,8 +87,8 @@ func TestInit(t *testing.T) { fc := NewFastHotStuffEventLoop() err := fc.Init(100, 101, - BlockWithValidators{block, createDummyValidatorSet()}, // tip - []BlockWithValidators{{block, createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{block, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{block, createDummyValidatorList()}}, // safeBlocks ) require.NoError(t, err) @@ -105,14 +105,14 @@ func TestInit(t *testing.T) { require.Equal(t, fc.timeoutBaseDuration, time.Duration(101)) require.Equal(t, fc.currentView, uint64(3)) - require.Equal(t, len(fc.tip.validatorSet), 2) + require.Equal(t, len(fc.tip.validatorList), 2) require.Equal(t, 
len(fc.tip.validatorLookup), 2) require.Equal(t, len(fc.safeBlocks), 1) require.Equal(t, fc.safeBlocks[0].block.GetBlockHash().GetValue(), block.GetBlockHash().GetValue()) require.Equal(t, fc.safeBlocks[0].block.GetView(), uint64(2)) require.Equal(t, fc.safeBlocks[0].block.GetHeight(), uint64(1)) - require.Equal(t, len(fc.safeBlocks[0].validatorSet), 2) + require.Equal(t, len(fc.safeBlocks[0].validatorList), 2) require.Equal(t, len(fc.safeBlocks[0].validatorLookup), 2) } } @@ -122,16 +122,16 @@ func TestProcessTipBlock(t *testing.T) { fc := NewFastHotStuffEventLoop() err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, - BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip - []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks ) require.NoError(t, err) // Test ProcessTipBlock() function when event loop is not running { err := fc.ProcessTipBlock( - BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip - []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } @@ -142,17 +142,17 @@ func TestProcessTipBlock(t *testing.T) { // Test ProcessTipBlock() function with malformed tip block { err := fc.ProcessTipBlock( - BlockWithValidators{nil, createDummyValidatorSet()}, // tip - []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{nil, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } - // Test ProcessTipBlock() function with malformed 
tip validator set + // Test ProcessTipBlock() function with malformed tip validator list { err := fc.ProcessTipBlock( - BlockWithValidators{createDummyBlock(2), nil}, // tip - []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), nil}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } @@ -160,24 +160,24 @@ func TestProcessTipBlock(t *testing.T) { // Test ProcessTipBlock() function with malformed safe block { err := fc.ProcessTipBlock( - BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip - []BlockWithValidators{{nil, createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip + []BlockWithValidatorList{{nil, createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } - // Test ProcessTipBlock() function with malformed safe block's validator set + // Test ProcessTipBlock() function with malformed safe block's validator list { err := fc.ProcessTipBlock( - BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip - []BlockWithValidators{{createDummyBlock(2), nil}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), nil}}, // safeBlocks ) require.Error(t, err) } - // Populate the votesSeen and timeoutsSeen maps with dummy data + // Populate the votesSeenByBlockHash and timeoutsSeenByView maps with dummy data { - fc.votesSeen = map[[32]byte]map[string]VoteMessage{ + fc.votesSeenByBlockHash = map[[32]byte]map[string]VoteMessage{ {0}: { // blockHash = 0 "pubKeyA": createDummyVoteMessage(0), }, @@ -195,7 +195,7 @@ func TestProcessTipBlock(t *testing.T) { }, } - fc.timeoutsSeen = map[uint64]map[string]TimeoutMessage{ + fc.timeoutsSeenByView = map[uint64]map[string]TimeoutMessage{ 0: { // view = 0 "pubKeyA": 
createDummyTimeoutMessage(0), }, @@ -214,10 +214,10 @@ func TestProcessTipBlock(t *testing.T) { } } - // Verify the sizes of the votesSeen and timeoutsSeen maps + // Verify the sizes of the votesSeenByBlockHash and timeoutsSeenByView maps { - require.Equal(t, len(fc.votesSeen), 5) - require.Equal(t, len(fc.timeoutsSeen), 5) + require.Equal(t, len(fc.votesSeenByBlockHash), 5) + require.Equal(t, len(fc.timeoutsSeenByView), 5) } // Test ProcessTipBlock() function with valid parameters @@ -227,8 +227,8 @@ func TestProcessTipBlock(t *testing.T) { nextBlock.view = 3 err := fc.ProcessTipBlock( - BlockWithValidators{nextBlock, createDummyValidatorSet()}, // tip - []BlockWithValidators{{nextBlock, createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{nextBlock, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{nextBlock, createDummyValidatorList()}}, // safeBlocks ) require.NoError(t, err) @@ -237,13 +237,13 @@ func TestProcessTipBlock(t *testing.T) { require.Equal(t, uint64(2), fc.tip.block.GetHeight()) require.Equal(t, uint64(4), fc.currentView) - require.Equal(t, 2, len(fc.tip.validatorSet)) + require.Equal(t, 2, len(fc.tip.validatorList)) } // Verify that stale votes and timeouts have been evicted { - require.Equal(t, 2, len(fc.votesSeen)) - require.Equal(t, 2, len(fc.timeoutsSeen)) + require.Equal(t, 2, len(fc.votesSeenByBlockHash)) + require.Equal(t, 2, len(fc.timeoutsSeenByView)) } // Stop the event loop @@ -257,8 +257,8 @@ func TestAdvanceViewOnTimeout(t *testing.T) { // BlockHeight = 1, Current View = 3 err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, - BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip - []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks ) require.NoError(t, err) @@ -271,9 +271,9 @@ 
func TestAdvanceViewOnTimeout(t *testing.T) { // Start the event loop fc.Start() - // Populate the votesSeen and timeoutsSeen maps with dummy data + // Populate the votesSeenByBlockHash and timeoutsSeenByView maps with dummy data { - fc.votesSeen = map[[32]byte]map[string]VoteMessage{ + fc.votesSeenByBlockHash = map[[32]byte]map[string]VoteMessage{ {1}: { // blockHash = 1 "pubKeyA": createDummyVoteMessage(1), }, @@ -291,7 +291,7 @@ func TestAdvanceViewOnTimeout(t *testing.T) { }, } - fc.timeoutsSeen = map[uint64]map[string]TimeoutMessage{ + fc.timeoutsSeenByView = map[uint64]map[string]TimeoutMessage{ 1: { // view = 1 "pubKeyA": createDummyTimeoutMessage(1), }, @@ -319,8 +319,8 @@ func TestAdvanceViewOnTimeout(t *testing.T) { // Verify that vote and timeout messages haven't changed { - require.Equal(t, len(fc.votesSeen), 3) - require.Equal(t, len(fc.timeoutsSeen), 3) + require.Equal(t, len(fc.votesSeenByBlockHash), 3) + require.Equal(t, len(fc.timeoutsSeenByView), 3) } // Run AdvanceViewOnTimeout() to view 5 @@ -332,8 +332,8 @@ func TestAdvanceViewOnTimeout(t *testing.T) { // Verify that stale votes and timeouts have been evicted { - require.Equal(t, len(fc.votesSeen), 2) - require.Equal(t, len(fc.timeoutsSeen), 2) + require.Equal(t, len(fc.votesSeenByBlockHash), 2) + require.Equal(t, len(fc.timeoutsSeenByView), 2) } // Stop the event loop @@ -347,8 +347,8 @@ func TestProcessValidatorVote(t *testing.T) { // BlockHeight = 1, Current View = 3 err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, - BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip - []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks ) require.NoError(t, err) @@ -389,7 +389,7 @@ func TestProcessValidatorVote(t *testing.T) { // Test when we've already seen a vote from the 
validator for the same view { vote := createDummyVoteMessage(4) - fc.votesSeen[GetVoteSignaturePayload(vote.GetView(), vote.GetBlockHash())] = map[string]VoteMessage{ + fc.votesSeenByBlockHash[GetVoteSignaturePayload(vote.GetView(), vote.GetBlockHash())] = map[string]VoteMessage{ vote.publicKey.ToString(): vote, } @@ -404,7 +404,7 @@ func TestProcessValidatorVote(t *testing.T) { timeout := createDummyTimeoutMessage(5) timeout.publicKey = vote.publicKey - fc.timeoutsSeen[timeout.GetView()] = map[string]TimeoutMessage{ + fc.timeoutsSeenByView[timeout.GetView()] = map[string]TimeoutMessage{ timeout.publicKey.ToString(): timeout, } @@ -431,8 +431,8 @@ func TestProcessValidatorTimeout(t *testing.T) { // BlockHeight = 1, Current View = 3 err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, - BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip - []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks ) require.NoError(t, err) @@ -475,7 +475,7 @@ func TestProcessValidatorTimeout(t *testing.T) { timeout := createDummyTimeoutMessage(4) vote := createDummyVoteMessage(4) - fc.votesSeen[GetVoteSignaturePayload(vote.GetView(), vote.GetBlockHash())] = map[string]VoteMessage{ + fc.votesSeenByBlockHash[GetVoteSignaturePayload(vote.GetView(), vote.GetBlockHash())] = map[string]VoteMessage{ timeout.publicKey.ToString(): vote, } @@ -488,7 +488,7 @@ func TestProcessValidatorTimeout(t *testing.T) { { timeout := createDummyTimeoutMessage(4) - fc.timeoutsSeen[timeout.view] = map[string]TimeoutMessage{ + fc.timeoutsSeenByView[timeout.view] = map[string]TimeoutMessage{ timeout.publicKey.ToString(): timeout, } @@ -516,8 +516,8 @@ func TestTimeoutScheduledTaskExecuted(t *testing.T) { fc := NewFastHotStuffEventLoop() err := fc.Init(oneHourInNanoSecs, 
oneMilliSecondInNanoSeconds, - BlockWithValidators{dummyBlock, createDummyValidatorSet()}, // tip - []BlockWithValidators{{dummyBlock, createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{dummyBlock, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{dummyBlock, createDummyValidatorList()}}, // safeBlocks ) require.NoError(t, err) @@ -559,8 +559,8 @@ func TestResetEventLoopSignal(t *testing.T) { fc := NewFastHotStuffEventLoop() err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, - BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip - []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks ) require.NoError(t, err) @@ -604,11 +604,11 @@ func TestVoteQCConstructionSignal(t *testing.T) { // Create a valid dummy block at view 2 block := createDummyBlock(2) - // Create a valid validator set + // Create a valid validator list validatorPrivateKey1, _ := bls.NewPrivateKey() validatorPrivateKey2, _ := bls.NewPrivateKey() - validatorSet := []Validator{ + validatorList := []Validator{ &validator{ publicKey: validatorPrivateKey1.PublicKey(), stakeAmount: uint256.NewInt().SetUint64(70), @@ -628,8 +628,8 @@ func TestVoteQCConstructionSignal(t *testing.T) { { fc := NewFastHotStuffEventLoop() err := fc.Init(time.Microsecond, time.Hour, - BlockWithValidators{block, validatorSet}, // tip - []BlockWithValidators{{block, validatorSet}}, // safeBlocks + BlockWithValidatorList{block, validatorList}, // tip + []BlockWithValidatorList{{block, validatorList}}, // safeBlocks ) require.NoError(t, err) @@ -641,8 +641,8 @@ func TestVoteQCConstructionSignal(t *testing.T) { signature: validator2Vote, // Validator 2's vote } - // Store the vote in the event loop's votesSeen map - fc.votesSeen[voteSignaturePayload] = map[string]VoteMessage{ 
+ // Store the vote in the event loop's votesSeenByBlockHash map + fc.votesSeenByBlockHash[voteSignaturePayload] = map[string]VoteMessage{ vote.publicKey.ToString(): &vote, } @@ -665,8 +665,8 @@ func TestVoteQCConstructionSignal(t *testing.T) { { fc := NewFastHotStuffEventLoop() err := fc.Init(time.Microsecond, time.Hour, - BlockWithValidators{block, validatorSet}, // tip - []BlockWithValidators{{block, validatorSet}}, // safeBlocks + BlockWithValidatorList{block, validatorList}, // tip + []BlockWithValidatorList{{block, validatorList}}, // safeBlocks ) require.NoError(t, err) @@ -678,8 +678,8 @@ func TestVoteQCConstructionSignal(t *testing.T) { signature: validator1Vote, // Validator 1's vote } - // Store the vote in the event loop's votesSeen map - fc.votesSeen[voteSignaturePayload] = map[string]VoteMessage{ + // Store the vote in the event loop's votesSeenByBlockHash map + fc.votesSeenByBlockHash[voteSignaturePayload] = map[string]VoteMessage{ vote.publicKey.ToString(): &vote, } @@ -723,11 +723,11 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { qc: createDummyQC(2, block1.GetBlockHash()), } - // Create a valid validator set + // Create a valid validator list validatorPrivateKey1, _ := bls.NewPrivateKey() validatorPrivateKey2, _ := bls.NewPrivateKey() - validatorSet := []Validator{ + validatorList := []Validator{ &validator{ publicKey: validatorPrivateKey1.PublicKey(), stakeAmount: uint256.NewInt().SetUint64(70), @@ -750,10 +750,10 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { { fc := NewFastHotStuffEventLoop() err := fc.Init(time.Microsecond, time.Hour, - BlockWithValidators{block2, validatorSet}, // tip - []BlockWithValidators{ // safeBlocks - {block1, validatorSet}, - {block2, validatorSet}, + BlockWithValidatorList{block2, validatorList}, // tip + []BlockWithValidatorList{ // safeBlocks + {block1, validatorList}, + {block2, validatorList}, }, ) require.NoError(t, err) @@ -769,8 +769,8 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { 
signature: validator2TimeoutSignature, // Validator 2's timeout signature on payload (view 4, highQCview 2) } - // Store the timeout in the event loop's timeoutsSeen map - fc.timeoutsSeen[4] = map[string]TimeoutMessage{ + // Store the timeout in the event loop's timeoutsSeenByView map + fc.timeoutsSeenByView[4] = map[string]TimeoutMessage{ timeout.publicKey.ToString(): &timeout, } @@ -793,10 +793,10 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { { fc := NewFastHotStuffEventLoop() err := fc.Init(time.Microsecond, time.Hour, - BlockWithValidators{block2, validatorSet}, // tip - []BlockWithValidators{ // safeBlocks - {block1, validatorSet}, - {block2, validatorSet}, + BlockWithValidatorList{block2, validatorList}, // tip + []BlockWithValidatorList{ // safeBlocks + {block1, validatorList}, + {block2, validatorList}, }, ) require.NoError(t, err) @@ -820,8 +820,8 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { signature: validator2TimeoutSignature, // Validator 2's timeout signature on payload (view 4, highQCview 2) } - // Store the timeout in the event loop's timeoutsSeen map - fc.timeoutsSeen[4] = map[string]TimeoutMessage{ + // Store the timeout in the event loop's timeoutsSeenByView map + fc.timeoutsSeenByView[4] = map[string]TimeoutMessage{ timeout1.publicKey.ToString(): &timeout1, timeout2.publicKey.ToString(): &timeout2, } @@ -873,8 +873,8 @@ func TestFastHotStuffEventLoopStartStop(t *testing.T) { fc := NewFastHotStuffEventLoop() err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, - BlockWithValidators{createDummyBlock(2), createDummyValidatorSet()}, // tip - []BlockWithValidators{{createDummyBlock(2), createDummyValidatorSet()}}, // safeBlocks + BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks ) require.NoError(t, err) diff --git a/consensus/integration_test_types.go b/consensus/integration_test_types.go index 
3255b1d6d..8d4e3733d 100644 --- a/consensus/integration_test_types.go +++ b/consensus/integration_test_types.go @@ -62,8 +62,8 @@ func (node *validatorNode) Init( return node.eventLoop.Init( crankTimerInterval, timeoutBaseDuration, - BlockWithValidators{genesisBlock, node.getValidators()}, - []BlockWithValidators{ + BlockWithValidatorList{genesisBlock, node.getValidators()}, + []BlockWithValidatorList{ {genesisBlock, node.getValidators()}, }, ) @@ -81,9 +81,9 @@ func (node *validatorNode) Resync(tipBlock *block, safeBlocks []*block) error { return node.eventLoop.Init( node.eventLoop.crankTimerInterval, node.eventLoop.timeoutBaseDuration, - BlockWithValidators{tipBlock, node.getValidators()}, - collections.Transform(safeBlocks, func(bb *block) BlockWithValidators { - return BlockWithValidators{bb, node.getValidators()} + BlockWithValidatorList{tipBlock, node.getValidators()}, + collections.Transform(safeBlocks, func(bb *block) BlockWithValidatorList { + return BlockWithValidatorList{bb, node.getValidators()} }), ) } @@ -154,11 +154,11 @@ func (node *validatorNode) ProcessBlock(incomingBlock *block) { // Update the event loop with the new block as the chain tip. 
node.eventLoop.ProcessTipBlock( - BlockWithValidators{incomingBlock, node.getValidators()}, + BlockWithValidatorList{incomingBlock, node.getValidators()}, collections.Transform( collections.MapValues(node.safeBlocks), - func(bb *block) BlockWithValidators { - return BlockWithValidators{bb, node.getValidators()} + func(bb *block) BlockWithValidatorList { + return BlockWithValidatorList{bb, node.getValidators()} }, ), ) diff --git a/consensus/types.go b/consensus/types.go index c0c841899..237676ed0 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -36,6 +36,10 @@ type FastHotStuffEvent struct { AggregateQC AggregateQuorumCertificate } +// Create an alias type of the 32 bit block hash so that the raw [32]byte type isn't +// ambiguously repeated in the code base +type BlockHashValue = [32]byte + // BlockHash is a 32-byte hash of a block used to uniquely identify a block. It's re-defined here // as an interface that matches the exact structure of the BlockHash type in core, so that the two // packages are decoupled and the Fast HotStuff event loop can be tested end-to-end independently. @@ -102,25 +106,25 @@ type Block interface { GetQC() QuorumCertificate } -type BlockWithValidators struct { +type BlockWithValidatorList struct { Block Block - // The validator set for the next block height after the block. This validator set can be used to validate - // votes and timeouts used to build a QC that extends from the block. The validator set must be sorted - // in descending order of stake amount with a consistent tie breaking scheme. - Validators []Validator + // The ordered validator list for the next block height after the block. This validator list can be used to + // validate votes and timeouts used to build a QC that extends from the block. The validator list must be + // sorted in descending order of stake amount with a consistent tie breaking scheme. 
+ ValidatorList []Validator } // We want a large buffer for the signal channels to ensure threads don't block when trying to push new // signals. // -// TODO: is a size of 100 enough? If we want to bullet-proof this, we could back it by a slice as a +// TODO: is a size of 10000 enough? If we want to bullet-proof this, we could back it by a slice as a // secondary buffer. That seems unnecessary since every channel will only have signals pushed by a single // producer thread. -const signalChannelBufferSize = 100 +const signalChannelBufferSize = 10000 // An instance of FastHotStuffEventLoop is a self-contained module that represents a single node running // the event loop for the Fast HotStuff consensus protocol. The event loop is initialized at the current chain's -// tip, with a given block hash, block height, view number, and validator set. The event loop is simplified and +// tip, with a given block hash, block height, view number, and validator list. The event loop is simplified and // does not know whether its role is that of a block proposer or a replica validator. // // Given a block that's at the tip of the current chain, the event loop maintains its own internal data structures @@ -168,18 +172,18 @@ type FastHotStuffEventLoop struct { // descendants that are safe to extend from. This slice also includes the tip block itself. safeBlocks []blockWithValidatorLookup - // votesSeen is an in-memory map of all the votes we've seen so far. It's a nested map with the - // following nested key structure: + // votesSeenByBlockHash is an in-memory map of all the votes we've seen so far. It's a nested map with + // the following nested key structure: // // sha3-256(vote.View, vote.BlockHash) - > string(vote.PublicKey) -> VoteMessage // // We use a nested map as above because we want to be able to efficiently fetch all votes by block hash. 
- votesSeen map[[32]byte]map[string]VoteMessage + votesSeenByBlockHash map[BlockHashValue]map[string]VoteMessage - // timeoutsSeen is an in-memory map of all the timeout messages we've seen so far, organized by + // timeoutsSeenByView is an in-memory map of all the timeout messages we've seen so far, organized by // the timed out view and the BLS public key string of the sender. We use a nested map because // we want to be able to fetch all timeout messages by view. - timeoutsSeen map[uint64]map[string]TimeoutMessage + timeoutsSeenByView map[uint64]map[string]TimeoutMessage // Externally accessible channel for signals sent to the Server. Events chan *FastHotStuffEvent diff --git a/consensus/types_internal.go b/consensus/types_internal.go index 508c321dc..9e58523e3 100644 --- a/consensus/types_internal.go +++ b/consensus/types_internal.go @@ -192,6 +192,6 @@ func (tm *timeoutMessage) GetSignature() *bls.Signature { type blockWithValidatorLookup struct { block Block - validatorSet []Validator // Ordered slice of validators + validatorList []Validator // Ordered slice of validators validatorLookup map[string]Validator // Lookup of validators by validator public key string } diff --git a/consensus/utils.go b/consensus/utils.go index 4805a8e10..f9e62e0e8 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -13,8 +13,8 @@ import ( "golang.org/x/crypto/sha3" ) -// Given a QC and a sorted validator set, this function returns true if the QC contains a valid -// super-majority of signatures from the validator set for the QC's (View, BlockHash) pair. +// Given a QC and a sorted validator list, this function returns true if the QC contains a valid +// super-majority of signatures from the validator list for the QC's (View, BlockHash) pair. 
func IsValidSuperMajorityQuorumCertificate(qc QuorumCertificate, validators []Validator) bool { if !isProperlyFormedQC(qc) || !isProperlyFormedValidatorSet(validators) { return false @@ -79,6 +79,10 @@ func GetTimeoutSignaturePayload(view uint64, highQCView uint64) [32]byte { return sha3.Sum256(append(viewBytes, highQCViewBytes...)) } +func isProperlyFormedBlockWithValidatorList(block BlockWithValidatorList) bool { + return isProperlyFormedBlock(block.Block) && isProperlyFormedValidatorSet(block.ValidatorList) +} + // This function checks if the block is properly formed. These are all surface level checks that // ensure that critical fields in the block are not nil so that the code in this package does not // panic. @@ -107,7 +111,7 @@ func isProperlyFormedBlock(block Block) bool { } func isProperlyFormedValidatorSet(validators []Validator) bool { - // The validator set must be non-empty + // The validator list must be non-empty if len(validators) == 0 { return false } @@ -263,7 +267,11 @@ func isEqualBlockHashes(hash1 BlockHash, hash2 BlockHash) bool { return bytes.Equal(hash1Value[:], hash2Value[:]) } -func createDummyValidatorSet() []Validator { +func validatorToPublicKeyString(validator Validator) string { + return validator.GetPublicKey().ToString() +} + +func createDummyValidatorList() []Validator { validators := []*validator{ { publicKey: createDummyBLSPublicKey(), @@ -373,3 +381,11 @@ func generateRandomBytes(numBytes int) []byte { rand.Read(randomBytes) return randomBytes } + +func powerOfTwo(n uint64) int64 { + result := int64(1) + for i := uint64(0); i < n; i++ { + result *= 2 + } + return result +} diff --git a/consensus/utils_test.go b/consensus/utils_test.go index e896c9610..9f7b5449a 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -14,10 +14,10 @@ import ( func TestIsValidSuperMajorityQuorumCertificate(t *testing.T) { // Test malformed QC { - require.False(t, IsValidSuperMajorityQuorumCertificate(nil, createDummyValidatorSet())) 
+ require.False(t, IsValidSuperMajorityQuorumCertificate(nil, createDummyValidatorList())) } - // Test malformed validator set + // Test malformed validator list { require.False(t, IsValidSuperMajorityQuorumCertificate(createDummyQC(1, createDummyBlockHash()), nil)) } From 0937f535f31229993189945d6a510b576795fc77 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 31 Oct 2023 13:26:06 -0400 Subject: [PATCH 266/762] Fix Flaky FastHotStuffEventLoop Integration Test (#769) * Fix flaky integration test * Fix flaky TestConnectFailingTransaction --- consensus/integration_test.go | 4 ++-- lib/block_view_test.go | 16 ++++++---------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/consensus/integration_test.go b/consensus/integration_test.go index 43a4e31e5..a0b62ba8d 100644 --- a/consensus/integration_test.go +++ b/consensus/integration_test.go @@ -70,8 +70,8 @@ func TestNetworkWithOfflineValidators(t *testing.T) { node3.Start() node4.Stop() - // Let all nodes run for 0.5 seconds so that the network produces at least one block. - time.Sleep(time.Millisecond * 500) + // Let all nodes run for 2 seconds so that the network produces at least one block. 
+ time.Sleep(time.Millisecond * 2000) // Stop all remaining nodes node1.Stop() diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 6e708e9fc..00f9ec5e3 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -10,6 +10,8 @@ import ( "sort" "testing" + "math/rand" + "github.com/btcsuite/btcd/btcec" "github.com/decred/dcrd/lru" "github.com/dgraph-io/badger/v3" @@ -17,7 +19,6 @@ import ( "github.com/golang/glog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "math/rand" ) func _strToPk(t *testing.T, pkStr string) []byte { @@ -78,6 +79,7 @@ func setBalanceModelBlockHeights(t *testing.T) { DeSoTestnetParams.ForkHeights.AssociationsAndAccessGroupsBlockHeight = 0 DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = 1 DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = 1 + DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) GlobalDeSoParams = DeSoTestnetParams @@ -94,6 +96,7 @@ func resetBalanceModelBlockHeights() { DeSoTestnetParams.ForkHeights.AssociationsAndAccessGroupsBlockHeight = uint32(596555) DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = uint32(683058) DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(math.MaxUint32) + DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = uint32(math.MaxUint32) DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) GlobalDeSoParams = DeSoTestnetParams @@ -2205,6 +2208,8 @@ func TestBlockRewardPatch(t *testing.T) { } func TestConnectFailingTransaction(t *testing.T) { + setBalanceModelBlockHeights(t) + require := 
require.New(t) seed := int64(1011) rand := rand.New(rand.NewSource(seed)) @@ -2214,13 +2219,6 @@ func TestConnectFailingTransaction(t *testing.T) { feeMax := uint64(10000) chain, params, db := NewLowDifficultyBlockchain(t) - params.ForkHeights.BalanceModelBlockHeight = 1 - params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 - params.ForkHeights.ProofOfStake1StateSetupBlockHeight = 1 - params.EncoderMigrationHeights.ProofOfStake1StateSetupMigration.Height = 1 - params.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - oldParams := GlobalDeSoParams - GlobalDeSoParams = *params mempool, miner := NewTestMiner(t, chain, params, true) // Mine a few blocks to give the senderPkString some money. _, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, mempool) @@ -2297,8 +2295,6 @@ func TestConnectFailingTransaction(t *testing.T) { require.Equal(expectedUtilityFee, utilityFee) err = blockView.FlushToDb(uint64(blockHeight)) - - GlobalDeSoParams = oldParams } func _getBMFForTxn(txn *MsgDeSoTxn, gp *GlobalParamsEntry) (_burnFee uint64, _utilityFee uint64) { From d61e24cc41c9d87a3b47abf184491a2041289639 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 1 Nov 2023 10:28:34 -0400 Subject: [PATCH 267/762] Create Simple BLS Signer Struct (#770) --- lib/bls_signer.go | 52 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 lib/bls_signer.go diff --git a/lib/bls_signer.go b/lib/bls_signer.go new file mode 100644 index 000000000..fab496162 --- /dev/null +++ b/lib/bls_signer.go @@ -0,0 +1,52 @@ +package lib + +import ( + "errors" + + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/consensus" +) + +// BLSSigner is a wrapper for the bls.PrivateKey type, which abstracts away the private key +// and only exposes protected methods for signing a select set of message types needed for +// Proof of Stake. 
It allows signing for: +// - PoS Validator Votes Messages +// - PoS Validator Timeout Messages +// - PoS Block Proposals +// - PoS Validator Connection Handshakes +// +// TODO: We will likely need to associate individual op-codes for each message type that can be signed, +// so that there no risk of signature collisions between different message types. Ex: the payload +// signed per message type must be made up of the following tuples: +// - Validator Vote: (0x01, view uint64, blockHash consensus.BlockHash) +// - Validator Timeout: (0x02, view uint64, highQCView uint64) +// - PoS Block Proposal: (0x03, view uint64, blockHash consensus.BlockHash) +// - PoS Validator Handshake: (0x04, peer's random nonce, our node's random nonce) +type BLSSigner struct { + privateKey *bls.PrivateKey +} + +func NewBLSSigner(privateKey *bls.PrivateKey) (*BLSSigner, error) { + if privateKey == nil { + return nil, errors.New("NewBLSSigner: privateKey cannot be nil") + } + return &BLSSigner{privateKey: privateKey}, nil +} + +func (signer *BLSSigner) GetPublicKey() *bls.PublicKey { + return signer.privateKey.PublicKey() +} + +func (signer *BLSSigner) SignValidatorVote(view uint64, blockHash consensus.BlockHash) (*bls.Signature, error) { + payload := consensus.GetVoteSignaturePayload(view, blockHash) + return signer.privateKey.Sign(payload[:]) +} + +func (signer *BLSSigner) SignValidatorTimeout(view uint64, highQCView uint64) (*bls.Signature, error) { + payload := consensus.GetTimeoutSignaturePayload(view, highQCView) + return signer.privateKey.Sign(payload[:]) +} + +// TODO: Add signing function for PoS blocks + +// TODO: Add signing function for PoS validator connection handshake From bb7afeadbaf1d508192a9ab033e23262df4db68a Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 1 Nov 2023 11:59:51 -0400 Subject: [PATCH 268/762] Eliminate flaky conditions in TestNetworkWithOfflineValidators test (#772) --- consensus/integration_test.go | 20 
+++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/consensus/integration_test.go b/consensus/integration_test.go index a0b62ba8d..6f924294e 100644 --- a/consensus/integration_test.go +++ b/consensus/integration_test.go @@ -1,3 +1,5 @@ +//go:build relic + package consensus import ( @@ -57,21 +59,29 @@ func TestNetworkWithOfflineValidators(t *testing.T) { // Let all nodes run for 0.5 seconds so that the network produces at least one block. time.Sleep(time.Millisecond * 500) - // Restart node 2 to simulate it going back online, and stop node 3 to simulate it - // going offline. The network has enough stake online to continue producing blocks. + // Restart node 2 to simulate it going back online node2.Start() + + // Let all nodes run for 0.5 seconds so that the network produces at least one block. + time.Sleep(time.Millisecond * 500) + + // Stop node 3 to simulate it going offline. The network has enough stake online to continue producing blocks. node3.Stop() // Let all nodes run for 0.5 seconds so that the network produces at least one block. time.Sleep(time.Millisecond * 500) - // Restart node 3 to simulate it going back online, and stop node 4 to simulate it - // going offline. The network has enough stake online to continue producing blocks. + // Restart node 3 to simulate it going back online node3.Start() + + // Let all nodes run for 0.5 seconds so that the network produces at least one block. + time.Sleep(time.Millisecond * 500) + + // Stop node 4 to simulate it going offline. The network has enough stake online to continue producing blocks. node4.Stop() // Let all nodes run for 2 seconds so that the network produces at least one block. 
- time.Sleep(time.Millisecond * 2000) + time.Sleep(time.Millisecond * 500) // Stop all remaining nodes node1.Stop() From c7bbd914c89ffe0267d8b229d9855ddb53dd4925 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 1 Nov 2023 17:55:52 -0400 Subject: [PATCH 269/762] Add Vote, Timeout, View Utilities To FastHotstuffEventLoop (#773) --- consensus/event_loop.go | 7 +++++++ consensus/utils.go | 17 +++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 9c94e4cd5..f41465d11 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -86,6 +86,13 @@ func (fc *FastHotStuffEventLoop) Init( return nil } +// GetCurrentView is a simple getter that returns the event loop's current view. It does not need +// to be thread-safe. The caller is expected to use it in a thread-safe manner, at a time when +// the view is guaranteed to not change. +func (fc *FastHotStuffEventLoop) GetCurrentView() uint64 { + return fc.currentView +} + // AdvanceViewOnTimeout is called when the tip has not changed but the event loop has timed out. This // function advances the view and resets the crank timer and timeout scheduled tasks. 
func (fc *FastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { diff --git a/consensus/utils.go b/consensus/utils.go index f9e62e0e8..a288ff989 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -13,6 +13,23 @@ import ( "golang.org/x/crypto/sha3" ) +func IsProperlyFormedVoteEvent(event *FastHotStuffEvent) bool { + return event != nil && // Event non-nil + event.EventType == FastHotStuffEventTypeVote && // Event type is vote + event.View > 0 && // The view the tip block was proposed in is non-zero + event.TipBlockHeight > 0 && // Tip block height voted on is non-zero + !isInterfaceNil(event.TipBlockHash) // Tip block hash voted on is non-nil +} + +func IsProperlyFormedTimeoutEvent(event *FastHotStuffEvent) bool { + return event != nil && // Event non-nil + event.EventType == FastHotStuffEventTypeTimeout && // Event type is timeout + event.View > 0 && // The view that was timed out is non-zero + event.TipBlockHeight > 0 && // Tip block height is non-zero + !isInterfaceNil(event.TipBlockHash) && // Tip block hash is non-nil + !isInterfaceNil(event.QC) // The high QC is non-nil +} + // Given a QC and a sorted validator list, this function returns true if the QC contains a valid // super-majority of signatures from the validator list for the QC's (View, BlockHash) pair. 
func IsValidSuperMajorityQuorumCertificate(qc QuorumCertificate, validators []Validator) bool { From dc49488d9da74fd6305e75d03ee88a29cf4d7bc6 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 1 Nov 2023 19:15:56 -0400 Subject: [PATCH 270/762] Move basic skeleton for PoS message handling and syncing to separate file (#763) * Create basic skeleton for PoS message handling and syncing * Simplify interface * Create new struct * Create constrtuctor for ConsensusMessageHandler * rename ConsensusMessageHandler to ConsensusController --- lib/pos_consensus_controller.go | 77 +++++++++++++++++++++++++++++++++ lib/server.go | 59 +++++-------------------- 2 files changed, 89 insertions(+), 47 deletions(-) create mode 100644 lib/pos_consensus_controller.go diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go new file mode 100644 index 000000000..276640c01 --- /dev/null +++ b/lib/pos_consensus_controller.go @@ -0,0 +1,77 @@ +package lib + +import "github.com/deso-protocol/core/consensus" + +type ConsensusController struct { + fastHotStuffEventLoop *consensus.FastHotStuffEventLoop + blockchain *Blockchain +} + +func NewConsensusController( + fastHotStuffEventLoop *consensus.FastHotStuffEventLoop, + blockchain *Blockchain, +) *ConsensusController { + return &ConsensusController{ + fastHotStuffEventLoop: fastHotStuffEventLoop, + blockchain: blockchain, + } +} + +func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus.FastHotStuffEvent) { + // The consensus module has signaled that we can propose a block at a certain block + // height. We construct the block and broadcast it here: + // 1. Verify that the block height we want to propose at is valid + // 2. Get a QC from the consensus module + // 3. Iterate over the top n transactions from the mempool + // 4. Construct a block with the QC and the top n transactions from the mempool + // 5. Sign the block + // 6. 
Process the block locally + // - This will connect the block to the blockchain, remove the transactions from the + // - mempool, and process the vote in the consensus module + // 7. Broadcast the block to the network +} + +func (cc *ConsensusController) HandleFastHostStuffEmptyTimeoutBlockProposal(event *consensus.FastHotStuffEvent) { + // The consensus module has signaled that we have a timeout QC and can propose one at a certain + // block height. We construct an empty block with a timeout QC and broadcast it here: + // 1. Verify that the block height and view we want to propose at is valid + // 2. Get a timeout QC from the consensus module + // 3. Construct a block with the timeout QC + // 4. Sign the block + // 5. Process the block locally + // 6. Broadcast the block to the network +} + +func (cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotStuffEvent) { + // The consensus module has signaled that we can vote on a block. We construct and + // broadcast the vote here: + // 1. Verify that the block height we want to vote on is valid + // 2. Construct the vote message + // 3. Process the vote in the consensus module + // 4. Broadcast the timeout msg to the network +} + +func (handler *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastHotStuffEvent) { + // The consensus module has signaled that we have timed out for a view. We construct and + // broadcast the timeout here: + // 1. Verify the block height and view we want to timeout on are valid + // 2. Construct the timeout message + // 3. Process the timeout in the consensus module + // 4. 
Broadcast the timeout msg to the network +} + +func (cc *ConsensusController) HandleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { + // TODO +} + +func (cc *ConsensusController) HandleGetBlocks(pp *Peer, msg *MsgDeSoGetBlocks) { + // TODO +} + +func (cc *ConsensusController) HandleHeader(pp *Peer, msg *MsgDeSoHeader) { + // TODO +} + +func (cc *ConsensusController) HandleBlock(pp *Peer, msg *MsgDeSoBlock) { + // TODO +} diff --git a/lib/server.go b/lib/server.go index 57be274f4..e7f6ebf00 100644 --- a/lib/server.go +++ b/lib/server.go @@ -61,6 +61,7 @@ type Server struct { eventManager *EventManager TxIndex *TXIndex + consensusController *ConsensusController fastHotStuffEventLoop *consensus.FastHotStuffEventLoop // posMempool *PosMemPool TODO: Add the mempool later @@ -2216,59 +2217,23 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { } } -func (srv *Server) _handleFastHostStuffBlockProposal(event *consensus.FastHotStuffEvent) { - // The consensus module has signaled that we can propose a block at a certain block - // height. We construct the block and broadcast it here: - // 1. Verify that the block height we want to propose at is valid - // 2. Get a QC from the consensus module - // 3. Iterate over the top n transactions from the mempool - // 4. Construct a block with the QC and the top n transactions from the mempool - // 5. Sign the block - // 6. Process the block locally - // - This will connect the block to the blockchain, remove the transactions from the - // - mempool, and process the vote in the consensus module - // 7. Broadcast the block to the network -} - -func (srv *Server) _handleFastHostStuffEmptyTimeoutBlockProposal(event *consensus.FastHotStuffEvent) { - // The consensus module has signaled that we have a timeout QC and can propose one at a certain - // block height. We construct an empty block with a timeout QC and broadcast it here: - // 1. Verify that the block height and view we want to propose at is valid - // 2. 
Get a timeout QC from the consensus module - // 3. Construct a block with the timeout QC - // 4. Sign the block - // 5. Process the block locally - // 6. Broadcast the block to the network -} - -func (srv *Server) _handleFastHostStuffVote(event *consensus.FastHotStuffEvent) { - // The consensus module has signaled that we can vote on a block. We construct and - // broadcast the vote here: - // 1. Verify that the block height we want to vote on is valid - // 2. Construct the vote message - // 3. Process the vote in the consensus module - // 4. Broadcast the timeout msg to the network -} - -func (srv *Server) _handleFastHostStuffTimeout(event *consensus.FastHotStuffEvent) { - // The consensus module has signaled that we have timed out for a view. We construct and - // broadcast the timeout here: - // 1. Verify the block height and view we want to timeout on are valid - // 2. Construct the timeout message - // 3. Process the timeout in the consensus module - // 4. Broadcast the timeout msg to the network -} - func (srv *Server) _handleFastHostStuffConsensusEvent(event *consensus.FastHotStuffEvent) { + // This should never happen. If the consensus message handler isn't defined, then something went + // wrong during the node initialization. We log it and return early to avoid panicking. 
+ if srv.consensusController == nil { + glog.Errorf("Server._handleFastHostStuffConsensusEvent: Consensus message handler is nil") + return + } + switch event.EventType { case consensus.FastHotStuffEventTypeVote: - srv._handleFastHostStuffVote(event) + srv.consensusController.HandleFastHostStuffVote(event) case consensus.FastHotStuffEventTypeTimeout: - srv._handleFastHostStuffTimeout(event) + srv.consensusController.HandleFastHostStuffTimeout(event) case consensus.FastHotStuffEventTypeConstructVoteQC: - srv._handleFastHostStuffBlockProposal(event) + srv.consensusController.HandleFastHostStuffBlockProposal(event) case consensus.FastHotStuffEventTypeConstructTimeoutQC: - srv._handleFastHostStuffEmptyTimeoutBlockProposal(event) + srv.consensusController.HandleFastHostStuffEmptyTimeoutBlockProposal(event) } } From 50a96841c8a37941949716d6d81a2ac0afee8dfd Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 2 Nov 2023 16:28:14 -0400 Subject: [PATCH 271/762] Instantiate FastHotStuffEventLoop Within ConsensusController (#778) * Instantiated FastHotStuffEventLoop within ConsensusController * Address Nina's comments --- lib/pos_consensus_controller.go | 18 ++++++++++++++++-- lib/server.go | 9 ++++----- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index 276640c01..d65b42435 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -1,8 +1,13 @@ package lib -import "github.com/deso-protocol/core/consensus" +import ( + "sync" + + "github.com/deso-protocol/core/consensus" +) type ConsensusController struct { + lock sync.RWMutex fastHotStuffEventLoop *consensus.FastHotStuffEventLoop blockchain *Blockchain } @@ -17,6 +22,15 @@ func NewConsensusController( } } +func (cc *ConsensusController) Init() { + // This initializes the FastHotStuffEventLoop based on the blockchain state. 
This should + // only be called once the blockchain has synced, the node is ready to join the validator + // network, and the node is able validate blocks in the steady state. + // + // TODO: Implement this later once the Blockchain struct changes are merged. We need to be + // able to fetch the tip block and current persisted view from DB from the Blockchain struct. +} + func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus.FastHotStuffEvent) { // The consensus module has signaled that we can propose a block at a certain block // height. We construct the block and broadcast it here: @@ -51,7 +65,7 @@ func (cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotS // 4. Broadcast the timeout msg to the network } -func (handler *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastHotStuffEvent) { +func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastHotStuffEvent) { // The consensus module has signaled that we have timed out for a view. We construct and // broadcast the timeout here: // 1. 
Verify the block height and view we want to timeout on are valid diff --git a/lib/server.go b/lib/server.go index e7f6ebf00..105d5033a 100644 --- a/lib/server.go +++ b/lib/server.go @@ -61,8 +61,7 @@ type Server struct { eventManager *EventManager TxIndex *TXIndex - consensusController *ConsensusController - fastHotStuffEventLoop *consensus.FastHotStuffEventLoop + consensusController *ConsensusController // posMempool *PosMemPool TODO: Add the mempool later // All messages received from peers get sent from the ConnectionManager to the @@ -2262,7 +2261,7 @@ func (srv *Server) _startConsensus() { } select { - case consensusEvent := <-srv.fastHotStuffEventLoop.Events: + case consensusEvent := <-srv.consensusController.fastHotStuffEventLoop.Events: { glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.TipBlockHeight) srv._handleFastHostStuffConsensusEvent(consensusEvent) @@ -2428,8 +2427,8 @@ func (srv *Server) Stop() { } // Stop the PoS block proposer if we have one running. - if srv.fastHotStuffEventLoop != nil { - srv.fastHotStuffEventLoop.Stop() + if srv.consensusController != nil { + srv.consensusController.fastHotStuffEventLoop.Stop() glog.Infof(CLog(Yellow, "Server.Stop: Closed the fastHotStuffEventLoop")) } From 6ba8b3df34601ee137d8d2c7a2987f7e58bf41a6 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 6 Nov 2023 15:33:53 -0800 Subject: [PATCH 272/762] Adding legacy mempool and block producer timestamp support. 
--- lib/block_producer.go | 6 ++++-- lib/block_view_access_group_members_test.go | 3 ++- lib/block_view_access_group_test.go | 3 ++- lib/block_view_association_test.go | 6 ++++-- lib/block_view_dao_coin_limit_order_test.go | 3 ++- lib/block_view_lockups.go | 19 ++++++++++--------- lib/block_view_validator_test.go | 15 ++++++++++----- lib/blockchain.go | 4 +++- lib/legacy_mempool.go | 15 ++++++++++----- 9 files changed, 47 insertions(+), 27 deletions(-) diff --git a/lib/block_producer.go b/lib/block_producer.go index 415ea9f16..b2aa7a8b8 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -225,7 +225,8 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) if err != nil { return nil, nil, nil, errors.Wrapf(err, "Error copying UtxoView: ") } - _, _, _, _, err = utxoViewCopy._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), uint32(blockRet.Header.Height), 0, true, false) + _, _, _, _, err = utxoViewCopy._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), + uint32(blockRet.Header.Height), int64(blockRet.Header.TstampNanoSecs), true, false) if err != nil { // Skip failing txns. This should happen super rarely. txnErrorString := fmt.Sprintf( @@ -235,7 +236,8 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) continue } // At this point, we know the transaction isn't going to break our view so attach it. - _, _, _, _, err = utxoView._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), uint32(blockRet.Header.Height), 0, true, false) + _, _, _, _, err = utxoView._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), + uint32(blockRet.Header.Height), int64(blockRet.Header.TstampNanoSecs), true, false) if err != nil { // We should never get an error here since we just attached a txn to an indentical // view. 
diff --git a/lib/block_view_access_group_members_test.go b/lib/block_view_access_group_members_test.go index 29162d7a9..1ed8c99f2 100644 --- a/lib/block_view_access_group_members_test.go +++ b/lib/block_view_access_group_members_test.go @@ -1764,7 +1764,8 @@ func TestAccessGroupMembersTxnWithDerivedKey(t *testing.T) { // Sign txn. _signTxnWithDerivedKey(t, txn, derivedKeyPrivBase58Check) // Connect txn. - utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, + 0, true, false) if err != nil { return err } diff --git a/lib/block_view_access_group_test.go b/lib/block_view_access_group_test.go index b5978c661..4f7652ced 100644 --- a/lib/block_view_access_group_test.go +++ b/lib/block_view_access_group_test.go @@ -613,7 +613,8 @@ func TestAccessGroupTxnWithDerivedKey(t *testing.T) { // Sign txn. _signTxnWithDerivedKey(t, txn, derivedKeyPrivBase58Check) // Connect txn. - utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, + 0, true, false) if err != nil { return err } diff --git a/lib/block_view_association_test.go b/lib/block_view_association_test.go index 08500433e..9f556f263 100644 --- a/lib/block_view_association_test.go +++ b/lib/block_view_association_test.go @@ -2172,7 +2172,8 @@ func _submitAssociationTxn( _signTxn(testMeta.t, txn, TransactorPrivateKeyBase58Check) // Connect the transaction. 
- utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( + txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -2339,7 +2340,8 @@ func _testAssociationsWithDerivedKey(t *testing.T) { // Sign txn. _signTxnWithDerivedKey(t, txn, derivedKeyPrivBase58Check) // Connect txn. - utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, _, _, _, err := utxoView.ConnectTransaction( + txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return err } diff --git a/lib/block_view_dao_coin_limit_order_test.go b/lib/block_view_dao_coin_limit_order_test.go index 8f16e7be1..adb25a705 100644 --- a/lib/block_view_dao_coin_limit_order_test.go +++ b/lib/block_view_dao_coin_limit_order_test.go @@ -4094,7 +4094,8 @@ func _connectDAOCoinLimitOrderTxn( _signTxn(testMeta.t, txn, privateKey) // Always use savedHeight (blockHeight+1) for validation since it's // assumed the transaction will get mined into the next block. - utxoOps, totalInput, totalOutput, fees, err := currentUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, totalInput, totalOutput, fees, err := currentUtxoView.ConnectTransaction( + txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { // If error, remove most-recent expected sender balance added for this txn. 
testMeta.expectedSenderBalances = testMeta.expectedSenderBalances[:len(testMeta.expectedSenderBalances)-1] diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index da22364f4..0cdc47692 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -368,8 +368,9 @@ func (bav *UtxoView) GetLocalYieldCurvePoints(profilePKID *PKID, lockupDuration // Check if any of the points needs to be cached in the view. for _, yieldCurvePoint := range dbYieldCurvePoints { - if _, pointInView := - bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID][yieldCurvePoint.ToMapKey()]; !pointInView { + _, pointInView := + bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID][yieldCurvePoint.ToMapKey()] + if !pointInView { bav._setLockupYieldCurvePoint(yieldCurvePoint) } } @@ -385,10 +386,10 @@ func (bav *UtxoView) GetLocalYieldCurvePoints(profilePKID *PKID, lockupDuration // Check for nil pointer cases. if lockupYieldCurvePoint.LockupDurationNanoSecs < lockupDuration && leftLockupPoint == nil { - leftLockupPoint = lockupYieldCurvePoint + leftLockupPoint = lockupYieldCurvePoint.Copy() } if lockupYieldCurvePoint.LockupDurationNanoSecs >= lockupDuration && rightLockupPoint == nil { - rightLockupPoint = lockupYieldCurvePoint + rightLockupPoint = lockupYieldCurvePoint.Copy() } // Check if the point is "more left" than the current left point. @@ -482,7 +483,7 @@ type UpdateCoinLockupParamsMetadata struct { // Note that LockupYieldAPYBasisPoints is ignored in this transaction. // // By setting LockupYieldDurationNanoSecs to zero, the yield curve attached to the profile - // is left unmodified. In any UpdateDAOCoinLockupParams transaction looking to modify only + // is left unmodified. In any UpdateCoinLockupParams transaction looking to modify only // LockupTransferRestrictions, LockupYieldDurationNanoSecs would be set to zero. 
LockupYieldDurationNanoSecs int64 LockupYieldAPYBasisPoints uint64 @@ -799,7 +800,7 @@ func (bav *UtxoView) _connectCoinLockup( profileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().Sub( &profileEntry.DAOCoinEntry.CoinsInCirculationNanos, txMeta.LockupAmountBaseUnits) - if transactorBalanceEntry.BalanceNanos.IsZero() { + if transactorBalanceEntry.BalanceNanos.IsZero() && !prevTransactorBalanceEntry.BalanceNanos.IsZero() { profileEntry.DAOCoinEntry.NumberOfHolders-- } bav._setProfileEntryMappings(profileEntry) @@ -1424,7 +1425,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( "_connectCoinLockupTransfer") } - // Credit the sender's balance entry. + // Debit the sender's balance entry. senderLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt().Sub( &senderLockedBalanceEntry.BalanceBaseUnits, txMeta.LockedCoinsToTransferBaseUnits) @@ -1662,7 +1663,7 @@ func (bav *UtxoView) _connectCoinUnlock( "_connectCoinUnlock") } - // Unlock coins until the amount specified by the transaction is deducted. + // Unlock all unlockable locked balance entries. var prevLockedBalanceEntries []*LockedBalanceEntry unlockedBalance := uint256.NewInt() for _, unlockableLockedBalanceEntry := range unlockableLockedBalanceEntries { @@ -1722,7 +1723,7 @@ func (bav *UtxoView) _connectCoinUnlock( "_connectCoinUnlock") } profileEntry.DAOCoinEntry.CoinsInCirculationNanos = *newCoinsInCirculationNanos - if prevTransactorBalanceEntry.BalanceNanos.IsZero() { + if prevTransactorBalanceEntry.BalanceNanos.IsZero() && !newTransactorBalanceEntry.BalanceNanos.IsZero() { profileEntry.DAOCoinEntry.NumberOfHolders++ } bav._setProfileEntryMappings(profileEntry) diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 5c94fa89b..da9ff60c8 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -336,7 +336,8 @@ func _submitRegisterAsValidatorTxn( _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) // Connect the transaction. 
- utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( + txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } @@ -386,7 +387,8 @@ func _submitUnregisterAsValidatorTxn( _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) // Connect the transaction. - utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( + txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } @@ -539,7 +541,8 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { transactorPublicKeyBase58Check := Base58CheckEncode(transactorPkBytes, false, params) prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) // Connect txn. - utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, _, _, _, err := utxoView.ConnectTransaction( + txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return err } @@ -1798,7 +1801,8 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { transactorPublicKeyBase58Check := Base58CheckEncode(transactorPkBytes, false, params) prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) // Connect txn. 
- utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, _, _, _, err := utxoView.ConnectTransaction( + txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return err } @@ -1950,7 +1954,8 @@ func _submitUnjailValidatorTxn( _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) // Connect the transaction. - utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( + txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } diff --git a/lib/blockchain.go b/lib/blockchain.go index 757145f1d..7df92ce45 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5139,7 +5139,9 @@ func (bc *Blockchain) EstimateDefaultFeeRateNanosPerKB( return minFeeRateNanosPerKB } numBytesInTxn := len(txnBytes) - _, _, _, fees, err := utxoView.ConnectTransaction(txn, txn.Hash(), int64(numBytesInTxn), tipNode.Height, int64(tipNode.Header.TstampNanoSecs), false, false) + _, _, _, fees, err := utxoView.ConnectTransaction( + txn, txn.Hash(), int64(numBytesInTxn), tipNode.Height, int64(tipNode.Header.TstampNanoSecs), + false, false) if err != nil { return minFeeRateNanosPerKB } diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 7c44ba81f..fea2572ce 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -782,7 +782,7 @@ func (mp *DeSoMempool) OpenTempDBAndDumpTxns() error { // only be called when one is sure that a transaction is valid. Otherwise, it could // mess up the UtxoViews that we store internally. 
func (mp *DeSoMempool) addTransaction( - tx *MsgDeSoTxn, height uint32, fee uint64, updateBackupView bool) (*MempoolTx, error) { + tx *MsgDeSoTxn, height uint32, timestamp int64, fee uint64, updateBackupView bool) (*MempoolTx, error) { // Add the transaction to the pool and mark the referenced outpoints // as spent by the pool. @@ -839,7 +839,9 @@ func (mp *DeSoMempool) addTransaction( // Add it to the universal view. We assume the txn was already added to the // backup view. - _, _, _, _, err = mp.universalUtxoView._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), height, 0, false, false) + _, _, _, _, err = mp.universalUtxoView._connectTransaction( + mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), height, + timestamp, false, false) if err != nil { return nil, errors.Wrap(err, "ERROR addTransaction: _connectTransaction "+ "failed on universalUtxoView; this is a HUGE problem and should never happen") @@ -847,7 +849,8 @@ func (mp *DeSoMempool) addTransaction( // Add it to the universalTransactionList if it made it through the view mp.universalTransactionList = append(mp.universalTransactionList, mempoolTx) if updateBackupView { - _, _, _, _, err = mp.backupUniversalUtxoView._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), height, 0, false, false) + _, _, _, _, err = mp.backupUniversalUtxoView._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, + int64(mempoolTx.TxSizeBytes), height, timestamp, false, false) if err != nil { return nil, errors.Wrap(err, "ERROR addTransaction: _connectTransaction "+ "failed on backupUniversalUtxoView; this is a HUGE problem and should never happen") @@ -1018,8 +1021,10 @@ func (mp *DeSoMempool) tryAcceptTransaction( totalNanosPurchasedBefore := mp.backupUniversalUtxoView.NanosPurchased usdCentsPerBitcoinBefore := mp.backupUniversalUtxoView.GetCurrentUSDCentsPerBitcoin() bestHeight := uint32(mp.bc.blockTip().Height + 1) + bestTimestamp := time.Now().UnixNano() // We 
can skip verifying the transaction size as related to the minimum fee here. - utxoOps, totalInput, totalOutput, txFee, err := mp.backupUniversalUtxoView._connectTransaction(tx, txHash, 0, bestHeight, 0, verifySignatures, false) + utxoOps, totalInput, totalOutput, txFee, err := mp.backupUniversalUtxoView._connectTransaction( + tx, txHash, 0, bestHeight, bestTimestamp, verifySignatures, false) if err != nil { mp.rebuildBackupView() return nil, nil, errors.Wrapf(err, "tryAcceptTransaction: Problem "+ @@ -1086,7 +1091,7 @@ func (mp *DeSoMempool) tryAcceptTransaction( // Add to transaction pool. Don't update the backup view since the call above // will have already done this. - mempoolTx, err := mp.addTransaction(tx, bestHeight, txFee, false /*updateBackupUniversalView*/) + mempoolTx, err := mp.addTransaction(tx, bestHeight, bestTimestamp, txFee, false /*updateBackupUniversalView*/) if err != nil { mp.rebuildBackupView() return nil, nil, errors.Wrapf(err, "tryAcceptTransaction: ") From cd079f67e2d3503e7276199e29c3613c2615c60a Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 7 Nov 2023 10:48:50 -0500 Subject: [PATCH 273/762] Initial commit (#794) --- consensus/event_loop.go | 48 +++++++++++------------ consensus/integration_test_types.go | 2 +- consensus/mocking.go | 60 +++++++++++++++++++++++++++++ consensus/mocking_test.go | 15 ++++++++ consensus/types.go | 20 +++++++++- lib/server.go | 4 +- 6 files changed, 121 insertions(+), 28 deletions(-) create mode 100644 consensus/mocking.go create mode 100644 consensus/mocking_test.go diff --git a/consensus/event_loop.go b/consensus/event_loop.go index f41465d11..f5ff94e82 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -12,8 +12,8 @@ import ( "github.com/deso-protocol/core/collections/bitset" ) -func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { - return &FastHotStuffEventLoop{ +func NewFastHotStuffEventLoop() *fastHotStuffEventLoop { + return 
&fastHotStuffEventLoop{ status: eventLoopStatusNotInitialized, crankTimerTask: NewScheduledTask[uint64](), nextTimeoutTask: NewScheduledTask[uint64](), @@ -34,7 +34,7 @@ func NewFastHotStuffEventLoop() *FastHotStuffEventLoop { // // Given the above, This function updates the tip internally, stores the safe blocks, and re-initializes // all internal data structures that are used to track incoming votes and timeout messages for QC construction. -func (fc *FastHotStuffEventLoop) Init( +func (fc *fastHotStuffEventLoop) Init( crankTimerInterval time.Duration, timeoutBaseDuration time.Duration, tip BlockWithValidatorList, @@ -89,13 +89,13 @@ func (fc *FastHotStuffEventLoop) Init( // GetCurrentView is a simple getter that returns the event loop's current view. It does not need // to be thread-safe. The caller is expected to use it in a thread-safe manner, at a time when // the view is guaranteed to not change. -func (fc *FastHotStuffEventLoop) GetCurrentView() uint64 { +func (fc *fastHotStuffEventLoop) GetCurrentView() uint64 { return fc.currentView } // AdvanceViewOnTimeout is called when the tip has not changed but the event loop has timed out. This // function advances the view and resets the crank timer and timeout scheduled tasks. -func (fc *FastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { +func (fc *fastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { // Grab the event loop's lock fc.lock.Lock() defer fc.lock.Unlock() @@ -133,7 +133,7 @@ func (fc *FastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { // all ancestors of the uncommitted tip that are safe to extend from, and all blocks from forks // that are safe to extend from. This function does not validate the collection of blocks. It // expects the server to know and decide what blocks are safe to extend from. 
-func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { +func (fc *fastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { // Grab the event loop's lock fc.lock.Lock() defer fc.lock.Unlock() @@ -175,7 +175,7 @@ func (fc *FastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidatorList, saf // storeBlocks is a helper function that validates the provided blocks, validator lists, and stores them. // It must be called while holding the event loop's lock. -func (fc *FastHotStuffEventLoop) storeBlocks(tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { +func (fc *fastHotStuffEventLoop) storeBlocks(tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { // Do a basic integrity check on the tip block and validator list if !isProperlyFormedBlockWithValidatorList(tip) { return errors.New("Invalid tip block or validator list") @@ -219,7 +219,7 @@ func (fc *FastHotStuffEventLoop) storeBlocks(tip BlockWithValidatorList, safeBlo // // Reference implementation: // https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L756 -func (fc *FastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { +func (fc *fastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { // Grab the event loop's lock fc.lock.Lock() defer fc.lock.Unlock() @@ -304,7 +304,7 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { // // Reference implementation: // https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L958 -func (fc *FastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) error { +func (fc *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) error { // Grab the event loop's lock fc.lock.Lock() defer fc.lock.Unlock() @@ -380,7 
+380,7 @@ func (fc *FastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) } // Sets the initial times for the crank timer and timeouts and starts scheduled tasks. -func (fc *FastHotStuffEventLoop) Start() { +func (fc *fastHotStuffEventLoop) Start() { fc.lock.Lock() defer fc.lock.Unlock() @@ -397,7 +397,7 @@ func (fc *FastHotStuffEventLoop) Start() { fc.resetScheduledTasks() } -func (fc *FastHotStuffEventLoop) Stop() { +func (fc *fastHotStuffEventLoop) Stop() { fc.lock.Lock() defer fc.lock.Unlock() @@ -415,14 +415,14 @@ func (fc *FastHotStuffEventLoop) Stop() { fc.status = eventLoopStatusInitialized } -func (fc *FastHotStuffEventLoop) IsInitialized() bool { +func (fc *fastHotStuffEventLoop) IsInitialized() bool { fc.lock.RLock() defer fc.lock.RUnlock() return fc.status != eventLoopStatusNotInitialized } -func (fc *FastHotStuffEventLoop) IsRunning() bool { +func (fc *fastHotStuffEventLoop) IsRunning() bool { fc.lock.RLock() defer fc.lock.RUnlock() @@ -431,7 +431,7 @@ func (fc *FastHotStuffEventLoop) IsRunning() bool { // resetScheduledTasks recomputes the nextBlockConstructionTimeStamp and nextTimeoutTimeStamp // values, and reschedules the crank timer and timeout tasks. -func (fc *FastHotStuffEventLoop) resetScheduledTasks() { +func (fc *fastHotStuffEventLoop) resetScheduledTasks() { // Compute the next timeout ETA. We use exponential back-off for timeouts when there are // multiple consecutive timeouts. We use the difference between the current view and the // chain tip's view to determine this. The current view can only drift from the chain tip's @@ -461,7 +461,7 @@ func (fc *FastHotStuffEventLoop) resetScheduledTasks() { // When this function is triggered, it means that we have reached the crank timer // time ETA for blockConstructionView. If we have a QC or timeout QC for the view, then we // signal the server. 
-func (fc *FastHotStuffEventLoop) onCrankTimerTaskExecuted(blockConstructionView uint64) { +func (fc *fastHotStuffEventLoop) onCrankTimerTaskExecuted(blockConstructionView uint64) { fc.lock.Lock() defer fc.lock.Unlock() @@ -506,7 +506,7 @@ func (fc *FastHotStuffEventLoop) onCrankTimerTaskExecuted(blockConstructionView // the signers list and aggregate signature that can be used to construct the QC. // // This function must be called while holding the event loop's lock. -func (fc *FastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuffEvent { +func (fc *fastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuffEvent { // If currentView != tipBlock.View + 1, then we have timed out at some point, and can no longer // construct a block with a QC of votes for the tip block. tipBlock := fc.tip.block @@ -592,7 +592,7 @@ func (fc *FastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff // to construct the timeout QC. // // This function must be called while holding the consensus instance's lock. -func (fc *FastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotStuffEvent { +func (fc *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotStuffEvent { // Fetch all timeouts for the previous view. All timeout messages for a view are aggregated and // proposed in the next view. So if we want to propose a timeout QC in the current view, we need @@ -709,7 +709,7 @@ func (fc *FastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt // When this function is triggered, it means that we have reached out the timeout ETA for the // timedOutView. In the event of a timeout, we signal the server that we are ready to time out // and cancel the timeout task. 
-func (fc *FastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uint64) { +func (fc *fastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uint64) { fc.lock.Lock() defer fc.lock.Unlock() @@ -757,7 +757,7 @@ func (fc *FastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uin // - Timeouts: if the next block were be an empty block with a timeout QC aggregated from timeout messages, // then it must satisfy nextBlock.GetView() = timeout.GetView() + 1. We can safely evict all timeout messages with // currentView > timeout.GetView() + 1. -func (fc *FastHotStuffEventLoop) evictStaleVotesAndTimeouts() { +func (fc *fastHotStuffEventLoop) evictStaleVotesAndTimeouts() { // Evict stale vote messages for blockHash, voters := range fc.votesSeenByBlockHash { for _, vote := range voters { @@ -780,7 +780,7 @@ func (fc *FastHotStuffEventLoop) evictStaleVotesAndTimeouts() { } } -func (fc *FastHotStuffEventLoop) storeVote(signaturePayload [32]byte, vote VoteMessage) { +func (fc *fastHotStuffEventLoop) storeVote(signaturePayload [32]byte, vote VoteMessage) { votesForBlockHash, ok := fc.votesSeenByBlockHash[signaturePayload] if !ok { votesForBlockHash = make(map[string]VoteMessage) @@ -790,7 +790,7 @@ func (fc *FastHotStuffEventLoop) storeVote(signaturePayload [32]byte, vote VoteM votesForBlockHash[vote.GetPublicKey().ToString()] = vote } -func (fc *FastHotStuffEventLoop) hasVotedForView(publicKey *bls.PublicKey, view uint64) bool { +func (fc *fastHotStuffEventLoop) hasVotedForView(publicKey *bls.PublicKey, view uint64) bool { // This is an O(n) operation that scales with the number of block hashes that we have stored // votes for. In practice, n will be very small because we evict stale votes, and server.go // will be smart about not processing votes for views we won't be the block proposer for. 
@@ -812,7 +812,7 @@ func (fc *FastHotStuffEventLoop) hasVotedForView(publicKey *bls.PublicKey, view return false } -func (fc *FastHotStuffEventLoop) storeTimeout(timeout TimeoutMessage) { +func (fc *fastHotStuffEventLoop) storeTimeout(timeout TimeoutMessage) { timeoutsForView, ok := fc.timeoutsSeenByView[timeout.GetView()] if !ok { timeoutsForView = make(map[string]TimeoutMessage) @@ -822,7 +822,7 @@ func (fc *FastHotStuffEventLoop) storeTimeout(timeout TimeoutMessage) { timeoutsForView[timeout.GetPublicKey().ToString()] = timeout } -func (fc *FastHotStuffEventLoop) hasTimedOutForView(publicKey *bls.PublicKey, view uint64) bool { +func (fc *fastHotStuffEventLoop) hasTimedOutForView(publicKey *bls.PublicKey, view uint64) bool { timeoutsForView, ok := fc.timeoutsSeenByView[view] if !ok { return false @@ -834,7 +834,7 @@ func (fc *FastHotStuffEventLoop) hasTimedOutForView(publicKey *bls.PublicKey, vi return ok } -func (fc *FastHotStuffEventLoop) fetchSafeBlockInfo(blockHash BlockHash) ( +func (fc *fastHotStuffEventLoop) fetchSafeBlockInfo(blockHash BlockHash) ( _isSafeBlock bool, _safeBlock Block, _validatorList []Validator, diff --git a/consensus/integration_test_types.go b/consensus/integration_test_types.go index 8d4e3733d..63722faf8 100644 --- a/consensus/integration_test_types.go +++ b/consensus/integration_test_types.go @@ -22,7 +22,7 @@ type validatorNode struct { privateKey *bls.PrivateKey stake *uint256.Int - eventLoop *FastHotStuffEventLoop + eventLoop *fastHotStuffEventLoop isBlockProposer bool validatorNodes []*validatorNode diff --git a/consensus/mocking.go b/consensus/mocking.go new file mode 100644 index 000000000..e902239cf --- /dev/null +++ b/consensus/mocking.go @@ -0,0 +1,60 @@ +package consensus + +import "time" + +type MockFastHotStuffEventLoop struct { + onGetEvents func() chan *FastHotStuffEvent + onInit func(time.Duration, time.Duration, BlockWithValidatorList, []BlockWithValidatorList) error + onGetCurrentView func() uint64 + 
onAdvanceViewOnTimeout func() (uint64, error) + onProcessTipBlock func(BlockWithValidatorList, []BlockWithValidatorList) error + onProcessValidatorVote func(VoteMessage) error + onProcessValidatorTimeout func(TimeoutMessage) error + onStart func() + onStop func() + onIsInitialized func() bool + onIsRunning func() bool +} + +func (fc *MockFastHotStuffEventLoop) GetEvents() chan *FastHotStuffEvent { + return fc.GetEvents() +} +func (fc *MockFastHotStuffEventLoop) Init(crankTimerInterval time.Duration, timeoutBaseDuration time.Duration, tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { + return fc.onInit(crankTimerInterval, timeoutBaseDuration, tip, safeBlocks) +} + +func (fc *MockFastHotStuffEventLoop) GetCurrentView() uint64 { + return fc.onGetCurrentView() +} + +func (fc *MockFastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { + return fc.onAdvanceViewOnTimeout() +} + +func (fc *MockFastHotStuffEventLoop) ProcessTipBlock(tipBlock BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { + return fc.onProcessTipBlock(tipBlock, safeBlocks) +} + +func (fc *MockFastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { + return fc.onProcessValidatorVote(vote) +} + +func (fc *MockFastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) error { + return fc.onProcessValidatorTimeout(timeout) +} + +func (fc *MockFastHotStuffEventLoop) Start() { + fc.onStart() +} + +func (fc *MockFastHotStuffEventLoop) Stop() { + fc.onStop() +} + +func (fc *MockFastHotStuffEventLoop) IsInitialized() bool { + return fc.onIsInitialized() +} + +func (fc *MockFastHotStuffEventLoop) IsRunning() bool { + return fc.onIsRunning() +} diff --git a/consensus/mocking_test.go b/consensus/mocking_test.go new file mode 100644 index 000000000..0881ead62 --- /dev/null +++ b/consensus/mocking_test.go @@ -0,0 +1,15 @@ +package consensus + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func 
TestFastHotStuffEventLoopMockType(t *testing.T) { + var eventLoop FastHotStuffEventLoop + eventLoop = &MockFastHotStuffEventLoop{} + + // Verify that the mock struct implements the interface type + require.True(t, !isInterfaceNil(eventLoop)) +} diff --git a/consensus/types.go b/consensus/types.go index 237676ed0..f757e07fd 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -40,6 +40,24 @@ type FastHotStuffEvent struct { // ambiguously repeated in the code base type BlockHashValue = [32]byte +// FastHotStuffEventLoop is the public facing interface for the consensus event loop. We expose an +// interface instead of the raw event loop struct to allow external callers to mock the event loop +// for testing purposes. +type FastHotStuffEventLoop interface { + GetEvents() chan *FastHotStuffEvent + + Init(time.Duration, time.Duration, BlockWithValidatorList, []BlockWithValidatorList) error + GetCurrentView() uint64 + AdvanceViewOnTimeout() (uint64, error) + ProcessTipBlock(BlockWithValidatorList, []BlockWithValidatorList) error + ProcessValidatorVote(VoteMessage) error + ProcessValidatorTimeout(TimeoutMessage) error + Start() + Stop() + IsInitialized() bool + IsRunning() bool +} + // BlockHash is a 32-byte hash of a block used to uniquely identify a block. It's re-defined here // as an interface that matches the exact structure of the BlockHash type in core, so that the two // packages are decoupled and the Fast HotStuff event loop can be tested end-to-end independently. @@ -146,7 +164,7 @@ const signalChannelBufferSize = 10000 // to pass in the tip block and safe extendable blocks. It expects the server to maintain the block chain, // the index of all past blocks, to perform QC validations for incoming blocks, to handle the commit rule, to // handle reorgs, and to only then to pass the the new validated tip. 
-type FastHotStuffEventLoop struct { +type fastHotStuffEventLoop struct { lock sync.RWMutex crankTimerInterval time.Duration diff --git a/lib/server.go b/lib/server.go index 57be274f4..c3e859805 100644 --- a/lib/server.go +++ b/lib/server.go @@ -61,7 +61,7 @@ type Server struct { eventManager *EventManager TxIndex *TXIndex - fastHotStuffEventLoop *consensus.FastHotStuffEventLoop + fastHotStuffEventLoop consensus.FastHotStuffEventLoop // posMempool *PosMemPool TODO: Add the mempool later // All messages received from peers get sent from the ConnectionManager to the @@ -2297,7 +2297,7 @@ func (srv *Server) _startConsensus() { } select { - case consensusEvent := <-srv.fastHotStuffEventLoop.Events: + case consensusEvent := <-srv.fastHotStuffEventLoop.GetEvents(): { glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.TipBlockHeight) srv._handleFastHostStuffConsensusEvent(consensusEvent) From 8700cc36063f926d67cd16714dfb4b6b700e32c4 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:16:02 -0500 Subject: [PATCH 274/762] Fix FastHotStuffEventLoop Implementation Struct (#796) --- consensus/event_loop.go | 6 ++++++ consensus/mocking_test.go | 10 +++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index f5ff94e82..ef7527473 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -86,6 +86,12 @@ func (fc *fastHotStuffEventLoop) Init( return nil } +// GetEvents returns the event loop's external channel for signaling. We need a getter function +// to ensure that this struct implements the FastHotStuffEventLoop interface type. +func (fc *fastHotStuffEventLoop) GetEvents() chan *FastHotStuffEvent { + return fc.Events +} + // GetCurrentView is a simple getter that returns the event loop's current view. It does not need // to be thread-safe. 
The caller is expected to use it in a thread-safe manner, at a time when // the view is guaranteed to not change. diff --git a/consensus/mocking_test.go b/consensus/mocking_test.go index 0881ead62..824209a9f 100644 --- a/consensus/mocking_test.go +++ b/consensus/mocking_test.go @@ -6,10 +6,18 @@ import ( "github.com/stretchr/testify/require" ) -func TestFastHotStuffEventLoopMockType(t *testing.T) { +func TestFastHotStuffEventLoopMockStruct(t *testing.T) { var eventLoop FastHotStuffEventLoop eventLoop = &MockFastHotStuffEventLoop{} // Verify that the mock struct implements the interface type require.True(t, !isInterfaceNil(eventLoop)) } + +func TestFastHotStuffEventLoopImplementationStruct(t *testing.T) { + var eventLoop FastHotStuffEventLoop + eventLoop = NewFastHotStuffEventLoop() + + // Verify that the implementation struct implements the interface type + require.True(t, !isInterfaceNil(eventLoop)) +} From 256c9406d80939f8806230ebf829dcca01dcf941 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:39:57 -0500 Subject: [PATCH 275/762] Make MockFastHotStuffEventLoop Fields Public (#797) --- consensus/mocking.go | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/consensus/mocking.go b/consensus/mocking.go index e902239cf..e91efd7d7 100644 --- a/consensus/mocking.go +++ b/consensus/mocking.go @@ -3,58 +3,58 @@ package consensus import "time" type MockFastHotStuffEventLoop struct { - onGetEvents func() chan *FastHotStuffEvent - onInit func(time.Duration, time.Duration, BlockWithValidatorList, []BlockWithValidatorList) error - onGetCurrentView func() uint64 - onAdvanceViewOnTimeout func() (uint64, error) - onProcessTipBlock func(BlockWithValidatorList, []BlockWithValidatorList) error - onProcessValidatorVote func(VoteMessage) error - onProcessValidatorTimeout func(TimeoutMessage) error - onStart func() - onStop func() - onIsInitialized func() bool 
- onIsRunning func() bool + OnGetEvents func() chan *FastHotStuffEvent + OnInit func(time.Duration, time.Duration, BlockWithValidatorList, []BlockWithValidatorList) error + OnGetCurrentView func() uint64 + OnAdvanceViewOnTimeout func() (uint64, error) + OnProcessTipBlock func(BlockWithValidatorList, []BlockWithValidatorList) error + OnProcessValidatorVote func(VoteMessage) error + OnProcessValidatorTimeout func(TimeoutMessage) error + OnStart func() + OnStop func() + OnIsInitialized func() bool + OnIsRunning func() bool } func (fc *MockFastHotStuffEventLoop) GetEvents() chan *FastHotStuffEvent { - return fc.GetEvents() + return fc.OnGetEvents() } func (fc *MockFastHotStuffEventLoop) Init(crankTimerInterval time.Duration, timeoutBaseDuration time.Duration, tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { - return fc.onInit(crankTimerInterval, timeoutBaseDuration, tip, safeBlocks) + return fc.OnInit(crankTimerInterval, timeoutBaseDuration, tip, safeBlocks) } func (fc *MockFastHotStuffEventLoop) GetCurrentView() uint64 { - return fc.onGetCurrentView() + return fc.OnGetCurrentView() } func (fc *MockFastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { - return fc.onAdvanceViewOnTimeout() + return fc.OnAdvanceViewOnTimeout() } func (fc *MockFastHotStuffEventLoop) ProcessTipBlock(tipBlock BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { - return fc.onProcessTipBlock(tipBlock, safeBlocks) + return fc.OnProcessTipBlock(tipBlock, safeBlocks) } func (fc *MockFastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { - return fc.onProcessValidatorVote(vote) + return fc.OnProcessValidatorVote(vote) } func (fc *MockFastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) error { - return fc.onProcessValidatorTimeout(timeout) + return fc.OnProcessValidatorTimeout(timeout) } func (fc *MockFastHotStuffEventLoop) Start() { - fc.onStart() + fc.OnStart() } func (fc *MockFastHotStuffEventLoop) Stop() { - 
fc.onStop() + fc.OnStop() } func (fc *MockFastHotStuffEventLoop) IsInitialized() bool { - return fc.onIsInitialized() + return fc.OnIsInitialized() } func (fc *MockFastHotStuffEventLoop) IsRunning() bool { - return fc.onIsRunning() + return fc.OnIsRunning() } From e538e1cc7982fc9aec1c42832679680d44a0ece8 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 7 Nov 2023 12:21:53 -0500 Subject: [PATCH 276/762] Move Consensus Interface Changes from feature/pos-process-block to feature/pos-fast-hotstuff-consensus (#798) --- bls/signature.go | 4 ++ bls/signature_no_relic.go | 4 ++ consensus/utils.go | 86 +++++++++++++++++++++++++++++++---- consensus/utils_test.go | 94 +++++++++++++++++++++++++++++++++++++++ lib/network.go | 27 +++++++++++ lib/pos_network.go | 57 ++++++++++++++++++++++-- lib/types.go | 4 ++ 7 files changed, 264 insertions(+), 12 deletions(-) diff --git a/bls/signature.go b/bls/signature.go index 9bcfdb4a9..a3fe19282 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -251,6 +251,10 @@ func (publicKey *PublicKey) Copy() *PublicKey { } } +func (publicKey *PublicKey) IsEmpty() bool { + return publicKey == nil || publicKey.flowPublicKey == nil +} + // // TYPES: Signature // diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go index 093a06331..d264b0449 100644 --- a/bls/signature_no_relic.go +++ b/bls/signature_no_relic.go @@ -96,6 +96,10 @@ func (publicKey *PublicKey) Copy() *PublicKey { panic(BLSNoRelicError) } +func (publicKey *PublicKey) IsEmpty() bool { + panic(BLSNoRelicError) +} + // // TYPES: Signature // diff --git a/consensus/utils.go b/consensus/utils.go index a288ff989..ebfe6802a 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -40,6 +40,48 @@ func IsValidSuperMajorityQuorumCertificate(qc QuorumCertificate, validators []Va // Compute the signature that validators in the QC would have signed signaturePayload := GetVoteSignaturePayload(qc.GetView(), qc.GetBlockHash()) + 
hasSuperMajorityStake, validatorPublicKeysInQC := isSuperMajorityStakeSignersList(qc.GetAggregatedSignature().GetSignersList(), validators) + if !hasSuperMajorityStake { + return false + } + + return isValidSignatureManyPublicKeys(validatorPublicKeysInQC, qc.GetAggregatedSignature().GetSignature(), signaturePayload[:]) +} + +func IsValidSuperMajorityAggregateQuorumCertificate(aggQC AggregateQuorumCertificate, validators []Validator) bool { + if !isProperlyFormedAggregateQC(aggQC) || !isProperlyFormedValidatorSet(validators) { + return false + } + + hasSuperMajorityStake, signerPublicKeys := isSuperMajorityStakeSignersList(aggQC.GetAggregatedSignature().GetSignersList(), validators) + if !hasSuperMajorityStake { + return false + } + + // Compute the timeout payloads signed by each validator. + // Each validator should sign a payload with the pair (View, HighQCView). + // The ordering of the high QC views and validators in the aggregate signature + // will match the ordering of active validators in descending order of stake for + // the timed out view's epoch. 
+ signedPayloads := [][]byte{} + for _, highQCView := range aggQC.GetHighQCViews() { + payload := GetTimeoutSignaturePayload(aggQC.GetView(), highQCView) + signedPayloads = append(signedPayloads, payload[:]) + } + + // Validate the signers' aggregate signatures + isValidSignature, err := bls.VerifyAggregateSignatureMultiplePayloads( + signerPublicKeys, + aggQC.GetAggregatedSignature().GetSignature(), + signedPayloads, + ) + if err != nil || !isValidSignature { + return false + } + return true +} + +func isSuperMajorityStakeSignersList(signersList *bitset.Bitset, validators []Validator) (bool, []*bls.PublicKey) { // Compute the total stake in the QC and the total stake in the network stakeInQC := uint256.NewInt() totalStake := uint256.NewInt() @@ -47,12 +89,9 @@ func IsValidSuperMajorityQuorumCertificate(qc QuorumCertificate, validators []Va // Fetch the validators in the QC validatorPublicKeysInQC := []*bls.PublicKey{} - // Fetch the aggregated signature in the QC - aggregatedSignature := qc.GetAggregatedSignature() - // Fetch the validators in the QC, and compute the sum of stake in the QC and in the network for ii := range validators { - if aggregatedSignature.GetSignersList().Get(ii) { + if signersList.Get(ii) { stakeInQC.Add(stakeInQC, validators[ii].GetStakeAmount()) validatorPublicKeysInQC = append(validatorPublicKeysInQC, validators[ii].GetPublicKey()) } @@ -61,11 +100,9 @@ func IsValidSuperMajorityQuorumCertificate(qc QuorumCertificate, validators []Va // Check if the QC contains a super-majority of stake if !isSuperMajorityStake(stakeInQC, totalStake) { - return false + return false, validatorPublicKeysInQC } - - // Finally, validate the signature - return isValidSignatureManyPublicKeys(validatorPublicKeysInQC, aggregatedSignature.GetSignature(), signaturePayload[:]) + return true, validatorPublicKeysInQC } // When voting on a block, validators sign the payload sha3-256(View, BlockHash) with their BLS @@ -196,6 +233,19 @@ func isProperlyFormedQC(qc 
QuorumCertificate) bool { return isProperlyFormedAggregateSignature(qc.GetAggregatedSignature()) } +func isProperlyFormedAggregateQC(aggQC AggregateQuorumCertificate) bool { + // The QC must be non-nil + if isInterfaceNil(aggQC) { + return false + } + // The view must be non-zero and the high QC must be properly formed + // TODO: Do we need further validation on high qc views? such as non-zero? + if aggQC.GetView() == 0 || !isProperlyFormedQC(aggQC.GetHighQC()) || len(aggQC.GetHighQCViews()) == 0 { + return false + } + return isProperlyFormedAggregateSignature(aggQC.GetAggregatedSignature()) +} + func isProperlyFormedAggregateSignature(agg AggregatedSignature) bool { // The signature must be non-nil if isInterfaceNil(agg) { @@ -367,6 +417,26 @@ func createDummyQC(view uint64, blockHash BlockHash) *quorumCertificate { } } +func createDummyAggQc(view uint64, highQCView uint64) *aggregateQuorumCertificate { + timeoutSignaturePayload := GetTimeoutSignaturePayload(view, highQCView) + dummyQC := createDummyQC(highQCView, createDummyBlockHash()) + blsPrivateKey1, _ := bls.NewPrivateKey() + blsSignature1, _ := blsPrivateKey1.Sign(timeoutSignaturePayload[:]) + blsPrivateKey2, _ := bls.NewPrivateKey() + blsSignature2, _ := blsPrivateKey2.Sign(timeoutSignaturePayload[:]) + signersList := bitset.NewBitset().Set(0, true).Set(1, true) + aggregateSignature, _ := bls.AggregateSignatures([]*bls.Signature{blsSignature1, blsSignature2}) + return &aggregateQuorumCertificate{ + view: view, + highQC: dummyQC, + highQCViews: []uint64{highQCView, highQCView}, + aggregatedSignature: &aggregatedSignature{ + signersList: signersList, + signature: aggregateSignature, + }, + } +} + func createDummyBLSSignature() *bls.Signature { blsPrivateKey, _ := bls.NewPrivateKey() blockHashValue := createDummyBlockHash().GetValue() diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 9f7b5449a..d8203082f 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -93,6 +93,100 @@ 
func TestIsValidSuperMajorityQuorumCertificate(t *testing.T) { } } +func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { + // Test malformed QC + { + require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(nil, createDummyValidatorList())) + } + + // Test malformed validator set + { + require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(createDummyAggQc(2, 1), nil)) + } + + // Set up test validator data + validatorPrivateKey1 := createDummyBLSPrivateKey() + validatorPrivateKey2 := createDummyBLSPrivateKey() + validatorPrivateKey3 := createDummyBLSPrivateKey() + + validator1 := validator{ + publicKey: validatorPrivateKey1.PublicKey(), + stakeAmount: uint256.NewInt().SetUint64(3), + } + + validator2 := validator{ + publicKey: validatorPrivateKey2.PublicKey(), + stakeAmount: uint256.NewInt().SetUint64(2), + } + + validator3 := validator{ + publicKey: validatorPrivateKey3.PublicKey(), + stakeAmount: uint256.NewInt().SetUint64(1), + } + + validators := []Validator{&validator1, &validator2, &validator3} + + // Set up the block hash and view + dummyBlockHash := createDummyBlockHash() + view := uint64(10) + + // Compute the signature payload + signaturePayload := GetVoteSignaturePayload(view, dummyBlockHash) + + // Compute the aggregate signature payload + timeoutPayload := GetTimeoutSignaturePayload(view+1, view) + + validator1Signature, err := validatorPrivateKey1.Sign(signaturePayload[:]) + require.NoError(t, err) + highQC := quorumCertificate{ + blockHash: dummyBlockHash, + view: view, + aggregatedSignature: &aggregatedSignature{ + signersList: bitset.NewBitset().FromBytes([]byte{0x1}), // 0b0001, which represents validator 1 + signature: validator1Signature, + }, + } + + // Test with no super-majority stake + { + validator1TimeoutSignature, err := validatorPrivateKey1.Sign(timeoutPayload[:]) + require.NoError(t, err) + qc := aggregateQuorumCertificate{ + view: view + 1, + highQC: &highQC, + highQCViews: []uint64{view}, + 
aggregatedSignature: &aggregatedSignature{ + signersList: bitset.NewBitset().FromBytes([]byte{0x1}), // 0b0001, which represents validator 1 + signature: validator1TimeoutSignature, + }, + } + require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators)) + } + + // Test with 5/6 super-majority stake + { + validator1TimeoutSignature, err := validatorPrivateKey1.Sign(timeoutPayload[:]) + require.NoError(t, err) + // For fun, let's have validator 2 sign a timeout payload where its high QC is further behind. + validator2TimeoutPayload := GetTimeoutSignaturePayload(view+1, view-1) + validator2TimeoutSignature, err := validatorPrivateKey2.Sign(validator2TimeoutPayload[:]) + require.NoError(t, err) + + aggSig, err := bls.AggregateSignatures([]*bls.Signature{validator1TimeoutSignature, validator2TimeoutSignature}) + require.NoError(t, err) + qc := aggregateQuorumCertificate{ + view: view + 1, + highQC: &highQC, + highQCViews: []uint64{view, view - 1}, + aggregatedSignature: &aggregatedSignature{ + signersList: bitset.NewBitset().FromBytes([]byte{0x3}), // 0b0011, which represents validators 1 and 2 + signature: aggSig, + }, + } + require.True(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators)) + } +} + func TestIsProperlyFormedBlock(t *testing.T) { // Test nil block { diff --git a/lib/network.go b/lib/network.go index c77f3ea0e..5fece2c45 100644 --- a/lib/network.go +++ b/lib/network.go @@ -18,8 +18,10 @@ import ( "time" "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/golang/glog" "github.com/deso-protocol/core/collections/bitset" + "github.com/deso-protocol/core/consensus" "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" @@ -1972,6 +1974,31 @@ type MsgDeSoHeader struct { ProposerVotePartialSignature *bls.Signature } +func (msg *MsgDeSoHeader) GetBlockHash() consensus.BlockHash { + hash, err := msg.Hash() + if err != nil { + glog.Errorf("MsgDeSoHeader.GetBlockHash: Problem hashing header: %v", err) + // TODO: 
Should we return nil? + return &BlockHash{} + } + return hash +} + +func (msg *MsgDeSoHeader) GetHeight() uint64 { + return msg.Height +} + +func (msg *MsgDeSoHeader) GetView() uint64 { + return msg.ProposedInView +} + +func (msg *MsgDeSoHeader) GetQC() consensus.QuorumCertificate { + if msg.ValidatorsTimeoutAggregateQC.isEmpty() { + return msg.ValidatorsVoteQC + } + return msg.ValidatorsTimeoutAggregateQC.ValidatorsHighQC +} + func HeaderSizeBytes() int { header := NewMessage(MsgTypeHeader) headerBytes, _ := header.ToBytes(false) diff --git a/lib/pos_network.go b/lib/pos_network.go index 75efccf6d..c270a8b30 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -5,6 +5,7 @@ import ( "fmt" "io" + "github.com/deso-protocol/core/consensus" "golang.org/x/crypto/sha3" "github.com/deso-protocol/core/bls" @@ -259,7 +260,7 @@ func (msg *MsgDeSoValidatorTimeout) FromBytes(data []byte) error { // HighQC msg.HighQC = &QuorumCertificate{} - if msg.HighQC.FromBytes(rr); err != nil { + if err = msg.HighQC.FromBytes(rr); err != nil { return errors.Wrapf(err, "MsgDeSoValidatorTimeout.FromBytes: Error decoding HighQC") } @@ -296,6 +297,18 @@ type QuorumCertificate struct { ValidatorsVoteAggregatedSignature *AggregatedBLSSignature } +func (qc *QuorumCertificate) GetBlockHash() consensus.BlockHash { + return qc.BlockHash +} + +func (qc *QuorumCertificate) GetView() uint64 { + return qc.ProposedInView +} + +func (qc *QuorumCertificate) GetAggregatedSignature() consensus.AggregatedSignature { + return qc.ValidatorsVoteAggregatedSignature +} + // Performs a deep equality check between two QuorumCertificates, and returns true // if the two are fully initialized and have identical values. In all other cases, // it return false. 
@@ -317,6 +330,15 @@ func (qc *QuorumCertificate) Eq(other *QuorumCertificate) bool { return bytes.Equal(qcEncodedBytes, otherEncodedBytes) } +func (qc *QuorumCertificate) isEmpty() bool { + return qc == nil || + qc.BlockHash == nil || + qc.ProposedInView == 0 || + qc.ValidatorsVoteAggregatedSignature == nil || + qc.ValidatorsVoteAggregatedSignature.Signature == nil || + qc.ValidatorsVoteAggregatedSignature.SignersList == nil +} + func (qc *QuorumCertificate) ToBytes() ([]byte, error) { retBytes := []byte{} @@ -408,6 +430,14 @@ type AggregatedBLSSignature struct { Signature *bls.Signature } +func (sig *AggregatedBLSSignature) GetSignersList() *bitset.Bitset { + return sig.SignersList +} + +func (sig *AggregatedBLSSignature) GetSignature() *bls.Signature { + return sig.Signature +} + // Performs a deep equality check between two AggregatedBLSSignatures, and returns true // if the two are fully initialized and have identical values. In all other cases, // it return false. @@ -499,6 +529,22 @@ type TimeoutAggregateQuorumCertificate struct { ValidatorsTimeoutAggregatedSignature *AggregatedBLSSignature } +func (aggQC *TimeoutAggregateQuorumCertificate) GetView() uint64 { + return aggQC.TimedOutView +} + +func (aggQC *TimeoutAggregateQuorumCertificate) GetHighQC() consensus.QuorumCertificate { + return aggQC.ValidatorsHighQC +} + +func (aggQC *TimeoutAggregateQuorumCertificate) GetHighQCViews() []uint64 { + return aggQC.ValidatorsTimeoutHighQCViews +} + +func (aggQC *TimeoutAggregateQuorumCertificate) GetAggregatedSignature() consensus.AggregatedSignature { + return aggQC.ValidatorsTimeoutAggregatedSignature +} + // Performs a deep equality check between two TimeoutAggregateQuorumCertificates, and // returns true if the two are fully initialized and have identical values. In all other // cases, it return false. 
@@ -563,7 +609,7 @@ func (aggQC *TimeoutAggregateQuorumCertificate) FromBytes(rr io.Reader) error { } aggQC.ValidatorsHighQC = &QuorumCertificate{} - if aggQC.ValidatorsHighQC.FromBytes(rr); err != nil { + if err = aggQC.ValidatorsHighQC.FromBytes(rr); err != nil { return errors.Wrapf(err, "TimeoutAggregateQuorumCertificate.FromBytes: Error decoding ValidatorsHighQC") } @@ -573,7 +619,7 @@ func (aggQC *TimeoutAggregateQuorumCertificate) FromBytes(rr io.Reader) error { } aggQC.ValidatorsTimeoutAggregatedSignature = &AggregatedBLSSignature{} - if aggQC.ValidatorsTimeoutAggregatedSignature.FromBytes(rr); err != nil { + if err = aggQC.ValidatorsTimeoutAggregatedSignature.FromBytes(rr); err != nil { return errors.Wrapf(err, "TimeoutAggregateQuorumCertificate.FromBytes: Error decoding ValidatorsTimeoutAggregatedSignature") } @@ -585,8 +631,11 @@ func (aggQC *TimeoutAggregateQuorumCertificate) FromBytes(rr io.Reader) error { func (aggQC *TimeoutAggregateQuorumCertificate) isEmpty() bool { return aggQC == nil || aggQC.TimedOutView == 0 || + aggQC.ValidatorsHighQC.isEmpty() || + len(aggQC.ValidatorsTimeoutHighQCViews) == 0 || aggQC.ValidatorsTimeoutAggregatedSignature == nil || - aggQC.ValidatorsTimeoutAggregatedSignature.Signature == nil + aggQC.ValidatorsTimeoutAggregatedSignature.Signature == nil || + aggQC.ValidatorsTimeoutAggregatedSignature.SignersList == nil } func EncodeTimeoutAggregateQuorumCertificate(aggQC *TimeoutAggregateQuorumCertificate) ([]byte, error) { diff --git a/lib/types.go b/lib/types.go index 3e531f2ca..9182883a7 100644 --- a/lib/types.go +++ b/lib/types.go @@ -200,6 +200,10 @@ func NewBlockHash(input []byte) *BlockHash { return blockHash } +func (bh *BlockHash) GetValue() [HashSizeBytes]byte { + return *bh +} + func (bh *BlockHash) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { return EncodeByteArray(bh[:]) } From a31f3f8a0f67b717a387b63d977b1a4c69714934 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski 
<29924594+AeonSw4n@users.noreply.github.com> Date: Wed, 8 Nov 2023 08:59:10 -0800 Subject: [PATCH 277/762] BLS Keystore (#801) --- lib/bls_keystore.go | 135 ++++++++++++++++++++++++++++++++++++++++++++ lib/bls_signer.go | 52 ----------------- 2 files changed, 135 insertions(+), 52 deletions(-) create mode 100644 lib/bls_keystore.go delete mode 100644 lib/bls_signer.go diff --git a/lib/bls_keystore.go b/lib/bls_keystore.go new file mode 100644 index 000000000..2cb280f4e --- /dev/null +++ b/lib/bls_keystore.go @@ -0,0 +1,135 @@ +package lib + +import ( + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/consensus" + "github.com/pkg/errors" +) + +// BLSSigner is a wrapper for the bls.PrivateKey type, which abstracts away the private key +// and only exposes protected methods for signing a select set of message types needed for +// Proof of Stake. It allows signing for: +// - PoS Validator Votes Messages +// - PoS Validator Timeout Messages +// - PoS Block Proposals +// - PoS Validator Connection Handshakes +// +// TODO: We will likely need to associate individual op-codes for each message type that can be signed, +// so that there no risk of signature collisions between different message types. 
Ex: the payload +// signed per message type must be made up of the following tuples: +// - Validator Vote: (0x01, view uint64, blockHash consensus.BlockHash) +// - Validator Timeout: (0x02, view uint64, highQCView uint64) +// - PoS Block Proposal: (0x03, view uint64, blockHash consensus.BlockHash) +// - PoS Validator Handshake: (0x04, peer's random nonce, our node's random nonce) + +type BLSSignatureOpCode byte + +const ( + BLSSignatureOpCodeValidatorVote BLSSignatureOpCode = 0 + BLSSignatureOpCodeValidatorTimeout BLSSignatureOpCode = 1 + BLSSignatureOpCodePoSBlockProposal BLSSignatureOpCode = 2 + BLSSignatureOpCodePoSValidatorHandshake BLSSignatureOpCode = 3 +) + +func (opCode BLSSignatureOpCode) Bytes() []byte { + return []byte{byte(opCode)} +} + +////////////////////////////////////////////////////////// +// BLSKeystore +////////////////////////////////////////////////////////// + +type BLSKeystore struct { + signer *BLSSigner +} + +func NewBLSKeystore(seed string) (*BLSKeystore, error) { + privateKey, err := bls.NewPrivateKey() + if err != nil { + return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating private key from seed: %s", seed) + } + if _, err = privateKey.FromString(seed); err != nil { + return nil, errors.Wrapf(err, "NewBLSKeystore: Problem retrieving private key from seed: %s", seed) + } + + signer, err := NewBLSSigner(privateKey) + if err != nil { + return nil, err + } + return &BLSKeystore{signer: signer}, nil +} + +func (keystore *BLSKeystore) GetSigner() *BLSSigner { + return keystore.signer +} + +////////////////////////////////////////////////////////// +// BLSSigner +////////////////////////////////////////////////////////// + +type BLSSigner struct { + privateKey *bls.PrivateKey +} + +func NewBLSSigner(privateKey *bls.PrivateKey) (*BLSSigner, error) { + if privateKey == nil { + return nil, errors.New("NewBLSSigner: privateKey cannot be nil") + } + return &BLSSigner{privateKey: privateKey}, nil +} + +func (signer *BLSSigner) 
sign(opCode BLSSignatureOpCode, payload []byte) (*bls.Signature, error) { + newPayload := append(opCode.Bytes(), payload...) + return signer.privateKey.Sign(newPayload) +} + +func (signer *BLSSigner) GetPublicKey() *bls.PublicKey { + return signer.privateKey.PublicKey() +} + +func (signer *BLSSigner) SignValidatorVote(view uint64, blockHash consensus.BlockHash) (*bls.Signature, error) { + payload := consensus.GetVoteSignaturePayload(view, blockHash) + return signer.sign(BLSSignatureOpCodeValidatorVote, payload[:]) +} + +func (signer *BLSSigner) SignValidatorTimeout(view uint64, highQCView uint64) (*bls.Signature, error) { + payload := consensus.GetTimeoutSignaturePayload(view, highQCView) + return signer.sign(BLSSignatureOpCodeValidatorTimeout, payload[:]) +} + +// TODO: Add signing function for PoS blocks + +func (signer *BLSSigner) SignPoSValidatorHandshake(nonceSent uint64, nonceReceived uint64, tstampMicro uint64) (*bls.Signature, error) { + // FIXME + payload := []byte{} + return signer.sign(BLSSignatureOpCodePoSValidatorHandshake, payload[:]) +} + +////////////////////////////////////////////////////////// +// BLS Verification +////////////////////////////////////////////////////////// + +func _blsVerify(opCode BLSSignatureOpCode, payload []byte, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { + newPayload := append(opCode.Bytes(), payload...) 
+ return publicKey.Verify(signature, newPayload) +} + +func BLSVerifyValidatorVote(view uint64, blockHash consensus.BlockHash, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { + payload := consensus.GetVoteSignaturePayload(view, blockHash) + return _blsVerify(BLSSignatureOpCodeValidatorVote, payload[:], signature, publicKey) +} + +func BLSVerifyValidatorTimeout(view uint64, highQCView uint64, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { + payload := consensus.GetTimeoutSignaturePayload(view, highQCView) + return _blsVerify(BLSSignatureOpCodeValidatorTimeout, payload[:], signature, publicKey) +} + +// TODO: Add Verifier function for PoS blocks + +func BLSVerifyPoSValidatorHandshake(nonceSent uint64, nonceReceived uint64, tstampMicro uint64, + signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { + + // FIXME + payload := []byte{} + return _blsVerify(BLSSignatureOpCodePoSValidatorHandshake, payload[:], signature, publicKey) +} diff --git a/lib/bls_signer.go b/lib/bls_signer.go deleted file mode 100644 index fab496162..000000000 --- a/lib/bls_signer.go +++ /dev/null @@ -1,52 +0,0 @@ -package lib - -import ( - "errors" - - "github.com/deso-protocol/core/bls" - "github.com/deso-protocol/core/consensus" -) - -// BLSSigner is a wrapper for the bls.PrivateKey type, which abstracts away the private key -// and only exposes protected methods for signing a select set of message types needed for -// Proof of Stake. It allows signing for: -// - PoS Validator Votes Messages -// - PoS Validator Timeout Messages -// - PoS Block Proposals -// - PoS Validator Connection Handshakes -// -// TODO: We will likely need to associate individual op-codes for each message type that can be signed, -// so that there no risk of signature collisions between different message types. 
Ex: the payload -// signed per message type must be made up of the following tuples: -// - Validator Vote: (0x01, view uint64, blockHash consensus.BlockHash) -// - Validator Timeout: (0x02, view uint64, highQCView uint64) -// - PoS Block Proposal: (0x03, view uint64, blockHash consensus.BlockHash) -// - PoS Validator Handshake: (0x04, peer's random nonce, our node's random nonce) -type BLSSigner struct { - privateKey *bls.PrivateKey -} - -func NewBLSSigner(privateKey *bls.PrivateKey) (*BLSSigner, error) { - if privateKey == nil { - return nil, errors.New("NewBLSSigner: privateKey cannot be nil") - } - return &BLSSigner{privateKey: privateKey}, nil -} - -func (signer *BLSSigner) GetPublicKey() *bls.PublicKey { - return signer.privateKey.PublicKey() -} - -func (signer *BLSSigner) SignValidatorVote(view uint64, blockHash consensus.BlockHash) (*bls.Signature, error) { - payload := consensus.GetVoteSignaturePayload(view, blockHash) - return signer.privateKey.Sign(payload[:]) -} - -func (signer *BLSSigner) SignValidatorTimeout(view uint64, highQCView uint64) (*bls.Signature, error) { - payload := consensus.GetTimeoutSignaturePayload(view, highQCView) - return signer.privateKey.Sign(payload[:]) -} - -// TODO: Add signing function for PoS blocks - -// TODO: Add signing function for PoS validator connection handshake From f8cd0f0be1877769277d7f3d0ea164012c99a49e Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 8 Nov 2023 12:55:26 -0500 Subject: [PATCH 278/762] Handle And Broadcast Local Timeout Signal (#779) * Handle And Broadcast Local Timeout Signal * Address Nina's feedback * Fix signature verification --- consensus/event_loop.go | 2 +- consensus/utils.go | 2 +- consensus/utils_test.go | 12 +-- lib/pos_consensus_controller.go | 92 +++++++++++++++++++--- lib/pos_consensus_controller_test.go | 112 +++++++++++++++++++++++++++ lib/pos_network.go | 20 ----- lib/pos_network_message_interface.go | 67 ++++++++++++++++ 
lib/types.go | 4 - 8 files changed, 270 insertions(+), 41 deletions(-) create mode 100644 lib/pos_consensus_controller_test.go create mode 100644 lib/pos_network_message_interface.go diff --git a/consensus/event_loop.go b/consensus/event_loop.go index ef7527473..14ee3d174 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -322,7 +322,7 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) } // Do a basic integrity check on the timeout message - if !isProperlyFormedTimeout(timeout) { + if !IsProperlyFormedTimeout(timeout) { return errors.New("FastHotStuffEventLoop.ProcessValidatorTimeout: Malformed timeout message") } diff --git a/consensus/utils.go b/consensus/utils.go index ebfe6802a..33893e4f6 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -195,7 +195,7 @@ func isProperlyFormedVote(vote VoteMessage) bool { return true } -func isProperlyFormedTimeout(timeout TimeoutMessage) bool { +func IsProperlyFormedTimeout(timeout TimeoutMessage) bool { // The timeout must be non-nil if isInterfaceNil(timeout) { return false diff --git a/consensus/utils_test.go b/consensus/utils_test.go index d8203082f..4a01e4682 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -302,40 +302,40 @@ func TestIsProperlyFormedVote(t *testing.T) { func TestIsProperlyFormedTimeout(t *testing.T) { // Test nil value { - require.False(t, isProperlyFormedTimeout(nil)) + require.False(t, IsProperlyFormedTimeout(nil)) } // Test zero-value view { timeout := createDummyTimeoutMessage(0) - require.False(t, isProperlyFormedTimeout(timeout)) + require.False(t, IsProperlyFormedTimeout(timeout)) } // Test nil high QC { timeout := createDummyTimeoutMessage(1) timeout.highQC = nil - require.False(t, isProperlyFormedTimeout(timeout)) + require.False(t, IsProperlyFormedTimeout(timeout)) } // Test nil public key { timeout := createDummyTimeoutMessage(1) timeout.publicKey = nil - require.False(t, isProperlyFormedTimeout(timeout)) + 
require.False(t, IsProperlyFormedTimeout(timeout)) } // Test nil signature { timeout := createDummyTimeoutMessage(1) timeout.signature = nil - require.False(t, isProperlyFormedTimeout(timeout)) + require.False(t, IsProperlyFormedTimeout(timeout)) } // Test happy path { timeout := createDummyTimeoutMessage(1) - require.True(t, isProperlyFormedTimeout(timeout)) + require.True(t, IsProperlyFormedTimeout(timeout)) } } diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index bb7a4e494..fd8c01e07 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -4,18 +4,21 @@ import ( "sync" "github.com/deso-protocol/core/consensus" + "github.com/pkg/errors" ) type ConsensusController struct { lock sync.RWMutex fastHotStuffEventLoop consensus.FastHotStuffEventLoop blockchain *Blockchain + signer *BLSSigner } -func NewConsensusController(blockchain *Blockchain) *ConsensusController { +func NewConsensusController(blockchain *Blockchain, signer *BLSSigner) *ConsensusController { return &ConsensusController{ - fastHotStuffEventLoop: consensus.NewFastHotStuffEventLoop(), blockchain: blockchain, + fastHotStuffEventLoop: consensus.NewFastHotStuffEventLoop(), + signer: signer, } } @@ -62,13 +65,84 @@ func (cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotS // 4. Broadcast the timeout msg to the network } -func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastHotStuffEvent) { - // The consensus module has signaled that we have timed out for a view. We construct and - // broadcast the timeout here: - // 1. Verify the block height and view we want to timeout on are valid - // 2. Construct the timeout message - // 3. Process the timeout in the consensus module - // 4. Broadcast the timeout msg to the network +// HandleFastHostStuffTimeout is triggered when the FastHotStuffEventLoop has signaled that +// it is ready to time out the current view. 
This function validates the timeout signal for +// staleness. If the signal is valid, then it constructs and broadcasts the timeout msg here. +// +// Steps: +// 1. Verify the timeout message and the view we want to timeout on +// 2. Construct the timeout message +// 3. Process the timeout in the consensus module +// 4. Broadcast the timeout msg to the network +func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastHotStuffEvent) error { + // Hold a read lock on the consensus controller. This is because we need to check the + // current view and block height of the consensus module. + cc.lock.Lock() + defer cc.lock.Unlock() + + var err error + + if !consensus.IsProperlyFormedTimeoutEvent(event) { + // If the event is not properly formed, we ignore it and log it. This should never happen. + return errors.Errorf("HandleFastHostStuffTimeout: Received improperly formed timeout event: %v", event) + } + + if event.View != cc.fastHotStuffEventLoop.GetCurrentView() { + // It's possible that the event loop signaled to timeout, but at the same time, we + // received a block proposal from the network and advanced the view. This is normal + // and an expected race condition in the steady-state. + // + // Nothing to do here. + return errors.Errorf("HandleFastHostStuffTimeout: Stale timeout event: %v", event) + } + + // Locally advance the event loop's view so that the node is locally running the Fast-HotStuff + // protocol correctly. Any errors below related to broadcasting the timeout message should not + // affect the correctness of the protocol's local execution. + if _, err := cc.fastHotStuffEventLoop.AdvanceViewOnTimeout(); err != nil { + // This should never happen as long as the event loop is running. If it happens, we return + // the error and let the caller handle it. 
+ return errors.Errorf("HandleFastHostStuffTimeout: Error advancing view on timeout: %v", err) + } + + // Construct the timeout message + timeoutMsg := NewMessage(MsgTypeValidatorTimeout).(*MsgDeSoValidatorTimeout) + timeoutMsg.MsgVersion = MsgValidatorTimeoutVersion0 + timeoutMsg.TimedOutView = event.View + // TODO: Somehow we need to know the validator's ECDSA public key. Fill this out once the + // mapping between BLS and ECDSA keys is implemented and available in the consensus module. + // timeoutMsg.PublicKey = + timeoutMsg.VotingPublicKey = cc.signer.GetPublicKey() + highQCBlockHash := event.QC.GetBlockHash().GetValue() + timeoutMsg.HighQC = &QuorumCertificate{ + BlockHash: NewBlockHash(highQCBlockHash[:]), + ProposedInView: event.QC.GetView(), + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + SignersList: event.QC.GetAggregatedSignature().GetSignersList(), + Signature: event.QC.GetAggregatedSignature().GetSignature(), + }, + } + + timeoutMsg.TimeoutPartialSignature, err = cc.signer.SignValidatorTimeout(event.View, event.QC.GetView()) + if err != nil { + // This should never happen as long as the BLS signer is initialized correctly. + return errors.Errorf("HandleFastHostStuffTimeout: Error signing validator timeout: %v", err) + } + + // Process the timeout message locally in the FastHotStuffEventLoop + if err := cc.fastHotStuffEventLoop.ProcessValidatorTimeout(timeoutMsg); err != nil { + // This should never happen. If we error here, it means that the timeout message is stale + // beyond the committed tip, the timeout message is malformed, or the timeout message is + // is duplicated for the same view. In any case, something is very wrong. We should not + // broadcast this message to the network. 
+ return errors.Errorf("HandleFastHostStuffTimeout: Error processing timeout locally: %v", err) + + } + + // Broadcast the timeout message to the network + // TODO: Broadcast the timeout message to the network or alternatively to just the block proposer + + return nil } func (cc *ConsensusController) HandleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { diff --git a/lib/pos_consensus_controller_test.go b/lib/pos_consensus_controller_test.go new file mode 100644 index 000000000..03149dc20 --- /dev/null +++ b/lib/pos_consensus_controller_test.go @@ -0,0 +1,112 @@ +package lib + +import ( + "sync" + "testing" + + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/consensus" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +func TestConsensusControllerHandleTimeoutSignal(t *testing.T) { + // Create a test private key for the signer + blsPrivateKey, err := bls.NewPrivateKey() + require.NoError(t, err) + blsPublicKey := blsPrivateKey.PublicKey() + + // Create a test block header with a valid QC + blockHeader := createTestBlockHeaderVersion2(t, false) + blockHash, err := blockHeader.Hash() + require.NoError(t, err) + + // Compute the current and next views + currentView := blockHeader.ValidatorsVoteQC.GetView() + 1 + nextView := currentView + 1 + + // Create a mock controller + consensusController := ConsensusController{ + lock: sync.RWMutex{}, + signer: &BLSSigner{ + privateKey: blsPrivateKey, + }, + fastHotStuffEventLoop: &consensus.MockFastHotStuffEventLoop{ + OnGetCurrentView: func() uint64 { + return currentView + }, + OnAdvanceViewOnTimeout: func() (uint64, error) { + return nextView, nil + }, + OnProcessValidatorTimeout: func(timeout consensus.TimeoutMessage) error { + if !consensus.IsProperlyFormedTimeout(timeout) { + return errors.Errorf("Bad timeout message") + } + + if timeout.GetView() != (blockHeader.ValidatorsVoteQC.GetView()+1) || !timeout.GetPublicKey().Eq(blsPublicKey) { + return errors.Errorf("Bad view or public 
key in timeout message") + } + + if timeout.GetHighQC().GetView() != blockHeader.ValidatorsVoteQC.GetView() { + return errors.Errorf("Bad high QC in timeout message") + } + + if !timeout.GetHighQC().GetAggregatedSignature().GetSignature().Eq(blockHeader.ValidatorsVoteQC.ValidatorsVoteAggregatedSignature.GetSignature()) { + return errors.Errorf("Bad high QC in timeout message") + } + + if timeout.GetHighQC().GetAggregatedSignature().GetSignersList() != blockHeader.ValidatorsVoteQC.ValidatorsVoteAggregatedSignature.GetSignersList() { + return errors.Errorf("Bad high QC in timeout message") + } + + // Verify the timeout's signature + isValidSignature, err := BLSVerifyValidatorTimeout(currentView, blockHeader.ValidatorsVoteQC.GetView(), timeout.GetSignature(), blsPublicKey) + if err != nil { + return err + } + + if !isValidSignature { + return errors.Errorf("Bad signature in timeout message") + } + + return nil + }, + }, + } + + // Test sad path with invalid event type + { + event := &consensus.FastHotStuffEvent{ + EventType: consensus.FastHotStuffEventTypeVote, + } + + err := consensusController.HandleFastHostStuffTimeout(event) + require.Contains(t, err.Error(), "Received improperly formed timeout event") + } + + // Test sad path with stale view + { + event := &consensus.FastHotStuffEvent{ + EventType: consensus.FastHotStuffEventTypeTimeout, + View: currentView - 1, + TipBlockHeight: currentView - 1, + TipBlockHash: blockHash, + QC: blockHeader.ValidatorsVoteQC, + } + err := consensusController.HandleFastHostStuffTimeout(event) + require.Contains(t, err.Error(), "Stale timeout event") + } + + // Test happy path + { + event := &consensus.FastHotStuffEvent{ + EventType: consensus.FastHotStuffEventTypeTimeout, + View: currentView, + TipBlockHeight: currentView, + TipBlockHash: blockHeader.ValidatorsVoteQC.GetBlockHash(), + QC: blockHeader.ValidatorsVoteQC, + } + err := consensusController.HandleFastHostStuffTimeout(event) + require.NoError(t, err) + } +} diff --git 
a/lib/pos_network.go b/lib/pos_network.go index c270a8b30..868d302f6 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -297,18 +297,6 @@ type QuorumCertificate struct { ValidatorsVoteAggregatedSignature *AggregatedBLSSignature } -func (qc *QuorumCertificate) GetBlockHash() consensus.BlockHash { - return qc.BlockHash -} - -func (qc *QuorumCertificate) GetView() uint64 { - return qc.ProposedInView -} - -func (qc *QuorumCertificate) GetAggregatedSignature() consensus.AggregatedSignature { - return qc.ValidatorsVoteAggregatedSignature -} - // Performs a deep equality check between two QuorumCertificates, and returns true // if the two are fully initialized and have identical values. In all other cases, // it return false. @@ -430,14 +418,6 @@ type AggregatedBLSSignature struct { Signature *bls.Signature } -func (sig *AggregatedBLSSignature) GetSignersList() *bitset.Bitset { - return sig.SignersList -} - -func (sig *AggregatedBLSSignature) GetSignature() *bls.Signature { - return sig.Signature -} - // Performs a deep equality check between two AggregatedBLSSignatures, and returns true // if the two are fully initialized and have identical values. In all other cases, // it return false. diff --git a/lib/pos_network_message_interface.go b/lib/pos_network_message_interface.go new file mode 100644 index 000000000..236b0dea3 --- /dev/null +++ b/lib/pos_network_message_interface.go @@ -0,0 +1,67 @@ +package lib + +import ( + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections/bitset" + "github.com/deso-protocol/core/consensus" +) + +////////////////////////////////////////////////////////////////////////////////// +// This file implements the network message interfaces for the PoS messages // +// defined in the consensus package. These interfaces are used by the consensus // +// package to run the Fast-HotStuff protocol. This file is a good spot to // +// place all translations between types defined in lib and consensus packages. 
// +////////////////////////////////////////////////////////////////////////////////// + +// MsgDeSoValidatorTimeout struct <-> consensus.TimeoutMessage interface translation + +func (msg *MsgDeSoValidatorTimeout) GetPublicKey() *bls.PublicKey { + return msg.VotingPublicKey +} + +func (msg *MsgDeSoValidatorTimeout) GetView() uint64 { + return msg.TimedOutView +} + +func (msg *MsgDeSoValidatorTimeout) GetHighQC() consensus.QuorumCertificate { + return msg.HighQC +} + +func (msg *MsgDeSoValidatorTimeout) GetSignature() *bls.Signature { + return msg.TimeoutPartialSignature +} + +// QuorumCertificate struct <-> consensus.QuorumCertificate interface translation + +func (qc *QuorumCertificate) GetBlockHash() consensus.BlockHash { + return qc.BlockHash +} + +func (qc *QuorumCertificate) GetView() uint64 { + return qc.ProposedInView +} + +func (qc *QuorumCertificate) GetAggregatedSignature() consensus.AggregatedSignature { + return qc.ValidatorsVoteAggregatedSignature +} + +// AggregatedBLSSignature struct <-> consensus.AggregatedSignature interface translation + +func (aggSig *AggregatedBLSSignature) GetSignersList() *bitset.Bitset { + return aggSig.SignersList +} + +func (aggSig *AggregatedBLSSignature) GetSignature() *bls.Signature { + return aggSig.Signature +} + +// BlockHash struct <-> consensus.BlockHash interface translation + +func (blockhash *BlockHash) GetValue() [HashSizeBytes]byte { + return [HashSizeBytes]byte(blockhash.ToBytes()) +} + +func BlockHashFromConsensusInterface(blockHash consensus.BlockHash) *BlockHash { + blockHashValue := blockHash.GetValue() + return NewBlockHash(blockHashValue[:]) +} diff --git a/lib/types.go b/lib/types.go index 9182883a7..3e531f2ca 100644 --- a/lib/types.go +++ b/lib/types.go @@ -200,10 +200,6 @@ func NewBlockHash(input []byte) *BlockHash { return blockHash } -func (bh *BlockHash) GetValue() [HashSizeBytes]byte { - return *bh -} - func (bh *BlockHash) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte 
{ return EncodeByteArray(bh[:]) } From 21891062543c9863805a960d303e1f54ff7c1d25 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 8 Nov 2023 13:05:06 -0500 Subject: [PATCH 279/762] Handle And Broadcast Local Vote Signal (#799) * Handle And Broadcast Local Vote Signal * Fix signature verification --- consensus/event_loop.go | 4 +- consensus/utils.go | 4 +- consensus/utils_test.go | 12 ++-- lib/pos_consensus_controller.go | 101 ++++++++++++++++++++++----- lib/pos_consensus_controller_test.go | 69 ++++++++++++++++++ lib/pos_network_message_interface.go | 18 +++++ 6 files changed, 180 insertions(+), 28 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 14ee3d174..54e1b0b1e 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -237,7 +237,7 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { } // Do a basic integrity check on the vote message - if !isProperlyFormedVote(vote) { + if !IsProperlyFormedVote(vote) { return errors.New("FastHotStuffEventLoop.ProcessValidatorVote: Malformed vote message") } @@ -851,7 +851,7 @@ func (fc *fastHotStuffEventLoop) fetchSafeBlockInfo(blockHash BlockHash) ( // (one committed, two uncommitted). In the worse case, where the network has an unlucky series of // timeout -> block -> timeout -> block,... it can still be expected to have < 10 blocks. 
for _, block := range fc.safeBlocks { - if isEqualBlockHashes(block.block.GetBlockHash(), blockHash) { + if IsEqualBlockHash(block.block.GetBlockHash(), blockHash) { return true, block.block, block.validatorList, block.validatorLookup } } diff --git a/consensus/utils.go b/consensus/utils.go index 33893e4f6..b1604513b 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -176,7 +176,7 @@ func isProperlyFormedValidatorSet(validators []Validator) bool { }) } -func isProperlyFormedVote(vote VoteMessage) bool { +func IsProperlyFormedVote(vote VoteMessage) bool { // The vote must be non-nil if vote == nil { return false @@ -327,7 +327,7 @@ func isSuperMajorityStake(stake *uint256.Int, totalStake *uint256.Int) bool { return superMajorityConditionSum.Sign() >= 0 } -func isEqualBlockHashes(hash1 BlockHash, hash2 BlockHash) bool { +func IsEqualBlockHash(hash1 BlockHash, hash2 BlockHash) bool { hash1Value := hash1.GetValue() hash2Value := hash2.GetValue() diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 4a01e4682..ba3e15c3d 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -262,40 +262,40 @@ func TestIsProperlyFormedValidatorSet(t *testing.T) { func TestIsProperlyFormedVote(t *testing.T) { // Test nil value { - require.False(t, isProperlyFormedVote(nil)) + require.False(t, IsProperlyFormedVote(nil)) } // Test zero-value view { vote := createDummyVoteMessage(0) - require.False(t, isProperlyFormedVote(vote)) + require.False(t, IsProperlyFormedVote(vote)) } // Test nil block hash { vote := createDummyVoteMessage(1) vote.blockHash = nil - require.False(t, isProperlyFormedVote(vote)) + require.False(t, IsProperlyFormedVote(vote)) } // Test nil public key { vote := createDummyVoteMessage(1) vote.publicKey = nil - require.False(t, isProperlyFormedVote(vote)) + require.False(t, IsProperlyFormedVote(vote)) } // Test nil signature { vote := createDummyVoteMessage(1) vote.signature = nil - require.False(t, isProperlyFormedVote(vote)) + 
require.False(t, IsProperlyFormedVote(vote)) } // Test happy path { vote := createDummyVoteMessage(1) - require.True(t, isProperlyFormedVote(vote)) + require.True(t, IsProperlyFormedVote(vote)) } } diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index fd8c01e07..fcd1e8364 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -31,18 +31,26 @@ func (cc *ConsensusController) Init() { // able to fetch the tip block and current persisted view from DB from the Blockchain struct. } +// HandleFastHostStuffBlockProposal is called when FastHotStuffEventLoop has signaled that it can +// construct a block at a certain block height. This function validates the block proposal signal, +// then it constructs, processes locally, and then and broadcasts the block. +// +// Steps: +// 1. Verify that the block height we want to propose at is valid +// 2. Get a QC from the consensus module +// 3. Iterate over the top n transactions from the mempool +// 4. Construct a block with the QC and the top n transactions from the mempool +// 5. Sign the block +// 6. Process the block locally +// - This will connect the block to the blockchain, remove the transactions from the +// - mempool, and process the vote in the consensus module +// 7. Broadcast the block to the network func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus.FastHotStuffEvent) { - // The consensus module has signaled that we can propose a block at a certain block - // height. We construct the block and broadcast it here: - // 1. Verify that the block height we want to propose at is valid - // 2. Get a QC from the consensus module - // 3. Iterate over the top n transactions from the mempool - // 4. Construct a block with the QC and the top n transactions from the mempool - // 5. Sign the block - // 6. 
Process the block locally - // - This will connect the block to the blockchain, remove the transactions from the - // - mempool, and process the vote in the consensus module - // 7. Broadcast the block to the network + // Hold a read lock on the consensus controller. This is because we need to check the + // current view and block height of the consensus module. + cc.lock.Lock() + defer cc.lock.Unlock() + } func (cc *ConsensusController) HandleFastHostStuffEmptyTimeoutBlockProposal(event *consensus.FastHotStuffEvent) { @@ -56,13 +64,70 @@ func (cc *ConsensusController) HandleFastHostStuffEmptyTimeoutBlockProposal(even // 6. Broadcast the block to the network } -func (cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotStuffEvent) { - // The consensus module has signaled that we can vote on a block. We construct and - // broadcast the vote here: - // 1. Verify that the block height we want to vote on is valid - // 2. Construct the vote message - // 3. Process the vote in the consensus module - // 4. Broadcast the timeout msg to the network +// HandleFastHostStuffVote is triggered when FastHotStuffEventLoop has signaled that it wants to +// vote on the current tip. This functions validates the vote signal, then it constructs the +// vote message here. +// +// Steps: +// 1. Verify that the event is properly formed. +// 2. Construct the vote message +// 3. Process the vote in the consensus module +// 4. Broadcast the vote msg to the network +func (cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotStuffEvent) error { + // Hold a read lock on the consensus controller. This is because we need to check the + // current view and block height of the consensus module. + cc.lock.Lock() + defer cc.lock.Unlock() + + var err error + + if !consensus.IsProperlyFormedVoteEvent(event) { + // If the event is not properly formed, we ignore it and log it. This should never happen. 
+ return errors.Errorf("HandleFastHostStuffVote: Received improperly formed vote event: %v", event) + } + + // Provided the vote message is properly formed, we construct and broadcast it in a best effort + // manner. We do this even if the consensus event loop has advanced the view or block height. We + // maintain the invariant here that if consensus connected a new tip and wanted to vote on it, the + // vote should be broadcasted regardless of other concurrent events that may have happened. + // + // The block acceptance rules in Blockchain.ProcessBlockPoS guarantee that we cannot vote more + // than once per view, so this best effort approach is safe, and in-line with the Fast-HotStuff + // protocol. + + // Construct the vote message + voteMsg := NewMessage(MsgTypeValidatorVote).(*MsgDeSoValidatorVote) + voteMsg.MsgVersion = MsgValidatorVoteVersion0 + voteMsg.ProposedInView = event.View + // TODO: Somehow we need to know the validator's ECDSA public key. Fill this out once the + // mapping between BLS and ECDSA keys is implemented and available in the consensus module. + // voteMsg.PublicKey = + voteMsg.VotingPublicKey = cc.signer.GetPublicKey() + + // Get the block hash + tipBlockHash := event.TipBlockHash.GetValue() + voteMsg.BlockHash = NewBlockHash(tipBlockHash[:]) + + // Compute and set the vote signature + voteMsg.VotePartialSignature, err = cc.signer.SignValidatorVote(event.View, event.TipBlockHash) + if err != nil { + // This should never happen as long as the BLS signer is initialized correctly. + return errors.Errorf("HandleFastHostStuffVote: Error signing validator vote: %v", err) + } + + // Process the vote message locally in the FastHotStuffEventLoop + if err := cc.fastHotStuffEventLoop.ProcessValidatorVote(voteMsg); err != nil { + // If we can't process the vote locally, then it must somehow be malformed, stale, + // or a duplicate vote/timeout for the same view. Something is very wrong. We should not + // broadcast it to the network. 
+ return errors.Errorf("HandleFastHostStuffVote: Error processing vote locally: %v", err) + + } + + // Broadcast the vote message to the network + // TODO: Broadcast the vote message to the network or alternatively to just the block proposer + + return nil } // HandleFastHostStuffTimeout is triggered when the FastHotStuffEventLoop has signaled that diff --git a/lib/pos_consensus_controller_test.go b/lib/pos_consensus_controller_test.go index 03149dc20..84673ed87 100644 --- a/lib/pos_consensus_controller_test.go +++ b/lib/pos_consensus_controller_test.go @@ -10,6 +10,75 @@ import ( "github.com/stretchr/testify/require" ) +func TestConsensusControllerHandleVoteSignal(t *testing.T) { + // Create a test private key for the signer + blsPrivateKey, err := bls.NewPrivateKey() + require.NoError(t, err) + blsPublicKey := blsPrivateKey.PublicKey() + + // Create a test block header with a valid QC + blockHeader := createTestBlockHeaderVersion2(t, false) + blockHash, err := blockHeader.Hash() + require.NoError(t, err) + + // Create a mock controller + consensusController := ConsensusController{ + lock: sync.RWMutex{}, + signer: &BLSSigner{ + privateKey: blsPrivateKey, + }, + fastHotStuffEventLoop: &consensus.MockFastHotStuffEventLoop{ + OnProcessValidatorVote: func(vote consensus.VoteMessage) error { + if !consensus.IsProperlyFormedVote(vote) { + return errors.Errorf("Bad vote message") + } + + if vote.GetView() != blockHeader.GetView() || !vote.GetPublicKey().Eq(blsPublicKey) { + return errors.Errorf("Bad view or public key in vote message") + } + + if !consensus.IsEqualBlockHash(vote.GetBlockHash(), blockHash) { + return errors.Errorf("Bad tip block hash in vote message") + } + + // Verify the vote's signature + isValidSignature, err := BLSVerifyValidatorVote(blockHeader.GetView(), blockHash, vote.GetSignature(), blsPublicKey) + if err != nil { + return err + } + + if !isValidSignature { + return errors.Errorf("Bad signature in vote message") + } + + return nil + }, + }, + } 
+ + // Test sad path with invalid event type + { + event := &consensus.FastHotStuffEvent{ + EventType: consensus.FastHotStuffEventTypeVote, + } + + err := consensusController.HandleFastHostStuffVote(event) + require.Contains(t, err.Error(), "Received improperly formed vote event") + } + + // Test happy path + { + event := &consensus.FastHotStuffEvent{ + EventType: consensus.FastHotStuffEventTypeVote, + View: blockHeader.GetView(), + TipBlockHeight: blockHeader.GetView(), + TipBlockHash: blockHash, + } + err := consensusController.HandleFastHostStuffVote(event) + require.NoError(t, err) + } +} + func TestConsensusControllerHandleTimeoutSignal(t *testing.T) { // Create a test private key for the signer blsPrivateKey, err := bls.NewPrivateKey() diff --git a/lib/pos_network_message_interface.go b/lib/pos_network_message_interface.go index 236b0dea3..50cc54867 100644 --- a/lib/pos_network_message_interface.go +++ b/lib/pos_network_message_interface.go @@ -13,6 +13,24 @@ import ( // place all translations between types defined in lib and consensus packages. 
// ////////////////////////////////////////////////////////////////////////////////// +// MsgDeSoValidatorVote struct <-> consensus.VoteMessage interface translation + +func (msg *MsgDeSoValidatorVote) GetPublicKey() *bls.PublicKey { + return msg.VotingPublicKey +} + +func (msg *MsgDeSoValidatorVote) GetView() uint64 { + return msg.ProposedInView +} + +func (msg *MsgDeSoValidatorVote) GetBlockHash() consensus.BlockHash { + return msg.BlockHash +} + +func (msg *MsgDeSoValidatorVote) GetSignature() *bls.Signature { + return msg.VotePartialSignature +} + // MsgDeSoValidatorTimeout struct <-> consensus.TimeoutMessage interface translation func (msg *MsgDeSoValidatorTimeout) GetPublicKey() *bls.PublicKey { From b68abe8c60025164cf7e45107d22526df8e0a03b Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 8 Nov 2023 14:15:47 -0800 Subject: [PATCH 280/762] Add comments, switch from Wrapf to Wrap, and reorganize small bits of code. --- lib/block_view_lockups.go | 201 +++++++++++++++++++------------------- lib/db_utils.go | 7 ++ 2 files changed, 109 insertions(+), 99 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 0cdc47692..2703cfe25 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -40,10 +40,6 @@ func (lockedBalanceEntry *LockedBalanceEntry) Copy() *LockedBalanceEntry { } } -func (lockedBalanceEntry *LockedBalanceEntry) Eq(other *LockedBalanceEntry) bool { - return lockedBalanceEntry.ToMapKey() == other.ToMapKey() -} - func (lockedBalanceEntry *LockedBalanceEntry) ToMapKey() LockedBalanceEntryKey { return LockedBalanceEntryKey{ HODLerPKID: *lockedBalanceEntry.HODLerPKID, @@ -58,7 +54,7 @@ func (lockedBalanceEntry *LockedBalanceEntry) RawEncodeWithoutMetadata(blockHeig var data []byte data = append(data, EncodeToBytes(blockHeight, lockedBalanceEntry.HODLerPKID, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, lockedBalanceEntry.ProfilePKID, skipMetadata...)...) 
- data = append(data, UintToBuf(uint64(lockedBalanceEntry.UnlockTimestampNanoSecs))...) + data = append(data, IntToBuf(lockedBalanceEntry.UnlockTimestampNanoSecs)...) data = append(data, VariableEncodeUint256(&lockedBalanceEntry.BalanceBaseUnits)...) return data } @@ -69,30 +65,29 @@ func (lockedBalanceEntry *LockedBalanceEntry) RawDecodeWithoutMetadata(blockHeig // HODLerPKID lockedBalanceEntry.HODLerPKID, err = DecodeDeSoEncoder(&PKID{}, rr) if err != nil { - return errors.Wrapf(err, "LockedBalanceEntry.Decode: Problem reading HODLerPKID") + return errors.Wrap(err, "LockedBalanceEntry.Decode: Problem reading HODLerPKID") } // ProfilePKID lockedBalanceEntry.ProfilePKID, err = DecodeDeSoEncoder(&PKID{}, rr) if err != nil { - return errors.Wrapf(err, "LockedBalanceEntry.Decode: Problem reading ProfilePKID") + return errors.Wrap(err, "LockedBalanceEntry.Decode: Problem reading ProfilePKID") } // UnlockTimestampNanoSecs - uint64UnlockTimestampNanoSecs, err := ReadUvarint(rr) + lockedBalanceEntry.UnlockTimestampNanoSecs, err = ReadVarint(rr) if err != nil { - return errors.Wrapf(err, "LockedBalanceEntry.Decode: Problem reading UnlockTimestampNanoSecs") + return errors.Wrap(err, "LockedBalanceEntry.Decode: Problem reading UnlockTimestampNanoSecs") } - lockedBalanceEntry.UnlockTimestampNanoSecs = int64(uint64UnlockTimestampNanoSecs) // BalanceBaseUnits balanceBaseUnits, err := VariableDecodeUint256(rr) if err != nil { - return errors.Wrapf(err, "LockedBalanceEntry.Decode: Problem reading BalanceBaseUnits") + return errors.Wrap(err, "LockedBalanceEntry.Decode: Problem reading BalanceBaseUnits") } lockedBalanceEntry.BalanceBaseUnits = *balanceBaseUnits - return err + return nil } func (lockedBalanceEntry *LockedBalanceEntry) GetVersionByte(blockHeight uint64) byte { @@ -118,11 +113,18 @@ func (bav *UtxoView) _setLockedBalanceEntry(lockedBalanceEntry *LockedBalanceEnt } func (bav *UtxoView) _deleteLockedBalanceEntry(lockedBalanceEntry *LockedBalanceEntry) { + // This 
function shouldn't be called with nil. + if lockedBalanceEntry == nil { + glog.Errorf("_deleteLockedBalanceEntry: Called with nil LockedBalanceEntry; " + + "this should never happen.") + return + } + // Create a tombstone entry. tombstoneLockedBalanceEntry := lockedBalanceEntry.Copy() tombstoneLockedBalanceEntry.isDeleted = true - // Set the LockupYieldCurvePoint as deleted in the view. + // Set the LockedBalanceEntry as deleted in the view. bav._setLockedBalanceEntry(tombstoneLockedBalanceEntry) } @@ -140,6 +142,9 @@ func (bav *UtxoView) GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestam // Check if the key exists in the view. if viewEntry, viewEntryExists := bav.LockedBalanceEntryKeyToLockedBalanceEntry[lockedBalanceEntryKey]; viewEntryExists { + if viewEntry == nil || viewEntry.isDeleted { + return nil, nil + } return viewEntry, nil } @@ -176,7 +181,7 @@ func (bav *UtxoView) GetUnlockableLockedBalanceEntries( dbUnlockableLockedBalanceEntries, err := DBGetUnlockableLockedBalanceEntries( bav.Handle, bav.Snapshot, hodlerPKID, profilePKID, currentTimestampNanoSecs) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GetUnlockableLockedBalanceEntries") + return nil, errors.Wrap(err, "UtxoView.GetUnlockableLockedBalanceEntries") } for _, lockedBalanceEntry := range dbUnlockableLockedBalanceEntries { // Cache results in the UtxoView. 
@@ -228,6 +233,7 @@ func (lockupYieldCurvePoint *LockupYieldCurvePoint) Copy() *LockupYieldCurvePoin ProfilePKID: lockupYieldCurvePoint.ProfilePKID.NewPKID(), LockupDurationNanoSecs: lockupYieldCurvePoint.LockupDurationNanoSecs, LockupYieldAPYBasisPoints: lockupYieldCurvePoint.LockupYieldAPYBasisPoints, + isDeleted: lockupYieldCurvePoint.isDeleted, } } @@ -247,7 +253,7 @@ func (lockupYieldCurvePoint *LockupYieldCurvePoint) ToMapKey() LockupYieldCurveP func (lockupYieldCurvePoint *LockupYieldCurvePoint) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte data = append(data, EncodeToBytes(blockHeight, lockupYieldCurvePoint.ProfilePKID, skipMetadata...)...) - data = append(data, UintToBuf(uint64(lockupYieldCurvePoint.LockupDurationNanoSecs))...) + data = append(data, IntToBuf(lockupYieldCurvePoint.LockupDurationNanoSecs)...) data = append(data, UintToBuf(lockupYieldCurvePoint.LockupYieldAPYBasisPoints)...) return data } @@ -258,24 +264,23 @@ func (lockupYieldCurvePoint *LockupYieldCurvePoint) RawDecodeWithoutMetadata(blo // ProfilePKID lockupYieldCurvePoint.ProfilePKID, err = DecodeDeSoEncoder(&PKID{}, rr) if err != nil { - return errors.Wrapf(err, "LockupYieldCurvePoint.Decode: Problem reading ProfilePKID") + return errors.Wrap(err, "LockupYieldCurvePoint.Decode: Problem reading ProfilePKID") } // LockupDurationNanoSecs - uint64LockupDurationNanoSecs, err := ReadUvarint(rr) + lockupYieldCurvePoint.LockupDurationNanoSecs, err = ReadVarint(rr) if err != nil { - return errors.Wrapf(err, "LockupYieldCurvePoint.Decode: Problem reading LockupDurationNanoSecs") + return errors.Wrap(err, "LockupYieldCurvePoint.Decode: Problem reading LockupDurationNanoSecs") } - lockupYieldCurvePoint.LockupDurationNanoSecs = int64(uint64LockupDurationNanoSecs) // LockupYieldAPYBasisPoints lockupYieldAPYBasisPoints, err := ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "LockupYieldCurvePoint.Decode: Problem reading 
LockupYieldAPYBasisPoints") + return errors.Wrap(err, "LockupYieldCurvePoint.Decode: Problem reading LockupYieldAPYBasisPoints") } lockupYieldCurvePoint.LockupYieldAPYBasisPoints = lockupYieldAPYBasisPoints - return err + return nil } func (lockupYieldCurvePoint *LockupYieldCurvePoint) GetVersionByte(blockHeight uint64) byte { @@ -307,6 +312,13 @@ func (bav *UtxoView) _setLockupYieldCurvePoint(point *LockupYieldCurvePoint) { } func (bav *UtxoView) _deleteLockupYieldCurvePoint(point *LockupYieldCurvePoint) { + // This function shouldn't be called with nil. + if point == nil { + glog.Errorf("_deleteLockupYieldCurvePoint: Called with nil LockupYieldCurvePoint; " + + "this should never happen.") + return + } + // Create a tombstone entry. tombstoneLockupYieldCurvePoint := point.Copy() tombstoneLockupYieldCurvePoint.isDeleted = true @@ -426,7 +438,7 @@ func (txnData *CoinLockupMetadata) GetTxnType() TxnType { func (txnData *CoinLockupMetadata) ToBytes(preSignature bool) ([]byte, error) { var data []byte data = append(data, EncodeByteArray(txnData.ProfilePublicKey.ToBytes())...) - data = append(data, UintToBuf(uint64(txnData.UnlockTimestampNanoSecs))...) + data = append(data, IntToBuf(txnData.UnlockTimestampNanoSecs)...) data = append(data, VariableEncodeUint256(txnData.LockupAmountBaseUnits)...) 
return data, nil } @@ -437,21 +449,20 @@ func (txnData *CoinLockupMetadata) FromBytes(data []byte) error { // ProfilePublicKey profilePublicKeyBytes, err := DecodeByteArray(rr) if err != nil { - return errors.Wrapf(err, "CoinLockupMetadata.FromBytes: Problem reading ProfilePublicKey") + return errors.Wrap(err, "CoinLockupMetadata.FromBytes: Problem reading ProfilePublicKey") } txnData.ProfilePublicKey = NewPublicKey(profilePublicKeyBytes) // UnlockTimestampNanoSecs - uint64UnlockTimestampNanoSecs, err := ReadUvarint(rr) + txnData.UnlockTimestampNanoSecs, err = ReadVarint(rr) if err != nil { - return errors.Wrapf(err, "CoinLockupMetadata.FromBytes: Problem reading UnlockTimestampNanoSecs") + return errors.Wrap(err, "CoinLockupMetadata.FromBytes: Problem reading UnlockTimestampNanoSecs") } - txnData.UnlockTimestampNanoSecs = int64(uint64UnlockTimestampNanoSecs) // LockupAmountBaseUnits txnData.LockupAmountBaseUnits, err = VariableDecodeUint256(rr) if err != nil { - return errors.Wrapf(err, "CoinLockupMetadata.FromBytes: Problem reading LockupAmountBaseUnits") + return errors.Wrap(err, "CoinLockupMetadata.FromBytes: Problem reading LockupAmountBaseUnits") } return nil @@ -503,7 +514,7 @@ func (txnData *UpdateCoinLockupParamsMetadata) GetTxnType() TxnType { func (txnData *UpdateCoinLockupParamsMetadata) ToBytes(preSignature bool) ([]byte, error) { var data []byte - data = append(data, UintToBuf(uint64(txnData.LockupYieldDurationNanoSecs))...) + data = append(data, IntToBuf(txnData.LockupYieldDurationNanoSecs)...) data = append(data, UintToBuf(txnData.LockupYieldAPYBasisPoints)...) 
data = append(data, BoolToByte(txnData.RemoveYieldCurvePoint)) data = append(data, BoolToByte(txnData.NewLockupTransferRestrictions)) @@ -512,32 +523,32 @@ func (txnData *UpdateCoinLockupParamsMetadata) ToBytes(preSignature bool) ([]byt } func (txnData *UpdateCoinLockupParamsMetadata) FromBytes(data []byte) error { + var err error rr := bytes.NewReader(data) - lockupYieldDurationNanoSecs, err := ReadUvarint(rr) + txnData.LockupYieldDurationNanoSecs, err = ReadVarint(rr) if err != nil { - return errors.Wrapf(err, "UpdateDAOCoinLockupParams.FromBytes: Problem reading LockupYieldDurationNanoSecs") + return errors.Wrap(err, "UpdateCoinLockupParams.FromBytes: Problem reading LockupYieldDurationNanoSecs") } - txnData.LockupYieldDurationNanoSecs = int64(lockupYieldDurationNanoSecs) txnData.LockupYieldAPYBasisPoints, err = ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "UpdateDAOCoinLockupParams.FromBytes: Problem reading LockupYieldAPYBasisPoints") + return errors.Wrap(err, "UpdateCoinLockupParams.FromBytes: Problem reading LockupYieldAPYBasisPoints") } txnData.RemoveYieldCurvePoint, err = ReadBoolByte(rr) if err != nil { - return errors.Wrapf(err, "UpdateDAOCoinLockupParams.FromBytes: Problem reading RemoveYieldCurvePoint") + return errors.Wrap(err, "UpdateCoinLockupParams.FromBytes: Problem reading RemoveYieldCurvePoint") } txnData.NewLockupTransferRestrictions, err = ReadBoolByte(rr) if err != nil { - return errors.Wrapf(err, "UpdateDAOCoinLockupParams.FromBytes: Problem reading NewLockupTransferRestrictions") + return errors.Wrap(err, "UpdateCoinLockupParams.FromBytes: Problem reading NewLockupTransferRestrictions") } lockedStatusByte, err := rr.ReadByte() if err != nil { - return errors.Wrapf(err, "UpdateDAOCoinLockupParams.FromBytes: Problem reading LockupTransferRestrictionStatus") + return errors.Wrap(err, "UpdateCoinLockupParams.FromBytes: Problem reading LockupTransferRestrictionStatus") } txnData.LockupTransferRestrictionStatus = 
TransferRestrictionStatus(lockedStatusByte) @@ -567,7 +578,7 @@ func (txnData *CoinLockupTransferMetadata) ToBytes(preSignature bool) ([]byte, e var data []byte data = append(data, EncodeByteArray(txnData.RecipientPublicKey.ToBytes())...) data = append(data, EncodeByteArray(txnData.ProfilePublicKey.ToBytes())...) - data = append(data, UintToBuf(uint64(txnData.UnlockTimestampNanoSecs))...) + data = append(data, IntToBuf(txnData.UnlockTimestampNanoSecs)...) data = append(data, VariableEncodeUint256(txnData.LockedCoinsToTransferBaseUnits)...) return data, nil } @@ -578,28 +589,27 @@ func (txnData *CoinLockupTransferMetadata) FromBytes(data []byte) error { // RecipientPublicKey recipientPublicKeyBytes, err := DecodeByteArray(rr) if err != nil { - return errors.Wrapf(err, "DAOCoinLockupTransferMetadata.FromBytes: Problem reading RecipientPublicKey") + return errors.Wrap(err, "CoinLockupTransferMetadata.FromBytes: Problem reading RecipientPublicKey") } txnData.RecipientPublicKey = NewPublicKey(recipientPublicKeyBytes) // ProfilePublicKey profilePublicKeyBytes, err := DecodeByteArray(rr) if err != nil { - return errors.Wrapf(err, "DAOCoinLockupTransferMetadata.FromBytes: Problem reading ProfilePublicKey") + return errors.Wrap(err, "CoinLockupTransferMetadata.FromBytes: Problem reading ProfilePublicKey") } txnData.ProfilePublicKey = NewPublicKey(profilePublicKeyBytes) // UnlockTimestampNanoSecs - uint64UnlockTimestampNanoSecs, err := ReadUvarint(rr) + txnData.UnlockTimestampNanoSecs, err = ReadVarint(rr) if err != nil { - return errors.Wrapf(err, "DAOCoinLockupTransferMetadata.FromBytes: Problem reading UnlockTimestampNanoSecs") + return errors.Wrap(err, "CoinLockupTransferMetadata.FromBytes: Problem reading UnlockTimestampNanoSecs") } - txnData.UnlockTimestampNanoSecs = int64(uint64UnlockTimestampNanoSecs) // LockedDAOCoinToTransferBaseUnits txnData.LockedCoinsToTransferBaseUnits, err = VariableDecodeUint256(rr) if err != nil { - return errors.Wrapf(err, 
"DAOCoinLockupTransferMetadata.FromBytes: Problem reading LockedDAOCoinToTransferBaseUnits") + return errors.Wrap(err, "CoinLockupTransferMetadata.FromBytes: Problem reading LockedDAOCoinToTransferBaseUnits") } return nil @@ -633,7 +643,7 @@ func (txnData *CoinUnlockMetadata) FromBytes(data []byte) error { // ProfilePublicKey profilePublicKeyBytes, err := DecodeByteArray(rr) if err != nil { - return errors.Wrapf(err, "CoinUnlockMetadata.FromBytes: Problem reading ProfilePublicKey") + return errors.Wrap(err, "CoinUnlockMetadata.FromBytes: Problem reading ProfilePublicKey") } txnData.ProfilePublicKey = NewPublicKey(profilePublicKeyBytes) @@ -659,7 +669,7 @@ func (bav *UtxoView) _connectCoinLockup( if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return 0, 0, nil, - errors.Wrapf(RuleErrorLockupTxnBeforeBlockHeight, "_connectCoinLockup") + errors.Wrap(RuleErrorLockupTxnBeforeBlockHeight, "_connectCoinLockup") } // Validate the txn TxnType. @@ -675,7 +685,7 @@ func (bav *UtxoView) _connectCoinLockup( totalInput, totalOutput, utxoOpsForBasicTransfer, err := bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectCoinLockup") + return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup") } utxoOpsForTxn = append(utxoOpsForTxn, utxoOpsForBasicTransfer...) @@ -683,7 +693,9 @@ func (bav *UtxoView) _connectCoinLockup( txMeta := txn.TxnMeta.(*CoinLockupMetadata) // Check that the target profile public key is valid and that a profile corresponding to that public key exists. + // We also go ahead and fetch the profile PKID as we will use it later. 
var profileEntry *ProfileEntry + var profilePKID *PKID if len(txMeta.ProfilePublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupInvalidProfilePubKey, "_connectCoinLockup") @@ -694,6 +706,14 @@ func (bav *UtxoView) _connectCoinLockup( return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupOnNonExistentProfile, "_connectCoinLockup") } + profilePKIDEntry := bav.GetPKIDForPublicKey(txMeta.ProfilePublicKey.ToBytes()) + if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupNonExistentProfile, "_connectCoinLockup") + } + profilePKID = profilePKIDEntry.PKID.NewPKID() + } else { + profilePKID = ZeroPKID.NewPKID() } // Validate the lockup amount as non-zero. This is meant to prevent wasteful "no-op" transactions. @@ -725,19 +745,6 @@ func (bav *UtxoView) _connectCoinLockup( } hodlerPKID := transactorPKIDEntry.PKID - // Determine which profile PKID to use. - var profilePKID *PKID - if txMeta.ProfilePublicKey.IsZeroPublicKey() { - profilePKID = ZeroPKID.NewPKID() - } else { - profilePKIDEntry := bav.GetPKIDForPublicKey(txMeta.ProfilePublicKey.ToBytes()) - if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupNonExistentProfile, "_connectCoinLockup") - } - profilePKID = profilePKIDEntry.PKID.NewPKID() - } - // Validate the transactor as having sufficient DAO Coin or DESO balance for the transaction. 
var transactorBalanceNanos256 *uint256.Int var prevTransactorBalanceEntry *BalanceEntry @@ -764,7 +771,7 @@ func (bav *UtxoView) _connectCoinLockup( lockupAmount64 := txMeta.LockupAmountBaseUnits.Uint64() newUtxoOp, err := bav._spendBalance(lockupAmount64, txn.PublicKey, blockHeight-1) if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectCoinLockup") + return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup") } utxoOpsForTxn = append(utxoOpsForTxn, newUtxoOp) } else { @@ -775,7 +782,7 @@ func (bav *UtxoView) _connectCoinLockup( true) if transactorBalanceEntry == nil || transactorBalanceEntry.isDeleted { return 0, 0, nil, - errors.Wrapf(RuleErrorCoinLockupBalanceEntryDoesNotExist, "_connectCoinLockup") + errors.Wrap(RuleErrorCoinLockupBalanceEntryDoesNotExist, "_connectCoinLockup") } // Validate the balance entry as having sufficient funds. @@ -808,7 +815,7 @@ func (bav *UtxoView) _connectCoinLockup( // By now we know the transaction to be valid. We now source yield information from either // the profile's yield curve or the raw DeSo yield curve. Because there's some choice in how - // to determine the yield when the lockup duration falls between two profile specified yield curve + // to determine the yield when the lockup duration falls between two profile-specified yield curve // points, we return here the two local points and choose/interpolate between them below. 
leftYieldCurvePoint, rightYieldCurvePoint, err := bav.GetLocalYieldCurvePoints(profilePKID, lockupDurationNanoSeconds) if err != nil { @@ -1058,7 +1065,7 @@ func (bav *UtxoView) _disconnectCoinLockup( basicTransferOps := utxoOpsForTxn[:operationIndex] err = bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) if err != nil { - return errors.Wrapf(err, "_disconnectCoinLockup") + return errors.Wrap(err, "_disconnectCoinLockup") } return nil } @@ -1082,7 +1089,7 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return 0, 0, nil, - errors.Wrapf(RuleErrorLockupTxnBeforeBlockHeight, "_connectUpdateCoinLockupParams") + errors.Wrap(RuleErrorLockupTxnBeforeBlockHeight, "_connectUpdateCoinLockupParams") } // Validate the txn TxnType. @@ -1095,7 +1102,7 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( totalInput, totalOutput, utxoOpsForBasicTransfer, err := bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectUpdateCoinLockupParams") + return 0, 0, nil, errors.Wrap(err, "_connectUpdateCoinLockupParams") } utxoOpsForTxn = append(utxoOpsForTxn, utxoOpsForBasicTransfer...) @@ -1106,6 +1113,8 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( var profilePKID *PKID _, updaterIsParamUpdater := GetParamUpdaterPublicKeys(blockHeight, bav.Params)[MakePkMapKey(txn.PublicKey)] if updaterIsParamUpdater { + // NOTE: The implication here is ParamUpdaters share write access to the DeSo lockup parameters. + // As further implication, this means ParamUpdaters cannot specify their own coin's lockup parameters. profilePKID = ZeroPKID.NewPKID() } else { profilePKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) @@ -1118,7 +1127,7 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( // Sanity check the lockup duration as valid. 
if txMeta.LockupYieldDurationNanoSecs < 0 { - return 0, 0, nil, errors.Wrapf(RuleErrorUpdateCoinLockupParamsNegativeDuration, + return 0, 0, nil, errors.Wrap(RuleErrorUpdateCoinLockupParamsNegativeDuration, "_connectUpdateCoinLockupParams") } @@ -1145,7 +1154,7 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( // as well ensures there's no wasteful "no-ops" executed. if prevLockupYieldCurvePoint == nil { return 0, 0, nil, - errors.Wrapf(RuleErrorUpdateCoinLockupParamsDeletingNonExistentPoint, "_connectUpdateCoinLockupParams") + errors.Wrap(RuleErrorUpdateCoinLockupParamsDeletingNonExistentPoint, "_connectUpdateCoinLockupParams") } // NOTE: The "LockupYieldAPYBasisPoints" field is effectively irrelevant here. @@ -1165,7 +1174,7 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( profileEntry := bav.GetProfileEntryForPKID(profilePKID) if profileEntry == nil || profileEntry.isDeleted { return 0, 0, nil, - errors.Wrapf(RuleErrorUpdateCoinLockupParamsUpdatingNonExistentProfile, "_connectUpdateCoinLockupParams") + errors.Wrap(RuleErrorUpdateCoinLockupParamsUpdatingNonExistentProfile, "_connectUpdateCoinLockupParams") } // Store a copy of the previous LockupTransferRestrictionStatus for easy transaction disconnect. @@ -1187,7 +1196,7 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( if txMeta.NewLockupTransferRestrictions { // Ensure we're not updating a permanent transfer restriction. 
if prevLockupTransferRestriction == TransferRestrictionStatusPermanentlyUnrestricted { - return 0, 0, nil, errors.Wrapf( + return 0, 0, nil, errors.Wrap( RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction, "_connectUpdateCoinLockupParams") } @@ -1197,7 +1206,7 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( !(txMeta.LockupTransferRestrictionStatus == TransferRestrictionStatusDAOMembersOnly) && !(txMeta.LockupTransferRestrictionStatus == TransferRestrictionStatusPermanentlyUnrestricted) { return 0, 0, nil, - errors.Wrapf(RuleErrorUpdateCoinLockupParamsInvalidRestrictions, "_connectUpdateCoinLockupParams") + errors.Wrap(RuleErrorUpdateCoinLockupParamsInvalidRestrictions, "_connectUpdateCoinLockupParams") } } @@ -1308,7 +1317,7 @@ func (bav *UtxoView) _disconnectUpdateCoinLockupParams( basicTransferOps := utxoOpsForTxn[:operationIndex] err := bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) if err != nil { - return errors.Wrapf(err, "_disconnectUpdateCoinLockupParams") + return errors.Wrap(err, "_disconnectUpdateCoinLockupParams") } return nil } @@ -1332,7 +1341,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return 0, 0, nil, - errors.Wrapf(RuleErrorLockupTxnBeforeBlockHeight, "_connectCoinLockupTransfer") + errors.Wrap(RuleErrorLockupTxnBeforeBlockHeight, "_connectCoinLockupTransfer") } // Validate the txn TxnType. @@ -1383,14 +1392,8 @@ func (bav *UtxoView) _connectCoinLockupTransfer( } // Fetch PKIDs for the recipient, sender, and profile. 
- var senderPKID *PKID - if _, senderIsParamUpdater := - GetParamUpdaterPublicKeys(blockHeight, bav.Params)[MakePkMapKey(txn.PublicKey)]; senderIsParamUpdater { - senderPKID = ZeroPKID.NewPKID() - } else { - senderPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) - senderPKID = senderPKIDEntry.PKID - } + senderPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) + senderPKID := senderPKIDEntry.PKID receiverPKIDEntry := bav.GetPKIDForPublicKey(txMeta.RecipientPublicKey.ToBytes()) receiverPKID := receiverPKIDEntry.PKID profilePKIDEntry := bav.GetPKIDForPublicKey(txMeta.ProfilePublicKey.ToBytes()) @@ -1398,7 +1401,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( // Ensure the sender and receiver are different. if senderPKID.Eq(receiverPKID) { - return 0, 0, nil, errors.Wrapf(RuleErrorCoinLockupTransferSenderEqualsReceiver, + return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupTransferSenderEqualsReceiver, "_connectCoinLockupTransfer") } @@ -1421,7 +1424,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( // Check that the sender's balance entry has sufficient balance. if txMeta.LockedCoinsToTransferBaseUnits.Gt(&senderLockedBalanceEntry.BalanceBaseUnits) { - return 0, 0, nil, errors.Wrapf(RuleErrorCoinLockupTransferInsufficientBalance, + return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupTransferInsufficientBalance, "_connectCoinLockupTransfer") } @@ -1457,7 +1460,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( // Check if transfers are limited to profile owner only. if transferRestrictionStatus == TransferRestrictionStatusProfileOwnerOnly && !profilePKID.Eq(senderPKID) { return 0, 0, nil, - errors.Wrapf(RuleErrorCoinLockupTransferRestrictedToProfileOwner, "_connectCoinLockupTransfer") + errors.Wrap(RuleErrorCoinLockupTransferRestrictedToProfileOwner, "_connectCoinLockupTransfer") } // Check if the transfers are limited to DAO members only. 
@@ -1466,7 +1469,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( receiverBalanceEntry := bav._getBalanceEntryForHODLerPKIDAndCreatorPKID(receiverPKID, profilePKID, true) if receiverBalanceEntry.BalanceNanos.IsZero() && receiverLockedBalanceEntry.BalanceBaseUnits.IsZero() { return 0, 0, nil, - errors.Wrapf(RuleErrorCoinLockupTransferRestrictedToDAOMembers, "_connectCoinLockupTransfer") + errors.Wrap(RuleErrorCoinLockupTransferRestrictedToDAOMembers, "_connectCoinLockupTransfer") } } @@ -1474,7 +1477,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( newRecipientBalanceBaseUnits, err := SafeUint256().Add(&receiverLockedBalanceEntry.BalanceBaseUnits, txMeta.LockedCoinsToTransferBaseUnits) if err != nil { - return 0, 0, nil, errors.Wrapf(RuleErrorCoinLockupTransferBalanceOverflowAtReceiver, + return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupTransferBalanceOverflowAtReceiver, "_connectCoinLockupTransfer") } receiverLockedBalanceEntry.BalanceBaseUnits = *newRecipientBalanceBaseUnits @@ -1577,7 +1580,7 @@ func (bav *UtxoView) _disconnectCoinLockupTransfer( basicTransferOps := utxoOpsForTxn[:operationIndex] err = bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) if err != nil { - return errors.Wrapf(err, "_disconnectCoinLockupTransfer") + return errors.Wrap(err, "_disconnectCoinLockupTransfer") } return nil @@ -1597,7 +1600,7 @@ func (bav *UtxoView) _connectCoinUnlock( if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return 0, 0, nil, - errors.Wrapf(RuleErrorLockupTxnBeforeBlockHeight, "_connectCoinUnlock") + errors.Wrap(RuleErrorLockupTxnBeforeBlockHeight, "_connectCoinUnlock") } // Validate the txn TxnType. @@ -1610,7 +1613,7 @@ func (bav *UtxoView) _connectCoinUnlock( // Try connecting the basic transfer without considering transaction metadata. 
totalInput, totalOutput, utxoOpsForBasicTransfer, err := bav._connectBasicTransfer(txn, txHash, blockHeight, verifySignatures) if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectCoinUnlock") + return 0, 0, nil, errors.Wrap(err, "_connectCoinUnlock") } utxoOpsForTxn = append(utxoOpsForTxn, utxoOpsForBasicTransfer...) @@ -1634,7 +1637,7 @@ func (bav *UtxoView) _connectCoinUnlock( // Convert the TransactorPublicKey to HODLerPKID transactorPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { - return 0, 0, nil, errors.Wrapf(RuleErrorCoinUnlockInvalidHODLerPKID, + return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockInvalidHODLerPKID, "_connectCoinUnlock") } hodlerPKID := transactorPKIDEntry.PKID @@ -1646,7 +1649,7 @@ func (bav *UtxoView) _connectCoinUnlock( } else { profilePKIDEntry := bav.GetPKIDForPublicKey(txMeta.ProfilePublicKey.ToBytes()) if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { - return 0, 0, nil, errors.Wrapf(RuleErrorCoinUnlockInvalidProfilePKID, + return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockInvalidProfilePKID, "_connectCoinUnlock") } profilePKID = profilePKIDEntry.PKID @@ -1656,10 +1659,10 @@ func (bav *UtxoView) _connectCoinUnlock( unlockableLockedBalanceEntries, err := bav.GetUnlockableLockedBalanceEntries( hodlerPKID, profilePKID, blockTimestamp) if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectCoinUnlock") + return 0, 0, nil, errors.Wrap(err, "_connectCoinUnlock") } if len(unlockableLockedBalanceEntries) == 0 { - return 0, 0, nil, errors.Wrapf(RuleErrorCoinUnlockNoUnlockableCoinsFound, + return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockNoUnlockableCoinsFound, "_connectCoinUnlock") } @@ -1671,7 +1674,7 @@ func (bav *UtxoView) _connectCoinUnlock( SafeUint256().Add(unlockedBalance, &unlockableLockedBalanceEntry.BalanceBaseUnits) if err != nil { return 0, 0, nil, - errors.Wrapf(RuleErrorCoinUnlockUnlockableCoinsOverflow, "_connectCoinUnlock") + 
errors.Wrap(RuleErrorCoinUnlockUnlockableCoinsOverflow, "_connectCoinUnlock") } // Append the LockedBalanceEntry in the event we rollback the transaction. @@ -1689,14 +1692,14 @@ func (bav *UtxoView) _connectCoinUnlock( // Ensure the uint256 can be properly represented as a uint64. if !unlockedBalance.IsUint64() { return 0, 0, nil, - errors.Wrapf(RuleErrorCoinUnlockUnlockableDeSoOverflow, "_connectCoinUnlock") + errors.Wrap(RuleErrorCoinUnlockUnlockableDeSoOverflow, "_connectCoinUnlock") } // Add the unlockedBalance to the transactors DeSo balance. // NOTE: _addBalance checks for balance overflow. utxoOp, err := bav._addBalance(unlockedBalance.Uint64(), txn.PublicKey) if err != nil { - return 0, 0, nil, errors.Wrapf(err, "_connectCoinUnlock: error"+ + return 0, 0, nil, errors.Wrap(err, "_connectCoinUnlock: error"+ "adding CoinToUnlockBaseUnits to the transactor balance: ") } utxoOpsForTxn = append(utxoOpsForTxn, utxoOp) @@ -1707,7 +1710,7 @@ func (bav *UtxoView) _connectCoinUnlock( newTransactorBalanceEntry := prevTransactorBalanceEntry.Copy() newTransactorBalanceNanos, err := SafeUint256().Add(&newTransactorBalanceEntry.BalanceNanos, unlockedBalance) if err != nil { - return 0, 0, nil, errors.Wrapf(RuleErrorCoinUnlockCausesBalanceOverflow, + return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockCausesBalanceOverflow, "_connectCoinUnlock") } newTransactorBalanceEntry.BalanceNanos = *newTransactorBalanceNanos @@ -1719,7 +1722,7 @@ func (bav *UtxoView) _connectCoinUnlock( &profileEntry.DAOCoinEntry.CoinsInCirculationNanos, unlockedBalance) if err != nil { - return 0, 0, nil, errors.Wrapf(RuleErrorCoinUnlockCausesCoinsInCirculationOverflow, + return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockCausesCoinsInCirculationOverflow, "_connectCoinUnlock") } profileEntry.DAOCoinEntry.CoinsInCirculationNanos = *newCoinsInCirculationNanos @@ -1860,7 +1863,7 @@ func (bav *UtxoView) _disconnectCoinUnlock( basicTransferOps := utxoOpsForTxn[:operationIndex] err := 
bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) if err != nil { - return errors.Wrapf(err, "_disconnectCoinLockup") + return errors.Wrap(err, "_disconnectCoinLockup") } return nil } @@ -1897,7 +1900,7 @@ func (bav *UtxoView) _flushLockedBalanceEntriesToDbWithTxn(txn *badger.Txn, bloc } else { if err := DbPutLockedBalanceEntryMappingsWithTxn(txn, bav.Snapshot, blockHeight, *lockedBalanceEntry); err != nil { - return errors.Wrapf(err, "_flushLockedBalanceEntriesToDbWithTxn") + return errors.Wrap(err, "_flushLockedBalanceEntriesToDbWithTxn") } } } @@ -1939,7 +1942,7 @@ func (bav *UtxoView) _flushLockupYieldCurvePointEntriesToDbWithTxn(txn *badger.T } else { if err := DbPutLockupYieldCurvePointMappingsWithTxn(txn, bav.Snapshot, blockHeight, *lockupYieldCurvePoint); err != nil { - return errors.Wrapf(err, "_flushYieldCurveEntriesToDbWithTxn") + return errors.Wrap(err, "_flushYieldCurveEntriesToDbWithTxn") } } } diff --git a/lib/db_utils.go b/lib/db_utils.go index 32df1f5b9..14890231a 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -11028,6 +11028,13 @@ func DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecsWithTxn(txn *badger.Tx return lockupYieldCurvePointObj } +// DBGetLocalYieldCurvePoints NOTE: This is a high performance implementation of +// a yield curve seek operation. It's not currently used but can be used in core +// if we opt to make coin lockup operations more performant. At a high level, it +// reduces the number of badger reads in a coin lockup operation from O(n) to +// O(1) where n is the number of yield curve points. If used, make sure to test +// that the implementation is equivalent to the current process of fetching the +// entire yield curve and processing it. 
func DBGetLocalYieldCurvePoints(handle *badger.DB, snap *Snapshot, profilePKID *PKID, lockupDurationNanoSecs int64) ( _leftLockupYieldCurvePoint *LockupYieldCurvePoint, _rightLockupYieldCurvePoint *LockupYieldCurvePoint) { var leftLockupYieldCurvePoint *LockupYieldCurvePoint From 42bde60569d681a9d9225e48d30324249e999bef Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 8 Nov 2023 15:13:25 -0800 Subject: [PATCH 281/762] Stop swallowing certain errors and better utilize DeSoEncoder. Also remove unused DB code. --- lib/block_view_lockups.go | 21 +++- lib/db_utils.go | 195 ++++++++------------------------------ 2 files changed, 53 insertions(+), 163 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 2703cfe25..b5eeee7e6 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -330,8 +330,9 @@ func (bav *UtxoView) _deleteLockupYieldCurvePoint(point *LockupYieldCurvePoint) // Get Helper Functions for LockupYieldCurvePoint func (bav *UtxoView) GetYieldCurvePointByProfilePKIDAndDurationNanoSecs(profilePKID *PKID, - lockupDurationNanoSecs int64) (_lockupYieldCurvePoint *LockupYieldCurvePoint) { + lockupDurationNanoSecs int64) (_lockupYieldCurvePoint *LockupYieldCurvePoint, _err error) { var lockupYieldCurvePoint *LockupYieldCurvePoint + var err error // Check the view for a yield curve point. if _, pointsInView := bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID]; pointsInView { @@ -341,20 +342,26 @@ func (bav *UtxoView) GetYieldCurvePointByProfilePKIDAndDurationNanoSecs(profileP }).ToMapKey() if inMemoryYieldCurvePoint, pointExists := bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID][lockupYieldCurvePointKey]; pointExists { - return inMemoryYieldCurvePoint + if inMemoryYieldCurvePoint == nil || inMemoryYieldCurvePoint.isDeleted { + return nil, nil + } + return inMemoryYieldCurvePoint, nil } } // No mapping exists in the view, check for an entry in the DB. 
- lockupYieldCurvePoint = DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecs(bav.GetDbAdapter().badgerDb, + lockupYieldCurvePoint, err = DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecs(bav.GetDbAdapter().badgerDb, bav.Snapshot, profilePKID, lockupDurationNanoSecs) + if err != nil { + return nil, errors.Wrap(err, "GetYieldCurvePointByProfilePKIDAndDurationNanoSecs") + } // Cache the DB entry in the in-memory map. if lockupYieldCurvePoint != nil { bav._setLockupYieldCurvePoint(lockupYieldCurvePoint) } - return lockupYieldCurvePoint + return lockupYieldCurvePoint, nil } func (bav *UtxoView) GetLocalYieldCurvePoints(profilePKID *PKID, lockupDuration int64) ( @@ -1132,8 +1139,12 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( } // Fetch the previous yield curve point associated with this pair. - prevLockupYieldCurvePoint := + prevLockupYieldCurvePoint, err := bav.GetYieldCurvePointByProfilePKIDAndDurationNanoSecs(profilePKID, txMeta.LockupYieldDurationNanoSecs) + if err != nil { + return 0, 0, nil, errors.Wrap(err, "_connectUpdateCoinLockupParams: "+ + "failed a DB get operation on the previous yield curve point; this shouldn't happen") + } // Check if a yield curve point is being added. if !txMeta.RemoveYieldCurvePoint && txMeta.LockupYieldDurationNanoSecs > 0 { diff --git a/lib/db_utils.go b/lib/db_utils.go index 14890231a..fbb528a82 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -10725,13 +10725,19 @@ func DbPutLockedBalanceEntryMappingsWithTxn(txn *badger.Txn, snap *Snapshot, blo func DbDeleteLockedBalanceEntryWithTxn(txn *badger.Txn, snap *Snapshot, lockedBalanceEntry LockedBalanceEntry) error { // First check that a mapping exists. If one doesn't then there's nothing to do. 
_, err := DBGetWithTxn(txn, snap, _dbKeyForLockedBalanceEntry(lockedBalanceEntry)) - if err != nil { + if errors.Is(err, badger.ErrKeyNotFound) { return nil } + if err != nil { + return errors.Wrapf(err, "DbDeleteLockedBalanceEntryWithTxn: Problem getting "+ + "locked balance entry for HODLer PKID %s, Profile PKID %s, expiration timestamp %d", + lockedBalanceEntry.HODLerPKID.ToString(), lockedBalanceEntry.ProfilePKID.ToString(), + lockedBalanceEntry.UnlockTimestampNanoSecs) + } // When a locked balance entry exists, delete the locked balance entry mapping. if err := DBDeleteWithTxn(txn, snap, _dbKeyForLockedBalanceEntry(lockedBalanceEntry)); err != nil { - return errors.Wrapf(err, "DbDeleteRepostMappingsWithTxn: Deleting "+ + return errors.Wrapf(err, "DbDeleteLockedBalanceEntryWithTxn: Deleting "+ "locked balance entry for HODLer PKID %s, Profile PKID %s, expiration timestamp %d", lockedBalanceEntry.HODLerPKID.ToString(), lockedBalanceEntry.ProfilePKID.ToString(), lockedBalanceEntry.UnlockTimestampNanoSecs) @@ -10767,7 +10773,7 @@ func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsWithT // Get the key from the db. 
lockedBalanceEntryBytes, err := DBGetWithTxn(txn, snap, key) - if err == badger.ErrKeyNotFound { + if errors.Is(err, badger.ErrKeyNotFound) { return nil, nil } if err != nil { @@ -10775,10 +10781,7 @@ func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsWithT errors.Wrap(err, "DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsWithTxn") } - lockedBalanceEntryObj := &LockedBalanceEntry{} - rr := bytes.NewReader(lockedBalanceEntryBytes) - DecodeFromBytes(lockedBalanceEntryObj, rr) - return lockedBalanceEntryObj, nil + return DecodeDeSoEncoder(&LockedBalanceEntry{}, bytes.NewReader(lockedBalanceEntryBytes)) } func DBGetUnlockableLockedBalanceEntries( @@ -10887,9 +10890,7 @@ func _dbKeyForLockupYieldCurvePoint(lockupYieldCurvePoint LockupYieldCurvePoint) // This is crucial for badgerDB seeks as badger lexicographically seeks to nearest keys and // BigEndian formatting ensures the lexicographic seeks function properly. - lockupDurationBytes := make([]byte, 8) - binary.BigEndian.PutUint64(lockupDurationBytes, uint64(lockupYieldCurvePoint.LockupDurationNanoSecs)) - key = append(key, lockupDurationBytes...) + key = append(key, EncodeUint64(uint64(lockupYieldCurvePoint.LockupDurationNanoSecs))...) return key } @@ -10927,13 +10928,19 @@ func DbDeleteLockupYieldCurvePointWithTxn(txn *badger.Txn, snap *Snapshot, lockupYieldCurvePoint LockupYieldCurvePoint) error { // First check that a mapping exists. If one doesn't then there's nothing to do. 
_, err := DBGetWithTxn(txn, snap, _dbKeyForLockupYieldCurvePoint(lockupYieldCurvePoint)) - if err != nil { + if errors.Is(err, badger.ErrKeyNotFound) { return nil } + if err != nil { + return errors.Wrapf(err, "DbDeleteLockupYieldCurvePointWithTxn: Problem getting "+ + "locked balance entry for Profile PKID %s, Duration %d, APY Yield Basis Points %d", + lockupYieldCurvePoint.ProfilePKID.ToString(), lockupYieldCurvePoint.LockupDurationNanoSecs, + lockupYieldCurvePoint.LockupYieldAPYBasisPoints) + } // When a locked balance entry exists, delete the locked balance entry mapping. if err := DBDeleteWithTxn(txn, snap, _dbKeyForLockupYieldCurvePoint(lockupYieldCurvePoint)); err != nil { - return errors.Wrapf(err, "DbDeleteRepostMappingsWithTxn: Deleting "+ + return errors.Wrapf(err, "DbDeleteLockupYieldCurvePointWithTxn: Deleting "+ "locked balance entry for Profile PKID %s, Duration %d, APY Yield Basis Points %d", lockupYieldCurvePoint.ProfilePKID.ToString(), lockupYieldCurvePoint.LockupDurationNanoSecs, lockupYieldCurvePoint.LockupYieldAPYBasisPoints) @@ -10997,18 +11004,23 @@ func DBGetAllYieldCurvePointsByProfilePKIDWithTxn(txn *badger.Txn, snap *Snapsho } func DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecs(handle *badger.DB, snap *Snapshot, profilePKID *PKID, - lockupDurationNanoSecs int64) (_lockupYieldCurvePoint *LockupYieldCurvePoint) { + lockupDurationNanoSecs int64) (_lockupYieldCurvePoint *LockupYieldCurvePoint, _err error) { var lockupYieldCurvePoint *LockupYieldCurvePoint - handle.View(func(txn *badger.Txn) error { - lockupYieldCurvePoint = DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecsWithTxn( + err := handle.View(func(txn *badger.Txn) error { + var err error + lockupYieldCurvePoint, err = DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecsWithTxn( txn, snap, profilePKID, lockupDurationNanoSecs) - return nil + return err }) - return lockupYieldCurvePoint + if err != nil { + return nil, errors.Wrap(err, 
"DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecs") + + } + return lockupYieldCurvePoint, nil } func DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecsWithTxn(txn *badger.Txn, snap *Snapshot, - profilePKID *PKID, lockupDurationNanoSecs int64) (_lockupYieldCurvePoint *LockupYieldCurvePoint) { + profilePKID *PKID, lockupDurationNanoSecs int64) (_lockupYieldCurvePoint *LockupYieldCurvePoint, _err error) { // Construct the key. key := _dbKeyForLockupYieldCurvePoint(LockupYieldCurvePoint{ ProfilePKID: profilePKID, @@ -11017,151 +11029,18 @@ func DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecsWithTxn(txn *badger.Tx // Fetch the point from the database. lockupYieldCurvePointBytes, err := DBGetWithTxn(txn, snap, key) - if err != nil { - return nil - } - - // Parse the bytes beneath the key. - lockupYieldCurvePointObj := &LockupYieldCurvePoint{} - rr := bytes.NewReader(lockupYieldCurvePointBytes) - DecodeFromBytes(lockupYieldCurvePointObj, rr) - return lockupYieldCurvePointObj -} - -// DBGetLocalYieldCurvePoints NOTE: This is a high performance implementation of -// a yield curve seek operation. It's not currently used but can be used in core -// if we opt to make coin lockup operations more performant. At a high level, it -// reduces the number of badger reads in a coin lockup operation from O(n) to -// O(1) where n is the number of yield curve points. If used, make sure to test -// that the implementation is equivalent to the current process of fetching the -// entire yield curve and processing it. 
-func DBGetLocalYieldCurvePoints(handle *badger.DB, snap *Snapshot, profilePKID *PKID, lockupDurationNanoSecs int64) ( - _leftLockupYieldCurvePoint *LockupYieldCurvePoint, _rightLockupYieldCurvePoint *LockupYieldCurvePoint) { - var leftLockupYieldCurvePoint *LockupYieldCurvePoint - handle.View(func(txn *badger.Txn) error { - leftLockupYieldCurvePoint = DBGetLeftLockupYieldCurvePointWithTxn( - txn, snap, profilePKID, lockupDurationNanoSecs) - return nil - }) - var rightLockupYieldCurvePoint *LockupYieldCurvePoint - handle.View(func(txn *badger.Txn) error { - rightLockupYieldCurvePoint = DBGetRightLockupYieldCurvePointWithTxn( - txn, snap, profilePKID, lockupDurationNanoSecs) - return nil - }) - return leftLockupYieldCurvePoint, rightLockupYieldCurvePoint -} - -func DBGetLeftLockupYieldCurvePointWithTxn(txn *badger.Txn, snap *Snapshot, profilePKID *PKID, - lockupDurationNanoSecs int64) (_leftLockupYieldCurvePoint *LockupYieldCurvePoint) { - key := _dbKeyForLockupYieldCurvePoint(LockupYieldCurvePoint{ - ProfilePKID: profilePKID, - LockupDurationNanoSecs: lockupDurationNanoSecs, - }) - - // Seek left of the yield curve point. - iterLeftOpts := badger.DefaultIteratorOptions - iterLeftOpts.Reverse = true - iterLeft := txn.NewIterator(iterLeftOpts) - iterLeft.Seek(key) - iterLeftKey := iterLeft.Item().Key() - - // There's a chance our seek yield a key in a different prefix (i.e. not a yield curve point). - // In this case, we know _dbKeyToLockupYieldCurvePoint will fail in parsing the key. - // We can return early in this case as there's no relevant yield points in the DB. - if len(iterLeftKey) < len(Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs) { - return nil - } - if !bytes.Equal(iterLeftKey[:len(Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs)], - Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs) { - return nil - } - - // Fetch the LockupYieldCurvePoint beneath the key. 
- leftLockupYieldCurvePointBytes, err := DBGetWithTxn(txn, snap, iterLeftKey) - if err != nil { - return nil - } - - // Parse the bytes beneath the key. - leftLockupYieldCurvePointObj := &LockupYieldCurvePoint{} - rr := bytes.NewReader(leftLockupYieldCurvePointBytes) - DecodeFromBytes(leftLockupYieldCurvePointObj, rr) - if !leftLockupYieldCurvePointObj.ProfilePKID.Eq(profilePKID) { - return nil - } - - return leftLockupYieldCurvePointObj -} - -func DBGetRightLockupYieldCurvePointWithTxn(txn *badger.Txn, snap *Snapshot, profilePKID *PKID, - lockupDurationNanoSecs int64) (_rightLockupYieldCurvePoint *LockupYieldCurvePoint) { - key := _dbKeyForLockupYieldCurvePoint(LockupYieldCurvePoint{ - ProfilePKID: profilePKID, - LockupDurationNanoSecs: lockupDurationNanoSecs, - }) - - // Seek left of the yield curve point. - iterRightOpts := badger.DefaultIteratorOptions - iterRight := txn.NewIterator(iterRightOpts) - iterRight.Seek(key) - iterRightKey := iterRight.Item().Key() - - // There's a chance our seek yield a key in a different prefix (i.e. not a yield curve point). - // In this case, we know _dbKeyToLockupYieldCurvePoint will fail in parsing the key. - // We can return early in this case as there's no relevant yield points in the DB. - if len(iterRightKey) < len(Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs) { - return nil - } - if !bytes.Equal(iterRightKey[:len(Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs)], - Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs) { - return nil + if errors.Is(err, badger.ErrKeyNotFound) { + return nil, nil } - - // Fetch the LockupYieldCurvePoint beneath the key. 
- rightLockupYieldCurvePointBytes, err := DBGetWithTxn(txn, snap, iterRightKey) if err != nil { - return nil + return nil, errors.Wrapf(err, + "DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecsWithTxn: failed getting "+ + "lockup yield curve point with PKID %s and duration %d", + profilePKID.ToString(), lockupDurationNanoSecs) } // Parse the bytes beneath the key. - rightLockupYieldCurvePointObj := &LockupYieldCurvePoint{} - rr := bytes.NewReader(rightLockupYieldCurvePointBytes) - DecodeFromBytes(rightLockupYieldCurvePointObj, rr) - if !rightLockupYieldCurvePointObj.ProfilePKID.Eq(profilePKID) { - return nil - } - - return rightLockupYieldCurvePointObj -} - -func DBGetLockupYieldCurvePointForProfilePKIDAndLockupDurationNanoSecs(handle *badger.DB, snap *Snapshot, profilePKID *PKID, lockupDurationNanoSecs int64) *LockupYieldCurvePoint { - - var ret *LockupYieldCurvePoint - handle.View(func(txn *badger.Txn) error { - ret = DBGetLockupYieldCurvePointForProfilePKIDAndLockupDurationNanoSecsWithTxn( - txn, snap, profilePKID, lockupDurationNanoSecs) - return nil - }) - return ret -} - -func DBGetLockupYieldCurvePointForProfilePKIDAndLockupDurationNanoSecsWithTxn(txn *badger.Txn, snap *Snapshot, - profilePKID *PKID, lockupDurationNanoSecs int64) *LockupYieldCurvePoint { - - key := _dbKeyForLockupYieldCurvePoint(LockupYieldCurvePoint{ - ProfilePKID: profilePKID, - LockupDurationNanoSecs: lockupDurationNanoSecs, - }) - lockupYieldCurvePointBytes, err := DBGetWithTxn(txn, snap, key) - if err != nil { - return nil - } - - lockupYieldCurvePointObj := &LockupYieldCurvePoint{} - rr := bytes.NewReader(lockupYieldCurvePointBytes) - DecodeFromBytes(lockupYieldCurvePointObj, rr) - return lockupYieldCurvePointObj + return DecodeDeSoEncoder(&LockupYieldCurvePoint{}, bytes.NewReader(lockupYieldCurvePointBytes)) } // ------------------------------------------------------------------------------------- From 3cffe8d0ffde5cc64dd278419c26805faa79704e Mon Sep 17 00:00:00 2001 From: Jon 
Pollock Date: Wed, 8 Nov 2023 15:46:06 -0800 Subject: [PATCH 282/762] Simplify code by using DecodeDeSoEncoderSlice. --- lib/block_view_types.go | 30 +++++++++--------------------- lib/network.go | 6 ++++-- 2 files changed, 13 insertions(+), 23 deletions(-) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 6eb4f4b1f..65a013a7d 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -1302,10 +1302,7 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada // PrevTransactorBalanceEntry, PrevLockedBalanceEntries data = append(data, EncodeToBytes(blockHeight, op.PrevTransactorBalanceEntry, skipMetadata...)...) - data = append(data, UintToBuf(uint64(len(op.PrevLockedBalanceEntries)))...) - for _, entry := range op.PrevLockedBalanceEntries { - data = append(data, EncodeToBytes(blockHeight, entry, skipMetadata...)...) - } + data = append(data, EncodeDeSoEncoderSlice(op.PrevLockedBalanceEntries, blockHeight, skipMetadata...)...) } return data @@ -1943,42 +1940,33 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. 
// PrevLockedBalanceEntry if op.PrevLockedBalanceEntry, err = DecodeDeSoEncoder(&LockedBalanceEntry{}, rr); err != nil { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockedBalanceEntry") + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockedBalanceEntry: ") } // PrevLockupYieldCurvePoint, PrevLockupTransferRestriction if op.PrevLockupYieldCurvePoint, err = DecodeDeSoEncoder(&LockupYieldCurvePoint{}, rr); err != nil { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockupYieldCurvePoint") + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockupYieldCurvePoint: ") } lockupTransferRestriction, err := rr.ReadByte() if err != nil { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockupTransferRestriction") + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockupTransferRestriction: ") } op.PrevLockupTransferRestriction = TransferRestrictionStatus(lockupTransferRestriction) // PrevSenderLockedBalanceEntry, PrevReceiverLockedBalanceEntry if op.PrevSenderLockedBalanceEntry, err = DecodeDeSoEncoder(&LockedBalanceEntry{}, rr); err != nil { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevSenderLockedBalanceEntry") + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevSenderLockedBalanceEntry: ") } if op.PrevReceiverLockedBalanceEntry, err = DecodeDeSoEncoder(&LockedBalanceEntry{}, rr); err != nil { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem Reading PrevReceiverLockedBalanceEntry") + return errors.Wrapf(err, "UtxoOperation.Decode: Problem Reading PrevReceiverLockedBalanceEntry: ") } // PrevTransactorBalanceEntry, PrevLockedBalanceEntries if op.PrevTransactorBalanceEntry, err = DecodeDeSoEncoder(&BalanceEntry{}, rr); err != nil { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevTransactorBalanceEntry") + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading 
PrevTransactorBalanceEntry: ") } - var numPrevLockedBalanceEntries uint64 - numPrevLockedBalanceEntries, err = ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading numPrevLockedBalanceEntries") - } - for ; numPrevLockedBalanceEntries > 0; numPrevLockedBalanceEntries-- { - prevLockedBalanceEntry := &LockedBalanceEntry{} - if _, err = DecodeFromBytes(prevLockedBalanceEntry, rr); err != nil { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading prevLockedBalanceEntry") - } - op.PrevLockedBalanceEntries = append(op.PrevLockedBalanceEntries, prevLockedBalanceEntry) + if op.PrevLockedBalanceEntries, err = DecodeDeSoEncoderSlice[*LockedBalanceEntry](rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockedBalanceEntry: ") } } diff --git a/lib/network.go b/lib/network.go index 97aa28dbb..d7270a3f2 100644 --- a/lib/network.go +++ b/lib/network.go @@ -5774,6 +5774,8 @@ type TransactionSpendingLimit struct { // - ProfilePKID: A PKID to scope transactions by. // If using the "Any" scope, then ProfilePKID has to be the ZeroPKID. // - LockupLimitOperationType: One of {Any, Scoped} + // If using the "Any" operation type, this limit applies to any possible DeSo token lockup. + // If using the "Scoped" operation type, this limit applies to the ProfilePKID specified. LockupLimitMap map[LockupLimitKey]uint64 // ValidatorPKID || StakerPKID to amount of stake-able $DESO. 
// Note that this is not a limit on the number of Stake txns that @@ -6680,10 +6682,10 @@ func (tsl *TransactionSpendingLimit) FromBytes(blockHeight uint64, rr *bytes.Rea var operationCount uint64 operationCount, err = ReadUvarint(rr) if err != nil { - return errors.Wrap(err, "Error decoding LockupLimitKey: ") + return errors.Wrap(err, "Error decoding OperationCount for LockupLimitKey: ") } if _, keyExists := tsl.LockupLimitMap[*lockupLimitKey]; keyExists { - return fmt.Errorf("LockupLimitKey already exists") + return errors.New("LockupLimitKey already exists") } tsl.LockupLimitMap[*lockupLimitKey] = operationCount } From c24470c9e81efad8fe74cff76e1afe15ab2d6759 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Thu, 9 Nov 2023 16:32:03 -0500 Subject: [PATCH 283/762] Adapt PoS code to work with state syncer --- lib/block_view_stake.go | 32 ++++---- lib/block_view_stake_test.go | 10 +-- lib/block_view_test.go | 7 +- lib/block_view_types.go | 10 +-- lib/block_view_validator.go | 21 ++--- lib/block_view_validator_test.go | 20 ++--- lib/db_utils.go | 115 +++++++--------------------- lib/pos_block_producer_test.go | 13 ++-- lib/pos_epoch.go | 6 +- lib/pos_epoch_complete_hook_test.go | 4 +- lib/pos_epoch_test.go | 2 +- lib/pos_leader_schedule_test.go | 2 +- lib/pos_mempool_test.go | 15 ++-- lib/pos_random_seed.go | 8 +- lib/pos_random_seed_test.go | 5 +- lib/pos_snapshot_entries.go | 41 ++++++---- 16 files changed, 135 insertions(+), 176 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 18fa50eba..fa003c92f 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -912,6 +912,7 @@ func DBPutStakeEntryWithTxn( snap *Snapshot, stakeEntry *StakeEntry, blockHeight uint64, + eventManager *EventManager, ) error { if stakeEntry == nil { return nil @@ -919,7 +920,7 @@ func DBPutStakeEntryWithTxn( // Set StakeEntry in PrefixStakeByValidatorByStaker. 
stakeByValidatorAndStakerKey := DBKeyForStakeByValidatorAndStaker(stakeEntry.ValidatorPKID, stakeEntry.StakerPKID) - if err := DBSetWithTxn(txn, snap, stakeByValidatorAndStakerKey, EncodeToBytes(blockHeight, stakeEntry)); err != nil { + if err := DBSetWithTxn(txn, snap, stakeByValidatorAndStakerKey, EncodeToBytes(blockHeight, stakeEntry), eventManager); err != nil { return errors.Wrapf( err, "DBPutStakeEntryWithTxn: problem storing StakeEntry in index PrefixStakeByValidatorByStaker: ", ) @@ -927,7 +928,7 @@ func DBPutStakeEntryWithTxn( // Set StakeEntry in PrefixStakeByStakeAmount. stakeByStakeAmountKey := DBKeyForStakeByStakeAmount(stakeEntry) - if err := DBSetWithTxn(txn, snap, stakeByStakeAmountKey, nil); err != nil { + if err := DBSetWithTxn(txn, snap, stakeByStakeAmountKey, nil, eventManager); err != nil { return errors.Wrapf( err, "DBPutStakeEntryWithTxn: problem storing StakeEntry in index PrefixStakeByStakeAmount: ", ) @@ -941,6 +942,7 @@ func DBPutLockedStakeEntryWithTxn( snap *Snapshot, lockedStakeEntry *LockedStakeEntry, blockHeight uint64, + eventManager *EventManager, ) error { if lockedStakeEntry == nil { return nil @@ -948,7 +950,7 @@ func DBPutLockedStakeEntryWithTxn( // Set LockedStakeEntry in PrefixLockedStakeByValidatorByStakerByLockedAt. 
key := DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(lockedStakeEntry) - if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, lockedStakeEntry)); err != nil { + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, lockedStakeEntry), eventManager); err != nil { return errors.Wrapf( err, "DBPutLockedStakeEntryWithTxn: problem storing LockedStakeEntry in index PrefixLockedStakeByValidatorByStakerByLockedAt: ", ) @@ -963,6 +965,8 @@ func DBDeleteStakeEntryWithTxn( validatorPKID *PKID, stakerPKID *PKID, blockHeight uint64, + eventManager *EventManager, + isDeleted bool, ) error { if validatorPKID == nil || stakerPKID == nil { return nil @@ -984,7 +988,7 @@ func DBDeleteStakeEntryWithTxn( // Delete StakeEntry from PrefixStakeByValidatorByStaker. stakeByValidatorAndStakerKey := DBKeyForStakeByValidatorAndStaker(validatorPKID, stakerPKID) - if err := DBDeleteWithTxn(txn, snap, stakeByValidatorAndStakerKey); err != nil { + if err := DBDeleteWithTxn(txn, snap, stakeByValidatorAndStakerKey, eventManager, isDeleted); err != nil { return errors.Wrapf( err, "DBDeleteStakeEntryWithTxn: problem deleting StakeEntry from index PrefixStakeByValidatorByStaker: ", ) @@ -992,7 +996,7 @@ func DBDeleteStakeEntryWithTxn( // Delete the StakeEntry from PrefixStakeByStakeAmount. 
stakeByStakeAmountKey := DBKeyForStakeByStakeAmount(stakeEntry) - if err := DBDeleteWithTxn(txn, snap, stakeByStakeAmountKey); err != nil { + if err := DBDeleteWithTxn(txn, snap, stakeByStakeAmountKey, eventManager, isDeleted); err != nil { return errors.Wrapf( err, "DBDeleteStakeEntryWithTxn: problem deleting StakeEntry from index PrefixStakeByStakeAmount: ", ) @@ -1006,6 +1010,8 @@ func DBDeleteLockedStakeEntryWithTxn( snap *Snapshot, lockedStakeEntry *LockedStakeEntry, blockHeight uint64, + eventManager *EventManager, + isDeleted bool, ) error { if lockedStakeEntry == nil { return nil @@ -1013,7 +1019,7 @@ func DBDeleteLockedStakeEntryWithTxn( // Delete LockedStakeEntry from PrefixLockedStakeByValidatorByStakerByLockedAt. key := DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(lockedStakeEntry) - if err := DBDeleteWithTxn(txn, snap, key); err != nil { + if err := DBDeleteWithTxn(txn, snap, key, eventManager, isDeleted); err != nil { return errors.Wrapf( err, "DBDeleteLockedStakeEntryWithTxn: problem deleting StakeEntry from index PrefixLockedStakeByValidatorByStakerByLockedAt: ", ) @@ -1052,7 +1058,7 @@ func (bc *Blockchain) CreateStakeTxn( // Create a new UtxoView. If we have access to a mempool object, use // it to get an augmented view that factors in pending transactions. - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) + utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if err != nil { return nil, 0, 0, 0, errors.Wrap( err, "Blockchain.CreateStakeTxn: problem creating new utxo view: ", @@ -1129,7 +1135,7 @@ func (bc *Blockchain) CreateUnstakeTxn( // Create a new UtxoView. If we have access to a mempool object, use // it to get an augmented view that factors in pending transactions. 
- utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) + utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if err != nil { return nil, 0, 0, 0, errors.Wrap( err, "Blockchain.CreateUnstakeTxn: problem creating new utxo view: ", @@ -1205,7 +1211,7 @@ func (bc *Blockchain) CreateUnlockStakeTxn( // Create a new UtxoView. If we have access to a mempool object, use // it to get an augmented view that factors in pending transactions. - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) + utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if err != nil { return nil, 0, 0, 0, errors.Wrap( err, "Blockchain.CreateUnlockStakeTxn: problem creating new utxo view: ", @@ -2673,7 +2679,7 @@ func (bav *UtxoView) _flushStakeEntriesToDbWithTxn(txn *badger.Txn, blockHeight // Delete the existing mappings in the db for this MapKey. They will be // re-added if the corresponding entry in-memory has isDeleted=false. - if err := DBDeleteStakeEntryWithTxn(txn, bav.Snapshot, entry.ValidatorPKID, entry.StakerPKID, blockHeight); err != nil { + if err := DBDeleteStakeEntryWithTxn(txn, bav.Snapshot, entry.ValidatorPKID, entry.StakerPKID, blockHeight, bav.EventManager, entry.isDeleted); err != nil { return errors.Wrapf(err, "_flushStakeEntriesToDbWithTxn: ") } } @@ -2687,7 +2693,7 @@ func (bav *UtxoView) _flushStakeEntriesToDbWithTxn(txn *badger.Txn, blockHeight } else { // If !isDeleted then we put the corresponding // mappings for it into the db. - if err := DBPutStakeEntryWithTxn(txn, bav.Snapshot, &entry, blockHeight); err != nil { + if err := DBPutStakeEntryWithTxn(txn, bav.Snapshot, &entry, blockHeight, bav.EventManager); err != nil { return errors.Wrapf(err, "_flushStakeEntriesToDbWithTxn: ") } } @@ -2715,7 +2721,7 @@ func (bav *UtxoView) _flushLockedStakeEntriesToDbWithTxn(txn *badger.Txn, blockH // Delete the existing mappings in the db for this MapKey. 
They will be // re-added if the corresponding entry in-memory has isDeleted=false. - if err := DBDeleteLockedStakeEntryWithTxn(txn, bav.Snapshot, &entry, blockHeight); err != nil { + if err := DBDeleteLockedStakeEntryWithTxn(txn, bav.Snapshot, &entry, blockHeight, bav.EventManager, entry.isDeleted); err != nil { return errors.Wrapf(err, "_flushLockedStakeEntriesToDbWithTxn: ") } } @@ -2729,7 +2735,7 @@ func (bav *UtxoView) _flushLockedStakeEntriesToDbWithTxn(txn *badger.Txn, blockH } else { // If !isDeleted then we put the corresponding // mappings for it into the db. - if err := DBPutLockedStakeEntryWithTxn(txn, bav.Snapshot, &entry, blockHeight); err != nil { + if err := DBPutLockedStakeEntryWithTxn(txn, bav.Snapshot, &entry, blockHeight, bav.EventManager); err != nil { return errors.Wrapf(err, "_flushLockedStakeEntriesToDbWithTxn: ") } } diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 374922960..b0ba7c3c7 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -78,7 +78,7 @@ func _testStaking(t *testing.T, flushToDB bool) { } // Seed a CurrentEpochEntry. - epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) @@ -843,7 +843,7 @@ func TestStakingWithDerivedKey(t *testing.T) { senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID newUtxoView := func() *UtxoView { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) return utxoView } @@ -1846,7 +1846,7 @@ func TestGetLockedStakeEntriesInRange(t *testing.T) { // Initialize test chain and UtxoView. 
chain, params, db := NewLowDifficultyBlockchain(t) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) blockHeight := uint64(chain.blockTip().Height + 1) @@ -1996,7 +1996,7 @@ func TestStakeLockupEpochDuration(t *testing.T) { m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID newUtxoView := func() *UtxoView { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) return utxoView } @@ -2180,7 +2180,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { require.NoError(t, err) // Jail the validator. - tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) require.NoError(t, tmpUtxoView.JailValidator(validatorEntry)) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 0a15806b8..fa6bd1253 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -10,6 +10,8 @@ import ( "sort" "testing" + "math/rand" + "github.com/btcsuite/btcd/btcec" "github.com/decred/dcrd/lru" "github.com/dgraph-io/badger/v3" @@ -17,7 +19,6 @@ import ( "github.com/golang/glog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "math/rand" ) func _strToPk(t *testing.T, pkStr string) []byte { @@ -2237,7 +2238,7 @@ func TestConnectFailingTransaction(t *testing.T) { senderPrivString, 200000, 11) blockHeight := chain.BlockTip().Height + 1 - blockView, err := NewUtxoView(db, params, nil, nil) + blockView, err := NewUtxoView(db, params, nil, nil, chain.eventManager) require.NoError(err) txn1 := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, 
m0Priv, 100, 0) utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn1, blockHeight, true) @@ -2282,7 +2283,7 @@ func TestConnectFailingTransaction(t *testing.T) { map[string][]byte{FeeBucketGrowthRateBasisPointsKey: UintToBuf(7000)}, ) } - blockView, err = NewUtxoView(db, params, nil, nil) + blockView, err = NewUtxoView(db, params, nil, nil, chain.eventManager) require.NoError(err) newParams := blockView.GetCurrentGlobalParamsEntry() require.Equal(uint64(7000), newParams.FailingTransactionBMFMultiplierBasisPoints) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 75cacc285..1604f949b 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -120,13 +120,13 @@ const ( EncoderTypeBlock EncoderType = 43 // EncoderTypeTxn represents a transaction in the blockchain. EncoderTypeTxn EncoderType = 44 - EncoderTypeValidatorEntry EncoderType = 40 - EncoderTypeStakeEntry EncoderType = 41 - EncoderTypeLockedStakeEntry EncoderType = 42 - EncoderTypeEpochEntry EncoderType = 43 + EncoderTypeValidatorEntry EncoderType = 45 + EncoderTypeStakeEntry EncoderType = 46 + EncoderTypeLockedStakeEntry EncoderType = 47 + EncoderTypeEpochEntry EncoderType = 48 // EncoderTypeEndBlockView encoder type should be at the end and is used for automated tests. - EncoderTypeEndBlockView EncoderType = 45 + EncoderTypeEndBlockView EncoderType = 49 ) // Txindex encoder types. diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 8ee6d768b..87ca7c04a 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -691,6 +691,7 @@ func DBPutValidatorWithTxn( snap *Snapshot, validatorEntry *ValidatorEntry, blockHeight uint64, + eventManager *EventManager, ) error { if validatorEntry == nil { // This should never happen but is a sanity check. @@ -700,7 +701,7 @@ func DBPutValidatorWithTxn( // Set ValidatorEntry in PrefixValidatorByPKID. 
key := DBKeyForValidatorByPKID(validatorEntry) - if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry)); err != nil { + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry), eventManager); err != nil { return errors.Wrapf( err, "DBPutValidatorWithTxn: problem storing ValidatorEntry in index PrefixValidatorByPKID", ) @@ -709,7 +710,7 @@ func DBPutValidatorWithTxn( // Set ValidatorEntry key in PrefixValidatorByStatusAndStakeAmount. The value should be nil. // We parse the ValidatorPKID from the key for this index. key = DBKeyForValidatorByStatusAndStakeAmount(validatorEntry) - if err := DBSetWithTxn(txn, snap, key, nil); err != nil { + if err := DBSetWithTxn(txn, snap, key, nil, eventManager); err != nil { return errors.Wrapf( err, "DBPutValidatorWithTxn: problem storing ValidatorEntry in index PrefixValidatorByStatusAndStakeAmount", ) @@ -718,7 +719,7 @@ func DBPutValidatorWithTxn( return nil } -func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorPKID *PKID) error { +func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorPKID *PKID, eventManager *EventManager, entryIsDeleted bool) error { if validatorPKID == nil { // This should never happen but is a sanity check. glog.Errorf("DBDeleteValidatorWithTxn: called with nil ValidatorPKID") @@ -741,7 +742,7 @@ func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorPKID *PK // Delete ValidatorEntry from PrefixValidatorByPKID. key := DBKeyForValidatorByPKID(validatorEntry) - if err := DBDeleteWithTxn(txn, snap, key); err != nil { + if err := DBDeleteWithTxn(txn, snap, key, eventManager, entryIsDeleted); err != nil { return errors.Wrapf( err, "DBDeleteValidatorWithTxn: problem deleting ValidatorEntry from index PrefixValidatorByPKID", ) @@ -749,7 +750,7 @@ func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorPKID *PK // Delete ValidatorEntry.PKID from PrefixValidatorByStatusAndStakeAmount. 
key = DBKeyForValidatorByStatusAndStakeAmount(validatorEntry) - if err := DBDeleteWithTxn(txn, snap, key); err != nil { + if err := DBDeleteWithTxn(txn, snap, key, eventManager, entryIsDeleted); err != nil { return errors.Wrapf( err, "DBDeleteValidatorWithTxn: problem deleting ValidatorEntry from index PrefixValidatorByStatusAndStakeAmount", ) @@ -788,7 +789,7 @@ func (bc *Blockchain) CreateRegisterAsValidatorTxn( // Create a new UtxoView. If we have access to a mempool object, use // it to get an augmented view that factors in pending transactions. - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) + utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if err != nil { return nil, 0, 0, 0, errors.Wrap( err, "Blockchain.CreateRegisterAsValidatorTxn: problem creating new utxo view: ", @@ -865,7 +866,7 @@ func (bc *Blockchain) CreateUnregisterAsValidatorTxn( // Create a new UtxoView. If we have access to a mempool object, use // it to get an augmented view that factors in pending transactions. - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) + utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if err != nil { return nil, 0, 0, 0, errors.Wrap( err, "Blockchain.CreateUnregisterAsValidatorTxn: problem creating new utxo view: ", @@ -941,7 +942,7 @@ func (bc *Blockchain) CreateUnjailValidatorTxn( // Create a new UtxoView. If we have access to a mempool object, use // it to get an augmented view that factors in pending transactions. 
- utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot) + utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if err != nil { return nil, 0, 0, 0, errors.Wrap( err, "Blockchain.CreateUnjailValidatorTxn: problem creating new utxo view: ", @@ -2026,7 +2027,7 @@ func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHei // Delete the existing mappings in the db for this ValidatorMapKey. They // will be re-added if the corresponding entry in memory has isDeleted=false. - if err := DBDeleteValidatorWithTxn(txn, bav.Snapshot, &validatorMapKey); err != nil { + if err := DBDeleteValidatorWithTxn(txn, bav.Snapshot, &validatorMapKey, bav.EventManager, validatorEntry.isDeleted); err != nil { return errors.Wrapf(err, "_flushValidatorEntriesToDbWithTxn: ") } } @@ -2040,7 +2041,7 @@ func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHei } else { // If !ValidatorEntry.isDeleted then we put the // corresponding mappings for it into the db. 
- if err := DBPutValidatorWithTxn(txn, bav.Snapshot, &validatorEntry, blockHeight); err != nil { + if err := DBPutValidatorWithTxn(txn, bav.Snapshot, &validatorEntry, blockHeight, bav.EventManager); err != nil { return errors.Wrapf(err, "_flushValidatorEntriesToDbWithTxn: ") } } diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 0f3f8c70b..e93fb7e00 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -463,7 +463,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID newUtxoView := func() *UtxoView { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) return utxoView } @@ -606,7 +606,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { require.NoError(t, err) // Validate the ValidatorEntry exists. - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) validatorEntry, err := utxoView.GetValidatorByPKID(senderPKID) require.NoError(t, err) @@ -644,7 +644,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { require.NoError(t, err) // Validate the ValidatorEntry no longer exists. - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) validatorEntry, err := utxoView.GetValidatorByPKID(senderPKID) require.NoError(t, err) @@ -939,7 +939,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Initialize test chain and UtxoView. 
chain, params, db := NewLowDifficultyBlockchain(t) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) blockHeight := uint64(chain.blockTip().Height + 1) @@ -1539,7 +1539,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID // Seed a CurrentEpochEntry. - epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) @@ -1586,7 +1586,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // trying to unjail himself, but he was never jailed. // Jail m0. - tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) require.NoError(t, tmpUtxoView.JailValidator(validatorEntry)) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) @@ -1651,7 +1651,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { mempool.readOnlyUtxoView.CurrentEpochEntry = nil // Store a new CurrentEpochEntry in the db. 
- epochUtxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot) + epochUtxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) epochUtxoView._setCurrentEpochEntry( &EpochEntry{EpochNumber: currentEpochNumber + 3, FinalBlockHeight: blockHeight + 10}, @@ -1747,7 +1747,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID newUtxoView := func() *UtxoView { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) return utxoView } @@ -1878,7 +1878,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { // trying to unjail himself, but he was never jailed. // Jail the sender. - tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) require.NoError(t, tmpUtxoView.JailValidator(validatorEntry)) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) @@ -1912,7 +1912,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { mempool.readOnlyUtxoView.CurrentEpochEntry = nil // Store a new CurrentEpochEntry in the db. - epochUtxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot) + epochUtxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) epochUtxoView._setCurrentEpochEntry( &EpochEntry{EpochNumber: currentEpochNumber + 3, FinalBlockHeight: blockHeight + 10}, diff --git a/lib/db_utils.go b/lib/db_utils.go index 0ae65d009..0b75ead57 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -491,20 +491,20 @@ type DBPrefixes struct { // PrefixValidatorByPKID: Retrieve a validator by PKID. 
// Prefix, -> ValidatorEntry - PrefixValidatorByPKID []byte `prefix_id:"[78]" is_state:"true"` + PrefixValidatorByPKID []byte `prefix_id:"[80]" is_state:"true"` // PrefixValidatorByStatusAndStakeAmount: Retrieve the top N active validators by stake. // Prefix, , , -> nil // Note that we save space by storing a nil value and parsing the ValidatorPKID from the key. - PrefixValidatorByStatusAndStakeAmount []byte `prefix_id:"[79]" is_state:"true"` + PrefixValidatorByStatusAndStakeAmount []byte `prefix_id:"[81]" is_state:"true"` // PrefixStakeByValidatorAndStaker: Retrieve a StakeEntry. // Prefix, , -> StakeEntry - PrefixStakeByValidatorAndStaker []byte `prefix_id:"[80]" is_state:"true"` + PrefixStakeByValidatorAndStaker []byte `prefix_id:"[82]" is_state:"true"` // PrefixStakeByStakeAmount: Retrieve the top N stake entries by stake amount. // Prefix, , , -> nil - PrefixStakeByStakeAmount []byte `prefix_id:"[81]" is_state:"true"` + PrefixStakeByStakeAmount []byte `prefix_id:"[83]" is_state:"true"` // PrefixLockedStakeByValidatorAndStakerAndLockedAt: Retrieve a LockedStakeEntry. // Prefix, , , -> LockedStakeEntry @@ -531,47 +531,47 @@ type DBPrefixes struct { // (CurrentEpoch - LockedAtEpochNumber) = 133 - 123 = 10, which is greater than // cooldown=3. Thus the UnlockStake will succeed, which will result in the // LockedStakeEntry being deleted and 25 DESO being added to the user's balance. - PrefixLockedStakeByValidatorAndStakerAndLockedAt []byte `prefix_id:"[82]" is_state:"true"` + PrefixLockedStakeByValidatorAndStakerAndLockedAt []byte `prefix_id:"[84]" is_state:"true"` // PrefixCurrentEpoch: Retrieve the current EpochEntry. // Prefix -> EpochEntry - PrefixCurrentEpoch []byte `prefix_id:"[83]" is_state:"true"` + PrefixCurrentEpoch []byte `prefix_id:"[85]" is_state:"true"` // PrefixCurrentRandomSeedHash: Retrieve the current RandomSeedHash. // Prefix -> . 
- PrefixCurrentRandomSeedHash []byte `prefix_id:"[84]" is_state:"true"` + PrefixCurrentRandomSeedHash []byte `prefix_id:"[86]" is_state:"true"` // PrefixSnapshotGlobalParamsEntry: Retrieve a snapshot GlobalParamsEntry by SnapshotAtEpochNumber. // Prefix, -> *GlobalParamsEntry - PrefixSnapshotGlobalParamsEntry []byte `prefix_id:"[85]" is_state:"true"` + PrefixSnapshotGlobalParamsEntry []byte `prefix_id:"[87]" is_state:"true"` // PrefixSnapshotValidatorSetByPKID: Retrieve a ValidatorEntry from a snapshot validator set by // . // Prefix, , -> *ValidatorEntry - PrefixSnapshotValidatorSetByPKID []byte `prefix_id:"[86]" is_state:"true"` + PrefixSnapshotValidatorSetByPKID []byte `prefix_id:"[88]" is_state:"true"` // PrefixSnapshotValidatorSetByStakeAmount: Retrieve stake-ordered ValidatorEntries from a snapshot validator set // by SnapshotAtEpochNumber. // Prefix, , , -> nil // Note: we parse the ValidatorPKID from the key and the value is nil to save space. - PrefixSnapshotValidatorSetByStakeAmount []byte `prefix_id:"[87]" is_state:"true"` + PrefixSnapshotValidatorSetByStakeAmount []byte `prefix_id:"[89]" is_state:"true"` // PrefixSnapshotValidatorSetTotalStakeAmountNanos: Retrieve a snapshot of the validator set's total amount of // staked DESO by SnapshotAtEpochNumber. // Prefix, -> *uint256.Int - PrefixSnapshotValidatorSetTotalStakeAmountNanos []byte `prefix_id:"[88]" is_state:"true"` + PrefixSnapshotValidatorSetTotalStakeAmountNanos []byte `prefix_id:"[90]" is_state:"true"` // PrefixSnapshotLeaderSchedule: Retrieve a ValidatorPKID by . // Prefix, , -> ValidatorPKID - PrefixSnapshotLeaderSchedule []byte `prefix_id:"[89]" is_state:"true"` + PrefixSnapshotLeaderSchedule []byte `prefix_id:"[91]" is_state:"true"` // PrefixSnapshotStakeToRewardByValidatorAndStaker: Retrieves snapshotted StakeEntries that are eligible to // receive staking rewards for an epoch. StakeEntries can be retrieved by ValidatorPKID and StakerPKID. 
// Prefix, , , -> *StakeEntry // Note, we parse the ValidatorPKID and StakerPKID from the key. - PrefixSnapshotStakeToRewardByValidatorAndStaker []byte `prefix_id:"[90]" is_state:"true"` + PrefixSnapshotStakeToRewardByValidatorAndStaker []byte `prefix_id:"[92]" is_state:"true"` - // NEXT_TAG: 91 + // NEXT_TAG: 93 } // DecodeStateKey decodes a state key into a DeSoEncoder type. This is useful for encoders which don't have a stored @@ -818,43 +818,43 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco // prefix_id:"[79]" return true, &UtxoOperationBundle{} } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByPKID) { - // prefix_id:"[78]" + // prefix_id:"[80]" return true, &ValidatorEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixValidatorByStatusAndStakeAmount) { - // prefix_id:"[79]" + // prefix_id:"[81]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixStakeByValidatorAndStaker) { - // prefix_id:"[80]" + // prefix_id:"[82]" return true, &StakeEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixStakeByStakeAmount) { - // prefix_id:"[81]" + // prefix_id:"[83]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixLockedStakeByValidatorAndStakerAndLockedAt) { - // prefix_id:"[82]" + // prefix_id:"[84]" return true, &LockedStakeEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixCurrentEpoch) { - // prefix_id:"[83]" + // prefix_id:"[85]" return true, &EpochEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixCurrentRandomSeedHash) { - // prefix_id:"[84]" + // prefix_id:"[86]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotGlobalParamsEntry) { - // prefix_id:"[85]" + // prefix_id:"[87]" return true, &GlobalParamsEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetByPKID) { - // prefix_id:"[86]" + // prefix_id:"[88]" return true, &ValidatorEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetByStakeAmount) { - // prefix_id:"[87]" + // 
prefix_id:"[89]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetTotalStakeAmountNanos) { - // prefix_id:"[88]" + // prefix_id:"[90]" return false, nil } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotLeaderSchedule) { - // prefix_id:"[89]" + // prefix_id:"[91]" return true, &PKID{} } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotStakeToRewardByValidatorAndStaker) { - // prefix_id:"[90]" + // prefix_id:"[92]" return true, &StakeEntry{} } @@ -7059,67 +7059,6 @@ func (txnMeta *TransactionMetadata) GetEncoderForTxType(txnType TxnType) DeSoEnc } } -func (txnMeta *TransactionMetadata) GetEncoderForTxType(txnType TxnType) DeSoEncoder { - switch txnType { - case TxnTypeBasicTransfer: - return txnMeta.BasicTransferTxindexMetadata - case TxnTypeBitcoinExchange: - return txnMeta.BitcoinExchangeTxindexMetadata - case TxnTypeCreatorCoin: - return txnMeta.CreatorCoinTxindexMetadata - case TxnTypeCreatorCoinTransfer: - return txnMeta.CreatorCoinTransferTxindexMetadata - case TxnTypeUpdateProfile: - return txnMeta.UpdateProfileTxindexMetadata - case TxnTypeSubmitPost: - return txnMeta.SubmitPostTxindexMetadata - case TxnTypeLike: - return txnMeta.LikeTxindexMetadata - case TxnTypeFollow: - return txnMeta.FollowTxindexMetadata - case TxnTypePrivateMessage: - return txnMeta.PrivateMessageTxindexMetadata - case TxnTypeSwapIdentity: - return txnMeta.SwapIdentityTxindexMetadata - case TxnTypeNFTBid: - return txnMeta.NFTBidTxindexMetadata - case TxnTypeAcceptNFTBid: - return txnMeta.AcceptNFTBidTxindexMetadata - case TxnTypeNFTTransfer: - return txnMeta.NFTTransferTxindexMetadata - case TxnTypeAcceptNFTTransfer: - return txnMeta.AcceptNFTTransferTxindexMetadata - case TxnTypeBurnNFT: - return txnMeta.BurnNFTTxindexMetadata - case TxnTypeDAOCoin: - return txnMeta.DAOCoinTxindexMetadata - case TxnTypeDAOCoinTransfer: - return txnMeta.DAOCoinTransferTxindexMetadata - case TxnTypeCreateNFT: - return txnMeta.CreateNFTTxindexMetadata - case 
TxnTypeUpdateNFT: - return txnMeta.UpdateNFTTxindexMetadata - case TxnTypeDAOCoinLimitOrder: - return txnMeta.DAOCoinLimitOrderTxindexMetadata - case TxnTypeCreateUserAssociation: - return txnMeta.CreateUserAssociationTxindexMetadata - case TxnTypeDeleteUserAssociation: - return txnMeta.DeleteUserAssociationTxindexMetadata - case TxnTypeCreatePostAssociation: - return txnMeta.CreatePostAssociationTxindexMetadata - case TxnTypeDeletePostAssociation: - return txnMeta.DeletePostAssociationTxindexMetadata - case TxnTypeAccessGroup: - return txnMeta.AccessGroupTxindexMetadata - case TxnTypeAccessGroupMembers: - return txnMeta.AccessGroupMembersTxindexMetadata - case TxnTypeNewMessage: - return txnMeta.NewMessageTxindexMetadata - default: - return nil - } -} - func (txnMeta *TransactionMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index fc615b7e2..daa2d4162 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -1,13 +1,14 @@ package lib import ( - "github.com/deso-protocol/core/bls" - "github.com/deso-protocol/core/collections/bitset" - "github.com/stretchr/testify/require" "math" "math/rand" "testing" "time" + + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections/bitset" + "github.com/stretchr/testify/require" ) func TestCreateBlockTemplate(t *testing.T) { @@ -26,7 +27,7 @@ func TestCreateBlockTemplate(t *testing.T) { params, db := _posTestBlockchainSetupWithBalances(t, 200000, 200000) params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 - latestBlockView, err := NewUtxoView(db, params, nil, nil) + latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) require.NoError(err) dir := _dbDirSetup(t) @@ -87,7 +88,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { maxMempoolPosSizeBytes := uint64(3000000000) mempoolBackupIntervalMillis := uint64(30000) - 
latestBlockView, err := NewUtxoView(db, params, nil, nil) + latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) require.NoError(err) dir := _dbDirSetup(t) @@ -142,7 +143,7 @@ func TestGetBlockTransactions(t *testing.T) { m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) - latestBlockView, err := NewUtxoView(db, params, nil, nil) + latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) require.NoError(err) dir := _dbDirSetup(t) diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index 2a7f8439f..2e686e24d 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -131,7 +131,7 @@ func (bav *UtxoView) _flushCurrentEpochEntryToDbWithTxn(txn *badger.Txn, blockHe // don't want to overwrite what is in the database. Just no-op. return nil } - if err := DBPutCurrentEpochEntryWithTxn(txn, bav.Snapshot, bav.CurrentEpochEntry, blockHeight); err != nil { + if err := DBPutCurrentEpochEntryWithTxn(txn, bav.Snapshot, bav.CurrentEpochEntry, blockHeight, bav.EventManager); err != nil { return errors.Wrapf(err, "_flushCurrentEpochEntryToDbWithTxn: ") } return nil @@ -176,7 +176,7 @@ func DBGetCurrentEpochEntryWithTxn(txn *badger.Txn, snap *Snapshot) (*EpochEntry return epochEntry, nil } -func DBPutCurrentEpochEntryWithTxn(txn *badger.Txn, snap *Snapshot, epochEntry *EpochEntry, blockHeight uint64) error { +func DBPutCurrentEpochEntryWithTxn(txn *badger.Txn, snap *Snapshot, epochEntry *EpochEntry, blockHeight uint64, eventManager *EventManager) error { // Set EpochEntry in PrefixCurrentEpoch. 
if epochEntry == nil { // This is just a safety check that we are not accidentally overwriting an @@ -184,7 +184,7 @@ func DBPutCurrentEpochEntryWithTxn(txn *badger.Txn, snap *Snapshot, epochEntry * return errors.New("DBPutCurrentEpochEntryWithTxn: called with nil EpochEntry") } key := DBKeyForCurrentEpoch() - if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, epochEntry)); err != nil { + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, epochEntry), eventManager); err != nil { return errors.Wrapf( err, "DBPutCurrentEpochEntryWithTxn: problem storing EpochEntry in index PrefixCurrentEpoch: ", ) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 269c25486..35028462e 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -25,7 +25,7 @@ func TestIsLastBlockInCurrentEpoch(t *testing.T) { GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) // The BlockHeight is before the PoS snapshotting fork height. 
@@ -831,7 +831,7 @@ func _stakeToValidator( } func _newUtxoView(testMeta *TestMeta) *UtxoView { - newUtxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot) + newUtxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, testMeta.chain.eventManager) require.NoError(testMeta.t, err) return newUtxoView } diff --git a/lib/pos_epoch_test.go b/lib/pos_epoch_test.go index 7ea66a52a..ad49caff0 100644 --- a/lib/pos_epoch_test.go +++ b/lib/pos_epoch_test.go @@ -14,7 +14,7 @@ func TestCurrentEpoch(t *testing.T) { chain, params, db := NewLowDifficultyBlockchain(t) blockHeight := uint64(chain.blockTip().Height) + 1 blockTimestampNanoSecs := uint64(chain.blockTip().Header.TstampNanoSecs) + 1e9 - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) // Test that the CurrentEpoch is nil in the db. 
diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 38efa57fb..c10037747 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -62,7 +62,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { // Helper utils newUtxoView := func() *UtxoView { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) return utxoView } diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index e0ced9db0..7d1c08ab5 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -2,13 +2,14 @@ package lib import ( "bytes" - "github.com/dgraph-io/badger/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "math/rand" "os" "testing" "time" + + "github.com/dgraph-io/badger/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestPosMempoolStart(t *testing.T) { @@ -43,7 +44,7 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { params, db := _posTestBlockchainSetup(t) m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) - latestBlockView, err := NewUtxoView(db, params, nil, nil) + latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) require.NoError(err) dir := _dbDirSetup(t) @@ -94,7 +95,7 @@ func TestPosMempoolPrune(t *testing.T) { m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) - latestBlockView, err := NewUtxoView(db, params, nil, nil) + latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) require.NoError(err) dir := _dbDirSetup(t) @@ -186,7 +187,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) - latestBlockView, err := NewUtxoView(db, params, nil, nil) + latestBlockView, err := NewUtxoView(db, params, nil, 
nil, nil) require.NoError(err) dir := _dbDirSetup(t) @@ -242,7 +243,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) - latestBlockView, err := NewUtxoView(db, params, nil, nil) + latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) require.NoError(err) dir := _dbDirSetup(t) diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index 567025199..aa5262f9d 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -3,12 +3,13 @@ package lib import ( "bytes" "crypto/sha256" + "io" + "github.com/deso-protocol/core/bls" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" - "io" ) // @@ -138,7 +139,7 @@ func (bav *UtxoView) _flushCurrentRandomSeedHashToDbWithTxn(txn *badger.Txn, blo if bav.CurrentRandomSeedHash == nil { return nil } - return DBPutCurrentRandomSeedHashWithTxn(txn, bav.Snapshot, bav.CurrentRandomSeedHash, blockHeight) + return DBPutCurrentRandomSeedHashWithTxn(txn, bav.Snapshot, bav.CurrentRandomSeedHash, blockHeight, bav.EventManager) } // @@ -179,6 +180,7 @@ func DBPutCurrentRandomSeedHashWithTxn( snap *Snapshot, currentRandomSeedHash *RandomSeedHash, blockHeight uint64, + eventManager *EventManager, ) error { if currentRandomSeedHash == nil { // This should never happen but is a sanity check. 
@@ -186,5 +188,5 @@ func DBPutCurrentRandomSeedHashWithTxn( return nil } key := DBKeyForCurrentRandomSeedHash() - return DBSetWithTxn(txn, snap, key, currentRandomSeedHash.ToBytes()) + return DBSetWithTxn(txn, snap, key, currentRandomSeedHash.ToBytes(), eventManager) } diff --git a/lib/pos_random_seed_test.go b/lib/pos_random_seed_test.go index 9fc4e3822..e35f1d9fa 100644 --- a/lib/pos_random_seed_test.go +++ b/lib/pos_random_seed_test.go @@ -3,15 +3,16 @@ package lib import ( + "testing" + "github.com/deso-protocol/core/bls" "github.com/holiman/uint256" "github.com/stretchr/testify/require" - "testing" ) func TestCurrentRandomSeedHash(t *testing.T) { chain, params, db := NewLowDifficultyBlockchain(t) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) blockHeight := uint64(0) diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 4c7ed3cce..f608e14ac 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -156,7 +156,7 @@ func (bav *UtxoView) _flushSnapshotGlobalParamsEntryToDbWithTxn(txn *badger.Txn, ) } if err := DBPutSnapshotGlobalParamsEntryWithTxn( - txn, bav.Snapshot, globalParamsEntry, snapshotAtEpochNumber, blockHeight, + txn, bav.Snapshot, globalParamsEntry, snapshotAtEpochNumber, blockHeight, bav.EventManager, ); err != nil { return errors.Wrapf( err, @@ -211,6 +211,7 @@ func DBPutSnapshotGlobalParamsEntryWithTxn( globalParamsEntry *GlobalParamsEntry, snapshotAtEpochNumber uint64, blockHeight uint64, + eventManager *EventManager, ) error { if globalParamsEntry == nil { // This should never happen but is a sanity check. 
@@ -218,7 +219,7 @@ func DBPutSnapshotGlobalParamsEntryWithTxn( return nil } key := DBKeyForSnapshotGlobalParamsEntry(snapshotAtEpochNumber) - return DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, globalParamsEntry)) + return DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, globalParamsEntry), eventManager) } // @@ -352,7 +353,7 @@ func (bav *UtxoView) _flushSnapshotValidatorSetToDbWithTxn(txn *badger.Txn, bloc ) } if err := DBDeleteSnapshotValidatorSetEntryWithTxn( - txn, bav.Snapshot, &mapKey.ValidatorPKID, mapKey.SnapshotAtEpochNumber, + txn, bav.Snapshot, &mapKey.ValidatorPKID, mapKey.SnapshotAtEpochNumber, bav.EventManager, validatorEntry.isDeleted, ); err != nil { return errors.Wrapf( err, @@ -375,7 +376,7 @@ func (bav *UtxoView) _flushSnapshotValidatorSetToDbWithTxn(txn *badger.Txn, bloc continue } if err := DBPutSnapshotValidatorSetEntryWithTxn( - txn, bav.Snapshot, validatorEntry, mapKey.SnapshotAtEpochNumber, blockHeight, + txn, bav.Snapshot, validatorEntry, mapKey.SnapshotAtEpochNumber, blockHeight, bav.EventManager, ); err != nil { return errors.Wrapf( err, @@ -490,6 +491,7 @@ func DBPutSnapshotValidatorSetEntryWithTxn( validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64, blockHeight uint64, + eventManager *EventManager, ) error { if validatorEntry == nil { // This should never happen but is a sanity check. @@ -499,7 +501,7 @@ func DBPutSnapshotValidatorSetEntryWithTxn( // Put the ValidatorEntry in the SnapshotSetByPKID index. 
key := DBKeyForSnapshotValidatorSetByPKID(validatorEntry, snapshotAtEpochNumber) - if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry)); err != nil { + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry), eventManager); err != nil { return errors.Wrapf( err, "DBPutSnapshotValidatorSetEntryWithTxn: problem putting ValidatorEntry in the SnapshotValidatorByPKID index: ", @@ -508,7 +510,7 @@ func DBPutSnapshotValidatorSetEntryWithTxn( // Put the ValidatorPKID in the SnapshotValidatorByStatusAndStakeAmount index. key = DBKeyForSnapshotValidatorSetByStakeAmount(validatorEntry, snapshotAtEpochNumber) - if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID)); err != nil { + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID), eventManager); err != nil { return errors.Wrapf( err, "DBPutSnapshotValidatorSetEntryWithTxn: problem putting ValidatorPKID in the SnapshotValidatorByStake index: ", @@ -519,7 +521,7 @@ func DBPutSnapshotValidatorSetEntryWithTxn( } func DBDeleteSnapshotValidatorSetEntryWithTxn( - txn *badger.Txn, snap *Snapshot, validatorPKID *PKID, snapshotAtEpochNumber uint64, + txn *badger.Txn, snap *Snapshot, validatorPKID *PKID, snapshotAtEpochNumber uint64, eventManager *EventManager, entryIsDeleted bool, ) error { if validatorPKID == nil { // This should never happen but is a sanity check. @@ -543,7 +545,7 @@ func DBDeleteSnapshotValidatorSetEntryWithTxn( // Delete ValidatorEntry from PrefixSnapshotSetByPKID. 
key := DBKeyForSnapshotValidatorSetByPKID(snapshotValidatorSetEntry, snapshotAtEpochNumber) - if err = DBDeleteWithTxn(txn, snap, key); err != nil { + if err = DBDeleteWithTxn(txn, snap, key, eventManager, entryIsDeleted); err != nil { return errors.Wrapf( err, "DBDeleteSnapshotValidatorSetEntryWithTxn: problem deleting ValidatorEntry from index PrefixSnapshotSetByPKID", ) @@ -551,7 +553,7 @@ func DBDeleteSnapshotValidatorSetEntryWithTxn( // Delete ValidatorEntry.PKID from PrefixSnapshotValidatorByStatusAndStakeAmount. key = DBKeyForSnapshotValidatorSetByStakeAmount(snapshotValidatorSetEntry, snapshotAtEpochNumber) - if err = DBDeleteWithTxn(txn, snap, key); err != nil { + if err = DBDeleteWithTxn(txn, snap, key, eventManager, entryIsDeleted); err != nil { return errors.Wrapf( err, "DBDeleteSnapshotValidatorSetEntryWithTxn: problem deleting ValidatorEntry from index PrefixSnapshotValidatorByStatusAndStakeAmount", ) @@ -633,7 +635,7 @@ func (bav *UtxoView) _flushSnapshotValidatorSetTotalStakeAmountNanosToDbWithTxn( ) } if err := DBPutSnapshotValidatorSetTotalStakeAmountNanosWithTxn( - txn, bav.Snapshot, globalActiveStakeAmountNanos, snapshotAtEpochNumber, blockHeight, + txn, bav.Snapshot, globalActiveStakeAmountNanos, snapshotAtEpochNumber, blockHeight, bav.EventManager, ); err != nil { return errors.Wrapf( err, @@ -689,6 +691,7 @@ func DBPutSnapshotValidatorSetTotalStakeAmountNanosWithTxn( globalActiveStakeAmountNanos *uint256.Int, snapshotAtEpochNumber uint64, blockHeight uint64, + eventManager *EventManager, ) error { if globalActiveStakeAmountNanos == nil { // This should never happen but is a sanity check. 
@@ -696,7 +699,7 @@ func DBPutSnapshotValidatorSetTotalStakeAmountNanosWithTxn( return nil } key := DBKeyForSnapshotValidatorSetTotalStakeAmountNanos(snapshotAtEpochNumber) - return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalActiveStakeAmountNanos)) + return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalActiveStakeAmountNanos), eventManager) } // @@ -855,7 +858,7 @@ func (bav *UtxoView) _flushSnapshotStakesToRewardToDbWithTxn(txn *badger.Txn, bl // Delete the existing mappings in the db for this map key. They will be // re-added if the corresponding entry in-memory has isDeleted=false. if err := DBDeleteSnapshotStakeToRewardWithTxn( - txn, bav.Snapshot, stakeEntry.ValidatorPKID, stakeEntry.StakerPKID, mapKey.SnapshotAtEpochNumber, blockHeight, + txn, bav.Snapshot, stakeEntry.ValidatorPKID, stakeEntry.StakerPKID, mapKey.SnapshotAtEpochNumber, blockHeight, bav.EventManager, stakeEntry.isDeleted, ); err != nil { return errors.Wrapf(err, "_flushSnapshotStakesToRewardToDbWithTxn: ") } @@ -868,7 +871,7 @@ func (bav *UtxoView) _flushSnapshotStakesToRewardToDbWithTxn(txn *badger.Txn, bl } if err := DBPutSnapshotStakeToRewardWithTxn( - txn, bav.Snapshot, stakeEntry, mapKey.SnapshotAtEpochNumber, blockHeight, + txn, bav.Snapshot, stakeEntry, mapKey.SnapshotAtEpochNumber, blockHeight, bav.EventManager, ); err != nil { return errors.Wrapf( err, @@ -888,6 +891,8 @@ func DBDeleteSnapshotStakeToRewardWithTxn( stakerPKID *PKID, snapshotAtEpochNumber uint64, blockHeight uint64, + eventManager *EventManager, + entryIsDeleted bool, ) error { if validatorPKID == nil || stakerPKID == nil { return nil @@ -895,7 +900,7 @@ func DBDeleteSnapshotStakeToRewardWithTxn( // Delete the snapshot StakeEntry from PrefixSnapshotStakeToRewardByValidatorByStaker. 
stakeByValidatorAndStakerKey := DBKeyForSnapshotStakeToRewardByValidatorAndStaker(snapshotAtEpochNumber, validatorPKID, stakerPKID) - if err := DBDeleteWithTxn(txn, snap, stakeByValidatorAndStakerKey); err != nil { + if err := DBDeleteWithTxn(txn, snap, stakeByValidatorAndStakerKey, eventManager, entryIsDeleted); err != nil { return errors.Wrapf( err, "DBDeleteSnapshotStakeToRewardWithTxn: problem deleting snapshot StakeEntry from index PrefixSnapshotStakeToRewardByValidatorByStaker: ", ) @@ -910,6 +915,7 @@ func DBPutSnapshotStakeToRewardWithTxn( stakeEntry *StakeEntry, snapshotAtEpochNumber uint64, blockHeight uint64, + eventManager *EventManager, ) error { if stakeEntry == nil { // This should never happen but is a sanity check. @@ -918,7 +924,7 @@ func DBPutSnapshotStakeToRewardWithTxn( } dbKey := DBKeyForSnapshotStakeToRewardByValidatorAndStaker(snapshotAtEpochNumber, stakeEntry.ValidatorPKID, stakeEntry.StakerPKID) - if err := DBSetWithTxn(txn, snap, dbKey, EncodeToBytes(blockHeight, stakeEntry)); err != nil { + if err := DBSetWithTxn(txn, snap, dbKey, EncodeToBytes(blockHeight, stakeEntry), eventManager); err != nil { return errors.Wrapf( err, "DBPutSnapshotStakeToRewardWithTxn: problem putting snapshot stakeEntry in the SnapshotStakeToRewardByValidatorAndStaker index: ", @@ -990,7 +996,7 @@ func (bav *UtxoView) _flushSnapshotLeaderScheduleToDbWithTxn(txn *badger.Txn, bl ) } if err := DBPutSnapshotLeaderScheduleValidatorWithTxn( - txn, bav.Snapshot, validatorPKID, mapKey.LeaderIndex, mapKey.SnapshotAtEpochNumber, blockHeight, + txn, bav.Snapshot, validatorPKID, mapKey.LeaderIndex, mapKey.SnapshotAtEpochNumber, blockHeight, bav.EventManager, ); err != nil { return errors.Wrapf( err, @@ -1059,6 +1065,7 @@ func DBPutSnapshotLeaderScheduleValidatorWithTxn( leaderIndex uint16, snapshotAtEpochNumber uint64, blockHeight uint64, + eventManager *EventManager, ) error { if validatorPKID == nil { // This should never happen but is a sanity check. 
@@ -1066,7 +1073,7 @@ func DBPutSnapshotLeaderScheduleValidatorWithTxn( return nil } key := DBKeyForSnapshotLeaderScheduleValidator(leaderIndex, snapshotAtEpochNumber) - if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorPKID)); err != nil { + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorPKID), eventManager); err != nil { return errors.Wrapf( err, "DBPutSnapshotLeaderScheduleValidatorWithTxn: problem putting ValidatorPKID in the SnapshotLeaderSchedule index: ", From bed7cf6f77e1906008a4e6072cae16e4387a0dc3 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Tue, 14 Nov 2023 12:15:13 -0500 Subject: [PATCH 284/762] Update comments. Resolve outlying discussions. --- lib/block_view.go | 6 +++--- lib/block_view_lockups.go | 3 +-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 36ca1ac7d..dab3616b1 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -124,7 +124,7 @@ type UtxoView struct { // Locked stake mappings LockedStakeMapKeyToLockedStakeEntry map[LockedStakeMapKey]*LockedStakeEntry - // Locked DAO coin balance entry mapping. + // Locked DAO coin and locked DESO balance entry mapping. LockedBalanceEntryKeyToLockedBalanceEntry map[LockedBalanceEntryKey]*LockedBalanceEntry // Lockup yield curve points. @@ -542,7 +542,7 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { len(bav.TransactorNonceMapKeyToTransactorNonceEntry)) for entryKey, entry := range bav.TransactorNonceMapKeyToTransactorNonceEntry { newEntry := *entry - newView.TransactorNonceMapKeyToTransactorNonceEntry[entryKey] = newEntry.Copy() + newView.TransactorNonceMapKeyToTransactorNonceEntry[entryKey] = &newEntry } // Copy the LockedBalanceEntries @@ -564,7 +564,7 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { // Go through all LockupYieldCurvePoints in the LockupYieldCurvePoint map. 
for entryKey, entry := range lockupYieldCurvePointMap { newLockupYieldCurvePoint := *entry - newView.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[pkid][entryKey] = &newLockupYieldCurvePoint + newView.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[pkid][entryKey] = newLockupYieldCurvePoint.Copy() } } diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 2cea76973..bc1abdc6e 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -274,11 +274,10 @@ func (lockupYieldCurvePoint *LockupYieldCurvePoint) RawDecodeWithoutMetadata(blo } // LockupYieldAPYBasisPoints - lockupYieldAPYBasisPoints, err := ReadUvarint(rr) + lockupYieldCurvePoint.LockupYieldAPYBasisPoints, err = ReadUvarint(rr) if err != nil { return errors.Wrap(err, "LockupYieldCurvePoint.Decode: Problem reading LockupYieldAPYBasisPoints") } - lockupYieldCurvePoint.LockupYieldAPYBasisPoints = lockupYieldAPYBasisPoints return nil } From 215d04b802ba28fdee0aadf802c64c4337a9c3b9 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Tue, 14 Nov 2023 14:05:03 -0500 Subject: [PATCH 285/762] Add more detailed comment in network.go. Resolve dubious buildkite failures. 
--- lib/block_view.go | 6 ++---- lib/block_view_test.go | 12 +++--------- lib/network.go | 22 +++++++++++++++++++--- 3 files changed, 24 insertions(+), 16 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index dab3616b1..3836c3dfe 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -549,8 +549,7 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.LockedBalanceEntryKeyToLockedBalanceEntry = make(map[LockedBalanceEntryKey]*LockedBalanceEntry, len(bav.LockedBalanceEntryKeyToLockedBalanceEntry)) for entryKey, entry := range bav.LockedBalanceEntryKeyToLockedBalanceEntry { - newEntry := *entry - newView.LockedBalanceEntryKeyToLockedBalanceEntry[entryKey] = newEntry.Copy() + newView.LockedBalanceEntryKeyToLockedBalanceEntry[entryKey] = entry.Copy() } // Copy the LockupYieldCurvePoints @@ -563,8 +562,7 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { // Go through all LockupYieldCurvePoints in the LockupYieldCurvePoint map. for entryKey, entry := range lockupYieldCurvePointMap { - newLockupYieldCurvePoint := *entry - newView.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[pkid][entryKey] = newLockupYieldCurvePoint.Copy() + newView.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[pkid][entryKey] = entry.Copy() } } diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 5e91f6fc6..8a55bd93e 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -79,6 +79,7 @@ func setBalanceModelBlockHeights(t *testing.T) { DeSoTestnetParams.ForkHeights.AssociationsAndAccessGroupsBlockHeight = 0 DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = 1 DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = 1 + DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) 
GlobalDeSoParams = DeSoTestnetParams @@ -95,6 +96,7 @@ func resetBalanceModelBlockHeights() { DeSoTestnetParams.ForkHeights.AssociationsAndAccessGroupsBlockHeight = uint32(596555) DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = uint32(683058) DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(math.MaxUint32) + DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = uint32(math.MaxUint32) DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) GlobalDeSoParams = DeSoTestnetParams @@ -2200,6 +2202,7 @@ func TestBlockRewardPatch(t *testing.T) { } func TestConnectFailingTransaction(t *testing.T) { + setBalanceModelBlockHeights(t) require := require.New(t) seed := int64(1011) rand := rand.New(rand.NewSource(seed)) @@ -2209,13 +2212,6 @@ func TestConnectFailingTransaction(t *testing.T) { feeMax := uint64(10000) chain, params, db := NewLowDifficultyBlockchain(t) - params.ForkHeights.BalanceModelBlockHeight = 1 - params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 - params.ForkHeights.ProofOfStake1StateSetupBlockHeight = 1 - params.EncoderMigrationHeights.ProofOfStake1StateSetupMigration.Height = 1 - params.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - oldParams := GlobalDeSoParams - GlobalDeSoParams = *params mempool, miner := NewTestMiner(t, chain, params, true) // Mine a few blocks to give the senderPkString some money. 
_, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, mempool) @@ -2292,8 +2288,6 @@ func TestConnectFailingTransaction(t *testing.T) { require.Equal(expectedUtilityFee, utilityFee) err = blockView.FlushToDb(uint64(blockHeight)) - - GlobalDeSoParams = oldParams } func _getBMFForTxn(txn *MsgDeSoTxn, gp *GlobalParamsEntry) (_burnFee uint64, _utilityFee uint64) { diff --git a/lib/network.go b/lib/network.go index ff4525b4e..2ac9fa6b0 100644 --- a/lib/network.go +++ b/lib/network.go @@ -5835,9 +5835,25 @@ type TransactionSpendingLimit struct { // ProfilePKID || LockupLimitOperation || LockupLimitScopeType to number of transactions. // - ProfilePKID: A PKID to scope transactions by. // If using the "Any" scope, then ProfilePKID has to be the ZeroPKID. - // - LockupLimitOperationType: One of {Any, Scoped} - // If using the "Any" operation type, this limit applies to any possible DeSo token lockup. - // If using the "Scoped" operation type, this limit applies to the ProfilePKID specified. + // - LockupLimitScopeType: One of {Any, Scoped} + // If using the "Any" scope type, this limit applies to any possible DeSo token lockup. + // If using the "Scoped" scope type, this limit applies to the ProfilePKID specified. + // - LockupLimitOperation: One of {Any, Lockup, UpdateCoinLockupYieldCurve, UpdateCoinLockupTransferRestrictions, + // CoinLockupTransfer, CoinLockupUnlock} + // If using the "Any" operation type the limit applies to any coin lockup transaction type. + // If using the "Lockup" operation type the limit applies strictly to coin lockup transactions. + // If using the "UpdateCoinLockupYieldCurve" operation type the limit applies to any + // UpdateCoinLockupParams transaction where the yield curve is updated. + // If using the "UpdateCoinLockupTransferRestrictions" operation type the limit applies to any + // UpdateCoinLockupParams transaction where the lockup transfer restrictions are updated. 
+ // If using the "CoinLockupTransfer" operation type the limit applies to any + // coin lockup transfer transactions. + // If using the "CoinLockupUnlock" operation type the limit applies to + // any locked coin unlock transactions. + // + // NOTE: Note that an UpdateCoinLockupParams transaction can decrement the transaction limits twice. + // This is because we consider updating the yield curve and updating transfer restrictions as + // separate for the purpose of derived key limits. LockupLimitMap map[LockupLimitKey]uint64 // ValidatorPKID || StakerPKID to amount of stake-able $DESO. // Note that this is not a limit on the number of Stake txns that From fe58e8def4a64eb3cada094e436aef3e5fccf857 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Tue, 14 Nov 2023 14:59:19 -0500 Subject: [PATCH 286/762] Clean up vote and timeout message construction for broadcast --- lib/pos_consensus_controller.go | 22 ++++------------------ lib/pos_network_message_interface.go | 11 +++++++++++ 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index fcd1e8364..e06e86131 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -99,16 +99,12 @@ func (cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotS voteMsg := NewMessage(MsgTypeValidatorVote).(*MsgDeSoValidatorVote) voteMsg.MsgVersion = MsgValidatorVoteVersion0 voteMsg.ProposedInView = event.View - // TODO: Somehow we need to know the validator's ECDSA public key. Fill this out once the - // mapping between BLS and ECDSA keys is implemented and available in the consensus module. 
- // voteMsg.PublicKey = voteMsg.VotingPublicKey = cc.signer.GetPublicKey() // Get the block hash - tipBlockHash := event.TipBlockHash.GetValue() - voteMsg.BlockHash = NewBlockHash(tipBlockHash[:]) + voteMsg.BlockHash = BlockHashFromConsensusInterface(event.TipBlockHash) - // Compute and set the vote signature + // Sign the vote message voteMsg.VotePartialSignature, err = cc.signer.SignValidatorVote(event.View, event.TipBlockHash) if err != nil { // This should never happen as long as the BLS signer is initialized correctly. @@ -174,20 +170,10 @@ func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastH timeoutMsg := NewMessage(MsgTypeValidatorTimeout).(*MsgDeSoValidatorTimeout) timeoutMsg.MsgVersion = MsgValidatorTimeoutVersion0 timeoutMsg.TimedOutView = event.View - // TODO: Somehow we need to know the validator's ECDSA public key. Fill this out once the - // mapping between BLS and ECDSA keys is implemented and available in the consensus module. - // timeoutMsg.PublicKey = timeoutMsg.VotingPublicKey = cc.signer.GetPublicKey() - highQCBlockHash := event.QC.GetBlockHash().GetValue() - timeoutMsg.HighQC = &QuorumCertificate{ - BlockHash: NewBlockHash(highQCBlockHash[:]), - ProposedInView: event.QC.GetView(), - ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ - SignersList: event.QC.GetAggregatedSignature().GetSignersList(), - Signature: event.QC.GetAggregatedSignature().GetSignature(), - }, - } + timeoutMsg.HighQC = QuorumCertificateFromConsensusInterface(event.QC) + // Sign the timeout message timeoutMsg.TimeoutPartialSignature, err = cc.signer.SignValidatorTimeout(event.View, event.QC.GetView()) if err != nil { // This should never happen as long as the BLS signer is initialized correctly. 
diff --git a/lib/pos_network_message_interface.go b/lib/pos_network_message_interface.go index 50cc54867..8b1a924f5 100644 --- a/lib/pos_network_message_interface.go +++ b/lib/pos_network_message_interface.go @@ -63,6 +63,17 @@ func (qc *QuorumCertificate) GetAggregatedSignature() consensus.AggregatedSignat return qc.ValidatorsVoteAggregatedSignature } +func QuorumCertificateFromConsensusInterface(qc consensus.QuorumCertificate) *QuorumCertificate { + return &QuorumCertificate{ + ProposedInView: qc.GetView(), + BlockHash: BlockHashFromConsensusInterface(qc.GetBlockHash()), + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: qc.GetAggregatedSignature().GetSignature(), + SignersList: qc.GetAggregatedSignature().GetSignersList(), + }, + } +} + // AggregatedBLSSignature struct <-> consensus.AggregatedSignature interface translation func (aggSig *AggregatedBLSSignature) GetSignersList() *bitset.Bitset { From d30169ac05c0d16c8a08b93ae13afccb19d74efc Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 15 Nov 2023 12:23:40 -0500 Subject: [PATCH 287/762] Add UpdateSafeBlocks Function to FastHotstuffEventLoop (#818) * Add UpdateSafeBlocks Function to FastHotstuffEventLoop * Update interface --- consensus/event_loop.go | 44 +++++++++++++++++++ consensus/event_loop_test.go | 84 ++++++++++++++++++------------------ consensus/mocking.go | 5 +++ consensus/types.go | 1 + consensus/utils.go | 10 +++++ 5 files changed, 103 insertions(+), 41 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index ef7527473..14bfe278b 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -179,6 +179,41 @@ func (fc *fastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidatorList, saf return nil } +// UpdateSafeBlocks is used to update the safe blocks and their validator lists. 
This function +// can be used instead of the above ProcessTipBlock when a new block has been added to a fork +// in the blockchain, and the server has determined that the fork is safe to extend from. This +// can happen even if the blockchain's current tip does not change. +// +// Expected param: +// - safeBlocks: an unordered slice of blocks including the committed tip, the uncommitted tip, +// all ancestors of the uncommitted tip that are safe to extend from, and all blocks from forks +// that are safe to extend from. This function does not validate the collection of blocks. It +// expects the server to know and decide what blocks are safe to extend from. +func (fc *fastHotStuffEventLoop) UpdateSafeBlocks(safeBlocks []BlockWithValidatorList) error { + // Grab the event loop's lock + fc.lock.Lock() + defer fc.lock.Unlock() + + // Ensure the event loop is running + if fc.status != eventLoopStatusRunning { + return errors.New("FastHotStuffEventLoop.UpdateSafeBlocks: Event loop is not running") + } + + // Fetch the current tip block + tipBlock := BlockWithValidatorList{ + Block: fc.tip.block, + ValidatorList: fc.tip.validatorList, + } + + // Validate the safe blocks and validator lists, and store them + if err := fc.storeBlocks(tipBlock, safeBlocks); err != nil { + return errors.Wrap(err, "FastHotStuffEventLoop.UpdateSafeBlocks: ") + } + + // Happy path. There's no need to reschedule the crank timer or timeout scheduled tasks here. + return nil +} + // storeBlocks is a helper function that validates the provided blocks, validator lists, and stores them. // It must be called while holding the event loop's lock.
func (fc *fastHotStuffEventLoop) storeBlocks(tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { @@ -195,6 +230,15 @@ func (fc *fastHotStuffEventLoop) storeBlocks(tip BlockWithValidatorList, safeBlo return errors.New("Invalid safe blocks or validator lists") } + // Extract the block hashes for the tip block and safe blocks + tipBlockHash := tip.Block.GetBlockHash() + safeBlockHashes := collections.Transform(safeBlocks, extractBlockHash) + + // The safe blocks must contain the tip block. The tip block can always be extended from. + if !containsBlockHash(safeBlockHashes, tipBlockHash) { + return errors.New("Safe blocks do not contain the tip block") + } + // Store the tip block and validator list fc.tip = blockWithValidatorLookup{ block: tip.Block, diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index d933e394b..e4242c24e 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -121,18 +121,18 @@ func TestProcessTipBlock(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) fc := NewFastHotStuffEventLoop() - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, - BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip - []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks - ) - require.NoError(t, err) + + // Initialize the event loop + { + tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock}) + require.NoError(t, err) + } // Test ProcessTipBlock() function when event loop is not running { - err := fc.ProcessTipBlock( - BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip - []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks - ) + tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} + err := 
fc.ProcessTipBlock(tipBlock, []BlockWithValidatorList{tipBlock}) require.Error(t, err) } @@ -226,10 +226,9 @@ func TestProcessTipBlock(t *testing.T) { nextBlock.height = 2 nextBlock.view = 3 - err := fc.ProcessTipBlock( - BlockWithValidatorList{nextBlock, createDummyValidatorList()}, // tip - []BlockWithValidatorList{{nextBlock, createDummyValidatorList()}}, // safeBlocks - ) + tipBlock := BlockWithValidatorList{nextBlock, createDummyValidatorList()} + + err := fc.ProcessTipBlock(tipBlock, []BlockWithValidatorList{tipBlock}) require.NoError(t, err) require.Equal(t, nextBlock.GetBlockHash().GetValue(), fc.tip.block.GetBlockHash().GetValue()) @@ -255,12 +254,13 @@ func TestAdvanceViewOnTimeout(t *testing.T) { fc := NewFastHotStuffEventLoop() - // BlockHeight = 1, Current View = 3 - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, - BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip - []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks - ) - require.NoError(t, err) + // Init the event loop + { + // BlockHeight = 1, Current View = 3 + tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock}) + require.NoError(t, err) + } // Running AdvanceViewOnTimeout() should fail because the event loop is not running { @@ -345,12 +345,13 @@ func TestProcessValidatorVote(t *testing.T) { fc := NewFastHotStuffEventLoop() - // BlockHeight = 1, Current View = 3 - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, - BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip - []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks - ) - require.NoError(t, err) + // Init the event loop + { + // BlockHeight = 1, Current View = 3 + tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} + err := 
fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock}) + require.NoError(t, err) + } // Start the event loop fc.Start() @@ -429,12 +430,13 @@ func TestProcessValidatorTimeout(t *testing.T) { fc := NewFastHotStuffEventLoop() - // BlockHeight = 1, Current View = 3 - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, - BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip - []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks - ) - require.NoError(t, err) + // Init the event loop + { + // BlockHeight = 1, Current View = 3 + tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock}) + require.NoError(t, err) + } // Start the event loop fc.Start() @@ -479,7 +481,7 @@ func TestProcessValidatorTimeout(t *testing.T) { timeout.publicKey.ToString(): vote, } - err = fc.ProcessValidatorTimeout(timeout) + err := fc.ProcessValidatorTimeout(timeout) require.Error(t, err) require.Contains(t, err.Error(), "has already voted for view") } @@ -492,7 +494,7 @@ func TestProcessValidatorTimeout(t *testing.T) { timeout.publicKey.ToString(): timeout, } - err = fc.ProcessValidatorTimeout(timeout) + err := fc.ProcessValidatorTimeout(timeout) require.Error(t, err) require.Contains(t, err.Error(), "has already timed out for view") } @@ -558,10 +560,10 @@ func TestResetEventLoopSignal(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) fc := NewFastHotStuffEventLoop() - err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, - BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip - []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks - ) + + // Init the event loop + tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} + err := fc.Init(oneHourInNanoSecs, 
2*oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock}) require.NoError(t, err) // Start the event loop @@ -872,10 +874,10 @@ func TestFastHotStuffEventLoopStartStop(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) fc := NewFastHotStuffEventLoop() - err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, - BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip - []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks - ) + + // Init the event loop + tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} + err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock}) require.NoError(t, err) // Start the event loop diff --git a/consensus/mocking.go b/consensus/mocking.go index e91efd7d7..efc855fc7 100644 --- a/consensus/mocking.go +++ b/consensus/mocking.go @@ -8,6 +8,7 @@ type MockFastHotStuffEventLoop struct { OnGetCurrentView func() uint64 OnAdvanceViewOnTimeout func() (uint64, error) OnProcessTipBlock func(BlockWithValidatorList, []BlockWithValidatorList) error + OnUpdateSafeBlocks func([]BlockWithValidatorList) error OnProcessValidatorVote func(VoteMessage) error OnProcessValidatorTimeout func(TimeoutMessage) error OnStart func() @@ -35,6 +36,10 @@ func (fc *MockFastHotStuffEventLoop) ProcessTipBlock(tipBlock BlockWithValidator return fc.OnProcessTipBlock(tipBlock, safeBlocks) } +func (fc *MockFastHotStuffEventLoop) UpdateSafeBlocks(safeBlocks []BlockWithValidatorList) error { + return fc.OnUpdateSafeBlocks(safeBlocks) +} + func (fc *MockFastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { return fc.OnProcessValidatorVote(vote) } diff --git a/consensus/types.go b/consensus/types.go index f757e07fd..4af9cd7c1 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -50,6 +50,7 @@ type FastHotStuffEventLoop interface { GetCurrentView() uint64 AdvanceViewOnTimeout() (uint64, error) 
ProcessTipBlock(BlockWithValidatorList, []BlockWithValidatorList) error + UpdateSafeBlocks([]BlockWithValidatorList) error ProcessValidatorVote(VoteMessage) error ProcessValidatorTimeout(TimeoutMessage) error Start() diff --git a/consensus/utils.go b/consensus/utils.go index ebfe6802a..6ec8b603f 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -327,6 +327,16 @@ func isSuperMajorityStake(stake *uint256.Int, totalStake *uint256.Int) bool { return superMajorityConditionSum.Sign() >= 0 } +func extractBlockHash(block BlockWithValidatorList) BlockHash { + return block.Block.GetBlockHash() +} + +func containsBlockHash(blockHashes []BlockHash, blockHash BlockHash) bool { + return collections.Any(blockHashes, func(b BlockHash) bool { + return isEqualBlockHashes(b, blockHash) + }) +} + func isEqualBlockHashes(hash1 BlockHash, hash2 BlockHash) bool { hash1Value := hash1.GetValue() hash2Value := hash2.GetValue() From 02de943c3758941cc211c0584fec610ec99572d0 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 15 Nov 2023 19:35:36 -0500 Subject: [PATCH 288/762] Add ConsensusController Utility to Fetch Validator Set For Blocks (#819) * Add ConsensusController Utility to Fetch Validator Set For Blocks Cleanup Address Nina's comments * Better comments --- lib/pos_consensus_controller.go | 121 +++++++++++++++++++++++++++ lib/pos_network_message_interface.go | 19 +++++ 2 files changed, 140 insertions(+) diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index e06e86131..cc863f02b 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -11,6 +11,7 @@ type ConsensusController struct { lock sync.RWMutex fastHotStuffEventLoop consensus.FastHotStuffEventLoop blockchain *Blockchain + params *DeSoParams signer *BLSSigner } @@ -211,3 +212,123 @@ func (cc *ConsensusController) HandleHeader(pp *Peer, msg *MsgDeSoHeader) { func (cc *ConsensusController) HandleBlock(pp *Peer, msg 
*MsgDeSoBlock) { // TODO } + +// fetchValidatorListsForSafeBlocks takes in a set of safe blocks that can be extended from, and fetches the +// the validator set for each safe block. The result is returned as type BlockWithValidatorList so it can be +// passed to the FastHotStuffEventLoop. If the input blocks precede the committed tip or they do no exist within +// the current or next epoch after the committed tip, then this function returns an error. Note: it is not possible +// for safe blocks to precede the committed tip or to belong to an epoch that is more than one epoch ahead of the +// committed tip. +func (cc *ConsensusController) fetchValidatorListsForSafeBlocks(blocks []*MsgDeSoHeader) ( + []consensus.BlockWithValidatorList, + error, +) { + // If there are no blocks, then there's nothing to do. + if len(blocks) == 0 { + return nil, nil + } + + // Create a map to cache the validator set entries by epoch number. Two blocks in the same epoch will have + // the same validator set, so we can use an in-memory cache to optimize the validator set lookup for them. + validatorSetEntriesBySnapshotEpochNumber := make(map[uint64][]*ValidatorEntry) + + // Create a UtxoView for the committed tip block. We will use this to fetch the validator set for the + // all of the safe blocks. 
+ utxoView, err := NewUtxoView(cc.blockchain.db, cc.params, cc.blockchain.postgres, cc.blockchain.snapshot, nil) + if err != nil { + return nil, errors.Errorf("error creating UtxoView: %v", err) + } + + // Fetch the current epoch entry for the committed tip + epochEntryAtCommittedTip, err := utxoView.GetCurrentEpochEntry() + if err != nil { + return nil, errors.Errorf("error fetching epoch entry for committed tip: %v", err) + } + + // Fetch the next epoch entry + nextEpochEntryAfterCommittedTip, err := utxoView.SimulateNextEpochEntry(epochEntryAtCommittedTip) + if err != nil { + return nil, errors.Errorf("error fetching next epoch entry after committed tip: %v", err) + } + + // The input blocks can only be part of the current or next epoch entries. + possibleEpochEntriesForBlocks := []*EpochEntry{epochEntryAtCommittedTip, nextEpochEntryAfterCommittedTip} + + // Fetch the validator set at each block + blocksWithValidatorLists := make([]consensus.BlockWithValidatorList, len(blocks)) + for ii, block := range blocks { + // Find the epoch entry for the block. It'll either be the current epoch entry or the next one. + // We add 1 to the block height because we need the validator set that results AFTER connecting the + // block to the blockchain, and triggering an epoch transition (if at an epoch boundary). + epochEntryForBlock, err := getEpochEntryForBlockHeight(block.Height+1, possibleEpochEntriesForBlocks) + if err != nil { + return nil, errors.Errorf("error fetching epoch number for block: %v", err) + } + + // Compute the snapshot epoch number for the block. This is the epoch number that the validator set + // for the block was snapshotted in. 
+ snapshotEpochNumber, err := utxoView.ComputeSnapshotEpochNumberForEpoch(epochEntryForBlock.EpochNumber) + if err != nil { + return nil, errors.Errorf("error computing snapshot epoch number for epoch: %v", err) + } + + var validatorSetAtBlock []*ValidatorEntry + var ok bool + + // If the validator set for the block is already cached by the snapshot epoch number, then use it. + // Otherwise, fetch it from the UtxoView. + if validatorSetAtBlock, ok = validatorSetEntriesBySnapshotEpochNumber[snapshotEpochNumber]; !ok { + // We don't have the validator set for the block cached. Fetch it from the UtxoView. + validatorSetAtBlock, err = utxoView.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber) + if err != nil { + return nil, errors.Errorf("error fetching validator set for block: %v", err) + } + } + + blocksWithValidatorLists[ii] = consensus.BlockWithValidatorList{ + Block: block, + ValidatorList: ValidatorEntriesToConsensusInterface(validatorSetAtBlock), + } + } + + // Happy path: we fetched the validator lists for all blocks successfully. + return blocksWithValidatorLists, nil +} + +// Finds the epoch entry for the block and returns the epoch number. +func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { + for _, epochEntry := range epochEntries { + if epochEntry.ContainsBlockHeight(blockHeight) { + return epochEntry, nil + } + } + + return nil, errors.Errorf("error finding epoch number for block height: %v", blockHeight) +} + +//////////////////////////////////////////////////////////////////////////////// +// TODO: delete all of the functions below. They are dummy stubbed out functions +// needed by ConsensusController, but are implemented in other feature branches. +// We stub them out here to unblock consensus work. 
+//////////////////////////////////////////////////////////////////////////////// + +func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, error) { + return nil, errors.New("getUtxoViewAtBlockHash: replace me with a real implementation later") +} + +func (bav *UtxoView) SimulateNextEpochEntry(epochEntry *EpochEntry) (*EpochEntry, error) { + return nil, errors.New("SimulateNextEpochEntry: replace me with a real implementation later") +} + +func (bav *UtxoView) ComputeSnapshotEpochNumberForEpoch(epochNumber uint64) (uint64, error) { + return 0, errors.New("ComputeSnapshotEpochNumberForEpoch: replace me with a real implementation later") +} + +func (bav *UtxoView) GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotAtEpochNumber uint64) ([]*ValidatorEntry, error) { + return nil, errors.New("GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber: replace me with a real implementation later") +} + +func (epochEntry *EpochEntry) ContainsBlockHeight(blockHeight uint64) bool { + // TODO: Implement this later + return false +} diff --git a/lib/pos_network_message_interface.go b/lib/pos_network_message_interface.go index 8b1a924f5..249164709 100644 --- a/lib/pos_network_message_interface.go +++ b/lib/pos_network_message_interface.go @@ -4,6 +4,7 @@ import ( "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections/bitset" "github.com/deso-protocol/core/consensus" + "github.com/holiman/uint256" ) ////////////////////////////////////////////////////////////////////////////////// @@ -94,3 +95,21 @@ func BlockHashFromConsensusInterface(blockHash consensus.BlockHash) *BlockHash { blockHashValue := blockHash.GetValue() return NewBlockHash(blockHashValue[:]) } + +// ValidatorEntry struct <-> consensus.Validator interface translation + +func (validator *ValidatorEntry) GetPublicKey() *bls.PublicKey { + return validator.VotingPublicKey +} + +func (validator *ValidatorEntry) GetStakeAmount() *uint256.Int { + return 
validator.TotalStakeAmountNanos +} + +func ValidatorEntriesToConsensusInterface(validatorEntries []*ValidatorEntry) []consensus.Validator { + validatorInterfaces := make([]consensus.Validator, len(validatorEntries)) + for idx, validatorEntry := range validatorEntries { + validatorInterfaces[idx] = validatorEntry + } + return validatorInterfaces +} From d3fae5813bd4461d08667ce7b56e53fa51626394 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 15 Nov 2023 20:07:43 -0500 Subject: [PATCH 289/762] Implement E2E Block Processing Between Blockchain and FastHotStuffEventLoop (#816) Cleanup Address Nina's comments --- lib/blockchain.go | 5 ++- lib/pos_consensus_controller.go | 80 +++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 2 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index c5fb3ee41..9483130c0 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -2608,9 +2608,10 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // 4. Connect the block to the blockchain's tip // 5. If applicable, flush the incoming block's grandparent to the DB // 6. 
Notify the block proposer, pacemaker, and voting logic that the incoming block has been accepted -func (bc *Blockchain) processBlockPoS(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, err error) { +func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, verifySignatures bool) ( + _success bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { // TODO: Implement me - return false, false, fmt.Errorf("ProcessBlockPoS: Not implemented yet") + return false, false, nil, fmt.Errorf("ProcessBlockPoS: Not implemented yet") } func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index cc863f02b..25af46fa7 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -213,6 +213,82 @@ func (cc *ConsensusController) HandleBlock(pp *Peer, msg *MsgDeSoBlock) { // TODO } +// tryProcessBlockAsNewTip tries to apply a new tip block to both the Blockchain and FastHotStuffEventLoop data +// structures. It wraps the ProcessBlockPoS and ProcessTipBlock functions in the Blockchain and FastHotStuffEventLoop +// data structures, which together implement the Fast-HotStuff block handling algorithm end-to-end. 
+// +// Reference Implementation: +// https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L573 +func (cc *ConsensusController) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([]*BlockHash, error) { + // Try to apply the block locally as the new tip of the blockchain + successfullyAppliedNewTip, _, missingBlockHashes, err := cc.blockchain.processBlockPoS( + block, // Pass in the block itself + cc.fastHotStuffEventLoop.GetCurrentView(), // Pass in the current view to ensure we don't process a stale block + true, // Make sure we verify signatures in the block + ) + if err != nil { + return nil, errors.Errorf("HandleFastHostStuffBlockProposal: Error processing block locally: %v", err) + } + + // If the incoming block is an orphan, then there's nothing we can do. We return the missing ancestor + // block hashes to the caller. The caller can then fetch the missing blocks from the network and retry + // if needed. + if len(missingBlockHashes) > 0 { + return missingBlockHashes, nil + } + + // At this point we know that the blockchain was mutated. Either the incoming block resulted in a new + // tip for the blockchain, or the incoming block was part of a fork that resulted in a change in the + // safe blocks + + // Fetch the safe blocks that are eligible to be extended from by the next incoming tip block + safeBlocks, err := cc.blockchain.getSafeBlocks() + if err != nil { + return nil, errors.Errorf("HandleFastHostStuffBlockProposal: Error fetching safe blocks: %v", err) + } + + // Fetch the validator set at each safe block + safeBlocksWithValidators, err := cc.fetchValidatorListsForSafeBlocks(safeBlocks) + if err != nil { + return nil, errors.Errorf("HandleFastHostStuffBlockProposal: Error fetching validator lists for safe blocks: %v", err) + } + + // If the block was processed successfully but was not applied as the new tip, we need up date the safe + // blocks in the FastHotStuffEventLoop. 
This is because the new block may be safe to extend even though + // it did not result in a new tip. + if !successfullyAppliedNewTip { + // Update the safe blocks to the FastHotStuffEventLoop + if err = cc.fastHotStuffEventLoop.UpdateSafeBlocks(safeBlocksWithValidators); err != nil { + return nil, errors.Errorf("HandleFastHostStuffBlockProposal: Error processing safe blocks locally: %v", err) + } + + // Happy path. The safe blocks were successfully updated in the FastHotStuffEventLoop. Nothing left to do. + return nil, nil + } + + // If the block was processed successfully and resulted in a change to the blockchain's tip, then + // we need to pass the new tip to the FastHotStuffEventLoop as well. + + // Fetch the new tip from the blockchain. Note: the new tip may or may not be the input block itself. + // It's possible that there was a descendant of the tip block that was previously stored as an orphan + // in the Blockchain, and was applied as the new tip. + tipBlock := cc.blockchain.BlockTip().Header + + // Fetch the validator set at the new tip block + tipBlockWithValidators, err := cc.fetchValidatorListsForSafeBlocks([]*MsgDeSoHeader{tipBlock}) + if err != nil { + return nil, errors.Errorf("HandleFastHostStuffBlockProposal: Error fetching validator lists for tip block: %v", err) + } + + // Pass the new tip and safe blocks to the FastHotStuffEventLoop + if err = cc.fastHotStuffEventLoop.ProcessTipBlock(tipBlockWithValidators[0], safeBlocksWithValidators); err != nil { + return nil, errors.Errorf("HandleFastHostStuffBlockProposal: Error processing tip block locally: %v", err) + } + + // Happy path. The block was processed successfully and applied as the new tip. Nothing left to do. + return nil, nil +} + // fetchValidatorListsForSafeBlocks takes in a set of safe blocks that can be extended from, and fetches the // the validator set for each safe block. The result is returned as type BlockWithValidatorList so it can be // passed to the FastHotStuffEventLoop. 
If the input blocks precede the committed tip or they do no exist within @@ -332,3 +408,7 @@ func (epochEntry *EpochEntry) ContainsBlockHeight(blockHeight uint64) bool { // TODO: Implement this later return false } + +func (bc *Blockchain) getSafeBlocks() ([]*MsgDeSoHeader, error) { + return nil, errors.New("getSafeBlocks: replace me with a real implementation later") +} From 114b6de7c6b2948d8855fd6a10afe15772f1f262 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 15 Nov 2023 20:14:15 -0500 Subject: [PATCH 290/762] Implement ConsensusController Incoming Block Handler For Peer Messages (#820) --- lib/pos_consensus_controller.go | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index 25af46fa7..1e39f3664 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -209,8 +209,31 @@ func (cc *ConsensusController) HandleHeader(pp *Peer, msg *MsgDeSoHeader) { // TODO } -func (cc *ConsensusController) HandleBlock(pp *Peer, msg *MsgDeSoBlock) { - // TODO +func (cc *ConsensusController) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error { + // Hold a lock on the consensus controller, because we will need to mutate the Blockchain + // and the FastHotStuffEventLoop data structures. + cc.lock.Lock() + defer cc.lock.Unlock() + + // Try to apply the block as the new tip of the blockchain. If the block is an orphan, then + // we will get back a list of missing ancestor block hashes. We can fetch the missing blocks + // from the network and retry. + missingBlockHashes, err := cc.tryProcessBlockAsNewTip(msg) + if err != nil { + // If we get an error here, it means something went wrong with the block processing algorithm. + // Nothing we can do to recover here. 
+ return errors.Errorf("HandleBlock: Error processing block as new tip: %v", err) + } + + // If there are missing block hashes, then we need to fetch the missing blocks from the network + // and retry processing the block as a new tip. We'll request the blocks from the same peer. + if len(missingBlockHashes) > 0 { + pp.QueueMessage(&MsgDeSoGetBlocks{ + HashList: missingBlockHashes, + }) + } + + return nil } // tryProcessBlockAsNewTip tries to apply a new tip block to both the Blockchain and FastHotStuffEventLoop data From c42ca157ff0cfd9215bd2c8adc499fbf8ad0781c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 17 Nov 2023 12:58:18 -0500 Subject: [PATCH 291/762] Handle Block Construction Signal and Broadcast (#811) * Handle Block Construction Signal and Broadcast * Address Nina's comments --- consensus/utils.go | 9 +++ lib/bls_keystore.go | 6 ++ lib/pos_block_producer.go | 10 +-- lib/pos_consensus_controller.go | 134 ++++++++++++++++++++++++++++---- 4 files changed, 137 insertions(+), 22 deletions(-) diff --git a/consensus/utils.go b/consensus/utils.go index 4e8eab0fd..ebb69ade4 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -13,6 +13,15 @@ import ( "golang.org/x/crypto/sha3" ) +func IsProperlyFormedConstructVoteQCEvent(event *FastHotStuffEvent) bool { + return event != nil && // Event non-nil + event.EventType == FastHotStuffEventTypeConstructVoteQC && // Event type is QC construction + event.View > 0 && // The view the block was proposed in is non-zero + event.TipBlockHeight > 0 && // Tip block height is non-zero + !isInterfaceNil(event.TipBlockHash) && // Tip block hash is non-nil + !isInterfaceNil(event.QC) // The high QC is non-nil +} + func IsProperlyFormedVoteEvent(event *FastHotStuffEvent) bool { return event != nil && // Event non-nil event.EventType == FastHotStuffEventTypeVote && // Event type is vote diff --git a/lib/bls_keystore.go b/lib/bls_keystore.go index 2cb280f4e..cf4908cd8 100644 --- 
a/lib/bls_keystore.go +++ b/lib/bls_keystore.go @@ -87,6 +87,12 @@ func (signer *BLSSigner) GetPublicKey() *bls.PublicKey { return signer.privateKey.PublicKey() } +func (signer *BLSSigner) SignBlockProposal(view uint64, blockHash consensus.BlockHash) (*bls.Signature, error) { + // A block proposer's signature on a block is just its partial vote signature. This allows us to aggregate + // signatures from the proposer and validators into a single aggregated signature to build a QC. + return signer.SignValidatorVote(view, blockHash) +} + func (signer *BLSSigner) SignValidatorVote(view uint64, blockHash consensus.BlockHash) (*bls.Signature, error) { payload := consensus.GetVoteSignaturePayload(view, blockHash) return signer.sign(BLSSignatureOpCodeValidatorVote, payload[:]) diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 5cc6886a6..a3c70f95c 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -1,11 +1,12 @@ package lib import ( + "math" + "time" + "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections/bitset" "github.com/pkg/errors" - "math" - "time" ) // BlockTemplate is a dummy type that is used to label incomplete blocks. The only purpose of this type is to make it @@ -34,11 +35,6 @@ func NewPosBlockProducer(mp Mempool, params *DeSoParams, proposerPublicKey *Publ } } -func (pbp *PosBlockProducer) SignBlock(blockTemplate BlockTemplate, signerPrivateKey *bls.PrivateKey) (*MsgDeSoBlock, error) { - // TODO - return nil, nil -} - // CreateUnsignedBlock constructs an unsigned, PoS block with Fee-Time ordered transactions. This function should be used // during happy path in consensus when a vote QC has been assembled. The block is unsigned, so to indicate its incompleteness, // the block is returned as a BlockTemplate. 
diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index 1e39f3664..cf536b4c7 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -9,16 +9,20 @@ import ( type ConsensusController struct { lock sync.RWMutex - fastHotStuffEventLoop consensus.FastHotStuffEventLoop blockchain *Blockchain + blockProducer *PosBlockProducer + fastHotStuffEventLoop consensus.FastHotStuffEventLoop + mempool Mempool params *DeSoParams signer *BLSSigner } -func NewConsensusController(blockchain *Blockchain, signer *BLSSigner) *ConsensusController { +func NewConsensusController(params *DeSoParams, blockchain *Blockchain, mempool Mempool, signer *BLSSigner) *ConsensusController { return &ConsensusController{ blockchain: blockchain, + blockProducer: NewPosBlockProducer(mempool, params, nil, signer.GetPublicKey()), fastHotStuffEventLoop: consensus.NewFastHotStuffEventLoop(), + mempool: mempool, signer: signer, } } @@ -37,21 +41,114 @@ func (cc *ConsensusController) Init() { // then it constructs, processes locally, and then and broadcasts the block. // // Steps: -// 1. Verify that the block height we want to propose at is valid -// 2. Get a QC from the consensus module -// 3. Iterate over the top n transactions from the mempool -// 4. Construct a block with the QC and the top n transactions from the mempool -// 5. Sign the block -// 6. Process the block locally -// - This will connect the block to the blockchain, remove the transactions from the -// - mempool, and process the vote in the consensus module -// 7. Broadcast the block to the network -func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus.FastHotStuffEvent) { - // Hold a read lock on the consensus controller. This is because we need to check the - // current view and block height of the consensus module. +// 1. Validate that the block height and view we want to propose the block with are not stale +// 2. 
Iterate over the top n transactions from the mempool +// 3. Construct a block with the QC and the top n transactions from the mempool +// 4. Sign the block +// 5. Process the block locally +// - This will connect the block to the blockchain, remove the transactions from the +// mempool, and process the vote in the FastHotStuffEventLoop +// 6. Broadcast the block to the network +func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus.FastHotStuffEvent) error { + // Hold a read and write lock on the consensus controller. This is because we need to check + // the current view of the consensus event loop, and to update the blockchain. cc.lock.Lock() defer cc.lock.Unlock() + if !consensus.IsProperlyFormedConstructVoteQCEvent(event) { + // If the event is not properly formed, we ignore it and log it. This should never happen. + return errors.Errorf("HandleFastHostStuffBlockProposal: Received improperly formed vote QC construction event: %v", event) + } + + // If the block proposal is properly formed, we try to construct and broadcast the block here. + + // Fetch the parent block + parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) + parentBlock, parentBlockExists := cc.blockchain.blockIndex[*parentBlockHash] + if !parentBlockExists { + return errors.Errorf("HandleFastHostStuffBlockProposal: Error fetching parent block: %v", parentBlockHash) + } + + // Make sure that the parent has a validated status. This should never fail. If it does, something is + // very wrong with the safeBlocks parameter in the FastHotStuffEventLoop. + if !parentBlock.IsValidated() { + return errors.Errorf("HandleFastHostStuffBlockProposal: parent block is not validated: %v", parentBlockHash) + } + + // Perform simple block height and view sanity checks on the block construction signal. + + // Cross-validate that the event's tip block height matches the parent's height. If the two don't match + // then something is very wrong. 
+ if uint64(parentBlock.Height) != event.TipBlockHeight { + return errors.Errorf( + "HandleFastHostStuffBlockProposal: Error constructing block at height %d. Expected block height %d", + event.TipBlockHeight+1, + parentBlock.Height+1, + ) + } + + // Validate that the event's view is not stale. If the view is stale, then it means that the consensus + // has advanced to the next view after queuing this block proposal event. This is normal and an expected + // race condition in the steady-state. + currentView := cc.fastHotStuffEventLoop.GetCurrentView() + if currentView > event.View { + return errors.Errorf( + "HandleFastHostStuffBlockProposal: Error constructing block at height %d. Stale view %d", + event.TipBlockHeight+1, + event.View, + ) + } + + // Build a UtxoView at the parent block + utxoViewAtParent, err := cc.blockchain.getUtxoViewAtBlockHash(*parentBlock.Hash) + if err != nil { + // This should never happen as long as the parent block is a descendant of the committed tip. + return errors.Errorf("HandleFastHostStuffBlockProposal: Error fetching UtxoView for parent block: %v", parentBlockHash) + } + + // TODO: Compute the random seed hash for the block proposer + var proposerRandomSeedHash *RandomSeedHash + + // Construct an unsigned block + block, err := cc.blockProducer.CreateUnsignedBlock( + utxoViewAtParent, + event.TipBlockHeight+1, + event.View, + proposerRandomSeedHash, + QuorumCertificateFromConsensusInterface(event.QC), + ) + if err != nil { + return errors.Errorf("HandleFastHostStuffBlockProposal: Error constructing unsigned block: %v", err) + } + + // Sign the block + blockHash, err := block.Header.Hash() + if err != nil { + return errors.Errorf("HandleFastHostStuffBlockProposal: Error hashing block: %v", err) + } + block.Header.ProposerVotePartialSignature, err = cc.signer.SignBlockProposal(block.Header.ProposedInView, blockHash) + if err != nil { + return errors.Errorf("HandleFastHostStuffBlockProposal: Error signing block: %v", err) + } + + // 
Process the block locally + missingBlockHashes, err := cc.tryProcessBlockAsNewTip(block) + if err != nil { + return errors.Errorf("HandleFastHostStuffBlockProposal: Error processing block locally: %v", err) + } + + if len(missingBlockHashes) > 0 { + // This should not be possible. If we successfully constructed the block, then we should + // have its ancestors on-hand too. Something is very wrong. We should not broadcast this block. + return errors.Errorf( + "HandleFastHostStuffBlockProposal: Error processing block locally: missing block hashes: %v", + missingBlockHashes, + ) + } + + // TODO: Broadcast the block proposal to the network + + return nil } func (cc *ConsensusController) HandleFastHostStuffEmptyTimeoutBlockProposal(event *consensus.FastHotStuffEvent) { @@ -118,7 +215,6 @@ func (cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotS // or a duplicate vote/timeout for the same view. Something is very wrong. We should not // broadcast it to the network. return errors.Errorf("HandleFastHostStuffVote: Error processing vote locally: %v", err) - } // Broadcast the vote message to the network @@ -435,3 +531,11 @@ func (epochEntry *EpochEntry) ContainsBlockHeight(blockHeight uint64) bool { func (bc *Blockchain) getSafeBlocks() ([]*MsgDeSoHeader, error) { return nil, errors.New("getSafeBlocks: replace me with a real implementation later") } + +func (bav *UtxoView) getNextRandomSeedHash() (*RandomSeedHash, error) { + return nil, errors.New("getNextRandomSeedHash: replace me with a real implementation later") +} + +func (b *BlockNode) IsValidated() bool { + return false +} From 1be9fd8a108453afdd1b17109b1e888a6187374b Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 17 Nov 2023 17:05:26 -0500 Subject: [PATCH 292/762] Handle Timeout Block Construction Signal and Broadcast (#825) * Handle Timeout Block Construction Signal and Broadcast * Address Nina's comments --- collections/slice.go | 6 ++ 
consensus/utils.go | 9 ++ lib/pos_consensus_controller.go | 142 +++++++++++++++++++-------- lib/pos_network_message_interface.go | 13 +++ 4 files changed, 130 insertions(+), 40 deletions(-) diff --git a/collections/slice.go b/collections/slice.go index 95c552b80..4d45ae357 100644 --- a/collections/slice.go +++ b/collections/slice.go @@ -16,6 +16,12 @@ func Any[T any](slice []T, predicate func(T) bool) bool { return false } +func Contains[T comparable](slice []T, value T) bool { + return Any(slice, func(val T) bool { + return val == value + }) +} + func Transform[TInput any, TOutput any](slice []TInput, transformFn func(TInput) TOutput) []TOutput { var result []TOutput for _, val := range slice { diff --git a/consensus/utils.go b/consensus/utils.go index ebb69ade4..26ff54bc9 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -22,6 +22,15 @@ func IsProperlyFormedConstructVoteQCEvent(event *FastHotStuffEvent) bool { !isInterfaceNil(event.QC) // The high QC is non-nil } +func IsProperlyFormedConstructTimeoutQCEvent(event *FastHotStuffEvent) bool { + return event != nil && // Event non-nil + event.EventType == FastHotStuffEventTypeConstructTimeoutQC && // Event type is timeout QC construction + event.View > 0 && // The view the block was proposed in is non-zero + event.TipBlockHeight > 0 && // Tip block height is non-zero + !isInterfaceNil(event.TipBlockHash) && // Tip block hash is non-nil + isProperlyFormedAggregateQC(event.AggregateQC) // The aggregate QC is properly formed +} + func IsProperlyFormedVoteEvent(event *FastHotStuffEvent) bool { return event != nil && // Event non-nil event.EventType == FastHotStuffEventTypeVote && // Event type is vote diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index cf536b4c7..6876e9d3e 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -3,6 +3,7 @@ package lib import ( "sync" + "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" 
"github.com/pkg/errors" ) @@ -38,26 +39,82 @@ func (cc *ConsensusController) Init() { // HandleFastHostStuffBlockProposal is called when FastHotStuffEventLoop has signaled that it can // construct a block at a certain block height. This function validates the block proposal signal, -// then it constructs, processes locally, and then and broadcasts the block. +// constructs, processes locally, and then broadcasts the block. +func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus.FastHotStuffEvent) error { + // Hold a read and write lock on the consensus controller. This is because we need to check + // the current view of the consensus event loop, and to update the blockchain. + cc.lock.Lock() + defer cc.lock.Unlock() + + // Handle the event as a block proposal event for a regular block + if err := cc.handleBlockProposerEvent(event, consensus.FastHotStuffEventTypeConstructVoteQC); err != nil { + return errors.Wrapf(err, "HandleFastHostStuffBlockProposal:") + } + + // Happy path: nothing left to do + return nil +} + +// HandleFastHostStuffEmptyTimeoutBlockProposal is called when FastHotStuffEventLoop has signaled that it can +// construct a timeout block at a certain block height. This function validates the timeout block proposal +// signal, constructs, processes locally, and then broadcasts the block. +func (cc *ConsensusController) HandleFastHostStuffEmptyTimeoutBlockProposal(event *consensus.FastHotStuffEvent) error { + // Hold a read and write lock on the consensus controller. This is because we need to check + // the current view of the consensus event loop, and to update the blockchain. 
+ cc.lock.Lock() + defer cc.lock.Unlock() + + // Handle the event as a block proposal event for a timeout block + if err := cc.handleBlockProposerEvent(event, consensus.FastHotStuffEventTypeConstructTimeoutQC); err != nil { + return errors.Wrapf(err, "HandleFastHostStuffEmptyTimeoutBlockProposal:") + } + + // Happy path: nothing left to do + return nil +} + +// handleBlockProposerEvent is a helper function that can process a block proposal event for either +// a regular block or a timeout block. It can be called with an expectedEventType param that toggles +// whether the event should be validated and processed as a normal or timeout block proposal. // // Steps: // 1. Validate that the block height and view we want to propose the block with are not stale // 2. Iterate over the top n transactions from the mempool -// 3. Construct a block with the QC and the top n transactions from the mempool +// 3. Construct a block with the QC or aggregate QC and the top n transactions from the mempool // 4. Sign the block // 5. Process the block locally // - This will connect the block to the blockchain, remove the transactions from the // mempool, and process the vote in the FastHotStuffEventLoop // 6. Broadcast the block to the network -func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus.FastHotStuffEvent) error { - // Hold a read and write lock on the consensus controller. This is because we need to check - // the current view of the consensus event loop, and to update the blockchain. 
- cc.lock.Lock() - defer cc.lock.Unlock() +func (cc *ConsensusController) handleBlockProposerEvent( + event *consensus.FastHotStuffEvent, + expectedEventType consensus.FastHotStuffEventType, +) error { + // Validate that the expected event type is a block proposal event type + possibleExpectedEventTypes := []consensus.FastHotStuffEventType{ + consensus.FastHotStuffEventTypeConstructVoteQC, + consensus.FastHotStuffEventTypeConstructTimeoutQC, + } + if !collections.Contains(possibleExpectedEventTypes, expectedEventType) { + return errors.Errorf("Invalid expected event type: %v", expectedEventType) + } - if !consensus.IsProperlyFormedConstructVoteQCEvent(event) { - // If the event is not properly formed, we ignore it and log it. This should never happen. - return errors.Errorf("HandleFastHostStuffBlockProposal: Received improperly formed vote QC construction event: %v", event) + // The event's type should match the expected event type + if event.EventType != expectedEventType { + return errors.Errorf("Unexpected event type: %v", event.EventType) + } + + // If the event is a regular block proposal event, then we validate and process it as a regular block + if expectedEventType == consensus.FastHotStuffEventTypeConstructVoteQC { + if !consensus.IsProperlyFormedConstructVoteQCEvent(event) { + // If the event is not properly formed, we ignore it and log it. This should never happen. + return errors.Errorf("Received improperly formed vote QC construction event: %v", event) + } + } else { // expectedEventType == consensus.FastHotStuffEventTypeConstructTimeoutQC + if !consensus.IsProperlyFormedConstructTimeoutQCEvent(event) { + // If the event is not properly formed, we ignore it and log it. This should never happen. + return errors.Errorf("Received improperly formed timeout QC construction event: %v", event) + } } // If the block proposal is properly formed, we try to construct and broadcast the block here. 
@@ -66,13 +123,13 @@ func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) parentBlock, parentBlockExists := cc.blockchain.blockIndex[*parentBlockHash] if !parentBlockExists { - return errors.Errorf("HandleFastHostStuffBlockProposal: Error fetching parent block: %v", parentBlockHash) + return errors.Errorf("Error fetching parent block: %v", parentBlockHash) } // Make sure that the parent has a validated status. This should never fail. If it does, something is // very wrong with the safeBlocks parameter in the FastHotStuffEventLoop. if !parentBlock.IsValidated() { - return errors.Errorf("HandleFastHostStuffBlockProposal: parent block is not validated: %v", parentBlockHash) + return errors.Errorf("Parent block is not validated: %v", parentBlockHash) } // Perform simple block height and view sanity checks on the block construction signal. @@ -81,7 +138,7 @@ func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus // then something is very wrong. if uint64(parentBlock.Height) != event.TipBlockHeight { return errors.Errorf( - "HandleFastHostStuffBlockProposal: Error constructing block at height %d. Expected block height %d", + "Error constructing block at height %d. Expected block height %d", event.TipBlockHeight+1, parentBlock.Height+1, ) @@ -93,7 +150,7 @@ func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus currentView := cc.fastHotStuffEventLoop.GetCurrentView() if currentView > event.View { return errors.Errorf( - "HandleFastHostStuffBlockProposal: Error constructing block at height %d. Stale view %d", + "Error constructing block at height %d. 
Stale view %d", event.TipBlockHeight+1, event.View, ) @@ -103,45 +160,61 @@ func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus utxoViewAtParent, err := cc.blockchain.getUtxoViewAtBlockHash(*parentBlock.Hash) if err != nil { // This should never happen as long as the parent block is a descendant of the committed tip. - return errors.Errorf("HandleFastHostStuffBlockProposal: Error fetching UtxoView for parent block: %v", parentBlockHash) + return errors.Errorf("Error fetching UtxoView for parent block: %v", parentBlockHash) } // TODO: Compute the random seed hash for the block proposer var proposerRandomSeedHash *RandomSeedHash - // Construct an unsigned block - block, err := cc.blockProducer.CreateUnsignedBlock( - utxoViewAtParent, - event.TipBlockHeight+1, - event.View, - proposerRandomSeedHash, - QuorumCertificateFromConsensusInterface(event.QC), - ) - if err != nil { - return errors.Errorf("HandleFastHostStuffBlockProposal: Error constructing unsigned block: %v", err) + var block *MsgDeSoBlock + + if expectedEventType == consensus.FastHotStuffEventTypeConstructVoteQC { + // Construct an unsigned block + block, err = cc.blockProducer.CreateUnsignedBlock( + utxoViewAtParent, + event.TipBlockHeight+1, + event.View, + proposerRandomSeedHash, + QuorumCertificateFromConsensusInterface(event.QC), + ) + if err != nil { + return errors.Errorf("Error constructing unsigned block: %v", err) + } + } else { // expectedEventType == consensus.FastHotStuffEventTypeConstructTimeoutQC + // Construct an unsigned timeout block + block, err = cc.blockProducer.CreateUnsignedTimeoutBlock( + utxoViewAtParent, + event.TipBlockHeight+1, + event.View, + proposerRandomSeedHash, + AggregateQuorumCertificateFromConsensusInterface(event.AggregateQC), + ) + if err != nil { + return errors.Errorf("Error constructing unsigned timeout block: %v", err) + } } // Sign the block blockHash, err := block.Header.Hash() if err != nil { - return 
errors.Errorf("HandleFastHostStuffBlockProposal: Error hashing block: %v", err) + return errors.Errorf("Error hashing block: %v", err) } block.Header.ProposerVotePartialSignature, err = cc.signer.SignBlockProposal(block.Header.ProposedInView, blockHash) if err != nil { - return errors.Errorf("HandleFastHostStuffBlockProposal: Error signing block: %v", err) + return errors.Errorf("Error signing block: %v", err) } // Process the block locally missingBlockHashes, err := cc.tryProcessBlockAsNewTip(block) if err != nil { - return errors.Errorf("HandleFastHostStuffBlockProposal: Error processing block locally: %v", err) + return errors.Errorf("Error processing block locally: %v", err) } if len(missingBlockHashes) > 0 { // This should not be possible. If we successfully constructed the block, then we should // have its ancestors on-hand too. Something is very wrong. We should not broadcast this block. return errors.Errorf( - "HandleFastHostStuffBlockProposal: Error processing block locally: missing block hashes: %v", + "Error processing block locally: missing block hashes: %v", missingBlockHashes, ) } @@ -151,17 +224,6 @@ func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus return nil } -func (cc *ConsensusController) HandleFastHostStuffEmptyTimeoutBlockProposal(event *consensus.FastHotStuffEvent) { - // The consensus module has signaled that we have a timeout QC and can propose one at a certain - // block height. We construct an empty block with a timeout QC and broadcast it here: - // 1. Verify that the block height and view we want to propose at is valid - // 2. Get a timeout QC from the consensus module - // 3. Construct a block with the timeout QC - // 4. Sign the block - // 5. Process the block locally - // 6. Broadcast the block to the network -} - // HandleFastHostStuffVote is triggered when FastHotStuffEventLoop has signaled that it wants to // vote on the current tip. 
This functions validates the vote signal, then it constructs the // vote message here. diff --git a/lib/pos_network_message_interface.go b/lib/pos_network_message_interface.go index 249164709..5a89c3520 100644 --- a/lib/pos_network_message_interface.go +++ b/lib/pos_network_message_interface.go @@ -75,6 +75,19 @@ func QuorumCertificateFromConsensusInterface(qc consensus.QuorumCertificate) *Qu } } +// AggregateQuorumCertificate struct <-> consensus.AggregateQuorumCertificate interface translation +func AggregateQuorumCertificateFromConsensusInterface(aggQC consensus.AggregateQuorumCertificate) *TimeoutAggregateQuorumCertificate { + return &TimeoutAggregateQuorumCertificate{ + TimedOutView: aggQC.GetView(), + ValidatorsHighQC: QuorumCertificateFromConsensusInterface(aggQC.GetHighQC()), + ValidatorsTimeoutHighQCViews: aggQC.GetHighQCViews(), + ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ + Signature: aggQC.GetAggregatedSignature().GetSignature(), + SignersList: aggQC.GetAggregatedSignature().GetSignersList(), + }, + } +} + // AggregatedBLSSignature struct <-> consensus.AggregatedSignature interface translation func (aggSig *AggregatedBLSSignature) GetSignersList() *bitset.Bitset { From e26dfdf73d3d89ecc697ab233aa0286ee926e40f Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Mon, 20 Nov 2023 16:51:27 -0500 Subject: [PATCH 293/762] Cleanup --- lib/pos_consensus_controller.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index 6876e9d3e..abd171390 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -359,10 +359,6 @@ func (cc *ConsensusController) HandleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBu // TODO } -func (cc *ConsensusController) HandleGetBlocks(pp *Peer, msg *MsgDeSoGetBlocks) { - // TODO -} - func (cc *ConsensusController) HandleHeader(pp *Peer, msg *MsgDeSoHeader) { // TODO } @@ -566,7 +562,8 @@ func 
getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) //////////////////////////////////////////////////////////////////////////////// // TODO: delete all of the functions below. They are dummy stubbed out functions // needed by ConsensusController, but are implemented in other feature branches. -// We stub them out here to unblock consensus work. +// We stub them out here to unblock consensus work, and can delete them once other +// feature branches are merged. //////////////////////////////////////////////////////////////////////////////// func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, error) { From 14acb831998b74e9879af68ff4eb2322eb68ef82 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 20 Nov 2023 17:55:05 -0500 Subject: [PATCH 294/762] Add Unit Test Case for collections.ToMap (#832) --- collections/slice_test.go | 54 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/collections/slice_test.go b/collections/slice_test.go index 7932133e2..7b521bb6e 100644 --- a/collections/slice_test.go +++ b/collections/slice_test.go @@ -55,3 +55,57 @@ func TestSliceAny(t *testing.T) { require.True(t, Any(slice, predicate)) } } + +func TestSliceToMap(t *testing.T) { + // Create a struct to test the slice -> map transformation + type keyValueType struct { + Key string + Value string + } + + // Test empty slice + { + // Create a custom function extract the key from the struct + keyFn := func(val keyValueType) string { + return val.Key + } + + slice := []keyValueType{} + result := ToMap(slice, keyFn) + require.Equal(t, 0, len(result)) + } + + // Test slice with pointers + { + // Create a custom function extract the key from the struct + keyFn := func(val *keyValueType) string { + return val.Key + } + + slice := []*keyValueType{ + {Key: "a", Value: "1"}, + {Key: "b", Value: "2"}, + } + result := ToMap(slice, keyFn) + require.Equal(t, 2, 
len(result)) + require.Equal(t, "1", result["a"].Value) + require.Equal(t, "2", result["b"].Value) + } + + // Test slice with raw values + { + // Create a custom function extract the key from the struct + keyFn := func(val keyValueType) string { + return val.Key + } + + slice := []keyValueType{ + {Key: "a", Value: "1"}, + {Key: "b", Value: "2"}, + } + result := ToMap(slice, keyFn) + require.Equal(t, 2, len(result)) + require.Equal(t, "1", result["a"].Value) + require.Equal(t, "2", result["b"].Value) + } +} From 8b50fc12a8e763960a49571264f419c1b286638b Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 21 Nov 2023 17:32:29 -0500 Subject: [PATCH 295/762] Address DH Minor Feedback on FastHotStuffEventLoop (#833) --- consensus/event_loop.go | 27 ++++++++++----------------- consensus/integration_test_types.go | 9 +++++++++ consensus/scheduled_task.go | 1 + consensus/types.go | 5 +++++ consensus/types_internal.go | 20 ++++++++++++-------- consensus/utils.go | 24 +++++++++++------------- 6 files changed, 48 insertions(+), 38 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 14bfe278b..fdc7c7bc3 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -498,7 +498,7 @@ func (fc *fastHotStuffEventLoop) resetScheduledTasks() { numTimeouts := fc.currentView - fc.tip.block.GetView() - 1 // Compute the exponential back-off: nextTimeoutDuration * 2^numTimeouts - timeoutDuration = fc.timeoutBaseDuration * time.Duration(powerOfTwo(numTimeouts)) + timeoutDuration = fc.timeoutBaseDuration * time.Duration(powerOfTwo(numTimeouts, maxConsecutiveTimeouts)) } // Schedule the next crank timer task. This will run with currentView param. @@ -791,22 +791,15 @@ func (fc *fastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uin // Evict all locally stored votes and timeout messages with stale views. We can safely use the current // view to determine what is stale. 
The consensus mechanism will never construct a block with a view -// that's lower than its current view. Consider the following: -// - In the event the event update the chain tip, we will vote for that block and the view it was proposed in -// - In the event we locally time out a view, we will send a timeout message for that view +// that's lower than its current view. We can use the following to determine which votes & timeouts are +// stale: +// - The currentView value is the view that the next block is going to be proposed on +// - The next block must contain a QC of votes or aggregate QC of timeouts for the previous view +// - For votes, currentView = vote.GetView() + 1 +// - For timeouts, currentView = timeout.GetView() + 1 // -// In both cases, we will never roll back the chain tip, or decrement the current view to construct a -// conflicting block at that lower view that we have previously voted or timed out on. So we are safe to evict -// locally stored votes and timeout messages with stale views because we expect to never use them for -// block construction. -// -// The eviction works as follows: -// - Votes: if the next block were to be a regular block with a QC aggregated from votes, then the it must -// satisfy nextBlock.GetView() = tip.block.GetView() + 1, which means that currentView = tip.block.GetView() + 1. -// We can safely evict all votes where vote.GetView() < currentView - 1. -// - Timeouts: if the next block were be an empty block with a timeout QC aggregated from timeout messages, -// then it must satisfy nextBlock.GetView() = timeout.GetView() + 1. We can safely evict all timeout messages with -// currentView > timeout.GetView() + 1. +// Any votes or timeouts with a view that's less than currentView - 1 are stale because they cannot +// be used in the next block or any future blocks. 
func (fc *fastHotStuffEventLoop) evictStaleVotesAndTimeouts() { // Evict stale vote messages for blockHash, voters := range fc.votesSeenByBlockHash { @@ -904,5 +897,5 @@ func (fc *fastHotStuffEventLoop) fetchSafeBlockInfo(blockHash BlockHash) ( } func isStaleView(currentView uint64, testView uint64) bool { - return currentView > testView+1 + return testView < currentView-1 } diff --git a/consensus/integration_test_types.go b/consensus/integration_test_types.go index 63722faf8..3613e63e7 100644 --- a/consensus/integration_test_types.go +++ b/consensus/integration_test_types.go @@ -335,6 +335,9 @@ func (node *validatorNode) runEventSignalLoop() { } func (node *validatorNode) handleVoteEvent(event *FastHotStuffEvent) { + node.lock.Lock() + defer node.lock.Unlock() + payload := GetVoteSignaturePayload(event.View, event.TipBlockHash) signature, err := node.privateKey.Sign(payload[:]) if err != nil { @@ -390,6 +393,9 @@ func (node *validatorNode) broadcastTimeout(event *FastHotStuffEvent) { } func (node *validatorNode) handleVoteQCConstructionEvent(event *FastHotStuffEvent) { + node.lock.Lock() + defer node.lock.Unlock() + if !node.isBlockProposer { return } @@ -414,6 +420,9 @@ func (node *validatorNode) handleVoteQCConstructionEvent(event *FastHotStuffEven } func (node *validatorNode) handleTimeoutQCConstructionEvent(event *FastHotStuffEvent) { + node.lock.Lock() + defer node.lock.Unlock() + if !node.isBlockProposer { return } diff --git a/consensus/scheduled_task.go b/consensus/scheduled_task.go index baf970ebd..eb34748d4 100644 --- a/consensus/scheduled_task.go +++ b/consensus/scheduled_task.go @@ -65,6 +65,7 @@ func (t *ScheduledTask[TaskParam]) Cancel() { } t.timer = nil + t.duration = 0 } func (t *ScheduledTask[TaskParam]) GetDuration() time.Duration { diff --git a/consensus/types.go b/consensus/types.go index 4af9cd7c1..a4714f109 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -36,6 +36,11 @@ type FastHotStuffEvent struct { AggregateQC 
AggregateQuorumCertificate } +// The maximum number of consecutive timeouts that can occur before the event loop stops +// its exponential back-off. This is a safety valve that helps ensure that the event loop +// doesn't get stuck in a near indefinite back-off state. +const maxConsecutiveTimeouts = 16 + // Create an alias type of the 32 bit block hash so that the raw [32]byte type isn't // ambiguously repeated in the code base type BlockHashValue = [32]byte diff --git a/consensus/types_internal.go b/consensus/types_internal.go index 9e58523e3..7b98ce1a2 100644 --- a/consensus/types_internal.go +++ b/consensus/types_internal.go @@ -7,7 +7,7 @@ import ( ) ////////////////////////////////////////////////////////// -// BlockHash interface implementation for testing +// BlockHash interface implementation for unit tests ////////////////////////////////////////////////////////// type blockHash struct { @@ -19,7 +19,7 @@ func (bh *blockHash) GetValue() [32]byte { } ////////////////////////////////////////////////////////// -// Validator interface implementation for testing +// Validator interface implementation for unit tests ////////////////////////////////////////////////////////// type validator struct { @@ -36,7 +36,9 @@ func (v *validator) GetStakeAmount() *uint256.Int { } //////////////////////////////////////////////////////////////////////// -// AggregateQuorumCertificate interface implementation for internal use +// AggregateQuorumCertificate interface implementation for internal use. +// We use this type for unit tests, and to construct timeout QCs for +// external signaling. 
//////////////////////////////////////////////////////////////////////// type aggregateQuorumCertificate struct { @@ -63,7 +65,7 @@ func (qc *aggregateQuorumCertificate) GetAggregatedSignature() AggregatedSignatu } ////////////////////////////////////////////////////////// -// QuorumCertificate interface implementation for testing +// QuorumCertificate interface implementation for unit tests ////////////////////////////////////////////////////////// type quorumCertificate struct { @@ -102,7 +104,7 @@ func (as *aggregatedSignature) GetSignature() *bls.Signature { } ////////////////////////////////////////////////////////// -// Block interface implementation for testing +// Block interface implementation for unit tests ////////////////////////////////////////////////////////// type block struct { @@ -133,7 +135,7 @@ func (b *block) GetQC() QuorumCertificate { } ////////////////////////////////////////////////////////// -// VoteMessage interface implementation for testing +// VoteMessage interface implementation for unit tests ////////////////////////////////////////////////////////// type voteMessage struct { @@ -160,7 +162,7 @@ func (vm *voteMessage) GetSignature() *bls.Signature { } ////////////////////////////////////////////////////////// -// TimeoutMessage interface implementation for testing +// TimeoutMessage interface implementation for unit tests ////////////////////////////////////////////////////////// type timeoutMessage struct { @@ -187,7 +189,9 @@ func (tm *timeoutMessage) GetSignature() *bls.Signature { } ////////////////////////////////////////////////////////// -// Internal blockWithValidatorLookup type +// Internal blockWithValidatorLookup type. We use this type +// to bundle a block with its validator list and a lookup +// of validators by public key string. 
////////////////////////////////////////////////////////// type blockWithValidatorLookup struct { diff --git a/consensus/utils.go b/consensus/utils.go index 6ec8b603f..6eecae235 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -298,8 +298,7 @@ func isValidSignatureManyPublicKeys(publicKeys []*bls.PublicKey, signature *bls. // - Cq >= 2f + 1 // - 3Cq >= 6f + 3 // - 3Cq >= 2(3f + 1) + 1 -// - 3Cq >= 2N + 1 -// - Finally, this gives us the condition: 3Cq - 2N - 1 >= 0. Which is what we will verify in this function. +// - Finally, this gives us the condition: 3Cq >= 2N + 1. Which is what we will verify in this function. func isSuperMajorityStake(stake *uint256.Int, totalStake *uint256.Int) bool { // Both values must be > 0 if stake == nil || totalStake == nil || stake.IsZero() || totalStake.IsZero() { @@ -314,17 +313,12 @@ func isSuperMajorityStake(stake *uint256.Int, totalStake *uint256.Int) bool { // Compute 3Cq honestStakeComponent := uint256.NewInt().Mul(stake, uint256.NewInt().SetUint64(3)) - // Compute 2N + // Compute 2N + 1 totalStakeComponent := uint256.NewInt().Mul(totalStake, uint256.NewInt().SetUint64(2)) + totalStakeComponent = uint256.NewInt().Add(totalStakeComponent, uint256.NewInt().SetUint64(1)) - // Compute 3Cq - 2N - 1 - superMajorityConditionSum := uint256.NewInt().Sub( - uint256.NewInt().Sub(honestStakeComponent, totalStakeComponent), - uint256.NewInt().SetOne(), - ) - - // Check if 3Cq - 2N - 1 >= 0 - return superMajorityConditionSum.Sign() >= 0 + // Check if 3Cq >= 2N + 1 + return honestStakeComponent.Cmp(totalStakeComponent) >= 0 } func extractBlockHash(block BlockWithValidatorList) BlockHash { @@ -479,9 +473,13 @@ func generateRandomBytes(numBytes int) []byte { return randomBytes } -func powerOfTwo(n uint64) int64 { +func powerOfTwo(exponent uint64, maxExponent uint64) int64 { + if exponent > maxExponent { + return powerOfTwo(maxExponent, maxExponent) + } + result := int64(1) - for i := uint64(0); i < n; i++ { + for i := uint64(0); i 
< exponent; i++ { result *= 2 } return result From 32b6da33bb98c267caef3eaa9b7764d6e38fd470 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 29 Nov 2023 14:47:47 -0500 Subject: [PATCH 296/762] Make FastHotStuffEvent Emits Non-blocking (#836) * Make FastHotStuffEvents Emits Non-blocking * Cleaner comments --- consensus/event_loop.go | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index fdc7c7bc3..5281413d1 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -166,12 +166,12 @@ func (fc *fastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidatorList, saf // Signal the server that we can vote for the block. The server will decide whether to construct and // broadcast the vote. - fc.Events <- &FastHotStuffEvent{ + fc.emitEvent(&FastHotStuffEvent{ EventType: FastHotStuffEventTypeVote, TipBlockHash: fc.tip.block.GetBlockHash(), TipBlockHeight: fc.tip.block.GetHeight(), View: fc.tip.block.GetView(), - } + }) // Schedule the next crank timer and timeout scheduled tasks fc.resetScheduledTasks() @@ -337,7 +337,7 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { // Signal the server that we can construct a QC for the chain tip, and mark that we have // constructed a QC for the current view. fc.hasConstructedQCInCurrentView = true - fc.Events <- voteQCEvent + fc.emitEvent(voteQCEvent) } return nil @@ -423,7 +423,7 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) // Signal the server that we can construct a timeout QC for the current view, and mark // that we have constructed a QC for the current view. 
fc.hasConstructedQCInCurrentView = true - fc.Events <- timeoutQCEvent + fc.emitEvent(timeoutQCEvent) } return nil @@ -536,7 +536,7 @@ func (fc *fastHotStuffEventLoop) onCrankTimerTaskExecuted(blockConstructionView if voteQCEvent := fc.tryConstructVoteQCInCurrentView(); voteQCEvent != nil { // Signal the server that we can construct a QC for the chain tip fc.hasConstructedQCInCurrentView = true - fc.Events <- voteQCEvent + fc.emitEvent(voteQCEvent) return } @@ -545,7 +545,7 @@ func (fc *fastHotStuffEventLoop) onCrankTimerTaskExecuted(blockConstructionView if timeoutQCEvent := fc.tryConstructTimeoutQCInCurrentView(); timeoutQCEvent != nil { // Signal the server that we can construct a timeout QC for the current view fc.hasConstructedQCInCurrentView = true - fc.Events <- timeoutQCEvent + fc.emitEvent(timeoutQCEvent) return } } @@ -777,13 +777,13 @@ func (fc *fastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uin } // Signal the server that we are ready to time out - fc.Events <- &FastHotStuffEvent{ + fc.emitEvent(&FastHotStuffEvent{ EventType: FastHotStuffEventTypeTimeout, // The timeout event type View: timedOutView, // The view we timed out TipBlockHash: fc.tip.block.GetBlockHash(), // The last block we saw TipBlockHeight: fc.tip.block.GetHeight(), // The last block we saw QC: fc.tip.block.GetQC(), // The highest QC we have - } + }) // Cancel the timeout task. The server will reschedule it when it advances the view. fc.nextTimeoutTask.Cancel() @@ -896,6 +896,15 @@ func (fc *fastHotStuffEventLoop) fetchSafeBlockInfo(blockHash BlockHash) ( return false, nil, nil, nil } +// emitEvent emits the event via a non-blocking operation. This ensures that even if the Events channel +// is full, the emit operation completes without blocking. 
This guarantees that there will be no risk of +// deadlock when a thread holding the event loop's lock is blocked from emitting an event because another +// thread that needs to read an emitted event is blocked from doing so because it needs to first operate +// on the event loop. +func (fc *fastHotStuffEventLoop) emitEvent(event *FastHotStuffEvent) { + go func() { fc.Events <- event }() +} + func isStaleView(currentView uint64, testView uint64) bool { return testView < currentView-1 } From 27a3cd44a8e331faa683e51191a74f09ac48ab42 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 29 Nov 2023 15:19:07 -0500 Subject: [PATCH 297/762] Validate Validator Timeout Message (#838) --- consensus/event_loop.go | 24 +++++++++++++++ consensus/event_loop_test.go | 60 +++++++++++++++++++++++++----------- consensus/utils.go | 38 +++++++++++++++++------ consensus/utils_test.go | 16 +++++++--- 4 files changed, 106 insertions(+), 32 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 5281413d1..bc14bbb47 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -395,6 +395,30 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) ) } + // Check if the high QC's block hash is in our safeBlocks slice + // - If it is, then the high QC's block has already been validated and is safe to extend from + // - If it's not, then we have no knowledge of the block, or the block is not safe to extend from. + // This can happen if the timeout's creator is malicious, or if our node is far enough behind the + // blockchain to not have seen the high QC before other nodes have timed out. In either case, the + // simple and safe option is to reject the timeout and move on. 
+ isSafeBlock, _, _, validatorLookup := fc.fetchSafeBlockInfo(timeout.GetHighQC().GetBlockHash()) + if !isSafeBlock { + return errors.Errorf( + "FastHotStuffEventLoop.ProcessValidatorTimeout: Timeout from public key %s has an unknown high QC with view %d", + timeout.GetPublicKey().ToString(), + timeout.GetView(), + ) + } + + // Check if the timeout's public key is in the validator set. If it is not, then the sender is not a validator + // at the block height after the high QC. + if _, isValidator := validatorLookup[timeout.GetPublicKey().ToString()]; !isValidator { + return errors.Errorf( + "FastHotStuffEventLoop.ProcessValidatorTimeout: Sender %s for timeout message is not in the validator list", + timeout.GetPublicKey().ToString(), + ) + } + // Compute the value sha3-256(timeout.View, timeout.HighQC.View) timeoutSignaturePayload := GetTimeoutSignaturePayload(timeout.GetView(), timeout.GetHighQC().GetView()) diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index e4242c24e..71e0dbb5d 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -99,7 +99,7 @@ func TestInit(t *testing.T) { require.Equal(t, fc.tip.block.GetBlockHash().GetValue(), block.GetBlockHash().GetValue()) require.Equal(t, fc.tip.block.GetView(), uint64(2)) - require.Equal(t, fc.tip.block.GetHeight(), uint64(1)) + require.Equal(t, fc.tip.block.GetHeight(), uint64(2)) require.Equal(t, fc.crankTimerInterval, time.Duration(100)) require.Equal(t, fc.timeoutBaseDuration, time.Duration(101)) @@ -111,7 +111,7 @@ func TestInit(t *testing.T) { require.Equal(t, len(fc.safeBlocks), 1) require.Equal(t, fc.safeBlocks[0].block.GetBlockHash().GetValue(), block.GetBlockHash().GetValue()) require.Equal(t, fc.safeBlocks[0].block.GetView(), uint64(2)) - require.Equal(t, fc.safeBlocks[0].block.GetHeight(), uint64(1)) + require.Equal(t, fc.safeBlocks[0].block.GetHeight(), uint64(2)) require.Equal(t, len(fc.safeBlocks[0].validatorList), 2) require.Equal(t, 
len(fc.safeBlocks[0].validatorLookup), 2) } @@ -430,22 +430,28 @@ func TestProcessValidatorTimeout(t *testing.T) { fc := NewFastHotStuffEventLoop() + validatorPrivateKey1 := createDummyBLSPrivateKey() + validatorPrivateKey2 := createDummyBLSPrivateKey() + + validatorList := createValidatorListForPrivateKeys(validatorPrivateKey1, validatorPrivateKey2) + // Init the event loop { - // BlockHeight = 1, Current View = 3 - tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock}) + // BlockHeight = 3, Current View = 4 + genesisBlock := BlockWithValidatorList{createDummyBlock(2), validatorList} + tipBlock := BlockWithValidatorList{createBlockWithParent(genesisBlock.Block), validatorList} + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock, genesisBlock}) require.NoError(t, err) } // Start the event loop fc.Start() - // Current View = 4 + // Current View = 5 { currentView, err := fc.AdvanceViewOnTimeout() require.NoError(t, err) - require.Equal(t, uint64(4), currentView) + require.Equal(t, uint64(5), currentView) } // Test with malformed timeout @@ -455,18 +461,9 @@ func TestProcessValidatorTimeout(t *testing.T) { require.Contains(t, err.Error(), "Malformed timeout message") } - // Test invalid signature - { - timeout := createDummyTimeoutMessage(4) - timeout.signature = createDummyBLSSignature() - err := fc.ProcessValidatorTimeout(timeout) - require.Error(t, err) - require.Contains(t, err.Error(), "Invalid signature") - } - // Test with stale view { - timeout := createDummyTimeoutMessage(1) + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(3, validatorPrivateKey1, fc.tip.block.GetQC()) err := fc.ProcessValidatorTimeout(timeout) require.Error(t, err) require.Contains(t, err.Error(), "Timeout has a stale view") @@ -499,9 +496,36 @@ func TestProcessValidatorTimeout(t *testing.T) { 
require.Contains(t, err.Error(), "has already timed out for view") } + // Test unknown high QC + { + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.GetQC()) + timeout.highQC = createDummyQC(2, createDummyBlockHash()) + err := fc.ProcessValidatorTimeout(timeout) + require.Error(t, err) + require.Contains(t, err.Error(), "has an unknown high QC") + } + + // Test unknown public key in timeout message + { + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.GetQC()) + timeout.publicKey = createDummyBLSPublicKey() + err := fc.ProcessValidatorTimeout(timeout) + require.Error(t, err) + require.Contains(t, err.Error(), "is not in the validator list") + } + + // Test invalid signature + { + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.GetQC()) + timeout.signature = createDummyBLSSignature() + err := fc.ProcessValidatorTimeout(timeout) + require.Error(t, err) + require.Contains(t, err.Error(), "Invalid signature") + } + // Test happy path { - timeout := createDummyTimeoutMessage(4) + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.GetQC()) err := fc.ProcessValidatorTimeout(timeout) require.NoError(t, err) } diff --git a/consensus/utils.go b/consensus/utils.go index 6eecae235..2fdaba6b4 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -216,7 +216,8 @@ func isProperlyFormedTimeout(timeout TimeoutMessage) bool { return false } - return true + // The high QC must be properly formed on its own + return isProperlyFormedQC(timeout.GetHighQC()) } func isProperlyFormedQC(qc QuorumCertificate) bool { @@ -343,13 +344,17 @@ func validatorToPublicKeyString(validator Validator) string { } func createDummyValidatorList() []Validator { + return createValidatorListForPrivateKeys(createDummyBLSPrivateKey(), createDummyBLSPrivateKey()) +} + +func createValidatorListForPrivateKeys(pk1 *bls.PrivateKey, pk2 
*bls.PrivateKey) []Validator { validators := []*validator{ { - publicKey: createDummyBLSPublicKey(), + publicKey: pk1.PublicKey(), stakeAmount: uint256.NewInt().SetUint64(100), }, { - publicKey: createDummyBLSPublicKey(), + publicKey: pk2.PublicKey(), stakeAmount: uint256.NewInt().SetUint64(50), }, } @@ -363,11 +368,20 @@ func createDummyBlock(view uint64) *block { return &block{ blockHash: createDummyBlockHash(), view: view, - height: 1, + height: view, qc: createDummyQC(view-1, createDummyBlockHash()), } } +func createBlockWithParent(parentBlock Block) *block { + return &block{ + blockHash: createDummyBlockHash(), + view: parentBlock.GetView() + 1, + height: parentBlock.GetView() + 1, + qc: createDummyQC(parentBlock.GetView(), parentBlock.GetBlockHash()), + } +} + func createDummyVoteMessage(view uint64) *voteMessage { blockHash := createDummyBlockHash() signaturePayload := GetVoteSignaturePayload(view, blockHash) @@ -384,17 +398,21 @@ func createDummyVoteMessage(view uint64) *voteMessage { } func createDummyTimeoutMessage(view uint64) *timeoutMessage { - highQC := createDummyQC(view-1, createDummyBlockHash()) + return createTimeoutMessageWithPrivateKeyAndHighQC( + view, + createDummyBLSPrivateKey(), + createDummyQC(view-1, createDummyBlockHash()), + ) +} - signaturePayload := GetTimeoutSignaturePayload(view, highQC.view) - - blsPrivateKey, _ := bls.NewPrivateKey() - blsSignature, _ := blsPrivateKey.Sign(signaturePayload[:]) +func createTimeoutMessageWithPrivateKeyAndHighQC(view uint64, pk *bls.PrivateKey, highQC QuorumCertificate) *timeoutMessage { + signaturePayload := GetTimeoutSignaturePayload(view, highQC.GetView()) + blsSignature, _ := pk.Sign(signaturePayload[:]) return &timeoutMessage{ highQC: highQC, view: view, - publicKey: blsPrivateKey.PublicKey(), + publicKey: pk.PublicKey(), signature: blsSignature, } } diff --git a/consensus/utils_test.go b/consensus/utils_test.go index d8203082f..77c412e11 100644 --- a/consensus/utils_test.go +++ 
b/consensus/utils_test.go @@ -313,28 +313,36 @@ func TestIsProperlyFormedTimeout(t *testing.T) { // Test nil high QC { - timeout := createDummyTimeoutMessage(1) + timeout := createDummyTimeoutMessage(2) timeout.highQC = nil require.False(t, isProperlyFormedTimeout(timeout)) } // Test nil public key { - timeout := createDummyTimeoutMessage(1) + timeout := createDummyTimeoutMessage(2) timeout.publicKey = nil require.False(t, isProperlyFormedTimeout(timeout)) } // Test nil signature { - timeout := createDummyTimeoutMessage(1) + timeout := createDummyTimeoutMessage(2) timeout.signature = nil require.False(t, isProperlyFormedTimeout(timeout)) } + // Test malformed high QC + { + highQC := createDummyQC(1, createDummyBlockHash()) + highQC.aggregatedSignature = nil + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(2, createDummyBLSPrivateKey(), highQC) + require.False(t, isProperlyFormedTimeout(timeout)) + } + // Test happy path { - timeout := createDummyTimeoutMessage(1) + timeout := createDummyTimeoutMessage(2) require.True(t, isProperlyFormedTimeout(timeout)) } } From fe765808d993047fe94525e94d716bed671e612c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 29 Nov 2023 18:29:01 -0500 Subject: [PATCH 298/762] Validate High QC In AggregateQC Validation (#839) * Validate High QC In AggregateQC Validation * Minor fixes * Cross-validate views --- consensus/utils.go | 36 ++++++++++++++++++++++++++++++++---- consensus/utils_test.go | 10 +++++----- 2 files changed, 37 insertions(+), 9 deletions(-) diff --git a/consensus/utils.go b/consensus/utils.go index 2fdaba6b4..66c413740 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -53,6 +53,10 @@ func IsValidSuperMajorityAggregateQuorumCertificate(aggQC AggregateQuorumCertifi return false } + if IsValidSuperMajorityQuorumCertificate(aggQC.GetHighQC(), validators) { + return false + } + hasSuperMajorityStake, signerPublicKeys := 
isSuperMajorityStakeSignersList(aggQC.GetAggregatedSignature().GetSignersList(), validators) if !hasSuperMajorityStake { return false @@ -239,12 +243,36 @@ func isProperlyFormedAggregateQC(aggQC AggregateQuorumCertificate) bool { if isInterfaceNil(aggQC) { return false } - // The view must be non-zero and the high QC must be properly formed - // TODO: Do we need further validation on high qc views? such as non-zero? - if aggQC.GetView() == 0 || !isProperlyFormedQC(aggQC.GetHighQC()) || len(aggQC.GetHighQCViews()) == 0 { + // The view must be non-zero and the high QC views must be non-empty + if aggQC.GetView() == 0 || len(aggQC.GetHighQCViews()) == 0 { + return false + } + + // The high QC must be properly formed + if !isProperlyFormedQC(aggQC.GetHighQC()) { + return false + } + + // If there was a timeout, it means that we've skipped at least one view. The + // timed out view and the high QC's view cannot be consecutive. + if aggQC.GetView() <= aggQC.GetHighQC().GetView()+1 { return false } - return isProperlyFormedAggregateSignature(aggQC.GetAggregatedSignature()) + + // The aggregate signature must be properly formed + if !isProperlyFormedAggregateSignature(aggQC.GetAggregatedSignature()) { + return false + } + + // Validate that all of the high QC views are non-zero + for _, view := range aggQC.GetHighQCViews() { + if view == 0 { + return false + } + } + + // Happy path + return true } func isProperlyFormedAggregateSignature(agg AggregatedSignature) bool { diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 77c412e11..6e42266ee 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -94,7 +94,7 @@ func TestIsValidSuperMajorityQuorumCertificate(t *testing.T) { } func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { - // Test malformed QC + // Test malformed aggregate QC { require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(nil, createDummyValidatorList())) } @@ -134,7 +134,7 @@ func 
TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { signaturePayload := GetVoteSignaturePayload(view, dummyBlockHash) // Compute the aggregate signature payload - timeoutPayload := GetTimeoutSignaturePayload(view+1, view) + timeoutPayload := GetTimeoutSignaturePayload(view+2, view) validator1Signature, err := validatorPrivateKey1.Sign(signaturePayload[:]) require.NoError(t, err) @@ -152,7 +152,7 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { validator1TimeoutSignature, err := validatorPrivateKey1.Sign(timeoutPayload[:]) require.NoError(t, err) qc := aggregateQuorumCertificate{ - view: view + 1, + view: view + 2, highQC: &highQC, highQCViews: []uint64{view}, aggregatedSignature: &aggregatedSignature{ @@ -168,14 +168,14 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { validator1TimeoutSignature, err := validatorPrivateKey1.Sign(timeoutPayload[:]) require.NoError(t, err) // For fun, let's have validator 2 sign a timeout payload where its high QC is further behind. 
- validator2TimeoutPayload := GetTimeoutSignaturePayload(view+1, view-1) + validator2TimeoutPayload := GetTimeoutSignaturePayload(view+2, view-1) validator2TimeoutSignature, err := validatorPrivateKey2.Sign(validator2TimeoutPayload[:]) require.NoError(t, err) aggSig, err := bls.AggregateSignatures([]*bls.Signature{validator1TimeoutSignature, validator2TimeoutSignature}) require.NoError(t, err) qc := aggregateQuorumCertificate{ - view: view + 1, + view: view + 2, highQC: &highQC, highQCViews: []uint64{view, view - 1}, aggregatedSignature: &aggregatedSignature{ From 734d2700da10145f7061e3b771b048befa9a00ca Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 29 Nov 2023 19:05:53 -0500 Subject: [PATCH 299/762] Fix Supermajority Check in Aggregate QC (#840) --- consensus/utils.go | 2 +- consensus/utils_test.go | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/consensus/utils.go b/consensus/utils.go index 66c413740..a322a245a 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -53,7 +53,7 @@ func IsValidSuperMajorityAggregateQuorumCertificate(aggQC AggregateQuorumCertifi return false } - if IsValidSuperMajorityQuorumCertificate(aggQC.GetHighQC(), validators) { + if !IsValidSuperMajorityQuorumCertificate(aggQC.GetHighQC(), validators) { return false } diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 6e42266ee..3b12a28e2 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -138,12 +138,15 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { validator1Signature, err := validatorPrivateKey1.Sign(signaturePayload[:]) require.NoError(t, err) + validator2Signature, err := validatorPrivateKey2.Sign(signaturePayload[:]) + require.NoError(t, err) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{validator1Signature, validator2Signature}) highQC := quorumCertificate{ blockHash: dummyBlockHash, view: view, aggregatedSignature: 
&aggregatedSignature{ - signersList: bitset.NewBitset().FromBytes([]byte{0x1}), // 0b0001, which represents validator 1 - signature: validator1Signature, + signersList: bitset.NewBitset().FromBytes([]byte{0x3}), // 0b0011, which represents validators 1 and 2 + signature: aggSig, }, } @@ -172,7 +175,7 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { validator2TimeoutSignature, err := validatorPrivateKey2.Sign(validator2TimeoutPayload[:]) require.NoError(t, err) - aggSig, err := bls.AggregateSignatures([]*bls.Signature{validator1TimeoutSignature, validator2TimeoutSignature}) + timeoutAggSig, err := bls.AggregateSignatures([]*bls.Signature{validator1TimeoutSignature, validator2TimeoutSignature}) require.NoError(t, err) qc := aggregateQuorumCertificate{ view: view + 2, @@ -180,7 +183,7 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { highQCViews: []uint64{view, view - 1}, aggregatedSignature: &aggregatedSignature{ signersList: bitset.NewBitset().FromBytes([]byte{0x3}), // 0b0011, which represents validators 1 and 2 - signature: aggSig, + signature: timeoutAggSig, }, } require.True(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators)) From 7854c9c1284b0cfc884969fd45e8d6495a5b1c6e Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 29 Nov 2023 22:26:27 -0500 Subject: [PATCH 300/762] Validate HighQC in Timeout Messages on Receipt (#841) --- consensus/event_loop.go | 18 ++++++++++++++++-- consensus/event_loop_test.go | 12 +++++++++++- consensus/utils.go | 32 ++++++++++++++++++++++++++------ 3 files changed, 53 insertions(+), 9 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index bc14bbb47..2980d8cf9 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -401,7 +401,7 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) // This can happen if the timeout's creator is malicious, or if our 
node is far enough behind the // blockchain to not have seen the high QC before other nodes have timed out. In either case, the // simple and safe option is to reject the timeout and move on. - isSafeBlock, _, _, validatorLookup := fc.fetchSafeBlockInfo(timeout.GetHighQC().GetBlockHash()) + isSafeBlock, _, validatorList, validatorLookup := fc.fetchSafeBlockInfo(timeout.GetHighQC().GetBlockHash()) if !isSafeBlock { return errors.Errorf( "FastHotStuffEventLoop.ProcessValidatorTimeout: Timeout from public key %s has an unknown high QC with view %d", @@ -424,7 +424,21 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) // Verify the vote signature if !isValidSignatureSinglePublicKey(timeout.GetPublicKey(), timeout.GetSignature(), timeoutSignaturePayload[:]) { - return errors.New("FastHotStuffEventLoop.ProcessValidatorTimeout: Invalid signature") + return errors.Errorf( + "FastHotStuffEventLoop.ProcessValidatorTimeout: Invalid signature in timeout message from validator %s for view %d", + timeout.GetPublicKey().ToString(), + timeout.GetView(), + ) + } + + // Verify the high QC in the timeout message. We can use the validator list at the exact block height of + // the high QC's block hash. 
+ if !IsValidSuperMajorityQuorumCertificate(timeout.GetHighQC(), validatorList) { + return errors.Errorf( + "FastHotStuffEventLoop.ProcessValidatorTimeout: Invalid high QC received in timeout message from validator %s for view %d", + timeout.GetPublicKey().ToString(), + timeout.GetView(), + ) } // Cache the timeout message in case we need it for later diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 71e0dbb5d..2b8a88040 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -433,13 +433,14 @@ func TestProcessValidatorTimeout(t *testing.T) { validatorPrivateKey1 := createDummyBLSPrivateKey() validatorPrivateKey2 := createDummyBLSPrivateKey() + privateKeys := []*bls.PrivateKey{validatorPrivateKey1, validatorPrivateKey2} validatorList := createValidatorListForPrivateKeys(validatorPrivateKey1, validatorPrivateKey2) // Init the event loop { // BlockHeight = 3, Current View = 4 genesisBlock := BlockWithValidatorList{createDummyBlock(2), validatorList} - tipBlock := BlockWithValidatorList{createBlockWithParent(genesisBlock.Block), validatorList} + tipBlock := BlockWithValidatorList{createBlockWithParentAndValidators(genesisBlock.Block, privateKeys), validatorList} err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock, genesisBlock}) require.NoError(t, err) } @@ -523,6 +524,15 @@ func TestProcessValidatorTimeout(t *testing.T) { require.Contains(t, err.Error(), "Invalid signature") } + // Test invalid high QC + { + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.GetQC()) + timeout.highQC = createDummyQC(timeout.highQC.GetView(), timeout.highQC.GetBlockHash()) + err := fc.ProcessValidatorTimeout(timeout) + require.Error(t, err) + require.Contains(t, err.Error(), "Invalid high QC") + } + // Test happy path { timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.GetQC()) diff --git 
a/consensus/utils.go b/consensus/utils.go index a322a245a..c21e8f987 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -410,6 +410,15 @@ func createBlockWithParent(parentBlock Block) *block { } } +func createBlockWithParentAndValidators(parentBlock Block, privateKeys []*bls.PrivateKey) *block { + return &block{ + blockHash: createDummyBlockHash(), + view: parentBlock.GetView() + 1, + height: parentBlock.GetView() + 1, + qc: createQCForBlockHashWithValidators(parentBlock.GetView(), parentBlock.GetBlockHash(), privateKeys), + } +} + func createDummyVoteMessage(view uint64) *voteMessage { blockHash := createDummyBlockHash() signaturePayload := GetVoteSignaturePayload(view, blockHash) @@ -446,16 +455,27 @@ func createTimeoutMessageWithPrivateKeyAndHighQC(view uint64, pk *bls.PrivateKey } func createDummyQC(view uint64, blockHash BlockHash) *quorumCertificate { + return createQCForBlockHashWithValidators( + view, + blockHash, + []*bls.PrivateKey{createDummyBLSPrivateKey(), createDummyBLSPrivateKey()}, + ) +} + +func createQCForBlockHashWithValidators(view uint64, blockHash BlockHash, privateKeys []*bls.PrivateKey) *quorumCertificate { signaturePayload := GetVoteSignaturePayload(view, blockHash) - blsPrivateKey1, _ := bls.NewPrivateKey() - blsSignature1, _ := blsPrivateKey1.Sign(signaturePayload[:]) + signersList := bitset.NewBitset() + signatures := []*bls.Signature{} - blsPrivateKey2, _ := bls.NewPrivateKey() - blsSignature2, _ := blsPrivateKey2.Sign(signaturePayload[:]) + for ii, pk := range privateKeys { + signersList.Set(ii, true) - signersList := bitset.NewBitset().Set(0, true).Set(1, true) - aggregateSignature, _ := bls.AggregateSignatures([]*bls.Signature{blsSignature1, blsSignature2}) + signature, _ := pk.Sign(signaturePayload[:]) + signatures = append(signatures, signature) + } + + aggregateSignature, _ := bls.AggregateSignatures(signatures) return &quorumCertificate{ blockHash: blockHash, From c98e9ac857abf9eda37e83ea5de1f53795e2ed01 Mon Sep 17 
00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 1 Dec 2023 16:04:12 -0500 Subject: [PATCH 301/762] Fix Cross-Validation of Timed Out View And High QC View (#843) --- consensus/utils.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/consensus/utils.go b/consensus/utils.go index c21e8f987..f6adac8ae 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -253,9 +253,8 @@ func isProperlyFormedAggregateQC(aggQC AggregateQuorumCertificate) bool { return false } - // If there was a timeout, it means that we've skipped at least one view. The - // timed out view and the high QC's view cannot be consecutive. - if aggQC.GetView() <= aggQC.GetHighQC().GetView()+1 { + // The high QC's view must be less than the timed out view + if aggQC.GetHighQC().GetView() >= aggQC.GetView() { return false } From be7058650a2b30e4d777c5376f1f8494c5cce502 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 1 Dec 2023 16:23:13 -0500 Subject: [PATCH 302/762] Feature/pos process block (#742) * add block to best chain (#727) * add block to best chain * add test for add block to block index --------- Co-authored-by: Lazy Nina <> * ln/add-seek-over-leader-schedule (#728) Co-authored-by: Lazy Nina <> * add ValidateBlockLeader logic (#729) * add ValidateBlockLeader logic * add tests for validate block leader * address SA feedback, beef up comments, add more testing of edge cases --------- Co-authored-by: Lazy Nina <> * ln/validate-ancestors-exist (#730) * ln/validate-ancestors-exist * add test for validate ancestors exist * simplify validateAncestorsExists, simply check for parent * address SA feedback to simplify function --------- Co-authored-by: Lazy Nina <> * Validate QCs (#732) * Handle uncaught errors in fromBytes functions in pos_network.go (#733) * ln/check-parent-block-committed-status-in-integrity-checks (#734) * Update error formatting in pos process block (#735) * Handle reorg 
(#737) * Commit Rule (#739) * Clean up ProcessBlockPoS skeleton and fix small bugs (#741) * fix issues from merge * gofmt * fix duplicate defs * add createDummyAggQc back, delete dupe definition of createDummyValidatorSet * add go:build relic to integration_test.go * update canCommitGrandparent logic to avoid underflow (#750) Co-authored-by: Lazy Nina <> * Revert "Merge branch 'feature/pos-fast-hotstuff-consensus' into feature/pos-process-block" This reverts commit b87f71175665109129c80a5a8822c7401decc6fc, reversing changes made to 25c63087e7ae9076511db2b03842bc27154a6101. * fix tests that broke due to changes in header encoding (#760) Co-authored-by: Lazy Nina <> * Replace CommittedStatus with existing BlockStatus field (#755) * Replace CommittedStatus with existing BlockStatus field * fix get lineage test * address SA feedback * move StatusBlockCommitted to be at the end of the creation of BlockStatus values * clean up comments --------- Co-authored-by: Lazy Nina <> * Remove uncommitted blocks map (#756) * Remove uncommitted blocks map * fix fork height in test * clean up comments about uncommitted blocks map * update error message prefixes in addBlockToBlockIndex * address SA & Piotr feedback * rename functions to reflect naming conventions from SA * set block heights in TestGetLineageFromCommittedTip * fix tests * address SA feedback --------- Co-authored-by: Lazy Nina <> * modify skeleton based on DH feedback (#758) * modify skeleton based on DH feedback * don't need to write the block to badger a second time for orphan blocks after we call addBlockToBlockIndex * fix skeleton * address SA feedback * cleanup * provide nil event manager when connecting block * don't store uncommitted tip hash, only update tip hash in db on commit block --------- Co-authored-by: Lazy Nina <> * recursive validate func w/o side effects on chain other than adding to block index (#775) * recursive validate func w/o side effects on chain other than adding to block index * always store 
or store validate failed in validate block pos * address SA feedback * sa/recursive-validation-func-pr-review-feedback (#780) * sa/recursive-validation-func-pr-review-feedback * Address Nina's feedback * Apply suggestions from code review --------- Co-authored-by: Lazy Nina <81658138+lazynina@users.noreply.github.com> --------- Co-authored-by: Lazy Nina <> Co-authored-by: tholonious <99746187+tholonious@users.noreply.github.com> * Add internal func to simplify when we need to add a block as validate failed. Handle non-rule-error error from isValidBlockPoS (#781) * - Add internal func to handle the validate failed block return case. - If isValidBlockPoS doesn't return a RuleError, don't mark the block ValidateFailed * fix error message that said 'this should never happen' as that is no longer true * address SA feedback --------- Co-authored-by: Lazy Nina <> * refactor validate block leader so we can not special case rule errors (#783) * refactor validate block leader so we can not special case rule errors * address SA feedback, renaming deso block -> block * fix condition on isBlockProposerValid --------- Co-authored-by: Lazy Nina <> * check validate failed status, proposed in view and height when getting lineage to determine validity (#784) * check validate failed status, proposed in view and height when getting lineage to determine validity * add condition for RuleErrorAncestorBlockValidateFailed * address SA feedback --------- Co-authored-by: Lazy Nina <> * commit rule testing (#776) * commit rule testing * fix get lineage from committed tip test * add go:build relic tag to network_test.go * fix TestTryApplyNewTip to use pos block heights * move expectedHeaderVersion1 to db utils file to avoid relic issues * move utxo view flush outside of badger txn to avoid nested ancestral records flush issue * fix tests * add comments on test and test helper functions --------- Co-authored-by: Lazy Nina <> * Process Block e2e testing w/ recursive validate (#777) * Initial 
check in for full process block pos test * fix # of txns validations * fix test * okay really fix the tests * fix TestDecodeHeaderVersion0 * adjust block heights after call to setBalanceModelBlockHeights * fix tests * fix tests * fix name of test * address SA feedback --------- Co-authored-by: Lazy Nina <> * Add blockIndexByHeight to Blockchain struct, rename blockIndex to blockIndexByHash (#785) * Add blockIndexByHeight to Blockchain struct, rename blockIndex to blockIndexByHash * address SA Feedback --------- Co-authored-by: Lazy Nina <> * add get safe blocks function (#786) * add get safe blocks function * clean up - remove TODO and add comment --------- Co-authored-by: Lazy Nina <> * add validations for TxnConnectStatusIndex and TxnConnectStatusByIndexHash (#788) * fix renaming in this branch * fix renaming in this branch --------- Co-authored-by: Lazy Nina <> * try to validate orphans after running commit rule and before returning (#787) * try to validate orphans after running commit rule and before returning * address SA feedback * remove TODO --------- Co-authored-by: Lazy Nina <> * add basic integrity checks on orphans, if they fail, then mark them as validate failed (#789) * add basic integrity checks on orphans, if they fail, then mark them as validate failed * refactor orphan processing to separate processOrphanBlockPoS --------- Co-authored-by: Lazy Nina <> * add comments and cleanup (#791) Co-authored-by: Lazy Nina <> * Make collections.Reverse create a new slice instead of operating in place (#795) Co-authored-by: Lazy Nina <> * Address DH feedback (#800) * Address DH feedback * Apply suggestions from code review --------- Co-authored-by: Lazy Nina <> * All Block rewards mature immediately - no need to write block rewards to index (#802) Co-authored-by: Lazy Nina <> * Enhanced orphan validations (#803) Co-authored-by: Lazy Nina <> * Implement hasValidProposerPartialSig to validate the proposer vote partial signature in the header of a block (#804) 
Co-authored-by: Lazy Nina <> * Header needs to have ProposerRandomSeedSignature instead of hash (#805) Co-authored-by: Lazy Nina <> * Add hasValidProposerRandomSeedSignaturePoS and add to validation of non-orphan blocks (#806) Co-authored-by: Lazy Nina <> * Fix issues after merging lockups (#812) Co-authored-by: Lazy Nina <> * Add BLSPKIDEntry map to the view to enforce uniqueness on VotingPublicKey in validators (#809) Co-authored-by: Lazy Nina <> * Snapshot Validator BLSPKIDEntries (#813) Co-authored-by: Lazy Nina <> * Use BLS VotingPublicKey uniqueness constraint to avoid using PKIDs in orphan logic (#814) Co-authored-by: Lazy Nina <> * Replace GetBestChainTip with existing BlockTip function (#817) Co-authored-by: Lazy Nina <> * Update GetSafeBlocks to return only MsgDeSoHeaders (#823) Co-authored-by: Lazy Nina <> * Address DH feedback (#834) Co-authored-by: Lazy Nina <> * Reduce calls to storeBlockInBlockIndex (#835) Co-authored-by: Lazy Nina <> * Enhanced View validations (#842) Co-authored-by: Lazy Nina <> --------- Co-authored-by: Lazy Nina <> Co-authored-by: iamsofonias Co-authored-by: tholonious <99746187+tholonious@users.noreply.github.com> --- bls/signature.go | 14 + bls/signature_no_relic.go | 14 + collections/slice.go | 8 + lib/block_view.go | 33 + lib/block_view_flush.go | 15 + lib/block_view_lockups_test.go | 4 +- lib/block_view_test.go | 4 + lib/block_view_types.go | 6 +- lib/block_view_validator.go | 274 ++- lib/block_view_validator_test.go | 52 +- lib/blockchain.go | 184 +- lib/blockchain_test.go | 21 +- lib/db_utils.go | 108 +- lib/db_utils_test.go | 21 + lib/load_test.go | 4 +- lib/miner.go | 2 +- lib/network.go | 18 +- lib/network_test.go | 31 +- lib/pos_block_producer.go | 12 +- lib/pos_block_producer_test.go | 8 +- lib/pos_blockchain.go | 1436 ++++++++++++++ lib/pos_blockchain_test.go | 2733 +++++++++++++++++++++++++++ lib/pos_epoch.go | 28 +- lib/pos_epoch_complete_hook.go | 64 +- lib/pos_epoch_complete_hook_test.go | 104 +- 
lib/pos_epoch_test.go | 8 + lib/pos_random_seed.go | 43 +- lib/pos_random_seed_test.go | 8 +- lib/pos_snapshot_entries.go | 351 +++- lib/pos_staking_rewards.go | 2 +- lib/server.go | 20 +- lib/txindex.go | 11 +- 32 files changed, 5375 insertions(+), 266 deletions(-) create mode 100644 lib/pos_blockchain.go create mode 100644 lib/pos_blockchain_test.go diff --git a/bls/signature.go b/bls/signature.go index a3fe19282..a86a934f2 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -255,6 +255,16 @@ func (publicKey *PublicKey) IsEmpty() bool { return publicKey == nil || publicKey.flowPublicKey == nil } +type SerializedPublicKey string + +func (publicKey *PublicKey) Serialize() SerializedPublicKey { + return SerializedPublicKey(publicKey.ToString()) +} + +func (serializedPublicKey SerializedPublicKey) Deserialize() (*PublicKey, error) { + return new(PublicKey).FromString(string(serializedPublicKey)) +} + // // TYPES: Signature // @@ -341,6 +351,10 @@ func (signature *Signature) Copy() *Signature { } } +func (signature *Signature) IsEmpty() bool { + return signature == nil || signature.flowSignature == nil +} + func extractFlowPublicKeys(publicKeys []*PublicKey) []flowCrypto.PublicKey { flowPublicKeys := make([]flowCrypto.PublicKey, len(publicKeys)) for i, publicKey := range publicKeys { diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go index d264b0449..288714278 100644 --- a/bls/signature_no_relic.go +++ b/bls/signature_no_relic.go @@ -100,6 +100,16 @@ func (publicKey *PublicKey) IsEmpty() bool { panic(BLSNoRelicError) } +type SerializedPublicKey string + +func (publicKey *PublicKey) Serialize() SerializedPublicKey { + panic(BLSNoRelicError) +} + +func (serializedPublicKey SerializedPublicKey) Deserialize() (*PublicKey, error) { + panic(BLSNoRelicError) +} + // // TYPES: Signature // @@ -137,3 +147,7 @@ func (signature *Signature) Eq(other *Signature) bool { func (signature *Signature) Copy() *Signature { panic(BLSNoRelicError) } + +func (signature 
*Signature) IsEmpty() bool { + panic(BLSNoRelicError) +} diff --git a/collections/slice.go b/collections/slice.go index 95c552b80..409610352 100644 --- a/collections/slice.go +++ b/collections/slice.go @@ -31,3 +31,11 @@ func ToMap[TKey comparable, TValue any](slice []TValue, keyFn func(TValue) TKey) } return result } + +func Reverse[T any](input []T) []T { + output := make([]T, len(input)) + for ii := 0; ii < len(input); ii++ { + output[len(input)-1-ii] = input[ii] + } + return output +} diff --git a/lib/block_view.go b/lib/block_view.go index 3836c3dfe..ac006dbdc 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" + "github.com/deso-protocol/core/bls" "math" "math/big" "reflect" @@ -117,6 +118,9 @@ type UtxoView struct { // Validator mappings ValidatorPKIDToValidatorEntry map[PKID]*ValidatorEntry + // ValidatorBLSPublicKeyPKIDPairEntries is a mapping of BLS Public Key to BLSPublicKeyPKIDPairEntry. + // Used for enforcing uniqueness of BLS Public Keys in the validator set. + ValidatorBLSPublicKeyPKIDPairEntries map[bls.SerializedPublicKey]*BLSPublicKeyPKIDPairEntry // Stake mappings StakeMapKeyToStakeEntry map[StakeMapKey]*StakeEntry @@ -145,6 +149,11 @@ type UtxoView struct { // the given SnapshotAtEpochNumber. SnapshotValidatorSet map[SnapshotValidatorSetMapKey]*ValidatorEntry + // SnapshotValidatorBLSPublicKeyPKIDPairEntries is a map of + // to a BLSPublicKeyPKIDPairEntry. It contains the snapshot value of the BLSPublicKeyPKIDPairEntry + // of every validator that makes up the validator set at the given SnapshotAtEpochNumber. + SnapshotValidatorBLSPublicKeyPKIDPairEntries map[SnapshotValidatorBLSPublicKeyMapKey]*BLSPublicKeyPKIDPairEntry + // SnapshotValidatorSetTotalStakeAmountNanos is a map of SnapshotAtEpochNumber to the sum TotalStakeAmountNanos // for the validator set of for an epoch. // It contains the snapshot value of the GlobalActiveStakeAmountNanos at the given SnapshotAtEpochNumber. 
@@ -269,6 +278,8 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // ValidatorEntries bav.ValidatorPKIDToValidatorEntry = make(map[PKID]*ValidatorEntry) + // Validator BLS PublicKey to PKID + bav.ValidatorBLSPublicKeyPKIDPairEntries = make(map[bls.SerializedPublicKey]*BLSPublicKeyPKIDPairEntry) // StakeEntries bav.StakeMapKeyToStakeEntry = make(map[StakeMapKey]*StakeEntry) @@ -285,6 +296,9 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // SnapshotValidatorSet bav.SnapshotValidatorSet = make(map[SnapshotValidatorSetMapKey]*ValidatorEntry) + // SnapshotValidatorBLSPublicKeyPKIDPairEntries + bav.SnapshotValidatorBLSPublicKeyPKIDPairEntries = make(map[SnapshotValidatorBLSPublicKeyMapKey]*BLSPublicKeyPKIDPairEntry) + // SnapshotValidatorSetTotalStakeAmountNanos bav.SnapshotValidatorSetTotalStakeAmountNanos = make(map[uint64]*uint256.Int) @@ -572,6 +586,12 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.ValidatorPKIDToValidatorEntry[entryKey] = entry.Copy() } + // Copy the validator BLS PublicKey to PKID map + newView.ValidatorBLSPublicKeyPKIDPairEntries = make(map[bls.SerializedPublicKey]*BLSPublicKeyPKIDPairEntry, len(bav.ValidatorBLSPublicKeyPKIDPairEntries)) + for entryKey, entry := range bav.ValidatorBLSPublicKeyPKIDPairEntries { + newView.ValidatorBLSPublicKeyPKIDPairEntries[entryKey] = entry.Copy() + } + // Copy the StakeEntries newView.StakeMapKeyToStakeEntry = make(map[StakeMapKey]*StakeEntry, len(bav.StakeMapKeyToStakeEntry)) for entryKey, entry := range bav.StakeMapKeyToStakeEntry { @@ -606,6 +626,10 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.SnapshotValidatorSet[mapKey] = validatorEntry.Copy() } + for mapKey, blsPublicKeyPKIDPairEntry := range bav.SnapshotValidatorBLSPublicKeyPKIDPairEntries { + newView.SnapshotValidatorBLSPublicKeyPKIDPairEntries[mapKey] = blsPublicKeyPKIDPairEntry.Copy() + } + // Copy the SnapshotValidatorSetTotalStakeAmountNanos for epochNumber, totalStakeAmountNanos := range 
bav.SnapshotValidatorSetTotalStakeAmountNanos { newView.SnapshotValidatorSetTotalStakeAmountNanos[epochNumber] = totalStakeAmountNanos.Clone() @@ -4537,6 +4561,15 @@ func (bav *UtxoView) GetUnspentUtxoEntrysForPublicKey(pkBytes []byte) ([]*UtxoEn // but should be fixed soon. func (bav *UtxoView) GetSpendableDeSoBalanceNanosForPublicKey(pkBytes []byte, tipHeight uint32) (_spendableBalance uint64, _err error) { + // After the cut-over to Proof Of Stake, we no longer check for immature block rewards. + // All block rewards are immediately mature. + if tipHeight >= bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + balanceNanos, err := bav.GetDeSoBalanceNanosForPublicKey(pkBytes) + if err != nil { + return 0, errors.Wrap(err, "GetSpendableDeSoBalanceNanosForPublicKey: ") + } + return balanceNanos, nil + } // In order to get the spendable balance, we need to account for any immature block rewards. // We get these by starting at the chain tip and iterating backwards until we have collected // all the immature block rewards for this public key. diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index deef15fbb..bffe54a4c 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -50,6 +50,15 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error defer bav.Snapshot.StartAncestralRecordsFlush(true) } + return bav.FlushToDBWithoutAncestralRecordsFlushWithTxn(txn, blockHeight) +} + +// FlushToDBWithoutAncestralRecordsFlushWithTxn flushes the UtxoView to the DB without +// calling PrepareAncestralRecordsFlush. This SHOULD ONLY be used when flushing the +// view within a badger transaction that itself calls PrepareAncestralRecordsFlush +// and defer StartAncestralRecordsFlush. 
+func (bav *UtxoView) FlushToDBWithoutAncestralRecordsFlushWithTxn(txn *badger.Txn, blockHeight uint64) error { + // Only flush to BadgerDB if Postgres is disabled if bav.Postgres == nil { if err := bav._flushUtxosToDbWithTxn(txn, blockHeight); err != nil { @@ -150,6 +159,9 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushValidatorEntriesToDbWithTxn(txn, blockHeight); err != nil { return err } + if err := bav._flushValidatorBLSPublicKeyPKIDPairEntryMappingsWithTxn(txn, blockHeight); err != nil { + return err + } if err := bav._flushStakeEntriesToDbWithTxn(txn, blockHeight); err != nil { return err } @@ -180,6 +192,9 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error if err := bav._flushSnapshotStakesToRewardToDbWithTxn(txn, blockHeight); err != nil { return err } + if err := bav._flushSnapshotValidatorBLSPublicKeyPKIDPairEntryToDbWithTxn(txn, blockHeight); err != nil { + return err + } return nil } diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 5cd6f3674..96d9f72d3 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -2763,7 +2763,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { // Process the first block err = testMeta.miner.BlockProducer.SignBlock(blk1) require.NoError(t, err) - _, _, err = testMeta.chain.ProcessBlock(blk1, false) + _, _, _, err = testMeta.chain.ProcessBlock(blk1, false) require.NoError(t, err) // Validate state update @@ -2820,7 +2820,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { // Process the second block err = testMeta.miner.BlockProducer.SignBlock(blk2) require.NoError(t, err) - _, _, err = testMeta.chain.ProcessBlock(blk2, false) + _, _, _, err = testMeta.chain.ProcessBlock(blk2, false) require.NoError(t, err) // Validate state update diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 5e1cadd8b..32881709a 100644 --- a/lib/block_view_test.go 
+++ b/lib/block_view_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" + "github.com/deso-protocol/core/bls" "math" _ "net/http/pprof" "reflect" @@ -1128,6 +1129,9 @@ type TestMeta struct { expectedSenderBalances []uint64 savedHeight uint32 feeRateNanosPerKb uint64 + posMempool *PosMempool + posBlockProducer *PosBlockProducer + pubKeyToBLSKeyMap map[string]*bls.PrivateKey } func _executeAllTestRollbackAndFlush(testMeta *TestMeta) { diff --git a/lib/block_view_types.go b/lib/block_view_types.go index ef8086af8..8089ca613 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -127,9 +127,11 @@ const ( EncoderTypeBlock EncoderType = 49 // EncoderTypeTxn represents a transaction in the blockchain. EncoderTypeTxn EncoderType = 50 + // EncoderTypeBLSPublicKeyPKIDPairEntry represents a BLS public key <> PKID mapping. + EncoderTypeBLSPublicKeyPKIDPairEntry EncoderType = 51 // EncoderTypeEndBlockView encoder type should be at the end and is used for automated tests. - EncoderTypeEndBlockView EncoderType = 51 + EncoderTypeEndBlockView EncoderType = 52 ) // Txindex encoder types. 
@@ -281,6 +283,8 @@ func (encoderType EncoderType) New() DeSoEncoder { return &LockedBalanceEntry{} case EncoderTypeLockupYieldCurvePoint: return &LockupYieldCurvePoint{} + case EncoderTypeBLSPublicKeyPKIDPairEntry: + return &BLSPublicKeyPKIDPairEntry{} } // Txindex encoder types diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 87ca7c04a..9f33f0810 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -4,6 +4,7 @@ import ( "bytes" "crypto/sha256" "fmt" + "github.com/deso-protocol/core/consensus" "io" "math" "net/url" @@ -84,6 +85,14 @@ type ValidatorEntry struct { isDeleted bool } +func (validatorEntry *ValidatorEntry) GetPublicKey() *bls.PublicKey { + return validatorEntry.VotingPublicKey +} + +func (validatorEntry *ValidatorEntry) GetStakeAmount() *uint256.Int { + return validatorEntry.TotalStakeAmountNanos +} + func (validatorEntry *ValidatorEntry) Status() ValidatorStatus { // ValidatorEntry.Status() is a virtual/derived field that is not stored in // the database, but instead constructed from other ValidatorEntry fields. 
@@ -99,6 +108,14 @@ func (validatorEntry *ValidatorEntry) Status() ValidatorStatus { return ValidatorStatusActive } +func toConsensusValidators(validatorEntries []*ValidatorEntry) []consensus.Validator { + var consensusValidators []consensus.Validator + for _, validatorEntry := range validatorEntries { + consensusValidators = append(consensusValidators, validatorEntry) + } + return consensusValidators +} + type ValidatorStatus uint8 const ( @@ -243,6 +260,71 @@ func (validatorEntry *ValidatorEntry) GetEncoderType() EncoderType { return EncoderTypeValidatorEntry } +func (validatorEntry *ValidatorEntry) ToBLSPublicKeyPKIDPairEntry() *BLSPublicKeyPKIDPairEntry { + return &BLSPublicKeyPKIDPairEntry{ + BLSPublicKey: validatorEntry.VotingPublicKey.Copy(), + PKID: validatorEntry.ValidatorPKID.NewPKID(), + isDeleted: validatorEntry.isDeleted, + } +} + +// +// TYPES: BLSPublicKeyPKIDPairEntry +// + +type BLSPublicKeyPKIDPairEntry struct { + BLSPublicKey *bls.PublicKey + PKID *PKID + isDeleted bool +} + +func (blsPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry) Copy() *BLSPublicKeyPKIDPairEntry { + return &BLSPublicKeyPKIDPairEntry{ + BLSPublicKey: blsPublicKeyPKIDPairEntry.BLSPublicKey.Copy(), + PKID: blsPublicKeyPKIDPairEntry.PKID.NewPKID(), + isDeleted: blsPublicKeyPKIDPairEntry.isDeleted, + } +} + +func (blsPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry) ToMapKey() bls.SerializedPublicKey { + return blsPublicKeyPKIDPairEntry.BLSPublicKey.Serialize() +} + +func (blsPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry) ToSnapshotMapKey(snapshotAtEpoch uint64) SnapshotValidatorBLSPublicKeyMapKey { + return SnapshotValidatorBLSPublicKeyMapKey{ + SnapshotAtEpochNumber: snapshotAtEpoch, + ValidatorBLSPublicKey: blsPublicKeyPKIDPairEntry.BLSPublicKey.Serialize(), + } +} + +func (blsPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, 
EncodeBLSPublicKey(blsPublicKeyPKIDPairEntry.BLSPublicKey)...) + data = append(data, EncodeToBytes(blockHeight, blsPublicKeyPKIDPairEntry.PKID, skipMetadata...)...) + return data +} + +func (blsPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + blsPublicKeyPKIDPairEntry.BLSPublicKey, err = DecodeBLSPublicKey(rr) + if err != nil { + return errors.Wrapf(err, "BLSPublicKeyPKIDPairEntry.Decode: Problem reading BLSPublicKey: ") + } + blsPublicKeyPKIDPairEntry.PKID, err = DecodeDeSoEncoder(&PKID{}, rr) + if err != nil { + return errors.Wrapf(err, "BLSPublicKeyPKIDPairEntry.Decode: Problem reading PKID: ") + } + return nil +} + +func (blsPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (blsPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry) GetEncoderType() EncoderType { + return EncoderTypeBLSPublicKeyPKIDPairEntry +} + // // TYPES: RegisterAsValidatorMetadata // @@ -575,6 +657,10 @@ func (txindexMetadata *UnjailValidatorTxindexMetadata) GetEncoderType() EncoderT // DB UTILS // +// +// ValidatorEntry DB UTILS +// + func DBKeyForValidatorByPKID(validatorEntry *ValidatorEntry) []byte { key := append([]byte{}, Prefixes.PrefixValidatorByPKID...) key = append(key, validatorEntry.ValidatorPKID.ToBytes()...) @@ -759,6 +845,86 @@ func DBDeleteValidatorWithTxn(txn *badger.Txn, snap *Snapshot, validatorPKID *PK return nil } +// +// BLSPublicKeyPKIDPairEntry DB Utils +// + +func DBKeyForValidatorBLSPublicKeyToPKIDPairEntry(blsPublicKey *bls.PublicKey) []byte { + key := append([]byte{}, Prefixes.PrefixValidatorBLSPublicKeyPKIDPairEntry...) + key = append(key, blsPublicKey.ToBytes()...) 
+ return key +} + +func DBPutValidatorBLSPublicKeyPKIDPairEntryWithTxn( + txn *badger.Txn, + snap *Snapshot, + validatorBLSPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry, + blockHeight uint64, + eventManager *EventManager, +) error { + if validatorBLSPublicKeyPKIDPairEntry == nil { + // This should never happen but is a sanity check. + glog.Errorf("DBPutValidatorBLSPublicKeyPKIDPairEntryWithTxn: called with nil BLSPublicKeyPKIDPairEntry") + return nil + } + + key := DBKeyForValidatorBLSPublicKeyToPKIDPairEntry(validatorBLSPublicKeyPKIDPairEntry.BLSPublicKey) + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorBLSPublicKeyPKIDPairEntry), eventManager); err != nil { + return errors.Wrapf( + err, "DBPutValidatorBLSPublicKeyPKIDPairEntryWithTxn: problem storing BLSPublicKeyPKIDPairEntry in index PrefixValidatorBLSPublicKeyPKIDPairEntry", + ) + } + + return nil +} + +func DBDeleteBLSPublicKeyPKIDPairEntryWithTxn(txn *badger.Txn, snap *Snapshot, blsPublicKey *bls.PublicKey, eventManager *EventManager, entryIsDeleted bool) error { + if blsPublicKey == nil { + // This should never happen but is a sanity check. 
+ glog.Errorf("DBDeleteBLSPublicKeyPKIDPairEntryWithTxn: called with nil blsPublicKey") + return nil + } + + key := DBKeyForValidatorBLSPublicKeyToPKIDPairEntry(blsPublicKey) + if err := DBDeleteWithTxn(txn, snap, key, eventManager, entryIsDeleted); err != nil { + return errors.Wrapf( + err, "DBDeleteBLSPublicKeyPKIDPairEntryWithTxn: problem deleting BLSPublicKeyPKIDPairEntry from index PrefixValidatorBLSPublicKeyPKIDPairEntry", + ) + } + return nil +} + +func DBGetValidatorBLSPublicKeyPKIDPairEntry(handle *badger.DB, snap *Snapshot, blsPublicKey *bls.PublicKey) (*BLSPublicKeyPKIDPairEntry, error) { + var ret *BLSPublicKeyPKIDPairEntry + err := handle.View(func(txn *badger.Txn) error { + var innerErr error + ret, innerErr = DBGetValidatorBLSPublicKeyPKIDPairEntryWithTxn(txn, snap, blsPublicKey) + return innerErr + }) + return ret, err +} + +func DBGetValidatorBLSPublicKeyPKIDPairEntryWithTxn(txn *badger.Txn, snap *Snapshot, blsPublicKey *bls.PublicKey) (*BLSPublicKeyPKIDPairEntry, error) { + // Retrieve ValidatorEntry from db. + key := DBKeyForValidatorBLSPublicKeyToPKIDPairEntry(blsPublicKey) + validatorBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + // We don't want to error if the key isn't found. Instead, return nil. + if errors.Is(err, badger.ErrKeyNotFound) { + return nil, nil + } + return nil, errors.Wrapf(err, "DBGetValidatorBLSPublicKeyPKIDPairEntryWithTxn: problem retrieving BLSPublicKeyPKIDPairEntry") + } + + // Decode ValidatorEntry from bytes. 
+ blsPublicKeyPKIDPairEntry := &BLSPublicKeyPKIDPairEntry{} + rr := bytes.NewReader(validatorBytes) + if exist, err := DecodeFromBytes(blsPublicKeyPKIDPairEntry, rr); !exist || err != nil { + return nil, errors.Wrapf(err, "DBGetValidatorBLSPublicKeyPKIDPairEntryWithTxn: problem decoding BLSPublicKeyPKIDPairEntry") + } + return blsPublicKeyPKIDPairEntry, nil +} + // // BLOCKCHAIN UTILS // @@ -1055,7 +1221,8 @@ func (bav *UtxoView) _connectRegisterAsValidator( } // Delete the existing ValidatorEntry, if exists. There will be an existing ValidatorEntry // if the transactor is updating their ValidatorEntry. There will not be one if the transactor - // is registering a ValidatorEntry for the first time (or it was previously unregistered). + // is registering a ValidatorEntry for the first time (or it was previously unregistered). If + // we have a prevValidatorEntry, we know there must be a prevBLSPublicKeyPKIDPairEntry as well. // Note that we don't need to check isDeleted because the Get returns nil if isDeleted=true. if prevValidatorEntry != nil { bav._deleteValidatorEntryMappings(prevValidatorEntry) @@ -1635,6 +1802,16 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata( } } + // Error if VotingPublicKey is already taken. + validatorBLSPublicKeyPKIDPairEntry, err := bav.GetBLSPublicKeyPKIDPairEntry(metadata.VotingPublicKey) + if validatorBLSPublicKeyPKIDPairEntry != nil { + // If there is already a BLS PKID Entry for the provided VotingPublicKey, + // we want to make sure that it has the same PKID as the same as the TransactorPKIDEntry. 
+ if !validatorBLSPublicKeyPKIDPairEntry.PKID.Eq(transactorPKIDEntry.PKID) { + return errors.Wrap( + RuleErrorVotingPublicKeyDuplicate, "UtxoView.IsValidRegisterAsValidatorMetadata: ") + } + } return nil } @@ -1654,6 +1831,15 @@ func (bav *UtxoView) IsValidUnregisterAsValidatorMetadata(transactorPublicKey [] return errors.Wrapf(RuleErrorValidatorNotFound, "UtxoView.IsValidUnregisterAsValidatorMetadata: ") } + // Validate BLSPublicKeyPKIDPairEntry exists + blsPublicKeyPKIDPairEntry, err := bav.GetBLSPublicKeyPKIDPairEntry(validatorEntry.VotingPublicKey) + if err != nil { + return errors.Wrap(err, "UtxoView.IsValidUnregisterAsValidatorMetadata: ") + } + if blsPublicKeyPKIDPairEntry == nil { + return errors.Wrap(RuleErrorValidatorBLSPublicKeyPKIDPairEntryNotFound, "UtxoView.IsValidUnregisterAsValidatorMetadata: ") + } + return nil } @@ -1800,6 +1986,31 @@ func (bav *UtxoView) GetValidatorByPublicKey(validatorPublicKey *PublicKey) (*Va return validatorEntry, nil } +func (bav *UtxoView) GetBLSPublicKeyPKIDPairEntry(blsPublicKey *bls.PublicKey) (*BLSPublicKeyPKIDPairEntry, error) { + blsPublicKeyPKIDPairEntry, exists := bav.ValidatorBLSPublicKeyPKIDPairEntries[blsPublicKey.Serialize()] + if exists { + if blsPublicKeyPKIDPairEntry.isDeleted { + return nil, nil + } + // If we get to this point, we found a matching + // !isDeleted BLSPublicKeyPKIDPairEntry for the given blsPublicKey. + return blsPublicKeyPKIDPairEntry, nil + } + // At this point, we know there was no matching BLSPublicKeyPKIDPairEntry in the view. + + // If no BLSPublicKeyPKIDPairEntry (either isDeleted or !isDeleted) was found + // in the UtxoView for the given blsPublicKey, check the database. 
+ dbBLSPublicKeyPKIDPairEntry, err := DBGetValidatorBLSPublicKeyPKIDPairEntry(bav.Handle, bav.Snapshot, blsPublicKey) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GetBLSPublicKeyPKIDPairEntry: ") + } + if dbBLSPublicKeyPKIDPairEntry != nil { + // Cache the BLSPublicKeyPKIDPairEntry from the db in the UtxoView. + bav._setValidatorBLSPublicKeyPKIDPairEntryMappings(dbBLSPublicKeyPKIDPairEntry) + } + return dbBLSPublicKeyPKIDPairEntry, nil +} + func (bav *UtxoView) GetTopActiveValidatorsByStakeAmount(limit uint64) ([]*ValidatorEntry, error) { // Validate limit param. if limit == uint64(0) { @@ -1865,10 +2076,11 @@ func (bav *UtxoView) JailAllInactiveSnapshotValidators(blockHeight uint64) error return nil } + // TODO: We can replace this with a call to GetAllSnapshotValidatorSetEntriesByStake // Fetch the ValidatorSetMaxNumValidators from the snapshot global params. We use the snapshot global // params here because the value used to snapshot the size of the validator set was snapshotted along // with the validator set. - snapshotGlobalParams, err := bav.GetSnapshotGlobalParamsEntry() + snapshotGlobalParams, err := bav.GetCurrentSnapshotGlobalParamsEntry() if err != nil { return errors.Wrapf(err, "UtxoView.JailAllInactiveSnapshotValidators: error retrieving SnapshotGlobalParamsEntry: ") } @@ -1993,6 +2205,10 @@ func (bav *UtxoView) _setValidatorEntryMappings(validatorEntry *ValidatorEntry) return } bav.ValidatorPKIDToValidatorEntry[*validatorEntry.ValidatorPKID] = validatorEntry + + // We always construct the BLSPublicKeyPKIDPairEntry from the ValidatorEntry. This is to + // ensure that the two always line up. 
+ bav._setValidatorBLSPublicKeyPKIDPairEntryMappings(validatorEntry.ToBLSPublicKeyPKIDPairEntry()) } func (bav *UtxoView) _deleteValidatorEntryMappings(validatorEntry *ValidatorEntry) { @@ -2050,6 +2266,58 @@ func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHei return nil } +// Note that we only explicitly call this _set function when we are caching a BLSPublicKeyPKIDPairEntry +// that we fetched from badger. We should never call this explicitly in transaction connection +// logic as _setValidatorEntryMappings handles this for us. +func (bav *UtxoView) _setValidatorBLSPublicKeyPKIDPairEntryMappings(blsPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry) { + if blsPublicKeyPKIDPairEntry == nil { + glog.Errorf("_setValidatorBLSPublicKeyPKIDPairEntryMappings: called with nil entry, this should never happen") + return + } + bav.ValidatorBLSPublicKeyPKIDPairEntries[blsPublicKeyPKIDPairEntry.ToMapKey()] = blsPublicKeyPKIDPairEntry +} + +func (bav *UtxoView) _flushValidatorBLSPublicKeyPKIDPairEntryMappingsWithTxn(txn *badger.Txn, blockHeight uint64) error { + // Delete all entries in the ValidatorBLSPublicKeyPKIDPairEntries UtxoView map. + for blsPublicKeyIter, blsPublicKeyPKIDPairEntryIter := range bav.ValidatorBLSPublicKeyPKIDPairEntries { + // Make a copy of the iterators since we make references to them below. + blsPublicKey := blsPublicKeyIter + blsPublicKeyPKIDPairEntry := *blsPublicKeyPKIDPairEntryIter + + // Sanity-check that the entry matches the map key. + blsPublicKeyInEntry := blsPublicKeyPKIDPairEntry.BLSPublicKey.Serialize() + if blsPublicKeyInEntry != blsPublicKey { + return fmt.Errorf( + "_flushValidatorBLSPublicKeyPKIDPairEntryMappingsWithTxn: BLSPublicKeyPKIDPairEntry key %v doesn't match MapKey %v", + &blsPublicKeyInEntry, + &blsPublicKey, + ) + } + + // Delete the existing mappings in the db for this BLSPublicKey. They + // will be re-added if the corresponding entry in memory has isDeleted=false. 
+ if err := DBDeleteBLSPublicKeyPKIDPairEntryWithTxn(txn, bav.Snapshot, blsPublicKeyPKIDPairEntry.BLSPublicKey, bav.EventManager, blsPublicKeyPKIDPairEntry.isDeleted); err != nil { + return errors.Wrap(err, "_flushValidatorBLSPublicKeyPKIDPairEntryMappingsWithTxn: ") + } + } + + // Set any !isDeleted BLSPublicKeyPKIDPairEntries in the ValidatorBLSPublicKeyPKIDPairEntries UtxoView map. + for _, blsPublicKeyPKIDPairEntryIter := range bav.ValidatorBLSPublicKeyPKIDPairEntries { + blsPublicKeyPKIDPairEntry := *blsPublicKeyPKIDPairEntryIter + if blsPublicKeyPKIDPairEntry.isDeleted { + // If BLSPublicKeyPKIDPairEntry.isDeleted then there's nothing to + // do because we already deleted the entry above. + } else { + // If !BLSPublicKeyPKIDPairEntry.isDeleted then we put the + // corresponding mappings for it into the db. + if err := DBPutValidatorBLSPublicKeyPKIDPairEntryWithTxn(txn, bav.Snapshot, &blsPublicKeyPKIDPairEntry, blockHeight, bav.EventManager); err != nil { + return errors.Wrap(err, "_flushValidatorBLSPublicKeyPKIDPairEntryMappingsWithTxn: ") + } + } + } + return nil +} + // // MEMPOOL UTILS // @@ -2273,10 +2541,12 @@ const RuleErrorValidatorInvalidDomain RuleError = "RuleErrorValidatorInvalidDoma const RuleErrorValidatorDuplicateDomains RuleError = "RuleErrorValidatorDuplicateDomains" const RuleErrorValidatorInvalidCommissionBasisPoints RuleError = "RuleErrorValidatorInvalidCommissionBasisPoints" const RuleErrorValidatorNotFound RuleError = "RuleErrorValidatorNotFound" +const RuleErrorValidatorBLSPublicKeyPKIDPairEntryNotFound RuleError = "RuleErrorValidatorBLSPublicKeyPKIDPairEntryNotFound" const RuleErrorValidatorMissingVotingPublicKey RuleError = "RuleErrorValidatorMissingVotingPublicKey" const RuleErrorValidatorMissingVotingAuthorization RuleError = "RuleErrorValidatorMissingVotingAuthorization" const RuleErrorValidatorInvalidVotingAuthorization RuleError = "RuleErrorValidatorInvalidVotingAuthorization" const 
RuleErrorValidatorDisablingExistingDelegatedStakers RuleError = "RuleErrorValidatorDisablingExistingDelegatedStakers" +const RuleErrorVotingPublicKeyDuplicate RuleError = "RuleErrorVotingPublicKeyDuplicate" const RuleErrorUnjailingNonjailedValidator RuleError = "RuleErrorUnjailingNonjailedValidator" const RuleErrorUnjailingValidatorTooEarly RuleError = "RuleErrorUnjailingValidatorTooEarly" diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 2dae83ea3..35c48d203 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -30,6 +30,9 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // Local variables var registerMetadata *RegisterAsValidatorMetadata var validatorEntry *ValidatorEntry + var blsPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry + var prevBLSPublicKey *bls.PublicKey + var prevBLSPrivateKey *bls.PrivateKey var validatorEntries []*ValidatorEntry var err error @@ -226,6 +229,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { extraData := map[string][]byte{"TestKey": []byte("TestValue1")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) require.NoError(t, err) + prevBLSPublicKey = votingPublicKey.Copy() } { // Query: retrieve ValidatorEntry by PKID @@ -236,6 +240,12 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Equal(t, string(validatorEntry.Domains[0]), "https://example.com") require.False(t, validatorEntry.DisableDelegatedStake) require.Equal(t, string(validatorEntry.ExtraData["TestKey"]), "TestValue1") + + // Query: retrieve the BLSPublicKeyPKIDPairEntry for the validator's VotingPublicKey + blsPublicKeyPKIDPairEntry, err = utxoView().GetBLSPublicKeyPKIDPairEntry(validatorEntry.VotingPublicKey) + require.NoError(t, err) + require.True(t, blsPublicKeyPKIDPairEntry.BLSPublicKey.Eq(validatorEntry.VotingPublicKey)) + require.True(t, 
blsPublicKeyPKIDPairEntry.PKID.Eq(validatorEntry.ValidatorPKID)) } { // Query: retrieve top active ValidatorEntries by stake. @@ -246,7 +256,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { } { // Happy path: update a validator - votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) + votingPrivateKey, votingPublicKey, votingAuthorization := _generateVotingPrivateKeyPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte("https://example1.com"), []byte("https://example2.com")}, DisableDelegatedStake: false, @@ -256,6 +266,8 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { extraData := map[string][]byte{"TestKey": []byte("TestValue2")} _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, extraData, flushToDB) require.NoError(t, err) + prevBLSPublicKey = votingPublicKey.Copy() + prevBLSPrivateKey = votingPrivateKey } { // Query: retrieve ValidatorEntry by PKID, make sure it has been updated @@ -267,6 +279,28 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Equal(t, string(validatorEntry.Domains[1]), "https://example2.com") require.False(t, validatorEntry.DisableDelegatedStake) require.Equal(t, string(validatorEntry.ExtraData["TestKey"]), "TestValue2") + + // Query: retrieve the BLSPublicKeyPKIDPairEntry for the validator's VotingPublicKey + // make sure it has been updated. 
+ blsPublicKeyPKIDPairEntry, err = utxoView().GetBLSPublicKeyPKIDPairEntry(validatorEntry.VotingPublicKey) + require.NoError(t, err) + require.True(t, blsPublicKeyPKIDPairEntry.BLSPublicKey.Eq(validatorEntry.VotingPublicKey)) + require.True(t, blsPublicKeyPKIDPairEntry.PKID.Eq(validatorEntry.ValidatorPKID)) + } + { + // Sad path: register validator with same VotingPublicKey as m0 + var votingAuthorization *bls.Signature + votingAuthorization, err = prevBLSPrivateKey.Sign(CreateValidatorVotingAuthorizationPayload(m1PkBytes)) + require.NoError(t, err) + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://example.com")}, + DisableDelegatedStake: false, + VotingPublicKey: prevBLSPublicKey, + VotingAuthorization: votingAuthorization, + } + _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerMetadata, nil, flushToDB) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorVotingPublicKeyDuplicate) } { // Sad path: unregister validator that doesn't exist @@ -291,6 +325,12 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Nil(t, validatorEntry) } + { + // Query: retrieve the BLSPublicKeyPKIDPairEntry for the validator's VotingPublicKey + blsPublicKeyPKIDPairEntry, err = utxoView().GetBLSPublicKeyPKIDPairEntry(prevBLSPublicKey) + require.NoError(t, err) + require.Nil(t, blsPublicKeyPKIDPairEntry) + } { // Query: retrieve top active ValidatorEntries by stake validatorEntries, err = utxoView().GetTopActiveValidatorsByStakeAmount(1) @@ -1983,3 +2023,13 @@ func _generateVotingPublicKeyAndAuthorization(t *testing.T, transactorPkBytes [] require.NoError(t, err) return votingPublicKey, votingAuthorization } + +func _generateVotingPrivateKeyPublicKeyAndAuthorization(t *testing.T, transactorPkBytes []byte) (*bls.PrivateKey, *bls.PublicKey, *bls.Signature) { + blsPrivateKey, err := bls.NewPrivateKey() + require.NoError(t, err) + votingPublicKey := 
blsPrivateKey.PublicKey() + votingAuthorizationPayload := CreateValidatorVotingAuthorizationPayload(transactorPkBytes) + votingAuthorization, err := blsPrivateKey.Sign(votingAuthorizationPayload) + require.NoError(t, err) + return blsPrivateKey, votingPublicKey, votingAuthorization +} diff --git a/lib/blockchain.go b/lib/blockchain.go index c5fb3ee41..d7052c0f7 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5,6 +5,7 @@ import ( "container/list" "encoding/hex" "fmt" + "github.com/deso-protocol/core/collections" "math" "math/big" "reflect" @@ -65,18 +66,43 @@ const ( // don't store orphan headers and therefore any header that we do // have in our node index will be known definitively to be valid or // invalid one way or the other. - StatusHeaderValidated = 1 << iota - StatusHeaderValidateFailed + StatusHeaderValidated = 1 << 0 + StatusHeaderValidateFailed = 1 << 1 - StatusBlockProcessed - StatusBlockStored - StatusBlockValidated - StatusBlockValidateFailed + StatusBlockProcessed = 1 << 2 // Process means that the block is not an orphan and has been processed. This helps prevent us from reprocessing a block that we've already attempted to validate and add to the block index. + StatusBlockStored = 1 << 3 // Stored means that the block has been added to the block index and stored in the DB. + StatusBlockValidated = 1 << 4 // Validated means that the block has passed validations and is eligible to be part of the best chain. + StatusBlockValidateFailed = 1 << 5 // Validate Failed means that the block did not pass validations and will never be part of the best chain. - StatusBitcoinHeaderValidated // Deprecated - StatusBitcoinHeaderValidateFailed // Deprecated + StatusBitcoinHeaderValidated = 1 << 6 // Deprecated + StatusBitcoinHeaderValidateFailed = 1 << 7 // Deprecated + + StatusBlockCommitted = 1 << 8 // Committed means that the block has been committed to the blockchain according to the Fast HotStuff commit rule. 
Only set on blocks after the cutover for PoS ) +// IsStored returns true if the BlockNode has been added to the blockIndexByHash and stored in the DB. +func (nn *BlockNode) IsStored() bool { + return nn.Status&StatusBlockStored != 0 +} + +// IsValidated returns true if a BlockNode has passed all validations. A BlockNode that is validated is +// generally always stored first. +func (nn *BlockNode) IsValidated() bool { + return nn.Status&StatusBlockValidated != 0 +} + +// IsValidateFailed returns true if a BlockNode has failed validations. A BlockNode that is validate failed +// will never be added to the best chain. +func (nn *BlockNode) IsValidateFailed() bool { + return nn.Status&StatusBlockValidateFailed != 0 +} + +// IsCommitted returns true if a BlockNode has passed all validations, and it has been committed to +// the Blockchain according to the Fast HotStuff commit rule. +func (nn *BlockNode) IsCommitted() bool { + return nn.Status&StatusBlockCommitted != 0 || !blockNodeProofOfStakeCutoverMigrationTriggered(nn.Height) +} + // IsFullyProcessed determines if the BlockStatus corresponds to a fully processed and stored block. func (blockStatus BlockStatus) IsFullyProcessed() bool { return blockStatus&StatusHeaderValidated != 0 && @@ -287,6 +313,9 @@ func (nn *BlockNode) String() string { tstamp, nn.Header.Height, nn.Hash, parentHash, nn.Status, nn.CumWork) } +// NewBlockNode is a helper function to create a BlockNode +// when running PoW consensus. All blocks in the PoW consensus +// have a committed status of COMMITTED. // TODO: Height not needed in this since it's in the header. func NewBlockNode( parent *BlockNode, @@ -432,8 +461,11 @@ type Blockchain struct { // These should only be accessed after acquiring the ChainLock. // // An in-memory index of the "tree" of blocks we are currently aware of. - // This index includes forks and side-chains but does not include unconnectedTxns. 
- blockIndex map[BlockHash]*BlockNode + // This index includes forks and side-chains. + blockIndexByHash map[BlockHash]*BlockNode + // blockIndexByHeight is an in-memory map of block height to block nodes. This is + // used to quickly find the safe blocks from which the chain can be extended for PoS + blockIndexByHeight map[uint64]map[BlockHash]*BlockNode // An in-memory slice of the blocks on the main chain only. The end of // this slice is the best known tip that we have at any given time. bestChain []*BlockNode @@ -461,14 +493,52 @@ type Blockchain struct { timer *Timer } -func (bc *Blockchain) CopyBlockIndex() map[BlockHash]*BlockNode { - newBlockIndex := make(map[BlockHash]*BlockNode) - for kk, vv := range bc.blockIndex { - newBlockIndex[kk] = vv +func (bc *Blockchain) addNewBlockNodeToBlockIndex(blockNode *BlockNode) { + bc.blockIndexByHash[*blockNode.Hash] = blockNode + if _, exists := bc.blockIndexByHeight[uint64(blockNode.Height)]; !exists { + bc.blockIndexByHeight[uint64(blockNode.Height)] = make(map[BlockHash]*BlockNode) + } + bc.blockIndexByHeight[uint64(blockNode.Height)][*blockNode.Hash] = blockNode +} + +func (bc *Blockchain) CopyBlockIndexes() (_blockIndexByHash map[BlockHash]*BlockNode, _blockIndexByHeight map[uint64]map[BlockHash]*BlockNode) { + newBlockIndexByHash := make(map[BlockHash]*BlockNode) + newBlockIndexByHeight := make(map[uint64]map[BlockHash]*BlockNode) + for kk, vv := range bc.blockIndexByHash { + newBlockIndexByHash[kk] = vv + blockHeight := uint64(vv.Height) + if _, exists := newBlockIndexByHeight[blockHeight]; !exists { + newBlockIndexByHeight[blockHeight] = make(map[BlockHash]*BlockNode) + } + newBlockIndexByHeight[blockHeight][kk] = vv + } + return newBlockIndexByHash, newBlockIndexByHeight +} + +func (bc *Blockchain) ConstructBlockIndexByHeight() map[uint64]map[BlockHash]*BlockNode { + newBlockIndex := make(map[uint64]map[BlockHash]*BlockNode) + for _, blockNode := range bc.blockIndexByHash { + blockHeight := 
uint64(blockNode.Height) + if _, exists := newBlockIndex[blockHeight]; !exists { + newBlockIndex[blockHeight] = make(map[BlockHash]*BlockNode) + } + newBlockIndex[blockHeight][*blockNode.Hash] = blockNode } return newBlockIndex } +func (bc *Blockchain) getAllBlockNodesIndexedAtHeight(blockHeight uint64) []*BlockNode { + return collections.MapValues(bc.blockIndexByHeight[blockHeight]) +} + +func (bc *Blockchain) hasBlockNodesIndexedAtHeight(blockHeight uint64) bool { + blocksAtHeight, hasNestedMapAtHeight := bc.blockIndexByHeight[blockHeight] + if !hasNestedMapAtHeight { + return false + } + return len(blocksAtHeight) > 0 +} + func (bc *Blockchain) CopyBestChain() ([]*BlockNode, map[BlockHash]*BlockNode) { newBestChain := []*BlockNode{} newBestChainMap := make(map[BlockHash]*BlockNode) @@ -558,26 +628,27 @@ func (bc *Blockchain) _initChain() error { // add a block's parents, if they exist, before adding the block itself. var err error if bc.postgres != nil { - bc.blockIndex, err = bc.postgres.GetBlockIndex() + bc.blockIndexByHash, err = bc.postgres.GetBlockIndex() } else { - bc.blockIndex, err = GetBlockIndex(bc.db, false /*bitcoinNodes*/) + bc.blockIndexByHash, err = GetBlockIndex(bc.db, false /*bitcoinNodes*/) } if err != nil { return errors.Wrapf(err, "_initChain: Problem reading block index from db") } + bc.blockIndexByHeight = bc.ConstructBlockIndexByHeight() - // At this point the blockIndex should contain a full node tree with all + // At this point the blockIndexByHash should contain a full node tree with all // nodes pointing to valid parent nodes. { // Find the tip node with the best node hash. - tipNode := bc.blockIndex[*bestBlockHash] + tipNode := bc.blockIndexByHash[*bestBlockHash] if tipNode == nil { return fmt.Errorf("_initChain(block): Best hash (%#v) not found in block index", bestBlockHash) } // Walk back from the best node to the genesis block and store them all // in bestChain. 
- bc.bestChain, err = GetBestChain(tipNode, bc.blockIndex) + bc.bestChain, err = GetBestChain(tipNode, bc.blockIndexByHash) if err != nil { return errors.Wrapf(err, "_initChain(block): Problem reading best chain from db") } @@ -589,14 +660,14 @@ func (bc *Blockchain) _initChain() error { // TODO: This code is a bit repetitive but this seemed clearer than factoring it out. { // Find the tip node with the best node hash. - tipNode := bc.blockIndex[*bestHeaderHash] + tipNode := bc.blockIndexByHash[*bestHeaderHash] if tipNode == nil { return fmt.Errorf("_initChain(header): Best hash (%#v) not found in block index", bestHeaderHash) } // Walk back from the best node to the genesis block and store them all // in bestChain. - bc.bestHeaderChain, err = GetBestChain(tipNode, bc.blockIndex) + bc.bestHeaderChain, err = GetBestChain(tipNode, bc.blockIndexByHash) if err != nil { return errors.Wrapf(err, "_initChain(header): Problem reading best chain from db") } @@ -652,8 +723,9 @@ func NewBlockchain( eventManager: eventManager, archivalMode: archivalMode, - blockIndex: make(map[BlockHash]*BlockNode), - bestChainMap: make(map[BlockHash]*BlockNode), + blockIndexByHash: make(map[BlockHash]*BlockNode), + blockIndexByHeight: make(map[uint64]map[BlockHash]*BlockNode), + bestChainMap: make(map[BlockHash]*BlockNode), bestHeaderChainMap: make(map[BlockHash]*BlockNode), @@ -821,7 +893,7 @@ func (bc *Blockchain) LocateBestBlockChainHeaders(locator []*BlockHash, stopHash // where it's currently called is single-threaded via a channel in server.go. Going to // avoid messing with it for now. 
headers := locateHeaders(locator, stopHash, MaxHeadersPerMsg, - bc.blockIndex, bc.bestChain, bc.bestChainMap) + bc.blockIndexByHash, bc.bestChain, bc.bestChainMap) return headers } @@ -900,9 +972,9 @@ func (bc *Blockchain) LatestLocator(tip *BlockNode) []*BlockHash { } func (bc *Blockchain) HeaderLocatorWithNodeHash(blockHash *BlockHash) ([]*BlockHash, error) { - node, exists := bc.blockIndex[*blockHash] + node, exists := bc.blockIndexByHash[*blockHash] if !exists { - return nil, fmt.Errorf("Blockchain.HeaderLocatorWithNodeHash: Node for hash %v is not in our blockIndex", blockHash) + return nil, fmt.Errorf("Blockchain.HeaderLocatorWithNodeHash: Node for hash %v is not in our blockIndexByHash", blockHash) } return bc.LatestLocator(node), nil @@ -981,7 +1053,7 @@ func (bc *Blockchain) GetBlockNodesToFetch( } func (bc *Blockchain) HasHeader(headerHash *BlockHash) bool { - _, exists := bc.blockIndex[*headerHash] + _, exists := bc.blockIndexByHash[*headerHash] return exists } @@ -994,7 +1066,7 @@ func (bc *Blockchain) HeaderAtHeight(blockHeight uint32) *BlockNode { } func (bc *Blockchain) HasBlock(blockHash *BlockHash) bool { - node, nodeExists := bc.blockIndex[*blockHash] + node, nodeExists := bc.blockIndexByHash[*blockHash] if !nodeExists { glog.V(2).Infof("Blockchain.HasBlock: Node with hash %v does not exist in node index", blockHash) return false @@ -1295,12 +1367,14 @@ func (bc *Blockchain) SetBestChain(bestChain []*BlockNode) { bc.bestChain = bestChain } -func (bc *Blockchain) SetBestChainMap(bestChain []*BlockNode, bestChainMap map[BlockHash]*BlockNode, blockIndex map[BlockHash]*BlockNode) { +func (bc *Blockchain) SetBestChainMap(bestChain []*BlockNode, bestChainMap map[BlockHash]*BlockNode, blockIndexByHash map[BlockHash]*BlockNode, blockIndexByHeight map[uint64]map[BlockHash]*BlockNode) { bc.bestChain = bestChain bc.bestChainMap = bestChainMap - bc.blockIndex = blockIndex + bc.blockIndexByHash = blockIndexByHash + bc.blockIndexByHeight = blockIndexByHeight } 
+// TODO: update to support validating orphan PoS Blocks func (bc *Blockchain) _validateOrphanBlock(desoBlock *MsgDeSoBlock) error { // Error if the block is missing a parent hash or header. if desoBlock.Header == nil { @@ -1612,7 +1686,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B // index. If it does, then return an error. We should generally // expect that processHeaderPoW will only be called on headers we // haven't seen before. - _, nodeExists := bc.blockIndex[*headerHash] + _, nodeExists := bc.blockIndexByHash[*headerHash] if nodeExists { return false, false, HeaderErrorDuplicateHeader } @@ -1637,7 +1711,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B if blockHeader.PrevBlockHash == nil { return false, false, HeaderErrorNilPrevHash } - parentNode, parentNodeExists := bc.blockIndex[*blockHeader.PrevBlockHash] + parentNode, parentNodeExists := bc.blockIndexByHash[*blockHeader.PrevBlockHash] if !parentNodeExists { // This block is an orphan if its parent doesn't exist and we don't // process unconnectedTxns. @@ -1752,11 +1826,12 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B // index. If we're still syncing then it's safe to just set it. Otherwise, we // need to make a copy first since there could be some concurrency issues. 
if bc.isSyncing() { - bc.blockIndex[*newNode.Hash] = newNode + bc.addNewBlockNodeToBlockIndex(newNode) } else { - newBlockIndex := bc.CopyBlockIndex() - newBlockIndex[*newNode.Hash] = newNode - bc.blockIndex = newBlockIndex + newBlockIndexByHash, newBlockIndexByHeight := bc.CopyBlockIndexes() + bc.blockIndexByHash = newBlockIndexByHash + bc.blockIndexByHeight = newBlockIndexByHeight + bc.addNewBlockNodeToBlockIndex(newNode) } // Update the header chain if this header has more cumulative work than @@ -1807,23 +1882,25 @@ func (bc *Blockchain) ProcessHeader(blockHeader *MsgDeSoHeader, headerHash *Bloc return bc.processHeaderPoW(blockHeader, headerHash) } -func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, _err error) { +func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { bc.ChainLock.Lock() defer bc.ChainLock.Unlock() if desoBlock == nil { // If the block is nil then we return an error. Nothing we can do here. - return false, false, fmt.Errorf("ProcessBlock: Block is nil") + return false, false, nil, fmt.Errorf("ProcessBlock: Block is nil") } // If the block's height is after the PoS cut-over fork height, then we use the PoS block processing logic. Otherwise, fall back // to the PoW logic. 
if desoBlock.Header.Height >= uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { // TODO: call bc.processBlockPoS(desoBlock, verifySignatures) instead - return bc.processBlockPoW(desoBlock, verifySignatures) + isMainChain, isOrphan, err := bc.processBlockPoW(desoBlock, verifySignatures) + return isMainChain, isOrphan, nil, err } - return bc.processBlockPoW(desoBlock, verifySignatures) + isMainChain, isOrphan, err := bc.processBlockPoW(desoBlock, verifySignatures) + return isMainChain, isOrphan, nil, err } func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, err error) { @@ -1906,7 +1983,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures bc.timer.Start("Blockchain.ProcessBlock: BlockNode") // See if a node for the block exists in our node index. - nodeToValidate, nodeExists := bc.blockIndex[*blockHash] + nodeToValidate, nodeExists := bc.blockIndexByHash[*blockHash] // If no node exists for this block at all, then process the header // first before we do anything. This should create a node and set // the header validation status for it. @@ -1927,7 +2004,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // Reset the pointers after having presumably added the header to the // block index. - nodeToValidate, nodeExists = bc.blockIndex[*blockHash] + nodeToValidate, nodeExists = bc.blockIndexByHash[*blockHash] } // At this point if the node still doesn't exist or if the header's validation // failed then we should return an error for the block. Note that at this point @@ -1946,7 +2023,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // In this case go ahead and return early. If its parents are truly legitimate then we // should re-request it and its parents from a node and reprocess it // once it is no longer an orphan. 
- parentNode, parentNodeExists := bc.blockIndex[*blockHeader.PrevBlockHash] + parentNode, parentNodeExists := bc.blockIndexByHash[*blockHeader.PrevBlockHash] if !parentNodeExists || (parentNode.Status&StatusBlockProcessed) == 0 { return false, true, nil } @@ -2601,23 +2678,6 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures return isMainChain, false, nil } -// processBlockPoS runs the Fast-Hotstuff block connect and commit rule as follows: -// 1. Validate on an incoming block and its header -// 2. Store the block in the db -// 3. Resolves forks within the last two blocks -// 4. Connect the block to the blockchain's tip -// 5. If applicable, flush the incoming block's grandparent to the DB -// 6. Notify the block proposer, pacemaker, and voting logic that the incoming block has been accepted -func (bc *Blockchain) processBlockPoS(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, err error) { - // TODO: Implement me - return false, false, fmt.Errorf("ProcessBlockPoS: Not implemented yet") -} - -func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { - // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks - panic("GetUncommittedTipView: Not implemented yet") -} - // DisconnectBlocksToHeight will rollback blocks from the db and blockchain structs until block tip reaches the provided // blockHeight parameter. func (bc *Blockchain) DisconnectBlocksToHeight(blockHeight uint64, snap *Snapshot) error { @@ -2637,10 +2697,10 @@ func (bc *Blockchain) DisconnectBlocksToHeight(blockHeight uint64, snap *Snapsho // is at blockTipHeight. So we delete the block reward at the blockTipHeight + 1 to make sure the state is correct. // TODO: decouple block reward from PutBlockWithTxn. 
blockTipHeight := bc.bestChain[len(bc.bestChain)-1].Height - for hashIter, node := range bc.blockIndex { + for hashIter, node := range bc.blockIndexByHash { hash := hashIter.NewBlockHash() if node.Height > blockTipHeight { - glog.V(1).Info(CLog(Yellow, fmt.Sprintf("DisconnectBlocksToHeight: Found node in blockIndex with "+ + glog.V(1).Info(CLog(Yellow, fmt.Sprintf("DisconnectBlocksToHeight: Found node in blockIndexByHash with "+ "larger height than the current block tip. Deleting the corresponding block reward. Node: (%v)", node))) blockToDetach, err := GetBlock(hash, bc.db, snap) if err != nil && err != badger.ErrKeyNotFound { diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index a5192569a..eda5838cf 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -5,6 +5,7 @@ import ( "flag" "fmt" "log" + "math" "math/big" "math/rand" "os" @@ -451,7 +452,9 @@ func _getBalanceWithView(t *testing.T, chain *Blockchain, utxoView *UtxoView, pk func TestBalanceModelBlockTests(t *testing.T) { setBalanceModelBlockHeights(t) - + // This test assumes we're using PoW blocks, and thus we need to set the PoS cut-over + // fork height to some distant future height + DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = math.MaxUint32 t.Run("TestBasicTransferReorg", TestBasicTransferReorg) t.Run("TestProcessBlockConnectBlocks", TestProcessBlockConnectBlocks) t.Run("TestProcessHeaderskReorgBlocks", TestProcessHeaderskReorgBlocks) @@ -616,7 +619,7 @@ func TestBasicTransferReorg(t *testing.T) { // Process all of the fork blocks on the original chain to make it // experience a reorg. 
for _, forkBlock := range forkBlocks { - _, _, err := chain1.ProcessBlock(forkBlock, true /*verifySignatures*/) + _, _, _, err := chain1.ProcessBlock(forkBlock, true /*verifySignatures*/) require.NoError(err) } @@ -659,7 +662,7 @@ func _shouldConnectBlock(blk *MsgDeSoBlock, t *testing.T, chain *Blockchain) { blockHash, _ := blk.Hash() verifySignatures := true - isMainChain, isOrphan, err := chain.ProcessBlock(blk, verifySignatures) + isMainChain, isOrphan, _, err := chain.ProcessBlock(blk, verifySignatures) require.NoError(err) require.Falsef(isOrphan, "Block %v should not be an orphan", blockHash) require.Truef(isMainChain, "Block %v should be on the main chain", blockHash) @@ -824,7 +827,7 @@ func TestProcessBlockReorgBlocks(t *testing.T) { // Block b1 fmt.Println("Connecting block b1") require.Equal(uint64(3), GetUtxoNumEntries(db, chain.snapshot)) - isMainChain, isOrphan, err := chain.ProcessBlock(blockB1, verifySignatures) + isMainChain, isOrphan, _, err := chain.ProcessBlock(blockB1, verifySignatures) require.NoError(err) require.Falsef(isOrphan, "Block b1 should not be an orphan") require.Falsef(isMainChain, "Block b1 should not be on the main chain") @@ -840,7 +843,7 @@ func TestProcessBlockReorgBlocks(t *testing.T) { // Block b2 fmt.Println("Connecting block b2") require.Equal(uint64(3), GetUtxoNumEntries(db, chain.snapshot)) - isMainChain, isOrphan, err := chain.ProcessBlock(blockB2, verifySignatures) + isMainChain, isOrphan, _, err := chain.ProcessBlock(blockB2, verifySignatures) require.NoError(err) require.Falsef(isOrphan, "Block b2 should not be an orphan") require.Falsef(isMainChain, "Block b2 should not be on the main chain") @@ -1673,7 +1676,7 @@ func TestBadBlockSignature(t *testing.T) { // A bad signature with the right public key should fail. 
finalBlock1.BlockProducerInfo.PublicKey = senderPkBytes - _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, _, err = chain.ProcessBlock(finalBlock1, true) require.Error(err) require.Contains(err.Error(), RuleErrorInvalidBlockProducerSIgnature) @@ -1682,20 +1685,20 @@ func TestBadBlockSignature(t *testing.T) { require.NoError(err) finalBlock1.BlockProducerInfo.PublicKey = blockSignerPkBytes finalBlock1.BlockProducerInfo.Signature = nil - _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, _, err = chain.ProcessBlock(finalBlock1, true) require.Error(err) require.Contains(err.Error(), RuleErrorMissingBlockProducerSignature) // If all the BlockProducerInfo is missing, things should fail finalBlock1.BlockProducerInfo = nil - _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, _, err = chain.ProcessBlock(finalBlock1, true) require.Error(err) require.Contains(err.Error(), RuleErrorMissingBlockProducerSignature) // Now let's add blockSignerPK to the map of trusted keys and confirm that the block processes. chain.trustedBlockProducerPublicKeys[MakePkMapKey(blockSignerPkBytes)] = true finalBlock1.BlockProducerInfo = blockProducerInfoCopy - _, _, err = chain.ProcessBlock(finalBlock1, true) + _, _, _, err = chain.ProcessBlock(finalBlock1, true) require.NoError(err) _, _ = finalBlock1, db diff --git a/lib/db_utils.go b/lib/db_utils.go index d4df2368a..031be2be8 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -228,6 +228,8 @@ type DBPrefixes struct { // -> uint64 PrefixPublicKeyToDeSoBalanceNanos []byte `prefix_id:"[52]" is_state:"true" core_state:"true"` + // DEPRECATED as of the PoS cut-over. Block rewards are no longer stored in the db as + // we consider all block rewards to be mature immediately. // Block reward prefix: // - This index is needed because block rewards take N blocks to mature, which means we need // a way to deduct them from balance calculations until that point. 
Without this index, it @@ -585,7 +587,16 @@ type DBPrefixes struct { // Prefix, , -> PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs []byte `prefix_id:"[94]" is_state:"true"` - // NEXT_TAG: 95 + // PrefixValidatorBLSPublicKeyPKIDPairEntry: Retrieve a BLSPublicKeyPKIDPairEntry by BLS public key. + // Prefix, -> *BLSPublicKeyPKIDPairEntry + PrefixValidatorBLSPublicKeyPKIDPairEntry []byte `prefix_id:"[95]" is_state:"true"` + + // PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry: Retrieve a snapshotted BLSPublicKeyPKIDPairEntry + // by BLS Public Key and SnapshotAtEpochNumber. + // Prefix, , -> *BLSPublicKeyPKIDPairEntry + PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry []byte `prefix_id:"[96]" is_state:"true"` + + // NEXT_TAG: 97 } // DecodeStateKey decodes a state key into a DeSoEncoder type. This is useful for encoders which don't have a stored @@ -876,6 +887,12 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs) { // prefix_id:"[94]" return true, &LockupYieldCurvePoint{} + } else if bytes.Equal(prefix, Prefixes.PrefixValidatorBLSPublicKeyPKIDPairEntry) { + // prefix_id:"[95]" + return true, &BLSPublicKeyPKIDPairEntry{} + } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry) { + // prefix_id:"[96]" + return true, &BLSPublicKeyPKIDPairEntry{} } return true, nil @@ -4798,39 +4815,36 @@ func DeleteUtxoOperationsForBlockWithTxn(txn *badger.Txn, snap *Snapshot, blockH return DBDeleteWithTxn(txn, snap, _DbKeyForUtxoOps(blockHash), eventManager, entryIsDeleted) } +func blockNodeProofOfStakeCutoverMigrationTriggered(height uint32) bool { + return height >= GlobalDeSoParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight +} + func SerializeBlockNode(blockNode *BlockNode) ([]byte, error) { data := []byte{} - // Hash if blockNode.Hash == nil { return nil, fmt.Errorf("SerializeBlockNode: Hash 
cannot be nil") } data = append(data, blockNode.Hash[:]...) - - // Height data = append(data, UintToBuf(uint64(blockNode.Height))...) + if !blockNodeProofOfStakeCutoverMigrationTriggered(blockNode.Height) { + // DifficultyTarget + if blockNode.DifficultyTarget == nil { + return nil, fmt.Errorf("SerializeBlockNode: DifficultyTarget cannot be nil") + } + data = append(data, blockNode.DifficultyTarget[:]...) - // DifficultyTarget - if blockNode.DifficultyTarget == nil { - return nil, fmt.Errorf("SerializeBlockNode: DifficultyTarget cannot be nil") + // CumWork + data = append(data, BigintToHash(blockNode.CumWork)[:]...) } - data = append(data, blockNode.DifficultyTarget[:]...) - - // CumWork - data = append(data, BigintToHash(blockNode.CumWork)[:]...) - - // Header serializedHeader, err := blockNode.Header.ToBytes(false) if err != nil { - return nil, errors.Wrapf(err, "SerializeBlockNode: Problem serializing header") + return nil, fmt.Errorf("SerializeBlockNode: Problem serializing header: %v", err) } data = append(data, IntToBuf(int64(len(serializedHeader)))...) data = append(data, serializedHeader...) - // Status - // It's assumed this field is one byte long. data = append(data, UintToBuf(uint64(blockNode.Status))...) 
- return data, nil } @@ -4843,11 +4857,9 @@ func DeserializeBlockNode(data []byte) (*BlockNode, error) { nil, // CumWork nil, // Header StatusNone, // Status - ) rr := bytes.NewReader(data) - // Hash _, err := io.ReadFull(rr, blockNode.Hash[:]) if err != nil { @@ -4861,19 +4873,21 @@ func DeserializeBlockNode(data []byte) (*BlockNode, error) { } blockNode.Height = uint32(height) - // DifficultyTarget - _, err = io.ReadFull(rr, blockNode.DifficultyTarget[:]) - if err != nil { - return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding DifficultyTarget") - } + if !blockNodeProofOfStakeCutoverMigrationTriggered(blockNode.Height) { + // DifficultyTarget + _, err = io.ReadFull(rr, blockNode.DifficultyTarget[:]) + if err != nil { + return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding DifficultyTarget") + } - // CumWork - tmp := BlockHash{} - _, err = io.ReadFull(rr, tmp[:]) - if err != nil { - return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding CumWork") + // CumWork + tmp := BlockHash{} + _, err = io.ReadFull(rr, tmp[:]) + if err != nil { + return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding CumWork") + } + blockNode.CumWork = HashToBigint(&tmp) } - blockNode.CumWork = HashToBigint(&tmp) // Header payloadLen, err := ReadVarint(rr) @@ -4900,7 +4914,6 @@ func DeserializeBlockNode(data []byte) (*BlockNode, error) { return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding Status") } blockNode.Status = BlockStatus(uint32(status)) - return blockNode, nil } @@ -5009,33 +5022,39 @@ func GetBlock(blockHash *BlockHash, handle *badger.DB, snap *Snapshot) (*MsgDeSo return blockRet, nil } -func PutBlockWithTxn(txn *badger.Txn, snap *Snapshot, desoBlock *MsgDeSoBlock, eventManager *EventManager) error { - if desoBlock.Header == nil { - return fmt.Errorf("PutBlockWithTxn: Header was nil in block %v", desoBlock) +func PutBlockHashToBlockWithTxn(txn *badger.Txn, snap *Snapshot, block *MsgDeSoBlock, 
eventManager *EventManager) error { + if block.Header == nil { + return fmt.Errorf("PutBlockHashToBlockWithTxn: Header was nil in block %v", block) } - blockHash, err := desoBlock.Header.Hash() + blockHash, err := block.Header.Hash() if err != nil { - return errors.Wrapf(err, "PutBlockWithTxn: Problem hashing header: ") + return errors.Wrap(err, "PutBlockHashToBlockWithTxn: Problem hashing header: ") } blockKey := BlockHashToBlockKey(blockHash) - data, err := desoBlock.ToBytes(false) + data, err := block.ToBytes(false) if err != nil { return err } // First check to see if the block is already in the db. - if _, err := DBGetWithTxn(txn, snap, blockKey); err == nil { + if _, err = DBGetWithTxn(txn, snap, blockKey); err == nil { // err == nil means the block already exists in the db so // no need to store it. return nil } // If the block is not in the db then set it. - if err := DBSetWithTxn(txn, snap, blockKey, data, eventManager); err != nil { + if err = DBSetWithTxn(txn, snap, blockKey, data, eventManager); err != nil { return err } + return nil +} - // Index the block reward. Used for deducting immature block rewards from user balances. 
- if len(desoBlock.Txns) == 0 { - return fmt.Errorf("PutBlockWithTxn: Got block without any txns %v", desoBlock) +func PutBlockWithTxn(txn *badger.Txn, snap *Snapshot, desoBlock *MsgDeSoBlock, eventManager *EventManager) error { + blockHash, err := desoBlock.Header.Hash() + if err != nil { + return errors.Wrapf(err, "PutBlockWithTxn: Problem hashing header: ") + } + if err = PutBlockHashToBlockWithTxn(txn, snap, desoBlock, eventManager); err != nil { + return errors.Wrap(err, "PutBlockWithTxn: Problem putting block hash to block") } blockRewardTxn := desoBlock.Txns[0] if blockRewardTxn.TxnMeta.GetTxnType() != TxnTypeBlockReward { @@ -5055,7 +5074,7 @@ func PutBlockWithTxn(txn *badger.Txn, snap *Snapshot, desoBlock *MsgDeSoBlock, e pkMapKey := pkMapKeyIter blockRewardKey := PublicKeyBlockHashToBlockRewardKey(pkMapKey[:], blockHash) - if err := DBSetWithTxn(txn, snap, blockRewardKey, EncodeUint64(blockReward), eventManager); err != nil { + if err = DBSetWithTxn(txn, snap, blockRewardKey, EncodeUint64(blockReward), eventManager); err != nil { return err } } @@ -5167,8 +5186,7 @@ func GetHeightHashToNodeInfoWithTxn(txn *badger.Txn, snap *Snapshot, return nil } - var blockNode *BlockNode - blockNode, err = DeserializeBlockNode(nodeBytes) + blockNode, err := DeserializeBlockNode(nodeBytes) if err != nil { return nil } diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index 5036e5d86..288ad9a18 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -16,6 +16,27 @@ import ( "github.com/stretchr/testify/require" ) +var expectedBlockHeaderVersion1 = &MsgDeSoHeader{ + Version: 1, + PrevBlockHash: &BlockHash{ + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x11, + 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x20, 0x21, + 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x30, 0x31, + 0x32, 0x33, + }, + TransactionMerkleRoot: &BlockHash{ + 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40, 0x41, 0x42, 0x43, + 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x50, 0x51, 
0x52, 0x53, + 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x60, 0x61, 0x62, 0x63, + 0x64, 0x65, + }, + // Use full uint64 values to make sure serialization and de-serialization work + TstampNanoSecs: SecondsToNanoSeconds(1678943210), + Height: uint64(1321012345), + Nonce: uint64(12345678901234), + ExtraNonce: uint64(101234123456789), +} + // Check that all state db prefixes have been correctly mapped to DeSoEncoder types via StatePrefixToDeSoEncoder func TestStatePrefixToDeSoEncoder(t *testing.T) { for prefixByte, isState := range StatePrefixes.StatePrefixesMap { diff --git a/lib/load_test.go b/lib/load_test.go index 68d4ef3ba..00cec0e6f 100644 --- a/lib/load_test.go +++ b/lib/load_test.go @@ -190,7 +190,7 @@ func TestComputeMaxTPS(t *testing.T) { _, _ = newParams, newDB timeStart := time.Now() for _, blockToConnect := range blocksMined { - _, _, err := newChain.ProcessBlock(blockToConnect, true /*verifySignatures*/) + _, _, _, err := newChain.ProcessBlock(blockToConnect, true /*verifySignatures*/) require.NoError(err) } elapsedSecs := (time.Since(timeStart)).Seconds() @@ -237,7 +237,7 @@ func TestConnectBlocksLoadTest(t *testing.T) { pprof.StartCPUProfile(ff) timeStart := time.Now() for _, blockToConnect := range blocksMined { - _, _, err := newChain.ProcessBlock(blockToConnect, false /*verifySignatures*/) + _, _, _, err := newChain.ProcessBlock(blockToConnect, false /*verifySignatures*/) require.NoError(err) } elapsedSecs := (time.Since(timeStart)).Seconds() diff --git a/lib/miner.go b/lib/miner.go index 26fcf436b..6a2453486 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -239,7 +239,7 @@ func (desoMiner *DeSoMiner) MineAndProcessSingleBlock(threadIndex uint32, mempoo // will be informed about it. This will cause it to be relayed appropriately. verifySignatures := true // TODO(miner): Replace with a call to SubmitBlock. 
- isMainChain, isOrphan, err := desoMiner.BlockProducer.chain.ProcessBlock( + isMainChain, isOrphan, _, err := desoMiner.BlockProducer.chain.ProcessBlock( blockToMine, verifySignatures) glog.V(2).Infof("Called ProcessBlock: isMainChain=(%v), isOrphan=(%v), err=(%v)", isMainChain, isOrphan, err) diff --git a/lib/network.go b/lib/network.go index a36398a25..15b230a83 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1973,11 +1973,11 @@ type MsgDeSoHeader struct { // The BLS public key of the validator who proposed this block. ProposerVotingPublicKey *bls.PublicKey - // ProposerRandomSeedHash is only used for Proof of Stake blocks, starting with + // ProposerRandomSeedSignature is only used for Proof of Stake blocks, starting with // MsgDeSoHeader version 2. For all earlier versions, this field will default to nil. // // The current block's randomness seed provided by the block's proposer. - ProposerRandomSeedHash *RandomSeedHash + ProposerRandomSeedSignature *bls.Signature // ProposedInView is only used for Proof of Stake blocks, starting with MsgDeSoHeader // version 2. For all earlier versions, this field will default to nil. @@ -2222,11 +2222,11 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error } retBytes = append(retBytes, EncodeBLSPublicKey(msg.ProposerVotingPublicKey)...) - // ProposerRandomSeedHash - if msg.ProposerRandomSeedHash == nil { - return nil, fmt.Errorf("EncodeHeaderVersion2: ProposerRandomSeedHash must be non-nil") + // ProposerRandomSeedSignature + if msg.ProposerRandomSeedSignature == nil { + return nil, fmt.Errorf("EncodeHeaderVersion2: ProposerRandomSeedSignature must be non-nil") } - retBytes = append(retBytes, EncodeRandomSeedHash(msg.ProposerRandomSeedHash)...) + retBytes = append(retBytes, EncodeOptionalBLSSignature(msg.ProposerRandomSeedSignature)...) // ProposedInView retBytes = append(retBytes, UintToBuf(msg.ProposedInView)...) 
@@ -2444,10 +2444,10 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposerVotingPublicKey") } - // ProposerRandomSeedHash - retHeader.ProposerRandomSeedHash, err = DecodeRandomSeedHash(rr) + // ProposerRandomSeedSignature + retHeader.ProposerRandomSeedSignature, err = DecodeOptionalBLSSignature(rr) if err != nil { - return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposerRandomSeedHash") + return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposerRandomSeedSignature") } // ProposedInView diff --git a/lib/network_test.go b/lib/network_test.go index 5c8191b2b..fbd9932e2 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -1,3 +1,5 @@ +//go:build relic + package lib import ( @@ -85,27 +87,6 @@ func TestVerack(t *testing.T) { require.Equal(&MsgDeSoVerack{Nonce: nonce}, testMsg) } -var expectedBlockHeaderVersion1 = &MsgDeSoHeader{ - Version: 1, - PrevBlockHash: &BlockHash{ - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x11, - 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x20, 0x21, - 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x30, 0x31, - 0x32, 0x33, - }, - TransactionMerkleRoot: &BlockHash{ - 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40, 0x41, 0x42, 0x43, - 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x50, 0x51, 0x52, 0x53, - 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x60, 0x61, 0x62, 0x63, - 0x64, 0x65, - }, - // Use full uint64 values to make sure serialization and de-serialization work - TstampNanoSecs: SecondsToNanoSeconds(1678943210), - Height: uint64(1321012345), - Nonce: uint64(12345678901234), - ExtraNonce: uint64(101234123456789), -} - // Creates fully formatted a PoS block header with random signatures // and block hashes func createTestBlockHeaderVersion2(t *testing.T, includeTimeoutQC bool) *MsgDeSoHeader { @@ -127,12 +108,6 @@ func createTestBlockHeaderVersion2(t *testing.T, includeTimeoutQC bool) 
*MsgDeSo 0x02, 0x23, 0x24, 0x25, 0x26, 0x27, 0x33, 0x29, 0x30, 0x31, 0x32, 0x33, } - testRandomSeedHash := RandomSeedHash{ - 0x00, 0x36, 0x36, 0x37, 0x38, 0x39, 0x40, 0x41, 0x42, 0x43, - 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x50, 0x51, 0x52, 0x53, - 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x60, 0x61, 0x62, 0x63, - 0x64, 0x65, - } testBitset := bitset.NewBitset().Set(0, true).Set(3, true) testBLSPublicKey, testBLSSignature := _generateValidatorVotingPublicKeyAndSignature(t) @@ -175,7 +150,7 @@ func createTestBlockHeaderVersion2(t *testing.T, includeTimeoutQC bool) *MsgDeSo TxnConnectStatusByIndexHash: &testTxnConnectStatusByIndex, ProposerPublicKey: NewPublicKey(pkForTesting1), ProposerVotingPublicKey: testBLSPublicKey, - ProposerRandomSeedHash: &testRandomSeedHash, + ProposerRandomSeedSignature: testBLSSignature, ProposedInView: uint64(1432101234), // Use real signatures and public keys for the PoS fields ProposerVotePartialSignature: testBLSSignature, diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 5cc6886a6..090542047 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -43,10 +43,10 @@ func (pbp *PosBlockProducer) SignBlock(blockTemplate BlockTemplate, signerPrivat // during happy path in consensus when a vote QC has been assembled. The block is unsigned, so to indicate its incompleteness, // the block is returned as a BlockTemplate. func (pbp *PosBlockProducer) CreateUnsignedBlock(latestBlockView *UtxoView, newBlockHeight uint64, view uint64, - proposerRandomSeedHash *RandomSeedHash, validatorsVoteQC *QuorumCertificate) (BlockTemplate, error) { + proposerRandomSeedSignature *bls.Signature, validatorsVoteQC *QuorumCertificate) (BlockTemplate, error) { // Create the block template. 
- block, err := pbp.createBlockTemplate(latestBlockView, newBlockHeight, view, proposerRandomSeedHash) + block, err := pbp.createBlockTemplate(latestBlockView, newBlockHeight, view, proposerRandomSeedSignature) if err != nil { return nil, errors.Wrapf(err, "PosBlockProducer.CreateUnsignedTimeoutBlock: Problem creating block template") } @@ -60,10 +60,10 @@ func (pbp *PosBlockProducer) CreateUnsignedBlock(latestBlockView *UtxoView, newB // during a timeout in consensus when a validators timeout aggregate QC has been assembled. The block is unsigned, // and so is returned as a BlockTemplate. func (pbp *PosBlockProducer) CreateUnsignedTimeoutBlock(latestBlockView *UtxoView, newBlockHeight uint64, view uint64, - proposerRandomSeedHash *RandomSeedHash, validatorsTimeoutAggregateQC *TimeoutAggregateQuorumCertificate) (BlockTemplate, error) { + proposerRandomSeedSignature *bls.Signature, validatorsTimeoutAggregateQC *TimeoutAggregateQuorumCertificate) (BlockTemplate, error) { // Create the block template. - block, err := pbp.createBlockTemplate(latestBlockView, newBlockHeight, view, proposerRandomSeedHash) + block, err := pbp.createBlockTemplate(latestBlockView, newBlockHeight, view, proposerRandomSeedSignature) if err != nil { return nil, errors.Wrapf(err, "PosBlockProducer.CreateUnsignedTimeoutBlock: Problem creating block template") } @@ -77,7 +77,7 @@ func (pbp *PosBlockProducer) CreateUnsignedTimeoutBlock(latestBlockView *UtxoVie // a partially filled out block with Fee-Time ordered transactions. The returned block is complete except for // the qc / aggregateQc fields, and the signature. func (pbp *PosBlockProducer) createBlockTemplate(latestBlockView *UtxoView, newBlockHeight uint64, view uint64, - proposerRandomSeedHash *RandomSeedHash) (BlockTemplate, error) { + proposerRandomSeedSignature *bls.Signature) (BlockTemplate, error) { // First get the block without the header. 
currentTimestamp := uint64(time.Now().UnixNano()) block, err := pbp.createBlockWithoutHeader(latestBlockView, newBlockHeight, currentTimestamp) @@ -102,7 +102,7 @@ func (pbp *PosBlockProducer) createBlockTemplate(latestBlockView *UtxoView, newB // Set the proposer information. block.Header.ProposerPublicKey = pbp.proposerPublicKey block.Header.ProposerVotingPublicKey = pbp.proposerVotingPublicKey - block.Header.ProposerRandomSeedHash = proposerRandomSeedHash + block.Header.ProposerRandomSeedSignature = proposerRandomSeedSignature return block, nil } diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index e9a22c38a..d2c988f9b 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -51,13 +51,13 @@ func TestCreateBlockTemplate(t *testing.T) { priv, err := bls.NewPrivateKey() require.NoError(err) pub := priv.PublicKey() - seedHash := &RandomSeedHash{} - _, err = seedHash.FromBytes(Sha256DoubleHash([]byte("seed")).ToBytes()) + seedSignature := &bls.Signature{} + _, err = seedSignature.FromBytes(Sha256DoubleHash([]byte("seed")).ToBytes()) require.NoError(err) m0Pk := NewPublicKey(m0PubBytes) pbp := NewPosBlockProducer(mempool, params, m0Pk, pub) - blockTemplate, err := pbp.createBlockTemplate(latestBlockView, 3, 10, seedHash) + blockTemplate, err := pbp.createBlockTemplate(latestBlockView, 3, 10, seedSignature) require.NoError(err) require.NotNil(blockTemplate) require.NotNil(blockTemplate.Header) @@ -71,7 +71,7 @@ func TestCreateBlockTemplate(t *testing.T) { require.Equal(blockTemplate.Header.ProposedInView, uint64(10)) require.Equal(blockTemplate.Header.ProposerPublicKey, m0Pk) require.Equal(blockTemplate.Header.ProposerVotingPublicKey, pub) - require.Equal(blockTemplate.Header.ProposerRandomSeedHash, seedHash) + require.True(blockTemplate.Header.ProposerRandomSeedSignature.Eq(seedSignature)) } func TestCreateBlockWithoutHeader(t *testing.T) { diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go new file 
mode 100644 index 000000000..4a3381c10 --- /dev/null +++ b/lib/pos_blockchain.go @@ -0,0 +1,1436 @@ +package lib + +import ( + "fmt" + "math" + "time" + + "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/consensus" + "github.com/dgraph-io/badger/v3" + "github.com/golang/glog" + "github.com/pkg/errors" +) + +// processBlockPoS runs the Fast-Hotstuff block connect and commit rule as follows: +// 1. Determine if we're missing the parent block of this block. +// If so, return the hash of the missing block and add this block to the orphans list. +// 2. Validate the incoming block, its header, its block height, the leader, and its QCs (vote or timeout) +// 3. Store the block in the block index and save to DB. +// 4. try to apply the incoming block as the tip (performing reorgs as necessary). If it can't be applied, exit here. +// 5. Run the commit rule - If applicable, flushes the incoming block's grandparent to the DB +func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, verifySignatures bool) ( + _success bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { + // If we can't hash the block, we can never store in the block index and we should throw it out immediately. + if _, err := block.Hash(); err != nil { + return false, false, nil, errors.Wrapf(err, "processBlockPoS: Problem hashing block") + } + // Get all the blocks between the current block and the committed tip. If the block + // is an orphan, then we store it after performing basic validations. + // If the block extends from any committed block other than the committed tip, + // then we throw it away. 
+ lineageFromCommittedTip, err := bc.getLineageFromCommittedTip(block) + if err == RuleErrorDoesNotExtendCommittedTip || + err == RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock || + err == RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight || + err == RuleErrorAncestorBlockValidationFailed { + // In this case, the block extends a committed block that is NOT the tip + // block. We will never accept this block. To prevent spam, we do not + // store this block as validate failed. We just throw it away. + return false, false, nil, errors.Wrap(err, "processBlockPoS: ") + } + if err == RuleErrorMissingAncestorBlock { + // In this case, the block is an orphan that does not extend from any blocks + // on our best chain. Try to process the orphan by running basic validations. + // If it passes basic integrity checks, we'll store it with the hope that we + // will eventually get a parent that connects to our best chain. + missingBlockHashes := []*BlockHash{block.Header.PrevBlockHash} + return false, true, missingBlockHashes, bc.processOrphanBlockPoS(block) + } + + if err != nil { + return false, false, nil, errors.Wrap(err, + "processBlockPoS: Unexpected problem getting lineage from committed tip: ") + } + + // First, we perform a validation of the leader and the QC to prevent spam. + // If the block fails this check, we throw it away. + passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block) + if err != nil { + // If we hit an error, we can't store it since we're not sure if it passed the spam prevention check. + return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem validating leader and QC") + } + if !passedSpamPreventionCheck { + // If the block fails the spam prevention check, we throw it away. + return false, false, nil, errors.New("processBlockPoS: Block failed spam prevention check") + } + + // TODO: Is there any error that would require special handling? 
If that's the case, we should + // probably push that logic in validateAndIndexBlockPoS anyway. + blockNode, err := bc.validateAndIndexBlockPoS(block) + if err != nil { + return false, false, nil, errors.Wrap(err, + "processBlockPoS: Problem validating block: ") + } + if !blockNode.IsValidated() { + return false, false, nil, errors.New( + "processBlockPoS: Block not validated after performing all validations.") + } + + // 4. Try to apply the incoming block as the new tip. This function will + // first perform any required reorgs and then determine if the incoming block + // extends the chain tip. If it does, it will apply the block to the best chain + // and appliedNewTip will be true and we can continue to running the commit rule. + appliedNewTip, err := bc.tryApplyNewTip(blockNode, currentView, lineageFromCommittedTip) + if err != nil { + return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem applying new tip: ") + } + + // 5. Commit grandparent if possible. Only need to do this if we applied a new tip. + if appliedNewTip { + if err = bc.runCommitRuleOnBestChain(); err != nil { + return false, false, nil, errors.Wrap(err, + "processBlockPoS: error running commit rule: ") + } + } + + // Now that we've processed this block, we check for any blocks that were previously + // stored as orphans, which are children of this block. We can process them now. 
+ blockNodesAtNextHeight := bc.blockIndexByHeight[uint64(blockNode.Height)+1] + for _, blockNodeAtNextHeight := range blockNodesAtNextHeight { + if blockNodeAtNextHeight.Header.PrevBlockHash.IsEqual(blockNode.Hash) && + blockNodeAtNextHeight.IsStored() && + !blockNodeAtNextHeight.IsValidated() && + !blockNodeAtNextHeight.IsValidateFailed() { + var orphanBlock *MsgDeSoBlock + orphanBlock, err = GetBlock(blockNodeAtNextHeight.Hash, bc.db, bc.snapshot) + if err != nil { + glog.Errorf("processBlockPoS: Problem getting orphan block %v", blockNodeAtNextHeight.Hash) + continue + } + var appliedNewTipOrphan bool + if appliedNewTipOrphan, _, _, err = bc.processBlockPoS( + orphanBlock, currentView, verifySignatures); err != nil { + glog.Errorf("processBlockPoS: Problem validating orphan block %v", blockNodeAtNextHeight.Hash) + continue + } + if appliedNewTipOrphan { + appliedNewTip = true + } + } + } + // Returns whether a new tip was applied, whether the block is an orphan, and any missing blocks, and an error. + return appliedNewTip, false, nil, nil +} + +// processOrphanBlockPoS validates that an orphan block is properly formed. If +// an orphan block is properly formed, we will save it as Stored in the block index. +// As a spam-prevention measure, we will not store a block if it fails the QC or leader check +// and simply throw it away. If it fails the other integrity checks, we'll store it +// as validate failed. +func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { + // Construct a UtxoView, so we can perform the QC and leader checks. + utxoView, err := NewUtxoView(bc.db, bc.params, nil, bc.snapshot, nil) + if err != nil { + // We can't validate the QC without a UtxoView. Return an error. + return errors.Wrap(err, "processOrphanBlockPoS: Problem initializing UtxoView") + } + currentEpochEntry, err := utxoView.GetCurrentEpochEntry() + if err != nil { + // We can't validate the QC without getting the current epoch entry. 
+ return errors.Wrap(err, "processOrphanBlockPoS: Problem getting current epoch entry") + } + var validatorsByStake []*ValidatorEntry + // If the block is in a previous or future epoch, we need to compute the + // proper validator set for the block. We do this by computing the prev/next + // epoch entry and then fetching the validator set at the snapshot of the + // epoch number of the prev/next epoch entry. + if !currentEpochEntry.ContainsBlockHeight(block.Header.Height) { + // Get the epoch entry based on the block height. The logic is the same + // regardless of whether the block is in a previous or future epoch. + // Note that the InitialView cannot be properly computed. + var epochEntry *EpochEntry + usePrevEpoch := block.Header.Height < currentEpochEntry.InitialBlockHeight + // If it's in a previous epoch, we compute the prev epoch entry. + if usePrevEpoch { + epochEntry, err = utxoView.simulatePrevEpochEntry(currentEpochEntry.EpochNumber, + currentEpochEntry.InitialBlockHeight) + if err != nil { + return errors.Wrap(err, "processOrphanBlockPoS: Problem computing prev epoch entry") + } + } else { + // Okay now we know that this block must be in a future epoch. We do our best to compute + // the next epoch entry and check if it is in that epoch. If it's in a future epoch, we just throw it away. + // We supply 0 for the view and 0 for the block timestamp as we don't know what those values should be, and + // we will ignore these values. + epochEntry, err = utxoView.computeNextEpochEntry(currentEpochEntry.EpochNumber, + currentEpochEntry.FinalBlockHeight, 0, 0) + if err != nil { + return errors.Wrap(err, "processOrphanBlockPoS: Problem computing next epoch entry") + } + } + if !epochEntry.ContainsBlockHeight(block.Header.Height) { + // We will throw away this block as we know it's not in either + // the next or the previous epoch. 
+ errSuffix := "future" + if usePrevEpoch { + errSuffix = "past" + } + return fmt.Errorf("processOrphanBlockPoS: Block height %d is too far in the %v", + block.Header.Height, errSuffix) + } + var epochEntrySnapshotAtEpochNumber uint64 + epochEntrySnapshotAtEpochNumber, err = utxoView.ComputeSnapshotEpochNumberForEpoch(epochEntry.EpochNumber) + if err != nil { + return errors.Wrapf(err, + "processOrphanBlockPoS: Problem getting snapshot at epoch number for epoch entry at epoch #%d", + epochEntry.EpochNumber) + } + // Okay now that we've gotten the SnapshotAtEpochNumber for the prev/next epoch, we can make sure that the + // proposer of the block is within the set of potential block proposers for the prev/next epoch based on + // the VotingPublicKey. + // First, we get the snapshot validator entry based on the BLS public key in the header. + snapshotBLSPublicKeyPKIDEntry, err := utxoView.GetSnapshotValidatorBLSPublicKeyPKIDPairEntry( + block.Header.ProposerVotingPublicKey, epochEntrySnapshotAtEpochNumber) + if err != nil { + return errors.Wrapf(err, + "processOrphanBlockPoS: Problem getting snapshot validator entry for block proposer %v", + block.Header.ProposerVotingPublicKey) + } + // If no snapshot BLSPublicKeyPKIDEntry exists, we'll never accept this block as + // its block proposer is not in the validator set as we did not snapshot its BLS Public key. + // This is a spam prevention measure, so we just throw away the block. + if snapshotBLSPublicKeyPKIDEntry == nil { + return nil + } + // Fetch the snapshot leader PKIDs + snapshotLeaderPKIDs, err := utxoView.GetSnapshotLeaderScheduleAtEpochNumber(epochEntrySnapshotAtEpochNumber) + if err != nil { + return errors.Wrapf(err, + "processOrphanBlockPoS: Problem getting snapshot leader schedule at snapshot at epoch number %d", + epochEntrySnapshotAtEpochNumber) + } + // Get the PKID for the block proposer from the snapshot validator entry. 
+ blockProposerPKID := snapshotBLSPublicKeyPKIDEntry.PKID + // TODO: Replace w/ collections.Any for simplicity. There is an issue with this version + // of Go's compiler that is preventing us from using collections.Any here. + // We can now check if the block proposer is in the set of snapshot leader PKIDs. + blockProposerSeen := false + for _, snapshotLeaderPKID := range snapshotLeaderPKIDs { + if snapshotLeaderPKID.Eq(blockProposerPKID) { + blockProposerSeen = true + break + } + } + if !blockProposerSeen { + // We'll never accept this block as its block proposer is not in the set of + // potential leaders. As a spam-prevention measure, we simply return nil and throw it away. + return nil + } + validatorsByStake, err = utxoView.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber( + epochEntrySnapshotAtEpochNumber) + if err != nil { + return errors.Wrapf(err, + "processOrphanBlockPoS: Problem getting validator set at snapshot at epoch number %d", + epochEntrySnapshotAtEpochNumber) + } + } else { + // This block is in the current epoch! + // First we validate the proposer vote partial signature + snapshotAtEpochNumber, err := utxoView.GetCurrentSnapshotEpochNumber() + if err != nil { + return errors.Wrap(err, "processOrphanBlockPoS: Problem getting current snapshot epoch number") + } + // Validate the proposer partial sig. + isValidPartialSig, err := utxoView.hasValidProposerPartialSignaturePoS(block, snapshotAtEpochNumber) + if err != nil { + return errors.Wrap(err, "processOrphanBlockPoS: Problem validating proposer partial sig") + } + if !isValidPartialSig { + // We'll never accept this block since it has an invalid leader signature. + // As a spam-prevention measure, we just throw away this block + // and don't store it. + return nil + } + // Next we validate that the leader is correct. We can only do this if the block + // is in the current epoch since we need the current epoch entry's initial view + // to compute the proper leader. 
+ var isBlockProposerValid bool + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(block) + if err != nil { + return errors.Wrapf(err, "processOrphanBlockPoS: Problem validating block proposer") + } + if !isBlockProposerValid { + // If the block proposer isn't valid, we'll never accept this block. As a spam-prevention + // measure, we just throw away this block and don't store it. + return nil + } + // If we get here, we know we have the correct block proposer. We now fetch the validators ordered by + // stake, so we can validate the QC. + validatorsByStake, err = utxoView.GetAllSnapshotValidatorSetEntriesByStake() + if err != nil { + return errors.Wrap(err, "processOrphanBlockPoS: Problem getting validator set") + } + } + // Okay now we have the validator set ordered by stake, we can validate the QC. + if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake); err != nil { + // If we hit an error, we know that the QC is invalid, and we'll never accept this block, + // As a spam-prevention measure, we just throw away this block and don't store it. + return nil + } + // All blocks should pass the basic integrity validations, which ensure the block + // is not malformed. If the block is malformed, we should store it as ValidateFailed. + if err = bc.isProperlyFormedBlockPoS(block); err != nil { + if _, innerErr := bc.storeValidateFailedBlockInBlockIndex(block); innerErr != nil { + return errors.Wrapf(innerErr, + "processOrphanBlockPoS: Problem adding validate failed block to block index: %v", err) + } + return nil + } + // Add to blockIndexByHash with status STORED only as we are not sure if it's valid yet. + _, err = bc.storeBlockInBlockIndex(block) + return errors.Wrap(err, "processBlockPoS: Problem adding block to block index: ") +} + +// storeValidateFailedBlockWithWrappedError is a helper function that takes in a block and an error and +// stores the block in the block index with status VALIDATE_FAILED. It returns the resulting BlockNode. 
+func (bc *Blockchain) storeValidateFailedBlockWithWrappedError(block *MsgDeSoBlock, outerErr error) ( + *BlockNode, error) { + blockNode, innerErr := bc.storeValidateFailedBlockInBlockIndex(block) + if innerErr != nil { + return nil, errors.Wrapf(innerErr, + "storeValidateFailedBlockWithWrappedError: Problem adding validate failed block to block index: %v", + outerErr) + } + return blockNode, nil +} + +func (bc *Blockchain) validateLeaderAndQC(block *MsgDeSoBlock) (_passedSpamPreventionCheck bool, _err error) { + // We expect the utxoView for the parent block to be valid because we check that all ancestor blocks have + // been validated. + utxoView, err := bc.getUtxoViewAtBlockHash(*block.Header.PrevBlockHash) + if err != nil { + // This should never happen. If the parent is validated and extends from the tip, then we should + // be able to build a UtxoView for it. This failure can only happen due to transient or badger issues. + // We return that validation didn't fail and the error. + return false, errors.Wrap(err, "validateLeaderAndQC: Problem getting UtxoView") + } + currentEpochEntry, err := utxoView.GetCurrentEpochEntry() + if err != nil { + return false, errors.Wrap(err, + "validateLeaderAndQC: Problem getting current epoch entry") + } + // If after constructing a UtxoView based on the parent block, we find that the current block's height + // isn't in the current epoch, then block's stated height is wrong. The block is guaranteed to be invalid. + if !currentEpochEntry.ContainsBlockHeight(block.Header.Height) { + return false, nil + } + snapshotAtEpochNumber, err := utxoView.ComputeSnapshotEpochNumberForEpoch(currentEpochEntry.EpochNumber) + if err != nil { + return false, errors.Wrapf(err, + "validateLeaderAndQC: Problem getting snapshot epoch number for epoch #%d", + currentEpochEntry.EpochNumber) + } + // TODO: We may want to only perform this check if verifySignatures is true. 
+ isValidPartialSig, err := utxoView.hasValidProposerPartialSignaturePoS(block, snapshotAtEpochNumber) + if err != nil { + return false, errors.Wrap(err, + "validateLeaderAndQC: Problem validating proposer partial sig") + } + if !isValidPartialSig { + return false, nil + } + // 2. Validate QC + validatorsByStake, err := utxoView.GetAllSnapshotValidatorSetEntriesByStake() + if err != nil { + // This should never happen. If the parent is validated and extends from the tip, then we should + // be able to fetch the validator set at its block height for it. This failure can only happen due + // to transient badger issues. We return false for failed spam prevention check and the error. + return false, errors.Wrap(err, "validateLeaderAndQC: Problem getting validator set") + } + + // Validate the block's QC. If it's invalid, we return true for failed spam prevention check. + if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake); err != nil { + return false, nil + } + + isBlockProposerValid, err := utxoView.hasValidBlockProposerPoS(block) + if err != nil { + return false, errors.Wrapf(err, + "validateAndIndexBlockPoS: Problem validating block proposer") + } + // If the block proposer is invalid, we return true for failed spam prevention check. + if !isBlockProposerValid { + return false, nil + } + return true, nil +} + +// validateAndIndexBlockPoS performs all validation checks, except the QC and leader check to prevent spam, +// for a given block and adds it to the block index with the appropriate status. It assumes that the block +// passed in has passed the spam prevention check. +// 1. If the block is already VALIDATE_FAILED, we return the BlockNode as-is without perform further validations and +// throw an error. +// 2. If the block is already VALIDATED, we return the BlockNode as-is without performing further validations and no +// error. +// 3. 
We check if its parent is VALIDATE_FAILED, if so we add the block to the block index with status VALIDATE_FAILED +// and throw an error. +// 4. If its parent is NOT VALIDATED and NOT VALIDATE_FAILED, we recursively call this function on its parent. +// 5. If after calling this function on its parent, the parent is VALIDATE_FAILED, we add the block to the block index +// with status VALIDATE_FAILED and throw an error. +// 6. If after calling this function on its parent, the parent is VALIDATED, we perform all other validations on the +// block. +// +// The recursive function's invariant is described as follows: +// - Base case: If block is VALIDATED or VALIDATE_FAILED, return the BlockNode as-is. +// - Recursive case: If the block is not VALIDATED or VALIDATE_FAILED in the blockIndexByHash, we will perform all +// validations and add the block to the block index with the appropriate status (VALIDATED OR VALIDATE_FAILED) and +// return the new BlockNode. +// - Error case: Something goes wrong that doesn't result in the block being marked VALIDATE or VALIDATE_FAILED. In +// this case, we will add the block to the block index with status STORED and return the BlockNode. +func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock) (*BlockNode, error) { + blockHash, err := block.Header.Hash() + if err != nil { + return nil, errors.Wrapf(err, "validateAndIndexBlockPoS: Problem hashing block %v", block) + } + + // Base case - Check if the block is validated or validate failed. If so, we can return early. + blockNode, exists := bc.blockIndexByHash[*blockHash] + if exists && (blockNode.IsValidateFailed() || blockNode.IsValidated()) { + return blockNode, nil + } + + // Run the validation for the parent and update the block index with the parent's status. We first + // check if the parent has a cached status. If so, we use the cached status. Otherwise, we run + // the full validation algorithm on it, then index it and use the result. 
+ parentBlockNode, err := bc.validatePreviouslyIndexedBlockPoS(block.Header.PrevBlockHash) + if err != nil { + return blockNode, errors.Wrapf(err, "validateAndIndexBlockPoS: Problem validating previously indexed block: ") + } + + // Here's where it gets a little tricky. If the parent has a status of ValidateFailed, then we know we store + // this block as ValidateFailed. If the parent is not ValidateFailed, we ONLY store the block and move on. + // We don't want to store it as ValidateFailed because we don't know if it's actually invalid. + if parentBlockNode.IsValidateFailed() { + return bc.storeValidateFailedBlockWithWrappedError(block, errors.New("parent block is ValidateFailed")) + } + + // If the parent block still has a Stored status, it means that we weren't able to validate it + // despite trying. The current block will also be stored as a Stored block. + if !parentBlockNode.IsValidated() { + return bc.storeBlockInBlockIndex(block) + } + + // Check if the block is properly formed and passes all basic validations. + if err = bc.isValidBlockPoS(block); err != nil { + return bc.storeValidateFailedBlockWithWrappedError(block, err) + } + + // Validate the block's random seed signature + isValidRandomSeedSignature, err := bc.hasValidProposerRandomSeedSignaturePoS(block) + if err != nil { + var innerErr error + blockNode, innerErr = bc.storeBlockInBlockIndex(block) + if innerErr != nil { + return nil, errors.Wrapf(innerErr, "validateAndIndexBlockPoS: Problem adding block to block index: %v", err) + } + return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem validating random seed signature") + } + if !isValidRandomSeedSignature { + return bc.storeValidateFailedBlockWithWrappedError(block, errors.New("invalid random seed signature")) + } + + // We expect the utxoView for the parent block to be valid because we check that all ancestor blocks have + // been validated. 
+ utxoView, err := bc.getUtxoViewAtBlockHash(*block.Header.PrevBlockHash) + if err != nil { + // This should never happen. If the parent is validated and extends from the tip, then we should + // be able to build a UtxoView for it. This failure can only happen due to transient or badger issues. + // We store the block and return an error. + var innerErr error + blockNode, innerErr = bc.storeBlockInBlockIndex(block) + if innerErr != nil { + return nil, errors.Wrapf(innerErr, "validateAndIndexBlockPoS: Problem adding block to block index: %v", err) + } + return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem getting UtxoView") + } + + // Connect this block to the parent block's UtxoView. + txHashes := collections.Transform(block.Txns, func(txn *MsgDeSoTxn) *BlockHash { + return txn.Hash() + }) + + // If we fail to connect the block, then it means the block is invalid. We should store it as ValidateFailed. + if _, err = utxoView.ConnectBlock(block, txHashes, true, nil, block.Header.Height); err != nil { + // If it doesn't connect, we want to mark it as ValidateFailed. + return bc.storeValidateFailedBlockWithWrappedError(block, err) + } + + // We can now add this block to the block index since we have performed all basic validations. + blockNode, err = bc.storeValidatedBlockInBlockIndex(block) + if err != nil { + return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem adding block to block index: ") + } + return blockNode, nil +} + +// validatePreviouslyIndexedBlockPoS is a helper function that takes in a block hash for a previously +// cached block, and runs the validateAndIndexBlockPoS algorithm on it. It returns the resulting BlockNode. +func (bc *Blockchain) validatePreviouslyIndexedBlockPoS(blockHash *BlockHash) (*BlockNode, error) { + // Check if the block is already in the block index. If so, we check its current status first. 
+	blockNode, exists := bc.blockIndexByHash[*blockHash]
+	if !exists {
+		// We should never really hit this if the block has already been cached in the block index first.
+		// We check here anyway to be safe.
+		return nil, errors.New(
+			"validatePreviouslyIndexedBlockPoS: Block not found in block index. This should never happen.")
+	}
+
+	// If the block has already been validated or had validation failed, then we can return early.
+	if blockNode.IsValidateFailed() || blockNode.IsValidated() {
+		return blockNode, nil
+	}
+
+	// At this point we know that we have the block node in the index, but it hasn't gone through full
+	// validations yet. We fetch the block from the DB and run the full validation algorithm on it.
+	block, err := GetBlock(blockHash, bc.db, bc.snapshot)
+	if err != nil {
+		// If we can't fetch the block from the DB, we should return an error. This should never happen
+		// provided the block was cached in the block index and stored in the DB first.
+		return nil, errors.Wrapf(err, "validatePreviouslyIndexedBlockPoS: Problem fetching block from DB")
+	}
+
+	// If the block isn't validated or validate failed, we need to run the anti-spam checks on it.
+	passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block)
+	if err != nil {
+		// If we hit an error, that means there was an intermittent issue when trying to
+		// validate the QC or the leader.
+		return nil, errors.Wrap(err, "validatePreviouslyIndexedBlockPoS: Problem validating leader and QC")
+	}
+	if !passedSpamPreventionCheck {
+		// If the QC or Leader check failed, we'll never accept this block, but we've already stored it,
+		// so we need to mark it as ValidateFailed.
+		blockNode, err = bc.storeValidateFailedBlockInBlockIndex(block)
+		if err != nil {
+			return nil, errors.Wrap(err,
+				"validatePreviouslyIndexedBlockPoS: Problem adding validate failed block to block index")
+		}
+		return blockNode, nil
+	}
+
+	// We run the full validation algorithm on the block.
+	return bc.validateAndIndexBlockPoS(block)
+}
+
+// isValidBlockPoS performs all basic validations on a block as it relates to
+// the Blockchain struct. Any error resulting from this function implies that
+// the block is invalid.
+func (bc *Blockchain) isValidBlockPoS(block *MsgDeSoBlock) error {
+	// Surface Level validation of the block
+	if err := bc.isProperlyFormedBlockPoS(block); err != nil {
+		return err
+	}
+	if err := bc.isBlockTimestampValidRelativeToParentPoS(block); err != nil {
+		return err
+	}
+	// Validate Block Height
+	if err := bc.hasValidBlockHeightPoS(block); err != nil {
+		return err
+	}
+	// Validate View
+	if err := bc.hasValidBlockViewPoS(block); err != nil {
+		return err
+	}
+	return nil
+}
+
+// isBlockTimestampValidRelativeToParentPoS validates that the block's timestamp is
+// not less than its parent's timestamp. Note that equal timestamps are allowed;
+// the check below only rejects a strictly smaller timestamp.
+func (bc *Blockchain) isBlockTimestampValidRelativeToParentPoS(block *MsgDeSoBlock) error {
+	// Validate that the timestamp is not less than its parent.
+	parentBlock, exists := bc.blockIndexByHash[*block.Header.PrevBlockHash]
+	if !exists {
+		// Note: this should never happen as we only call this function after
+		// we've validated that all ancestors exist in the block index.
+		return RuleErrorMissingParentBlock
+	}
+	if block.Header.TstampNanoSecs < parentBlock.Header.TstampNanoSecs {
+		return RuleErrorPoSBlockTstampNanoSecsTooOld
+	}
+	return nil
+}
+
+// isProperlyFormedBlockPoS validates the block at a surface level. It checks
+// that the timestamp is valid, that the version of the header is valid,
+// and other general integrity checks (such as not malformed).
+func (bc *Blockchain) isProperlyFormedBlockPoS(block *MsgDeSoBlock) error {
+	// First make sure we have a non-nil header
+	if block.Header == nil {
+		return RuleErrorNilBlockHeader
+	}
+
+	// Make sure we have a prevBlockHash
+	if block.Header.PrevBlockHash == nil {
+		return RuleErrorNilPrevBlockHash
+	}
+
+	// Timestamp validation: reject blocks with timestamps too far in the future.
+	// Note: the nil-header and nil-PrevBlockHash checks above previously appeared twice
+	// back-to-back (copy-paste duplication); the duplicate has been removed.
+	// TODO: Add support for putting the drift into global params.
+	if block.Header.TstampNanoSecs > uint64(time.Now().UnixNano())+bc.params.DefaultBlockTimestampDriftNanoSecs {
+		return RuleErrorPoSBlockTstampNanoSecsInFuture
+	}
+
+	// Header validation
+	if block.Header.Version != HeaderVersion2 {
+		return RuleErrorInvalidPoSBlockHeaderVersion
+	}
+
+	// Malformed block checks
+	// All blocks must have at least one txn
+	if len(block.Txns) == 0 {
+		return RuleErrorBlockWithNoTxns
+	}
+	// Must have non-nil TxnConnectStatusByIndex
+	if block.TxnConnectStatusByIndex == nil {
+		return RuleErrorNilTxnConnectStatusByIndex
+	}
+
+	// Must have TxnConnectStatusByIndexHash
+	if block.Header.TxnConnectStatusByIndexHash == nil {
+		return RuleErrorNilTxnConnectStatusByIndexHash
+	}
+
+	// Make sure the TxnConnectStatusByIndex matches the TxnConnectStatusByIndexHash
+	if !(HashBitset(block.TxnConnectStatusByIndex).IsEqual(block.Header.TxnConnectStatusByIndexHash)) {
+		return RuleErrorTxnConnectStatusByIndexHashMismatch
+	}
+
+	// Require header to have either vote or timeout QC (exactly one of the two).
+	isTimeoutQCEmpty := block.Header.ValidatorsTimeoutAggregateQC.isEmpty()
+	isVoteQCEmpty := block.Header.ValidatorsVoteQC.isEmpty()
+	if isTimeoutQCEmpty && isVoteQCEmpty {
+		return RuleErrorNoTimeoutOrVoteQC
+	}
+
+	if !isTimeoutQCEmpty && !isVoteQCEmpty {
+		return RuleErrorBothTimeoutAndVoteQC
+	}
+
+	if block.Txns[0].TxnMeta.GetTxnType() != TxnTypeBlockReward {
+		return RuleErrorBlockDoesNotStartWithRewardTxn
+	}
+
+	if block.Header.ProposerVotingPublicKey.IsEmpty() {
+		return RuleErrorInvalidProposerVotingPublicKey
+	}
+
+	if block.Header.ProposerPublicKey == nil || block.Header.ProposerPublicKey.IsZeroPublicKey() {
+		return RuleErrorInvalidProposerPublicKey
+	}
+
+	if block.Header.ProposerRandomSeedSignature.IsEmpty() {
+		return RuleErrorInvalidProposerRandomSeedSignature
+	}
+
+	merkleRoot := block.Header.TransactionMerkleRoot
+
+	// We always need to check the merkle root.
+	if merkleRoot == nil {
+		return RuleErrorNilMerkleRoot
+	}
+	computedMerkleRoot, _, err := ComputeMerkleRoot(block.Txns)
+	if err != nil {
+		return errors.Wrapf(err, "isProperlyFormedBlockPoS: Problem computing merkle root")
+	}
+	if !merkleRoot.IsEqual(computedMerkleRoot) {
+		return RuleErrorInvalidMerkleRoot
+	}
+
+	// If a block has a vote QC, then the Header's proposed in view must be exactly one
+	// greater than the QC's proposed in view.
+	if !isVoteQCEmpty && block.Header.ProposedInView != block.Header.ValidatorsVoteQC.ProposedInView+1 {
+		return RuleErrorPoSVoteBlockViewNotOneGreaterThanValidatorsVoteQCView
+	}
+
+	// If a block has a timeout QC, then the Header's proposed in view must be exactly one
+	// greater than the QC's timed out view.
+	if !isTimeoutQCEmpty && block.Header.ProposedInView != block.Header.ValidatorsTimeoutAggregateQC.TimedOutView+1 {
+		return RuleErrorPoSTimeoutBlockViewNotOneGreaterThanValidatorsTimeoutQCView
+	}
+	return nil
+}
+
+// hasValidBlockHeightPoS validates the block height for a given block. First,
+// it checks that we've passed the PoS cutover fork height. Then it checks
+// that this block height is exactly one greater than its parent's block height.
+func (bc *Blockchain) hasValidBlockHeightPoS(block *MsgDeSoBlock) error {
+	blockHeight := block.Header.Height
+	// Reject PoS-formatted blocks that predate the PoS consensus cutover fork height.
+	if blockHeight < uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) {
+		return RuleErrorPoSBlockBeforeCutoverHeight
+	}
+	// Validate that the block height is exactly one greater than its parent.
+	parentBlock, exists := bc.blockIndexByHash[*block.Header.PrevBlockHash]
+	if !exists {
+		// Note: this should never happen as we only call this function after
+		// we've validated that all ancestors exist in the block index.
+		return RuleErrorMissingParentBlock
+	}
+	if block.Header.Height != parentBlock.Header.Height+1 {
+		return RuleErrorInvalidPoSBlockHeight
+	}
+	return nil
+}
+
+// hasValidBlockViewPoS validates the view for a given block relative to its parent.
+// If the parent block predates the PoS cutover, no view validation is performed
+// since a PoW parent has no view to compare against. Otherwise, if the block
+// carries a regular vote QC, its view must be exactly one greater than its
+// parent's view. If it carries a timeout QC, its view must be strictly greater
+// than its parent's view (more than one timeout may have elapsed).
+func (bc *Blockchain) hasValidBlockViewPoS(block *MsgDeSoBlock) error {
+	// Validate that the view is greater than the latest uncommitted block.
+	parentBlock, exists := bc.blockIndexByHash[*block.Header.PrevBlockHash]
+	if !exists {
+		// Note: this should never happen as we only call this function after
+		// we've validated that all ancestors exist in the block index.
+		return RuleErrorMissingParentBlock
+	}
+	// If the parent block was a PoW block, we can't validate this block's view
+	// in comparison.
+	if !blockNodeProofOfStakeCutoverMigrationTriggered(parentBlock.Height) {
+		return nil
+	}
+	// If our current block has a vote QC, then we need to validate that the
+	// view is exactly one greater than the latest uncommitted block.
+	if block.Header.ValidatorsTimeoutAggregateQC.isEmpty() {
+		if block.Header.ProposedInView != parentBlock.Header.ProposedInView+1 {
+			return RuleErrorPoSVoteBlockViewNotOneGreaterThanParent
+		}
+	} else {
+		// If our current block has a timeout QC, then we need to validate that the
+		// view is strictly greater than the latest uncommitted block's view.
+		if block.Header.ProposedInView <= parentBlock.Header.ProposedInView {
+			return RuleErrorPoSTimeoutBlockViewNotGreaterThanParent
+		}
+	}
+	return nil
+}
+
+// hasValidProposerRandomSeedSignaturePoS verifies the proposer's random seed signature
+// against the hash of the parent block's random seed signature.
+func (bc *Blockchain) hasValidProposerRandomSeedSignaturePoS(block *MsgDeSoBlock) (bool, error) {
+	// Validate that the leader proposed a valid random seed signature.
+	parentBlock, exists := bc.blockIndexByHash[*block.Header.PrevBlockHash]
+	if !exists {
+		// Note: this should never happen as we only call this function after
+		// we've validated that all ancestors exist in the block index.
+		return false, RuleErrorMissingParentBlock
+	}
+
+	prevRandomSeedHash, err := hashRandomSeedSignature(parentBlock.Header.ProposerRandomSeedSignature)
+	if err != nil {
+		return false, errors.Wrapf(err,
+			"hasValidProposerRandomSeedSignaturePoS: Problem converting prev random seed hash to RandomSeedHash")
+	}
+	// Verify the proposer's signature over the parent's random seed hash using the voting
+	// public key from this block's header.
+	isVerified, err := verifySignatureOnRandomSeedHash(
+		block.Header.ProposerVotingPublicKey, block.Header.ProposerRandomSeedSignature, prevRandomSeedHash)
+	if err != nil {
+		return false, errors.Wrapf(err,
+			"hasValidProposerRandomSeedSignaturePoS: Problem verifying proposer random seed signature")
+	}
+	return isVerified, nil
+}
+
+// hasValidProposerPartialSignaturePoS verifies the proposer's partial BLS signature over the
+// (view, blockHash) vote payload against the validator entry snapshotted at snapshotAtEpochNumber.
+// It returns (false, nil) — not an error — when the signature is missing, the proposer was not
+// snapshotted at that epoch, the snapshotted voting public key doesn't match, or the signature
+// does not verify.
+func (bav *UtxoView) hasValidProposerPartialSignaturePoS(block *MsgDeSoBlock, snapshotAtEpochNumber uint64) (
+	bool, error) {
+	votingPublicKey := block.Header.ProposerVotingPublicKey
+	proposerPartialSig := block.Header.ProposerVotePartialSignature
+	// If the proposer partial sig is empty, we can't validate it, so we treat the block
+	// as invalid (false with no error).
+	if proposerPartialSig.IsEmpty() {
+		return false, nil
+	}
+	// Get the snapshot validator entry for the proposer.
+	snapshotBlockProposerValidatorEntry, err := bav.GetSnapshotValidatorEntryByBLSPublicKey(
+		votingPublicKey, snapshotAtEpochNumber)
+	if err != nil {
+		return false, errors.Wrapf(err, "hasValidProposerPartialSignaturePoS: Problem getting snapshot validator entry")
+	}
+
+	// If the snapshot validator entry is nil or deleted, we didn't snapshot
+	// the validator at this epoch, so we will never accept this block.
+	if snapshotBlockProposerValidatorEntry == nil || snapshotBlockProposerValidatorEntry.isDeleted {
+		return false, nil
+	}
+	// If the voting public key from the block's header doesn't match the
+	// snapshotted voting public key, we will never accept this block.
+	if !snapshotBlockProposerValidatorEntry.VotingPublicKey.Eq(votingPublicKey) {
+		return false, nil
+	}
+	// Get the block's hash
+	blockHash, err := block.Header.Hash()
+	if err != nil {
+		return false, errors.Wrapf(err, "hasValidProposerPartialSignaturePoS: Problem hashing block")
+	}
+	// Now that we have the snapshot validator entry and validated that the
+	// voting public key from this block's header matches the snapshotted
+	// voting public key, we can validate the partial sig.
+	votePayload := consensus.GetVoteSignaturePayload(block.Header.ProposedInView, blockHash)
+	isVerified, err := votingPublicKey.Verify(proposerPartialSig, votePayload[:])
+	if err != nil {
+		return false, errors.Wrapf(err, "hasValidProposerPartialSignaturePoS: Problem verifying partial sig")
+	}
+	return isVerified, nil
+}
+
+// hasValidBlockProposerPoS validates that the proposer is the expected proposer for the
+// block height + view number pair. It returns a bool indicating whether
+// we confirmed that the leader is valid. If we receive an error, we are unsure
+// if the leader is invalid or not, so we return false.
+func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBlockProposer bool, _err error) {
+	currentEpochEntry, err := bav.GetCurrentEpochEntry()
+	if err != nil {
+		return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting current epoch entry")
+	}
+	leaders, err := bav.GetCurrentSnapshotLeaderSchedule()
+	if err != nil {
+		return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting leader schedule")
+	}
+	if len(leaders) == 0 {
+		// err is guaranteed to be nil here, so we must construct a new error rather than wrap
+		// it. The previous code wrapped the nil err, which silently returned (false, nil) and
+		// made this case indistinguishable from a plain "invalid proposer" result.
+		return false, errors.New("hasValidBlockProposerPoS: No leaders found in leader schedule")
+	}
+	// A block from before the epoch's initial height or view cannot be proposed in this epoch.
+	if block.Header.Height < currentEpochEntry.InitialBlockHeight {
+		return false, nil
+	}
+	if block.Header.ProposedInView < currentEpochEntry.InitialView {
+		return false, nil
+	}
+	heightDiff := block.Header.Height - currentEpochEntry.InitialBlockHeight
+	viewDiff := block.Header.ProposedInView - currentEpochEntry.InitialView
+	// The view must have advanced at least as much as the height since the epoch began;
+	// otherwise the (height, view) pair is inconsistent.
+	if viewDiff < heightDiff {
+		return false, nil
+	}
+	// We compute the current index in the leader schedule as follows:
+	// [(block.View - currentEpoch.InitialView) - (block.Height - currentEpoch.InitialHeight)] % len(leaders)
+	// The number of views that have elapsed since the start of the epoch is block.View - currentEpoch.InitialView.
+	// The number of blocks that have been added to the chain since the start of the epoch is
+	// block.Height - currentEpoch.InitialHeight. The difference between these two numbers is the number of timeouts
+	// that have occurred in this epoch. For each timeout, we need to go to the next leader in the schedule.
+	// If we have more timeouts than leaders in the schedule, we start from the top of the schedule again,
+	// which is why we take the modulo of the length of the leader schedule.
+	// A quick example: If we have 3 leaders in the schedule and the epoch started at height 10 and view 11,
+	// and the current block is at height 15 and view 17, then the number of timeouts that have occurred is
+	// (17 - 11) - (15 - 10) = 1. This means this block should be proposed by the 2nd leader in the schedule,
+	// which is at index 1.
+	leaderIdxUint64 := (viewDiff - heightDiff) % uint64(len(leaders))
+	if leaderIdxUint64 > math.MaxUint16 {
+		return false, nil
+	}
+	leaderIdx := uint16(leaderIdxUint64)
+	leaderEntry, err := bav.GetSnapshotLeaderScheduleValidator(leaderIdx)
+	if err != nil {
+		return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting leader schedule validator")
+	}
+	snapshotAtEpochNumber, err := bav.ComputeSnapshotEpochNumberForEpoch(currentEpochEntry.EpochNumber)
+	if err != nil {
+		return false, errors.Wrapf(err,
+			"hasValidBlockProposerPoS: Problem getting snapshot epoch number for epoch #%d",
+			currentEpochEntry.EpochNumber)
+	}
+	leaderEntryFromVotingPublicKey, err := bav.GetSnapshotValidatorEntryByBLSPublicKey(
+		block.Header.ProposerVotingPublicKey,
+		snapshotAtEpochNumber)
+	if err != nil {
+		return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting leader validator entry")
+	}
+	// If no leader is found from the voting public key, we'll never accept this block.
+	if leaderEntryFromVotingPublicKey == nil {
+		return false, nil
+	}
+	// The scheduled leader must match the proposer both by voting public key and by PKID.
+	if !leaderEntry.VotingPublicKey.Eq(block.Header.ProposerVotingPublicKey) ||
+		!leaderEntry.ValidatorPKID.Eq(leaderEntryFromVotingPublicKey.ValidatorPKID) {
+		return false, nil
+	}
+	return true, nil
+}
+
+// isValidPoSQuorumCertificate validates that the QC of this block is valid, meaning a super majority
+// of the validator set has voted (or timed out). Assumes ValidatorEntry list is sorted.
+func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validatorSet []*ValidatorEntry) error {
+	validators := toConsensusValidators(validatorSet)
+	// A timeout QC takes precedence: if present, validate it as an aggregate QC.
+	if !block.Header.ValidatorsTimeoutAggregateQC.isEmpty() {
+		if !consensus.IsValidSuperMajorityAggregateQuorumCertificate(
+			block.Header.ValidatorsTimeoutAggregateQC, validators) {
+			return RuleErrorInvalidTimeoutQC
+		}
+		return nil
+	}
+	// Otherwise, validate the regular vote QC.
+	if !consensus.IsValidSuperMajorityQuorumCertificate(block.Header.ValidatorsVoteQC, validators) {
+		return RuleErrorInvalidVoteQC
+	}
+	return nil
+}
+
+// getLineageFromCommittedTip returns the ancestors of the block provided up to, but not
+// including the committed tip. The first block in the returned slice is the first uncommitted
+// ancestor.
+func (bc *Blockchain) getLineageFromCommittedTip(block *MsgDeSoBlock) ([]*BlockNode, error) {
+	highestCommittedBlock, idx := bc.getCommittedTip()
+	if idx == -1 || highestCommittedBlock == nil {
+		return nil, errors.New("getLineageFromCommittedTip: No committed blocks found")
+	}
+	currentHash := block.Header.PrevBlockHash.NewBlockHash()
+	ancestors := []*BlockNode{}
+	// prevHeight and prevView track the child's height and view while walking backwards,
+	// so each ancestor can be checked against the block that builds on it.
+	prevHeight := block.Header.Height
+	prevView := block.Header.ProposedInView
+	for {
+		currentBlock, exists := bc.blockIndexByHash[*currentHash]
+		if !exists {
+			return nil, RuleErrorMissingAncestorBlock
+		}
+		if currentBlock.Hash.IsEqual(highestCommittedBlock.Hash) {
+			break
+		}
+		// An ancestor that is committed but is not the committed tip means this
+		// block does not extend the committed tip.
+		if currentBlock.IsCommitted() {
+			return nil, RuleErrorDoesNotExtendCommittedTip
+		}
+		if currentBlock.IsValidateFailed() {
+			return nil, RuleErrorAncestorBlockValidationFailed
+		}
+		// Views must strictly increase and heights must be sequential down the chain.
+		if currentBlock.Header.ProposedInView >= prevView {
+			return nil, RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock
+		}
+		if uint64(currentBlock.Header.Height)+1 != prevHeight {
+			return nil, RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight
+		}
+		ancestors = append(ancestors, currentBlock)
+		currentHash = currentBlock.Header.PrevBlockHash
+		prevHeight = currentBlock.Header.Height
+		prevView = currentBlock.Header.ProposedInView
+	}
+	// The walk collects blocks child-to-parent; reverse to return oldest-first.
+	return collections.Reverse(ancestors), nil
+}
+
+// getOrCreateBlockNodeFromBlockIndex returns the block node from the block index if it exists.
+// Otherwise, it creates a new block node and adds it to the blockIndexByHash and blockIndexByHeight.
+func (bc *Blockchain) getOrCreateBlockNodeFromBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) {
+	hash, err := block.Header.Hash()
+	if err != nil {
+		return nil, errors.Wrapf(err, "getOrCreateBlockNodeFromBlockIndex: Problem hashing block %v", block)
+	}
+	blockNode := bc.blockIndexByHash[*hash]
+	prevBlockNode := bc.blockIndexByHash[*block.Header.PrevBlockHash]
+	if blockNode != nil {
+		// If the block node already exists, we should set its parent if it doesn't have one already.
+		if blockNode.Parent == nil {
+			blockNode.Parent = prevBlockNode
+		}
+		return blockNode, nil
+	}
+	newBlockNode := NewBlockNode(prevBlockNode, hash, uint32(block.Header.Height), nil, nil, block.Header, StatusNone)
+	bc.addNewBlockNodeToBlockIndex(newBlockNode)
+	return newBlockNode, nil
+}
+
+// storeBlockInBlockIndex upserts the blocks into the in-memory block index and updates its status to
+// StatusBlockStored. It also writes the block to the block index in badger
+// by calling upsertBlockAndBlockNodeToDB.
+func (bc *Blockchain) storeBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) {
+	blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block)
+	if err != nil {
+		return nil, errors.Wrapf(err, "storeBlockInBlockIndex: Problem getting or creating block node")
+	}
+	// If the block is stored, then this is a no-op.
+	if blockNode.IsStored() {
+		return blockNode, nil
+	}
+	blockNode.Status |= StatusBlockStored
+	// If the DB update fails, then we should return an error.
+	if err = bc.upsertBlockAndBlockNodeToDB(block, blockNode, true); err != nil {
+		return nil, errors.Wrapf(err, "storeBlockInBlockIndex: Problem upserting block and block node to DB")
+	}
+	return blockNode, nil
+}
+
+// storeValidatedBlockInBlockIndex upserts the blocks into the in-memory block index and updates its status to
+// StatusBlockValidated. If it does not have the status StatusBlockStored already, we add that as we will
+// store the block in the DB after updating its status. It also writes the block to the block index in badger
+// by calling upsertBlockAndBlockNodeToDB.
+func (bc *Blockchain) storeValidatedBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) {
+	blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block)
+	if err != nil {
+		return nil, errors.Wrapf(err, "storeValidatedBlockInBlockIndex: Problem getting or creating block node")
+	}
+	// If the block is validated, then this is a no-op.
+	if blockNode.IsValidated() {
+		return blockNode, nil
+	}
+	blockNode.Status |= StatusBlockValidated
+	// If the BlockNode is not already stored, we should set its status to stored.
+	if !blockNode.IsStored() {
+		blockNode.Status |= StatusBlockStored
+	}
+	// If the DB update fails, then we should return an error.
+	if err = bc.upsertBlockAndBlockNodeToDB(block, blockNode, true); err != nil {
+		return nil, errors.Wrapf(err, "storeValidatedBlockInBlockIndex: Problem upserting block and block node to DB")
+	}
+	return blockNode, nil
+}
+
+// storeValidateFailedBlockInBlockIndex upserts the blocks into the in-memory block index and updates its status to
+// StatusBlockValidateFailed. If it does not have the status StatusBlockStored already, we add that as we will
+// store the block in the DB after updating its status. It also writes the block to the block index in badger
+// by calling upsertBlockAndBlockNodeToDB.
+func (bc *Blockchain) storeValidateFailedBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block) + if err != nil { + return nil, errors.Wrapf(err, "storeValidateFailedBlockInBlockIndex: Problem getting or creating block node") + } + // If the block has had validation failed, then this is a no-op. + if blockNode.IsValidateFailed() { + return blockNode, nil + } + // We should throw an error if the BlockNode is already Validated + if blockNode.IsValidated() { + return nil, errors.New( + "storeValidateFailedBlockInBlockIndex: can't set BlockNode to validate failed after it's already validated") + } + blockNode.Status |= StatusBlockValidateFailed + // If the BlockNode is not already stored, we should set it to stored. + if !blockNode.IsStored() { + blockNode.Status |= StatusBlockStored + } + // If the DB update fails, then we should return an error. + if err = bc.upsertBlockAndBlockNodeToDB(block, blockNode, false); err != nil { + return nil, errors.Wrapf(err, + "storeValidateFailedBlockInBlockIndex: Problem upserting block and block node to DB") + } + return blockNode, nil +} + +// upsertBlockAndBlockNodeToDB writes the BlockNode to the blockIndexByHash in badger and writes the full block +// to the db under the -> index. 
+func (bc *Blockchain) upsertBlockAndBlockNodeToDB(block *MsgDeSoBlock, blockNode *BlockNode, storeFullBlock bool, +) error { + // Store the block in badger + err := bc.db.Update(func(txn *badger.Txn) error { + if bc.snapshot != nil { + bc.snapshot.PrepareAncestralRecordsFlush() + defer bc.snapshot.StartAncestralRecordsFlush(true) + glog.V(2).Infof("upsertBlockAndBlockNodeToDB: Preparing snapshot flush") + } + if storeFullBlock { + if innerErr := PutBlockHashToBlockWithTxn(txn, bc.snapshot, block, bc.eventManager); innerErr != nil { + return errors.Wrapf(innerErr, "upsertBlockAndBlockNodeToDB: Problem calling PutBlockHashToBlockWithTxn") + } + } + + // TODO: if storeFullBlock = false, then we should probably remove the block from the DB? This can + // happen if we had a block stored in the DB but then determined that it would have failed validation. + // We would need to evict the block from the DB in that case. + + // Store the new block's node in our node index in the db under the + // -> + // index. + if innerErr := PutHeightHashToNodeInfoWithTxn( + txn, bc.snapshot, blockNode, false /*bitcoinNodes*/, bc.eventManager); innerErr != nil { + return errors.Wrapf(innerErr, + "upsertBlockAndBlockNodeToDB: Problem calling PutHeightHashToNodeInfo before validation") + } + + // Notice we don't call PutBestHash or PutUtxoOperationsForBlockWithTxn because we're not + // affecting those right now. + + return nil + }) + if err != nil { + return errors.Wrapf(err, "upsertBlockAndBlockNodeToDB: Problem putting block in db: ") + } + return nil +} + +// tryApplyNewTip attempts to apply the new tip to the best chain. It will do the following: +// 1. Check if we should perform a reorg. If so, it will handle the reorg. If reorging causes an error, +// return false and error. +// 2. Check if the incoming block extends the chain tip after reorg. If not, return false and nil +// 3. If the incoming block extends the chain tip, we can apply it by calling addBlockToBestChain. 
Return true and nil. +func (bc *Blockchain) tryApplyNewTip(blockNode *BlockNode, currentView uint64, lineageFromCommittedTip []*BlockNode) ( + _appliedNewTip bool, _err error) { + + // Check if the incoming block extends the chain tip. If so, we don't need to reorg + // and can just add this block to the best chain. + chainTip := bc.BlockTip() + if chainTip.Hash.IsEqual(blockNode.Header.PrevBlockHash) { + bc.addBlockToBestChain(blockNode) + return true, nil + } + // Check if we should perform a reorg here. + // If we shouldn't reorg AND the incoming block doesn't extend the chain tip, we know that + // the incoming block will not get applied as the new tip. + if !bc.shouldReorg(blockNode, currentView) { + return false, nil + } + + // We need to perform a reorg here. For simplicity, we remove all uncommitted blocks and then re-add them. + committedTip, idx := bc.getCommittedTip() + if committedTip == nil || idx == -1 { + // This is an edge case we'll never hit in practice since all the PoW blocks + // are committed. + return false, errors.New("tryApplyNewTip: No committed blocks found") + } + // Remove all uncommitted blocks. These are all blocks that come after the committedTip + // in the best chain. + // Delete all blocks from bc.bestChainMap that come after the highest committed block. + for ii := idx + 1; ii < len(bc.bestChain); ii++ { + delete(bc.bestChainMap, *bc.bestChain[ii].Hash) + } + // Shorten best chain back to committed tip. + bc.bestChain = bc.bestChain[:idx+1] + // Add the ancestors of the new tip to the best chain. + for _, ancestor := range lineageFromCommittedTip { + bc.addBlockToBestChain(ancestor) + } + // Add the new tip to the best chain. + bc.addBlockToBestChain(blockNode) + return true, nil +} + +// shouldReorg determines if we should reorg to the block provided. We should reorg if +// this block is proposed in a view greater than or equal to the currentView. 
Other +// functions have validated that this block is not extending from a committed block +// that is not the latest committed block, so there is no need to validate that here. +func (bc *Blockchain) shouldReorg(blockNode *BlockNode, currentView uint64) bool { + chainTip := bc.BlockTip() + // If this block extends from the chain tip, there's no need to reorg. + if chainTip.Hash.IsEqual(blockNode.Header.PrevBlockHash) { + return false + } + // If the block is proposed in a view less than the current view, there's no need to reorg. + return blockNode.Header.ProposedInView >= currentView +} + +// addBlockToBestChain adds the block to the best chain. +func (bc *Blockchain) addBlockToBestChain(blockNode *BlockNode) { + bc.bestChain = append(bc.bestChain, blockNode) + bc.bestChainMap[*blockNode.Hash] = blockNode +} + +// runCommitRuleOnBestChain commits the grandparent of the block if possible. +// Specifically, this updates the CommittedBlockStatus of its grandparent +// and flushes the view after connecting the grandparent block to the DB. +func (bc *Blockchain) runCommitRuleOnBestChain() error { + currentBlock := bc.BlockTip() + // If we can commit the grandparent, commit it. + // Otherwise, we can't commit it and return nil. + blockToCommit, canCommit := bc.canCommitGrandparent(currentBlock) + if !canCommit { + return nil + } + // Find all uncommitted ancestors of block to commit + _, idx := bc.getCommittedTip() + if idx == -1 { + // This is an edge case we'll never hit in practice since all the PoW blocks + // are committed. 
+ return errors.New("runCommitRuleOnBestChain: No committed blocks found") + } + uncommittedAncestors := []*BlockNode{} + for ii := idx + 1; ii < len(bc.bestChain); ii++ { + uncommittedAncestors = append(uncommittedAncestors, bc.bestChain[ii]) + if bc.bestChain[ii].Hash.IsEqual(blockToCommit) { + break + } + } + for ii := 0; ii < len(uncommittedAncestors); ii++ { + if err := bc.commitBlockPoS(uncommittedAncestors[ii].Hash); err != nil { + return errors.Wrapf(err, + "runCommitRuleOnBestChain: Problem committing block %v", uncommittedAncestors[ii].Hash.String()) + } + } + return nil +} + +// canCommitGrandparent determines if the grandparent of the current block can be committed. +// The grandparent can be committed if there exists a direct parent-child relationship +// between the grandparent and parent of the new block, meaning the grandparent and parent +// are proposed in consecutive views, and the "parent" is an ancestor of the incoming block +// (not necessarily consecutive views). Additionally, the grandparent must not already be committed. +func (bc *Blockchain) canCommitGrandparent(currentBlock *BlockNode) (_grandparentBlockHash *BlockHash, _canCommit bool, +) { + // TODO: Is it sufficient that the current block's header points to the parent + // or does it need to have something to do with the QC? + parent := bc.bestChainMap[*currentBlock.Header.PrevBlockHash] + grandParent := bc.bestChainMap[*parent.Header.PrevBlockHash] + if grandParent.IsCommitted() { + return nil, false + } + if grandParent.Header.ProposedInView+1 == parent.Header.ProposedInView { + // Then we can run the commit rule up to the grandparent! + return grandParent.Hash, true + } + return nil, false +} + +// commitBlockPoS commits the block with the given hash. Specifically, this updates the +// BlockStatus to include StatusBlockCommitted and flushes the view after connecting the block +// to the DB and updates relevant badger indexes with info about the block. 
+func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash) error { + // block must be in the best chain. we grab the block node from there. + blockNode, exists := bc.bestChainMap[*blockHash] + if !exists { + return errors.Errorf("commitBlockPoS: Block %v not found in best chain map", blockHash.String()) + } + // TODO: Do we want other validation in here? + if blockNode.IsCommitted() { + // Can't commit a block that's already committed. + return errors.Errorf("commitBlockPoS: Block %v is already committed", blockHash.String()) + } + block, err := GetBlock(blockHash, bc.db, bc.snapshot) + if err != nil { + return errors.Wrapf(err, "commitBlockPoS: Problem getting block from db %v", blockHash.String()) + } + // Connect a view up to the parent of the block we are committing. + utxoView, err := bc.getUtxoViewAtBlockHash(*block.Header.PrevBlockHash) + if err != nil { + return errors.Wrapf(err, "commitBlockPoS: Problem initializing UtxoView: ") + } + txHashes := collections.Transform(block.Txns, func(txn *MsgDeSoTxn) *BlockHash { + return txn.Hash() + }) + // Connect the block to the view! + utxoOpsForBlock, err := utxoView.ConnectBlock( + block, txHashes, true /*verifySignatures*/, bc.eventManager, block.Header.Height) + if err != nil { + // TODO: rule error handling? mark blocks invalid? + return errors.Wrapf(err, "commitBlockPoS: Problem connecting block to view: ") + } + // Put the block in the db + // Note: we're skipping postgres. + blockNode.Status |= StatusBlockCommitted + err = bc.db.Update(func(txn *badger.Txn) error { + if bc.snapshot != nil { + bc.snapshot.PrepareAncestralRecordsFlush() + defer bc.snapshot.StartAncestralRecordsFlush(true) + glog.V(2).Infof("commitBlockPoS: Preparing snapshot flush") + } + // Store the new block in the db under the + // -> + // index. 
+ if innerErr := PutBlockHashToBlockWithTxn(txn, bc.snapshot, block, bc.eventManager); innerErr != nil { + return errors.Wrapf(innerErr, "commitBlockPoS: Problem calling PutBlockHashToBlockWithTxn") + } + + // Store the new block's node in our node index in the db under the + // -> + // index. + if innerErr := PutHeightHashToNodeInfoWithTxn( + txn, bc.snapshot, blockNode, false /*bitcoinNodes*/, bc.eventManager); innerErr != nil { + return errors.Wrapf(innerErr, "commitBlockPoS: Problem calling PutHeightHashToNodeInfo before validation") + } + + // Set the best node hash to this one. Note the header chain should already + // be fully aware of this block so we shouldn't update it here. + if innerErr := PutBestHashWithTxn( + txn, bc.snapshot, blockNode.Hash, ChainTypeDeSoBlock, bc.eventManager); innerErr != nil { + return errors.Wrapf(innerErr, "commitBlockPoS: Problem calling PutBestHash after validation") + } + // Write the utxo operations for this block to the db, so we can have the + // ability to roll it back in the future. + if innerErr := PutUtxoOperationsForBlockWithTxn( + txn, bc.snapshot, uint64(blockNode.Height), blockNode.Hash, utxoOpsForBlock, bc.eventManager, + ); innerErr != nil { + return errors.Wrapf(innerErr, "commitBlockPoS: Problem writing utxo operations to db on simple add to tip") + } + if innerErr := utxoView.FlushToDBWithoutAncestralRecordsFlushWithTxn( + txn, uint64(blockNode.Height)); innerErr != nil { + return errors.Wrapf(innerErr, "commitBlockPoS: Problem flushing UtxoView to db") + } + return nil + }) + if err != nil { + return errors.Wrapf(err, "commitBlockPoS: Problem putting block in db: ") + } + + if bc.eventManager != nil { + bc.eventManager.blockConnected(&BlockEvent{ + Block: block, + UtxoView: utxoView, + UtxoOps: utxoOpsForBlock, + }) + } + // TODO: What else do we need to do in here? + return nil +} + +// GetUncommittedTipView builds a UtxoView to the uncommitted tip. 
+func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { + // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks + return bc.getUtxoViewAtBlockHash(*bc.BlockTip().Hash) +} + +// getUtxoViewAtBlockHash builds a UtxoView to the block provided. It does this by +// identifying all uncommitted ancestors of this block and then connecting those blocks. +func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, error) { + uncommittedAncestors := []*BlockNode{} + currentBlock := bc.blockIndexByHash[blockHash] + if currentBlock == nil { + return nil, errors.Errorf("getUtxoViewAtBlockHash: Block %v not found in block index", blockHash) + } + // If the provided block is committed, we need to make sure it's the committed tip. + // Otherwise, we return an error. + if currentBlock.IsCommitted() { + highestCommittedBlock, _ := bc.getCommittedTip() + if highestCommittedBlock == nil { + return nil, errors.Errorf("getUtxoViewAtBlockHash: No committed blocks found") + } + if !highestCommittedBlock.Hash.IsEqual(&blockHash) { + return nil, errors.Errorf( + "getUtxoViewAtBlockHash: Block %v is committed but not the committed tip", blockHash) + } + } + for !currentBlock.IsCommitted() { + uncommittedAncestors = append(uncommittedAncestors, currentBlock) + currentParentHash := currentBlock.Header.PrevBlockHash + if currentParentHash == nil { + return nil, errors.Errorf("getUtxoViewAtBlockHash: Block %v has nil PrevBlockHash", currentBlock.Hash) + } + currentBlock = bc.blockIndexByHash[*currentParentHash] + if currentBlock == nil { + return nil, errors.Errorf("getUtxoViewAtBlockHash: Block %v not found in block index", blockHash) + } + } + // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks + utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) + if err != nil { + return nil, errors.Wrapf(err, "getUtxoViewAtBlockHash: Problem initializing UtxoView") + } + 
for ii := len(uncommittedAncestors) - 1; ii >= 0; ii-- { + // We need to get these blocks from badger + fullBlock, err := GetBlock(uncommittedAncestors[ii].Hash, bc.db, bc.snapshot) + if err != nil { + return nil, errors.Wrapf(err, + "GetUncommittedTipView: Error fetching Block %v not found in block index", + uncommittedAncestors[ii].Hash.String()) + } + txnHashes := collections.Transform(fullBlock.Txns, func(txn *MsgDeSoTxn) *BlockHash { + return txn.Hash() + }) + _, err = utxoView.ConnectBlock(fullBlock, txnHashes, false, nil, fullBlock.Header.Height) + if err != nil { + hash, _ := fullBlock.Hash() + return nil, errors.Wrapf(err, "GetUncommittedTipView: Problem connecting block hash %v", hash.String()) + } + } + // Update the TipHash saved on the UtxoView to the blockHash provided. + utxoView.TipHash = &blockHash + return utxoView, nil +} + +// getCommittedTip returns the highest committed block and its index in the best chain. +func (bc *Blockchain) getCommittedTip() (*BlockNode, int) { + for ii := len(bc.bestChain) - 1; ii >= 0; ii-- { + if bc.bestChain[ii].IsCommitted() { + return bc.bestChain[ii], ii + } + } + return nil, -1 +} + +// GetSafeBlocks returns all headers of blocks from which the chain can safely extend. +// A safe block is defined as a block that has been validated and all of its +// ancestors have been validated and extending from this block would not +// change any committed blocks. This means we return the committed tip and +// all blocks from the committed tip that have been validated. +func (bc *Blockchain) GetSafeBlocks() ([]*MsgDeSoHeader, error) { + // First get committed tip. + committedTip, idx := bc.getCommittedTip() + if idx == -1 || committedTip == nil { + return nil, errors.New("GetSafeBlocks: No committed blocks found") + } + // Now get all blocks from the committed tip to the best chain tip. 
+ safeBlocks := []*MsgDeSoHeader{committedTip.Header} + maxHeightWithSafeBlocks := bc.getMaxSequentialBlockHeightAfter(uint64(committedTip.Height)) + for ii := uint64(committedTip.Height + 1); ii < maxHeightWithSafeBlocks+1; ii++ { + // If we don't have any blocks at this height, we know that any blocks at a later height are not safe blocks. + if !bc.hasBlockNodesIndexedAtHeight(ii) { + break + } + hasSeenValidatedBlockAtThisHeight := false + blockNodes := bc.getAllBlockNodesIndexedAtHeight(ii) + for _, blockNode := range blockNodes { + // TODO: Are there other conditions we should consider? + if blockNode.IsValidated() { + hasSeenValidatedBlockAtThisHeight = true + safeBlocks = append(safeBlocks, blockNode.Header) + } + } + // If we didn't see any validated blocks at this height, we know + // that no blocks at a later height can be validated and thus + // cannot be safe blocks. + if !hasSeenValidatedBlockAtThisHeight { + break + } + } + return safeBlocks, nil +} + +// getMaxSequentialBlockHeightAfter returns the max sequential block height after the starting height. +// If the blockIndexByHeight does not have any blocks at a certain height, we know that any blocks +// at a later height are not valid. 
+func (bc *Blockchain) getMaxSequentialBlockHeightAfter(startingHeight uint64) uint64 { + hasBlocksAtCurrentHeight := true + maxSequentialHeightWithBlocks := startingHeight + for currentHeight := startingHeight; hasBlocksAtCurrentHeight; currentHeight++ { + maxSequentialHeightWithBlocks = currentHeight + hasBlocksAtCurrentHeight = bc.hasBlockNodesIndexedAtHeight(currentHeight) + } + return maxSequentialHeightWithBlocks +} + +const ( + RuleErrorNilBlockHeader RuleError = "RuleErrorNilBlockHeader" + RuleErrorNilPrevBlockHash RuleError = "RuleErrorNilPrevBlockHash" + RuleErrorPoSBlockTstampNanoSecsTooOld RuleError = "RuleErrorPoSBlockTstampNanoSecsTooOld" + RuleErrorPoSBlockTstampNanoSecsInFuture RuleError = "RuleErrorPoSBlockTstampNanoSecsInFuture" + RuleErrorInvalidPoSBlockHeaderVersion RuleError = "RuleErrorInvalidPoSBlockHeaderVersion" + RuleErrorNilTxnConnectStatusByIndex RuleError = "RuleErrorNilTxnConnectStatusByIndex" + RuleErrorNilTxnConnectStatusByIndexHash RuleError = "RuleErrorNilTxnConnectStatusByIndexHash" + RuleErrorTxnConnectStatusByIndexHashMismatch RuleError = "RuleErrorTxnConnectStatusByIndexHashMismatch" + RuleErrorNoTimeoutOrVoteQC RuleError = "RuleErrorNoTimeoutOrVoteQC" + RuleErrorBothTimeoutAndVoteQC RuleError = "RuleErrorBothTimeoutAndVoteQC" + RuleErrorBlockWithNoTxns RuleError = "RuleErrorBlockWithNoTxns" + RuleErrorBlockDoesNotStartWithRewardTxn RuleError = "RuleErrorBlockDoesNotStartWithRewardTxn" + RuleErrorMissingParentBlock RuleError = "RuleErrorMissingParentBlock" + RuleErrorMissingAncestorBlock RuleError = "RuleErrorMissingAncestorBlock" + RuleErrorDoesNotExtendCommittedTip RuleError = "RuleErrorDoesNotExtendCommittedTip" + RuleErrorAncestorBlockValidationFailed RuleError = "RuleErrorAncestorBlockValidationFailed" + RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock RuleError = "RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock" + RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight RuleError = 
"RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight" + + RuleErrorNilMerkleRoot RuleError = "RuleErrorNilMerkleRoot" + RuleErrorInvalidMerkleRoot RuleError = "RuleErrorInvalidMerkleRoot" + RuleErrorInvalidProposerVotingPublicKey RuleError = "RuleErrorInvalidProposerVotingPublicKey" + RuleErrorInvalidProposerPublicKey RuleError = "RuleErrorInvalidProposerPublicKey" + RuleErrorInvalidProposerRandomSeedSignature RuleError = "RuleErrorInvalidProposerRandomSeedSignature" + + RuleErrorInvalidPoSBlockHeight RuleError = "RuleErrorInvalidPoSBlockHeight" + RuleErrorPoSBlockBeforeCutoverHeight RuleError = "RuleErrorPoSBlockBeforeCutoverHeight" + + RuleErrorPoSVoteBlockViewNotOneGreaterThanParent RuleError = "RuleErrorPoSVoteBlockViewNotOneGreaterThanParent" + RuleErrorPoSVoteBlockViewNotOneGreaterThanValidatorsVoteQCView RuleError = "RuleErrorPoSVoteBlockViewNotOneGreaterThanValidatorsVoteQCView" + RuleErrorPoSTimeoutBlockViewNotGreaterThanParent RuleError = "RuleErrorPoSTimeoutBlockViewNotGreaterThanParent" + RuleErrorPoSTimeoutBlockViewNotOneGreaterThanValidatorsTimeoutQCView RuleError = "RuleErrorPoSTimeoutBlockViewNotOneGreaterThanValidatorsTimeoutQCView" + + RuleErrorInvalidVoteQC RuleError = "RuleErrorInvalidVoteQC" + RuleErrorInvalidTimeoutQC RuleError = "RuleErrorInvalidTimeoutQC" +) diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go new file mode 100644 index 000000000..211a07d00 --- /dev/null +++ b/lib/pos_blockchain_test.go @@ -0,0 +1,2733 @@ +//go:build relic + +package lib + +import ( + "bytes" + "fmt" + "math" + "math/rand" + "testing" + "time" + + "crypto/sha256" + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/collections/bitset" + "github.com/deso-protocol/core/consensus" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" +) + +// TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS tests that +// isProperlyFormedBlockPoS and 
isBlockTimestampValidRelativeToParentPoS work as expected. +// It first creates a valid block and ensures that the validation passes. +// Then it modifies that block to trigger each validation error and ensures that +// we hit the expected error. +func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t *testing.T) { + bc, params, _ := NewTestBlockchain(t) + // TODO: update for PoS + mempool, miner := NewTestMiner(t, bc, params, true) + + // Mine a few blocks to give the senderPkString some money. + var err error + for ii := 0; ii < 10; ii++ { + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + // Create a block with a valid header. + randomPayload := RandomBytes(256) + require.NoError(t, err) + randomBLSPrivateKey := _generateRandomBLSPrivateKey(t) + signature, err := randomBLSPrivateKey.Sign(randomPayload) + require.NoError(t, err) + txns := []*MsgDeSoTxn{ + { + TxnMeta: &BlockRewardMetadataa{}, + }, + } + merkleRoot, _, err := ComputeMerkleRoot(txns) + require.NoError(t, err) + block := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + Version: 2, + TstampNanoSecs: bc.BlockTip().Header.TstampNanoSecs + 10, + Height: 2, + ProposedInView: 3, + PrevBlockHash: bc.BlockTip().Hash, + ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ + TimedOutView: 2, + ValidatorsHighQC: &QuorumCertificate{ + BlockHash: bc.BlockTip().Hash, + ProposedInView: 1, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + }, + ValidatorsTimeoutHighQCViews: []uint64{28934}, + ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + }, + ProposerRandomSeedSignature: signature, + ProposerPublicKey: NewPublicKey(RandomBytes(33)), + ProposerVotingPublicKey: randomBLSPrivateKey.PublicKey(), + TransactionMerkleRoot: merkleRoot, + TxnConnectStatusByIndexHash: 
HashBitset(bitset.NewBitset().Set(0, true)), + }, + Txns: txns, + TxnConnectStatusByIndex: bitset.NewBitset().Set(0, true), + } + + // Validate the block with a valid timeout QC and header. + err = bc.isProperlyFormedBlockPoS(block) + // There should be no error. + require.Nil(t, err) + + // Timeout QC must have at least one transaction and that transaction must be a block reward txn. + block.Txns = nil + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorBlockWithNoTxns) + + block.Txns = []*MsgDeSoTxn{ + { + TxnMeta: &BasicTransferMetadata{}, + }, + } + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorBlockDoesNotStartWithRewardTxn) + // Revert txns to be valid. + block.Txns = []*MsgDeSoTxn{ + { + TxnMeta: &BlockRewardMetadataa{}, + }, + } + + // Header's Proposed in view must be exactly one greater than the timeout QC's timed out view + block.Header.ProposedInView = 2 + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorPoSTimeoutBlockViewNotOneGreaterThanValidatorsTimeoutQCView) + + // Revert proposed in view + block.Header.ProposedInView = 3 + + // Timeout QC also must have a merkle root + block.Header.TransactionMerkleRoot = nil + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorNilMerkleRoot) + + // Make sure block can't have both timeout and vote QC. + validatorVoteQC := &QuorumCertificate{ + BlockHash: bc.BlockTip().Hash, + ProposedInView: 2, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + } + block.Header.ValidatorsVoteQC = validatorVoteQC + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorBothTimeoutAndVoteQC) + + // Make sure block has either timeout or vote QC. 
+ block.Header.ValidatorsTimeoutAggregateQC = nil + block.Header.ValidatorsVoteQC = nil + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorNoTimeoutOrVoteQC) + + // Reset validator vote QC. + block.Header.ValidatorsVoteQC = validatorVoteQC + + // Validate the block with a valid vote QC and header. Vote QCs must have at least 1 transaction + // and first transaction must be a block reward. + block.Txns = []*MsgDeSoTxn{ + // The validation just checks the length of transactions. + // Connecting the block elsewhere will ensure that the transactions themselves are valid. + { + TxnMeta: &BlockRewardMetadataa{}, + }, + } + merkleRoot, _, err = ComputeMerkleRoot(block.Txns) + require.NoError(t, err) + block.Header.TransactionMerkleRoot = merkleRoot + // There should be no error. + err = bc.isProperlyFormedBlockPoS(block) + require.Nil(t, err) + + // Vote QC must have Header's Proposed in view exactly one greater than vote QC's proposed in view. + block.Header.ProposedInView = 2 + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorPoSVoteBlockViewNotOneGreaterThanValidatorsVoteQCView) + + // Revert proposed in view + block.Header.ProposedInView = 3 + + // Block must have non-nil Merkle root if we have non-zero transactions + block.Header.TransactionMerkleRoot = nil + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorNilMerkleRoot) + + // Block must have a matching merkle root + block.Header.TransactionMerkleRoot = &ZeroBlockHash + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorInvalidMerkleRoot) + + // Reset transactions + block.Txns = []*MsgDeSoTxn{ + { + TxnMeta: &BlockRewardMetadataa{}, + }, + } + + // TxnConnectStatusByIndex tests + // TxnConnectStatusByIndex must be non-nil + block.TxnConnectStatusByIndex = nil + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorNilTxnConnectStatusByIndex) + // TxnConnectStatusByIndexHash must be non-nil + 
block.TxnConnectStatusByIndex = bitset.NewBitset().Set(0, true) + block.Header.TxnConnectStatusByIndexHash = nil + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorNilTxnConnectStatusByIndexHash) + // The hashed version of TxnConnectStatusByIndex must match the actual TxnConnectStatusByIndexHash + block.Header.TxnConnectStatusByIndexHash = HashBitset(bitset.NewBitset().Set(0, false)) + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorTxnConnectStatusByIndexHashMismatch) + // Reset TxnConnectStatusByIndexHash + block.Header.TxnConnectStatusByIndexHash = HashBitset(block.TxnConnectStatusByIndex) + + // Block must have valid proposer voting public key + block.Header.ProposerVotingPublicKey = nil + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorInvalidProposerVotingPublicKey) + + block.Header.ProposerVotingPublicKey = &bls.PublicKey{} + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorInvalidProposerVotingPublicKey) + + // Reset proposer voting public key + block.Header.ProposerVotingPublicKey = randomBLSPrivateKey.PublicKey() + + // Block must have valid proposer public key + block.Header.ProposerPublicKey = nil + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorInvalidProposerPublicKey) + + block.Header.ProposerPublicKey = &ZeroPublicKey + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorInvalidProposerPublicKey) + + block.Header.ProposerPublicKey = NewPublicKey(RandomBytes(33)) + + // Block must have valid proposer random seed hash + block.Header.ProposerRandomSeedSignature = nil + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorInvalidProposerRandomSeedSignature) + + block.Header.ProposerRandomSeedSignature = &bls.Signature{} + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorInvalidProposerRandomSeedSignature) + + block.Header.ProposerRandomSeedSignature = 
signature + + // Timestamp validations + // Block timestamp must be greater than the previous block timestamp + block.Header.TstampNanoSecs = bc.BlockTip().Header.GetTstampSecs() - 1 + err = bc.isBlockTimestampValidRelativeToParentPoS(block) + require.Equal(t, err, RuleErrorPoSBlockTstampNanoSecsTooOld) + + // Block timestamps can't be in the future. + block.Header.TstampNanoSecs = uint64(time.Now().UnixNano() + (11 * time.Minute).Nanoseconds()) + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorPoSBlockTstampNanoSecsInFuture) + + // Revert the Header's timestamp + block.Header.TstampNanoSecs = bc.BlockTip().Header.TstampNanoSecs + 10 + + // Block Header version must be 2 + block.Header.Version = 1 + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorInvalidPoSBlockHeaderVersion) + + // Revert block header version + block.Header.Version = 2 + + // Nil prev block hash not allowed + block.Header.PrevBlockHash = nil + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorNilPrevBlockHash) + + // Parent must exist in the block index. + block.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) + err = bc.isBlockTimestampValidRelativeToParentPoS(block) + require.Equal(t, err, RuleErrorMissingParentBlock) + + // Nil block header not allowed + block.Header = nil + err = bc.isProperlyFormedBlockPoS(block) + require.Equal(t, err, RuleErrorNilBlockHeader) +} + +// TestHasValidBlockHeight tests that hasValidBlockHeightPoS works as expected. +// It ensures that the block does not have a height before the PoS cut over height, +// that the block's height is one greater than its parent, and that the block's parent +// exists. 
+func TestHasValidBlockHeight(t *testing.T) { + bc, _, _ := NewTestBlockchain(t) + hash := NewBlockHash(RandomBytes(32)) + nowTimestamp := uint64(time.Now().UnixNano()) + genesisBlock := NewBlockNode(nil, hash, 1, nil, nil, &MsgDeSoHeader{ + Version: 2, + TstampNanoSecs: nowTimestamp - uint64(time.Minute.Nanoseconds()), + Height: 1, + ProposedInView: 1, + ValidatorsVoteQC: nil, + ValidatorsTimeoutAggregateQC: nil, + }, StatusBlockStored|StatusBlockValidated) + bc.bestChain = []*BlockNode{genesisBlock} + bc.blockIndexByHash[*genesisBlock.Hash] = genesisBlock + // Create a block with a valid header. + randomPayload := RandomBytes(256) + randomBLSPrivateKey := _generateRandomBLSPrivateKey(t) + signature, err := randomBLSPrivateKey.Sign(randomPayload) + require.NoError(t, err) + block := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + PrevBlockHash: genesisBlock.Hash, + Version: 2, + TstampNanoSecs: uint64(time.Now().UnixNano()) - 10, + Height: 2, + ProposedInView: 1, + ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ + TimedOutView: 2, + ValidatorsHighQC: &QuorumCertificate{ + BlockHash: bc.BlockTip().Hash, + ProposedInView: bc.BlockTip().Header.ProposedInView, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + }, + ValidatorsTimeoutHighQCViews: []uint64{28934}, + ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + }, + }, + Txns: nil, + } + + // validate that we've cutover to PoS + bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 3 + err = bc.hasValidBlockHeightPoS(block) + require.Equal(t, err, RuleErrorPoSBlockBeforeCutoverHeight) + + // Update the fork height + bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 0 + + err = bc.hasValidBlockHeightPoS(block) + require.Nil(t, err) + + block.Header.Height = 1 + err = bc.hasValidBlockHeightPoS(block) + require.Equal(t, err, 
RuleErrorInvalidPoSBlockHeight) + + block.Header.Height = 2 + bc.blockIndexByHash = map[BlockHash]*BlockNode{} + err = bc.hasValidBlockHeightPoS(block) + require.Equal(t, err, RuleErrorMissingParentBlock) +} + +// TestUpsertBlockAndBlockNodeToDB tests that upsertBlockAndBlockNodeToDB works as expected. +// It is tested by calling the wrapper functions storeBlockInBlockIndex and storeValidatedBlockInBlockIndex. +func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { + bc, _, _ := NewTestBlockchain(t) + GlobalDeSoParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 0 + resetGlobalDeSoParams := func() { + GlobalDeSoParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = math.MaxUint32 + } + t.Cleanup(resetGlobalDeSoParams) + hash1 := NewBlockHash(RandomBytes(32)) + hash2 := NewBlockHash(RandomBytes(32)) + genesisNode := NewBlockNode(nil, hash1, 1, nil, nil, &MsgDeSoHeader{ + Version: 2, + Height: 1, + ProposedInView: 1, + }, StatusBlockStored|StatusBlockValidated) + block2 := NewBlockNode(genesisNode, hash2, 2, nil, nil, &MsgDeSoHeader{ + Version: 2, + Height: 2, + ProposedInView: 2, + ValidatorsVoteQC: nil, + ValidatorsTimeoutAggregateQC: nil, + }, StatusBlockStored|StatusBlockValidated) + bc.blockIndexByHash = map[BlockHash]*BlockNode{ + *hash1: genesisNode, + *hash2: block2, + } + randomPayload := RandomBytes(256) + randomBLSPrivateKey := _generateRandomBLSPrivateKey(t) + signature, err := randomBLSPrivateKey.Sign(randomPayload) + voteQC := &QuorumCertificate{ + BlockHash: bc.BlockTip().Hash, + ProposedInView: 1, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + } + require.NoError(t, err) + blsPrivKey := _generateRandomBLSPrivateKey(t) + block := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + PrevBlockHash: hash2, + Version: 2, + TstampNanoSecs: uint64(time.Now().UnixNano()) - 10, + Height: 2, + ProposedInView: 1, + ProposerPublicKey: NewPublicKey(RandomBytes(33)), + 
ProposerVotingPublicKey: blsPrivKey.PublicKey(), + ProposerRandomSeedSignature: signature, + ProposerVotePartialSignature: signature, + ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ + TimedOutView: 2, + ValidatorsHighQC: voteQC, + ValidatorsTimeoutHighQCViews: []uint64{28934}, + ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + }, + TxnConnectStatusByIndexHash: NewBlockHash(bitset.NewBitset().ToBytes()), + }, + Txns: []*MsgDeSoTxn{ + { + TxnMeta: &BlockRewardMetadataa{}, + }, + }, + TxnConnectStatusByIndex: bitset.NewBitset(), + } + blockNode, err := bc.storeBlockInBlockIndex(block) + require.NoError(t, err) + newHash, err := block.Hash() + require.NoError(t, err) + // Check the block index by hash + blockNodeFromIndex, exists := bc.blockIndexByHash[*newHash] + require.True(t, exists) + require.True(t, blockNodeFromIndex.Hash.IsEqual(blockNode.Hash)) + require.Equal(t, blockNodeFromIndex.Height, uint32(2)) + require.True(t, blockNodeFromIndex.IsStored()) + require.False(t, blockNodeFromIndex.IsValidated()) + // Check the block index by height + byHeightBlockNodes, exists := bc.blockIndexByHeight[2] + require.True(t, exists) + require.Len(t, byHeightBlockNodes, 1) + require.True(t, byHeightBlockNodes[*newHash].Hash.IsEqual(newHash)) + require.True(t, bc.hasBlockNodesIndexedAtHeight(2)) + require.Len(t, bc.getAllBlockNodesIndexedAtHeight(2), 1) + // Check the DB for the block + uncommittedBlock, err := GetBlock(newHash, bc.db, bc.snapshot) + require.NoError(t, err) + uncommittedBytes, err := uncommittedBlock.ToBytes(false) + require.NoError(t, err) + origBlockBytes, err := block.ToBytes(false) + require.NoError(t, err) + require.True(t, bytes.Equal(uncommittedBytes, origBlockBytes)) + // Okay now we update the status of the block to include validated. 
+ blockNode, err = bc.storeValidatedBlockInBlockIndex(block) + require.NoError(t, err) + blockNodeFromIndex, exists = bc.blockIndexByHash[*newHash] + require.True(t, exists) + require.True(t, blockNodeFromIndex.Hash.IsEqual(blockNode.Hash)) + require.Equal(t, blockNodeFromIndex.Height, uint32(2)) + require.True(t, blockNodeFromIndex.IsStored()) + require.True(t, blockNodeFromIndex.IsValidated()) + // Check the block index by height. + byHeightBlockNodes, exists = bc.blockIndexByHeight[2] + require.True(t, exists) + require.Len(t, byHeightBlockNodes, 1) + require.True(t, byHeightBlockNodes[*newHash].Hash.IsEqual(newHash)) + require.True(t, byHeightBlockNodes[*newHash].IsValidated()) + require.True(t, bc.hasBlockNodesIndexedAtHeight(2)) + require.Len(t, bc.getAllBlockNodesIndexedAtHeight(2), 1) + + // Okay now we'll put in another block at the same height. + // Update the random seed hash so we have a new hash for the block. + randomSig, err := (&bls.Signature{}).FromBytes(RandomBytes(32)) + block.Header.ProposerRandomSeedSignature = randomSig + updatedBlockHash, err := block.Hash() + require.NoError(t, err) + require.False(t, updatedBlockHash.IsEqual(newHash)) + + // Okay now put this new block in there. + blockNode, err = bc.storeBlockInBlockIndex(block) + require.NoError(t, err) + // Make sure the blockIndexByHash is correct. 
+ updatedBlockNode, exists := bc.blockIndexByHash[*updatedBlockHash] + require.True(t, exists) + require.True(t, updatedBlockNode.Hash.IsEqual(updatedBlockHash)) + require.Equal(t, updatedBlockNode.Height, uint32(2)) + require.True(t, updatedBlockNode.IsStored()) + require.False(t, updatedBlockNode.IsValidated()) + // Make sure the blockIndexByHeight is correct + byHeightBlockNodes, exists = bc.blockIndexByHeight[2] + require.True(t, exists) + require.Len(t, byHeightBlockNodes, 2) + require.True(t, byHeightBlockNodes[*newHash].Hash.IsEqual(newHash)) + require.True(t, byHeightBlockNodes[*updatedBlockHash].Hash.IsEqual(updatedBlockHash)) + require.True(t, bc.hasBlockNodesIndexedAtHeight(2)) + require.Len(t, bc.getAllBlockNodesIndexedAtHeight(2), 2) + + // If we're missing a field in the header, we should get an error + // as we can't compute the hash. + block.Header.ProposerPublicKey = nil + _, err = bc.storeBlockInBlockIndex(block) + require.Error(t, err) +} + +// TestHasValidBlockView tests that hasValidBlockViewPoS works as expected. +// If the block has a vote QC, it ensures that the block's view is exactly +// one greater than its parent's view. +// If the block has a timeout QC, it ensures that the block's view is +// greater than its parent's view. 
+func TestHasValidBlockViewPoS(t *testing.T) { + setBalanceModelBlockHeights(t) + bc, _, _ := NewTestBlockchain(t) + hash1 := NewBlockHash(RandomBytes(32)) + hash2 := NewBlockHash(RandomBytes(32)) + genesisNode := NewBlockNode(nil, hash1, 1, nil, nil, &MsgDeSoHeader{ + Version: 2, + Height: 1, + ProposedInView: 1, + }, StatusBlockStored|StatusBlockValidated) + block2 := NewBlockNode(genesisNode, hash2, 2, nil, nil, &MsgDeSoHeader{ + Version: 2, + Height: 2, + ProposedInView: 2, + ValidatorsVoteQC: nil, + ValidatorsTimeoutAggregateQC: nil, + }, StatusBlockStored|StatusBlockValidated) + bc.bestChain = []*BlockNode{ + genesisNode, + block2, + } + bc.blockIndexByHash = map[BlockHash]*BlockNode{ + *hash1: genesisNode, + *hash2: block2, + } + randomPayload := RandomBytes(256) + randomBLSPrivateKey := _generateRandomBLSPrivateKey(t) + signature, err := randomBLSPrivateKey.Sign(randomPayload) + voteQC := &QuorumCertificate{ + BlockHash: bc.BlockTip().Hash, + ProposedInView: 1, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + } + require.NoError(t, err) + block := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + PrevBlockHash: hash2, + Version: 2, + TstampNanoSecs: uint64(time.Now().UnixNano()) - 10, + Height: 2, + ProposedInView: 1, + ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ + TimedOutView: 2, + ValidatorsHighQC: voteQC, + ValidatorsTimeoutHighQCViews: []uint64{28934}, + ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ + Signature: signature, + SignersList: bitset.NewBitset(), + }, + }, + }, + Txns: nil, + } + + block.Header.ProposedInView = 2 + + // Blocks with timeout QCs must have a view strictly greater than the parent. + err = bc.hasValidBlockViewPoS(block) + require.Equal(t, err, RuleErrorPoSTimeoutBlockViewNotGreaterThanParent) + + // Any arbitrary number GREATER than the parent's view is valid. 
+ block.Header.ProposedInView = 10 + err = bc.hasValidBlockViewPoS(block) + require.Nil(t, err) + + // Now we set the timeout QC to nil and provide a vote QC, with height = 2 + block.Header.ValidatorsTimeoutAggregateQC = nil + block.Header.ValidatorsVoteQC = voteQC + block.Header.ProposedInView = 2 + err = bc.hasValidBlockViewPoS(block) + require.Equal(t, err, RuleErrorPoSVoteBlockViewNotOneGreaterThanParent) + + // An arbitrary number greater than its parents should fail. + block.Header.ProposedInView = 10 + err = bc.hasValidBlockViewPoS(block) + require.Equal(t, err, RuleErrorPoSVoteBlockViewNotOneGreaterThanParent) + + // Exactly one great w/ vote QC should pass. + block.Header.ProposedInView = 3 + err = bc.hasValidBlockViewPoS(block) + require.Nil(t, err) +} + +// TestHasValidBlockProposerPoS tests that hasValidBlockProposerPoS works as expected. +// It registers 7 validators and stakes to themselves and then makes sure we can +// validate the block proposer for a valid block and makes sure we hit the appropriate +// RuleError if the block proposer is invalid for any reason. +func TestHasValidBlockProposerPoS(t *testing.T) { + // Initialize balance model fork heights. 
+ setBalanceModelBlockHeights(t) + + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForEpochCompleteTest(t) + + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m5", senderPkString, m5Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "m6", senderPkString, m6Pub, senderPrivString, 1e3) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e3) + + m0PKID := *DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m0PkBytes).PKID + m1PKID := *DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m1PkBytes).PKID + m2PKID := *DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m2PkBytes).PKID + m3PKID := *DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m3PkBytes).PKID + m4PKID := *DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m4PkBytes).PKID + m5PKID := *DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m5PkBytes).PKID + m6PKID := *DBGetPKIDEntryForPublicKey(testMeta.db, testMeta.chain.snapshot, m6PkBytes).PKID + + validatorPKIDs := []PKID{m0PKID, m1PKID, m2PKID, m3PKID, m4PKID, m5PKID, m6PKID} + _ = validatorPKIDs + blockHeight := uint64(testMeta.chain.blockTip().Height) + 1 + incrBlockHeight := func() uint64 { + blockHeight += 1 + return blockHeight + } + viewNumber := uint64(0) + incrViewNumber := func() uint64 { + viewNumber += 1 + return viewNumber + } + + // Seed a CurrentEpochEntry. 
+ tmpUtxoView := _newUtxoView(testMeta) + tmpUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 0, FinalBlockHeight: blockHeight + 1}) + require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) + + // For these tests, we set each epoch duration to only one block. + testMeta.params.DefaultEpochDurationNumBlocks = uint64(1) + + { + + // We need to reset the UniversalUtxoView since the RegisterAsValidator and Stake + // txn test helper utils use and flush the UniversalUtxoView. Otherwise, the + // updated GlobalParamsEntry will be overwritten by the default one cached in + // the UniversalUtxoView when it is flushed. + testMeta.mempool.universalUtxoView._ResetViewMappingsAfterFlush() + } + + // All validators register + stake to themselves. + _registerValidatorAndStake(testMeta, m0Pub, m0Priv, 0, 100, false) + _registerValidatorAndStake(testMeta, m1Pub, m1Priv, 0, 200, false) + _registerValidatorAndStake(testMeta, m2Pub, m2Priv, 0, 300, false) + _registerValidatorAndStake(testMeta, m3Pub, m3Priv, 0, 400, false) + _registerValidatorAndStake(testMeta, m4Pub, m4Priv, 0, 500, false) + _registerValidatorAndStake(testMeta, m5Pub, m5Priv, 0, 600, false) + _registerValidatorAndStake(testMeta, m6Pub, m6Priv, 0, 700, false) + + // Get current epoch number + utxoView := _newUtxoView(testMeta) + currentEpochNumber, err := utxoView.GetCurrentEpochNumber() + require.NoError(t, err) + + // Run the epoch complete hook + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + + // Get leader schedule from DB + leaderSchedule, err := DBSeekSnapshotLeaderSchedule(testMeta.db, currentEpochNumber) + require.NoError(t, err) + require.Equal(t, len(leaderSchedule), len(validatorPKIDs)) + // Make sure all the validators are in the leader schedule. 
+ for _, pkid := range leaderSchedule { + require.Contains(t, validatorPKIDs, *pkid) + } + + utxoView = _newUtxoView(testMeta) + leaders, err := utxoView.GetCurrentSnapshotLeaderSchedule() + require.NoError(t, err) + require.Equal(t, len(leaders), len(validatorPKIDs)) + // Make sure all the validators are in the leader schedule. + for _, pkid := range leaders { + require.Contains(t, validatorPKIDs, *pkid) + } + + utxoView = _newUtxoView(testMeta) + // Get all the validator entries + m0ValidatorEntry, err := utxoView.GetValidatorByPKID(&m0PKID) + require.NoError(t, err) + m1ValidatorEntry, err := utxoView.GetValidatorByPKID(&m1PKID) + require.NoError(t, err) + m2ValidatorEntry, err := utxoView.GetValidatorByPKID(&m2PKID) + require.NoError(t, err) + m3ValidatorEntry, err := utxoView.GetValidatorByPKID(&m3PKID) + require.NoError(t, err) + m4ValidatorEntry, err := utxoView.GetValidatorByPKID(&m4PKID) + require.NoError(t, err) + m5ValidatorEntry, err := utxoView.GetValidatorByPKID(&m5PKID) + require.NoError(t, err) + m6ValidatorEntry, err := utxoView.GetValidatorByPKID(&m6PKID) + require.NoError(t, err) + validatorPKIDToValidatorEntryMap := map[PKID]*ValidatorEntry{ + m0PKID: m0ValidatorEntry, + m1PKID: m1ValidatorEntry, + m2PKID: m2ValidatorEntry, + m3PKID: m3ValidatorEntry, + m4PKID: m4ValidatorEntry, + m5PKID: m5ValidatorEntry, + m6PKID: m6ValidatorEntry, + } + // Mark chain tip as committed. + testMeta.chain.BlockTip().Status |= StatusBlockCommitted + var isBlockProposerValid bool + { + // First block, we should have the first leader. 
+ leader0PKID := leaderSchedule[0] + leader0Entry := validatorPKIDToValidatorEntryMap[*leader0PKID] + leader0PublicKey := utxoView.GetPublicKeyForPKID(leader0PKID) + dummyBlock := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + PrevBlockHash: testMeta.chain.BlockTip().Hash, + ProposedInView: viewNumber + 1, + Height: blockHeight + 1, + ProposerPublicKey: NewPublicKey(leader0PublicKey), + ProposerVotingPublicKey: leader0Entry.VotingPublicKey, + }, + } + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + require.NoError(t, err) + require.True(t, isBlockProposerValid) + + // If we have a different proposer public key, we will have an error + leader1Entry := validatorPKIDToValidatorEntryMap[*leaderSchedule[1]] + dummyBlock.Header.ProposerVotingPublicKey = leader1Entry.VotingPublicKey.Copy() + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + require.NoError(t, err) + require.False(t, isBlockProposerValid) + + // If we advance the view, we know that leader 0 timed out, so + // we move to leader 1. + dummyBlock.Header.ProposedInView = viewNumber + 2 + dummyBlock.Header.ProposerVotingPublicKey = leader1Entry.VotingPublicKey + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + require.NoError(t, err) + require.True(t, isBlockProposerValid) + + // If we have 4 timeouts, we know that leaders 0, 1, 2, and 3 timed out, + // so we move to leader 4. + dummyBlock.Header.ProposedInView = viewNumber + 5 + leader4Entry := validatorPKIDToValidatorEntryMap[*leaderSchedule[4]] + dummyBlock.Header.ProposerVotingPublicKey = leader4Entry.VotingPublicKey + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + require.NoError(t, err) + require.True(t, isBlockProposerValid) + + // If we have 7 timeouts, we know everybody timed out, so we go back to leader 0. 
+ dummyBlock.Header.ProposedInView = viewNumber + 8 + dummyBlock.Header.ProposerVotingPublicKey = leader0Entry.VotingPublicKey + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + require.NoError(t, err) + require.True(t, isBlockProposerValid) + + // If the block view is less than the epoch's initial view, this is an error. + dummyBlock.Header.ProposedInView = viewNumber + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + require.NoError(t, err) + require.False(t, isBlockProposerValid) + + // If the block height is less than epoch's initial block height, this is an error. + dummyBlock.Header.ProposedInView = viewNumber + 1 + dummyBlock.Header.Height = blockHeight + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + require.NoError(t, err) + require.False(t, isBlockProposerValid) + + // If the difference between the block's view and epoch's initial view is less than + // the difference between the block's height and the epoch's initial height, this is an error. + // This would imply that we've had more blocks than views, which is not possible. + dummyBlock.Header.ProposedInView = viewNumber + 1 + dummyBlock.Header.Height = blockHeight + 2 + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + require.NoError(t, err) + require.False(t, isBlockProposerValid) + } + +} + +// TestGetLineageFromCommittedTip tests that getLineageFromCommittedTip works as expected. +// It makes sure the happy path works as well as makes sure we hit the appropriate RuleError +// if a block is invalid for any reason. Invalid reasons include extending from a committed +// block that is not the committed tip, extending from a block that has status StatusBlockValidateFailed, +// extending from a block that doesn't have a sequential block height or a monotonically increasing view. 
+func TestGetLineageFromCommittedTip(t *testing.T) { + setBalanceModelBlockHeights(t) + bc, _, _ := NewTestBlockchain(t) + GlobalDeSoParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 0 + resetGlobalDeSoParams := func() { + GlobalDeSoParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = math.MaxUint32 + } + t.Cleanup(resetGlobalDeSoParams) + hash1 := NewBlockHash(RandomBytes(32)) + genesisNode := NewBlockNode(nil, hash1, 1, nil, nil, &MsgDeSoHeader{ + Version: 2, + Height: 1, + ProposedInView: 1, + }, StatusBlockStored|StatusBlockValidated|StatusBlockCommitted) + bc.bestChain = []*BlockNode{genesisNode} + bc.blockIndexByHash = map[BlockHash]*BlockNode{ + *hash1: genesisNode, + } + block := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + PrevBlockHash: hash1, + ProposedInView: 3, + Height: 3, + }, + } + // If parent is committed tip, we'll have 0 ancestors. + ancestors, err := bc.getLineageFromCommittedTip(block) + require.NoError(t, err) + require.Len(t, ancestors, 0) + + // If parent block is not in block index, we should get an error + block.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) + ancestors, err = bc.getLineageFromCommittedTip(block) + require.Error(t, err) + require.Equal(t, err, RuleErrorMissingAncestorBlock) + require.Nil(t, ancestors) + + // If this block extends from a committed block that is not the tip, we should get an error. + block.Header.PrevBlockHash = hash1 + // add another block to the best chain. 
+ hash2 := NewBlockHash(RandomBytes(32)) + block2 := NewBlockNode(genesisNode, hash2, 2, nil, nil, &MsgDeSoHeader{ + Version: 2, + Height: 2, + ProposedInView: 2, + PrevBlockHash: hash1, + }, StatusBlockStored|StatusBlockValidated|StatusBlockCommitted) + bc.bestChain = append(bc.bestChain, block2) + bc.blockIndexByHash[*hash2] = block2 + ancestors, err = bc.getLineageFromCommittedTip(block) + require.Error(t, err) + require.Equal(t, err, RuleErrorDoesNotExtendCommittedTip) + + // update block to be uncommitted + block2.Status = StatusBlockStored | StatusBlockValidated + // set new block's parent as block 2. + block.Header.PrevBlockHash = hash2 + ancestors, err = bc.getLineageFromCommittedTip(block) + require.NoError(t, err) + require.Len(t, ancestors, 1) + + // Testing error cases + // Set block 2 to be ValidateFailed + block2.Status = StatusBlockStored | StatusBlockValidateFailed + ancestors, err = bc.getLineageFromCommittedTip(block) + require.Error(t, err) + require.Equal(t, err, RuleErrorAncestorBlockValidationFailed) + + // Revert block 2 status. + block2.Status = StatusBlockStored | StatusBlockValidated + // Set block's height to be <= block2's height + block.Header.Height = 2 + ancestors, err = bc.getLineageFromCommittedTip(block) + require.Error(t, err) + require.Equal(t, err, RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight) + // Revert block 2's height and set block's view to be <= block2's view + block.Header.Height = 3 + block.Header.ProposedInView = 2 + ancestors, err = bc.getLineageFromCommittedTip(block) + require.Error(t, err) + require.Equal(t, err, RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock) +} + +// TestIsValidPoSQuorumCertificate tests that isValidPoSQuorumCertificate works as expected. +// It tests the following cases: +// 1. Empty vote & timeout QC - INVALID +// 2. Valid vote QC w/ super-majority - VALID +// 3. Empty validator set - INVALID +// 4. Vote QC w/ malformed validator entries - INVALID +// 5. 
Malformed vote QC - INVALID +// 6. Valid vote QC w/o super-majority - INVALID +// 7. Vote QC w/ mismatched signer's list and signature - INVALID +// 8. Valid timeout QC w/ super-majority - VALID +// 9. Malformed timeout QC - Invalid +// 10. Timeout QC w/ malformed validator entries - INVALID +// 11. Valid timeout QC w/o super-majority - INVALID +// 12. Timeout QC w/ mismatched signer's list and signature - INVALID +func TestIsValidPoSQuorumCertificate(t *testing.T) { + bc, _, _ := NewTestBlockchain(t) + hash1 := NewBlockHash(RandomBytes(32)) + // Mock validator entries + m1PKID := DBGetPKIDEntryForPublicKey(bc.db, nil, m1PkBytes).PKID + m1VotingPrivateKey := _generateRandomBLSPrivateKey(t) + validator1Entry := &ValidatorEntry{ + ValidatorPKID: m1PKID, + VotingPublicKey: m1VotingPrivateKey.PublicKey(), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(3), + } + m2PKID := DBGetPKIDEntryForPublicKey(bc.db, nil, m2PkBytes).PKID + m2VotingPrivateKey := _generateRandomBLSPrivateKey(t) + validator2Entry := &ValidatorEntry{ + ValidatorPKID: m2PKID, + VotingPublicKey: m2VotingPrivateKey.PublicKey(), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(2), + } + m3PKID := DBGetPKIDEntryForPublicKey(bc.db, nil, m3PkBytes).PKID + m3VotingPrivateKey := _generateRandomBLSPrivateKey(t) + validator3Entry := &ValidatorEntry{ + ValidatorPKID: m3PKID, + VotingPublicKey: m3VotingPrivateKey.PublicKey(), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(1), + } + + validatorSet := []*ValidatorEntry{validator1Entry, validator2Entry, validator3Entry} + + desoBlock := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + Height: 5, + ProposedInView: 6, + }, + } + // Empty QC for both vote and timeout should fail + err := bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Valid vote QC should pass with supermajority + votePayload := consensus.GetVoteSignaturePayload(6, hash1) + vote1Signature, err := 
m1VotingPrivateKey.Sign(votePayload[:]) + require.NoError(t, err) + vote2Signature, err := m2VotingPrivateKey.Sign(votePayload[:]) + require.NoError(t, err) + aggregateSig, err := bls.AggregateSignatures([]*bls.Signature{vote1Signature, vote2Signature}) + require.NoError(t, err) + signersList1And2 := bitset.NewBitset().FromBytes([]byte{0x3}) // 0b0011, which represents validators 1 and 2, + voteQC := &QuorumCertificate{ + BlockHash: hash1, + ProposedInView: 6, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + SignersList: signersList1And2, + Signature: aggregateSig, + }, + } + desoBlock.Header.ValidatorsVoteQC = voteQC + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.NoError(t, err) + + // Empty validator set should fail + err = bc.isValidPoSQuorumCertificate(desoBlock, []*ValidatorEntry{}) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Malformed validators should fail + { + // Zero stake amount + validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(0) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Nil stake amount + validatorSet[0].TotalStakeAmountNanos = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Reset stake amount + validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(3) + // Nil voting public key + validatorSet[0].VotingPublicKey = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Reset voting public key + validatorSet[0].VotingPublicKey = m1VotingPrivateKey.PublicKey() + // Nil validator entry + err = bc.isValidPoSQuorumCertificate(desoBlock, append(validatorSet, nil)) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + } + + { + // Malformed vote QC 
should fail + // Nil vote QC + desoBlock.Header.ValidatorsVoteQC = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // View is 0 + desoBlock.Header.ValidatorsVoteQC = voteQC + voteQC.ProposedInView = 0 + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Nil block hash + voteQC.ProposedInView = 6 + voteQC.BlockHash = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Nil signers list + voteQC.ValidatorsVoteAggregatedSignature.SignersList = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Nil Signature + voteQC.ValidatorsVoteAggregatedSignature.SignersList = signersList1And2 + voteQC.ValidatorsVoteAggregatedSignature.Signature = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Nil aggregate signature + voteQC.BlockHash = hash1 + voteQC.ValidatorsVoteAggregatedSignature = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + // Reset the ValidatorsVoteAggregatedSignature + voteQC.ValidatorsVoteAggregatedSignature = &AggregatedBLSSignature{ + SignersList: signersList1And2, + Signature: aggregateSig, + } + } + + { + // No supermajority in vote QC + voteQC.ValidatorsVoteAggregatedSignature.SignersList = bitset.NewBitset().FromBytes([]byte{0x1}) // 0b0001, which represents validator 1 + voteQC.ValidatorsVoteAggregatedSignature.Signature = vote1Signature + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + } + { + // Only having 
signature for validator 1 should fail even if signers list has validator 2 + voteQC.ValidatorsVoteAggregatedSignature.SignersList = bitset.NewBitset().FromBytes([]byte{0x3}) // 0b0010, which represents validator 1 and 2 + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Having 1 and 3 in signers list, but including signature for 2 should fail + voteQC.ValidatorsVoteAggregatedSignature.SignersList = bitset.NewBitset().Set(0, true).Set(2, true) // represents validator 1 and 3 + voteQC.ValidatorsVoteAggregatedSignature.Signature = aggregateSig + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Reset the signers list and signature + voteQC.ValidatorsVoteAggregatedSignature.SignersList = signersList1And2 + voteQC.ValidatorsVoteAggregatedSignature.Signature = aggregateSig + } + + // Timeout QC tests + // Let's start with a valid timeout QC + timeout1Payload := consensus.GetTimeoutSignaturePayload(8, 6) + timeout1Signature, err := m1VotingPrivateKey.Sign(timeout1Payload[:]) + require.NoError(t, err) + timeout2Payload := consensus.GetTimeoutSignaturePayload(8, 5) + timeout2Signature, err := m2VotingPrivateKey.Sign(timeout2Payload[:]) + + timeoutAggSig, err := bls.AggregateSignatures([]*bls.Signature{timeout1Signature, timeout2Signature}) + require.NoError(t, err) + timeoutQC := &TimeoutAggregateQuorumCertificate{ + TimedOutView: 8, + ValidatorsHighQC: voteQC, + ValidatorsTimeoutHighQCViews: []uint64{6, 5}, + ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ + SignersList: signersList1And2, + Signature: timeoutAggSig, + }, + } + // Set the vote qc to nil + desoBlock.Header.ValidatorsVoteQC = nil + // Set the timeout qc to the timeout qc constructed above + desoBlock.Header.ValidatorsTimeoutAggregateQC = timeoutQC + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + 
require.NoError(t, err) + + { + // Malformed timeout QC tests + // NOTE: these actually trigger RuleErrorInvalidVoteQC because the + // timeout QC is interpreted as empty + // View = 0 + timeoutQC.TimedOutView = 0 + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Nil high QC + timeoutQC.TimedOutView = 8 + timeoutQC.ValidatorsHighQC = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // High QC has view of 0 + timeoutQC.ValidatorsHighQC = voteQC + timeoutQC.ValidatorsHighQC.ProposedInView = 0 + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // No high QC views + timeoutQC.ValidatorsHighQC.ProposedInView = 6 + timeoutQC.ValidatorsTimeoutHighQCViews = []uint64{} + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Nil high QC block hash + timeoutQC.ValidatorsTimeoutHighQCViews = []uint64{6, 5} + timeoutQC.ValidatorsHighQC.BlockHash = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Nil high QC signers list + timeoutQC.ValidatorsHighQC.BlockHash = hash1 + timeoutQC.ValidatorsHighQC.ValidatorsVoteAggregatedSignature.SignersList = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Nil high QC signature + timeoutQC.ValidatorsHighQC.ValidatorsVoteAggregatedSignature.SignersList = signersList1And2 + timeoutQC.ValidatorsHighQC.ValidatorsVoteAggregatedSignature.Signature = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Nil 
High QC Aggregated signature + timeoutQC.ValidatorsHighQC.ValidatorsVoteAggregatedSignature = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidVoteQC) + + // Revert high qc aggregated signature + timeoutQC.ValidatorsHighQC.ValidatorsVoteAggregatedSignature = &AggregatedBLSSignature{ + SignersList: signersList1And2, + Signature: aggregateSig, + } + + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.NoError(t, err) + } + { + // Timed out view is not exactly one greater than high QC view + timeoutQC.ValidatorsHighQC.ProposedInView = 7 + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidTimeoutQC) + } + { + // Invalid validator set tests + // Zero stake amount + validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(0) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidTimeoutQC) + + // Nil stake amount + validatorSet[0].TotalStakeAmountNanos = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidTimeoutQC) + + // Reset stake amount + validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(3) + // Nil voting public key + validatorSet[0].VotingPublicKey = nil + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidTimeoutQC) + + // Reset voting public key + validatorSet[0].VotingPublicKey = m1VotingPrivateKey.PublicKey() + // Nil validator entry + err = bc.isValidPoSQuorumCertificate(desoBlock, append(validatorSet, nil)) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidTimeoutQC) + } + + { + // No supermajority test + timeoutQC.ValidatorsTimeoutAggregatedSignature.SignersList = bitset.NewBitset().FromBytes([]byte{0x1}) // 0b0001, 
which represents validator 1 + timeoutQC.ValidatorsTimeoutAggregatedSignature.Signature = timeout1Signature + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidTimeoutQC) + } + + { + // Only having signature for validator 1 should fail even if signers list has validator 2 + timeoutQC.ValidatorsTimeoutAggregatedSignature.SignersList = bitset.NewBitset().FromBytes([]byte{0x3}) // 0b0011, which represents validator 1 and 2 + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidTimeoutQC) + + // Having 1 and 3 in signers list, but including signature for 2 should fail + timeoutQC.ValidatorsTimeoutAggregatedSignature.SignersList = bitset.NewBitset().Set(0, true).Set(2, true) // represents validator 1 and 3 + timeoutQC.ValidatorsTimeoutAggregatedSignature.Signature = timeoutAggSig + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + require.Error(t, err) + require.Equal(t, err, RuleErrorInvalidTimeoutQC) + } +} + +// TestShouldReorg tests that shouldReorg works as expected. +// It tests the following cases: +// 1. Parent is chain tip. No reorg required. +// 2. Parent is not chain tip, but currentView is greater than the block's view. No reorg required. +// 3. Parent is not chain tip and current view is less than or equal to block's view. Reorg required. +func TestShouldReorg(t *testing.T) { + bc, _, _ := NewTestBlockchain(t) + hash1 := NewBlockHash(RandomBytes(32)) + hash2 := NewBlockHash(RandomBytes(32)) + hash3 := NewBlockHash(RandomBytes(32)) + bc.bestChain = []*BlockNode{ + { + Hash: hash1, + Status: StatusBlockStored | StatusBlockValidated | StatusBlockCommitted, + }, + { + Hash: hash3, + Status: StatusBlockStored | StatusBlockValidated, + }, + } + + newBlock := &BlockNode{ + Header: &MsgDeSoHeader{ + ProposedInView: 2, + PrevBlockHash: bc.bestChain[1].Hash, + }, + } + + // Parent is chain tip. 
No reorg required. + require.False(t, bc.shouldReorg(newBlock, 2)) + + // Parent is not chain tip, but currentView is greater than + // the block's view. + newBlock.Header.PrevBlockHash = hash1 + require.False(t, bc.shouldReorg(newBlock, 3)) + + // Parent is not chain tip. Reorg required. + // Other checks have already been completed to ensure + // that hash2 exists in the blockIndexByHash + newBlock.Header.PrevBlockHash = hash2 + require.True(t, bc.shouldReorg(newBlock, 2)) +} + +// TestTryApplyNewTip tests that tryApplyNewTip works as expected. +// It tests the following cases: +// 1. Simple reorg. Just replacing the uncommitted tip. +// 2. Create a longer chain and reorg to it. +// 3. Make sure no reorg when current view is greater than block's view +// 4. Super happy path of simply extending current uncommitted tip. +func TestTryApplyNewTip(t *testing.T) { + setBalanceModelBlockHeights(t) + bc, _, _ := NewTestBlockchain(t) + GlobalDeSoParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 0 + resetGlobalDeSoParams := func() { + GlobalDeSoParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = math.MaxUint32 + } + t.Cleanup(resetGlobalDeSoParams) + hash1 := NewBlockHash(RandomBytes(32)) + bn1 := &BlockNode{ + Hash: hash1, + Status: StatusBlockStored | StatusBlockValidated | StatusBlockCommitted, + Height: 2, + Header: &MsgDeSoHeader{ + Height: 2, + ProposedInView: 2, + }, + } + hash2 := NewBlockHash(RandomBytes(32)) + bn2 := &BlockNode{ + Hash: hash2, + Status: StatusBlockStored | StatusBlockValidated, + Height: 3, + Header: &MsgDeSoHeader{ + PrevBlockHash: hash1, + Height: 3, + ProposedInView: 3, + }, + } + hash3 := NewBlockHash(RandomBytes(32)) + bn3 := &BlockNode{ + Hash: hash3, + Status: StatusBlockStored | StatusBlockValidated, + Height: 4, + Header: &MsgDeSoHeader{ + PrevBlockHash: hash2, + Height: 4, + ProposedInView: 4, + }, + } + bc.addBlockToBestChain(bn1) + bc.addBlockToBestChain(bn2) + bc.addBlockToBestChain(bn3) + 
bc.blockIndexByHash[*hash1] = bn1 + bc.blockIndexByHash[*hash2] = bn2 + bc.blockIndexByHash[*hash3] = bn3 + + // Simple reorg. Just replacing the uncommitted tip. + newBlock := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + PrevBlockHash: hash2, + ProposedInView: 10, + Height: 4, + }, + } + newBlockHash, err := newBlock.Hash() + require.NoError(t, err) + + ancestors, err := bc.getLineageFromCommittedTip(newBlock) + require.NoError(t, err) + checkBestChainForHash := func(hash *BlockHash) bool { + return collections.Any(bc.bestChain, func(bn *BlockNode) bool { + return bn.Hash.IsEqual(hash) + }) + } + + // Try to apply newBlock as tip. This should succeed. + newBlockNode := &BlockNode{ + Header: newBlock.Header, + Hash: newBlockHash, + } + appliedNewTip, err := bc.tryApplyNewTip(newBlockNode, 9, ancestors) + require.NoError(t, err) + require.True(t, appliedNewTip) + // hash 3 should no longer be in the best chain or best chain map + _, hash3ExistsInBestChainMap := bc.bestChainMap[*hash3] + require.False(t, hash3ExistsInBestChainMap) + require.False(t, checkBestChainForHash(hash3)) + + // newBlock should be in the best chain and the best chain map and should be the tip. + _, newBlockExistsInBestChainMap := bc.bestChainMap[*newBlockHash] + require.True(t, newBlockExistsInBestChainMap) + require.True(t, checkBestChainForHash(newBlockHash)) + require.True(t, bc.BlockTip().Hash.IsEqual(newBlockHash)) + + // Make sure block 2 and block 1 are still in the best chain. + _, hash2ExistsInBestChainMap := bc.bestChainMap[*hash2] + require.True(t, hash2ExistsInBestChainMap) + require.True(t, checkBestChainForHash(hash2)) + + _, hash1ExistsInBestChainMap := bc.bestChainMap[*hash1] + require.True(t, hash1ExistsInBestChainMap) + require.True(t, checkBestChainForHash(hash1)) + + // Remove newBlock from the best chain and block index to reset the state. 
+ bc.bestChain = bc.bestChain[:len(bc.bestChain)-1] + delete(bc.bestChainMap, *newBlockHash) + // Add block 3 back + bc.addBlockToBestChain(bn3) + + // Add a series of blocks that are not part of the best chain + // to the block index and reorg to them + hash4 := NewBlockHash(RandomBytes(32)) + bn4 := &BlockNode{ + Hash: hash4, + Status: StatusBlockStored | StatusBlockValidated, + Height: 5, + Header: &MsgDeSoHeader{ + PrevBlockHash: hash1, + ProposedInView: 5, + Height: 5, + }, + } + + hash5 := NewBlockHash(RandomBytes(32)) + bn5 := &BlockNode{ + Hash: hash5, + Status: StatusBlockStored | StatusBlockValidated, + Height: 6, + Header: &MsgDeSoHeader{ + PrevBlockHash: hash4, + ProposedInView: 6, + Height: 6, + }, + } + bc.blockIndexByHash[*hash4] = bn4 + bc.blockIndexByHash[*hash5] = bn5 + + // Set new block's parent to hash5 + newBlockNode.Header.PrevBlockHash = hash5 + newBlockNode.Header.ProposedInView = 7 + newBlockNode.Header.Height = 7 + newBlockNode.Height = 7 + require.NoError(t, err) + ancestors, err = bc.getLineageFromCommittedTip(newBlock) + require.NoError(t, err) + + // Try to apply newBlock as tip. + appliedNewTip, err = bc.tryApplyNewTip(newBlockNode, 6, ancestors) + require.NoError(t, err) + require.True(t, appliedNewTip) + // newBlockHash should be tip. 
+ require.True(t, bc.BlockTip().Hash.IsEqual(newBlockHash)) + // hash 3 should no longer be in the best chain or best chain map + _, hash3ExistsInBestChainMap = bc.bestChainMap[*hash3] + require.False(t, hash3ExistsInBestChainMap) + require.False(t, checkBestChainForHash(hash3)) + // hash 2 should no longer be in the best chain or best chain map + _, hash2ExistsInBestChainMap = bc.bestChainMap[*hash2] + require.False(t, hash2ExistsInBestChainMap) + require.False(t, checkBestChainForHash(hash2)) + // hash 4 should be in the best chain and the best chain map + _, hash4ExistsInBestChainMap := bc.bestChainMap[*hash4] + require.True(t, hash4ExistsInBestChainMap) + require.True(t, checkBestChainForHash(hash4)) + // hash 5 should be in the best chain and the best chain map + _, hash5ExistsInBestChainMap := bc.bestChainMap[*hash5] + require.True(t, hash5ExistsInBestChainMap) + require.True(t, checkBestChainForHash(hash5)) + + // Reset the state of the best chain. + delete(bc.bestChainMap, *hash4) + delete(bc.bestChainMap, *hash5) + delete(bc.bestChainMap, *newBlockHash) + bc.bestChain = bc.bestChain[:len(bc.bestChain)-3] + + // Add block 2 and 3 back. + bc.addBlockToBestChain(bn2) + bc.addBlockToBestChain(bn3) + + // No reorg tests + // currentView > newBlock.View + newBlockNode.Header.ProposedInView = 8 + + // we should not apply the new tip if it doesn't extend the current tip. + appliedNewTip, err = bc.tryApplyNewTip(newBlockNode, 9, ancestors) + require.False(t, appliedNewTip) + require.NoError(t, err) + + // Super Happy path: no reorg, just extending tip. + newBlockNode.Header.ProposedInView = 10 + newBlockNode.Header.PrevBlockHash = hash3 + newBlockNode.Header.Height = 5 + newBlockNode.Height = 5 + require.NoError(t, err) + ancestors, err = bc.getLineageFromCommittedTip(newBlock) + require.NoError(t, err) + appliedNewTip, err = bc.tryApplyNewTip(newBlockNode, 6, ancestors) + require.True(t, appliedNewTip) + require.NoError(t, err) + // newBlockHash should be tip. 
+ require.True(t, bc.BlockTip().Hash.IsEqual(newBlockHash)) +} + +// TestCanCommitGrandparent tests the canCommitGrandparent function +// by checking the commit rule. It ensures that the commit rule +// will be run when there is a direct parent-child relationship +// between the incoming block and its parent (no skipping views) +// and then we can commit the incoming block's grandparent. +func TestCanCommitGrandparent(t *testing.T) { + setBalanceModelBlockHeights(t) + bc, _, _ := NewTestBlockchain(t) + GlobalDeSoParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 0 + resetGlobalDeSoParams := func() { + GlobalDeSoParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = math.MaxUint32 + } + t.Cleanup(resetGlobalDeSoParams) + hash1 := NewBlockHash(RandomBytes(32)) + bn1 := &BlockNode{ + Hash: hash1, + Status: StatusBlockStored | StatusBlockValidated, + Height: 2, + Header: &MsgDeSoHeader{ + ProposedInView: 1, + }, + } + hash2 := NewBlockHash(RandomBytes(32)) + bn2 := &BlockNode{ + Hash: hash2, + Status: StatusBlockStored | StatusBlockValidated, + Height: 3, + Header: &MsgDeSoHeader{ + ProposedInView: 2, + PrevBlockHash: hash1, + }, + } + bc.bestChainMap[*hash1] = bn1 + bc.bestChainMap[*hash2] = bn2 + + // define incoming block + hash3 := NewBlockHash(RandomBytes(32)) + bn3 := &BlockNode{ + Hash: hash3, + Status: StatusBlockStored | StatusBlockValidated, + Height: 4, + Header: &MsgDeSoHeader{ + ProposedInView: 10, + PrevBlockHash: hash2, + }, + } + + // If we are adding bn3 to the chain, it is a descendant of bn2 + // and bn2 and bn3 possess a direct parent-child relationship + // (meaning they are in consecutive views). So we should be able + // to commit bn1. + grandparentHash, canCommit := bc.canCommitGrandparent(bn3) + require.True(t, hash1.IsEqual(grandparentHash)) + require.True(t, canCommit) + + // Update bn1 to be committed. We no longer can run the commit since bn1 is already + // committed. We expect canCommit to be false. 
+ bn1.Status |= StatusBlockCommitted + grandparentHash, canCommit = bc.canCommitGrandparent(bn3) + require.Nil(t, grandparentHash) + require.False(t, canCommit) + + // revert bn1's committed status. + bn1.Status = StatusBlockStored | StatusBlockValidated + // Increase bn2's proposed in view, so that it is no longer a direct child of bn3. + // We should no longer be able to commit bn1. + bn2.Header.ProposedInView = 3 + grandparentHash, canCommit = bc.canCommitGrandparent(bn3) + require.Nil(t, grandparentHash) + require.False(t, canCommit) + + // TODO: What other cases do we really need tested here? +} + +// TestRunCommitRuleOnBestChain tests the runCommitRuleOnBestChain function +// to verify that it properly assesses the commit rule and that the commit logic +// behaves as expected. +// It tests the following cases: +// 1. Adding a single block (block1) to the best chain does not result in any new blocks being committed. +// 2. Adding a second block (block2) w/ parent (block1) to the best chain does not result in any new blocks being committed. +// 3. Adding a third block (block3) w/ parent (block2) and block3's view = block2's view + 1 to the best chain results in block1 being committed. +// 4. Adding a fourth block (block4) w/ parent (block3) and block4's view > block3's view + 1 to the best chain results in block2 being committed. +// 5. Adding a fifth block (block5) w/ parent (block4) and block5's view = block4's view + 1 to the best chain does not result in block3 being committed. +// 6. Adding a sixth block (block6) w/ parent (block5) and block6's view = block5's view + 1 to the best chain results in block3 and block4 being committed. +func TestRunCommitRuleOnBestChain(t *testing.T) { + testMeta := NewTestPoSBlockchain(t) + + // Create a single block and add it to the best chain. + blockTemplate1 := _generateBlockAndAddToBestChain(testMeta, 11, 11, 887) + // Okay now try to run the commit rule. Nothing will happen. + // We expect the block to be uncommitted. 
+ err := testMeta.chain.runCommitRuleOnBestChain() + require.NoError(t, err) + + blockHash1, err := blockTemplate1.Hash() + require.NoError(t, err) + // Okay so let's make sure the block is uncommitted. + _verifyCommitRuleHelper(testMeta, []*BlockHash{}, []*BlockHash{blockHash1}, nil) + + // Add one more block to best chain. Should still not trigger commit rule + blockTemplate2 := _generateBlockAndAddToBestChain(testMeta, 12, 12, 813) + + // Run commit rule again. Nothing should happen. + // We expect both block 1 and block 2 to be uncommitted. + err = testMeta.chain.runCommitRuleOnBestChain() + require.NoError(t, err) + + blockHash2, err := blockTemplate2.Hash() + require.NoError(t, err) + // Okay so let's make sure blocks 1 and 2 are uncommitted. + _verifyCommitRuleHelper(testMeta, []*BlockHash{}, []*BlockHash{blockHash1, blockHash2}, nil) + + // Okay add one MORE block to the best chain. This should trigger the commit rule. + blockTemplate3 := _generateBlockAndAddToBestChain(testMeta, 13, 13, 513) + + // Run the commit rule again. This time we expect block 1 to be committed. + err = testMeta.chain.runCommitRuleOnBestChain() + require.NoError(t, err) + + blockHash3, err := blockTemplate3.Hash() + require.NoError(t, err) + + // Okay so let's make sure that block 1 is committed and blocks 2 and 3 are not. + _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1}, []*BlockHash{blockHash2, blockHash3}, blockHash1) + + // Add one more block to the best chain, but have the view be further in the future. + // this should trigger a commit on block 2. + blockTemplate4 := _generateBlockAndAddToBestChain(testMeta, 14, 20, 429) + err = testMeta.chain.runCommitRuleOnBestChain() + require.NoError(t, err) + + blockHash4, err := blockTemplate4.Hash() + require.NoError(t, err) + + // Blocks 1 and 2 should be committed, blocks 3 and 4 are not. 
+ _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2}, []*BlockHash{blockHash3, blockHash4}, blockHash2) + + // Okay so add block 5 to the best chain. This should NOT trigger a commit on block 3 + // as block 4 is not a direct child of block 3 based on its view. + blockTemplate5 := _generateBlockAndAddToBestChain(testMeta, 15, 21, 654) + err = testMeta.chain.runCommitRuleOnBestChain() + require.NoError(t, err) + + blockHash5, err := blockTemplate5.Hash() + require.NoError(t, err) + + // Blocks 1 and 2 are committed, blocks 3, 4, and 5 are not. + _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2}, []*BlockHash{blockHash3, blockHash4, blockHash5}, blockHash2) + + // If we now add a block that is a descendent of block 5, we should be able to commit + // blocks 3 and 4 as block 4 and 5 possess a direct parent child relationship and + // we have a descendent of block 5. + blockTemplate6 := _generateBlockAndAddToBestChain(testMeta, 16, 22, 912) + require.NoError(t, err) + err = testMeta.chain.runCommitRuleOnBestChain() + require.NoError(t, err) + + blockHash6, err := blockTemplate6.Hash() + require.NoError(t, err) + + // Blocks 1, 2, 3, and 4 are committed, blocks 5 and 6 are not. + _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2, blockHash3, blockHash4}, []*BlockHash{blockHash5, blockHash6}, blockHash4) +} + +// _verifyCommitRuleHelper is a helper function that verifies the state of the blockchain +// by checking the best chain, best chain map, and DB to make sure that the expected blocks +// are committed or uncommitted and that the TipHash is correct. +func _verifyCommitRuleHelper(testMeta *TestMeta, committedBlocks []*BlockHash, uncommittedBlocks []*BlockHash, bestHash *BlockHash) { + if bestHash != nil { + // Verify the best hash in the db. 
+ dbBestHash := DbGetBestHash(testMeta.chain.db, testMeta.chain.snapshot, ChainTypeDeSoBlock) + require.True(testMeta.t, bestHash.IsEqual(dbBestHash)) + } + for _, committedHash := range committedBlocks { + // Okay so let's make sure the block is committed. + blockNode, exists := testMeta.chain.bestChainMap[*committedHash] + require.True(testMeta.t, exists) + require.True(testMeta.t, blockNode.IsCommitted()) + + // Block should be in DB. + fullBlock, err := GetBlock(blockNode.Hash, testMeta.chain.db, testMeta.chain.snapshot) + require.NoError(testMeta.t, err) + require.NotNil(testMeta.t, fullBlock) + // Height Hash To Node Info should be in DB. + heightHashToNodeInfo := GetHeightHashToNodeInfo(testMeta.chain.db, testMeta.chain.snapshot, blockNode.Height, blockNode.Hash, false) + require.NoError(testMeta.t, err) + require.NotNil(testMeta.t, heightHashToNodeInfo) + // Make sure this info matches the block node. + serializedDBBlockNode, err := SerializeBlockNode(heightHashToNodeInfo) + require.NoError(testMeta.t, err) + serializedBlockNode, err := SerializeBlockNode(blockNode) + require.NoError(testMeta.t, err) + require.True(testMeta.t, bytes.Equal(serializedDBBlockNode, serializedBlockNode)) + utxoOps, err := GetUtxoOperationsForBlock(testMeta.chain.db, testMeta.chain.snapshot, blockNode.Hash) + require.NoError(testMeta.t, err) + // We have 1 utxo op slice for each transaction PLUS 1 for expired nonces. + require.Len(testMeta.t, utxoOps, len(fullBlock.Txns)+1) + } + for _, uncommittedBlockHash := range uncommittedBlocks { + // Okay so let's make sure the block is uncommitted. + blockNode, exists := testMeta.chain.bestChainMap[*uncommittedBlockHash] + require.True(testMeta.t, exists) + require.False(testMeta.t, blockNode.IsCommitted()) + // TODO: Verify DB results?? Kinda silly to make sure everything is missing. + } +} + +// Test the following series of blocks to make sure that ProcessBlockPoS properly handles all cases as expected during the steady state +// 1. 
Process a bad block. The block could be bad for any reason, we don't really care the reason, we just want to +// see it get rejected. +// 2. Process three good blocks in a row, which tests the commit rule +// 3. Process a timeout block that reorgs the previous tip +// 4. Process a regular block that reorgs from the previous tip +// 5. Process an orphan, which tests the block's storage and the return value of missingBlockHashes +func TestProcessBlockPoS(t *testing.T) { + testMeta := NewTestPoSBlockchainWithValidators(t) + + { + // Create a bad block and try to process it. + dummyBlock := _generateDummyBlock(testMeta, 12, 12, 887) + success, isOrphan, missingBlockHashes, err := testMeta.chain.processBlockPoS(dummyBlock, 12, true) + require.False(t, success) + require.False(t, isOrphan) + require.Len(t, missingBlockHashes, 0) + require.Error(t, err) + } + + var blockHash1 *BlockHash + { + var realBlock *MsgDeSoBlock + realBlock = _generateRealBlock(testMeta, 12, 12, 889, testMeta.chain.BlockTip().Hash, false) + success, isOrphan, missingBlockHashes, err := testMeta.chain.processBlockPoS(realBlock, 12, true) + require.True(t, success) + require.False(t, isOrphan) + require.Len(t, missingBlockHashes, 0) + require.NoError(t, err) + + // Okay now we can check the best chain. + // We expect the block to be uncommitted. + blockHash1, err = realBlock.Hash() + require.NoError(t, err) + _verifyCommitRuleHelper(testMeta, []*BlockHash{}, []*BlockHash{blockHash1}, nil) + } + + var blockHash2, blockHash3 *BlockHash + { + // Now let's try adding two more blocks on top of this one to make sure commit rule works properly. 
+ var realBlock2 *MsgDeSoBlock + realBlock2 = _generateRealBlock(testMeta, 13, 13, 950, blockHash1, false) + success, _, _, err := testMeta.chain.processBlockPoS(realBlock2, 13, true) + require.True(t, success) + blockHash2, err = realBlock2.Hash() + require.NoError(t, err) + + var realBlock3 *MsgDeSoBlock + realBlock3 = _generateRealBlock(testMeta, 14, 14, 378, blockHash2, false) + + success, _, _, err = testMeta.chain.processBlockPoS(realBlock3, 14, true) + require.True(t, success) + // Okay now we expect blockHash1 to be committed, but blockHash2 and 3 to not be committed. + blockHash3, err = realBlock3.Hash() + require.NoError(t, err) + + _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1}, []*BlockHash{blockHash2, blockHash3}, blockHash1) + } + + var timeoutBlockHash *BlockHash + { + // Okay let's timeout view 15 + var timeoutBlock *MsgDeSoBlock + timeoutBlock = _generateRealBlock(testMeta, 15, 16, 381, blockHash3, true) + success, _, _, err := testMeta.chain.processBlockPoS(timeoutBlock, 15, true) + fmt.Println(err) + require.True(t, success) + timeoutBlockHash, err = timeoutBlock.Hash() + require.NoError(t, err) + + _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2}, []*BlockHash{blockHash3, timeoutBlockHash}, blockHash2) + } + + var reorgBlockHash *BlockHash + { + // Okay let's introduce a reorg. New block at view 15 with block 3 as its parent. + var reorgBlock *MsgDeSoBlock + reorgBlock = _generateRealBlock(testMeta, 15, 15, 373, blockHash3, false) + success, _, _, err := testMeta.chain.processBlockPoS(reorgBlock, 15, true) + require.True(t, success) + reorgBlockHash, err = reorgBlock.Hash() + require.NoError(t, err) + // We expect blockHash1 and blockHash2 to be committed, but blockHash3 and reorgBlockHash to not be committed. 
+ // Timeout block will no longer be in best chain, and will still be in an uncommitted state in the block index + _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2}, []*BlockHash{blockHash3, reorgBlockHash}, blockHash2) + _, exists := testMeta.chain.bestChainMap[*timeoutBlockHash] + require.False(t, exists) + + timeoutBlockNode, exists := testMeta.chain.blockIndexByHash[*timeoutBlockHash] + require.True(t, exists) + require.False(t, timeoutBlockNode.IsCommitted()) + } + { + // Let's process an orphan block. + var dummyParentBlock *MsgDeSoBlock + dummyParentBlock = _generateRealBlock(testMeta, 16, 16, 272, reorgBlockHash, false) + dummyParentBlockHash, err := dummyParentBlock.Hash() + require.NoError(t, err) + var orphanBlock *MsgDeSoBlock + orphanBlock = _generateRealBlock(testMeta, 17, 17, 9273, reorgBlockHash, false) + updateRandomSeedSignature(testMeta, orphanBlock, dummyParentBlock.Header.ProposerRandomSeedSignature) + // Set the prev block hash manually on orphan block + orphanBlock.Header.PrevBlockHash = dummyParentBlockHash + // Create a QC on the dummy parent block + orphanBlock.Header.ValidatorsVoteQC = _getVoteQC(testMeta, testMeta.posMempool.readOnlyLatestBlockView, dummyParentBlockHash, 16) + updateProposerVotePartialSignatureForBlock(testMeta, orphanBlock) + orphanBlockHash, err := orphanBlock.Hash() + require.NoError(t, err) + success, isOrphan, missingBlockHashes, err := testMeta.chain.processBlockPoS(orphanBlock, 17, true) + require.False(t, success) + require.True(t, isOrphan) + require.Len(t, missingBlockHashes, 1) + require.True(t, missingBlockHashes[0].IsEqual(dummyParentBlockHash)) + require.NoError(t, err) + orphanBlockInIndex := testMeta.chain.blockIndexByHash[*orphanBlockHash] + require.NotNil(t, orphanBlockInIndex) + require.True(t, orphanBlockInIndex.IsStored()) + require.False(t, orphanBlockInIndex.IsValidated()) + + // Okay now if we process the parent block, the orphan should get updated to be validated. 
+ success, isOrphan, missingBlockHashes, err = testMeta.chain.processBlockPoS(dummyParentBlock, 16, true) + require.True(t, success) + require.False(t, isOrphan) + require.Len(t, missingBlockHashes, 0) + require.NoError(t, err) + + orphanBlockInIndex = testMeta.chain.blockIndexByHash[*orphanBlockHash] + require.NotNil(t, orphanBlockInIndex) + require.True(t, orphanBlockInIndex.IsStored()) + require.True(t, orphanBlockInIndex.IsValidated()) + } + { + // Let's process a block that is an orphan, but is malformed. + randomHash := NewBlockHash(RandomBytes(32)) + var malformedOrphanBlock *MsgDeSoBlock + malformedOrphanBlock = _generateRealBlock(testMeta, 18, 18, 9273, testMeta.chain.BlockTip().Hash, false) + malformedOrphanBlock.Header.PrevBlockHash = randomHash + // Modify anything to make the block malformed, but make sure a hash can still be generated. + malformedOrphanBlock.Header.TxnConnectStatusByIndexHash = randomHash + // Resign the block. + updateProposerVotePartialSignatureForBlock(testMeta, malformedOrphanBlock) + malformedOrphanBlockHash, err := malformedOrphanBlock.Hash() + require.NoError(t, err) + success, isOrphan, missingBlockHashes, err := testMeta.chain.processBlockPoS(malformedOrphanBlock, 18, true) + require.False(t, success) + require.True(t, isOrphan) + require.Len(t, missingBlockHashes, 1) + require.True(t, missingBlockHashes[0].IsEqual(randomHash)) + require.NoError(t, err) + + malformedOrphanBlockInIndex := testMeta.chain.blockIndexByHash[*malformedOrphanBlockHash] + require.True(t, malformedOrphanBlockInIndex.IsValidateFailed()) + require.True(t, malformedOrphanBlockInIndex.IsStored()) + + // If a block can't be hashed, we expect to get an error. 
+ malformedOrphanBlock.Header.TxnConnectStatusByIndexHash = nil + success, isOrphan, missingBlockHashes, err = testMeta.chain.processBlockPoS(malformedOrphanBlock, 18, true) + require.False(t, success) + require.False(t, isOrphan) + require.Len(t, missingBlockHashes, 0) + require.Error(t, err) + } +} + +// TestGetSafeBlocks tests the GetSafeBlocks function to make sure it returns the correct blocks. +// It adds three blocks as Validated and Stored to the block index, each referencing the previous +// block as its parent and adds one block as Stored with the same height as the third block, but not validated. +// Also, we add a block with a block height in the future to make sure it is not returned. +// First, we expect that all three Validated & Stored blocks are returned as safe blocks and +// the Stored block is not returned. +// Next, we update the previously stored block to be validated and expect it to be returned. +func TestGetSafeBlocks(t *testing.T) { + testMeta := NewTestPoSBlockchainWithValidators(t) + committedHash := testMeta.chain.BlockTip().Hash + var block1 *MsgDeSoBlock + block1 = _generateRealBlock(testMeta, uint64(testMeta.savedHeight), uint64(testMeta.savedHeight), 1723, committedHash, false) + block1Hash, err := block1.Hash() + require.NoError(t, err) + // Add block 1 w/ stored and validated + bn1, err := testMeta.chain.storeValidatedBlockInBlockIndex(block1) + require.NoError(t, err) + require.True(t, bn1.Hash.IsEqual(block1Hash)) + // Create block 2 w/ block 1 as parent and add it to the block index w/ stored & validated + var block2 *MsgDeSoBlock + block2 = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+1), uint64(testMeta.savedHeight+1), 1293, block1Hash, false) + block2Hash, err := block2.Hash() + require.NoError(t, err) + bn2, err := testMeta.chain.storeValidatedBlockInBlockIndex(block2) + require.NoError(t, err) + require.True(t, bn2.Hash.IsEqual(block2Hash)) + // Add block 3 only as stored and validated + var block3 
*MsgDeSoBlock + block3 = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+2), uint64(testMeta.savedHeight+2), 1372, block2Hash, false) + bn3, err := testMeta.chain.storeValidatedBlockInBlockIndex(block3) + require.NoError(t, err) + block3Hash, err := block3.Hash() + require.NoError(t, err) + require.True(t, bn3.Hash.IsEqual(block3Hash)) + // Add block 3' only as stored + var block3Prime *MsgDeSoBlock + block3Prime = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+2), uint64(testMeta.savedHeight+3), 13717, block2Hash, false) + bn3Prime, err := testMeta.chain.storeBlockInBlockIndex(block3Prime) + require.NoError(t, err) + block3PrimeHash, err := block3Prime.Hash() + require.NoError(t, err) + require.True(t, bn3Prime.Hash.IsEqual(block3PrimeHash)) + // Add block 5 as Stored & Validated (this could never really happen, but it illustrates a point!) + var block5 *MsgDeSoBlock + block5 = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+4), uint64(testMeta.savedHeight+4), 1237, block3Hash, false) + block5.Header.Height = uint64(testMeta.savedHeight + 5) + block5Hash, err := block5.Hash() + require.NoError(t, err) + _, err = testMeta.chain.storeValidatedBlockInBlockIndex(block5) + require.NoError(t, err) + // Okay let's get the safe blocks. 
+ safeBlocks, err := testMeta.chain.GetSafeBlocks() + require.NoError(t, err) + require.Len(t, safeBlocks, 4) + _checkSafeBlocksForBlockHash := func(blockHash *BlockHash, safeBlockSlice []*MsgDeSoHeader) bool { + return collections.Any(safeBlockSlice, func(header *MsgDeSoHeader) bool { + headerHash, err := header.Hash() + require.NoError(t, err) + return headerHash.IsEqual(blockHash) + }) + } + require.True(t, _checkSafeBlocksForBlockHash(committedHash, safeBlocks)) + require.True(t, _checkSafeBlocksForBlockHash(block1Hash, safeBlocks)) + require.True(t, _checkSafeBlocksForBlockHash(block2Hash, safeBlocks)) + require.True(t, _checkSafeBlocksForBlockHash(block3Hash, safeBlocks)) + require.False(t, _checkSafeBlocksForBlockHash(block3PrimeHash, safeBlocks)) + require.False(t, _checkSafeBlocksForBlockHash(block5Hash, safeBlocks)) + + // Update block 3 prime to be validated and it should now be a safe block. + bn3Prime, err = testMeta.chain.storeValidatedBlockInBlockIndex(block3Prime) + require.NoError(t, err) + require.True(t, bn3Prime.IsValidated()) + safeBlocks, err = testMeta.chain.GetSafeBlocks() + require.NoError(t, err) + require.Len(t, safeBlocks, 5) + require.True(t, _checkSafeBlocksForBlockHash(block3PrimeHash, safeBlocks)) +} + +// TestProcessOrphanBlockPoS tests the ProcessOrphanBlockPoS function to make sure it properly handles +// marking orphan blocks as Validate Failed if they are truly invalid. Note that orphan blocks will +// never be marked Validated. +func TestProcessOrphanBlockPoS(t *testing.T) { + testMeta := NewTestPoSBlockchainWithValidators(t) + + // Generate a real block and make sure it doesn't hit any errors. + { + var realBlock *MsgDeSoBlock + realBlock = _generateRealBlock(testMeta, 12, 12, 889, testMeta.chain.BlockTip().Hash, false) + // Give the block a random parent, so it is truly an orphan. 
+ realBlock.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) + updateProposerVotePartialSignatureForBlock(testMeta, realBlock) + err := testMeta.chain.processOrphanBlockPoS(realBlock) + require.NoError(t, err) + // Get the block node from the block index. + blockHash, err := realBlock.Hash() + require.NoError(t, err) + blockNode, exists := testMeta.chain.blockIndexByHash[*blockHash] + require.True(t, exists) + require.True(t, blockNode.IsStored()) + require.False(t, blockNode.IsValidateFailed()) + require.False(t, blockNode.IsValidated()) + } + // Generate a real block and make some modification to the block to make it malformed. + { + var realBlock *MsgDeSoBlock + realBlock = _generateRealBlock(testMeta, 12, 12, 8172, testMeta.chain.BlockTip().Hash, false) + // Give the block a random parent, so it is truly an orphan. + realBlock.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) + // Set the header version to 1 + realBlock.Header.Version = 1 + updateProposerVotePartialSignatureForBlock(testMeta, realBlock) + // There should be no error, but the block should be marked as ValidateFailed. + err := testMeta.chain.processOrphanBlockPoS(realBlock) + require.NoError(t, err) + // Get the block node from the block index. + blockHash, err := realBlock.Hash() + require.NoError(t, err) + blockNode, exists := testMeta.chain.blockIndexByHash[*blockHash] + require.True(t, exists) + require.True(t, blockNode.IsStored()) + require.True(t, blockNode.IsValidateFailed()) + require.False(t, blockNode.IsValidated()) + } + // Generate a real block in this epoch and change the block proposer. This should fail the spam prevention check + // and the block will not be in the block index. + { + var realBlock *MsgDeSoBlock + realBlock = _generateRealBlock(testMeta, 12, 12, 1273, testMeta.chain.BlockTip().Hash, false) + // Give the block a random parent, so it is truly an orphan. 
+		realBlock.Header.PrevBlockHash = NewBlockHash(RandomBytes(32))
+		// Just make sure we're in the same epoch.
+		utxoView := _newUtxoView(testMeta)
+		currentEpochEntry, err := utxoView.GetCurrentEpochEntry()
+		require.NoError(t, err)
+		require.True(t, currentEpochEntry.ContainsBlockHeight(12))
+		// Change the block proposer to any other validator's public key.
+		wrongBlockProposer := NewPublicKey(m0PkBytes)
+		if wrongBlockProposer.Equal(*realBlock.Header.ProposerPublicKey) {
+			wrongBlockProposer = NewPublicKey(m1PkBytes)
+		}
+		wrongBlockProposerVotingPublicKey := testMeta.pubKeyToBLSKeyMap[Base58CheckEncode(wrongBlockProposer.ToBytes(), false, testMeta.chain.params)].PublicKey()
+		realBlock.Header.ProposerPublicKey = wrongBlockProposer
+		realBlock.Header.ProposerVotingPublicKey = wrongBlockProposerVotingPublicKey
+		updateProposerVotePartialSignatureForBlock(testMeta, realBlock)
+		// There should be no error, but the block should not be stored in the block index.
+		err = testMeta.chain.processOrphanBlockPoS(realBlock)
+		require.NoError(t, err)
+		// Get the block node from the block index.
+		blockHash, err := realBlock.Hash()
+		require.NoError(t, err)
+		_, exists := testMeta.chain.blockIndexByHash[*blockHash]
+		require.False(t, exists)
+	}
+
+	// Generate a real block in this epoch and update the QC to not have a supermajority.
+	// This fails the spam prevention check and the block will not be in the block index.
+	{
+		var realBlock *MsgDeSoBlock
+		realBlock = _generateRealBlock(testMeta, 12, 12, 543, testMeta.chain.BlockTip().Hash, false)
+		// Give the block a random parent, so it is truly an orphan.
+		realBlock.Header.PrevBlockHash = NewBlockHash(RandomBytes(32))
+		// Just make sure we're in the same epoch.
+		utxoView := _newUtxoView(testMeta)
+		currentEpochEntry, err := utxoView.GetCurrentEpochEntry()
+		require.NoError(t, err)
+		require.True(t, currentEpochEntry.ContainsBlockHeight(12))
+		// Update the QC to not have a supermajority.
+ // Get all the bls keys for the validators that aren't the leader. + signersList := bitset.NewBitset() + var signatures []*bls.Signature + require.NoError(testMeta.t, err) + votePayload := consensus.GetVoteSignaturePayload(11, testMeta.chain.BlockTip().Hash) + allSnapshotValidators, err := utxoView.GetAllSnapshotValidatorSetEntriesByStake() + require.NoError(t, err) + // Only have m0 sign it. m0 has significantly less than 2/3 of the stake. + m0PKID := utxoView.GetPKIDForPublicKey(m0PkBytes).PKID + for ii, validatorEntry := range allSnapshotValidators { + if !validatorEntry.ValidatorPKID.Eq(m0PKID) { + continue + } + validatorPublicKeyBytes := utxoView.GetPublicKeyForPKID(validatorEntry.ValidatorPKID) + validatorPublicKey := Base58CheckEncode(validatorPublicKeyBytes, false, testMeta.chain.params) + validatorBLSPrivateKey := testMeta.pubKeyToBLSKeyMap[validatorPublicKey] + sig, err := validatorBLSPrivateKey.Sign(votePayload[:]) + require.NoError(testMeta.t, err) + signatures = append(signatures, sig) + signersList = signersList.Set(ii, true) + } + // Create the aggregated signature. + aggregatedSignature, err := bls.AggregateSignatures(signatures) + require.NoError(testMeta.t, err) + realBlock.Header.ValidatorsVoteQC.ValidatorsVoteAggregatedSignature = &AggregatedBLSSignature{ + SignersList: signersList, + Signature: aggregatedSignature, + } + updateProposerVotePartialSignatureForBlock(testMeta, realBlock) + // There should be no error, but the block should be marked as ValidateFailed. + err = testMeta.chain.processOrphanBlockPoS(realBlock) + require.NoError(t, err) + // Get the block node from the block index. + blockHash, err := realBlock.Hash() + require.NoError(t, err) + _, exists := testMeta.chain.blockIndexByHash[*blockHash] + require.False(t, exists) + } + { + // Generate a real block in the next epoch and it should pass validation and be stored. 
+ utxoView := _newUtxoView(testMeta) + currentEpochEntry, err := utxoView.GetCurrentEpochEntry() + require.NoError(t, err) + var nextEpochBlock *MsgDeSoBlock + nextEpochBlock = _generateRealBlock(testMeta, currentEpochEntry.FinalBlockHeight+1, currentEpochEntry.FinalBlockHeight+1, 23, testMeta.chain.BlockTip().Hash, false) + // Give the block a random parent, so it is truly an orphan. + nextEpochBlock.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) + updateProposerVotePartialSignatureForBlock(testMeta, nextEpochBlock) + err = testMeta.chain.processOrphanBlockPoS(nextEpochBlock) + require.NoError(t, err) + // Get the block node from the block index. + blockHash, err := nextEpochBlock.Hash() + require.NoError(t, err) + blockNode, exists := testMeta.chain.blockIndexByHash[*blockHash] + require.True(t, exists) + require.True(t, blockNode.IsStored()) + require.False(t, blockNode.IsValidateFailed()) + require.False(t, blockNode.IsValidated()) + } + { + // Generate a real block in the next epoch and make the block proposer any public key not in + // the validator set. This should fail the spam prevention check and the block will not be in the block index. + utxoView := _newUtxoView(testMeta) + currentEpochEntry, err := utxoView.GetCurrentEpochEntry() + require.NoError(t, err) + var nextEpochBlock *MsgDeSoBlock + nextEpochBlock = _generateRealBlock(testMeta, currentEpochEntry.FinalBlockHeight+1, currentEpochEntry.FinalBlockHeight+1, 17283, testMeta.chain.BlockTip().Hash, false) + // Give the block a random parent, so it is truly an orphan. + nextEpochBlock.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) + // Change the block proposer to a random BLS public key. + nextEpochBlock.Header.ProposerVotingPublicKey = _generateRandomBLSPrivateKey(t).PublicKey() + updateProposerVotePartialSignatureForBlock(testMeta, nextEpochBlock) + // There should be no error, but the block should be marked as ValidateFailed. 
+ err = testMeta.chain.processOrphanBlockPoS(nextEpochBlock) + require.NoError(t, err) + // Get the block node from the block index. + blockHash, err := nextEpochBlock.Hash() + require.NoError(t, err) + _, exists := testMeta.chain.blockIndexByHash[*blockHash] + require.False(t, exists) + } + { + // Generate a real block in the next epoch and update the QC to not have a supermajority. + utxoView := _newUtxoView(testMeta) + currentEpochEntry, err := utxoView.GetCurrentEpochEntry() + require.NoError(t, err) + var nextEpochBlock *MsgDeSoBlock + nextEpochBlock = _generateRealBlock(testMeta, currentEpochEntry.FinalBlockHeight+1, currentEpochEntry.FinalBlockHeight+1, 3178, testMeta.chain.BlockTip().Hash, false) + // Give the block a random parent, so it is truly an orphan. + nextEpochBlock.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) + updateProposerVotePartialSignatureForBlock(testMeta, nextEpochBlock) + // Update the QC to not have a supermajority. + err = testMeta.chain.processOrphanBlockPoS(nextEpochBlock) + require.NoError(t, err) + // Update the QC to not have a supermajority. + // Get all the bls keys for the validators that aren't the leader. + signersList := bitset.NewBitset() + var signatures []*bls.Signature + require.NoError(testMeta.t, err) + votePayload := consensus.GetVoteSignaturePayload(currentEpochEntry.FinalBlockHeight, testMeta.chain.BlockTip().Hash) + allSnapshotValidators, err := utxoView.GetAllSnapshotValidatorSetEntriesByStake() + require.NoError(t, err) + // Only have m0 sign it. m0 has significantly less than 2/3 of the stake. 
+ m0PKID := utxoView.GetPKIDForPublicKey(m0PkBytes).PKID + for ii, validatorEntry := range allSnapshotValidators { + if !validatorEntry.ValidatorPKID.Eq(m0PKID) { + continue + } + validatorPublicKeyBytes := utxoView.GetPublicKeyForPKID(validatorEntry.ValidatorPKID) + validatorPublicKey := Base58CheckEncode(validatorPublicKeyBytes, false, testMeta.chain.params) + validatorBLSPrivateKey := testMeta.pubKeyToBLSKeyMap[validatorPublicKey] + sig, err := validatorBLSPrivateKey.Sign(votePayload[:]) + require.NoError(testMeta.t, err) + signatures = append(signatures, sig) + signersList = signersList.Set(ii, true) + } + // Create the aggregated signature. + aggregatedSignature, err := bls.AggregateSignatures(signatures) + require.NoError(testMeta.t, err) + nextEpochBlock.Header.ValidatorsVoteQC.ValidatorsVoteAggregatedSignature = &AggregatedBLSSignature{ + SignersList: signersList, + Signature: aggregatedSignature, + } + updateProposerVotePartialSignatureForBlock(testMeta, nextEpochBlock) + err = testMeta.chain.processOrphanBlockPoS(nextEpochBlock) + require.NoError(t, err) + // Get the block node from the block index. + blockHash, err := nextEpochBlock.Hash() + require.NoError(t, err) + _, exists := testMeta.chain.blockIndexByHash[*blockHash] + require.False(t, exists) + } + { + // Generate a block that is two epochs in the future. We won't even store this. + utxoView := _newUtxoView(testMeta) + currentEpochEntry, err := utxoView.GetCurrentEpochEntry() + require.NoError(t, err) + nextEpochEntry, err := utxoView.computeNextEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight, currentEpochEntry.FinalBlockHeight, 1) + require.NoError(t, err) + var twoEpochsInFutureBlock *MsgDeSoBlock + twoEpochsInFutureBlock = _generateRealBlock(testMeta, nextEpochEntry.FinalBlockHeight+1, nextEpochEntry.FinalBlockHeight+1, 17283, testMeta.chain.BlockTip().Hash, false) + // Give the block a random parent, so it is truly an orphan. 
+ twoEpochsInFutureBlock.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) + updateProposerVotePartialSignatureForBlock(testMeta, twoEpochsInFutureBlock) + // We should get an error that this block is too far in the future. + err = testMeta.chain.processOrphanBlockPoS(twoEpochsInFutureBlock) + require.Error(t, err) + // The block shouldn't be in the block index. + blockHash, err := twoEpochsInFutureBlock.Hash() + require.NoError(t, err) + _, exists := testMeta.chain.blockIndexByHash[*blockHash] + require.False(t, exists) + } + { + // Generate a block that is in the previous epoch. We should store this. + utxoView := _newUtxoView(testMeta) + currentEpochEntry, err := utxoView.GetCurrentEpochEntry() + require.NoError(t, err) + prevEpochEntry, err := utxoView.simulatePrevEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) + require.NoError(t, err) + var prevEpochBlock *MsgDeSoBlock + prevEpochBlock = _generateRealBlock(testMeta, prevEpochEntry.FinalBlockHeight, prevEpochEntry.FinalBlockHeight, 17283, testMeta.chain.BlockTip().Hash, false) + err = testMeta.chain.processOrphanBlockPoS(prevEpochBlock) + require.NoError(t, err) + // The block should be in the block index. + blockHash, err := prevEpochBlock.Hash() + require.NoError(t, err) + blockNode, exists := testMeta.chain.blockIndexByHash[*blockHash] + require.True(t, exists) + require.True(t, blockNode.IsStored()) + require.False(t, blockNode.IsValidateFailed()) + require.False(t, blockNode.IsValidated()) + } +} + +func TestHasValidProposerPartialSignaturePoS(t *testing.T) { + testMeta := NewTestPoSBlockchainWithValidators(t) + // Generate a real block and make sure it doesn't hit any errors. 
+ var realBlock *MsgDeSoBlock + realBlock = _generateRealBlock(testMeta, 12, 12, 889, testMeta.chain.BlockTip().Hash, false) + utxoView := _newUtxoView(testMeta) + snapshotEpochNumber, err := utxoView.GetCurrentSnapshotEpochNumber() + require.NoError(t, err) + isValid, err := utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + require.NoError(t, err) + require.True(t, isValid) + + realProposerPublicKey := realBlock.Header.ProposerPublicKey + realProposerPublicKeyBase58Check := Base58CheckEncode(realProposerPublicKey.ToBytes(), false, testMeta.params) + wrongProposerPublicKey := NewPublicKey(m1PkBytes) + if wrongProposerPublicKey.Equal(*realProposerPublicKey) { + wrongProposerPublicKey = NewPublicKey(m2PkBytes) + } + wrongProposerPublicKeyBase58Check := Base58CheckEncode(wrongProposerPublicKey.ToBytes(), false, testMeta.params) + // Using a different validator's public key as ProposerPublicKey should fail + { + realBlock.Header.ProposerPublicKey = wrongProposerPublicKey + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + require.NoError(t, err) + require.False(t, isValid) + } + // Using a non-validator's public key as ProposerPublicKey should fail. + { + realBlock.Header.ProposerPublicKey = NewPublicKey(paramUpdaterPkBytes) + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + require.NoError(t, err) + require.False(t, isValid) + // Reset the proposer public key + realBlock.Header.ProposerPublicKey = realProposerPublicKey + } + // If the block proposer's voting public key doesn't match the snapshot, it should fail. 
+ realVotingPublicKey := realBlock.Header.ProposerVotingPublicKey + { + realBlock.Header.ProposerVotingPublicKey = _generateRandomBLSPrivateKey(t).PublicKey() + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + require.NoError(t, err) + require.False(t, isValid) + // Reset the proposer voting public key + realBlock.Header.ProposerVotingPublicKey = realVotingPublicKey + } + // Signature on incorrect payload should fail. + { + incorrectPayload := consensus.GetVoteSignaturePayload(13, testMeta.chain.BlockTip().Hash) + realBlock.Header.ProposerVotePartialSignature, err = testMeta.pubKeyToBLSKeyMap[realProposerPublicKeyBase58Check].Sign(incorrectPayload[:]) + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + require.NoError(t, err) + require.False(t, isValid) + } + // Signature on correct payload from wrong public key should fail. + { + var realBlockHash *BlockHash + realBlockHash, err = realBlock.Hash() + require.NoError(t, err) + correctPayload := consensus.GetVoteSignaturePayload(12, realBlockHash) + realBlock.Header.ProposerVotePartialSignature, err = testMeta.pubKeyToBLSKeyMap[wrongProposerPublicKeyBase58Check].Sign(correctPayload[:]) + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + require.NoError(t, err) + require.False(t, isValid) + } +} + +func TestHasValidProposerRandomSeedSignaturePoS(t *testing.T) { + testMeta := NewTestPoSBlockchainWithValidators(t) + // Generate a real block and process it so we have a PoS block on the best chain. + var realBlock *MsgDeSoBlock + realBlock = _generateRealBlock(testMeta, 12, 12, 889, testMeta.chain.BlockTip().Hash, false) + // The first PoS block passes the validation. 
+ isValid, err := testMeta.chain.hasValidProposerRandomSeedSignaturePoS(realBlock) + require.NoError(t, err) + require.True(t, isValid) + _, _, _, err = testMeta.chain.processBlockPoS(realBlock, 12, true) + require.NoError(t, err) + realBlockHash, err := realBlock.Hash() + require.NoError(t, err) + realBlockNode, exists := testMeta.chain.blockIndexByHash[*realBlockHash] + require.True(t, exists) + require.True(t, realBlockNode.IsStored()) + require.False(t, realBlockNode.IsValidateFailed()) + require.True(t, realBlockNode.IsValidated()) + require.NotNil(t, realBlockNode.Header.ProposerRandomSeedSignature) + + // A valid child block with a valid proposer random seed signature will pass validations. + var childBlock *MsgDeSoBlock + childBlock = _generateRealBlock(testMeta, 13, 13, 273, realBlockNode.Hash, false) + { + isValid, err = testMeta.chain.hasValidProposerRandomSeedSignaturePoS(childBlock) + require.NoError(t, err) + require.True(t, isValid) + } + + // Modifying the random seed signature on the parent to make the child fail. + { + realBlockNode.Header.ProposerRandomSeedSignature, err = (&bls.Signature{}).FromBytes(RandomBytes(32)) + require.NoError(t, err) + isValid, err = testMeta.chain.hasValidProposerRandomSeedSignaturePoS(childBlock) + require.NoError(t, err) + require.False(t, isValid) + } + + // Signing the previous block's random seed signature with the wrong key should fail. 
+ { + wrongProposerPrivateKey := _generateRandomBLSPrivateKey(t) + prevBlockRandomSeedHashBytes := sha256.Sum256(realBlockNode.Header.ProposerRandomSeedSignature.ToBytes()) + childBlock.Header.ProposerRandomSeedSignature, err = wrongProposerPrivateKey.Sign(prevBlockRandomSeedHashBytes[:]) + require.NoError(t, err) + isValid, err = testMeta.chain.hasValidProposerRandomSeedSignaturePoS(childBlock) + require.NoError(t, err) + require.False(t, isValid) + } +} + +// _generateRealBlock generates a BlockTemplate with real data by adding 50 test transactions to the +// PosMempool, generating a RandomSeedHash, updating the latestBlockView in the PosBlockProducer, and calling _getFullRealBlockTemplate. +// It can be used to generate a block w/ either a vote or timeout QC. +func _generateRealBlock(testMeta *TestMeta, blockHeight uint64, view uint64, seed int64, prevBlockHash *BlockHash, isTimeout bool) BlockTemplate { + globalParams := _testGetDefaultGlobalParams() + randSource := rand.New(rand.NewSource(seed)) + passingTxns := []*MsgDeSoTxn{} + totalUtilityFee := uint64(0) + passingTransactions := 50 + feeMin := globalParams.MinimumNetworkFeeNanosPerKB + feeMax := uint64(2000) + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + for ii := 0; ii < passingTransactions; ii++ { + txn := _generateTestTxn(testMeta.t, randSource, feeMin, feeMax, m0PubBytes, m0Priv, blockHeight+100, 20) + passingTxns = append(passingTxns, txn) + _, utilityFee := computeBMF(txn.TxnFeeNanos) + totalUtilityFee += utilityFee + _wrappedPosMempoolAddTransaction(testMeta.t, testMeta.posMempool, txn) + } + + // TODO: Get real seed signature. 
+ prevBlock, exists := testMeta.chain.blockIndexByHash[*prevBlockHash] + require.True(testMeta.t, exists) + seedSignature := getRandomSeedSignature(testMeta, blockHeight, view, prevBlock.Header.ProposerRandomSeedSignature) + // Always update the testMeta latestBlockView + latestBlockView, err := testMeta.chain.getUtxoViewAtBlockHash(*prevBlockHash) + require.NoError(testMeta.t, err) + latestBlockHeight := testMeta.chain.blockIndexByHash[*prevBlockHash].Height + testMeta.posMempool.UpdateLatestBlock(latestBlockView, uint64(latestBlockHeight)) + return _getFullRealBlockTemplate(testMeta, testMeta.posMempool.readOnlyLatestBlockView, blockHeight, view, seedSignature, isTimeout) +} + +// _generateDummyBlock generates a BlockTemplate with dummy data by adding 50 test transactions to the +// PosMempool, generating a RandomSeedHash, updating the latestBlockView in the PosBlockProducer, and calling _getFullDummyBlockTemplate. +// It then adds this dummy block to the block index. +func _generateDummyBlock(testMeta *TestMeta, blockHeight uint64, view uint64, seed int64) BlockTemplate { + globalParams := _testGetDefaultGlobalParams() + randSource := rand.New(rand.NewSource(seed)) + passingTxns := []*MsgDeSoTxn{} + totalUtilityFee := uint64(0) + passingTransactions := 50 + feeMin := globalParams.MinimumNetworkFeeNanosPerKB + feeMax := uint64(2000) + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + for ii := 0; ii < passingTransactions; ii++ { + txn := _generateTestTxn(testMeta.t, randSource, feeMin, feeMax, m0PubBytes, m0Priv, blockHeight+100, 20) + passingTxns = append(passingTxns, txn) + _, utilityFee := computeBMF(txn.TxnFeeNanos) + totalUtilityFee += utilityFee + _wrappedPosMempoolAddTransaction(testMeta.t, testMeta.posMempool, txn) + } + + seedSignature := &bls.Signature{} + _, err := seedSignature.FromBytes(Sha256DoubleHash([]byte("seed")).ToBytes()) + require.NoError(testMeta.t, err) + + blockTemplate := _getFullDummyBlockTemplate(testMeta, 
testMeta.posMempool.readOnlyLatestBlockView, blockHeight, view, seedSignature) + require.NotNil(testMeta.t, blockTemplate) + // This is a hack to get the block to connect. We just give the block reward to m0. + blockTemplate.Txns[0].TxOutputs[0].PublicKey = m0PubBytes + // Make sure ToBytes works. + var msgDesoBlock *MsgDeSoBlock + msgDesoBlock = blockTemplate + _, err = msgDesoBlock.ToBytes(false) + require.NoError(testMeta.t, err) + newBlockHash, err := msgDesoBlock.Hash() + require.NoError(testMeta.t, err) + + // Add block to block index. + blockNode, err := testMeta.chain.storeBlockInBlockIndex(msgDesoBlock) + require.NoError(testMeta.t, err) + require.True(testMeta.t, blockNode.IsStored()) + _, exists := testMeta.chain.blockIndexByHash[*newBlockHash] + require.True(testMeta.t, exists) + return blockTemplate +} + +// _generateBlockAndAddToBestChain generates a dummy BlockTemplate by calling _generateDummyBlock and then adds it to the best chain. +// Finally it updates the PosMempool's latest block view. 
+func _generateBlockAndAddToBestChain(testMeta *TestMeta, blockHeight uint64, view uint64, seed int64) *MsgDeSoBlock { + blockTemplate := _generateDummyBlock(testMeta, blockHeight, view, seed) + var msgDesoBlock *MsgDeSoBlock + msgDesoBlock = blockTemplate + newBlockHash, err := msgDesoBlock.Hash() + require.NoError(testMeta.t, err) + newBlockNode, exists := testMeta.chain.blockIndexByHash[*newBlockHash] + require.True(testMeta.t, exists) + testMeta.chain.addBlockToBestChain(newBlockNode) + // Update the latest block view + latestBlockView, err := testMeta.chain.GetUncommittedTipView() + require.NoError(testMeta.t, err) + testMeta.posMempool.UpdateLatestBlock(latestBlockView, blockTemplate.Header.Height) + + return blockTemplate +} + +func getLeaderForBlockHeightAndView(testMeta *TestMeta, latestBlockView *UtxoView, blockHeight uint64, view uint64) (string, []byte) { + currentEpochEntry, err := latestBlockView.GetCurrentEpochEntry() + require.NoError(testMeta.t, err) + leaders, err := latestBlockView.GetCurrentSnapshotLeaderSchedule() + require.NoError(testMeta.t, err) + require.GreaterOrEqual(testMeta.t, view, currentEpochEntry.InitialView) + viewDiff := view - currentEpochEntry.InitialView + require.GreaterOrEqual(testMeta.t, blockHeight, currentEpochEntry.InitialBlockHeight) + heightDiff := blockHeight - currentEpochEntry.InitialBlockHeight + require.GreaterOrEqual(testMeta.t, viewDiff, heightDiff) + leaderIdx := (viewDiff - heightDiff) % uint64(len(leaders)) + require.Greater(testMeta.t, len(leaders), int(leaderIdx)) + leader := leaders[leaderIdx] + leaderPublicKeyBytes := latestBlockView.GetPublicKeyForPKID(leader) + return Base58CheckEncode(leaderPublicKeyBytes, false, testMeta.chain.params), leaderPublicKeyBytes +} + +func getRandomSeedSignature(testMeta *TestMeta, height uint64, view uint64, prevRandomSeedSignature *bls.Signature) *bls.Signature { + leaderPublicKey, _ := getLeaderForBlockHeightAndView(testMeta, testMeta.posMempool.readOnlyLatestBlockView, 
height, view) + leaderBLSPrivKey := testMeta.pubKeyToBLSKeyMap[leaderPublicKey] + prevRandomSeedHashSHA256 := sha256.Sum256(prevRandomSeedSignature.ToBytes()) + newRandomSeedSignature, err := leaderBLSPrivKey.Sign(prevRandomSeedHashSHA256[:]) + require.NoError(testMeta.t, err) + return newRandomSeedSignature +} + +func updateRandomSeedSignature(testMeta *TestMeta, block *MsgDeSoBlock, prevRandomSeedSignature *bls.Signature) { + block.Header.ProposerRandomSeedSignature = getRandomSeedSignature(testMeta, block.Header.Height, block.Header.ProposedInView, prevRandomSeedSignature) +} + +func updateProposerVotePartialSignatureForBlock(testMeta *TestMeta, block *MsgDeSoBlock) { + blockHash, err := block.Hash() + require.NoError(testMeta.t, err) + leaderPublicKey, _ := getLeaderForBlockHeightAndView(testMeta, testMeta.posMempool.readOnlyLatestBlockView, block.Header.Height, block.Header.ProposedInView) + leaderBlsPrivKey := testMeta.pubKeyToBLSKeyMap[leaderPublicKey] + partialSigPayload := consensus.GetVoteSignaturePayload(block.Header.ProposedInView, blockHash) + sig, err := leaderBlsPrivKey.Sign(partialSigPayload[:]) + require.NoError(testMeta.t, err) + block.Header.ProposerVotePartialSignature = sig +} + +func _getVoteQC(testMeta *TestMeta, latestBlockView *UtxoView, qcBlockHash *BlockHash, qcView uint64) *QuorumCertificate { + votePayload := consensus.GetVoteSignaturePayload(qcView, qcBlockHash) + allSnapshotValidators, err := latestBlockView.GetAllSnapshotValidatorSetEntriesByStake() + require.NoError(testMeta.t, err) + // QC stuff. + + // Get all the bls keys for the validators that aren't the leader. 
+ signersList := bitset.NewBitset() + var signatures []*bls.Signature + require.NoError(testMeta.t, err) + for ii, validatorEntry := range allSnapshotValidators { + validatorPublicKeyBytes := latestBlockView.GetPublicKeyForPKID(validatorEntry.ValidatorPKID) + validatorPublicKey := Base58CheckEncode(validatorPublicKeyBytes, false, testMeta.chain.params) + validatorBLSPrivateKey := testMeta.pubKeyToBLSKeyMap[validatorPublicKey] + sig, err := validatorBLSPrivateKey.Sign(votePayload[:]) + require.NoError(testMeta.t, err) + signatures = append(signatures, sig) + signersList = signersList.Set(ii, true) + } + // Create the aggregated signature. + aggregatedSignature, err := bls.AggregateSignatures(signatures) + require.NoError(testMeta.t, err) + // Create the vote QC. + voteQC := &QuorumCertificate{ + BlockHash: qcBlockHash, + ProposedInView: qcView, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + SignersList: signersList, + Signature: aggregatedSignature, + }, + } + + isValid := consensus.IsValidSuperMajorityQuorumCertificate(voteQC, toConsensusValidators(allSnapshotValidators)) + require.True(testMeta.t, isValid) + return voteQC +} + +// _getFullRealBlockTemplate is a helper function that generates a block template with a valid TxnConnectStatusByIndexHash +// and a valid TxnConnectStatusByIndex, a valid vote or timeout QC, does all the required signing by validators, +// and generates the proper ProposerVotePartialSignature. +func _getFullRealBlockTemplate(testMeta *TestMeta, latestBlockView *UtxoView, blockHeight uint64, view uint64, seedSignature *bls.Signature, isTimeout bool) BlockTemplate { + blockTemplate, err := testMeta.posBlockProducer.createBlockTemplate(latestBlockView, blockHeight, view, seedSignature) + require.NoError(testMeta.t, err) + require.NotNil(testMeta.t, blockTemplate) + blockTemplate.Header.TxnConnectStatusByIndexHash = HashBitset(blockTemplate.TxnConnectStatusByIndex) + + // Figure out who the leader is supposed to be. 
+ leaderPublicKey, leaderPublicKeyBytes := getLeaderForBlockHeightAndView(testMeta, latestBlockView, blockHeight, view) + // Get leader voting private key. + leaderVotingPrivateKey := testMeta.pubKeyToBLSKeyMap[leaderPublicKey] + // Get hash of last block + chainTip := testMeta.chain.blockIndexByHash[*blockTemplate.Header.PrevBlockHash] + chainTipHash := chainTip.Hash + // Get the vote signature payload + // Hack to get view numbers working properly w/ PoW blocks. + qcView := chainTip.Header.ProposedInView + if qcView == 0 { + qcView = view - 1 + } + + // Create the vote QC. + voteQC := _getVoteQC(testMeta, latestBlockView, chainTipHash, qcView) + if !isTimeout { + blockTemplate.Header.ValidatorsVoteQC = voteQC + } else { + var validatorsTimeoutHighQCViews []uint64 + timeoutSignersList := bitset.NewBitset() + timeoutSigs := []*bls.Signature{} + // TODO: Get the latest vote QC. If the current tip isn't a vote QC, then + // we need to go further back. + prevQC := testMeta.chain.blockTip().Header.ValidatorsVoteQC + ii := 0 + for _, blsPrivKey := range testMeta.pubKeyToBLSKeyMap { + // Add timeout high qc view. Just assume it's the view after the vote QC for simplicity. + validatorsTimeoutHighQCViews = append(validatorsTimeoutHighQCViews, prevQC.GetView()) + // Add timeout aggregated signature. 
+ newPayload := consensus.GetTimeoutSignaturePayload(view-1, prevQC.GetView()) + sig, err := blsPrivKey.Sign(newPayload[:]) + require.NoError(testMeta.t, err) + timeoutSigs = append(timeoutSigs, sig) + timeoutSignersList.Set(ii, true) + ii++ + } + timeoutAggregatedSignature, err := bls.AggregateSignatures(timeoutSigs) + require.NoError(testMeta.t, err) + timeoutQC := &TimeoutAggregateQuorumCertificate{ + TimedOutView: view - 1, + ValidatorsHighQC: prevQC, + ValidatorsTimeoutHighQCViews: validatorsTimeoutHighQCViews, + ValidatorsTimeoutAggregatedSignature: &AggregatedBLSSignature{ + SignersList: timeoutSignersList, + Signature: timeoutAggregatedSignature, + }, + } + blockTemplate.Header.ValidatorsTimeoutAggregateQC = timeoutQC + } + blockTemplate.Header.ProposerPublicKey = NewPublicKey(leaderPublicKeyBytes) + blockTemplate.Header.ProposerVotingPublicKey = leaderVotingPrivateKey.PublicKey() + // Ugh we need to adjust the timestamp. + blockTemplate.Header.TstampNanoSecs = uint64(time.Now().UnixNano()) + if chainTip.Header.TstampNanoSecs > blockTemplate.Header.TstampNanoSecs { + blockTemplate.Header.TstampNanoSecs = chainTip.Header.TstampNanoSecs + 1 + } + require.Less(testMeta.t, blockTemplate.Header.TstampNanoSecs, uint64(time.Now().UnixNano())+testMeta.chain.params.DefaultBlockTimestampDriftNanoSecs) + var proposerVotePartialSignature *bls.Signature + // Just hack it so the leader gets the block reward. + blockTemplate.Txns[0].TxOutputs[0].PublicKey = leaderPublicKeyBytes + // Fix the merkle root. 
+ merkleRoot, _, err := ComputeMerkleRoot(blockTemplate.Txns) + require.NoError(testMeta.t, err) + blockTemplate.Header.TransactionMerkleRoot = merkleRoot + var msgDesoBlock *MsgDeSoBlock + msgDesoBlock = blockTemplate + newBlockHash, err := msgDesoBlock.Hash() + require.NoError(testMeta.t, err) + newBlockVotePayload := consensus.GetVoteSignaturePayload(view, newBlockHash) + proposerVotePartialSignature, err = leaderVotingPrivateKey.Sign(newBlockVotePayload[:]) + require.NoError(testMeta.t, err) + blockTemplate.Header.ProposerVotePartialSignature = proposerVotePartialSignature + return blockTemplate +} + +// _getFullDummyBlockTemplate is a helper function that generates a block template with a dummy TxnConnectStatusByIndexHash +// and a dummy ValidatorsVoteQC. +func _getFullDummyBlockTemplate(testMeta *TestMeta, latestBlockView *UtxoView, blockHeight uint64, view uint64, seedSignature *bls.Signature) BlockTemplate { + blockTemplate, err := testMeta.posBlockProducer.createBlockTemplate(latestBlockView, blockHeight, view, seedSignature) + require.NoError(testMeta.t, err) + require.NotNil(testMeta.t, blockTemplate) + blockTemplate.Header.TxnConnectStatusByIndexHash = HashBitset(blockTemplate.TxnConnectStatusByIndex) + // Add a dummy vote QC + proposerVotingPublicKey := _generateRandomBLSPrivateKey(testMeta.t) + dummySig, err := proposerVotingPublicKey.Sign(RandomBytes(32)) + chainTip := testMeta.chain.BlockTip() + blockTemplate.Header.ValidatorsVoteQC = &QuorumCertificate{ + BlockHash: chainTip.Hash, + ProposedInView: chainTip.Header.ProposedInView, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + SignersList: bitset.NewBitset().Set(0, true), + Signature: dummySig, + }, + } + blockTemplate.Header.ProposerVotePartialSignature = dummySig + return blockTemplate +} + +// _generateRandomBLSPrivateKey generates a random BLS private key for use in tests. 
+func _generateRandomBLSPrivateKey(t *testing.T) *bls.PrivateKey {
+	privateKey, err := bls.NewPrivateKey()
+	require.NoError(t, err)
+	return privateKey
+}
+
+// NewTestPoSBlockchainWithValidators creates a new low-difficulty Blockchain
+// with 7 validators registered and staked for use in tests.
+// Below is a description of the stake distribution.
+// - m0 has 100 nanos staked
+// - m1 has 200 nanos staked
+// - m2 has 300 nanos staked
+// - m3 has 400 nanos staked
+// - m4 has 500 nanos staked
+// - m5 has 600 nanos staked
+// - m6 has 700 nanos staked
+// After the validators have registered and staked, a block is mined and
+// the PoW Mempool and Miner are stopped. Then the end-of-epoch hook
+// is run to set the leader schedule.
+// Finally, we create a new PoSMempool, PoSBlockProducer, and set the
+// PoS Cutover height to 12.
+func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta {
+	setBalanceModelBlockHeights(t)
+	// Set the PoS Setup Height to block 11.
+	DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = 11
+	DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 12
+	DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights)
+	DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights)
+	GlobalDeSoParams = DeSoTestnetParams
+
+	chain, params, db := NewLowDifficultyBlockchain(t)
+	oldPool, miner := NewTestMiner(t, chain, params, true)
+	// Mine a few blocks to give the senderPkString some money.
+ for ii := 0; ii < 10; ii++ { + _, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, oldPool) + require.NoError(t, err) + } + + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + publicKeys := []string{m0Pub, m1Pub, m2Pub, m3Pub, m4Pub, m5Pub, m6Pub} + for _, publicKey := range publicKeys { + _, _, _ = _doBasicTransferWithViewFlush( + t, chain, db, params, senderPkString, publicKey, + senderPrivString, 1e9, 1000) + } + testMeta := &TestMeta{ + t: t, + chain: chain, + db: db, + params: params, + posMempool: nil, + posBlockProducer: nil, + // TODO: what else do we need here? + feeRateNanosPerKb: 1000, + savedHeight: 11, + mempool: oldPool, + } + // validate and stake to the public keys + _registerValidatorAndStake(testMeta, m0Pub, m0Priv, 0, 100, false) + _registerValidatorAndStake(testMeta, m1Pub, m1Priv, 0, 200, false) + _registerValidatorAndStake(testMeta, m2Pub, m2Priv, 0, 300, false) + _registerValidatorAndStake(testMeta, m3Pub, m3Priv, 0, 400, false) + _registerValidatorAndStake(testMeta, m4Pub, m4Priv, 0, 500, false) + _registerValidatorAndStake(testMeta, m5Pub, m5Priv, 0, 600, false) + _registerValidatorAndStake(testMeta, m6Pub, m6Priv, 0, 700, false) + // Mine a block with these transactions. + _, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, oldPool) + require.NoError(t, err) + oldPool.Stop() + miner.Stop() + latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) + require.NoError(t, err) + + // Run the on epoch complete hook to set the leader schedule. 
+ err = latestBlockView.RunEpochCompleteHook(11, 11, uint64(time.Now().UnixNano())) + require.NoError(t, err) + err = latestBlockView.FlushToDb(11) + require.NoError(t, err) + maxMempoolPosSizeBytes := uint64(500) + mempoolBackupIntervalMillis := uint64(30000) + mempool := NewPosMempool(params, _testGetDefaultGlobalParams(), latestBlockView, 11, _dbDirSetup(t), false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(t, mempool.Start()) + require.True(t, mempool.IsRunning()) + priv := _generateRandomBLSPrivateKey(t) + m0Pk := NewPublicKey(m0PubBytes) + posBlockProducer := NewPosBlockProducer(mempool, params, m0Pk, priv.PublicKey()) + // TODO: do we need to update the encoder migration stuff for global params. Probably. + testMeta.mempool = nil + testMeta.posMempool = mempool + testMeta.posBlockProducer = posBlockProducer + testMeta.savedHeight = 12 + //:= &TestMeta{ + // t: t, + // chain: chain, + // db: db, + // params: params, + // posMempool: mempool, + // posBlockProducer: posBlockProducer, + // // TODO: what else do we need here? + // feeRateNanosPerKb: 1000, + // savedHeight: 10, + // //miner: nil, + // //txnOps: nil, + // //txns: nil, + // //expectedSenderBalances: nil, + // //savedHeight: 0, + // //feeRateNanosPerKb: 0, + //} + t.Cleanup(func() { + mempool.Stop() + }) + return testMeta +} + +// NewTestPoSBlockchain creates a new low-difficulty Blockchain for use in tests. +// It first creates a new Blockchain, then mines 10 blocks to give the senderPkString +// some DESO to send to m0 and m1. Then it stops the miner and PoW Mempool. Finally, +// it creates a PoSMempool and PoSBlockProducer and sets the PoS fork heights. The setup +// block height is set to 9 and the cutover is set to 11. 
+func NewTestPoSBlockchain(t *testing.T) *TestMeta { + setBalanceModelBlockHeights(t) + DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = 9 + DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 11 + DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) + DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) + GlobalDeSoParams = DeSoTestnetParams + chain, params, db := NewLowDifficultyBlockchain(t) + params.ForkHeights.BalanceModelBlockHeight = 1 + oldPool, miner := NewTestMiner(t, chain, params, true) + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, oldPool) + require.NoError(t, err) + } + + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + publicKeys := []string{m0Pub, m1Pub, m2Pub, m3Pub, m4Pub, m5Pub, m6Pub} + for _, publicKey := range publicKeys { + _, _, _ = _doBasicTransferWithViewFlush( + t, chain, db, params, senderPkString, publicKey, + senderPrivString, 1e9, 1000) + } + oldPool.Stop() + miner.Stop() + latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) + require.NoError(t, err) + maxMempoolPosSizeBytes := uint64(500) + mempoolBackupIntervalMillis := uint64(30000) + mempool := NewPosMempool(params, _testGetDefaultGlobalParams(), latestBlockView, 10, _dbDirSetup(t), false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(t, mempool.Start()) + require.True(t, mempool.IsRunning()) + priv := _generateRandomBLSPrivateKey(t) + m0Pk := NewPublicKey(m0PubBytes) + posBlockProducer := NewPosBlockProducer(mempool, params, m0Pk, priv.PublicKey()) + testMeta := &TestMeta{ + t: t, + chain: chain, + db: db, + params: params, + posMempool: mempool, + posBlockProducer: posBlockProducer, + // TODO: what else do we need here? 
+ feeRateNanosPerKb: 1000, + savedHeight: 10, + //miner: nil, + //txnOps: nil, + //txns: nil, + //expectedSenderBalances: nil, + //savedHeight: 0, + //feeRateNanosPerKb: 0, + } + t.Cleanup(func() { + mempool.Stop() + }) + return testMeta +} diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index 2e686e24d..25b55aa22 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -13,8 +13,10 @@ import ( // type EpochEntry struct { - EpochNumber uint64 - FinalBlockHeight uint64 + EpochNumber uint64 + InitialBlockHeight uint64 + InitialView uint64 + FinalBlockHeight uint64 // This captures the on-chain timestamp when this epoch entry was created. This does not // represent the timestamp for first block of the epoch, but rather when this epoch entry @@ -25,6 +27,8 @@ type EpochEntry struct { func (epochEntry *EpochEntry) Copy() *EpochEntry { return &EpochEntry{ EpochNumber: epochEntry.EpochNumber, + InitialBlockHeight: epochEntry.InitialBlockHeight, + InitialView: epochEntry.InitialView, FinalBlockHeight: epochEntry.FinalBlockHeight, CreatedAtBlockTimestampNanoSecs: epochEntry.CreatedAtBlockTimestampNanoSecs, } @@ -33,6 +37,8 @@ func (epochEntry *EpochEntry) Copy() *EpochEntry { func (epochEntry *EpochEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte data = append(data, UintToBuf(epochEntry.EpochNumber)...) + data = append(data, UintToBuf(epochEntry.InitialBlockHeight)...) + data = append(data, UintToBuf(epochEntry.InitialView)...) data = append(data, UintToBuf(epochEntry.FinalBlockHeight)...) data = append(data, UintToBuf(epochEntry.CreatedAtBlockTimestampNanoSecs)...) 
return data @@ -47,6 +53,18 @@ func (epochEntry *EpochEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *b return errors.Wrapf(err, "EpochEntry.Decode: Problem reading EpochNumber: ") } + // InitialBlockHeight + epochEntry.InitialBlockHeight, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "EpochEntry.Decode: Problem reading InitialBlockHeight: ") + } + + // InitialView + epochEntry.InitialView, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "EpochEntry.Decode: Problem reading InitialView: ") + } + // FinalBlockHeight epochEntry.FinalBlockHeight, err = ReadUvarint(rr) if err != nil { @@ -70,6 +88,10 @@ func (epochEntry *EpochEntry) GetEncoderType() EncoderType { return EncoderTypeEpochEntry } +func (epochEntry *EpochEntry) ContainsBlockHeight(blockHeight uint64) bool { + return blockHeight >= epochEntry.InitialBlockHeight && blockHeight <= epochEntry.FinalBlockHeight +} + // // UTXO VIEW UTILS // @@ -99,6 +121,8 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { // case prior to the first execution of the OnEpochCompleteHook. genesisEpochEntry := &EpochEntry{ EpochNumber: 0, + InitialBlockHeight: 0, + InitialView: 0, FinalBlockHeight: uint64(bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight), CreatedAtBlockTimestampNanoSecs: 0, } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 9ecb90636..863af693e 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -47,9 +47,10 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) // - Snapshot the current top N stake entries, who will receive staking rewards. // // Step 3: Roll over to the next epoch. +// - Compute the start block height and view number for the next epoch. // - Compute the final block height for the next epoch. // - Update CurrentEpochEntry to the next epoch's. 
-func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, blockTimestampNanoSecs uint64) error { +func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, view uint64, blockTimestampNanoSecs uint64) error { // Sanity-check that the current block is the last block in the current epoch. // // Note that this will also return true if we're currently at the ProofOfStake1StateSetupBlockHeight @@ -84,7 +85,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, blockTimestampNano // TODO: Evict old snapshots when safe to do so. // Step 3: Roll Over to The Next Epoch - if err := bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight, blockTimestampNanoSecs); err != nil { + if err := bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight, view, blockTimestampNanoSecs); err != nil { return errors.Wrapf(err, "RunEpochCompleteHook: ") } @@ -137,28 +138,69 @@ func (bav *UtxoView) runEpochCompleteSnapshotGeneration(epochNumber uint64) erro return nil } -// Updates the currentEpochEntry to the next epoch's. -func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeight uint64, blockTimestampNanoSecs uint64) error { +func (bav *UtxoView) computeNextEpochEntry(currentEpochNumber uint64, currentEpochFinalBlockHeight uint64, currentEpochFinalView uint64, nextEpochBlockTimestampNanoSecs uint64) (*EpochEntry, error) { // Retrieve the SnapshotGlobalParamsEntry to determine the next epoch's final block height. We use the // snapshot global params here because the next epoch begin immediately, and its length is used in the PoS // consensus. The validator set for the next epoch needs to be in agreement on the length of the epoch // before the epoch begins. 
- snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntry() + snapshotGlobalParamsEntry, err := bav.GetCurrentSnapshotGlobalParamsEntry() if err != nil { - return errors.Wrapf(err, "runEpochCompleteEpochRollover: problem retrieving SnapshotGlobalParamsEntry: ") + return nil, errors.Wrapf(err, "computeNextEpochEntry: problem retrieving SnapshotGlobalParamsEntry: ") } - // Calculate the NextEpochFinalBlockHeight. - nextEpochFinalBlockHeight, err := SafeUint64().Add(blockHeight, snapshotGlobalParamsEntry.EpochDurationNumBlocks) + // Calculate the NextEpoch's FinalBlockHeight. + nextEpochFinalBlockHeight, err := SafeUint64().Add(currentEpochFinalBlockHeight, snapshotGlobalParamsEntry.EpochDurationNumBlocks) if err != nil { - return errors.Wrapf(err, "runEpochCompleteEpochRollover: problem calculating NextEpochFinalBlockHeight: ") + return nil, errors.Wrapf(err, "computeNextEpochEntry: problem calculating NextEpochFinalBlockHeight: ") } // Roll-over a new epoch by setting a new CurrentEpochEntry. nextEpochEntry := &EpochEntry{ - EpochNumber: epochNumber + 1, + EpochNumber: currentEpochNumber + 1, + InitialBlockHeight: currentEpochFinalBlockHeight + 1, + InitialView: currentEpochFinalView + 1, FinalBlockHeight: nextEpochFinalBlockHeight, - CreatedAtBlockTimestampNanoSecs: blockTimestampNanoSecs, + CreatedAtBlockTimestampNanoSecs: nextEpochBlockTimestampNanoSecs, + } + return nextEpochEntry, nil +} + +// simulatePrevEpochEntry simulates the block range for the previous epoch given the current epoch's initial +// block height and epoch number. The view and timestamp for the simulated epoch are left empty since they can't +// be easily simulated, so DO NOT USE CreatedAtBlockTimestampNanoSecs or InitialView from the returned EpochEntry. 
+func (bav *UtxoView) simulatePrevEpochEntry(currentEpochNumber uint64, currentEpochInitialBlockHeight uint64) (*EpochEntry, error) { + if currentEpochNumber == 0 { + return nil, errors.New("simulatePrevEpochEntry: currentEpochNumber is 0, this should never happen") + } + snapshotAtEpochNumber, err := bav.ComputeSnapshotEpochNumberForEpoch(currentEpochNumber - 1) + if err != nil { + return nil, errors.Wrapf(err, "simulatePrevEpochEntry: problem computing snapshot epoch number: ") + } + snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntryByEpochNumber(snapshotAtEpochNumber) + if err != nil { + return nil, errors.Wrapf(err, "simulatePrevEpochEntry: problem retrieving snapshot global params entry: ") + } + + // Calculate the PrevEpoch's InitialBlockHeight. + prevEpochInitialBlockHeight, err := SafeUint64().Sub(currentEpochInitialBlockHeight, snapshotGlobalParamsEntry.EpochDurationNumBlocks) + if err != nil { + return nil, errors.Wrapf(err, "simulatePrevEpochEntry: problem calculating PrevEpochInitialBlockHeight: ") + } + prevEpochEntry := &EpochEntry{ + EpochNumber: currentEpochNumber - 1, + InitialBlockHeight: prevEpochInitialBlockHeight, + InitialView: 0, + FinalBlockHeight: currentEpochInitialBlockHeight - 1, + CreatedAtBlockTimestampNanoSecs: 0, + } + return prevEpochEntry, nil +} + +// Updates the currentEpochEntry to the next epoch's. 
+func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeight uint64, view uint64, blockTimestampNanoSecs uint64) error { + nextEpochEntry, err := bav.computeNextEpochEntry(epochNumber, blockHeight, view, blockTimestampNanoSecs) + if err != nil { + return errors.Wrap(err, "runEpochCompleteEpochRollover: ") } bav._setCurrentEpochEntry(nextEpochEntry) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 35028462e..dc8ca4126 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -4,6 +4,7 @@ package lib import ( "fmt" + "github.com/deso-protocol/core/bls" "sort" "testing" @@ -84,6 +85,11 @@ func TestRunEpochCompleteHook(t *testing.T) { blockHeight += 1 return blockHeight } + viewNumber := uint64(0) + incrViewNumber := func() uint64 { + viewNumber += 1 + return viewNumber + } // Seed a CurrentEpochEntry. tmpUtxoView := _newUtxoView(testMeta) @@ -129,7 +135,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, currentEpochNumber, uint64(0)) // Test SnapshotGlobalParamsEntry is non-nil and contains the default values. - snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() + snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetCurrentSnapshotGlobalParamsEntry() require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) require.Equal(t, snapshotGlobalParamsEntry.ValidatorJailEpochDuration, uint64(3)) @@ -140,7 +146,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook() with no validators or stakers. 
- _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { // Test the state of the snapshots after running our first OnEpochCompleteHook @@ -151,8 +157,15 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(1)) + // Test CurrentEpochEntry + currentEpoch, err := _newUtxoView(testMeta).GetCurrentEpochEntry() + require.NoError(t, err) + require.NotNil(t, currentEpoch) + require.Equal(t, currentEpoch.InitialBlockHeight, uint64(13)) + require.Equal(t, currentEpoch.InitialView, uint64(2)) + // Test SnapshotGlobalParamsEntry is nil. - snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() + snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetCurrentSnapshotGlobalParamsEntry() require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) require.Equal(t, _newUtxoView(testMeta).GlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) @@ -182,7 +195,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { // Test CurrentEpochNumber. @@ -190,8 +203,15 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(2)) + // Test CurrentEpochEntry + currentEpoch, err := _newUtxoView(testMeta).GetCurrentEpochEntry() + require.NoError(t, err) + require.NotNil(t, currentEpoch) + require.Equal(t, currentEpoch.InitialBlockHeight, uint64(14)) + require.Equal(t, currentEpoch.InitialView, uint64(3)) + // Test SnapshotGlobalParamsEntry is populated. 
- snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() + snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetCurrentSnapshotGlobalParamsEntry() require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) @@ -203,7 +223,7 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { // Test CurrentEpochNumber. @@ -211,8 +231,15 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.Equal(t, currentEpochNumber, uint64(3)) + // Test CurrentEpochEntry + currentEpoch, err := _newUtxoView(testMeta).GetCurrentEpochEntry() + require.NoError(t, err) + require.NotNil(t, currentEpoch) + require.Equal(t, currentEpoch.InitialBlockHeight, uint64(15)) + require.Equal(t, currentEpoch.InitialView, uint64(4)) + // Test SnapshotGlobalParamsEntry is populated. - snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() + snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetCurrentSnapshotGlobalParamsEntry() require.NoError(t, err) require.NotNil(t, snapshotGlobalParamsEntry) require.Equal(t, snapshotGlobalParamsEntry.MinimumNetworkFeeNanosPerKB, testMeta.feeRateNanosPerKb) @@ -220,9 +247,14 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test SnapshotValidatorByPKID is populated. for _, pkid := range validatorPKIDs { - snapshotValidatorSetEntry, err := _newUtxoView(testMeta).GetSnapshotValidatorSetEntryByPKID(pkid) + snapshotValidatorSetEntry, err := _newUtxoView(testMeta).GetCurrentSnapshotValidatorSetEntryByPKID(pkid) require.NoError(t, err) require.NotNil(t, snapshotValidatorSetEntry) + // Make sure BLS public key -> PKID mapping is populated. 
+ blsPublicKeyPKIDEntry, err := _newUtxoView(testMeta).GetCurrentSnapshotValidatorBLSPublicKeyPKIDPairEntry(snapshotValidatorSetEntry.VotingPublicKey) + require.NoError(t, err) + require.NotNil(t, blsPublicKeyPKIDEntry) + require.True(t, blsPublicKeyPKIDEntry.PKID.Eq(pkid)) } // Test GetSnapshotValidatorSetByStakeAmount is populated. @@ -275,10 +307,10 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(800)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // Snapshot m5 still has 600 staked. - validatorEntry, err = _newUtxoView(testMeta).GetSnapshotValidatorSetEntryByPKID(m5PKID) + validatorEntry, err = _newUtxoView(testMeta).GetCurrentSnapshotValidatorSetEntryByPKID(m5PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(600)) @@ -291,10 +323,10 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(600)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // Snapshot m5 now has 800 staked. - validatorEntry, err = _newUtxoView(testMeta).GetSnapshotValidatorSetEntryByPKID(m5PKID) + validatorEntry, err = _newUtxoView(testMeta).GetCurrentSnapshotValidatorSetEntryByPKID(m5PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(800)) @@ -310,7 +342,7 @@ func TestRunEpochCompleteHook(t *testing.T) { // Test snapshotting changing GlobalParams. // Update StakeLockupEpochDuration from default of 3 to 2. 
- snapshotGlobalsParamsEntry, err := _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() + snapshotGlobalsParamsEntry, err := _newUtxoView(testMeta).GetCurrentSnapshotGlobalParamsEntry() require.NoError(t, err) require.Equal(t, snapshotGlobalsParamsEntry.StakeLockupEpochDuration, uint64(3)) @@ -325,18 +357,18 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, _newUtxoView(testMeta).GlobalParamsEntry.StakeLockupEpochDuration, uint64(2)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // Snapshot StakeLockupEpochDuration is still 3. - snapshotGlobalsParamsEntry, err = _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() + snapshotGlobalsParamsEntry, err = _newUtxoView(testMeta).GetCurrentSnapshotGlobalParamsEntry() require.NoError(t, err) require.Equal(t, snapshotGlobalsParamsEntry.StakeLockupEpochDuration, uint64(3)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // Snapshot StakeLockupEpochDuration is updated to 2. - snapshotGlobalsParamsEntry, err = _newUtxoView(testMeta).GetSnapshotGlobalParamsEntry() + snapshotGlobalsParamsEntry, err = _newUtxoView(testMeta).GetCurrentSnapshotGlobalParamsEntry() require.NoError(t, err) require.Equal(t, snapshotGlobalsParamsEntry.StakeLockupEpochDuration, uint64(2)) } @@ -352,7 +384,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // m0 is still in the snapshot validator set. snapshotValidatorSet, err = _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(10) @@ -364,7 +396,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Len(t, snapshotStakeEntries, 7) // Run OnEpochCompleteHook(). 
- _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // m0 is dropped from the snapshot validator set. snapshotValidatorSet, err = _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(10) @@ -377,11 +409,11 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { // Run OnEpochCompleteHook() - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { // Test jailing inactive validators. @@ -440,7 +472,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getNumSnapshotStakes(), 6) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // In epoch 12, all current registered validators have Status = Jailed. // In snapshot 10, all snapshot validators have Status = Active. @@ -454,7 +486,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getCurrentValidator(m6PKID).JailedAtEpochNumber, uint64(11)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // In epoch 13, all current registered validators have Status = Jailed. // In snapshot 11, the validator set is empty because all validators have Status = Jailed. @@ -465,7 +497,7 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Empty(t, getNumSnapshotStakes()) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) // In epoch 14, all current registered validators have Status = Jailed. // In snapshot 12, the validator set is empty because all validators have Status = Jailed. 
@@ -502,6 +534,11 @@ func TestStakingRewardDistribution(t *testing.T) { blockHeight += 1 return blockHeight } + viewNumber := uint64(0) + incrViewNumber := func() uint64 { + viewNumber += 1 + return viewNumber + } // Seed a CurrentEpochEntry. tmpUtxoView := _newUtxoView(testMeta) @@ -558,12 +595,12 @@ func TestStakingRewardDistribution(t *testing.T) { { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { @@ -611,7 +648,7 @@ func TestStakingRewardDistribution(t *testing.T) { { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { @@ -723,7 +760,7 @@ func TestStakingRewardDistribution(t *testing.T) { { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight()) + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) } { @@ -791,7 +828,12 @@ func _registerValidatorAndStake( require.NoError(testMeta.t, err) // Validator registers. 
- votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(testMeta.t, pkBytes) + votingPrivateKey, votingPublicKey, votingAuthorization := _generateVotingPrivateKeyPublicKeyAndAuthorization(testMeta.t, pkBytes) + if testMeta.pubKeyToBLSKeyMap == nil { + testMeta.pubKeyToBLSKeyMap = make(map[string]*bls.PrivateKey) + } + // Stash the voting private key in testmeta for convenience + testMeta.pubKeyToBLSKeyMap[publicKey] = votingPrivateKey registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, VotingPublicKey: votingPublicKey, @@ -836,12 +878,12 @@ func _newUtxoView(testMeta *TestMeta) *UtxoView { return newUtxoView } -func _runOnEpochCompleteHook(testMeta *TestMeta, blockHeight uint64) { +func _runOnEpochCompleteHook(testMeta *TestMeta, blockHeight uint64, viewNumber uint64) { tmpUtxoView := _newUtxoView(testMeta) // Set blockTimestampNanoSecs to 1 year * block height. Every time the block height increments, // the timestamp increases by 1 year blockTimestampNanoSecs := blockHeight * 365 * 24 * 3600 * 1e9 - require.NoError(testMeta.t, tmpUtxoView.RunEpochCompleteHook(blockHeight, blockTimestampNanoSecs)) + require.NoError(testMeta.t, tmpUtxoView.RunEpochCompleteHook(blockHeight, viewNumber, blockTimestampNanoSecs)) require.NoError(testMeta.t, tmpUtxoView.FlushToDb(blockHeight)) } diff --git a/lib/pos_epoch_test.go b/lib/pos_epoch_test.go index ad49caff0..558b558f7 100644 --- a/lib/pos_epoch_test.go +++ b/lib/pos_epoch_test.go @@ -35,6 +35,8 @@ func TestCurrentEpoch(t *testing.T) { // Set the CurrentEpoch. 
epochEntry = &EpochEntry{ EpochNumber: 1, + InitialBlockHeight: blockHeight + 1, + InitialView: 1, FinalBlockHeight: blockHeight + 5, CreatedAtBlockTimestampNanoSecs: blockTimestampNanoSecs + 5*1e9, } @@ -46,6 +48,8 @@ func TestCurrentEpoch(t *testing.T) { require.NoError(t, err) require.NotNil(t, epochEntry) require.Equal(t, epochEntry.EpochNumber, uint64(1)) + require.Equal(t, epochEntry.InitialBlockHeight, blockHeight+1) + require.Equal(t, epochEntry.InitialView, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) @@ -57,6 +61,8 @@ func TestCurrentEpoch(t *testing.T) { require.NoError(t, err) require.NotNil(t, epochEntry) require.Equal(t, epochEntry.EpochNumber, uint64(1)) + require.Equal(t, epochEntry.InitialBlockHeight, blockHeight+1) + require.Equal(t, epochEntry.InitialView, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) @@ -64,6 +70,8 @@ func TestCurrentEpoch(t *testing.T) { epochEntry = utxoView.CurrentEpochEntry require.NotNil(t, epochEntry) require.Equal(t, epochEntry.EpochNumber, uint64(1)) + require.Equal(t, epochEntry.InitialBlockHeight, blockHeight+1) + require.Equal(t, epochEntry.InitialView, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index aa5262f9d..fbcbf56fd 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -56,21 +56,35 @@ func DecodeRandomSeedHash(rr io.Reader) (*RandomSeedHash, error) { return (&RandomSeedHash{}).FromBytes(randomSeedHashBytes) } +func (randomSeedHash *RandomSeedHash) isEmpty() bool { + return randomSeedHash == nil || randomSeedHash.Eq(&RandomSeedHash{}) +} + // // UTXO VIEW UTILS // -func (bav *UtxoView) 
GenerateRandomSeedSignature(signerPrivateKey *bls.PrivateKey) (*bls.Signature, error) { +func (bav *UtxoView) GenerateNextRandomSeedSignature(signerPrivateKey *bls.PrivateKey) (*bls.Signature, error) { // This function generates a RandomSeedSignature by signing the CurrentRandomSeedHash // with the provided bls.PrivateKey. This signature is deterministic: given the same // CurrentRandomSeedHash and bls.PrivateKey, the same signature will always be generated. currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash() if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GenerateRandomSeedSignature: problem retrieving CurrentRandomSeedHash: ") + return nil, errors.Wrapf(err, "UtxoView.GenerateNextRandomSeedSignature: problem retrieving CurrentRandomSeedHash: ") + } + randomSeedSignature, err := SignRandomSeedHash(signerPrivateKey, currentRandomSeedHash) + if err != nil { + return nil, errors.Wrapf(err, "UtxoView.GenerateNextRandomSeedSignature: problem generating RandomSeedSignature: ") } - randomSeedSignature, err := signerPrivateKey.Sign(currentRandomSeedHash[:]) + return randomSeedSignature, nil +} + +func SignRandomSeedHash( + signerPrivateKey *bls.PrivateKey, randomSeedHash *RandomSeedHash, +) (*bls.Signature, error) { + randomSeedSignature, err := signerPrivateKey.Sign(randomSeedHash[:]) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GenerateRandomSeedSignature: problem signing CurrentRandomSeedHash: ") + return nil, errors.Wrapf(err, "UtxoView.SignRandomSeedHash: problem signing CurrentRandomSeedHash: ") } return randomSeedSignature, nil } @@ -89,20 +103,31 @@ func (bav *UtxoView) VerifyRandomSeedSignature( if err != nil { return nil, errors.Wrapf(err, "UtxoView.VerifyRandomSeedSignature: problem retrieving CurrentRandomSeedHash: ") } - isVerified, err := signerPublicKey.Verify(randomSeedSignature, currentRandomSeedHash[:]) + isVerified, err := verifySignatureOnRandomSeedHash(signerPublicKey, randomSeedSignature, currentRandomSeedHash) if err 
!= nil { return nil, errors.Wrapf(err, "UtxoView.VerifyRandomSeedSignature: problem verifying RandomSeedSignature: ") } if !isVerified { - return nil, errors.New("UtxoView.VerifyRandomSeedSignature: invalid RandomSeedSignature provided") + return nil, errors.Errorf("UtxoView.VerifyRandomSeedSignature: invalid RandomSeedSignature provided") } + return hashRandomSeedSignature(randomSeedSignature) +} + +func verifySignatureOnRandomSeedHash( + signerPublicKey *bls.PublicKey, randomSeedSignature *bls.Signature, randomSeedHash *RandomSeedHash, +) (bool, error) { + return signerPublicKey.Verify(randomSeedSignature, randomSeedHash[:]) +} + +func hashRandomSeedSignature(randomSeedSignature *bls.Signature) (*RandomSeedHash, error) { + // This function takes in a random seed signature and computes the random seed hash for it // Convert the RandomSeedSignature to a RandomSeedHash. randomSeedSHA256 := sha256.Sum256(randomSeedSignature.ToBytes()) - randomSeedHash, err := (&RandomSeedHash{}).FromBytes(randomSeedSHA256[:]) + newRandomSeedHash, err := (&RandomSeedHash{}).FromBytes(randomSeedSHA256[:]) if err != nil { - return nil, errors.Wrapf(err, "UtxoView.VerifyRandomSeedSignature: problem hashing RandomSeedSignature: ") + return nil, errors.Wrapf(err, "hashRandomSeedSignature: problem hashing RandomSeedSignature: ") } - return randomSeedHash, nil + return newRandomSeedHash, nil } func (bav *UtxoView) GetCurrentRandomSeedHash() (*RandomSeedHash, error) { diff --git a/lib/pos_random_seed_test.go b/lib/pos_random_seed_test.go index e35f1d9fa..ba74c8838 100644 --- a/lib/pos_random_seed_test.go +++ b/lib/pos_random_seed_test.go @@ -27,7 +27,7 @@ func TestCurrentRandomSeedHash(t *testing.T) { // Test generating + verifying RandomSeedSignatures. // PrivateKey1 creates a new RandomSeedSignature. 
- randomSeedSignature1, err := utxoView.GenerateRandomSeedSignature(privateKey1) + randomSeedSignature1, err := utxoView.GenerateNextRandomSeedSignature(privateKey1) require.NoError(t, err) // PublicKey1 is verified to correspond to PrivateKey that signed the RandomSeedSignature. randomSeedHash1, err := utxoView.VerifyRandomSeedSignature(publicKey1, randomSeedSignature1) @@ -40,7 +40,7 @@ func TestCurrentRandomSeedHash(t *testing.T) { require.Nil(t, randomSeedHash1) // PrivateKey2 creates a new RandomSeedSignature. - randomSeedSignature2, err := utxoView.GenerateRandomSeedSignature(privateKey2) + randomSeedSignature2, err := utxoView.GenerateNextRandomSeedSignature(privateKey2) require.NoError(t, err) // PublicKey1 is not verified to correspond to the PrivateKey that signed the RandomSeedSignature. randomSeedHash2, err := utxoView.VerifyRandomSeedSignature(publicKey1, randomSeedSignature2) @@ -61,7 +61,7 @@ func TestCurrentRandomSeedHash(t *testing.T) { require.True(t, randomSeedHash1.ToUint256().Eq(uint256.NewInt())) // PrivateKey1 generates a new RandomSeedSignature. - randomSeedSignature1, err = utxoView.GenerateRandomSeedSignature(privateKey1) + randomSeedSignature1, err = utxoView.GenerateNextRandomSeedSignature(privateKey1) require.NoError(t, err) // PublicKey1 is verified to correspond to the PrivateKey that signed the RandomSeedSignature. randomSeedHash1, err = utxoView.VerifyRandomSeedSignature(publicKey1, randomSeedSignature1) @@ -74,7 +74,7 @@ func TestCurrentRandomSeedHash(t *testing.T) { require.NoError(t, utxoView.FlushToDb(blockHeight)) // PrivateKey2 generates a new RandomSeedSignature. - randomSeedSignature2, err = utxoView.GenerateRandomSeedSignature(privateKey2) + randomSeedSignature2, err = utxoView.GenerateNextRandomSeedSignature(privateKey2) require.NoError(t, err) // PublicKey2 is verified to correspond to the PrivateKey that signed the RandomSeedSignature. 
randomSeedHash2, err = utxoView.VerifyRandomSeedSignature(publicKey2, randomSeedSignature2) diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index f608e14ac..784e2fc96 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -3,6 +3,8 @@ package lib import ( "bytes" "fmt" + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections" "math" "sort" @@ -14,13 +16,17 @@ import ( const SnapshotLookbackNumEpochs uint64 = 2 -func (bav *UtxoView) GetSnapshotEpochNumber() (uint64, error) { +func (bav *UtxoView) GetCurrentSnapshotEpochNumber() (uint64, error) { // Retrieve the CurrentEpochNumber. currentEpochNumber, err := bav.GetCurrentEpochNumber() if err != nil { - return 0, errors.Wrapf(err, "GetSnapshotEpochNumber: problem retrieving CurrentEpochNumber: ") + return 0, errors.Wrapf(err, "GetCurrentSnapshotEpochNumber: problem retrieving CurrentEpochNumber: ") } - if currentEpochNumber < SnapshotLookbackNumEpochs { + return bav.ComputeSnapshotEpochNumberForEpoch(currentEpochNumber) +} + +func (bav *UtxoView) ComputeSnapshotEpochNumberForEpoch(epochNumber uint64) (uint64, error) { + if epochNumber < SnapshotLookbackNumEpochs { // We want to return 0 in this case and not error. We start snapshotting with our StateSetup block height, // so we should have the correct number of snapshots and not hit this case once we hit the ConsensusCutover // block height. This case will only be hit immediately following the StateSetup block height. We run one @@ -41,7 +47,7 @@ func (bav *UtxoView) GetSnapshotEpochNumber() (uint64, error) { // then that value will take immediate effect in the first epoch with no lagged snapshot wait period. 
return 0, nil } - return SafeUint64().Sub(currentEpochNumber, SnapshotLookbackNumEpochs) + return SafeUint64().Sub(epochNumber, SnapshotLookbackNumEpochs) } // @@ -52,7 +58,7 @@ func (bav *UtxoView) GetCurrentGlobalParamsEntry() *GlobalParamsEntry { return _mergeGlobalParamEntryDefaults(bav, bav.GlobalParamsEntry) } -// GetSnapshotGlobalParamsEntry retrieves a snapshot of the GlobalParamsEntry from n epochs ago. If a snapshot +// GetCurrentSnapshotGlobalParamsEntry retrieves a snapshot of the GlobalParamsEntry from n epochs ago. If a snapshot // does not exist for that epoch, it will return the default values. We snapshot GlobalParams to make sure that // the validator set in the PoS consensus is in agreement ahead of time on the params used for an epoch long // before that epoch begins. Snapshot GlobalParams are only appropriate to use in two scenarios: @@ -67,12 +73,16 @@ func (bav *UtxoView) GetCurrentGlobalParamsEntry() *GlobalParamsEntry { // snapshotted. This approach ensures that whenever we create a snapshot of the validator set, leader schedule, // and stakes to reward... the GlobalParams used to create the snapshots are snapshotted along with that data, and // live alongside them. -func (bav *UtxoView) GetSnapshotGlobalParamsEntry() (*GlobalParamsEntry, error) { +func (bav *UtxoView) GetCurrentSnapshotGlobalParamsEntry() (*GlobalParamsEntry, error) { // Calculate the SnapshotEpochNumber. 
- snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() + snapshotAtEpochNumber, err := bav.GetCurrentSnapshotEpochNumber() if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotGlobalParamsEntry: problem calculating SnapshotEpochNumber: ") + return nil, errors.Wrapf(err, "GetCurrentSnapshotGlobalParamsEntry: problem calculating SnapshotEpochNumber: ") } + return bav.GetSnapshotGlobalParamsEntryByEpochNumber(snapshotAtEpochNumber) +} + +func (bav *UtxoView) GetSnapshotGlobalParamsEntryByEpochNumber(snapshotAtEpochNumber uint64) (*GlobalParamsEntry, error) { // Check the UtxoView first. if globalParamsEntry, exists := bav.SnapshotGlobalParamEntries[snapshotAtEpochNumber]; exists { return _mergeGlobalParamEntryDefaults(bav, globalParamsEntry), nil @@ -82,7 +92,7 @@ func (bav *UtxoView) GetSnapshotGlobalParamsEntry() (*GlobalParamsEntry, error) if err != nil { return nil, errors.Wrapf( err, - "GetSnapshotGlobalParamsEntry: problem retrieving SnapshotGlobalParamsEntry from db: ", + "GetSnapshotGlobalParamsEntryByEpochNumber: problem retrieving SnapshotGlobalParamsEntry from db: ", ) } if globalParamsEntry != nil { @@ -231,12 +241,15 @@ type SnapshotValidatorSetMapKey struct { ValidatorPKID PKID } -func (bav *UtxoView) GetSnapshotValidatorSetEntryByPKID(pkid *PKID) (*ValidatorEntry, error) { +func (bav *UtxoView) GetCurrentSnapshotValidatorSetEntryByPKID(pkid *PKID) (*ValidatorEntry, error) { // Calculate the SnapshotEpochNumber. 
- snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() + snapshotAtEpochNumber, err := bav.GetCurrentSnapshotEpochNumber() if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotValidatorSetEntryByPKID: problem calculating SnapshotEpochNumber: ") + return nil, errors.Wrapf(err, "GetCurrentSnapshotValidatorSetEntryByPKID: problem calculating SnapshotEpochNumber: ") } + return bav.GetSnapshotValidatorSetEntryByPKIDAtEpochNumber(pkid, snapshotAtEpochNumber) +} +func (bav *UtxoView) GetSnapshotValidatorSetEntryByPKIDAtEpochNumber(pkid *PKID, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { // Check the UtxoView first. mapKey := SnapshotValidatorSetMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *pkid} if validatorEntry, exists := bav.SnapshotValidatorSet[mapKey]; exists { @@ -247,7 +260,7 @@ func (bav *UtxoView) GetSnapshotValidatorSetEntryByPKID(pkid *PKID) (*ValidatorE if err != nil { return nil, errors.Wrapf( err, - "GetSnapshotValidatorSetEntryByPKID: problem retrieving ValidatorEntry from db: ", + "GetSnapshotValidatorSetEntryByPKIDAtEpochNumber: problem retrieving ValidatorEntry from db: ", ) } if validatorEntry != nil { @@ -259,11 +272,13 @@ func (bav *UtxoView) GetSnapshotValidatorSetEntryByPKID(pkid *PKID) (*ValidatorE func (bav *UtxoView) GetSnapshotValidatorSetByStakeAmount(limit uint64) ([]*ValidatorEntry, error) { // Calculate the SnapshotEpochNumber. 
- snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() + snapshotAtEpochNumber, err := bav.GetCurrentSnapshotEpochNumber() if err != nil { return nil, errors.Wrapf(err, "GetSnapshotValidatorSetEntriesByStake: problem calculating SnapshotEpochNumber: ") } - + return bav.GetSnapshotValidatorSetByStakeAmountAtEpochNumber(snapshotAtEpochNumber, limit) +} +func (bav *UtxoView) GetSnapshotValidatorSetByStakeAmountAtEpochNumber(snapshotAtEpochNumber uint64, limit uint64) ([]*ValidatorEntry, error) { // Create a slice of all UtxoView ValidatorEntries to prevent pulling them from the db. var utxoViewValidatorEntries []*ValidatorEntry for mapKey, validatorEntry := range bav.SnapshotValidatorSet { @@ -278,7 +293,7 @@ func (bav *UtxoView) GetSnapshotValidatorSetByStakeAmount(limit uint64) ([]*Vali bav.Handle, bav.Snapshot, limit, snapshotAtEpochNumber, utxoViewValidatorEntries, ) if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotValidatorSetEntriesByStake: error retrieving entries from db: ") + return nil, errors.Wrapf(err, "GetSnapshotValidatorSetByStakeAmountAtEpochNumber: error retrieving entries from db: ") } // Cache top N active ValidatorEntries from the db in the UtxoView. for _, validatorEntry := range dbValidatorEntries { @@ -319,6 +334,25 @@ func (bav *UtxoView) GetSnapshotValidatorSetByStakeAmount(limit uint64) ([]*Vali return validatorEntries[0:upperBound], nil } +// GetAllSnapshotValidatorSetEntriesByStake returns all validators in the snapshot +// ordered by stake. This is useful when we need to know all the validators that +// are in a leader schedule. 
+func (bav *UtxoView) GetAllSnapshotValidatorSetEntriesByStake() ([]*ValidatorEntry, error) { + snapshotGlobalParams, err := bav.GetCurrentSnapshotGlobalParamsEntry() + if err != nil { + return nil, errors.Wrapf(err, "GetAllSnapshotValidatorSetEntriesByStake: problem getting SnapshotGlobalParamsEntry: ") + } + return bav.GetSnapshotValidatorSetByStakeAmount(snapshotGlobalParams.ValidatorSetMaxNumValidators) +} + +func (bav *UtxoView) GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotAtEpochNumber uint64) ([]*ValidatorEntry, error) { + snapshotGlobalParams, err := bav.GetSnapshotGlobalParamsEntryByEpochNumber(snapshotAtEpochNumber) + if err != nil { + return nil, errors.Wrapf(err, "GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber: problem getting SnapshotGlobalParamsEntry: ") + } + return bav.GetSnapshotValidatorSetByStakeAmountAtEpochNumber(snapshotAtEpochNumber, snapshotGlobalParams.ValidatorSetMaxNumValidators) +} + func (bav *UtxoView) _setSnapshotValidatorSetEntry(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) { if validatorEntry == nil { glog.Errorf("_setSnapshotValidatorSetEntry: called with nil entry, this should never happen") @@ -328,6 +362,8 @@ func (bav *UtxoView) _setSnapshotValidatorSetEntry(validatorEntry *ValidatorEntr SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID, } bav.SnapshotValidatorSet[mapKey] = validatorEntry.Copy() + + bav._setSnapshotValidatorBLSPublicKeyPKIDPairEntry(validatorEntry.ToBLSPublicKeyPKIDPairEntry(), snapshotAtEpochNumber) } func (bav *UtxoView) _deleteSnapshotValidatorSetEntry(validatorEntry *ValidatorEntry, snapshotAtEpochNumber uint64) { @@ -588,13 +624,211 @@ func DBEnumerateAllCurrentValidators(handle *badger.DB, pkidsToSkip []*PKID) ([] return validatorEntries, nil } +// +// SnapshotValidatorBLSPublicKeyToPKID +// + +type SnapshotValidatorBLSPublicKeyMapKey struct { + SnapshotAtEpochNumber uint64 + ValidatorBLSPublicKey bls.SerializedPublicKey +} 
+ +func (bav *UtxoView) GetCurrentSnapshotValidatorBLSPublicKeyPKIDPairEntry(blsPublicKey *bls.PublicKey) (*BLSPublicKeyPKIDPairEntry, error) { + // Calculate the SnapshotEpochNumber. + snapshotAtEpochNumber, err := bav.GetCurrentSnapshotEpochNumber() + if err != nil { + return nil, errors.Wrapf(err, "GetCurrentSnapshotValidatorBLSPublicKeyPKIDPairEntry: problem calculating SnapshotEpochNumber: ") + } + return bav.GetSnapshotValidatorBLSPublicKeyPKIDPairEntry(blsPublicKey, snapshotAtEpochNumber) +} + +func (bav *UtxoView) GetSnapshotValidatorEntryByBLSPublicKey(blsPublicKey *bls.PublicKey, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { + blsPublicKeyPKIDPairEntry, err := bav.GetSnapshotValidatorBLSPublicKeyPKIDPairEntry(blsPublicKey, snapshotAtEpochNumber) + if err != nil { + return nil, errors.Wrapf(err, "GetSnapshotValidatorEntryByBLSPublicKey: problem getting BLSPublicKeyPKIDPairEntry: ") + } + if blsPublicKeyPKIDPairEntry == nil { + return nil, nil + } + return bav.GetSnapshotValidatorSetEntryByPKIDAtEpochNumber(blsPublicKeyPKIDPairEntry.PKID, snapshotAtEpochNumber) +} + +func (bav *UtxoView) GetSnapshotValidatorBLSPublicKeyPKIDPairEntry(blsPublicKey *bls.PublicKey, snapshotAtEpochNumber uint64) (*BLSPublicKeyPKIDPairEntry, error) { + // Check the UtxoView first. + mapKey := SnapshotValidatorBLSPublicKeyMapKey{ + SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorBLSPublicKey: blsPublicKey.Serialize(), + } + if blsPublicKeyPKIDPairEntry, exists := bav.SnapshotValidatorBLSPublicKeyPKIDPairEntries[mapKey]; exists { + if blsPublicKeyPKIDPairEntry == nil || blsPublicKeyPKIDPairEntry.isDeleted { + return nil, nil + } + return blsPublicKeyPKIDPairEntry, nil + } + + // If we don't have it in the UtxoView, check the db. 
+ blsPublicKeyPKIDPairEntry, err := DBGetSnapshotValidatorBLSPublicKeyPKIDPairEntry(bav.Handle, bav.Snapshot, blsPublicKey, snapshotAtEpochNumber) + if err != nil { + return nil, errors.Wrap( + err, + "GetSnapshotValidatorBLSPublicKeyPKIDPairEntry: problem retrieving BLSPublicKeyPKIDPairEntry from db: ", + ) + } + + if blsPublicKeyPKIDPairEntry != nil { + // Cache the result in the UtxoView + bav._setSnapshotValidatorBLSPublicKeyPKIDPairEntry(blsPublicKeyPKIDPairEntry, snapshotAtEpochNumber) + } + return blsPublicKeyPKIDPairEntry, nil +} + +func DBKeyForSnapshotValidatorBLSPublicKeyPKIDPairEntry(blsPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry, snapshotAtEpochNumber uint64) []byte { + key := append([]byte{}, Prefixes.PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry...) + key = append(key, EncodeUint64(snapshotAtEpochNumber)...) + key = append(key, blsPublicKeyPKIDPairEntry.BLSPublicKey.ToBytes()...) + return key +} + +func DBGetSnapshotValidatorBLSPublicKeyPKIDPairEntry(handle *badger.DB, snap *Snapshot, blsPublicKey *bls.PublicKey, snapshotAtEpochNumber uint64) (*BLSPublicKeyPKIDPairEntry, error) { + var ret *BLSPublicKeyPKIDPairEntry + err := handle.View(func(txn *badger.Txn) error { + var innerErr error + ret, innerErr = DBGetSnapshotValidatorBLSPublicKeyPKIDPairEntryWithTxn(txn, snap, blsPublicKey, snapshotAtEpochNumber) + return innerErr + }) + return ret, err +} + +func DBGetSnapshotValidatorBLSPublicKeyPKIDPairEntryWithTxn(txn *badger.Txn, snap *Snapshot, blsPublicKey *bls.PublicKey, snapshotAtEpochNumber uint64) (*BLSPublicKeyPKIDPairEntry, error) { + // Retrieve from db. + key := DBKeyForSnapshotValidatorBLSPublicKeyPKIDPairEntry(&BLSPublicKeyPKIDPairEntry{BLSPublicKey: blsPublicKey}, snapshotAtEpochNumber) + blsPublicKeyPKIDPairEntryBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + // We don't want to error if the key isn't found. Instead, return nil. 
+ if errors.Is(err, badger.ErrKeyNotFound) { + return nil, nil + } + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorBLSPublicKeyPKIDPairEntryWithTxn: problem retrieving BLSPublicKeyPKIDPairEntry") + } + + // Decode from bytes. + blsPublicKeyPKIDPairEntry := &BLSPublicKeyPKIDPairEntry{} + rr := bytes.NewReader(blsPublicKeyPKIDPairEntryBytes) + if exist, err := DecodeFromBytes(blsPublicKeyPKIDPairEntry, rr); !exist || err != nil { + return nil, errors.Wrapf(err, "DBGetSnapshotValidatorBLSPublicKeyPKIDPairEntryWithTxn: problem decoding BLSPublicKeyPKIDPairEntry") + } + return blsPublicKeyPKIDPairEntry, nil +} + +func DBPutSnapshotValidatorBLSPublicKeyPKIDPairEntryWithTxn( + txn *badger.Txn, + snap *Snapshot, + blsPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry, + snapshotAtEpochNumber uint64, + blockHeight uint64, + eventManager *EventManager, +) error { + if blsPublicKeyPKIDPairEntry == nil { + // This should never happen but is a sanity check. + glog.Errorf("DBPutSnapshotValidatorBLSPublicKeyPKIDPairEntryWithTxn: called with nil BLSPublicKeyPKIDPairEntry, this should never happen") + return nil + } + + // Put the BLSPublicKeyPKIDPairEntry in the SnapshotValidatorBLSPublicKeyPKIDPairEntries index. 
+ key := DBKeyForSnapshotValidatorBLSPublicKeyPKIDPairEntry(blsPublicKeyPKIDPairEntry, snapshotAtEpochNumber) + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, blsPublicKeyPKIDPairEntry), eventManager); err != nil { + return errors.Wrapf( + err, + "DBPutSnapshotValidatorBLSPublicKeyPKIDPairEntryWithTxn: problem putting BLSPublicKeyPKIDPairEntry in the SnapshotValidatorBLSPublicKeyPKIDPairEntry index: ", + ) + } + return nil +} + +func DBDeleteSnapshotValidatorBLSPublicKeyPKIDPairEntryWithTxn( + txn *badger.Txn, + snap *Snapshot, + blsPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry, + snapshotAtEpochNumber uint64, + eventManager *EventManager, + entryIsDeleted bool, +) error { + if blsPublicKeyPKIDPairEntry == nil { + // This should never happen but is a sanity check. + glog.Errorf("DBDeleteSnapshotValidatorBLSPublicKeyPKIDPairEntryWithTxn: called with nil BLSPublicKeyPKIDPairEntry, this should never happen") + return nil + } + + key := DBKeyForSnapshotValidatorBLSPublicKeyPKIDPairEntry(blsPublicKeyPKIDPairEntry, snapshotAtEpochNumber) + if err := DBDeleteWithTxn(txn, snap, key, eventManager, entryIsDeleted); err != nil { + return errors.Wrap( + err, "DBDeleteSnapshotValidatorBLSPublicKeyPKIDPairEntryWithTxn: problem deleting BLSPublicKeyPKIDPairEntry from index PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry", + ) + } + return nil +} + +func (bav *UtxoView) _setSnapshotValidatorBLSPublicKeyPKIDPairEntry(blsPublicKeyPKIDPairEntry *BLSPublicKeyPKIDPairEntry, snapshotAtEpochNumber uint64) { + if blsPublicKeyPKIDPairEntry == nil { + glog.Errorf("_setSnapshotValidatorBLSPublicKeyPKIDPairEntry: called with nil entry, this should never happen") + return + } + + bav.SnapshotValidatorBLSPublicKeyPKIDPairEntries[blsPublicKeyPKIDPairEntry.ToSnapshotMapKey(snapshotAtEpochNumber)] = blsPublicKeyPKIDPairEntry +} + +func (bav *UtxoView) _flushSnapshotValidatorBLSPublicKeyPKIDPairEntryToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { + // Delete all 
SnapshotValidatorBLSPublicKeyToPKID entries from the db that are in the UtxoView. + for mapKey, blsPublicKeyPKIDPairEntry := range bav.SnapshotValidatorBLSPublicKeyPKIDPairEntries { + if blsPublicKeyPKIDPairEntry == nil { + return fmt.Errorf( + "_flushSnapshotValidatorBLSPublicKeyPKIDPairEntryToDbWithTxn: found nil entry for EpochNumber %d, this should never happen", + mapKey.SnapshotAtEpochNumber, + ) + } + + if err := DBDeleteSnapshotValidatorBLSPublicKeyPKIDPairEntryWithTxn( + txn, bav.Snapshot, blsPublicKeyPKIDPairEntry, mapKey.SnapshotAtEpochNumber, bav.EventManager, blsPublicKeyPKIDPairEntry.isDeleted, + ); err != nil { + return errors.Wrapf( + err, + "_flushSnapshotValidatorBLSPublicKeyPKIDPairEntryToDbWithTxn: problem deleting BLSPublicKeyPKIDPairEntry for EpochNumber %d: ", + mapKey.SnapshotAtEpochNumber, + ) + } + } + + // Put all !isDeleted Snapshot BLSPublicKeyPKIDPairEntries into the db from the UtxoView. + for mapKey, blsPublicKeyPKIDPairEntry := range bav.SnapshotValidatorBLSPublicKeyPKIDPairEntries { + if blsPublicKeyPKIDPairEntry == nil { + return fmt.Errorf( + "_flushSnapshotValidatorBLSPublicKeyPKIDPairEntryToDbWithTxn: found nil entry for EpochNumber %d, this should never happen", + mapKey.SnapshotAtEpochNumber, + ) + } + if blsPublicKeyPKIDPairEntry.isDeleted { + // Skip any deleted BLSPublicKeyPKIDPairEntry. + continue + } + if err := DBPutSnapshotValidatorBLSPublicKeyPKIDPairEntryWithTxn( + txn, bav.Snapshot, blsPublicKeyPKIDPairEntry, mapKey.SnapshotAtEpochNumber, blockHeight, bav.EventManager, + ); err != nil { + return errors.Wrapf( + err, + "_flushSnapshotValidatorBLSPublicKeyPKIDPairEntryToDbWithTxn: problem setting BLSPublicKeyPKIDPairEntry for EpochNumber %d: ", + mapKey.SnapshotAtEpochNumber, + ) + } + } + return nil +} + // // SnapshotValidatorSetTotalStakeAmountNanos // func (bav *UtxoView) GetSnapshotValidatorSetTotalStakeAmountNanos() (*uint256.Int, error) { // Calculate the SnapshotEpochNumber. 
- snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() + snapshotAtEpochNumber, err := bav.GetCurrentSnapshotEpochNumber() if err != nil { return nil, errors.Wrapf(err, "GetSnapshotValidatorSetTotalStakeAmountNanos: problem calculating SnapshotEpochNumber: ") } @@ -731,7 +965,7 @@ func (bav *UtxoView) _setSnapshotStakeToReward(stakeEntry *StakeEntry, snapshotA // GetAllSnapshotStakesToReward returns all snapshotted StakeEntries that are eligible to receive staking // rewards for the current snapshot epoch. The order of the returned entries is arbitrary. func (bav *UtxoView) GetAllSnapshotStakesToReward() ([]*StakeEntry, error) { - snapshotGlobalParams, err := bav.GetSnapshotGlobalParamsEntry() + snapshotGlobalParams, err := bav.GetCurrentSnapshotGlobalParamsEntry() if err != nil { return nil, errors.Wrapf(err, "GetAllSnapshotStakesToReward: problem calculating SnapshotEpochNumber: ") } @@ -743,7 +977,7 @@ func (bav *UtxoView) GetAllSnapshotStakesToReward() ([]*StakeEntry, error) { } // Calculate the SnapshotEpochNumber. - snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() + snapshotAtEpochNumber, err := bav.GetCurrentSnapshotEpochNumber() if err != nil { return nil, errors.Wrapf(err, "GetAllSnapshotStakesToReward: problem calculating SnapshotEpochNumber: ") } @@ -957,14 +1191,14 @@ type SnapshotLeaderScheduleMapKey struct { func (bav *UtxoView) GetSnapshotLeaderScheduleValidator(leaderIndex uint16) (*ValidatorEntry, error) { // Calculate the SnapshotEpochNumber. - snapshotAtEpochNumber, err := bav.GetSnapshotEpochNumber() + snapshotAtEpochNumber, err := bav.GetCurrentSnapshotEpochNumber() if err != nil { return nil, errors.Wrapf(err, " GetSnapshotLeaderScheduleValidator: problem calculating SnapshotEpochNumber: ") } // First, check the UtxoView. 
mapKey := SnapshotLeaderScheduleMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, LeaderIndex: leaderIndex} if validatorPKID, exists := bav.SnapshotLeaderSchedule[mapKey]; exists { - return bav.GetSnapshotValidatorSetEntryByPKID(validatorPKID) + return bav.GetCurrentSnapshotValidatorSetEntryByPKID(validatorPKID) } // Next, check the db. validatorEntry, err := DBGetSnapshotLeaderScheduleValidator(bav.Handle, bav.Snapshot, leaderIndex, snapshotAtEpochNumber) @@ -978,6 +1212,54 @@ func (bav *UtxoView) GetSnapshotLeaderScheduleValidator(leaderIndex uint16) (*Va return validatorEntry, nil } +type LeaderPKIDAndIndex struct { + leaderIdx uint16 + leaderPKID *PKID +} + +func (bav *UtxoView) GetCurrentSnapshotLeaderSchedule() ([]*PKID, error) { + snapshotAtEpochNumber, err := bav.GetCurrentSnapshotEpochNumber() + if err != nil { + return nil, errors.Wrapf(err, "GetCurrentSnapshotLeaderSchedule: problem calculating SnapshotEpochNumber: ") + } + return bav.GetSnapshotLeaderScheduleAtEpochNumber(snapshotAtEpochNumber) +} +func (bav *UtxoView) GetSnapshotLeaderScheduleAtEpochNumber(snapshotAtEpochNumber uint64) ([]*PKID, error) { + // Seek over DB prefix and merge into view. + leaderIdxToValidatorPKIDMap, err := DBSeekSnapshotLeaderSchedule(bav.Handle, snapshotAtEpochNumber) + if err != nil { + return nil, errors.Wrapf(err, "GetSnapshotLeaderScheduleAtEpochNumber: error retrieving ValidatorPKIDs: ") + } + // Merge the DB entries into the UtxoView. + for leaderIdx, validatorPKID := range leaderIdxToValidatorPKIDMap { + snapshotLeaderScheduleMapKey := SnapshotLeaderScheduleMapKey{ + SnapshotAtEpochNumber: snapshotAtEpochNumber, + LeaderIndex: leaderIdx, + } + if _, exists := bav.SnapshotLeaderSchedule[snapshotLeaderScheduleMapKey]; !exists { + bav._setSnapshotLeaderScheduleValidator(validatorPKID, leaderIdx, snapshotAtEpochNumber) + } + } + + // First, check the UtxoView. 
+ var leaderPKIDAndIndexSlice []LeaderPKIDAndIndex + for mapKey, validatorPKID := range bav.SnapshotLeaderSchedule { + if mapKey.SnapshotAtEpochNumber == snapshotAtEpochNumber { + leaderPKIDAndIndexSlice = append(leaderPKIDAndIndexSlice, LeaderPKIDAndIndex{ + leaderIdx: mapKey.LeaderIndex, + leaderPKID: validatorPKID, + }) + } + } + sort.Slice(leaderPKIDAndIndexSlice, func(ii, jj int) bool { + return leaderPKIDAndIndexSlice[ii].leaderIdx < leaderPKIDAndIndexSlice[jj].leaderIdx + }) + leaderPKIDs := collections.Transform(leaderPKIDAndIndexSlice, func(index LeaderPKIDAndIndex) *PKID { + return index.leaderPKID + }) + return leaderPKIDs, nil +} + func (bav *UtxoView) _setSnapshotLeaderScheduleValidator(validatorPKID *PKID, index uint16, snapshotAtEpochNumber uint64) { if validatorPKID == nil { glog.Errorf("_setSnapshotLeaderScheduleValidator: called with nil ValidatorPKID, this should never happen") @@ -1015,6 +1297,12 @@ func DBKeyForSnapshotLeaderScheduleValidator(leaderIndex uint16, snapshotAtEpoch return data } +func DBSSeekKeyForSnapshotLeaderSchedule(snapshotAtEpochNumber uint64) []byte { + data := append([]byte{}, Prefixes.PrefixSnapshotLeaderSchedule...) + data = append(data, EncodeUint64(snapshotAtEpochNumber)...) + return data +} + func DBGetSnapshotLeaderScheduleValidator( handle *badger.DB, snap *Snapshot, @@ -1058,6 +1346,27 @@ func DBGetSnapshotLeaderScheduleValidatorWithTxn( return DBGetSnapshotValidatorSetEntryByPKIDWithTxn(txn, snap, validatorPKID, snapshotAtEpochNumber) } +func DBSeekSnapshotLeaderSchedule( + handle *badger.DB, + snapshotAtEpochNumber uint64, +) (map[uint16]*PKID, error) { + seekKey := DBSSeekKeyForSnapshotLeaderSchedule(snapshotAtEpochNumber) + keysFound, valsFound := EnumerateKeysForPrefix(handle, seekKey) + leaderIdxToPKID := make(map[uint16]*PKID) + for idx, keyFound := range keysFound { + // TODO: Make sure this decode is correct + leaderIndex := DecodeUint16(keyFound[len(seekKey):]) + // Decode ValidatorPKID from bytes. 
+ validatorPKID := &PKID{} + rr := bytes.NewReader(valsFound[idx]) + if exist, err := DecodeFromBytes(validatorPKID, rr); !exist || err != nil { + return nil, errors.Wrapf(err, "DBSeekSnapshotLeaderSchedule: problem decoding ValidatorPKID") + } + leaderIdxToPKID[leaderIndex] = validatorPKID + } + return leaderIdxToPKID, nil +} + func DBPutSnapshotLeaderScheduleValidatorWithTxn( txn *badger.Txn, snap *Snapshot, diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index e40afb1d4..0c8b3e799 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -136,7 +136,7 @@ func (bav *UtxoView) computeStakerRewardAndValidatorCommission( // Fetch the ValidatorEntry that the stake is delegated to. The validator is guaranteed to be in the // snapshot validator set, because only stakes from the snapshot validator set are eligible to receive // rewards. - validatorEntry, err := bav.GetSnapshotValidatorSetEntryByPKID(snapshotStakeEntry.ValidatorPKID) + validatorEntry, err := bav.GetCurrentSnapshotValidatorSetEntryByPKID(snapshotStakeEntry.ValidatorPKID) if err != nil { return 0, 0, errors.Wrapf(err, "computeStakerRewardAndValidatorCommission: problem fetching validator entry: ") } diff --git a/lib/server.go b/lib/server.go index 66a8299dd..550ce34a4 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1376,20 +1376,20 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { // the blockNodes in the header chain and set them in the blockchain data structures. err = srv.blockchain.db.Update(func(txn *badger.Txn) error { for ii := uint64(1); ii <= srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight; ii++ { - curretNode := srv.blockchain.bestHeaderChain[ii] + currentNode := srv.blockchain.bestHeaderChain[ii] // Do not set the StatusBlockStored flag, because we still need to download the past blocks. 
- curretNode.Status |= StatusBlockProcessed - curretNode.Status |= StatusBlockValidated - srv.blockchain.blockIndex[*curretNode.Hash] = curretNode - srv.blockchain.bestChainMap[*curretNode.Hash] = curretNode - srv.blockchain.bestChain = append(srv.blockchain.bestChain, curretNode) - err := PutHeightHashToNodeInfoWithTxn(txn, srv.snapshot, curretNode, false /*bitcoinNodes*/, srv.eventManager) + currentNode.Status |= StatusBlockProcessed + currentNode.Status |= StatusBlockValidated + srv.blockchain.addNewBlockNodeToBlockIndex(currentNode) + srv.blockchain.bestChainMap[*currentNode.Hash] = currentNode + srv.blockchain.bestChain = append(srv.blockchain.bestChain, currentNode) + err = PutHeightHashToNodeInfoWithTxn(txn, srv.snapshot, currentNode, false /*bitcoinNodes*/, srv.eventManager) if err != nil { return err } } // We will also set the hash of the block at snapshot height as the best chain hash. - err := PutBestHashWithTxn(txn, srv.snapshot, msg.SnapshotMetadata.CurrentEpochBlockHash, ChainTypeDeSoBlock, srv.eventManager) + err = PutBestHashWithTxn(txn, srv.snapshot, msg.SnapshotMetadata.CurrentEpochBlockHash, ChainTypeDeSoBlock, srv.eventManager) return err }) @@ -1898,7 +1898,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { glog.V(1).Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlock: Processing block %v WITHOUT "+ "signature checking because SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp))) - _, isOrphan, err = srv.blockchain.ProcessBlock(blk, false) + _, isOrphan, _, err = srv.blockchain.ProcessBlock(blk, false) } else { // TODO: Signature checking slows things down because it acquires the ChainLock. 
@@ -1907,7 +1907,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { glog.V(1).Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlock: Processing block %v WITH "+ "signature checking because SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp))) - _, isOrphan, err = srv.blockchain.ProcessBlock(blk, true) + _, isOrphan, _, err = srv.blockchain.ProcessBlock(blk, true) } // If we hit an error then abort mission entirely. We should generally never diff --git a/lib/txindex.go b/lib/txindex.go index 310487076..ae9d01b19 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -220,7 +220,8 @@ func (txi *TXIndex) GetTxindexUpdateBlockNodes() ( // The only thing we can really do in this case is rebuild the entire index // from scratch. To do that, we return all the blocks in the index to detach // and all the blocks in the real chain to attach. - txindexTipNode := txi.CoreChain.CopyBlockIndex()[*txindexTipHash.Hash] + blockIndexByHashCopy, _ := txi.TXIndexChain.CopyBlockIndexes() + txindexTipNode := blockIndexByHashCopy[*txindexTipHash.Hash] if txindexTipNode == nil { glog.Info("GetTxindexUpdateBlockNodes: Txindex tip was not found; building txindex starting at genesis block") @@ -364,13 +365,13 @@ func (txi *TXIndex) Update() error { // Delete this block from the chain db so we don't get duplicate block errors. // Remove this block from our bestChain data structures. 
- newBlockIndex := txi.TXIndexChain.CopyBlockIndex() + newBlockIndexByHash, newBlockIndexByHeight := txi.TXIndexChain.CopyBlockIndexes() newBestChain, newBestChainMap := txi.TXIndexChain.CopyBestChain() newBestChain = newBestChain[:len(newBestChain)-1] delete(newBestChainMap, *(blockToDetach.Hash)) - delete(newBlockIndex, *(blockToDetach.Hash)) + delete(newBlockIndexByHash, *(blockToDetach.Hash)) - txi.TXIndexChain.SetBestChainMap(newBestChain, newBestChainMap, newBlockIndex) + txi.TXIndexChain.SetBestChainMap(newBestChain, newBestChainMap, newBlockIndexByHash, newBlockIndexByHeight) // At this point the entries for the block should have been removed // from both our Txindex chain and our transaction index mappings. @@ -439,7 +440,7 @@ func (txi *TXIndex) Update() error { // Now that we have added all the txns to our TxIndex db, attach the block // to update our chain. - _, _, err = txi.TXIndexChain.ProcessBlock(blockMsg, false /*verifySignatures*/) + _, _, _, err = txi.TXIndexChain.ProcessBlock(blockMsg, false /*verifySignatures*/) if err != nil { return fmt.Errorf("Update: Problem attaching block %v: %v", blockToAttach, err) From 87d16b5c2f7db0b09936079aa908da4431cca417 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 4 Dec 2023 11:31:19 -0500 Subject: [PATCH 303/762] Remove Stubbed Out Functions For Node Syncing (#844) * Remove Stubbed Out Functions For Node Syncing * Add build flag --- lib/block_view_validator.go | 11 ++---- lib/pos_consensus_controller.go | 54 +++++++--------------------- lib/pos_consensus_controller_test.go | 2 ++ 3 files changed, 16 insertions(+), 51 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 9f33f0810..8d6bd53da 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -4,12 +4,13 @@ import ( "bytes" "crypto/sha256" "fmt" - "github.com/deso-protocol/core/consensus" "io" "math" "net/url" "sort" + 
"github.com/deso-protocol/core/consensus" + "github.com/deso-protocol/core/bls" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" @@ -85,14 +86,6 @@ type ValidatorEntry struct { isDeleted bool } -func (validatorEntry *ValidatorEntry) GetPublicKey() *bls.PublicKey { - return validatorEntry.VotingPublicKey -} - -func (validatorEntry *ValidatorEntry) GetStakeAmount() *uint256.Int { - return validatorEntry.TotalStakeAmountNanos -} - func (validatorEntry *ValidatorEntry) Status() ValidatorStatus { // ValidatorEntry.Status() is a virtual/derived field that is not stored in // the database, but instead constructed from other ValidatorEntry fields. diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index abd171390..56076428b 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -3,6 +3,7 @@ package lib import ( "sync" + "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" "github.com/pkg/errors" @@ -121,7 +122,7 @@ func (cc *ConsensusController) handleBlockProposerEvent( // Fetch the parent block parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) - parentBlock, parentBlockExists := cc.blockchain.blockIndex[*parentBlockHash] + parentBlock, parentBlockExists := cc.blockchain.blockIndexByHash[*parentBlockHash] if !parentBlockExists { return errors.Errorf("Error fetching parent block: %v", parentBlockHash) } @@ -164,7 +165,7 @@ func (cc *ConsensusController) handleBlockProposerEvent( } // TODO: Compute the random seed hash for the block proposer - var proposerRandomSeedHash *RandomSeedHash + var proposerRandomSeedSignature *bls.Signature var block *MsgDeSoBlock @@ -174,7 +175,7 @@ func (cc *ConsensusController) handleBlockProposerEvent( utxoViewAtParent, event.TipBlockHeight+1, event.View, - proposerRandomSeedHash, + proposerRandomSeedSignature, QuorumCertificateFromConsensusInterface(event.QC), ) if err != nil { @@ 
-186,7 +187,7 @@ func (cc *ConsensusController) handleBlockProposerEvent( utxoViewAtParent, event.TipBlockHeight+1, event.View, - proposerRandomSeedHash, + proposerRandomSeedSignature, AggregateQuorumCertificateFromConsensusInterface(event.AggregateQC), ) if err != nil { @@ -419,7 +420,7 @@ func (cc *ConsensusController) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([]* // safe blocks // Fetch the safe blocks that are eligible to be extended from by the next incoming tip block - safeBlocks, err := cc.blockchain.getSafeBlocks() + safeBlocks, err := cc.blockchain.GetSafeBlocks() if err != nil { return nil, errors.Errorf("HandleFastHostStuffBlockProposal: Error fetching safe blocks: %v", err) } @@ -559,42 +560,11 @@ func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) return nil, errors.Errorf("error finding epoch number for block height: %v", blockHeight) } -//////////////////////////////////////////////////////////////////////////////// -// TODO: delete all of the functions below. They are dummy stubbed out functions -// needed by ConsensusController, but are implemented in other feature branches. -// We stub them out here to unblock consensus work, and can delete them once other -// feature branches are merged. 
-//////////////////////////////////////////////////////////////////////////////// - -func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, error) { - return nil, errors.New("getUtxoViewAtBlockHash: replace me with a real implementation later") -} - func (bav *UtxoView) SimulateNextEpochEntry(epochEntry *EpochEntry) (*EpochEntry, error) { - return nil, errors.New("SimulateNextEpochEntry: replace me with a real implementation later") -} - -func (bav *UtxoView) ComputeSnapshotEpochNumberForEpoch(epochNumber uint64) (uint64, error) { - return 0, errors.New("ComputeSnapshotEpochNumberForEpoch: replace me with a real implementation later") -} - -func (bav *UtxoView) GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotAtEpochNumber uint64) ([]*ValidatorEntry, error) { - return nil, errors.New("GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber: replace me with a real implementation later") -} - -func (epochEntry *EpochEntry) ContainsBlockHeight(blockHeight uint64) bool { - // TODO: Implement this later - return false -} - -func (bc *Blockchain) getSafeBlocks() ([]*MsgDeSoHeader, error) { - return nil, errors.New("getSafeBlocks: replace me with a real implementation later") -} - -func (bav *UtxoView) getNextRandomSeedHash() (*RandomSeedHash, error) { - return nil, errors.New("getNextRandomSeedHash: replace me with a real implementation later") -} - -func (b *BlockNode) IsValidated() bool { - return false + return bav.computeNextEpochEntry( + epochEntry.EpochNumber, + epochEntry.FinalBlockHeight, + epochEntry.InitialView, + epochEntry.CreatedAtBlockTimestampNanoSecs, + ) } diff --git a/lib/pos_consensus_controller_test.go b/lib/pos_consensus_controller_test.go index 84673ed87..5d3e5cb34 100644 --- a/lib/pos_consensus_controller_test.go +++ b/lib/pos_consensus_controller_test.go @@ -1,3 +1,5 @@ +//go:build relic + package lib import ( From 934d7578abf17c01a248a39d480098d918155551 Mon Sep 17 00:00:00 2001 From: Lazy Nina 
<81658138+lazynina@users.noreply.github.com> Date: Mon, 4 Dec 2023 13:03:04 -0500 Subject: [PATCH 304/762] grab chainlock in ProcessBlockPoS (#845) Co-authored-by: Lazy Nina <> --- lib/pos_blockchain.go | 8 ++++++++ lib/pos_blockchain_test.go | 22 +++++++++++----------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 4a3381c10..111c22a3a 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -12,6 +12,14 @@ import ( "github.com/pkg/errors" ) +// ProcessBlockPoS simply acquires the chain lock and calls processBlockPoS. +func (bc *Blockchain) ProcessBlockPoS(block *MsgDeSoBlock, currentView uint64, verifySignatures bool) ( + _success bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { + bc.ChainLock.Lock() + defer bc.ChainLock.Unlock() + return bc.processBlockPoS(block, currentView, verifySignatures) +} + // processBlockPoS runs the Fast-Hotstuff block connect and commit rule as follows: // 1. Determine if we're missing the parent block of this block. // If so, return the hash of the missing block and add this block to the orphans list. diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 211a07d00..909734f40 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -1663,7 +1663,7 @@ func TestProcessBlockPoS(t *testing.T) { { // Create a bad block and try to process it. 
dummyBlock := _generateDummyBlock(testMeta, 12, 12, 887) - success, isOrphan, missingBlockHashes, err := testMeta.chain.processBlockPoS(dummyBlock, 12, true) + success, isOrphan, missingBlockHashes, err := testMeta.chain.ProcessBlockPoS(dummyBlock, 12, true) require.False(t, success) require.False(t, isOrphan) require.Len(t, missingBlockHashes, 0) @@ -1674,7 +1674,7 @@ func TestProcessBlockPoS(t *testing.T) { { var realBlock *MsgDeSoBlock realBlock = _generateRealBlock(testMeta, 12, 12, 889, testMeta.chain.BlockTip().Hash, false) - success, isOrphan, missingBlockHashes, err := testMeta.chain.processBlockPoS(realBlock, 12, true) + success, isOrphan, missingBlockHashes, err := testMeta.chain.ProcessBlockPoS(realBlock, 12, true) require.True(t, success) require.False(t, isOrphan) require.Len(t, missingBlockHashes, 0) @@ -1692,7 +1692,7 @@ func TestProcessBlockPoS(t *testing.T) { // Now let's try adding two more blocks on top of this one to make sure commit rule works properly. var realBlock2 *MsgDeSoBlock realBlock2 = _generateRealBlock(testMeta, 13, 13, 950, blockHash1, false) - success, _, _, err := testMeta.chain.processBlockPoS(realBlock2, 13, true) + success, _, _, err := testMeta.chain.ProcessBlockPoS(realBlock2, 13, true) require.True(t, success) blockHash2, err = realBlock2.Hash() require.NoError(t, err) @@ -1700,7 +1700,7 @@ func TestProcessBlockPoS(t *testing.T) { var realBlock3 *MsgDeSoBlock realBlock3 = _generateRealBlock(testMeta, 14, 14, 378, blockHash2, false) - success, _, _, err = testMeta.chain.processBlockPoS(realBlock3, 14, true) + success, _, _, err = testMeta.chain.ProcessBlockPoS(realBlock3, 14, true) require.True(t, success) // Okay now we expect blockHash1 to be committed, but blockHash2 and 3 to not be committed. 
blockHash3, err = realBlock3.Hash() @@ -1714,7 +1714,7 @@ func TestProcessBlockPoS(t *testing.T) { // Okay let's timeout view 15 var timeoutBlock *MsgDeSoBlock timeoutBlock = _generateRealBlock(testMeta, 15, 16, 381, blockHash3, true) - success, _, _, err := testMeta.chain.processBlockPoS(timeoutBlock, 15, true) + success, _, _, err := testMeta.chain.ProcessBlockPoS(timeoutBlock, 15, true) fmt.Println(err) require.True(t, success) timeoutBlockHash, err = timeoutBlock.Hash() @@ -1728,7 +1728,7 @@ func TestProcessBlockPoS(t *testing.T) { // Okay let's introduce a reorg. New block at view 15 with block 3 as its parent. var reorgBlock *MsgDeSoBlock reorgBlock = _generateRealBlock(testMeta, 15, 15, 373, blockHash3, false) - success, _, _, err := testMeta.chain.processBlockPoS(reorgBlock, 15, true) + success, _, _, err := testMeta.chain.ProcessBlockPoS(reorgBlock, 15, true) require.True(t, success) reorgBlockHash, err = reorgBlock.Hash() require.NoError(t, err) @@ -1758,7 +1758,7 @@ func TestProcessBlockPoS(t *testing.T) { updateProposerVotePartialSignatureForBlock(testMeta, orphanBlock) orphanBlockHash, err := orphanBlock.Hash() require.NoError(t, err) - success, isOrphan, missingBlockHashes, err := testMeta.chain.processBlockPoS(orphanBlock, 17, true) + success, isOrphan, missingBlockHashes, err := testMeta.chain.ProcessBlockPoS(orphanBlock, 17, true) require.False(t, success) require.True(t, isOrphan) require.Len(t, missingBlockHashes, 1) @@ -1770,7 +1770,7 @@ func TestProcessBlockPoS(t *testing.T) { require.False(t, orphanBlockInIndex.IsValidated()) // Okay now if we process the parent block, the orphan should get updated to be validated. 
- success, isOrphan, missingBlockHashes, err = testMeta.chain.processBlockPoS(dummyParentBlock, 16, true) + success, isOrphan, missingBlockHashes, err = testMeta.chain.ProcessBlockPoS(dummyParentBlock, 16, true) require.True(t, success) require.False(t, isOrphan) require.Len(t, missingBlockHashes, 0) @@ -1793,7 +1793,7 @@ func TestProcessBlockPoS(t *testing.T) { updateProposerVotePartialSignatureForBlock(testMeta, malformedOrphanBlock) malformedOrphanBlockHash, err := malformedOrphanBlock.Hash() require.NoError(t, err) - success, isOrphan, missingBlockHashes, err := testMeta.chain.processBlockPoS(malformedOrphanBlock, 18, true) + success, isOrphan, missingBlockHashes, err := testMeta.chain.ProcessBlockPoS(malformedOrphanBlock, 18, true) require.False(t, success) require.True(t, isOrphan) require.Len(t, missingBlockHashes, 1) @@ -1806,7 +1806,7 @@ func TestProcessBlockPoS(t *testing.T) { // If a block can't be hashed, we expect to get an error. malformedOrphanBlock.Header.TxnConnectStatusByIndexHash = nil - success, isOrphan, missingBlockHashes, err = testMeta.chain.processBlockPoS(malformedOrphanBlock, 18, true) + success, isOrphan, missingBlockHashes, err = testMeta.chain.ProcessBlockPoS(malformedOrphanBlock, 18, true) require.False(t, success) require.False(t, isOrphan) require.Len(t, missingBlockHashes, 0) @@ -2231,7 +2231,7 @@ func TestHasValidProposerRandomSeedSignaturePoS(t *testing.T) { isValid, err := testMeta.chain.hasValidProposerRandomSeedSignaturePoS(realBlock) require.NoError(t, err) require.True(t, isValid) - _, _, _, err = testMeta.chain.processBlockPoS(realBlock, 12, true) + _, _, _, err = testMeta.chain.ProcessBlockPoS(realBlock, 12, true) require.NoError(t, err) realBlockHash, err := realBlock.Hash() require.NoError(t, err) From ada7fbee2eea30935eead4b1734e8d085eb9c3d4 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 4 Dec 2023 13:33:29 -0500 Subject: [PATCH 305/762] Add Thread-safe Block Index 
Read (#846) --- lib/blockchain.go | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index d7052c0f7..07ff192ef 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5,7 +5,6 @@ import ( "container/list" "encoding/hex" "fmt" - "github.com/deso-protocol/core/collections" "math" "math/big" "reflect" @@ -14,6 +13,8 @@ import ( "strings" "time" + "github.com/deso-protocol/core/collections" + "github.com/google/uuid" "github.com/holiman/uint256" @@ -1081,6 +1082,20 @@ func (bc *Blockchain) HasBlock(blockHash *BlockHash) bool { return true } +// This needs to hold a lock on the blockchain because it read from an in-memory map that is +// not thread-safe. +func (bc *Blockchain) GetBlockHeaderFromIndex(blockHash *BlockHash) *MsgDeSoHeader { + bc.ChainLock.RLock() + defer bc.ChainLock.RUnlock() + + block, blockExists := bc.blockIndexByHash[*blockHash] + if !blockExists { + return nil + } + + return block.Header +} + // Don't need a lock because blocks don't get removed from the db after they're added func (bc *Blockchain) GetBlock(blockHash *BlockHash) *MsgDeSoBlock { blk, err := GetBlock(blockHash, bc.db, bc.snapshot) From 42a2411bd9dbd02a5e35cabc7a00b65bd5096b42 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 4 Dec 2023 20:30:48 -0500 Subject: [PATCH 306/762] Pull RandomSeedHash Generation Out of UtxoView (#848) --- lib/pos_blockchain.go | 2 +- lib/pos_random_seed.go | 18 +++++++----------- lib/pos_random_seed_test.go | 22 ++++++++++------------ 3 files changed, 18 insertions(+), 24 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 111c22a3a..60d701ea8 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -747,7 +747,7 @@ func (bc *Blockchain) hasValidProposerRandomSeedSignaturePoS(block *MsgDeSoBlock return false, RuleErrorMissingParentBlock } - prevRandomSeedHash, err := 
hashRandomSeedSignature(parentBlock.Header.ProposerRandomSeedSignature) + prevRandomSeedHash, err := HashRandomSeedSignature(parentBlock.Header.ProposerRandomSeedSignature) if err != nil { return false, errors.Wrapf(err, "hasValidProposerRandomSeedSignaturePoS: Problem converting prev random seed hash to RandomSeedHash") diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index fbcbf56fd..fab607fbd 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -60,18 +60,10 @@ func (randomSeedHash *RandomSeedHash) isEmpty() bool { return randomSeedHash == nil || randomSeedHash.Eq(&RandomSeedHash{}) } -// -// UTXO VIEW UTILS -// - -func (bav *UtxoView) GenerateNextRandomSeedSignature(signerPrivateKey *bls.PrivateKey) (*bls.Signature, error) { +func GenerateNextRandomSeedSignature(currentRandomSeedHash *RandomSeedHash, signerPrivateKey *bls.PrivateKey) (*bls.Signature, error) { // This function generates a RandomSeedSignature by signing the CurrentRandomSeedHash // with the provided bls.PrivateKey. This signature is deterministic: given the same // CurrentRandomSeedHash and bls.PrivateKey, the same signature will always be generated. 
- currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash() - if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GenerateNextRandomSeedSignature: problem retrieving CurrentRandomSeedHash: ") - } randomSeedSignature, err := SignRandomSeedHash(signerPrivateKey, currentRandomSeedHash) if err != nil { return nil, errors.Wrapf(err, "UtxoView.GenerateNextRandomSeedSignature: problem generating RandomSeedSignature: ") @@ -110,7 +102,7 @@ func (bav *UtxoView) VerifyRandomSeedSignature( if !isVerified { return nil, errors.Errorf("UtxoView.VerifyRandomSeedSignature: invalid RandomSeedSignature provided") } - return hashRandomSeedSignature(randomSeedSignature) + return HashRandomSeedSignature(randomSeedSignature) } func verifySignatureOnRandomSeedHash( @@ -119,7 +111,7 @@ func verifySignatureOnRandomSeedHash( return signerPublicKey.Verify(randomSeedSignature, randomSeedHash[:]) } -func hashRandomSeedSignature(randomSeedSignature *bls.Signature) (*RandomSeedHash, error) { +func HashRandomSeedSignature(randomSeedSignature *bls.Signature) (*RandomSeedHash, error) { // This function takes in a random seed signature and computes the random seed hash for it // Convert the RandomSeedSignature to a RandomSeedHash. randomSeedSHA256 := sha256.Sum256(randomSeedSignature.ToBytes()) @@ -130,6 +122,10 @@ func hashRandomSeedSignature(randomSeedSignature *bls.Signature) (*RandomSeedHas return newRandomSeedHash, nil } +// +// UTXO VIEW UTILS +// + func (bav *UtxoView) GetCurrentRandomSeedHash() (*RandomSeedHash, error) { // First, check the UtxoView. if bav.CurrentRandomSeedHash != nil { diff --git a/lib/pos_random_seed_test.go b/lib/pos_random_seed_test.go index ba74c8838..a07e034b1 100644 --- a/lib/pos_random_seed_test.go +++ b/lib/pos_random_seed_test.go @@ -26,8 +26,12 @@ func TestCurrentRandomSeedHash(t *testing.T) { // Test generating + verifying RandomSeedSignatures. + // The initial CurrentRandomSeedHash is the GenesisRandomSeedHash. 
+ genesisRandomSeedHash, err := utxoView.GetCurrentRandomSeedHash() + require.NoError(t, err) + // PrivateKey1 creates a new RandomSeedSignature. - randomSeedSignature1, err := utxoView.GenerateNextRandomSeedSignature(privateKey1) + randomSeedSignature1, err := GenerateNextRandomSeedSignature(genesisRandomSeedHash, privateKey1) require.NoError(t, err) // PublicKey1 is verified to correspond to PrivateKey that signed the RandomSeedSignature. randomSeedHash1, err := utxoView.VerifyRandomSeedSignature(publicKey1, randomSeedSignature1) @@ -40,7 +44,7 @@ func TestCurrentRandomSeedHash(t *testing.T) { require.Nil(t, randomSeedHash1) // PrivateKey2 creates a new RandomSeedSignature. - randomSeedSignature2, err := utxoView.GenerateNextRandomSeedSignature(privateKey2) + randomSeedSignature2, err := GenerateNextRandomSeedSignature(genesisRandomSeedHash, privateKey2) require.NoError(t, err) // PublicKey1 is not verified to correspond to the PrivateKey that signed the RandomSeedSignature. randomSeedHash2, err := utxoView.VerifyRandomSeedSignature(publicKey1, randomSeedSignature2) @@ -54,14 +58,8 @@ func TestCurrentRandomSeedHash(t *testing.T) { // Test updating CurrentRandomSeedHash. - // The initial CurrentRandomSeedHash is the GenesisRandomSeedHash. - randomSeedHash1, err = utxoView.GetCurrentRandomSeedHash() - require.NoError(t, err) - require.True(t, randomSeedHash1.Eq(&RandomSeedHash{})) - require.True(t, randomSeedHash1.ToUint256().Eq(uint256.NewInt())) - - // PrivateKey1 generates a new RandomSeedSignature. - randomSeedSignature1, err = utxoView.GenerateNextRandomSeedSignature(privateKey1) + // PrivateKey1 generates a new RandomSeedSignature from genesisRandomSeedHash. + randomSeedSignature1, err = GenerateNextRandomSeedSignature(genesisRandomSeedHash, privateKey1) require.NoError(t, err) // PublicKey1 is verified to correspond to the PrivateKey that signed the RandomSeedSignature. 
randomSeedHash1, err = utxoView.VerifyRandomSeedSignature(publicKey1, randomSeedSignature1) @@ -73,8 +71,8 @@ func TestCurrentRandomSeedHash(t *testing.T) { utxoView._setCurrentRandomSeedHash(randomSeedHash1) require.NoError(t, utxoView.FlushToDb(blockHeight)) - // PrivateKey2 generates a new RandomSeedSignature. - randomSeedSignature2, err = utxoView.GenerateNextRandomSeedSignature(privateKey2) + // PrivateKey2 generates a new RandomSeedSignature from randomSeedHash1. + randomSeedSignature2, err = GenerateNextRandomSeedSignature(randomSeedHash1, privateKey2) require.NoError(t, err) // PublicKey2 is verified to correspond to the PrivateKey that signed the RandomSeedSignature. randomSeedHash2, err = utxoView.VerifyRandomSeedSignature(publicKey2, randomSeedSignature2) From 6c8d19577eacc74859726340ab404419ab1c5407 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 4 Dec 2023 20:38:05 -0500 Subject: [PATCH 307/762] Sign Random Seed Hash in PoS Block Proposal (#849) * Sign Random Seed Hash in PoS Block Proposal * Address Nina's nits --- lib/bls_keystore.go | 16 +++++++++++----- lib/pos_consensus_controller.go | 16 ++++++++++++---- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/lib/bls_keystore.go b/lib/bls_keystore.go index cf4908cd8..b1356da6c 100644 --- a/lib/bls_keystore.go +++ b/lib/bls_keystore.go @@ -13,14 +13,16 @@ import ( // - PoS Validator Timeout Messages // - PoS Block Proposals // - PoS Validator Connection Handshakes +// - PoS Random Seed Signature // // TODO: We will likely need to associate individual op-codes for each message type that can be signed, -// so that there no risk of signature collisions between different message types. Ex: the payload +// so that there is no risk of signature collisions between different message types. 
Ex: the payload // signed per message type must be made up of the following tuples: -// - Validator Vote: (0x01, view uint64, blockHash consensus.BlockHash) -// - Validator Timeout: (0x02, view uint64, highQCView uint64) -// - PoS Block Proposal: (0x03, view uint64, blockHash consensus.BlockHash) -// - PoS Validator Handshake: (0x04, peer's random nonce, our node's random nonce) +// - Validator Vote: (0x01, view uint64, blockHash consensus.BlockHash) +// - Validator Timeout: (0x02, view uint64, highQCView uint64) +// - PoS Block Proposal: (0x03, view uint64, blockHash consensus.BlockHash) +// - PoS Validator Handshake: (0x04, peer's random nonce, our node's random nonce) +// - PoS Random Seed Signature: (previous block's random seed hash) type BLSSignatureOpCode byte @@ -103,6 +105,10 @@ func (signer *BLSSigner) SignValidatorTimeout(view uint64, highQCView uint64) (* return signer.sign(BLSSignatureOpCodeValidatorTimeout, payload[:]) } +func (signer *BLSSigner) SignRandomSeedHash(randomSeedHash *RandomSeedHash) (*bls.Signature, error) { + return SignRandomSeedHash(signer.privateKey, randomSeedHash) +} + // TODO: Add signing function for PoS blocks func (signer *BLSSigner) SignPoSValidatorHandshake(nonceSent uint64, nonceReceived uint64, tstampMicro uint64) (*bls.Signature, error) { diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index 56076428b..9f285520f 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -3,7 +3,6 @@ package lib import ( "sync" - "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" "github.com/pkg/errors" @@ -157,6 +156,18 @@ func (cc *ConsensusController) handleBlockProposerEvent( ) } + // Compute the random seed hash for the previous block's proposer signature + parentBlockRandomSeedHash, err := HashRandomSeedSignature(parentBlock.Header.ProposerRandomSeedSignature) + if err != nil { + return errors.Wrapf(err, 
"Error computing random seed hash for block at height %d: ", event.TipBlockHeight+1) + } + + // Compute the next proposer random seed signature + proposerRandomSeedSignature, err := cc.signer.SignRandomSeedHash(parentBlockRandomSeedHash) + if err != nil { + return errors.Wrapf(err, "Error signing random seed hash for block at height %d: ", event.TipBlockHeight+1) + } + // Build a UtxoView at the parent block utxoViewAtParent, err := cc.blockchain.getUtxoViewAtBlockHash(*parentBlock.Hash) if err != nil { @@ -164,9 +175,6 @@ func (cc *ConsensusController) handleBlockProposerEvent( return errors.Errorf("Error fetching UtxoView for parent block: %v", parentBlockHash) } - // TODO: Compute the random seed hash for the block proposer - var proposerRandomSeedSignature *bls.Signature - var block *MsgDeSoBlock if expectedEventType == consensus.FastHotStuffEventTypeConstructVoteQC { From f66410a47c9d4b57601b969c7c39a0473ae8c3a1 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 4 Dec 2023 20:46:30 -0500 Subject: [PATCH 308/762] Create UtxoView Helper Function to Simulate Next Epoch Entry (#850) --- lib/pos_epoch_complete_hook.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 863af693e..73c9c8604 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -138,12 +138,28 @@ func (bav *UtxoView) runEpochCompleteSnapshotGeneration(epochNumber uint64) erro return nil } +// simulateNextEpochEntry simulates the block range for the next epoch given the current epoch's final +// block height and epoch number. The view and timestamp for the simulated epoch are left empty since they can't +// be easily simulated, so DO NOT USE CreatedAtBlockTimestampNanoSecs or InitialView from the returned EpochEntry. 
+func (bav *UtxoView) simulateNextEpochEntry(currentEpochNumber uint64, currentEpochFinalBlockHeight uint64) (*EpochEntry, error) { + return bav.computeNextEpochEntry( + currentEpochNumber, + currentEpochFinalBlockHeight, + 0, + 0, + ) +} + func (bav *UtxoView) computeNextEpochEntry(currentEpochNumber uint64, currentEpochFinalBlockHeight uint64, currentEpochFinalView uint64, nextEpochBlockTimestampNanoSecs uint64) (*EpochEntry, error) { // Retrieve the SnapshotGlobalParamsEntry to determine the next epoch's final block height. We use the // snapshot global params here because the next epoch begin immediately, and its length is used in the PoS // consensus. The validator set for the next epoch needs to be in agreement on the length of the epoch // before the epoch begins. - snapshotGlobalParamsEntry, err := bav.GetCurrentSnapshotGlobalParamsEntry() + snapshotAtEpochNumber, err := bav.ComputeSnapshotEpochNumberForEpoch(currentEpochNumber) + if err != nil { + return nil, errors.Wrapf(err, "simulatePrevEpochEntry: problem computing snapshot epoch number: ") + } + snapshotGlobalParamsEntry, err := bav.GetSnapshotGlobalParamsEntryByEpochNumber(snapshotAtEpochNumber) if err != nil { return nil, errors.Wrapf(err, "computeNextEpochEntry: problem retrieving SnapshotGlobalParamsEntry: ") } From 6cd96b09f8513c924a8fbac4d0cb5a1f884b72af Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 4 Dec 2023 20:53:06 -0500 Subject: [PATCH 309/762] Clean Up FastHotStuff Block Proposal End to End Flow (#851) * Clean Up FastHotStuff Block Proposal End to End Flow * Address Nina's comments --- consensus/event_loop.go | 1 + consensus/event_loop_test.go | 1 + lib/pos_blockchain.go | 3 + lib/pos_consensus_controller.go | 231 ++++++++++++++++++++------------ 4 files changed, 153 insertions(+), 83 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 7d0d57a45..06f507701 100644 --- a/consensus/event_loop.go +++ 
b/consensus/event_loop.go @@ -782,6 +782,7 @@ func (fc *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt View: fc.currentView, // The view that the timeout QC is proposed in TipBlockHash: validatorsHighQC.GetBlockHash(), // The block hash that we extend from TipBlockHeight: safeBlock.GetHeight(), // The block height that we extend from + QC: validatorsHighQC, // The high QC aggregated from the timeout messages AggregateQC: &aggregateQuorumCertificate{ view: fc.currentView - 1, // The timed out view is always the previous view highQC: validatorsHighQC, // The high QC aggregated from the timeout messages diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 2b8a88040..8939a7eb4 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -891,6 +891,7 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { ) // Verify that the high QC is the QC from block 2. It should be unchanged. + require.Equal(t, signal.QC.GetBlockHash(), block2.GetQC().GetBlockHash()) require.Equal(t, signal.AggregateQC.GetHighQC().GetBlockHash(), block2.GetQC().GetBlockHash()) require.Equal(t, signal.AggregateQC.GetHighQC().GetView(), block2.GetQC().GetView()) require.Equal(t, diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 60d701ea8..93c94e2db 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1359,6 +1359,9 @@ func (bc *Blockchain) getCommittedTip() (*BlockNode, int) { // ancestors have been validated and extending from this block would not // change any committed blocks. This means we return the committed tip and // all blocks from the committed tip that have been validated. +// +// This function is not thread-safe. The caller needs to hold the chain lock before +// calling this function. func (bc *Blockchain) GetSafeBlocks() ([]*MsgDeSoHeader, error) { // First get committed tip. 
committedTip, idx := bc.getCommittedTip() diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index 9f285520f..e26d1dacd 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -3,6 +3,7 @@ package lib import ( "sync" + "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" "github.com/pkg/errors" @@ -28,13 +29,50 @@ func NewConsensusController(params *DeSoParams, blockchain *Blockchain, mempool } } -func (cc *ConsensusController) Init() { - // This initializes the FastHotStuffEventLoop based on the blockchain state. This should - // only be called once the blockchain has synced, the node is ready to join the validator - // network, and the node is able validate blocks in the steady state. - // - // TODO: Implement this later once the Blockchain struct changes are merged. We need to be - // able to fetch the tip block and current persisted view from DB from the Blockchain struct. +// ConsensusController.Start initializes and starts the FastHotStuffEventLoop based on the +// blockchain state. This should only be called once the blockchain has synced, the node is +// ready to join the validator network, and the node is able to validate blocks in the steady state. +func (cc *ConsensusController) Start() error { + // Hold the write consensus controller's lock for thread-safety. + cc.lock.Lock() + defer cc.lock.Unlock() + + // Hold the blockchain's read lock so that the chain cannot be mutated underneath us. In practice, + // this is a no-op, but it guarantees thread-safety in the event that other parts of the codebase + // change. 
+ cc.blockchain.ChainLock.RLock() + defer cc.blockchain.ChainLock.RUnlock() + + // Fetch the current tip of the chain + tipBlock := cc.blockchain.BlockTip() + + // Fetch the validator set at each safe block + tipBlockWithValidators, err := cc.fetchValidatorListsForSafeBlocks([]*MsgDeSoHeader{tipBlock.Header}) + if err != nil { + return errors.Errorf("ConsensusController.Start: Error fetching validator list for tip blocks: %v", err) + } + + // Fetch the safe blocks that are eligible to be extended from by the next incoming tip block + safeBlocks, err := cc.blockchain.GetSafeBlocks() + if err != nil { + return errors.Errorf("ConsensusController.Start: Error fetching safe blocks: %v", err) + } + + // Fetch the validator set at each safe block + safeBlocksWithValidators, err := cc.fetchValidatorListsForSafeBlocks(safeBlocks) + if err != nil { + return errors.Errorf("ConsensusController.Start: Error fetching validator lists for safe blocks: %v", err) + } + + // Initialize and start the event loop. 
TODO: Pass in the crank timer duration and timeout duration + cc.fastHotStuffEventLoop.Init(0, 0, tipBlockWithValidators[0], safeBlocksWithValidators) + cc.fastHotStuffEventLoop.Start() + + return nil +} + +func (cc *ConsensusController) IsRunning() bool { + return cc.fastHotStuffEventLoop.IsRunning() } // HandleFastHostStuffBlockProposal is called when FastHotStuffEventLoop has signaled that it can @@ -90,35 +128,16 @@ func (cc *ConsensusController) handleBlockProposerEvent( event *consensus.FastHotStuffEvent, expectedEventType consensus.FastHotStuffEventType, ) error { - // Validate that the expected event type is a block proposal event type - possibleExpectedEventTypes := []consensus.FastHotStuffEventType{ - consensus.FastHotStuffEventTypeConstructVoteQC, - consensus.FastHotStuffEventTypeConstructTimeoutQC, - } - if !collections.Contains(possibleExpectedEventTypes, expectedEventType) { - return errors.Errorf("Invalid expected event type: %v", expectedEventType) + // Validate that the event's type is the expected proposal event type + if !isValidBlockProposalEvent(event, expectedEventType) { + return errors.Errorf("Unexpected event type: %v vs %v", event.EventType, expectedEventType) } - // The event's type should match the expected event type - if event.EventType != expectedEventType { - return errors.Errorf("Unexpected event type: %v", event.EventType) - } - - // If the event is a regular block proposal event, then we validate and process it as a regular block - if expectedEventType == consensus.FastHotStuffEventTypeConstructVoteQC { - if !consensus.IsProperlyFormedConstructVoteQCEvent(event) { - // If the event is not properly formed, we ignore it and log it. This should never happen. 
- return errors.Errorf("Received improperly formed vote QC construction event: %v", event) - } - } else { // expectedEventType == consensus.FastHotStuffEventTypeConstructTimeoutQC - if !consensus.IsProperlyFormedConstructTimeoutQCEvent(event) { - // If the event is not properly formed, we ignore it and log it. This should never happen. - return errors.Errorf("Received improperly formed timeout QC construction event: %v", event) - } + // Validate that the event is properly formed + if !isProperlyFormedBlockProposalEvent(event) { + return errors.Errorf("Received improperly formed block construction event: %v", event) } - // If the block proposal is properly formed, we try to construct and broadcast the block here. - // Fetch the parent block parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) parentBlock, parentBlockExists := cc.blockchain.blockIndexByHash[*parentBlockHash] @@ -168,53 +187,24 @@ func (cc *ConsensusController) handleBlockProposerEvent( return errors.Wrapf(err, "Error signing random seed hash for block at height %d: ", event.TipBlockHeight+1) } - // Build a UtxoView at the parent block - utxoViewAtParent, err := cc.blockchain.getUtxoViewAtBlockHash(*parentBlock.Hash) + // Construct the unsigned block + unsignedBlock, err := cc.produceUnsignedBlockForBlockProposalEvent(event, proposerRandomSeedSignature) if err != nil { - // This should never happen as long as the parent block is a descendant of the committed tip. 
- return errors.Errorf("Error fetching UtxoView for parent block: %v", parentBlockHash) - } - - var block *MsgDeSoBlock - - if expectedEventType == consensus.FastHotStuffEventTypeConstructVoteQC { - // Construct an unsigned block - block, err = cc.blockProducer.CreateUnsignedBlock( - utxoViewAtParent, - event.TipBlockHeight+1, - event.View, - proposerRandomSeedSignature, - QuorumCertificateFromConsensusInterface(event.QC), - ) - if err != nil { - return errors.Errorf("Error constructing unsigned block: %v", err) - } - } else { // expectedEventType == consensus.FastHotStuffEventTypeConstructTimeoutQC - // Construct an unsigned timeout block - block, err = cc.blockProducer.CreateUnsignedTimeoutBlock( - utxoViewAtParent, - event.TipBlockHeight+1, - event.View, - proposerRandomSeedSignature, - AggregateQuorumCertificateFromConsensusInterface(event.AggregateQC), - ) - if err != nil { - return errors.Errorf("Error constructing unsigned timeout block: %v", err) - } + return errors.Wrapf(err, "Error producing unsigned block for proposal at height %d", event.TipBlockHeight+1) } // Sign the block - blockHash, err := block.Header.Hash() + blockHash, err := unsignedBlock.Header.Hash() if err != nil { return errors.Errorf("Error hashing block: %v", err) } - block.Header.ProposerVotePartialSignature, err = cc.signer.SignBlockProposal(block.Header.ProposedInView, blockHash) + unsignedBlock.Header.ProposerVotePartialSignature, err = cc.signer.SignBlockProposal(unsignedBlock.Header.ProposedInView, blockHash) if err != nil { return errors.Errorf("Error signing block: %v", err) } // Process the block locally - missingBlockHashes, err := cc.tryProcessBlockAsNewTip(block) + missingBlockHashes, err := cc.tryProcessBlockAsNewTip(unsignedBlock) if err != nil { return errors.Errorf("Error processing block locally: %v", err) } @@ -364,14 +354,6 @@ func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastH return nil } -func (cc *ConsensusController) 
HandleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { - // TODO -} - -func (cc *ConsensusController) HandleHeader(pp *Peer, msg *MsgDeSoHeader) { - // TODO -} - func (cc *ConsensusController) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error { // Hold a lock on the consensus controller, because we will need to mutate the Blockchain // and the FastHotStuffEventLoop data structures. @@ -475,6 +457,66 @@ func (cc *ConsensusController) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([]* return nil, nil } +// produceUnsignedBlockForBlockProposalEvent is a helper function that can produce a new block for proposal based +// on Fast-HotStuff block proposal event. This function expects the event to have been pre-validated by the caller. +// If the event is malformed or invalid, then the behavior of this function is undefined. +func (cc *ConsensusController) produceUnsignedBlockForBlockProposalEvent( + event *consensus.FastHotStuffEvent, + proposerRandomSeedSignature *bls.Signature, +) (*MsgDeSoBlock, error) { + // We need to hold a lock on the blockchain to make sure that it is not mutated underneath as we are trying + // to construct a block based on the UtxoView. In practice, this lock ends up being a no-op but it guarantees + // thread-safety by making no assumptions about how other parts of the codebase operate outside of this struct. + cc.blockchain.ChainLock.RLock() + defer cc.blockchain.ChainLock.RUnlock() + + // Get the parent block's hash + parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) + + // Build a UtxoView at the parent block + utxoViewAtParent, err := cc.blockchain.getUtxoViewAtBlockHash(*parentBlockHash) + if err != nil { + // This should never happen as long as the parent block is a descendant of the committed tip. 
+ return nil, errors.Errorf("Error fetching UtxoView for parent block: %v", parentBlockHash) + } + + // Construct an unsigned block + if event.EventType == consensus.FastHotStuffEventTypeConstructVoteQC { + block, err := cc.blockProducer.CreateUnsignedBlock( + utxoViewAtParent, + event.TipBlockHeight+1, + event.View, + proposerRandomSeedSignature, + QuorumCertificateFromConsensusInterface(event.QC), + ) + if err != nil { + return nil, errors.Errorf("Error constructing unsigned block: %v", err) + } + + return block, nil + } + + // Construct an unsigned timeout block + if event.EventType == consensus.FastHotStuffEventTypeConstructTimeoutQC { + block, err := cc.blockProducer.CreateUnsignedTimeoutBlock( + utxoViewAtParent, + event.TipBlockHeight+1, + event.View, + proposerRandomSeedSignature, + AggregateQuorumCertificateFromConsensusInterface(event.AggregateQC), + ) + if err != nil { + return nil, errors.Errorf("Error constructing unsigned timeout block: %v", err) + } + + return block, nil + } + + // We should never reach this if the event had been pre-validated by the caller. We support this + // case here + return nil, errors.Errorf("Unexpected FastHotStuffEventType :%v", event.EventType) +} + // fetchValidatorListsForSafeBlocks takes in a set of safe blocks that can be extended from, and fetches the // the validator set for each safe block. The result is returned as type BlockWithValidatorList so it can be // passed to the FastHotStuffEventLoop. 
If the input blocks precede the committed tip or they do no exist within @@ -508,7 +550,7 @@ func (cc *ConsensusController) fetchValidatorListsForSafeBlocks(blocks []*MsgDeS } // Fetch the next epoch entry - nextEpochEntryAfterCommittedTip, err := utxoView.SimulateNextEpochEntry(epochEntryAtCommittedTip) + nextEpochEntryAfterCommittedTip, err := utxoView.simulateNextEpochEntry(epochEntryAtCommittedTip.EpochNumber, epochEntryAtCommittedTip.FinalBlockHeight) if err != nil { return nil, errors.Errorf("error fetching next epoch entry after committed tip: %v", err) } @@ -568,11 +610,34 @@ func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) return nil, errors.Errorf("error finding epoch number for block height: %v", blockHeight) } -func (bav *UtxoView) SimulateNextEpochEntry(epochEntry *EpochEntry) (*EpochEntry, error) { - return bav.computeNextEpochEntry( - epochEntry.EpochNumber, - epochEntry.FinalBlockHeight, - epochEntry.InitialView, - epochEntry.CreatedAtBlockTimestampNanoSecs, - ) +func isValidBlockProposalEvent(event *consensus.FastHotStuffEvent, expectedEventType consensus.FastHotStuffEventType) bool { + // Validate that the expected event type is a block proposal event type + possibleExpectedEventTypes := []consensus.FastHotStuffEventType{ + consensus.FastHotStuffEventTypeConstructVoteQC, + consensus.FastHotStuffEventTypeConstructTimeoutQC, + } + + // The event's type must be one of the two block proposal hard-coded values + if !collections.Contains(possibleExpectedEventTypes, expectedEventType) { + return false + } + + // The event's type should match the expected event type + if event.EventType != expectedEventType { + return false + } + + return true +} + +func isProperlyFormedBlockProposalEvent(event *consensus.FastHotStuffEvent) bool { + if event.EventType == consensus.FastHotStuffEventTypeConstructVoteQC { + return consensus.IsProperlyFormedConstructVoteQCEvent(event) + } + + if event.EventType == 
consensus.FastHotStuffEventTypeConstructTimeoutQC { + return consensus.IsProperlyFormedConstructTimeoutQCEvent(event) + } + + return false } From d0b3dbd013bbeceec9489c1818a85370c6465c2d Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 4 Dec 2023 21:05:31 -0500 Subject: [PATCH 310/762] Clean Up ConsensusController Error Cases and Messages (#852) --- lib/pos_consensus_controller.go | 69 +++++++++++++++++++--------- lib/pos_consensus_controller_test.go | 9 ++++ 2 files changed, 56 insertions(+), 22 deletions(-) diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index e26d1dacd..7b69c4913 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -37,6 +37,11 @@ func (cc *ConsensusController) Start() error { cc.lock.Lock() defer cc.lock.Unlock() + // The consensus controller can only be kicked off with an uninitialized event loop + if cc.fastHotStuffEventLoop.IsInitialized() { + return errors.New("ConsensusController.Start: FastHotStuffEventLoop is already initialized") + } + // Hold the blockchain's read lock so that the chain cannot be mutated underneath us. In practice, // this is a no-op, but it guarantees thread-safety in the event that other parts of the codebase // change. 
@@ -84,9 +89,13 @@ func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus cc.lock.Lock() defer cc.lock.Unlock() + if !cc.fastHotStuffEventLoop.IsRunning() { + return errors.Errorf("ConsensusController.HandleFastHostStuffBlockProposal: FastHotStuffEventLoop is not running") + } + // Handle the event as a block proposal event for a regular block if err := cc.handleBlockProposerEvent(event, consensus.FastHotStuffEventTypeConstructVoteQC); err != nil { - return errors.Wrapf(err, "HandleFastHostStuffBlockProposal:") + return errors.Wrapf(err, "ConsensusController.HandleFastHostStuffBlockProposal: ") } // Happy path: nothing left to do @@ -102,9 +111,13 @@ func (cc *ConsensusController) HandleFastHostStuffEmptyTimeoutBlockProposal(even cc.lock.Lock() defer cc.lock.Unlock() + if !cc.fastHotStuffEventLoop.IsRunning() { + return errors.Errorf("HandleFastHostStuffEmptyTimeoutBlockProposal: FastHotStuffEventLoop is not running") + } + // Handle the event as a block proposal event for a timeout block if err := cc.handleBlockProposerEvent(event, consensus.FastHotStuffEventTypeConstructTimeoutQC); err != nil { - return errors.Wrapf(err, "HandleFastHostStuffEmptyTimeoutBlockProposal:") + return errors.Wrapf(err, "ConsensusController.HandleFastHostStuffEmptyTimeoutBlockProposal: ") } // Happy path: nothing left to do @@ -238,11 +251,15 @@ func (cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotS cc.lock.Lock() defer cc.lock.Unlock() + if !cc.fastHotStuffEventLoop.IsRunning() { + return errors.Errorf("ConsensusController.HandleFastHostStuffVote: FastHotStuffEventLoop is not running") + } + var err error if !consensus.IsProperlyFormedVoteEvent(event) { // If the event is not properly formed, we ignore it and log it. This should never happen. 
- return errors.Errorf("HandleFastHostStuffVote: Received improperly formed vote event: %v", event) + return errors.Errorf("ConsensusController.HandleFastHostStuffVote: Received improperly formed vote event: %v", event) } // Provided the vote message is properly formed, we construct and broadcast it in a best effort @@ -267,7 +284,7 @@ func (cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotS voteMsg.VotePartialSignature, err = cc.signer.SignValidatorVote(event.View, event.TipBlockHash) if err != nil { // This should never happen as long as the BLS signer is initialized correctly. - return errors.Errorf("HandleFastHostStuffVote: Error signing validator vote: %v", err) + return errors.Errorf("ConsensusController.HandleFastHostStuffVote: Error signing validator vote: %v", err) } // Process the vote message locally in the FastHotStuffEventLoop @@ -275,7 +292,7 @@ func (cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotS // If we can't process the vote locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. Something is very wrong. We should not // broadcast it to the network. - return errors.Errorf("HandleFastHostStuffVote: Error processing vote locally: %v", err) + return errors.Errorf("ConsensusController.HandleFastHostStuffVote: Error processing vote locally: %v", err) } // Broadcast the vote message to the network @@ -299,11 +316,15 @@ func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastH cc.lock.Lock() defer cc.lock.Unlock() + if !cc.fastHotStuffEventLoop.IsRunning() { + return errors.Errorf("ConsensusController.HandleFastHostStuffTimeout: FastHotStuffEventLoop is not running") + } + var err error if !consensus.IsProperlyFormedTimeoutEvent(event) { // If the event is not properly formed, we ignore it and log it. This should never happen. 
- return errors.Errorf("HandleFastHostStuffTimeout: Received improperly formed timeout event: %v", event) + return errors.Errorf("ConsensusController.HandleFastHostStuffTimeout: Received improperly formed timeout event: %v", event) } if event.View != cc.fastHotStuffEventLoop.GetCurrentView() { @@ -312,7 +333,7 @@ func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastH // and an expected race condition in the steady-state. // // Nothing to do here. - return errors.Errorf("HandleFastHostStuffTimeout: Stale timeout event: %v", event) + return errors.Errorf("ConsensusController.HandleFastHostStuffTimeout: Stale timeout event: %v", event) } // Locally advance the event loop's view so that the node is locally running the Fast-HotStuff @@ -321,7 +342,7 @@ func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastH if _, err := cc.fastHotStuffEventLoop.AdvanceViewOnTimeout(); err != nil { // This should never happen as long as the event loop is running. If it happens, we return // the error and let the caller handle it. - return errors.Errorf("HandleFastHostStuffTimeout: Error advancing view on timeout: %v", err) + return errors.Errorf("ConsensusController.HandleFastHostStuffTimeout: Error advancing view on timeout: %v", err) } // Construct the timeout message @@ -335,7 +356,7 @@ func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastH timeoutMsg.TimeoutPartialSignature, err = cc.signer.SignValidatorTimeout(event.View, event.QC.GetView()) if err != nil { // This should never happen as long as the BLS signer is initialized correctly. 
- return errors.Errorf("HandleFastHostStuffTimeout: Error signing validator timeout: %v", err) + return errors.Errorf("ConsensusController.HandleFastHostStuffTimeout: Error signing validator timeout: %v", err) } // Process the timeout message locally in the FastHotStuffEventLoop @@ -344,7 +365,7 @@ func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastH // beyond the committed tip, the timeout message is malformed, or the timeout message is // is duplicated for the same view. In any case, something is very wrong. We should not // broadcast this message to the network. - return errors.Errorf("HandleFastHostStuffTimeout: Error processing timeout locally: %v", err) + return errors.Errorf("ConsensusController.HandleFastHostStuffTimeout: Error processing timeout locally: %v", err) } @@ -360,6 +381,10 @@ func (cc *ConsensusController) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error { cc.lock.Lock() defer cc.lock.Unlock() + if !cc.fastHotStuffEventLoop.IsRunning() { + return errors.Errorf("ConsensusController.HandleBlock: FastHotStuffEventLoop is not running") + } + // Try to apply the block as the new tip of the blockchain. If the block is an orphan, then // we will get back a list of missing ancestor block hashes. We can fetch the missing blocks // from the network and retry. @@ -367,7 +392,7 @@ func (cc *ConsensusController) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error { if err != nil { // If we get an error here, it means something went wrong with the block processing algorithm. // Nothing we can do to recover here. 
- return errors.Errorf("HandleBlock: Error processing block as new tip: %v", err) + return errors.Errorf("ConsensusController.HandleBlock: Error processing block as new tip: %v", err) } // If there are missing block hashes, then we need to fetch the missing blocks from the network @@ -395,7 +420,7 @@ func (cc *ConsensusController) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([]* true, // Make sure we verify signatures in the block ) if err != nil { - return nil, errors.Errorf("HandleFastHostStuffBlockProposal: Error processing block locally: %v", err) + return nil, errors.Errorf("Error processing block locally: %v", err) } // If the incoming block is an orphan, then there's nothing we can do. We return the missing ancestor @@ -412,13 +437,13 @@ func (cc *ConsensusController) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([]* // Fetch the safe blocks that are eligible to be extended from by the next incoming tip block safeBlocks, err := cc.blockchain.GetSafeBlocks() if err != nil { - return nil, errors.Errorf("HandleFastHostStuffBlockProposal: Error fetching safe blocks: %v", err) + return nil, errors.Errorf("error fetching safe blocks: %v", err) } // Fetch the validator set at each safe block safeBlocksWithValidators, err := cc.fetchValidatorListsForSafeBlocks(safeBlocks) if err != nil { - return nil, errors.Errorf("HandleFastHostStuffBlockProposal: Error fetching validator lists for safe blocks: %v", err) + return nil, errors.Errorf("error fetching validator lists for safe blocks: %v", err) } // If the block was processed successfully but was not applied as the new tip, we need up date the safe @@ -427,7 +452,7 @@ func (cc *ConsensusController) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([]* if !successfullyAppliedNewTip { // Update the safe blocks to the FastHotStuffEventLoop if err = cc.fastHotStuffEventLoop.UpdateSafeBlocks(safeBlocksWithValidators); err != nil { - return nil, errors.Errorf("HandleFastHostStuffBlockProposal: Error processing safe blocks 
locally: %v", err) + return nil, errors.Errorf("Error processing safe blocks locally: %v", err) } // Happy path. The safe blocks were successfully updated in the FastHotStuffEventLoop. Nothing left to do. @@ -445,12 +470,12 @@ func (cc *ConsensusController) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([]* // Fetch the validator set at the new tip block tipBlockWithValidators, err := cc.fetchValidatorListsForSafeBlocks([]*MsgDeSoHeader{tipBlock}) if err != nil { - return nil, errors.Errorf("HandleFastHostStuffBlockProposal: Error fetching validator lists for tip block: %v", err) + return nil, errors.Errorf("Error fetching validator lists for tip block: %v", err) } // Pass the new tip and safe blocks to the FastHotStuffEventLoop if err = cc.fastHotStuffEventLoop.ProcessTipBlock(tipBlockWithValidators[0], safeBlocksWithValidators); err != nil { - return nil, errors.Errorf("HandleFastHostStuffBlockProposal: Error processing tip block locally: %v", err) + return nil, errors.Errorf("Error processing tip block locally: %v", err) } // Happy path. The block was processed successfully and applied as the new tip. Nothing left to do. @@ -540,19 +565,19 @@ func (cc *ConsensusController) fetchValidatorListsForSafeBlocks(blocks []*MsgDeS // all of the safe blocks. 
utxoView, err := NewUtxoView(cc.blockchain.db, cc.params, cc.blockchain.postgres, cc.blockchain.snapshot, nil) if err != nil { - return nil, errors.Errorf("error creating UtxoView: %v", err) + return nil, errors.Errorf("Error creating UtxoView: %v", err) } // Fetch the current epoch entry for the committed tip epochEntryAtCommittedTip, err := utxoView.GetCurrentEpochEntry() if err != nil { - return nil, errors.Errorf("error fetching epoch entry for committed tip: %v", err) + return nil, errors.Errorf("Error fetching epoch entry for committed tip: %v", err) } // Fetch the next epoch entry nextEpochEntryAfterCommittedTip, err := utxoView.simulateNextEpochEntry(epochEntryAtCommittedTip.EpochNumber, epochEntryAtCommittedTip.FinalBlockHeight) if err != nil { - return nil, errors.Errorf("error fetching next epoch entry after committed tip: %v", err) + return nil, errors.Errorf("Error fetching next epoch entry after committed tip: %v", err) } // The input blocks can only be part of the current or next epoch entries. @@ -566,7 +591,7 @@ func (cc *ConsensusController) fetchValidatorListsForSafeBlocks(blocks []*MsgDeS // block to the blockchain, and triggering an epoch transition (if at an epoch boundary). epochEntryForBlock, err := getEpochEntryForBlockHeight(block.Height+1, possibleEpochEntriesForBlocks) if err != nil { - return nil, errors.Errorf("error fetching epoch number for block: %v", err) + return nil, errors.Errorf("Error fetching epoch number for block: %v", err) } // Compute the snapshot epoch number for the block. This is the epoch number that the validator set @@ -585,7 +610,7 @@ func (cc *ConsensusController) fetchValidatorListsForSafeBlocks(blocks []*MsgDeS // We don't have the validator set for the block cached. Fetch it from the UtxoView. 
validatorSetAtBlock, err = utxoView.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber) if err != nil { - return nil, errors.Errorf("error fetching validator set for block: %v", err) + return nil, errors.Errorf("Error fetching validator set for block: %v", err) } } diff --git a/lib/pos_consensus_controller_test.go b/lib/pos_consensus_controller_test.go index 5d3e5cb34..927fa299e 100644 --- a/lib/pos_consensus_controller_test.go +++ b/lib/pos_consensus_controller_test.go @@ -30,6 +30,8 @@ func TestConsensusControllerHandleVoteSignal(t *testing.T) { privateKey: blsPrivateKey, }, fastHotStuffEventLoop: &consensus.MockFastHotStuffEventLoop{ + OnIsInitialized: alwaysReturnTrue, + OnIsRunning: alwaysReturnTrue, OnProcessValidatorVote: func(vote consensus.VoteMessage) error { if !consensus.IsProperlyFormedVote(vote) { return errors.Errorf("Bad vote message") @@ -103,6 +105,8 @@ func TestConsensusControllerHandleTimeoutSignal(t *testing.T) { privateKey: blsPrivateKey, }, fastHotStuffEventLoop: &consensus.MockFastHotStuffEventLoop{ + OnIsInitialized: alwaysReturnTrue, + OnIsRunning: alwaysReturnTrue, OnGetCurrentView: func() uint64 { return currentView }, @@ -181,3 +185,8 @@ func TestConsensusControllerHandleTimeoutSignal(t *testing.T) { require.NoError(t, err) } } + +// Mock function that always returns true +func alwaysReturnTrue() bool { + return true +} From 46f020252d790319f199bf3bc73d8d7a70cf03eb Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 4 Dec 2023 21:17:38 -0500 Subject: [PATCH 311/762] Implement PassThrough Handlers For Peer Vote and Timeout Msgs (#853) --- lib/pos_consensus_controller.go | 35 ++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index 7b69c4913..9e766c882 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -301,6 +301,23 @@ func 
(cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotS return nil } +// HandleValidatorVote is called when we receive a validator vote message from a peer. This function processes +// the vote locally in the FastHotStuffEventLoop. +func (cc *ConsensusController) HandleValidatorVote(pp *Peer, msg *MsgDeSoValidatorVote) error { + // No need to hold a lock on the consensus controller because this function is a pass-through + // for the FastHotStuffEventLoop which guarantees thread-safety for its callers + + // Process the vote message locally in the FastHotStuffEventLoop + if err := cc.fastHotStuffEventLoop.ProcessValidatorVote(msg); err != nil { + // If we can't process the vote locally, then it must somehow be malformed, stale, + // or a duplicate vote/timeout for the same view. + return errors.Wrapf(err, "ConsensusController.HandleValidatorVote: Error processing vote: ") + } + + // Happy path + return nil +} + // HandleFastHostStuffTimeout is triggered when the FastHotStuffEventLoop has signaled that // it is ready to time out the current view. This function validates the timeout signal for // staleness. If the signal is valid, then it constructs and broadcasts the timeout msg here. @@ -366,7 +383,6 @@ func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastH // is duplicated for the same view. In any case, something is very wrong. We should not // broadcast this message to the network. return errors.Errorf("ConsensusController.HandleFastHostStuffTimeout: Error processing timeout locally: %v", err) - } // Broadcast the timeout message to the network @@ -375,6 +391,23 @@ func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastH return nil } +// HandleValidatorTimeout is called when we receive a validator timeout message from a peer. This function +// processes the timeout locally in the FastHotStuffEventLoop. 
+func (cc *ConsensusController) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeout) error { + // No need to hold a lock on the consensus controller because this function is a pass-through + // for the FastHotStuffEventLoop which guarantees thread-safety for its callers. + + // Process the timeout message locally in the FastHotStuffEventLoop + if err := cc.fastHotStuffEventLoop.ProcessValidatorTimeout(msg); err != nil { + // If we can't process the timeout locally, then it must somehow be malformed, stale, + // or a duplicate vote/timeout for the same view. + return errors.Wrapf(err, "ConsensusController.HandleValidatorTimeout: Error processing timeout: ") + } + + // Happy path + return nil +} + func (cc *ConsensusController) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error { // Hold a lock on the consensus controller, because we will need to mutate the Blockchain // and the FastHotStuffEventLoop data structures. From 508ce1ba4ba4b9e7c3b0364261d9e91b4efb48dc Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 4 Dec 2023 21:30:23 -0500 Subject: [PATCH 312/762] Comment Out FastHotStuffEventLoop from Server.go (#855) --- lib/server.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/server.go b/lib/server.go index 550ce34a4..b51811915 100644 --- a/lib/server.go +++ b/lib/server.go @@ -61,7 +61,7 @@ type Server struct { eventManager *EventManager TxIndex *TXIndex - fastHotStuffEventLoop consensus.FastHotStuffEventLoop + // fastHotStuffEventLoop consensus.FastHotStuffEventLoop // posMempool *PosMemPool TODO: Add the mempool later // All messages received from peers get sent from the ConnectionManager to the @@ -2325,11 +2325,11 @@ func (srv *Server) _startConsensus() { } select { - case consensusEvent := <-srv.fastHotStuffEventLoop.GetEvents(): - { - glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.TipBlockHeight) - 
srv._handleFastHostStuffConsensusEvent(consensusEvent) - } + // case consensusEvent := <-srv.fastHotStuffEventLoop.GetEvents(): + // { + // glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.TipBlockHeight) + // srv._handleFastHostStuffConsensusEvent(consensusEvent) + // } case serverMessage := <-srv.incomingMessages: { @@ -2490,11 +2490,11 @@ func (srv *Server) Stop() { glog.Infof(CLog(Yellow, "Server.Stop: Closed the Miner")) } - // Stop the PoS block proposer if we have one running. - if srv.fastHotStuffEventLoop != nil { - srv.fastHotStuffEventLoop.Stop() - glog.Infof(CLog(Yellow, "Server.Stop: Closed the fastHotStuffEventLoop")) - } + // // Stop the PoS block proposer if we have one running. + // if srv.fastHotStuffEventLoop != nil { + // srv.fastHotStuffEventLoop.Stop() + // glog.Infof(CLog(Yellow, "Server.Stop: Closed the fastHotStuffEventLoop")) + // } // TODO: Stop the PoS mempool if we have one running. From bd430b3391272ac8647e3d636b2f0fb81bfb1996 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 5 Dec 2023 11:55:29 -0500 Subject: [PATCH 313/762] Add an interface exposing relevant mempool functions for backend consumption (#847) Co-authored-by: Lazy Nina <> --- lib/block_view_access_group_test.go | 2 +- lib/block_view_stake.go | 12 ++-- lib/block_view_validator.go | 12 ++-- lib/blockchain.go | 103 ++++++++++++++-------------- lib/db_utils_test.go | 4 +- lib/legacy_mempool.go | 65 +++++++++++++++++- lib/pos_mempool.go | 55 +++++++++++++++ lib/server.go | 7 +- 8 files changed, 190 insertions(+), 70 deletions(-) diff --git a/lib/block_view_access_group_test.go b/lib/block_view_access_group_test.go index 240fcfbb9..1a9427126 100644 --- a/lib/block_view_access_group_test.go +++ b/lib/block_view_access_group_test.go @@ -460,7 +460,7 @@ func _customCreateAccessGroupTxn( accessGroupKeyName []byte, operationType AccessGroupOperationType, extraData map[string][]byte, - 
minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { txn := &MsgDeSoTxn{ diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index fa003c92f..c279aa1de 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1037,7 +1037,7 @@ func (bc *Blockchain) CreateStakeTxn( metadata *StakeMetadata, extraData map[string][]byte, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, + mempool Mempool, additionalOutputs []*DeSoOutput, ) ( _txn *MsgDeSoTxn, @@ -1064,7 +1064,7 @@ func (bc *Blockchain) CreateStakeTxn( err, "Blockchain.CreateStakeTxn: problem creating new utxo view: ", ) } - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf( @@ -1114,7 +1114,7 @@ func (bc *Blockchain) CreateUnstakeTxn( metadata *UnstakeMetadata, extraData map[string][]byte, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, + mempool Mempool, additionalOutputs []*DeSoOutput, ) ( _txn *MsgDeSoTxn, @@ -1141,7 +1141,7 @@ func (bc *Blockchain) CreateUnstakeTxn( err, "Blockchain.CreateUnstakeTxn: problem creating new utxo view: ", ) } - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf( @@ -1190,7 +1190,7 @@ func (bc *Blockchain) CreateUnlockStakeTxn( metadata *UnlockStakeMetadata, extraData map[string][]byte, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, + mempool Mempool, additionalOutputs []*DeSoOutput, ) ( _txn *MsgDeSoTxn, @@ -1217,7 +1217,7 @@ func (bc *Blockchain) CreateUnlockStakeTxn( err, "Blockchain.CreateUnlockStakeTxn: problem creating new utxo view: ", ) } - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = 
mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf( diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 9f33f0810..3ff80a961 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -934,7 +934,7 @@ func (bc *Blockchain) CreateRegisterAsValidatorTxn( metadata *RegisterAsValidatorMetadata, extraData map[string][]byte, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, + mempool Mempool, additionalOutputs []*DeSoOutput, ) ( _txn *MsgDeSoTxn, @@ -961,7 +961,7 @@ func (bc *Blockchain) CreateRegisterAsValidatorTxn( err, "Blockchain.CreateRegisterAsValidatorTxn: problem creating new utxo view: ", ) } - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf( @@ -1011,7 +1011,7 @@ func (bc *Blockchain) CreateUnregisterAsValidatorTxn( metadata *UnregisterAsValidatorMetadata, extraData map[string][]byte, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, + mempool Mempool, additionalOutputs []*DeSoOutput, ) ( _txn *MsgDeSoTxn, @@ -1038,7 +1038,7 @@ func (bc *Blockchain) CreateUnregisterAsValidatorTxn( err, "Blockchain.CreateUnregisterAsValidatorTxn: problem creating new utxo view: ", ) } - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf( @@ -1087,7 +1087,7 @@ func (bc *Blockchain) CreateUnjailValidatorTxn( metadata *UnjailValidatorMetadata, extraData map[string][]byte, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, + mempool Mempool, additionalOutputs []*DeSoOutput, ) ( _txn *MsgDeSoTxn, @@ -1114,7 +1114,7 @@ func (bc *Blockchain) CreateUnjailValidatorTxn( err, "Blockchain.CreateUnjailValidatorTxn: problem creating new utxo view: ", ) } - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 
0, 0, 0, errors.Wrapf( diff --git a/lib/blockchain.go b/lib/blockchain.go index 07ff192ef..a7efffd3a 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1633,6 +1633,7 @@ func CheckTransactionSanity(txn *MsgDeSoTxn, blockHeight uint32, params *DeSoPar func GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (_commonAncestor *BlockNode, _detachNodes []*BlockNode, _attachNodes []*BlockNode) { // Find the common ancestor of this block and the main header chain. commonAncestor := _FindCommonAncestor(tip, newNode) + // Log a warning if the reorg is going to be a big one. numBlocks := tip.Height - commonAncestor.Height if numBlocks > 10 { @@ -2842,7 +2843,7 @@ func (bc *Blockchain) DisconnectBlocksToHeight(blockHeight uint64, snap *Snapsho // passed-in transaction in the pool and connect them before trying to connect the // passed-in transaction. func (bc *Blockchain) ValidateTransaction( - txnMsg *MsgDeSoTxn, blockHeight uint32, verifySignatures bool, mempool *DeSoMempool) error { + txnMsg *MsgDeSoTxn, blockHeight uint32, verifySignatures bool, mempool Mempool) error { // Create a new UtxoView. If we have access to a mempool object, use it to // get an augmented view that factors in pending transactions. 
@@ -2850,7 +2851,7 @@ func (bc *Blockchain) ValidateTransaction( if err != nil { return errors.Wrapf(err, "ValidateTransaction: Problem Problem creating new utxo view: ") } - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUtxoViewForPublicKey(txnMsg.PublicKey, txnMsg) if err != nil { return errors.Wrapf(err, "ValidateTransaction: Problem getting augmented UtxoView from mempool: ") @@ -2948,7 +2949,7 @@ func ComputeMerkleRoot(txns []*MsgDeSoTxn) (_merkle *BlockHash, _txHashes []*Blo return rootHash, txHashes, nil } -func (bc *Blockchain) GetSpendableUtxosForPublicKey(spendPublicKeyBytes []byte, mempool *DeSoMempool, referenceUtxoView *UtxoView) ([]*UtxoEntry, error) { +func (bc *Blockchain) GetSpendableUtxosForPublicKey(spendPublicKeyBytes []byte, mempool Mempool, referenceUtxoView *UtxoView) ([]*UtxoEntry, error) { // If we have access to a mempool, use it to account for utxos we might not // get otherwise. utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) @@ -2960,7 +2961,7 @@ func (bc *Blockchain) GetSpendableUtxosForPublicKey(spendPublicKeyBytes []byte, if referenceUtxoView != nil { utxoView = referenceUtxoView } else { - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUtxoViewForPublicKey(spendPublicKeyBytes, nil) if err != nil { return nil, errors.Wrapf(err, "Blockchain.GetSpendableUtxosForPublicKey: Problem getting augmented UtxoView from mempool: ") @@ -3004,7 +3005,7 @@ func (bc *Blockchain) GetSpendableUtxosForPublicKey(spendPublicKeyBytes []byte, } // Don't consider utxos that are already consumed by the mempool. 
- if mempool != nil && mempool.CheckSpend(*utxoEntry.UtxoKey) != nil { + if !isInterfaceValueNil(mempool) && mempool.CheckSpend(*utxoEntry.UtxoKey) != nil { continue } @@ -3113,7 +3114,7 @@ func (bc *Blockchain) CreatePrivateMessageTxn( senderMessagingPublicKey []byte, senderMessagingKeyName []byte, recipientMessagingPublicKey []byte, recipientMessagingKeyName []byte, tstampNanos uint64, extraData map[string][]byte, - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { var encryptedMessageBytes []byte @@ -3225,7 +3226,7 @@ func (bc *Blockchain) CreatePrivateMessageTxn( func (bc *Blockchain) CreateLikeTxn( userPublicKey []byte, likedPostHash BlockHash, isUnlike bool, - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { @@ -3258,7 +3259,7 @@ func (bc *Blockchain) CreateLikeTxn( func (bc *Blockchain) CreateFollowTxn( senderPublicKey []byte, followedPublicKey []byte, isUnfollow bool, - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { @@ -3298,7 +3299,7 @@ func (bc *Blockchain) CreateUpdateGlobalParamsTxn(updaterPublicKey []byte, forbiddenPubKey []byte, maxNonceExpirationBlockHeightOffset int64, // Standard transaction fields - extraData map[string][]byte, minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + extraData map[string][]byte, minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn 
*MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { if extraData == nil { @@ -3356,7 +3357,7 @@ func (bc *Blockchain) CreateUpdateBitcoinUSDExchangeRateTxn( updaterPublicKey []byte, usdCentsPerbitcoin uint64, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a transaction containing the UpdateBitcoinUSDExchangeRate fields. @@ -3398,7 +3399,7 @@ func (bc *Blockchain) CreateSubmitPostTxn( postExtraData map[string][]byte, isHidden bool, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Initialize txnExtraData to postExtraData. @@ -3468,7 +3469,7 @@ func (bc *Blockchain) CreateUpdateProfileTxn( AdditionalFees uint64, ExtraData map[string][]byte, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a transaction containing the profile fields. @@ -3511,7 +3512,7 @@ func (bc *Blockchain) CreateSwapIdentityTxn( ToPublicKeyBytes []byte, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a transaction containing the profile fields. 
@@ -3553,7 +3554,7 @@ func (bc *Blockchain) CreateCreatorCoinTxn( MinDeSoExpectedNanos uint64, MinCreatorCoinExpectedNanos uint64, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a transaction containing the creator coin fields. @@ -3600,7 +3601,7 @@ func (bc *Blockchain) CreateCreatorCoinTransferTxn( CreatorCoinToTransferNanos uint64, RecipientPublicKey []byte, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a transaction containing the creator coin fields. @@ -3641,7 +3642,7 @@ func (bc *Blockchain) CreateDAOCoinTxn( // See CreatorCoinMetadataa for an explanation of these fields. metadata *DAOCoinMetadata, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a transaction containing the creator coin fields. @@ -3678,7 +3679,7 @@ func (bc *Blockchain) CreateDAOCoinTransferTxn( UpdaterPublicKey []byte, metadata *DAOCoinTransferMetadata, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a transaction containing the creator coin fields. 
@@ -3715,7 +3716,7 @@ func (bc *Blockchain) CreateDAOCoinLimitOrderTxn( // See DAOCoinLimitOrderMetadata for an explanation of these fields. metadata *DAOCoinLimitOrderMetadata, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Initialize FeeNanos to the maximum uint64 to provide an upper bound on the size of the transaction. @@ -3738,7 +3739,7 @@ func (bc *Blockchain) CreateDAOCoinLimitOrderTxn( return nil, 0, 0, 0, errors.Wrapf(err, "Blockchain.CreateDAOCoinLimitOrderTxn: Problem creating new utxo view: ") } - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, @@ -3916,7 +3917,7 @@ func (bc *Blockchain) CreateCreateNFTTxn( AdditionalCoinRoyalties map[PublicKey]uint64, ExtraData map[string][]byte, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a transaction containing the create NFT fields. @@ -4043,7 +4044,7 @@ func (bc *Blockchain) CreateNFTBidTxn( SerialNumber uint64, BidAmountNanos uint64, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a transaction containing the NFT bid fields. 
txn := &MsgDeSoTxn{ @@ -4060,7 +4061,7 @@ func (bc *Blockchain) CreateNFTBidTxn( var utxoView *UtxoView var err error - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, @@ -4113,7 +4114,7 @@ func (bc *Blockchain) CreateNFTTransferTxn( SerialNumber uint64, EncryptedUnlockableTextBytes []byte, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a transaction containing the NFT transfer fields. @@ -4152,7 +4153,7 @@ func (bc *Blockchain) CreateAcceptNFTTransferTxn( NFTPostHash *BlockHash, SerialNumber uint64, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a transaction containing the accept NFT transfer fields. @@ -4193,7 +4194,7 @@ func (bc *Blockchain) CreateBurnNFTTxn( NFTPostHash *BlockHash, SerialNumber uint64, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a transaction containing the burn NFT fields. 
@@ -4233,7 +4234,7 @@ func (bc *Blockchain) CreateAcceptNFTBidTxn( BidAmountNanos uint64, EncryptedUnlockableTextBytes []byte, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a new UtxoView. If we have access to a mempool object, use it to @@ -4243,7 +4244,7 @@ func (bc *Blockchain) CreateAcceptNFTBidTxn( return nil, 0, 0, 0, errors.Wrapf(err, "Blockchain.CreateAcceptNFTBidTxn: Problem creating new utxo view: ") } - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, @@ -4317,7 +4318,7 @@ func (bc *Blockchain) CreateUpdateNFTTxn( IsBuyNow bool, BuyNowPriceNanos uint64, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a transaction containing the update NFT fields. 
@@ -4368,7 +4369,7 @@ func (bc *Blockchain) CreateAccessGroupTxn( accessGroupKeyName []byte, operationType AccessGroupOperationType, extraData map[string][]byte, - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { txn := &MsgDeSoTxn{ @@ -4404,7 +4405,7 @@ func (bc *Blockchain) CreateAccessGroupMembersTxn( accessGroupMemberList []*AccessGroupMember, operationType AccessGroupMemberOperationType, extraData map[string][]byte, - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { txn := &MsgDeSoTxn{ @@ -4443,7 +4444,7 @@ func (bc *Blockchain) CreateNewMessageTxn( messageType NewMessageType, messageOperation NewMessageOperation, extraData map[string][]byte, - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { txn := &MsgDeSoTxn{ @@ -4545,7 +4546,7 @@ func (bc *Blockchain) CreateCreatorCoinTransferTxnWithDiamonds( DiamondPostHash *BlockHash, DiamondLevel int64, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a new UtxoView. 
If we have access to a mempool object, use it to @@ -4556,7 +4557,7 @@ func (bc *Blockchain) CreateCreatorCoinTransferTxnWithDiamonds( "Blockchain.CreateCreatorCoinTransferTxnWithDiamonds: "+ "Problem creating new utxo view: ") } - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, @@ -4629,7 +4630,7 @@ func (bc *Blockchain) CreateAuthorizeDerivedKeyTxn( memo []byte, transactionSpendingLimitHex string, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { blockHeight := bc.blockTip().Height + 1 @@ -4734,7 +4735,7 @@ func (bc *Blockchain) CreateMessagingKeyTxn( messagingOwnerKeySignature []byte, members []*MessagingGroupMember, extraData map[string][]byte, - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // We don't need to validate info here, so just construct the transaction instead. @@ -4772,7 +4773,7 @@ func (bc *Blockchain) CreateBasicTransferTxnWithDiamonds( DiamondPostHash *BlockHash, DiamondLevel int64, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _spendAmount uint64, _changeAmount uint64, _fees uint64, _err error) { // Create a new UtxoView. 
If we have access to a mempool object, use it to @@ -4783,7 +4784,7 @@ func (bc *Blockchain) CreateBasicTransferTxnWithDiamonds( "Blockchain.CreateBasicTransferTxnWithDiamonds: "+ "Problem creating new utxo view: ") } - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, 0, errors.Wrapf(err, @@ -4851,7 +4852,7 @@ func (bc *Blockchain) CreateBasicTransferTxnWithDiamonds( func (bc *Blockchain) CreateMaxSpend( senderPkBytes []byte, recipientPkBytes []byte, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInputAdded uint64, _spendAmount uint64, _fee uint64, _err error) { txn := &MsgDeSoTxn{ @@ -4871,7 +4872,7 @@ func (bc *Blockchain) CreateMaxSpend( if bc.BlockTip().Height >= bc.params.ForkHeights.BalanceModelBlockHeight { var utxoView *UtxoView var err error - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, @@ -4972,14 +4973,14 @@ func (bc *Blockchain) CreateMaxSpend( // An error is returned if there is not enough input associated with this // public key to satisfy the transaction's output (subject to the minimum feerate). 
func (bc *Blockchain) AddInputsAndChangeToTransaction( - txArg *MsgDeSoTxn, minFeeRateNanosPerKB uint64, mempool *DeSoMempool) ( + txArg *MsgDeSoTxn, minFeeRateNanosPerKB uint64, mempool Mempool) ( _totalInputAdded uint64, _spendAmount uint64, _totalChangeAdded uint64, _fee uint64, _err error) { return bc.AddInputsAndChangeToTransactionWithSubsidy(txArg, minFeeRateNanosPerKB, 0, mempool, 0) } func (bc *Blockchain) AddInputsAndChangeToTransactionWithSubsidy( - txArg *MsgDeSoTxn, minFeeRateNanosPerKB uint64, inputSubsidy uint64, mempool *DeSoMempool, additionalFees uint64) ( + txArg *MsgDeSoTxn, minFeeRateNanosPerKB uint64, inputSubsidy uint64, mempool Mempool, additionalFees uint64) ( _totalInputAdded uint64, _spendAmount uint64, _totalChangeAdded uint64, _fee uint64, _err error) { // The transaction we're working with should never have any inputs @@ -5269,7 +5270,7 @@ func (bc *Blockchain) CreateCreateUserAssociationTxn( metadata *CreateUserAssociationMetadata, extraData map[string][]byte, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, + mempool Mempool, additionalOutputs []*DeSoOutput, ) ( _txn *MsgDeSoTxn, @@ -5297,7 +5298,7 @@ func (bc *Blockchain) CreateDeleteUserAssociationTxn( metadata *DeleteUserAssociationMetadata, extraData map[string][]byte, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, + mempool Mempool, additionalOutputs []*DeSoOutput, ) ( _txn *MsgDeSoTxn, @@ -5325,7 +5326,7 @@ func (bc *Blockchain) CreateCreatePostAssociationTxn( metadata *CreatePostAssociationMetadata, extraData map[string][]byte, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, + mempool Mempool, additionalOutputs []*DeSoOutput, ) ( _txn *MsgDeSoTxn, @@ -5353,7 +5354,7 @@ func (bc *Blockchain) CreateDeletePostAssociationTxn( metadata *DeletePostAssociationMetadata, extraData map[string][]byte, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, + mempool Mempool, additionalOutputs []*DeSoOutput, ) ( _txn *MsgDeSoTxn, @@ -5380,7 +5381,7 @@ func (bc *Blockchain) 
_createAssociationTxn( callingFuncName string, txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, + mempool Mempool, ) ( _txn *MsgDeSoTxn, _totalInput uint64, @@ -5396,7 +5397,7 @@ func (bc *Blockchain) _createAssociationTxn( "%s: problem creating new utxo view: %v", callingFuncName, err, ) } - if mempool != nil { + if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, fmt.Errorf( @@ -5460,7 +5461,7 @@ func (bc *Blockchain) CreateCoinLockupTxn( UnlockTimestampNanoSecs int64, LockupAmountBaseUnits *uint256.Int, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // NOTE: TxInputs is a remnant of the UTXO transaction model. @@ -5509,7 +5510,7 @@ func (bc *Blockchain) CreateCoinLockupTransferTxn( UnlockTimestampNanoSecs int64, LockedCoinsToTransferBaseUnits *uint256.Int, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // NOTE: TxInputs is a remnant of the UTXO transaction model. @@ -5560,7 +5561,7 @@ func (bc *Blockchain) CreateUpdateCoinLockupParamsTxn( NewLockupTransferRestrictions bool, LockupTransferRestrictionStatus TransferRestrictionStatus, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // NOTE: TxInputs is a remnant of the UTXO transaction model. 
@@ -5608,7 +5609,7 @@ func (bc *Blockchain) CreateCoinUnlockTxn( TransactorPublicKey []byte, ProfilePublicKey []byte, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _chainAmount uint64, _fees uint64, _err error) { // NOTE: TxInputs is a remnant of the UTXO transaction model. diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index 288ad9a18..f7f08644e 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -3,10 +3,10 @@ package lib import ( "bytes" "fmt" - "io/ioutil" "log" "math" "math/big" + "os" "testing" "time" @@ -95,7 +95,7 @@ func _GetTestBlockNode() *BlockNode { } func GetTestBadgerDb() (_db *badger.DB, _dir string) { - dir, err := ioutil.TempDir("", "badgerdb") + dir, err := os.MkdirTemp("", "badgerdb") if err != nil { log.Fatal(err) } diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 20b06b967..28af69317 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -225,6 +225,63 @@ type DeSoMempool struct { useDefaultBadgerOptions bool } +// Note that all these functions are stubbed out for now. We don't need them +// but they are required to implement the Mempool interface, so we just +// return errors for now to simplify the usage of DeSoMempool in the backend +// repo. We'll eventually be deprecating DeSoMempool, so we're not particularly +// concerned about this. 
+ +func (mp *DeSoMempool) Start() error { + return errors.New("Not implemented") +} + +func (mp *DeSoMempool) IsRunning() bool { + return !mp.stopped +} + +func (mp *DeSoMempool) AddTransaction(txn *MempoolTransaction, verifySignature bool) error { + return errors.New("Not implemented") +} + +func (mp *DeSoMempool) RemoveTransaction(txnHash *BlockHash) error { + return errors.New("Not implemented") +} + +func (mp *DeSoMempool) GetTransaction(txnHash *BlockHash) *MempoolTransaction { + //TODO implement me + panic("implement me") +} + +func (mp *DeSoMempool) GetTransactions() []*MempoolTransaction { + //TODO implement me + panic("implement me") +} + +func (mp *DeSoMempool) GetIterator() MempoolIterator { + //TODO implement me + panic("implement me") +} + +func (mp *DeSoMempool) Refresh() error { + //TODO implement me + panic("implement me") +} + +func (mp *DeSoMempool) UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) { + //TODO implement me + panic("implement me") +} + +func (mp *DeSoMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { + //TODO implement me + panic("implement me") +} + +func (mp *DeSoMempool) GetOrderedTransactions() []*MempoolTx { + orderedTxns, _, _ := mp.GetTransactionsOrderedByTimeAdded() + return orderedTxns +} + func (mp *DeSoMempool) getBadgerOptions(dir string) badger.Options { if mp.useDefaultBadgerOptions { return DefaultBadgerOptions(dir) @@ -544,7 +601,7 @@ func (mp *DeSoMempool) GetTransactionsOrderedByTimeAdded() (_poolTxns []*Mempool return poolTxns, nil, nil } -func (mp *DeSoMempool) GetTransaction(txId *BlockHash) (txn *MempoolTx) { +func (mp *DeSoMempool) GetMempoolTx(txId *BlockHash) *MempoolTx { return mp.readOnlyUniversalTransactionMap[*txId] } @@ -2362,10 +2419,12 @@ func (mp *DeSoMempool) MempoolTxs() []*MempoolTx { } func (mp *DeSoMempool) GetMempoolSummaryStats() (_summaryStatsMap map[string]*SummaryStats) { - allTxns := mp.readOnlyUniversalTransactionList + return 
convertMempoolTxsToSummaryStats(mp.readOnlyUniversalTransactionList) +} +func convertMempoolTxsToSummaryStats(mempoolTxs []*MempoolTx) map[string]*SummaryStats { transactionSummaryStats := make(map[string]*SummaryStats) - for _, mempoolTx := range allTxns { + for _, mempoolTx := range mempoolTxs { // Update the mempool summary stats. updatedSummaryStats := &SummaryStats{} txnType := mempoolTx.Tx.TxnMeta.GetTxnType().String() diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 291d26a1e..0301d2912 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -30,6 +30,14 @@ type Mempool interface { Refresh() error UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) UpdateGlobalParams(globalParams *GlobalParamsEntry) + + GetAugmentedUniversalView() (*UtxoView, error) + GetAugmentedUtxoViewForPublicKey(pk []byte, optionalTx *MsgDeSoTxn) (*UtxoView, error) + CheckSpend(op UtxoKey) *MsgDeSoTxn + GetOrderedTransactions() []*MempoolTx + IsTransactionInPool(txHash *BlockHash) bool + GetMempoolTx(txHash *BlockHash) *MempoolTx + GetMempoolSummaryStats() map[string]*SummaryStats } type MempoolIterator interface { @@ -580,3 +588,50 @@ func (mp *PosMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { glog.Errorf("PosMempool.UpdateGlobalParams: Problem refreshing mempool: %v", err) } } + +// Implementation of the Mempool interface +// These functions are used by the backend to interact with the mempool. 
+ +func (mp *PosMempool) GetAugmentedUniversalView() (*UtxoView, error) { + if !mp.IsRunning() { + return nil, errors.Wrapf(MempoolErrorNotRunning, "PosMempool.GetAugmentedUniversalView: ") + } + newView, err := mp.readOnlyLatestBlockView.CopyUtxoView() + if err != nil { + return nil, errors.Wrapf(err, "PosMempool.GetAugmentedUniversalView: Problem copying utxo view") + } + return newView, nil +} +func (mp *PosMempool) GetAugmentedUtxoViewForPublicKey(pk []byte, optionalTx *MsgDeSoTxn) (*UtxoView, error) { + return mp.GetAugmentedUniversalView() +} +func (mp *PosMempool) CheckSpend(op UtxoKey) *MsgDeSoTxn { + panic("implement me") +} + +func (mp *PosMempool) GetOrderedTransactions() []*MempoolTx { + mp.RLock() + defer mp.RUnlock() + + if !mp.IsRunning() { + return nil + } + return mp.getTransactionsNoLock() +} + +func (mp *PosMempool) IsTransactionInPool(txHash *BlockHash) bool { + mp.RLock() + defer mp.RUnlock() + _, exists := mp.txnRegister.txnMembership[*txHash] + return exists +} + +func (mp *PosMempool) GetMempoolTx(txHash *BlockHash) *MempoolTx { + mp.RLock() + defer mp.RUnlock() + return mp.txnRegister.txnMembership[*txHash] +} + +func (mp *PosMempool) GetMempoolSummaryStats() map[string]*SummaryStats { + return convertMempoolTxsToSummaryStats(mp.txnRegister.GetFeeTimeTransactions()) +} diff --git a/lib/server.go b/lib/server.go index b51811915..f755317f9 100644 --- a/lib/server.go +++ b/lib/server.go @@ -56,6 +56,7 @@ type Server struct { snapshot *Snapshot forceChecksum bool mempool *DeSoMempool + posMempool *PosMempool miner *DeSoMiner blockProducer *DeSoBlockProducer eventManager *EventManager @@ -218,7 +219,11 @@ func (srv *Server) GetBlockchain() *Blockchain { } // TODO: The hallmark of a messy non-law-of-demeter-following interface... 
-func (srv *Server) GetMempool() *DeSoMempool { +func (srv *Server) GetMempool() Mempool { + tip := srv.blockchain.BlockTip() + if tip.Height >= srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + return srv.posMempool + } return srv.mempool } From be4355d7fdad6f7bbc710563ceca205ae23352cc Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 5 Dec 2023 17:01:34 -0500 Subject: [PATCH 314/762] Clean Up Naming For ConsensusController Message and Event Handlers (#854) --- lib/pos_consensus_controller.go | 44 ++++++++++++++-------------- lib/pos_consensus_controller_test.go | 14 ++++----- lib/server.go | 40 +++++++++++++++++++++---- 3 files changed, 64 insertions(+), 34 deletions(-) diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus_controller.go index 9e766c882..55fcfc19b 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus_controller.go @@ -80,44 +80,44 @@ func (cc *ConsensusController) IsRunning() bool { return cc.fastHotStuffEventLoop.IsRunning() } -// HandleFastHostStuffBlockProposal is called when FastHotStuffEventLoop has signaled that it can +// HandleLocalBlockProposalEvent is called when FastHotStuffEventLoop has signaled that it can // construct a block at a certain block height. This function validates the block proposal signal, // constructs, processes locally, and then broadcasts the block. -func (cc *ConsensusController) HandleFastHostStuffBlockProposal(event *consensus.FastHotStuffEvent) error { +func (cc *ConsensusController) HandleLocalBlockProposalEvent(event *consensus.FastHotStuffEvent) error { // Hold a read and write lock on the consensus controller. This is because we need to check // the current view of the consensus event loop, and to update the blockchain. 
cc.lock.Lock() defer cc.lock.Unlock() if !cc.fastHotStuffEventLoop.IsRunning() { - return errors.Errorf("ConsensusController.HandleFastHostStuffBlockProposal: FastHotStuffEventLoop is not running") + return errors.Errorf("ConsensusController.HandleLocalBlockProposalEvent: FastHotStuffEventLoop is not running") } // Handle the event as a block proposal event for a regular block if err := cc.handleBlockProposerEvent(event, consensus.FastHotStuffEventTypeConstructVoteQC); err != nil { - return errors.Wrapf(err, "ConsensusController.HandleFastHostStuffBlockProposal: ") + return errors.Wrapf(err, "ConsensusController.HandleLocalBlockProposalEvent: ") } // Happy path: nothing left to do return nil } -// HandleFastHostStuffEmptyTimeoutBlockProposal is called when FastHotStuffEventLoop has signaled that it can +// HandleLocalTimeoutBlockProposalEvent is called when FastHotStuffEventLoop has signaled that it can // construct a timeout block at a certain block height. This function validates the timeout block proposal // signal, constructs, processes locally, and then broadcasts the block. -func (cc *ConsensusController) HandleFastHostStuffEmptyTimeoutBlockProposal(event *consensus.FastHotStuffEvent) error { +func (cc *ConsensusController) HandleLocalTimeoutBlockProposalEvent(event *consensus.FastHotStuffEvent) error { // Hold a read and write lock on the consensus controller. This is because we need to check // the current view of the consensus event loop, and to update the blockchain. 
cc.lock.Lock() defer cc.lock.Unlock() if !cc.fastHotStuffEventLoop.IsRunning() { - return errors.Errorf("HandleFastHostStuffEmptyTimeoutBlockProposal: FastHotStuffEventLoop is not running") + return errors.Errorf("ConsensusController.HandleLocalTimeoutBlockProposalEvent: FastHotStuffEventLoop is not running") } // Handle the event as a block proposal event for a timeout block if err := cc.handleBlockProposerEvent(event, consensus.FastHotStuffEventTypeConstructTimeoutQC); err != nil { - return errors.Wrapf(err, "ConsensusController.HandleFastHostStuffEmptyTimeoutBlockProposal: ") + return errors.Wrapf(err, "ConsensusController.HandleLocalTimeoutBlockProposalEvent: ") } // Happy path: nothing left to do @@ -236,7 +236,7 @@ func (cc *ConsensusController) handleBlockProposerEvent( return nil } -// HandleFastHostStuffVote is triggered when FastHotStuffEventLoop has signaled that it wants to +// HandleLocalVoteEvent is triggered when FastHotStuffEventLoop has signaled that it wants to // vote on the current tip. This functions validates the vote signal, then it constructs the // vote message here. // @@ -245,21 +245,21 @@ func (cc *ConsensusController) handleBlockProposerEvent( // 2. Construct the vote message // 3. Process the vote in the consensus module // 4. Broadcast the vote msg to the network -func (cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotStuffEvent) error { +func (cc *ConsensusController) HandleLocalVoteEvent(event *consensus.FastHotStuffEvent) error { // Hold a read lock on the consensus controller. This is because we need to check the // current view and block height of the consensus module. 
cc.lock.Lock() defer cc.lock.Unlock() if !cc.fastHotStuffEventLoop.IsRunning() { - return errors.Errorf("ConsensusController.HandleFastHostStuffVote: FastHotStuffEventLoop is not running") + return errors.Errorf("ConsensusController.HandleLocalVoteEvent: FastHotStuffEventLoop is not running") } var err error if !consensus.IsProperlyFormedVoteEvent(event) { // If the event is not properly formed, we ignore it and log it. This should never happen. - return errors.Errorf("ConsensusController.HandleFastHostStuffVote: Received improperly formed vote event: %v", event) + return errors.Errorf("ConsensusController.HandleLocalVoteEvent: Received improperly formed vote event: %v", event) } // Provided the vote message is properly formed, we construct and broadcast it in a best effort @@ -284,7 +284,7 @@ func (cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotS voteMsg.VotePartialSignature, err = cc.signer.SignValidatorVote(event.View, event.TipBlockHash) if err != nil { // This should never happen as long as the BLS signer is initialized correctly. - return errors.Errorf("ConsensusController.HandleFastHostStuffVote: Error signing validator vote: %v", err) + return errors.Errorf("ConsensusController.HandleLocalVoteEvent: Error signing validator vote: %v", err) } // Process the vote message locally in the FastHotStuffEventLoop @@ -292,7 +292,7 @@ func (cc *ConsensusController) HandleFastHostStuffVote(event *consensus.FastHotS // If we can't process the vote locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. Something is very wrong. We should not // broadcast it to the network. 
- return errors.Errorf("ConsensusController.HandleFastHostStuffVote: Error processing vote locally: %v", err) + return errors.Errorf("ConsensusController.HandleLocalVoteEvent: Error processing vote locally: %v", err) } // Broadcast the vote message to the network @@ -318,7 +318,7 @@ func (cc *ConsensusController) HandleValidatorVote(pp *Peer, msg *MsgDeSoValidat return nil } -// HandleFastHostStuffTimeout is triggered when the FastHotStuffEventLoop has signaled that +// HandleLocalTimeoutEvent is triggered when the FastHotStuffEventLoop has signaled that // it is ready to time out the current view. This function validates the timeout signal for // staleness. If the signal is valid, then it constructs and broadcasts the timeout msg here. // @@ -327,21 +327,21 @@ func (cc *ConsensusController) HandleValidatorVote(pp *Peer, msg *MsgDeSoValidat // 2. Construct the timeout message // 3. Process the timeout in the consensus module // 4. Broadcast the timeout msg to the network -func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastHotStuffEvent) error { +func (cc *ConsensusController) HandleLocalTimeoutEvent(event *consensus.FastHotStuffEvent) error { // Hold a read lock on the consensus controller. This is because we need to check the // current view and block height of the consensus module. cc.lock.Lock() defer cc.lock.Unlock() if !cc.fastHotStuffEventLoop.IsRunning() { - return errors.Errorf("ConsensusController.HandleFastHostStuffTimeout: FastHotStuffEventLoop is not running") + return errors.Errorf("ConsensusController.HandleLocalTimeoutEvent: FastHotStuffEventLoop is not running") } var err error if !consensus.IsProperlyFormedTimeoutEvent(event) { // If the event is not properly formed, we ignore it and log it. This should never happen. 
- return errors.Errorf("ConsensusController.HandleFastHostStuffTimeout: Received improperly formed timeout event: %v", event) + return errors.Errorf("ConsensusController.HandleLocalTimeoutEvent: Received improperly formed timeout event: %v", event) } if event.View != cc.fastHotStuffEventLoop.GetCurrentView() { @@ -350,7 +350,7 @@ func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastH // and an expected race condition in the steady-state. // // Nothing to do here. - return errors.Errorf("ConsensusController.HandleFastHostStuffTimeout: Stale timeout event: %v", event) + return errors.Errorf("ConsensusController.HandleLocalTimeoutEvent: Stale timeout event: %v", event) } // Locally advance the event loop's view so that the node is locally running the Fast-HotStuff @@ -359,7 +359,7 @@ func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastH if _, err := cc.fastHotStuffEventLoop.AdvanceViewOnTimeout(); err != nil { // This should never happen as long as the event loop is running. If it happens, we return // the error and let the caller handle it. - return errors.Errorf("ConsensusController.HandleFastHostStuffTimeout: Error advancing view on timeout: %v", err) + return errors.Errorf("ConsensusController.HandleLocalTimeoutEvent: Error advancing view on timeout: %v", err) } // Construct the timeout message @@ -373,7 +373,7 @@ func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastH timeoutMsg.TimeoutPartialSignature, err = cc.signer.SignValidatorTimeout(event.View, event.QC.GetView()) if err != nil { // This should never happen as long as the BLS signer is initialized correctly. 
- return errors.Errorf("ConsensusController.HandleFastHostStuffTimeout: Error signing validator timeout: %v", err) + return errors.Errorf("ConsensusController.HandleLocalTimeoutEvent: Error signing validator timeout: %v", err) } // Process the timeout message locally in the FastHotStuffEventLoop @@ -382,7 +382,7 @@ func (cc *ConsensusController) HandleFastHostStuffTimeout(event *consensus.FastH // beyond the committed tip, the timeout message is malformed, or the timeout message is // is duplicated for the same view. In any case, something is very wrong. We should not // broadcast this message to the network. - return errors.Errorf("ConsensusController.HandleFastHostStuffTimeout: Error processing timeout locally: %v", err) + return errors.Errorf("ConsensusController.HandleLocalTimeoutEvent: Error processing timeout locally: %v", err) } // Broadcast the timeout message to the network diff --git a/lib/pos_consensus_controller_test.go b/lib/pos_consensus_controller_test.go index 927fa299e..756b8e7e1 100644 --- a/lib/pos_consensus_controller_test.go +++ b/lib/pos_consensus_controller_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestConsensusControllerHandleVoteSignal(t *testing.T) { +func TestConsensusControllerHandleLocalVoteEvent(t *testing.T) { // Create a test private key for the signer blsPrivateKey, err := bls.NewPrivateKey() require.NoError(t, err) @@ -66,7 +66,7 @@ func TestConsensusControllerHandleVoteSignal(t *testing.T) { EventType: consensus.FastHotStuffEventTypeVote, } - err := consensusController.HandleFastHostStuffVote(event) + err := consensusController.HandleLocalVoteEvent(event) require.Contains(t, err.Error(), "Received improperly formed vote event") } @@ -78,12 +78,12 @@ func TestConsensusControllerHandleVoteSignal(t *testing.T) { TipBlockHeight: blockHeader.GetView(), TipBlockHash: blockHash, } - err := consensusController.HandleFastHostStuffVote(event) + err := consensusController.HandleLocalVoteEvent(event) 
require.NoError(t, err) } } -func TestConsensusControllerHandleTimeoutSignal(t *testing.T) { +func TestConsensusControllerHandleLocalTimeoutEvent(t *testing.T) { // Create a test private key for the signer blsPrivateKey, err := bls.NewPrivateKey() require.NoError(t, err) @@ -155,7 +155,7 @@ func TestConsensusControllerHandleTimeoutSignal(t *testing.T) { EventType: consensus.FastHotStuffEventTypeVote, } - err := consensusController.HandleFastHostStuffTimeout(event) + err := consensusController.HandleLocalTimeoutEvent(event) require.Contains(t, err.Error(), "Received improperly formed timeout event") } @@ -168,7 +168,7 @@ func TestConsensusControllerHandleTimeoutSignal(t *testing.T) { TipBlockHash: blockHash, QC: blockHeader.ValidatorsVoteQC, } - err := consensusController.HandleFastHostStuffTimeout(event) + err := consensusController.HandleLocalTimeoutEvent(event) require.Contains(t, err.Error(), "Stale timeout event") } @@ -181,7 +181,7 @@ func TestConsensusControllerHandleTimeoutSignal(t *testing.T) { TipBlockHash: blockHeader.ValidatorsVoteQC.GetBlockHash(), QC: blockHeader.ValidatorsVoteQC, } - err := consensusController.HandleFastHostStuffTimeout(event) + err := consensusController.HandleLocalTimeoutEvent(event) require.NoError(t, err) } } diff --git a/lib/server.go b/lib/server.go index 1b67a0b7f..5a3929f7b 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2241,6 +2241,10 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { srv._handleMempool(serverMessage.Peer, msg) case *MsgDeSoInv: srv._handleInv(serverMessage.Peer, msg) + case *MsgDeSoValidatorVote: + srv._handleValidatorVote(serverMessage.Peer, msg) + case *MsgDeSoValidatorTimeout: + srv._handleValidatorTimeout(serverMessage.Peer, msg) } } @@ -2248,19 +2252,45 @@ func (srv *Server) _handleFastHostStuffConsensusEvent(event *consensus.FastHotSt // This should never happen. If the consensus message handler isn't defined, then something went // wrong during the node initialization. 
We log it and return early to avoid panicking. if srv.consensusController == nil { - glog.Errorf("Server._handleFastHostStuffConsensusEvent: Consensus message handler is nil") + glog.Errorf("Server._handleFastHostStuffConsensusEvent: Consensus controller is nil") return } switch event.EventType { case consensus.FastHotStuffEventTypeVote: - srv.consensusController.HandleFastHostStuffVote(event) + srv.consensusController.HandleLocalVoteEvent(event) case consensus.FastHotStuffEventTypeTimeout: - srv.consensusController.HandleFastHostStuffTimeout(event) + srv.consensusController.HandleLocalTimeoutEvent(event) case consensus.FastHotStuffEventTypeConstructVoteQC: - srv.consensusController.HandleFastHostStuffBlockProposal(event) + srv.consensusController.HandleLocalBlockProposalEvent(event) case consensus.FastHotStuffEventTypeConstructTimeoutQC: - srv.consensusController.HandleFastHostStuffEmptyTimeoutBlockProposal(event) + srv.consensusController.HandleLocalTimeoutBlockProposalEvent(event) + } +} + +func (srv *Server) _handleValidatorVote(pp *Peer, msg *MsgDeSoValidatorVote) { + // It's possible that the consensus controller hasn't been initialized. If so, + // we log an error and move on. + if srv.consensusController == nil { + glog.Errorf("Server._handleValidatorVote: Consensus controller is nil") + return + } + + if err := srv.consensusController.HandleValidatorVote(pp, msg); err != nil { + glog.Errorf("Server._handleValidatorVote: Error handling vote message from peer: %v", err) + } +} + +func (srv *Server) _handleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeout) { + // It's possible that the consensus controller hasn't been initialized. If so, + // we log an error and move on. 
+ if srv.consensusController == nil { + glog.Errorf("Server._handleValidatorTimeout: Consensus controller is nil") + return + } + + if err := srv.consensusController.HandleValidatorTimeout(pp, msg); err != nil { + glog.Errorf("Server._handleValidatorTimeout: Error handling timeout message from peer: %v", err) } } From 2a2bf0fba4d749909279d20692545859eabc60d4 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 5 Dec 2023 17:21:23 -0500 Subject: [PATCH 315/762] PoS Mempool support for VerifyAndBroadcastTransaction (#856) Co-authored-by: Lazy Nina <> --- lib/server.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/server.go b/lib/server.go index f755317f9..95ef2be74 100644 --- a/lib/server.go +++ b/lib/server.go @@ -262,6 +262,14 @@ func (srv *Server) BroadcastTransaction(txn *MsgDeSoTxn) ([]*MempoolTx, error) { func (srv *Server) VerifyAndBroadcastTransaction(txn *MsgDeSoTxn) error { // Grab the block tip and use it as the height for validation. blockHeight := srv.blockchain.BlockTip().Height + if blockHeight >= srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + mtxn := NewMempoolTransaction(txn, uint64(time.Now().UnixMicro())) + // AddTransaction also performs validations. 
+ if err := srv.posMempool.AddTransaction(mtxn, true /*verifySignatures*/); err != nil { + return errors.Wrapf(err, "VerifyAndBroadcastTransaction: problem adding txn to pos mempool") + } + return nil + } err := srv.blockchain.ValidateTransaction( txn, // blockHeight is set to the next block since that's where this From 78f7bb8bdf57b4e972499df7199af38e173871de Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 7 Dec 2023 17:41:47 -0500 Subject: [PATCH 316/762] Add IsDeleted public getters for ValidatorEntry and LockedStakeEntry (#862) Co-authored-by: Lazy Nina <> --- lib/block_view_stake.go | 4 ++++ lib/block_view_validator.go | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index c279aa1de..b05cc0405 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -225,6 +225,10 @@ func (lockedStakeEntry *LockedStakeEntry) GetEncoderType() EncoderType { return EncoderTypeLockedStakeEntry } +func (lockedStakeEntry *LockedStakeEntry) IsDeleted() bool { + return lockedStakeEntry.isDeleted +} + // // TYPES: StakeMetadata // diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 3ff80a961..b6a662676 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -268,6 +268,10 @@ func (validatorEntry *ValidatorEntry) ToBLSPublicKeyPKIDPairEntry() *BLSPublicKe } } +func (validatorEntry *ValidatorEntry) IsDeleted() bool { + return validatorEntry.isDeleted +} + // // TYPES: BLSPublicKeyPKIDPairEntry // From 350a7c0d2486d173ee4a980858a9ada7e69b4299 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 12 Dec 2023 14:10:41 -0500 Subject: [PATCH 317/762] Feature/end of epoch utxo ops (#831) --- lib/block_view.go | 84 ++++++++++++++++++++++------- lib/block_view_derived_key_test.go | 3 +- lib/block_view_types.go | 11 +++- lib/pos_blockchain_test.go | 5 -- lib/pos_epoch_complete_hook.go | 36 
+++++++------ lib/pos_epoch_complete_hook_test.go | 35 +++++++++--- lib/pos_staking_rewards.go | 59 ++++++++++++-------- 7 files changed, 161 insertions(+), 72 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index ac006dbdc..85e6d1241 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -1628,6 +1628,9 @@ func (bav *UtxoView) DisconnectBlock( // After the balance model block height, we may have a delete expired nonces utxo operation. // We need to revert this before iterating over the transactions in the block. + // After the proof of stake fork height, we may have utxo operations for stake distributions. + // Stake distribution UtxoOps may be either an AddBalance or a StakeDistribution operation type. + // We need to revert these before iterating over the transactions in the block. if desoBlock.Header.Height >= uint64(bav.Params.ForkHeights.BalanceModelBlockHeight) { if len(utxoOps) != len(desoBlock.Txns)+1 { return fmt.Errorf( @@ -1635,20 +1638,43 @@ func (bav *UtxoView) DisconnectBlock( " delete expired nonces operation for block %d", desoBlock.Header.Height) } - // We need to revert the delete expired nonces operation. 
- deleteExpiredNoncesUtxoOps := utxoOps[len(utxoOps)-1] - if deleteExpiredNoncesUtxoOps[0].Type != OperationTypeDeleteExpiredNonces { - return fmt.Errorf( - "DisconnectBlock: Expected last utxo op to be delete expired nonces operation for block %d", - desoBlock.Header.Height) - } - if len(deleteExpiredNoncesUtxoOps) != 1 { - return fmt.Errorf( - "DisconnectBlock: Expected exactly utxo op for deleting expired nonces operation for block %d", - desoBlock.Header.Height) - } - for _, nonceEntry := range deleteExpiredNoncesUtxoOps[0].PrevNonceEntries { - bav.SetTransactorNonceEntry(nonceEntry) + var isLastBlockInEpoch bool + isLastBlockInEpoch, err = bav.IsLastBlockInCurrentEpoch(desoBlock.Header.Height) + if err != nil { + return errors.Wrapf(err, "DisconnectBlock: Problem checking if block is last in epoch") + } + blockLevelUtxoOps := utxoOps[len(utxoOps)-1] + for ii := len(blockLevelUtxoOps) - 1; ii >= 0; ii-- { + utxoOp := blockLevelUtxoOps[ii] + switch utxoOp.Type { + case OperationTypeDeleteExpiredNonces: + // We need to revert the delete expired nonces operation. + for _, nonceEntry := range utxoOp.PrevNonceEntries { + bav.SetTransactorNonceEntry(nonceEntry) + } + case OperationTypeAddBalance: + // We don't allow add balance utxo operations unless it's the end of an epoch. + if !isLastBlockInEpoch { + return fmt.Errorf("DisconnectBlock: Found add balance operation in block %d that is not the end of an epoch", desoBlock.Header.Height) + } + // We need to revert the add balance operation. + if err = bav._unAddBalance(utxoOp.BalanceAmountNanos, utxoOp.BalancePublicKey); err != nil { + return errors.Wrapf(err, "DisconnectBlock: Problem unAdding balance %v: ", utxoOp.BalanceAmountNanos) + } + case OperationTypeStakeDistribution: + // We don't allow stake distribution utxo operations unless it's the end of an epoch. 
+ if !isLastBlockInEpoch { + return fmt.Errorf("DisconnectBlock: Found add balance operation in block %d that is not the end of an epoch", desoBlock.Header.Height) + } + if len(utxoOp.PrevStakeEntries) != 1 { + return fmt.Errorf("DisconnectBlock: Expected exactly one prev stake entry for stake distribution op") + } + if utxoOp.PrevValidatorEntry == nil { + return fmt.Errorf("DisconnectBlock: Expected prev validator entry for stake distribution op") + } + bav._setStakeEntryMappings(utxoOp.PrevStakeEntries[0]) + bav._setValidatorEntryMappings(utxoOp.PrevValidatorEntry) + } } } @@ -1660,8 +1686,7 @@ func (bav *UtxoView) DisconnectBlock( utxoOpsForTxn := utxoOps[txnIndex] desoBlockHeight := desoBlock.Header.Height - err := bav.DisconnectTransaction(currentTxn, txnHash, utxoOpsForTxn, uint32(desoBlockHeight)) - if err != nil { + if err = bav.DisconnectTransaction(currentTxn, txnHash, utxoOpsForTxn, uint32(desoBlockHeight)); err != nil { return errors.Wrapf(err, "DisconnectBlock: Problem disconnecting transaction: %v", currentTxn) } } @@ -4105,17 +4130,40 @@ func (bav *UtxoView) ConnectBlock( return nil, RuleErrorBlockRewardExceedsMaxAllowed } + // blockLevelUtxoOps are used to track all state mutations that happen + // after connecting all transactions in the block. These operations + // are always the last utxo operation in a given block. + var blockLevelUtxoOps []*UtxoOperation if blockHeight >= uint64(bav.Params.ForkHeights.BalanceModelBlockHeight) { prevNonces := bav.GetTransactorNonceEntriesToDeleteAtBlockHeight(blockHeight) - utxoOps = append(utxoOps, []*UtxoOperation{{ + blockLevelUtxoOps = append(blockLevelUtxoOps, &UtxoOperation{ Type: OperationTypeDeleteExpiredNonces, PrevNonceEntries: prevNonces, - }}) + }) for _, prevNonceEntry := range prevNonces { bav.DeleteTransactorNonceEntry(prevNonceEntry) } } + // If we're past the PoS Setup Fork Height, check if we should run the end of epoch hook. 
+ if blockHeight >= uint64(bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight) { + isLastBlockInEpoch, err := bav.IsLastBlockInCurrentEpoch(blockHeight) + if err != nil { + return nil, errors.Wrapf(err, "ConnectBlock: error checking if block is last in epoch") + } + if isLastBlockInEpoch { + var utxoOperations []*UtxoOperation + utxoOperations, err = bav.RunEpochCompleteHook(blockHeight, blockHeader.ProposedInView, blockHeader.TstampNanoSecs) + if err != nil { + return nil, errors.Wrapf(err, "ConnectBlock: error running epoch complete hook") + } + blockLevelUtxoOps = append(blockLevelUtxoOps, utxoOperations...) + } + } + + // Append all block level utxo operations to the utxo operations for the block. + utxoOps = append(utxoOps, blockLevelUtxoOps) + // If we made it to the end and this block is valid, advance the tip // of the view to reflect that. blockHash, err := desoBlock.Header.Hash() diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index df8751a9d..db77f44b5 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -10,6 +10,7 @@ import ( "github.com/holiman/uint256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "math" "math/rand" "testing" "time" @@ -877,7 +878,7 @@ func TestBalanceModelAuthorizeDerivedKey(t *testing.T) { // We need to set the block height here to 7 so that encoder migrations have the proper version and heights. // Otherwise, the access groups and associations migrations do not run when encoding Utxo Operations. 
DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = 7 - DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = 7 + DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = math.MaxUint32 TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t) }) } diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 8089ca613..b216804df 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -664,8 +664,8 @@ const ( OperationTypeCoinLockupTransfer OperationType = 46 OperationTypeCoinUnlock OperationType = 47 OperationTypeUpdateCoinLockupParams OperationType = 48 - - // NEXT_TAG = 49 + OperationTypeStakeDistribution OperationType = 49 + // NEXT_TAG = 50 ) func (op OperationType) String() string { @@ -766,6 +766,8 @@ func (op OperationType) String() string { return "OperationTypeCoinLockupTransfer" case OperationTypeCoinUnlock: return "OperationTypeCoinUnlock" + case OperationTypeStakeDistribution: + return "OperationTypeStakeDistribution" } return "OperationTypeUNKNOWN" } @@ -988,6 +990,11 @@ type UtxoOperation struct { // with an UpdateCoinLockupParams transaction. PrevLockupYieldCurvePoint *LockupYieldCurvePoint PrevLockupTransferRestriction TransferRestrictionStatus + + // This value is used by Rosetta to return the amount of DESO that was added + // to a StakeEntry during the end-of-epoch hook. It's needed + // in order to avoid having to re-run the end of epoch hook. + StakeAmountNanosDiff uint64 } func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 909734f40..7beb1b568 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2628,11 +2628,6 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) require.NoError(t, err) - // Run the on epoch complete hook to set the leader schedule. 
- err = latestBlockView.RunEpochCompleteHook(11, 11, uint64(time.Now().UnixNano())) - require.NoError(t, err) - err = latestBlockView.FlushToDb(11) - require.NoError(t, err) maxMempoolPosSizeBytes := uint64(500) mempoolBackupIntervalMillis := uint64(30000) mempool := NewPosMempool(params, _testGetDefaultGlobalParams(), latestBlockView, 11, _dbDirSetup(t), false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 863af693e..83b06d598 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -50,68 +50,70 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) // - Compute the start block height and view number for the next epoch. // - Compute the final block height for the next epoch. // - Update CurrentEpochEntry to the next epoch's. -func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, view uint64, blockTimestampNanoSecs uint64) error { +func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, view uint64, blockTimestampNanoSecs uint64) ([]*UtxoOperation, error) { // Sanity-check that the current block is the last block in the current epoch. // // Note that this will also return true if we're currently at the ProofOfStake1StateSetupBlockHeight // so that we can run the hook for the first time to initialize the CurrentEpochEntry. isLastBlockInCurrentEpoch, err := bav.IsLastBlockInCurrentEpoch(blockHeight) if err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: ") + return nil, errors.Wrapf(err, "RunEpochCompleteHook: ") } if !isLastBlockInCurrentEpoch { - return errors.New("RunEpochCompleteHook: called before current epoch is complete, this should never happen") + return nil, errors.New("RunEpochCompleteHook: called before current epoch is complete, this should never happen") } // Retrieve the CurrentEpochEntry. 
currentEpochEntry, err := bav.GetCurrentEpochEntry() if err != nil { - return errors.Wrapf(err, "runEpochCompleteSnapshotGeneration: problem retrieving CurrentEpochEntry: ") + return nil, errors.Wrapf(err, "runEpochCompleteSnapshotGeneration: problem retrieving CurrentEpochEntry: ") } if currentEpochEntry == nil { - return errors.New("runEpochCompleteSnapshotGeneration: CurrentEpochEntry is nil, this should never happen") + return nil, errors.New("runEpochCompleteSnapshotGeneration: CurrentEpochEntry is nil, this should never happen") } // Step 1: Run All State Mutating Operations - if err := bav.runEpochCompleteStateTransition(blockHeight, blockTimestampNanoSecs); err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: ") + utxoOperations, err := bav.runEpochCompleteStateTransition(blockHeight, blockTimestampNanoSecs) + if err != nil { + return nil, errors.Wrapf(err, "RunEpochCompleteHook: ") } // Step 2: Run All Snapshotting Operations - if err := bav.runEpochCompleteSnapshotGeneration(currentEpochEntry.EpochNumber); err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: ") + if err = bav.runEpochCompleteSnapshotGeneration(currentEpochEntry.EpochNumber); err != nil { + return nil, errors.Wrapf(err, "RunEpochCompleteHook: ") } // TODO: Evict old snapshots when safe to do so. // Step 3: Roll Over to The Next Epoch - if err := bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight, view, blockTimestampNanoSecs); err != nil { - return errors.Wrapf(err, "RunEpochCompleteHook: ") + if err = bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight, view, blockTimestampNanoSecs); err != nil { + return nil, errors.Wrapf(err, "RunEpochCompleteHook: ") } - return nil + return utxoOperations, nil } // Runs all state-mutating operations required when completing an epoch. 
-func (bav *UtxoView) runEpochCompleteStateTransition(blockHeight uint64, blockTimestampNanoSecs uint64) error { +func (bav *UtxoView) runEpochCompleteStateTransition(blockHeight uint64, blockTimestampNanoSecs uint64) ([]*UtxoOperation, error) { // Jail all inactive validators from the current snapshot validator set. This is an O(n) operation // that loops through all active unjailed validators from current epoch's snapshot validator set // and jails them if they have been inactive. // // Note, this this will only run if we are past the ProofOfStake2ConsensusCutoverBlockHeight fork height. if err := bav.JailAllInactiveSnapshotValidators(blockHeight); err != nil { - return errors.Wrapf(err, "runEpochCompleteStateTransition: problem jailing all inactive validators: ") + return nil, errors.Wrapf(err, "runEpochCompleteStateTransition: problem jailing all inactive validators: ") } // Reward all snapshotted stakes from the current snapshot validator set. This is an O(n) operation // that loops through all of the snapshotted stakes and rewards them. // // Note, this this will only run if we are past the ProofOfStake2ConsensusCutoverBlockHeight fork height. - if err := bav.DistributeStakingRewardsToSnapshotStakes(blockHeight, blockTimestampNanoSecs); err != nil { - return errors.Wrapf(err, "runEpochCompleteStateTransition: problem rewarding snapshot stakes: ") + utxoOperations, err := bav.DistributeStakingRewardsToSnapshotStakes(blockHeight, blockTimestampNanoSecs) + if err != nil { + return nil, errors.Wrapf(err, "runEpochCompleteStateTransition: problem rewarding snapshot stakes: ") } - return nil + return utxoOperations, nil } // Generates all required snapshots for the current epoch. 
diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index dc8ca4126..5bfe740cf 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -511,6 +511,13 @@ func TestRunEpochCompleteHook(t *testing.T) { } func TestStakingRewardDistribution(t *testing.T) { + DeSoTestnetParams.DefaultEpochDurationNumBlocks = uint64(1) + GlobalDeSoParams.DefaultEpochDurationNumBlocks = uint64(1) + resetDefaultEpochDurationNumBlocks := func() { + DeSoTestnetParams.DefaultEpochDurationNumBlocks = uint64(3600) + GlobalDeSoParams.DefaultEpochDurationNumBlocks = uint64(3600) + } + defer resetDefaultEpochDurationNumBlocks() // Initialize balance model fork heights. setBalanceModelBlockHeights(t) @@ -722,17 +729,17 @@ func TestStakingRewardDistribution(t *testing.T) { require.Equal(t, m2Balance, uint64(882)) // Test reward computation for m3: - // - m2's original stake was 50 nanos - // - m2's validator m1 has a commission rate of 20% - // - m2's original DESO wallet balance was 932 nanos - // - m2's rewards will be paid out to its DESO wallet + // - m3's original stake was 50 nanos + // - m3's validator m1 has a commission rate of 20% + // - m3's original DESO wallet balance was 932 nanos + // - m3's rewards will be paid out to its DESO wallet // // Reward Computations: - // - m2's total reward for its stake is 50 * [e^0.1 - 1] = 5 nanos - // - m2's reward lost to m1's commission is: 5 nanos * 0.2 = 1 nano + // - m3's total reward for its stake is 50 * [e^0.1 - 1] = 5 nanos + // - m3's reward lost to m1's commission is: 5 nanos * 0.2 = 1 nano // // Final DESO wallet balance: - // - m2's final DESO wallet balance is: 932 + 5 - 1 = 936 nanos + // - m3's final DESO wallet balance is: 932 + 5 - 1 = 936 nanos m3Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m3PkBytes) require.NoError(t, err) require.Equal(t, m3Balance, uint64(936)) @@ -740,6 +747,17 @@ func TestStakingRewardDistribution(t *testing.T) { 
// Test that m3's stake is unchanged. require.Equal(t, stakeEntries[3].StakerPKID, m3PKID) require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) + + // Make sure the validator's total stake amount is correct. + // M0's total stake amount nanos should be the sum of M0 and M2's stakes (444 + 108) + validatorEntry, err := _newUtxoView(testMeta).GetValidatorByPKID(m0PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(444+108)) + + // M1's total stake amount nanos should be the sum of M1 and M3's stakes (200 + 50) + validatorEntry, err = _newUtxoView(testMeta).GetValidatorByPKID(m1PKID) + require.NoError(t, err) + require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(200+50)) } { @@ -883,7 +901,8 @@ func _runOnEpochCompleteHook(testMeta *TestMeta, blockHeight uint64, viewNumber // Set blockTimestampNanoSecs to 1 year * block height. Every time the block height increments, // the timestamp increases by 1 year blockTimestampNanoSecs := blockHeight * 365 * 24 * 3600 * 1e9 - require.NoError(testMeta.t, tmpUtxoView.RunEpochCompleteHook(blockHeight, viewNumber, blockTimestampNanoSecs)) + _, err := tmpUtxoView.RunEpochCompleteHook(blockHeight, viewNumber, blockTimestampNanoSecs) + require.NoError(testMeta.t, err) require.NoError(testMeta.t, tmpUtxoView.FlushToDb(blockHeight)) } diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index 0c8b3e799..73dd89138 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -7,23 +7,23 @@ import ( "github.com/pkg/errors" ) -func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64, blockTimestampNanoSecs uint64) error { +func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64, blockTimestampNanoSecs uint64) ([]*UtxoOperation, error) { // Check if we have switched from PoW to PoS yet. If we have not, then the PoS consensus // has not started yet. 
We don't want to distribute any staking rewards until the PoS consensus begins. if blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { - return nil + return nil, nil } // Retrieve the current EpochEntry. currentEpochEntry, err := bav.GetCurrentEpochEntry() if err != nil { - return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving current EpochEntry: ") + return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving current EpochEntry: ") } // Check if the current epoch's timestamp is somehow greater than the block timestamp. This should never happen as long // as timestamps are moving forward when connecting each block. if currentEpochEntry.CreatedAtBlockTimestampNanoSecs >= blockTimestampNanoSecs { - return errors.Wrapf(RuleErrorBlockTimestampBeforeEpochStartTimestamp, "DistributeStakingRewardsToSnapshotStakes: ") + return nil, errors.Wrapf(RuleErrorBlockTimestampBeforeEpochStartTimestamp, "DistributeStakingRewardsToSnapshotStakes: ") } // Compute the amount of time that has elapsed since the current epoch started. As long as the elapsed time is > 0, @@ -36,7 +36,7 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64 apyBasisPoints := bav.GetCurrentGlobalParamsEntry().StakingRewardsAPYBasisPoints if apyBasisPoints == 0 { // If the APY is zero or not yet defined, then there are no staking rewards to distribute. - return nil + return nil, nil } // Convert the APY from a scaled integer to a float. During the conversion, the interest rate @@ -49,16 +49,17 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64 // that loops through all of the snapshotted stakes and rewards them one by one. 
snapshotStakesToReward, err := bav.GetAllSnapshotStakesToReward() if err != nil { - return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving snapshot stakes to reward: ") + return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem retrieving snapshot stakes to reward: ") } // If there are no stakes to reward, then there's nothing to be done. Exit early here. if len(snapshotStakesToReward) == 0 { - return nil + return nil, nil } // Loop through all of the snapshot stakes; distribute staking rewards to the staker and commissions to // their validator. + var utxoOperations []*UtxoOperation for _, snapshotStakeEntry := range snapshotStakesToReward { if snapshotStakeEntry == nil { // This should never happen. If we encounter a nil entry, then the setter for UtxoView.SnapshotStakesToReward @@ -71,7 +72,7 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64 snapshotStakeEntry, elapsedFractionOfYear, apy, ) if err != nil { - return errors.Wrapf( + return nil, errors.Wrapf( err, "DistributeStakingRewardsToSnapshotStakes: problem computing staker reward and validator commission: ", ) @@ -85,20 +86,24 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64 // Reward the staker their portion of the staking reward. 
if stakerRewardNanos > 0 { - if err = bav.distributeStakingReward(snapshotStakeEntry.ValidatorPKID, snapshotStakeEntry.StakerPKID, stakerRewardNanos); err != nil { - return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem distributing staker reward: ") + var utxoOperation *UtxoOperation + if utxoOperation, err = bav.distributeStakingReward(snapshotStakeEntry.ValidatorPKID, snapshotStakeEntry.StakerPKID, stakerRewardNanos); err != nil { + return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem distributing staker reward: ") } + utxoOperations = append(utxoOperations, utxoOperation) } // Reward the validator their commission from the staking reward. if validatorCommissionNanos > 0 { - if err = bav.distributeValidatorCommission(snapshotStakeEntry.ValidatorPKID, validatorCommissionNanos); err != nil { - return errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem distributing validator commission reward: ") + var utxoOperation *UtxoOperation + if utxoOperation, err = bav.distributeValidatorCommission(snapshotStakeEntry.ValidatorPKID, validatorCommissionNanos); err != nil { + return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem distributing validator commission reward: ") } + utxoOperations = append(utxoOperations, utxoOperation) } } - return nil + return utxoOperations, nil } func (bav *UtxoView) computeStakerRewardAndValidatorCommission( @@ -177,11 +182,11 @@ func (bav *UtxoView) computeStakerRewardAndValidatorCommission( return stakerRewardNanos.Uint64(), validatorCommissionNanos.Uint64(), nil } -func (bav *UtxoView) distributeStakingReward(validatorPKID *PKID, stakerPKID *PKID, rewardNanos uint64) error { +func (bav *UtxoView) distributeStakingReward(validatorPKID *PKID, stakerPKID *PKID, rewardNanos uint64) (*UtxoOperation, error) { // Fetch the staker's latest StakeEntry. 
stakeEntry, err := bav.GetStakeEntry(validatorPKID, stakerPKID) if err != nil { - return errors.Wrapf(err, "distributeStakingReward: problem fetching staker's StakeEntry: ") + return nil, errors.Wrapf(err, "distributeStakingReward: problem fetching staker's StakeEntry: ") } // At this point, there are three possible cases: @@ -189,12 +194,24 @@ func (bav *UtxoView) distributeStakingReward(validatorPKID *PKID, stakerPKID *PK // 2. The stake entry still exists and does not want to restake their rewards. // 3. The stake entry has unstaked since the snapshot was taken. + var utxoOperation *UtxoOperation // For case 1, we distribute the rewards by adding them to the staker's staked amount. if stakeEntry != nil && stakeEntry.RewardMethod == StakingRewardMethodRestake { + validatorEntry, err := bav.GetValidatorByPKID(stakeEntry.ValidatorPKID) + if err != nil { + return nil, errors.Wrapf(err, "distributeStakingReward: problem fetching validator entry: ") + } + utxoOperation = &UtxoOperation{ + Type: OperationTypeStakeDistribution, + PrevStakeEntries: []*StakeEntry{stakeEntry.Copy()}, + PrevValidatorEntry: validatorEntry.Copy(), + StakeAmountNanosDiff: rewardNanos, + } stakeEntry.StakeAmountNanos = uint256.NewInt().Add(stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(rewardNanos)) bav._setStakeEntryMappings(stakeEntry) - - return nil + validatorEntry.TotalStakeAmountNanos = uint256.NewInt().Add(validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(rewardNanos)) + bav._setValidatorEntryMappings(validatorEntry) + return utxoOperation, nil } // For cases 2 and 3, the staker does not want their rewards restaked. The staker is still @@ -202,14 +219,14 @@ func (bav *UtxoView) distributeStakingReward(validatorPKID *PKID, stakerPKID *PK // the rewards directly to the staker's wallet. 
stakerPublicKey := bav.GetPublicKeyForPKID(stakerPKID) - if _, err = bav._addBalance(rewardNanos, stakerPublicKey); err != nil { - return errors.Wrapf(err, "distributeStakingReward: problem adding rewards to staker's DESO balance: ") + if utxoOperation, err = bav._addBalance(rewardNanos, stakerPublicKey); err != nil { + return nil, errors.Wrapf(err, "distributeStakingReward: problem adding rewards to staker's DESO balance: ") } - return nil + return utxoOperation, nil } -func (bav *UtxoView) distributeValidatorCommission(validatorPKID *PKID, commissionNanos uint64) error { +func (bav *UtxoView) distributeValidatorCommission(validatorPKID *PKID, commissionNanos uint64) (*UtxoOperation, error) { // Here, we treat the validator's commission identically to staking rewards. We view commissions as another source of staking // rewards that validators receive at the end of each epoch. And these commissions are eligible to be restaked if the validator // desires. To determine whether to re-stake commissions or pay out the commissions to the validator's wallet, we rely on the From 882e94b0b756e89daf32f81282b623e8f46aa248 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 13 Dec 2023 12:44:41 -0500 Subject: [PATCH 318/762] Add function to validate that ValidatorEntry's total stake amount nanos is equal to sum of all staker's (#872) Co-authored-by: Lazy Nina <> --- lib/block_view_stake.go | 52 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index b05cc0405..343c893a5 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -2137,6 +2137,40 @@ func (bav *UtxoView) IsValidUnlockStakeMetadata(transactorPkBytes []byte, metada return nil } +// IsCorrectValidatorTotalStakeAmountNanos returns true if the total stake amount for the validator +// matches the total stake amount calculated from the StakeEntries in the UtxoView + DB and false 
otherwise. +func (bav *UtxoView) IsCorrectValidatorTotalStakeAmountNanos(validatorEntry *ValidatorEntry) (bool, error) { + // Map of all the stake entries for this validator. + stakeEntryMap := make(map[StakeMapKey]*StakeEntry) + + dbStakeEntries, err := DBGetStakeEntriesForValidatorPKID(bav.Handle, bav.Snapshot, validatorEntry.ValidatorPKID) + if err != nil { + return false, errors.Wrapf(err, "IsCorrectValidatorTotalStakeAmountNanos: error retrieving StakeEntries: ") + } + // Fill the DB entries into the map first. + for _, dbStakeEntry := range dbStakeEntries { + stakeEntryMap[dbStakeEntry.ToMapKey()] = dbStakeEntry + } + + // Merge in results from the view, overwriting results from the DB. + for stakeMapKey, stakeEntry := range bav.StakeMapKeyToStakeEntry { + // Only add entries for this validator. + if stakeEntry.ValidatorPKID.Eq(validatorEntry.ValidatorPKID) { + stakeEntryMap[stakeMapKey] = stakeEntry.Copy() + } + } + // Calculate the total stake amount for the validator. + totalStakeAmountNanos := uint256.NewInt() + for _, stakeEntry := range stakeEntryMap { + // If an entry is deleted, we don't count it towards the total. 
+ if stakeEntry.isDeleted { + continue + } + totalStakeAmountNanos.Add(totalStakeAmountNanos, stakeEntry.StakeAmountNanos) + } + return totalStakeAmountNanos.Eq(validatorEntry.TotalStakeAmountNanos), nil +} + func (bav *UtxoView) SanityCheckStakeTxn( transactorPKID *PKID, utxoOp *UtxoOperation, @@ -2210,6 +2244,15 @@ func (bav *UtxoView) SanityCheckStakeTxn( return errors.New("SanityCheckStakeTxn: TransactorBalance decrease does not match") } + isCorrectTotalStakeAmountNanos, err := bav.IsCorrectValidatorTotalStakeAmountNanos(currentValidatorEntry) + if err != nil { + return errors.Wrapf(err, "SanityCheckStakeTxn: error validating ValidatorEntry.TotalStakeAmountNanos: ") + } + if !isCorrectTotalStakeAmountNanos { + return errors.New("SanityCheckStakeTxn: incorrect TotalStakeAmountNanos for validator after " + + "connecting transaction") + } + return nil } @@ -2286,6 +2329,15 @@ func (bav *UtxoView) SanityCheckUnstakeTxn(transactorPKID *PKID, utxoOp *UtxoOpe return errors.New("SanityCheckUnstakeTxn: LockedAmountNanos increase does not match") } + isCorrectTotalStakeAmountNanos, err := bav.IsCorrectValidatorTotalStakeAmountNanos(currentValidatorEntry) + if err != nil { + return errors.Wrapf(err, "SanityCheckUnstakeTxn: error validating ValidatorEntry.TotalStakeAmountNanos: ") + } + if !isCorrectTotalStakeAmountNanos { + return errors.New("SanityCheckUnstakeTxn: incorrect TotalStakeAmountNanos for validator after " + + "connecting transaction") + } + return nil } From 681f88348cfdab1c997fb2b8251eb893ede60924 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 14 Dec 2023 11:03:43 -0500 Subject: [PATCH 319/762] Rename ConsensusController to FastHotStuffConsensus (#863) * Rename ConsensusController to FastHotStuffConsensus * Rename in server.go --- ...nsensus_controller.go => pos_consensus.go} | 98 +++++++++---------- ...ntroller_test.go => pos_consensus_test.go} | 22 ++--- lib/server.go | 26 ++--- 3 files changed, 73 
insertions(+), 73 deletions(-) rename lib/{pos_consensus_controller.go => pos_consensus.go} (85%) rename lib/{pos_consensus_controller_test.go => pos_consensus_test.go} (89%) diff --git a/lib/pos_consensus_controller.go b/lib/pos_consensus.go similarity index 85% rename from lib/pos_consensus_controller.go rename to lib/pos_consensus.go index 55fcfc19b..8765f137a 100644 --- a/lib/pos_consensus_controller.go +++ b/lib/pos_consensus.go @@ -9,7 +9,7 @@ import ( "github.com/pkg/errors" ) -type ConsensusController struct { +type FastHotStuffConsensus struct { lock sync.RWMutex blockchain *Blockchain blockProducer *PosBlockProducer @@ -19,8 +19,8 @@ type ConsensusController struct { signer *BLSSigner } -func NewConsensusController(params *DeSoParams, blockchain *Blockchain, mempool Mempool, signer *BLSSigner) *ConsensusController { - return &ConsensusController{ +func NewFastHotStuffConsensus(params *DeSoParams, blockchain *Blockchain, mempool Mempool, signer *BLSSigner) *FastHotStuffConsensus { + return &FastHotStuffConsensus{ blockchain: blockchain, blockProducer: NewPosBlockProducer(mempool, params, nil, signer.GetPublicKey()), fastHotStuffEventLoop: consensus.NewFastHotStuffEventLoop(), @@ -29,17 +29,17 @@ func NewConsensusController(params *DeSoParams, blockchain *Blockchain, mempool } } -// ConsensusController.Start initializes and starts the FastHotStuffEventLoop based on the +// FastHotStuffConsensus.Start initializes and starts the FastHotStuffEventLoop based on the // blockchain state. This should only be called once the blockchain has synced, the node is // ready to join the validator network, and the node is able to validate blocks in the steady state. -func (cc *ConsensusController) Start() error { - // Hold the write consensus controller's lock for thread-safety. +func (cc *FastHotStuffConsensus) Start() error { + // Hold the write consensus's lock for thread-safety. 
cc.lock.Lock() defer cc.lock.Unlock() - // The consensus controller can only be kicked off with an uninitialized event loop + // The consensus can only be kicked off with an uninitialized event loop if cc.fastHotStuffEventLoop.IsInitialized() { - return errors.New("ConsensusController.Start: FastHotStuffEventLoop is already initialized") + return errors.New("FastHotStuffConsensus.Start: FastHotStuffEventLoop is already initialized") } // Hold the blockchain's read lock so that the chain cannot be mutated underneath us. In practice, @@ -54,19 +54,19 @@ func (cc *ConsensusController) Start() error { // Fetch the validator set at each safe block tipBlockWithValidators, err := cc.fetchValidatorListsForSafeBlocks([]*MsgDeSoHeader{tipBlock.Header}) if err != nil { - return errors.Errorf("ConsensusController.Start: Error fetching validator list for tip blocks: %v", err) + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching validator list for tip blocks: %v", err) } // Fetch the safe blocks that are eligible to be extended from by the next incoming tip block safeBlocks, err := cc.blockchain.GetSafeBlocks() if err != nil { - return errors.Errorf("ConsensusController.Start: Error fetching safe blocks: %v", err) + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching safe blocks: %v", err) } // Fetch the validator set at each safe block safeBlocksWithValidators, err := cc.fetchValidatorListsForSafeBlocks(safeBlocks) if err != nil { - return errors.Errorf("ConsensusController.Start: Error fetching validator lists for safe blocks: %v", err) + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching validator lists for safe blocks: %v", err) } // Initialize and start the event loop. 
TODO: Pass in the crank timer duration and timeout duration @@ -76,26 +76,26 @@ func (cc *ConsensusController) Start() error { return nil } -func (cc *ConsensusController) IsRunning() bool { +func (cc *FastHotStuffConsensus) IsRunning() bool { return cc.fastHotStuffEventLoop.IsRunning() } // HandleLocalBlockProposalEvent is called when FastHotStuffEventLoop has signaled that it can // construct a block at a certain block height. This function validates the block proposal signal, // constructs, processes locally, and then broadcasts the block. -func (cc *ConsensusController) HandleLocalBlockProposalEvent(event *consensus.FastHotStuffEvent) error { - // Hold a read and write lock on the consensus controller. This is because we need to check +func (cc *FastHotStuffConsensus) HandleLocalBlockProposalEvent(event *consensus.FastHotStuffEvent) error { + // Hold a read and write lock on the consensus. This is because we need to check // the current view of the consensus event loop, and to update the blockchain. cc.lock.Lock() defer cc.lock.Unlock() if !cc.fastHotStuffEventLoop.IsRunning() { - return errors.Errorf("ConsensusController.HandleLocalBlockProposalEvent: FastHotStuffEventLoop is not running") + return errors.Errorf("FastHotStuffConsensus.HandleLocalBlockProposalEvent: FastHotStuffEventLoop is not running") } // Handle the event as a block proposal event for a regular block if err := cc.handleBlockProposerEvent(event, consensus.FastHotStuffEventTypeConstructVoteQC); err != nil { - return errors.Wrapf(err, "ConsensusController.HandleLocalBlockProposalEvent: ") + return errors.Wrapf(err, "FastHotStuffConsensus.HandleLocalBlockProposalEvent: ") } // Happy path: nothing left to do @@ -105,19 +105,19 @@ func (cc *ConsensusController) HandleLocalBlockProposalEvent(event *consensus.Fa // HandleLocalTimeoutBlockProposalEvent is called when FastHotStuffEventLoop has signaled that it can // construct a timeout block at a certain block height. 
This function validates the timeout block proposal // signal, constructs, processes locally, and then broadcasts the block. -func (cc *ConsensusController) HandleLocalTimeoutBlockProposalEvent(event *consensus.FastHotStuffEvent) error { - // Hold a read and write lock on the consensus controller. This is because we need to check +func (cc *FastHotStuffConsensus) HandleLocalTimeoutBlockProposalEvent(event *consensus.FastHotStuffEvent) error { + // Hold a read and write lock on the consensus. This is because we need to check // the current view of the consensus event loop, and to update the blockchain. cc.lock.Lock() defer cc.lock.Unlock() if !cc.fastHotStuffEventLoop.IsRunning() { - return errors.Errorf("ConsensusController.HandleLocalTimeoutBlockProposalEvent: FastHotStuffEventLoop is not running") + return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent: FastHotStuffEventLoop is not running") } // Handle the event as a block proposal event for a timeout block if err := cc.handleBlockProposerEvent(event, consensus.FastHotStuffEventTypeConstructTimeoutQC); err != nil { - return errors.Wrapf(err, "ConsensusController.HandleLocalTimeoutBlockProposalEvent: ") + return errors.Wrapf(err, "FastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent: ") } // Happy path: nothing left to do @@ -137,7 +137,7 @@ func (cc *ConsensusController) HandleLocalTimeoutBlockProposalEvent(event *conse // - This will connect the block to the blockchain, remove the transactions from the // mempool, and process the vote in the FastHotStuffEventLoop // 6. Broadcast the block to the network -func (cc *ConsensusController) handleBlockProposerEvent( +func (cc *FastHotStuffConsensus) handleBlockProposerEvent( event *consensus.FastHotStuffEvent, expectedEventType consensus.FastHotStuffEventType, ) error { @@ -245,21 +245,21 @@ func (cc *ConsensusController) handleBlockProposerEvent( // 2. Construct the vote message // 3. Process the vote in the consensus module // 4. 
Broadcast the vote msg to the network -func (cc *ConsensusController) HandleLocalVoteEvent(event *consensus.FastHotStuffEvent) error { - // Hold a read lock on the consensus controller. This is because we need to check the +func (cc *FastHotStuffConsensus) HandleLocalVoteEvent(event *consensus.FastHotStuffEvent) error { + // Hold a read lock on the consensus. This is because we need to check the // current view and block height of the consensus module. cc.lock.Lock() defer cc.lock.Unlock() if !cc.fastHotStuffEventLoop.IsRunning() { - return errors.Errorf("ConsensusController.HandleLocalVoteEvent: FastHotStuffEventLoop is not running") + return errors.Errorf("FastHotStuffConsensus.HandleLocalVoteEvent: FastHotStuffEventLoop is not running") } var err error if !consensus.IsProperlyFormedVoteEvent(event) { // If the event is not properly formed, we ignore it and log it. This should never happen. - return errors.Errorf("ConsensusController.HandleLocalVoteEvent: Received improperly formed vote event: %v", event) + return errors.Errorf("FastHotStuffConsensus.HandleLocalVoteEvent: Received improperly formed vote event: %v", event) } // Provided the vote message is properly formed, we construct and broadcast it in a best effort @@ -284,7 +284,7 @@ func (cc *ConsensusController) HandleLocalVoteEvent(event *consensus.FastHotStuf voteMsg.VotePartialSignature, err = cc.signer.SignValidatorVote(event.View, event.TipBlockHash) if err != nil { // This should never happen as long as the BLS signer is initialized correctly. 
- return errors.Errorf("ConsensusController.HandleLocalVoteEvent: Error signing validator vote: %v", err) + return errors.Errorf("FastHotStuffConsensus.HandleLocalVoteEvent: Error signing validator vote: %v", err) } // Process the vote message locally in the FastHotStuffEventLoop @@ -292,7 +292,7 @@ func (cc *ConsensusController) HandleLocalVoteEvent(event *consensus.FastHotStuf // If we can't process the vote locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. Something is very wrong. We should not // broadcast it to the network. - return errors.Errorf("ConsensusController.HandleLocalVoteEvent: Error processing vote locally: %v", err) + return errors.Errorf("FastHotStuffConsensus.HandleLocalVoteEvent: Error processing vote locally: %v", err) } // Broadcast the vote message to the network @@ -303,15 +303,15 @@ func (cc *ConsensusController) HandleLocalVoteEvent(event *consensus.FastHotStuf // HandleValidatorVote is called when we receive a validator vote message from a peer. This function processes // the vote locally in the FastHotStuffEventLoop. -func (cc *ConsensusController) HandleValidatorVote(pp *Peer, msg *MsgDeSoValidatorVote) error { - // No need to hold a lock on the consensus controller because this function is a pass-through +func (cc *FastHotStuffConsensus) HandleValidatorVote(pp *Peer, msg *MsgDeSoValidatorVote) error { + // No need to hold a lock on the consensus because this function is a pass-through // for the FastHotStuffEventLoop which guarantees thread-safety for its callers // Process the vote message locally in the FastHotStuffEventLoop if err := cc.fastHotStuffEventLoop.ProcessValidatorVote(msg); err != nil { // If we can't process the vote locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. 
- return errors.Wrapf(err, "ConsensusController.HandleValidatorVote: Error processing vote: ") + return errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorVote: Error processing vote: ") } // Happy path @@ -327,21 +327,21 @@ func (cc *ConsensusController) HandleValidatorVote(pp *Peer, msg *MsgDeSoValidat // 2. Construct the timeout message // 3. Process the timeout in the consensus module // 4. Broadcast the timeout msg to the network -func (cc *ConsensusController) HandleLocalTimeoutEvent(event *consensus.FastHotStuffEvent) error { - // Hold a read lock on the consensus controller. This is because we need to check the +func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHotStuffEvent) error { + // Hold a read lock on the consensus. This is because we need to check the // current view and block height of the consensus module. cc.lock.Lock() defer cc.lock.Unlock() if !cc.fastHotStuffEventLoop.IsRunning() { - return errors.Errorf("ConsensusController.HandleLocalTimeoutEvent: FastHotStuffEventLoop is not running") + return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: FastHotStuffEventLoop is not running") } var err error if !consensus.IsProperlyFormedTimeoutEvent(event) { // If the event is not properly formed, we ignore it and log it. This should never happen. - return errors.Errorf("ConsensusController.HandleLocalTimeoutEvent: Received improperly formed timeout event: %v", event) + return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Received improperly formed timeout event: %v", event) } if event.View != cc.fastHotStuffEventLoop.GetCurrentView() { @@ -350,7 +350,7 @@ func (cc *ConsensusController) HandleLocalTimeoutEvent(event *consensus.FastHotS // and an expected race condition in the steady-state. // // Nothing to do here. 
- return errors.Errorf("ConsensusController.HandleLocalTimeoutEvent: Stale timeout event: %v", event) + return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Stale timeout event: %v", event) } // Locally advance the event loop's view so that the node is locally running the Fast-HotStuff @@ -359,7 +359,7 @@ func (cc *ConsensusController) HandleLocalTimeoutEvent(event *consensus.FastHotS if _, err := cc.fastHotStuffEventLoop.AdvanceViewOnTimeout(); err != nil { // This should never happen as long as the event loop is running. If it happens, we return // the error and let the caller handle it. - return errors.Errorf("ConsensusController.HandleLocalTimeoutEvent: Error advancing view on timeout: %v", err) + return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error advancing view on timeout: %v", err) } // Construct the timeout message @@ -373,7 +373,7 @@ func (cc *ConsensusController) HandleLocalTimeoutEvent(event *consensus.FastHotS timeoutMsg.TimeoutPartialSignature, err = cc.signer.SignValidatorTimeout(event.View, event.QC.GetView()) if err != nil { // This should never happen as long as the BLS signer is initialized correctly. - return errors.Errorf("ConsensusController.HandleLocalTimeoutEvent: Error signing validator timeout: %v", err) + return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error signing validator timeout: %v", err) } // Process the timeout message locally in the FastHotStuffEventLoop @@ -382,7 +382,7 @@ func (cc *ConsensusController) HandleLocalTimeoutEvent(event *consensus.FastHotS // beyond the committed tip, the timeout message is malformed, or the timeout message is // is duplicated for the same view. In any case, something is very wrong. We should not // broadcast this message to the network. 
- return errors.Errorf("ConsensusController.HandleLocalTimeoutEvent: Error processing timeout locally: %v", err) + return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error processing timeout locally: %v", err) } // Broadcast the timeout message to the network @@ -393,29 +393,29 @@ func (cc *ConsensusController) HandleLocalTimeoutEvent(event *consensus.FastHotS // HandleValidatorTimeout is called when we receive a validator timeout message from a peer. This function // processes the timeout locally in the FastHotStuffEventLoop. -func (cc *ConsensusController) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeout) error { - // No need to hold a lock on the consensus controller because this function is a pass-through +func (cc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeout) error { + // No need to hold a lock on the consensus because this function is a pass-through // for the FastHotStuffEventLoop which guarantees thread-safety for its callers. // Process the timeout message locally in the FastHotStuffEventLoop if err := cc.fastHotStuffEventLoop.ProcessValidatorTimeout(msg); err != nil { // If we can't process the timeout locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. - return errors.Wrapf(err, "ConsensusController.HandleValidatorTimeout: Error processing timeout: ") + return errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout: ") } // Happy path return nil } -func (cc *ConsensusController) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error { - // Hold a lock on the consensus controller, because we will need to mutate the Blockchain +func (cc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error { + // Hold a lock on the consensus, because we will need to mutate the Blockchain // and the FastHotStuffEventLoop data structures. 
cc.lock.Lock() defer cc.lock.Unlock() if !cc.fastHotStuffEventLoop.IsRunning() { - return errors.Errorf("ConsensusController.HandleBlock: FastHotStuffEventLoop is not running") + return errors.Errorf("FastHotStuffConsensus.HandleBlock: FastHotStuffEventLoop is not running") } // Try to apply the block as the new tip of the blockchain. If the block is an orphan, then @@ -425,7 +425,7 @@ func (cc *ConsensusController) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error { if err != nil { // If we get an error here, it means something went wrong with the block processing algorithm. // Nothing we can do to recover here. - return errors.Errorf("ConsensusController.HandleBlock: Error processing block as new tip: %v", err) + return errors.Errorf("FastHotStuffConsensus.HandleBlock: Error processing block as new tip: %v", err) } // If there are missing block hashes, then we need to fetch the missing blocks from the network @@ -445,7 +445,7 @@ func (cc *ConsensusController) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error { // // Reference Implementation: // https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L573 -func (cc *ConsensusController) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([]*BlockHash, error) { +func (cc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([]*BlockHash, error) { // Try to apply the block locally as the new tip of the blockchain successfullyAppliedNewTip, _, missingBlockHashes, err := cc.blockchain.processBlockPoS( block, // Pass in the block itself @@ -518,7 +518,7 @@ func (cc *ConsensusController) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([]* // produceUnsignedBlockForBlockProposalEvent is a helper function that can produce a new block for proposal based // on Fast-HotStuff block proposal event. This function expects the event to have been pre-validated by the caller. // If the event is malformed or invalid, then the behavior of this function is undefined. 
-func (cc *ConsensusController) produceUnsignedBlockForBlockProposalEvent( +func (cc *FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( event *consensus.FastHotStuffEvent, proposerRandomSeedSignature *bls.Signature, ) (*MsgDeSoBlock, error) { @@ -581,7 +581,7 @@ func (cc *ConsensusController) produceUnsignedBlockForBlockProposalEvent( // the current or next epoch after the committed tip, then this function returns an error. Note: it is not possible // for safe blocks to precede the committed tip or to belong to an epoch that is more than one epoch ahead of the // committed tip. -func (cc *ConsensusController) fetchValidatorListsForSafeBlocks(blocks []*MsgDeSoHeader) ( +func (cc *FastHotStuffConsensus) fetchValidatorListsForSafeBlocks(blocks []*MsgDeSoHeader) ( []consensus.BlockWithValidatorList, error, ) { @@ -594,7 +594,7 @@ func (cc *ConsensusController) fetchValidatorListsForSafeBlocks(blocks []*MsgDeS // the same validator set, so we can use an in-memory cache to optimize the validator set lookup for them. validatorSetEntriesBySnapshotEpochNumber := make(map[uint64][]*ValidatorEntry) - // Create a UtxoView for the committed tip block. We will use this to fetch the validator set for the + // Create a UtxoView for the committed tip block. We will use this to fetch the validator set for // all of the safe blocks. 
utxoView, err := NewUtxoView(cc.blockchain.db, cc.params, cc.blockchain.postgres, cc.blockchain.snapshot, nil) if err != nil { diff --git a/lib/pos_consensus_controller_test.go b/lib/pos_consensus_test.go similarity index 89% rename from lib/pos_consensus_controller_test.go rename to lib/pos_consensus_test.go index 756b8e7e1..17d3c0b6a 100644 --- a/lib/pos_consensus_controller_test.go +++ b/lib/pos_consensus_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestConsensusControllerHandleLocalVoteEvent(t *testing.T) { +func TestFastHotStuffConsensusHandleLocalVoteEvent(t *testing.T) { // Create a test private key for the signer blsPrivateKey, err := bls.NewPrivateKey() require.NoError(t, err) @@ -23,8 +23,8 @@ func TestConsensusControllerHandleLocalVoteEvent(t *testing.T) { blockHash, err := blockHeader.Hash() require.NoError(t, err) - // Create a mock controller - consensusController := ConsensusController{ + // Create a mock consensus + fastHotStuffConsensus := FastHotStuffConsensus{ lock: sync.RWMutex{}, signer: &BLSSigner{ privateKey: blsPrivateKey, @@ -66,7 +66,7 @@ func TestConsensusControllerHandleLocalVoteEvent(t *testing.T) { EventType: consensus.FastHotStuffEventTypeVote, } - err := consensusController.HandleLocalVoteEvent(event) + err := fastHotStuffConsensus.HandleLocalVoteEvent(event) require.Contains(t, err.Error(), "Received improperly formed vote event") } @@ -78,12 +78,12 @@ func TestConsensusControllerHandleLocalVoteEvent(t *testing.T) { TipBlockHeight: blockHeader.GetView(), TipBlockHash: blockHash, } - err := consensusController.HandleLocalVoteEvent(event) + err := fastHotStuffConsensus.HandleLocalVoteEvent(event) require.NoError(t, err) } } -func TestConsensusControllerHandleLocalTimeoutEvent(t *testing.T) { +func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { // Create a test private key for the signer blsPrivateKey, err := bls.NewPrivateKey() require.NoError(t, err) @@ -98,8 +98,8 @@ func 
TestConsensusControllerHandleLocalTimeoutEvent(t *testing.T) { currentView := blockHeader.ValidatorsVoteQC.GetView() + 1 nextView := currentView + 1 - // Create a mock controller - consensusController := ConsensusController{ + // Create a mock consensus + fastHotStuffConsensus := FastHotStuffConsensus{ lock: sync.RWMutex{}, signer: &BLSSigner{ privateKey: blsPrivateKey, @@ -155,7 +155,7 @@ func TestConsensusControllerHandleLocalTimeoutEvent(t *testing.T) { EventType: consensus.FastHotStuffEventTypeVote, } - err := consensusController.HandleLocalTimeoutEvent(event) + err := fastHotStuffConsensus.HandleLocalTimeoutEvent(event) require.Contains(t, err.Error(), "Received improperly formed timeout event") } @@ -168,7 +168,7 @@ func TestConsensusControllerHandleLocalTimeoutEvent(t *testing.T) { TipBlockHash: blockHash, QC: blockHeader.ValidatorsVoteQC, } - err := consensusController.HandleLocalTimeoutEvent(event) + err := fastHotStuffConsensus.HandleLocalTimeoutEvent(event) require.Contains(t, err.Error(), "Stale timeout event") } @@ -181,7 +181,7 @@ func TestConsensusControllerHandleLocalTimeoutEvent(t *testing.T) { TipBlockHash: blockHeader.ValidatorsVoteQC.GetBlockHash(), QC: blockHeader.ValidatorsVoteQC, } - err := consensusController.HandleLocalTimeoutEvent(event) + err := fastHotStuffConsensus.HandleLocalTimeoutEvent(event) require.NoError(t, err) } } diff --git a/lib/server.go b/lib/server.go index 5a3929f7b..0a05e6db9 100644 --- a/lib/server.go +++ b/lib/server.go @@ -61,7 +61,7 @@ type Server struct { eventManager *EventManager TxIndex *TXIndex - consensusController *ConsensusController + FastHotStuffConsensus *FastHotStuffConsensus // posMempool *PosMemPool TODO: Add the mempool later // All messages received from peers get sent from the ConnectionManager to the @@ -2251,32 +2251,32 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { func (srv *Server) _handleFastHostStuffConsensusEvent(event *consensus.FastHotStuffEvent) { // This should 
never happen. If the consensus message handler isn't defined, then something went // wrong during the node initialization. We log it and return early to avoid panicking. - if srv.consensusController == nil { + if srv.FastHotStuffConsensus == nil { glog.Errorf("Server._handleFastHostStuffConsensusEvent: Consensus controller is nil") return } switch event.EventType { case consensus.FastHotStuffEventTypeVote: - srv.consensusController.HandleLocalVoteEvent(event) + srv.FastHotStuffConsensus.HandleLocalVoteEvent(event) case consensus.FastHotStuffEventTypeTimeout: - srv.consensusController.HandleLocalTimeoutEvent(event) + srv.FastHotStuffConsensus.HandleLocalTimeoutEvent(event) case consensus.FastHotStuffEventTypeConstructVoteQC: - srv.consensusController.HandleLocalBlockProposalEvent(event) + srv.FastHotStuffConsensus.HandleLocalBlockProposalEvent(event) case consensus.FastHotStuffEventTypeConstructTimeoutQC: - srv.consensusController.HandleLocalTimeoutBlockProposalEvent(event) + srv.FastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent(event) } } func (srv *Server) _handleValidatorVote(pp *Peer, msg *MsgDeSoValidatorVote) { // It's possible that the consensus controller hasn't been initialized. If so, // we log an error and move on. - if srv.consensusController == nil { + if srv.FastHotStuffConsensus == nil { glog.Errorf("Server._handleValidatorVote: Consensus controller is nil") return } - if err := srv.consensusController.HandleValidatorVote(pp, msg); err != nil { + if err := srv.FastHotStuffConsensus.HandleValidatorVote(pp, msg); err != nil { glog.Errorf("Server._handleValidatorVote: Error handling vote message from peer: %v", err) } } @@ -2284,12 +2284,12 @@ func (srv *Server) _handleValidatorVote(pp *Peer, msg *MsgDeSoValidatorVote) { func (srv *Server) _handleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeout) { // It's possible that the consensus controller hasn't been initialized. If so, // we log an error and move on. 
- if srv.consensusController == nil { + if srv.FastHotStuffConsensus == nil { glog.Errorf("Server._handleValidatorTimeout: Consensus controller is nil") return } - if err := srv.consensusController.HandleValidatorTimeout(pp, msg); err != nil { + if err := srv.FastHotStuffConsensus.HandleValidatorTimeout(pp, msg); err != nil { glog.Errorf("Server._handleValidatorTimeout: Error handling timeout message from peer: %v", err) } } @@ -2319,7 +2319,7 @@ func (srv *Server) _startConsensus() { } select { - case consensusEvent := <-srv.consensusController.fastHotStuffEventLoop.GetEvents(): + case consensusEvent := <-srv.FastHotStuffConsensus.fastHotStuffEventLoop.GetEvents(): { glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.TipBlockHeight) srv._handleFastHostStuffConsensusEvent(consensusEvent) @@ -2485,8 +2485,8 @@ func (srv *Server) Stop() { } // Stop the PoS block proposer if we have one running. - if srv.consensusController != nil { - srv.consensusController.fastHotStuffEventLoop.Stop() + if srv.FastHotStuffConsensus != nil { + srv.FastHotStuffConsensus.fastHotStuffEventLoop.Stop() glog.Infof(CLog(Yellow, "Server.Stop: Closed the fastHotStuffEventLoop")) } From 535393e86cc28a75cdc9db60115be9211ecfa517 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Thu, 14 Dec 2023 11:08:46 -0500 Subject: [PATCH 320/762] Fix merge --- lib/server.go | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/lib/server.go b/lib/server.go index 95091c0a2..3bca2514c 100644 --- a/lib/server.go +++ b/lib/server.go @@ -62,7 +62,7 @@ type Server struct { eventManager *EventManager TxIndex *TXIndex - // fastHotStuffEventLoop consensus.FastHotStuffEventLoop + fastHotStuffConsensus *FastHotStuffConsensus // posMempool *PosMemPool TODO: Add the mempool later // All messages received from peers get sent from the ConnectionManager to the @@ -2264,32 +2264,32 @@ func (srv *Server) 
_handlePeerMessages(serverMessage *ServerMessage) { func (srv *Server) _handleFastHostStuffConsensusEvent(event *consensus.FastHotStuffEvent) { // This should never happen. If the consensus message handler isn't defined, then something went // wrong during the node initialization. We log it and return early to avoid panicking. - if srv.FastHotStuffConsensus == nil { + if srv.fastHotStuffConsensus == nil { glog.Errorf("Server._handleFastHostStuffConsensusEvent: Consensus controller is nil") return } switch event.EventType { case consensus.FastHotStuffEventTypeVote: - srv.FastHotStuffConsensus.HandleLocalVoteEvent(event) + srv.fastHotStuffConsensus.HandleLocalVoteEvent(event) case consensus.FastHotStuffEventTypeTimeout: - srv.FastHotStuffConsensus.HandleLocalTimeoutEvent(event) + srv.fastHotStuffConsensus.HandleLocalTimeoutEvent(event) case consensus.FastHotStuffEventTypeConstructVoteQC: - srv.FastHotStuffConsensus.HandleLocalBlockProposalEvent(event) + srv.fastHotStuffConsensus.HandleLocalBlockProposalEvent(event) case consensus.FastHotStuffEventTypeConstructTimeoutQC: - srv.FastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent(event) + srv.fastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent(event) } } func (srv *Server) _handleValidatorVote(pp *Peer, msg *MsgDeSoValidatorVote) { // It's possible that the consensus controller hasn't been initialized. If so, // we log an error and move on. 
- if srv.FastHotStuffConsensus == nil { + if srv.fastHotStuffConsensus == nil { glog.Errorf("Server._handleValidatorVote: Consensus controller is nil") return } - if err := srv.FastHotStuffConsensus.HandleValidatorVote(pp, msg); err != nil { + if err := srv.fastHotStuffConsensus.HandleValidatorVote(pp, msg); err != nil { glog.Errorf("Server._handleValidatorVote: Error handling vote message from peer: %v", err) } } @@ -2297,12 +2297,12 @@ func (srv *Server) _handleValidatorVote(pp *Peer, msg *MsgDeSoValidatorVote) { func (srv *Server) _handleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeout) { // It's possible that the consensus controller hasn't been initialized. If so, // we log an error and move on. - if srv.FastHotStuffConsensus == nil { + if srv.fastHotStuffConsensus == nil { glog.Errorf("Server._handleValidatorTimeout: Consensus controller is nil") return } - if err := srv.FastHotStuffConsensus.HandleValidatorTimeout(pp, msg); err != nil { + if err := srv.fastHotStuffConsensus.HandleValidatorTimeout(pp, msg); err != nil { glog.Errorf("Server._handleValidatorTimeout: Error handling timeout message from peer: %v", err) } } @@ -2332,11 +2332,11 @@ func (srv *Server) _startConsensus() { } select { - // case consensusEvent := <-srv.fastHotStuffEventLoop.GetEvents(): - // { - // glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.TipBlockHeight) - // srv._handleFastHostStuffConsensusEvent(consensusEvent) - // } + case consensusEvent := <-srv.fastHotStuffConsensus.fastHotStuffEventLoop.GetEvents(): + { + glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.TipBlockHeight) + srv._handleFastHostStuffConsensusEvent(consensusEvent) + } case serverMessage := <-srv.incomingMessages: { @@ -2497,11 +2497,11 @@ func (srv *Server) Stop() { glog.Infof(CLog(Yellow, "Server.Stop: Closed the Miner")) } - // // Stop the PoS block proposer if we have one running. 
- // if srv.fastHotStuffEventLoop != nil { - // srv.fastHotStuffEventLoop.Stop() - // glog.Infof(CLog(Yellow, "Server.Stop: Closed the fastHotStuffEventLoop")) - // } + // Stop the PoS block proposer if we have one running. + if srv.fastHotStuffConsensus != nil { + srv.fastHotStuffConsensus.fastHotStuffEventLoop.Stop() + glog.Infof(CLog(Yellow, "Server.Stop: Closed the fastHotStuffEventLoop")) + } // TODO: Stop the PoS mempool if we have one running. From fefb1008d25ead44c3f0c0c0a64016cbff3462f9 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Fri, 15 Dec 2023 13:52:10 -0500 Subject: [PATCH 321/762] Add BLS Keystore --- lib/bls_keystore.go | 147 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 147 insertions(+) create mode 100644 lib/bls_keystore.go diff --git a/lib/bls_keystore.go b/lib/bls_keystore.go new file mode 100644 index 000000000..b1356da6c --- /dev/null +++ b/lib/bls_keystore.go @@ -0,0 +1,147 @@ +package lib + +import ( + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/consensus" + "github.com/pkg/errors" +) + +// BLSSigner is a wrapper for the bls.PrivateKey type, which abstracts away the private key +// and only exposes protected methods for signing a select set of message types needed for +// Proof of Stake. It allows signing for: +// - PoS Validator Votes Messages +// - PoS Validator Timeout Messages +// - PoS Block Proposals +// - PoS Validator Connection Handshakes +// - PoS Random Seed Signature +// +// TODO: We will likely need to associate individual op-codes for each message type that can be signed, +// so that there is no risk of signature collisions between different message types. 
Ex: the payload +// signed per message type must be made up of the following tuples: +// - Validator Vote: (0x01, view uint64, blockHash consensus.BlockHash) +// - Validator Timeout: (0x02, view uint64, highQCView uint64) +// - PoS Block Proposal: (0x03, view uint64, blockHash consensus.BlockHash) +// - PoS Validator Handshake: (0x04, peer's random nonce, our node's random nonce) +// - PoS Random Seed Signature: (previous block's random seed hash) + +type BLSSignatureOpCode byte + +const ( + BLSSignatureOpCodeValidatorVote BLSSignatureOpCode = 0 + BLSSignatureOpCodeValidatorTimeout BLSSignatureOpCode = 1 + BLSSignatureOpCodePoSBlockProposal BLSSignatureOpCode = 2 + BLSSignatureOpCodePoSValidatorHandshake BLSSignatureOpCode = 3 +) + +func (opCode BLSSignatureOpCode) Bytes() []byte { + return []byte{byte(opCode)} +} + +////////////////////////////////////////////////////////// +// BLSKeystore +////////////////////////////////////////////////////////// + +type BLSKeystore struct { + signer *BLSSigner +} + +func NewBLSKeystore(seed string) (*BLSKeystore, error) { + privateKey, err := bls.NewPrivateKey() + if err != nil { + return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating private key from seed: %s", seed) + } + if _, err = privateKey.FromString(seed); err != nil { + return nil, errors.Wrapf(err, "NewBLSKeystore: Problem retrieving private key from seed: %s", seed) + } + + signer, err := NewBLSSigner(privateKey) + if err != nil { + return nil, err + } + return &BLSKeystore{signer: signer}, nil +} + +func (keystore *BLSKeystore) GetSigner() *BLSSigner { + return keystore.signer +} + +////////////////////////////////////////////////////////// +// BLSSigner +////////////////////////////////////////////////////////// + +type BLSSigner struct { + privateKey *bls.PrivateKey +} + +func NewBLSSigner(privateKey *bls.PrivateKey) (*BLSSigner, error) { + if privateKey == nil { + return nil, errors.New("NewBLSSigner: privateKey cannot be nil") + } + return 
&BLSSigner{privateKey: privateKey}, nil +} + +func (signer *BLSSigner) sign(opCode BLSSignatureOpCode, payload []byte) (*bls.Signature, error) { + newPayload := append(opCode.Bytes(), payload...) + return signer.privateKey.Sign(newPayload) +} + +func (signer *BLSSigner) GetPublicKey() *bls.PublicKey { + return signer.privateKey.PublicKey() +} + +func (signer *BLSSigner) SignBlockProposal(view uint64, blockHash consensus.BlockHash) (*bls.Signature, error) { + // A block proposer's signature on a block is just its partial vote signature. This allows us to aggregate + // signatures from the proposer and validators into a single aggregated signature to build a QC. + return signer.SignValidatorVote(view, blockHash) +} + +func (signer *BLSSigner) SignValidatorVote(view uint64, blockHash consensus.BlockHash) (*bls.Signature, error) { + payload := consensus.GetVoteSignaturePayload(view, blockHash) + return signer.sign(BLSSignatureOpCodeValidatorVote, payload[:]) +} + +func (signer *BLSSigner) SignValidatorTimeout(view uint64, highQCView uint64) (*bls.Signature, error) { + payload := consensus.GetTimeoutSignaturePayload(view, highQCView) + return signer.sign(BLSSignatureOpCodeValidatorTimeout, payload[:]) +} + +func (signer *BLSSigner) SignRandomSeedHash(randomSeedHash *RandomSeedHash) (*bls.Signature, error) { + return SignRandomSeedHash(signer.privateKey, randomSeedHash) +} + +// TODO: Add signing function for PoS blocks + +func (signer *BLSSigner) SignPoSValidatorHandshake(nonceSent uint64, nonceReceived uint64, tstampMicro uint64) (*bls.Signature, error) { + // FIXME + payload := []byte{} + return signer.sign(BLSSignatureOpCodePoSValidatorHandshake, payload[:]) +} + +////////////////////////////////////////////////////////// +// BLS Verification +////////////////////////////////////////////////////////// + +func _blsVerify(opCode BLSSignatureOpCode, payload []byte, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { + newPayload := append(opCode.Bytes(), 
payload...) + return publicKey.Verify(signature, newPayload) +} + +func BLSVerifyValidatorVote(view uint64, blockHash consensus.BlockHash, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { + payload := consensus.GetVoteSignaturePayload(view, blockHash) + return _blsVerify(BLSSignatureOpCodeValidatorVote, payload[:], signature, publicKey) +} + +func BLSVerifyValidatorTimeout(view uint64, highQCView uint64, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { + payload := consensus.GetTimeoutSignaturePayload(view, highQCView) + return _blsVerify(BLSSignatureOpCodeValidatorTimeout, payload[:], signature, publicKey) +} + +// TODO: Add Verifier function for PoS blocks + +func BLSVerifyPoSValidatorHandshake(nonceSent uint64, nonceReceived uint64, tstampMicro uint64, + signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { + + // FIXME + payload := []byte{} + return _blsVerify(BLSSignatureOpCodePoSValidatorHandshake, payload[:], signature, publicKey) +} From 332f28314a83a10ad3fed48ce4b788f2bb21e9c7 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Sat, 16 Dec 2023 13:16:52 -0500 Subject: [PATCH 322/762] Add TODO for DH --- lib/pos_consensus.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 8765f137a..509f7a8de 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -511,6 +511,9 @@ func (cc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ return nil, errors.Errorf("Error processing tip block locally: %v", err) } + // TODO: Remove the new block's txns from the mempool. Add txns back to the mempoool from blocks that have + // been reorged away from. + // Happy path. The block was processed successfully and applied as the new tip. Nothing left to do. 
return nil, nil } From 70c7888aa63ec817a1bee771992b2284b0ce20ad Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 22 Dec 2023 11:55:47 -0500 Subject: [PATCH 323/762] Merge main into feature pos (#886) * Add extra data to max spend and diamond txn construction (#883) * Only encode state change metadata if flag is set (#884) Co-authored-by: Lazy Nina <> --------- Co-authored-by: Lazy Nina <> --- cmd/run.go | 3 ++- lib/block_view_post_test.go | 1 + lib/block_view_types.go | 28 ++++++++++++++++++++++++++-- lib/blockchain.go | 14 +++++++++++--- 4 files changed, 40 insertions(+), 6 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index e6231fdd8..a470a5f6e 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -203,7 +203,8 @@ func SetupRunFlags(cmd *cobra.Command) { cmd.PersistentFlags().Bool("log-db-summary-snapshots", false, "The node will log a snapshot of all DB keys every 30s.") cmd.PersistentFlags().Bool("datadog-profiler", false, "Enable the DataDog profiler for performance testing") cmd.PersistentFlags().Bool("time-events", false, "Enable simple event timer, helpful in hands-on performance testing") - + cmd.PersistentFlags().String("state-change-dir", "", "The directory for state change logs. 
WARNING: Changing this "+ + "from an empty string to a non-empty string (or from a non-empty string to the empty string) requires a resync.") cmd.PersistentFlags().VisitAll(func(flag *pflag.Flag) { viper.BindPFlag(flag.Name, flag) }) diff --git a/lib/block_view_post_test.go b/lib/block_view_post_test.go index 4901b280d..794ddafe1 100644 --- a/lib/block_view_post_test.go +++ b/lib/block_view_post_test.go @@ -147,6 +147,7 @@ func _giveDeSoDiamonds(t *testing.T, chain *Blockchain, db *badger.DB, params *D senderPkBytes, diamondPostHash, diamondLevel, + nil, feeRateNanosPerKB, nil, []*DeSoOutput{}) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index b216804df..89aba8352 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "github.com/spf13/viper" "io" "math" "math/big" @@ -997,6 +998,17 @@ type UtxoOperation struct { StakeAmountNanosDiff uint64 } +// FIXME: This hackIsRunningStateSyncer() call is a hack to get around the fact that +// we don't have a way to not require a resync while introducing the state change +// metadata to the utxo operation struct. We don't want to use a block height to gate +// this because we want to be able to get state change metadata for ALL transactions. +// We should replace this with a more elegant solution, a better hack, or bundle it +// in with a release that requires a resync anyway. We should remove this function +// when we have a better solution in place. 
+func hackIsRunningStateSyncer() bool { + return viper.GetString("state-change-dir") != "" +} + func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte // Type @@ -1341,7 +1353,13 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada } // StateChangeMetadata - if op.StateChangeMetadata != nil { + // FIXME: This hackIsRunningStateSyncer() call is a hack to get around the fact that + // we don't have a way to not require a resync while introducing the state change + // metadata to the utxo operation struct. We don't want to use a block height to gate + // this because we want to be able to get state change metadata for ALL transactions. + // We should replace this with a more elegant solution, a better hack, or bundle it + // in with a release that requires a resync anyway. + if hackIsRunningStateSyncer() && op.StateChangeMetadata != nil { data = append(data, EncodeToBytes(blockHeight, op.StateChangeMetadata, skipMetadata...)...) } @@ -2012,7 +2030,13 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. // DeSoEncoder stateChangeMetadata := GetStateChangeMetadataFromOpType(op.Type) - if stateChangeMetadata != nil { + // FIXME: This hackIsRunningStateSyncer() call is a hack to get around the fact that + // we don't have a way to not require a resync while introducing the state change + // metadata to the utxo operation struct. We don't want to use a block height to gate + // this because we want to be able to get state change metadata for ALL transactions. + // We should replace this with a more elegant solution, a better hack, or bundle it + // in with a release that requires a resync anyway. 
+ if hackIsRunningStateSyncer() && stateChangeMetadata != nil { if exist, err := DecodeFromBytes(stateChangeMetadata, rr); exist && err == nil { op.StateChangeMetadata = stateChangeMetadata } else if err != nil { diff --git a/lib/blockchain.go b/lib/blockchain.go index a7efffd3a..4533ab22b 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -4772,6 +4772,7 @@ func (bc *Blockchain) CreateBasicTransferTxnWithDiamonds( SenderPublicKey []byte, DiamondPostHash *BlockHash, DiamondLevel int64, + ExtraData map[string][]byte, // Standard transaction fields minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _spendAmount uint64, _changeAmount uint64, _fees uint64, _err error) { @@ -4821,11 +4822,14 @@ func (bc *Blockchain) CreateBasicTransferTxnWithDiamonds( // This function does not compute a signature. } + delete(ExtraData, DiamondLevelKey) + delete(ExtraData, DiamondPostHashKey) + // Make a map for the diamond extra data and add it. diamondsExtraData := make(map[string][]byte) diamondsExtraData[DiamondLevelKey] = IntToBuf(DiamondLevel) diamondsExtraData[DiamondPostHashKey] = DiamondPostHash[:] - txn.ExtraData = diamondsExtraData + txn.ExtraData = mergeExtraData(ExtraData, diamondsExtraData) // We don't need to make any tweaks to the amount because it's basically // a standard "pay per kilobyte" transaction. 
@@ -4851,8 +4855,8 @@ func (bc *Blockchain) CreateBasicTransferTxnWithDiamonds( } func (bc *Blockchain) CreateMaxSpend( - senderPkBytes []byte, recipientPkBytes []byte, minFeeRateNanosPerKB uint64, - mempool Mempool, additionalOutputs []*DeSoOutput) ( + senderPkBytes []byte, recipientPkBytes []byte, extraData map[string][]byte, minFeeRateNanosPerKB uint64, + mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInputAdded uint64, _spendAmount uint64, _fee uint64, _err error) { txn := &MsgDeSoTxn{ @@ -4869,6 +4873,10 @@ func (bc *Blockchain) CreateMaxSpend( // This function does not compute a signature. } + if len(extraData) > 0 { + txn.ExtraData = extraData + } + if bc.BlockTip().Height >= bc.params.ForkHeights.BalanceModelBlockHeight { var utxoView *UtxoView var err error From 34e5981e3050ad1543ac5f6eab0a22212924d00b Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 27 Dec 2023 16:55:17 -0500 Subject: [PATCH 324/762] Feature/pos fee estimation v2 (#878) * Skeleton for PoSFeeEstimator, implement init, add and remove blocks (#821) Co-authored-by: Lazy Nina <> * Implement fee estimation algorithm (#822) Co-authored-by: Lazy Nina <> * Refactor pos fee estimator (#877) Co-authored-by: Lazy Nina <> * Address SA feedback (#881) * Address SA feedback * Address SA suggestion to refactor mempool (#882) Co-authored-by: Lazy Nina <> --------- Co-authored-by: Lazy Nina <> * Update lib/pos_mempool.go Co-authored-by: tholonious <99746187+tholonious@users.noreply.github.com> * Address DH feedback on pos fee estimation v2 (#887) Co-authored-by: Lazy Nina <> --------- Co-authored-by: Lazy Nina <> Co-authored-by: tholonious <99746187+tholonious@users.noreply.github.com> --- collections/slice.go | 16 + lib/block_view.go | 8 +- lib/block_view_lockups.go | 2 +- lib/blockchain.go | 31 +- lib/constants.go | 6 + lib/legacy_mempool.go | 19 ++ lib/pos_block_producer_test.go | 6 + lib/pos_blockchain_test.go | 2 + 
lib/pos_fee_estimator.go | 601 +++++++++++++++++++++++++++++++++ lib/pos_fee_estimator_test.go | 362 ++++++++++++++++++++ lib/pos_mempool.go | 66 +++- lib/pos_mempool_test.go | 8 + lib/pos_staking_rewards.go | 11 +- 13 files changed, 1110 insertions(+), 28 deletions(-) create mode 100644 lib/pos_fee_estimator.go create mode 100644 lib/pos_fee_estimator_test.go diff --git a/collections/slice.go b/collections/slice.go index 409610352..61f511a29 100644 --- a/collections/slice.go +++ b/collections/slice.go @@ -1,5 +1,7 @@ package collections +import "sort" + func All[T any](slice []T, predicate func(T) bool) bool { negatedPredicate := func(val T) bool { return !predicate(val) @@ -24,6 +26,20 @@ func Transform[TInput any, TOutput any](slice []TInput, transformFn func(TInput) return result } +// SortStable wraps the built-in sort.SliceStable function to return a sorted slice +// given an input slice, without any side effects on the input. Params: +// - input: the original slice whose contents will be sorted +// - comparator: anonymous function that takes in two values A and B, and returns true if +// A precedes B in the intended sorting +func SortStable[T any](slice []T, lessFn func(T, T) bool) []T { + result := make([]T, len(slice)) + copy(result, slice) + sort.SliceStable(result, func(ii, jj int) bool { + return lessFn(result[ii], result[jj]) + }) + return result +} + func ToMap[TKey comparable, TValue any](slice []TValue, keyFn func(TValue) TKey) map[TKey]TValue { result := make(map[TKey]TValue) for _, val := range slice { diff --git a/lib/block_view.go b/lib/block_view.go index 85e6d1241..c46e2164b 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3254,10 +3254,10 @@ func (bav *UtxoView) _connectUpdateGlobalParams( val, bytesRead := Uvarint( extraData[FeeBucketGrowthRateBasisPointsKey], ) - if val > _maxBasisPoints { + if val > MaxBasisPoints { return 0, 0, nil, fmt.Errorf( "_connectUpdateGlobalParams: FeeBucketGrowthRateBasisPoints must be <= %d", - 
_maxBasisPoints, + MaxBasisPoints, ) } newGlobalParamsEntry.FeeBucketGrowthRateBasisPoints = val @@ -3271,10 +3271,10 @@ func (bav *UtxoView) _connectUpdateGlobalParams( val, bytesRead := Uvarint( extraData[FailingTransactionBMFMultiplierBasisPointsKey], ) - if val > _maxBasisPoints { + if val > MaxBasisPoints { return 0, 0, nil, fmt.Errorf( "_connectUpdateGlobalParams: FailingTransactionBMFMultiplierBasisPoints must be <= %d", - _maxBasisPoints, + MaxBasisPoints, ) } newGlobalParamsEntry.FailingTransactionBMFMultiplierBasisPoints = val diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index bc1abdc6e..26abd51a8 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -945,7 +945,7 @@ func CalculateLockupYield( // Compute the denominators from the nanosecond to year conversion and the basis point computation. denominators, err := SafeUint256().Mul( - uint256.NewInt().SetUint64(_nanoSecsPerYear), + uint256.NewInt().SetUint64(NanoSecsPerYear), uint256.NewInt().SetUint64(10000)) if err != nil { return nil, diff --git a/lib/blockchain.go b/lib/blockchain.go index 4533ab22b..9fae05c51 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -4907,12 +4907,21 @@ func (bc *Blockchain) CreateMaxSpend( err, "Blockchain.CreateMaxSpend: Problem getting next nonce: ") } - feeAmountNanos := uint64(0) prevFeeAmountNanos := uint64(0) for feeAmountNanos == 0 || feeAmountNanos != prevFeeAmountNanos { prevFeeAmountNanos = feeAmountNanos - feeAmountNanos = _computeMaxTxV1Fee(txn, minFeeRateNanosPerKB) + if !isInterfaceValueNil(mempool) { + // TODO: replace MaxBasisPoints with variables configured by flags. 
+ feeAmountNanos, err = mempool.EstimateFee(txn, minFeeRateNanosPerKB, + MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, + bc.params.MaxBlockSizeBytes) + if err != nil { + return nil, 0, 0, 0, errors.Wrapf(err, "CreateMaxSpend: Problem estimating fee: ") + } + } else { + feeAmountNanos = _computeMaxTxV1Fee(txn, minFeeRateNanosPerKB) + } txn.TxnFeeNanos = feeAmountNanos txn.TxOutputs[len(txn.TxOutputs)-1].AmountNanos = spendableBalance - feeAmountNanos } @@ -5035,13 +5044,17 @@ func (bc *Blockchain) AddInputsAndChangeToTransactionWithSubsidy( // Initialize to 0. txArg.TxnFeeNanos = 0 - feeAmountNanos := uint64(0) - if txArg.TxnMeta.GetTxnType() != TxnTypeBlockReward && minFeeRateNanosPerKB != 0 { - prevFeeAmountNanos := uint64(0) - for feeAmountNanos == 0 || feeAmountNanos != prevFeeAmountNanos { - prevFeeAmountNanos = feeAmountNanos - feeAmountNanos = _computeMaxTxV1Fee(txArg, minFeeRateNanosPerKB) - txArg.TxnFeeNanos = feeAmountNanos + if txArg.TxnMeta.GetTxnType() != TxnTypeBlockReward { + if !isInterfaceValueNil(mempool) { + // TODO: replace MaxBasisPoints with variables configured by flags. + txArg.TxnFeeNanos, err = mempool.EstimateFee(txArg, minFeeRateNanosPerKB, MaxBasisPoints, + MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, bc.params.MaxBlockSizeBytes) + if err != nil { + return 0, 0, 0, 0, errors.Wrapf(err, + "AddInputsAndChangeToTransaction: Problem estimating fee: ") + } + } else { + txArg.TxnFeeNanos = EstimateMaxTxnFeeV1(txArg, minFeeRateNanosPerKB) } } diff --git a/lib/constants.go b/lib/constants.go index 308b08e27..465be761a 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1567,3 +1567,9 @@ const MaxAssociationTypeByteLength int = 64 const MaxAssociationValueByteLength int = 256 const AssociationTypeReservedPrefix = "DESO" const AssociationNullTerminator = byte(0) + +// Constants used for staking rewards. 
+const MaxBasisPoints = uint64(10000) // 1e4 +const NanoSecsPerYear = uint64(365) * 24 * 60 * 60 * 1e9 // 365 days * 24 hours * 60 minutes * 60 seconds * 1e9 nanoseconds + +const BytesPerKB = 1000 diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 28af69317..22ab50ac3 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2422,6 +2422,25 @@ func (mp *DeSoMempool) GetMempoolSummaryStats() (_summaryStatsMap map[string]*Su return convertMempoolTxsToSummaryStats(mp.readOnlyUniversalTransactionList) } +func EstimateMaxTxnFeeV1(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) uint64 { + if minFeeRateNanosPerKB == 0 { + return 0 + } + prevFeeAmountNanos := uint64(0) + feeAmountNanos := uint64(0) + for feeAmountNanos == 0 || feeAmountNanos != prevFeeAmountNanos { + prevFeeAmountNanos = feeAmountNanos + feeAmountNanos = _computeMaxTxV1Fee(txn, minFeeRateNanosPerKB) + txn.TxnFeeNanos = feeAmountNanos + } + return feeAmountNanos +} + +func (mp *DeSoMempool) EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64, + _ uint64, _ uint64, _ uint64, _ uint64, _ uint64) (uint64, error) { + return EstimateMaxTxnFeeV1(txn, minFeeRateNanosPerKB), nil +} + func convertMempoolTxsToSummaryStats(mempoolTxs []*MempoolTx) map[string]*SummaryStats { transactionSummaryStats := make(map[string]*SummaryStats) for _, mempoolTx := range mempoolTxs { diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index d2c988f9b..bea7b09ba 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -1,3 +1,5 @@ +//go:build relic + package lib import ( @@ -33,6 +35,7 @@ func TestCreateBlockTemplate(t *testing.T) { mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(mempool.Init(1, nil, 1)) require.NoError(mempool.Start()) defer mempool.Stop() require.True(mempool.IsRunning()) @@ -94,6 +97,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { 
mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(mempool.Init(1, nil, 1)) require.NoError(mempool.Start()) defer mempool.Stop() require.True(mempool.IsRunning()) @@ -149,6 +153,7 @@ func TestGetBlockTransactions(t *testing.T) { mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(mempool.Init(1, nil, 1)) require.NoError(mempool.Start()) defer mempool.Stop() require.True(mempool.IsRunning()) @@ -231,6 +236,7 @@ func TestGetBlockTransactions(t *testing.T) { // be returned in the same order as the transaction from getBlockTransactions. testMempool := NewPosMempool(params, globalParams, latestBlockView, 2, "", true, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + testMempool.Init(1, nil, 1) require.NoError(testMempool.Start()) defer testMempool.Stop() currentTime := uint64(time.Now().UnixMicro()) diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 7beb1b568..25fcfc437 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2631,6 +2631,7 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { maxMempoolPosSizeBytes := uint64(500) mempoolBackupIntervalMillis := uint64(30000) mempool := NewPosMempool(params, _testGetDefaultGlobalParams(), latestBlockView, 11, _dbDirSetup(t), false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(t, mempool.Init(1, nil, 1)) require.NoError(t, mempool.Start()) require.True(t, mempool.IsRunning()) priv := _generateRandomBLSPrivateKey(t) @@ -2699,6 +2700,7 @@ func NewTestPoSBlockchain(t *testing.T) *TestMeta { maxMempoolPosSizeBytes := uint64(500) mempoolBackupIntervalMillis := uint64(30000) mempool := NewPosMempool(params, _testGetDefaultGlobalParams(), latestBlockView, 10, _dbDirSetup(t), false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + 
require.NoError(t, mempool.Init(1, nil, 1)) require.NoError(t, mempool.Start()) require.True(t, mempool.IsRunning()) priv := _generateRandomBLSPrivateKey(t) diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go new file mode 100644 index 000000000..e78a502e4 --- /dev/null +++ b/lib/pos_fee_estimator.go @@ -0,0 +1,601 @@ +package lib + +import ( + "github.com/deso-protocol/core/collections" + "github.com/pkg/errors" + "math" + "math/big" + "sync" +) + +type PoSFeeEstimator struct { + // mempoolTransactionRegister is a pointer to the mempool's transaction register. The fee estimator + // uses this to estimate fees based on congestion in the mempool. + mempoolTransactionRegister *TransactionRegister + // numMempoolBlocks is a parameter to manage how many blocks in the future we're willing to wait + // to have our block included in the chain. This will most likely be set to 1, meaning that we + // want to estimate a fee such that the transaction will be included in the next block. + numMempoolBlocks uint64 + // pastBlocksTransactionRegister is an internal transaction register to the fee estimator that + // is used to estimate fees based on congestion in the past blocks. The caller is responsible + // for calling AddBlock to add blocks to this transaction register whenever a new block is added + // to the best chain. + pastBlocksTransactionRegister *TransactionRegister + // numPastBlocks is a parameter to manage how many blocks in the past we're willing to look at + // to estimate fees. This will most likely be set to 60, meaning that we want to estimate a fee + // such that it would have been included in the past 60 blocks (assuming 1 block per second, this + // means the past minute of blocks). This parameter also controls how many blocks we store in + // cachedBlocks. When AddBlock is called and we have more than numPastBlocks, we remove the + // oldest block from cachedBlocks and pastBlocksTransactionRegister. 
+ numPastBlocks uint64 + // cachedBlocks is a cache of the past blocks that we use to estimate fees. This is used to + // avoid having to recompute the fee buckets for all the past blocks every time we want to + // estimate a fee. + cachedBlocks []*MsgDeSoBlock + // rwLock is a read-write lock that protects the PoSFeeEstimator from concurrent access. + rwLock *sync.RWMutex +} + +func NewPoSFeeEstimator() *PoSFeeEstimator { + return &PoSFeeEstimator{} +} + +// Init initializes the PoSFeeEstimator with the given mempool and past blocks. The mempool +// must be running and the number of past blocks must equal the numPastBlocks param provided. +// Init will add all the transactions from the past blocks to the pastBlocksTransactionRegister +// and cache the initial past blocks. +func (posFeeEstimator *PoSFeeEstimator) Init( + mempoolTransactionRegister *TransactionRegister, + numMempoolBlocks uint64, + pastBlocks []*MsgDeSoBlock, + numPastBlocks uint64, + globalParams *GlobalParamsEntry, +) error { + posFeeEstimator.rwLock = &sync.RWMutex{} + posFeeEstimator.rwLock.Lock() + defer posFeeEstimator.rwLock.Unlock() + if mempoolTransactionRegister == nil { + return errors.New("PoSFeeEstimator.Init: mempoolTransactionRegister cannot be nil") + } + if numMempoolBlocks == 0 { + return errors.New("PoSFeeEstimator.Init: numMempoolBlocks cannot be zero") + } + if numPastBlocks == 0 { + return errors.New("PoSFeeEstimator.Init: numPastBlocks cannot be zero") + } + if numPastBlocks < uint64(len(pastBlocks)) { + return errors.New("PoSFeeEstimator.Init: numPastBlocks must greater than or equal the number of pastBlocks") + } + // Sort the past blocks by height just to be safe. 
+ sortedPastBlocks := collections.SortStable(pastBlocks, func(ii, jj *MsgDeSoBlock) bool { + return ii.Header.Height < jj.Header.Height + }) + posFeeEstimator.mempoolTransactionRegister = mempoolTransactionRegister + posFeeEstimator.numMempoolBlocks = numMempoolBlocks + posFeeEstimator.numPastBlocks = numPastBlocks + // Create a transaction register we can use to estimate fees for past blocks. + posFeeEstimator.pastBlocksTransactionRegister = NewTransactionRegister(globalParams.Copy()) + + // Add all the txns from the past blocks to the new pastBlocksTransactionRegister. + for _, block := range sortedPastBlocks { + if err := posFeeEstimator.addBlockNoLock(block); err != nil { + return errors.Wrap(err, "PosFeeEstimator.Init: error adding block to pastBlocksMempool") + } + } + return nil +} + +// AddBlock adds a block to the PoSFeeEstimator. This will add all the transactions from the block +// to the pastBlocksTransactionRegister and cache the block. If there are now more blocks cached +// than the numPastBlocks param provided to Init, the oldest block will be removed from the cache +// and all its transactions removed from the pastBlocksTransactionRegister. +func (posFeeEstimator *PoSFeeEstimator) AddBlock(block *MsgDeSoBlock) error { + posFeeEstimator.rwLock.Lock() + defer posFeeEstimator.rwLock.Unlock() + if err := posFeeEstimator.addBlockNoLock(block); err != nil { + return errors.Wrap(err, "PoSFeeEstimator.AddBlock: error adding block to PoSFeeEstimator") + } + return nil +} + +// addBlockNoLock is the same as AddBlock but assumes the caller has already acquired the rwLock. +func (posFeeEstimator *PoSFeeEstimator) addBlockNoLock(block *MsgDeSoBlock) error { + // Add all transactions from the block to the pastBlocksTransactionRegister. 
+ if err := addBlockToTransactionRegister(posFeeEstimator.pastBlocksTransactionRegister, block); err != nil { + return errors.Wrap(err, "PoSFeeEstimator.addBlockNoLock: error adding block to pastBlocksTransactionRegister") + } + posFeeEstimator.cachedBlocks = append(posFeeEstimator.cachedBlocks, block) + // Sort the cached blocks by height & tstamp just to be safe. + posFeeEstimator.sortCachedBlocks() + if uint64(len(posFeeEstimator.cachedBlocks)) > posFeeEstimator.numPastBlocks { + // Remove the oldest block. + if err := posFeeEstimator.removeBlockNoLock(posFeeEstimator.cachedBlocks[0]); err != nil { + return errors.Wrap(err, "PoSFeeEstimator.addBlockNoLock: error removing block from PoSFeeEstimator") + } + } + return nil +} + +// addBlockToTransactionRegister adds all the transactions from the block to the given transaction register. +// Should only be called when the rwLock over a TransactionRegister is held. +func addBlockToTransactionRegister(txnRegister *TransactionRegister, block *MsgDeSoBlock) error { + for _, txn := range block.Txns { + // We explicitly exclude block reward transactions as they do not have fees. + if txn.TxnMeta.GetTxnType() == TxnTypeBlockReward { + continue + } + mtxn, err := NewMempoolTx(txn, block.Header.TstampNanoSecs, block.Header.Height) + if err != nil { + return errors.Wrap(err, "PoSFeeEstimator.addBlockToTransactionRegister: error creating MempoolTx") + } + if err = txnRegister.AddTransaction(mtxn); err != nil { + return errors.Wrap(err, + "PoSFeeEstimator.addBlockToTransactionRegister: error adding txn to pastBlocksTransactionRegister") + } + } + return nil +} + +// RemoveBlock removes a block from the PoSFeeEstimator. This will remove all the transactions from the block +// from the pastBlocksTransactionRegister and remove the block from the cache. 
+func (posFeeEstimator *PoSFeeEstimator) RemoveBlock(block *MsgDeSoBlock) error { + posFeeEstimator.rwLock.Lock() + defer posFeeEstimator.rwLock.Unlock() + + if err := posFeeEstimator.removeBlockNoLock(block); err != nil { + return errors.Wrap(err, "PoSFeeEstimator.RemoveBlock: error removing block from PoSFeeEstimator") + } + return nil +} + +// removeBlockNoLock is the same as RemoveBlock but assumes the caller has already acquired the rwLock. +func (posFeeEstimator *PoSFeeEstimator) removeBlockNoLock(block *MsgDeSoBlock) error { + // Remove all transaction from the block from the pastBlocksTransactionRegister. + for _, txn := range block.Txns { + // We explicitly exclude block reward transactions as they do not have fees. + // They were never added in the first place. + if txn.TxnMeta.GetTxnType() == TxnTypeBlockReward { + continue + } + mtxn, err := NewMempoolTx(txn, block.Header.TstampNanoSecs, block.Header.Height) + if err != nil { + return errors.Wrap(err, "PoSFeeEstimator.RemoveBlock: error creating MempoolTx") + } + if err = posFeeEstimator.pastBlocksTransactionRegister.RemoveTransaction(mtxn); err != nil { + return errors.Wrap(err, + "PoSFeeEstimator.removeBlockNoLock: error removing txn from pastBlocksTransactionRegister") + } + } + blockHash, err := block.Hash() + if err != nil { + return errors.Wrap(err, "PoSFeeEstimator.removeBlockNoLock: error computing blockHash") + } + // Remove the block from the cached blocks. + newCachedBlocks := []*MsgDeSoBlock{} + for _, cachedBlock := range posFeeEstimator.cachedBlocks { + cachedBlockHash, err := cachedBlock.Hash() + if err != nil { + return errors.Wrap(err, "PoSFeeEstimator.removeBlockNoLock: error computing cachedBlockHash") + } + if blockHash.IsEqual(cachedBlockHash) { + continue + } + newCachedBlocks = append(newCachedBlocks, cachedBlock) + } + posFeeEstimator.cachedBlocks = newCachedBlocks + return nil +} + +// UpdateGlobalParams updates the global params used by the PoSFeeEstimator. 
This only modifies the GlobalParams +// used by the pastBlockTransactionRegister and allows it to properly compute the fee buckets. The mempool's +// global params are not modified and are controlled externally. +func (posFeeEstimator *PoSFeeEstimator) UpdateGlobalParams(globalParams *GlobalParamsEntry) error { + posFeeEstimator.rwLock.Lock() + defer posFeeEstimator.rwLock.Unlock() + tempTransactionRegister := NewTransactionRegister(globalParams.Copy()) + for _, block := range posFeeEstimator.cachedBlocks { + if err := addBlockToTransactionRegister(tempTransactionRegister, block); err != nil { + return errors.Wrap(err, "PosFeeEstimator.UpdateGlobalParams: error adding block to tempTransactionRegister") + } + } + return nil +} + +// sortCachedBlocks sorts the cached blocks by height & tstamp just to be safe. +func (posFeeEstimator *PoSFeeEstimator) sortCachedBlocks() { + posFeeEstimator.cachedBlocks = collections.SortStable(posFeeEstimator.cachedBlocks, + func(ii, jj *MsgDeSoBlock) bool { + if ii.Header.Height != jj.Header.Height { + return ii.Header.Height < jj.Header.Height + } + if ii.Header.TstampNanoSecs != jj.Header.TstampNanoSecs { + return ii.Header.TstampNanoSecs < jj.Header.TstampNanoSecs + } + iiHash, err := ii.Hash() + if iiHash == nil || err != nil { + return false + } + jjHash, err := jj.Hash() + if jjHash == nil || err != nil { + return true + } + return iiHash.String() < jjHash.String() + }) +} + +// EstimateFeeRateNanosPerKB estimates the fee rate in nanos per KB for the current mempool +// and past blocks using the congestionFactorBasisPoints, priorityPercentileBasisPoints, and +// maxBlockSize params. 
+func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( + congestionFactorBasisPoints uint64, + priorityPercentileBasisPoints uint64, + maxBlockSize uint64, +) (uint64, error) { + posFeeEstimator.rwLock.RLock() + defer posFeeEstimator.rwLock.RUnlock() + pastBlockFeeRate, err := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + posFeeEstimator.pastBlocksTransactionRegister, + congestionFactorBasisPoints, + priorityPercentileBasisPoints, + posFeeEstimator.numPastBlocks, + maxBlockSize, + ) + if err != nil { + return 0, errors.Wrap(err, "EstimateFeeRateNanosPerKB: Problem computing past block fee rate") + } + mempoolFeeRate, err := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + posFeeEstimator.mempoolTransactionRegister, + congestionFactorBasisPoints, + priorityPercentileBasisPoints, + posFeeEstimator.numMempoolBlocks, + maxBlockSize, + ) + if err != nil { + return 0, errors.Wrap(err, "EstimateFeeRateNanosPerKB: Problem computing mempool fee rate") + } + if pastBlockFeeRate < mempoolFeeRate { + return mempoolFeeRate, nil + } + return pastBlockFeeRate, nil +} + +// EstimateFee estimates the fee in nanos for the provided transaction by taking the +// max of the mempoolFeeEstimate and pastBlocksFeeEstimate. 
+func (posFeeEstimator *PoSFeeEstimator) EstimateFee( + txn *MsgDeSoTxn, + mempoolCongestionFactorBasisPoints uint64, + mempoolPriorityPercentileBasisPoints uint64, + pastBlocksCongestionFactorBasisPoints uint64, + pastBlocksPriorityPercentileBasisPoints uint64, + maxBlockSize uint64, +) (uint64, error) { + posFeeEstimator.rwLock.RLock() + defer posFeeEstimator.rwLock.RUnlock() + mempoolFeeEstimate, err := posFeeEstimator.mempoolFeeEstimate( + txn, + mempoolCongestionFactorBasisPoints, + mempoolPriorityPercentileBasisPoints, + maxBlockSize, + ) + if err != nil { + return 0, errors.Wrap(err, "PoSFeeEstimator.EstimateFee: Problem computing mempool fee estimate") + } + pastBlocksFeeEstimate, err := posFeeEstimator.pastBlocksFeeEstimate( + txn, + pastBlocksCongestionFactorBasisPoints, + pastBlocksPriorityPercentileBasisPoints, + maxBlockSize, + ) + if err != nil { + return 0, errors.Wrap(err, "PoSFeeEstimator.EstimateFee: Problem computing past blocks fee estimate") + } + if mempoolFeeEstimate < pastBlocksFeeEstimate { + return pastBlocksFeeEstimate, nil + } + return mempoolFeeEstimate, nil +} + +// pastBlocksFeeEstimate estimates the fee in nanos for the provided transaction using the +// pastBlocksTransactionRegister and fee estimation parameters. +func (posFeeEstimator *PoSFeeEstimator) pastBlocksFeeEstimate( + txn *MsgDeSoTxn, + congestionFactorBasisPoints uint64, + priorityPercentileBasisPoints uint64, + maxBlockSize uint64, +) (uint64, error) { + txnFee, err := posFeeEstimator.estimateTxnFeeGivenTransactionRegister( + txn, + posFeeEstimator.pastBlocksTransactionRegister, + congestionFactorBasisPoints, + priorityPercentileBasisPoints, + posFeeEstimator.numPastBlocks, + maxBlockSize, + ) + if err != nil { + return 0, errors.Wrap(err, "pastBlocksFeeEstimate: Problem computing txn fee") + } + return txnFee, nil +} + +// mempoolFeeEstimate estimates the fee in nanos for the provided transaction using the +// mempoolTransactionRegister and fee estimation parameters. 
+func (posFeeEstimator *PoSFeeEstimator) mempoolFeeEstimate( + txn *MsgDeSoTxn, + congestionFactorBasisPoints uint64, + priorityPercentileBasisPoints uint64, + maxBlockSize uint64, +) (uint64, error) { + txnFee, err := posFeeEstimator.estimateTxnFeeGivenTransactionRegister( + txn, + posFeeEstimator.mempoolTransactionRegister, + congestionFactorBasisPoints, + priorityPercentileBasisPoints, + posFeeEstimator.numMempoolBlocks, + maxBlockSize, + ) + if err != nil { + return 0, errors.Wrap(err, "mempoolFeeEstimate: Problem computing txn fee") + } + return txnFee, nil +} + +// computeFeeGivenTxnAndFeeRate computes the fee in nanos for the provided transaction and fee rate +// in nanos per KB. It does this by recursively computing the fee until the fee converges. +func computeFeeGivenTxnAndFeeRate(txn *MsgDeSoTxn, feeRateNanosPerKB uint64) (uint64, error) { + // Create a clone of the txn, so we don't modify the original. + txnClone, err := txn.Copy() + if err != nil { + return 0, errors.Wrap(err, "computeFeeGivenTxnAndFeeRate: Problem copying txn") + } + // Set the nonce to the maximum value if it's not already set. + if txnClone.TxnNonce == nil { + txnClone.TxnNonce = &DeSoNonce{ + ExpirationBlockHeight: math.MaxUint64, + PartialID: math.MaxUint64, + } + } + if txnClone.TxnNonce.ExpirationBlockHeight == 0 { + txnClone.TxnNonce.ExpirationBlockHeight = math.MaxUint64 + } + if txnClone.TxnNonce.PartialID == 0 { + txnClone.TxnNonce.PartialID = math.MaxUint64 + } + + // Set the TxnFeeNanos to the maximum value. + txnClone.TxnFeeNanos = math.MaxUint64 + txnFeeNanos, err := computeFeeRecursive(txnClone, feeRateNanosPerKB) + if err != nil { + return 0, errors.Wrap(err, "computeFeeGivenTxnAndFeeRate: Problem computing fee rate recursively") + } + return txnFeeNanos, nil +} + +// computeFeeRecursive computes the fee in nanos for the provided transaction and fee rate +// in nanos per KB. It does this by recursively computing the fee until the fee converges. 
+// It should only be called from computeFeeGivenTxnAndFeeRate and assumes that the TxnFeeNanos +// field of the txn is set to the maximum value on the first call. +// It computes the length of the unsigned transaction bytes and then adds the maximum DER sig +// length to estimate an upper bound on the size of the signed transaction. Next, +// it will compute the fee by taking the fee rate in nanos per KB and multiplying it by the +// estimated size of the signed transaction in KB. If the fee computed here is less than the +// TxnFeeNanos field of the txn, it will set the TxnFeeNanos field to the computed fee and +// call itself recursively. This will continue until the computed fee is greater than or equal +// to the TxnFeeNanos field of the txn. At this point, it will return the TxnFeeNanos field. +// This will converge in 9 iterations or fewer. The maximum number of iterations is 9 because +// the transaction fee is the only field that gets modified so there are only 8 possible +// byte lengths that can be used to represent the transaction fee, and we ensure that the +// number of bytes that are required to represent computed transaction fee field is always less than +// the number of bytes to represent the TxnFeeNanos field at the previous iteration. +func computeFeeRecursive(txn *MsgDeSoTxn, feeRateNanosPerKB uint64) (uint64, error) { + // Get the length of bytes in the txn. + txnBytesNoSignature, err := txn.ToBytes(true) + if err != nil { + return 0, errors.Wrap(err, "computeFeeRecursive: Problem serializing txn") + } + const MaxDERSigLen = 74 + txnBytesLen := uint64(len(txnBytesNoSignature)) + MaxDERSigLen + + // Compute the new txn fee. If the computed fee is a decimal, we round up to the + // next integer value. 
We define the math as follows: + // - We need to compute CEIL(txnBytesLen * feeRateNanosPerKB / BytesPerKB) + // - We use integer math to compute FLOOR(txnBytesLen * feeRateNanosPerKB + BytesPerKB - 1) / BytesPerKB) + // Ref: https://stackoverflow.com/questions/17944/how-to-round-up-the-result-of-integer-division + txnFeeNanos := (txnBytesLen*feeRateNanosPerKB + BytesPerKB - 1) / BytesPerKB + if txnFeeNanos < txn.TxnFeeNanos { + txn.TxnFeeNanos = txnFeeNanos + return computeFeeRecursive(txn, feeRateNanosPerKB) + } + return txnFeeNanos, nil +} + +// estimateTxnFeeGivenTransactionRegister estimates the fee in nanos for the provided transaction +// and transaction register using the congestionFactorBasisPoints, priorityPercentileBasisPoints, +// and maxBlockSize params. It calls estimateFeeRateNanosPerKBGivenTransactionRegister to estimate +// the fee rate and then computes the fee using computeFeeGivenTxnAndFeeRate. +func (posFeeEstimator *PoSFeeEstimator) estimateTxnFeeGivenTransactionRegister( + txn *MsgDeSoTxn, + txnRegister *TransactionRegister, + congestionFactorBasisPoints uint64, + priorityPercentileBasisPoints uint64, + numBlocks uint64, + maxBlockSize uint64, +) (uint64, error) { + feeRateNanosPerKB, err := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + txnRegister, + congestionFactorBasisPoints, + priorityPercentileBasisPoints, + numBlocks, + maxBlockSize, + ) + if err != nil { + return 0, errors.Wrap(err, "estimateTxnFeeGivenTransactionRegister: Problem computing fee rate") + } + txnFee, err := computeFeeGivenTxnAndFeeRate(txn, feeRateNanosPerKB) + if err != nil { + return 0, errors.Wrap(err, "estimateTxnFeeGivenTransactionRegister: Problem computing txn fee") + } + return txnFee, nil +} + +// estimateFeeRateNanosPerKBGivenTransactionRegister estimates the fee rate in nanos per KB for the provided +// transaction register and fee estimation parameters. 
The congestionFactorBasisPoints param is the congestion +// factor in basis points (100 bps = 1%). The priorityPercentileBasisPoints param is the percentile of the +// priority fee bucket to use for the fee rate estimation. The maxBlockSize param is the maximum block size +// in bytes. +// This estimates fee rates using the following approach: +// 1. Compute the maximum size of numPastBlocks blocks as maxBlockSize * numPastBlocks, called maxSizeOfNumBlocks. +// 2. Iterate over all transactions in the transaction register in fee time order until the total size of +// the transactions is greater than maxSizeOfNumBlocks and append those transactions to a slice. +// 3. If there are no transactions in the slice after step 2, return the minimum network fee. +// 4. Compute the priority fee bucket for the transactions in the slice using the priorityPercentileBasisPoints param +// by calling getPriorityFeeBucketFromTxns on the slice. +// 5. If the resulting priority fee bucket from step 4 is less than the global minimum network fee, return the global +// minimum network fee. +// 6. Compute the congestion threshold as congestionFactorBasisPoints * maxSizeOfNumBlocks / 10000. +// 7. If the total size of the transactions in the slice is less than the congestion threshold, return one bucket lower +// than the priority fee bucket from step 4. +// 8. NOTE: THIS STEP IS DISABLED FOR NOW: +// - If the total size of the transactions in the size is greater than the congestion threshold and less +// than or equal to maxSizeOfNumBlocks, return the priority fee bucket from step 4. +// 9. Otherwise, return one bucket higher than the priority fee bucket from step 4. 
+// +// DOC: https://docs.google.com/document/d/e/2PACX-1vSs4bX9oeMmcS53ZwmJ4Q6hDZhqCmob0UCHjXWXU9rqZ3jaT56KEcHr8IbLZEW-ma8WNh2wOJCwhc4L/pub +// +// Examples: +// #1 +// When run for mempool: +// numBlocks = 1 (this is set at construction time) +// congestionFactorBasisPoints = 9000 (90%) +// priorityPercentileBasisPoints = 10000 (100%) +// +// With these numbers, I think we'd have the following behavior (and notice I'm getting rid of the case where we keep +// the fee the same, see comment below): +// - We will find the lowest fee paid by anyone within the first block's worth of txns because +// priorityPercentileBasisPoints=100% will return the lowest-fee txn. +// - If the mempool has >90% of 1 block's worth of txns, we will pay one fee bucket HIGHER than this lowest-fee txn. +// - If the mempool has <90% of 1 block's worth of txns, we will pay one fee bucket LOWER than this lowest-fee txn. +// +// #2 +// When run for mempool: +// numBlocks = 5 (this is set at construction time) +// congestionFactorBasisPoints = 10000 (100%) +// priorityPercentileBasisPoints = 2000 (20%) +// +// With these numbers, I think we'd have the following behavior (and notice I'm getting rid of the case where we keep +// the fee the same, see comment below): +// - We find the fee of the 20th percentile txn, which is enough to be included in the next block +// - If the mempool has more than 5 blocks worth of txns, we pay one fee bucket higher than this guy, otherwise we pay +// one fee bucket lower than him. +// +// #3 +// numBlocks = 5 (this is set at construction time) +// congestionFactorBasisPoints = 9000 (90%) +// priorityPercentileBasisPoints = 2000 (20%) +// +// Here, we would find the 20th percentile txn, enough to include in the next block. And if the last 5 blocks are more +// than 90% full we pay 1 bucket higher than this guy. Otherwise, we pay one fee bucket lower.
+// +// Suggested Params: +// Mempool +// numBlocks = 1 +// congestionFactorBasisPoints = 9000 (90%) +// priorityPercentileBasisPoints = 9000 (90%) +// +// This means we will take one block's worth of txns from the mempool, and we'll pay 1 fee bucket higher than the 90th +// percentile guy if there's more than 90% of a block's worth of txns in it. +// +// Past Blocks +// numBlocks = 60 +// congestionFactorBasisPoints = 9000 (90%) +// priorityPercentileBasisPoints = 1/60 (since we want to always get included in the next block) +// +// Here, we would take the last 60 blocks worth of txns. If we're more than 90% full, we pay one fee bucket higher than +// the "top block"'s txn. Otherwise, we pay one fee bucket lower than that txn. +func (posFeeEstimator *PoSFeeEstimator) estimateFeeRateNanosPerKBGivenTransactionRegister( + txnRegister *TransactionRegister, + congestionFactorBasisPoints uint64, + priorityPercentileBasisPoints uint64, + numBlocks uint64, + maxBlockSize uint64, +) (uint64, error) { + txnRegister.RLock() + defer txnRegister.RUnlock() + it := txnRegister.GetFeeTimeIterator() + maxSizeOfNumBlocks := maxBlockSize * numBlocks + totalTxnsSize := uint64(0) + var txns []*MempoolTx + for it.Next() { + tx, ok := it.Value() + if !ok { + break + } + txnBytes, err := tx.Tx.ToBytes(false) + if err != nil { + return 0, errors.Wrap(err, "estimateFeeRateNanosPerKBGivenTransactionRegister: Problem serializing txn") + } + totalTxnsSize += uint64(len(txnBytes)) + txns = append(txns, tx) + // TODO: I think we want to include the txn that puts us over the limit, but + // we can just move this check up a few lines if that's wrong. + if totalTxnsSize > maxSizeOfNumBlocks { + break + } + } + globalMinFeeRate, _ := txnRegister.minimumNetworkFeeNanosPerKB.Uint64() + if len(txns) == 0 { + // If there are no txns in the transaction register, we simply return the minimum network fee. 
+ return globalMinFeeRate, nil + } + + bucketMinFee, bucketMaxFee := getPriorityFeeBucketFromTxns( + txns, + priorityPercentileBasisPoints, + txnRegister.minimumNetworkFeeNanosPerKB, + txnRegister.feeBucketGrowthRateBasisPoints) + // If the bucketMinFee is less than or equal to the global min fee rate, we return the global min fee rate. + if bucketMinFee <= globalMinFeeRate { + return globalMinFeeRate, nil + } + + // Compute the congestion threshold. If our congestion factor is 100% (or 10,000 bps), + // then congestion threshold is simply max block size * numPastBlocks + // TODO: I don't know if I like this name really. + congestionThreshold := (congestionFactorBasisPoints * maxSizeOfNumBlocks) / MaxBasisPoints + // If the total size of the txns in the transaction register is less than the computed congestion threshold, + // we return one bucket lower than the Priority fee. + if totalTxnsSize <= congestionThreshold { + // Return one bucket lower than Priority fee + bucketExponent := computeFeeTimeBucketExponentFromFeeNanosPerKB( + bucketMinFee, txnRegister.minimumNetworkFeeNanosPerKB, txnRegister.feeBucketGrowthRateBasisPoints) + return computeFeeTimeBucketMinFromExponent( + bucketExponent-1, txnRegister.minimumNetworkFeeNanosPerKB, txnRegister.feeBucketGrowthRateBasisPoints), nil + } + // Otherwise, we return one bucket higher than Priority fee + return bucketMaxFee + 1, nil +} + +// getPriorityFeeBucketFromTxns computes the priority fee bucket for the given transactions using the +// priorityPercentileBasisPoints, minimumNetworkFeeNanosPerKB, and feeBucketGrowthRateBasisPoints params. +// The feeTimeOrderedTxns have the highest fees first and the lowest fees last, so we need to compute +// the percentile position of the priorityPercentileBasisPoints param and then compute the fee bucket +// range based on the fee rate per KB of the transaction at that position. 
+func getPriorityFeeBucketFromTxns( + feeTimeOrderedTxns []*MempoolTx, + priorityPercentileBasisPoints uint64, + minimumNetworkFeeNanosPerKB *big.Float, + feeBucketGrowthRateBasisPoints *big.Float, +) (uint64, uint64) { + percentilePosition := uint64( + len(feeTimeOrderedTxns)) - ((priorityPercentileBasisPoints * uint64(len(feeTimeOrderedTxns))) / MaxBasisPoints) + // The percentile position should never be greater than the length of feeTimeOrderedTxns, but may be equal to + // it if priorityPercentileBasisPoints is 0. In this case, we simply return the last txn's fee bucket range. + if percentilePosition >= uint64(len(feeTimeOrderedTxns)) { + percentilePosition = uint64(len(feeTimeOrderedTxns)) - 1 + } + bucketMin, bucketMax := computeFeeTimeBucketRangeFromFeeNanosPerKB( + feeTimeOrderedTxns[percentilePosition].FeePerKB, + minimumNetworkFeeNanosPerKB, + feeBucketGrowthRateBasisPoints, + ) + return bucketMin, bucketMax +} diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go new file mode 100644 index 000000000..6fbf727b7 --- /dev/null +++ b/lib/pos_fee_estimator_test.go @@ -0,0 +1,362 @@ +package lib + +import ( + "github.com/stretchr/testify/require" + "math/big" + "math/rand" + "testing" +) + +func TestFeeEstimator(t *testing.T) { + randSource := rand.New(rand.NewSource(2373)) + globalParams := _testGetDefaultGlobalParams() + + maxMempoolPosSizeBytes := uint64(1e9) + mempoolBackupIntervalMillis := uint64(30000) + + params, db := _posTestBlockchainSetup(t) + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + m1PubBytes, _, _ := Base58CheckDecode(m1Pub) + + latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) + require.NoError(t, err) + dir := _dbDirSetup(t) + + mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, + maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + mempool.Init(1, nil, 1) + require.NoError(t, mempool.Start()) + require.True(t, mempool.IsRunning()) + minFeeBucketMin, minFeeBucketMax := 
computeFeeTimeBucketRangeFromFeeNanosPerKB( + globalParams.MinimumNetworkFeeNanosPerKB, + big.NewFloat(float64(globalParams.MinimumNetworkFeeNanosPerKB)), + mempool.txnRegister.feeBucketGrowthRateBasisPoints) + // set the feeMin to the second fee bucket. + feeMin := minFeeBucketMax + 1 + // Construct a FeeEstimator with no transactions in it. We should get the minimum fee bucket. + // We make some dummy block to get around validations. + posFeeEstimator := &PoSFeeEstimator{} + err = posFeeEstimator.Init(mempool.txnRegister, 1, []*MsgDeSoBlock{{ + Header: &MsgDeSoHeader{Height: 10}, + }}, 1, mempool.globalParams) + require.NoError(t, err) + // When there's nothing in the mempool, we return the global minimum fee rate. + baseFeeRate, err := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + posFeeEstimator.mempoolTransactionRegister, 10000, 10000, 1, 1000) + require.NoError(t, err) + require.Equal(t, globalParams.MinimumNetworkFeeNanosPerKB, baseFeeRate) + // When there's nothing in the past blocks, we return the global minimum fee rate. + baseFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + posFeeEstimator.pastBlocksTransactionRegister, 10000, 10000, 1, 1000) + require.NoError(t, err) + require.Equal(t, globalParams.MinimumNetworkFeeNanosPerKB, baseFeeRate) + // Make a dummy transaction, so we can check the fee rate. + txn := _generateTestTxnWithFeeRate(t, randSource, feeMin, m0PubBytes, m0Priv, 100, 25) + computedFee, err := posFeeEstimator.mempoolFeeEstimate(txn, 10000, 10000, 1000) + require.NoError(t, err) + validateTxnFee(t, txn, computedFee, baseFeeRate) + computedFee, err = posFeeEstimator.pastBlocksFeeEstimate(txn, 10000, 10000, 1000) + require.NoError(t, err) + validateTxnFee(t, txn, computedFee, baseFeeRate) + // Hybrid estimator will also return the base fee rate * number of bytes. 
+ computedFee, err = posFeeEstimator.EstimateFee(txn, 10000, 10000, 1000, 10000, 1000) + require.NoError(t, err) + validateTxnFee(t, txn, computedFee, baseFeeRate) + + numBytesMempool := uint64(0) + var txns []*MsgDeSoTxn + // Generate dummy transactions to put in the mempool. + for ii := 0; ii < 10; ii++ { + pk := m0PubBytes + priv := m0Priv + if ii%2 == 1 { + pk = m1PubBytes + priv = m1Priv + } + // Just add everything with the global min fee. + mempoolTxn := _generateTestTxnWithFeeRate(t, randSource, feeMin, pk, priv, 100, 25) + txnBytes, err := mempoolTxn.ToBytes(false) + + require.NoError(t, err) + numBytesMempool += uint64(len(txnBytes)) + txns = append(txns, mempoolTxn) + + _wrappedPosMempoolAddTransaction(t, mempool, mempoolTxn) + } + // Generate dummy transactions to put in the past blocks. + numBytesPastBlocks := uint64(0) + //congestionFactorBasisPoints := 50 * 100 // 50% + var pastBlocksTxns []*MsgDeSoTxn + for ii := 0; ii < 10; ii++ { + pk := m0PubBytes + priv := m0Priv + if ii%2 == 1 { + pk = m1PubBytes + priv = m1Priv + } + // Just add everything with the global min fee. 
+ pastBlockTxn := _generateTestTxnWithFeeRate(t, randSource, feeMin, pk, priv, 100, 25) + txnBytes, err := pastBlockTxn.ToBytes(false) + + require.NoError(t, err) + numBytesPastBlocks += uint64(len(txnBytes)) + pastBlocksTxns = append(pastBlocksTxns, pastBlockTxn) + } + dummyBlock := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + Height: 10, + }, + Txns: pastBlocksTxns, + } + err = posFeeEstimator.AddBlock(dummyBlock) + require.NoError(t, err) + // Compute the next fee bucket min + _, feeBucketMax := computeFeeTimeBucketRangeFromFeeNanosPerKB( + feeMin, + big.NewFloat(float64(globalParams.MinimumNetworkFeeNanosPerKB)), + mempool.txnRegister.feeBucketGrowthRateBasisPoints) + nextFeeBucketMin := feeBucketMax + 1 + var estimatedMempoolFeeRate uint64 + var estimatedMempoolFee uint64 + var estimatedPastBlocksFeeRate uint64 + var estimatedPastBlocksFee uint64 + var estimatedHybridFee uint64 + { + // Let's set the max block size to be less than total size of transactions we added and make sure + // we get the next fee bucket. + congestionFactor := uint64(10000) + priorityPercentileBasisPoints := uint64(10000) + maxBlockSizeMempool := numBytesMempool - 1 + maxBlockSizePastBlocks := numBytesPastBlocks - 1 + // We use the max to determine which to pass to the hybrid estimator. 
+ maxBlockSizeHybrid := maxBlockSizePastBlocks + if maxBlockSizeMempool > maxBlockSizePastBlocks { + maxBlockSizeHybrid = maxBlockSizeMempool + } + estimatedMempoolFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + posFeeEstimator.mempoolTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, + maxBlockSizeMempool) + require.NoError(t, err) + require.Equal(t, nextFeeBucketMin, estimatedMempoolFeeRate) + estimatedMempoolFee, err = posFeeEstimator.mempoolFeeEstimate(txn, congestionFactor, + priorityPercentileBasisPoints, maxBlockSizeMempool) + require.NoError(t, err) + validateTxnFee(t, txn, estimatedMempoolFee, estimatedMempoolFeeRate) + + // Let's do the same for past blocks estimator + estimatedPastBlocksFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + posFeeEstimator.pastBlocksTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, + maxBlockSizePastBlocks) + require.NoError(t, err) + require.Equal(t, nextFeeBucketMin, estimatedPastBlocksFeeRate) + estimatedPastBlocksFee, err = posFeeEstimator.pastBlocksFeeEstimate(txn, congestionFactor, + priorityPercentileBasisPoints, maxBlockSizePastBlocks) + require.NoError(t, err) + validateTxnFee(t, txn, estimatedPastBlocksFee, estimatedPastBlocksFeeRate) + + // Both the mempool and next block fee and fee rates should be equal since we have + // everything in the same fee bucket. + require.Equal(t, estimatedMempoolFee, estimatedPastBlocksFee) + require.Equal(t, estimatedMempoolFeeRate, estimatedPastBlocksFeeRate) + + // And the hybrid estimator is just the max, but for completeness, we check it. 
+ estimatedHybridFee, err = posFeeEstimator.EstimateFee( + txn, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, + maxBlockSizeHybrid) + require.NoError(t, err) + require.Equal(t, estimatedMempoolFee, estimatedHybridFee) + require.Equal(t, estimatedPastBlocksFee, estimatedHybridFee) + } + + { + // Make the max block size be greater than the total size of transactions we added and set the + // congestion percentage to 95% and make sure we go up one priority fee bucket. + congestionFactor := uint64(95 * 100) + priorityPercentileBasisPoints := uint64(10000) + maxBlockSizeMempool := numBytesMempool + 1 + maxBlockSizePastBlocks := numBytesPastBlocks + 1 + // We use the max to determine which to pass to the hybrid estimator. + maxBlockSizeHybrid := maxBlockSizePastBlocks + if maxBlockSizeMempool > maxBlockSizePastBlocks { + maxBlockSizeHybrid = maxBlockSizeMempool + } + estimatedMempoolFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + posFeeEstimator.mempoolTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, + maxBlockSizeMempool) + require.NoError(t, err) + require.Equal(t, nextFeeBucketMin, estimatedMempoolFeeRate) + estimatedMempoolFee, err = posFeeEstimator.mempoolFeeEstimate(txn, congestionFactor, + priorityPercentileBasisPoints, maxBlockSizeMempool) + require.NoError(t, err) + validateTxnFee(t, txn, estimatedMempoolFee, estimatedMempoolFeeRate) + + // Let's do the same for past blocks estimator + estimatedPastBlocksFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + posFeeEstimator.pastBlocksTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, + maxBlockSizePastBlocks) + require.NoError(t, err) + require.Equal(t, nextFeeBucketMin, estimatedPastBlocksFeeRate) + estimatedPastBlocksFee, err = posFeeEstimator.pastBlocksFeeEstimate(txn, congestionFactor, + priorityPercentileBasisPoints, maxBlockSizePastBlocks) + 
require.NoError(t, err) + validateTxnFee(t, txn, estimatedPastBlocksFee, estimatedPastBlocksFeeRate) + + // Both the mempool and next block fee and fee rates should be equal since we have + // everything in the same fee bucket. + require.Equal(t, estimatedMempoolFee, estimatedPastBlocksFee) + require.Equal(t, estimatedMempoolFeeRate, estimatedPastBlocksFeeRate) + + // And the hybrid estimator is just the max, but for completeness, we check it. + estimatedHybridFee, err = posFeeEstimator.EstimateFee( + txn, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, + maxBlockSizeHybrid) + require.NoError(t, err) + require.Equal(t, estimatedMempoolFee, estimatedHybridFee) + require.Equal(t, estimatedPastBlocksFee, estimatedHybridFee) + } + { + // Make the max block size 2x-1 the total size of transactions we added and set the congestion factor + // to 50%. We should get the next priority fee bucket. + congestionFactor := uint64(50 * 100) + + priorityPercentileBasisPoints := uint64(10000) + maxBlockSizeMempool := 2*numBytesMempool - 1 + maxBlockSizePastBlocks := 2*numBytesPastBlocks - 1 + // We use the max to determine which to pass to the hybrid estimator. 
+ maxBlockSizeHybrid := maxBlockSizePastBlocks + if maxBlockSizeMempool > maxBlockSizePastBlocks { + maxBlockSizeHybrid = maxBlockSizeMempool + } + estimatedMempoolFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + posFeeEstimator.mempoolTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, + maxBlockSizeMempool) + require.NoError(t, err) + require.Equal(t, nextFeeBucketMin, estimatedMempoolFeeRate) + estimatedMempoolFee, err = posFeeEstimator.mempoolFeeEstimate(txn, congestionFactor, + priorityPercentileBasisPoints, maxBlockSizeMempool) + require.NoError(t, err) + validateTxnFee(t, txn, estimatedMempoolFee, estimatedMempoolFeeRate) + + // Let's do the same for past blocks estimator + estimatedPastBlocksFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + posFeeEstimator.pastBlocksTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, + maxBlockSizePastBlocks) + require.NoError(t, err) + require.Equal(t, nextFeeBucketMin, estimatedPastBlocksFeeRate) + estimatedPastBlocksFee, err = posFeeEstimator.pastBlocksFeeEstimate(txn, congestionFactor, + priorityPercentileBasisPoints, maxBlockSizePastBlocks) + require.NoError(t, err) + validateTxnFee(t, txn, estimatedPastBlocksFee, estimatedPastBlocksFeeRate) + + // Both the mempool and next block fee and fee rates should be equal since we have + // everything in the same fee bucket. + require.Equal(t, estimatedMempoolFee, estimatedPastBlocksFee) + require.Equal(t, estimatedMempoolFeeRate, estimatedPastBlocksFeeRate) + + // And the hybrid estimator is just the max, but for completeness, we check it. 
+ estimatedHybridFee, err = posFeeEstimator.EstimateFee( + txn, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, + maxBlockSizeHybrid) + require.NoError(t, err) + require.Equal(t, estimatedMempoolFee, estimatedHybridFee) + require.Equal(t, estimatedPastBlocksFee, estimatedHybridFee) + } + { + // Okay now make congestion factor 50% and make the max block size be more than 2x the + // total size of transactions we added. We should get the previous fee bucket, in this + // case this is the minimum fee rate bucket. + congestionFactor := uint64(50 * 100) + priorityPercentileBasisPoints := uint64(10000) + maxBlockSizeMempool := 2 * numBytesMempool + maxBlockSizePastBlocks := 2 * numBytesPastBlocks + // We use the max to determine which to pass to the hybrid estimator. + maxBlockSizeHybrid := maxBlockSizePastBlocks + if maxBlockSizeMempool > maxBlockSizePastBlocks { + maxBlockSizeHybrid = maxBlockSizeMempool + } + estimatedMempoolFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + posFeeEstimator.mempoolTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, + maxBlockSizeMempool) + require.NoError(t, err) + require.Equal(t, minFeeBucketMin, estimatedMempoolFeeRate) + estimatedMempoolFee, err = posFeeEstimator.mempoolFeeEstimate(txn, congestionFactor, + priorityPercentileBasisPoints, maxBlockSizeMempool) + require.NoError(t, err) + validateTxnFee(t, txn, estimatedMempoolFee, estimatedMempoolFeeRate) + + // Let's do the same for past blocks estimator + estimatedPastBlocksFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + posFeeEstimator.pastBlocksTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, + maxBlockSizePastBlocks) + require.NoError(t, err) + require.Equal(t, minFeeBucketMin, estimatedPastBlocksFeeRate) + estimatedPastBlocksFee, err = posFeeEstimator.pastBlocksFeeEstimate(txn, congestionFactor, + 
priorityPercentileBasisPoints, maxBlockSizePastBlocks) + require.NoError(t, err) + validateTxnFee(t, txn, estimatedPastBlocksFee, estimatedPastBlocksFeeRate) + + // Both the mempool and next block fee and fee rates should be equal since we have + // everything in the same fee bucket. + require.Equal(t, estimatedMempoolFee, estimatedPastBlocksFee) + require.Equal(t, estimatedMempoolFeeRate, estimatedPastBlocksFeeRate) + + // And the hybrid estimator is just the max, but for completeness, we check it. + estimatedHybridFee, err = posFeeEstimator.EstimateFee( + txn, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, + maxBlockSizeHybrid) + require.NoError(t, err) + require.Equal(t, estimatedMempoolFee, estimatedHybridFee) + require.Equal(t, estimatedPastBlocksFee, estimatedHybridFee) + } +} + +func _generateTestTxnWithFeeRate(t *testing.T, rand *rand.Rand, feeRate uint64, pk []byte, priv string, + expirationHeight uint64, extraDataBytes int32) *MsgDeSoTxn { + + extraData := make(map[string][]byte) + extraData["key"] = RandomBytes(extraDataBytes) + txn := &MsgDeSoTxn{ + TxnVersion: DeSoTxnVersion1, + PublicKey: pk, + TxnMeta: &BasicTransferMetadata{}, + TxnFeeNanos: 0, + TxnNonce: &DeSoNonce{ + ExpirationBlockHeight: expirationHeight, + PartialID: rand.Uint64() % 10000, + }, + ExtraData: extraData, + } + // Sign it so we have the correct # of bytes + _signTxn(t, txn, priv) + // Compute fee manually so things are correct... ugh. 
+ computeFeeRateIterative(t, txn, priv, feeRate) + return txn +} + +func computeFeeRateIterative(t *testing.T, txn *MsgDeSoTxn, priv string, feeRate uint64) { + txnBytes, err := txn.ToBytes(false) + require.NoError(t, err) + shouldBumpByOne := (feeRate * uint64(len(txnBytes)) % 1000) != 0 + txnFeeNanos := feeRate * uint64(len(txnBytes)) / 1000 + if shouldBumpByOne { + txnFeeNanos++ + } + txn.TxnFeeNanos = txnFeeNanos + _signTxn(t, txn, priv) + newTxnBytes, err := txn.ToBytes(false) + require.NoError(t, err) + if (txn.TxnFeeNanos*1000)/uint64(len(newTxnBytes)) < feeRate { + computeFeeRateIterative(t, txn, priv, feeRate) + } +} + +func validateTxnFee(t *testing.T, txn *MsgDeSoTxn, estimatedFee uint64, estimatedFeeRate uint64) { + // Set the fee and sign the transaction, so we can get the real byte length. + txn.TxnFeeNanos = estimatedFee + _signTxn(t, txn, m0Priv) + // Make sure the fee is the same as the estimated fee. + txnBytes, err := txn.ToBytes(false) + require.NoError(t, err) + computedFeeByFeeRate := (uint64(len(txnBytes)) * estimatedFeeRate) / 1000 + require.GreaterOrEqual(t, estimatedFee, computedFeeByFeeRate) +} diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 0301d2912..cfd1dcaaa 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -14,8 +14,9 @@ import ( type PosMempoolStatus int const ( - PosMempoolStatusRunning PosMempoolStatus = iota - PosMempoolStatusNotRunning + PosMempoolStatusNotInitialized PosMempoolStatus = iota + PosMempoolStatusInitialized + PosMempoolStatusRunning ) type Mempool interface { @@ -38,6 +39,15 @@ type Mempool interface { IsTransactionInPool(txHash *BlockHash) bool GetMempoolTx(txHash *BlockHash) *MempoolTx GetMempoolSummaryStats() map[string]*SummaryStats + EstimateFee( + txn *MsgDeSoTxn, + minFeeRateNanosPerKB uint64, + mempoolCongestionFactorBasisPoints uint64, + mempoolPriorityPercentileBasisPoints uint64, + pastBlocksCongestionFactorBasisPoints uint64, + pastBlocksPriorityPercentileBasisPoints uint64, + 
maxBlockSize uint64, + ) (uint64, error) } type MempoolIterator interface { @@ -115,6 +125,10 @@ type PosMempool struct { maxMempoolPosSizeBytes uint64 // mempoolBackupIntervalMillis is the frequency with which pos mempool persists transactions to storage. mempoolBackupIntervalMillis uint64 + + // feeEstimator is used to estimate the fee required for a transaction to be included in the next block + // based off the current state of the mempool and the most n recent blocks. + feeEstimator *PoSFeeEstimator } // PosMempoolIterator is a wrapper around FeeTimeIterator, modified to return MsgDeSoTxn instead of MempoolTx. @@ -147,7 +161,7 @@ func NewPosMempool(params *DeSoParams, globalParams *GlobalParamsEntry, readOnly latestBlockHeight uint64, dir string, inMemoryOnly bool, maxMempoolPosSizeBytes uint64, mempoolBackupIntervalMillis uint64) *PosMempool { return &PosMempool{ - status: PosMempoolStatusNotRunning, + status: PosMempoolStatusNotInitialized, params: params, globalParams: globalParams, inMemoryOnly: inMemoryOnly, @@ -156,15 +170,37 @@ func NewPosMempool(params *DeSoParams, globalParams *GlobalParamsEntry, readOnly latestBlockHeight: latestBlockHeight, maxMempoolPosSizeBytes: maxMempoolPosSizeBytes, mempoolBackupIntervalMillis: mempoolBackupIntervalMillis, + txnRegister: NewTransactionRegister(globalParams), + feeEstimator: NewPoSFeeEstimator(), + ledger: NewBalanceLedger(), + nonceTracker: NewNonceTracker(), + } +} + +func (mp *PosMempool) Init( + feeEstimatorNumMempoolBlocks uint64, + feeEstimatorPastBlocks []*MsgDeSoBlock, + feeEstimatorNumPastBlocks uint64, +) error { + if mp.status != PosMempoolStatusNotInitialized { + return errors.New("PosMempool.Init: PosMempool already initialized") + } + // TODO: parameterize num blocks. Also, how to pass in blocks. 
+ if err := mp.feeEstimator.Init( + mp.txnRegister, feeEstimatorNumMempoolBlocks, feeEstimatorPastBlocks, feeEstimatorNumPastBlocks, + mp.globalParams); err != nil { + return errors.Wrapf(err, "PosMempool.Start: Problem initializing fee estimator") } + mp.status = PosMempoolStatusInitialized + return nil } func (mp *PosMempool) Start() error { mp.Lock() defer mp.Unlock() - if mp.IsRunning() { - return nil + if mp.status != PosMempoolStatusInitialized { + return errors.New("PosMempool.Start: PosMempool not initialized") } // Create the transaction register, the ledger, and the nonce tracker, @@ -217,8 +253,9 @@ func (mp *PosMempool) Stop() { mp.txnRegister.Reset() mp.ledger.Reset() mp.nonceTracker.Reset() + mp.feeEstimator = NewPoSFeeEstimator() - mp.status = PosMempoolStatusNotRunning + mp.status = PosMempoolStatusNotInitialized } func (mp *PosMempool) IsRunning() bool { @@ -499,6 +536,10 @@ func (mp *PosMempool) refreshNoLock() error { // Create the temporary in-memory mempool with the most up-to-date readOnlyLatestBlockView, Height, and globalParams. 
tempPool := NewPosMempool(mp.params, mp.globalParams, mp.readOnlyLatestBlockView, mp.latestBlockHeight, "", true, mp.maxMempoolPosSizeBytes, mp.mempoolBackupIntervalMillis) + if err := tempPool.Init( + mp.feeEstimator.numMempoolBlocks, mp.feeEstimator.cachedBlocks, mp.feeEstimator.numPastBlocks); err != nil { + return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem initializing temp pool") + } if err := tempPool.Start(); err != nil { return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem starting temp pool") } @@ -635,3 +676,16 @@ func (mp *PosMempool) GetMempoolTx(txHash *BlockHash) *MempoolTx { func (mp *PosMempool) GetMempoolSummaryStats() map[string]*SummaryStats { return convertMempoolTxsToSummaryStats(mp.txnRegister.GetFeeTimeTransactions()) } + +func (mp *PosMempool) EstimateFee(txn *MsgDeSoTxn, + _ uint64, + mempoolCongestionFactorBasisPoints uint64, + mempoolPriorityPercentileBasisPoints uint64, + pastBlocksCongestionFactorBasisPoints uint64, + pastBlocksPriorityPercentileBasisPoints uint64, + maxBlockSize uint64) (uint64, error) { + // TODO: replace MaxBasisPoints with variables configured by flags. 
+ return mp.feeEstimator.EstimateFee( + txn, mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, + pastBlocksCongestionFactorBasisPoints, pastBlocksPriorityPercentileBasisPoints, maxBlockSize) +} diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 7d1c08ab5..c848f8ffd 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -23,6 +23,7 @@ func TestPosMempoolStart(t *testing.T) { mempool := NewPosMempool(¶ms, globalParams, nil, 0, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(mempool.Init(1, nil, 1)) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) require.NoError(mempool.Refresh()) @@ -50,6 +51,7 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(mempool.Init(1, nil, 1)) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -67,6 +69,7 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { newPool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(newPool.Init(1, nil, 1)) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) newPoolTxns := newPool.GetTransactions() @@ -101,6 +104,7 @@ func TestPosMempoolPrune(t *testing.T) { mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(mempool.Init(1, nil, 1)) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -135,6 +139,7 @@ func TestPosMempoolPrune(t *testing.T) { newPool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(newPool.Init(1, nil, 1)) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) require.Equal(2, 
len(newPool.GetTransactions())) @@ -193,6 +198,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(mempool.Init(1, nil, 1)) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -219,6 +225,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { newPool := NewPosMempool(params, newGlobalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(newPool.Init(1, nil, 1)) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) newPoolTxns := newPool.GetTransactions() @@ -249,6 +256,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) + require.NoError(mempool.Init(1, nil, 1)) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index 73dd89138..36db8ba95 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -250,15 +250,10 @@ func (bav *UtxoView) distributeValidatorCommission(validatorPKID *PKID, commissi return bav.distributeStakingReward(validatorPKID, validatorPKID, commissionNanos) } -const ( - _maxBasisPoints = uint64(10000) // 1e4 - _nanoSecsPerYear = uint64(365) * 24 * 60 * 60 * 1e9 // 365 days * 24 hours * 60 minutes * 60 seconds * 1e9 nanoseconds -) - var ( - _basisPointsAsInt = big.NewInt(int64(_maxBasisPoints)) - _basisPointsAsFloat = NewFloat().SetUint64(_maxBasisPoints) - _nanoSecsPerYearAsFloat = NewFloat().SetUint64(_nanoSecsPerYear) + _basisPointsAsInt = big.NewInt(int64(MaxBasisPoints)) + _basisPointsAsFloat = NewFloat().SetUint64(MaxBasisPoints) + _nanoSecsPerYearAsFloat = NewFloat().SetUint64(NanoSecsPerYear) ) func convertBigFloatToBigInt(float *big.Float) *big.Int { From 
07edf20302d92e5f035e23f1e2e00d7c431eaf59 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 27 Dec 2023 16:56:06 -0500 Subject: [PATCH 325/762] Add LockedAtEpochNumber to utxo operations to support rosetta, fix utxo operation encoding/decoding issues (#885) Co-authored-by: Lazy Nina <> --- lib/block_view_stake.go | 1 + lib/block_view_types.go | 100 +++++++++++++++++++++++++--------------- 2 files changed, 64 insertions(+), 37 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 343c893a5..01ae98998 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1676,6 +1676,7 @@ func (bav *UtxoView) _connectUnstake( PrevValidatorEntry: prevValidatorEntry, PrevStakeEntries: prevStakeEntries, PrevLockedStakeEntries: prevLockedStakeEntries, + LockedAtEpochNumber: currentEpochNumber, } if err = bav.SanityCheckUnstakeTxn(transactorPKIDEntry.PKID, utxoOpForTxn, txMeta.UnstakeAmountNanos); err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnstake: ") diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 89aba8352..2c10a4544 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -978,6 +978,12 @@ type UtxoOperation struct { // CoinsInCirculation and NumberOfHolders prior to a lockup transaction. PrevLockedBalanceEntry *LockedBalanceEntry + // PrevLockupYieldCurvePoint and PrevLockupTransferRestriction are + // the previous yield curve and transfer restrictions associated + // with an UpdateCoinLockupParams transaction. + PrevLockupYieldCurvePoint *LockupYieldCurvePoint + PrevLockupTransferRestriction TransferRestrictionStatus + // PrevSenderLockedBalanceEntry and PrevReceiverLockedBalanceEntry are the previous LockedBalanceEntry // for both the sender and receiver in the coin lockup transfer operation. 
PrevSenderLockedBalanceEntry *LockedBalanceEntry @@ -986,16 +992,25 @@ type UtxoOperation struct { // PrevLockedBalanceEntries is a slice of LockedBalanceEntry prior to a coin unlock. PrevLockedBalanceEntries []*LockedBalanceEntry - // PrevLockupYieldCurvePoint and PrevLockupTransferRestriction are - // the previous yield curve and transfer restrictions associated - // with an UpdateCoinLockupParams transaction. - PrevLockupYieldCurvePoint *LockupYieldCurvePoint - PrevLockupTransferRestriction TransferRestrictionStatus - - // This value is used by Rosetta to return the amount of DESO that was added + // StakeAmountNanosDiff is used by Rosetta to return the amount of DESO that was added // to a StakeEntry during the end-of-epoch hook. It's needed // in order to avoid having to re-run the end of epoch hook. StakeAmountNanosDiff uint64 + + // LockedAtEpochNumber is used by Rosetta to uniquely identify a subaccount representing + // a locked stake entry that is created during an Unlock transaction. Without this, we + // would need to consolidate many LockedStakeEntries into a single subaccount which would + // make it difficult to track the history of a particular stake entry and generally lead + // to more complexity in rosetta which is undesirable. Another alternative would be to + // require Rosetta to be able to compute epoch's based on block height, but this would + // require a more structural change to rosetta's codebase so that transaction parsing + // would be aware of the block height. This is also undesirable. Although adding a new + // field to theUtxoOperation struct is not ideal, the tradeoff is worth it for the + // simplicity it provides in rosetta. TODO: When refactoring UtxoOperations in the future, + // consider how we can maintain support for rosetta and situations like this where the + // transaction metadata itself doesn't specify the information we need to return to + // rosetta. 
+ LockedAtEpochNumber uint64 } // FIXME: This hackIsRunningStateSyncer() call is a hack to get around the fact that @@ -1324,6 +1339,17 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada } } + // StateChangeMetadata + // FIXME: This hackIsRunningStateSyncer() call is a hack to get around the fact that + // we don't have a way to not require a resync while introducing the state change + // metadata to the utxo operation struct. We don't want to use a block height to gate + // this because we want to be able to get state change metadata for ALL transactions. + // We should replace this with a more elegant solution, a better hack, or bundle it + // in with a release that requires a resync anyway. + if hackIsRunningStateSyncer() && op.StateChangeMetadata != nil { + data = append(data, EncodeToBytes(blockHeight, op.StateChangeMetadata, skipMetadata...)...) + } + if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { // PrevValidatorEntry data = append(data, EncodeToBytes(blockHeight, op.PrevValidatorEntry, skipMetadata...)...) @@ -1347,20 +1373,14 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada data = append(data, EncodeToBytes(blockHeight, op.PrevSenderLockedBalanceEntry, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, op.PrevReceiverLockedBalanceEntry, skipMetadata...)...) - // PrevTransactorBalanceEntry, PrevLockedBalanceEntries - data = append(data, EncodeToBytes(blockHeight, op.PrevTransactorBalanceEntry, skipMetadata...)...) + // PrevLockedBalanceEntries data = append(data, EncodeDeSoEncoderSlice(op.PrevLockedBalanceEntries, blockHeight, skipMetadata...)...) - } - // StateChangeMetadata - // FIXME: This hackIsRunningStateSyncer() call is a hack to get around the fact that - // we don't have a way to not require a resync while introducing the state change - // metadata to the utxo operation struct. 
We don't want to use a block height to gate - // this because we want to be able to get state change metadata for ALL transactions. - // We should replace this with a more elegant solution, a better hack, or bundle it - // in with a release that requires a resync anyway. - if hackIsRunningStateSyncer() && op.StateChangeMetadata != nil { - data = append(data, EncodeToBytes(blockHeight, op.StateChangeMetadata, skipMetadata...)...) + // StakeAmountNanosDiff + data = append(data, UintToBuf(op.StakeAmountNanosDiff)...) + + // LockedAtEpochNumber + data = append(data, UintToBuf(op.LockedAtEpochNumber)...) } return data @@ -1978,6 +1998,22 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. } } + // DeSoEncoder + stateChangeMetadata := GetStateChangeMetadataFromOpType(op.Type) + // FIXME: This hackIsRunningStateSyncer() call is a hack to get around the fact that + // we don't have a way to not require a resync while introducing the state change + // metadata to the utxo operation struct. We don't want to use a block height to gate + // this because we want to be able to get state change metadata for ALL transactions. + // We should replace this with a more elegant solution, a better hack, or bundle it + // in with a release that requires a resync anyway. + if hackIsRunningStateSyncer() && stateChangeMetadata != nil { + if exist, err := DecodeFromBytes(stateChangeMetadata, rr); exist && err == nil { + op.StateChangeMetadata = stateChangeMetadata + } else if err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading DeSoEncoder") + } + } + if MigrationTriggered(blockHeight, ProofOfStake1StateSetupMigration) { // PrevValidatorEntry if op.PrevValidatorEntry, err = DecodeDeSoEncoder(&ValidatorEntry{}, rr); err != nil { @@ -2019,28 +2055,18 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. 
return errors.Wrapf(err, "UtxoOperation.Decode: Problem Reading PrevReceiverLockedBalanceEntry: ") } - // PrevTransactorBalanceEntry, PrevLockedBalanceEntries - if op.PrevTransactorBalanceEntry, err = DecodeDeSoEncoder(&BalanceEntry{}, rr); err != nil { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevTransactorBalanceEntry: ") - } + // PrevLockedBalanceEntries if op.PrevLockedBalanceEntries, err = DecodeDeSoEncoderSlice[*LockedBalanceEntry](rr); err != nil { return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockedBalanceEntry: ") } - } + // StakeAmountNanosDiff + if op.StakeAmountNanosDiff, err = ReadUvarint(rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading StakeAmountNanosDiff: ") + } - // DeSoEncoder - stateChangeMetadata := GetStateChangeMetadataFromOpType(op.Type) - // FIXME: This hackIsRunningStateSyncer() call is a hack to get around the fact that - // we don't have a way to not require a resync while introducing the state change - // metadata to the utxo operation struct. We don't want to use a block height to gate - // this because we want to be able to get state change metadata for ALL transactions. - // We should replace this with a more elegant solution, a better hack, or bundle it - // in with a release that requires a resync anyway. 
- if hackIsRunningStateSyncer() && stateChangeMetadata != nil { - if exist, err := DecodeFromBytes(stateChangeMetadata, rr); exist && err == nil { - op.StateChangeMetadata = stateChangeMetadata - } else if err != nil { - return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading DeSoEncoder") + // LockedAtEpochNumber + if op.LockedAtEpochNumber, err = ReadUvarint(rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading LockedAtEpochNumber: ") } } From dacd339c244aa3d73c0ce9fc7de228c6eb550a11 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 27 Dec 2023 19:20:04 -0500 Subject: [PATCH 326/762] Create Entry Points For PoS Consensus In Server (#888) * Create Entry Points For PoS Consensus In Server * Update comments --- lib/blockchain.go | 9 -------- lib/pos_blockchain.go | 51 ++++++++++++++++++++++++++++++++++++++++--- lib/server.go | 32 +++++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 12 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 9fae05c51..44d0358ce 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1869,15 +1869,6 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B return isMainChain, false, nil } -// processHeaderPoS is validates and stores an incoming block header as follows: -// 1. Validating the block header's structure and timestamp -// 2. Connect the block header to the header chain's tip -// 3. Store the block header in the db and the in-memory block index -func (bc *Blockchain) processHeaderPoS(blockHeader *MsgDeSoHeader, headerHash *BlockHash) (_isMainChain bool, _isOrphan bool, _err error) { - // TODO - return false, false, fmt.Errorf("processHeaderPoS: Not implemented") -} - // ProcessHeader is a wrapper around processHeaderPoW and processHeaderPoS, which do the leg-work. 
func (bc *Blockchain) ProcessHeader(blockHeader *MsgDeSoHeader, headerHash *BlockHash) (_isMainChain bool, _isOrphan bool, _err error) { bc.ChainLock.Lock() diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 93c94e2db..eaf724925 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -12,15 +12,56 @@ import ( "github.com/pkg/errors" ) +// processHeaderPoS validates and stores an incoming block header to build +// the PoS version of the header chain. +// +// The PoS header chain uses a simplified version of the Fast-HotStuff consensus +// rules. It's used during syncing to build a chain of block headers with the +// minimum set of validations needed to build a template of what the full PoS +// blockchain will look like. +// +// The PoS header chain uses block integrity checks to perform block validations, +// and to connect blocks based on their PrevBlockHash. It does not run the commit +// rule. It does not fully validate QCs or block proposers, or perform any validations +// that require on-chain state. +// +// processHeaderPoS algorithm: +// 1. Validate that the block header is properly formed. +// 2. Add the block header to the block index with status +// StatusHeaderValidated or StatusHeaderValidateFailed. +// 3. Perform the orphan check on the block header. +// 4. If the block is an orphan, or its view is less than the current header chain's tip, +// then we exit early. +// 5. If it is not an orphan, and has a higher view than the current header chain, then +// we re-org the header chain so that the incoming header is the new tip. +func (bc *Blockchain) processHeaderPoS(blockHeader *MsgDeSoHeader, headerHash *BlockHash) ( + _isMainChain bool, + _isOrphan bool, + _err error, +) { + return false, false, fmt.Errorf("processHeaderPoS: Not implemented") +} + // ProcessBlockPoS simply acquires the chain lock and calls processBlockPoS. 
func (bc *Blockchain) ProcessBlockPoS(block *MsgDeSoBlock, currentView uint64, verifySignatures bool) ( - _success bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { + _success bool, + _isOrphan bool, + _missingBlockHashes []*BlockHash, + _err error, +) { + // Grab the chain lock bc.ChainLock.Lock() defer bc.ChainLock.Unlock() + + // Perform a simple nil-check. If the block is nil then we return an error. Nothing we can do here. + if block == nil { + return false, false, nil, fmt.Errorf("ProcessBlockPoS: Block is nil") + } + return bc.processBlockPoS(block, currentView, verifySignatures) } -// processBlockPoS runs the Fast-Hotstuff block connect and commit rule as follows: +// processBlockPoS runs the Fast-HotStuff block connect and commit rule as follows: // 1. Determine if we're missing the parent block of this block. // If so, return the hash of the missing block and add this block to the orphans list. // 2. Validate the incoming block, its header, its block height, the leader, and its QCs (vote or timeout) @@ -28,7 +69,11 @@ func (bc *Blockchain) ProcessBlockPoS(block *MsgDeSoBlock, currentView uint64, v // 4. try to apply the incoming block as the tip (performing reorgs as necessary). If it can't be applied, exit here. // 5. Run the commit rule - If applicable, flushes the incoming block's grandparent to the DB func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, verifySignatures bool) ( - _success bool, _isOrphan bool, _missingBlockHashes []*BlockHash, _err error) { + _success bool, + _isOrphan bool, + _missingBlockHashes []*BlockHash, + _err error, +) { // If we can't hash the block, we can never store in the block index and we should throw it out immediately. 
if _, err := block.Hash(); err != nil { return false, false, nil, errors.Wrapf(err, "processBlockPoS: Problem hashing block") diff --git a/lib/server.go b/lib/server.go index 3bca2514c..867d2d3b9 100644 --- a/lib/server.go +++ b/lib/server.go @@ -817,6 +817,16 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { len(msg.Headers), srv.blockchain.chainState(), pp, srv.blockchain.headerTip().Header.Height, printHeight))) + // If the node is running a Fast-HotStuff validator and the consensus is running, + // in the steady-state, then it means that we don't sync the header chain separately. + if srv.fastHotStuffConsensus != nil && srv.fastHotStuffConsensus.IsRunning() { + return + } + + // If we get here, it means that the node is not currently running a Fast-HotStuff + // validator or that the node is syncing. In either case, we sync headers according + // to the blocksync rules. + // Start by processing all of the headers given to us. They should start // right after the tip of our header chain ideally. While going through them // tally up the number that we actually process. @@ -1114,6 +1124,17 @@ func (srv *Server) _handleGetSnapshot(pp *Peer, msg *MsgDeSoGetSnapshot) { // at current snapshot epoch. We will set these entries in our node's database as well as update the checksum. func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { srv.timer.End("Get Snapshot") + + // If the node is running a Fast-HotStuff validator and the consensus is running, + // in the steady-state, then it means that we don't download and handle snapshots. + if srv.fastHotStuffConsensus != nil && srv.fastHotStuffConsensus.IsRunning() { + return + } + + // If we get here, it means that the node is not currently running a Fast-HotStuff + // validator or that the node is syncing. In either case, we handle snapshots according + // to the Hypersync rules. 
+ srv.timer.Start("Server._handleSnapshot Main") // If there are no db entries in the msg, we should also disconnect the peer. There should always be // at least one entry sent, which is either the empty entry or the last key we've requested. @@ -1849,6 +1870,17 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlock: Received block ( %v / %v ) from Peer %v", blk.Header.Height, srv.blockchain.headerTip().Height, pp))) + // If the node is running a Fast-HotStuff validator and the consensus is running, + // in the steady-state, then we handle the block according to the consensus rules. + if srv.fastHotStuffConsensus != nil && srv.fastHotStuffConsensus.IsRunning() { + srv.fastHotStuffConsensus.HandleBlock(pp, blk) + return + } + + // If we get here, it means that the node is not currently running a Fast-HotStuff + // validator or that the node is syncing. In either case, we handle the block + // according to the blocksync rules. + srv.timer.Start("Server._handleBlock: General") // Pull out the header for easy access. blockHeader := blk.Header From a7edc6c55cbbf84ff3659e89e43d51ad531b0519 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Thu, 28 Dec 2023 14:52:42 +0100 Subject: [PATCH 327/762] PoS Validator Constants and Network Changes (#876) * PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. 
* Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment * Constants and network changes * Test MsgDeSoVerack encoding * Fix snapshot hack * Revert "Remove constants/network" This reverts commit b467ddbcd034c2e8d2728a7e77f4b714b686a760. * Fix compilation errors * Address review comments --- lib/connection_manager.go | 9 +- lib/constants.go | 38 ++++++- lib/network.go | 234 +++++++++++++++++++++++++++++++------- lib/network_test.go | 46 +++++++- lib/peer.go | 16 +-- lib/server.go | 18 +-- lib/snapshot.go | 8 +- 7 files changed, 294 insertions(+), 75 deletions(-) diff --git a/lib/connection_manager.go b/lib/connection_manager.go index a14742c8b..4cefe9527 100644 --- a/lib/connection_manager.go +++ b/lib/connection_manager.go @@ -891,13 +891,6 @@ func (cmgr *ConnectionManager) Start() { // Start the peer's message loop. pp.Start() - - // Signal the server about the new Peer in case it wants to do something with it. - cmgr.serverMessageQueue <- &ServerMessage{ - Peer: pp, - Msg: &MsgDeSoNewPeer{}, - } - } case pp := <-cmgr.donePeerChan: { @@ -920,7 +913,7 @@ func (cmgr *ConnectionManager) Start() { // with it. 
cmgr.serverMessageQueue <- &ServerMessage{ Peer: pp, - Msg: &MsgDeSoDonePeer{}, + Msg: &MsgDeSoDisconnectedPeer{}, } } } diff --git a/lib/constants.go b/lib/constants.go index 308b08e27..eccf1c582 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -478,6 +478,22 @@ func GetEncoderMigrationHeightsList(forkHeights *ForkHeights) ( return migrationHeightsList } +type ProtocolVersionType uint64 + +const ( + ProtocolVersion0 ProtocolVersionType = 0 + ProtocolVersion1 ProtocolVersionType = 1 + ProtocolVersion2 ProtocolVersionType = 2 +) + +func NewProtocolVersionType(version uint64) ProtocolVersionType { + return ProtocolVersionType(version) +} + +func (pvt ProtocolVersionType) ToUint64() uint64 { + return uint64(pvt) +} + // DeSoParams defines the full list of possible parameters for the // DeSo network. type DeSoParams struct { @@ -486,7 +502,7 @@ type DeSoParams struct { // Set to true when we're running in regtest mode. This is useful for testing. ExtraRegtestParamUpdaterKeys map[PkMapKey]bool // The current protocol version we're running. - ProtocolVersion uint64 + ProtocolVersion ProtocolVersionType // The minimum protocol version we'll allow a peer we connect to // to have. MinProtocolVersion uint64 @@ -545,6 +561,9 @@ type DeSoParams struct { // The amount of time we wait to receive a version message from a peer. VersionNegotiationTimeout time.Duration + // The maximum number of addresses to broadcast to peers. + MaxAddressesToBroadcast uint32 + // The genesis block to use as the base of our chain. GenesisBlock *MsgDeSoBlock // The expected hash of the genesis block. Should align with what one @@ -713,6 +732,9 @@ type DeSoParams struct { // in BMF calculations. DefaultFailingTransactionBMFMultiplierBasisPoints uint64 + // HandshakeTimeoutMicroSeconds is the timeout for the peer handshake certificate. The default value is 15 minutes. 
+ HandshakeTimeoutMicroSeconds uint64 + ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -917,7 +939,7 @@ var MainnetForkHeights = ForkHeights{ // DeSoMainnetParams defines the DeSo parameters for the mainnet. var DeSoMainnetParams = DeSoParams{ NetworkType: NetworkType_MAINNET, - ProtocolVersion: 1, + ProtocolVersion: ProtocolVersion1, MinProtocolVersion: 1, UserAgent: "Architect", DNSSeeds: []string{ @@ -1000,6 +1022,8 @@ var DeSoMainnetParams = DeSoParams{ DialTimeout: 30 * time.Second, VersionNegotiationTimeout: 30 * time.Second, + MaxAddressesToBroadcast: 10, + BlockRewardMaturity: time.Hour * 3, V1DifficultyAdjustmentFactor: 10, @@ -1126,6 +1150,9 @@ var DeSoMainnetParams = DeSoParams{ // The rate of the failing transaction's fee used in BMF calculations. DefaultFailingTransactionBMFMultiplierBasisPoints: uint64(2500), + // The peer handshake certificate timeout. + HandshakeTimeoutMicroSeconds: uint64(900000000), + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1222,7 +1249,7 @@ var TestnetForkHeights = ForkHeights{ // DeSoTestnetParams defines the DeSo parameters for the testnet. var DeSoTestnetParams = DeSoParams{ NetworkType: NetworkType_TESTNET, - ProtocolVersion: 0, + ProtocolVersion: ProtocolVersion0, MinProtocolVersion: 0, UserAgent: "Architect", DNSSeeds: []string{ @@ -1266,6 +1293,8 @@ var DeSoTestnetParams = DeSoParams{ DialTimeout: 30 * time.Second, VersionNegotiationTimeout: 30 * time.Second, + MaxAddressesToBroadcast: 10, + GenesisBlock: &GenesisBlock, GenesisBlockHashHex: GenesisBlockHashHex, @@ -1393,6 +1422,9 @@ var DeSoTestnetParams = DeSoParams{ // The rate of the failing transaction's fee used in BMF calculations. DefaultFailingTransactionBMFMultiplierBasisPoints: uint64(2500), + // The peer handshake certificate timeout. 
+ HandshakeTimeoutMicroSeconds: uint64(900000000), + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), diff --git a/lib/network.go b/lib/network.go index 15b230a83..bb7fcae4e 100644 --- a/lib/network.go +++ b/lib/network.go @@ -17,11 +17,10 @@ import ( "strings" "time" - "github.com/deso-protocol/core/collections/bitset" - "github.com/golang/glog" - "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/golang/glog" + "github.com/deso-protocol/core/collections/bitset" "github.com/deso-protocol/core/consensus" "github.com/btcsuite/btcd/btcec" @@ -111,11 +110,12 @@ const ( // TODO: Should probably split these out into a separate channel in the server to // make things more parallelized. - MsgTypeQuit MsgType = ControlMessagesStart - MsgTypeNewPeer MsgType = ControlMessagesStart + 1 - MsgTypeDonePeer MsgType = ControlMessagesStart + 2 - MsgTypeBlockAccepted MsgType = ControlMessagesStart + 3 - MsgTypeBitcoinManagerUpdate MsgType = ControlMessagesStart + 4 // Deprecated + MsgTypeQuit MsgType = ControlMessagesStart + MsgTypeDisconnectedPeer MsgType = ControlMessagesStart + 1 + MsgTypeBlockAccepted MsgType = ControlMessagesStart + 2 + MsgTypeBitcoinManagerUpdate MsgType = ControlMessagesStart + 3 // Deprecated + MsgTypePeerHandshakeComplete MsgType = ControlMessagesStart + 4 + MsgTypeNewConnection MsgType = ControlMessagesStart + 5 // NEXT_TAG = 7 ) @@ -173,14 +173,16 @@ func (msgType MsgType) String() string { return "GET_ADDR" case MsgTypeQuit: return "QUIT" - case MsgTypeNewPeer: - return "NEW_PEER" - case MsgTypeDonePeer: + case MsgTypeDisconnectedPeer: return "DONE_PEER" case MsgTypeBlockAccepted: return "BLOCK_ACCEPTED" case MsgTypeBitcoinManagerUpdate: return "BITCOIN_MANAGER_UPDATE" + case MsgTypePeerHandshakeComplete: + return "PEER_HANDSHAKE_COMPLETE" + case MsgTypeNewConnection: + return "NEW_CONNECTION" case 
MsgTypeGetSnapshot: return "GET_SNAPSHOT" case MsgTypeSnapshotData: @@ -837,34 +839,64 @@ func (msg *MsgDeSoQuit) FromBytes(data []byte) error { return fmt.Errorf("MsgDeSoQuit.FromBytes not implemented") } -type MsgDeSoNewPeer struct { +type MsgDeSoDisconnectedPeer struct { } -func (msg *MsgDeSoNewPeer) GetMsgType() MsgType { - return MsgTypeNewPeer +func (msg *MsgDeSoDisconnectedPeer) GetMsgType() MsgType { + return MsgTypeDisconnectedPeer } -func (msg *MsgDeSoNewPeer) ToBytes(preSignature bool) ([]byte, error) { - return nil, fmt.Errorf("MsgDeSoNewPeer.ToBytes: Not implemented") +func (msg *MsgDeSoDisconnectedPeer) ToBytes(preSignature bool) ([]byte, error) { + return nil, fmt.Errorf("MsgDeSoDisconnectedPeer.ToBytes: Not implemented") } -func (msg *MsgDeSoNewPeer) FromBytes(data []byte) error { - return fmt.Errorf("MsgDeSoNewPeer.FromBytes not implemented") +func (msg *MsgDeSoDisconnectedPeer) FromBytes(data []byte) error { + return fmt.Errorf("MsgDeSoDisconnectedPeer.FromBytes not implemented") } -type MsgDeSoDonePeer struct { +// MsgDeSoPeerHandshakeComplete is a control message that is used to internally signal when a peer has +// connected and completed the Version + Verack handshake and authentication process. 
+type MsgDeSoPeerHandshakeComplete struct { } -func (msg *MsgDeSoDonePeer) GetMsgType() MsgType { - return MsgTypeDonePeer +func (msg *MsgDeSoPeerHandshakeComplete) GetMsgType() MsgType { + return MsgTypePeerHandshakeComplete } -func (msg *MsgDeSoDonePeer) ToBytes(preSignature bool) ([]byte, error) { - return nil, fmt.Errorf("MsgDeSoDonePeer.ToBytes: Not implemented") +func (msg *MsgDeSoPeerHandshakeComplete) ToBytes(preSignature bool) ([]byte, error) { + return nil, fmt.Errorf("MsgDeSoPeerHandshakeComplete.ToBytes: Not implemented") } -func (msg *MsgDeSoDonePeer) FromBytes(data []byte) error { - return fmt.Errorf("MsgDeSoDonePeer.FromBytes not implemented") +func (msg *MsgDeSoPeerHandshakeComplete) FromBytes(data []byte) error { + return fmt.Errorf("MsgDeSoPeerHandshakeComplete.FromBytes not implemented") +} + +type ConnectionType uint8 + +const ( + ConnectionTypeOutbound ConnectionType = iota + ConnectionTypeInbound +) + +type Connection interface { + GetConnectionType() ConnectionType + Close() +} + +type MsgDeSoNewConnection struct { + Connection Connection +} + +func (msg *MsgDeSoNewConnection) GetMsgType() MsgType { + return MsgTypeNewConnection +} + +func (msg *MsgDeSoNewConnection) ToBytes(preSignature bool) ([]byte, error) { + return nil, fmt.Errorf("MsgDeSoNewConnection.ToBytes: Not implemented") +} + +func (msg *MsgDeSoNewConnection) FromBytes(data []byte) error { + return fmt.Errorf("MsgDeSoNewConnection.FromBytes not implemented") } // ================================================================== @@ -1513,12 +1545,14 @@ type ServiceFlag uint64 const ( // SFFullNodeDeprecated is deprecated, and set on all nodes by default // now. We basically split it into SFHyperSync and SFArchivalMode. - SFFullNodeDeprecated ServiceFlag = 1 << iota + SFFullNodeDeprecated ServiceFlag = 1 << 0 // SFHyperSync is a flag used to indicate that the peer supports hyper sync. 
- SFHyperSync + SFHyperSync ServiceFlag = 1 << 1 // SFArchivalNode is a flag complementary to SFHyperSync. If node is a hypersync node then // it might not be able to support block sync anymore, unless it has archival mode turned on. - SFArchivalNode + SFArchivalNode ServiceFlag = 1 << 2 + // SFPosValidator is a flag used to indicate that the peer is running a PoS validator. + SFPosValidator ServiceFlag = 1 << 3 ) type MsgDeSoVersion struct { @@ -1864,34 +1898,148 @@ func (msg *MsgDeSoGetAddr) GetMsgType() MsgType { // VERACK Message // ================================================================== -// VERACK messages have no payload. +type VerackVersion uint64 + +func NewVerackVersion(version uint64) VerackVersion { + return VerackVersion(version) +} + +const ( + VerackVersion0 VerackVersion = 0 + VerackVersion1 VerackVersion = 1 +) + +func (vv VerackVersion) ToUint64() uint64 { + return uint64(vv) +} + type MsgDeSoVerack struct { - // A verack message must contain the nonce the peer received in the - // initial version message. This ensures the peer that is communicating - // with us actually controls the address she says she does similar to - // "SYN Cookie" DDOS protection. - Nonce uint64 + // The VerackVersion0 message contains only the NonceReceived field, which is the nonce the sender received in the + // initial version message from the peer. This ensures the sender controls the network address, similarly to the + // "SYN Cookie" DDOS protection. The Version field in the VerackVersion0 message is implied, based on the msg length. + // + // The VerackVersion1 message contains the tuple of which correspond to the + // received and sent nonces in the version message from the sender's perspective, as well as a recent timestamp. + // The VerackVersion1 message is used in context of Proof of Stake, where validators register their BLS public keys + // as part of their validator entry. 
The sender of this message must be a registered validator, and he must attach + // their public key to the message, along with a BLS signature of the tuple. + Version VerackVersion + + NonceReceived uint64 + NonceSent uint64 + TstampMicro uint64 + + PublicKey *bls.PublicKey + Signature *bls.Signature } func (msg *MsgDeSoVerack) ToBytes(preSignature bool) ([]byte, error) { + switch msg.Version { + case VerackVersion0: + return msg.EncodeVerackV0() + case VerackVersion1: + return msg.EncodeVerackV1() + default: + return nil, fmt.Errorf("MsgDeSoVerack.ToBytes: Unrecognized version: %v", msg.Version) + } +} + +func (msg *MsgDeSoVerack) EncodeVerackV0() ([]byte, error) { retBytes := []byte{} // Nonce - retBytes = append(retBytes, UintToBuf(msg.Nonce)...) + retBytes = append(retBytes, UintToBuf(msg.NonceReceived)...) + return retBytes, nil +} + +func (msg *MsgDeSoVerack) EncodeVerackV1() ([]byte, error) { + if msg.PublicKey == nil || msg.Signature == nil { + return nil, fmt.Errorf("MsgDeSoVerack.EncodeVerackV1: PublicKey and Signature must be set for V1 message") + } + + retBytes := []byte{} + + // Version + retBytes = append(retBytes, UintToBuf(msg.Version.ToUint64())...) + // Nonce Received + retBytes = append(retBytes, UintToBuf(msg.NonceReceived)...) + // Nonce Sent + retBytes = append(retBytes, UintToBuf(msg.NonceSent)...) + // Tstamp Micro + retBytes = append(retBytes, UintToBuf(msg.TstampMicro)...) + // PublicKey + retBytes = append(retBytes, EncodeBLSPublicKey(msg.PublicKey)...) + // Signature + retBytes = append(retBytes, EncodeBLSSignature(msg.Signature)...) + return retBytes, nil } func (msg *MsgDeSoVerack) FromBytes(data []byte) error { rr := bytes.NewReader(data) - retMsg := NewMessage(MsgTypeVerack).(*MsgDeSoVerack) - { - nonce, err := ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Nonce") - } - retMsg.Nonce = nonce + // The V0 verack message is determined from the message length. 
The V0 message will only contain the NonceReceived field. + if len(data) <= MaxVarintLen64 { + return msg.FromBytesV0(data) + } + + version, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Version") + } + msg.Version = NewVerackVersion(version) + switch msg.Version { + case VerackVersion0: + return fmt.Errorf("MsgDeSoVerack.FromBytes: Outdated Version=0 used for new encoding") + case VerackVersion1: + return msg.FromBytesV1(data) + default: + return fmt.Errorf("MsgDeSoVerack.FromBytes: Unrecognized version: %v", msg.Version) + } +} + +func (msg *MsgDeSoVerack) FromBytesV0(data []byte) error { + var err error + rr := bytes.NewReader(data) + msg.NonceReceived, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Nonce") + } + return nil +} + +func (msg *MsgDeSoVerack) FromBytesV1(data []byte) error { + var err error + rr := bytes.NewReader(data) + version, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Version") + } + msg.Version = NewVerackVersion(version) + + msg.NonceReceived, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Nonce Received") + } + + msg.NonceSent, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Nonce Sent") + } + + msg.TstampMicro, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Tstamp Micro") + } + + msg.PublicKey, err = DecodeBLSPublicKey(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading PublicKey") + } + + msg.Signature, err = DecodeBLSSignature(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Signature") } - *msg = *retMsg return nil } diff --git a/lib/network_test.go b/lib/network_test.go index 
fbd9932e2..e9d6e6e5e 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -5,6 +5,8 @@ package lib import ( "bytes" "encoding/hex" + "github.com/deso-protocol/core/bls" + "golang.org/x/crypto/sha3" "math/big" "math/rand" "reflect" @@ -68,7 +70,7 @@ func TestVersionConversion(t *testing.T) { "works, add the new field to the test case, and fix this error.") } -func TestVerack(t *testing.T) { +func TestVerackV0(t *testing.T) { assert := assert.New(t) require := require.New(t) _ = assert @@ -78,13 +80,51 @@ func TestVerack(t *testing.T) { var buf bytes.Buffer nonce := uint64(12345678910) - _, err := WriteMessage(&buf, &MsgDeSoVerack{Nonce: nonce}, networkType) + _, err := WriteMessage(&buf, &MsgDeSoVerack{Version: VerackVersion0, NonceReceived: nonce}, networkType) require.NoError(err) verBytes := buf.Bytes() testMsg, _, err := ReadMessage(bytes.NewReader(verBytes), networkType) require.NoError(err) - require.Equal(&MsgDeSoVerack{Nonce: nonce}, testMsg) + require.Equal(&MsgDeSoVerack{Version: VerackVersion0, NonceReceived: nonce}, testMsg) +} + +func TestVerackV1(t *testing.T) { + require := require.New(t) + + networkType := NetworkType_MAINNET + var buf bytes.Buffer + + nonceReceived := uint64(12345678910) + nonceSent := nonceReceived + 1 + tstamp := uint64(2345678910) + // First, test that nil public key and signature are not allowed. + msg := &MsgDeSoVerack{ + Version: VerackVersion1, + NonceReceived: nonceReceived, + NonceSent: nonceSent, + TstampMicro: tstamp, + PublicKey: nil, + Signature: nil, + } + _, err := WriteMessage(&buf, msg, networkType) + require.Error(err) + payload := append(UintToBuf(nonceReceived), UintToBuf(nonceSent)...) + payload = append(payload, UintToBuf(tstamp)...) 
+ hash := sha3.Sum256(payload) + + priv, err := bls.NewPrivateKey() + require.NoError(err) + msg.PublicKey = priv.PublicKey() + msg.Signature, err = priv.Sign(hash[:]) + require.NoError(err) + _, err = WriteMessage(&buf, msg, networkType) + require.NoError(err) + + verBytes := buf.Bytes() + testMsg, _, err := ReadMessage(bytes.NewReader(verBytes), networkType) + require.NoError(err) + require.Equal(msg, testMsg) } // Creates fully formatted a PoS block header with random signatures diff --git a/lib/peer.go b/lib/peer.go index 654a62ada..0eea6317a 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -1153,7 +1153,7 @@ out: // Measure the ping time when we receive a pong. pp.HandlePongMsg(msg) - case *MsgDeSoNewPeer, *MsgDeSoDonePeer, *MsgDeSoQuit: + case *MsgDeSoDisconnectedPeer, *MsgDeSoQuit: // We should never receive control messages from a Peer. Disconnect if we do. glog.Errorf("Peer.inHandler: Received control message of type %v from "+ @@ -1284,7 +1284,7 @@ func (pp *Peer) ReadDeSoMessage() (DeSoMessage, error) { func (pp *Peer) NewVersionMessage(params *DeSoParams) *MsgDeSoVersion { ver := NewMessage(MsgTypeVersion).(*MsgDeSoVersion) - ver.Version = params.ProtocolVersion + ver.Version = params.ProtocolVersion.ToUint64() ver.TstampSecs = time.Now().Unix() // We use an int64 instead of a uint64 for convenience but // this should be fine since we're just looking to generate a @@ -1324,7 +1324,7 @@ func (pp *Peer) sendVerack() error { verackMsg := NewMessage(MsgTypeVerack) // Include the nonce we received in the peer's version message so // we can validate that we actually control our IP address. 
- verackMsg.(*MsgDeSoVerack).Nonce = pp.VersionNonceReceived + verackMsg.(*MsgDeSoVerack).NonceReceived = pp.VersionNonceReceived if err := pp.WriteDeSoMessage(verackMsg); err != nil { return errors.Wrap(err, "sendVerack: ") } @@ -1343,10 +1343,10 @@ func (pp *Peer) readVerack() error { msg.GetMsgType().String()) } verackMsg := msg.(*MsgDeSoVerack) - if verackMsg.Nonce != pp.VersionNonceSent { + if verackMsg.NonceReceived != pp.VersionNonceSent { return fmt.Errorf( "readVerack: Received VERACK message with nonce %d but expected nonce %d", - verackMsg.Nonce, pp.VersionNonceSent) + verackMsg.NonceReceived, pp.VersionNonceSent) } return nil @@ -1407,10 +1407,10 @@ func (pp *Peer) readVersion() error { pp.serviceFlags = verMsg.Services pp.advertisedProtocolVersion = verMsg.Version negotiatedVersion := pp.Params.ProtocolVersion - if pp.advertisedProtocolVersion < pp.Params.ProtocolVersion { - negotiatedVersion = pp.advertisedProtocolVersion + if pp.advertisedProtocolVersion < pp.Params.ProtocolVersion.ToUint64() { + negotiatedVersion = NewProtocolVersionType(pp.advertisedProtocolVersion) } - pp.negotiatedProtocolVersion = negotiatedVersion + pp.negotiatedProtocolVersion = negotiatedVersion.ToUint64() pp.PeerInfoMtx.Unlock() // Set the stats-related fields. diff --git a/lib/server.go b/lib/server.go index 95ef2be74..4260f65fb 100644 --- a/lib/server.go +++ b/lib/server.go @@ -703,11 +703,13 @@ func (srv *Server) GetSnapshot(pp *Peer) { } // If operationQueueSemaphore is full, we are already storing too many chunks in memory. Block the thread while // we wait for the queue to clear up. - srv.snapshot.operationQueueSemaphore <- struct{}{} - // Now send a message to the peer to fetch the snapshot chunk. - pp.AddDeSoMessage(&MsgDeSoGetSnapshot{ - SnapshotStartKey: lastReceivedKey, - }, false) + go func() { + srv.snapshot.operationQueueSemaphore <- struct{}{} + // Now send a message to the peer to fetch the snapshot chunk. 
+ pp.AddDeSoMessage(&MsgDeSoGetSnapshot{ + SnapshotStartKey: lastReceivedKey, + }, false) + }() glog.V(2).Infof("Server.GetSnapshot: Sending a GetSnapshot message to peer (%v) "+ "with Prefix (%v) and SnapshotStartEntry (%v)", pp, prefix, lastReceivedKey) @@ -1144,6 +1146,8 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { "<%v>, Last entry: <%v>), (number of entries: %v), metadata (%v), and isEmpty (%v), from Peer %v", msg.SnapshotChunk[0].Key, msg.SnapshotChunk[len(msg.SnapshotChunk)-1].Key, len(msg.SnapshotChunk), msg.SnapshotMetadata, msg.SnapshotChunk[0].IsEmpty(), pp))) + // Free up a slot in the operationQueueSemaphore, now that a chunk has been processed. + srv.snapshot.FreeOperationQueueSemaphore() // There is a possibility that during hypersync the network entered a new snapshot epoch. We handle this case by // restarting the node and starting hypersync from scratch. @@ -2217,9 +2221,7 @@ func (srv *Server) _handleGetAddrMessage(pp *Peer, msg *MsgDeSoGetAddr) { func (srv *Server) _handleControlMessages(serverMessage *ServerMessage) (_shouldQuit bool) { switch serverMessage.Msg.(type) { // Control messages used internally to signal to the server. - case *MsgDeSoNewPeer: - srv._handleNewPeer(serverMessage.Peer) - case *MsgDeSoDonePeer: + case *MsgDeSoDisconnectedPeer: srv._handleDonePeer(serverMessage.Peer) case *MsgDeSoQuit: return true diff --git a/lib/snapshot.go b/lib/snapshot.go index 9d9eda085..6ab97631a 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -471,8 +471,6 @@ func (snap *Snapshot) Run() { operation.blockHeight); err != nil { glog.Errorf("Snapshot.Run: Problem adding snapshot chunk to the db") } - // Free up a slot in the operationQueueSemaphore, now that a chunk has been processed. 
- <-snap.operationQueueSemaphore case SnapshotOperationChecksumAdd: if err := snap.Checksum.AddOrRemoveBytesWithMigrations(operation.checksumKey, operation.checksumValue, @@ -1348,6 +1346,12 @@ func (snap *Snapshot) SetSnapshotChunk(mainDb *badger.DB, mainDbMutex *deadlock. return nil } +func (snap *Snapshot) FreeOperationQueueSemaphore() { + if len(snap.operationQueueSemaphore) > 0 { + <-snap.operationQueueSemaphore + } +} + // ------------------------------------------------------------------------------------- // StateChecksum // ------------------------------------------------------------------------------------- From a7fd87dbf23c40b2be1a1ef0024cd3bfb396ca29 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 28 Dec 2023 09:39:20 -0500 Subject: [PATCH 328/762] Refactor Block Integrity Checks to Support Block Headers (#891) * Refactor Block Integrity Checks to Support Block Headers * Address Nina's comment --- lib/pos_blockchain.go | 192 +++++++++++++++++++++---------------- lib/pos_blockchain_test.go | 23 ++--- 2 files changed, 121 insertions(+), 94 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index eaf724925..21457bff4 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -575,23 +575,43 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS(blockHash *BlockHash) (* return bc.validateAndIndexBlockPoS(block) } -// isValidBlockPoS performs all basic validations on a block as it relates to -// the Blockchain struct. Any error resulting from this function implies that -// the block is invalid. +// isValidBlockPoS performs all basic block integrity checks. Any error +// resulting from this function implies that the block is invalid. 
func (bc *Blockchain) isValidBlockPoS(block *MsgDeSoBlock) error { // Surface Level validation of the block if err := bc.isProperlyFormedBlockPoS(block); err != nil { return err } - if err := bc.isBlockTimestampValidRelativeToParentPoS(block); err != nil { + if err := bc.isBlockTimestampValidRelativeToParentPoS(block.Header); err != nil { return err } - // Validate Block Height - if err := bc.hasValidBlockHeightPoS(block); err != nil { + // Validate block height + if err := bc.hasValidBlockHeightPoS(block.Header); err != nil { return err } - // Validate View - if err := bc.hasValidBlockViewPoS(block); err != nil { + // Validate view + if err := bc.hasValidBlockViewPoS(block.Header); err != nil { + return err + } + return nil +} + +// isValidBlockHeaderPoS performs all basic block header integrity checks. Any +// error resulting from this function implies that the block header is invalid. +func (bc *Blockchain) isValidBlockHeaderPoS(header *MsgDeSoHeader) error { + // Surface Level validation of the block header + if err := bc.isProperlyFormedBlockHeaderPoS(header); err != nil { + return err + } + if err := bc.isBlockTimestampValidRelativeToParentPoS(header); err != nil { + return err + } + // Validate block height + if err := bc.hasValidBlockHeightPoS(header); err != nil { + return err + } + // Validate view + if err := bc.hasValidBlockViewPoS(header); err != nil { return err } return nil @@ -599,79 +619,105 @@ func (bc *Blockchain) isValidBlockPoS(block *MsgDeSoBlock) error { // isBlockTimestampValidRelativeToParentPoS validates that the block's timestamp is // greater than its parent's timestamp. -func (bc *Blockchain) isBlockTimestampValidRelativeToParentPoS(block *MsgDeSoBlock) error { +func (bc *Blockchain) isBlockTimestampValidRelativeToParentPoS(header *MsgDeSoHeader) error { // Validate that the timestamp is not less than its parent. 
- parentBlock, exists := bc.blockIndexByHash[*block.Header.PrevBlockHash] + parentBlockNode, exists := bc.blockIndexByHash[*header.PrevBlockHash] if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. return RuleErrorMissingParentBlock } - if block.Header.TstampNanoSecs < parentBlock.Header.TstampNanoSecs { + if header.TstampNanoSecs < parentBlockNode.Header.TstampNanoSecs { return RuleErrorPoSBlockTstampNanoSecsTooOld } return nil } -// isProperlyFormedBlockPoS validates the block at a surface level. It checks -// that the timestamp is valid, that the version of the header is valid, -// and other general integrity checks (such as not malformed). +// isProperlyFormedBlockPoS validates the block at a surface level and makes +// sure that all fields are populated in a valid manner. It does not verify +// signatures nor validate the blockchain state resulting from the block. func (bc *Blockchain) isProperlyFormedBlockPoS(block *MsgDeSoBlock) error { - // First make sure we have a non-nil header - if block.Header == nil { - return RuleErrorNilBlockHeader + // First, make sure we have a non-nil block + if block == nil { + return RuleErrorNilBlock } - // Make sure we have a prevBlockHash - if block.Header.PrevBlockHash == nil { - return RuleErrorNilPrevBlockHash + // Make sure the header is properly formed by itself + if err := bc.isProperlyFormedBlockHeaderPoS(block.Header); err != nil { + return err } - // Timestamp validation + // If the header is properly formed, we can check the rest of the block. 
+ + // All blocks must have at least one txn + if len(block.Txns) == 0 { + return RuleErrorBlockWithNoTxns + } + + // Make sure TxnConnectStatusByIndex is non-nil + if block.TxnConnectStatusByIndex == nil { + return RuleErrorNilTxnConnectStatusByIndex + } + + // Make sure the TxnConnectStatusByIndex matches the TxnConnectStatusByIndexHash + if !(HashBitset(block.TxnConnectStatusByIndex).IsEqual(block.Header.TxnConnectStatusByIndexHash)) { + return RuleErrorTxnConnectStatusByIndexHashMismatch + } + + // Make sure that the first txn in each block is a block reward txn. + if block.Txns[0].TxnMeta.GetTxnType() != TxnTypeBlockReward { + return RuleErrorBlockDoesNotStartWithRewardTxn + } + + // We always need to check the merkle root. + if block.Header.TransactionMerkleRoot == nil { + return RuleErrorNilMerkleRoot + } + computedMerkleRoot, _, err := ComputeMerkleRoot(block.Txns) + if err != nil { + return errors.Wrapf(err, "isProperlyFormedBlockPoS: Problem computing merkle root") + } + if !block.Header.TransactionMerkleRoot.IsEqual(computedMerkleRoot) { + return RuleErrorInvalidMerkleRoot + } + + return nil +} + +// isProperlyFormedBlockHeaderPoS validates the block header based on the header's +// contents alone, and makes sure that all fields are populated in a valid manner. +// It does not verify signatures in the header, nor cross-validate the block with +// past blocks in the block index. +func (bc *Blockchain) isProperlyFormedBlockHeaderPoS(header *MsgDeSoHeader) error { // First make sure we have a non-nil header - if block.Header == nil { + if header == nil { return RuleErrorNilBlockHeader } // Make sure we have a prevBlockHash - if block.Header.PrevBlockHash == nil { + if header.PrevBlockHash == nil { return RuleErrorNilPrevBlockHash } // Timestamp validation // TODO: Add support for putting the drift into global params. 
- if block.Header.TstampNanoSecs > uint64(time.Now().UnixNano())+bc.params.DefaultBlockTimestampDriftNanoSecs { + if header.TstampNanoSecs > uint64(time.Now().UnixNano())+bc.params.DefaultBlockTimestampDriftNanoSecs { return RuleErrorPoSBlockTstampNanoSecsInFuture } // Header validation - if block.Header.Version != HeaderVersion2 { + if header.Version != HeaderVersion2 { return RuleErrorInvalidPoSBlockHeaderVersion } - // Malformed block checks - // All blocks must have at least one txn - if len(block.Txns) == 0 { - return RuleErrorBlockWithNoTxns - } - // Must have non-nil TxnConnectStatusByIndex - if block.TxnConnectStatusByIndex == nil { - return RuleErrorNilTxnConnectStatusByIndex - } - // Must have TxnConnectStatusByIndexHash - if block.Header.TxnConnectStatusByIndexHash == nil { + if header.TxnConnectStatusByIndexHash == nil { return RuleErrorNilTxnConnectStatusByIndexHash } - // Make sure the TxnConnectStatusByIndex matches the TxnConnectStatusByIndexHash - if !(HashBitset(block.TxnConnectStatusByIndex).IsEqual(block.Header.TxnConnectStatusByIndexHash)) { - return RuleErrorTxnConnectStatusByIndexHashMismatch - } - // Require header to have either vote or timeout QC - isTimeoutQCEmpty := block.Header.ValidatorsTimeoutAggregateQC.isEmpty() - isVoteQCEmpty := block.Header.ValidatorsVoteQC.isEmpty() + isTimeoutQCEmpty := header.ValidatorsTimeoutAggregateQC.isEmpty() + isVoteQCEmpty := header.ValidatorsVoteQC.isEmpty() if isTimeoutQCEmpty && isVoteQCEmpty { return RuleErrorNoTimeoutOrVoteQC } @@ -680,83 +726,62 @@ func (bc *Blockchain) isProperlyFormedBlockPoS(block *MsgDeSoBlock) error { return RuleErrorBothTimeoutAndVoteQC } - if block.Txns[0].TxnMeta.GetTxnType() != TxnTypeBlockReward { - return RuleErrorBlockDoesNotStartWithRewardTxn - } - - if block.Header.ProposerVotingPublicKey.IsEmpty() { + if header.ProposerVotingPublicKey.IsEmpty() { return RuleErrorInvalidProposerVotingPublicKey } - if block.Header.ProposerPublicKey == nil || 
block.Header.ProposerPublicKey.IsZeroPublicKey() { + if header.ProposerPublicKey == nil || header.ProposerPublicKey.IsZeroPublicKey() { return RuleErrorInvalidProposerPublicKey } - if block.Header.ProposerRandomSeedSignature.IsEmpty() { + if header.ProposerRandomSeedSignature.IsEmpty() { return RuleErrorInvalidProposerRandomSeedSignature } - merkleRoot := block.Header.TransactionMerkleRoot - - // We always need to check the merkle root. - if merkleRoot == nil { + if header.TransactionMerkleRoot == nil { return RuleErrorNilMerkleRoot } - computedMerkleRoot, _, err := ComputeMerkleRoot(block.Txns) - if err != nil { - return errors.Wrapf(err, "isProperlyFormedBlockPoS: Problem computing merkle root") - } - if !merkleRoot.IsEqual(computedMerkleRoot) { - return RuleErrorInvalidMerkleRoot - } // If a block has a vote QC, then the Header's proposed in view must be exactly one // greater than the QC's proposed in view. - if !isVoteQCEmpty && block.Header.ProposedInView != block.Header.ValidatorsVoteQC.ProposedInView+1 { + if !isVoteQCEmpty && header.ProposedInView != header.ValidatorsVoteQC.ProposedInView+1 { return RuleErrorPoSVoteBlockViewNotOneGreaterThanValidatorsVoteQCView } // If a block has a timeout QC, then the Header's proposed in view be must exactly one // greater than the QC's timed out view. - if !isTimeoutQCEmpty && block.Header.ProposedInView != block.Header.ValidatorsTimeoutAggregateQC.TimedOutView+1 { + if !isTimeoutQCEmpty && header.ProposedInView != header.ValidatorsTimeoutAggregateQC.TimedOutView+1 { return RuleErrorPoSTimeoutBlockViewNotOneGreaterThanValidatorsTimeoutQCView } + return nil } -// hasValidBlockHeightPoS validates the block height for a given block. First, +// hasValidBlockHeightPoS validates the block height for a given block header. First, // it checks that we've passed the PoS cutover fork height. Then it checks // that this block height is exactly one greater than its parent's block height. 
-func (bc *Blockchain) hasValidBlockHeightPoS(block *MsgDeSoBlock) error { - blockHeight := block.Header.Height +func (bc *Blockchain) hasValidBlockHeightPoS(header *MsgDeSoHeader) error { + blockHeight := header.Height if blockHeight < uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { return RuleErrorPoSBlockBeforeCutoverHeight } // Validate that the block height is exactly one greater than its parent. - parentBlock, exists := bc.blockIndexByHash[*block.Header.PrevBlockHash] + parentBlockNode, exists := bc.blockIndexByHash[*header.PrevBlockHash] if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. return RuleErrorMissingParentBlock } - if block.Header.Height != parentBlock.Header.Height+1 { + if header.Height != parentBlockNode.Header.Height+1 { return RuleErrorInvalidPoSBlockHeight } return nil } -// hasValidBlockViewPoS validates the view for a given block. First, it checks that -// the view is greater than the latest committed block view. If not, -// we return an error indicating that we'll never accept this block. Next, -// it checks that the view is less than or equal to its parent. -// If not, we return an error indicating that we'll want to add this block as an -// orphan. Then it will check if that the view is exactly one greater than the -// latest uncommitted block if we have a regular vote QC. If this block has a -// timeout QC, it will check that the view is at least greater than the latest -// uncommitted block's view + 1. -func (bc *Blockchain) hasValidBlockViewPoS(block *MsgDeSoBlock) error { +// hasValidBlockViewPoS validates the view for a given block header +func (bc *Blockchain) hasValidBlockViewPoS(header *MsgDeSoHeader) error { // Validate that the view is greater than the latest uncommitted block. 
- parentBlock, exists := bc.blockIndexByHash[*block.Header.PrevBlockHash] + parentBlockNode, exists := bc.blockIndexByHash[*header.PrevBlockHash] if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. @@ -764,19 +789,19 @@ func (bc *Blockchain) hasValidBlockViewPoS(block *MsgDeSoBlock) error { } // If the parent block was a PoW block, we can't validate this block's view // in comparison. - if !blockNodeProofOfStakeCutoverMigrationTriggered(parentBlock.Height) { + if !blockNodeProofOfStakeCutoverMigrationTriggered(parentBlockNode.Height) { return nil } // If our current block has a vote QC, then we need to validate that the // view is exactly one greater than the latest uncommitted block. - if block.Header.ValidatorsTimeoutAggregateQC.isEmpty() { - if block.Header.ProposedInView != parentBlock.Header.ProposedInView+1 { + if header.ValidatorsTimeoutAggregateQC.isEmpty() { + if header.ProposedInView != parentBlockNode.Header.ProposedInView+1 { return RuleErrorPoSVoteBlockViewNotOneGreaterThanParent } } else { // If our current block has a timeout QC, then we need to validate that the // view is strictly greater than the latest uncommitted block's view. 
- if block.Header.ProposedInView <= parentBlock.Header.ProposedInView { + if header.ProposedInView <= parentBlockNode.Header.ProposedInView { return RuleErrorPoSTimeoutBlockViewNotGreaterThanParent } } @@ -1454,6 +1479,7 @@ func (bc *Blockchain) getMaxSequentialBlockHeightAfter(startingHeight uint64) ui } const ( + RuleErrorNilBlock RuleError = "RuleErrorNilBlock" RuleErrorNilBlockHeader RuleError = "RuleErrorNilBlockHeader" RuleErrorNilPrevBlockHash RuleError = "RuleErrorNilPrevBlockHash" RuleErrorPoSBlockTstampNanoSecsTooOld RuleError = "RuleErrorPoSBlockTstampNanoSecsTooOld" diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 25fcfc437..04b420bd5 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -11,6 +11,7 @@ import ( "time" "crypto/sha256" + "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/collections/bitset" @@ -235,7 +236,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Timestamp validations // Block timestamp must be greater than the previous block timestamp block.Header.TstampNanoSecs = bc.BlockTip().Header.GetTstampSecs() - 1 - err = bc.isBlockTimestampValidRelativeToParentPoS(block) + err = bc.isBlockTimestampValidRelativeToParentPoS(block.Header) require.Equal(t, err, RuleErrorPoSBlockTstampNanoSecsTooOld) // Block timestamps can't be in the future. @@ -261,7 +262,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Parent must exist in the block index. 
block.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) - err = bc.isBlockTimestampValidRelativeToParentPoS(block) + err = bc.isBlockTimestampValidRelativeToParentPoS(block.Header) require.Equal(t, err, RuleErrorMissingParentBlock) // Nil block header not allowed @@ -322,22 +323,22 @@ func TestHasValidBlockHeight(t *testing.T) { // validate that we've cutover to PoS bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 3 - err = bc.hasValidBlockHeightPoS(block) + err = bc.hasValidBlockHeightPoS(block.Header) require.Equal(t, err, RuleErrorPoSBlockBeforeCutoverHeight) // Update the fork height bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 0 - err = bc.hasValidBlockHeightPoS(block) + err = bc.hasValidBlockHeightPoS(block.Header) require.Nil(t, err) block.Header.Height = 1 - err = bc.hasValidBlockHeightPoS(block) + err = bc.hasValidBlockHeightPoS(block.Header) require.Equal(t, err, RuleErrorInvalidPoSBlockHeight) block.Header.Height = 2 bc.blockIndexByHash = map[BlockHash]*BlockNode{} - err = bc.hasValidBlockHeightPoS(block) + err = bc.hasValidBlockHeightPoS(block.Header) require.Equal(t, err, RuleErrorMissingParentBlock) } @@ -553,29 +554,29 @@ func TestHasValidBlockViewPoS(t *testing.T) { block.Header.ProposedInView = 2 // Blocks with timeout QCs must have a view strictly greater than the parent. - err = bc.hasValidBlockViewPoS(block) + err = bc.hasValidBlockViewPoS(block.Header) require.Equal(t, err, RuleErrorPoSTimeoutBlockViewNotGreaterThanParent) // Any arbitrary number GREATER than the parent's view is valid. 
block.Header.ProposedInView = 10 - err = bc.hasValidBlockViewPoS(block) + err = bc.hasValidBlockViewPoS(block.Header) require.Nil(t, err) // Now we set the timeout QC to nil and provide a vote QC, with height = 2 block.Header.ValidatorsTimeoutAggregateQC = nil block.Header.ValidatorsVoteQC = voteQC block.Header.ProposedInView = 2 - err = bc.hasValidBlockViewPoS(block) + err = bc.hasValidBlockViewPoS(block.Header) require.Equal(t, err, RuleErrorPoSVoteBlockViewNotOneGreaterThanParent) // An arbitrary number greater than its parents should fail. block.Header.ProposedInView = 10 - err = bc.hasValidBlockViewPoS(block) + err = bc.hasValidBlockViewPoS(block.Header) require.Equal(t, err, RuleErrorPoSVoteBlockViewNotOneGreaterThanParent) // Exactly one great w/ vote QC should pass. block.Header.ProposedInView = 3 - err = bc.hasValidBlockViewPoS(block) + err = bc.hasValidBlockViewPoS(block.Header) require.Nil(t, err) } From 2ad87102fc645918e8516989cb66046df407e4c2 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 28 Dec 2023 20:15:42 -0500 Subject: [PATCH 329/762] Support HeaderValidateFailed and HeaderValidated Statuses in BlockNode Helpers (#893) * Support HeaderValidateFailed and HeaderValidated Statuses in BlockNode Helpers * Address Nina's comment --- lib/blockchain.go | 10 ++++++ lib/pos_blockchain.go | 73 ++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 79 insertions(+), 4 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 44d0358ce..8a5b1a78f 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -81,6 +81,16 @@ const ( StatusBlockCommitted = 1 << 8 // Committed means that the block has been committed to the blockchain according to the Fast HotStuff commit rule. Only set on blocks after the cutover for PoS ) +// IsHeaderValidated returns true if a BlockNode has passed all the block header integrity checks. 
+func (nn *BlockNode) IsHeaderValidated() bool { + return nn.Status&StatusHeaderValidated != 0 +} + +// IsHeaderValidateFailed returns true if a BlockNode has failed any block header integrity checks. +func (nn *BlockNode) IsHeaderValidateFailed() bool { + return nn.Status&StatusHeaderValidateFailed != 0 +} + // IsStored returns true if the BlockNode has been added to the blockIndexByHash and stored in the DB. func (nn *BlockNode) IsStored() bool { return nn.Status&StatusBlockStored != 0 diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 21457bff4..fdf1abfab 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1021,6 +1021,52 @@ func (bc *Blockchain) getOrCreateBlockNodeFromBlockIndex(block *MsgDeSoBlock) (* return newBlockNode, nil } +func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}) + if err != nil { + return nil, errors.Wrapf(err, "storeValidatedHeaderInBlockIndex: Problem getting or creating block node") + } + // If the block is validated, then this is a no-op. + if blockNode.IsHeaderValidated() { + return blockNode, nil + } + // We should throw an error if the BlockNode has failed header validation + if blockNode.IsHeaderValidateFailed() { + return nil, errors.New( + "storeValidatedHeaderInBlockIndex: can't set block node to header validated after it's already been set to validate failed", + ) + } + blockNode.Status |= StatusHeaderValidated + // If the DB update fails, then we should return an error. 
+ if err = bc.upsertBlockNodeToDB(blockNode); err != nil { + return nil, errors.Wrapf(err, "storeValidatedHeaderInBlockIndex: Problem upserting block node to DB") + } + return blockNode, nil +} + +func (bc *Blockchain) storeValidateFailedHeaderInBlockIndex(header *MsgDeSoHeader) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}) + if err != nil { + return nil, errors.Wrapf(err, "storeValidateFailedHeaderInBlockIndex: Problem getting or creating block node") + } + // If the block has the header validate failed status, then this is a no-op. + if blockNode.IsHeaderValidateFailed() { + return blockNode, nil + } + // We should throw an error if the BlockNode has already been validated. + if blockNode.IsHeaderValidated() { + return nil, errors.New( + "storeValidatedHeaderInBlockIndex: can't set block node to header validate failed after it's already been set to validated", + ) + } + blockNode.Status |= StatusHeaderValidated + // If the DB update fails, then we should return an error. + if err = bc.upsertBlockNodeToDB(blockNode); err != nil { + return nil, errors.Wrapf(err, "storeValidateFailedHeaderInBlockIndex: Problem upserting block node to DB") + } + return blockNode, nil +} + // storeBlockInBlockIndex upserts the blocks into the in-memory block index and updates its status to // StatusBlockStored. It also writes the block to the block index in badger // by calling upsertBlockAndBlockNodeToDB. @@ -1121,10 +1167,8 @@ func (bc *Blockchain) upsertBlockAndBlockNodeToDB(block *MsgDeSoBlock, blockNode // Store the new block's node in our node index in the db under the // -> // index. 
- if innerErr := PutHeightHashToNodeInfoWithTxn( - txn, bc.snapshot, blockNode, false /*bitcoinNodes*/, bc.eventManager); innerErr != nil { - return errors.Wrapf(innerErr, - "upsertBlockAndBlockNodeToDB: Problem calling PutHeightHashToNodeInfo before validation") + if innerErr := bc.upsertBlockNodeToDBWithTxn(txn, blockNode); innerErr != nil { + return errors.Wrapf(innerErr, "upsertBlockAndBlockNodeToDB: ") } // Notice we don't call PutBestHash or PutUtxoOperationsForBlockWithTxn because we're not @@ -1138,6 +1182,27 @@ func (bc *Blockchain) upsertBlockAndBlockNodeToDB(block *MsgDeSoBlock, blockNode return nil } +// upsertBlockNodeToDB is a simpler wrapper that calls upsertBlockNodeToDBWithTxn with a new transaction. +func (bc *Blockchain) upsertBlockNodeToDB(blockNode *BlockNode) error { + return bc.db.Update(func(txn *badger.Txn) error { + return bc.upsertBlockNodeToDBWithTxn(txn, blockNode) + }) +} + +// upsertBlockNodeToDBWithTxn writes the BlockNode to the blockIndexByHash in badger. +func (bc *Blockchain) upsertBlockNodeToDBWithTxn(txn *badger.Txn, blockNode *BlockNode) error { + // Store the new block's node in our node index in the db under the + // -> + // index. + err := PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, blockNode, false /*bitcoinNodes*/, bc.eventManager) + if err != nil { + return errors.Wrapf(err, + "upsertBlockNodeToDBWithTxn: Problem calling PutHeightHashToNodeInfo before validation") + } + + return nil +} + // tryApplyNewTip attempts to apply the new tip to the best chain. It will do the following: // 1. Check if we should perform a reorg. If so, it will handle the reorg. If reorging causes an error, // return false and error. 
From b2b113c7f379c1108de0303cfda3afb00a73ee6c Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Fri, 29 Dec 2023 23:58:18 +0100 Subject: [PATCH 330/762] PoS Validator Connect/Disconnect Flow (#768) * PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment * Another review round * gofmt * Comment change --- lib/connection_manager.go | 577 ++++++++++++--------------------- lib/network_connection.go | 212 ++++++++++++ lib/network_connection_test.go | 167 ++++++++++ lib/peer.go | 32 +- 4 files changed, 603 insertions(+), 385 deletions(-) create mode 100644 lib/network_connection.go create mode 100644 lib/network_connection_test.go diff --git a/lib/connection_manager.go b/lib/connection_manager.go index 4cefe9527..38924bdf9 100644 --- a/lib/connection_manager.go +++ b/lib/connection_manager.go @@ -5,6 +5,7 @@ import ( "math" "net" "strconv" + "sync" "sync/atomic" "time" @@ -12,7 +13,6 @@ import ( chainlib 
"github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/wire" "github.com/decred/dcrd/lru" - "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" "github.com/pkg/errors" ) @@ -80,17 +80,26 @@ type ConnectionManager struct { // concurrently by many goroutines to figure out if outbound connections // should be made to particular addresses. - mtxOutboundConnIPGroups deadlock.Mutex + mtxOutboundConnIPGroups sync.Mutex outboundConnIPGroups map[string]int // The peer maps map peer ID to peers for various types of peer connections. // // A persistent peer is typically one we got through a commandline argument. // The reason it's called persistent is because we maintain a connection to // it, and retry the connection if it fails. - mtxPeerMaps deadlock.RWMutex + mtxPeerMaps sync.RWMutex persistentPeers map[uint64]*Peer outboundPeers map[uint64]*Peer inboundPeers map[uint64]*Peer + connectedPeers map[uint64]*Peer + + mtxConnectionAttempts sync.Mutex + // outboundConnectionAttempts keeps track of the outbound connections, mapping attemptId [uint64] -> connection attempt. + outboundConnectionAttempts map[uint64]*OutboundConnectionAttempt + // outboundConnectionChan is used to signal successful outbound connections to the connection manager. + outboundConnectionChan chan *outboundConnection + // inboundConnectionChan is used to signal successful inbound connections to the connection manager. + inboundConnectionChan chan *inboundConnection // Track the number of outbound peers we have so that this value can // be accessed concurrently when deciding whether or not to add more // outbound peers. @@ -102,11 +111,9 @@ type ConnectionManager struct { // avoid choosing them in the address manager. We need a mutex on this // guy because many goroutines will be querying the address manager // at once. - mtxConnectedOutboundAddrs deadlock.RWMutex - connectedOutboundAddrs map[string]bool - - // Used to set peer ids. Must be incremented atomically. 
- peerIndex uint64 + mtxAddrsMaps sync.RWMutex + connectedOutboundAddrs map[string]bool + attemptedOutboundAddrs map[string]bool serverMessageQueue chan *ServerMessage @@ -156,15 +163,19 @@ func NewConnectionManager( //newestBlock: _newestBlock, // Initialize the peer data structures. - outboundConnIPGroups: make(map[string]int), - persistentPeers: make(map[uint64]*Peer), - outboundPeers: make(map[uint64]*Peer), - inboundPeers: make(map[uint64]*Peer), - connectedOutboundAddrs: make(map[string]bool), + outboundConnIPGroups: make(map[string]int), + persistentPeers: make(map[uint64]*Peer), + outboundPeers: make(map[uint64]*Peer), + inboundPeers: make(map[uint64]*Peer), + connectedPeers: make(map[uint64]*Peer), + outboundConnectionAttempts: make(map[uint64]*OutboundConnectionAttempt), + connectedOutboundAddrs: make(map[string]bool), + attemptedOutboundAddrs: make(map[string]bool), // Initialize the channels. - newPeerChan: make(chan *Peer), - donePeerChan: make(chan *Peer), + newPeerChan: make(chan *Peer, 100), + donePeerChan: make(chan *Peer, 100), + outboundConnectionChan: make(chan *outboundConnection, 100), targetOutboundPeers: _targetOutboundPeers, maxInboundPeers: _maxInboundPeers, @@ -177,13 +188,8 @@ func NewConnectionManager( } } -func (cmgr *ConnectionManager) GetAddrManager() *addrmgr.AddrManager { - return cmgr.AddrMgr -} - -// Check if the address passed shares a group with any addresses already in our -// data structures. -func (cmgr *ConnectionManager) isRedundantGroupKey(na *wire.NetAddress) bool { +// Check if the address passed shares a group with any addresses already in our data structures. 
+func (cmgr *ConnectionManager) IsFromRedundantOutboundIPAddress(na *wire.NetAddress) bool { groupKey := addrmgr.GroupKey(na) cmgr.mtxOutboundConnIPGroups.Lock() @@ -191,7 +197,7 @@ func (cmgr *ConnectionManager) isRedundantGroupKey(na *wire.NetAddress) bool { cmgr.mtxOutboundConnIPGroups.Unlock() if numGroupsForKey != 0 && numGroupsForKey != 1 { - glog.V(2).Infof("isRedundantGroupKey: Found numGroupsForKey != (0 or 1). Is (%d) "+ + glog.V(2).Infof("IsFromRedundantOutboundIPAddress: Found numGroupsForKey != (0 or 1). Is (%d) "+ "instead for addr (%s) and group key (%s). This "+ "should never happen.", numGroupsForKey, na.IP.String(), groupKey) } @@ -220,25 +226,25 @@ func (cmgr *ConnectionManager) subFromGroupKey(na *wire.NetAddress) { func (cmgr *ConnectionManager) getRandomAddr() *wire.NetAddress { for tries := 0; tries < 100; tries++ { - // Lock the address map since multiple threads will be trying to read - // and modify it at the same time. - cmgr.mtxConnectedOutboundAddrs.RLock() addr := cmgr.AddrMgr.GetAddress() - cmgr.mtxConnectedOutboundAddrs.RUnlock() - if addr == nil { glog.V(2).Infof("ConnectionManager.getRandomAddr: addr from GetAddressWithExclusions was nil") break } - if cmgr.connectedOutboundAddrs[addrmgr.NetAddressKey(addr.NetAddress())] { + // Lock the address map since multiple threads will be trying to read + // and modify it at the same time. + cmgr.mtxAddrsMaps.RLock() + ok := cmgr.connectedOutboundAddrs[addrmgr.NetAddressKey(addr.NetAddress())] + cmgr.mtxAddrsMaps.RUnlock() + if ok { glog.V(2).Infof("ConnectionManager.getRandomAddr: Not choosing already connected address %v:%v", addr.NetAddress().IP, addr.NetAddress().Port) continue } // We can only have one outbound address per /16. This is similar to // Bitcoin and we do it to prevent Sybil attacks. 
- if cmgr.isRedundantGroupKey(addr.NetAddress()) { + if cmgr.IsFromRedundantOutboundIPAddress(addr.NetAddress()) { glog.V(2).Infof("ConnectionManager.getRandomAddr: Not choosing address due to redundant group key %v:%v", addr.NetAddress().IP, addr.NetAddress().Port) continue } @@ -252,14 +258,13 @@ func (cmgr *ConnectionManager) getRandomAddr() *wire.NetAddress { return nil } -func _delayRetry(retryCount int, persistentAddrForLogging *wire.NetAddress) { +func _delayRetry(retryCount uint64, persistentAddrForLogging *wire.NetAddress, unit time.Duration) (_retryDuration time.Duration) { // No delay if we haven't tried yet or if the number of retries isn't positive. if retryCount <= 0 { - time.Sleep(time.Second) - return + return 0 } numSecs := int(math.Pow(2.0, float64(retryCount))) - retryDelay := time.Duration(numSecs) * time.Second + retryDelay := time.Duration(numSecs) * unit if persistentAddrForLogging != nil { glog.V(1).Infof("Retrying connection to outbound persistent peer: "+ @@ -268,7 +273,7 @@ func _delayRetry(retryCount int, persistentAddrForLogging *wire.NetAddress) { } else { glog.V(2).Infof("Retrying connection to outbound non-persistent peer in (%d) seconds.", numSecs) } - time.Sleep(retryDelay) + return retryDelay } func (cmgr *ConnectionManager) enoughOutboundPeers() bool { @@ -286,85 +291,6 @@ func (cmgr *ConnectionManager) enoughOutboundPeers() bool { return false } -// Chooses a random address and tries to connect to it. Repeats this process until -// it finds a peer that can pass version negotiation. -func (cmgr *ConnectionManager) _getOutboundConn(persistentAddr *wire.NetAddress) net.Conn { - // If a persistentAddr was provided then the connection is a persistent - // one. 
- isPersistent := (persistentAddr != nil) - retryCount := 0 - for { - if atomic.LoadInt32(&cmgr.shutdown) != 0 { - glog.Info("_getOutboundConn: Ignoring connection due to shutdown") - return nil - } - // We want to start backing off exponentially once we've gone through enough - // unsuccessful retries. However, we want to give more slack to non-persistent - // peers before we start backing off, which is why it's not as cut and dry as - // just delaying based on the raw number of retries. - adjustedRetryCount := retryCount - if !isPersistent { - // If the address is not persistent, only start backing off once there - // has been a large number of failed attempts in a row as this likely indicates - // that there's a connection issue we need to wait out. - adjustedRetryCount = retryCount - 5 - } - _delayRetry(adjustedRetryCount, persistentAddr) - retryCount++ - - // If the connection manager is saturated with non-persistent - // outbound peers, no need to keep trying non-persistent outbound - // connections. - if !isPersistent && cmgr.enoughOutboundPeers() { - glog.V(1).Infof("Dropping connection request to non-persistent outbound " + - "peer because we have enough of them.") - return nil - } - - // If we don't have a persistentAddr, pick one from our addrmgr. - ipNetAddr := persistentAddr - if ipNetAddr == nil { - ipNetAddr = cmgr.getRandomAddr() - } - if ipNetAddr == nil { - // This should never happen but if it does, sleep a bit and try again. - glog.V(1).Infof("_getOutboundConn: No valid addresses to connect to.") - time.Sleep(time.Second) - continue - } - - netAddr := net.TCPAddr{ - IP: ipNetAddr.IP, - Port: int(ipNetAddr.Port), - } - - // If the peer is not persistent, update the addrmgr. 
- glog.V(1).Infof("Attempting to connect to addr: %v", netAddr) - if !isPersistent { - cmgr.AddrMgr.Attempt(ipNetAddr) - } - var err error - conn, err := net.DialTimeout(netAddr.Network(), netAddr.String(), cmgr.params.DialTimeout) - if err != nil { - // If we failed to connect to this peer, get a new address and try again. - glog.V(1).Infof("Connection to addr (%v) failed: %v", netAddr, err) - continue - } - - // We were able to dial successfully so we'll break out now. - glog.V(1).Infof("Connected to addr: %v", netAddr) - - // If this was a non-persistent outbound connection, mark the address as - // connected in the addrmgr. - if !isPersistent { - cmgr.AddrMgr.Connected(ipNetAddr) - } - - // We made a successful outbound connection so return. - return conn - } -} - func IPToNetAddr(ipStr string, addrMgr *addrmgr.AddrManager, params *DeSoParams) (*wire.NetAddress, error) { port := params.DefaultSocketPort host, portstr, err := net.SplitHostPort(ipStr) @@ -386,159 +312,102 @@ func IPToNetAddr(ipStr string, addrMgr *addrmgr.AddrManager, params *DeSoParams) return netAddr, nil } -// ConnectPeer connects either an INBOUND or OUTBOUND peer. If Conn == nil, -// then we will set up an OUTBOUND peer. Otherwise we will use the Conn to -// create an INBOUND peer. If the connection is OUTBOUND and the persistentAddr -// is set, then we will connect only to that addr. Otherwise, we will use -// the addrmgr to randomly select addrs and create OUTBOUND connections -// with them until we find a worthy peer. -func (cmgr *ConnectionManager) ConnectPeer(conn net.Conn, persistentAddr *wire.NetAddress) { - // If we don't have a connection object then we will try and make an - // outbound connection to a peer to get one. - isOutbound := false - if conn == nil { - isOutbound = true - } - isPersistent := (persistentAddr != nil) - retryCount := 0 - for { - // If the peer is persistent use exponential back off delay before retrying. 
- if isPersistent { - _delayRetry(retryCount, persistentAddr) - } - retryCount++ - - // If this is an outbound peer, create an outbound connection. - if isOutbound { - conn = cmgr._getOutboundConn(persistentAddr) - } - - if conn == nil { - // Conn should only be nil if this is a non-persistent outbound peer. - if isPersistent { - glog.Errorf("ConnectPeer: Got a nil connection for a persistent peer. This should never happen: (%s)", persistentAddr.IP.String()) - } +func (cmgr *ConnectionManager) IsConnectedOutboundIpAddress(netAddr *wire.NetAddress) bool { + cmgr.mtxAddrsMaps.RLock() + defer cmgr.mtxAddrsMaps.RUnlock() + return cmgr.connectedOutboundAddrs[addrmgr.NetAddressKey(netAddr)] +} - // If we end up without a connection object, it implies we had enough - // outbound peers so just return. - return - } +func (cmgr *ConnectionManager) IsAttemptedOutboundIpAddress(netAddr *wire.NetAddress) bool { + cmgr.mtxAddrsMaps.RLock() + defer cmgr.mtxAddrsMaps.RUnlock() + return cmgr.attemptedOutboundAddrs[addrmgr.NetAddressKey(netAddr)] +} - // At this point Conn is set so create a peer object to do - // a version negotiation. - na, err := IPToNetAddr(conn.RemoteAddr().String(), cmgr.AddrMgr, cmgr.params) - if err != nil { - glog.Errorf("ConnectPeer: Problem calling ipToNetAddr for addr: (%s) err: (%v)", conn.RemoteAddr().String(), err) +func (cmgr *ConnectionManager) AddAttemptedOutboundAddrs(netAddr *wire.NetAddress) { + cmgr.mtxAddrsMaps.Lock() + defer cmgr.mtxAddrsMaps.Unlock() + cmgr.attemptedOutboundAddrs[addrmgr.NetAddressKey(netAddr)] = true +} - // If we get an error in the conversion and this is an - // outbound connection, keep trying it. Otherwise, just return. 
- if isOutbound { - continue - } - return - } - peer := NewPeer(conn, isOutbound, na, isPersistent, - cmgr.stallTimeoutSeconds, - cmgr.minFeeRateNanosPerKB, - cmgr.params, - cmgr.srv.incomingMessages, cmgr, cmgr.srv, cmgr.SyncType) - - if err := peer.NegotiateVersion(cmgr.params.VersionNegotiationTimeout); err != nil { - glog.Errorf("ConnectPeer: Problem negotiating version with peer with addr: (%s) err: (%v)", conn.RemoteAddr().String(), err) - - // If we have an error in the version negotiation we disconnect - // from this peer. - peer.Conn.Close() - - // If the connection is outbound, then - // we try a new connection until we get one that works. Otherwise - // we break. - if isOutbound { - continue - } - return - } - peer._logVersionSuccess() +func (cmgr *ConnectionManager) RemoveAttemptedOutboundAddrs(netAddr *wire.NetAddress) { + cmgr.mtxAddrsMaps.Lock() + defer cmgr.mtxAddrsMaps.Unlock() + delete(cmgr.attemptedOutboundAddrs, addrmgr.NetAddressKey(netAddr)) +} - // If the version negotiation worked and we have an outbound non-persistent - // connection, mark the address as good in the addrmgr. - if isOutbound && !isPersistent { - cmgr.AddrMgr.Good(na) - } +// DialPersistentOutboundConnection attempts to connect to a persistent peer. +func (cmgr *ConnectionManager) DialPersistentOutboundConnection(persistentAddr *wire.NetAddress, attemptId uint64) (_attemptId uint64) { + glog.V(2).Infof("ConnectionManager.DialPersistentOutboundConnection: Connecting to peer %v", persistentAddr.IP.String()) + return cmgr._dialOutboundConnection(persistentAddr, attemptId, true) +} - // We connected to the peer and it passed its version negotiation. - // Handle the next steps in the main loop. - cmgr.newPeerChan <- peer +// DialOutboundConnection attempts to connect to a non-persistent peer. 
+func (cmgr *ConnectionManager) DialOutboundConnection(addr *wire.NetAddress, attemptId uint64) { + glog.V(2).Infof("ConnectionManager.ConnectOutboundConnection: Connecting to peer %v", addr.IP.String()) + cmgr._dialOutboundConnection(addr, attemptId, false) +} - // Once we've successfully connected to a valid peer we're done. The connection - // manager will handle starting the peer and, if this is an outbound peer and - // the peer later disconnects, - // it will potentially try and reconnect the peer or replace the peer with - // a new one so that we always maintain a fixed number of outbound peers. - return +// CloseAttemptedConnection closes an ongoing connection attempt. +func (cmgr *ConnectionManager) CloseAttemptedConnection(attemptId uint64) { + glog.V(2).Infof("ConnectionManager.CloseAttemptedConnection: Closing connection attempt %d", attemptId) + cmgr.mtxConnectionAttempts.Lock() + defer cmgr.mtxConnectionAttempts.Unlock() + if attempt, exists := cmgr.outboundConnectionAttempts[attemptId]; exists { + attempt.Stop() + delete(cmgr.outboundConnectionAttempts, attemptId) } } -func (cmgr *ConnectionManager) _initiateOutboundConnections() { - // This is a hack to make outbound connections go away. - if cmgr.targetOutboundPeers == 0 { - return - } - if len(cmgr.connectIps) > 0 { - // Connect to addresses passed via the --connect-ips flag. These addresses - // are persistent in the sense that if we disconnect from one, we will - // try to reconnect to the same one. - for _, connectIp := range cmgr.connectIps { - ipNetAddr, err := IPToNetAddr(connectIp, cmgr.AddrMgr, cmgr.params) - if err != nil { - glog.Error(errors.Errorf("Couldn't connect to IP %v: %v", connectIp, err)) - continue - } +// _dialOutboundConnection is the internal method that spawns and initiates an OutboundConnectionAttempt, which handles the +// connection attempt logic. It returns the attemptId of the attempt that was created. 
+func (cmgr *ConnectionManager) _dialOutboundConnection(addr *wire.NetAddress, attemptId uint64, isPersistent bool) (_attemptId uint64) { + connectionAttempt := NewOutboundConnectionAttempt(attemptId, addr, isPersistent, + cmgr.params.DialTimeout, cmgr.outboundConnectionChan) + cmgr.mtxConnectionAttempts.Lock() + cmgr.outboundConnectionAttempts[connectionAttempt.attemptId] = connectionAttempt + cmgr.mtxConnectionAttempts.Unlock() + cmgr.AddAttemptedOutboundAddrs(addr) + + connectionAttempt.Start() + return attemptId +} - go func(na *wire.NetAddress) { - cmgr.ConnectPeer(nil, na) - }(ipNetAddr) - } - return - } - // Only connect to addresses from the addrmgr if we don't specify --connect-ips. - // These addresses are *not* persistent, meaning if we disconnect from one we'll - // try a different one. - // - // TODO: We should try more addresses than we need initially to increase the - // speed at which we saturate our outbound connections. The ConnectionManager - // will handle the disconnection from peers once we have enough outbound - // connections. I had this as the logic before but removed it because it caused - // contention of the AddrMgr's lock. - for ii := 0; ii < int(cmgr.targetOutboundPeers); ii++ { - go cmgr.ConnectPeer(nil, nil) - } +// ConnectPeer connects either an INBOUND or OUTBOUND peer. If Conn == nil, +// then we will set up an OUTBOUND peer. Otherwise we will use the Conn to +// create an INBOUND peer. If the connection is OUTBOUND and the persistentAddr +// is set, then we will connect only to that addr. Otherwise, we will use +// the addrmgr to randomly select addrs and create OUTBOUND connections +// with them until we find a worthy peer. +func (cmgr *ConnectionManager) ConnectPeer(id uint64, conn net.Conn, na *wire.NetAddress, isOutbound bool, + isPersistent bool) *Peer { + + // At this point Conn is set so create a peer object to do a version negotiation. 
+ peer := NewPeer(id, conn, isOutbound, na, isPersistent, + cmgr.stallTimeoutSeconds, + cmgr.minFeeRateNanosPerKB, + cmgr.params, + cmgr.srv.incomingMessages, cmgr, cmgr.srv, cmgr.SyncType, + cmgr.newPeerChan, cmgr.donePeerChan) + + // Now we can add the peer to our data structures. + peer._logAddPeer() + cmgr.addPeer(peer) + + // Start the peer's message loop. + peer.Start() + + return peer } -func (cmgr *ConnectionManager) _isFromRedundantInboundIPAddress(addrToCheck net.Addr) bool { +func (cmgr *ConnectionManager) IsFromRedundantInboundIPAddress(netAddr *wire.NetAddress) bool { cmgr.mtxPeerMaps.RLock() defer cmgr.mtxPeerMaps.RUnlock() // Loop through all the peers to see if any have the same IP // address. This map is normally pretty small so doing this // every time a Peer connects should be fine. - netAddr, err := IPToNetAddr(addrToCheck.String(), cmgr.AddrMgr, cmgr.params) - if err != nil { - // Return true in case we have an error. We do this because it - // will result in the peer connection not being accepted, which - // is desired in this case. - glog.Warningf(errors.Wrapf(err, - "ConnectionManager._isFromRedundantInboundIPAddress: Problem parsing "+ - "net.Addr to wire.NetAddress so marking as redundant and not "+ - "making connection").Error()) - return true - } - if netAddr == nil { - glog.Warningf("ConnectionManager._isFromRedundantInboundIPAddress: " + - "address was nil after parsing so marking as redundant and not " + - "making connection") - return true - } + // If the IP is a localhost IP let it slide. This is useful for testing fake // nodes on a local machine. // TODO: Should this be a flag? @@ -578,38 +447,9 @@ func (cmgr *ConnectionManager) _handleInboundConnections() { continue } - // As a quick check, reject the peer if we have too many already. Note that - // this check isn't perfect but we have a later check at the end after doing - // a version negotiation that will properly reject the peer if this check - // messes up e.g. 
due to a concurrency issue. - // - // TODO: We should instead have eviction logic here to prevent - // someone from monopolizing a node's inbound connections. - numInboundPeers := atomic.LoadUint32(&cmgr.numInboundPeers) - if numInboundPeers > cmgr.maxInboundPeers { - - glog.Infof("Rejecting INBOUND peer (%s) due to max inbound peers (%d) hit.", - conn.RemoteAddr().String(), cmgr.maxInboundPeers) - conn.Close() - - continue - } - - // If we want to limit inbound connections to one per IP address, check to - // make sure this address isn't already connected. - if cmgr.limitOneInboundConnectionPerIP && - cmgr._isFromRedundantInboundIPAddress(conn.RemoteAddr()) { - - glog.Infof("Rejecting INBOUND peer (%s) due to already having an "+ - "inbound connection from the same IP with "+ - "limit_one_inbound_connection_per_ip set.", - conn.RemoteAddr().String()) - conn.Close() - - continue + cmgr.inboundConnectionChan <- &inboundConnection{ + connection: conn, } - - go cmgr.ConnectPeer(conn, nil) } }(outerListener) } @@ -622,13 +462,7 @@ func (cmgr *ConnectionManager) GetAllPeers() []*Peer { defer cmgr.mtxPeerMaps.RUnlock() allPeers := []*Peer{} - for _, pp := range cmgr.persistentPeers { - allPeers = append(allPeers, pp) - } - for _, pp := range cmgr.outboundPeers { - allPeers = append(allPeers, pp) - } - for _, pp := range cmgr.inboundPeers { + for _, pp := range cmgr.connectedPeers { allPeers = append(allPeers, pp) } @@ -689,9 +523,9 @@ func (cmgr *ConnectionManager) addPeer(pp *Peer) { cmgr.addToGroupKey(pp.netAddr) atomic.AddUint32(&cmgr.numOutboundPeers, 1) - cmgr.mtxConnectedOutboundAddrs.Lock() + cmgr.mtxAddrsMaps.Lock() cmgr.connectedOutboundAddrs[addrmgr.NetAddressKey(pp.netAddr)] = true - cmgr.mtxConnectedOutboundAddrs.Unlock() + cmgr.mtxAddrsMaps.Unlock() } } else { // This is an inbound peer. 
@@ -700,10 +534,45 @@ func (cmgr *ConnectionManager) addPeer(pp *Peer) { } peerList[pp.ID] = pp + cmgr.connectedPeers[pp.ID] = pp +} + +func (cmgr *ConnectionManager) getPeer(id uint64) *Peer { + cmgr.mtxPeerMaps.RLock() + defer cmgr.mtxPeerMaps.RUnlock() + + if peer, ok := cmgr.connectedPeers[id]; ok { + return peer + } + return nil +} + +func (cmgr *ConnectionManager) SendMessage(msg DeSoMessage, peerId uint64) error { + peer := cmgr.getPeer(peerId) + if peer == nil { + return fmt.Errorf("SendMessage: Peer with ID %d not found", peerId) + } + glog.V(1).Infof("SendMessage: Sending message %v to peer %d", msg.GetMsgType().String(), peerId) + peer.AddDeSoMessage(msg, false) + return nil +} + +func (cmgr *ConnectionManager) CloseConnection(peerId uint64) { + glog.V(2).Infof("ConnectionManager.CloseConnection: Closing connection to peer (id= %v)", peerId) + + var peer *Peer + var ok bool + cmgr.mtxPeerMaps.Lock() + peer, ok = cmgr.connectedPeers[peerId] + cmgr.mtxPeerMaps.Unlock() + if !ok { + return + } + peer.Disconnect() } // Update our data structures to remove this peer. -func (cmgr *ConnectionManager) RemovePeer(pp *Peer) { +func (cmgr *ConnectionManager) removePeer(pp *Peer) { // Acquire the mtxPeerMaps lock for writing. cmgr.mtxPeerMaps.Lock() defer cmgr.mtxPeerMaps.Unlock() @@ -724,9 +593,9 @@ func (cmgr *ConnectionManager) RemovePeer(pp *Peer) { cmgr.subFromGroupKey(pp.netAddr) atomic.AddUint32(&cmgr.numOutboundPeers, Uint32Dec) - cmgr.mtxConnectedOutboundAddrs.Lock() + cmgr.mtxAddrsMaps.Lock() delete(cmgr.connectedOutboundAddrs, addrmgr.NetAddressKey(pp.netAddr)) - cmgr.mtxConnectedOutboundAddrs.Unlock() + cmgr.mtxAddrsMaps.Unlock() } } else { // This is an inbound peer. @@ -737,25 +606,12 @@ func (cmgr *ConnectionManager) RemovePeer(pp *Peer) { // Update the last seen time before we finish removing the peer. // TODO: Really, we call 'Connected()' on removing a peer? // I can't find a Disconnected() but seems odd. 
- cmgr.AddrMgr.Connected(pp.netAddr) + // FIXME: Move this to Done Peer + //cmgr.AddrMgr.Connected(pp.netAddr) // Remove the peer from our data structure. delete(peerList, pp.ID) -} - -func (cmgr *ConnectionManager) _maybeReplacePeer(pp *Peer) { - // If the peer was outbound, replace her with a - // new peer to maintain a fixed number of outbound connections. - if pp.isOutbound { - // If the peer is not persistent then we don't want to pass an - // address to connectPeer. The lack of an address will cause it - // to choose random addresses from the addrmgr until one works. - na := pp.netAddr - if !pp.isPersistent { - na = nil - } - go cmgr.ConnectPeer(nil, na) - } + delete(cmgr.connectedPeers, pp.ID) } func (cmgr *ConnectionManager) _logOutboundPeerData() { @@ -775,12 +631,30 @@ func (cmgr *ConnectionManager) _logOutboundPeerData() { cmgr.mtxOutboundConnIPGroups.Unlock() } +func (cmgr *ConnectionManager) AddTimeSample(addrStr string, timeSample time.Time) { + cmgr.timeSource.AddTimeSample(addrStr, timeSample) +} + +func (cmgr *ConnectionManager) GetNumInboundPeers() uint32 { + return atomic.LoadUint32(&cmgr.numInboundPeers) +} + +func (cmgr *ConnectionManager) GetNumOutboundPeers() uint32 { + return atomic.LoadUint32(&cmgr.numOutboundPeers) +} + func (cmgr *ConnectionManager) Stop() { + cmgr.mtxPeerMaps.Lock() + defer cmgr.mtxPeerMaps.Unlock() + if atomic.AddInt32(&cmgr.shutdown, 1) != 1 { glog.Warningf("ConnectionManager.Stop is already in the process of " + "shutting down") return } + for id := range cmgr.outboundConnectionAttempts { + cmgr.CloseAttemptedConnection(id) + } glog.Infof("ConnectionManager: Stopping, number of inbound peers (%v), number of outbound "+ "peers (%v), number of persistent peers (%v).", len(cmgr.inboundPeers), len(cmgr.outboundPeers), len(cmgr.persistentPeers)) @@ -823,10 +697,6 @@ func (cmgr *ConnectionManager) Start() { // - Have the peer enter a switch statement listening for all kinds of messages. 
// - Send addr and getaddr messages as appropriate. - // Initiate outbound connections with peers either using the --connect-ips passed - // in or using the addrmgr. - cmgr._initiateOutboundConnections() - // Accept inbound connections from peers on our listeners. cmgr._handleInboundConnections() @@ -837,60 +707,24 @@ func (cmgr *ConnectionManager) Start() { cmgr._logOutboundPeerData() select { - case pp := <-cmgr.newPeerChan: - { - // We have successfully connected to a peer and it passed its version - // negotiation. - - // if this is a non-persistent outbound peer and we already have enough - // outbound peers, then don't bother adding this one. - if !pp.isPersistent && pp.isOutbound && cmgr.enoughOutboundPeers() { - // TODO: Make this less verbose - glog.V(1).Infof("Dropping peer because we already have enough outbound peer connections.") - pp.Conn.Close() - continue - } - - // If this is a non-persistent outbound peer and the group key - // overlaps with another peer we're already connected to then - // abort mission. We only connect to one peer per IP group in - // order to prevent Sybil attacks. - if pp.isOutbound && - !pp.isPersistent && - cmgr.isRedundantGroupKey(pp.netAddr) { - - // TODO: Make this less verbose - glog.Infof("Rejecting OUTBOUND NON-PERSISTENT peer (%v) with "+ - "redundant group key (%s).", - pp, addrmgr.GroupKey(pp.netAddr)) - - pp.Conn.Close() - cmgr._maybeReplacePeer(pp) - continue - } - - // Check that we have not exceeded the maximum number of inbound - // peers allowed. - // - // TODO: We should instead have eviction logic to prevent - // someone from monopolizing a node's inbound connections. 
- numInboundPeers := atomic.LoadUint32(&cmgr.numInboundPeers) - if !pp.isOutbound && numInboundPeers > cmgr.maxInboundPeers { - - // TODO: Make this less verbose - glog.Infof("Rejecting INBOUND peer (%v) due to max inbound peers (%d) hit.", - pp, cmgr.maxInboundPeers) - - pp.Conn.Close() - continue - } - - // Now we can add the peer to our data structures. - pp._logAddPeer() - cmgr.addPeer(pp) - - // Start the peer's message loop. - pp.Start() + case oc := <-cmgr.outboundConnectionChan: + glog.V(2).Infof("ConnectionManager.Start: Successfully established an outbound connection with "+ + "(addr= %v)", oc.connection.RemoteAddr()) + delete(cmgr.outboundConnectionAttempts, oc.attemptId) + cmgr.serverMessageQueue <- &ServerMessage{ + Peer: nil, + Msg: &MsgDeSoNewConnection{ + Connection: oc, + }, + } + case ic := <-cmgr.inboundConnectionChan: + glog.V(2).Infof("ConnectionManager.Start: Successfully received an inbound connection from "+ + "(addr= %v)", ic.connection.RemoteAddr()) + cmgr.serverMessageQueue <- &ServerMessage{ + Peer: nil, + Msg: &MsgDeSoNewConnection{ + Connection: ic, + }, } case pp := <-cmgr.donePeerChan: { @@ -900,14 +734,11 @@ func (cmgr *ConnectionManager) Start() { glog.V(1).Infof("Done with peer (%v).", pp) - if !pp.PeerManuallyRemovedFromConnectionManager { - // Remove the peer from our data structures. - cmgr.RemovePeer(pp) + // Remove the peer from our data structures. + cmgr.removePeer(pp) - // Potentially replace the peer. For example, if the Peer was an outbound Peer - // then we want to find a new peer in order to maintain our TargetOutboundPeers. - cmgr._maybeReplacePeer(pp) - } + // Potentially replace the peer. For example, if the Peer was an outbound Peer + // then we want to find a new peer in order to maintain our TargetOutboundPeers. // Signal the server about the Peer being done in case it wants to do something // with it. 
diff --git a/lib/network_connection.go b/lib/network_connection.go new file mode 100644 index 000000000..eb6d4ab55 --- /dev/null +++ b/lib/network_connection.go @@ -0,0 +1,212 @@ +package lib + +import ( + "github.com/btcsuite/btcd/wire" + "github.com/golang/glog" + "net" + "sync" + "time" +) + +// outboundConnection is used to store an established connection with a peer. It can also be used to signal that the +// connection was unsuccessful, in which case the failed flag is set to true. outboundConnection is created after an +// OutboundConnectionAttempt concludes. outboundConnection implements the Connection interface. +type outboundConnection struct { + mtx sync.Mutex + terminated bool + + attemptId uint64 + address *wire.NetAddress + connection net.Conn + isPersistent bool + failed bool +} + +func (oc *outboundConnection) GetConnectionType() ConnectionType { + return ConnectionTypeOutbound +} + +func (oc *outboundConnection) Close() { + oc.mtx.Lock() + defer oc.mtx.Unlock() + + if oc.terminated { + return + } + oc.connection.Close() + oc.terminated = true +} + +// inboundConnection is used to store an established connection with a peer. inboundConnection is created after +// an external peer connects to the node. inboundConnection implements the Connection interface. +type inboundConnection struct { + mtx sync.Mutex + terminated bool + + connection net.Conn +} + +func (ic *inboundConnection) GetConnectionType() ConnectionType { + return ConnectionTypeInbound +} + +func (ic *inboundConnection) Close() { + ic.mtx.Lock() + defer ic.mtx.Unlock() + + if ic.terminated { + return + } + + ic.connection.Close() + ic.terminated = true +} + +// OutboundConnectionAttempt is used to store the state of an outbound connection attempt. It is used to initiate +// an outbound connection to a peer, and manage the lifecycle of the connection attempt. +type OutboundConnectionAttempt struct { + mtx sync.Mutex + + // attemptId is used to identify the connection attempt. 
It will later be the id of the peer, + // if the connection is successful. + attemptId uint64 + + // netAddr is the address of the peer we are attempting to connect to. + netAddr *wire.NetAddress + // isPersistent is used to indicate whether we should retry connecting to the peer if the connection attempt fails. + // If isPersistent is true, we will retry connecting to the peer until we are successful. Each time such connection + // fails, we will sleep according to exponential backoff. Otherwise, we will only attempt to connect to the peer once. + isPersistent bool + // dialTimeout is the amount of time we will wait before timing out an individual connection attempt. + dialTimeout time.Duration + // timeoutUnit is the unit of time we will use to calculate the exponential backoff delay. The initial timeout is + // calculated as timeoutUnit * 2^0, the second timeout is calculated as timeoutUnit * 2^1, and so on. + timeoutUnit time.Duration + // retryCount is the number of times we have attempted to connect to the peer. + retryCount uint64 + // connectionChan is used to send the result of the connection attempt to the caller thread. 
+ connectionChan chan *outboundConnection + + exitChan chan bool + status outboundConnectionAttemptStatus +} + +type outboundConnectionAttemptStatus int + +const ( + outboundConnectionAttemptInitialized outboundConnectionAttemptStatus = 0 + outboundConnectionAttemptRunning outboundConnectionAttemptStatus = 1 + outboundConnectionAttemptTerminated outboundConnectionAttemptStatus = 2 +) + +func NewOutboundConnectionAttempt(attemptId uint64, netAddr *wire.NetAddress, isPersistent bool, + dialTimeout time.Duration, connectionChan chan *outboundConnection) *OutboundConnectionAttempt { + + return &OutboundConnectionAttempt{ + attemptId: attemptId, + netAddr: netAddr, + isPersistent: isPersistent, + dialTimeout: dialTimeout, + timeoutUnit: time.Second, + exitChan: make(chan bool), + connectionChan: connectionChan, + status: outboundConnectionAttemptInitialized, + } +} + +func (oca *OutboundConnectionAttempt) Start() { + oca.mtx.Lock() + defer oca.mtx.Unlock() + + if oca.status != outboundConnectionAttemptInitialized { + return + } + + go oca.start() + oca.status = outboundConnectionAttemptRunning +} + +func (oca *OutboundConnectionAttempt) start() { + oca.retryCount = 0 + +out: + for { + sleepDuration := 0 * time.Second + // for persistent peers, calculate the exponential backoff delay. + if oca.isPersistent { + sleepDuration = _delayRetry(oca.retryCount, oca.netAddr, oca.timeoutUnit) + } + + select { + case <-oca.exitChan: + break out + case <-time.After(sleepDuration): + // If the peer is persistent use exponential back off delay before retrying. + // We want to start backing off exponentially once we've gone through enough + // unsuccessful retries. 
+ if oca.isPersistent { + oca.retryCount++ + } + + conn := oca.attemptOutboundConnection() + if conn == nil && oca.isPersistent { + break + } + if conn == nil { + break out + } + + oca.connectionChan <- &outboundConnection{ + attemptId: oca.attemptId, + address: oca.netAddr, + connection: conn, + isPersistent: oca.isPersistent, + failed: false, + } + return + } + } + oca.connectionChan <- &outboundConnection{ + attemptId: oca.attemptId, + address: oca.netAddr, + connection: nil, + isPersistent: oca.isPersistent, + failed: true, + } +} + +func (oca *OutboundConnectionAttempt) Stop() { + oca.mtx.Lock() + defer oca.mtx.Unlock() + + if oca.status == outboundConnectionAttemptTerminated { + return + } + close(oca.exitChan) + oca.status = outboundConnectionAttemptTerminated +} + +func (oca *OutboundConnectionAttempt) SetTimeoutUnit(timeoutUnit time.Duration) { + oca.timeoutUnit = timeoutUnit +} + +// attemptOutboundConnection dials the peer. If the connection attempt is successful, it will return the connection. +// Otherwise, it will return nil. +func (oca *OutboundConnectionAttempt) attemptOutboundConnection() net.Conn { + // If the peer is not persistent, update the addrmgr. + glog.V(1).Infof("Attempting to connect to addr: %v", oca.netAddr.IP.String()) + + var err error + tcpAddr := net.TCPAddr{ + IP: oca.netAddr.IP, + Port: int(oca.netAddr.Port), + } + conn, err := net.DialTimeout(tcpAddr.Network(), tcpAddr.String(), oca.dialTimeout) + if err != nil { + // If we failed to connect to this peer, get a new address and try again. 
+ glog.V(2).Infof("Connection to addr (%v) failed: %v", tcpAddr, err) + return nil + } + + return conn +} diff --git a/lib/network_connection_test.go b/lib/network_connection_test.go new file mode 100644 index 000000000..5d3008f72 --- /dev/null +++ b/lib/network_connection_test.go @@ -0,0 +1,167 @@ +package lib + +import ( + "fmt" + "github.com/btcsuite/btcd/addrmgr" + "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/require" + "net" + "sync" + "testing" + "time" +) + +type simpleListener struct { + t *testing.T + ll net.Listener + addr *wire.NetAddress + closed bool + + connectionChan chan Connection + + exitChan chan struct{} + startGroup sync.WaitGroup + stopGroup sync.WaitGroup +} + +func newSimpleListener(t *testing.T) *simpleListener { + require := require.New(t) + ll, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(err) + params := &DeSoTestnetParams + addr := ll.Addr() + addrMgr := addrmgr.New("", net.LookupIP) + na, err := IPToNetAddr(addr.String(), addrMgr, params) + + return &simpleListener{ + t: t, + ll: ll, + addr: na, + closed: false, + connectionChan: make(chan Connection, 100), + exitChan: make(chan struct{}), + } +} + +func (sl *simpleListener) start() { + require := require.New(sl.t) + if sl.closed { + ll, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%v", sl.addr.Port)) + require.NoError(err) + sl.ll = ll + sl.connectionChan = make(chan Connection, 100) + sl.exitChan = make(chan struct{}) + sl.closed = false + } + sl.startGroup.Add(1) + sl.stopGroup.Add(1) + + go func() { + sl.startGroup.Done() + defer sl.stopGroup.Done() + for { + select { + case <-sl.exitChan: + return + default: + conn, err := sl.ll.Accept() + if err != nil { + fmt.Println("simpleListener.start: ll.Accept:", err) + return + } + sl.connectionChan <- &inboundConnection{ + connection: conn, + } + } + } + }() + sl.startGroup.Wait() +} + +func (sl *simpleListener) stop() { + sl.ll.Close() + sl.closed = true + close(sl.exitChan) + close(sl.connectionChan) + 
sl.stopGroup.Wait() + fmt.Println("simpleListener.stop: stopped") +} + +func (sl *simpleListener) getTCPAddr() *net.TCPAddr { + return sl.ll.Addr().(*net.TCPAddr) +} + +func verifyOutboundConnection(t *testing.T, conn *outboundConnection, sl *simpleListener, attemptId uint64, isPersistent bool, failed bool) { + require := require.New(t) + require.Equal(attemptId, conn.attemptId) + require.Equal(isPersistent, conn.isPersistent) + require.Equal(failed, conn.failed) + if failed { + require.Nil(conn.connection) + return + } + + require.Equal(conn.address.IP.String(), sl.getTCPAddr().IP.String()) + require.Equal(conn.address.Port, uint16(sl.getTCPAddr().Port)) + require.Equal(conn.address.IP.String(), sl.getTCPAddr().IP.String()) + require.Equal(conn.address.Port, uint16(sl.getTCPAddr().Port)) +} + +func verifyOutboundConnectionSelect(t *testing.T, connectionChan chan *outboundConnection, timeoutDuration time.Duration, + sl *simpleListener, attemptId uint64, isPersistent bool, failed bool) { + + select { + case conn := <-connectionChan: + verifyOutboundConnection(t, conn, sl, attemptId, isPersistent, failed) + case <-time.After(2 * timeoutDuration): + panic("Timed out waiting for outbound connection.") + } +} + +func TestOutboundConnectionAttempt(t *testing.T) { + require := require.New(t) + _ = require + timeoutDuration := 100 * time.Millisecond + + sl := newSimpleListener(t) + sl.start() + + connectionChan := make(chan *outboundConnection, 100) + attempt := NewOutboundConnectionAttempt(0, sl.addr, false, timeoutDuration, connectionChan) + attempt.Start() + verifyOutboundConnectionSelect(t, connectionChan, 2*timeoutDuration, sl, 0, false, false) + t.Log("TestOutboundConnectionAttempt #1 | Happy path, non-persistent | PASS") + + sl.stop() + attemptFailed := NewOutboundConnectionAttempt(1, sl.addr, false, timeoutDuration, connectionChan) + attemptFailed.Start() + verifyOutboundConnectionSelect(t, connectionChan, 2*timeoutDuration, sl, 1, false, true) + 
t.Log("TestOutboundConnectionAttempt #2 | Failed connection, non-persistent | PASS") + + sl2 := newSimpleListener(t) + sl2.start() + + attemptPersistent := NewOutboundConnectionAttempt(2, sl2.addr, true, timeoutDuration, connectionChan) + attemptPersistent.Start() + verifyOutboundConnectionSelect(t, connectionChan, 2*timeoutDuration, sl2, 2, true, false) + t.Log("TestOutboundConnectionAttempt #3 | Happy path, persistent | PASS") + + sl2.stop() + attemptPersistentDelay := NewOutboundConnectionAttempt(3, sl2.addr, true, timeoutDuration, connectionChan) + attemptPersistentDelay.SetTimeoutUnit(timeoutDuration) + attemptPersistentDelay.Start() + time.Sleep(timeoutDuration) + sl2.start() + verifyOutboundConnectionSelect(t, connectionChan, 2*timeoutDuration, sl2, 3, true, false) + require.Greater(attemptPersistentDelay.retryCount, uint64(0)) + t.Log("TestOutboundConnectionAttempt #4 | Failed connection, persistent, delayed | PASS") + + sl2.stop() + attemptPersistentCancel := NewOutboundConnectionAttempt(4, sl2.addr, true, timeoutDuration, connectionChan) + attemptPersistentCancel.Start() + time.Sleep(timeoutDuration) + attemptPersistentCancel.Stop() + verifyOutboundConnectionSelect(t, connectionChan, 2*timeoutDuration, sl2, 4, true, true) + require.Greater(attemptPersistentCancel.retryCount, uint64(0)) + t.Log("TestOutboundConnectionAttempt #5 | Failed connection, persistent, delayed, canceled | PASS") +} diff --git a/lib/peer.go b/lib/peer.go index 0eea6317a..2c6d76e4d 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -63,9 +63,6 @@ type Peer struct { stallTimeoutSeconds uint64 Params *DeSoParams MessageChan chan *ServerMessage - // A hack to make it so that we can allow an API endpoint to manually - // delete a peer. 
- PeerManuallyRemovedFromConnectionManager bool // In order to complete a version negotiation successfully, the peer must // reply to the initial version message we send them with a verack message @@ -104,6 +101,8 @@ type Peer struct { // Output queue for messages that need to be sent to the peer. outputQueueChan chan DeSoMessage + newPeerChan chan *Peer + donePeerChan chan *Peer // Set to zero until Disconnect has been called on the Peer. Used to make it // so that the logic in Disconnect will only be executed once. @@ -611,15 +610,18 @@ func (pp *Peer) StartDeSoMessageProcessor() { } // NewPeer creates a new Peer object. -func NewPeer(_conn net.Conn, _isOutbound bool, _netAddr *wire.NetAddress, +func NewPeer(_id uint64, _conn net.Conn, _isOutbound bool, _netAddr *wire.NetAddress, _isPersistent bool, _stallTimeoutSeconds uint64, _minFeeRateNanosPerKB uint64, params *DeSoParams, messageChan chan *ServerMessage, _cmgr *ConnectionManager, _srv *Server, - _syncType NodeSyncType) *Peer { + _syncType NodeSyncType, + newPeerChan chan *Peer, + donePeerChan chan *Peer) *Peer { pp := Peer{ + ID: _id, cmgr: _cmgr, srv: _srv, Conn: _conn, @@ -628,6 +630,8 @@ func NewPeer(_conn net.Conn, _isOutbound bool, _netAddr *wire.NetAddress, isOutbound: _isOutbound, isPersistent: _isPersistent, outputQueueChan: make(chan DeSoMessage), + newPeerChan: newPeerChan, + donePeerChan: donePeerChan, quit: make(chan interface{}), knownInventory: lru.NewCache(maxKnownInventory), blocksToSend: make(map[BlockHash]bool), @@ -639,9 +643,6 @@ func NewPeer(_conn net.Conn, _isOutbound bool, _netAddr *wire.NetAddress, requestedBlocks: make(map[BlockHash]bool), syncType: _syncType, } - if _cmgr != nil { - pp.ID = atomic.AddUint64(&_cmgr.peerIndex, 1) - } // TODO: Before, we would give each Peer its own Logger object. 
Now we // have a much better way of debugging which is that we include a nonce @@ -784,6 +785,10 @@ func (pp *Peer) Address() string { return pp.addrStr } +func (pp *Peer) NetAddress() *wire.NetAddress { + return pp.netAddr +} + func (pp *Peer) IP() string { return pp.netAddr.IP.String() } @@ -796,6 +801,10 @@ func (pp *Peer) IsOutbound() bool { return pp.isOutbound } +func (pp *Peer) IsPersistent() bool { + return pp.isPersistent +} + func (pp *Peer) QueueMessage(desoMessage DeSoMessage) { // If the peer is disconnected, don't queue anything. if !pp.Connected() { @@ -1495,10 +1504,11 @@ func (pp *Peer) NegotiateVersion(versionNegotiationTimeout time.Duration) error func (pp *Peer) Disconnect() { // Only run the logic the first time Disconnect is called. glog.V(1).Infof(CLog(Yellow, "Peer.Disconnect: Starting")) - if atomic.AddInt32(&pp.disconnected, 1) != 1 { + if atomic.LoadInt32(&pp.disconnected) != 0 { glog.V(1).Infof("Peer.Disconnect: Disconnect call ignored since it was already called before for Peer %v", pp) return } + atomic.AddInt32(&pp.disconnected, 1) glog.V(1).Infof("Peer.Disconnect: Running Disconnect for the first time for Peer %v", pp) @@ -1510,9 +1520,7 @@ func (pp *Peer) Disconnect() { // Add the Peer to donePeers so that the ConnectionManager and Server can do any // cleanup they need to do. 
- if pp.cmgr != nil && atomic.LoadInt32(&pp.cmgr.shutdown) == 0 && pp.cmgr.donePeerChan != nil { - pp.cmgr.donePeerChan <- pp - } + pp.donePeerChan <- pp } func (pp *Peer) _logVersionSuccess() { From ab6d2322784e37360cd6c1252563793e0058f63c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Sat, 30 Dec 2023 06:56:10 -0500 Subject: [PATCH 331/762] Move TransactionRegister Initialization Params to Init Function (#896) * Move TransactionRegister Initialization Params to Init Function * Comment cleanup --- lib/pos_fee_estimator.go | 11 ++++++---- lib/pos_mempool.go | 6 ++++-- lib/pos_transaction_register.go | 30 ++++++++++++++++++---------- lib/pos_transaction_register_test.go | 20 ++++++++++++------- 4 files changed, 43 insertions(+), 24 deletions(-) diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index e78a502e4..75294eb5d 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -1,11 +1,12 @@ package lib import ( - "github.com/deso-protocol/core/collections" - "github.com/pkg/errors" "math" "math/big" "sync" + + "github.com/deso-protocol/core/collections" + "github.com/pkg/errors" ) type PoSFeeEstimator struct { @@ -74,7 +75,8 @@ func (posFeeEstimator *PoSFeeEstimator) Init( posFeeEstimator.numMempoolBlocks = numMempoolBlocks posFeeEstimator.numPastBlocks = numPastBlocks // Create a transaction register we can use to estimate fees for past blocks. - posFeeEstimator.pastBlocksTransactionRegister = NewTransactionRegister(globalParams.Copy()) + posFeeEstimator.pastBlocksTransactionRegister = NewTransactionRegister() + posFeeEstimator.pastBlocksTransactionRegister.Init(globalParams.Copy()) // Add all the txns from the past blocks to the new pastBlocksTransactionRegister. 
for _, block := range sortedPastBlocks { @@ -192,7 +194,8 @@ func (posFeeEstimator *PoSFeeEstimator) removeBlockNoLock(block *MsgDeSoBlock) e func (posFeeEstimator *PoSFeeEstimator) UpdateGlobalParams(globalParams *GlobalParamsEntry) error { posFeeEstimator.rwLock.Lock() defer posFeeEstimator.rwLock.Unlock() - tempTransactionRegister := NewTransactionRegister(globalParams.Copy()) + tempTransactionRegister := NewTransactionRegister() + tempTransactionRegister.Init(globalParams.Copy()) for _, block := range posFeeEstimator.cachedBlocks { if err := addBlockToTransactionRegister(tempTransactionRegister, block); err != nil { return errors.Wrap(err, "PosFeeEstimator.UpdateGlobalParams: error adding block to tempTransactionRegister") diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index cfd1dcaaa..415d22c07 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -170,7 +170,7 @@ func NewPosMempool(params *DeSoParams, globalParams *GlobalParamsEntry, readOnly latestBlockHeight: latestBlockHeight, maxMempoolPosSizeBytes: maxMempoolPosSizeBytes, mempoolBackupIntervalMillis: mempoolBackupIntervalMillis, - txnRegister: NewTransactionRegister(globalParams), + txnRegister: NewTransactionRegister(), feeEstimator: NewPoSFeeEstimator(), ledger: NewBalanceLedger(), nonceTracker: NewNonceTracker(), @@ -185,6 +185,7 @@ func (mp *PosMempool) Init( if mp.status != PosMempoolStatusNotInitialized { return errors.New("PosMempool.Init: PosMempool already initialized") } + // TODO: parameterize num blocks. Also, how to pass in blocks. 
if err := mp.feeEstimator.Init( mp.txnRegister, feeEstimatorNumMempoolBlocks, feeEstimatorPastBlocks, feeEstimatorNumPastBlocks, @@ -204,7 +205,8 @@ func (mp *PosMempool) Start() error { } // Create the transaction register, the ledger, and the nonce tracker, - mp.txnRegister = NewTransactionRegister(mp.globalParams) + mp.txnRegister = NewTransactionRegister() + mp.txnRegister.Init(mp.globalParams) mp.ledger = NewBalanceLedger() mp.nonceTracker = NewNonceTracker() diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index 5cfdd2d8d..cfc4b4087 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -3,12 +3,13 @@ package lib import ( "bytes" "fmt" - "github.com/emirpasic/gods/sets/treeset" - "github.com/golang/glog" - "github.com/pkg/errors" "math" "math/big" "sync" + + "github.com/emirpasic/gods/sets/treeset" + "github.com/golang/glog" + "github.com/pkg/errors" ) // ======================== @@ -39,19 +40,26 @@ type TransactionRegister struct { feeBucketGrowthRateBasisPoints *big.Float } -func NewTransactionRegister(globalParams *GlobalParamsEntry) *TransactionRegister { +func NewTransactionRegister() *TransactionRegister { feeTimeBucketSet := treeset.NewWith(feeTimeBucketComparator) - minNetworkFee, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() return &TransactionRegister{ - feeTimeBucketSet: feeTimeBucketSet, - feeTimeBucketsByMinFeeMap: make(map[uint64]*FeeTimeBucket), - txnMembership: make(map[BlockHash]*MempoolTx), - totalTxnsSizeBytes: 0, - minimumNetworkFeeNanosPerKB: minNetworkFee, - feeBucketGrowthRateBasisPoints: bucketMultiplier, + feeTimeBucketSet: feeTimeBucketSet, + feeTimeBucketsByMinFeeMap: make(map[uint64]*FeeTimeBucket), + txnMembership: make(map[BlockHash]*MempoolTx), + totalTxnsSizeBytes: 0, + // Set default values for the uninitialized fields. This is safe because any transactions + // added to the register will be re-bucketed once the params are updated. 
+ minimumNetworkFeeNanosPerKB: big.NewFloat(1), // Default to 1 nanos per KB + feeBucketGrowthRateBasisPoints: big.NewFloat(float64(MaxBasisPoints)), // Default to 100% } } +func (tr *TransactionRegister) Init(globalParams *GlobalParamsEntry) { + minNetworkFee, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() + tr.minimumNetworkFeeNanosPerKB = minNetworkFee + tr.feeBucketGrowthRateBasisPoints = bucketMultiplier +} + // feeTimeBucketComparator is a comparator function for FeeTimeBucket objects. It is used to order FeeTimeBucket objects // in the TransactionRegister's feeTimeBucketSet based on fee ranges (higher fee ranges are ordered first). func feeTimeBucketComparator(a, b interface{}) int { diff --git a/lib/pos_transaction_register_test.go b/lib/pos_transaction_register_test.go index d4f759368..4b5fb41a0 100644 --- a/lib/pos_transaction_register_test.go +++ b/lib/pos_transaction_register_test.go @@ -2,20 +2,22 @@ package lib import ( "bytes" - "github.com/btcsuite/btcd/btcec" - "github.com/stretchr/testify/require" "math" "math/rand" "sort" "testing" "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/stretchr/testify/require" ) func TestSanityCheckTransactionRegister(t *testing.T) { require := require.New(t) // Empty TransactionRegister - txnRegister := NewTransactionRegister(_testGetDefaultGlobalParams()) + txnRegister := NewTransactionRegister() + txnRegister.Init(_testGetDefaultGlobalParams()) require.Equal(true, txnRegister.Empty()) it := txnRegister.GetFeeTimeIterator() require.Equal(false, it.Next()) @@ -41,7 +43,8 @@ func TestSanityCheckTransactionRegister(t *testing.T) { // TransactionRegister with no transactions and a single empty FeeTimeBucket. // This should never happen but let's see what happens. 
- txnRegister = NewTransactionRegister(_testGetDefaultGlobalParams()) + txnRegister = NewTransactionRegister() + txnRegister.Init(_testGetDefaultGlobalParams()) emptyFeeTimeBucket := NewFeeTimeBucket(0, 1000) txnRegister.feeTimeBucketSet.Add(emptyFeeTimeBucket) txnRegister.feeTimeBucketsByMinFeeMap[0] = emptyFeeTimeBucket @@ -138,7 +141,8 @@ func TestTransactionRegisterPrune(t *testing.T) { globalParams := _testGetDefaultGlobalParams() txnPool := _testGetRandomMempoolTxns(rand, globalParams.MinimumNetworkFeeNanosPerKB, feeRange, 1000, timestampRange, testCases) - txnRegister := NewTransactionRegister(globalParams) + txnRegister := NewTransactionRegister() + txnRegister.Init(globalParams) totalSize := uint64(0) for _, tx := range txnPool { require.Nil(txnRegister.AddTransaction(tx)) @@ -260,7 +264,8 @@ func TestTransactionRegisterWithRemoves(t *testing.T) { globalParams := _testGetDefaultGlobalParams() txnPool := _testGetRandomMempoolTxns(rand, globalParams.MinimumNetworkFeeNanosPerKB, feeRange, 1000, timestampRange, testCases) - txnRegister := NewTransactionRegister(globalParams) + txnRegister := NewTransactionRegister() + txnRegister.Init(globalParams) _testBucketStandardRemoveTest(t, txnPool, globalParams, false, func(tx *MempoolTx) { require.Nil(txnRegister.AddTransaction(tx)) @@ -288,7 +293,8 @@ func TestTransactionRegisterBasic(t *testing.T) { globalParams := _testGetDefaultGlobalParams() txnPool := _testGetRandomMempoolTxns(rand, globalParams.MinimumNetworkFeeNanosPerKB, feeRange, 1000, timestampRange, testCases) - txnRegister := NewTransactionRegister(globalParams) + txnRegister := NewTransactionRegister() + txnRegister.Init(globalParams) _testBucketStandardAddTest(t, txnPool, globalParams, false, func(tx *MempoolTx) { require.Nil(txnRegister.AddTransaction(tx)) From b2cbdc3ff47167bf41b6870580acebe4a3a5d67a Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Sat, 30 Dec 2023 07:05:34 -0500 Subject: [PATCH 332/762] 
Implement ProcessHeaderPoS (#892) * Implement ProcessHeaderPoS * Address Nina's initial comments * Call processHeaderPoS from processBlockPoS * Address Nina's comments --- lib/pos_blockchain.go | 193 ++++++++++++++++++++++++++++++++----- lib/pos_blockchain_test.go | 59 ++++++++++-- 2 files changed, 222 insertions(+), 30 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index fdf1abfab..9ce4796da 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -12,8 +12,24 @@ import ( "github.com/pkg/errors" ) +// ProcessHeaderPoS simply acquires the chain lock and calls processHeaderPoS. +func (bc *Blockchain) ProcessHeaderPoS(header *MsgDeSoHeader) (_isMainChain bool, _isOrphan bool, _err error) { + // Grab the chain lock + bc.ChainLock.Lock() + defer bc.ChainLock.Unlock() + + if header == nil { + return false, false, fmt.Errorf("ProcessHeaderPoS: Header is nil") + } + + return bc.processHeaderPoS(header) +} + // processHeaderPoS validates and stores an incoming block header to build -// the PoS version of the header chain. +// the PoS version of the header chain. It requires callers to call it with +// headers in order of increasing block height. If called with an orphan header, +// it still gracefully handles it by returning early and not storing the header +// in the block index. // // The PoS header chain uses a simplified version of the Fast-HotStuff consensus // rules. It's used during syncing to build a chain of block headers with the @@ -26,20 +42,134 @@ import ( // that require on-chain state. // // processHeaderPoS algorithm: -// 1. Validate that the block header is properly formed. -// 2. Add the block header to the block index with status +// 1. Exit early if the header has already been indexed in the block index. +// 2. Do nothing if the header is an orphan. +// 3. Validate the header and verify that its parent is also valid. +// 4. 
Add the block header to the block index with status // StatusHeaderValidated or StatusHeaderValidateFailed. -// 3. Perform the orphan check on the block header. -// 4. If the block is an orphan, or its view is less than the current header chain's tip, -// then we exit early. -// 5. If it is not an orphan, and has a higher view than the current header chain, then -// we re-org the header chain so that the incoming header is the new tip. -func (bc *Blockchain) processHeaderPoS(blockHeader *MsgDeSoHeader, headerHash *BlockHash) ( - _isMainChain bool, - _isOrphan bool, - _err error, +// 5. Exit early if the's view is less than the current header chain's tip. +// 6. Reorg the best header chain if the header's view is higher than the current tip. +func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader) ( + _isMainChain bool, _isOrphan bool, _err error, ) { - return false, false, fmt.Errorf("processHeaderPoS: Not implemented") + headerHash, err := header.Hash() + if err != nil { + return false, false, errors.Wrapf(err, "processHeaderPoS: Problem hashing header") + } + + // Validate the header and index it in the block index. + blockNode, isOrphan, err := bc.validateAndIndexHeaderPoS(header, headerHash) + if err != nil { + return false, false, errors.Wrapf(err, "processHeaderPoS: Problem validating and indexing header: ") + } + + // Exit early if the header is an orphan. + if isOrphan { + return false, true, nil + } + + // Exit early if the header's view is less than the current header chain's tip. The header is not + // the new tip for the best header chain. + currentTip := bc.headerTip() + if header.ProposedInView <= currentTip.Header.ProposedInView { + return false, false, nil + } + + // The header is not an orphan and has a higher view than the current tip. We reorg the header chain + // and apply the incoming header is the new tip. 
+ _, blocksToDetach, blocksToAttach := GetReorgBlocks(currentTip, blockNode) + bc.bestHeaderChain, bc.bestHeaderChainMap = updateBestChainInMemory( + bc.bestHeaderChain, + bc.bestHeaderChainMap, + blocksToDetach, + blocksToAttach, + ) + + // Success. The header is at the tip of the best header chain. + return true, false, nil +} + +func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHash *BlockHash) ( + _headerBlockNode *BlockNode, _isOrphan bool, _err error, +) { + // Look up the header in the block index to check if it has already been validated and indexed. + blockNode, exists := bc.blockIndexByHash[*headerHash] + + // ------------------------------------ Base Cases ----------------------------------- // + + // The header is already validated. Exit early. + if exists && blockNode.IsHeaderValidated() { + return blockNode, false, nil + } + + // The header has already failed validations. Exit early. + if exists && blockNode.IsHeaderValidateFailed() { + return nil, false, errors.New("validateAndIndexHeaderPoS: Header already failed validation") + } + + // The header has an invalid PrevBlockHash field. Exit early. + if header.PrevBlockHash == nil { + return nil, false, errors.New("validateAndIndexHeaderPoS: PrevBlockHash is nil") + } + + // The header is an orphan. No need to store it in the block index. Exit early. + parentBlockNode, parentBlockNodeExists := bc.blockIndexByHash[*header.PrevBlockHash] + if !parentBlockNodeExists { + return nil, true, nil + } + + // ---------------------------------- Recursive Case ---------------------------------- // + + // Recursively call validateAndIndexHeaderPoS on the header's ancestors. It's possible for + // headers to be added to the block index out of order by processBlockPoS. In those cases, + // it's possible for ancestors of this header to exist in the block index but not have their + // header validation statuses set yet. We set them here recursively. 
+ // + // This is safe and efficient as long as validateAndIndexHeaderPoS is only called on non-orphan + // headers. This guarantees that the recursive case for each header can only be hit once. + parentBlockNode, isParentAnOrphan, err := bc.validateAndIndexHeaderPoS(parentBlockNode.Header, header.PrevBlockHash) + if err != nil { + return nil, false, err + } + + // Gracefully handle the case where the parent is still an orphan. This should never happen. + if isParentAnOrphan { + return nil, true, nil + } + // Verify that the parent has not previously failed validation. If it has, then the incoming header + // is also not valid. + if parentBlockNode.IsHeaderValidateFailed() { + return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( + header, errors.New("validateAndIndexHeaderPoS: Parent header failed validations"), + ) + } + + // Verify that the header is properly formed. + if err := bc.isValidBlockHeaderPoS(header); err != nil { + return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( + header, errors.New("validateAndIndexHeaderPoS: Header failed validations"), + ) + } + + // Validate the header's random seed signature. + isValidRandomSeedSignature, err := bc.hasValidProposerRandomSeedSignaturePoS(header) + if err != nil { + return nil, false, errors.Wrap(err, "validateAndIndexHeaderPoS: Problem validating random seed signature") + } + if !isValidRandomSeedSignature { + return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( + header, errors.New("validateAndIndexHeaderPoS: Header has invalid random seed signature"), + ) + } + + // Store it as HeaderValidated now that it has passed all validations. + blockNode, err = bc.storeValidatedHeaderInBlockIndex(header) + if err != nil { + return nil, false, errors.Wrapf(err, "validateAndIndexHeaderPoS: Problem adding header to block index: ") + } + + // Happy path. The header is not an orphan and is valid. 
+ return blockNode, false, nil } // ProcessBlockPoS simply acquires the chain lock and calls processBlockPoS. @@ -66,8 +196,9 @@ func (bc *Blockchain) ProcessBlockPoS(block *MsgDeSoBlock, currentView uint64, v // If so, return the hash of the missing block and add this block to the orphans list. // 2. Validate the incoming block, its header, its block height, the leader, and its QCs (vote or timeout) // 3. Store the block in the block index and save to DB. -// 4. try to apply the incoming block as the tip (performing reorgs as necessary). If it can't be applied, exit here. -// 5. Run the commit rule - If applicable, flushes the incoming block's grandparent to the DB +// 4. Process the block's header. This may reorg the header chain and apply the block as the new header chain tip. +// 5. Try to apply the incoming block as the tip (performing reorgs as necessary). If it can't be applied, exit here. +// 6. Run the commit rule - If applicable, flushes the incoming block's grandparent to the DB func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, verifySignatures bool) ( _success bool, _isOrphan bool, @@ -130,7 +261,17 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v "processBlockPoS: Block not validated after performing all validations.") } - // 4. Try to apply the incoming block as the new tip. This function will + // 4. Process the block's header and update the header chain. We call processHeaderPoS + // here after verifying that the block is not an orphan and has passed all validations, + // but directly before applying the block as the new tip. Any failure when validating the + // header and applying it to the header chain will result in the two chains being out of + // sync. The header chain is less critical and mutations to it are reversible. So we attempt + // to mutate it first before attempting to mutate the block chain. 
+ if _, _, err = bc.processHeaderPoS(block.Header); err != nil { + return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem processing header") + } + + // 5. Try to apply the incoming block as the new tip. This function will // first perform any required reorgs and then determine if the incoming block // extends the chain tip. If it does, it will apply the block to the best chain // and appliedNewTip will be true and we can continue to running the commit rule. @@ -139,7 +280,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem applying new tip: ") } - // 5. Commit grandparent if possible. Only need to do this if we applied a new tip. + // 6. Commit grandparent if possible. Only need to do this if we applied a new tip. if appliedNewTip { if err = bc.runCommitRuleOnBestChain(); err != nil { return false, false, nil, errors.Wrap(err, @@ -148,7 +289,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v } // Now that we've processed this block, we check for any blocks that were previously - // stored as orphans, which are children of this block. We can process them now. + // stored as orphans, which are children of this block. We can process them now. blockNodesAtNextHeight := bc.blockIndexByHeight[uint64(blockNode.Height)+1] for _, blockNodeAtNextHeight := range blockNodesAtNextHeight { if blockNodeAtNextHeight.Header.PrevBlockHash.IsEqual(blockNode.Hash) && @@ -172,6 +313,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v } } } + // Returns whether a new tip was applied, whether the block is an orphan, and any missing blocks, and an error. 
return appliedNewTip, false, nil, nil } @@ -480,7 +622,7 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock) (*BlockNode, } // Validate the block's random seed signature - isValidRandomSeedSignature, err := bc.hasValidProposerRandomSeedSignaturePoS(block) + isValidRandomSeedSignature, err := bc.hasValidProposerRandomSeedSignaturePoS(block.Header) if err != nil { var innerErr error blockNode, innerErr = bc.storeBlockInBlockIndex(block) @@ -808,9 +950,9 @@ func (bc *Blockchain) hasValidBlockViewPoS(header *MsgDeSoHeader) error { return nil } -func (bc *Blockchain) hasValidProposerRandomSeedSignaturePoS(block *MsgDeSoBlock) (bool, error) { +func (bc *Blockchain) hasValidProposerRandomSeedSignaturePoS(header *MsgDeSoHeader) (bool, error) { // Validate that the leader proposed a valid random seed signature. - parentBlock, exists := bc.blockIndexByHash[*block.Header.PrevBlockHash] + parentBlock, exists := bc.blockIndexByHash[*header.PrevBlockHash] if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. 
@@ -823,7 +965,7 @@ func (bc *Blockchain) hasValidProposerRandomSeedSignaturePoS(block *MsgDeSoBlock "hasValidProposerRandomSeedSignaturePoS: Problem converting prev random seed hash to RandomSeedHash") } isVerified, err := verifySignatureOnRandomSeedHash( - block.Header.ProposerVotingPublicKey, block.Header.ProposerRandomSeedSignature, prevRandomSeedHash) + header.ProposerVotingPublicKey, header.ProposerRandomSeedSignature, prevRandomSeedHash) if err != nil { return false, errors.Wrapf(err, "hasValidProposerRandomSeedSignaturePoS: Problem verifying proposer random seed signature") @@ -1044,6 +1186,13 @@ func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader) (* return blockNode, nil } +func (bc *Blockchain) storeValidateFailedHeaderInBlockIndexWithWrapperError(header *MsgDeSoHeader, wrapperError error) error { + if _, innerErr := bc.storeValidateFailedHeaderInBlockIndex(header); innerErr != nil { + return errors.Wrapf(innerErr, "%v", wrapperError) + } + return wrapperError +} + func (bc *Blockchain) storeValidateFailedHeaderInBlockIndex(header *MsgDeSoHeader) (*BlockNode, error) { blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}) if err != nil { diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 04b420bd5..6ff7228f4 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -1652,15 +1652,58 @@ func _verifyCommitRuleHelper(testMeta *TestMeta, committedBlocks []*BlockHash, u } } +func TestProcessHeaderPoS(t *testing.T) { + // Initialize the chain and test metadata. + testMeta := NewTestPoSBlockchainWithValidators(t) + + // Capture the starting block height, view, and block hash for the best chain and best header chain. 
+ initialBlockHeight := testMeta.chain.BlockTip().Height + initialView := testMeta.chain.BlockTip().Header.ProposedInView + initialBlockHash := testMeta.chain.BlockTip().Hash + + initialHeaderHeight := testMeta.chain.HeaderTip().Height + initialHeaderView := testMeta.chain.HeaderTip().Header.ProposedInView + initialHeaderHash := testMeta.chain.HeaderTip().Hash + + require.Equal(t, initialBlockHeight, initialHeaderHeight) + require.Equal(t, initialView, initialHeaderView) + require.True(t, initialBlockHash.IsEqual(initialHeaderHash)) + + // Run the ProcessBlockPoS tests end to end. ProcessHeaderPoS is called within ProcessBlockPoS. + // The header chain should progress identically to the block chain, and it should reorg when then + // block chain reorgs. + testProcessBlockPoS(t, testMeta) + + // Capture the final block height, view, and block hash for the best chain and best header chain. + finalBlockHeight := testMeta.chain.BlockTip().Height + finalView := testMeta.chain.BlockTip().Header.ProposedInView + finalBlockHash := testMeta.chain.BlockTip().Hash + + finalHeaderHeight := testMeta.chain.HeaderTip().Height + finalHeaderView := testMeta.chain.HeaderTip().Header.ProposedInView + finalHeaderHash := testMeta.chain.HeaderTip().Hash + + require.Equal(t, finalBlockHeight, finalHeaderHeight) + require.Equal(t, finalView, finalHeaderView) + require.True(t, finalBlockHash.IsEqual(finalHeaderHash)) + + // Verify that the header chain has advanced from the initial state. + require.Greater(t, finalBlockHeight, initialBlockHeight) + require.Greater(t, finalView, initialView) + require.False(t, finalBlockHash.IsEqual(initialBlockHash)) +} + +func TestProcessBlockPoS(t *testing.T) { + testProcessBlockPoS(t, NewTestPoSBlockchainWithValidators(t)) +} + // Test the following series of blocks to make sure that ProcessBlockPoS properly handles all cases as expected during the steady state -// 1. Process a bad block. 
The block could be bad for any reason, we don't really care the reason, we just want to // see it get rejected. +// 1. Process a bad block. The block could be bad for any reason, we don't really care the reason, we just want to see it get rejected. // 2. Process three good blocks in a row, which tests the commit rule // 3. Process a timeout block that reorgs the previous tip // 4. Process a regular block that reorgs from the previous tip // 5. Process an orphan, which tests the block's storage and the return value of missingBlockHashes -func TestProcessBlockPoS(t *testing.T) { - testMeta := NewTestPoSBlockchainWithValidators(t) - +func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { { // Create a bad block and try to process it. dummyBlock := _generateDummyBlock(testMeta, 12, 12, 887) @@ -2229,7 +2272,7 @@ func TestHasValidProposerRandomSeedSignaturePoS(t *testing.T) { var realBlock *MsgDeSoBlock realBlock = _generateRealBlock(testMeta, 12, 12, 889, testMeta.chain.BlockTip().Hash, false) // The first PoS block passes the validation. 
- isValid, err := testMeta.chain.hasValidProposerRandomSeedSignaturePoS(realBlock) + isValid, err := testMeta.chain.hasValidProposerRandomSeedSignaturePoS(realBlock.Header) require.NoError(t, err) require.True(t, isValid) _, _, _, err = testMeta.chain.ProcessBlockPoS(realBlock, 12, true) @@ -2247,7 +2290,7 @@ func TestHasValidProposerRandomSeedSignaturePoS(t *testing.T) { var childBlock *MsgDeSoBlock childBlock = _generateRealBlock(testMeta, 13, 13, 273, realBlockNode.Hash, false) { - isValid, err = testMeta.chain.hasValidProposerRandomSeedSignaturePoS(childBlock) + isValid, err = testMeta.chain.hasValidProposerRandomSeedSignaturePoS(childBlock.Header) require.NoError(t, err) require.True(t, isValid) } @@ -2256,7 +2299,7 @@ func TestHasValidProposerRandomSeedSignaturePoS(t *testing.T) { { realBlockNode.Header.ProposerRandomSeedSignature, err = (&bls.Signature{}).FromBytes(RandomBytes(32)) require.NoError(t, err) - isValid, err = testMeta.chain.hasValidProposerRandomSeedSignaturePoS(childBlock) + isValid, err = testMeta.chain.hasValidProposerRandomSeedSignaturePoS(childBlock.Header) require.NoError(t, err) require.False(t, isValid) } @@ -2267,7 +2310,7 @@ func TestHasValidProposerRandomSeedSignaturePoS(t *testing.T) { prevBlockRandomSeedHashBytes := sha256.Sum256(realBlockNode.Header.ProposerRandomSeedSignature.ToBytes()) childBlock.Header.ProposerRandomSeedSignature, err = wrongProposerPrivateKey.Sign(prevBlockRandomSeedHashBytes[:]) require.NoError(t, err) - isValid, err = testMeta.chain.hasValidProposerRandomSeedSignaturePoS(childBlock) + isValid, err = testMeta.chain.hasValidProposerRandomSeedSignaturePoS(childBlock.Header) require.NoError(t, err) require.False(t, isValid) } From 5612cef8738ffbf3656756b87c3848e0826fe22e Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Sat, 30 Dec 2023 07:08:03 -0500 Subject: [PATCH 333/762] Move PosMempool Initialization Params to Init Function (#897) --- 
lib/pos_block_producer_test.go | 28 ++++++++------ lib/pos_blockchain_test.go | 13 +++++-- lib/pos_fee_estimator_test.go | 10 +++-- lib/pos_mempool.go | 70 +++++++++++++++++++++++----------- lib/pos_mempool_test.go | 54 ++++++++++++++------------ 5 files changed, 108 insertions(+), 67 deletions(-) diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index bea7b09ba..70c7e596d 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -33,9 +33,10 @@ func TestCreateBlockTemplate(t *testing.T) { require.NoError(err) dir := _dbDirSetup(t) - mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, - maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - require.NoError(mempool.Init(1, nil, 1)) + mempool := NewPosMempool() + require.NoError(mempool.Init( + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + )) require.NoError(mempool.Start()) defer mempool.Stop() require.True(mempool.IsRunning()) @@ -95,9 +96,10 @@ func TestCreateBlockWithoutHeader(t *testing.T) { require.NoError(err) dir := _dbDirSetup(t) - mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, - maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - require.NoError(mempool.Init(1, nil, 1)) + mempool := NewPosMempool() + require.NoError(mempool.Init( + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + )) require.NoError(mempool.Start()) defer mempool.Stop() require.True(mempool.IsRunning()) @@ -151,9 +153,10 @@ func TestGetBlockTransactions(t *testing.T) { require.NoError(err) dir := _dbDirSetup(t) - mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, - maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - require.NoError(mempool.Init(1, nil, 1)) + mempool := NewPosMempool() + require.NoError(mempool.Init( + params, globalParams, latestBlockView, 
2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + )) require.NoError(mempool.Start()) defer mempool.Stop() require.True(mempool.IsRunning()) @@ -234,9 +237,10 @@ func TestGetBlockTransactions(t *testing.T) { // Create an in-memory mempool instance and add the transactions to it. Each transaction will be added with a // Simulated Transaction Timestamp and afterward, mempool will be queried for the transactions. The transactions should // be returned in the same order as the transaction from getBlockTransactions. - testMempool := NewPosMempool(params, globalParams, latestBlockView, 2, "", true, - maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - testMempool.Init(1, nil, 1) + testMempool := NewPosMempool() + testMempool.Init( + params, globalParams, latestBlockView, 2, "", true, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + ) require.NoError(testMempool.Start()) defer testMempool.Stop() currentTime := uint64(time.Now().UnixMicro()) diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 25fcfc437..263a65017 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -11,6 +11,7 @@ import ( "time" "crypto/sha256" + "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/collections/bitset" @@ -2630,8 +2631,10 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { maxMempoolPosSizeBytes := uint64(500) mempoolBackupIntervalMillis := uint64(30000) - mempool := NewPosMempool(params, _testGetDefaultGlobalParams(), latestBlockView, 11, _dbDirSetup(t), false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - require.NoError(t, mempool.Init(1, nil, 1)) + mempool := NewPosMempool() + require.NoError(t, mempool.Init( + params, _testGetDefaultGlobalParams(), latestBlockView, 11, _dbDirSetup(t), false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + )) require.NoError(t, mempool.Start()) 
require.True(t, mempool.IsRunning()) priv := _generateRandomBLSPrivateKey(t) @@ -2699,8 +2702,10 @@ func NewTestPoSBlockchain(t *testing.T) *TestMeta { require.NoError(t, err) maxMempoolPosSizeBytes := uint64(500) mempoolBackupIntervalMillis := uint64(30000) - mempool := NewPosMempool(params, _testGetDefaultGlobalParams(), latestBlockView, 10, _dbDirSetup(t), false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - require.NoError(t, mempool.Init(1, nil, 1)) + mempool := NewPosMempool() + require.NoError(t, mempool.Init( + params, _testGetDefaultGlobalParams(), latestBlockView, 10, _dbDirSetup(t), false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + )) require.NoError(t, mempool.Start()) require.True(t, mempool.IsRunning()) priv := _generateRandomBLSPrivateKey(t) diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index 6fbf727b7..bfd12054a 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -1,10 +1,11 @@ package lib import ( - "github.com/stretchr/testify/require" "math/big" "math/rand" "testing" + + "github.com/stretchr/testify/require" ) func TestFeeEstimator(t *testing.T) { @@ -22,9 +23,10 @@ func TestFeeEstimator(t *testing.T) { require.NoError(t, err) dir := _dbDirSetup(t) - mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, - maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - mempool.Init(1, nil, 1) + mempool := NewPosMempool() + mempool.Init( + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + ) require.NoError(t, mempool.Start()) require.True(t, mempool.IsRunning()) minFeeBucketMin, minFeeBucketMax := computeFeeTimeBucketRangeFromFeeNanosPerKB( diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 415d22c07..794d82a55 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -157,27 +157,25 @@ func NewPosMempoolIterator(it *FeeTimeIterator) *PosMempoolIterator { return 
&PosMempoolIterator{it: it} } -func NewPosMempool(params *DeSoParams, globalParams *GlobalParamsEntry, readOnlyLatestBlockView *UtxoView, - latestBlockHeight uint64, dir string, inMemoryOnly bool, maxMempoolPosSizeBytes uint64, - mempoolBackupIntervalMillis uint64) *PosMempool { +func NewPosMempool() *PosMempool { return &PosMempool{ - status: PosMempoolStatusNotInitialized, - params: params, - globalParams: globalParams, - inMemoryOnly: inMemoryOnly, - dir: dir, - readOnlyLatestBlockView: readOnlyLatestBlockView, - latestBlockHeight: latestBlockHeight, - maxMempoolPosSizeBytes: maxMempoolPosSizeBytes, - mempoolBackupIntervalMillis: mempoolBackupIntervalMillis, - txnRegister: NewTransactionRegister(), - feeEstimator: NewPoSFeeEstimator(), - ledger: NewBalanceLedger(), - nonceTracker: NewNonceTracker(), + status: PosMempoolStatusNotInitialized, + txnRegister: NewTransactionRegister(), + feeEstimator: NewPoSFeeEstimator(), + ledger: NewBalanceLedger(), + nonceTracker: NewNonceTracker(), } } func (mp *PosMempool) Init( + params *DeSoParams, + globalParams *GlobalParamsEntry, + readOnlyLatestBlockView *UtxoView, + latestBlockHeight uint64, + dir string, + inMemoryOnly bool, + maxMempoolPosSizeBytes uint64, + mempoolBackupIntervalMillis uint64, feeEstimatorNumMempoolBlocks uint64, feeEstimatorPastBlocks []*MsgDeSoBlock, feeEstimatorNumPastBlocks uint64, @@ -186,10 +184,25 @@ func (mp *PosMempool) Init( return errors.New("PosMempool.Init: PosMempool already initialized") } + // Initialize the parametrized fields. + mp.params = params + mp.globalParams = globalParams + mp.readOnlyLatestBlockView = readOnlyLatestBlockView + mp.latestBlockHeight = latestBlockHeight + mp.dir = dir + mp.inMemoryOnly = inMemoryOnly + mp.maxMempoolPosSizeBytes = maxMempoolPosSizeBytes + mp.mempoolBackupIntervalMillis = mempoolBackupIntervalMillis + // TODO: parameterize num blocks. Also, how to pass in blocks. 
- if err := mp.feeEstimator.Init( - mp.txnRegister, feeEstimatorNumMempoolBlocks, feeEstimatorPastBlocks, feeEstimatorNumPastBlocks, - mp.globalParams); err != nil { + err := mp.feeEstimator.Init( + mp.txnRegister, + feeEstimatorNumMempoolBlocks, + feeEstimatorPastBlocks, + feeEstimatorNumPastBlocks, + mp.globalParams, + ) + if err != nil { return errors.Wrapf(err, "PosMempool.Start: Problem initializing fee estimator") } mp.status = PosMempoolStatusInitialized @@ -536,10 +549,21 @@ func (mp *PosMempool) Refresh() error { func (mp *PosMempool) refreshNoLock() error { // Create the temporary in-memory mempool with the most up-to-date readOnlyLatestBlockView, Height, and globalParams. - tempPool := NewPosMempool(mp.params, mp.globalParams, mp.readOnlyLatestBlockView, mp.latestBlockHeight, "", true, - mp.maxMempoolPosSizeBytes, mp.mempoolBackupIntervalMillis) - if err := tempPool.Init( - mp.feeEstimator.numMempoolBlocks, mp.feeEstimator.cachedBlocks, mp.feeEstimator.numPastBlocks); err != nil { + tempPool := NewPosMempool() + err := tempPool.Init( + mp.params, + mp.globalParams, + mp.readOnlyLatestBlockView, + mp.latestBlockHeight, + "", + true, + mp.maxMempoolPosSizeBytes, + mp.mempoolBackupIntervalMillis, + mp.feeEstimator.numMempoolBlocks, + mp.feeEstimator.cachedBlocks, + mp.feeEstimator.numPastBlocks, + ) + if err != nil { return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem initializing temp pool") } if err := tempPool.Start(); err != nil { diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index c848f8ffd..b1d02d841 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -21,9 +21,10 @@ func TestPosMempoolStart(t *testing.T) { maxMempoolPosSizeBytes := uint64(3000000000) mempoolBackupIntervalMillis := uint64(30000) - mempool := NewPosMempool(¶ms, globalParams, nil, 0, dir, false, - maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - require.NoError(mempool.Init(1, nil, 1)) + mempool := NewPosMempool() + 
require.NoError(mempool.Init( + ¶ms, globalParams, nil, 0, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) require.NoError(mempool.Refresh()) @@ -49,9 +50,10 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { require.NoError(err) dir := _dbDirSetup(t) - mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, - maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - require.NoError(mempool.Init(1, nil, 1)) + mempool := NewPosMempool() + require.NoError(mempool.Init( + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -67,9 +69,8 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { mempool.Stop() require.False(mempool.IsRunning()) - newPool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, - maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - require.NoError(newPool.Init(1, nil, 1)) + newPool := NewPosMempool() + require.NoError(newPool.Init(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1)) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) newPoolTxns := newPool.GetTransactions() @@ -102,9 +103,10 @@ func TestPosMempoolPrune(t *testing.T) { require.NoError(err) dir := _dbDirSetup(t) - mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, - maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - require.NoError(mempool.Init(1, nil, 1)) + mempool := NewPosMempool() + require.NoError(mempool.Init( + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -137,9 +139,10 @@ func TestPosMempoolPrune(t *testing.T) { 
mempool.Stop() require.False(mempool.IsRunning()) - newPool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, - maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - require.NoError(newPool.Init(1, nil, 1)) + newPool := NewPosMempool() + require.NoError(newPool.Init( + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + )) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) require.Equal(2, len(newPool.GetTransactions())) @@ -196,9 +199,10 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { require.NoError(err) dir := _dbDirSetup(t) - mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, - maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - require.NoError(mempool.Init(1, nil, 1)) + mempool := NewPosMempool() + require.NoError(mempool.Init( + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -223,9 +227,10 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { mempool.Stop() require.False(mempool.IsRunning()) - newPool := NewPosMempool(params, newGlobalParams, latestBlockView, 2, dir, false, - maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - require.NoError(newPool.Init(1, nil, 1)) + newPool := NewPosMempool() + require.NoError(newPool.Init( + params, newGlobalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + )) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) newPoolTxns := newPool.GetTransactions() @@ -254,9 +259,10 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { require.NoError(err) dir := _dbDirSetup(t) - mempool := NewPosMempool(params, globalParams, latestBlockView, 2, dir, false, - maxMempoolPosSizeBytes, mempoolBackupIntervalMillis) - require.NoError(mempool.Init(1, nil, 1)) + mempool := 
NewPosMempool() + require.NoError(mempool.Init( + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) From 5830191d048355e7c04d2188eb4c5f3e238058ca Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Sat, 30 Dec 2023 07:24:38 -0500 Subject: [PATCH 334/762] Clean up TODOs --- lib/pos_consensus.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 509f7a8de..8765f137a 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -511,9 +511,6 @@ func (cc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ return nil, errors.Errorf("Error processing tip block locally: %v", err) } - // TODO: Remove the new block's txns from the mempool. Add txns back to the mempoool from blocks that have - // been reorged away from. - // Happy path. The block was processed successfully and applied as the new tip. Nothing left to do. 
return nil, nil } From a901fa6356605e9fcab6206673437b1337c9934b Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 5 Jan 2024 14:35:54 -0500 Subject: [PATCH 335/762] Dynamically Create PosBlockProducer With Correct Public Key (#898) * Dynamically Create PosBlockProducer With Correct Public Key * Cleanup * Address Nina's comments --- lib/pos_consensus.go | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 8765f137a..578161d1e 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -12,7 +12,6 @@ import ( type FastHotStuffConsensus struct { lock sync.RWMutex blockchain *Blockchain - blockProducer *PosBlockProducer fastHotStuffEventLoop consensus.FastHotStuffEventLoop mempool Mempool params *DeSoParams @@ -22,7 +21,6 @@ type FastHotStuffConsensus struct { func NewFastHotStuffConsensus(params *DeSoParams, blockchain *Blockchain, mempool Mempool, signer *BLSSigner) *FastHotStuffConsensus { return &FastHotStuffConsensus{ blockchain: blockchain, - blockProducer: NewPosBlockProducer(mempool, params, nil, signer.GetPublicKey()), fastHotStuffEventLoop: consensus.NewFastHotStuffEventLoop(), mempool: mempool, signer: signer, @@ -538,9 +536,15 @@ func (cc *FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( return nil, errors.Errorf("Error fetching UtxoView for parent block: %v", parentBlockHash) } + // Dynamically create a new block producer at the current block height + blockProducer, err := cc.createBlockProducer(utxoViewAtParent) + if err != nil { + return nil, errors.Errorf("Error creating block producer: %v", err) + } + // Construct an unsigned block if event.EventType == consensus.FastHotStuffEventTypeConstructVoteQC { - block, err := cc.blockProducer.CreateUnsignedBlock( + block, err := blockProducer.CreateUnsignedBlock( utxoViewAtParent, event.TipBlockHeight+1, event.View, @@ -556,7 +560,7 @@ func (cc 
*FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( // Construct an unsigned timeout block if event.EventType == consensus.FastHotStuffEventTypeConstructTimeoutQC { - block, err := cc.blockProducer.CreateUnsignedTimeoutBlock( + block, err := blockProducer.CreateUnsignedTimeoutBlock( utxoViewAtParent, event.TipBlockHeight+1, event.View, @@ -657,6 +661,20 @@ func (cc *FastHotStuffConsensus) fetchValidatorListsForSafeBlocks(blocks []*MsgD return blocksWithValidatorLists, nil } +func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView) (*PosBlockProducer, error) { + blockProducerBlsPublicKey := fc.signer.GetPublicKey() + blockProducerValidatorEntry, err := bav.GetCurrentSnapshotValidatorBLSPublicKeyPKIDPairEntry(blockProducerBlsPublicKey) + if err != nil { + return nil, errors.Errorf("Error fetching validator entry for block producer: %v", err) + } + blockProducerPublicKeyBytes := bav.GetPublicKeyForPKID(blockProducerValidatorEntry.PKID) + blockProducerPublicKey := NewPublicKey(blockProducerPublicKeyBytes) + if blockProducerPublicKey == nil { + return nil, errors.Errorf("Error fetching public key for block producer: %v", err) + } + return NewPosBlockProducer(fc.mempool, fc.params, blockProducerPublicKey, blockProducerBlsPublicKey), nil +} + // Finds the epoch entry for the block and returns the epoch number. 
func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { for _, epochEntry := range epochEntries { From 391a7753016a940b7ff54f3c895b46df3e4f8fc8 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 5 Jan 2024 15:58:23 -0500 Subject: [PATCH 336/762] Use mempool interface in CreateMaxSpend (#901) Co-authored-by: Lazy Nina <> --- lib/blockchain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 9fae05c51..9e4902afb 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -4856,7 +4856,7 @@ func (bc *Blockchain) CreateBasicTransferTxnWithDiamonds( func (bc *Blockchain) CreateMaxSpend( senderPkBytes []byte, recipientPkBytes []byte, extraData map[string][]byte, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( + mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInputAdded uint64, _spendAmount uint64, _fee uint64, _err error) { txn := &MsgDeSoTxn{ From 43d8e369b2e09fd8f72c9851c475a02607e71c16 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Sat, 6 Jan 2024 01:35:42 +0100 Subject: [PATCH 337/762] PoS RemoteNode and RemoteNodeId (#857) * RemoteNode and RemoteNodeId * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. 
* Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test * Review round * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test * Final pass --- cmd/config.go | 6 + cmd/node.go | 29 +- cmd/run.go | 4 + integration_testing/connection_bridge.go | 4 +- lib/bls_keystore.go | 6 +- lib/network.go | 17 +- lib/network_test.go | 2 +- lib/peer.go | 229 +------- lib/remote_node.go | 651 +++++++++++++++++++++++ lib/server.go | 22 +- 10 files changed, 713 insertions(+), 257 deletions(-) create mode 100644 lib/remote_node.go diff --git a/cmd/config.go b/cmd/config.go index 18da983eb..692795865 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -17,6 +17,7 @@ type Config struct { TXIndex bool Regtest bool PostgresURI string + PosValidatorSeed string // Peers ConnectIPs []string @@ -103,6 +104,7 @@ func LoadConfig() *Config { config.TXIndex = viper.GetBool("txindex") config.Regtest = viper.GetBool("regtest") config.PostgresURI = viper.GetString("postgres-uri") + config.PosValidatorSeed = viper.GetString("pos-validator-seed") config.HyperSync = viper.GetBool("hypersync") config.ForceChecksum = viper.GetBool("force-checksum") config.SyncType = lib.NodeSyncType(viper.GetString("sync-type")) @@ -176,6 +178,10 @@ func (config *Config) Print() { glog.Infof("Postgres URI: %s", config.PostgresURI) } + if config.PosValidatorSeed != "" { + glog.Infof(lib.CLog(lib.Blue, "PoS Validator: ON")) + } + if config.HyperSync { glog.Infof("HyperSync: ON") } diff --git a/cmd/node.go b/cmd/node.go index 5f59d2c2c..2a7e82ecf 
100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -27,12 +27,13 @@ import ( ) type Node struct { - Server *lib.Server - ChainDB *badger.DB - TXIndex *lib.TXIndex - Params *lib.DeSoParams - Config *Config - Postgres *lib.Postgres + Server *lib.Server + ChainDB *badger.DB + TXIndex *lib.TXIndex + Params *lib.DeSoParams + Config *Config + Postgres *lib.Postgres + Listeners []net.Listener // IsRunning is false when a NewNode is created, set to true on Start(), set to false // after Stop() is called. Mainly used in testing. @@ -117,8 +118,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { // This just gets localhost listening addresses on the protocol port. // Such as [{127.0.0.1 18000 } {::1 18000 }], and associated listener structs. - listeningAddrs, listeners := GetAddrsToListenOn(node.Config.ProtocolPort) - _ = listeningAddrs + _, node.Listeners = GetAddrsToListenOn(node.Config.ProtocolPort) // If --connect-ips is not passed, we will connect the addresses from // --add-ips, DNSSeeds, and DNSSeedGenerators. @@ -202,13 +202,21 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { // Setup eventManager eventManager := lib.NewEventManager() + var blsKeystore *lib.BLSKeystore + if node.Config.PosValidatorSeed != "" { + blsKeystore, err = lib.NewBLSKeystore(node.Config.PosValidatorSeed) + if err != nil { + panic(err) + } + } + // Setup the server. ShouldRestart is used whenever we detect an issue and should restart the node after a recovery // process, just in case. These issues usually arise when the node was shutdown unexpectedly mid-operation. The node // performs regular health checks to detect whenever this occurs. 
shouldRestart := false node.Server, err, shouldRestart = lib.NewServer( node.Params, - listeners, + node.Listeners, desoAddrMgr, node.Config.ConnectIPs, node.ChainDB, @@ -243,7 +251,8 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.nodeMessageChan, node.Config.ForceChecksum, node.Config.StateChangeDir, - node.Config.HypersyncMaxQueueSize) + node.Config.HypersyncMaxQueueSize, + blsKeystore) if err != nil { // shouldRestart can be true if, on the previous run, we did not finish flushing all ancestral // records to the DB. In this case, the snapshot is corrupted and needs to be computed. See the diff --git a/cmd/run.go b/cmd/run.go index e6231fdd8..212acaa8f 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -58,6 +58,10 @@ func SetupRunFlags(cmd *cobra.Command) { cmd.PersistentFlags().String("postgres-uri", "", "BETA: Use Postgres as the backing store for chain data."+ "When enabled, most data is stored in postgres although badger is still currently used for some state. Run your "+ "Postgres instance on the same machine as your node for optimal performance.") + cmd.PersistentFlags().String("pos-validator-seed", "", "The private key of the Proof of Stake validator. "+ + "The private key should be passed as hex, optionally prefixed with a '0x', and map to a valid BLS12_381 private key. "+ + "The private key must be 32 bytes, or 64 characters, in length (excluding the '0x' prefix). 
Setting this flag automatically "+ + "makes the node run Proof of Stake Validator.") cmd.PersistentFlags().Uint32("max-sync-block-height", 0, "Max sync block height") // Hyper Sync diff --git a/integration_testing/connection_bridge.go b/integration_testing/connection_bridge.go index 1d0228467..4c3b28dde 100644 --- a/integration_testing/connection_bridge.go +++ b/integration_testing/connection_bridge.go @@ -172,7 +172,7 @@ func (bridge *ConnectionBridge) getVersionMessage(node *cmd.Node) *lib.MsgDeSoVe } if node.Server != nil { - ver.StartBlockHeight = uint32(node.Server.GetBlockchain().BlockTip().Header.Height) + ver.LatestBlockHeight = uint32(node.Server.GetBlockchain().BlockTip().Header.Height) } ver.MinFeeRateNanosPerKB = node.Config.MinFeerate return ver @@ -186,7 +186,7 @@ func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode connection.VersionNonceSent = versionMessage.Nonce // Send the version message. - fmt.Println("Sending version message:", versionMessage, versionMessage.StartBlockHeight) + fmt.Println("Sending version message:", versionMessage, versionMessage.LatestBlockHeight) if err := connection.WriteDeSoMessage(versionMessage); err != nil { return err } diff --git a/lib/bls_keystore.go b/lib/bls_keystore.go index b1356da6c..325c4661f 100644 --- a/lib/bls_keystore.go +++ b/lib/bls_keystore.go @@ -112,8 +112,7 @@ func (signer *BLSSigner) SignRandomSeedHash(randomSeedHash *RandomSeedHash) (*bl // TODO: Add signing function for PoS blocks func (signer *BLSSigner) SignPoSValidatorHandshake(nonceSent uint64, nonceReceived uint64, tstampMicro uint64) (*bls.Signature, error) { - // FIXME - payload := []byte{} + payload := GetVerackHandshakePayload(nonceSent, nonceReceived, tstampMicro) return signer.sign(BLSSignatureOpCodePoSValidatorHandshake, payload[:]) } @@ -141,7 +140,6 @@ func BLSVerifyValidatorTimeout(view uint64, highQCView uint64, signature *bls.Si func BLSVerifyPoSValidatorHandshake(nonceSent uint64, nonceReceived uint64, 
tstampMicro uint64, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { - // FIXME - payload := []byte{} + payload := GetVerackHandshakePayload(nonceSent, nonceReceived, tstampMicro) return _blsVerify(BLSSignatureOpCodePoSValidatorHandshake, payload[:], signature, publicKey) } diff --git a/lib/network.go b/lib/network.go index bb7fcae4e..80d412c4f 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1578,8 +1578,7 @@ type MsgDeSoVersion struct { // The height of the last block on the main chain for // this node. // - // TODO: We need to update this to uint64 - StartBlockHeight uint32 + LatestBlockHeight uint64 // MinFeeRateNanosPerKB is the minimum feerate that a peer will // accept from other peers when validating transactions. @@ -1611,11 +1610,11 @@ func (msg *MsgDeSoVersion) ToBytes(preSignature bool) ([]byte, error) { retBytes = append(retBytes, UintToBuf(uint64(len(msg.UserAgent)))...) retBytes = append(retBytes, msg.UserAgent...) - // StartBlockHeight - retBytes = append(retBytes, UintToBuf(uint64(msg.StartBlockHeight))...) + // LatestBlockHeight + retBytes = append(retBytes, UintToBuf(msg.LatestBlockHeight)...) // MinFeeRateNanosPerKB - retBytes = append(retBytes, UintToBuf(uint64(msg.MinFeeRateNanosPerKB))...) + retBytes = append(retBytes, UintToBuf(msg.MinFeeRateNanosPerKB)...) // JSONAPIPort - deprecated retBytes = append(retBytes, UintToBuf(uint64(0))...) 
@@ -1689,13 +1688,13 @@ func (msg *MsgDeSoVersion) FromBytes(data []byte) error { retVer.UserAgent = string(userAgent) } - // StartBlockHeight + // LatestBlockHeight { - lastBlockHeight, err := ReadUvarint(rr) - if err != nil || lastBlockHeight > math.MaxUint32 { + latestBlockHeight, err := ReadUvarint(rr) + if err != nil || latestBlockHeight > math.MaxUint32 { return errors.Wrapf(err, "MsgDeSoVersion.FromBytes: Problem converting msg.LatestBlockHeight") } - retVer.StartBlockHeight = uint32(lastBlockHeight) + retVer.LatestBlockHeight = latestBlockHeight } // MinFeeRateNanosPerKB diff --git a/lib/network_test.go b/lib/network_test.go index e9d6e6e5e..8a971f75a 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -43,7 +43,7 @@ var expectedVer = &MsgDeSoVersion{ TstampSecs: 2, Nonce: uint64(0xffffffffffffffff), UserAgent: "abcdef", - StartBlockHeight: 4, + LatestBlockHeight: 4, MinFeeRateNanosPerKB: 10, } diff --git a/lib/peer.go b/lib/peer.go index 2c6d76e4d..98d2c135e 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -3,7 +3,6 @@ package lib import ( "fmt" "github.com/decred/dcrd/lru" - "math" "net" "sort" "sync/atomic" @@ -143,6 +142,10 @@ type Peer struct { syncType NodeSyncType } +func (pp *Peer) GetId() uint64 { + return pp.ID +} + func (pp *Peer) AddDeSoMessage(desoMessage DeSoMessage, inbound bool) { // Don't add any more messages if the peer is disconnected if pp.disconnected != 0 { @@ -1140,20 +1143,6 @@ out: // This switch actually processes the message. For most messages, we just // pass them onto the Server. switch msg := rmsg.(type) { - case *MsgDeSoVersion: - // We always receive the VERSION from the Peer before starting this select - // statement, so getting one here is an error. - - glog.Errorf("Peer.inHandler: Already received 'version' from peer %v -- disconnecting", pp) - break out - - case *MsgDeSoVerack: - // We always receive the VERACK from the Peer before starting this select - // statement, so getting one here is an error. 
- - glog.Errorf("Peer.inHandler: Already received 'verack' from peer %v -- disconnecting", pp) - break out - case *MsgDeSoPing: // Respond to a ping with a pong. pp.HandlePingMsg(msg) @@ -1290,216 +1279,6 @@ func (pp *Peer) ReadDeSoMessage() (DeSoMessage, error) { return msg, nil } -func (pp *Peer) NewVersionMessage(params *DeSoParams) *MsgDeSoVersion { - ver := NewMessage(MsgTypeVersion).(*MsgDeSoVersion) - - ver.Version = params.ProtocolVersion.ToUint64() - ver.TstampSecs = time.Now().Unix() - // We use an int64 instead of a uint64 for convenience but - // this should be fine since we're just looking to generate a - // unique value. - ver.Nonce = uint64(RandInt64(math.MaxInt64)) - ver.UserAgent = params.UserAgent - // TODO: Right now all peers are full nodes. Later on we'll want to change this, - // at which point we'll need to do a little refactoring. - ver.Services = SFFullNodeDeprecated - if pp.cmgr != nil && pp.cmgr.HyperSync { - ver.Services |= SFHyperSync - } - if pp.srv.blockchain.archivalMode { - ver.Services |= SFArchivalNode - } - - // When a node asks you for what height you have, you should reply with - // the height of the latest actual block you have. This makes it so that - // peers who have up-to-date headers but missing blocks won't be considered - // for initial block download. - // - // TODO: This is ugly. It would be nice if the Peer required zero knowledge of the - // Server and the Blockchain. - if pp.srv != nil { - ver.StartBlockHeight = uint32(pp.srv.blockchain.blockTip().Header.Height) - } else { - ver.StartBlockHeight = uint32(0) - } - - // Set the minimum fee rate the peer will accept. - ver.MinFeeRateNanosPerKB = pp.minTxFeeRateNanosPerKB - - return ver -} - -func (pp *Peer) sendVerack() error { - verackMsg := NewMessage(MsgTypeVerack) - // Include the nonce we received in the peer's version message so - // we can validate that we actually control our IP address. 
- verackMsg.(*MsgDeSoVerack).NonceReceived = pp.VersionNonceReceived - if err := pp.WriteDeSoMessage(verackMsg); err != nil { - return errors.Wrap(err, "sendVerack: ") - } - - return nil -} - -func (pp *Peer) readVerack() error { - msg, err := pp.ReadDeSoMessage() - if err != nil { - return errors.Wrap(err, "readVerack: ") - } - if msg.GetMsgType() != MsgTypeVerack { - return fmt.Errorf( - "readVerack: Received message with type %s but expected type VERACK. ", - msg.GetMsgType().String()) - } - verackMsg := msg.(*MsgDeSoVerack) - if verackMsg.NonceReceived != pp.VersionNonceSent { - return fmt.Errorf( - "readVerack: Received VERACK message with nonce %d but expected nonce %d", - verackMsg.NonceReceived, pp.VersionNonceSent) - } - - return nil -} - -func (pp *Peer) sendVersion() error { - // For an outbound peer, we send a version message and then wait to - // hear back for one. - verMsg := pp.NewVersionMessage(pp.Params) - - // Record the nonce of this version message before we send it so we can - // detect self connections and so we can validate that the peer actually - // controls the IP she's supposedly communicating to us from. - pp.VersionNonceSent = verMsg.Nonce - if pp.cmgr != nil { - pp.cmgr.sentNonces.Add(pp.VersionNonceSent) - } - - if err := pp.WriteDeSoMessage(verMsg); err != nil { - return errors.Wrap(err, "sendVersion: ") - } - - return nil -} - -func (pp *Peer) readVersion() error { - msg, err := pp.ReadDeSoMessage() - if err != nil { - return errors.Wrap(err, "readVersion: ") - } - - verMsg, ok := msg.(*MsgDeSoVersion) - if !ok { - return fmt.Errorf( - "readVersion: Received message with type %s but expected type VERSION. 
"+ - "The VERSION message must preceed all others", msg.GetMsgType().String()) - } - if verMsg.Version < pp.Params.MinProtocolVersion { - return fmt.Errorf("readVersion: Peer's protocol version too low: %d (min: %v)", - verMsg.Version, pp.Params.MinProtocolVersion) - } - - // If we've sent this nonce before then return an error since this is - // a connection from ourselves. - msgNonce := verMsg.Nonce - if pp.cmgr != nil { - if pp.cmgr.sentNonces.Contains(msgNonce) { - pp.cmgr.sentNonces.Delete(msgNonce) - return fmt.Errorf("readVersion: Rejecting connection to self") - } - } - // Save the version nonce so we can include it in our verack message. - pp.VersionNonceReceived = msgNonce - - // Set the peer info-related fields. - pp.PeerInfoMtx.Lock() - pp.userAgent = verMsg.UserAgent - pp.serviceFlags = verMsg.Services - pp.advertisedProtocolVersion = verMsg.Version - negotiatedVersion := pp.Params.ProtocolVersion - if pp.advertisedProtocolVersion < pp.Params.ProtocolVersion.ToUint64() { - negotiatedVersion = NewProtocolVersionType(pp.advertisedProtocolVersion) - } - pp.negotiatedProtocolVersion = negotiatedVersion.ToUint64() - pp.PeerInfoMtx.Unlock() - - // Set the stats-related fields. - pp.StatsMtx.Lock() - pp.startingHeight = verMsg.StartBlockHeight - pp.minTxFeeRateNanosPerKB = verMsg.MinFeeRateNanosPerKB - pp.TimeConnected = time.Unix(verMsg.TstampSecs, 0) - pp.TimeOffsetSecs = verMsg.TstampSecs - time.Now().Unix() - pp.StatsMtx.Unlock() - - // Update the timeSource now that we've gotten a version message from the - // peer. 
- if pp.cmgr != nil { - pp.cmgr.timeSource.AddTimeSample(pp.addrStr, pp.TimeConnected) - } - - return nil -} - -func (pp *Peer) ReadWithTimeout(readFunc func() error, readTimeout time.Duration) error { - errChan := make(chan error) - go func() { - errChan <- readFunc() - }() - select { - case err := <-errChan: - { - return err - } - case <-time.After(readTimeout): - { - return fmt.Errorf("ReadWithTimeout: Timed out reading message from peer: (%v)", pp) - } - } -} - -func (pp *Peer) NegotiateVersion(versionNegotiationTimeout time.Duration) error { - if pp.isOutbound { - // Write a version message. - if err := pp.sendVersion(); err != nil { - return errors.Wrapf(err, "negotiateVersion: Problem sending version to Peer %v", pp) - } - // Read the peer's version. - if err := pp.ReadWithTimeout( - pp.readVersion, - versionNegotiationTimeout); err != nil { - - return errors.Wrapf(err, "negotiateVersion: Problem reading OUTBOUND peer version for Peer %v", pp) - } - } else { - // Read the version first since this is an inbound peer. - if err := pp.ReadWithTimeout( - pp.readVersion, - versionNegotiationTimeout); err != nil { - - return errors.Wrapf(err, "negotiateVersion: Problem reading INBOUND peer version for Peer %v", pp) - } - if err := pp.sendVersion(); err != nil { - return errors.Wrapf(err, "negotiateVersion: Problem sending version to Peer %v", pp) - } - } - - // After sending and receiving a compatible version, complete the - // negotiation by sending and receiving a verack message. - if err := pp.sendVerack(); err != nil { - return errors.Wrapf(err, "negotiateVersion: Problem sending verack to Peer %v", pp) - } - if err := pp.ReadWithTimeout( - pp.readVerack, - versionNegotiationTimeout); err != nil { - - return errors.Wrapf(err, "negotiateVersion: Problem reading VERACK message from Peer %v", pp) - } - pp.VersionNegotiated = true - - // At this point we have sent a version and validated our peer's - // version. So the negotiation should be complete. 
- return nil -} - // Disconnect closes a peer's network connection. func (pp *Peer) Disconnect() { // Only run the logic the first time Disconnect is called. diff --git a/lib/remote_node.go b/lib/remote_node.go new file mode 100644 index 000000000..6da6bf408 --- /dev/null +++ b/lib/remote_node.go @@ -0,0 +1,651 @@ +package lib + +import ( + "encoding/binary" + "fmt" + "github.com/btcsuite/btcd/wire" + "github.com/deso-protocol/core/bls" + "github.com/golang/glog" + "github.com/pkg/errors" + "golang.org/x/crypto/sha3" + "net" + "sync" + "time" +) + +type RemoteNodeStatus int + +const ( + RemoteNodeStatus_NotConnected RemoteNodeStatus = 0 + RemoteNodeStatus_Connected RemoteNodeStatus = 1 + RemoteNodeStatus_HandshakeCompleted RemoteNodeStatus = 2 + RemoteNodeStatus_Attempted RemoteNodeStatus = 3 + RemoteNodeStatus_Terminated RemoteNodeStatus = 4 +) + +type HandshakeStage uint8 + +const ( + HandshakeStage_NotStarted HandshakeStage = 0 + HandshakeStage_VersionSent HandshakeStage = 1 + HandshakeStage_VerackSent HandshakeStage = 2 + HandshakeStage_Completed HandshakeStage = 3 +) + +type RemoteNodeId uint64 + +func NewRemoteNodeId(id uint64) RemoteNodeId { + return RemoteNodeId(id) +} + +func (id RemoteNodeId) ToUint64() uint64 { + return uint64(id) +} + +// RemoteNode is a consensus-aware wrapper around the network Peer object. It is used to manage the lifecycle of a peer +// and to store consensus-related metadata about the peer. The RemoteNode can wrap around either an inbound or outbound +// peer connection. For outbound peers, the RemoteNode is created prior to the connection being established. In this case, +// the RemoteNode will be first used to initiate an OutboundConnectionAttempt, and then store the resulting connected peer. +// For inbound peers, the RemoteNode is created after the connection is established in ConnectionManager. +// +// Once the RemoteNode's peer is set, the RemoteNode is used to manage the handshake with the peer. 
The handshake involves +// rounds of Version and Verack messages being sent between our node and the peer. The handshake is complete when both +// nodes have sent and received a Version and Verack message. Once the handshake is successful, the RemoteNode will +// emit a MsgDeSoPeerHandshakeComplete control message via the Server. +// +// In steady state, i.e. after the handshake is complete, the RemoteNode can be used to send a message to the peer, +// retrieve the peer's handshake metadata, and close the connection with the peer. The RemoteNode has a single-use +// lifecycle. Once the RemoteNode is terminated, it will be disposed of, and a new RemoteNode must be created if we +// wish to reconnect to the peer in the future. +type RemoteNode struct { + mtx sync.Mutex + + peer *Peer + // The id is the unique identifier of this RemoteNode. For outbound connections, the id will be the same as the + // attemptId of the OutboundConnectionAttempt, and the subsequent id of the outbound peer. For inbound connections, + // the id will be the same as the inbound peer's id. + id RemoteNodeId + connectionStatus RemoteNodeStatus + + params *DeSoParams + srv *Server + cmgr *ConnectionManager + + // minTxFeeRateNanosPerKB is the minimum transaction fee rate in nanos per KB that our node will accept. + minTxFeeRateNanosPerKB uint64 + // latestBlockHeight is the block height of our node's block tip. + latestBlockHeight uint64 + // nodeServices is a bitfield that indicates the services supported by our node. + nodeServices ServiceFlag + + // handshakeMetadata is used to store the information received from the peer during the handshake. + handshakeMetadata *HandshakeMetadata + // keystore is a reference to the node's BLS private key storage. In the context of a RemoteNode, the keystore is + // used in the Verack message for validator nodes to prove ownership of the validator BLS public key. 
+ keystore *BLSKeystore + + // versionTimeExpected is the latest time by which we expect to receive a Version message from the peer. + // If the Version message is not received by this time, the connection will be terminated. + versionTimeExpected *time.Time + // verackTimeExpected is the latest time by which we expect to receive a Verack message from the peer. + // If the Verack message is not received by this time, the connection will be terminated. + verackTimeExpected *time.Time +} + +// HandshakeMetadata stores the information received from the peer during the Version and Verack exchange. +type HandshakeMetadata struct { + // ### The following fields are populated during the MsgDeSoVersion exchange. + // versionNonceSent is the nonce sent in the Version message to the peer. + versionNonceSent uint64 + // versionNonceReceived is the nonce received in the Version message from the peer. + versionNonceReceived uint64 + // userAgent is a meta level label that can be used to analyze the network. + userAgent string + // serviceFlag is a bitfield that indicates the services supported by the peer. + serviceFlag ServiceFlag + // latestBlockHeight is the block height of the peer's block tip during the Version exchange. + latestBlockHeight uint64 + // minTxFeeRateNanosPerKB is the minimum transaction fee rate in nanos per KB that the peer will accept. + minTxFeeRateNanosPerKB uint64 + // advertisedProtocolVersion is the protocol version advertised by the peer. + advertisedProtocolVersion ProtocolVersionType + // negotiatedProtocolVersion is the protocol version negotiated between the peer and our node. This is the minimum + // of the advertised protocol version and our node's protocol version. + negotiatedProtocolVersion ProtocolVersionType + // timeConnected is the unix timestamp of the peer, measured when the peer sent their Version message. + timeConnected *time.Time + // versionNegotiated is true if the peer passed the version negotiation step. 
+ versionNegotiated bool + // timeOffsetSecs is the time offset between our node and the peer, measured by taking the difference between the + // peer's unix timestamp and our node's unix timestamp. + timeOffsetSecs uint64 + + // ### The following fields are populated during the MsgDeSoVerack exchange. + // validatorPublicKey is the BLS public key of the peer, if the peer is a validator node. + validatorPublicKey *bls.PublicKey + + // ### The following fields are handshake control fields. + handshakeStage HandshakeStage +} + +func NewHandshakeMetadata() *HandshakeMetadata { + return &HandshakeMetadata{ + handshakeStage: HandshakeStage_NotStarted, + } +} + +func NewRemoteNode(id RemoteNodeId, srv *Server, cmgr *ConnectionManager, keystore *BLSKeystore, + params *DeSoParams, minTxFeeRateNanosPerKB uint64, latestBlockHeight uint64, nodeServices ServiceFlag) *RemoteNode { + return &RemoteNode{ + id: id, + connectionStatus: RemoteNodeStatus_NotConnected, + handshakeMetadata: NewHandshakeMetadata(), + srv: srv, + cmgr: cmgr, + keystore: keystore, + params: params, + minTxFeeRateNanosPerKB: minTxFeeRateNanosPerKB, + latestBlockHeight: latestBlockHeight, + nodeServices: nodeServices, + } +} + +// setStatusHandshakeCompleted sets the connection status of the remote node to HandshakeCompleted. +func (rn *RemoteNode) setStatusHandshakeCompleted() { + rn.connectionStatus = RemoteNodeStatus_HandshakeCompleted +} + +// setStatusConnected sets the connection status of the remote node to connected. +func (rn *RemoteNode) setStatusConnected() { + rn.connectionStatus = RemoteNodeStatus_Connected +} + +// setStatusTerminated sets the connection status of the remote node to terminated. +func (rn *RemoteNode) setStatusTerminated() { + rn.connectionStatus = RemoteNodeStatus_Terminated +} + +// setStatusAttempted sets the connection status of the remote node to attempted. 
+func (rn *RemoteNode) setStatusAttempted() { + rn.connectionStatus = RemoteNodeStatus_Attempted +} + +func (rn *RemoteNode) GetId() RemoteNodeId { + return rn.id +} + +func (rn *RemoteNode) GetPeer() *Peer { + return rn.peer +} + +func (rn *RemoteNode) GetNegotiatedProtocolVersion() ProtocolVersionType { + return rn.handshakeMetadata.negotiatedProtocolVersion +} + +func (rn *RemoteNode) GetValidatorPublicKey() *bls.PublicKey { + return rn.handshakeMetadata.validatorPublicKey +} + +func (rn *RemoteNode) GetUserAgent() string { + return rn.handshakeMetadata.userAgent +} + +func (rn *RemoteNode) getHandshakeStage() HandshakeStage { + return rn.handshakeMetadata.handshakeStage +} + +func (rn *RemoteNode) setHandshakeStage(stage HandshakeStage) { + rn.handshakeMetadata.handshakeStage = stage +} + +func (rn *RemoteNode) IsInbound() bool { + return rn.peer != nil && !rn.peer.IsOutbound() +} + +func (rn *RemoteNode) IsOutbound() bool { + return rn.peer != nil && rn.peer.IsOutbound() +} + +func (rn *RemoteNode) IsPersistent() bool { + return rn.peer != nil && rn.peer.IsPersistent() +} + +func (rn *RemoteNode) IsNotConnected() bool { + return rn.connectionStatus == RemoteNodeStatus_NotConnected +} + +func (rn *RemoteNode) IsConnected() bool { + return rn.connectionStatus == RemoteNodeStatus_Connected +} + +func (rn *RemoteNode) IsHandshakeCompleted() bool { + return rn.connectionStatus == RemoteNodeStatus_HandshakeCompleted +} + +func (rn *RemoteNode) IsValidator() bool { + if !rn.IsHandshakeCompleted() { + return false + } + return rn.GetValidatorPublicKey() != nil +} + +// DialOutboundConnection dials an outbound connection to the provided netAddr. 
+func (rn *RemoteNode) DialOutboundConnection(netAddr *wire.NetAddress) error { + if !rn.IsNotConnected() { + return fmt.Errorf("RemoteNode.DialOutboundConnection: RemoteNode is not in the NotConnected state") + } + + rn.mtx.Lock() + defer rn.mtx.Unlock() + + rn.cmgr.DialOutboundConnection(netAddr, rn.GetId().ToUint64()) + rn.setStatusAttempted() + return nil +} + +// DialPersistentOutboundConnection dials a persistent outbound connection to the provided netAddr. +func (rn *RemoteNode) DialPersistentOutboundConnection(netAddr *wire.NetAddress) error { + if !rn.IsNotConnected() { + return fmt.Errorf("RemoteNode.DialPersistentOutboundConnection: RemoteNode is not in the NotConnected state") + } + + rn.mtx.Lock() + defer rn.mtx.Unlock() + + rn.cmgr.DialPersistentOutboundConnection(netAddr, rn.GetId().ToUint64()) + rn.setStatusAttempted() + return nil +} + +// AttachInboundConnection creates an inbound peer once a successful inbound connection has been established. +func (rn *RemoteNode) AttachInboundConnection(conn net.Conn, na *wire.NetAddress) error { + if !rn.IsNotConnected() { + return fmt.Errorf("RemoteNode.AttachInboundConnection: RemoteNode is not in the NotConnected state") + } + + rn.mtx.Lock() + defer rn.mtx.Unlock() + + id := rn.GetId().ToUint64() + rn.peer = rn.cmgr.ConnectPeer(id, conn, na, false, false) + rn.setStatusConnected() + return nil +} + +// AttachOutboundConnection creates an outbound peer once a successful outbound connection has been established. 
+func (rn *RemoteNode) AttachOutboundConnection(conn net.Conn, na *wire.NetAddress, isPersistent bool) error { + if rn.connectionStatus != RemoteNodeStatus_Attempted { + return fmt.Errorf("RemoteNode.AttachOutboundConnection: RemoteNode is not in the Attempted state") + } + + rn.mtx.Lock() + defer rn.mtx.Unlock() + + id := rn.GetId().ToUint64() + rn.peer = rn.cmgr.ConnectPeer(id, conn, na, true, isPersistent) + rn.setStatusConnected() + return nil +} + +// Disconnect disconnects the remote node, closing the attempted connection or the established connection. +func (rn *RemoteNode) Disconnect() { + rn.mtx.Lock() + defer rn.mtx.Unlock() + + id := rn.GetId().ToUint64() + switch rn.connectionStatus { + case RemoteNodeStatus_Attempted: + rn.cmgr.CloseAttemptedConnection(id) + case RemoteNodeStatus_Connected, RemoteNodeStatus_HandshakeCompleted: + rn.cmgr.CloseConnection(id) + } + rn.setStatusTerminated() +} + +func (rn *RemoteNode) SendMessage(desoMsg DeSoMessage) error { + if rn.connectionStatus != RemoteNodeStatus_HandshakeCompleted { + return fmt.Errorf("SendMessage: Remote node is not connected") + } + + return rn.sendMessage(desoMsg) +} + +func (rn *RemoteNode) sendMessage(desoMsg DeSoMessage) error { + if err := rn.cmgr.SendMessage(desoMsg, rn.GetId().ToUint64()); err != nil { + return fmt.Errorf("SendMessage: Problem sending message to peer (id= %d): %v", rn.id, err) + } + return nil +} + +// InitiateHandshake is a starting point for a peer handshake. If the peer is outbound, a version message is sent +// to the peer. If the peer is inbound, the peer is expected to send a version message to us first. 
+func (rn *RemoteNode) InitiateHandshake(nonce uint64) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + + if rn.connectionStatus != RemoteNodeStatus_Connected { + return fmt.Errorf("InitiateHandshake: Remote node is not connected") + } + if rn.getHandshakeStage() != HandshakeStage_NotStarted { + return fmt.Errorf("InitiateHandshake: Handshake has already been initiated") + } + + if rn.GetPeer().IsOutbound() { + versionTimeExpected := time.Now().Add(rn.params.VersionNegotiationTimeout) + rn.versionTimeExpected = &versionTimeExpected + if err := rn.sendVersionMessage(nonce); err != nil { + return fmt.Errorf("InitiateHandshake: Problem sending version message to peer (id= %d): %v", rn.id, err) + } + rn.setHandshakeStage(HandshakeStage_VersionSent) + } + return nil +} + +// sendVersionMessage generates and sends a version message to a RemoteNode peer. The message will contain the nonce +// that is passed in as an argument. +func (rn *RemoteNode) sendVersionMessage(nonce uint64) error { + verMsg := rn.newVersionMessage(nonce) + + // Record the nonce of this version message before we send it so we can + // detect self connections and so we can validate that the peer actually + // controls the IP she's supposedly communicating to us from. + rn.handshakeMetadata.versionNonceSent = nonce + + if err := rn.sendMessage(verMsg); err != nil { + return fmt.Errorf("sendVersionMessage: Problem sending version message to peer (id= %d): %v", rn.id, err) + } + return nil +} + +// newVersionMessage returns a new version message that can be sent to a RemoteNode peer. The message will contain the +// nonce that is passed in as an argument. +func (rn *RemoteNode) newVersionMessage(nonce uint64) *MsgDeSoVersion { + ver := NewMessage(MsgTypeVersion).(*MsgDeSoVersion) + + ver.Version = rn.params.ProtocolVersion.ToUint64() + // Set the services bitfield to indicate what services this node supports. + ver.Services = rn.nodeServices + + // We use an int64 instead of a uint64 for convenience. 
+ ver.TstampSecs = time.Now().Unix() + + ver.Nonce = nonce + ver.UserAgent = rn.params.UserAgent + + // When a node asks you for what height you have, you should reply with the height of the latest actual block you + // have. This makes it so that peers who have up-to-date headers but missing blocks won't be considered for initial + // block download. + ver.LatestBlockHeight = rn.latestBlockHeight + + // Set the minimum fee rate the peer will accept. + ver.MinFeeRateNanosPerKB = rn.minTxFeeRateNanosPerKB + + return ver +} + +// HandleVersionMessage is called upon receiving a version message from the RemoteNode's peer. The peer may be the one +// initiating the handshake, in which case, we should respond with our own version message. To do this, we pass the +// responseNonce to this function, which we will use in our response version message. +func (rn *RemoteNode) HandleVersionMessage(verMsg *MsgDeSoVersion, responseNonce uint64) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + + if rn.connectionStatus != RemoteNodeStatus_Connected { + return fmt.Errorf("HandleVersionMessage: RemoteNode is not connected") + } + if rn.getHandshakeStage() != HandshakeStage_NotStarted && rn.getHandshakeStage() != HandshakeStage_VersionSent { + return fmt.Errorf("HandleVersionMessage: Handshake has already been initiated, stage: %v", rn.getHandshakeStage()) + } + + // Verify that the peer's version matches our minimal supported version. + if verMsg.Version < rn.params.MinProtocolVersion { + return fmt.Errorf("RemoteNode.HandleVersionMessage: Requesting disconnect for id: (%v) "+ + "protocol version too low. Peer version: %v, min version: %v", rn.id, verMsg.Version, rn.params.MinProtocolVersion) + } + + // Verify that the peer's version message is sent within the version negotiation timeout. + if rn.versionTimeExpected != nil && rn.versionTimeExpected.Before(time.Now()) { + return fmt.Errorf("RemoteNode.HandleVersionMessage: Requesting disconnect for id: (%v) "+ + "version timeout. 
Time expected: %v, now: %v", rn.id, rn.versionTimeExpected.UnixMicro(), time.Now().UnixMicro()) + } + + vMeta := rn.handshakeMetadata + // Record the version the peer is using. + vMeta.advertisedProtocolVersion = NewProtocolVersionType(verMsg.Version) + // Decide on the protocol version to use for this connection. + negotiatedVersion := rn.params.ProtocolVersion + if verMsg.Version < rn.params.ProtocolVersion.ToUint64() { + negotiatedVersion = NewProtocolVersionType(verMsg.Version) + } + vMeta.negotiatedProtocolVersion = negotiatedVersion + + // Record the services the peer is advertising. + vMeta.serviceFlag = verMsg.Services + + // Record the tstamp sent by the peer and calculate the time offset. + timeConnected := time.Unix(verMsg.TstampSecs, 0) + vMeta.timeConnected = &timeConnected + currentTime := time.Now().Unix() + if currentTime > verMsg.TstampSecs { + vMeta.timeOffsetSecs = uint64(currentTime - verMsg.TstampSecs) + } else { + vMeta.timeOffsetSecs = uint64(verMsg.TstampSecs - currentTime) + } + + // Save the received version nonce so we can include it in our verack message. + vMeta.versionNonceReceived = verMsg.Nonce + + // Set the peer info-related fields. + vMeta.userAgent = verMsg.UserAgent + vMeta.latestBlockHeight = verMsg.LatestBlockHeight + vMeta.minTxFeeRateNanosPerKB = verMsg.MinFeeRateNanosPerKB + + // Respond to the version message if this is an inbound peer. + if !rn.peer.IsOutbound() { + if err := rn.sendVersionMessage(responseNonce); err != nil { + return errors.Wrapf(err, "RemoteNode.HandleVersionMessage: Problem sending version message to peer (id= %d)", rn.id) + } + } + + // After sending and receiving a compatible version, send the verack message. Notice that we don't wait for the + // peer's verack message even if it is an inbound peer. Instead, we just send the verack message right away. + + // Set the latest time by which we should receive a verack message from the peer. 
+ verackTimeExpected := time.Now().Add(rn.params.VersionNegotiationTimeout) + rn.verackTimeExpected = &verackTimeExpected + if err := rn.sendVerack(); err != nil { + return errors.Wrapf(err, "RemoteNode.HandleVersionMessage: Problem sending verack message to peer (id= %d)", rn.id) + } + + // Update the timeSource now that we've gotten a version message from the peer. + rn.cmgr.AddTimeSample(rn.peer.Address(), timeConnected) + rn.setHandshakeStage(HandshakeStage_VerackSent) + return nil +} + +// sendVerack constructs and sends a verack message to the peer. +func (rn *RemoteNode) sendVerack() error { + verackMsg, err := rn.newVerackMessage() + if err != nil { + return err + } + + if err := rn.sendMessage(verackMsg); err != nil { + return errors.Wrapf(err, "RemoteNode.SendVerack: Problem sending verack message to peer (id= %d): %v", rn.id, err) + } + return nil +} + +// newVerackMessage constructs a verack message to be sent to the peer. +func (rn *RemoteNode) newVerackMessage() (*MsgDeSoVerack, error) { + verack := NewMessage(MsgTypeVerack).(*MsgDeSoVerack) + vMeta := rn.handshakeMetadata + + switch vMeta.negotiatedProtocolVersion { + case ProtocolVersion0, ProtocolVersion1: + // For protocol versions 0 and 1, we just send back the nonce we received from the peer in the version message. + verack.Version = VerackVersion0 + verack.NonceReceived = vMeta.versionNonceReceived + case ProtocolVersion2: + // FIXME: resolve the non-validator - validator handshake issues on protocol version 2. + // For protocol version 2, we need to send the nonce we received from the peer in their version message. + // We also need to send our own nonce, which we generate for our version message. In addition, we need to + // send a current timestamp (in microseconds). We then sign the tuple of (nonceReceived, nonceSent, tstampMicro) + // using our validator BLS key, and send the signature along with our public key. 
+ var err error + verack.Version = VerackVersion1 + verack.NonceReceived = vMeta.versionNonceReceived + verack.NonceSent = vMeta.versionNonceSent + tstampMicro := uint64(time.Now().UnixMicro()) + verack.TstampMicro = tstampMicro + verack.PublicKey = rn.keystore.GetSigner().GetPublicKey() + verack.Signature, err = rn.keystore.GetSigner().SignPoSValidatorHandshake(verack.NonceSent, verack.NonceReceived, tstampMicro) + if err != nil { + return nil, fmt.Errorf("RemoteNode.newVerackMessage: Problem signing verack message: %v", err) + } + } + return verack, nil +} + +// HandleVerackMessage handles a verack message received from the peer. +func (rn *RemoteNode) HandleVerackMessage(vrkMsg *MsgDeSoVerack) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + + if rn.connectionStatus != RemoteNodeStatus_Connected { + return fmt.Errorf("RemoteNode.HandleVerackMessage: Requesting disconnect for id: (%v) "+ + "verack received while in state: %v", rn.id, rn.connectionStatus) + } + + if rn.getHandshakeStage() != HandshakeStage_VerackSent { + return fmt.Errorf("RemoteNode.HandleVerackMessage: Requesting disconnect for id: (%v) "+ + "verack received while in handshake stage: %v", rn.id, rn.getHandshakeStage()) + } + + if rn.verackTimeExpected != nil && rn.verackTimeExpected.Before(time.Now()) { + return fmt.Errorf("RemoteNode.HandleVerackMessage: Requesting disconnect for id: (%v) "+ + "verack timeout. Time expected: %v, now: %v", rn.id, rn.verackTimeExpected.UnixMicro(), time.Now().UnixMicro()) + } + + var err error + vMeta := rn.handshakeMetadata + switch vMeta.negotiatedProtocolVersion { + case ProtocolVersion0, ProtocolVersion1: + err = rn.validateVerackPoW(vrkMsg) + case ProtocolVersion2: + err = rn.validateVerackPoS(vrkMsg) + } + + if err != nil { + return errors.Wrapf(err, "RemoteNode.HandleVerackMessage: Problem validating verack message from peer (id= %d)", rn.id) + } + + // If we get here then the peer has successfully completed the handshake. 
+ vMeta.versionNegotiated = true + rn._logVersionSuccess(rn.peer) + rn.setStatusHandshakeCompleted() + rn.setHandshakeStage(HandshakeStage_Completed) + rn.srv.NotifyHandshakePeerMessage(rn.peer) + + return nil +} + +func (rn *RemoteNode) validateVerackPoW(vrkMsg *MsgDeSoVerack) error { + vMeta := rn.handshakeMetadata + + // Verify that the verack message is formatted correctly according to the PoW standard. + if vrkMsg.Version != VerackVersion0 { + return fmt.Errorf("RemoteNode.validateVerackPoW: Requesting disconnect for id: (%v) "+ + "verack version mismatch; message: %v; expected: %v", rn.id, vrkMsg.Version, VerackVersion0) + } + + // If the verack message has a nonce that wasn't previously sent to us in the version message, return an error. + if vrkMsg.NonceReceived != vMeta.versionNonceSent { + return fmt.Errorf("RemoteNode.validateVerackPoW: Requesting disconnect for id: (%v) nonce mismatch; "+ + "message: %v; nonceSent: %v", rn.id, vrkMsg.NonceReceived, vMeta.versionNonceSent) + } + + return nil +} + +func (rn *RemoteNode) validateVerackPoS(vrkMsg *MsgDeSoVerack) error { + vMeta := rn.handshakeMetadata + + // Verify that the verack message is formatted correctly according to the PoS standard. + if vrkMsg.Version != VerackVersion1 { + return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+ + "verack version mismatch; message: %v; expected: %v", rn.id, vrkMsg.Version, VerackVersion1) + } + + // Verify that the counterparty's verack message's NonceReceived matches the NonceSent we sent. + if vrkMsg.NonceReceived != vMeta.versionNonceSent { + return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) nonce mismatch; "+ + "message: %v; nonceSent: %v", rn.id, vrkMsg.NonceReceived, vMeta.versionNonceSent) + } + + // Verify that the counterparty's verack message's NonceSent matches the NonceReceived we sent. 
+ if vrkMsg.NonceSent != vMeta.versionNonceReceived { + return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+ + "verack nonce mismatch; message: %v; expected: %v", rn.id, vrkMsg.NonceSent, vMeta.versionNonceReceived) + } + + // Get the current time in microseconds and make sure the verack message's timestamp is within 15 minutes of it. + timeNowMicro := uint64(time.Now().UnixMicro()) + if vrkMsg.TstampMicro < timeNowMicro-rn.params.HandshakeTimeoutMicroSeconds { + return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+ + "verack timestamp too far in the past. Time now: %v, verack timestamp: %v", rn.id, timeNowMicro, vrkMsg.TstampMicro) + } + + // Make sure the verack message's public key and signature are not nil. + if vrkMsg.PublicKey == nil || vrkMsg.Signature == nil { + return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+ + "verack public key or signature is nil", rn.id) + } + + // Verify the verack message's signature. + ok, err := BLSVerifyPoSValidatorHandshake(vrkMsg.NonceSent, vrkMsg.NonceReceived, vrkMsg.TstampMicro, + vrkMsg.Signature, vrkMsg.PublicKey) + if err != nil { + return errors.Wrapf(err, "RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+ + "verack signature verification failed with error", rn.id) + } + if !ok { + return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+ + "verack signature verification failed", rn.id) + } + + // If we get here then the verack message is valid. Set the validator public key on the peer. 
+ vMeta.validatorPublicKey = vrkMsg.PublicKey + return nil +} + +func (rn *RemoteNode) _logVersionSuccess(peer *Peer) { + inboundStr := "INBOUND" + if rn.IsOutbound() { + inboundStr = "OUTBOUND" + } + persistentStr := "PERSISTENT" + if !rn.IsPersistent() { + persistentStr = "NON-PERSISTENT" + } + logStr := fmt.Sprintf("SUCCESS version negotiation for (%s) (%s) peer (%v).", inboundStr, persistentStr, peer) + glog.V(1).Info(logStr) +} + +func GetVerackHandshakePayload(nonceReceived uint64, nonceSent uint64, tstampMicro uint64) [32]byte { + // The payload for the verack message is the two nonces concatenated together. + // We do this so that we can sign the nonces and verify the signature on the other side. + nonceReceivedBytes := make([]byte, 8) + binary.BigEndian.PutUint64(nonceReceivedBytes, nonceReceived) + + nonceSentBytes := make([]byte, 8) + binary.BigEndian.PutUint64(nonceSentBytes, nonceSent) + + tstampBytes := make([]byte, 8) + binary.BigEndian.PutUint64(tstampBytes, tstampMicro) + + payload := append(nonceReceivedBytes, nonceSentBytes...) + payload = append(payload, tstampBytes...) + + return sha3.Sum256(payload) +} diff --git a/lib/server.go b/lib/server.go index 4260f65fb..59213657e 100644 --- a/lib/server.go +++ b/lib/server.go @@ -4,6 +4,8 @@ import ( "bytes" "encoding/hex" "fmt" + "github.com/btcsuite/btcd/wire" + "github.com/deso-protocol/core/consensus" "net" "reflect" "runtime" @@ -17,9 +19,7 @@ import ( "github.com/btcsuite/btcd/addrmgr" chainlib "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" - "github.com/deso-protocol/core/consensus" "github.com/deso-protocol/go-deadlock" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" @@ -232,7 +232,6 @@ func (srv *Server) GetBlockProducer() *DeSoBlockProducer { return srv.blockProducer } -// TODO: The hallmark of a messy non-law-of-demeter-following interface... 
func (srv *Server) GetConnectionManager() *ConnectionManager { return srv.cmgr } @@ -390,7 +389,8 @@ func NewServer( _nodeMessageChan chan NodeMessage, _forceChecksum bool, _stateChangeDir string, - _hypersyncMaxQueueSize uint32) ( + _hypersyncMaxQueueSize uint32, + _blsKeystore *BLSKeystore) ( _srv *Server, _err error, _shouldRestart bool) { var err error @@ -715,6 +715,13 @@ func (srv *Server) GetSnapshot(pp *Peer) { "with Prefix (%v) and SnapshotStartEntry (%v)", pp, prefix, lastReceivedKey) } +func (srv *Server) NotifyHandshakePeerMessage(peer *Peer) { + srv.incomingMessages <- &ServerMessage{ + Peer: peer, + Msg: &MsgDeSoPeerHandshakeComplete{}, + } +} + // GetBlocksToStore is part of the archival mode, which makes the node download all historical blocks after completing // hypersync. We will go through all blocks corresponding to the snapshot and download the blocks. func (srv *Server) GetBlocksToStore(pp *Peer) { @@ -1569,11 +1576,12 @@ func (srv *Server) _startSync() { } -func (srv *Server) _handleNewPeer(pp *Peer) { +func (srv *Server) _handlePeerHandshakeComplete(pp *Peer) { isSyncCandidate := pp.IsSyncCandidate() isSyncing := srv.blockchain.isSyncing() chainState := srv.blockchain.chainState() - glog.V(1).Infof("Server._handleNewPeer: Processing NewPeer: (%v); IsSyncCandidate(%v), syncPeerIsNil=(%v), IsSyncing=(%v), ChainState=(%v)", + glog.V(1).Infof("Server._handlePeerHandshakeComplete: Processing NewPeer: (%v); IsSyncCandidate(%v), "+ + "syncPeerIsNil=(%v), IsSyncing=(%v), ChainState=(%v)", pp, isSyncCandidate, (srv.SyncPeer == nil), isSyncing, chainState) // Request a sync if we're ready @@ -2221,6 +2229,8 @@ func (srv *Server) _handleGetAddrMessage(pp *Peer, msg *MsgDeSoGetAddr) { func (srv *Server) _handleControlMessages(serverMessage *ServerMessage) (_shouldQuit bool) { switch serverMessage.Msg.(type) { // Control messages used internally to signal to the server. 
+ case *MsgDeSoPeerHandshakeComplete: + srv._handlePeerHandshakeComplete(serverMessage.Peer) case *MsgDeSoDisconnectedPeer: srv._handleDonePeer(serverMessage.Peer) case *MsgDeSoQuit: From 10ad1fd0ab15f9155613a2938f7e63a913940c25 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Sat, 6 Jan 2024 18:01:28 -0800 Subject: [PATCH 338/762] Vested coin lockups implementation. --- lib/block_view.go | 13 + lib/block_view_lockups.go | 1091 ++++++++++++++++++++++++++------ lib/block_view_lockups_test.go | 794 ++++++++++++----------- lib/block_view_types.go | 89 ++- lib/blockchain.go | 17 +- lib/constants.go | 46 +- lib/db_utils.go | 479 +++++++++++--- lib/errors.go | 6 +- lib/pos_snapshot_entries.go | 4 + 9 files changed, 1886 insertions(+), 653 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 3836c3dfe..6c25be276 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -125,6 +125,7 @@ type UtxoView struct { LockedStakeMapKeyToLockedStakeEntry map[LockedStakeMapKey]*LockedStakeEntry // Locked DAO coin and locked DESO balance entry mapping. + // NOTE: See comment on LockedBalanceEntryKey before altering. LockedBalanceEntryKeyToLockedBalanceEntry map[LockedBalanceEntryKey]*LockedBalanceEntry // Lockup yield curve points. 
@@ -3201,6 +3202,18 @@ func (bav *UtxoView) _connectUpdateGlobalParams( ) } } + if len(extraData[MaximumVestedIntersectionsPerLockupTransactionKey]) > 0 { + maximumVestedIntersectionsPerLockupTransaction, bytesRead := Varint( + extraData[MaximumVestedIntersectionsPerLockupTransactionKey], + ) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: " + + "unable to decode MaximumVestedIntersectionsPerLockupTransaction as uint64") + } + newGlobalParamsEntry.MaximumVestedIntersectionsPerLockupTransaction = + int(maximumVestedIntersectionsPerLockupTransaction) + } if len(extraData[FeeBucketGrowthRateBasisPointsKey]) > 0 { val, bytesRead := Uvarint( extraData[FeeBucketGrowthRateBasisPointsKey], diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index bc1abdc6e..89ba36ae0 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -9,6 +9,7 @@ import ( "github.com/holiman/uint256" "github.com/pkg/errors" "math/big" + "reflect" "sort" ) @@ -16,35 +17,121 @@ import ( // TYPES: LockedBalanceEntry // +// LockedBalanceEntry is a simple struct with different disjoint states: +// +// (1) UnlockTimestampNanoSecs == VestingEndTimestampNanoSecs +// This is the unvested case. It represents a lump sum unlock at a given time. +// The user can unlock the full BalanceBaseUnits AFTER UnlockTimestampNanoSecs. +// When written to disk, we add a special 'byte' to the database entry key such +// that we can quickly find all unvested LockedBalanceEntries. +// (2) UnlockTimestampNanoSecs < VestingEndTimestampNanoSecs +// This is the vested case. It represents a vest schedule for locked DeSo tokens. +// The user can unlock a portion of BalanceBaseUnits proportional to +// how much of the time between (UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs) +// has passed by the time the balance is unlocked. 
When writing this LockedBalanceEntry +// to disk, we add a special 'byte' to the database entry key such that +// we can quickly find all vested LockedBalanceEntries. +// (3) UnlockTimestampNanoSecs > VestingEndTimestampNanoSecs +// This shouldn't be possible. Any LockedBalanceEntries where this is the case +// are degenerate and should not exist. +// +// These states delineate between vested and unvested LockedBalanceEntries. It's important to note that +// by only specifying the remaining BalanceBaseUnits, UnlockTimestampNanoSecs, and VestingEndTimestampNanoSecs +// this vesting schedule implementation is linear. type LockedBalanceEntry struct { - HODLerPKID *PKID - ProfilePKID *PKID - UnlockTimestampNanoSecs int64 - BalanceBaseUnits uint256.Int - isDeleted bool + HODLerPKID *PKID + ProfilePKID *PKID + UnlockTimestampNanoSecs int64 + VestingEndTimestampNanoSecs int64 + BalanceBaseUnits uint256.Int + isDeleted bool } +// LockedBalanceEntryKey is a very crucial struct in the design of lockups. +// +// Consider a naive utilization of LockedBalanceEntryMapKey in the context of two subsequent vested unlocks +// WITHOUT flushing to db in-between: +// +// (1st Unlock, blockTimestampNanoSecs=x+100) +// (1) We read from disk the LockedBalanceEntry with UnlockTimestampNanoSecs=x +// (2) We cache the entry found in-memory (i.e. 
in the UtxoView)
+//	(3) We read from the in-memory cache the LockedBalanceEntry with UnlockTimestampNanoSecs=x
+//	(4) We update the in-memory entry to have UnlockTimestampNanoSecs=x+100
+//	(2nd Unlock, blockTimestampNanoSecs=x+150)
+//	(1) We read from disk the LockedBalanceEntry with UnlockTimestampNanoSecs=x
+//	(2) Because no other entry in-memory has UnlockTimestampNanoSecs=x, we cache a duplicate entry in-memory
+//	(3) We read from the in-memory cache the two LockedBalanceEntries with UnlockTimestampNanoSecs={x,x+100}
+//	(4) We update the in-memory entries to both {x+150,x+150}
+//	(5) We are now left with two duplicate in-memory entries representing the same semantic locked balance
+//
+// NOTE: While it may seem that there's an error on the 1st unlock step #4 as we do not check to see if there's
+// a conflicting on-disk LockedBalanceEntry with UnlockTimestampNanoSecs=x+100 that must be consolidated,
+// we're assuming that consolidation happens on the lockup transaction rather than the unlock transaction.
+// This means that it's impossible there's two vested LockedBalanceEntries who have ANY overlap in their
+// (UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs) pairs. That is to say, this would not cause
+// a bug in this specific implementation.
+//
+// At the heart, this problem exists because what is semantically the same LockedBalanceEntry did not have a
+// way of deduplicating the on-disk and in-memory versions given the updated UnlockTimestampNanoSecs.
+// This is not ideal, and would likely be the root of more complicated problems if not dealt with.
+//
+// To solve this problem, we opt to deduplicate the in-memory and on-disk entries via a careful utilization of
+// the isDeleted field in the various transaction connects related to lockups. For instance, in the case
+// where we deal with two subsequent unlocks we must both mark the previous in-memory entry with isDeleted=true and
+// store a semantically duplicate entry with the updated UnlockTimestampNanoSecs with isDeleted=false. 
To show how
+// this would work on the previous example:
+//
+//	(1st Unlock, blockTimestampNanoSecs=x+100)
+//	(1) We read from disk the LockedBalanceEntry with UnlockTimestampNanoSecs=x
+//	(2) We cache the entry found in-memory (i.e. in the UtxoView)
+//	(3) We read from the in-memory cache the LockedBalanceEntry with UnlockTimestampNanoSecs=x
+//	(4) We update the entry to have UnlockTimestampNanoSecs=x+100
+//	(5) We delete the original LockedBalanceEntry with UnlockTimestampNanoSecs from the view
+//	(6) We set the updated LockedBalanceEntry in-memory
+//	(2nd Unlock, blockTimestampNanoSecs=x+150)
+//	(1) We read from disk the LockedBalanceEntry with UnlockTimestampNanoSecs=x
+//	(2) We see an equivalent entry in the view with isDeleted=true,
+//	    meaning we do NOT cache this entry in-memory.
+//	(3) We read from the in-memory cache the LockedBalanceEntry with UnlockTimestampNanoSecs=x+100
+//	(4) We update the entry to have UnlockTimestampNanoSecs=x+150
+//	(5) We delete the x+100 LockedBalanceEntry from the view and set the updated LockedBalanceEntry in-memory
+//
+// The difference here is the LockedBalanceEntry is not duplicated in-memory on repetitive reads between flushes.
+// This is a crucial difference to prevent odd and dangerous caching bugs from happening. It's important
+// to note we could instead utilize a UUID on the LockedBalanceEntry that persists across transactions,
+// but this creates difficulties around consolidations and (possible) future transfers. While ideally we would
+// opt to avoid consolidations, consolidations are inevitable either on lockup or on unlock to ensure on-disk
+// entries do not get inadvertently overwritten.
+//
+// In summary, whenever changing logic that touches in-memory (i.e. 
in the UtxoView) LockedBalanceEntries, +// make sure of the following: +// IF modifying a LockedBalanceEntry in the UtxoView, THEN ensure to delete the original +// LockedBalanceEntry under the original LockedBalanceEntryKey AND set the new LockedBalanceEntry +// under the new and different LockedBalanceEntryKey in the view. type LockedBalanceEntryKey struct { - HODLerPKID PKID - ProfilePKID PKID - UnlockTimestampNanoSecs int64 + HODLerPKID PKID + ProfilePKID PKID + UnlockTimestampNanoSecs int64 + VestingEndTimestampNanoSecs int64 } func (lockedBalanceEntry *LockedBalanceEntry) Copy() *LockedBalanceEntry { return &LockedBalanceEntry{ - HODLerPKID: lockedBalanceEntry.HODLerPKID.NewPKID(), - ProfilePKID: lockedBalanceEntry.ProfilePKID.NewPKID(), - UnlockTimestampNanoSecs: lockedBalanceEntry.UnlockTimestampNanoSecs, - BalanceBaseUnits: lockedBalanceEntry.BalanceBaseUnits, - isDeleted: lockedBalanceEntry.isDeleted, + HODLerPKID: lockedBalanceEntry.HODLerPKID.NewPKID(), + ProfilePKID: lockedBalanceEntry.ProfilePKID.NewPKID(), + UnlockTimestampNanoSecs: lockedBalanceEntry.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: lockedBalanceEntry.VestingEndTimestampNanoSecs, + BalanceBaseUnits: lockedBalanceEntry.BalanceBaseUnits, + isDeleted: lockedBalanceEntry.isDeleted, } } func (lockedBalanceEntry *LockedBalanceEntry) ToMapKey() LockedBalanceEntryKey { return LockedBalanceEntryKey{ - HODLerPKID: *lockedBalanceEntry.HODLerPKID, - ProfilePKID: *lockedBalanceEntry.ProfilePKID, - UnlockTimestampNanoSecs: lockedBalanceEntry.UnlockTimestampNanoSecs, + HODLerPKID: *lockedBalanceEntry.HODLerPKID, + ProfilePKID: *lockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: lockedBalanceEntry.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: lockedBalanceEntry.VestingEndTimestampNanoSecs, } } @@ -55,6 +142,7 @@ func (lockedBalanceEntry *LockedBalanceEntry) RawEncodeWithoutMetadata(blockHeig data = append(data, EncodeToBytes(blockHeight, lockedBalanceEntry.HODLerPKID, 
skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, lockedBalanceEntry.ProfilePKID, skipMetadata...)...) data = append(data, IntToBuf(lockedBalanceEntry.UnlockTimestampNanoSecs)...) + data = append(data, IntToBuf(lockedBalanceEntry.VestingEndTimestampNanoSecs)...) data = append(data, VariableEncodeUint256(&lockedBalanceEntry.BalanceBaseUnits)...) return data } @@ -80,6 +168,12 @@ func (lockedBalanceEntry *LockedBalanceEntry) RawDecodeWithoutMetadata(blockHeig return errors.Wrap(err, "LockedBalanceEntry.Decode: Problem reading UnlockTimestampNanoSecs") } + // VestingEndTimestampNanoSecs + lockedBalanceEntry.VestingEndTimestampNanoSecs, err = ReadVarint(rr) + if err != nil { + return errors.Wrap(err, "LockedBalanceEntry.Decode: Problem reading VestingEndTimestampNanoSecs") + } + // BalanceBaseUnits balanceBaseUnits, err := VariableDecodeUint256(rr) if err != nil { @@ -103,7 +197,7 @@ func (lockedBalanceEntry *LockedBalanceEntry) GetEncoderType() EncoderType { func (bav *UtxoView) _setLockedBalanceEntry(lockedBalanceEntry *LockedBalanceEntry) { // This function shouldn't be called with nil. if lockedBalanceEntry == nil { - glog.Errorf("_setLockedBalanceEntryMappingsWithPKIDsTimestampType: Called with nil LockedBalanceEntry; " + + glog.Errorf("_setLockedBalanceEntry: Called with nil LockedBalanceEntry; " + "this should never happen.") return } @@ -130,13 +224,128 @@ func (bav *UtxoView) _deleteLockedBalanceEntry(lockedBalanceEntry *LockedBalance // Get Helper Functions for LockedBalanceEntry -func (bav *UtxoView) GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - hodlerPKID *PKID, profilePKID *PKID, unlockTimestampNanoSecs int64) (_lockedBalanceEntry *LockedBalanceEntry, _err error) { +func (bav *UtxoView) GetLockedBalanceEntryForLockedBalanceEntryKey( + lockedBalanceEntryKey *LockedBalanceEntryKey, +) ( + _lockedBalanceEntry *LockedBalanceEntry, + _err error, +) { + // Check if the key exists in the view. 
+ if viewEntry, viewEntryExists := + bav.LockedBalanceEntryKeyToLockedBalanceEntry[*lockedBalanceEntryKey]; viewEntryExists { + if viewEntry == nil || viewEntry.isDeleted { + return nil, nil + } + return viewEntry, nil + } + + // No mapping exists in the view, check for an entry in the db. + lockedBalanceEntry, err := DBGetLockedBalanceEntryForLockedBalanceEntryKey( + bav.Handle, bav.Snapshot, lockedBalanceEntryKey) + if err != nil { + return nil, + errors.Wrap(err, "GetLockedBalanceEntryForLockedBalanceEntryKey") + } + + // Cache the DB entry in the in-memory map. + if lockedBalanceEntry != nil { + bav._setLockedBalanceEntry(lockedBalanceEntry) + } + + return lockedBalanceEntry, nil +} + +func (bav *UtxoView) GetLimitedVestedLockedBalanceEntriesOverTimeInterval( + hodlerPKID *PKID, + profilePKID *PKID, + unlockTimestampNanoSecs int64, + vestingEndTimestampNanoSecs int64, + limitToFetch int, +) ( + _lockedBalanceEntries []*LockedBalanceEntry, + _err error, +) { + // Step 1: Fetch a limited number of vested locked balance entries from the view. + // Any modified/deleted locked balance entries should have .isDeleted=true + // preventing them from being re-read into the view. The fact that modified + // locked balance entries have .isDeleted=True is a unique feature that + // occurs because keys on locked balance entries can change with time for the same + // semantically equivalent value. + // Also note, we read a limited number of entries based on the passed limitToFetch + // to prevent excessive reads to the db. We explicitly check if the error occurs + // as a result of over-reading the db or from other db errors. 
+	vestedLockedBalanceEntries, err := DBGetLimitedVestedLockedBalanceEntries(
+		bav.Handle,
+		bav.Snapshot,
+		hodlerPKID,
+		profilePKID,
+		unlockTimestampNanoSecs,
+		vestingEndTimestampNanoSecs,
+		limitToFetch)
+	if err != nil {
+		return nil,
+			errors.Wrap(err, "GetLimitedVestedLockedBalanceEntriesOverTimeInterval")
+	}
+
+	// Step 2: Cache the fetched locked balance entries from the db into the view.
+	// Again, any existing modified/deleted vested locked balance entries should result in no insert.
+	for _, lockedBalanceEntry := range vestedLockedBalanceEntries {
+		if _, exists := bav.LockedBalanceEntryKeyToLockedBalanceEntry[lockedBalanceEntry.ToMapKey()]; !exists {
+			bav._setLockedBalanceEntry(lockedBalanceEntry)
+		}
+	}
+
+	// Step 3: Read from the view the relevant vested locked balance entries.
+	// Note that if we over-read the view, meaning we find more than limitToFetch relevant entries,
+	// we will throw a rule error.
+	// Also note, this operation is currently quite inefficient as it iterates through all locked
+	// balance entries in the view. In the future, it may be necessary to replace the
+	// LockedBalanceEntryKeyToLockedBalanceEntry field of UtxoView with an optimized B-tree implementation.
+	// We could opt to use an ordered map, but the Go implementation would sort on insertion time. 
+ var lockedBalanceEntries []*LockedBalanceEntry + for _, lockedBalanceEntry := range bav.LockedBalanceEntryKeyToLockedBalanceEntry { + // A relevant vested locked balance entry satisfies all the following conditions: + // (1) Matching profile PKID + // (2) Matching hodler PKID + // (3) An unlock OR end timestamp within the specified (unlock, end) bounds + // (4) A mismatched unlock and vesting end timestamp (vesting condition) + // (5) Not deleted + if lockedBalanceEntry.ProfilePKID.Eq(profilePKID) && + lockedBalanceEntry.HODLerPKID.Eq(hodlerPKID) && + ((lockedBalanceEntry.UnlockTimestampNanoSecs >= unlockTimestampNanoSecs && + lockedBalanceEntry.UnlockTimestampNanoSecs <= vestingEndTimestampNanoSecs) || + (lockedBalanceEntry.VestingEndTimestampNanoSecs >= unlockTimestampNanoSecs && + lockedBalanceEntry.VestingEndTimestampNanoSecs <= vestingEndTimestampNanoSecs)) && + lockedBalanceEntry.UnlockTimestampNanoSecs != lockedBalanceEntry.VestingEndTimestampNanoSecs && + !lockedBalanceEntry.isDeleted { + lockedBalanceEntries = append(lockedBalanceEntries, lockedBalanceEntry) + } + + // If we've fetched more than we're permitted, we throw an error. + if len(lockedBalanceEntries) > limitToFetch { + return nil, errors.Wrap(RuleErrorCoinLockupViolatesVestingIntersectionLimit, + "GetLimitedVestedLockedBalanceEntriesOverTimeInterval") + } + } + + return lockedBalanceEntries, nil +} + +func (bav *UtxoView) GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + hodlerPKID *PKID, + profilePKID *PKID, + unlockTimestampNanoSecs int64, + vestingEndTimestampNanoSecs int64, +) ( + _lockedBalanceEntry *LockedBalanceEntry, + _err error, +) { // Create a key associated with the LockedBalanceEntry. 
lockedBalanceEntryKey := (&LockedBalanceEntry{ - HODLerPKID: hodlerPKID, - ProfilePKID: profilePKID, - UnlockTimestampNanoSecs: unlockTimestampNanoSecs, + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: unlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: vestingEndTimestampNanoSecs, }).ToMapKey() // Check if the key exists in the view. @@ -149,11 +358,12 @@ func (bav *UtxoView) GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestam } // No mapping exists in the view, check for an entry in the DB. - lockedBalanceEntry, err := DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - bav.Handle, bav.Snapshot, hodlerPKID, profilePKID, unlockTimestampNanoSecs) + lockedBalanceEntry, err := + DBGetLockedBalanceEntryForLockedBalanceEntryKey(bav.Handle, bav.Snapshot, &lockedBalanceEntryKey) if err != nil { return nil, - errors.Wrap(err, "GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs") + errors.Wrap(err, + "GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs") } // Cache the DB entry in the in-memory map. @@ -168,22 +378,35 @@ func (bav *UtxoView) GetUnlockableLockedBalanceEntries( hodlerPKID *PKID, profilePKID *PKID, currentTimestampNanoSecs int64, -) ([]*LockedBalanceEntry, error) { +) ( + _unvestedUnlockabeLockedBalanceEntries []*LockedBalanceEntry, + _vestedUnlockableLockedEntries []*LockedBalanceEntry, + _err error, +) { // Validate inputs. 
if hodlerPKID == nil { - return nil, errors.New("UtxoView.GetUnlockableLockedBalanceEntries: nil hodlerPKID provided as input") + return nil, nil, + errors.New("UtxoView.GetUnlockableLockedBalanceEntries: nil hodlerPKID provided as input") } if profilePKID == nil { - return nil, errors.New("UtxoView.GetUnlockableLockedBalanceEntries: nil profilePKID provided as input") + return nil, nil, + errors.New("UtxoView.GetUnlockableLockedBalanceEntries: nil profilePKID provided as input") } // First, pull unlockable LockedBalanceEntries from the db and cache them in the UtxoView. - dbUnlockableLockedBalanceEntries, err := DBGetUnlockableLockedBalanceEntries( - bav.Handle, bav.Snapshot, hodlerPKID, profilePKID, currentTimestampNanoSecs) + dbUnvestedUnlockableLockedBalanceEntries, dbVestedUnlockableLockedBalanceEntries, err := + DBGetUnlockableLockedBalanceEntries(bav.Handle, bav.Snapshot, hodlerPKID, profilePKID, currentTimestampNanoSecs) if err != nil { - return nil, errors.Wrap(err, "UtxoView.GetUnlockableLockedBalanceEntries") + return nil, nil, + errors.Wrap(err, "UtxoView.GetUnlockableLockedBalanceEntries") + } + for _, lockedBalanceEntry := range dbUnvestedUnlockableLockedBalanceEntries { + // Cache results in the UtxoView. + if _, exists := bav.LockedBalanceEntryKeyToLockedBalanceEntry[lockedBalanceEntry.ToMapKey()]; !exists { + bav._setLockedBalanceEntry(lockedBalanceEntry) + } } - for _, lockedBalanceEntry := range dbUnlockableLockedBalanceEntries { + for _, lockedBalanceEntry := range dbVestedUnlockableLockedBalanceEntries { // Cache results in the UtxoView. if _, exists := bav.LockedBalanceEntryKeyToLockedBalanceEntry[lockedBalanceEntry.ToMapKey()]; !exists { bav._setLockedBalanceEntry(lockedBalanceEntry) @@ -191,25 +414,34 @@ func (bav *UtxoView) GetUnlockableLockedBalanceEntries( } // Then, pull unlockable LockedBalanceEntries from the UtxoView. 
- var unlockableLockedBalanceEntries []*LockedBalanceEntry + var unlockableUnvestedLockedBalanceEntries []*LockedBalanceEntry + var unlockableVestedLockedBalanceEntries []*LockedBalanceEntry for _, lockedBalanceEntry := range bav.LockedBalanceEntryKeyToLockedBalanceEntry { // Filter to matching LockedBalanceEntries. if !lockedBalanceEntry.HODLerPKID.Eq(hodlerPKID) || !lockedBalanceEntry.ProfilePKID.Eq(profilePKID) || - lockedBalanceEntry.UnlockTimestampNanoSecs > currentTimestampNanoSecs || + lockedBalanceEntry.UnlockTimestampNanoSecs >= currentTimestampNanoSecs || lockedBalanceEntry.BalanceBaseUnits.IsZero() || lockedBalanceEntry.isDeleted { continue } - unlockableLockedBalanceEntries = append(unlockableLockedBalanceEntries, lockedBalanceEntry) + if lockedBalanceEntry.UnlockTimestampNanoSecs == lockedBalanceEntry.VestingEndTimestampNanoSecs { + unlockableUnvestedLockedBalanceEntries = append(unlockableUnvestedLockedBalanceEntries, lockedBalanceEntry) + } else { + unlockableVestedLockedBalanceEntries = append(unlockableVestedLockedBalanceEntries, lockedBalanceEntry) + } } // Sort UnlockableLockedBalanceEntries by timestamp ASC. 
- sort.Slice(unlockableLockedBalanceEntries, func(ii, jj int) bool { - return unlockableLockedBalanceEntries[ii].UnlockTimestampNanoSecs < - unlockableLockedBalanceEntries[jj].UnlockTimestampNanoSecs + sort.Slice(unlockableUnvestedLockedBalanceEntries, func(ii, jj int) bool { + return unlockableUnvestedLockedBalanceEntries[ii].UnlockTimestampNanoSecs < + unlockableUnvestedLockedBalanceEntries[jj].UnlockTimestampNanoSecs + }) + sort.Slice(unlockableVestedLockedBalanceEntries, func(ii, jj int) bool { + return unlockableVestedLockedBalanceEntries[ii].UnlockTimestampNanoSecs < + unlockableVestedLockedBalanceEntries[jj].UnlockTimestampNanoSecs }) - return unlockableLockedBalanceEntries, nil + return unlockableUnvestedLockedBalanceEntries, unlockableVestedLockedBalanceEntries, nil } // @@ -432,9 +664,11 @@ func (bav *UtxoView) GetLocalYieldCurvePoints(profilePKID *PKID, lockupDuration // type CoinLockupMetadata struct { - ProfilePublicKey *PublicKey - UnlockTimestampNanoSecs int64 - LockupAmountBaseUnits *uint256.Int + ProfilePublicKey *PublicKey + RecipientPublicKey *PublicKey + UnlockTimestampNanoSecs int64 + VestingEndTimestampNanoSecs int64 + LockupAmountBaseUnits *uint256.Int } func (txnData *CoinLockupMetadata) GetTxnType() TxnType { @@ -444,7 +678,9 @@ func (txnData *CoinLockupMetadata) GetTxnType() TxnType { func (txnData *CoinLockupMetadata) ToBytes(preSignature bool) ([]byte, error) { var data []byte data = append(data, EncodeByteArray(txnData.ProfilePublicKey.ToBytes())...) + data = append(data, EncodeByteArray(txnData.RecipientPublicKey.ToBytes())...) data = append(data, IntToBuf(txnData.UnlockTimestampNanoSecs)...) + data = append(data, IntToBuf(txnData.VestingEndTimestampNanoSecs)...) data = append(data, VariableEncodeUint256(txnData.LockupAmountBaseUnits)...) 
return data, nil } @@ -459,12 +695,26 @@ func (txnData *CoinLockupMetadata) FromBytes(data []byte) error { } txnData.ProfilePublicKey = NewPublicKey(profilePublicKeyBytes) + // RecipientPublicKey + recipientPublicKey, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrap(err, "CoinLockupMetadata.FromBytes: Problem reading RecipientPublicKey") + } + txnData.RecipientPublicKey = NewPublicKey(recipientPublicKey) + // UnlockTimestampNanoSecs txnData.UnlockTimestampNanoSecs, err = ReadVarint(rr) if err != nil { return errors.Wrap(err, "CoinLockupMetadata.FromBytes: Problem reading UnlockTimestampNanoSecs") } + // VestingEndTimestampNanoSecs + txnData.VestingEndTimestampNanoSecs, err = ReadVarint(rr) + if err != nil { + return errors.Wrap(err, + "CoinLockupMetadata.FromBytes: Problem reading VestingEndTimestampNanoSecs") + } + // LockupAmountBaseUnits txnData.LockupAmountBaseUnits, err = VariableDecodeUint256(rr) if err != nil { @@ -739,16 +989,32 @@ func (bav *UtxoView) _connectCoinLockup( errors.Wrap(RuleErrorCoinLockupInvalidLockupDuration, "_connectCoinLockup") } - // Compute the lockup duration in nanoseconds. - lockupDurationNanoSeconds := txMeta.UnlockTimestampNanoSecs - blockTimestampNanoSecs + // Validate the vesting end timestamp as expiring at a logically valid time. + if txMeta.VestingEndTimestampNanoSecs < txMeta.UnlockTimestampNanoSecs { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupInvalidVestingEndTimestamp, "_connectCoinLockup") + } - // Determine the hodler PKID to use. - transactorPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) - if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { + // In the vested case, validate that the underlying profile is the transactor. 
+ if txMeta.VestingEndTimestampNanoSecs > txMeta.UnlockTimestampNanoSecs && + !reflect.DeepEqual(txn.PublicKey, txMeta.ProfilePublicKey) { return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupInvalidTransactorPKID, "_connectCoinLockup") + errors.Wrapf(RuleErrorCoinLockupInvalidVestedTransactor, "_connectCoinLockup: Profile "+ + "pub key: %v, signer public key: %v", PkToString(txn.PublicKey, bav.Params), + PkToString(txn.PublicKey, bav.Params)) } - hodlerPKID := transactorPKIDEntry.PKID + + // Determine the recipient PKID to use. + if len(txMeta.RecipientPublicKey) != btcec.PubKeyBytesLenCompressed { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupInvalidRecipientPubKey, "_connectCoinLockup") + } + recipientPKIDEntry := bav.GetPKIDForPublicKey(txMeta.RecipientPublicKey.ToBytes()) + if recipientPKIDEntry == nil || recipientPKIDEntry.isDeleted { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupInvalidRecipientPKID, "_connectCoinLockup") + } + hodlerPKID := recipientPKIDEntry.PKID // Validate the transactor as having sufficient DAO Coin or DESO balance for the transaction. var transactorBalanceNanos256 *uint256.Int @@ -818,102 +1084,311 @@ func (bav *UtxoView) _connectCoinLockup( bav._setProfileEntryMappings(profileEntry) } - // By now we know the transaction to be valid. We now source yield information from either - // the profile's yield curve or the raw DeSo yield curve. Because there's some choice in how - // to determine the yield when the lockup duration falls between two profile-specified yield curve - // points, we return here the two local points and choose/interpolate between them below. - leftYieldCurvePoint, rightYieldCurvePoint, err := - bav.GetLocalYieldCurvePoints(profilePKID, lockupDurationNanoSeconds) - if err != nil { - return 0, 0, nil, - errors.Wrap(err, "_connectCoinLockup failed to fetch yield curve points") - } + // If this is an unvested lockup, compute any accrued yield. + // In the vested lockup case, the yield earned is always zero. 
+ yieldFromTxn := uint256.NewInt() + if txMeta.UnlockTimestampNanoSecs == txMeta.VestingEndTimestampNanoSecs { + // Compute the lockup duration in nanoseconds. + lockupDurationNanoSeconds := txMeta.UnlockTimestampNanoSecs - blockTimestampNanoSecs - // Here we interpolate (choose) the yield between the two returned local yield curve points. - // - // If we fall between two points, we choose the left yield curve point (i.e. the one with lesser lockup duration). - // The transactor earns yield only for the lockup duration specified by the left yield curve point but will - // be unable to unlock the coins until the transaction specified lockup duration expires. - txnYieldBasisPoints := uint64(0) - txnYieldEarningDurationNanoSecs := int64(0) - if leftYieldCurvePoint != nil && - leftYieldCurvePoint.LockupDurationNanoSecs < lockupDurationNanoSeconds { - txnYieldBasisPoints = leftYieldCurvePoint.LockupYieldAPYBasisPoints - txnYieldEarningDurationNanoSecs = leftYieldCurvePoint.LockupDurationNanoSecs - } - if rightYieldCurvePoint != nil && - rightYieldCurvePoint.LockupDurationNanoSecs == lockupDurationNanoSeconds { - txnYieldBasisPoints = rightYieldCurvePoint.LockupYieldAPYBasisPoints - txnYieldEarningDurationNanoSecs = rightYieldCurvePoint.LockupDurationNanoSecs - } - - // Convert variables to a consistent uint256 representation. This is to use them in SafeUint256 math. - txnYieldBasisPoints256 := uint256.NewInt().SetUint64(txnYieldBasisPoints) - txnYieldEarningDurationNanoSecs256 := uint256.NewInt().SetUint64(uint64(txnYieldEarningDurationNanoSecs)) - - // Compute the yield associated with this operation, checking to ensure there's no overflow. - yieldFromTxn, err := - CalculateLockupYield(txMeta.LockupAmountBaseUnits, txnYieldBasisPoints256, txnYieldEarningDurationNanoSecs256) - if err != nil { - return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup") + // By now we know the transaction to be valid. 
We now source yield information from either + // the profile's yield curve or the raw DeSo yield curve. Because there's some choice in how + // to determine the yield when the lockup duration falls between two profile-specified yield curve + // points, we return here the two local points and choose/interpolate between them below. + leftYieldCurvePoint, rightYieldCurvePoint, err := + bav.GetLocalYieldCurvePoints(profilePKID, lockupDurationNanoSeconds) + if err != nil { + return 0, 0, nil, + errors.Wrap(err, "_connectCoinLockup failed to fetch yield curve points") + } + + // Here we interpolate (choose) the yield between the two returned local yield curve points. + // + // If we fall between two points, we choose the left yield curve point (i.e. the one with lesser lockup duration). + // The transactor earns yield only for the lockup duration specified by the left yield curve point but will + // be unable to unlock the coins until the transaction specified lockup duration expires. + txnYieldBasisPoints := uint64(0) + txnYieldEarningDurationNanoSecs := int64(0) + if leftYieldCurvePoint != nil && + leftYieldCurvePoint.LockupDurationNanoSecs < lockupDurationNanoSeconds { + txnYieldBasisPoints = leftYieldCurvePoint.LockupYieldAPYBasisPoints + txnYieldEarningDurationNanoSecs = leftYieldCurvePoint.LockupDurationNanoSecs + } + if rightYieldCurvePoint != nil && + rightYieldCurvePoint.LockupDurationNanoSecs == lockupDurationNanoSeconds { + txnYieldBasisPoints = rightYieldCurvePoint.LockupYieldAPYBasisPoints + txnYieldEarningDurationNanoSecs = rightYieldCurvePoint.LockupDurationNanoSecs + } + + // Convert variables to a consistent uint256 representation. This is to use them in SafeUint256 math. + txnYieldBasisPoints256 := uint256.NewInt().SetUint64(txnYieldBasisPoints) + txnYieldEarningDurationNanoSecs256 := uint256.NewInt().SetUint64(uint64(txnYieldEarningDurationNanoSecs)) + + // Compute the yield associated with this operation, checking to ensure there's no overflow. 
+ yieldFromTxn, err = + CalculateLockupYield( + txMeta.LockupAmountBaseUnits, txnYieldBasisPoints256, txnYieldEarningDurationNanoSecs256) + if err != nil { + return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup") + } } - // Compute the amount awarded to the HODLer on unlock. + // Compute the total amount to be locked up in this transaction. lockupValue, err := SafeUint256().Add(txMeta.LockupAmountBaseUnits, yieldFromTxn) if err != nil { return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup: lockupValue") } - // For consolidation, we fetch equivalent LockedBalanceEntries. - // An equivalent LockedBalanceEntry has the same unlock timestamp and the same profile PKID. - lockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - hodlerPKID, profilePKID, txMeta.UnlockTimestampNanoSecs) - if err != nil { - return 0, 0, nil, - errors.Wrap(err, "_connectCoinLockup failed to fetch lockedBalanceEntry") - } - if lockedBalanceEntry == nil || lockedBalanceEntry.isDeleted { - lockedBalanceEntry = &LockedBalanceEntry{ - HODLerPKID: hodlerPKID, - ProfilePKID: profilePKID, - UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + // Now we must consolidate the locked balance entry along with those already present + // in the UtxoView and the DB. + // In the unvested case this is simple as we only look for an existing locked balance entry + // with the same unlock time, store the previous locked balance entry, and set the new locked balance entry + // in the view. An equivalent LockedBalanceEntry has the same unlock timestamp and the same profile PKID. + // In the vested case we must make careful modifications to the existing locked balance entry/entries. 
+ var previousLockedBalanceEntry *LockedBalanceEntry + var previousLockedBalanceEntries []*LockedBalanceEntry + if txMeta.UnlockTimestampNanoSecs == txMeta.VestingEndTimestampNanoSecs { + // Unvested consolidation case: + + // (1) Check for a locked balance entry with the same unlock time + lockedBalanceEntry, err := bav.GetLockedBalanceEntryForLockedBalanceEntryKey( + &LockedBalanceEntryKey{ + HODLerPKID: *hodlerPKID, + ProfilePKID: *profilePKID, + UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: txMeta.VestingEndTimestampNanoSecs, + }) + if err != nil { + return 0, 0, nil, + errors.Wrap(err, "_connectCoinLockup failed to fetch unvested lockedBalanceEntry") + } + if lockedBalanceEntry == nil || lockedBalanceEntry.isDeleted { + lockedBalanceEntry = &LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: txMeta.VestingEndTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), + } } - } - previousLockedBalanceEntry := *lockedBalanceEntry - // Attempt to add the value of this lockup transaction to the locked balance entry. - newLockedBalanceEntryBalance, err := SafeUint256().Add(&lockedBalanceEntry.BalanceBaseUnits, lockupValue) - if err != nil { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry, - "_connectCoinLockup: New Locked Balance Entry Balance") - } + // (2) Store the previous locked balance entry + previousLockedBalanceEntry = lockedBalanceEntry.Copy() - // Ensure in the case of DESO the resulting locked amount is less than 2**64. 
- if profilePKID.IsZeroPKID() && !newLockedBalanceEntryBalance.IsUint64() { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup: "+ - "New DESO Locked Balance Entry Balance") - } + // (3) Check for consolidation overflow + newLockedBalanceEntryBalance, err := SafeUint256().Add(&lockedBalanceEntry.BalanceBaseUnits, lockupValue) + if err != nil { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry, + "_connectCoinLockup: New Locked Balance Entry Balance") + } + // Ensure in the case of DESO the resulting locked amount is less than 2**64. + if profilePKID.IsZeroPKID() && !newLockedBalanceEntryBalance.IsUint64() { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup: "+ + "New DESO Locked Balance Entry Balance") + } + + // (4) Set the new locked balance entry in the view + lockedBalanceEntry.BalanceBaseUnits = *newLockedBalanceEntryBalance + bav._setLockedBalanceEntry(lockedBalanceEntry) + + // NOTE: While we could check for "global" overflow here, we let this occur on the unlock transaction instead. + // Global overflow is where the yield causes fields like CoinEntry.CoinsInCirculationNanos to overflow. + // Performing the check here would be redundant and may lead to worse UX in the case of coins being + // burned in the future making current lockups no longer an overflow. Checking here would also + // create a DoS attack vector where a malicious entity takes out an extremely long-dated lockup + // with the sole intent of saturating the CoinsInCirculationNanos field preventing others from locking up. 
+ } else { + // Vested consolidation case: + + // (1) Check for overlapping locked balance entries + lockedBalanceEntries, err := bav.GetLimitedVestedLockedBalanceEntriesOverTimeInterval( + hodlerPKID, profilePKID, txMeta.UnlockTimestampNanoSecs, txMeta.VestingEndTimestampNanoSecs, + bav.GlobalParamsEntry.MaximumVestedIntersectionsPerLockupTransaction) + if err != nil && errors.Is(err, RuleErrorCoinLockupViolatesVestingIntersectionLimit) { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupViolatesVestingIntersectionLimit, "_connectCoinLockup") + } + if err != nil { + return 0, 0, nil, + errors.Wrap(err, "_connectCoinLockup failed to fetch vested locked balance entries") + } - // NOTE: While we could check for "global" overflow here, we let this occur on the unlock transaction instead. - // Global overflow is where the yield causes fields like CoinEntry.CoinsInCirculationNanos to overflow. - // Performing the check here would be redundant and may lead to worse UX in the case of coins being - // burned in the future making current lockups no longer an overflow. Checking here would also - // create a DoS attack vector where a malicious entity takes out an extremely long-dated lockup - // with the sole intent of saturating the CoinsInCirculationNanos field preventing others from locking up. + // (2a) Store the previous locked balance entries + for _, lockedBalanceEntry := range lockedBalanceEntries { + previousLockedBalanceEntries = append(previousLockedBalanceEntries, lockedBalanceEntry) + } + + // (2b) Delete the previous locked balance entries in the view + // See the comment on LockedBalanceEntryKey to understand why. + for _, lockedBalanceEntry := range lockedBalanceEntries { + bav._deleteLockedBalanceEntry(lockedBalanceEntry) + } - // Update the lockedBalanceEntry and update the view. 
- lockedBalanceEntry.BalanceBaseUnits = *newLockedBalanceEntryBalance - bav._setLockedBalanceEntry(lockedBalanceEntry) + // (3) Consolidate vested locked balance entries + + // (3a) First check if there's no existing vested locked balance entries, this is the no-consolidation case + if len(lockedBalanceEntries) == 0 { + bav._setLockedBalanceEntry(&LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: txMeta.VestingEndTimestampNanoSecs, + BalanceBaseUnits: *lockupValue, + }) + } + + // (3b) Go through each existing locked balance entry and consolidate + if len(lockedBalanceEntries) > 0 { + // Construct a "proposed" locked balance entry from the transaction's metadata. + proposedLockedBalanceEntry := &LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: txMeta.VestingEndTimestampNanoSecs, + BalanceBaseUnits: *lockupValue, + } + + for ii, lockedBalanceEntry := range lockedBalanceEntries { + // (3b-i) Determine if there is left overhang by either the existing or the proposed locked balance entry + // e.g. UnlockTimestampNanoSecs --------------------- VestingEndTimestampNanoSecs + // UnlockTimestampNanoSecs --------------------- VestingEndTimestampNanoSecs + // ^ ^ + // left overhang + // We will break any overhang off into its own separate locked balance entry. 
+ + // Check for left overhang by the existing locked balance entry + if lockedBalanceEntry.UnlockTimestampNanoSecs < proposedLockedBalanceEntry.UnlockTimestampNanoSecs { + splitLockedBalanceEntry, remainingLockedBalanceEntry, err := SplitVestedLockedBalanceEntry( + proposedLockedBalanceEntry, + lockedBalanceEntry.UnlockTimestampNanoSecs, + proposedLockedBalanceEntry.UnlockTimestampNanoSecs) + if err != nil { + return 0, 0, nil, + errors.Wrap(err, "_connectCoinLockup failed to compute vested split") + } + + // NOTE: While it may seem as though we need to check for a conflicting vested + // locked balance entry here, by design we only ever have one vested locked + // balance entry across any given time interval thus by splitting the locked + // balance entry in half it's impossible to intersect an existing + // vested locked balance entry. + bav._setLockedBalanceEntry(splitLockedBalanceEntry) + proposedLockedBalanceEntry = remainingLockedBalanceEntry + } + + // Check for left overhang by the proposed locked balance entry + if proposedLockedBalanceEntry.UnlockTimestampNanoSecs < lockedBalanceEntry.UnlockTimestampNanoSecs { + splitLockedBalanceEntry, remainingLockedBalanceEntry, err := SplitVestedLockedBalanceEntry( + proposedLockedBalanceEntry, + proposedLockedBalanceEntry.UnlockTimestampNanoSecs, + lockedBalanceEntry.UnlockTimestampNanoSecs) + if err != nil { + return 0, 0, nil, + errors.Wrap(err, "_connectCoinLockup failed to compute vested split") + } + + bav._setLockedBalanceEntry(splitLockedBalanceEntry) + proposedLockedBalanceEntry = remainingLockedBalanceEntry + } + + // (3b-ii) Determine if there is right overhang by either the existing or proposed locked balance entry + // e.g. UnlockTimestampNanoSecs --------------------- VestingEndTimestampNanoSecs + // UnlockTimestampNanoSecs --------------------- VestingEndTimestampNanoSecs + // ^ ^ + // right overhang + // We will break any overhang off into its own separate locked balance entry. 
+ + // Check for right overhang by the existing locked balance entry + if lockedBalanceEntry.VestingEndTimestampNanoSecs > + proposedLockedBalanceEntry.VestingEndTimestampNanoSecs { + splitLockedBalanceEntry, remainingLockedBalanceEntry, err := SplitVestedLockedBalanceEntry( + proposedLockedBalanceEntry, + proposedLockedBalanceEntry.VestingEndTimestampNanoSecs, + lockedBalanceEntry.VestingEndTimestampNanoSecs) + if err != nil { + return 0, 0, nil, + errors.Wrap(err, "_connectCoinLockup failed to compute vested split") + } + + bav._setLockedBalanceEntry(splitLockedBalanceEntry) + proposedLockedBalanceEntry = remainingLockedBalanceEntry + + // (3b-iii) On the final iteration, consolidate the remaining portions + // e.g. UnlockTimestampNanoSecs --------------------- VestingEndTimestampNanoSecs + // UnlockTimestampNanoSecs --------------------- VestingEndTimestampNanoSecs + // We know them to be aligned at this step as the previous step truncated them, however + // we will perform a sanity check just to be safe. 
+ if ii == len(lockedBalanceEntries)-1 && + (lockedBalanceEntry.UnlockTimestampNanoSecs != + proposedLockedBalanceEntry.UnlockTimestampNanoSecs) || + (lockedBalanceEntry.VestingEndTimestampNanoSecs != + proposedLockedBalanceEntry.VestingEndTimestampNanoSecs) { + return 0, 0, nil, + errors.New("_connectCoinLockup found mismatched unlock and vesting end " + + "timestamps; this shouldn't happen") + } + if ii == len(lockedBalanceEntries)-1 { + finalConsolidatedBalance, err := SafeUint256().Add( + &proposedLockedBalanceEntry.BalanceBaseUnits, &lockedBalanceEntry.BalanceBaseUnits) + if err != nil { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry, + "_connectCoinLockup") + } + proposedLockedBalanceEntry.BalanceBaseUnits = *finalConsolidatedBalance + bav._setLockedBalanceEntry(proposedLockedBalanceEntry) + } + } + + // Check for right overhang by the proposed locked balance entry + if proposedLockedBalanceEntry.VestingEndTimestampNanoSecs > + lockedBalanceEntry.VestingEndTimestampNanoSecs { + // We check if there's another locked balance entry sometime in the future. + splitTimestampEnd := lockedBalanceEntry.VestingEndTimestampNanoSecs + if ii != len(lockedBalanceEntries)-1 { + splitTimestampEnd = lockedBalanceEntries[ii+1].UnlockTimestampNanoSecs + } + splitLockedBalanceEntry, remainingLockedBalanceEntry, err := SplitVestedLockedBalanceEntry( + proposedLockedBalanceEntry, + lockedBalanceEntry.VestingEndTimestampNanoSecs, + splitTimestampEnd) + if err != nil { + return 0, 0, nil, + errors.Wrap(err, "_connectCoinLockup failed to compute vested split") + } + + // Consolidate the overlapping special case. 
+ combinedBalanceBaseUnits, err := SafeUint256().Add( + &splitLockedBalanceEntry.BalanceBaseUnits, + &lockedBalanceEntry.BalanceBaseUnits) + if err != nil { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry, + "_connectCoinLockup") + } + splitLockedBalanceEntry.BalanceBaseUnits = *combinedBalanceBaseUnits + + bav._setLockedBalanceEntry(splitLockedBalanceEntry) + proposedLockedBalanceEntry = remainingLockedBalanceEntry + + // (3b-iii) On the final iteration, the remaining proposedLockedBalanceEntry + // is the only vesting schedule left. + if ii == len(lockedBalanceEntries)-1 { + bav._setLockedBalanceEntry(proposedLockedBalanceEntry) + } + } + } + } + } // Add a UtxoOperation for easy reversion during disconnect. utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ Type: OperationTypeCoinLockup, PrevTransactorBalanceEntry: prevTransactorBalanceEntry, - PrevLockedBalanceEntry: &previousLockedBalanceEntry, + PrevLockedBalanceEntry: previousLockedBalanceEntry, PrevCoinEntry: prevCoinEntry, }) @@ -921,6 +1396,153 @@ func (bav *UtxoView) _connectCoinLockup( return totalInput, totalOutput, utxoOpsForTxn, nil } +func SplitVestedLockedBalanceEntry( + lockedBalanceEntry *LockedBalanceEntry, + startSplitTimestampNanoSecs int64, + endSplitTimestampNanoSecs int64, +) ( + _splitLockedBalanceEntry *LockedBalanceEntry, + _remainingLockedBalanceEntry *LockedBalanceEntry, + _err error, +) { + // Sanity check to ensure the start timestamp is before the end timestamp. 
+ if startSplitTimestampNanoSecs >= endSplitTimestampNanoSecs { + return nil, nil, + errors.New("SplitVestedLockedBalanceEntry: cannot use reversed split timestamps") + } + + // Check to ensure the start and end timestamps are within the bounds + if startSplitTimestampNanoSecs < lockedBalanceEntry.UnlockTimestampNanoSecs || + endSplitTimestampNanoSecs > lockedBalanceEntry.VestingEndTimestampNanoSecs { + return nil, nil, + errors.New("SplitVestedLockedBalanceEntry: split timestamps must be within bounds") + } + + // Check to ensure the split will not form three pieces. + if startSplitTimestampNanoSecs != lockedBalanceEntry.UnlockTimestampNanoSecs && + endSplitTimestampNanoSecs != lockedBalanceEntry.VestingEndTimestampNanoSecs { + return nil, nil, + errors.New("SplitVestedLockedBalanceEntry: split would create three pieces") + } + + // Check to ensure the split will not form one. + if startSplitTimestampNanoSecs == lockedBalanceEntry.UnlockTimestampNanoSecs && + endSplitTimestampNanoSecs == lockedBalanceEntry.VestingEndTimestampNanoSecs { + return nil, nil, + errors.New("SplitVestedLockedBalanceEntry: split would result in no-op") + } + + // Create a split locked balance entry. + splitLockedBalanceEntry := &LockedBalanceEntry{ + HODLerPKID: lockedBalanceEntry.HODLerPKID, + ProfilePKID: lockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: startSplitTimestampNanoSecs, + VestingEndTimestampNanoSecs: endSplitTimestampNanoSecs, + BalanceBaseUnits: uint256.Int{}, + } + + // Create the remaining locked balance entry. 
+ var remainingLockedBalanceEntry *LockedBalanceEntry + if startSplitTimestampNanoSecs == lockedBalanceEntry.UnlockTimestampNanoSecs { + remainingLockedBalanceEntry = &LockedBalanceEntry{ + HODLerPKID: lockedBalanceEntry.HODLerPKID, + ProfilePKID: lockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: startSplitTimestampNanoSecs + 1, + VestingEndTimestampNanoSecs: lockedBalanceEntry.VestingEndTimestampNanoSecs, + BalanceBaseUnits: uint256.Int{}, + } + } + if endSplitTimestampNanoSecs == lockedBalanceEntry.VestingEndTimestampNanoSecs { + remainingLockedBalanceEntry = &LockedBalanceEntry{ + HODLerPKID: lockedBalanceEntry.HODLerPKID, + ProfilePKID: lockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: lockedBalanceEntry.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: endSplitTimestampNanoSecs - 1, + BalanceBaseUnits: uint256.Int{}, + } + } + + // Compute the balance in the split locked balance entry. + splitValue, err := CalculateLockupSplitValue( + lockedBalanceEntry, startSplitTimestampNanoSecs, endSplitTimestampNanoSecs) + if err != nil { + return nil, nil, + errors.Wrap(err, "SplitVestedLockedBalanceEntry failed to compute split value") + } + splitLockedBalanceEntry.BalanceBaseUnits = *splitValue + + // Compute the balance in the remaining locked balance entry. + remainingValue, err := SafeUint256().Sub(&lockedBalanceEntry.BalanceBaseUnits, splitValue) + if err != nil { + return nil, nil, + errors.Wrap(err, "SplitVestedLockedBalanceEntry failed to compute remaining value") + } + remainingLockedBalanceEntry.BalanceBaseUnits = *remainingValue + + // Sanity check the split does not print money. + if uint256.NewInt().Add( + &splitLockedBalanceEntry.BalanceBaseUnits, &remainingLockedBalanceEntry.BalanceBaseUnits). + Gt(&lockedBalanceEntry.BalanceBaseUnits) { + return nil, nil, + errors.New("SplitVestedLockedBalanceEntry: split would print tokens/DESO") + } + + // Sanity check the split does not result in empty locked balance entries. 
+ if splitLockedBalanceEntry.BalanceBaseUnits.IsZero() || + remainingLockedBalanceEntry.BalanceBaseUnits.IsZero() { + return nil, nil, + errors.New("SplitVestedLockedBalanceEntry: split would result in empty locked balance entry") + } + + return splitLockedBalanceEntry, remainingLockedBalanceEntry, nil +} + +func CalculateLockupSplitValue( + lockedBalanceEntry *LockedBalanceEntry, + starTimestampNanoSecs int64, + endTimestampNanoSecs int64, +) ( + _splitValue *uint256.Int, + _err error, +) { + // Compute the time that passes over the interval [startTimestampNanoSecs, endTimestampNanoSecs] + numerator, err := SafeUint256().Sub( + uint256.NewInt().SetUint64(uint64(endTimestampNanoSecs)), + uint256.NewInt().SetUint64(uint64(starTimestampNanoSecs))) + if err != nil { + return nil, errors.Wrap(err, "CalculateLockupSplitValue: "+ + "(start timestamp - end timestamp) underflow") + } + + // Compute the time that passes over the duration of the locked balance entry + denominator, err := SafeUint256().Sub( + uint256.NewInt().SetUint64(uint64(lockedBalanceEntry.VestingEndTimestampNanoSecs)), + uint256.NewInt().SetUint64(uint64(lockedBalanceEntry.UnlockTimestampNanoSecs))) + if err != nil { + return nil, errors.Wrap(err, "CalculateLockupSplitValue: "+ + "(lockedBalanceEntry.UnlockTimestamp - lockedBalanceEntry.VestingEndTimestamp) underflow") + } + + // Rather than creating a floating point for the fraction of time that passes, we keep + // everything as uint256 by multiplying the locked balance entry value by the numerator and + // dividing by the denominator. We know uint256 division to produce a quotient strictly less + // than the true infinite precision quotient. This approach is an extra layer of protection against + // money printer bugs. 
+ numerator, err = SafeUint256().Mul(numerator, &lockedBalanceEntry.BalanceBaseUnits) + if err != nil { + return nil, errors.Wrap(err, "CalculateLockupSplitValue: "+ + "(start timestamp - end timestamp) * lockedBalanceEntry.Balance overflow") + } + splitValue, err := SafeUint256().Div(numerator, denominator) + if err != nil { + return nil, errors.Wrap(err, "CalculateLockupSplitValue: "+ + "((start timestamp - end timestamp) * lockedBalanceEntry.Balance overflow)) / "+ + "(lockedBalanceEntry.UnlockTimestamp - lockedBalanceEntry.VestingEndTimestamp) has zero denominator") + } + + return splitValue, nil +} + func CalculateLockupYield( principal *uint256.Int, apyYieldBasisPoints *uint256.Int, @@ -1005,18 +1627,22 @@ func (bav *UtxoView) _disconnectCoinLockup( } // Sanity check the data within the CoinLockup. Reverting a lockup should not result in more coins. - lockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - operationData.PrevLockedBalanceEntry.HODLerPKID, operationData.PrevLockedBalanceEntry.ProfilePKID, - operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs) + lockedBalanceEntry, err := + bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + operationData.PrevLockedBalanceEntry.HODLerPKID, + operationData.PrevLockedBalanceEntry.ProfilePKID, + operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs, + operationData.PrevLockedBalanceEntry.VestingEndTimestampNanoSecs) if err != nil { return errors.Wrap(err, "_disconnectCoinLockup failed to fetch current lockedBalanceEntry") } if lockedBalanceEntry == nil || lockedBalanceEntry.isDeleted { lockedBalanceEntry = &LockedBalanceEntry{ - HODLerPKID: operationData.PrevLockedBalanceEntry.HODLerPKID, - ProfilePKID: operationData.PrevLockedBalanceEntry.ProfilePKID, - UnlockTimestampNanoSecs: operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + HODLerPKID: 
operationData.PrevLockedBalanceEntry.HODLerPKID, + ProfilePKID: operationData.PrevLockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: operationData.PrevLockedBalanceEntry.VestingEndTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), } } if lockedBalanceEntry.BalanceBaseUnits.Lt(&operationData.PrevLockedBalanceEntry.BalanceBaseUnits) { @@ -1416,18 +2042,20 @@ func (bav *UtxoView) _connectCoinLockupTransfer( } // Fetch the sender's balance entries. - senderLockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - senderPKID, profilePKID, txMeta.UnlockTimestampNanoSecs) + senderLockedBalanceEntry, err := + bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + senderPKID, profilePKID, txMeta.UnlockTimestampNanoSecs, txMeta.UnlockTimestampNanoSecs) if err != nil { return 0, 0, nil, errors.Wrap(err, "connectCoinLockupTransfer failed to fetch senderLockedBalanceEntry:w") } if senderLockedBalanceEntry == nil || senderLockedBalanceEntry.isDeleted { senderLockedBalanceEntry = &LockedBalanceEntry{ - HODLerPKID: senderPKID, - ProfilePKID: profilePKID, - UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + HODLerPKID: senderPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), } } prevSenderLockedBalanceEntry := senderLockedBalanceEntry.Copy() @@ -1443,18 +2071,23 @@ func (bav *UtxoView) _connectCoinLockupTransfer( &senderLockedBalanceEntry.BalanceBaseUnits, txMeta.LockedCoinsToTransferBaseUnits) // Fetch the recipient's balance entry. 
- receiverLockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - receiverPKID, profilePKID, txMeta.UnlockTimestampNanoSecs) + receiverLockedBalanceEntry, err := + bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + receiverPKID, + profilePKID, + txMeta.UnlockTimestampNanoSecs, + txMeta.UnlockTimestampNanoSecs) if err != nil { return 0, 0, nil, errors.Wrap(err, "connectCoinLockupTransfer failed to fetch receiverLockedBalanceEntry") } if receiverLockedBalanceEntry == nil || receiverLockedBalanceEntry.isDeleted { receiverLockedBalanceEntry = &LockedBalanceEntry{ - HODLerPKID: receiverPKID, - ProfilePKID: profilePKID, - UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + HODLerPKID: receiverPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), } } prevReceiverLockedBalanceEntry := receiverLockedBalanceEntry.Copy() @@ -1540,34 +2173,40 @@ func (bav *UtxoView) _disconnectCoinLockupTransfer( } // Fetch the LockedBalanceEntries in the view. 
- senderLockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - operationData.PrevSenderLockedBalanceEntry.HODLerPKID, - operationData.PrevSenderLockedBalanceEntry.ProfilePKID, - operationData.PrevSenderLockedBalanceEntry.UnlockTimestampNanoSecs) + senderLockedBalanceEntry, err := + bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + operationData.PrevSenderLockedBalanceEntry.HODLerPKID, + operationData.PrevSenderLockedBalanceEntry.ProfilePKID, + operationData.PrevSenderLockedBalanceEntry.UnlockTimestampNanoSecs, + operationData.PrevSenderLockedBalanceEntry.UnlockTimestampNanoSecs) if err != nil { return errors.Wrap(err, "_disconnectCoinLockupTransfer failed to fetch senderLockedBalanceEntry") } if senderLockedBalanceEntry == nil || senderLockedBalanceEntry.isDeleted { senderLockedBalanceEntry = &LockedBalanceEntry{ - HODLerPKID: operationData.PrevSenderLockedBalanceEntry.HODLerPKID, - ProfilePKID: operationData.PrevSenderLockedBalanceEntry.ProfilePKID, - UnlockTimestampNanoSecs: operationData.PrevSenderLockedBalanceEntry.UnlockTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + HODLerPKID: operationData.PrevSenderLockedBalanceEntry.HODLerPKID, + ProfilePKID: operationData.PrevSenderLockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: operationData.PrevSenderLockedBalanceEntry.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: operationData.PrevSenderLockedBalanceEntry.VestingEndTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), } } - receiverLockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - operationData.PrevReceiverLockedBalanceEntry.HODLerPKID, - operationData.PrevReceiverLockedBalanceEntry.ProfilePKID, - operationData.PrevReceiverLockedBalanceEntry.UnlockTimestampNanoSecs) + receiverLockedBalanceEntry, err := + 
bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + operationData.PrevReceiverLockedBalanceEntry.HODLerPKID, + operationData.PrevReceiverLockedBalanceEntry.ProfilePKID, + operationData.PrevReceiverLockedBalanceEntry.UnlockTimestampNanoSecs, + operationData.PrevReceiverLockedBalanceEntry.UnlockTimestampNanoSecs) if err != nil { return errors.Wrap(err, "_disconnectCoinLockupTransfer failed to fetch receiverLockedBalanceEntry") } if receiverLockedBalanceEntry == nil || receiverLockedBalanceEntry.isDeleted { receiverLockedBalanceEntry = &LockedBalanceEntry{ - HODLerPKID: operationData.PrevReceiverLockedBalanceEntry.HODLerPKID, - ProfilePKID: operationData.PrevReceiverLockedBalanceEntry.ProfilePKID, - UnlockTimestampNanoSecs: operationData.PrevReceiverLockedBalanceEntry.UnlockTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + HODLerPKID: operationData.PrevReceiverLockedBalanceEntry.HODLerPKID, + ProfilePKID: operationData.PrevReceiverLockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: operationData.PrevReceiverLockedBalanceEntry.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: operationData.PrevReceiverLockedBalanceEntry.VestingEndTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), } } @@ -1666,20 +2305,22 @@ func (bav *UtxoView) _connectCoinUnlock( } // Retrieve unlockable locked balance entries. 
- unlockableLockedBalanceEntries, err := bav.GetUnlockableLockedBalanceEntries( - hodlerPKID, profilePKID, blockTimestampNanoSecs) + unvestedUnlockableLockedBalanceEntries, vestedUnlockableLockedBalanceEntries, err := + bav.GetUnlockableLockedBalanceEntries(hodlerPKID, profilePKID, blockTimestampNanoSecs) if err != nil { return 0, 0, nil, errors.Wrap(err, "_connectCoinUnlock") } - if len(unlockableLockedBalanceEntries) == 0 { - return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockNoUnlockableCoinsFound, - "_connectCoinUnlock") + if len(unvestedUnlockableLockedBalanceEntries) == 0 && len(vestedUnlockableLockedBalanceEntries) == 0 { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinUnlockNoUnlockableCoinsFound, "_connectCoinUnlock") } - // Unlock all unlockable locked balance entries. - var prevLockedBalanceEntries []*LockedBalanceEntry + // Create an unlockedBalance uint256 to track what will be given back to the user. unlockedBalance := uint256.NewInt() - for _, unlockableLockedBalanceEntry := range unlockableLockedBalanceEntries { + + // Unlock all unvested unlockable locked balance entries. + var prevLockedBalanceEntries []*LockedBalanceEntry + for _, unlockableLockedBalanceEntry := range unvestedUnlockableLockedBalanceEntries { unlockedBalance, err = SafeUint256().Add(unlockedBalance, &unlockableLockedBalanceEntry.BalanceBaseUnits) if err != nil { @@ -1695,6 +2336,44 @@ func (bav *UtxoView) _connectCoinUnlock( bav._deleteLockedBalanceEntry(unlockableLockedBalanceEntry) } + // Unlock all vested locked balance entries. + // NOTE: See the comment on LockedBalanceEntryKey for how we deal with modified vested locked balance entries. + var prevVestedLockedBalanceEntries []*LockedBalanceEntry + for _, unlockableLockedBalanceEntry := range vestedUnlockableLockedBalanceEntries { + // Depending on the time of unlock, compute how much from the balance can be unlocked. 
+ amountToUnlock, err := CalculateVestedEarnings(unlockableLockedBalanceEntry, blockTimestampNanoSecs) + if err != nil { + return 0, 0, nil, + errors.Wrap(err, "_connectCoinUnlock: error computing vested earnings") + } + + // Add the unlocked amount and check for overflow. + unlockedBalance, err = + SafeUint256().Add(unlockedBalance, amountToUnlock) + if err != nil { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinUnlockUnlockableCoinsOverflow, "_connectCoinUnlock") + } + + // Append the original LockedBalanceEntry in the event we rollback the transaction. + prevVestedLockedBalanceEntries = append(prevVestedLockedBalanceEntries, unlockableLockedBalanceEntry.Copy()) + + // Depending on when the unlock occurs, we either DELETE or MODIFY the locked balance entry. + if blockTimestampNanoSecs >= unlockableLockedBalanceEntry.VestingEndTimestampNanoSecs { + bav._deleteLockedBalanceEntry(unlockableLockedBalanceEntry) + } else { + // DELETE the previous key. + bav._deleteLockedBalanceEntry(unlockableLockedBalanceEntry) + + // Create and modify a copy to prevent pointer reuse. + modifiedLockedBalanceEntry := unlockableLockedBalanceEntry.Copy() + modifiedLockedBalanceEntry.UnlockTimestampNanoSecs = blockTimestampNanoSecs + + // SET the modified key. + bav._setLockedBalanceEntry(modifiedLockedBalanceEntry) + } + } + // Credit the transactor with either DAO coins or DeSo for this unlock. var prevTransactorBalanceEntry *BalanceEntry var prevCoinEntry *CoinEntry @@ -1754,6 +2433,61 @@ func (bav *UtxoView) _connectCoinUnlock( return totalInput, totalOutput, utxoOpsForTxn, nil } +func CalculateVestedEarnings( + lockedBalanceEntry *LockedBalanceEntry, + blockTimestampNanoSecs int64, +) ( + _vestedEarnings *uint256.Int, + _err error, +) { + // Check if this lockup should not be unlocked right now. + if blockTimestampNanoSecs <= lockedBalanceEntry.UnlockTimestampNanoSecs { + return uint256.NewInt(), nil + } + + // Check if this lockup should be fully unlocked. 
+	if blockTimestampNanoSecs >= lockedBalanceEntry.VestingEndTimestampNanoSecs {
+		return &lockedBalanceEntry.BalanceBaseUnits, nil
+	}
+
+	// Here we know that:
+	// UnlockTimestampNanoSecs < blockTimestampNanoSecs < VestingEndTimestampNanoSecs
+	// Now we compute the fraction of time that's passed.
+	numerator := blockTimestampNanoSecs - lockedBalanceEntry.UnlockTimestampNanoSecs
+	denominator := lockedBalanceEntry.VestingEndTimestampNanoSecs - lockedBalanceEntry.UnlockTimestampNanoSecs
+	// NOTE: We must multiply the balance by the elapsed time BEFORE dividing by the
+	// total duration. Dividing first would perform uint256 integer division with
+	// numerator < denominator, which truncates the fraction of time elapsed to zero
+	// and would cause every partial unlock to vest nothing.
+	scaledBalance, err := SafeUint256().Mul(
+		&lockedBalanceEntry.BalanceBaseUnits,
+		uint256.NewInt().SetUint64(uint64(numerator)))
+	if err != nil {
+		return uint256.NewInt(),
+			errors.Wrap(err, "CalculateVestedEarnings failed to scale balance by time elapsed")
+	}
+
+	// Divide by the total vesting duration. Integer division rounds down, so we can
+	// never credit more than the true infinite-precision vested amount.
+	vestedEarnings, err := SafeUint256().Div(
+		scaledBalance,
+		uint256.NewInt().SetUint64(uint64(denominator)))
+	if err != nil {
+		return uint256.NewInt(),
+			errors.Wrap(err, "CalculateVestedEarnings failed to compute vested earnings")
+	}
+
+	// Sanity check that vestedEarnings < BalanceBaseUnits
+	if vestedEarnings.Gt(&lockedBalanceEntry.BalanceBaseUnits) ||
+		vestedEarnings.Eq(&lockedBalanceEntry.BalanceBaseUnits) {
+		return uint256.NewInt(),
+			errors.New("CalculateVestedEarnings: " +
+				"vested earnings >= outstanding balance; this shouldn't be possible")
+	}
+
+	return vestedEarnings, nil
+}
+
 func (bav *UtxoView) _disconnectCoinUnlock(
 	operationType OperationType,
 	currentTxn *MsgDeSoTxn,
@@ -1793,19 +2527,22 @@ func (bav *UtxoView) _disconnectCoinUnlock(

 	// Sanity check the data within the CoinUnlock.
 	// Reverting an unlock of LockedBalanceEntry should not result in less coins.
for _, prevLockedBalanceEntry := range operationData.PrevLockedBalanceEntries { - lockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - prevLockedBalanceEntry.HODLerPKID, - prevLockedBalanceEntry.ProfilePKID, - prevLockedBalanceEntry.UnlockTimestampNanoSecs) + lockedBalanceEntry, err := + bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + prevLockedBalanceEntry.HODLerPKID, + prevLockedBalanceEntry.ProfilePKID, + prevLockedBalanceEntry.UnlockTimestampNanoSecs, + prevLockedBalanceEntry.VestingEndTimestampNanoSecs) if err != nil { return errors.Wrap(err, "_disconnectCoinUnlock failed to fetch lockedBalanceEntry") } if lockedBalanceEntry == nil || lockedBalanceEntry.isDeleted { lockedBalanceEntry = &LockedBalanceEntry{ - HODLerPKID: prevLockedBalanceEntry.HODLerPKID, - ProfilePKID: prevLockedBalanceEntry.ProfilePKID, - UnlockTimestampNanoSecs: prevLockedBalanceEntry.UnlockTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + HODLerPKID: prevLockedBalanceEntry.HODLerPKID, + ProfilePKID: prevLockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: prevLockedBalanceEntry.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: prevLockedBalanceEntry.VestingEndTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), } } if prevLockedBalanceEntry.BalanceBaseUnits.Lt(&lockedBalanceEntry.BalanceBaseUnits) { diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 5cd6f3674..7b07e8ae3 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -62,11 +62,12 @@ func TestCoinLockupsForkHeight(t *testing.T) { } _, _, _, err1 := _coinLockupWithConnectTimestamp( - t, chain, db, params, - feeRateNanosPerKb, + t, chain, db, params, feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, + m0Pub, + 1000, 1000, uint256.NewInt().SetUint64(100), 0) @@ -231,14 +232,9 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { // (This should fail -- 
RuleErrorCoinLockupOfAmountZero) { _, _, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - m0Pub, - 0, - uint256.NewInt(), - 0) + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 0, 0, uint256.NewInt(), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupOfAmountZero) } @@ -246,14 +242,9 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { // (This should fail -- RuleErrorCoinLockupOnNonExistentProfile) { _, _, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - m2Pub, - 0, - uint256.NewInt().SetUint64(1), - 0) + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m2Pub, m0Pub, + 0, 0, uint256.NewInt().SetUint64(1), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupOnNonExistentProfile) } @@ -262,14 +253,9 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { // NOTE: This also checks that DESO lockups do not require an associated profile. 
{ _, _, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), - 0, - MaxUint256, - 0) + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m0Pub, + 0, 0, MaxUint256, 0) require.Contains(t, err.Error(), RuleErrorCoinLockupExcessiveDeSoLockup) } @@ -277,14 +263,9 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { // (This should fail -- RuleErrorCoinLockupInvalidLockupDuration) { _, _, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), - 0, - uint256.NewInt().SetUint64(1), - 0) + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + m0Pub, 0, 0, uint256.NewInt().SetUint64(1), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidLockupDuration) } @@ -292,14 +273,9 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { // (This should fail -- RuleErrorCoinLockupInvalidLockupDuration) { _, _, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), - 0, - uint256.NewInt().SetUint64(1), - 1) + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m0Pub, + 0, 0, uint256.NewInt().SetUint64(1), 1) require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidLockupDuration) } @@ -307,14 +283,9 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { // (This should fail -- 
RuleErrorCoinLockupInsufficientDeSo) { _, _, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), - 1, - uint256.NewInt().SetUint64(1e10), - 0) + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m0Pub, + 1, 1, uint256.NewInt().SetUint64(1e10), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupInsufficientDeSo) } @@ -322,14 +293,9 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { // (This should fail -- RuleErrorCoinLockupInsufficientCoins) { _, _, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - m0Pub, - 1, - uint256.NewInt().SetUint64(1e10), - 0) + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1, 1, uint256.NewInt().SetUint64(1e10), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupInsufficientCoins) } @@ -340,14 +306,9 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { // This should succeed :) { _, _, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - m0Pub, - 1000, - uint256.NewInt().SetUint64(1000), - 0) + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, 1000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } } @@ -577,15 +538,9 @@ func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { // Lockup 1000 M0 coins. 
_coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - m0Pub, - 1, - uint256.NewInt().SetUint64(1e6), - 0, - ) + testMeta, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1, 1, uint256.NewInt().SetUint64(1e6), 0) // Send 1000 locked M0 coins to M2. _coinLockupTransferWithTestMeta( @@ -728,14 +683,9 @@ func TestLockupBasedOverflowsOnProfiles(t *testing.T) { // Try and lockup MaxUint256 m2 coins and ensure CoinsInCirculation and NumberOfHolders decreases { _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m2Pub, - m2Priv, - m2Pub, - 1000, - MaxUint256, - 0) + testMeta, testMeta.feeRateNanosPerKb, + m2Pub, m2Priv, m2Pub, m2Pub, + 1000, 1000, MaxUint256, 0) // Ensure CoinsInCirculationNanos and NumberOfHolders are now zero utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) @@ -765,14 +715,9 @@ func TestLockupBasedOverflowsOnProfiles(t *testing.T) { // (This should fail -- RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry) { _, _, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m2Pub, - m2Priv, - m2Pub, - 1000, - uint256.NewInt().SetUint64(1), - 0) + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m2Pub, m2Priv, m2Pub, m2Pub, + 1000, 1000, uint256.NewInt().SetUint64(1), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry) } @@ -792,14 +737,9 @@ func TestLockupBasedOverflowsOnProfiles(t *testing.T) { ) _, _, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m2Pub, - m2Priv, - m2Pub, - 365*24*60*60*1e9, - MaxUint256, - 0) + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m2Pub, m2Priv, m2Pub, m2Pub, + 365*24*60*60*1e9, 365*24*60*60*1e9, 
MaxUint256, 0) require.Contains(t, err.Error(), RuleErrorCoinLockupCoinYieldOverflow) } @@ -821,14 +761,9 @@ func TestLockupBasedOverflowsOnProfiles(t *testing.T) { ) _, _, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m2Pub, - m2Priv, - m2Pub, - 1, - MaxUint256, - 0) + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m2Pub, m2Priv, m2Pub, m2Pub, + 1, 1, MaxUint256, 0) require.Contains(t, err.Error(), RuleErrorCoinLockupYieldCausesOverflow) // Remove the yield curve point. @@ -862,15 +797,8 @@ func TestLockupBasedOverflowsOnProfiles(t *testing.T) { // Lockup MaxUint256 m2 tokens. _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m2Pub, - m2Priv, - m2Pub, - 1000, - MaxUint256, - 0, - ) + testMeta, testMeta.feeRateNanosPerKb, m2Pub, m2Priv, m2Pub, m2Pub, + 1000, 1000, MaxUint256, 0) // Try and perform another transfer. This should fail. _, _, _, err := _coinLockupTransfer( @@ -905,15 +833,9 @@ func TestLockupBasedOverflowsOnProfiles(t *testing.T) { // Lockup MaxUint256 m2 tokens at a different timestamp. _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m2Pub, - m2Priv, - m2Pub, - 1001, - MaxUint256, - 0, - ) + testMeta, testMeta.feeRateNanosPerKb, + m2Pub, m2Priv, m2Pub, m2Pub, + 1001, 1001, MaxUint256, 0) // Try and unlock all locked balance entries simultaneously. // This should cause an overflow. 
@@ -1022,22 +944,23 @@ func TestLockupStandardProfileFlows(t *testing.T) { m1PkBytes, m1PkBytes, true) _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m1Pub, - m1Priv, - m1Pub, + testMeta, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 365*24*60*60*1e9+365*12*60*60*1e9, 365*24*60*60*1e9+365*12*60*60*1e9, uint256.NewInt().SetUint64(10000), - 365*24*60*60*1e9, - ) + 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 10000 base units. utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m1PKID, m1PKID, 365*24*60*60*1e9+365*12*60*60*1e9) + lockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m1PKID, + m1PKID, + 365*24*60*60*1e9+365*12*60*60*1e9, + 365*24*60*60*1e9+365*12*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(10000), lockedBalanceEntry.BalanceBaseUnits) @@ -1066,22 +989,23 @@ func TestLockupStandardProfileFlows(t *testing.T) { m1PkBytes, m1PkBytes, true) _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m1Pub, - m1Priv, - m1Pub, + testMeta, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 2*365*24*60*60*1e9, 2*365*24*60*60*1e9, uint256.NewInt().SetUint64(10000), - 365*24*60*60*1e9, - ) + 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 10500 base units. 
utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m1PKID, m1PKID, 2*365*24*60*60*1e9) + lockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m1PKID, + m1PKID, + 2*365*24*60*60*1e9, + 2*365*24*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(10500), lockedBalanceEntry.BalanceBaseUnits) @@ -1100,15 +1024,12 @@ func TestLockupStandardProfileFlows(t *testing.T) { // NOTE: This is testing the interpolation algorithm for lockups in the middle of two yield curve points. { _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m1Pub, - m1Priv, - m1Pub, + testMeta, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 2*365*24*60*60*1e9+365*12*60*60*1e9, 2*365*24*60*60*1e9+365*12*60*60*1e9, uint256.NewInt().SetUint64(10000), - 365*24*60*60*1e9, - ) + 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 10500 base units. 
utxoView, err := NewUtxoView( @@ -1116,8 +1037,12 @@ func TestLockupStandardProfileFlows(t *testing.T) { require.NoError(t, err) m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID - lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m1PKID, m1PKID, 2*365*24*60*60*1e9+365*12*60*60*1e9) + lockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m1PKID, + m1PKID, + 2*365*24*60*60*1e9+365*12*60*60*1e9, + 2*365*24*60*60*1e9+365*12*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(10500), lockedBalanceEntry.BalanceBaseUnits) } @@ -1127,15 +1052,12 @@ func TestLockupStandardProfileFlows(t *testing.T) { // We expect this to create a locked balance entry with 12000 base units locked inside. { _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m1Pub, - m1Priv, - m1Pub, + testMeta, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 3*365*24*60*60*1e9, 3*365*24*60*60*1e9, uint256.NewInt().SetUint64(10000), - 365*24*60*60*1e9, - ) + 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 12000 base units. 
utxoView, err := NewUtxoView( @@ -1143,8 +1065,12 @@ func TestLockupStandardProfileFlows(t *testing.T) { require.NoError(t, err) m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID - lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m1PKID, m1PKID, 3*365*24*60*60*1e9) + lockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m1PKID, + m1PKID, + 3*365*24*60*60*1e9, + 3*365*24*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(12000), lockedBalanceEntry.BalanceBaseUnits) } @@ -1188,8 +1114,12 @@ func TestLockupStandardProfileFlows(t *testing.T) { require.NoError(t, err) m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID - lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m1PKID, m1PKID, 2*365*24*60*60*1e9) + lockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m1PKID, + m1PKID, + 2*365*24*60*60*1e9, + 2*365*24*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(9000), lockedBalanceEntry.BalanceBaseUnits) } @@ -1291,15 +1221,12 @@ func TestLockupStandardDeSoFlows(t *testing.T) { require.NoError(t, err) _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m1Pub, - m1Priv, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + testMeta, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m1Pub, + 365*24*60*60*1e9+365*12*60*60*1e9, 365*24*60*60*1e9+365*12*60*60*1e9, uint256.NewInt().SetUint64(500), - 365*24*60*60*1e9, - ) + 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 525 base units. 
utxoView, err = NewUtxoView( @@ -1307,8 +1234,12 @@ func TestLockupStandardDeSoFlows(t *testing.T) { require.NoError(t, err) m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID - lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m1PKID, ZeroPKID.NewPKID(), 365*24*60*60*1e9+365*12*60*60*1e9) + lockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m1PKID, + ZeroPKID.NewPKID(), + 365*24*60*60*1e9+365*12*60*60*1e9, + 365*24*60*60*1e9+365*12*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits) @@ -1331,15 +1262,12 @@ func TestLockupStandardDeSoFlows(t *testing.T) { require.NoError(t, err) _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m1Pub, - m1Priv, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + testMeta, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m1Pub, + 2*365*24*60*60*1e9, 2*365*24*60*60*1e9, uint256.NewInt().SetUint64(500), - 365*24*60*60*1e9, - ) + 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 525 base units. 
utxoView, err = NewUtxoView( @@ -1347,8 +1275,12 @@ func TestLockupStandardDeSoFlows(t *testing.T) { require.NoError(t, err) m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID - lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m1PKID, ZeroPKID.NewPKID(), 2*365*24*60*60*1e9) + lockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m1PKID, + ZeroPKID.NewPKID(), + 2*365*24*60*60*1e9, + 2*365*24*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(525), lockedBalanceEntry.BalanceBaseUnits) @@ -1371,15 +1303,12 @@ func TestLockupStandardDeSoFlows(t *testing.T) { require.NoError(t, err) _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m1Pub, - m1Priv, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + testMeta, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m1Pub, + 2*365*24*60*60*1e9+365*12*60*60*1e9, 2*365*24*60*60*1e9+365*12*60*60*1e9, uint256.NewInt().SetUint64(500), - 365*24*60*60*1e9, - ) + 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 525 base units. 
utxoView, err = NewUtxoView( @@ -1387,8 +1316,12 @@ func TestLockupStandardDeSoFlows(t *testing.T) { require.NoError(t, err) m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID - lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m1PKID, ZeroPKID.NewPKID(), 2*365*24*60*60*1e9+365*12*60*60*1e9) + lockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m1PKID, + ZeroPKID.NewPKID(), + 2*365*24*60*60*1e9+365*12*60*60*1e9, + 2*365*24*60*60*1e9+365*12*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(525), lockedBalanceEntry.BalanceBaseUnits) @@ -1411,15 +1344,12 @@ func TestLockupStandardDeSoFlows(t *testing.T) { require.NoError(t, err) _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m1Pub, - m1Priv, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + testMeta, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m1Pub, + 3*365*24*60*60*1e9, 3*365*24*60*60*1e9, uint256.NewInt().SetUint64(500), - 365*24*60*60*1e9, - ) + 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 525 base units. 
utxoView, err = NewUtxoView( @@ -1427,8 +1357,12 @@ func TestLockupStandardDeSoFlows(t *testing.T) { require.NoError(t, err) m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID - lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m1PKID, ZeroPKID.NewPKID(), 3*365*24*60*60*1e9) + lockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m1PKID, + ZeroPKID.NewPKID(), + 3*365*24*60*60*1e9, + 3*365*24*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(600), lockedBalanceEntry.BalanceBaseUnits) @@ -1480,8 +1414,12 @@ func TestLockupStandardDeSoFlows(t *testing.T) { require.NoError(t, err) m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID - lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m1PKID, ZeroPKID.NewPKID(), 2*365*24*60*60*1e9) + lockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m1PKID, + ZeroPKID.NewPKID(), + 2*365*24*60*60*1e9, + 2*365*24*60*60*1e9) require.True(t, lockedBalanceEntry == nil) // Check that m1's DESO balance has increased by less than 1025 nDESO (some extra was spent on fees). @@ -1572,7 +1510,9 @@ func TestLockupWithDerivedKey(t *testing.T) { txn, _, _, _, err = testMeta.chain.CreateCoinLockupTxn( transactorPkBytes, txMeta.ProfilePublicKey.ToBytes(), + txMeta.RecipientPublicKey.ToBytes(), txMeta.UnlockTimestampNanoSecs, + txMeta.VestingEndTimestampNanoSecs, txMeta.LockupAmountBaseUnits, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) @@ -1718,9 +1658,11 @@ func TestLockupWithDerivedKey(t *testing.T) { // Have m0 try and lockup m0 tokens. 
(Incorrect profile + correct operation) coinLockupMetadata := &CoinLockupMetadata{ - ProfilePublicKey: NewPublicKey(m0PkBytes), - UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + ProfilePublicKey: NewPublicKey(m0PkBytes), + RecipientPublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1729,15 +1671,12 @@ func TestLockupWithDerivedKey(t *testing.T) { // Have m1 transfer over 1,000 LOCKED m1 tokens for m0 to unlock. (Correct profile + incorrect operation) _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m1Pub, - m1Priv, - m1Pub, + testMeta, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 365*24*60*60*1e9, 365*24*60*60*1e9, uint256.NewInt().SetUint64(1000), - 0, - ) + 0) _coinLockupTransferWithTestMeta( testMeta, testMeta.feeRateNanosPerKb, @@ -1762,9 +1701,11 @@ func TestLockupWithDerivedKey(t *testing.T) { ReceiverPublicKey: m0PkBytes, }) coinLockupMetadata = &CoinLockupMetadata{ - ProfilePublicKey: NewPublicKey(m1PkBytes), - UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + ProfilePublicKey: NewPublicKey(m1PkBytes), + RecipientPublicKey: NewPublicKey(m1PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1778,9 +1719,11 @@ func TestLockupWithDerivedKey(t *testing.T) { ReceiverPublicKey: m0PkBytes, }) coinLockupMetadata = &CoinLockupMetadata{ - ProfilePublicKey: 
NewPublicKey(m1PkBytes), - UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + ProfilePublicKey: NewPublicKey(m1PkBytes), + RecipientPublicKey: NewPublicKey(m1PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1815,9 +1758,11 @@ func TestLockupWithDerivedKey(t *testing.T) { // Try to submit a transaction locking up 1000 m1 coins with m0's derived key. // This should fail. (Incorrect Profile PKID + Correct Operation) coinLockupMetadata := &CoinLockupMetadata{ - ProfilePublicKey: NewPublicKey(m1PkBytes), - UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + ProfilePublicKey: NewPublicKey(m1PkBytes), + RecipientPublicKey: NewPublicKey(m1PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1827,9 +1772,11 @@ func TestLockupWithDerivedKey(t *testing.T) { // Try to submit a transaction locking up 1000 m0 coins with m0's derived key. // This should succeed. 
(Correct Profile PKID + Correct Operation) coinLockupMetadata = &CoinLockupMetadata{ - ProfilePublicKey: NewPublicKey(m0PkBytes), - UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + ProfilePublicKey: NewPublicKey(m0PkBytes), + RecipientPublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1847,9 +1794,11 @@ func TestLockupWithDerivedKey(t *testing.T) { // Try to submit a subsequent lockup transaction. This should fail as we've exhausted the derived key. coinLockupMetadata = &CoinLockupMetadata{ - ProfilePublicKey: NewPublicKey(m0PkBytes), - UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + ProfilePublicKey: NewPublicKey(m0PkBytes), + RecipientPublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 365*24*60*60*1e9+1, @@ -1880,9 +1829,11 @@ func TestLockupWithDerivedKey(t *testing.T) { // Have m0 lockup 1000 m0 tokens to be unlocked one year into the future. // This should fail. 
(Correct PKID + Incorrect Operation Type) coinLockupMetadata := &CoinLockupMetadata{ - ProfilePublicKey: NewPublicKey(m0PkBytes), - UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + ProfilePublicKey: NewPublicKey(m0PkBytes), + RecipientPublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1891,15 +1842,12 @@ func TestLockupWithDerivedKey(t *testing.T) { // Have m1 transfer over 1,000 LOCKED m1 tokens for m0 to unlock. _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m1Pub, - m1Priv, - m1Pub, + testMeta, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 365*24*60*60*1e9, 365*24*60*60*1e9, uint256.NewInt().SetUint64(1000), - 0, - ) + 0) _coinLockupTransferWithTestMeta( testMeta, testMeta.feeRateNanosPerKb, @@ -1921,9 +1869,11 @@ func TestLockupWithDerivedKey(t *testing.T) { // Try to submit a subsequent lockup transaction. This should fail as we've exhausted the derived key. 
coinLockupMetadata = &CoinLockupMetadata{ - ProfilePublicKey: NewPublicKey(m0PkBytes), - UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + ProfilePublicKey: NewPublicKey(m0PkBytes), + RecipientPublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 365*24*60*60*1e9+1, @@ -2004,9 +1954,11 @@ func TestLockupWithDerivedKey(t *testing.T) { // Perform the first lockup operation of 1000 m0 coins at 1yr coinLockupMetadata := &CoinLockupMetadata{ - ProfilePublicKey: NewPublicKey(m0PkBytes), - UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + ProfilePublicKey: NewPublicKey(m0PkBytes), + RecipientPublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -2015,17 +1967,23 @@ func TestLockupWithDerivedKey(t *testing.T) { utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m0PKID, m0PKID, 365*24*60*60*1e9) + lockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m0PKID, + m0PKID, + 365*24*60*60*1e9, + 365*24*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(1000), lockedBalanceEntry.BalanceBaseUnits) require.Equal(t, 
int64(365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) // Perform the second lockup operation of 1000 m0 coins at 2yrs coinLockupMetadata = &CoinLockupMetadata{ - ProfilePublicKey: NewPublicKey(m0PkBytes), - UnlockTimestampNanoSecs: 2 * 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + ProfilePublicKey: NewPublicKey(m0PkBytes), + RecipientPublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 2 * 365 * 24 * 60 * 60 * 1e9, + VestingEndTimestampNanoSecs: 2 * 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -2034,8 +1992,12 @@ func TestLockupWithDerivedKey(t *testing.T) { utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m0PKID, m0PKID, 2*365*24*60*60*1e9) + lockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m0PKID, + m0PKID, + 2*365*24*60*60*1e9, + 2*365*24*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(1000), lockedBalanceEntry.BalanceBaseUnits) require.Equal(t, int64(2*365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) @@ -2054,8 +2016,12 @@ func TestLockupWithDerivedKey(t *testing.T) { utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m1PKID, m0PKID, 365*24*60*60*1e9) + lockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m1PKID, + m0PKID, + 
365*24*60*60*1e9, + 365*24*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits) require.Equal(t, int64(365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) @@ -2074,8 +2040,12 @@ func TestLockupWithDerivedKey(t *testing.T) { utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m1PKID, m0PKID, 2*365*24*60*60*1e9) + lockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m1PKID, + m0PKID, + 2*365*24*60*60*1e9, + 2*365*24*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits) require.Equal(t, int64(2*365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) @@ -2098,8 +2068,12 @@ func TestLockupWithDerivedKey(t *testing.T) { require.True(t, balanceEntry.BalanceNanos.Gt(&startingBalance)) require.Equal(t, *uint256.NewInt().SetUint64(500), *uint256.NewInt().Sub(&balanceEntry.BalanceNanos, &startingBalance)) - lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m0PKID, m0PKID, 365*24*60*60*1e9) + lockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m0PKID, + m0PKID, + 365*24*60*60*1e9, + 365*24*60*60*1e9) require.NoError(t, err) require.True(t, lockedBalanceEntry == nil) @@ -2118,16 +2092,22 @@ func TestLockupWithDerivedKey(t *testing.T) { require.True(t, balanceEntry.BalanceNanos.Gt(&startingBalance)) require.Equal(t, *uint256.NewInt().SetUint64(1000), *uint256.NewInt().Sub(&balanceEntry.BalanceNanos, &startingBalance)) - lockedBalanceEntry, err = 
utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m0PKID, m0PKID, 2*365*24*60*60*1e9) + lockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m0PKID, + m0PKID, + 2*365*24*60*60*1e9, + 2*365*24*60*60*1e9) require.NoError(t, err) require.True(t, lockedBalanceEntry == nil) // Now we try and perform another operation. This should fail as we've depleted our lockup operations limit. coinLockupMetadata = &CoinLockupMetadata{ - ProfilePublicKey: NewPublicKey(m0PkBytes), - UnlockTimestampNanoSecs: 3 * 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + ProfilePublicKey: NewPublicKey(m0PkBytes), + RecipientPublicKey: NewPublicKey(m0PkBytes), + UnlockTimestampNanoSecs: 3 * 365 * 24 * 60 * 60 * 1e9, + VestingEndTimestampNanoSecs: 3 * 365 * 24 * 60 * 60 * 1e9, + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 2*365*24*60*60*1e9+2, @@ -2150,21 +2130,17 @@ func TestLockupDisconnects(t *testing.T) { // Test Coin Lockup for Profiles // utxoOps1, txn1, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - m0Pub, + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 2*365*24*60*60*1e9, 2*365*24*60*60*1e9, uint256.NewInt().SetUint64(1000), 365*24*60*60*1e9) require.NoError(t, err) utxoOps2, txn2, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - m0Pub, + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 2*365*24*60*60*1e9, 2*365*24*60*60*1e9, uint256.NewInt().SetUint64(1000), 365*24*60*60*1e9) @@ -2181,8 
+2157,12 @@ func TestLockupDisconnects(t *testing.T) { testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) m0PKID := utxoView.GetPKIDForPublicKey(m0PkBytes).PKID - lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m0PKID, m0PKID, 2*365*24*60*60*1e9) + lockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m0PKID, + m0PKID, + 2*365*24*60*60*1e9, + 2*365*24*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(1000), lockedBalanceEntry.BalanceBaseUnits) balanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) @@ -2193,8 +2173,12 @@ func TestLockupDisconnects(t *testing.T) { utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m0PKID, m0PKID, 2*365*24*60*60*1e9) + lockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m0PKID, + m0PKID, + 2*365*24*60*60*1e9, + 2*365*24*60*60*1e9) require.True(t, lockedBalanceEntry == nil) balanceEntry, _, _ = utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) require.Equal(t, *uint256.NewInt().SetUint64(1000000), balanceEntry.BalanceNanos) @@ -2210,11 +2194,9 @@ func TestLockupDisconnects(t *testing.T) { m2PkBytes, testMeta.chain.BlockTip().Height) require.NoError(t, err) utxoOps1, txn1, _, err = _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m2Pub, - m2Priv, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + t, testMeta.chain, testMeta.db, testMeta.params, 
testMeta.feeRateNanosPerKb, + m2Pub, m2Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m2Pub, + 2*365*24*60*60*1e9, 2*365*24*60*60*1e9, uint256.NewInt().SetUint64(500), 365*24*60*60*1e9) @@ -2226,11 +2208,9 @@ func TestLockupDisconnects(t *testing.T) { m2PkBytes, testMeta.chain.BlockTip().Height) require.NoError(t, err) utxoOps2, txn2, _, err = _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m2Pub, - m2Priv, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m2Pub, m2Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m2Pub, + 2*365*24*60*60*1e9, 2*365*24*60*60*1e9, uint256.NewInt().SetUint64(500), 365*24*60*60*1e9) @@ -2247,8 +2227,12 @@ func TestLockupDisconnects(t *testing.T) { testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) m2PKID := utxoView.GetPKIDForPublicKey(m2PkBytes).PKID - lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m2PKID, &ZeroPKID, 2*365*24*60*60*1e9) + lockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m2PKID, + &ZeroPKID, + 2*365*24*60*60*1e9, + 2*365*24*60*60*1e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits) currentBalance, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( @@ -2261,8 +2245,12 @@ func TestLockupDisconnects(t *testing.T) { utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m2PKID, &ZeroPKID, 2*365*24*60*60*1e9) + lockedBalanceEntry, err = + 
utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m2PKID, + &ZeroPKID, + 2*365*24*60*60*1e9, + 2*365*24*60*60*1e9) require.True(t, lockedBalanceEntry == nil) currentBalance, err = utxoView.GetSpendableDeSoBalanceNanosForPublicKey( m2PkBytes, testMeta.chain.BlockTip().Height) @@ -2478,14 +2466,9 @@ func TestLockupDisconnects(t *testing.T) { TransferRestrictionStatus: 0, }) _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m3Pub, - m3Priv, - m3Pub, - 1000, - MaxUint256, - 0) + testMeta, testMeta.feeRateNanosPerKb, + m3Pub, m3Priv, m3Pub, m3Pub, + 1000, 1000, MaxUint256, 0) utxoOps, txn, _, err = _coinLockupTransfer( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, @@ -2500,8 +2483,18 @@ func TestLockupDisconnects(t *testing.T) { require.NoError(t, err) m3PKID := utxoView.GetPKIDForPublicKey(m3PkBytes).PKID m4PKID := utxoView.GetPKIDForPublicKey(m4PkBytes).PKID - m3BalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs(m3PKID, m3PKID, 1000) - m4BalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs(m4PKID, m3PKID, 1000) + m3BalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m3PKID, + m3PKID, + 1000, + 1000) + m4BalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m4PKID, + m3PKID, + 1000, + 1000) require.True(t, nil == m3BalanceEntry) require.Equal(t, *MaxUint256, m4BalanceEntry.BalanceBaseUnits) txHash = txn.Hash() @@ -2514,8 +2507,18 @@ func TestLockupDisconnects(t *testing.T) { require.NoError(t, err) m3PKID = utxoView.GetPKIDForPublicKey(m3PkBytes).PKID m4PKID = utxoView.GetPKIDForPublicKey(m4PkBytes).PKID - m3BalanceEntry, err = 
utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs(m3PKID, m3PKID, 1000) - m4BalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs(m4PKID, m3PKID, 1000) + m3BalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m3PKID, + m3PKID, + 1000, + 1000) + m4BalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m4PKID, + m3PKID, + 1000, + 1000) require.True(t, nil == m4BalanceEntry) require.Equal(t, *MaxUint256, m3BalanceEntry.BalanceBaseUnits) @@ -2553,20 +2556,19 @@ func TestLockupDisconnects(t *testing.T) { TransferRestrictionStatus: 0, }) _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m4Pub, - m4Priv, - m4Pub, - 1000, - MaxUint256, - 0) + testMeta, testMeta.feeRateNanosPerKb, + m4Pub, m4Priv, m4Pub, m4Pub, + 1000, 1000, MaxUint256, 0) utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - m4LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m4PKID, m4PKID, 1000) + m4LockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m4PKID, + m4PKID, + 1000, + 1000) m4be, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m4PkBytes, m4PkBytes) require.NoError(t, err) require.Equal(t, *MaxUint256, m4LockedBalanceEntry.BalanceBaseUnits) @@ -2584,8 +2586,12 @@ func TestLockupDisconnects(t *testing.T) { utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - m4LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m4PKID, m4PKID, 1000) + 
m4LockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m4PKID, + m4PKID, + 1000, + 1000) require.NoError(t, err) m4be, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m4PkBytes, m4PkBytes) require.True(t, nil == m4LockedBalanceEntry) @@ -2603,8 +2609,12 @@ func TestLockupDisconnects(t *testing.T) { utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - m4LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m4PKID, m4PKID, 1000) + m4LockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m4PKID, + m4PKID, + 1000, + 1000) require.NoError(t, err) m4be, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m4PkBytes, m4PkBytes) require.Equal(t, *uint256.NewInt(), m4be.BalanceNanos) @@ -2622,20 +2632,19 @@ func TestLockupDisconnects(t *testing.T) { m4PkBytes, testMeta.chain.BlockTip().Height) require.NoError(t, err) _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, - testMeta.feeRateNanosPerKb, - m4Pub, - m4Priv, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), - 1000, - uint256.NewInt().SetUint64(500), - 0) + testMeta, testMeta.feeRateNanosPerKb, + m4Pub, m4Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m4Pub, + 1000, 1000, uint256.NewInt().SetUint64(500), 0) utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - m4LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m4PKID, &ZeroPKID, 1000) + m4LockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m4PKID, + 
&ZeroPKID, + 1000, + 1000) preUnlockBalance, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( m4PkBytes, testMeta.chain.BlockTip().Height) require.NoError(t, err) @@ -2655,8 +2664,12 @@ func TestLockupDisconnects(t *testing.T) { utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - m4LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m4PKID, &ZeroPKID, 1000) + m4LockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m4PKID, + &ZeroPKID, + 1000, + 1000) currentBalance, err = utxoView.GetSpendableDeSoBalanceNanosForPublicKey( m4PkBytes, testMeta.chain.BlockTip().Height) require.NoError(t, err) @@ -2676,8 +2689,12 @@ func TestLockupDisconnects(t *testing.T) { utxoView, err = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - m4LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m4PKID, &ZeroPKID, 1000) + m4LockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m4PKID, + &ZeroPKID, + 1000, + 1000) currentBalance, err = utxoView.GetSpendableDeSoBalanceNanosForPublicKey( m4PkBytes, testMeta.chain.BlockTip().Height) require.NoError(t, err) @@ -2711,12 +2728,20 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { m3BalanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) require.Equal(t, *uint256.NewInt().SetUint64(1000000), m0BalanceEntry.BalanceNanos) require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) - m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m0PKID, m0PKID, tipTimestamp+2e9) + 
m0LockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m0PKID, + m0PKID, + tipTimestamp+2e9, + tipTimestamp+2e9) require.NoError(t, err) require.True(t, m0LockedBalanceEntry == nil) - m3LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m3PKID, m0PKID, tipTimestamp+2e9) + m3LockedBalanceEntry, err := + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m3PKID, + m0PKID, + tipTimestamp+2e9, + tipTimestamp+2e9) require.NoError(t, err) require.True(t, m3LockedBalanceEntry == nil) @@ -2732,8 +2757,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.NoError(t, err) _signTxn(t, updateTxn, m0Priv) lockupTxn, _, _, _, err := testMeta.chain.CreateCoinLockupTxn( - m0PkBytes, m0PkBytes, tipTimestamp+2e9, uint256.NewInt().SetUint64(1000), - testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + m0PkBytes, m0PkBytes, m0PkBytes, tipTimestamp+2e9, tipTimestamp+2e9, + uint256.NewInt().SetUint64(1000), testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) _signTxn(t, lockupTxn, m0Priv) transferTxn, _, _, _, err := testMeta.chain.CreateCoinLockupTransferTxn( @@ -2783,12 +2808,20 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) require.Equal(t, *uint256.NewInt().SetUint64(999000), m0BalanceEntry.BalanceNanos) require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m0PKID, m0PKID, tipTimestamp+2e9) + m0LockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m0PKID, + m0PKID, + tipTimestamp+2e9, + tipTimestamp+2e9) require.NoError(t, err) 
require.True(t, m0LockedBalanceEntry == nil) - m3LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m3PKID, m0PKID, tipTimestamp+2e9) + m3LockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m3PKID, + m0PKID, + tipTimestamp+2e9, + tipTimestamp+2e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(1000), m3LockedBalanceEntry.BalanceBaseUnits) @@ -2831,12 +2864,20 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) require.Equal(t, *uint256.NewInt().SetUint64(999000), m0BalanceEntry.BalanceNanos) require.Equal(t, *uint256.NewInt().SetUint64(1000), m3BalanceEntry.BalanceNanos) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m0PKID, m0PKID, tipTimestamp+2e9) + m0LockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m0PKID, + m0PKID, + tipTimestamp+2e9, + tipTimestamp+2e9) require.NoError(t, err) require.True(t, m0LockedBalanceEntry == nil) - m3LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m3PKID, m0PKID, tipTimestamp+2e9) + m3LockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m3PKID, + m0PKID, + tipTimestamp+2e9, + tipTimestamp+2e9) require.NoError(t, err) require.True(t, m3LockedBalanceEntry == nil) @@ -2878,12 +2919,20 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) require.Equal(t, *uint256.NewInt().SetUint64(999000), m0BalanceEntry.BalanceNanos) require.Equal(t, *uint256.NewInt(), 
m3BalanceEntry.BalanceNanos) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m0PKID, m0PKID, tipTimestamp+2e9) + m0LockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m0PKID, + m0PKID, + tipTimestamp+2e9, + tipTimestamp+2e9) require.NoError(t, err) require.True(t, m0LockedBalanceEntry == nil) - m3LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m3PKID, m0PKID, tipTimestamp+2e9) + m3LockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m3PKID, + m0PKID, + tipTimestamp+2e9, + tipTimestamp+2e9) require.NoError(t, err) require.Equal(t, *uint256.NewInt().SetUint64(1000), m3LockedBalanceEntry.BalanceBaseUnits) @@ -2923,12 +2972,20 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) require.Equal(t, *uint256.NewInt().SetUint64(1000000), m0BalanceEntry.BalanceNanos) require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m0PKID, m0PKID, tipTimestamp+2e9) + m0LockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m0PKID, + m0PKID, + tipTimestamp+2e9, + tipTimestamp+2e9) require.NoError(t, err) require.True(t, m0LockedBalanceEntry == nil) - m3LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - m3PKID, m0PKID, tipTimestamp+2e9) + m3LockedBalanceEntry, err = + utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + m3PKID, + m0PKID, + tipTimestamp+2e9, + tipTimestamp+2e9) 
require.NoError(t, err) require.True(t, m3LockedBalanceEntry == nil) } @@ -3076,7 +3133,9 @@ func _coinLockupWithTestMetaAndConnectTimestamp( transactorPublicKeyBase58Check string, transactorPrivateKeyBase58Check string, profilePublicKeyBase58Check string, + recipientPublicKeyBase58Check string, unlockTimestampNanoSecs int64, + vestingEndTimestampNanoSecs int64, lockupAmountBaseUnits *uint256.Int, connectTimestamp int64) { @@ -3085,24 +3144,32 @@ func _coinLockupWithTestMetaAndConnectTimestamp( _getBalance(testMeta.t, testMeta.chain, nil, transactorPublicKeyBase58Check)) currentOps, currentTxn, _, err := _coinLockupWithConnectTimestamp( - testMeta.t, testMeta.chain, testMeta.db, testMeta.params, - feeRateNanosPerKB, transactorPublicKeyBase58Check, transactorPrivateKeyBase58Check, - profilePublicKeyBase58Check, unlockTimestampNanoSecs, lockupAmountBaseUnits, connectTimestamp) + testMeta.t, testMeta.chain, testMeta.db, testMeta.params, feeRateNanosPerKB, + transactorPublicKeyBase58Check, + transactorPrivateKeyBase58Check, + profilePublicKeyBase58Check, + recipientPublicKeyBase58Check, + unlockTimestampNanoSecs, + vestingEndTimestampNanoSecs, + lockupAmountBaseUnits, + connectTimestamp) require.NoError(testMeta.t, err) testMeta.txnOps = append(testMeta.txnOps, currentOps) testMeta.txns = append(testMeta.txns, currentTxn) } -func _coinLockupWithConnectTimestamp(t *testing.T, chain *Blockchain, db *badger.DB, - params *DeSoParams, feeRateNanosPerKB uint64, +func _coinLockupWithConnectTimestamp( + t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoParams, feeRateNanosPerKB uint64, transactorPublicKeyBase58Check string, transactorPrivateKeyBase58Check string, profilePublicKeyBase58Check string, + recipientPublicKeyBase58Check string, unlockTimestampNanoSecs int64, + vestingEndTimestampNanoSecs int64, lockupAmountBaseUnits *uint256.Int, - connectTimestamp int64) ( - _utxoOps []*UtxoOperation, _txn *MsgDeSoTxn, _height uint32, _err error) { + connectTimestamp 
int64, +) (_utxoOps []*UtxoOperation, _txn *MsgDeSoTxn, _height uint32, _err error) { assert := assert.New(t) require := require.New(t) @@ -3115,13 +3182,22 @@ func _coinLockupWithConnectTimestamp(t *testing.T, chain *Blockchain, db *badger profilePkBytes, _, err := Base58CheckDecode(profilePublicKeyBase58Check) require.NoError(err) + recipientPkBytes, _, err := Base58CheckDecode(recipientPublicKeyBase58Check) + require.NoError(err) + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) require.NoError(err) // Create the coin lockup transaction. txn, totalInputMake, _, feesMake, err := chain.CreateCoinLockupTxn( - transactorPkBytes, profilePkBytes, unlockTimestampNanoSecs, - lockupAmountBaseUnits, feeRateNanosPerKB, nil, []*DeSoOutput{}) + transactorPkBytes, + profilePkBytes, + recipientPkBytes, + unlockTimestampNanoSecs, + vestingEndTimestampNanoSecs, + lockupAmountBaseUnits, + feeRateNanosPerKB, + nil, []*DeSoOutput{}) if err != nil { return nil, nil, 0, err } diff --git a/lib/block_view_types.go b/lib/block_view_types.go index ef8086af8..0c5b89389 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -4016,10 +4016,47 @@ type GlobalParamsEntry struct { // blocks) before they are jailed. JailInactiveValidatorGracePeriodEpochs uint64 - // LockedDESOTransferRestrictions is the transfer restrictions on Locked raw DESO. - // We place it here to prevent the creation of a ZeroPKID profile entry. + // LockedDESOTransferRestrictions specifies the transfer restriction status + // of locked unvested DESO. LockedDESOTransferRestrictions TransferRestrictionStatus + // MaximumVestedIntersectionsPerLockupTransaction is used to limit the computational complexity of + // vested lockup transactions. Essentially, vested lockups may overlap in time creating either + // significant complexity on the lockup transaction or the unlock transaction. 
As a simple example, + // consider a user having the following five vested lockups: + // + // January 1st 2024 -> January 1st 2025; Amount: 1 DESO + // February 1st 2024 -> February 1st 2025; Amount: 1 DESO + // March 1st 2024 -> March 1st 2025; Amount: 1 DESO + // April 1st 2024 -> April 1st 2025; Amount: 1 DESO + // May 1st 2024 -> May 1st 2025; Amount: 1 DESO + // + // Notice that between May 1st 2024 and January 1st 2025 anytime the user wants to perform an unlock + // on these vested locked balance entries they must read five distinct entries, update them, + // possible consolidate them with other existing entries in the db, and write them back to disk. Worse, + // this would happen on the unlock transaction which can be triggered every time. To get around this issue, + // we consolidate these transactions on the lockup instead. For example, what SHOULD be stored in the database + // for these five vested lockups is: + // + // January 1st 2024 -> February 1st 2024; Amount: 1 DESO + // February 1st 2024 -> March 1st 2024; Amount: 2 DESO + // March 1st 2024 -> April 1st 2024; Amount: 3 DESO + // April 1st 2024 -> May 1st 2024; Amount: 4 DESO + // May 1st 2024 -> January 1st 2025; Amount: 5 DESO + // January 1st 2025 -> February 1st 2025; Amount: 4 DESO + // February 1st 2025 -> March 1st 2025; Amount: 3 DESO + // March 1st 2025 -> April 1st 2025; Amount: 2 DESO + // April 1st 2025 -> May 1st 2025; Amount: 1 DESO + // + // Notice that this is functionally identical but at any given point in time we hit exactly one vested + // locked balance entry. This consolidation on the lockup transaction operation could be computationally expensive. + // Hence, we limit this complexity with the MaximumVestedIntersectionsPerLockupTransactions (default: 100). + // When connecting a lockup transaction we check to see how many existing vested locked balance entries + // we would intersect with. 
If we exceed the MaximumVestedIntersectionsPerLockupTransaction, we reject + // the transaction. A user must split their single transaction into multiple disjoint time intervals which + // satisfies the limit. + MaximumVestedIntersectionsPerLockupTransaction int + // FeeBucketGrowthRateBasisPoints is the rate of growth of the fee bucket ranges. This is part of the new // PoS Mempool. The multiplier is given as basis points. For example a value of 1000 means that the fee bucket // ranges will grow by 10% each time. If, let's say, we start with MinimumNetworkFeeNanosPerKB of 1000 nanos, @@ -4035,23 +4072,24 @@ type GlobalParamsEntry struct { func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { return &GlobalParamsEntry{ - USDCentsPerBitcoin: gp.USDCentsPerBitcoin, - CreateProfileFeeNanos: gp.CreateProfileFeeNanos, - CreateNFTFeeNanos: gp.CreateNFTFeeNanos, - MaxCopiesPerNFT: gp.MaxCopiesPerNFT, - MinimumNetworkFeeNanosPerKB: gp.MinimumNetworkFeeNanosPerKB, - MaxNonceExpirationBlockHeightOffset: gp.MaxNonceExpirationBlockHeightOffset, - StakeLockupEpochDuration: gp.StakeLockupEpochDuration, - ValidatorJailEpochDuration: gp.ValidatorJailEpochDuration, - LeaderScheduleMaxNumValidators: gp.LeaderScheduleMaxNumValidators, - ValidatorSetMaxNumValidators: gp.ValidatorSetMaxNumValidators, - StakingRewardsMaxNumStakes: gp.StakingRewardsMaxNumStakes, - StakingRewardsAPYBasisPoints: gp.StakingRewardsAPYBasisPoints, - EpochDurationNumBlocks: gp.EpochDurationNumBlocks, - JailInactiveValidatorGracePeriodEpochs: gp.JailInactiveValidatorGracePeriodEpochs, - LockedDESOTransferRestrictions: gp.LockedDESOTransferRestrictions, - FeeBucketGrowthRateBasisPoints: gp.FeeBucketGrowthRateBasisPoints, - FailingTransactionBMFMultiplierBasisPoints: gp.FailingTransactionBMFMultiplierBasisPoints, + USDCentsPerBitcoin: gp.USDCentsPerBitcoin, + CreateProfileFeeNanos: gp.CreateProfileFeeNanos, + CreateNFTFeeNanos: gp.CreateNFTFeeNanos, + MaxCopiesPerNFT: gp.MaxCopiesPerNFT, + 
MinimumNetworkFeeNanosPerKB: gp.MinimumNetworkFeeNanosPerKB, + MaxNonceExpirationBlockHeightOffset: gp.MaxNonceExpirationBlockHeightOffset, + StakeLockupEpochDuration: gp.StakeLockupEpochDuration, + ValidatorJailEpochDuration: gp.ValidatorJailEpochDuration, + LeaderScheduleMaxNumValidators: gp.LeaderScheduleMaxNumValidators, + ValidatorSetMaxNumValidators: gp.ValidatorSetMaxNumValidators, + StakingRewardsMaxNumStakes: gp.StakingRewardsMaxNumStakes, + StakingRewardsAPYBasisPoints: gp.StakingRewardsAPYBasisPoints, + EpochDurationNumBlocks: gp.EpochDurationNumBlocks, + JailInactiveValidatorGracePeriodEpochs: gp.JailInactiveValidatorGracePeriodEpochs, + LockedDESOTransferRestrictions: gp.LockedDESOTransferRestrictions, + MaximumVestedIntersectionsPerLockupTransaction: gp.MaximumVestedIntersectionsPerLockupTransaction, + FeeBucketGrowthRateBasisPoints: gp.FeeBucketGrowthRateBasisPoints, + FailingTransactionBMFMultiplierBasisPoints: gp.FailingTransactionBMFMultiplierBasisPoints, } } @@ -4076,6 +4114,7 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.EpochDurationNumBlocks)...) data = append(data, UintToBuf(gp.JailInactiveValidatorGracePeriodEpochs)...) data = append(data, byte(gp.LockedDESOTransferRestrictions)) + data = append(data, IntToBuf(int64(gp.MaximumVestedIntersectionsPerLockupTransaction))...) data = append(data, UintToBuf(gp.FeeBucketGrowthRateBasisPoints)...) data = append(data, UintToBuf(gp.FailingTransactionBMFMultiplierBasisPoints)...) 
} @@ -4144,11 +4183,17 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading JailInactiveValidatorGracePeriodEpochs: ") } - statusByte, err := rr.ReadByte() + lockedDESOTransferRestrictions, err := rr.ReadByte() + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading LockedDESOTransferRestrictions: ") + } + gp.LockedDESOTransferRestrictions = TransferRestrictionStatus(lockedDESOTransferRestrictions) + maximumVestedIntersectionsPerLockupTransaction, err := ReadVarint(rr) if err != nil { - return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading LockedDESOTransferRestrictions") + return errors.Wrapf(err, + "GlobalParamsEntry.Decode: Problem reading MaximumVestedIntersectionsPerLockupTransaction") } - gp.LockedDESOTransferRestrictions = TransferRestrictionStatus(statusByte) + gp.MaximumVestedIntersectionsPerLockupTransaction = int(maximumVestedIntersectionsPerLockupTransaction) gp.FeeBucketGrowthRateBasisPoints, err = ReadUvarint(rr) if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading FeeBucketGrowthRateBasisPoints") diff --git a/lib/blockchain.go b/lib/blockchain.go index c5fb3ee41..1581a3a04 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5382,11 +5382,14 @@ func (bc *Blockchain) _createAssociationTxn( func (bc *Blockchain) CreateCoinLockupTxn( TransactorPublicKey []byte, ProfilePublicKey []byte, + RecipientPublicKey []byte, UnlockTimestampNanoSecs int64, + VestingEndTimestampNanoSecs int64, LockupAmountBaseUnits *uint256.Int, - // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool *DeSoMempool, additionalOutputs []*DeSoOutput) ( - _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { + minFeeRateNanosPerKB uint64, + mempool *DeSoMempool, + additionalOutputs []*DeSoOutput, +) (_txn *MsgDeSoTxn, _totalInput uint64, 
_changeAmount uint64, _fees uint64, _err error) { // NOTE: TxInputs is a remnant of the UTXO transaction model. // It's assumed that lockup transactions follow balance model. @@ -5396,9 +5399,11 @@ func (bc *Blockchain) CreateCoinLockupTxn( txn := &MsgDeSoTxn{ PublicKey: TransactorPublicKey, TxnMeta: &CoinLockupMetadata{ - ProfilePublicKey: NewPublicKey(ProfilePublicKey), - UnlockTimestampNanoSecs: UnlockTimestampNanoSecs, - LockupAmountBaseUnits: LockupAmountBaseUnits, + ProfilePublicKey: NewPublicKey(ProfilePublicKey), + RecipientPublicKey: NewPublicKey(RecipientPublicKey), + UnlockTimestampNanoSecs: UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: VestingEndTimestampNanoSecs, + LockupAmountBaseUnits: LockupAmountBaseUnits, }, TxOutputs: additionalOutputs, // The signature will be added once other transaction fields are finalized. diff --git a/lib/constants.go b/lib/constants.go index c4363db03..b99a040ef 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -708,6 +708,11 @@ type DeSoParams struct { // in BMF calculations. DefaultFailingTransactionBMFMultiplierBasisPoints uint64 + // DefaultMaximumVestedIntersectionsPerLockupTransaction is the default value for + // GlobalParamsEntry.MaximumVestedIntersectionsPerLockupTransaction. See the comment + // in GlobalParamsEntry for a detailed description of its usage. + DefaultMaximumVestedIntersectionsPerLockupTransaction int + ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -1118,6 +1123,9 @@ var DeSoMainnetParams = DeSoParams{ // The rate of the failing transaction's fee used in BMF calculations. DefaultFailingTransactionBMFMultiplierBasisPoints: uint64(2500), + // The maximum number of vested lockup intersections in a lockup transaction. 
+ DefaultMaximumVestedIntersectionsPerLockupTransaction: 100, + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1382,6 +1390,9 @@ var DeSoTestnetParams = DeSoParams{ // The rate of the failing transaction's fee used in BMF calculations. DefaultFailingTransactionBMFMultiplierBasisPoints: uint64(2500), + // The maximum number of vested lockup intersections in a lockup transaction. + DefaultMaximumVestedIntersectionsPerLockupTransaction: 100, + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), @@ -1419,23 +1430,24 @@ const ( IsFrozenKey = "IsFrozen" // Keys for a GlobalParamUpdate transaction's extra data map. - USDCentsPerBitcoinKey = "USDCentsPerBitcoin" - MinNetworkFeeNanosPerKBKey = "MinNetworkFeeNanosPerKB" - CreateProfileFeeNanosKey = "CreateProfileFeeNanos" - CreateNFTFeeNanosKey = "CreateNFTFeeNanos" - MaxCopiesPerNFTKey = "MaxCopiesPerNFT" - MaxNonceExpirationBlockHeightOffsetKey = "MaxNonceExpirationBlockHeightOffset" - ForbiddenBlockSignaturePubKeyKey = "ForbiddenBlockSignaturePubKey" - StakeLockupEpochDurationKey = "StakeLockupEpochDuration" - ValidatorJailEpochDurationKey = "ValidatorJailEpochDuration" - LeaderScheduleMaxNumValidatorsKey = "LeaderScheduleMaxNumValidators" - ValidatorSetMaxNumValidatorsKey = "ValidatorSetMaxNumValidators" - StakingRewardsMaxNumStakesKey = "StakingRewardsMaxNumStakes" - StakingRewardsAPYBasisPointsKey = "StakingRewardsAPYBasisPoints" - EpochDurationNumBlocksKey = "EpochDurationNumBlocks" - JailInactiveValidatorGracePeriodEpochsKey = "JailInactiveValidatorGracePeriodEpochs" - FeeBucketGrowthRateBasisPointsKey = "FeeBucketGrowthRateBasisPointsKey" - FailingTransactionBMFMultiplierBasisPointsKey = "FailingTransactionBMFMultiplierBasisPoints" + 
USDCentsPerBitcoinKey = "USDCentsPerBitcoin" + MinNetworkFeeNanosPerKBKey = "MinNetworkFeeNanosPerKB" + CreateProfileFeeNanosKey = "CreateProfileFeeNanos" + CreateNFTFeeNanosKey = "CreateNFTFeeNanos" + MaxCopiesPerNFTKey = "MaxCopiesPerNFT" + MaxNonceExpirationBlockHeightOffsetKey = "MaxNonceExpirationBlockHeightOffset" + ForbiddenBlockSignaturePubKeyKey = "ForbiddenBlockSignaturePubKey" + StakeLockupEpochDurationKey = "StakeLockupEpochDuration" + ValidatorJailEpochDurationKey = "ValidatorJailEpochDuration" + LeaderScheduleMaxNumValidatorsKey = "LeaderScheduleMaxNumValidators" + ValidatorSetMaxNumValidatorsKey = "ValidatorSetMaxNumValidators" + StakingRewardsMaxNumStakesKey = "StakingRewardsMaxNumStakes" + StakingRewardsAPYBasisPointsKey = "StakingRewardsAPYBasisPoints" + EpochDurationNumBlocksKey = "EpochDurationNumBlocks" + JailInactiveValidatorGracePeriodEpochsKey = "JailInactiveValidatorGracePeriodEpochs" + MaximumVestedIntersectionsPerLockupTransactionKey = "MaximumVestedIntersectionsPerLockupTransaction" + FeeBucketGrowthRateBasisPointsKey = "FeeBucketGrowthRateBasisPointsKey" + FailingTransactionBMFMultiplierBasisPointsKey = "FailingTransactionBMFMultiplierBasisPoints" DiamondLevelKey = "DiamondLevel" DiamondPostHashKey = "DiamondPostHash" diff --git a/lib/db_utils.go b/lib/db_utils.go index d4df2368a..85a652139 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -571,12 +571,13 @@ type DBPrefixes struct { // Note, we parse the ValidatorPKID and StakerPKID from the key. PrefixSnapshotStakeToRewardByValidatorAndStaker []byte `prefix_id:"[92]" is_state:"true"` - // PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDUnlockTimestampNanoSecs: + // PrefixLockedBalanceEntry: // Retrieves LockedBalanceEntries that may or may not be claimable for unlock. - // LockedBalanceEntries can be retrieved by HodlerPKID and CreatorPKID are have their - // corresponding unlock timestamp appended to sort by timestamp. 
- // Prefix, , , -> - PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDUnlockTimestampNanoSecs []byte `prefix_id:"[93]" is_state:"true"` + // A discriminator byte it placed before the UnlockTimestampNanoSecs to allow for separation + // among the vested and unvested locked balance entries without separate indexes. + // Prefix, , , , + // , -> + PrefixLockedBalanceEntry []byte `prefix_id:"[93]" is_state:"true"` // PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs: // Retrieves a LockupYieldCurvePoint. @@ -870,7 +871,7 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotStakeToRewardByValidatorAndStaker) { // prefix_id:"[92]" return true, &StakeEntry{} - } else if bytes.Equal(prefix, Prefixes.PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDUnlockTimestampNanoSecs) { + } else if bytes.Equal(prefix, Prefixes.PrefixLockedBalanceEntry) { // prefix_id:"[93]" return true, &LockedBalanceEntry{} } else if bytes.Equal(prefix, Prefixes.PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs) { @@ -10817,24 +10818,85 @@ func DBDeletePostAssociationWithTxn(txn *badger.Txn, snap *Snapshot, association // LockedBalanceEntry DB Key Operations -func _dbKeyForLockedBalanceEntry(lockedBalanceEntry LockedBalanceEntry) []byte { - key := append([]byte{}, Prefixes.PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDUnlockTimestampNanoSecs...) +const ( + // NOTE: By delineating the vested and unvested locked balance entries with the byte below, + // we know that all vested keys will be lexicographically less than all unvested keys. This enables + // us to construct db key prefixes based on either the vested or unvested keys and use said prefixes + // for seeks through either the unvested or vested keys. This is a convenient optimization that prevents + // us from needing seperate indexes with the addition of only a single added key. 
+ + VestedLockedBalanceEntriesKeyByte = 0 + UnvestedLockedBalanceEntriesKeyByte = 1 +) + +func _dbKeyForLockedBalanceEntry(lockedBalanceEntry *LockedBalanceEntry) []byte { + key := append([]byte{}, Prefixes.PrefixLockedBalanceEntry...) key = append(key, lockedBalanceEntry.HODLerPKID[:]...) key = append(key, lockedBalanceEntry.ProfilePKID[:]...) - return append(key, EncodeUint64(uint64(lockedBalanceEntry.UnlockTimestampNanoSecs))...) + + // Delineate between vested and unvested locked balance entries. + if lockedBalanceEntry.UnlockTimestampNanoSecs == lockedBalanceEntry.VestingEndTimestampNanoSecs { + key = append(key, UnvestedLockedBalanceEntriesKeyByte) + } else { + key = append(key, VestedLockedBalanceEntriesKeyByte) + } + + key = append(key, EncodeUint64(uint64(lockedBalanceEntry.UnlockTimestampNanoSecs))...) + return append(key, EncodeUint64(uint64(lockedBalanceEntry.VestingEndTimestampNanoSecs))...) } -func DBPrefixKeyForLockedBalanceEntryByHODLerPKIDandProfilePKID(lockedBalanceEntry *LockedBalanceEntry) []byte { - data := append([]byte{}, Prefixes.PrefixLockedBalanceEntryByHODLerPKIDProfilePKIDUnlockTimestampNanoSecs...) - data = append(data, lockedBalanceEntry.HODLerPKID.ToBytes()...) - data = append(data, lockedBalanceEntry.ProfilePKID.ToBytes()...) - return data +func DBPrefixForVestedLockedBalanceEntriesOnUnlockTimestamp(lockedBalanceEntry *LockedBalanceEntry) ([]byte, error) { + key := append([]byte{}, Prefixes.PrefixLockedBalanceEntry...) + key = append(key, lockedBalanceEntry.HODLerPKID[:]...) + key = append(key, lockedBalanceEntry.ProfilePKID[:]...) + + // Delineate between vested and unvested locked balance entries. 
+ if lockedBalanceEntry.UnlockTimestampNanoSecs == lockedBalanceEntry.VestingEndTimestampNanoSecs { + return nil, errors.New("DBPrefixForVestedLockedBalanceEntries: called with unvested lockedBalanceEntry") + } else { + key = append(key, VestedLockedBalanceEntriesKeyByte) + } + + return append(key, EncodeUint64(uint64(lockedBalanceEntry.UnlockTimestampNanoSecs))...), nil +} + +func DBPrefixForLockedBalanceEntriesOnType(lockedBalanceEntry *LockedBalanceEntry) []byte { + key := append([]byte{}, Prefixes.PrefixLockedBalanceEntry...) + key = append(key, lockedBalanceEntry.HODLerPKID[:]...) + key = append(key, lockedBalanceEntry.ProfilePKID[:]...) + + // Delineate between vested and unvested locked balance entries. + if lockedBalanceEntry.UnlockTimestampNanoSecs == lockedBalanceEntry.VestingEndTimestampNanoSecs { + key = append(key, UnvestedLockedBalanceEntriesKeyByte) + } else { + key = append(key, VestedLockedBalanceEntriesKeyByte) + } + + return key +} + +func DBKeyForUnvestedLockedBalanceEntryWithVestedType(lockedBalanceEntry *LockedBalanceEntry) []byte { + // NOTE: This key seems odd, but it's used for seeking through vested locked balance entries + // given the current timestamp. Since the current timestamp must be used for both + // the UnlockTimestampNanoSecs and VestingEndTimestampNanoSecs, the other DBKey functions would not + // use the VestedLockedBalanceEntriesKeyByte but rather the UnvestedLockedBalanceEntriesKeyByte. + key := append([]byte{}, Prefixes.PrefixLockedBalanceEntry...) + key = append(key, lockedBalanceEntry.HODLerPKID[:]...) + key = append(key, lockedBalanceEntry.ProfilePKID[:]...) + key = append(key, VestedLockedBalanceEntriesKeyByte) + key = append(key, EncodeUint64(uint64(lockedBalanceEntry.UnlockTimestampNanoSecs))...) + return append(key, EncodeUint64(uint64(lockedBalanceEntry.VestingEndTimestampNanoSecs))...) 
} // LockedBalanceEntry Put/Delete Operations (Badger Writes) -func DbPutLockedBalanceEntryMappingsWithTxn(txn *badger.Txn, snap *Snapshot, blockHeight uint64, - lockedBalanceEntry LockedBalanceEntry, eventManager *EventManager) error { +func DbPutLockedBalanceEntryMappingsWithTxn( + txn *badger.Txn, + snap *Snapshot, + blockHeight uint64, + lockedBalanceEntry LockedBalanceEntry, + eventManager *EventManager, +) error { // Sanity check the fields in the LockedBalanceEntry used in constructing the key. if len(lockedBalanceEntry.HODLerPKID) != btcec.PubKeyBytesLenCompressed { return fmt.Errorf("DbPutLockedBalanceEntryMappingsWithTxn: HODLer PKID "+ @@ -10845,7 +10907,7 @@ func DbPutLockedBalanceEntryMappingsWithTxn(txn *badger.Txn, snap *Snapshot, blo "length %d != %d", len(lockedBalanceEntry.ProfilePKID), btcec.PubKeyBytesLenCompressed) } - if err := DBSetWithTxn(txn, snap, _dbKeyForLockedBalanceEntry(lockedBalanceEntry), + if err := DBSetWithTxn(txn, snap, _dbKeyForLockedBalanceEntry(&lockedBalanceEntry), EncodeToBytes(blockHeight, &lockedBalanceEntry), eventManager); err != nil { return errors.Wrapf(err, "DbPutLockedBalanceEntryMappingsWithTxn: "+ "Problem adding locked balance entry to db") @@ -10853,10 +10915,15 @@ func DbPutLockedBalanceEntryMappingsWithTxn(txn *badger.Txn, snap *Snapshot, blo return nil } -func DbDeleteLockedBalanceEntryWithTxn(txn *badger.Txn, snap *Snapshot, lockedBalanceEntry LockedBalanceEntry, - eventManager *EventManager, entryIsDeleted bool) error { +func DbDeleteLockedBalanceEntryWithTxn( + txn *badger.Txn, + snap *Snapshot, + lockedBalanceEntry LockedBalanceEntry, + eventManager *EventManager, + entryIsDeleted bool, +) error { // First check that a mapping exists. If one doesn't then there's nothing to do. 
- _, err := DBGetWithTxn(txn, snap, _dbKeyForLockedBalanceEntry(lockedBalanceEntry)) + _, err := DBGetWithTxn(txn, snap, _dbKeyForLockedBalanceEntry(&lockedBalanceEntry)) if errors.Is(err, badger.ErrKeyNotFound) { return nil } @@ -10869,7 +10936,7 @@ func DbDeleteLockedBalanceEntryWithTxn(txn *badger.Txn, snap *Snapshot, lockedBa // When a locked balance entry exists, delete the locked balance entry mapping. if err := DBDeleteWithTxn(txn, snap, - _dbKeyForLockedBalanceEntry(lockedBalanceEntry), eventManager, entryIsDeleted); err != nil { + _dbKeyForLockedBalanceEntry(&lockedBalanceEntry), eventManager, entryIsDeleted); err != nil { return errors.Wrapf(err, "DbDeleteLockedBalanceEntryWithTxn: Deleting "+ "locked balance entry for HODLer PKID %s, Profile PKID %s, expiration timestamp %d", lockedBalanceEntry.HODLerPKID.ToString(), lockedBalanceEntry.ProfilePKID.ToString(), @@ -10880,28 +10947,37 @@ func DbDeleteLockedBalanceEntryWithTxn(txn *badger.Txn, snap *Snapshot, lockedBa // LockedBalanceEntry Get Operations (Badger Reads) -func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecs( - handle *badger.DB, snap *Snapshot, hodlerPKID *PKID, profilePKID *PKID, - unlockTimestamp int64) (_lockedBalanceEntry *LockedBalanceEntry, _err error) { - +func DBGetLockedBalanceEntryForLockedBalanceEntryKey( + handle *badger.DB, + snap *Snapshot, + lockedBalanceEntryKey *LockedBalanceEntryKey, +) ( + _lockedBalanceEntry *LockedBalanceEntry, + _err error, +) { var ret *LockedBalanceEntry err := handle.View(func(txn *badger.Txn) error { var err error - ret, err = DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsWithTxn( - txn, snap, hodlerPKID, profilePKID, unlockTimestamp) + ret, err = DBGetLockedBalanceEntryForLockedBalanceEntryKeyWithTxn(txn, snap, lockedBalanceEntryKey) return err }) return ret, err } -func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsWithTxn( - txn *badger.Txn, snap *Snapshot, hodlerPKID *PKID, 
profilePKID *PKID, - unlockTimestamp int64) (_lockedBalanceEntry *LockedBalanceEntry, _err error) { - - key := _dbKeyForLockedBalanceEntry(LockedBalanceEntry{ - HODLerPKID: hodlerPKID, - ProfilePKID: profilePKID, - UnlockTimestampNanoSecs: unlockTimestamp, +func DBGetLockedBalanceEntryForLockedBalanceEntryKeyWithTxn( + txn *badger.Txn, + snap *Snapshot, + lockedBalanceEntryKey *LockedBalanceEntryKey, +) ( + _lockedBalanceEntry *LockedBalanceEntry, + _err error, +) { + // Construct a key from the LockedBalanceEntry. + key := _dbKeyForLockedBalanceEntry(&LockedBalanceEntry{ + HODLerPKID: &lockedBalanceEntryKey.HODLerPKID, + ProfilePKID: &lockedBalanceEntryKey.ProfilePKID, + UnlockTimestampNanoSecs: lockedBalanceEntryKey.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: lockedBalanceEntryKey.VestingEndTimestampNanoSecs, }) // Get the key from the db. @@ -10911,7 +10987,8 @@ func DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsWithT } if err != nil { return nil, - errors.Wrap(err, "DBGetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsWithTxn") + errors.Wrap(err, + "DBGetLockedBalanceEntryForLockedBalanceEntryKeyWithTxn") } return DecodeDeSoEncoder(&LockedBalanceEntry{}, bytes.NewReader(lockedBalanceEntryBytes)) @@ -10923,15 +11000,21 @@ func DBGetUnlockableLockedBalanceEntries( hodlerPKID *PKID, profilePKID *PKID, currentTimestampUnixNanoSecs int64, -) ([]*LockedBalanceEntry, error) { - var ret []*LockedBalanceEntry +) ( + _unvestedUnlockabeLockedBalanceEntries []*LockedBalanceEntry, + _vestedUnlockableLockedEntries []*LockedBalanceEntry, + _err error, +) { + + var unvested []*LockedBalanceEntry + var vested []*LockedBalanceEntry var err error handle.View(func(txn *badger.Txn) error { - ret, err = DBGetUnlockableLockedBalanceEntriesWithTxn( + unvested, vested, err = DBGetUnlockableLockedBalanceEntriesWithTxn( txn, snap, hodlerPKID, profilePKID, currentTimestampUnixNanoSecs) return nil }) - return ret, err + return unvested, 
vested, err } func DBGetUnlockableLockedBalanceEntriesWithTxn( @@ -10940,63 +11023,317 @@ func DBGetUnlockableLockedBalanceEntriesWithTxn( hodlerPKID *PKID, profilePKID *PKID, currentTimestampUnixNanoSecs int64, -) ([]*LockedBalanceEntry, error) { - // Retrieve all LockedBalanceEntries from db matching hodlerPKID, profilePKID, and - // UnlockTimestampNanoSecs <= currentTimestampUnixNanoSecs. - // NOTE: While ideally we would start with and - // seek till , - // Badger does not support this functionality as the ValidForPrefix() function - // stops when a mismatched prefix occurs, not a "lexicographically less than" prefix. - // For this reason, we start with - // and iterate backwards while we're valid for the prefix . +) ( + _unlockableUnvestedLockedBalanceEntries []*LockedBalanceEntry, + _unlockableVestedLockedBalanceEntries []*LockedBalanceEntry, + _err error, +) { + // Get vested unlockable locked balance entries. + unlockableVestedLockedBalanceEntries, err := DBGetUnlockableVestedLockedBalanceEntriesWithTxn( + txn, snap, hodlerPKID, profilePKID, currentTimestampUnixNanoSecs) + if err != nil { + return nil, nil, + errors.Wrap(err, "DBGetUnlockableLockedBalanceEntriesWithTxn") + } - if currentTimestampUnixNanoSecs < 0 || currentTimestampUnixNanoSecs == math.MaxInt64-1 { - return nil, fmt.Errorf("DBGetUnlockableLockedBalanceEntriesWithTxn: invalid " + - "block timestamp; this shouldn't be possible") + // Get unvested unlockable locked balance entries. 
+ unlockableUnvestedLockedBalanceEntries, err := DBGetUnlockableUnvestedLockedBalanceEntriesWithTxn( + txn, snap, hodlerPKID, profilePKID, currentTimestampUnixNanoSecs) + if err != nil { + return nil, nil, + errors.Wrap(err, "DBGetUnlockableLockedBalanceEntriesWithTxn") } - // Start at - startKey := _dbKeyForLockedBalanceEntry(LockedBalanceEntry{ - HODLerPKID: hodlerPKID, - ProfilePKID: profilePKID, - UnlockTimestampNanoSecs: currentTimestampUnixNanoSecs, - }) + return unlockableUnvestedLockedBalanceEntries, unlockableVestedLockedBalanceEntries, nil +} - // Valid for prefix - prefixKey := DBPrefixKeyForLockedBalanceEntryByHODLerPKIDandProfilePKID(&LockedBalanceEntry{ - HODLerPKID: hodlerPKID, - ProfilePKID: profilePKID, - }) +func DBGetUnlockableVestedLockedBalanceEntriesWithTxn( + txn *badger.Txn, + snap *Snapshot, + hodlerPKID *PKID, + profilePKID *PKID, + currentTimestampUnixNanoSecs int64, +) ( + _unlockableVestedLockedBalanceEntries []*LockedBalanceEntry, + _err error, +) { + // Start at + startKey := DBKeyForUnvestedLockedBalanceEntryWithVestedType(&LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: currentTimestampUnixNanoSecs, + VestingEndTimestampNanoSecs: currentTimestampUnixNanoSecs, + }) + + // Valid for prefix + // We set the VestingEndTimestampNanoSecs to currentTimestampUnixNanoSecs + 1 to + // make a pusedo-vested LockedBalanceEntry for the purpose of constructing a key prefix. + prefixKey := DBPrefixForLockedBalanceEntriesOnType(&LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: currentTimestampUnixNanoSecs, + VestingEndTimestampNanoSecs: currentTimestampUnixNanoSecs + 1, + }) + + // Create a reverse iterator. + opts := badger.DefaultIteratorOptions + opts.Reverse = true + iterator := txn.NewIterator(opts) + defer iterator.Close() + + // Store relevant LockedBalanceEntries to return. 
+ var lockedBalanceEntries []*LockedBalanceEntry + + // Loop until we've exhausted all unlockable unvested locked balance entries. + for iterator.Seek(startKey); iterator.ValidForPrefix(prefixKey); iterator.Next() { + // Retrieve the LockedBalanceEntryBytes. + lockedBalanceEntryBytes, err := iterator.Item().ValueCopy(nil) + if err != nil { + return nil, + errors.Wrapf(err, "DBGetLimitedVestedLockedBalanceEntriesWithTxn: "+ + "error retrieveing LockedBalanceEntry: ") + } - // Create an iterator. We set the iterator to reverse as per the comment at the top of this function. + // Convert the LockedBalanceEntryBytes to LockedBalanceEntry. + rr := bytes.NewReader(lockedBalanceEntryBytes) + lockedBalanceEntry, err := DecodeDeSoEncoder(&LockedBalanceEntry{}, rr) + if err != nil { + return nil, + errors.Wrapf(err, "DBGetLimitedVestedLockedBalanceEntriesWithTxn: "+ + "error decoding LockedBalanceEntry: ") + } + + // Sanity check the locked balance entry as relevant. + if !lockedBalanceEntry.HODLerPKID.Eq(hodlerPKID) || + !lockedBalanceEntry.ProfilePKID.Eq(profilePKID) || + lockedBalanceEntry.UnlockTimestampNanoSecs > currentTimestampUnixNanoSecs { + return nil, + errors.New("DBGetLimitedVestedLockedBalanceEntriesWithTxn: " + + "found invalid LockedBalanceEntry; this shouldn't happen") + } + + // Add the locked balance entry to the return list. 
+ lockedBalanceEntries = append(lockedBalanceEntries, lockedBalanceEntry) + } + + return lockedBalanceEntries, nil +} + +func DBGetUnlockableUnvestedLockedBalanceEntriesWithTxn( + txn *badger.Txn, + snap *Snapshot, + hodlerPKID *PKID, + profilePKID *PKID, + currentTimestampUnixNanoSecs int64, +) ( + _unlockableUnvestedLockedBalanceEntries []*LockedBalanceEntry, + _err error, +) { + // Start at + startKey := _dbKeyForLockedBalanceEntry(&LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: currentTimestampUnixNanoSecs, + VestingEndTimestampNanoSecs: currentTimestampUnixNanoSecs, + }) + + // Valid for prefix + prefixKey := DBPrefixForLockedBalanceEntriesOnType(&LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: currentTimestampUnixNanoSecs, + VestingEndTimestampNanoSecs: currentTimestampUnixNanoSecs, + }) + + // Create a reverse iterator. opts := badger.DefaultIteratorOptions opts.Reverse = true iterator := txn.NewIterator(opts) defer iterator.Close() - // Store matching LockedBalanceEntries to return + // Store relevant LockedBalanceEntries to return. var lockedBalanceEntries []*LockedBalanceEntry - // Loop. + // Loop until we've exhausted all unlockable unvested locked balance entries. for iterator.Seek(startKey); iterator.ValidForPrefix(prefixKey); iterator.Next() { // Retrieve the LockedBalanceEntryBytes. lockedBalanceEntryBytes, err := iterator.Item().ValueCopy(nil) if err != nil { - return nil, errors.Wrapf(err, "DBGetUnlockableLockedBalanceEntriesWithTxn: "+ + return nil, + errors.Wrapf(err, "DBGetLimitedVestedLockedBalanceEntriesWithTxn: "+ + "error retrieveing LockedBalanceEntry: ") + } + + // Convert the LockedBalanceEntryBytes to LockedBalanceEntry. 
+ rr := bytes.NewReader(lockedBalanceEntryBytes) + lockedBalanceEntry, err := DecodeDeSoEncoder(&LockedBalanceEntry{}, rr) + if err != nil { + return nil, + errors.Wrapf(err, "DBGetLimitedVestedLockedBalanceEntriesWithTxn: "+ + "error decoding LockedBalanceEntry: ") + } + + // Sanity check the locked balance entry as relevant. + if !lockedBalanceEntry.HODLerPKID.Eq(hodlerPKID) || + !lockedBalanceEntry.ProfilePKID.Eq(profilePKID) || + lockedBalanceEntry.UnlockTimestampNanoSecs > currentTimestampUnixNanoSecs { + return nil, + errors.New("DBGetLimitedVestedLockedBalanceEntriesWithTxn: " + + "found invalid LockedBalanceEntry; this shouldn't happen") + } + + // Add the locked balance entry to the return list. + lockedBalanceEntries = append(lockedBalanceEntries, lockedBalanceEntry) + } + + return lockedBalanceEntries, nil +} + +func DBGetLimitedVestedLockedBalanceEntries( + handle *badger.DB, + snap *Snapshot, + hodlerPKID *PKID, + profilePKID *PKID, + unlockTimestampNanoSecs int64, + vestingEndTimestampNanoSecs int64, + limitToFetch int, +) ( + _lockedBalanceEntries []*LockedBalanceEntry, + _err error, +) { + // NOTE: For this operation to work properly, it's important to understand that + // no two vested locked balance entries for a given hodler and profile pair can + // ever overlap in time. This is by design, as during the lockup transaction any + // consolidation to ensure no two vested locked balance entries results in + // overlap occurs. Why is this important? If you imagine a world in which + // overlapping entries are allowed, it makes it extremely inefficient to find + // those entries which straddle unlockTimestampNanoSecs without making a second + // index based on vestingEndTimestampNanoSecs. Our index sorts on + // unlockTimestampNanoSecs first and then vestingEndTimestampNanoSecs later. 
+ // However, because we know that there is no overlap we can simply do a reverse + // iteration on the specified unlockTimestampNanoSecs, check for an overlapping + // entry, and move on. + + var lockedBalanceEntries []*LockedBalanceEntry + var err error + handle.View(func(txn *badger.Txn) error { + lockedBalanceEntries, err = DBGetLimitedVestedLockedBalanceEntriesWithTxn( + txn, snap, hodlerPKID, profilePKID, unlockTimestampNanoSecs, vestingEndTimestampNanoSecs, limitToFetch) + return nil + }) + return lockedBalanceEntries, err +} + +func DBGetLimitedVestedLockedBalanceEntriesWithTxn( + txn *badger.Txn, + snap *Snapshot, + hodlerPKID *PKID, + profilePKID *PKID, + unlockTimestampNanoSecs int64, + vestingEndTimestampNanoSecs int64, + limitToFetch int, +) ( + _lockedBalanceEntries []*LockedBalanceEntry, + _err error, +) { + // Start at + startKey, err := DBPrefixForVestedLockedBalanceEntriesOnUnlockTimestamp(&LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: unlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: vestingEndTimestampNanoSecs, + }) + if err != nil { + return nil, errors.Wrap(err, "DBGetLimitedVestedLockedBalanceEntries") + } + + // Valid for prefix + prefixKey := DBPrefixForLockedBalanceEntriesOnType(&LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + ProfilePKID: profilePKID, + UnlockTimestampNanoSecs: unlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: vestingEndTimestampNanoSecs, + }) + + // Store matching LockedBalanceEntries to return and track entries found. + var lockedBalanceEntries []*LockedBalanceEntry + var entriesFound int + + // Create a backwards iterator. + backwardOpts := badger.DefaultIteratorOptions + backwardOpts.Reverse = true + backwardIterator := txn.NewIterator(backwardOpts) + defer backwardIterator.Close() + + // Seek backwards and check for a vested locked balance entry which straddles the start time. 
+ // NOTE: Per the comment on the badger Seek implementation, the Seek operation in the reverse direction will find + // the largest key less than the specified start key. Because the specified startKey doesn't ever have an + // exact match (as we're using a prefix of an actual key), we know this operation can only ever return either an + // invalid lockedBalanceEntry or a lockedBalanceEntry which straddles the unlockTimestampNanoSecs specified. + // In the case the lockedBalanceEntry straddles the unlockTimestampNanoSecs, we add it to the lockedBalanceEntries + // list and increment the entriesFound. + backwardIterator.Seek(startKey) + if backwardIterator.ValidForPrefix(prefixKey) { + // Retrieve the LockedBalanceEntryBytes. + lockedBalanceEntryBytes, err := backwardIterator.Item().ValueCopy(nil) + if err != nil { + return nil, errors.Wrapf(err, "DBGetLimitedVestedLockedBalanceEntriesWithTxn: "+ "error retrieveing LockedBalanceEntry: ") } - // Convert LockedBalanceEntryBytes to LockedBalanceEntry. + // Convert the LockedBalanceEntryBytes to LockedBalanceEntry. + rr := bytes.NewReader(lockedBalanceEntryBytes) + lockedBalanceEntry, err := DecodeDeSoEncoder(&LockedBalanceEntry{}, rr) + if err != nil { + return nil, errors.Wrapf(err, "DBGetLimitedVestedLockedBalanceEntriesWithTxn: "+ + "error decoding LockedBalanceEntry: ") + } + + // Check if the LockedBalanceEntry straddles the start time of the period specified. + if lockedBalanceEntry.HODLerPKID.Eq(hodlerPKID) && + lockedBalanceEntry.ProfilePKID.Eq(profilePKID) && + lockedBalanceEntry.UnlockTimestampNanoSecs < unlockTimestampNanoSecs && + lockedBalanceEntry.VestingEndTimestampNanoSecs >= unlockTimestampNanoSecs { + lockedBalanceEntries = append(lockedBalanceEntries, lockedBalanceEntry) + entriesFound++ + } + } + + // Create a forward iterator. 
We will use t + forwardOpts := badger.DefaultIteratorOptions + forwardIterator := txn.NewIterator(forwardOpts) + defer forwardIterator.Close() + + // Loop until we find an out of range vested locked balance entry. + for forwardIterator.Seek(startKey); forwardIterator.ValidForPrefix(prefixKey); forwardIterator.Next() { + // Retrieve the LockedBalanceEntryBytes. + lockedBalanceEntryBytes, err := forwardIterator.Item().ValueCopy(nil) + if err != nil { + return nil, errors.Wrapf(err, "DBGetLimitedVestedLockedBalanceEntriesWithTxn: "+ + "error retrieveing LockedBalanceEntry: ") + } + + // Convert the LockedBalanceEntryBytes to LockedBalanceEntry. rr := bytes.NewReader(lockedBalanceEntryBytes) lockedBalanceEntry, err := DecodeDeSoEncoder(&LockedBalanceEntry{}, rr) if err != nil { - return nil, errors.Wrapf(err, "DBGetUnlockableLockedBalanceEntriesWithTxn: "+ + return nil, errors.Wrapf(err, "DBGetLimitedVestedLockedBalanceEntriesWithTxn: "+ "error decoding LockedBalanceEntry: ") } - // This check is redundant. It's included to be extra safe only unlockable locked balance entries are included. - if lockedBalanceEntry.UnlockTimestampNanoSecs < currentTimestampUnixNanoSecs { + // Check if the LockedBalanceEntry is relevant to the limited query. + if lockedBalanceEntry.HODLerPKID.Eq(hodlerPKID) && + lockedBalanceEntry.ProfilePKID.Eq(profilePKID) && + lockedBalanceEntry.UnlockTimestampNanoSecs >= unlockTimestampNanoSecs && + lockedBalanceEntry.UnlockTimestampNanoSecs <= vestingEndTimestampNanoSecs { lockedBalanceEntries = append(lockedBalanceEntries, lockedBalanceEntry) + entriesFound++ + } + + // Check if we've found too many entries. + if entriesFound > limitToFetch { + return nil, errors.New("DBGetLimitedVestedLockedBalanceEntriesWithTxn: " + + "limit exhausted. 
Found too many relevant LockedBalanceEntries.") } } diff --git a/lib/errors.go b/lib/errors.go index a187dcaac..2a8018164 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -491,12 +491,15 @@ const ( // Lockups RuleErrorLockupTxnBeforeBlockHeight RuleError = "RuleErrorLockupTxnBeforeBlockHeight" - RuleErrorCoinLockupInvalidTransactorPKID RuleError = "RuleErrorCoinLockupInvalidTransactorPKID" + RuleErrorCoinLockupInvalidRecipientPKID RuleError = "RuleErrorCoinLockupInvalidRecipientPKID" RuleErrorCoinLockupInvalidLockupDuration RuleError = "RuleErrorCoinLockupInvalidLockupDuration" + RuleErrorCoinLockupInvalidVestingEndTimestamp RuleError = "RuleErrorCoinLockupInvalidVestingEndTimestamp" + RuleErrorCoinLockupInvalidVestedTransactor RuleError = "RuleErrorCoinLockupInvalidVestedTransactor" RuleErrorCoinLockupBalanceEntryDoesNotExist RuleError = "RuleErrorCoinLockupBalanceEntryDoesNotExist" RuleErrorCoinLockupInsufficientCoins RuleError = "RuleErrorCoinLockupInsufficientCoins" RuleErrorCoinLockupInsufficientDeSo RuleError = "RuleErrorCoinLockupInsufficientDeSo" RuleErrorCoinLockupInvalidProfilePubKey RuleError = "RuleErrorCoinLockupInvalidProfilePubKey" + RuleErrorCoinLockupInvalidRecipientPubKey RuleError = "RuleErrorCoinLockupInvalidRecipientPubKey" RuleErrorCoinLockupOnNonExistentProfile RuleError = "RuleErrorCoinLockupOnNonExistentProfile" RuleErrorCoinLockupOfAmountZero RuleError = "RuleErrorCoinLockupOfAmountZero" RuleErrorCoinLockupExcessiveDeSoLockup RuleError = "RuleErrorCoinLockupExcessiveDeSoLockup" @@ -504,6 +507,7 @@ const ( RuleErrorCoinLockupCoinYieldOverflow RuleError = "RuleErrorCoinLockupCoinYieldOverflow" RuleErrorCoinLockupYieldCausesOverflow RuleError = "RuleErrorCoinLockupYieldCausesOverflow" RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry RuleError = "RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry" + RuleErrorCoinLockupViolatesVestingIntersectionLimit RuleError = "RuleErrorCoinLockupViolatesVestingIntersectionLimit" 
RuleErrorCoinLockupTransferOfAmountZero RuleError = "RuleErrorCoinLockupTransferOfAmountZero" RuleErrorCoinLockupTransferOfDeSoCausesOverflow RuleError = "RuleErrorCoinLockupTransferOfDeSoCausesOverflow" RuleErrorCoinLockupTransferRestrictedToProfileOwner RuleError = "RuleErrorCoinLockupTransferRestrictedToCreator" diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index f608e14ac..957ff367e 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -134,6 +134,10 @@ func _mergeGlobalParamEntryDefaults(bav *UtxoView, globalParamsEntry *GlobalPara if globalParamsEntryCopy.FailingTransactionBMFMultiplierBasisPoints == 0 { globalParamsEntryCopy.FailingTransactionBMFMultiplierBasisPoints = bav.Params.DefaultFailingTransactionBMFMultiplierBasisPoints } + if globalParamsEntryCopy.MaximumVestedIntersectionsPerLockupTransaction == 0 { + globalParamsEntryCopy.MaximumVestedIntersectionsPerLockupTransaction = + bav.Params.DefaultMaximumVestedIntersectionsPerLockupTransaction + } // Return the merged result. return globalParamsEntryCopy From 6ff2aba41e3969e2a873ba5f259213919156bd56 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 8 Jan 2024 10:54:42 -0800 Subject: [PATCH 339/762] Remove ZeroPublicKey as possible recipient of locked DeSo tokens. --- lib/block_view_lockups.go | 10 ++- lib/block_view_lockups_test.go | 129 +++++++++++++++++++++++++++++++++ lib/errors.go | 2 + 3 files changed, 140 insertions(+), 1 deletion(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index c4273cc67..592789d17 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -997,7 +997,7 @@ func (bav *UtxoView) _connectCoinLockup( // In the vested case, validate that the underlying profile is the transactor. 
if txMeta.VestingEndTimestampNanoSecs > txMeta.UnlockTimestampNanoSecs && - !reflect.DeepEqual(txn.PublicKey, txMeta.ProfilePublicKey) { + !reflect.DeepEqual(txn.PublicKey, txMeta.ProfilePublicKey.ToBytes()) { return 0, 0, nil, errors.Wrapf(RuleErrorCoinLockupInvalidVestedTransactor, "_connectCoinLockup: Profile "+ "pub key: %v, signer public key: %v", PkToString(txn.PublicKey, bav.Params), @@ -1009,6 +1009,10 @@ func (bav *UtxoView) _connectCoinLockup( return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupInvalidRecipientPubKey, "_connectCoinLockup") } + if txMeta.RecipientPublicKey.IsZeroPublicKey() { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupZeroPublicKeyAsRecipient, "_connectCoinLockup") + } recipientPKIDEntry := bav.GetPKIDForPublicKey(txMeta.RecipientPublicKey.ToBytes()) if recipientPKIDEntry == nil || recipientPKIDEntry.isDeleted { return 0, 0, nil, @@ -2030,6 +2034,10 @@ func (bav *UtxoView) _connectCoinLockupTransfer( // Fetch PKIDs for the recipient, sender, and profile. senderPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) senderPKID := senderPKIDEntry.PKID + if txMeta.RecipientPublicKey.IsZeroPublicKey() { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupTransferToZeroPublicKey, "_connectCoinLockupTransfer") + } receiverPKIDEntry := bav.GetPKIDForPublicKey(txMeta.RecipientPublicKey.ToBytes()) receiverPKID := receiverPKIDEntry.PKID profilePKIDEntry := bav.GetPKIDForPublicKey(txMeta.ProfilePublicKey.ToBytes()) diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index a4b7df373..cc34b166f 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -279,6 +279,26 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidLockupDuration) } + // Attempt to perform a vested lockup with a logically invalid vesting schedule (vest goes into the past). 
+ // (This should fail -- RuleErrorCoinLockupInvalidLockupDuration) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m0Pub, + 1000, 900, uint256.NewInt().SetUint64(1), 950) + require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidVestingEndTimestamp) + } + + // Attempt to perform an unvested lockup with the ZeroPublicKey as the recipient. + // (This should fail -- RuleErrorCoinLockupZeroPublicKeyAsRecipient) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + 1000, 1000, uint256.NewInt().SetUint64(1), 950) + require.Contains(t, err.Error(), RuleErrorCoinLockupZeroPublicKeyAsRecipient) + } + // Attempt to perform a lockup in excess of the user's DESO balance. // (This should fail -- RuleErrorCoinLockupInsufficientDeSo) { @@ -505,6 +525,21 @@ func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { require.Contains(t, err.Error(), RuleErrorCoinLockupTransferSenderEqualsReceiver) } + // Attempt to perform a coin lockup transfer where the receiver is the ZeroPublicKey. + // (This should fail -- RuleErrorCoinLockupTransferSenderEqualsReceiver) + { + _, _, _, err := _coinLockupTransfer( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + &ZeroPublicKey, + NewPublicKey(m0PkBytes), + 0, + MaxUint256) + require.Contains(t, err.Error(), RuleErrorCoinLockupTransferToZeroPublicKey) + } + // Attempt to perform an excessive coin lockup transfer. 
// (This should fail -- RuleErrorCoinLockupTransferInsufficientBalance) { @@ -2990,6 +3025,100 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.True(t, m3LockedBalanceEntry == nil) } +func TestCoinLockupIndirectRecipients(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Attempt to create an indirect recipient of an unvested lockup by having m0 lockup and give to m3. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m3Pub, + 1000, 1000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Verify that m3 received the lockup (not m0) and that m0 was credited properly. + utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + m3PKIDEntry := utxoView.GetPKIDForPublicKey(m3PkBytes) + m3PKID := m3PKIDEntry.PKID + m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) + m0PKID := m0PKIDEntry.PKID + + // Check m3 LockedBalanceEntry + m3LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m3PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 1000, + }) + require.NoError(t, err) + require.True(t, m3LockedBalanceEntry != nil) + require.True(t, m3LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) + + // Check the m0 LockedBalanceEntry as non-existent + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 1000, + }) + require.NoError(t, err) + require.True(t, 
m0LockedBalanceEntry == nil) + + // Attempt to create an indirect recipient of a vested lockup by having m0 lockup and give to m3. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m3Pub, + 1050, 1100, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Check m3 LockedBalanceEntry + m3LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m3PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1050, + VestingEndTimestampNanoSecs: 1100, + }) + require.NoError(t, err) + require.True(t, m3LockedBalanceEntry != nil) + require.True(t, m3LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) + + // Check the m0 LockedBalanceEntry as non-existent + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1050, + VestingEndTimestampNanoSecs: 1100, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry == nil) +} + +func TestVestedDeSoLockupAsInvalid(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Ensure that it's impossible for m0 to lockup deso in a vested lockup. 
+ { + zeroPkString := Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params) + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, zeroPkString, m0Pub, + 1050, 1100, uint256.NewInt().SetUint64(1000), 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidVestedTransactor) + } +} + //---------------------------------------------------------- // (Testing) Lockup Setup Helper Functions //---------------------------------------------------------- diff --git a/lib/errors.go b/lib/errors.go index 2a8018164..ef32f81ff 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -500,6 +500,7 @@ const ( RuleErrorCoinLockupInsufficientDeSo RuleError = "RuleErrorCoinLockupInsufficientDeSo" RuleErrorCoinLockupInvalidProfilePubKey RuleError = "RuleErrorCoinLockupInvalidProfilePubKey" RuleErrorCoinLockupInvalidRecipientPubKey RuleError = "RuleErrorCoinLockupInvalidRecipientPubKey" + RuleErrorCoinLockupZeroPublicKeyAsRecipient RuleError = "RuleErrorCoinLockupZeroPublicKeyAsRecipient" RuleErrorCoinLockupOnNonExistentProfile RuleError = "RuleErrorCoinLockupOnNonExistentProfile" RuleErrorCoinLockupOfAmountZero RuleError = "RuleErrorCoinLockupOfAmountZero" RuleErrorCoinLockupExcessiveDeSoLockup RuleError = "RuleErrorCoinLockupExcessiveDeSoLockup" @@ -518,6 +519,7 @@ const ( RuleErrorCoinLockupTransferInvalidRecipientPubKey RuleError = "RuleErrorCoinLockupTransferInvalidRecipientPubKey" RuleErrorCoinLockupTransferInvalidProfilePubKey RuleError = "RuleErrorCoinLockupTransferInvalidProfilePubKey" RuleErrorCoinLockupTransferOnNonExistentProfile RuleError = "RuleErrorCoinLockupTransferOnNonExistentProfile" + RuleErrorCoinLockupTransferToZeroPublicKey RuleError = "RuleErrorCoinLockupTransferToZeroPublicKey" RuleErrorCoinUnlockOnNonExistentProfile RuleError = "RuleErrorCoinUnlockOnNonExistentProfile" RuleErrorCoinUnlockInvalidHODLerPKID RuleError = "RuleErrorCoinUnlockInvalidHODLerPKID" 
RuleErrorCoinUnlockInvalidProfilePKID RuleError = "RuleErrorCoinUnlockInvalidProfilePKID" From fa2280d8a46d70fea49eb201a751525ff501c33c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 8 Jan 2024 14:28:44 -0500 Subject: [PATCH 340/762] Implement GetTransaction() interface method for legacy mempool (#903) --- lib/legacy_mempool.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 22ab50ac3..bd28e273e 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -6,8 +6,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/btcsuite/btcutil" - "github.com/gernest/mention" "log" "math" "os" @@ -18,6 +16,9 @@ import ( "sync/atomic" "time" + "github.com/btcsuite/btcutil" + "github.com/gernest/mention" + "github.com/dgraph-io/badger/v3" "github.com/btcsuite/btcd/btcec" @@ -248,8 +249,11 @@ func (mp *DeSoMempool) RemoveTransaction(txnHash *BlockHash) error { } func (mp *DeSoMempool) GetTransaction(txnHash *BlockHash) *MempoolTransaction { - //TODO implement me - panic("implement me") + mempoolTx, exists := mp.readOnlyUniversalTransactionMap[*txnHash] + if !exists { + return nil + } + return NewMempoolTransaction(mempoolTx.Tx, uint64(mempoolTx.Added.UnixMicro())) } func (mp *DeSoMempool) GetTransactions() []*MempoolTransaction { From be7f00f647bea25a8228ac330ba10210757c5e01 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 8 Jan 2024 11:33:09 -0800 Subject: [PATCH 341/762] Fix vested earnings computation; add corresponding test for unlocks. 
--- lib/block_view_lockups.go | 40 ++++---- lib/block_view_lockups_test.go | 165 +++++++++++++++++++++++++++++++++ 2 files changed, 189 insertions(+), 16 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 592789d17..279ee0fee 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -2376,6 +2376,15 @@ func (bav *UtxoView) _connectCoinUnlock( // Create and modify a copy to prevent pointer reuse. modifiedLockedBalanceEntry := unlockableLockedBalanceEntry.Copy() modifiedLockedBalanceEntry.UnlockTimestampNanoSecs = blockTimestampNanoSecs + newBalanceBaseUnits, err := SafeUint256().Sub( + &modifiedLockedBalanceEntry.BalanceBaseUnits, + amountToUnlock) + if err != nil { + return 0, 0, nil, + errors.New("_connectCoinUnlock: newBalanceBaseUnits underflow; " + + "this shouldn't be possible") + } + modifiedLockedBalanceEntry.BalanceBaseUnits = *newBalanceBaseUnits // SET the modified key. bav._setLockedBalanceEntry(modifiedLockedBalanceEntry) @@ -2461,28 +2470,27 @@ func CalculateVestedEarnings( // Here we know that: // UnlockTimestampNanoSecs < blockTimestampNanoSecs < VestingEndTimestampNanoSecs // Now we compute the fraction of time that's passed. - numerator := blockTimestampNanoSecs - lockedBalanceEntry.UnlockTimestampNanoSecs - denominator := lockedBalanceEntry.VestingEndTimestampNanoSecs - lockedBalanceEntry.UnlockTimestampNanoSecs - fractionEarned, err := SafeUint256().Div( - uint256.NewInt().SetUint64(uint64(numerator)), - uint256.NewInt().SetUint64(uint64(denominator))) - if err != nil { - return uint256.NewInt(), - errors.Wrap(err, "ComputeVestedEarnings failed to compute fraction of time elapsed") - } + numerator := uint256.NewInt().SetUint64( + uint64(blockTimestampNanoSecs - lockedBalanceEntry.UnlockTimestampNanoSecs)) + denominator := uint256.NewInt().SetUint64( + uint64(lockedBalanceEntry.VestingEndTimestampNanoSecs - lockedBalanceEntry.UnlockTimestampNanoSecs)) - // Sanity check that fractionEarned <= 1. 
- if fractionEarned.Gt(uint256.NewInt().SetUint64(1)) { + // Compute the numerator (lockedBalanceEntry.BalanceBaseUnits * numerator). + var err error + numerator, err = SafeUint256().Mul( + &lockedBalanceEntry.BalanceBaseUnits, + numerator) + if err != nil { return uint256.NewInt(), - errors.New("ComputeVestedEarnings: Found fractionEarned > 1; this shouldn't be possible") + errors.Wrap(err, "ComputeVestedEarnings failed to compute multiplication (time elapsed * balance)") } // Compute the vested earnings. - vestedEarnings, err := SafeUint256().Mul( - &lockedBalanceEntry.BalanceBaseUnits, - fractionEarned) + vestedEarnings, err := SafeUint256().Div(numerator, denominator) if err != nil { - return uint256.NewInt(), errors.Wrap(err, "ComputeVestedEarnings failed to compute vested earnings") + return uint256.NewInt(), + errors.Wrap(err, "ComputeVestedEarnings failed to compute division "+ + "((time elapsed * balance) / total time)") } // Sanity check that vestedEarnings < BalanceBaseUnits diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index cc34b166f..dc22a0969 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -3119,6 +3119,171 @@ func TestVestedDeSoLockupAsInvalid(t *testing.T) { } } +func TestSimpleVestedLockup(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Perform a simple vested lockup in the future over 1000ns. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Get the original m0 balance entry base units. 
+ utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) + m0PKID := m0PKIDEntry.PKID + originalBalanceEntry, _, _ := + utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) + + // Perform a simple unlock halfway through the vest. Ensure the payout is 500 base units. + { + _, _, _, err := _coinUnlockWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + m0Pub, + 1500, + ) + require.NoError(t, err) + } + + // Verify that the locked balance entry was credited. + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + + // Check m0 LockedBalanceEntry + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry == nil) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1500, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + + // Get the updated m0 balance entry base units and ensure it's been credited 500 base units. 
+ utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + updatedBalanceEntry, _, _ := + utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) + require.True(t, uint256.NewInt().SetUint64(500).Eq( + uint256.NewInt().Sub( + &updatedBalanceEntry.BalanceNanos, + &originalBalanceEntry.BalanceNanos))) + originalBalanceEntry = updatedBalanceEntry + + // Perform another simple unlock halfway through the remaining vest. Ensure the payout is 250 base units. + { + _, _, _, err := _coinUnlockWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + m0Pub, + 1750, + ) + require.NoError(t, err) + } + + // Verify that the locked balance entry was credited. + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + + // Check m0 LockedBalanceEntry + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1500, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry == nil) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1750, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) + + // Get the updated m0 balance entry base units and ensure it's been credited 250 base units. 
+ utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + updatedBalanceEntry, _, _ = + utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) + require.True(t, uint256.NewInt().SetUint64(250).Eq( + uint256.NewInt().Sub( + &updatedBalanceEntry.BalanceNanos, + &originalBalanceEntry.BalanceNanos))) + originalBalanceEntry = updatedBalanceEntry + + // Try and unlock the remaining amount. + { + _, _, _, err := _coinUnlockWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + m0Pub, + 2000, + ) + require.NoError(t, err) + } + + // Verify that the locked balance entry was credited. + utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + + // Check m0 LockedBalanceEntry + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1750, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry == nil) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 2000, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry == nil) + + // Get the updated m0 balance entry base units and ensure it's been credited 250 base units. 
+ utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + updatedBalanceEntry, _, _ = + utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) + require.True(t, uint256.NewInt().SetUint64(250).Eq( + uint256.NewInt().Sub( + &updatedBalanceEntry.BalanceNanos, + &originalBalanceEntry.BalanceNanos))) + originalBalanceEntry = updatedBalanceEntry + + // Check that we're back to where we started (1e6 base units) + require.True(t, uint256.NewInt().SetUint64(1e6).Eq(&updatedBalanceEntry.BalanceNanos)) +} + //---------------------------------------------------------- // (Testing) Lockup Setup Helper Functions //---------------------------------------------------------- From 9fd4e336b8a6b27dce305baed91deb51aa2e58f6 Mon Sep 17 00:00:00 2001 From: Lazy Nina <> Date: Mon, 8 Jan 2024 17:49:57 -0500 Subject: [PATCH 342/762] Add extra data to txn lockup construction --- lib/block_view_lockups.go | 34 ++++++++++++++++++++++++++++++++++ lib/block_view_lockups_test.go | 23 ++++++++++++----------- lib/blockchain.go | 13 +++++++++---- 3 files changed, 55 insertions(+), 15 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 279ee0fee..bfd3e4067 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -480,6 +480,10 @@ func (lockupYieldCurvePoint *LockupYieldCurvePoint) ToMapKey() LockupYieldCurveP } } +func (lockupYieldCurvePoint *LockupYieldCurvePoint) IsDeleted() bool { + return lockupYieldCurvePoint.isDeleted +} + // DeSoEncoder Interface Implementation for LockupYieldCurvePoint func (lockupYieldCurvePoint *LockupYieldCurvePoint) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { @@ -659,6 +663,36 @@ func (bav *UtxoView) GetLocalYieldCurvePoints(profilePKID *PKID, lockupDuration return leftLockupPoint, rightLockupPoint, nil } +func (bav *UtxoView) GetAllYieldCurvePoints(profilePKID *PKID) 
(map[LockupYieldCurvePointKey]*LockupYieldCurvePoint, error) { + // Fetch all yield curve points in the db. + dbYieldCurvePoints, err := DBGetAllYieldCurvePointsByProfilePKID( + bav.GetDbAdapter().badgerDb, bav.Snapshot, profilePKID) + if err != nil { + return nil, errors.Wrap(err, "GetLocalYieldCurvePoints") + } + + // Cache the db points in the view. + // While there's more efficient ways to do this with specialized badger seek operations, this is sufficient for now. + if len(dbYieldCurvePoints) > 0 { + // Check if there's a yield curve in the view for the associated profile. + if _, mapInView := bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID]; !mapInView { + bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID] = + make(map[LockupYieldCurvePointKey]*LockupYieldCurvePoint) + } + + // Check if any of the points needs to be cached in the view. + for _, yieldCurvePoint := range dbYieldCurvePoints { + _, pointInView := + bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID][yieldCurvePoint.ToMapKey()] + if !pointInView { + bav._setLockupYieldCurvePoint(yieldCurvePoint) + } + } + } + + return bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID], nil +} + // // TYPES: CoinLockupMetadata // diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index dc22a0969..3f0d3cabd 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -1549,7 +1549,7 @@ func TestLockupWithDerivedKey(t *testing.T) { txMeta.UnlockTimestampNanoSecs, txMeta.VestingEndTimestampNanoSecs, txMeta.LockupAmountBaseUnits, - testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) case TxnTypeUpdateCoinLockupParams: txMeta := inputTxn.TxnMeta.(*UpdateCoinLockupParamsMetadata) @@ -1560,7 +1560,7 @@ func TestLockupWithDerivedKey(t *testing.T) { txMeta.RemoveYieldCurvePoint, 
txMeta.NewLockupTransferRestrictions, txMeta.LockupTransferRestrictionStatus, - testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) case TxnTypeCoinLockupTransfer: txMeta := inputTxn.TxnMeta.(*CoinLockupTransferMetadata) @@ -1570,14 +1570,14 @@ func TestLockupWithDerivedKey(t *testing.T) { txMeta.ProfilePublicKey.ToBytes(), txMeta.UnlockTimestampNanoSecs, txMeta.LockedCoinsToTransferBaseUnits, - testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) case TxnTypeCoinUnlock: txMeta := inputTxn.TxnMeta.(*CoinUnlockMetadata) txn, _, _, _, err = testMeta.chain.CreateCoinUnlockTxn( transactorPkBytes, txMeta.ProfilePublicKey.ToBytes(), - testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) default: return 0, errors.New("invalid txn type") @@ -2788,17 +2788,17 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { updateTxn, _, _, _, err := testMeta.chain.CreateUpdateCoinLockupParamsTxn( m0PkBytes, 365*24*60*60*1e9, 1000, false, true, TransferRestrictionStatusProfileOwnerOnly, - testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) _signTxn(t, updateTxn, m0Priv) lockupTxn, _, _, _, err := testMeta.chain.CreateCoinLockupTxn( m0PkBytes, m0PkBytes, m0PkBytes, tipTimestamp+2e9, tipTimestamp+2e9, - uint256.NewInt().SetUint64(1000), testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + uint256.NewInt().SetUint64(1000), nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) _signTxn(t, lockupTxn, m0Priv) transferTxn, _, _, _, err := testMeta.chain.CreateCoinLockupTransferTxn( m0PkBytes, m3PkBytes, m0PkBytes, tipTimestamp+2e9, - uint256.NewInt().SetUint64(1000), testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + uint256.NewInt().SetUint64(1000), 
nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) _signTxn(t, transferTxn, m0Priv) @@ -2866,7 +2866,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { // Construct transactions unlockTxn, _, _, _, err := testMeta.chain.CreateCoinUnlockTxn( - m3PkBytes, m0PkBytes, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + m3PkBytes, m0PkBytes, nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) _signTxn(t, unlockTxn, m3Priv) @@ -3490,6 +3490,7 @@ func _coinLockupWithConnectTimestamp( unlockTimestampNanoSecs, vestingEndTimestampNanoSecs, lockupAmountBaseUnits, + nil, feeRateNanosPerKB, nil, []*DeSoOutput{}) if err != nil { @@ -3578,7 +3579,7 @@ func _updateCoinLockupParams(t *testing.T, chain *Blockchain, db *badger.DB, // Create the update coin lockup params transaction. txn, totalInputMake, _, feesMake, err := chain.CreateUpdateCoinLockupParamsTxn( transactorPkBytes, lockupYieldDurationNanoSecs, lockupYieldAPYBasisPoints, removeYieldCurvePoint, - newLockupTransferRestrictions, lockupTransferRestrictionStatus, feeRateNanosPerKB, nil, []*DeSoOutput{}) + newLockupTransferRestrictions, lockupTransferRestrictionStatus, nil, feeRateNanosPerKB, nil, []*DeSoOutput{}) if err != nil { return nil, nil, 0, err } @@ -3657,7 +3658,7 @@ func _coinLockupTransfer(t *testing.T, chain *Blockchain, db *badger.DB, // Create the update coin lockup params transaction. txn, totalInputMake, _, feesMake, err := chain.CreateCoinLockupTransferTxn( transactorPkBytes, recipientPublicKey.ToBytes(), profilePublicKey.ToBytes(), unlockTimestampNanoSecs, - lockedCoinsToTransferBaseUnits, feeRateNanosPerKB, nil, []*DeSoOutput{}) + lockedCoinsToTransferBaseUnits, nil, feeRateNanosPerKB, nil, []*DeSoOutput{}) if err != nil { return nil, nil, 0, err } @@ -3734,7 +3735,7 @@ func _coinUnlockWithConnectTimestamp(t *testing.T, chain *Blockchain, db *badger // Create the coin unlock transaction. 
txn, totalInputMake, _, feesMake, err := chain.CreateCoinUnlockTxn( - transactorPkBytes, profilePkBytes, feeRateNanosPerKB, nil, []*DeSoOutput{}) + transactorPkBytes, profilePkBytes, nil, feeRateNanosPerKB, nil, []*DeSoOutput{}) if err != nil { return nil, nil, 0, err } diff --git a/lib/blockchain.go b/lib/blockchain.go index f62816466..cda1e33b9 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5483,8 +5483,9 @@ func (bc *Blockchain) CreateCoinLockupTxn( UnlockTimestampNanoSecs int64, VestingEndTimestampNanoSecs int64, LockupAmountBaseUnits *uint256.Int, + extraData map[string][]byte, minFeeRateNanosPerKB uint64, - mempool *DeSoMempool, + mempool Mempool, additionalOutputs []*DeSoOutput, ) (_txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { @@ -5503,6 +5504,7 @@ func (bc *Blockchain) CreateCoinLockupTxn( LockupAmountBaseUnits: LockupAmountBaseUnits, }, TxOutputs: additionalOutputs, + ExtraData: extraData, // The signature will be added once other transaction fields are finalized. } @@ -5536,7 +5538,7 @@ func (bc *Blockchain) CreateCoinLockupTransferTxn( UnlockTimestampNanoSecs int64, LockedCoinsToTransferBaseUnits *uint256.Int, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( + extraData map[string][]byte, minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // NOTE: TxInputs is a remnant of the UTXO transaction model. @@ -5553,6 +5555,7 @@ func (bc *Blockchain) CreateCoinLockupTransferTxn( LockedCoinsToTransferBaseUnits: LockedCoinsToTransferBaseUnits, }, TxOutputs: additionalOutputs, + ExtraData: extraData, // The signature will be added once other transaction fields are finalized. 
} @@ -5587,7 +5590,7 @@ func (bc *Blockchain) CreateUpdateCoinLockupParamsTxn( NewLockupTransferRestrictions bool, LockupTransferRestrictionStatus TransferRestrictionStatus, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( + extraData map[string][]byte, minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _changeAmount uint64, _fees uint64, _err error) { // NOTE: TxInputs is a remnant of the UTXO transaction model. @@ -5605,6 +5608,7 @@ func (bc *Blockchain) CreateUpdateCoinLockupParamsTxn( LockupTransferRestrictionStatus: LockupTransferRestrictionStatus, }, TxOutputs: additionalOutputs, + ExtraData: extraData, // The signature will be added once other transaction fields are finalized. } @@ -5635,7 +5639,7 @@ func (bc *Blockchain) CreateCoinUnlockTxn( TransactorPublicKey []byte, ProfilePublicKey []byte, // Standard transaction fields - minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( + extraData map[string][]byte, minFeeRateNanosPerKB uint64, mempool Mempool, additionalOutputs []*DeSoOutput) ( _txn *MsgDeSoTxn, _totalInput uint64, _chainAmount uint64, _fees uint64, _err error) { // NOTE: TxInputs is a remnant of the UTXO transaction model. @@ -5647,6 +5651,7 @@ func (bc *Blockchain) CreateCoinUnlockTxn( PublicKey: TransactorPublicKey, TxnMeta: &CoinUnlockMetadata{ProfilePublicKey: NewPublicKey(ProfilePublicKey)}, TxOutputs: additionalOutputs, + ExtraData: extraData, // The signature will be added once other transaction fields are finalized. } From 66d151a305aa2347461725175cc6a7e62c6cbe6e Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Tue, 9 Jan 2024 10:44:19 -0800 Subject: [PATCH 343/762] Add overlap tests for vested lockups. 
--- lib/block_view_lockups.go | 176 ++++++++++++------ lib/block_view_lockups_test.go | 315 +++++++++++++++++++++++++++++++++ 2 files changed, 434 insertions(+), 57 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 279ee0fee..007dcf590 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1210,7 +1210,7 @@ func (bav *UtxoView) _connectCoinLockup( // (1) Check for overlapping locked balance entries lockedBalanceEntries, err := bav.GetLimitedVestedLockedBalanceEntriesOverTimeInterval( hodlerPKID, profilePKID, txMeta.UnlockTimestampNanoSecs, txMeta.VestingEndTimestampNanoSecs, - bav.GlobalParamsEntry.MaximumVestedIntersectionsPerLockupTransaction) + bav.GetCurrentGlobalParamsEntry().MaximumVestedIntersectionsPerLockupTransaction) if err != nil && errors.Is(err, RuleErrorCoinLockupViolatesVestingIntersectionLimit) { return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupViolatesVestingIntersectionLimit, "_connectCoinLockup") @@ -1255,7 +1255,7 @@ func (bav *UtxoView) _connectCoinLockup( BalanceBaseUnits: *lockupValue, } - for ii, lockedBalanceEntry := range lockedBalanceEntries { + for ii, existingLockedBalanceEntry := range lockedBalanceEntries { // (3b-i) Determine if there is left overhang by either the existing or the proposed locked balance entry // e.g. UnlockTimestampNanoSecs --------------------- VestingEndTimestampNanoSecs // UnlockTimestampNanoSecs --------------------- VestingEndTimestampNanoSecs @@ -1264,37 +1264,46 @@ func (bav *UtxoView) _connectCoinLockup( // We will break any overhang off into its own separate locked balance entry. // Check for left overhang by the existing locked balance entry - if lockedBalanceEntry.UnlockTimestampNanoSecs < proposedLockedBalanceEntry.UnlockTimestampNanoSecs { + if existingLockedBalanceEntry.UnlockTimestampNanoSecs < + proposedLockedBalanceEntry.UnlockTimestampNanoSecs { + // Split the overhanging portion off the existing locked balance entry. 
+ // Following the split, the existing and the remaining should have the same start time. splitLockedBalanceEntry, remainingLockedBalanceEntry, err := SplitVestedLockedBalanceEntry( - proposedLockedBalanceEntry, - lockedBalanceEntry.UnlockTimestampNanoSecs, - proposedLockedBalanceEntry.UnlockTimestampNanoSecs) + existingLockedBalanceEntry, + existingLockedBalanceEntry.UnlockTimestampNanoSecs, + proposedLockedBalanceEntry.UnlockTimestampNanoSecs-1) if err != nil { return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup failed to compute vested split") } + // Set the splitLockedBalanceEntry into the view. // NOTE: While it may seem as though we need to check for a conflicting vested // locked balance entry here, by design we only ever have one vested locked // balance entry across any given time interval thus by splitting the locked // balance entry in half it's impossible to intersect an existing // vested locked balance entry. bav._setLockedBalanceEntry(splitLockedBalanceEntry) - proposedLockedBalanceEntry = remainingLockedBalanceEntry + + // We update the existingLockedBalanceEntry as broke the left overhanging portion off. + existingLockedBalanceEntry = remainingLockedBalanceEntry } // Check for left overhang by the proposed locked balance entry - if proposedLockedBalanceEntry.UnlockTimestampNanoSecs < lockedBalanceEntry.UnlockTimestampNanoSecs { + if proposedLockedBalanceEntry.UnlockTimestampNanoSecs < existingLockedBalanceEntry.UnlockTimestampNanoSecs { splitLockedBalanceEntry, remainingLockedBalanceEntry, err := SplitVestedLockedBalanceEntry( proposedLockedBalanceEntry, proposedLockedBalanceEntry.UnlockTimestampNanoSecs, - lockedBalanceEntry.UnlockTimestampNanoSecs) + existingLockedBalanceEntry.UnlockTimestampNanoSecs-1) if err != nil { return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup failed to compute vested split") } + // Set the splitLockedBalanceEntry into the view. 
bav._setLockedBalanceEntry(splitLockedBalanceEntry) + + // We update the proposedLockedBalanceEntry as the left overhanging portion was broken off. proposedLockedBalanceEntry = remainingLockedBalanceEntry } @@ -1306,68 +1315,75 @@ func (bav *UtxoView) _connectCoinLockup( // We will break any overhang off into its own separate locked balance entry. // Check for right overhang by the existing locked balance entry - if lockedBalanceEntry.VestingEndTimestampNanoSecs > + if existingLockedBalanceEntry.VestingEndTimestampNanoSecs > proposedLockedBalanceEntry.VestingEndTimestampNanoSecs { splitLockedBalanceEntry, remainingLockedBalanceEntry, err := SplitVestedLockedBalanceEntry( - proposedLockedBalanceEntry, - proposedLockedBalanceEntry.VestingEndTimestampNanoSecs, - lockedBalanceEntry.VestingEndTimestampNanoSecs) + existingLockedBalanceEntry, + proposedLockedBalanceEntry.VestingEndTimestampNanoSecs+1, + existingLockedBalanceEntry.VestingEndTimestampNanoSecs) if err != nil { return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup failed to compute vested split") } + // Set the splitLockedBalanceEntry into the view. bav._setLockedBalanceEntry(splitLockedBalanceEntry) - proposedLockedBalanceEntry = remainingLockedBalanceEntry - // (3b-iii) On the final iteration, consolidate the remaining portions - // e.g. UnlockTimestampNanoSecs --------------------- VestingEndTimestampNanoSecs - // UnlockTimestampNanoSecs --------------------- VestingEndTimestampNanoSecs - // We know them to be aligned at this step as the previous step truncated them, however - // we will perform a sanity check just to be safe. 
- if ii == len(lockedBalanceEntries)-1 && - (lockedBalanceEntry.UnlockTimestampNanoSecs != - proposedLockedBalanceEntry.UnlockTimestampNanoSecs) || - (lockedBalanceEntry.VestingEndTimestampNanoSecs != - proposedLockedBalanceEntry.VestingEndTimestampNanoSecs) { - return 0, 0, nil, - errors.New("_connectCoinLockup found mismatched unlock and vesting end " + - "timestamps; this shouldn't happen") - } - if ii == len(lockedBalanceEntries)-1 { - finalConsolidatedBalance, err := SafeUint256().Add( - &proposedLockedBalanceEntry.BalanceBaseUnits, &lockedBalanceEntry.BalanceBaseUnits) - if err != nil { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry, - "_connectCoinLockup") - } - proposedLockedBalanceEntry.BalanceBaseUnits = *finalConsolidatedBalance - bav._setLockedBalanceEntry(proposedLockedBalanceEntry) - } + // We update the existingLockedBalanceEntry as broke the right overhanging portion off. + existingLockedBalanceEntry = remainingLockedBalanceEntry } // Check for right overhang by the proposed locked balance entry if proposedLockedBalanceEntry.VestingEndTimestampNanoSecs > - lockedBalanceEntry.VestingEndTimestampNanoSecs { + existingLockedBalanceEntry.VestingEndTimestampNanoSecs { + // NOTE: This case is particularly interesting as there's two situations in + // which we might find ourselves. + // + // First case: + // proposed: <-----------------------------------------> + // existing: <------------------> + // ^ ^ Overhang + // Second case: + // proposed: <-----------------------------------------> + // existing: <------------------> <---------> + // ^ ^ Overhang + // In the second case there exists another conflicting LockedBalanceEntry + // sometime in the future that we must be aware of. Also note that + // the first case is only possible in the very last iteration. + // + // To account for this, we split from the proposed LockedBalanceEntry + // and combine the left overlapping portion. 
This leaves us with two + // remaining cases: + // First case: + // proposed: <---------------------> + // existing: + // + // Second case: + // proposed: <---------------------> + // existing: <---------> + // + // The second case is fine to leave as it will be taken care of in the + // subsequent iteration. However, we make a special note to capture the + // remaining LockedBalanceEntry present in the first case. + // We check if there's another locked balance entry sometime in the future. - splitTimestampEnd := lockedBalanceEntry.VestingEndTimestampNanoSecs + splitTimestampEnd := existingLockedBalanceEntry.VestingEndTimestampNanoSecs if ii != len(lockedBalanceEntries)-1 { - splitTimestampEnd = lockedBalanceEntries[ii+1].UnlockTimestampNanoSecs + splitTimestampEnd = lockedBalanceEntries[ii+1].UnlockTimestampNanoSecs - 1 } splitLockedBalanceEntry, remainingLockedBalanceEntry, err := SplitVestedLockedBalanceEntry( proposedLockedBalanceEntry, - lockedBalanceEntry.VestingEndTimestampNanoSecs, + existingLockedBalanceEntry.UnlockTimestampNanoSecs, splitTimestampEnd) if err != nil { return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup failed to compute vested split") } - // Consolidate the overlapping special case. + // Consolidate the split and existing locked balance entry. combinedBalanceBaseUnits, err := SafeUint256().Add( &splitLockedBalanceEntry.BalanceBaseUnits, - &lockedBalanceEntry.BalanceBaseUnits) + &existingLockedBalanceEntry.BalanceBaseUnits) if err != nil { return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry, @@ -1375,7 +1391,10 @@ func (bav *UtxoView) _connectCoinLockup( } splitLockedBalanceEntry.BalanceBaseUnits = *combinedBalanceBaseUnits + // Set the now combined splitLockedBalanceEntry into the view. bav._setLockedBalanceEntry(splitLockedBalanceEntry) + + // Update the proposed locked balance entry with the remaining portion. 
proposedLockedBalanceEntry = remainingLockedBalanceEntry // (3b-iii) On the final iteration, the remaining proposedLockedBalanceEntry @@ -1384,6 +1403,27 @@ func (bav *UtxoView) _connectCoinLockup( bav._setLockedBalanceEntry(proposedLockedBalanceEntry) } } + + // (3b-iv) By now, we know the edges to be trimmed as best possible. + // We check if the existing and proposed now overlap perfectly in time and combine if so. + if (existingLockedBalanceEntry.UnlockTimestampNanoSecs == + proposedLockedBalanceEntry.UnlockTimestampNanoSecs) && + (existingLockedBalanceEntry.VestingEndTimestampNanoSecs == + proposedLockedBalanceEntry.VestingEndTimestampNanoSecs) { + // Combine the remaining balance. + combinedBalanceBaseUnits, err := SafeUint256().Add( + &existingLockedBalanceEntry.BalanceBaseUnits, + &proposedLockedBalanceEntry.BalanceBaseUnits) + if err != nil { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry, + "_connectCoinLockup") + } + + // Update the remaining entry. + proposedLockedBalanceEntry.BalanceBaseUnits = *combinedBalanceBaseUnits + bav._setLockedBalanceEntry(proposedLockedBalanceEntry) + } } } } @@ -1446,12 +1486,17 @@ func SplitVestedLockedBalanceEntry( } // Create the remaining locked balance entry. + // NOTE: The SplitVestedLockedBalanceEntry function is designed such that + // the portion being split off from the LockedBalanceEntry either starts + // at the lockedBalanceEntry.UnlockTimestampNanoSecs OR ends at the + // lockedBalanceEntry.VestingEndTimestampNanoSecs. Based on these two cases + // we can determine the remaining LockedBalanceEntry. 
var remainingLockedBalanceEntry *LockedBalanceEntry if startSplitTimestampNanoSecs == lockedBalanceEntry.UnlockTimestampNanoSecs { remainingLockedBalanceEntry = &LockedBalanceEntry{ HODLerPKID: lockedBalanceEntry.HODLerPKID, ProfilePKID: lockedBalanceEntry.ProfilePKID, - UnlockTimestampNanoSecs: startSplitTimestampNanoSecs + 1, + UnlockTimestampNanoSecs: endSplitTimestampNanoSecs + 1, VestingEndTimestampNanoSecs: lockedBalanceEntry.VestingEndTimestampNanoSecs, BalanceBaseUnits: uint256.Int{}, } @@ -1461,19 +1506,36 @@ func SplitVestedLockedBalanceEntry( HODLerPKID: lockedBalanceEntry.HODLerPKID, ProfilePKID: lockedBalanceEntry.ProfilePKID, UnlockTimestampNanoSecs: lockedBalanceEntry.UnlockTimestampNanoSecs, - VestingEndTimestampNanoSecs: endSplitTimestampNanoSecs - 1, + VestingEndTimestampNanoSecs: startSplitTimestampNanoSecs - 1, BalanceBaseUnits: uint256.Int{}, } } // Compute the balance in the split locked balance entry. - splitValue, err := CalculateLockupSplitValue( - lockedBalanceEntry, startSplitTimestampNanoSecs, endSplitTimestampNanoSecs) - if err != nil { - return nil, nil, - errors.Wrap(err, "SplitVestedLockedBalanceEntry failed to compute split value") + // NOTE: The reason we add 1 in the first case is because a 1 nanosecond loss occurs + // between the end of the splitLockedBalanceEntry and the beginning of the remainingLockedBalanceEntry + // that would cause small numerical differences between both cases otherwise. It's easier + // to see this by drawing a straight line to represent time and imagining where the + // startSplitTimestampNanoSecs and endSplitTimestampNanoSecs are on the line in both cases. 
+ var splitValue *uint256.Int + var err error + if startSplitTimestampNanoSecs == lockedBalanceEntry.UnlockTimestampNanoSecs { + splitValue, err = CalculateLockupSplitValue( + lockedBalanceEntry, startSplitTimestampNanoSecs, endSplitTimestampNanoSecs+1) + if err != nil { + return nil, nil, + errors.Wrap(err, "SplitVestedLockedBalanceEntry failed to compute split value") + } + splitLockedBalanceEntry.BalanceBaseUnits = *splitValue + } else { + splitValue, err = CalculateLockupSplitValue( + lockedBalanceEntry, startSplitTimestampNanoSecs, endSplitTimestampNanoSecs) + if err != nil { + return nil, nil, + errors.Wrap(err, "SplitVestedLockedBalanceEntry failed to compute split value") + } + splitLockedBalanceEntry.BalanceBaseUnits = *splitValue } - splitLockedBalanceEntry.BalanceBaseUnits = *splitValue // Compute the balance in the remaining locked balance entry. remainingValue, err := SafeUint256().Sub(&lockedBalanceEntry.BalanceBaseUnits, splitValue) @@ -1503,7 +1565,7 @@ func SplitVestedLockedBalanceEntry( func CalculateLockupSplitValue( lockedBalanceEntry *LockedBalanceEntry, - starTimestampNanoSecs int64, + startTimestampNanoSecs int64, endTimestampNanoSecs int64, ) ( _splitValue *uint256.Int, @@ -1512,7 +1574,7 @@ func CalculateLockupSplitValue( // Compute the time that passes over the interval [startTimestampNanoSecs, endTimestampNanoSecs] numerator, err := SafeUint256().Sub( uint256.NewInt().SetUint64(uint64(endTimestampNanoSecs)), - uint256.NewInt().SetUint64(uint64(starTimestampNanoSecs))) + uint256.NewInt().SetUint64(uint64(startTimestampNanoSecs))) if err != nil { return nil, errors.Wrap(err, "CalculateLockupSplitValue: "+ "(start timestamp - end timestamp) underflow") @@ -1535,12 +1597,12 @@ func CalculateLockupSplitValue( numerator, err = SafeUint256().Mul(numerator, &lockedBalanceEntry.BalanceBaseUnits) if err != nil { return nil, errors.Wrap(err, "CalculateLockupSplitValue: "+ - "(start timestamp - end timestamp) * lockedBalanceEntry.Balance overflow") + 
"((start timestamp - end timestamp) + 1) * lockedBalanceEntry.Balance overflow") } splitValue, err := SafeUint256().Div(numerator, denominator) if err != nil { return nil, errors.Wrap(err, "CalculateLockupSplitValue: "+ - "((start timestamp - end timestamp) * lockedBalanceEntry.Balance overflow)) / "+ + "(((start timestamp - end timestamp) + 1) * lockedBalanceEntry.Balance overflow)) / "+ "(lockedBalanceEntry.UnlockTimestamp - lockedBalanceEntry.VestingEndTimestamp) has zero denominator") } diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index dc22a0969..c92278862 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -3284,6 +3284,321 @@ func TestSimpleVestedLockup(t *testing.T) { require.True(t, uint256.NewInt().SetUint64(1e6).Eq(&updatedBalanceEntry.BalanceNanos)) } +func TestNoOverlapVestedLockupConsolidation(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Perform a simple vested lockup in the future over 1000ns. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a second simple vested lockup in the future that causes a left overhang. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 3000, 4000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Verify that the left overhang was computed correctly. 
+ utxoView, err := + NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + + // Check m0 LockedBalanceEntry + m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) + m0PKID := m0PKIDEntry.PKID + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 3000, + VestingEndTimestampNanoSecs: 4000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) +} + +func TestPerfectOverlapVestedLockupConsolidation(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Perform a simple vested lockup in the future over 1000ns. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a second simple vested lockup in the future that causes a left overhang. 
+ { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Verify that the left overhang was computed correctly. + utxoView, err := + NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + + // Check m0 LockedBalanceEntry + m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) + m0PKID := m0PKIDEntry.PKID + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(2000))) +} + +func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Submit a transaction updating the MaximumVestedIntersectionsPerLockupTransaction + + // First we test the following vested lockup consolidation type: + // existing lockup: -------------------------------- + // proposed lockup: ------------------- + // overhang: ^ ^ + // + // In theory the below operation should generate just two locked balance entries. + + // Perform a simple vested lockup in the future over 1000ns. 
+ { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a second simple vested lockup in the future that causes a left overhang. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1500, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Verify that the left overhang was computed correctly. + utxoView, err := + NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + + // Check m0 LockedBalanceEntry + m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) + m0PKID := m0PKIDEntry.PKID + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 1499, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1500, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) + + // Now we test the opposite vested lockup consolidation type: + // existing lockup: ------------------- + // proposed lockup: -------------------------------- + // overhang: ^ ^ + // + // In theory the below operation should generate just two locked balance entries. 
+ + // Perform a simple vested lockup in the future over 1000ns. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 1500, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a second simple vested lockup in the future that causes a left overhang. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Check m0 LockedBalanceEntry + m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) + m1PKID := m1PKIDEntry.PKID + m1LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m1PKID, + ProfilePKID: *m1PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 1499, + }) + require.NoError(t, err) + require.True(t, m1LockedBalanceEntry != nil) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m1PKID, + ProfilePKID: *m1PKID, + UnlockTimestampNanoSecs: 1500, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m1LockedBalanceEntry != nil) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) +} + +func TestRightOverhangVestedLockupConsolidation(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Submit a transaction updating the MaximumVestedIntersectionsPerLockupTransaction + + // First we test the following vested lockup consolidation 
type: + // existing lockup: -------------------------------- + // proposed lockup: ------------------- + // overhang: ^ ^ + // + // In theory the below operation should generate just two locked balance entries. + + // Perform a simple vested lockup in the future over 1000ns. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a second simple vested lockup in the future that causes a left overhang. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, 1499, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Verify that the left overhang was computed correctly. + utxoView, err := + NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + + // Check m0 LockedBalanceEntry + m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) + m0PKID := m0PKIDEntry.PKID + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 1499, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1500, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + + // Now we test 
the opposite vested lockup consolidation type: + // existing lockup: ------------------- + // proposed lockup: -------------------------------- + // overhang: ^ ^ + // + // In theory the below operation should generate just two locked balance entries. + + // Perform a simple vested lockup in the future over 1000ns. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 1000, 1499, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a second simple vested lockup in the future that causes a left overhang. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Check m0 LockedBalanceEntry + m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) + m1PKID := m1PKIDEntry.PKID + m1LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m1PKID, + ProfilePKID: *m1PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 1499, + }) + require.NoError(t, err) + require.True(t, m1LockedBalanceEntry != nil) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) + m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m1PKID, + ProfilePKID: *m1PKID, + UnlockTimestampNanoSecs: 1500, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m1LockedBalanceEntry != nil) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) +} + //---------------------------------------------------------- // (Testing) Lockup Setup Helper Functions //---------------------------------------------------------- From 
7d0b1830366aabd0ad21441e9dc97437e66ce5ea Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 9 Jan 2024 17:27:37 -0500 Subject: [PATCH 344/762] Wire up PoS Mempool Into Server (#889) * Wire up PoS Mempool Into Server * Add connect and disconnect functions * Revisions * Address Nina's comments * Wire up PoS mempool alongside legacy mempool in server.go * Cleanup * Finish wiring up mempool * Cleanup * Start and stop the PoS mempool * Address Nina's first pass --- lib/peer.go | 17 +-- lib/pos_mempool.go | 88 +++++++++++++ lib/pos_transaction_register.go | 7 ++ lib/server.go | 210 +++++++++++++++++++++++++------- 4 files changed, 268 insertions(+), 54 deletions(-) diff --git a/lib/peer.go b/lib/peer.go index 654a62ada..cba2a6620 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -2,13 +2,14 @@ package lib import ( "fmt" - "github.com/decred/dcrd/lru" "math" "net" "sort" "sync/atomic" "time" + "github.com/decred/dcrd/lru" + "github.com/btcsuite/btcd/wire" "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" @@ -183,14 +184,16 @@ func (pp *Peer) HandleGetTransactionsMsg(getTxnMsg *MsgDeSoGetTransactions) { "MsgDeSoGetTransactions message with %v txns from peer %v", len(getTxnMsg.HashList), pp) - mempoolTxs := []*MempoolTx{} - txnMap := pp.srv.mempool.readOnlyUniversalTransactionMap + mempoolTxs := []*MempoolTransaction{} + + // We fetch the requested txns from either the PoW mempool or the PoS mempool + // whichever one is used for the consensus protocol at the current block height. for _, txHash := range getTxnMsg.HashList { - mempoolTx, exists := txnMap[*txHash] + mempoolTx := pp.srv.GetMempool().GetTransaction(txHash) // If the transaction isn't in the pool, just continue without adding // it. It is generally OK to respond with only a subset of the transactions // that were requested. 
- if !exists { + if mempoolTx == nil { continue } @@ -201,13 +204,13 @@ func (pp *Peer) HandleGetTransactionsMsg(getTxnMsg *MsgDeSoGetTransactions) { // Doing this helps the Peer when they go to add the transactions by reducing // unconnectedTxns and transactions being rejected due to missing dependencies. sort.Slice(mempoolTxs, func(ii, jj int) bool { - return mempoolTxs[ii].Added.Before(mempoolTxs[jj].Added) + return mempoolTxs[ii].TimestampUnixMicro < mempoolTxs[jj].TimestampUnixMicro }) // Create a list of the fetched transactions to a response. txnList := []*MsgDeSoTxn{} for _, mempoolTx := range mempoolTxs { - txnList = append(txnList, mempoolTx.Tx) + txnList = append(txnList, mempoolTx.MsgDeSoTxn) } // At this point the txnList should have all of the transactions that diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 794d82a55..af474e687 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -277,6 +277,94 @@ func (mp *PosMempool) IsRunning() bool { return mp.status == PosMempoolStatusRunning } +// OnBlockConnected is an event handler provided by the PoS mempool to handle the blockchain +// event where a block is connected to the tip of the blockchain. The mempool updates its +// internal state based on the new block that has been connected. +// +// Whenever a block is connected, this event handler removes the block's transactions from +// the mempool and updates the internal fee estimation to include new block. +func (mp *PosMempool) OnBlockConnected(block *MsgDeSoBlock) { + mp.Lock() + defer mp.Unlock() + + if block.Header == nil || !mp.IsRunning() { + return + } + + // Remove all transactions in the block from the mempool. + for _, txn := range block.Txns { + txnHash := txn.Hash() + + // This should never happen. We perform a nil check on the txn hash to avoid a panic. + if txnHash == nil { + continue + } + + // Get the transaction from the register. If the txn doesn't exist in the register, + // then there's nothing left to do. 
+ existingTxn := mp.txnRegister.GetTransaction(txnHash) + if existingTxn == nil { + continue + } + + mp.removeTransactionNoLock(existingTxn, true) + } + + // Add the block to the fee estimator. This is a best effort operation. If we fail to add the block + // to the fee estimator, we log an error and continue. + if err := mp.feeEstimator.AddBlock(block); err != nil { + glog.Errorf("PosMempool.OnBlockConnected: Problem adding block to fee estimator: %v", err) + } +} + +// OnBlockDisconnected is an event handler provided by the PoS mempool to handle the blockchain +// event where a block is disconnected from the tip of the blockchain. The mempool updates its +// internal state based on the block that has been disconnected. +// +// Whenever a block is disconnected, this event handler adds the block's transactions back to +// the mempool and updates the internal fee estimation to exclude the disconnected block. +func (mp *PosMempool) OnBlockDisconnected(block *MsgDeSoBlock) { + mp.Lock() + defer mp.Unlock() + + if block.Header == nil || !mp.IsRunning() { + return + } + + // Remove all transactions in the block from the mempool. + for _, txn := range block.Txns { + txnHash := txn.Hash() + + // This should never happen. We perform a nil check on the txn hash to avoid a panic. + if txnHash == nil { + continue + } + + // Add all transactions in the block to the mempool. + + // Construct the MempoolTx from the MsgDeSoTxn. + mempoolTx, err := NewMempoolTx(txn, block.Header.TstampNanoSecs/1000, mp.latestBlockHeight) + if err != nil { + continue + } + + // Add the transaction to the mempool and then prune if needed. + if err := mp.addTransactionNoLock(mempoolTx, true); err != nil { + glog.Errorf("PosMempool.AddTransaction: Problem adding transaction to mempool: %v", err) + } + } + + // This is a best effort operation. If we fail to prune the mempool, we log an error and continue. 
+ if err := mp.pruneNoLock(); err != nil { + glog.Errorf("PosMempool.AddTransaction: Problem pruning mempool: %v", err) + } + + // Remove the block from the fee estimator. + if err := mp.feeEstimator.RemoveBlock(block); err != nil { + glog.Errorf("PosMempool.OnBlockDisconnected: Problem removing block from fee estimator: %v", err) + } +} + // AddTransaction validates a MsgDeSoTxn transaction and adds it to the mempool if it is valid. // If the mempool overflows as a result of adding the transaction, the mempool is pruned. The // transaction signature verification can be skipped if verifySignature is passed as true. diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index cfc4b4087..7873705cf 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -214,6 +214,13 @@ func (tr *TransactionRegister) Size() uint64 { return tr.totalTxnsSizeBytes } +func (tr *TransactionRegister) Count() uint64 { + tr.RLock() + defer tr.RUnlock() + + return uint64(len(tr.txnMembership)) +} + func (tr *TransactionRegister) Includes(txn *MempoolTx) bool { tr.RLock() defer tr.RUnlock() diff --git a/lib/server.go b/lib/server.go index 867d2d3b9..7e8f04c9b 100644 --- a/lib/server.go +++ b/lib/server.go @@ -63,7 +63,6 @@ type Server struct { TxIndex *TXIndex fastHotStuffConsensus *FastHotStuffConsensus - // posMempool *PosMemPool TODO: Add the mempool later // All messages received from peers get sent from the ConnectionManager to the // Server through this channel. @@ -220,6 +219,9 @@ func (srv *Server) GetBlockchain() *Blockchain { // TODO: The hallmark of a messy non-law-of-demeter-following interface... 
func (srv *Server) GetMempool() Mempool { + srv.blockchain.ChainLock.RLock() + defer srv.blockchain.ChainLock.RUnlock() + tip := srv.blockchain.BlockTip() if tip.Height >= srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { return srv.posMempool @@ -242,7 +244,7 @@ func (srv *Server) GetMiner() *DeSoMiner { return srv.miner } -func (srv *Server) BroadcastTransaction(txn *MsgDeSoTxn) ([]*MempoolTx, error) { +func (srv *Server) BroadcastTransaction(txn *MsgDeSoTxn) ([]*MsgDeSoTxn, error) { // Use the backendServer to add the transaction to the mempool and // relay it to peers. When a transaction is created by the user there // is no need to consider a rateLimit and also no need to verifySignatures @@ -252,33 +254,24 @@ func (srv *Server) BroadcastTransaction(txn *MsgDeSoTxn) ([]*MempoolTx, error) { return nil, errors.Wrapf(err, "BroadcastTransaction: ") } - // At this point, we know the transaction has been run through the mempool. - // Now wait for an update of the ReadOnlyUtxoView so we don't break anything. - srv.mempool.BlockUntilReadOnlyViewRegenerated() - return mempoolTxs, nil } func (srv *Server) VerifyAndBroadcastTransaction(txn *MsgDeSoTxn) error { // Grab the block tip and use it as the height for validation. blockHeight := srv.blockchain.BlockTip().Height - if blockHeight >= srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { - mtxn := NewMempoolTransaction(txn, uint64(time.Now().UnixMicro())) - // AddTransaction also performs validations. - if err := srv.posMempool.AddTransaction(mtxn, true /*verifySignatures*/); err != nil { - return errors.Wrapf(err, "VerifyAndBroadcastTransaction: problem adding txn to pos mempool") - } - return nil - } - err := srv.blockchain.ValidateTransaction( - txn, - // blockHeight is set to the next block since that's where this - // transaction will be mined at the earliest. 
- blockHeight+1, - true, - srv.mempool) - if err != nil { - return fmt.Errorf("VerifyAndBroadcastTransaction: Problem validating txn: %v", err) + // Only add the txn to the PoW mempool if we are below the PoS cutover height. + if blockHeight < srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + err := srv.blockchain.ValidateTransaction( + txn, + // blockHeight is set to the next block since that's where this + // transaction will be mined at the earliest. + blockHeight+1, + true, + srv.mempool) + if err != nil { + return fmt.Errorf("VerifyAndBroadcastTransaction: Problem validating txn: %v", err) + } } if _, err := srv.BroadcastTransaction(txn); err != nil { @@ -490,6 +483,36 @@ func NewServer( _mempool := NewDeSoMempool(_chain, _rateLimitFeerateNanosPerKB, _minFeeRateNanosPerKB, _blockCypherAPIKey, _runReadOnlyUtxoViewUpdater, _dataDir, _mempoolDumpDir, false) + _posMempool := NewPosMempool() + + // Initialize the PoS mempool. We need to initialize a best-effort UtxoView based on the current + // known state of the chain. This will all be overwritten as we process blocks later on. 
+ currentUtxoView, err := _chain.GetUncommittedTipView() + if err != nil { + return nil, errors.Wrapf(err, "NewServer: Problem initializing latest UtxoView"), true + } + currentGlobalParamsEntry := currentUtxoView.GetCurrentGlobalParamsEntry() + latestBlockHash := _chain.blockTip().Hash + latestBlock := _chain.GetBlock(latestBlockHash) + if latestBlock == nil { + return nil, errors.New("NewServer: Problem getting latest block from chain"), true + } + err = _posMempool.Init( + _params, + currentGlobalParamsEntry, + currentUtxoView, + uint64(_chain.blockTip().Height), + _mempoolDumpDir, + false, + 1024*1024*1024*3, // Max mempool Size = 3GB; TODO make this a param + 60*1000, // Mempool dumper frequency = 60 seconds; TODO make this a param + 1, // Fee estimator mempool blocks; TODO make this a param + []*MsgDeSoBlock{latestBlock}, + 1, // Fee estimator past blocks; TODO make this a param + ) + if err != nil { + return nil, errors.Wrapf(err, "NewServer: Problem initializing PoS mempool"), true + } // Useful for debugging. Every second, it outputs the contents of the mempool // and the contents of the addrmanager. @@ -549,6 +572,7 @@ func NewServer( srv.cmgr = _cmgr srv.blockchain = _chain srv.mempool = _mempool + srv.posMempool = _posMempool srv.miner = _miner srv.blockProducer = _blockProducer srv.incomingMessages = _incomingMessages @@ -1684,14 +1708,25 @@ func (srv *Server) _handleDonePeer(pp *Peer) { } func (srv *Server) _relayTransactions() { - glog.V(1).Infof("Server._relayTransactions: Waiting for mempool readOnlyView to regenerate") - srv.mempool.BlockUntilReadOnlyViewRegenerated() - glog.V(1).Infof("Server._relayTransactions: Mempool view has regenerated") - // For each peer, compute the transactions they're missing from the mempool and // send them an inv. 
allPeers := srv.cmgr.GetAllPeers() - txnList := srv.mempool.readOnlyUniversalTransactionList + + srv.blockchain.ChainLock.RLock() + blockHeight := srv.blockchain.BlockTip().Height + srv.blockchain.ChainLock.RUnlock() + + // If we're on the PoW protocol, we need to wait for the mempool readOnlyView to regenerate. + if blockHeight < srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + glog.V(1).Infof("Server._relayTransactions: Waiting for mempool readOnlyView to regenerate") + srv.mempool.BlockUntilReadOnlyViewRegenerated() + glog.V(1).Infof("Server._relayTransactions: Mempool view has regenerated") + } + + // We pull the transactions from either the PoW mempool or the PoS mempool depending + // on the current block height. + txnList := srv.GetMempool().GetTransactions() + for _, pp := range allPeers { if !pp.canReceiveInvMessagess { glog.V(1).Infof("Skipping invs for peer %v because not ready "+ @@ -1704,7 +1739,7 @@ func (srv *Server) _relayTransactions() { for _, newTxn := range txnList { invVect := &InvVect{ Type: InvTypeTx, - Hash: *newTxn.Hash, + Hash: *newTxn.Hash(), } // If the peer has this txn already then skip it. 
@@ -1723,7 +1758,7 @@ func (srv *Server) _relayTransactions() { } func (srv *Server) _addNewTxn( - pp *Peer, txn *MsgDeSoTxn, rateLimit bool, verifySignatures bool) ([]*MempoolTx, error) { + pp *Peer, txn *MsgDeSoTxn, rateLimit bool, verifySignatures bool) ([]*MsgDeSoTxn, error) { if srv.ReadOnlyMode { err := fmt.Errorf("Server._addNewTxnAndRelay: Not processing txn from peer %v "+ @@ -1749,16 +1784,36 @@ func (srv *Server) _addNewTxn( } srv.blockchain.ChainLock.RLock() - newlyAcceptedTxns, err := srv.mempool.ProcessTransaction( - txn, true /*allowUnconnectedTxn*/, rateLimit, peerID, verifySignatures) - srv.blockchain.ChainLock.RUnlock() - if err != nil { - return nil, errors.Wrapf(err, "Server._handleTransaction: Problem adding transaction to mempool: ") + defer srv.blockchain.ChainLock.RUnlock() + + // Only attempt to add the transaction to the PoW mempool if we're on the + // PoW protocol. If we're on the PoW protocol, then we use the PoW mempool's, + // txn validity checks to signal whether the txn has been added or not. The PoW + // mempool has stricter txn validity checks than the PoW mempool, so this works + // out conveniently, as it allows us to always add a txn to the PoS mempool. + blockHeight := srv.blockchain.BlockTip().Height + if blockHeight < srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + _, err := srv.mempool.ProcessTransaction( + txn, true /*allowUnconnectedTxn*/, rateLimit, peerID, verifySignatures) + if err != nil { + return nil, errors.Wrapf(err, "Server._addNewTxn: Problem adding transaction to mempool: ") + } + + // At this point, we know the transaction has been run through the mempool. + // Now wait for an update of the ReadOnlyUtxoView so we don't break anything. 
+ srv.mempool.BlockUntilReadOnlyViewRegenerated() + + glog.V(1).Infof("Server._addNewTxn: newly accepted txn: %v, Peer: %v", txn, pp) } - glog.V(1).Infof("Server._addNewTxnAndRelay: newlyAcceptedTxns: %v, Peer: %v", newlyAcceptedTxns, pp) + // Always add the txn to the PoS mempool. This should always succeed if the txn + // addition into the PoW mempool succeeded above. + mempoolTxn := NewMempoolTransaction(txn, uint64(time.Now().UnixMicro())) + if err := srv.posMempool.AddTransaction(mempoolTxn, true /*verifySignatures*/); err != nil { + return nil, errors.Wrapf(err, "Server._addNewTxn: problem adding txn to pos mempool") + } - return newlyAcceptedTxns, nil + return []*MsgDeSoTxn{txn}, nil } // It's assumed that the caller will hold the ChainLock for reading so @@ -1782,6 +1837,11 @@ func (srv *Server) _handleBlockMainChainConnectedd(event *BlockEvent) { // we connected the blocks and this wouldn't be guaranteed if we kicked // off a goroutine for each update. srv.mempool.UpdateAfterConnectBlock(blk) + srv.posMempool.OnBlockConnected(blk) + + if err := srv._updatePosMempoolAfterTipChange(); err != nil { + glog.Errorf("Server._handleBlockMainChainDisconnected: Problem updating pos mempool after tip change: %v", err) + } blockHash, _ := blk.Header.Hash() glog.V(1).Infof("_handleBlockMainChainConnected: Block %s height %d connected to "+ @@ -1805,12 +1865,32 @@ func (srv *Server) _handleBlockMainChainDisconnectedd(event *BlockEvent) { // we connected the blocks and this wouldn't be guaranteed if we kicked // off a goroutine for each update. 
srv.mempool.UpdateAfterDisconnectBlock(blk) + srv.posMempool.OnBlockDisconnected(blk) + + if err := srv._updatePosMempoolAfterTipChange(); err != nil { + glog.Errorf("Server._handleBlockMainChainDisconnected: Problem updating pos mempool after tip change: %v", err) + } blockHash, _ := blk.Header.Hash() glog.V(1).Infof("_handleBlockMainChainDisconnect: Block %s height %d disconnected from "+ "main chain and chain is current.", hex.EncodeToString(blockHash[:]), blk.Header.Height) } +// _updatePosMempoolAfterTipChange updates the PoS mempool's latest UtxoView, block height, and +// global params. +func (srv *Server) _updatePosMempoolAfterTipChange() error { + // Update the PoS mempool's global params + currentBlockHeight := srv.blockchain.BlockTip().Height + currentUtxoView, err := srv.blockchain.GetUncommittedTipView() + if err != nil { + return err + } + currentGlobalParams := currentUtxoView.GetCurrentGlobalParamsEntry() + srv.posMempool.UpdateLatestBlock(currentUtxoView, uint64(currentBlockHeight)) + srv.posMempool.UpdateGlobalParams(currentGlobalParams) + return nil +} + func (srv *Server) _maybeRequestSync(pp *Peer) { // Send the mempool message if DeSo and Bitcoin are fully current if srv.blockchain.chainState() == SyncStateFullyCurrent { @@ -2058,25 +2138,52 @@ func (srv *Server) _handleGetTransactions(pp *Peer, msg *MsgDeSoGetTransactions) pp.AddDeSoMessage(msg, true /*inbound*/) } -func (srv *Server) ProcessSingleTxnWithChainLock( - pp *Peer, txn *MsgDeSoTxn) ([]*MempoolTx, error) { +func (srv *Server) ProcessSingleTxnWithChainLock(pp *Peer, txn *MsgDeSoTxn) ([]*MsgDeSoTxn, error) { // Lock the chain for reading so that transactions don't shift under our feet // when processing this bundle. Not doing this could cause us to miss transactions // erroneously. // // TODO(performance): We should probably do this less frequently. 
srv.blockchain.ChainLock.RLock() - defer func() { - srv.blockchain.ChainLock.RUnlock() - }() + defer srv.blockchain.ChainLock.RUnlock() + // Note we set rateLimit=false because we have a global minimum txn fee that should // prevent spam on its own. - return srv.mempool.ProcessTransaction( - txn, true /*allowUnconnectedTxn*/, false, /*rateLimit*/ - pp.ID, true /*verifySignatures*/) + + // Only attempt to add the transaction to the PoW mempool if we're on the + // PoW protocol. If we're on the PoW protocol, then we use the PoW mempool's + // txn validity checks to signal whether the txn has been added or not. The PoW + // mempool has stricter txn validity checks than the PoS mempool, so this works + // out conveniently, as it allows us to always add a txn to the PoS mempool. + blockHeight := srv.blockchain.blockTip().Height + if blockHeight < srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + _, err := srv.mempool.ProcessTransaction( + txn, + true, /*allowUnconnectedTxn*/ + false, /*rateLimit*/ + pp.ID, + true, /*verifySignatures*/ + ) + + // If we're on the PoW chain, and the txn doesn't pass the PoW mempool's validity checks, then + // it's an invalid txn. + if err != nil { + return nil, errors.Wrapf(err, "Server.ProcessSingleTxnWithChainLock: Problem adding transaction to PoW mempool: ") + } + } + + // Regardless of the consensus protocol we're running (PoW or PoS), we use the PoS mempool's to house all + // mempool txns. If a txn can't make it into the PoS mempool, which uses a looser unspent balance check for + // the the transactor, then it must be invalid. + if err := srv.posMempool.AddTransaction(NewMempoolTransaction(txn, uint64(time.Now().UnixMicro())), true); err != nil { + return nil, errors.Wrapf(err, "Server.ProcessSingleTxnWithChainLock: Problem adding transaction to PoS mempool: ") + } + + // Happy path, the txn was successfully added to the PoS (and optionally PoW) mempool. 
+ return []*MsgDeSoTxn{txn}, nil } -func (srv *Server) _processTransactions(pp *Peer, transactions []*MsgDeSoTxn) []*MempoolTx { +func (srv *Server) _processTransactions(pp *Peer, transactions []*MsgDeSoTxn) []*MsgDeSoTxn { // Try and add all the transactions to our mempool in the order we received // them. If any fail to get added, just log an error. // @@ -2087,7 +2194,7 @@ func (srv *Server) _processTransactions(pp *Peer, transactions []*MsgDeSoTxn) [] // will eventually add it as opposed to just forgetting about it. glog.V(2).Infof("Server._processTransactions: Processing %d transactions from "+ "peer %v", len(transactions), pp) - transactionsToRelay := []*MempoolTx{} + transactionsToRelay := []*MsgDeSoTxn{} for _, txn := range transactions { // Process the transaction with rate-limiting while allowing unconnectedTxns and // verifying signatures. @@ -2153,6 +2260,10 @@ func (srv *Server) StartStatsdReporter() { mempoolTotal := len(srv.mempool.readOnlyUniversalTransactionList) srv.statsdClient.Gauge("MEMPOOL.COUNT", float64(mempoolTotal), tags, 1) + // Report PoS Mempool size + posMempoolTotal := srv.posMempool.txnRegister.Count() + srv.statsdClient.Gauge("POS_MEMPOOL.COUNT", float64(posMempoolTotal), tags, 1) + // Report block + headers height blocksHeight := srv.blockchain.BlockTip().Height srv.statsdClient.Gauge("BLOCKS.HEIGHT", float64(blocksHeight), tags, 1) @@ -2552,6 +2663,9 @@ func (srv *Server) Stop() { glog.Infof(CLog(Yellow, "Server.Stop: Closed Mempool")) } + glog.Infof(CLog(Yellow, "Server.Stop: Closed PosMempool")) + srv.posMempool.Stop() + // Stop the block producer if srv.blockProducer != nil { if srv.blockchain.MaxSyncBlockHeight == 0 { @@ -2596,6 +2710,8 @@ func (srv *Server) Start() { go srv._startTransactionRelayer() + srv.posMempool.Start() + // Once the ConnectionManager is started, peers will be found and connected to and // messages will begin to flow in to be processed. 
if !srv.DisableNetworking { From da61986566bde8a7d95b41a9e88d261fd5637afe Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 9 Jan 2024 18:47:10 -0500 Subject: [PATCH 345/762] Initialize FastHotStuffConsensus when node is a validator (#906) * Initialize FastHotStuffConsensus when node is a validator * Wrap up wiring * Address Nina's comments * Address final nits for Nina --- cmd/config.go | 11 +++++++++-- cmd/node.go | 12 +++++++++++- cmd/run.go | 4 ++++ lib/pos_consensus.go | 10 +++++++++- lib/server.go | 34 ++++++++++++++++++++++++++++++++-- 5 files changed, 65 insertions(+), 6 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 18da983eb..d1807dd62 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -1,11 +1,12 @@ package cmd import ( + "os" + "path/filepath" + "github.com/deso-protocol/core/lib" "github.com/golang/glog" "github.com/spf13/viper" - "os" - "path/filepath" ) type Config struct { @@ -17,6 +18,7 @@ type Config struct { TXIndex bool Regtest bool PostgresURI string + PosValidatorSeedHex string // Peers ConnectIPs []string @@ -103,6 +105,7 @@ func LoadConfig() *Config { config.TXIndex = viper.GetBool("txindex") config.Regtest = viper.GetBool("regtest") config.PostgresURI = viper.GetString("postgres-uri") + config.PosValidatorSeedHex = viper.GetString("pos-validator-seed-hex") config.HyperSync = viper.GetBool("hypersync") config.ForceChecksum = viper.GetBool("force-checksum") config.SyncType = lib.NodeSyncType(viper.GetString("sync-type")) @@ -176,6 +179,10 @@ func (config *Config) Print() { glog.Infof("Postgres URI: %s", config.PostgresURI) } + if config.PosValidatorSeedHex != "" { + glog.Infof(lib.CLog(lib.Blue, "PoS Validator: ON")) + } + if config.HyperSync { glog.Infof("HyperSync: ON") } diff --git a/cmd/node.go b/cmd/node.go index 5f59d2c2c..219cbe843 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -202,6 +202,14 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { // Setup eventManager 
eventManager := lib.NewEventManager() + var blsKeystore *lib.BLSKeystore + if node.Config.PosValidatorSeedHex != "" { + blsKeystore, err = lib.NewBLSKeystore(node.Config.PosValidatorSeedHex) + if err != nil { + panic(err) + } + } + // Setup the server. ShouldRestart is used whenever we detect an issue and should restart the node after a recovery // process, just in case. These issues usually arise when the node was shutdown unexpectedly mid-operation. The node // performs regular health checks to detect whenever this occurs. @@ -243,7 +251,9 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.nodeMessageChan, node.Config.ForceChecksum, node.Config.StateChangeDir, - node.Config.HypersyncMaxQueueSize) + node.Config.HypersyncMaxQueueSize, + blsKeystore, + ) if err != nil { // shouldRestart can be true if, on the previous run, we did not finish flushing all ancestral // records to the DB. In this case, the snapshot is corrupted and needs to be computed. See the diff --git a/cmd/run.go b/cmd/run.go index a470a5f6e..6729c375b 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -58,6 +58,10 @@ func SetupRunFlags(cmd *cobra.Command) { cmd.PersistentFlags().String("postgres-uri", "", "BETA: Use Postgres as the backing store for chain data."+ "When enabled, most data is stored in postgres although badger is still currently used for some state. Run your "+ "Postgres instance on the same machine as your node for optimal performance.") + cmd.PersistentFlags().String("pos-validator-seed-hex", "", "The private key of the Proof of Stake validator. "+ + "The private key should be passed as hex, optionally prefixed with a '0x', and map to a valid BLS12_381 private key. "+ + "The private key must be 32 bytes, or 64 characters, in length (excluding the '0x' prefix). 
Setting this flag automatically "+ + "makes the node run as a Proof of Stake Validator.") cmd.PersistentFlags().Uint32("max-sync-block-height", 0, "Max sync block height") // Hyper Sync diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 578161d1e..482078899 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -31,7 +31,7 @@ func NewFastHotStuffConsensus(params *DeSoParams, blockchain *Blockchain, mempoo // blockchain state. This should only be called once the blockchain has synced, the node is // ready to join the validator network, and the node is able to validate blocks in the steady state. func (cc *FastHotStuffConsensus) Start() error { - // Hold the write consensus's lock for thread-safety. + // Hold the consensus' write lock for thread-safety. cc.lock.Lock() defer cc.lock.Unlock() @@ -78,6 +78,14 @@ func (cc *FastHotStuffConsensus) IsRunning() bool { return cc.fastHotStuffEventLoop.IsRunning() } +func (fc *FastHotStuffConsensus) Stop() { + // Hold the consensus' write lock for thread-safety. + fc.lock.Lock() + defer fc.lock.Unlock() + + fc.fastHotStuffEventLoop.Stop() +} + // HandleLocalBlockProposalEvent is called when FastHotStuffEventLoop has signaled that it can // construct a block at a certain block height. This function validates the block proposal signal, // constructs, processes locally, and then broadcasts the block. 
diff --git a/lib/server.go b/lib/server.go index 7e8f04c9b..c6ad463c0 100644 --- a/lib/server.go +++ b/lib/server.go @@ -383,7 +383,8 @@ func NewServer( _nodeMessageChan chan NodeMessage, _forceChecksum bool, _stateChangeDir string, - _hypersyncMaxQueueSize uint32) ( + _hypersyncMaxQueueSize uint32, + _blsKeystore *BLSKeystore) ( _srv *Server, _err error, _shouldRestart bool) { var err error @@ -568,6 +569,11 @@ func NewServer( _miner = nil } + // Only initialize the FastHotStuffConsensus if the node is a validator with a BLS keystore + if _blsKeystore != nil { + srv.fastHotStuffConsensus = NewFastHotStuffConsensus(_params, _chain, _posMempool, _blsKeystore.GetSigner()) + } + // Set all the fields on the Server object. srv.cmgr = _cmgr srv.blockchain = _chain @@ -2112,9 +2118,28 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { return } - // If we get here, it means we're in SyncStateFullySynced, which is great. + // If we get here, it means we're in SyncStateFullyCurrent, which is great. // In this case we shoot a MEMPOOL message over to the peer to bootstrap the mempool. srv._maybeRequestSync(pp) + + ///////////////////// PoS Validator Consensus Initialization ///////////////////// + + // Exit early if the chain isn't SyncStateFullyCurrent. + if srv.blockchain.chainState() != SyncStateFullyCurrent { + return + } + + // Exit early if the current tip height is below the PoS cutover height. + tipHeight := srv.blockchain.blockTip().Height + if tipHeight < srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + return + } + + // If the PoS validator FastHotStuffConsensus is initialized but not yet running, then + // we can start the validator consensus, and transition to it in the steady-state. 
+ if srv.fastHotStuffConsensus != nil && !srv.fastHotStuffConsensus.IsRunning() { + srv.fastHotStuffConsensus.Start() + } } func (srv *Server) _handleInv(peer *Peer, msg *MsgDeSoInv) { @@ -2640,6 +2665,11 @@ func (srv *Server) Stop() { glog.Infof(CLog(Yellow, "Server.Stop: Closed the Miner")) } + // Stop the PoS validator consensus if one is running + if srv.fastHotStuffConsensus != nil { + srv.fastHotStuffConsensus.Stop() + } + // Stop the PoS block proposer if we have one running. if srv.fastHotStuffConsensus != nil { srv.fastHotStuffConsensus.fastHotStuffEventLoop.Stop() From 2eb2632b15c9598f597e9b2c65bd09f3201bc1e0 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Tue, 9 Jan 2024 16:54:13 -0800 Subject: [PATCH 346/762] PoS Remote Node Indexer and Manager (#879) * Add RemoteNodeIndexer * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test Simplify indices Simplify remote node indexer; fix compilation Simplify RemoteNodeManager More RemoteNodeManager updates Nits --- collections/concurrent_map.go | 69 +++++++ collections/concurrent_map_test.go | 61 ++++++ lib/remote_node.go | 52 +++-- lib/remote_node_indexer.go | 46 +++++ lib/remote_node_manager.go | 297 +++++++++++++++++++++++++++++ lib/server.go | 6 +- 6 files changed, 512 insertions(+), 19 deletions(-) create mode 100644 collections/concurrent_map.go create mode 100644 collections/concurrent_map_test.go create mode 100644 lib/remote_node_indexer.go create mode 100644 lib/remote_node_manager.go diff --git a/collections/concurrent_map.go b/collections/concurrent_map.go new file mode 100644 index 000000000..7e3b1b09f --- /dev/null +++ b/collections/concurrent_map.go @@ -0,0 +1,69 @@ +package collections + +import "sync" + +type ConcurrentMap[Key comparable, Value any] struct { + mtx sync.RWMutex + m map[Key]Value +} + +func NewConcurrentMap[Key comparable, Value any]() *ConcurrentMap[Key, Value] { + return &ConcurrentMap[Key, Value]{ + m: make(map[Key]Value), + } +} + +func (cm *ConcurrentMap[Key, Value]) Set(key Key, val Value) { + cm.mtx.Lock() + defer cm.mtx.Unlock() + + cm.m[key] = val +} + +func (cm *ConcurrentMap[Key, Value]) Remove(key Key) { + cm.mtx.Lock() + defer cm.mtx.Unlock() + + _, ok := cm.m[key] + if !ok { + return + } + 
delete(cm.m, key) +} + +func (cm *ConcurrentMap[Key, Value]) Get(key Key) (Value, bool) { + cm.mtx.RLock() + defer cm.mtx.RUnlock() + + val, ok := cm.m[key] + return val, ok +} + +func (cm *ConcurrentMap[Key, Value]) Copy() map[Key]Value { + cm.mtx.RLock() + defer cm.mtx.RUnlock() + + index := make(map[Key]Value) + for key, node := range cm.m { + index[key] = node + } + return index +} + +func (cm *ConcurrentMap[Key, Value]) GetAll() []Value { + cm.mtx.RLock() + defer cm.mtx.RUnlock() + + var vals []Value + for _, val := range cm.m { + vals = append(vals, val) + } + return vals +} + +func (cm *ConcurrentMap[Key, Value]) Count() int { + cm.mtx.RLock() + defer cm.mtx.RUnlock() + + return len(cm.m) +} diff --git a/collections/concurrent_map_test.go b/collections/concurrent_map_test.go new file mode 100644 index 000000000..46fc0c9ba --- /dev/null +++ b/collections/concurrent_map_test.go @@ -0,0 +1,61 @@ +package collections + +import ( + "fmt" + "testing" +) + +func TestConcurrentMap(t *testing.T) { + m := NewConcurrentMap[string, int]() + control := make(map[string]int) + + // test add + for ii := 0; ii < 100; ii++ { + key := fmt.Sprintf("%v", ii) + m.Set(key, ii) + control[key] = ii + } + + for key, val := range control { + if mVal, ok := m.Get(key); !ok || mVal != val { + t.Errorf("Expected %d, got %d", val, m.m[key]) + } + } + + // test remove + for ii := 0; ii < 50; ii++ { + key := fmt.Sprintf("%v", ii) + m.Remove(key) + delete(control, key) + } + + for key, val := range control { + if mVal, ok := m.Get(key); !ok || mVal != val { + t.Errorf("Expected %d, got %d", val, m.m[key]) + } + } + + // test copy + copy := m.Copy() + for key, val := range control { + if mVal, ok := copy[key]; !ok || mVal != val { + t.Errorf("Expected %d, got %d", val, m.m[key]) + } + } + if len(copy) != len(control) { + t.Errorf("Expected %d, got %d", len(control), len(copy)) + } + + // test get all + vals := m.GetAll() + for _, val := range vals { + if _, ok := control[fmt.Sprintf("%v", 
val)]; !ok { + t.Errorf("Expected %d, got %d", val, m.m[fmt.Sprintf("%v", val)]) + } + } + + // test size + if m.Count() != len(control) { + t.Errorf("Expected %d, got %d", len(control), m.Count()) + } +} diff --git a/lib/remote_node.go b/lib/remote_node.go index 6da6bf408..64ed0be8f 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -58,13 +58,19 @@ func (id RemoteNodeId) ToUint64() uint64 { // lifecycle. Once the RemoteNode is terminated, it will be disposed of, and a new RemoteNode must be created if we // wish to reconnect to the peer in the future. type RemoteNode struct { - mtx sync.Mutex + mtx sync.RWMutex peer *Peer // The id is the unique identifier of this RemoteNode. For outbound connections, the id will be the same as the // attemptId of the OutboundConnectionAttempt, and the subsequent id of the outbound peer. For inbound connections, // the id will be the same as the inbound peer's id. - id RemoteNodeId + id RemoteNodeId + // validatorPublicKey is the BLS public key of the validator node. This is only set for validator nodes. For + // non-validator nodes, this will be nil. For outbound validators nodes, the validatorPublicKey will be set when + // the RemoteNode is instantiated. And for inbound validator nodes, the validatorPublicKey will be set when the + // handshake is completed. 
+ validatorPublicKey *bls.PublicKey + connectionStatus RemoteNodeStatus params *DeSoParams @@ -134,10 +140,11 @@ func NewHandshakeMetadata() *HandshakeMetadata { } } -func NewRemoteNode(id RemoteNodeId, srv *Server, cmgr *ConnectionManager, keystore *BLSKeystore, +func NewRemoteNode(id RemoteNodeId, validatorPublicKey *bls.PublicKey, srv *Server, cmgr *ConnectionManager, keystore *BLSKeystore, params *DeSoParams, minTxFeeRateNanosPerKB uint64, latestBlockHeight uint64, nodeServices ServiceFlag) *RemoteNode { return &RemoteNode{ id: id, + validatorPublicKey: validatorPublicKey, connectionStatus: RemoteNodeStatus_NotConnected, handshakeMetadata: NewHandshakeMetadata(), srv: srv, @@ -183,7 +190,7 @@ func (rn *RemoteNode) GetNegotiatedProtocolVersion() ProtocolVersionType { } func (rn *RemoteNode) GetValidatorPublicKey() *bls.PublicKey { - return rn.handshakeMetadata.validatorPublicKey + return rn.validatorPublicKey } func (rn *RemoteNode) GetUserAgent() string { @@ -231,13 +238,13 @@ func (rn *RemoteNode) IsValidator() bool { // DialOutboundConnection dials an outbound connection to the provided netAddr. func (rn *RemoteNode) DialOutboundConnection(netAddr *wire.NetAddress) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + if !rn.IsNotConnected() { return fmt.Errorf("RemoteNode.DialOutboundConnection: RemoteNode is not in the NotConnected state") } - rn.mtx.Lock() - defer rn.mtx.Unlock() - rn.cmgr.DialOutboundConnection(netAddr, rn.GetId().ToUint64()) rn.setStatusAttempted() return nil @@ -245,13 +252,13 @@ func (rn *RemoteNode) DialOutboundConnection(netAddr *wire.NetAddress) error { // DialPersistentOutboundConnection dials a persistent outbound connection to the provided netAddr. 
func (rn *RemoteNode) DialPersistentOutboundConnection(netAddr *wire.NetAddress) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + if !rn.IsNotConnected() { return fmt.Errorf("RemoteNode.DialPersistentOutboundConnection: RemoteNode is not in the NotConnected state") } - rn.mtx.Lock() - defer rn.mtx.Unlock() - rn.cmgr.DialPersistentOutboundConnection(netAddr, rn.GetId().ToUint64()) rn.setStatusAttempted() return nil @@ -259,13 +266,13 @@ func (rn *RemoteNode) DialPersistentOutboundConnection(netAddr *wire.NetAddress) // AttachInboundConnection creates an inbound peer once a successful inbound connection has been established. func (rn *RemoteNode) AttachInboundConnection(conn net.Conn, na *wire.NetAddress) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + if !rn.IsNotConnected() { return fmt.Errorf("RemoteNode.AttachInboundConnection: RemoteNode is not in the NotConnected state") } - rn.mtx.Lock() - defer rn.mtx.Unlock() - id := rn.GetId().ToUint64() rn.peer = rn.cmgr.ConnectPeer(id, conn, na, false, false) rn.setStatusConnected() @@ -274,13 +281,13 @@ func (rn *RemoteNode) AttachInboundConnection(conn net.Conn, na *wire.NetAddress // AttachOutboundConnection creates an outbound peer once a successful outbound connection has been established. 
func (rn *RemoteNode) AttachOutboundConnection(conn net.Conn, na *wire.NetAddress, isPersistent bool) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + if rn.connectionStatus != RemoteNodeStatus_Attempted { return fmt.Errorf("RemoteNode.AttachOutboundConnection: RemoteNode is not in the Attempted state") } - rn.mtx.Lock() - defer rn.mtx.Unlock() - id := rn.GetId().ToUint64() rn.peer = rn.cmgr.ConnectPeer(id, conn, na, true, isPersistent) rn.setStatusConnected() @@ -292,6 +299,10 @@ func (rn *RemoteNode) Disconnect() { rn.mtx.Lock() defer rn.mtx.Unlock() + if rn.connectionStatus == RemoteNodeStatus_Terminated { + return + } + id := rn.GetId().ToUint64() switch rn.connectionStatus { case RemoteNodeStatus_Attempted: @@ -303,6 +314,9 @@ func (rn *RemoteNode) Disconnect() { } func (rn *RemoteNode) SendMessage(desoMsg DeSoMessage) error { + rn.mtx.RLock() + rn.mtx.RUnlock() + if rn.connectionStatus != RemoteNodeStatus_HandshakeCompleted { return fmt.Errorf("SendMessage: Remote node is not connected") } @@ -614,8 +628,14 @@ func (rn *RemoteNode) validateVerackPoS(vrkMsg *MsgDeSoVerack) error { "verack signature verification failed", rn.id) } + if rn.validatorPublicKey != nil || rn.validatorPublicKey.Serialize() != vrkMsg.PublicKey.Serialize() { + return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+ + "verack public key mismatch; message: %v; expected: %v", rn.id, vrkMsg.PublicKey, rn.validatorPublicKey) + } + // If we get here then the verack message is valid. Set the validator public key on the peer. 
vMeta.validatorPublicKey = vrkMsg.PublicKey + rn.validatorPublicKey = vrkMsg.PublicKey return nil } diff --git a/lib/remote_node_indexer.go b/lib/remote_node_indexer.go new file mode 100644 index 000000000..834dbda51 --- /dev/null +++ b/lib/remote_node_indexer.go @@ -0,0 +1,46 @@ +package lib + +import ( + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections" +) + +// RemoteNodeIndexer is a structure that holds information about all remote nodes and their indices. +type RemoteNodeIndexer struct { + // AllRemoteNodes is a map storing all remote nodes by their IDs. + AllRemoteNodes *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] + + // Indices for various types of remote nodes. + ValidatorIndex *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] + NonValidatorOutboundIndex *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] + NonValidatorInboundIndex *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] +} + +// NewRemoteNodeIndexer initializes and returns a new instance of RemoteNodeIndexer. +func NewRemoteNodeIndexer() *RemoteNodeIndexer { + rni := &RemoteNodeIndexer{ + AllRemoteNodes: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + ValidatorIndex: collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode](), + NonValidatorOutboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + NonValidatorInboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + } + + return rni +} + +// Getter methods for accessing the different indices. 
+func (rni *RemoteNodeIndexer) GetAllRemoteNodes() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { + return rni.AllRemoteNodes +} + +func (rni *RemoteNodeIndexer) GetValidatorIndex() *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] { + return rni.ValidatorIndex +} + +func (rni *RemoteNodeIndexer) GetNonValidatorOutboundIndex() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { + return rni.NonValidatorOutboundIndex +} + +func (rni *RemoteNodeIndexer) GetNonValidatorInboundIndex() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { + return rni.NonValidatorInboundIndex +} diff --git a/lib/remote_node_manager.go b/lib/remote_node_manager.go new file mode 100644 index 000000000..a41fe4606 --- /dev/null +++ b/lib/remote_node_manager.go @@ -0,0 +1,297 @@ +package lib + +import ( + "fmt" + "github.com/btcsuite/btcd/wire" + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections" + "github.com/pkg/errors" + "net" + "sync/atomic" +) + +// RemoteNodeManager manages all the RemoteNode that the node is connected to. It is responsible for starting, maintaining, +// and stopping remote node connections. It is also responsible for organizing the remote nodes into indices for easy +// access, through the RemoteNodeIndexer. +type RemoteNodeManager struct { + // remoteNodeIndexer is a structure that stores and indexes all created remote nodes. + remoteNodeIndexer *RemoteNodeIndexer + + params *DeSoParams + srv *Server + bc *Blockchain + cmgr *ConnectionManager + + // keystore is a reference to the node's BLS private key storage. + keystore *BLSKeystore + + // configs + minTxFeeRateNanosPerKB uint64 + nodeServices ServiceFlag + + // Used to set remote node ids. Must be incremented atomically. 
+ remoteNodeIndex uint64 +} + +func NewRemoteNodeManager(srv *Server, bc *Blockchain, cmgr *ConnectionManager, keystore *BLSKeystore, params *DeSoParams, + minTxFeeRateNanosPerKB uint64, nodeServices ServiceFlag) *RemoteNodeManager { + return &RemoteNodeManager{ + remoteNodeIndexer: NewRemoteNodeIndexer(), + params: params, + srv: srv, + bc: bc, + cmgr: cmgr, + keystore: keystore, + minTxFeeRateNanosPerKB: minTxFeeRateNanosPerKB, + nodeServices: nodeServices, + } +} + +func (manager *RemoteNodeManager) newRemoteNode(validatorPublicKey *bls.PublicKey) *RemoteNode { + id := atomic.AddUint64(&manager.remoteNodeIndex, 1) + remoteNodeId := NewRemoteNodeId(id) + latestBlockHeight := uint64(manager.bc.BlockTip().Height) + return NewRemoteNode(remoteNodeId, validatorPublicKey, manager.srv, manager.cmgr, manager.keystore, manager.params, + manager.minTxFeeRateNanosPerKB, latestBlockHeight, manager.nodeServices) +} + +func (manager *RemoteNodeManager) ProcessCompletedHandshake(remoteNode *RemoteNode) { + if remoteNode == nil { + return + } + + if remoteNode.IsValidator() { + manager.SetValidator(remoteNode) + } else { + manager.SetNonValidator(remoteNode) + } + manager.srv.HandleAcceptedPeer(remoteNode.GetPeer()) +} + +func (manager *RemoteNodeManager) Disconnect(rn *RemoteNode) { + rn.Disconnect() + manager.removeRemoteNodeFromIndexer(rn) +} + +func (manager *RemoteNodeManager) DisconnectById(id RemoteNodeId) { + rn := manager.GetRemoteNodeById(id) + if rn == nil { + return + } + + manager.Disconnect(rn) +} + +func (manager *RemoteNodeManager) removeRemoteNodeFromIndexer(rn *RemoteNode) { + if rn == nil { + return + } + + indexer := manager.remoteNodeIndexer + indexer.GetAllRemoteNodes().Remove(rn.GetId()) + if rn.validatorPublicKey != nil { + indexer.GetValidatorIndex().Remove(rn.validatorPublicKey.Serialize()) + } + indexer.GetNonValidatorOutboundIndex().Remove(rn.GetId()) + indexer.GetNonValidatorInboundIndex().Remove(rn.GetId()) +} + +func (manager *RemoteNodeManager) 
SendMessage(rn *RemoteNode, desoMessage DeSoMessage) error { + if rn == nil { + return fmt.Errorf("RemoteNodeManager.SendMessage: RemoteNode is nil") + } + + return rn.SendMessage(desoMessage) +} + +// ########################### +// ## Create RemoteNode +// ########################### + +func (manager *RemoteNodeManager) CreateValidatorConnection(netAddr *wire.NetAddress, publicKey *bls.PublicKey) error { + if netAddr == nil || publicKey == nil { + return fmt.Errorf("RemoteNodeManager.CreateValidatorConnection: netAddr or public key is nil") + } + + remoteNode := manager.newRemoteNode(publicKey) + if err := remoteNode.DialPersistentOutboundConnection(netAddr); err != nil { + return errors.Wrapf(err, "RemoteNodeManager.CreateValidatorConnection: Problem calling DialPersistentOutboundConnection "+ + "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) + } + manager.setRemoteNode(remoteNode) + manager.GetValidatorIndex().Set(publicKey.Serialize(), remoteNode) + return nil +} + +func (manager *RemoteNodeManager) CreateNonValidatorPersistentOutboundConnection(netAddr *wire.NetAddress) error { + if netAddr == nil { + return fmt.Errorf("RemoteNodeManager.CreateNonValidatorPersistentOutboundConnection: netAddr is nil") + } + + remoteNode := manager.newRemoteNode(nil) + if err := remoteNode.DialPersistentOutboundConnection(netAddr); err != nil { + return errors.Wrapf(err, "RemoteNodeManager.CreateNonValidatorPersistentOutboundConnection: Problem calling DialPersistentOutboundConnection "+ + "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) + } + manager.setRemoteNode(remoteNode) + manager.GetNonValidatorOutboundIndex().Set(remoteNode.GetId(), remoteNode) + return nil +} + +func (manager *RemoteNodeManager) CreateNonValidatorOutboundConnection(netAddr *wire.NetAddress) error { + if netAddr == nil { + return fmt.Errorf("RemoteNodeManager.CreateNonValidatorOutboundConnection: netAddr is nil") + } + + remoteNode := manager.newRemoteNode(nil) + if err := 
remoteNode.DialOutboundConnection(netAddr); err != nil { + return errors.Wrapf(err, "RemoteNodeManager.CreateNonValidatorOutboundConnection: Problem calling DialOutboundConnection "+ + "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) + } + manager.setRemoteNode(remoteNode) + manager.GetNonValidatorOutboundIndex().Set(remoteNode.GetId(), remoteNode) + return nil +} + +func (manager *RemoteNodeManager) AttachInboundConnection(conn net.Conn, + na *wire.NetAddress) (*RemoteNode, error) { + + remoteNode := manager.newRemoteNode(nil) + if err := remoteNode.AttachInboundConnection(conn, na); err != nil { + return nil, errors.Wrapf(err, "RemoteNodeManager.AttachInboundConnection: Problem calling AttachInboundConnection "+ + "for addr: (%s)", conn.RemoteAddr().String()) + } + + manager.setRemoteNode(remoteNode) + return remoteNode, nil +} + +func (manager *RemoteNodeManager) AttachOutboundConnection(conn net.Conn, na *wire.NetAddress, + remoteNodeId uint64, isPersistent bool) (*RemoteNode, error) { + + id := NewRemoteNodeId(remoteNodeId) + remoteNode := manager.GetRemoteNodeById(id) + if remoteNode == nil { + return nil, fmt.Errorf("RemoteNodeManager.AttachOutboundConnection: Problem getting remote node by id (%d)", + id.ToUint64()) + } + + if err := remoteNode.AttachOutboundConnection(conn, na, isPersistent); err != nil { + manager.Disconnect(remoteNode) + return nil, errors.Wrapf(err, "RemoteNodeManager.AttachOutboundConnection: Problem calling AttachOutboundConnection "+ + "for addr: (%s)", conn.RemoteAddr().String()) + } + + return remoteNode, nil +} + +// ########################### +// ## Setters +// ########################### + +func (manager *RemoteNodeManager) setRemoteNode(rn *RemoteNode) { + if rn == nil { + return + } + + manager.GetAllRemoteNodes().Set(rn.GetId(), rn) +} + +func (manager *RemoteNodeManager) SetNonValidator(rn *RemoteNode) { + if rn == nil { + return + } + + if rn.IsOutbound() { + manager.GetNonValidatorOutboundIndex().Set(rn.GetId(), 
rn) + } else if rn.IsInbound() { + manager.GetNonValidatorInboundIndex().Set(rn.GetId(), rn) + } else { + manager.Disconnect(rn) + return + } + + manager.UnsetValidator(rn) +} + +func (manager *RemoteNodeManager) SetValidator(remoteNode *RemoteNode) { + if remoteNode == nil { + return + } + + pk := remoteNode.GetValidatorPublicKey() + if pk == nil { + manager.Disconnect(remoteNode) + return + } + manager.GetValidatorIndex().Set(pk.Serialize(), remoteNode) +} + +func (manager *RemoteNodeManager) UnsetValidator(remoteNode *RemoteNode) { + if remoteNode == nil { + return + } + + pk := remoteNode.GetValidatorPublicKey() + if pk == nil { + return + } + manager.GetValidatorIndex().Remove(pk.Serialize()) +} + +func (manager *RemoteNodeManager) UnsetNonValidator(rn *RemoteNode) { + if rn == nil { + return + } + + if rn.IsOutbound() { + manager.GetNonValidatorOutboundIndex().Remove(rn.GetId()) + } else if rn.IsInbound() { + manager.GetNonValidatorInboundIndex().Remove(rn.GetId()) + } else { + manager.Disconnect(rn) + } +} + +// ########################### +// ## Getters +// ########################### + +func (manager *RemoteNodeManager) GetAllRemoteNodes() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { + return manager.remoteNodeIndexer.GetAllRemoteNodes() +} + +func (manager *RemoteNodeManager) GetValidatorIndex() *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] { + return manager.remoteNodeIndexer.GetValidatorIndex() +} + +func (manager *RemoteNodeManager) GetNonValidatorOutboundIndex() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { + return manager.remoteNodeIndexer.GetNonValidatorOutboundIndex() +} + +func (manager *RemoteNodeManager) GetNonValidatorInboundIndex() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { + return manager.remoteNodeIndexer.GetNonValidatorInboundIndex() +} + +func (manager *RemoteNodeManager) GetRemoteNodeFromPeer(peer *Peer) *RemoteNode { + if peer == nil { + return nil + } + id := 
NewRemoteNodeId(peer.GetId()) + rn, _ := manager.GetAllRemoteNodes().Get(id) + return rn +} + +func (manager *RemoteNodeManager) GetRemoteNodeById(id RemoteNodeId) *RemoteNode { + rn, ok := manager.GetAllRemoteNodes().Get(id) + if !ok { + return nil + } + return rn +} + +func (manager *RemoteNodeManager) GetAllNonValidators() []*RemoteNode { + outboundRemoteNodes := manager.GetNonValidatorOutboundIndex().GetAll() + inboundRemoteNodes := manager.GetNonValidatorInboundIndex().GetAll() + return append(outboundRemoteNodes, inboundRemoteNodes...) +} diff --git a/lib/server.go b/lib/server.go index 59213657e..d1c82e5b3 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1576,11 +1576,11 @@ func (srv *Server) _startSync() { } -func (srv *Server) _handlePeerHandshakeComplete(pp *Peer) { +func (srv *Server) HandleAcceptedPeer(pp *Peer) { isSyncCandidate := pp.IsSyncCandidate() isSyncing := srv.blockchain.isSyncing() chainState := srv.blockchain.chainState() - glog.V(1).Infof("Server._handlePeerHandshakeComplete: Processing NewPeer: (%v); IsSyncCandidate(%v), "+ + glog.V(1).Infof("Server.HandleAcceptedPeer: Processing NewPeer: (%v); IsSyncCandidate(%v), "+ "syncPeerIsNil=(%v), IsSyncing=(%v), ChainState=(%v)", pp, isSyncCandidate, (srv.SyncPeer == nil), isSyncing, chainState) @@ -2230,7 +2230,7 @@ func (srv *Server) _handleControlMessages(serverMessage *ServerMessage) (_should switch serverMessage.Msg.(type) { // Control messages used internally to signal to the server. 
case *MsgDeSoPeerHandshakeComplete: - srv._handlePeerHandshakeComplete(serverMessage.Peer) + break case *MsgDeSoDisconnectedPeer: srv._handleDonePeer(serverMessage.Peer) case *MsgDeSoQuit: From cfc4ae9eeba95dfdfa809c48af0897844c3e1d22 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 9 Jan 2024 19:59:14 -0500 Subject: [PATCH 347/762] Refactor tryApplyNewTip Helper Functions (#907) * Refactor tryApplyNewTip Helper Functions * Address Nina's comments * Better comments * Fix typo * Dummy commit to trigger CI --- lib/pos_blockchain.go | 37 +++++++++++++++++++------------------ lib/pos_blockchain_test.go | 14 +++++++------- 2 files changed, 26 insertions(+), 25 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 9ce4796da..d6d9dd680 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1364,7 +1364,7 @@ func (bc *Blockchain) tryApplyNewTip(blockNode *BlockNode, currentView uint64, l // and can just add this block to the best chain. chainTip := bc.BlockTip() if chainTip.Hash.IsEqual(blockNode.Header.PrevBlockHash) { - bc.addBlockToBestChain(blockNode) + bc.addTipBlockToBestChain(blockNode) return true, nil } // Check if we should perform a reorg here. @@ -1375,26 +1375,15 @@ func (bc *Blockchain) tryApplyNewTip(blockNode *BlockNode, currentView uint64, l } // We need to perform a reorg here. For simplicity, we remove all uncommitted blocks and then re-add them. - committedTip, idx := bc.getCommittedTip() - if committedTip == nil || idx == -1 { - // This is an edge case we'll never hit in practice since all the PoW blocks - // are committed. - return false, errors.New("tryApplyNewTip: No committed blocks found") + for !bc.blockTip().IsCommitted() { + bc.removeTipBlockFromBestChain() } - // Remove all uncommitted blocks. These are all blocks that come after the committedTip - // in the best chain. - // Delete all blocks from bc.bestChainMap that come after the highest committed block. 
- for ii := idx + 1; ii < len(bc.bestChain); ii++ { - delete(bc.bestChainMap, *bc.bestChain[ii].Hash) - } - // Shorten best chain back to committed tip. - bc.bestChain = bc.bestChain[:idx+1] // Add the ancestors of the new tip to the best chain. for _, ancestor := range lineageFromCommittedTip { - bc.addBlockToBestChain(ancestor) + bc.addTipBlockToBestChain(ancestor) } // Add the new tip to the best chain. - bc.addBlockToBestChain(blockNode) + bc.addTipBlockToBestChain(blockNode) return true, nil } @@ -1412,12 +1401,24 @@ func (bc *Blockchain) shouldReorg(blockNode *BlockNode, currentView uint64) bool return blockNode.Header.ProposedInView >= currentView } -// addBlockToBestChain adds the block to the best chain. -func (bc *Blockchain) addBlockToBestChain(blockNode *BlockNode) { +// addTipBlockToBestChain adds the block as the new tip of the best chain. +func (bc *Blockchain) addTipBlockToBestChain(blockNode *BlockNode) { bc.bestChain = append(bc.bestChain, blockNode) bc.bestChainMap[*blockNode.Hash] = blockNode } +// removeTipBlockFromBestChain removes the current tip from the best chain. It +// naively removes the tip regardless of the tip's status (committed or not). +// This function is a general purpose helper function that bundles mutations to +// the bestChain slice and bestChainMap map. +func (bc *Blockchain) removeTipBlockFromBestChain() *BlockNode { + // Remove the last block from the best chain. + lastBlock := bc.bestChain[len(bc.bestChain)-1] + delete(bc.bestChainMap, *lastBlock.Hash) + bc.bestChain = bc.bestChain[:len(bc.bestChain)-1] + return lastBlock +} + // runCommitRuleOnBestChain commits the grandparent of the block if possible. // Specifically, this updates the CommittedBlockStatus of its grandparent // and flushes the view after connecting the grandparent block to the DB. 
diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 36eca77d7..0edb29898 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -1291,9 +1291,9 @@ func TestTryApplyNewTip(t *testing.T) { ProposedInView: 4, }, } - bc.addBlockToBestChain(bn1) - bc.addBlockToBestChain(bn2) - bc.addBlockToBestChain(bn3) + bc.addTipBlockToBestChain(bn1) + bc.addTipBlockToBestChain(bn2) + bc.addTipBlockToBestChain(bn3) bc.blockIndexByHash[*hash1] = bn1 bc.blockIndexByHash[*hash2] = bn2 bc.blockIndexByHash[*hash3] = bn3 @@ -1349,7 +1349,7 @@ func TestTryApplyNewTip(t *testing.T) { bc.bestChain = bc.bestChain[:len(bc.bestChain)-1] delete(bc.bestChainMap, *newBlockHash) // Add block 3 back - bc.addBlockToBestChain(bn3) + bc.addTipBlockToBestChain(bn3) // Add a series of blocks that are not part of the best chain // to the block index and reorg to them @@ -1418,8 +1418,8 @@ func TestTryApplyNewTip(t *testing.T) { bc.bestChain = bc.bestChain[:len(bc.bestChain)-3] // Add block 2 and 3 back. 
- bc.addBlockToBestChain(bn2) - bc.addBlockToBestChain(bn3) + bc.addTipBlockToBestChain(bn2) + bc.addTipBlockToBestChain(bn3) // No reorg tests // currentView > newBlock.View @@ -2403,7 +2403,7 @@ func _generateBlockAndAddToBestChain(testMeta *TestMeta, blockHeight uint64, vie require.NoError(testMeta.t, err) newBlockNode, exists := testMeta.chain.blockIndexByHash[*newBlockHash] require.True(testMeta.t, exists) - testMeta.chain.addBlockToBestChain(newBlockNode) + testMeta.chain.addTipBlockToBestChain(newBlockNode) // Update the latest block view latestBlockView, err := testMeta.chain.GetUncommittedTipView() require.NoError(testMeta.t, err) From cab405d09a56ff5bbd45b6f9aaa175f718b802ad Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 10 Jan 2024 12:12:56 -0500 Subject: [PATCH 348/762] Create Helper Connection Utils to Diff Two Slices (#908) --- collections/map.go | 5 +++++ collections/slice.go | 25 +++++++++++++++++++++ collections/slice_test.go | 46 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 76 insertions(+) diff --git a/collections/map.go b/collections/map.go index bf445215f..5821c6a63 100644 --- a/collections/map.go +++ b/collections/map.go @@ -7,3 +7,8 @@ func MapValues[TKey comparable, TValue any](m map[TKey]TValue) []TValue { } return result } + +func MapContains[TKey comparable, TValue any](m map[TKey]TValue, key TKey) bool { + _, ok := m[key] + return ok +} diff --git a/collections/slice.go b/collections/slice.go index c6fe0f69a..9f787d8d2 100644 --- a/collections/slice.go +++ b/collections/slice.go @@ -61,3 +61,28 @@ func Reverse[T any](input []T) []T { } return output } + +// RemoveDuplicates takes in two slices A and B and returns two slices A' and B' such that +// A' contains all elements of A that are not in B, and B' contains all elements of B that +// are not in A. The order of the elements in the output slices is maintained from the originals. 
+func RemoveDuplicates[T comparable](slice1 []T, slice2 []T) (_slice1Unique []T, _slice2Unique []T) { + slice1Contents := ToMap(slice1, func(val T) T { return val }) + slice2Contents := ToMap(slice2, func(val T) T { return val }) + + var slice1Unique []T + var slice2Unique []T + + for _, val := range slice1 { + if !MapContains(slice2Contents, val) { + slice1Unique = append(slice1Unique, val) + } + } + + for _, val := range slice2 { + if !MapContains(slice1Contents, val) { + slice2Unique = append(slice2Unique, val) + } + } + + return slice1Unique, slice2Unique +} diff --git a/collections/slice_test.go b/collections/slice_test.go index 7b521bb6e..31f191ab4 100644 --- a/collections/slice_test.go +++ b/collections/slice_test.go @@ -109,3 +109,49 @@ func TestSliceToMap(t *testing.T) { require.Equal(t, "2", result["b"].Value) } } + +func TestRemoveDuplicates(t *testing.T) { + // Test empty slices + { + slices1 := []int{} + slices2 := []int{} + + slice1Unique, slice2Unique := RemoveDuplicates(slices1, slices2) + + require.Equal(t, 0, len(slice1Unique)) + require.Equal(t, 0, len(slice2Unique)) + } + + // Test slices with no duplicates + { + slices1 := []int{1, 2, 3, 4, 5} + slices2 := []int{6, 7, 8, 9, 10} + + slice1Unique, slice2Unique := RemoveDuplicates(slices1, slices2) + + require.Equal(t, slices1, slice1Unique) + require.Equal(t, slices2, slice2Unique) + } + + // Test slices with only duplicates + { + slices1 := []int{1, 2, 3, 4, 5} + slices2 := []int{1, 2, 3, 4, 5} + + slice1Unique, slice2Unique := RemoveDuplicates(slices1, slices2) + + require.Equal(t, 0, len(slice1Unique)) + require.Equal(t, 0, len(slice2Unique)) + } + + // Test slices with both duplicate and unique values + { + slices1 := []int{1, 2, 3, 4, 5} + slices2 := []int{2, 3, 4, 5, 6, 7, 8, 9, 10} + + slice1Unique, slice2Unique := RemoveDuplicates(slices1, slices2) + + require.Equal(t, slice1Unique, []int{1}) + require.Equal(t, slice2Unique, []int{6, 7, 8, 9, 10}) + } +} From 
84a22d2ac01f86a9a22d179516c00dffe863d187 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 10 Jan 2024 12:32:07 -0500 Subject: [PATCH 349/762] Plug in EventManager Block Connect/Disconnect Events into ProcessBlockPoS (#909) * Plug in EventManager Block Connect/Disconnect Events into ProcessBlockPoS * Rename things in tests --- lib/blockchain_test.go | 2 +- lib/pos_blockchain.go | 51 +++++++++++++++++++++++++++++++++----- lib/pos_blockchain_test.go | 22 +++++++++++++--- 3 files changed, 64 insertions(+), 11 deletions(-) diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index eda5838cf..bf15adfa7 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -254,7 +254,7 @@ func NewLowDifficultyBlockchainWithParamsAndDb(t *testing.T, params *DeSoParams, } } chain, err := NewBlockchain([]string{blockSignerPk}, 0, 0, - &testParams, timesource, db, postgresDb, nil, snap, false) + &testParams, timesource, db, postgresDb, NewEventManager(), snap, false) if err != nil { log.Fatal(err) } diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index d6d9dd680..021ab8468 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -199,6 +199,7 @@ func (bc *Blockchain) ProcessBlockPoS(block *MsgDeSoBlock, currentView uint64, v // 4. Process the block's header. This may reorg the header chain and apply the block as the new header chain tip. // 5. Try to apply the incoming block as the tip (performing reorgs as necessary). If it can't be applied, exit here. // 6. Run the commit rule - If applicable, flushes the incoming block's grandparent to the DB +// 7. Notify listeners via the EventManager of which blocks have been removed and added. 
func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, verifySignatures bool) ( _success bool, _isOrphan bool, @@ -275,7 +276,9 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // first perform any required reorgs and then determine if the incoming block // extends the chain tip. If it does, it will apply the block to the best chain // and appliedNewTip will be true and we can continue to running the commit rule. - appliedNewTip, err := bc.tryApplyNewTip(blockNode, currentView, lineageFromCommittedTip) + appliedNewTip, connectedBlockHashes, disconnectedBlockHashes, err := bc.tryApplyNewTip( + blockNode, currentView, lineageFromCommittedTip, + ) if err != nil { return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem applying new tip: ") } @@ -288,6 +291,24 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v } } + // 7. Notify listeners via the EventManager of which blocks have been removed and added. + for ii := len(disconnectedBlockHashes) - 1; ii >= 0; ii-- { + disconnectedBlock := bc.GetBlock(&disconnectedBlockHashes[ii]) + if disconnectedBlock == nil { + glog.Errorf("processBlockPoS: Problem getting disconnected block %v", disconnectedBlockHashes[ii]) + continue + } + bc.eventManager.blockDisconnected(&BlockEvent{Block: disconnectedBlock}) + } + for ii := 0; ii < len(connectedBlockHashes); ii++ { + connectedBlock := bc.GetBlock(&connectedBlockHashes[ii]) + if connectedBlock == nil { + glog.Errorf("processBlockPoS: Problem getting connected block %v", connectedBlockHashes[ii]) + continue + } + bc.eventManager.blockConnected(&BlockEvent{Block: connectedBlock}) + } + // Now that we've processed this block, we check for any blocks that were previously // stored as orphans, which are children of this block. We can process them now. 
blockNodesAtNextHeight := bc.blockIndexByHeight[uint64(blockNode.Height)+1] @@ -1358,33 +1379,51 @@ func (bc *Blockchain) upsertBlockNodeToDBWithTxn(txn *badger.Txn, blockNode *Blo // 2. Check if the incoming block extends the chain tip after reorg. If not, return false and nil // 3. If the incoming block extends the chain tip, we can apply it by calling addBlockToBestChain. Return true and nil. func (bc *Blockchain) tryApplyNewTip(blockNode *BlockNode, currentView uint64, lineageFromCommittedTip []*BlockNode) ( - _appliedNewTip bool, _err error) { + _appliedNewTip bool, + _connectedBlockHashes []BlockHash, + _disconnectedBlocksHashes []BlockHash, + _err error, +) { // Check if the incoming block extends the chain tip. If so, we don't need to reorg // and can just add this block to the best chain. chainTip := bc.BlockTip() if chainTip.Hash.IsEqual(blockNode.Header.PrevBlockHash) { bc.addTipBlockToBestChain(blockNode) - return true, nil + return true, []BlockHash{*blockNode.Hash}, nil, nil } // Check if we should perform a reorg here. // If we shouldn't reorg AND the incoming block doesn't extend the chain tip, we know that // the incoming block will not get applied as the new tip. if !bc.shouldReorg(blockNode, currentView) { - return false, nil + return false, nil, nil, nil } + // We need to track the hashes of the blocks that we connected and disconnected during the reorg. + connectedBlockHashes := []BlockHash{} + disconnectedBlockHashes := []BlockHash{} + // We need to perform a reorg here. For simplicity, we remove all uncommitted blocks and then re-add them. for !bc.blockTip().IsCommitted() { - bc.removeTipBlockFromBestChain() + disconnectedBlockNode := bc.removeTipBlockFromBestChain() + disconnectedBlockHashes = append(disconnectedBlockHashes, *disconnectedBlockNode.Hash) } // Add the ancestors of the new tip to the best chain. 
for _, ancestor := range lineageFromCommittedTip { bc.addTipBlockToBestChain(ancestor) + connectedBlockHashes = append(connectedBlockHashes, *ancestor.Hash) } // Add the new tip to the best chain. bc.addTipBlockToBestChain(blockNode) - return true, nil + connectedBlockHashes = append(connectedBlockHashes, *blockNode.Hash) + + // We need to dedupe the added and removed block hashes because we may have removed a + // block and added it back during the reorg. + uniqueConnectedBlockHashes, uniqueDisconnectedBlockHashes := collections.RemoveDuplicates( + connectedBlockHashes, + disconnectedBlockHashes, + ) + return true, uniqueConnectedBlockHashes, uniqueDisconnectedBlockHashes, nil } // shouldReorg determines if we should reorg to the block provided. We should reorg if diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 0edb29898..2166f2351 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -1322,13 +1322,15 @@ func TestTryApplyNewTip(t *testing.T) { Header: newBlock.Header, Hash: newBlockHash, } - appliedNewTip, err := bc.tryApplyNewTip(newBlockNode, 9, ancestors) + appliedNewTip, connectedBlockHashes, disconnectedBlockHashes, err := bc.tryApplyNewTip(newBlockNode, 9, ancestors) require.NoError(t, err) require.True(t, appliedNewTip) // hash 3 should no longer be in the best chain or best chain map _, hash3ExistsInBestChainMap := bc.bestChainMap[*hash3] require.False(t, hash3ExistsInBestChainMap) require.False(t, checkBestChainForHash(hash3)) + require.Len(t, connectedBlockHashes, 1) + require.Len(t, disconnectedBlockHashes, 1) // newBlock should be in the best chain and the best chain map and should be the tip. _, newBlockExistsInBestChainMap := bc.bestChainMap[*newBlockHash] @@ -1389,7 +1391,7 @@ func TestTryApplyNewTip(t *testing.T) { require.NoError(t, err) // Try to apply newBlock as tip. 
- appliedNewTip, err = bc.tryApplyNewTip(newBlockNode, 6, ancestors) + appliedNewTip, connectedBlockHashes, disconnectedBlockHashes, err = bc.tryApplyNewTip(newBlockNode, 6, ancestors) require.NoError(t, err) require.True(t, appliedNewTip) // newBlockHash should be tip. @@ -1411,6 +1413,10 @@ func TestTryApplyNewTip(t *testing.T) { require.True(t, hash5ExistsInBestChainMap) require.True(t, checkBestChainForHash(hash5)) + // We have added three blocks and removed two blocks + require.Len(t, connectedBlockHashes, 3) + require.Len(t, disconnectedBlockHashes, 2) + // Reset the state of the best chain. delete(bc.bestChainMap, *hash4) delete(bc.bestChainMap, *hash5) @@ -1426,10 +1432,14 @@ func TestTryApplyNewTip(t *testing.T) { newBlockNode.Header.ProposedInView = 8 // we should not apply the new tip if it doesn't extend the current tip. - appliedNewTip, err = bc.tryApplyNewTip(newBlockNode, 9, ancestors) + appliedNewTip, connectedBlockHashes, disconnectedBlockHashes, err = bc.tryApplyNewTip(newBlockNode, 9, ancestors) require.False(t, appliedNewTip) require.NoError(t, err) + // No blocks have been removed or added. + require.Len(t, connectedBlockHashes, 0) + require.Len(t, disconnectedBlockHashes, 0) + // Super Happy path: no reorg, just extending tip. newBlockNode.Header.ProposedInView = 10 newBlockNode.Header.PrevBlockHash = hash3 @@ -1438,11 +1448,15 @@ func TestTryApplyNewTip(t *testing.T) { require.NoError(t, err) ancestors, err = bc.getLineageFromCommittedTip(newBlock) require.NoError(t, err) - appliedNewTip, err = bc.tryApplyNewTip(newBlockNode, 6, ancestors) + appliedNewTip, connectedBlockHashes, disconnectedBlockHashes, err = bc.tryApplyNewTip(newBlockNode, 6, ancestors) require.True(t, appliedNewTip) require.NoError(t, err) // newBlockHash should be tip. require.True(t, bc.BlockTip().Hash.IsEqual(newBlockHash)) + + // One block has been added to the best chain. 
+ require.Len(t, connectedBlockHashes, 1) + require.Len(t, disconnectedBlockHashes, 0) } // TestCanCommitGrandparent tests the canCommitGrandparent function From b7f821a49ea99c2ea72ee6c616c5901d182d5abe Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 10 Jan 2024 14:19:10 -0800 Subject: [PATCH 350/762] Add simplified logic and more comments. --- lib/block_view_lockups.go | 95 +++++++----- lib/block_view_lockups_test.go | 264 ++++++++++++++++++++++++++++++++- 2 files changed, 315 insertions(+), 44 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 007dcf590..74a109888 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -307,7 +307,8 @@ func (bav *UtxoView) GetLimitedVestedLockedBalanceEntriesOverTimeInterval( // A relevant vested locked balance entry satisfies all the following conditions: // (1) Matching profile PKID // (2) Matching hodler PKID - // (3) An unlock OR end timestamp within the specified (unlock, end) bounds + // (3) An unlock OR end timestamp within the specified (unlock, end) bounds OR + // the lockedBalanceEntry interval is a superset of the specified (unlock, end) bounds // (4) A mismatched unlock and vesting end timestamp (vesting condition) // (5) Not deleted if lockedBalanceEntry.ProfilePKID.Eq(profilePKID) && @@ -315,7 +316,9 @@ func (bav *UtxoView) GetLimitedVestedLockedBalanceEntriesOverTimeInterval( ((lockedBalanceEntry.UnlockTimestampNanoSecs >= unlockTimestampNanoSecs && lockedBalanceEntry.UnlockTimestampNanoSecs <= vestingEndTimestampNanoSecs) || (lockedBalanceEntry.VestingEndTimestampNanoSecs >= unlockTimestampNanoSecs && - lockedBalanceEntry.VestingEndTimestampNanoSecs <= vestingEndTimestampNanoSecs)) && + lockedBalanceEntry.VestingEndTimestampNanoSecs <= vestingEndTimestampNanoSecs) || + (lockedBalanceEntry.UnlockTimestampNanoSecs < unlockTimestampNanoSecs && + lockedBalanceEntry.VestingEndTimestampNanoSecs > vestingEndTimestampNanoSecs)) && 
lockedBalanceEntry.UnlockTimestampNanoSecs != lockedBalanceEntry.VestingEndTimestampNanoSecs && !lockedBalanceEntry.isDeleted { lockedBalanceEntries = append(lockedBalanceEntries, lockedBalanceEntry) @@ -1290,7 +1293,8 @@ func (bav *UtxoView) _connectCoinLockup( } // Check for left overhang by the proposed locked balance entry - if proposedLockedBalanceEntry.UnlockTimestampNanoSecs < existingLockedBalanceEntry.UnlockTimestampNanoSecs { + if proposedLockedBalanceEntry.UnlockTimestampNanoSecs < + existingLockedBalanceEntry.UnlockTimestampNanoSecs { splitLockedBalanceEntry, remainingLockedBalanceEntry, err := SplitVestedLockedBalanceEntry( proposedLockedBalanceEntry, proposedLockedBalanceEntry.UnlockTimestampNanoSecs, @@ -1449,6 +1453,39 @@ func SplitVestedLockedBalanceEntry( _remainingLockedBalanceEntry *LockedBalanceEntry, _err error, ) { + // SplitVestedLockedBalanceEntry performs simple split operations where + // either startSplitTimestampNanoSecs or endSplitTimestampNanoSecs corresponds with the + // start or end timestamp in lockedBalanceEntry. + // + // This means the function can take in one of the two following configurations: + // + // Valid Input Configuration 1: + // lockedBalanceEntry + // startSplitTimestampNanoSecs ^ + // endSplitTimestampNanoSecs ^ + // + // Valid Input Configuration 2: + // lockedBalanceEntry + // startSplitTimestampNanoSecs ^ + // endSplitTimestampNanoSecs ^ + // + // NOTE: We can imagine the split operation taking a lockedBalanceEntry with interval [t1, t2] and + // splitting it into two separate lockedBalanceEntry intervals: [t3, t4] & [t5, t6] where t4 + 1 = t5. + // Notice however that there is a 1 nanosecond loss in this that we must account for between + // t4 and t5. This becomes computationally tricky when trying to consistently compute the split's value. 
+ // To deal with this, we always let the lockedBalanceEntry using the split off interval + // (passed as [startSplitTimestampNanoSecs, endSplitTimestampNanoSecs]) take on the extra 1 nanosecond of + // value. This ends up being the best way to ensure numerical consistency for all caller of this function, + // but other decisions on where to put the extra nanosecond of value can be made as well, and they will also work. + // + // Stated another way: While we return lockedBalanceEntries with intervals [t3, t4] & [t5, t6] + // where t4 + 1 = t5, we compute the balance in each of those entries based on the time elapsed + // in the intervals [t3, t5) and [t5, t6]. This ensures a computationally consistent means of computing + // the value in the returned lockedBalanceEntries. + // + // You can see this implemented below where CalculateLockupValueOverElapsedDuration is called + // with (endSplitTimestampNanoSecs - startSplitTimestampNanoSecs + 1) as the elapsed duration. + // Sanity check to ensure the start timestamp is before the end timestamp. if startSplitTimestampNanoSecs >= endSplitTimestampNanoSecs { return nil, nil, @@ -1512,30 +1549,15 @@ func SplitVestedLockedBalanceEntry( } // Compute the balance in the split locked balance entry. - // NOTE: The reason we add 1 in the first case is because a 1 nanosecond loss occurs - // between the end of the splitLockedBalanceEntry and the beginning of the remainingLockedBalanceEntry - // that would cause small numerical differences between both cases otherwise. It's easier - // to see this by drawing a straight line to represent time and imagining where the - // startSplitTimestampNanoSecs and endSplitTimestampNanoSecs are on the line in both cases. 
- var splitValue *uint256.Int - var err error - if startSplitTimestampNanoSecs == lockedBalanceEntry.UnlockTimestampNanoSecs { - splitValue, err = CalculateLockupSplitValue( - lockedBalanceEntry, startSplitTimestampNanoSecs, endSplitTimestampNanoSecs+1) - if err != nil { - return nil, nil, - errors.Wrap(err, "SplitVestedLockedBalanceEntry failed to compute split value") - } - splitLockedBalanceEntry.BalanceBaseUnits = *splitValue - } else { - splitValue, err = CalculateLockupSplitValue( - lockedBalanceEntry, startSplitTimestampNanoSecs, endSplitTimestampNanoSecs) - if err != nil { - return nil, nil, - errors.Wrap(err, "SplitVestedLockedBalanceEntry failed to compute split value") - } - splitLockedBalanceEntry.BalanceBaseUnits = *splitValue + // See the note at the top of this function for why we do this. + splitValue, err := CalculateLockupValueOverElapsedDuration( + lockedBalanceEntry, + endSplitTimestampNanoSecs-startSplitTimestampNanoSecs+1) + if err != nil { + return nil, nil, + errors.Wrap(err, "SplitVestedLockedBalanceEntry failed to compute split value") } + splitLockedBalanceEntry.BalanceBaseUnits = *splitValue // Compute the balance in the remaining locked balance entry. 
remainingValue, err := SafeUint256().Sub(&lockedBalanceEntry.BalanceBaseUnits, splitValue) @@ -1563,23 +1585,22 @@ func SplitVestedLockedBalanceEntry( return splitLockedBalanceEntry, remainingLockedBalanceEntry, nil } -func CalculateLockupSplitValue( +func CalculateLockupValueOverElapsedDuration( lockedBalanceEntry *LockedBalanceEntry, - startTimestampNanoSecs int64, - endTimestampNanoSecs int64, + elapsedDuration int64, ) ( _splitValue *uint256.Int, _err error, ) { - // Compute the time that passes over the interval [startTimestampNanoSecs, endTimestampNanoSecs] - numerator, err := SafeUint256().Sub( - uint256.NewInt().SetUint64(uint64(endTimestampNanoSecs)), - uint256.NewInt().SetUint64(uint64(startTimestampNanoSecs))) - if err != nil { - return nil, errors.Wrap(err, "CalculateLockupSplitValue: "+ - "(start timestamp - end timestamp) underflow") + // Sanity check the passed values. + if elapsedDuration <= 0 { + return nil, errors.New("CalculateLockupSplitValue: " + + "elapsedDuration specified is either zero or negative.") } + // Convert the elapsedDuration to an uint256 + numerator := uint256.NewInt().SetUint64(uint64(elapsedDuration)) + // Compute the time that passes over the duration of the locked balance entry denominator, err := SafeUint256().Sub( uint256.NewInt().SetUint64(uint64(lockedBalanceEntry.VestingEndTimestampNanoSecs)), @@ -1602,7 +1623,7 @@ func CalculateLockupSplitValue( splitValue, err := SafeUint256().Div(numerator, denominator) if err != nil { return nil, errors.Wrap(err, "CalculateLockupSplitValue: "+ - "(((start timestamp - end timestamp) + 1) * lockedBalanceEntry.Balance overflow)) / "+ + "(elapsedDuration * lockedBalanceEntry.BalanceBaseUnits) / "+ "(lockedBalanceEntry.UnlockTimestamp - lockedBalanceEntry.VestingEndTimestamp) has zero denominator") } diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index c92278862..35c887254 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ 
-3388,8 +3388,6 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { // Initialize m0, m1, m2, m3, m4, and paramUpdater _setUpProfilesAndMintM0M1DAOCoins(testMeta) - // Submit a transaction updating the MaximumVestedIntersectionsPerLockupTransaction - // First we test the following vested lockup consolidation type: // existing lockup: -------------------------------- // proposed lockup: ------------------- @@ -3497,8 +3495,6 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { // Initialize m0, m1, m2, m3, m4, and paramUpdater _setUpProfilesAndMintM0M1DAOCoins(testMeta) - // Submit a transaction updating the MaximumVestedIntersectionsPerLockupTransaction - // First we test the following vested lockup consolidation type: // existing lockup: -------------------------------- // proposed lockup: ------------------- @@ -3540,7 +3536,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1499))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3549,7 +3545,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(501))) // Now we test the opposite vested lockup consolidation type: // existing lockup: ------------------- @@ -3576,7 +3572,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { require.NoError(t, err) } - // Check m0 LockedBalanceEntry + // Check m1 LockedBalanceEntry m1PKIDEntry := 
utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID m1LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ @@ -3599,6 +3595,260 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) } +func TestExternalThreeWayLockupConsolidation(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // First we test the following vested lockup consolidation type: + // existing lockup: -------------------------------- + // proposed lockup: ------------------- + // + // In theory the below operation should generate three locked balance entries. + + // Perform a simple vested lockup in the future over 1000ns. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1250, 1750, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a second simple vested lockup in the future that causes a three-way split. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, 1500, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Validate that the split was performed correctly. 
+ utxoView, err := + NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + + // Check m0 LockedBalanceEntry + m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) + m0PKID := m0PKIDEntry.PKID + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 1249, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1501, + VestingEndTimestampNanoSecs: 1750, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1250, + VestingEndTimestampNanoSecs: 1500, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1250, + VestingEndTimestampNanoSecs: 1750, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry == nil) + + // Now we test the opposite vested lockup consolidation type: + // existing lockup: ------------------ + // proposed lockup: --------------------------------- + // + // In theory the below operation should generate three locked 
balance entries. + + // Perform a simple vested lockup in the future over 1000ns. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 1000, 1500, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a second simple vested lockup in the future that causes a three-way split. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 1250, 1750, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Check m1 LockedBalanceEntry + m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) + m1PKID := m1PKIDEntry.PKID + m1LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m1PKID, + ProfilePKID: *m1PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 1249, + }) + require.NoError(t, err) + require.True(t, m1LockedBalanceEntry != nil) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m1PKID, + ProfilePKID: *m1PKID, + UnlockTimestampNanoSecs: 1250, + VestingEndTimestampNanoSecs: 1500, + }) + require.NoError(t, err) + require.True(t, m1LockedBalanceEntry != nil) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1002))) + m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m1PKID, + ProfilePKID: *m1PKID, + UnlockTimestampNanoSecs: 1501, + VestingEndTimestampNanoSecs: 1750, + }) + require.NoError(t, err) + require.True(t, m1LockedBalanceEntry != nil) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(498))) +} + +func 
TestInternalThreeWayLockupConsolidation(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // First we test the following vested lockup consolidation type: + // existing lockup: -------------------------------- + // proposed lockup: ------------------- + // + // In theory the below operation should generate three locked balance entries. + + // Perform a simple vested lockup in the future over 1000ns. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a second simple vested lockup in the future that causes a three-way split. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1250, 1750, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Validate that the split was performed correctly. 
+ utxoView, err := + NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + + // Check m0 LockedBalanceEntry + m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) + m0PKID := m0PKIDEntry.PKID + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 1249, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1250, + VestingEndTimestampNanoSecs: 1750, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1751, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) + + // Now we test the opposite vested lockup consolidation type: + // existing lockup: ------------------ + // proposed lockup: --------------------------------- + // + // In theory the below operation should generate three locked balance entries. + + // Perform a simple vested lockup in the future over 500ns. 
+ { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 1250, 1750, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a second simple vested lockup in the future that causes a three-way split. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m1Pub, m1Pub, + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Check m1 LockedBalanceEntry + m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) + m1PKID := m1PKIDEntry.PKID + m1LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m1PKID, + ProfilePKID: *m1PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 1249, + }) + require.NoError(t, err) + require.True(t, m1LockedBalanceEntry != nil) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) + m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m1PKID, + ProfilePKID: *m1PKID, + UnlockTimestampNanoSecs: 1250, + VestingEndTimestampNanoSecs: 1750, + }) + require.NoError(t, err) + require.True(t, m1LockedBalanceEntry != nil) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1501))) + m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m1PKID, + ProfilePKID: *m1PKID, + UnlockTimestampNanoSecs: 1751, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m1LockedBalanceEntry != nil) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(249))) +} + //---------------------------------------------------------- // (Testing) Lockup Setup Helper 
Functions //---------------------------------------------------------- From 287172cf544474e02b1b76ea55c411921c821c95 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 10 Jan 2024 14:55:24 -0800 Subject: [PATCH 351/762] Simplify logic further, add complex consolidation tests. --- lib/block_view_lockups.go | 12 ++- lib/block_view_lockups_test.go | 163 +++++++++++++++++++++++++++++++++ 2 files changed, 170 insertions(+), 5 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 74a109888..92cb5885d 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -331,6 +331,12 @@ func (bav *UtxoView) GetLimitedVestedLockedBalanceEntriesOverTimeInterval( } } + // Step 4: Sort by unlock time. + sort.Slice(lockedBalanceEntries, func(ii, jj int) bool { + return lockedBalanceEntries[ii].UnlockTimestampNanoSecs < + lockedBalanceEntries[jj].UnlockTimestampNanoSecs + }) + return lockedBalanceEntries, nil } @@ -1371,14 +1377,10 @@ func (bav *UtxoView) _connectCoinLockup( // remaining LockedBalanceEntry present in the first case. // We check if there's another locked balance entry sometime in the future. 
- splitTimestampEnd := existingLockedBalanceEntry.VestingEndTimestampNanoSecs - if ii != len(lockedBalanceEntries)-1 { - splitTimestampEnd = lockedBalanceEntries[ii+1].UnlockTimestampNanoSecs - 1 - } splitLockedBalanceEntry, remainingLockedBalanceEntry, err := SplitVestedLockedBalanceEntry( proposedLockedBalanceEntry, existingLockedBalanceEntry.UnlockTimestampNanoSecs, - splitTimestampEnd) + existingLockedBalanceEntry.VestingEndTimestampNanoSecs) if err != nil { return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup failed to compute vested split") diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 35c887254..4110ba7f6 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -3849,6 +3849,169 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(249))) } +func TestSimpleJointExistingVestedLockups(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // First we test the following vested lockup consolidation type: + // existing lockups: <------------><-------------> + // proposed lockup: <---------------------------> + // + // In theory the below operation should generate two locked balance entries. + + // Perform a simple vested lockup in the future over 500ns. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, 1500, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a second simple vested lockup in the future that does not overlap but is continuous. 
+ { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1501, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a third simple vested lockup that overlaps both. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Validate that the split was performed correctly. + utxoView, err := + NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + + // Check m0 LockedBalanceEntry + m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) + m0PKID := m0PKIDEntry.PKID + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 1500, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1501))) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1501, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1499))) +} + +func TestSimpleDisjointExistingVestedLockups(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // First we test the following vested 
lockup consolidation type: + // existing lockups: ------------ ------------- + // proposed lockup: ------------------- + // + // In theory the below operation should generate five locked balance entries. + + // Perform a simple vested lockup in the future over 250ns. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a second simple vested lockup in the future that does not overlap. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 3000, 4000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Perform a third simple vested lockup in the future that triggers a five way split. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1500, 3500, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Validate that the split was performed correctly. 
+ utxoView, err := + NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + + // Check m0 LockedBalanceEntry + m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) + m0PKID := m0PKIDEntry.PKID + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1000, + VestingEndTimestampNanoSecs: 1499, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 1500, + VestingEndTimestampNanoSecs: 2000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(750))) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 2001, + VestingEndTimestampNanoSecs: 2999, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(499))) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 3000, + VestingEndTimestampNanoSecs: 3500, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(751))) + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + 
ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: 3501, + VestingEndTimestampNanoSecs: 4000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) +} + //---------------------------------------------------------- // (Testing) Lockup Setup Helper Functions //---------------------------------------------------------- From a86d1c8db2b1db440b5941aff5e591ee2003985c Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Wed, 10 Jan 2024 19:03:22 -0800 Subject: [PATCH 352/762] PoS HandshakeController (#860) * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. 
* Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test Simplify indices Simplify remote node indexer; fix compilation * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test Simplify indices Simplify remote node indexer; fix compilation Simplify RemoteNodeManager * Merge HandshakeStage with RemoteNodeStatus; small HandshakeController nits * Nit * HandshakeController updates * Nits * Quick nit * Nits * Comment nit --- lib/constants.go | 4 + lib/pos_handshake_controller.go | 163 ++++++++++++++++++++++++++++++++ lib/remote_node.go | 85 ++++++++--------- 3 files changed, 204 insertions(+), 48 deletions(-) create mode 100644 lib/pos_handshake_controller.go diff --git a/lib/constants.go b/lib/constants.go index eccf1c582..460b8dadb 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -494,6 +494,10 @@ func (pvt ProtocolVersionType) ToUint64() uint64 { return uint64(pvt) } +func (pvt ProtocolVersionType) Before(version ProtocolVersionType) bool { + return pvt.ToUint64() < version.ToUint64() +} + // DeSoParams defines the full list of possible parameters for the // DeSo network. type DeSoParams struct { diff --git a/lib/pos_handshake_controller.go b/lib/pos_handshake_controller.go new file mode 100644 index 000000000..6f4804f2e --- /dev/null +++ b/lib/pos_handshake_controller.go @@ -0,0 +1,163 @@ +package lib + +import ( + "fmt" + "github.com/decred/dcrd/lru" + "github.com/golang/glog" + "math" +) + +// HandshakeController is a structure that handles the handshake process with remote nodes. 
It is the entry point for +// initiating a handshake with a remote node. It is also responsible for handling version/verack messages from remote +// nodes. And for handling the handshake complete control message. +type HandshakeController struct { + rnManager *RemoteNodeManager + usedNonces lru.Cache +} + +func NewHandshakeController(rnManager *RemoteNodeManager) *HandshakeController { + + vm := &HandshakeController{ + rnManager: rnManager, + usedNonces: lru.NewCache(1000), + } + + return vm +} + +// InitiateHandshake kicks off handshake with a remote node. +func (hc *HandshakeController) InitiateHandshake(rn *RemoteNode) { + nonce := uint64(RandInt64(math.MaxInt64)) + if err := rn.InitiateHandshake(nonce); err != nil { + glog.Errorf("RemoteNode.InitiateHandshake: Error initiating handshake: %v", err) + hc.rnManager.Disconnect(rn) + } + hc.usedNonces.Add(nonce) +} + +// _handleHandshakeCompleteMessage handles HandshakeComplete control messages, sent by RemoteNodes. +func (hc *HandshakeController) _handleHandshakeCompleteMessage(origin *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypePeerHandshakeComplete { + return + } + + // Get the handshake information of this peer. + remoteNode := hc.rnManager.GetRemoteNodeFromPeer(origin) + if remoteNode == nil { + return + } + + if remoteNode.GetNegotiatedProtocolVersion().Before(ProtocolVersion2) { + hc.rnManager.ProcessCompletedHandshake(remoteNode) + return + } + + if err := hc.handleHandshakeCompletePoSMessage(remoteNode); err != nil { + glog.Errorf("HandshakeController._handleHandshakeCompleteMessage: Error handling PoS handshake peer message: %v", err) + hc.rnManager.Disconnect(remoteNode) + return + } + hc.rnManager.ProcessCompletedHandshake(remoteNode) +} + +func (hc *HandshakeController) handleHandshakeCompletePoSMessage(remoteNode *RemoteNode) error { + + validatorPk := remoteNode.GetValidatorPublicKey() + // If the remote node is not a potential validator, we don't need to do anything. 
+ if validatorPk == nil { + return nil + } + + // Lookup the validator in the ValidatorIndex with the same public key. + existingValidator, ok := hc.rnManager.GetValidatorIndex().Get(validatorPk.Serialize()) + // For inbound RemoteNodes, we should ensure that there isn't an existing validator connected with the same public key. + // Inbound nodes are not initiated by us, so we shouldn't have added the RemoteNode to the ValidatorIndex yet. + if remoteNode.IsInbound() && ok { + return fmt.Errorf("HandshakeController.handleHandshakeCompletePoSMessage: Inbound RemoteNode with duplicate validator public key") + } + // For outbound RemoteNodes, we have two possible scenarios. Either the RemoteNode has been initiated as a validator, + // in which case it should already be in the ValidatorIndex. Or the RemoteNode has been initiated as a regular node, + // in which case it should not be in the ValidatorIndex, but in the NonValidatorOutboundIndex. So to ensure there is + // no duplicate connection with the same public key, we only check whether there is a validator in the ValidatorIndex + // with the RemoteNode's public key. If there is one, we want to ensure that these two RemoteNodes have identical ids. + if remoteNode.IsOutbound() && ok { + if remoteNode.GetId() != existingValidator.GetId() { + return fmt.Errorf("HandshakeController.handleHandshakeCompletePoSMessage: Outbound RemoteNode with duplicate validator public key. "+ + "Existing validator id: %v, new validator id: %v", existingValidator.GetId().ToUint64(), remoteNode.GetId().ToUint64()) + } + } + return nil +} + +// _handleVersionMessage handles version messages, sent by RemoteNodes. +func (hc *HandshakeController) _handleVersionMessage(origin *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeVersion { + return + } + + rn := hc.rnManager.GetRemoteNodeFromPeer(origin) + if rn == nil { + // This should never happen. 
+ return + } + + var verMsg *MsgDeSoVersion + var ok bool + if verMsg, ok = desoMsg.(*MsgDeSoVersion); !ok { + glog.Errorf("HandshakeController._handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ + "error casting version message", origin.ID) + hc.rnManager.Disconnect(rn) + return + } + + // If we've seen this nonce before then return an error since this is a connection from ourselves. + msgNonce := verMsg.Nonce + if hc.usedNonces.Contains(msgNonce) { + hc.usedNonces.Delete(msgNonce) + glog.Errorf("HandshakeController._handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ + "nonce collision", origin.ID) + hc.rnManager.Disconnect(rn) + return + } + + // Call HandleVersionMessage on the RemoteNode. + responseNonce := uint64(RandInt64(math.MaxInt64)) + if err := rn.HandleVersionMessage(verMsg, responseNonce); err != nil { + glog.Errorf("HandshakeController._handleVersionMessage: Requesting PeerDisconnect for id: (%v) "+ + "error handling version message: %v", origin.ID, err) + hc.rnManager.Disconnect(rn) + return + + } + hc.usedNonces.Add(responseNonce) +} + +// _handleVerackMessage handles verack messages, sent by RemoteNodes. +func (hc *HandshakeController) _handleVerackMessage(origin *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeVerack { + return + } + + rn := hc.rnManager.GetRemoteNodeFromPeer(origin) + if rn == nil { + // This should never happen. + return + } + + var vrkMsg *MsgDeSoVerack + var ok bool + if vrkMsg, ok = desoMsg.(*MsgDeSoVerack); !ok { + glog.Errorf("HandshakeController._handleVerackMessage: Disconnecting RemoteNode with id: (%v) "+ + "error casting verack message", origin.ID) + hc.rnManager.Disconnect(rn) + return + } + + // Call HandleVerackMessage on the RemoteNode. 
+ if err := rn.HandleVerackMessage(vrkMsg); err != nil { + glog.Errorf("HandshakeController._handleVerackMessage: Requesting PeerDisconnect for id: (%v) "+ + "error handling verack message: %v", origin.ID, err) + hc.rnManager.Disconnect(rn) + } + return +} diff --git a/lib/remote_node.go b/lib/remote_node.go index 64ed0be8f..a357118a2 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -18,18 +18,11 @@ type RemoteNodeStatus int const ( RemoteNodeStatus_NotConnected RemoteNodeStatus = 0 RemoteNodeStatus_Connected RemoteNodeStatus = 1 - RemoteNodeStatus_HandshakeCompleted RemoteNodeStatus = 2 - RemoteNodeStatus_Attempted RemoteNodeStatus = 3 - RemoteNodeStatus_Terminated RemoteNodeStatus = 4 -) - -type HandshakeStage uint8 - -const ( - HandshakeStage_NotStarted HandshakeStage = 0 - HandshakeStage_VersionSent HandshakeStage = 1 - HandshakeStage_VerackSent HandshakeStage = 2 - HandshakeStage_Completed HandshakeStage = 3 + RemoteNodeStatus_VersionSent RemoteNodeStatus = 2 + RemoteNodeStatus_VerackSent RemoteNodeStatus = 3 + RemoteNodeStatus_HandshakeCompleted RemoteNodeStatus = 4 + RemoteNodeStatus_Attempted RemoteNodeStatus = 5 + RemoteNodeStatus_Terminated RemoteNodeStatus = 6 ) type RemoteNodeId uint64 @@ -129,15 +122,10 @@ type HandshakeMetadata struct { // ### The following fields are populated during the MsgDeSoVerack exchange. // validatorPublicKey is the BLS public key of the peer, if the peer is a validator node. validatorPublicKey *bls.PublicKey - - // ### The following fields are handshake control fields. 
- handshakeStage HandshakeStage } func NewHandshakeMetadata() *HandshakeMetadata { - return &HandshakeMetadata{ - handshakeStage: HandshakeStage_NotStarted, - } + return &HandshakeMetadata{} } func NewRemoteNode(id RemoteNodeId, validatorPublicKey *bls.PublicKey, srv *Server, cmgr *ConnectionManager, keystore *BLSKeystore, @@ -167,6 +155,16 @@ func (rn *RemoteNode) setStatusConnected() { rn.connectionStatus = RemoteNodeStatus_Connected } +// setStatusVersionSent sets the connection status of the remote node to version sent. +func (rn *RemoteNode) setStatusVersionSent() { + rn.connectionStatus = RemoteNodeStatus_VersionSent +} + +// setStatusVerackSent sets the connection status of the remote node to verack sent. +func (rn *RemoteNode) setStatusVerackSent() { + rn.connectionStatus = RemoteNodeStatus_VerackSent +} + // setStatusTerminated sets the connection status of the remote node to terminated. func (rn *RemoteNode) setStatusTerminated() { rn.connectionStatus = RemoteNodeStatus_Terminated @@ -197,14 +195,6 @@ func (rn *RemoteNode) GetUserAgent() string { return rn.handshakeMetadata.userAgent } -func (rn *RemoteNode) getHandshakeStage() HandshakeStage { - return rn.handshakeMetadata.handshakeStage -} - -func (rn *RemoteNode) setHandshakeStage(stage HandshakeStage) { - rn.handshakeMetadata.handshakeStage = stage -} - func (rn *RemoteNode) IsInbound() bool { return rn.peer != nil && !rn.peer.IsOutbound() } @@ -307,7 +297,8 @@ func (rn *RemoteNode) Disconnect() { switch rn.connectionStatus { case RemoteNodeStatus_Attempted: rn.cmgr.CloseAttemptedConnection(id) - case RemoteNodeStatus_Connected, RemoteNodeStatus_HandshakeCompleted: + case RemoteNodeStatus_Connected, RemoteNodeStatus_VersionSent, RemoteNodeStatus_VerackSent, + RemoteNodeStatus_HandshakeCompleted: rn.cmgr.CloseConnection(id) } rn.setStatusTerminated() @@ -340,9 +331,6 @@ func (rn *RemoteNode) InitiateHandshake(nonce uint64) error { if rn.connectionStatus != RemoteNodeStatus_Connected { return 
fmt.Errorf("InitiateHandshake: Remote node is not connected") } - if rn.getHandshakeStage() != HandshakeStage_NotStarted { - return fmt.Errorf("InitiateHandshake: Handshake has already been initiated") - } if rn.GetPeer().IsOutbound() { versionTimeExpected := time.Now().Add(rn.params.VersionNegotiationTimeout) @@ -350,7 +338,7 @@ func (rn *RemoteNode) InitiateHandshake(nonce uint64) error { if err := rn.sendVersionMessage(nonce); err != nil { return fmt.Errorf("InitiateHandshake: Problem sending version message to peer (id= %d): %v", rn.id, err) } - rn.setHandshakeStage(HandshakeStage_VersionSent) + rn.setStatusVersionSent() } return nil } @@ -404,11 +392,9 @@ func (rn *RemoteNode) HandleVersionMessage(verMsg *MsgDeSoVersion, responseNonce rn.mtx.Lock() defer rn.mtx.Unlock() - if rn.connectionStatus != RemoteNodeStatus_Connected { - return fmt.Errorf("HandleVersionMessage: RemoteNode is not connected") - } - if rn.getHandshakeStage() != HandshakeStage_NotStarted && rn.getHandshakeStage() != HandshakeStage_VersionSent { - return fmt.Errorf("HandleVersionMessage: Handshake has already been initiated, stage: %v", rn.getHandshakeStage()) + if rn.connectionStatus != RemoteNodeStatus_Connected && rn.connectionStatus != RemoteNodeStatus_VersionSent { + return fmt.Errorf("HandleVersionMessage: RemoteNode is not connected or version exchange has already "+ + "been completed, connectionStatus: %v", rn.connectionStatus) } // Verify that the peer's version matches our minimal supported version. @@ -429,8 +415,17 @@ func (rn *RemoteNode) HandleVersionMessage(verMsg *MsgDeSoVersion, responseNonce // Decide on the protocol version to use for this connection. negotiatedVersion := rn.params.ProtocolVersion if verMsg.Version < rn.params.ProtocolVersion.ToUint64() { + // In order to smoothly transition to the PoS fork, we prevent establishing new outbound connections with + // outdated nodes that run on ProtocolVersion1. 
This is because ProtocolVersion1 nodes will not be able to + // validate the PoS blocks and will be stuck on the PoW chain, unless they upgrade to ProtocolVersion2. + if rn.params.ProtocolVersion == ProtocolVersion2 && rn.IsOutbound() { + return fmt.Errorf("RemoteNode.HandleVersionMessage: Requesting disconnect for id: (%v). Version too low. "+ + "Outbound RemoteNodes must use at least ProtocolVersion2, instead received version: %v", rn.id, verMsg.Version) + } + negotiatedVersion = NewProtocolVersionType(verMsg.Version) } + vMeta.negotiatedProtocolVersion = negotiatedVersion // Record the services the peer is advertising. @@ -455,7 +450,7 @@ func (rn *RemoteNode) HandleVersionMessage(verMsg *MsgDeSoVersion, responseNonce vMeta.minTxFeeRateNanosPerKB = verMsg.MinFeeRateNanosPerKB // Respond to the version message if this is an inbound peer. - if !rn.peer.IsOutbound() { + if !rn.IsOutbound() { if err := rn.sendVersionMessage(responseNonce); err != nil { return errors.Wrapf(err, "RemoteNode.HandleVersionMessage: Problem sending version message to peer (id= %d)", rn.id) } @@ -473,7 +468,7 @@ func (rn *RemoteNode) HandleVersionMessage(verMsg *MsgDeSoVersion, responseNonce // Update the timeSource now that we've gotten a version message from the peer. 
rn.cmgr.AddTimeSample(rn.peer.Address(), timeConnected) - rn.setHandshakeStage(HandshakeStage_VerackSent) + rn.setStatusVerackSent() return nil } @@ -526,16 +521,11 @@ func (rn *RemoteNode) HandleVerackMessage(vrkMsg *MsgDeSoVerack) error { rn.mtx.Lock() defer rn.mtx.Unlock() - if rn.connectionStatus != RemoteNodeStatus_Connected { + if rn.connectionStatus != RemoteNodeStatus_VerackSent { return fmt.Errorf("RemoteNode.HandleVerackMessage: Requesting disconnect for id: (%v) "+ "verack received while in state: %v", rn.id, rn.connectionStatus) } - if rn.getHandshakeStage() != HandshakeStage_VerackSent { - return fmt.Errorf("RemoteNode.HandleVerackMessage: Requesting disconnect for id: (%v) "+ - "verack received while in handshake stage: %v", rn.id, rn.getHandshakeStage()) - } - if rn.verackTimeExpected != nil && rn.verackTimeExpected.Before(time.Now()) { return fmt.Errorf("RemoteNode.HandleVerackMessage: Requesting disconnect for id: (%v) "+ "verack timeout. Time expected: %v, now: %v", rn.id, rn.verackTimeExpected.UnixMicro(), time.Now().UnixMicro()) @@ -556,9 +546,8 @@ func (rn *RemoteNode) HandleVerackMessage(vrkMsg *MsgDeSoVerack) error { // If we get here then the peer has successfully completed the handshake. 
vMeta.versionNegotiated = true - rn._logVersionSuccess(rn.peer) + rn._logVersionSuccess() rn.setStatusHandshakeCompleted() - rn.setHandshakeStage(HandshakeStage_Completed) rn.srv.NotifyHandshakePeerMessage(rn.peer) return nil @@ -639,7 +628,7 @@ func (rn *RemoteNode) validateVerackPoS(vrkMsg *MsgDeSoVerack) error { return nil } -func (rn *RemoteNode) _logVersionSuccess(peer *Peer) { +func (rn *RemoteNode) _logVersionSuccess() { inboundStr := "INBOUND" if rn.IsOutbound() { inboundStr = "OUTBOUND" @@ -648,7 +637,7 @@ func (rn *RemoteNode) _logVersionSuccess(peer *Peer) { if !rn.IsPersistent() { persistentStr = "NON-PERSISTENT" } - logStr := fmt.Sprintf("SUCCESS version negotiation for (%s) (%s) peer (%v).", inboundStr, persistentStr, peer) + logStr := fmt.Sprintf("SUCCESS version negotiation for (%s) (%s) id=(%v).", inboundStr, persistentStr, rn.id.ToUint64()) glog.V(1).Info(logStr) } From c54aac3c7bc8e6e03275bb7d566cca01b1f5e536 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 12 Jan 2024 11:43:53 -0800 Subject: [PATCH 353/762] Add db intersection limit test. --- lib/block_view_lockups_test.go | 118 +++++++++++++++++++++++++++++++++ lib/db_utils.go | 5 +- 2 files changed, 121 insertions(+), 2 deletions(-) diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 4110ba7f6..7e6e48311 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -4012,6 +4012,124 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) } +func TestVestingIntersectionLimit(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // Validate the default value of the MaximumVestedIntersectionsPerLockupTransaction parameter. 
+	utxoView, err :=
+		NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil)
+	require.NoError(t, err)
+	require.Equal(t, utxoView.GetCurrentGlobalParamsEntry().MaximumVestedIntersectionsPerLockupTransaction, 100)
+
+	// Generate consecutive vested locked balance entries equal to this limit.
+	for ii := 0; ii < utxoView.GetCurrentGlobalParamsEntry().MaximumVestedIntersectionsPerLockupTransaction; ii++ {
+		{
+			_, _, _, err := _coinLockupWithConnectTimestamp(
+				t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb,
+				m0Pub, m0Priv, m0Pub, m0Pub,
+				int64(ii*1000)+1, int64(ii*1000)+1000, uint256.NewInt().SetUint64(1000), 0)
+			require.NoError(t, err)
+		}
+	}
+
+	// Ensure we can consolidate a single vested lockup on top of all of these entries,
+	// i.e. intersecting exactly the maximum allowed number of existing entries.
+	maxIntersections := utxoView.GetCurrentGlobalParamsEntry().MaximumVestedIntersectionsPerLockupTransaction
+	{
+		_, _, _, err := _coinLockupWithConnectTimestamp(
+			t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb,
+			m0Pub, m0Priv, m0Pub, m0Pub,
+			1, int64((maxIntersections-1)*1000)+1000,
+			uint256.NewInt().SetUint64(uint64(maxIntersections)*1000), 0)
+		require.NoError(t, err)
+	}
+
+	// Validate the consolidation.
+ m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) + m0PKID := m0PKIDEntry.PKID + utxoView, err = + NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + for ii := 0; ii < utxoView.GetCurrentGlobalParamsEntry().MaximumVestedIntersectionsPerLockupTransaction; ii++ { + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + HODLerPKID: *m0PKID, + ProfilePKID: *m0PKID, + UnlockTimestampNanoSecs: int64(ii*1000) + 1, + VestingEndTimestampNanoSecs: int64(ii*1000) + 1000, + }) + require.NoError(t, err) + require.True(t, m0LockedBalanceEntry != nil) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(2000))) + } + + // Now add another vested lockup, pushing us over the limit. + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + int64(maxIntersections*1000)+1, + int64(maxIntersections*1000)+1000, + uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Now try to consolidate on top of all previous entries. + // (This should fail -- RuleErrorCoinLockupViolatesVestingIntersectionLimit) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1, int64((maxIntersections)*1000)+1000, + uint256.NewInt().SetUint64(uint64(maxIntersections)*1000), 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupViolatesVestingIntersectionLimit) + } + + // Now we try to consolidate on top of all previous entries with offset bounds to ensure db reads are + // functioning properly. 
+ // (This should fail -- RuleErrorCoinLockupViolatesVestingIntersectionLimit) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, m0Priv, m0Pub, m0Pub, + 1000, int64((maxIntersections)*1000)+1, + uint256.NewInt().SetUint64(uint64(maxIntersections)*1000), 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupViolatesVestingIntersectionLimit) + } + + // Now we try to unlock all previous entries just to ensure GetUnlockableLockedBalanceEntries is functioning. + utxoView, err = + NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + startingBalanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) + require.True(t, startingBalanceEntry != nil) + { + _, _, _, err := _coinUnlockWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + m0Pub, + int64(maxIntersections*1000)+1001, + ) + require.NoError(t, err) + } + utxoView, err = + NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + finalBalanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) + require.True(t, finalBalanceEntry != nil) + require.True(t, + uint256.NewInt().Sub(&finalBalanceEntry.BalanceNanos, &startingBalanceEntry.BalanceNanos).Eq( + uint256.NewInt().SetUint64(uint64(maxIntersections)*2000+1000))) + + // Now just to be extra sure, check to make sure there's no more unlockable locked balance entries. 
+ unvestedUnlockable, vestedUnlockable, err := + utxoView.GetUnlockableLockedBalanceEntries(m0PKID, m0PKID, int64(maxIntersections*1000)+1001) + require.NoError(t, err) + require.True(t, len(unvestedUnlockable) == 0) + require.True(t, len(vestedUnlockable) == 0) +} + //---------------------------------------------------------- // (Testing) Lockup Setup Helper Functions //---------------------------------------------------------- diff --git a/lib/db_utils.go b/lib/db_utils.go index 125b9bf77..ae398f334 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -11350,8 +11350,9 @@ func DBGetLimitedVestedLockedBalanceEntriesWithTxn( // Check if we've found too many entries. if entriesFound > limitToFetch { - return nil, errors.New("DBGetLimitedVestedLockedBalanceEntriesWithTxn: " + - "limit exhausted. Found too many relevant LockedBalanceEntries.") + return nil, errors.Wrap(RuleErrorCoinLockupViolatesVestingIntersectionLimit, + "DBGetLimitedVestedLockedBalanceEntriesWithTxn: "+ + "limit exhausted. Found too many relevant LockedBalanceEntries.") } } From a5c9d26636bd92ef53fda529d8291587808c2f16 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 12 Jan 2024 14:16:14 -0800 Subject: [PATCH 354/762] Add distribution simulator to tests. 
--- lib/block_view_lockups_test.go | 90 ++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 7e6e48311..27e0708cb 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "testing" + "time" ) func TestCoinLockupsForkHeight(t *testing.T) { @@ -4130,6 +4131,95 @@ func TestVestingIntersectionLimit(t *testing.T) { require.True(t, len(vestedUnlockable) == 0) } +func TestRealWorldLockupsUseCase(t *testing.T) { + // Initialize test chain, miner, and testMeta + testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) + + // Initialize m0, m1, m2, m3, m4, and paramUpdater + _setUpProfilesAndMintM0M1DAOCoins(testMeta) + + // We'll assume the following: + // -- Weekly Deposits + // -- Unlock starts the 1st day of the month 1 years in the future + // -- Unlocks end 1st day of the month 5 years in the future + // -- We'll simulate this for 5 years (it becomes repetitive after this) + // + // Since we're doing months as the granularity, the most we can intersect + // at any given time using this strategy is 48 which is below the intersection limit + // for vested lockups. Hence, we should not expect that error to trigger here. + // + // Note that this will simulate 3,650/7 lockups. At 1,000 DAO coin base units per day locked up, + // this means we require ~500k m0 DAO coin base units to correctly simulate. Additionally, + // it would require (ballpark) ~500k nDESO to process the transaction properly. + + // Mint more m0 DAO coins to ensure we have enough. 
+	{
+		_daoCoinTxnWithTestMeta(
+			testMeta,
+			testMeta.feeRateNanosPerKb,
+			m0Pub,
+			m0Priv,
+			DAOCoinMetadata{
+				ProfilePublicKey: m0PkBytes,
+				OperationType: DAOCoinOperationTypeMint,
+				CoinsToMintNanos: *uint256.NewInt().SetUint64(1e7),
+				CoinsToBurnNanos: uint256.Int{},
+				TransferRestrictionStatus: 0,
+			})
+	}
+
+	// Have the miner send m0 sufficient DESO over to cover all transaction fees.
+	_registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e7)
+
+	// Construct a time simplification function.
+	simplifyTime := func(t time.Time) time.Time {
+		return time.Date(t.Year(), t.Month(), 1, 14, 0, 0, 0, time.UTC)
+	}
+
+	// We'll start the simulation at 2pm UTC on January 1st, 2024.
+	startTime := time.Date(2024, time.January, 1, 14, 0, 0, 0, time.UTC)
+
+	// We iterate day-by-day over the 5-year simulation window.
+	totalLocked := uint256.NewInt()
+	for ii := 0; ii < 365*5; ii++ {
+		// Check if it's time for a deposit.
+		if ii%7 != 0 {
+			continue
+		}
+
+		// We simulate the block connect time as well for fun.
+		blockConnectTime := startTime.AddDate(0, 0, ii)
+
+		// Find the lockup start time in 1 year.
+		nextLockupStartTime := startTime.AddDate(1, 0, ii)
+
+		// Find the lockup end time in 5 years.
+		nextLockupEndTime := startTime.AddDate(5, 0, ii)
+
+		// Simplify both to increase lockup overlap probabilities.
+		nextLockupStartTime = simplifyTime(nextLockupStartTime)
+		nextLockupEndTime = simplifyTime(nextLockupEndTime)
+
+		// Construct and execute the lockup.
+		{
+			// NOTE: Subtracting 1 nanosecond from the end time is important and prevents
+			// annoying "empty balance" errors from consolidation in the future.
+			_, _, _, err := _coinLockupWithConnectTimestamp(
+				t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb,
+				m0Pub, m0Priv, m0Pub, m0Pub,
+				nextLockupStartTime.UnixNano(),
+				nextLockupEndTime.UnixNano()-1,
+				uint256.NewInt().SetUint64(1000),
+				blockConnectTime.UnixNano())
+			require.NoError(t, err)
+		}
+
+		// Add to total locked.
+ totalLocked = uint256.NewInt().Add( + totalLocked, uint256.NewInt().SetUint64(1000)) + } +} + //---------------------------------------------------------- // (Testing) Lockup Setup Helper Functions //---------------------------------------------------------- From 6949c1d7112d736026016ac7c1033af6965ace32 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 12 Jan 2024 14:54:25 -0800 Subject: [PATCH 355/762] Add support for reading all LockedBalanceEntries held by a PKID. --- lib/block_view_lockups.go | 68 +++++++++++++++++++++++++++ lib/block_view_lockups_test.go | 25 ++++++++++ lib/db_utils.go | 84 ++++++++++++++++++++++++++++++++++ 3 files changed, 177 insertions(+) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 92cb5885d..6c611cf85 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -224,6 +224,44 @@ func (bav *UtxoView) _deleteLockedBalanceEntry(lockedBalanceEntry *LockedBalance // Get Helper Functions for LockedBalanceEntry +func (bav *UtxoView) GetAllLockedBalanceEntriesForHodlerPKID( + hodlerPKID *PKID, +) ( + _lockedBalanceEntries []*LockedBalanceEntry, + _err error, +) { + // Pull entries from db. + dbLockedBalanceEntries, err := DBGetAllLockedBalanceEntriesForHodlerPKID( + bav.Handle, bav.Snapshot, hodlerPKID) + if err != nil { + return nil, + errors.Wrap(err, "GetLockedBalanceEntryForLockedBalanceEntryKey") + } + + // Cache entries found in the db. + for _, lockedBalanceEntry := range dbLockedBalanceEntries { + if _, exists := bav.LockedBalanceEntryKeyToLockedBalanceEntry[lockedBalanceEntry.ToMapKey()]; !exists { + bav._setLockedBalanceEntry(lockedBalanceEntry) + } + } + + // Pull relevant entries from the view and return. 
+ var lockedBalanceEntries []*LockedBalanceEntry + for _, lockedBalanceEntry := range bav.LockedBalanceEntryKeyToLockedBalanceEntry { + if lockedBalanceEntry.HODLerPKID.Eq(hodlerPKID) && !lockedBalanceEntry.isDeleted { + lockedBalanceEntries = append(lockedBalanceEntries, lockedBalanceEntry) + } + } + + // Sort by unlock time for convenience. + sort.Slice(lockedBalanceEntries, func(ii, jj int) bool { + return lockedBalanceEntries[ii].UnlockTimestampNanoSecs < + lockedBalanceEntries[jj].UnlockTimestampNanoSecs + }) + + return lockedBalanceEntries, nil +} + func (bav *UtxoView) GetLockedBalanceEntryForLockedBalanceEntryKey( lockedBalanceEntryKey *LockedBalanceEntryKey, ) ( @@ -569,6 +607,36 @@ func (bav *UtxoView) _deleteLockupYieldCurvePoint(point *LockupYieldCurvePoint) // Get Helper Functions for LockupYieldCurvePoint +func (bav *UtxoView) GetAllYieldCurvePoints(profilePKID *PKID) (map[LockupYieldCurvePointKey]*LockupYieldCurvePoint, error) { + // Fetch all yield curve points in the db. + dbYieldCurvePoints, err := DBGetAllYieldCurvePointsByProfilePKID( + bav.GetDbAdapter().badgerDb, bav.Snapshot, profilePKID) + if err != nil { + return nil, errors.Wrap(err, "GetLocalYieldCurvePoints") + } + + // Cache the db points in the view. + // While there's more efficient ways to do this with specialized badger seek operations, this is sufficient for now. + if len(dbYieldCurvePoints) > 0 { + // Check if there's a yield curve in the view for the associated profile. + if _, mapInView := bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID]; !mapInView { + bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID] = + make(map[LockupYieldCurvePointKey]*LockupYieldCurvePoint) + } + + // Check if any of the points needs to be cached in the view. 
+ for _, yieldCurvePoint := range dbYieldCurvePoints { + _, pointInView := + bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID][yieldCurvePoint.ToMapKey()] + if !pointInView { + bav._setLockupYieldCurvePoint(yieldCurvePoint) + } + } + } + + return bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID], nil +} + func (bav *UtxoView) GetYieldCurvePointByProfilePKIDAndDurationNanoSecs(profilePKID *PKID, lockupDurationNanoSecs int64) (_lockupYieldCurvePoint *LockupYieldCurvePoint, _err error) { var lockupYieldCurvePoint *LockupYieldCurvePoint diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 27e0708cb..c3f05c7ce 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -4218,6 +4218,31 @@ func TestRealWorldLockupsUseCase(t *testing.T) { totalLocked = uint256.NewInt().Add( totalLocked, uint256.NewInt().SetUint64(1000)) } + + // Verify the locked balance entries in the db. + utxoView, err := + NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) + m0PKID := m0PKIDEntry.PKID + lockedBalanceEntries, err := utxoView.GetAllLockedBalanceEntriesForHodlerPKID(m0PKID) + require.NoError(t, err) + + // Verify the lockedBalanceEntries locked the correct amount and that the entries are consecutive. + totalLockedFound := uint256.NewInt() + for ii, lockedBalanceEntry := range lockedBalanceEntries { + // Add to the balance found. + totalLockedFound = uint256.NewInt().Add( + totalLockedFound, &lockedBalanceEntry.BalanceBaseUnits) + + // Check if we're consecutive. 
+ if ii != len(lockedBalanceEntries)-1 { + require.Equal(t, + lockedBalanceEntry.VestingEndTimestampNanoSecs+1, + lockedBalanceEntries[ii+1].UnlockTimestampNanoSecs) + } + } + require.True(t, totalLockedFound.Eq(totalLocked)) } //---------------------------------------------------------- diff --git a/lib/db_utils.go b/lib/db_utils.go index ae398f334..960dc178c 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -10863,6 +10863,12 @@ func _dbKeyForLockedBalanceEntry(lockedBalanceEntry *LockedBalanceEntry) []byte return append(key, EncodeUint64(uint64(lockedBalanceEntry.VestingEndTimestampNanoSecs))...) } +func DBPrefixForLockedBalanceEntriesOnHodler(lockedBalanceEntry *LockedBalanceEntry) []byte { + key := append([]byte{}, Prefixes.PrefixLockedBalanceEntry...) + key = append(key, lockedBalanceEntry.HODLerPKID[:]...) + return key +} + func DBPrefixForVestedLockedBalanceEntriesOnUnlockTimestamp(lockedBalanceEntry *LockedBalanceEntry) ([]byte, error) { key := append([]byte{}, Prefixes.PrefixLockedBalanceEntry...) key = append(key, lockedBalanceEntry.HODLerPKID[:]...) 
@@ -11012,6 +11018,84 @@ func DBGetLockedBalanceEntryForLockedBalanceEntryKeyWithTxn( return DecodeDeSoEncoder(&LockedBalanceEntry{}, bytes.NewReader(lockedBalanceEntryBytes)) } +func DBGetAllLockedBalanceEntriesForHodlerPKID( + handle *badger.DB, + snap *Snapshot, + hodlerPKID *PKID, +) ( + _lockedBalanceEntries []*LockedBalanceEntry, + _err error, +) { + var lockedBalanceEntries []*LockedBalanceEntry + var err error + handle.View(func(txn *badger.Txn) error { + lockedBalanceEntries, err = DBGetAllLockedBalanceEntriesForHodlerPKIDWithTxn( + txn, snap, hodlerPKID) + return nil + }) + return lockedBalanceEntries, err +} + +func DBGetAllLockedBalanceEntriesForHodlerPKIDWithTxn( + txn *badger.Txn, + snap *Snapshot, + hodlerPKID *PKID, +) ( + _lockedBalanceEntries []*LockedBalanceEntry, + _err error, +) { + // Start at + startKey := DBPrefixForLockedBalanceEntriesOnHodler(&LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + }) + + // Valid for prefix + prefixKey := DBPrefixForLockedBalanceEntriesOnHodler(&LockedBalanceEntry{ + HODLerPKID: hodlerPKID, + }) + + // Create a reverse iterator. + opts := badger.DefaultIteratorOptions + iterator := txn.NewIterator(opts) + defer iterator.Close() + + // Store relevant LockedBalanceEntries to return. + var lockedBalanceEntries []*LockedBalanceEntry + + // Loop until we've exhausted all unlockable unvested locked balance entries. + for iterator.Seek(startKey); iterator.ValidForPrefix(prefixKey); iterator.Next() { + // Retrieve the LockedBalanceEntryBytes. + lockedBalanceEntryBytes, err := iterator.Item().ValueCopy(nil) + if err != nil { + return nil, + errors.Wrapf(err, "DBGetAllLockedBalanceEntriesForHodlerPKIDWithTxn: "+ + "error retrieveing LockedBalanceEntry: ") + } + + // Convert the LockedBalanceEntryBytes to LockedBalanceEntry. 
+ rr := bytes.NewReader(lockedBalanceEntryBytes) + lockedBalanceEntry, err := DecodeDeSoEncoder(&LockedBalanceEntry{}, rr) + if err != nil { + return nil, + errors.Wrapf(err, "DBGetAllLockedBalanceEntriesForHodlerPKIDWithTxn: "+ + "error decoding LockedBalanceEntry: ") + } + + // Sanity check the locked balance entry as relevant. + if !lockedBalanceEntry.HODLerPKID.Eq(hodlerPKID) { + return nil, + errors.New("DBGetAllLockedBalanceEntriesForHodlerPKIDWithTxn: " + + "found invalid LockedBalanceEntry; this shouldn't happen") + } + + // Add the locked balance entry to the return list. + lockedBalanceEntries = append(lockedBalanceEntries, lockedBalanceEntry) + } + + return lockedBalanceEntries, nil + +} + func DBGetUnlockableLockedBalanceEntries( handle *badger.DB, snap *Snapshot, From e849e91557b1e00abd93f795a3b4ce1dbd6000d9 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 12 Jan 2024 15:00:43 -0800 Subject: [PATCH 356/762] Remove redundant function. --- lib/block_view_lockups.go | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 6c611cf85..5276eafec 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -607,36 +607,6 @@ func (bav *UtxoView) _deleteLockupYieldCurvePoint(point *LockupYieldCurvePoint) // Get Helper Functions for LockupYieldCurvePoint -func (bav *UtxoView) GetAllYieldCurvePoints(profilePKID *PKID) (map[LockupYieldCurvePointKey]*LockupYieldCurvePoint, error) { - // Fetch all yield curve points in the db. - dbYieldCurvePoints, err := DBGetAllYieldCurvePointsByProfilePKID( - bav.GetDbAdapter().badgerDb, bav.Snapshot, profilePKID) - if err != nil { - return nil, errors.Wrap(err, "GetLocalYieldCurvePoints") - } - - // Cache the db points in the view. - // While there's more efficient ways to do this with specialized badger seek operations, this is sufficient for now. 
- if len(dbYieldCurvePoints) > 0 { - // Check if there's a yield curve in the view for the associated profile. - if _, mapInView := bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID]; !mapInView { - bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID] = - make(map[LockupYieldCurvePointKey]*LockupYieldCurvePoint) - } - - // Check if any of the points needs to be cached in the view. - for _, yieldCurvePoint := range dbYieldCurvePoints { - _, pointInView := - bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID][yieldCurvePoint.ToMapKey()] - if !pointInView { - bav._setLockupYieldCurvePoint(yieldCurvePoint) - } - } - } - - return bav.PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints[*profilePKID], nil -} - func (bav *UtxoView) GetYieldCurvePointByProfilePKIDAndDurationNanoSecs(profilePKID *PKID, lockupDurationNanoSecs int64) (_lockupYieldCurvePoint *LockupYieldCurvePoint, _err error) { var lockupYieldCurvePoint *LockupYieldCurvePoint From 10c59cfe7d7ade63f1eff7487ec19385e4c1b72d Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 12 Jan 2024 15:05:54 -0800 Subject: [PATCH 357/762] Change new function signature to not span multiple lines. --- lib/block_view_lockups.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index b0a5a3e5b..cafbb5417 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -710,7 +710,12 @@ func (bav *UtxoView) GetLocalYieldCurvePoints(profilePKID *PKID, lockupDuration return leftLockupPoint, rightLockupPoint, nil } -func (bav *UtxoView) GetAllYieldCurvePoints(profilePKID *PKID) (map[LockupYieldCurvePointKey]*LockupYieldCurvePoint, error) { +func (bav *UtxoView) GetAllYieldCurvePoints( + profilePKID *PKID, +) ( + map[LockupYieldCurvePointKey]*LockupYieldCurvePoint, + error, +) { // Fetch all yield curve points in the db. 
dbYieldCurvePoints, err := DBGetAllYieldCurvePointsByProfilePKID( bav.GetDbAdapter().badgerDb, bav.Snapshot, profilePKID) From f249c47b2e82987a55467fe3f403d4cae334e15c Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Tue, 16 Jan 2024 09:34:37 -0800 Subject: [PATCH 358/762] Add separate blockheight for lockups. --- lib/block_view_lockups.go | 8 ++++---- lib/block_view_lockups_test.go | 13 ++++++------- lib/constants.go | 15 +++++++++++++++ 3 files changed, 25 insertions(+), 11 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index cafbb5417..a43302b98 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1007,7 +1007,7 @@ func (bav *UtxoView) _connectCoinLockup( var utxoOpsForTxn []*UtxoOperation // Validate the starting block height. - if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || + if blockHeight < bav.Params.ForkHeights.LockupsBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return 0, 0, nil, errors.Wrap(RuleErrorLockupTxnBeforeBlockHeight, "_connectCoinLockup") @@ -1884,7 +1884,7 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( var utxoOpsForTxn []*UtxoOperation // Validate the starting block height. - if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || + if blockHeight < bav.Params.ForkHeights.LockupsBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return 0, 0, nil, errors.Wrap(RuleErrorLockupTxnBeforeBlockHeight, "_connectUpdateCoinLockupParams") @@ -2140,7 +2140,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( var utxoOpsForTxn []*UtxoOperation // Validate the starting block height. 
- if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || + if blockHeight < bav.Params.ForkHeights.LockupsBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return 0, 0, nil, errors.Wrap(RuleErrorLockupTxnBeforeBlockHeight, "_connectCoinLockupTransfer") @@ -2415,7 +2415,7 @@ func (bav *UtxoView) _connectCoinUnlock( var utxoOpsForTxn []*UtxoOperation // Validate the starting block height. - if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight || + if blockHeight < bav.Params.ForkHeights.LockupsBlockHeight || blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight { return 0, 0, nil, errors.Wrap(RuleErrorLockupTxnBeforeBlockHeight, "_connectCoinUnlock") diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index bc6151b0a..1fd24f348 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -13,7 +13,7 @@ import ( func TestCoinLockupsForkHeight(t *testing.T) { // Test and ensure lockup transactions cannot trigger without: - // (a) ProofOfStake1StateSetupBlockHeight Fork + // (a) LockupsBlockHeight Fork // Initialize balance model fork heights. setBalanceModelBlockHeights(t) @@ -27,7 +27,7 @@ func TestCoinLockupsForkHeight(t *testing.T) { params.ForkHeights.BalanceModelBlockHeight = uint32(1) // Initialize PoS fork heights. 
- params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(25) + params.ForkHeights.LockupsBlockHeight = uint32(25) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) @@ -58,7 +58,7 @@ func TestCoinLockupsForkHeight(t *testing.T) { _, err := miner.MineAndProcessSingleBlock(0, mempool) require.NoError(t, err) currentBlockHeight := uint64(chain.blockTip().Height) + 1 - if currentBlockHeight == uint64(params.ForkHeights.ProofOfStake1StateSetupBlockHeight) { + if currentBlockHeight == uint64(params.ForkHeights.LockupsBlockHeight) { break } @@ -100,7 +100,7 @@ func TestCoinLockupsForkHeight(t *testing.T) { m0Pub, 0) - if currentBlockHeight < uint64(params.ForkHeights.ProofOfStake1StateSetupBlockHeight) { + if currentBlockHeight < uint64(params.ForkHeights.LockupsBlockHeight) { require.Contains(t, err1.Error(), RuleErrorLockupTxnBeforeBlockHeight) require.Contains(t, err2.Error(), RuleErrorLockupTxnBeforeBlockHeight) require.Contains(t, err3.Error(), RuleErrorLockupTxnBeforeBlockHeight) @@ -4351,9 +4351,8 @@ func _setUpMinerAndTestMetaForTimestampBasedLockupTests(t *testing.T) *TestMeta // Ensure DAO coins are enabled (a pre-requisite for lockups) params.ForkHeights.DAOCoinBlockHeight = uint32(0) - // Initialize PoS fork heights. - params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) - params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = uint32(1) + // Initialize lockups block height. 
+ params.ForkHeights.LockupsBlockHeight = uint32(1) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) diff --git a/lib/constants.go b/lib/constants.go index 3dbc9ed12..eb986574e 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -339,6 +339,13 @@ type ForkHeights struct { // leader schedule snapshots to be generated in advance. ProofOfStake1StateSetupBlockHeight uint32 + // LockupsBlockHeight defines the height at which we begin accepting lockup + // related transactions. These can include things like CoinLockup, UpdateCoinLockupParams, + // CoinLockupTransfer, and CoinUnlock. + // + // We specify this separately to enable independent testing when compared with other features. + LockupsBlockHeight uint32 + // ProofOfStake2ConsensusCutoverBlockHeight defines the height at which we cut over // from PoW consensus to PoS consensus. ProofOfStake2ConsensusCutoverBlockHeight uint32 @@ -756,6 +763,8 @@ var RegtestForkHeights = ForkHeights{ // FIXME: set to real block height when ready ProofOfStake2ConsensusCutoverBlockHeight: uint32(math.MaxUint32), + LockupsBlockHeight: uint32(1), + BlockRewardPatchBlockHeight: uint32(0), // Be sure to update EncoderMigrationHeights as well via @@ -915,6 +924,9 @@ var MainnetForkHeights = ForkHeights{ // FIXME: set to real block height when ready ProofOfStake2ConsensusCutoverBlockHeight: uint32(math.MaxUint32), + // FIXME: set to real block height when ready + LockupsBlockHeight: uint32(math.MaxUint32), + // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. 
} @@ -1223,6 +1235,9 @@ var TestnetForkHeights = ForkHeights{ // FIXME: set to real block height when ready ProofOfStake2ConsensusCutoverBlockHeight: uint32(math.MaxUint32), + // FIXME: set to real block height when ready + LockupsBlockHeight: uint32(math.MaxUint32), + // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. } From 482ee44998110026a547def178a2510ef0e0464d Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Tue, 16 Jan 2024 10:12:24 -0800 Subject: [PATCH 359/762] Remove error prone pointer equality check from core. --- lib/blockchain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 9e4902afb..d807c58f0 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1548,7 +1548,7 @@ func _FindCommonAncestor(node1 *BlockNode, node2 *BlockNode) *BlockNode { // reach the end of the lists. We only need to check node1 for nil // since they're the same height and we are iterating both back // in tandem. - for node1 != nil && node1 != node2 { + for node1 != nil && !node1.Hash.IsEqual(node2.Hash) { node1 = node1.Parent node2 = node2.Parent } From 9312bfc1b77380286822a7c9334fa74406a79185 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 17 Jan 2024 14:27:00 -0500 Subject: [PATCH 360/762] Address Minor DH comments on PoS syncing PR (#915) --- lib/pos_epoch_complete_hook.go | 3 +++ lib/server.go | 12 ++++++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 728eec41d..6ee56886f 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -143,6 +143,9 @@ func (bav *UtxoView) runEpochCompleteSnapshotGeneration(epochNumber uint64) erro // simulateNextEpochEntry simulates the block range for the next epoch given the current epoch's final // block height and epoch number. 
The view and timestamp for the simulated epoch are left empty since they can't // be easily simulated, so DO NOT USE CreatedAtBlockTimestampNanoSecs or InitialView from the returned EpochEntry. +// +// We use this function to simulate the next epoch's entry so we can predict the leader schedule and validator set +// for the next epoch before the current epoch is over. This is useful for validating orphan blocks. func (bav *UtxoView) simulateNextEpochEntry(currentEpochNumber uint64, currentEpochFinalBlockHeight uint64) (*EpochEntry, error) { return bav.computeNextEpochEntry( currentEpochNumber, diff --git a/lib/server.go b/lib/server.go index c6ad463c0..8fb934bd5 100644 --- a/lib/server.go +++ b/lib/server.go @@ -505,11 +505,11 @@ func NewServer( uint64(_chain.blockTip().Height), _mempoolDumpDir, false, - 1024*1024*1024*3, // Max mempool Size = 3GB; TODO make this a param - 60*1000, // Mempool dumper frequency = 60 seconds; TODO make this a param - 1, // Fee estimator mempool blocks; TODO make this a param + 1024*1024*1024*3, // Max mempool Size = 3GB; TODO make this a flag + 60*1000, // Mempool dumper frequency = 60 seconds; TODO make this a flag + 1, // Fee estimator mempool blocks; TODO make this a flag []*MsgDeSoBlock{latestBlock}, - 1, // Fee estimator past blocks; TODO make this a param + 1, // Fee estimator past blocks; TODO make this a flag ) if err != nil { return nil, errors.Wrapf(err, "NewServer: Problem initializing PoS mempool"), true @@ -1790,14 +1790,14 @@ func (srv *Server) _addNewTxn( } srv.blockchain.ChainLock.RLock() - defer srv.blockchain.ChainLock.RUnlock() + blockHeight := srv.blockchain.BlockTip().Height + srv.blockchain.ChainLock.RUnlock() // Only attempt to add the transaction to the PoW mempool if we're on the // PoW protocol. If we're on the PoW protocol, then we use the PoW mempool's, // txn validity checks to signal whether the txn has been added or not. 
The PoW // mempool has stricter txn validity checks than the PoS mempool, so this works // out conveniently, as it allows us to always add a txn to the PoS mempool. - blockHeight := srv.blockchain.BlockTip().Height if blockHeight < srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { _, err := srv.mempool.ProcessTransaction( txn, true /*allowUnconnectedTxn*/, rateLimit, peerID, verifySignatures) From 9d7a73b42e0b9ac011670c529ebb91f018e9673c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 18 Jan 2024 13:24:58 -0500 Subject: [PATCH 361/762] Fail ProcessBlockPoS When Incoming Block Is Before PoS Cutover (#916) --- lib/pos_blockchain.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 021ab8468..5736fe280 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -52,6 +52,13 @@ func (bc *Blockchain) ProcessHeaderPoS(header *MsgDeSoHeader) (_isMainChain bool func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader) ( _isMainChain bool, _isOrphan bool, _err error, ) { + if header.Height < uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + return false, false, errors.Errorf( + "processHeaderPoS: Header height %d is less than the ProofOfStake2ConsensusCutoverBlockHeight %d", + header.Height, bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight, + ) + } + headerHash, err := header.Hash() if err != nil { return false, false, errors.Wrapf(err, "processHeaderPoS: Problem hashing header") @@ -206,6 +213,13 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v _missingBlockHashes []*BlockHash, _err error, ) { + if block.Header.Height < uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + return false, false, nil, errors.Errorf( + "processBlockPoS: Header height %d is less than the ProofOfStake2ConsensusCutoverBlockHeight %d", + 
block.Header.Height, bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight, + ) + } + // If we can't hash the block, we can never store in the block index and we should throw it out immediately. if _, err := block.Hash(); err != nil { return false, false, nil, errors.Wrapf(err, "processBlockPoS: Problem hashing block") From 7fc01844302f8f3ffe1ba662fdf56131520ccbe2 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 18 Jan 2024 13:33:18 -0500 Subject: [PATCH 362/762] Differentiate AggQC Validator Set from HighQC Validator Set (#917) --- consensus/utils.go | 18 +++++++--- consensus/utils_test.go | 9 ++--- lib/pos_blockchain.go | 9 ++--- lib/pos_blockchain_test.go | 68 +++++++++++++++++++------------------- 4 files changed, 58 insertions(+), 46 deletions(-) diff --git a/consensus/utils.go b/consensus/utils.go index 90e1024a9..44e14bdd7 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -66,16 +66,26 @@ func IsValidSuperMajorityQuorumCertificate(qc QuorumCertificate, validators []Va return isValidSignatureManyPublicKeys(validatorPublicKeysInQC, qc.GetAggregatedSignature().GetSignature(), signaturePayload[:]) } -func IsValidSuperMajorityAggregateQuorumCertificate(aggQC AggregateQuorumCertificate, validators []Validator) bool { - if !isProperlyFormedAggregateQC(aggQC) || !isProperlyFormedValidatorSet(validators) { +// IsValidSuperMajorityAggregateQuorumCertificate validates that the aggregate QC is properly formed and signed +// by a super-majority of validators in the network. It takes in two sets of validator sets because the validator +// set may shift between the view that is timed out and the high QC view. 
The two validator +// sets are defined as: +// - aggQCValidators: The validator set that signed the timeouts for the view that has timed out (the view in the aggregate QC) +// - highQCValidators: The validator set that signed the high QC (the view in the high QC) +func IsValidSuperMajorityAggregateQuorumCertificate(aggQC AggregateQuorumCertificate, aggQCValidators []Validator, highQCValidators []Validator) bool { + if !isProperlyFormedAggregateQC(aggQC) { return false } - if !IsValidSuperMajorityQuorumCertificate(aggQC.GetHighQC(), validators) { + if !isProperlyFormedValidatorSet(aggQCValidators) || !isProperlyFormedValidatorSet(highQCValidators) { return false } - hasSuperMajorityStake, signerPublicKeys := isSuperMajorityStakeSignersList(aggQC.GetAggregatedSignature().GetSignersList(), validators) + if !IsValidSuperMajorityQuorumCertificate(aggQC.GetHighQC(), highQCValidators) { + return false + } + + hasSuperMajorityStake, signerPublicKeys := isSuperMajorityStakeSignersList(aggQC.GetAggregatedSignature().GetSignersList(), aggQCValidators) if !hasSuperMajorityStake { return false } diff --git a/consensus/utils_test.go b/consensus/utils_test.go index da7216b80..740203b1d 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -96,12 +96,13 @@ func TestIsValidSuperMajorityQuorumCertificate(t *testing.T) { func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { // Test malformed aggregate QC { - require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(nil, createDummyValidatorList())) + validatorList := createDummyValidatorList() + require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(nil, validatorList, validatorList)) } // Test malformed validator set { - require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(createDummyAggQc(2, 1), nil)) + require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(createDummyAggQc(2, 1), nil, nil)) } // Set up test validator data @@ -163,7 +164,7 @@ func 
TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { signature: validator1TimeoutSignature, }, } - require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators)) + require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators, validators)) } // Test with 5/6 super-majority stake @@ -186,7 +187,7 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { signature: timeoutAggSig, }, } - require.True(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators)) + require.True(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators, validators)) } } diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 5736fe280..68ce93b78 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -503,7 +503,7 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { } } // Okay now we have the validator set ordered by stake, we can validate the QC. - if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake); err != nil { + if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake, validatorsByStake); err != nil { // If we hit an error, we know that the QC is invalid, and we'll never accept this block, // As a spam-prevention measure, we just throw away this block and don't store it. return nil @@ -580,7 +580,7 @@ func (bc *Blockchain) validateLeaderAndQC(block *MsgDeSoBlock) (_passedSpamPreve } // Validate the block's QC. If it's invalid, we return true for failed spam prevention check. - if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake); err != nil { + if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake, validatorsByStake); err != nil { return false, nil } @@ -1122,11 +1122,12 @@ func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBloc // isValidPoSQuorumCertificate validates that the QC of this block is valid, meaning a super majority // of the validator set has voted (or timed out). 
Assumes ValidatorEntry list is sorted. -func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validatorSet []*ValidatorEntry) error { +func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validatorSet []*ValidatorEntry, previousValidatorSet []*ValidatorEntry) error { validators := toConsensusValidators(validatorSet) + previousValidators := toConsensusValidators(previousValidatorSet) if !block.Header.ValidatorsTimeoutAggregateQC.isEmpty() { if !consensus.IsValidSuperMajorityAggregateQuorumCertificate( - block.Header.ValidatorsTimeoutAggregateQC, validators) { + block.Header.ValidatorsTimeoutAggregateQC, validators, previousValidators) { return RuleErrorInvalidTimeoutQC } return nil diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 2166f2351..f9af8ca43 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -912,7 +912,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { }, } // Empty QC for both vote and timeout should fail - err := bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err := bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) @@ -934,11 +934,11 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { }, } desoBlock.Header.ValidatorsVoteQC = voteQC - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.NoError(t, err) // Empty validator set should fail - err = bc.isValidPoSQuorumCertificate(desoBlock, []*ValidatorEntry{}) + err = bc.isValidPoSQuorumCertificate(desoBlock, []*ValidatorEntry{}, []*ValidatorEntry{}) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) @@ -946,13 +946,13 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { { // Zero stake amount validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(0) - err = 
bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil stake amount validatorSet[0].TotalStakeAmountNanos = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) @@ -960,14 +960,14 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(3) // Nil voting public key validatorSet[0].VotingPublicKey = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Reset voting public key validatorSet[0].VotingPublicKey = m1VotingPrivateKey.PublicKey() // Nil validator entry - err = bc.isValidPoSQuorumCertificate(desoBlock, append(validatorSet, nil)) + err = bc.isValidPoSQuorumCertificate(desoBlock, append(validatorSet, nil), append(validatorSet, nil)) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) } @@ -976,41 +976,41 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { // Malformed vote QC should fail // Nil vote QC desoBlock.Header.ValidatorsVoteQC = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // View is 0 desoBlock.Header.ValidatorsVoteQC = voteQC voteQC.ProposedInView = 0 - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil block hash voteQC.ProposedInView = 6 voteQC.BlockHash = nil - err = 
bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil signers list voteQC.ValidatorsVoteAggregatedSignature.SignersList = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil Signature voteQC.ValidatorsVoteAggregatedSignature.SignersList = signersList1And2 voteQC.ValidatorsVoteAggregatedSignature.Signature = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil aggregate signature voteQC.BlockHash = hash1 voteQC.ValidatorsVoteAggregatedSignature = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Reset the ValidatorsVoteAggregatedSignature @@ -1024,21 +1024,21 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { // No supermajority in vote QC voteQC.ValidatorsVoteAggregatedSignature.SignersList = bitset.NewBitset().FromBytes([]byte{0x1}) // 0b0001, which represents validator 1 voteQC.ValidatorsVoteAggregatedSignature.Signature = vote1Signature - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) } { // Only having signature for validator 1 should fail even if signers list has validator 2 voteQC.ValidatorsVoteAggregatedSignature.SignersList = bitset.NewBitset().FromBytes([]byte{0x3}) // 0b0010, which represents validator 1 and 2 - err = 
bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Having 1 and 3 in signers list, but including signature for 2 should fail voteQC.ValidatorsVoteAggregatedSignature.SignersList = bitset.NewBitset().Set(0, true).Set(2, true) // represents validator 1 and 3 voteQC.ValidatorsVoteAggregatedSignature.Signature = aggregateSig - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) @@ -1070,7 +1070,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { desoBlock.Header.ValidatorsVoteQC = nil // Set the timeout qc to the timeout qc constructed above desoBlock.Header.ValidatorsTimeoutAggregateQC = timeoutQC - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.NoError(t, err) { @@ -1079,55 +1079,55 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { // timeout QC is interpreted as empty // View = 0 timeoutQC.TimedOutView = 0 - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil high QC timeoutQC.TimedOutView = 8 timeoutQC.ValidatorsHighQC = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // High QC has view of 0 timeoutQC.ValidatorsHighQC = voteQC timeoutQC.ValidatorsHighQC.ProposedInView = 0 - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) 
require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // No high QC views timeoutQC.ValidatorsHighQC.ProposedInView = 6 timeoutQC.ValidatorsTimeoutHighQCViews = []uint64{} - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil high QC block hash timeoutQC.ValidatorsTimeoutHighQCViews = []uint64{6, 5} timeoutQC.ValidatorsHighQC.BlockHash = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil high QC signers list timeoutQC.ValidatorsHighQC.BlockHash = hash1 timeoutQC.ValidatorsHighQC.ValidatorsVoteAggregatedSignature.SignersList = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil high QC signature timeoutQC.ValidatorsHighQC.ValidatorsVoteAggregatedSignature.SignersList = signersList1And2 timeoutQC.ValidatorsHighQC.ValidatorsVoteAggregatedSignature.Signature = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil High QC Aggregated signature timeoutQC.ValidatorsHighQC.ValidatorsVoteAggregatedSignature = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) @@ -1137,13 +1137,13 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { Signature: aggregateSig, } - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = 
bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.NoError(t, err) } { // Timed out view is not exactly one greater than high QC view timeoutQC.ValidatorsHighQC.ProposedInView = 7 - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) } @@ -1151,13 +1151,13 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { // Invalid validator set tests // Zero stake amount validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(0) - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) // Nil stake amount validatorSet[0].TotalStakeAmountNanos = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) @@ -1165,14 +1165,14 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(3) // Nil voting public key validatorSet[0].VotingPublicKey = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) // Reset voting public key validatorSet[0].VotingPublicKey = m1VotingPrivateKey.PublicKey() // Nil validator entry - err = bc.isValidPoSQuorumCertificate(desoBlock, append(validatorSet, nil)) + err = bc.isValidPoSQuorumCertificate(desoBlock, append(validatorSet, nil), append(validatorSet, nil)) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) } @@ -1181,7 +1181,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { // No supermajority 
test timeoutQC.ValidatorsTimeoutAggregatedSignature.SignersList = bitset.NewBitset().FromBytes([]byte{0x1}) // 0b0001, which represents validator 1 timeoutQC.ValidatorsTimeoutAggregatedSignature.Signature = timeout1Signature - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) } @@ -1189,14 +1189,14 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { { // Only having signature for validator 1 should fail even if signers list has validator 2 timeoutQC.ValidatorsTimeoutAggregatedSignature.SignersList = bitset.NewBitset().FromBytes([]byte{0x3}) // 0b0010, which represents validator 1 and 2 - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) // Having 1 and 3 in signers list, but including signature for 2 should fail timeoutQC.ValidatorsTimeoutAggregatedSignature.SignersList = bitset.NewBitset().Set(0, true).Set(2, true) // represents validator 1 and 3 timeoutQC.ValidatorsTimeoutAggregatedSignature.Signature = timeoutAggSig - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) } From 6da150148e42abbb46076b69ddf2421688aa6770 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 18 Jan 2024 18:34:57 -0500 Subject: [PATCH 363/762] Move Validator Set Fetch Helper Function to UtxoView (#918) * Move ValidatorSet Fetch By BlockHeight Into Blockchain Struct * Fix typos * Move helper into UtxoView --- lib/pos_consensus.go | 70 +++++++---------------------------- lib/pos_validator_set.go | 79 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 93 
insertions(+), 56 deletions(-) create mode 100644 lib/pos_validator_set.go diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 482078899..b0031c289 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -602,61 +602,30 @@ func (cc *FastHotStuffConsensus) fetchValidatorListsForSafeBlocks(blocks []*MsgD return nil, nil } - // Create a map to cache the validator set entries by epoch number. Two blocks in the same epoch will have - // the same validator set, so we can use an in-memory cache to optimize the validator set lookup for them. - validatorSetEntriesBySnapshotEpochNumber := make(map[uint64][]*ValidatorEntry) + blockHeights := collections.Transform(blocks, func(block *MsgDeSoHeader) uint64 { + // We add 1 to the block height because we need the validator set that results AFTER connecting the + // block to the blockchain, and triggering an epoch transition (if at an epoch boundary). + return block.Height + 1 + }) - // Create a UtxoView for the committed tip block. We will use this to fetch the validator set for - // all of the safe blocks. - utxoView, err := NewUtxoView(cc.blockchain.db, cc.params, cc.blockchain.postgres, cc.blockchain.snapshot, nil) + // Create a UtxoView for the committed tip block. We will use this view to simulate all epoch entries. 
+ utxoView, err := NewUtxoView(cc.blockchain.db, cc.blockchain.params, cc.blockchain.postgres, cc.blockchain.snapshot, nil) if err != nil { return nil, errors.Errorf("Error creating UtxoView: %v", err) } - // Fetch the current epoch entry for the committed tip - epochEntryAtCommittedTip, err := utxoView.GetCurrentEpochEntry() - if err != nil { - return nil, errors.Errorf("Error fetching epoch entry for committed tip: %v", err) - } - - // Fetch the next epoch entry - nextEpochEntryAfterCommittedTip, err := utxoView.simulateNextEpochEntry(epochEntryAtCommittedTip.EpochNumber, epochEntryAtCommittedTip.FinalBlockHeight) + // Fetch the validator set for each block + validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsForBlockHeights(blockHeights) if err != nil { - return nil, errors.Errorf("Error fetching next epoch entry after committed tip: %v", err) + return nil, errors.Errorf("Error fetching validator set for blocks: %v", err) } - // The input blocks can only be part of the current or next epoch entries. - possibleEpochEntriesForBlocks := []*EpochEntry{epochEntryAtCommittedTip, nextEpochEntryAfterCommittedTip} - - // Fetch the validator set at each block + // Extract the validator set at each block blocksWithValidatorLists := make([]consensus.BlockWithValidatorList, len(blocks)) for ii, block := range blocks { - // Find the epoch entry for the block. It'll either be the current epoch entry or the next one. - // We add 1 to the block height because we need the validator set that results AFTER connecting the - // block to the blockchain, and triggering an epoch transition (if at an epoch boundary). - epochEntryForBlock, err := getEpochEntryForBlockHeight(block.Height+1, possibleEpochEntriesForBlocks) - if err != nil { - return nil, errors.Errorf("Error fetching epoch number for block: %v", err) - } - - // Compute the snapshot epoch number for the block. This is the epoch number that the validator set - // for the block was snapshotted in. 
- snapshotEpochNumber, err := utxoView.ComputeSnapshotEpochNumberForEpoch(epochEntryForBlock.EpochNumber) - if err != nil { - return nil, errors.Errorf("error computing snapshot epoch number for epoch: %v", err) - } - - var validatorSetAtBlock []*ValidatorEntry - var ok bool - - // If the validator set for the block is already cached by the snapshot epoch number, then use it. - // Otherwise, fetch it from the UtxoView. - if validatorSetAtBlock, ok = validatorSetEntriesBySnapshotEpochNumber[snapshotEpochNumber]; !ok { - // We don't have the validator set for the block cached. Fetch it from the UtxoView. - validatorSetAtBlock, err = utxoView.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber) - if err != nil { - return nil, errors.Errorf("Error fetching validator set for block: %v", err) - } + validatorSetAtBlock, ok := validatorSetByBlockHeight[block.Height+1] + if !ok { + return nil, errors.Errorf("Error fetching validator set for block: %v", block) } blocksWithValidatorLists[ii] = consensus.BlockWithValidatorList{ @@ -683,17 +652,6 @@ func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView) (*PosBlockPr return NewPosBlockProducer(fc.mempool, fc.params, blockProducerPublicKey, blockProducerBlsPublicKey), nil } -// Finds the epoch entry for the block and returns the epoch number. 
-func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { - for _, epochEntry := range epochEntries { - if epochEntry.ContainsBlockHeight(blockHeight) { - return epochEntry, nil - } - } - - return nil, errors.Errorf("error finding epoch number for block height: %v", blockHeight) -} - func isValidBlockProposalEvent(event *consensus.FastHotStuffEvent, expectedEventType consensus.FastHotStuffEventType) bool { // Validate that the expected event type is a block proposal event type possibleExpectedEventTypes := []consensus.FastHotStuffEventType{ diff --git a/lib/pos_validator_set.go b/lib/pos_validator_set.go new file mode 100644 index 000000000..9cd0f9c7c --- /dev/null +++ b/lib/pos_validator_set.go @@ -0,0 +1,79 @@ +package lib + +import "github.com/pkg/errors" + +// GetSnapshotValidatorSetsForBlockHeights returns the validator set for each block height provided. +// It requires all input block heights to be in the previous, current, or next epoch. +func (bav *UtxoView) GetSnapshotValidatorSetsForBlockHeights(blockHeights []uint64) (map[uint64][]*ValidatorEntry, error) { + // Create a map to cache the validator set entries by epoch number. Two blocks in the same epoch will have + // the same validator set, so we can use an in-memory cache to optimize the validator set lookup for them. 
+ validatorSetEntriesBySnapshotEpochNumber := make(map[uint64][]*ValidatorEntry) + + // Fetch the current epoch entry + currentEpochEntry, err := bav.GetCurrentEpochEntry() + if err != nil { + return nil, errors.Errorf("Error fetching current epoch entry: %v", err) + } + + // Fetch the previous epoch entry + prevEpochEntry, err := bav.simulatePrevEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) + if err != nil { + return nil, errors.Errorf("Error fetching previous epoch entry: %v", err) + } + + // Fetch the next epoch entry + nextEpochEntry, err := bav.simulateNextEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) + if err != nil { + return nil, errors.Errorf("Error fetching next epoch entry: %v", err) + } + + // The supported block heights can only be part of the previous, current, or next epoch. + possibleEpochEntriesForBlocks := []*EpochEntry{prevEpochEntry, currentEpochEntry, nextEpochEntry} + + // Output map that will hold the validator set for each block height + validatorSetByBlockHeight := map[uint64][]*ValidatorEntry{} + + // Fetch the validator set at each block height + for _, blockHeight := range blockHeights { + epochEntryForBlock, err := findEpochEntryForBlockHeight(blockHeight, possibleEpochEntriesForBlocks) + if err != nil { + return nil, errors.Errorf("Error fetching epoch number for block height %d: %v", blockHeight, err) + } + + // Compute the snapshot epoch number for the block height. This is the epoch number that the validator set + // for the block was snapshotted in. + snapshotEpochNumber, err := bav.ComputeSnapshotEpochNumberForEpoch(epochEntryForBlock.EpochNumber) + if err != nil { + return nil, errors.Errorf("error computing snapshot epoch number for epoch number %d: %v", epochEntryForBlock.EpochNumber, err) + } + + var validatorSet []*ValidatorEntry + var ok bool + + // If the validator set for the block is already cached by the snapshot epoch number, then use it. 
+ // Otherwise, fetch it from the UtxoView. + if validatorSet, ok = validatorSetEntriesBySnapshotEpochNumber[snapshotEpochNumber]; !ok { + // We don't have the validator set for the block cached. Fetch it from the UtxoView. + validatorSet, err = bav.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber) + if err != nil { + return nil, errors.Errorf("Error fetching validator set for block: %v", err) + } + } + + validatorSetByBlockHeight[blockHeight] = validatorSet + } + + // Happy path: we fetched the validator lists for all block heights successfully. + return validatorSetByBlockHeight, nil +} + +// Given a list of epoch entries, this finds the epoch entry for the given block height. +func findEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { + for _, epochEntry := range epochEntries { + if epochEntry.ContainsBlockHeight(blockHeight) { + return epochEntry, nil + } + } + + return nil, errors.Errorf("error finding epoch entry for block height: %v", blockHeight) +} From feb8d297876278df974e1b4bdb46b201d4dcad0a Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 18 Jan 2024 18:59:56 -0500 Subject: [PATCH 364/762] Update ProcessBlockPoS Validator Set Verification To Use New Validator Set Fetch Helper (#919) * Update Validator Set Fetches in blockchain.go to Use New Helper * Cleanup --- lib/pos_blockchain.go | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 68ce93b78..cdab3255e 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -570,17 +570,25 @@ func (bc *Blockchain) validateLeaderAndQC(block *MsgDeSoBlock) (_passedSpamPreve if !isValidPartialSig { return false, nil } - // 2. Validate QC - validatorsByStake, err := utxoView.GetAllSnapshotValidatorSetEntriesByStake() + + // 2. Fetch the validator set for the current and previous block heights. 
+ validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsForBlockHeights([]uint64{ + block.Header.Height, block.Header.Height - 1, + }) if err != nil { - // This should never happen. If the parent is validated and extends from the tip, then we should - // be able to fetch the validator set at its block height for it. This failure can only happen due - // to transient badger issues. We return false for failed spam prevention check and the error. - return false, errors.Wrap(err, "validateLeaderAndQC: Problem getting validator set") + return false, errors.Wrap(err, "validateLeaderAndQC: Problem getting validator sets") + } + currentBlockValidatorSet, ok := validatorSetByBlockHeight[block.Header.Height] + if !ok { + return false, errors.Errorf("validateLeaderAndQC: Validator set for block height %d not found", block.Header.Height) + } + prevBlockValidatorSetSet, ok := validatorSetByBlockHeight[block.Header.Height-1] + if !ok { + return false, errors.Errorf("validateLeaderAndQC: Validator set for block height %d not found", block.Header.Height-1) } - // Validate the block's QC. If it's invalid, we return true for failed spam prevention check. - if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake, validatorsByStake); err != nil { + // 3. Validate the block's QC. If it's invalid, we return true for failed spam prevention check. 
+ if err = bc.isValidPoSQuorumCertificate(block, currentBlockValidatorSet, prevBlockValidatorSetSet); err != nil { return false, nil } From 2d34f585dff35b1cc85268d1607c2fa2144a839e Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 18 Jan 2024 19:17:16 -0500 Subject: [PATCH 365/762] Update processOrphanBlockPoS To Use New Validator Set Fetch Helper (#920) --- lib/pos_blockchain.go | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index cdab3255e..1e49bf55d 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -370,7 +370,7 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // We can't validate the QC without getting the current epoch entry. return errors.Wrap(err, "processOrphanBlockPoS: Problem getting current epoch entry") } - var validatorsByStake []*ValidatorEntry + // If the block is in a previous or future epoch, we need to compute the // proper validator set for the block. We do this by computing the prev/next // epoch entry and then fetching the validator set at the snapshot of the @@ -457,13 +457,6 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // potential leaders. As a spam-prevention measure, we simply return nil and throw it away. return nil } - validatorsByStake, err = utxoView.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber( - epochEntrySnapshotAtEpochNumber) - if err != nil { - return errors.Wrapf(err, - "processOrphanBlockPoS: Problem getting validator set at snapshot at epoch number %d", - epochEntrySnapshotAtEpochNumber) - } } else { // This block is in the current epoch! // First we validate the proposer vote partial signature @@ -495,19 +488,31 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // measure, we just throw away this block and don't store it. 
return nil } - // If we get here, we know we have the correct block proposer. We now fetch the validators ordered by - // stake, so we can validate the QC. - validatorsByStake, err = utxoView.GetAllSnapshotValidatorSetEntriesByStake() - if err != nil { - return errors.Wrap(err, "processOrphanBlockPoS: Problem getting validator set") - } } + + // Get the validator set for the current and previous block heights. + validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsForBlockHeights([]uint64{ + block.Header.Height, block.Header.Height - 1, + }) + if err != nil { + return errors.Wrap(err, "processOrphanBlockPoS: Problem getting validator sets") + } + currentBlockValidatorSet, ok := validatorSetByBlockHeight[block.Header.Height] + if !ok { + return errors.Errorf("processOrphanBlockPoS: Validator set for block height %d not found", block.Header.Height) + } + prevBlockValidatorSet, ok := validatorSetByBlockHeight[block.Header.Height-1] + if !ok { + return errors.Errorf("processOrphanBlockPoS: Validator set for block height %d not found", block.Header.Height-1) + } + // Okay now we have the validator set ordered by stake, we can validate the QC. - if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake, validatorsByStake); err != nil { + if err = bc.isValidPoSQuorumCertificate(block, currentBlockValidatorSet, prevBlockValidatorSet); err != nil { // If we hit an error, we know that the QC is invalid, and we'll never accept this block, // As a spam-prevention measure, we just throw away this block and don't store it. return nil } + // All blocks should pass the basic integrity validations, which ensure the block // is not malformed. If the block is malformed, we should store it as ValidateFailed. if err = bc.isProperlyFormedBlockPoS(block); err != nil { @@ -517,6 +522,7 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { } return nil } + // Add to blockIndexByHash with status STORED only as we are not sure if it's valid yet. 
_, err = bc.storeBlockInBlockIndex(block) return errors.Wrap(err, "processBlockPoS: Problem adding block to block index: ") From 2f1f3a890a80d4e3c9d1a5841ee4d2a967b0a248 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Thu, 18 Jan 2024 19:24:47 -0500 Subject: [PATCH 366/762] Consolidate pos_validator_set.go with post_snapshot_entries.go --- lib/pos_blockchain.go | 4 +- lib/pos_consensus.go | 2 +- lib/pos_snapshot_entries.go | 82 ++++++++++++++++++++++++++++++++++++- lib/pos_validator_set.go | 79 ----------------------------------- 4 files changed, 83 insertions(+), 84 deletions(-) delete mode 100644 lib/pos_validator_set.go diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 1e49bf55d..697732506 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -491,7 +491,7 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { } // Get the validator set for the current and previous block heights. - validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsForBlockHeights([]uint64{ + validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsByBlockHeights([]uint64{ block.Header.Height, block.Header.Height - 1, }) if err != nil { @@ -578,7 +578,7 @@ func (bc *Blockchain) validateLeaderAndQC(block *MsgDeSoBlock) (_passedSpamPreve } // 2. Fetch the validator set for the current and previous block heights. 
- validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsForBlockHeights([]uint64{ + validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsByBlockHeights([]uint64{ block.Header.Height, block.Header.Height - 1, }) if err != nil { diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index b0031c289..193c5096e 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -615,7 +615,7 @@ func (cc *FastHotStuffConsensus) fetchValidatorListsForSafeBlocks(blocks []*MsgD } // Fetch the validator set for each block - validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsForBlockHeights(blockHeights) + validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsByBlockHeights(blockHeights) if err != nil { return nil, errors.Errorf("Error fetching validator set for blocks: %v", err) } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 784e2fc96..d4987628e 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -3,11 +3,12 @@ package lib import ( "bytes" "fmt" - "github.com/deso-protocol/core/bls" - "github.com/deso-protocol/core/collections" "math" "sort" + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" @@ -241,6 +242,71 @@ type SnapshotValidatorSetMapKey struct { ValidatorPKID PKID } +// GetSnapshotValidatorSetsByBlockHeights returns the validator set for each block height provided. +// It requires all input block heights to be in the previous, current, or next epoch. +func (bav *UtxoView) GetSnapshotValidatorSetsByBlockHeights(blockHeights []uint64) (map[uint64][]*ValidatorEntry, error) { + // Create a map to cache the validator set entries by epoch number. Two blocks in the same epoch will have + // the same validator set, so we can use an in-memory cache to optimize the validator set lookup for them. 
+ validatorSetEntriesBySnapshotEpochNumber := make(map[uint64][]*ValidatorEntry) + + // Fetch the current epoch entry + currentEpochEntry, err := bav.GetCurrentEpochEntry() + if err != nil { + return nil, errors.Errorf("Error fetching current epoch entry: %v", err) + } + + // Fetch the previous epoch entry + prevEpochEntry, err := bav.simulatePrevEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) + if err != nil { + return nil, errors.Errorf("Error fetching previous epoch entry: %v", err) + } + + // Fetch the next epoch entry + nextEpochEntry, err := bav.simulateNextEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) + if err != nil { + return nil, errors.Errorf("Error fetching next epoch entry: %v", err) + } + + // The supported block heights can only be part of the previous, current, or next epoch. + possibleEpochEntriesForBlocks := []*EpochEntry{prevEpochEntry, currentEpochEntry, nextEpochEntry} + + // Output map that will hold the validator set for each block height + validatorSetByBlockHeight := map[uint64][]*ValidatorEntry{} + + // Fetch the validator set at each block height + for _, blockHeight := range blockHeights { + epochEntryForBlock, err := findEpochEntryForBlockHeight(blockHeight, possibleEpochEntriesForBlocks) + if err != nil { + return nil, errors.Errorf("Error fetching epoch number for block height %d: %v", blockHeight, err) + } + + // Compute the snapshot epoch number for the block height. This is the epoch number that the validator set + // for the block was snapshotted in. + snapshotEpochNumber, err := bav.ComputeSnapshotEpochNumberForEpoch(epochEntryForBlock.EpochNumber) + if err != nil { + return nil, errors.Errorf("error computing snapshot epoch number for epoch number %d: %v", epochEntryForBlock.EpochNumber, err) + } + + var validatorSet []*ValidatorEntry + var ok bool + + // If the validator set for the block is already cached by the snapshot epoch number, then use it. 
+ // Otherwise, fetch it from the UtxoView. + if validatorSet, ok = validatorSetEntriesBySnapshotEpochNumber[snapshotEpochNumber]; !ok { + // We don't have the validator set for the block cached. Fetch it from the UtxoView. + validatorSet, err = bav.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber) + if err != nil { + return nil, errors.Errorf("Error fetching validator set for block: %v", err) + } + } + + validatorSetByBlockHeight[blockHeight] = validatorSet + } + + // Happy path: we fetched the validator lists for all block heights successfully. + return validatorSetByBlockHeight, nil +} + func (bav *UtxoView) GetCurrentSnapshotValidatorSetEntryByPKID(pkid *PKID) (*ValidatorEntry, error) { // Calculate the SnapshotEpochNumber. snapshotAtEpochNumber, err := bav.GetCurrentSnapshotEpochNumber() @@ -249,6 +315,7 @@ func (bav *UtxoView) GetCurrentSnapshotValidatorSetEntryByPKID(pkid *PKID) (*Val } return bav.GetSnapshotValidatorSetEntryByPKIDAtEpochNumber(pkid, snapshotAtEpochNumber) } + func (bav *UtxoView) GetSnapshotValidatorSetEntryByPKIDAtEpochNumber(pkid *PKID, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { // Check the UtxoView first. mapKey := SnapshotValidatorSetMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *pkid} @@ -1390,3 +1457,14 @@ func DBPutSnapshotLeaderScheduleValidatorWithTxn( } return nil } + +// Given a list of epoch entries, this finds the epoch entry for the given block height. 
+func findEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { + for _, epochEntry := range epochEntries { + if epochEntry.ContainsBlockHeight(blockHeight) { + return epochEntry, nil + } + } + + return nil, errors.Errorf("error finding epoch entry for block height: %v", blockHeight) +} diff --git a/lib/pos_validator_set.go b/lib/pos_validator_set.go deleted file mode 100644 index 9cd0f9c7c..000000000 --- a/lib/pos_validator_set.go +++ /dev/null @@ -1,79 +0,0 @@ -package lib - -import "github.com/pkg/errors" - -// GetSnapshotValidatorSetsForBlockHeights returns the validator set for each block height provided. -// It requires all input block heights to be in the previous, current, or next epoch. -func (bav *UtxoView) GetSnapshotValidatorSetsForBlockHeights(blockHeights []uint64) (map[uint64][]*ValidatorEntry, error) { - // Create a map to cache the validator set entries by epoch number. Two blocks in the same epoch will have - // the same validator set, so we can use an in-memory cache to optimize the validator set lookup for them. - validatorSetEntriesBySnapshotEpochNumber := make(map[uint64][]*ValidatorEntry) - - // Fetch the current epoch entry - currentEpochEntry, err := bav.GetCurrentEpochEntry() - if err != nil { - return nil, errors.Errorf("Error fetching current epoch entry: %v", err) - } - - // Fetch the previous epoch entry - prevEpochEntry, err := bav.simulatePrevEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) - if err != nil { - return nil, errors.Errorf("Error fetching previous epoch entry: %v", err) - } - - // Fetch the next epoch entry - nextEpochEntry, err := bav.simulateNextEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) - if err != nil { - return nil, errors.Errorf("Error fetching next epoch entry: %v", err) - } - - // The supported block heights can only be part of the previous, current, or next epoch. 
- possibleEpochEntriesForBlocks := []*EpochEntry{prevEpochEntry, currentEpochEntry, nextEpochEntry} - - // Output map that will hold the validator set for each block height - validatorSetByBlockHeight := map[uint64][]*ValidatorEntry{} - - // Fetch the validator set at each block height - for _, blockHeight := range blockHeights { - epochEntryForBlock, err := findEpochEntryForBlockHeight(blockHeight, possibleEpochEntriesForBlocks) - if err != nil { - return nil, errors.Errorf("Error fetching epoch number for block height %d: %v", blockHeight, err) - } - - // Compute the snapshot epoch number for the block height. This is the epoch number that the validator set - // for the block was snapshotted in. - snapshotEpochNumber, err := bav.ComputeSnapshotEpochNumberForEpoch(epochEntryForBlock.EpochNumber) - if err != nil { - return nil, errors.Errorf("error computing snapshot epoch number for epoch number %d: %v", epochEntryForBlock.EpochNumber, err) - } - - var validatorSet []*ValidatorEntry - var ok bool - - // If the validator set for the block is already cached by the snapshot epoch number, then use it. - // Otherwise, fetch it from the UtxoView. - if validatorSet, ok = validatorSetEntriesBySnapshotEpochNumber[snapshotEpochNumber]; !ok { - // We don't have the validator set for the block cached. Fetch it from the UtxoView. - validatorSet, err = bav.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber) - if err != nil { - return nil, errors.Errorf("Error fetching validator set for block: %v", err) - } - } - - validatorSetByBlockHeight[blockHeight] = validatorSet - } - - // Happy path: we fetched the validator lists for all block heights successfully. - return validatorSetByBlockHeight, nil -} - -// Given a list of epoch entries, this finds the epoch entry for the given block height. 
-func findEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { - for _, epochEntry := range epochEntries { - if epochEntry.ContainsBlockHeight(blockHeight) { - return epochEntry, nil - } - } - - return nil, errors.Errorf("error finding epoch entry for block height: %v", blockHeight) -} From 10c2aa5f278cc4fcf01abd6c5031f22d272bf8b9 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 19 Jan 2024 13:03:35 -0500 Subject: [PATCH 367/762] Fix Of By One Error To Trigger PoS Consensus (#924) --- lib/pos_blockchain.go | 1 + lib/server.go | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 697732506..2e290261b 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -213,6 +213,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v _missingBlockHashes []*BlockHash, _err error, ) { + // If the incoming block's height is under the PoS cutover fork height, then we can't process it. Exit early. if block.Header.Height < uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { return false, false, nil, errors.Errorf( "processHeaderPoS: Header height %d is less than the ProofOfStake2ConsensusCutoverBlockHeight %d", diff --git a/lib/server.go b/lib/server.go index 8fb934bd5..d39dfb4a2 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2129,9 +2129,11 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { return } - // Exit early if the current tip height is below the PoS cutover height. + // Exit early if the current tip height is below the PoS cutover height. We are ready to enable + // the PoS validator FastHotStuffConsensus once we reach the final block of the PoW protocol. 
+ // This requires the block height check to use ProofOfStake2ConsensusCutoverBlockHeight-1 tipHeight := srv.blockchain.blockTip().Height - if tipHeight < srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + if tipHeight < srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1 { return } From 9729848bf393aa1142b3dc29215f7e7a7d341139 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Mon, 22 Jan 2024 13:05:22 -0500 Subject: [PATCH 368/762] Revert "Update ProcessBlockPoS Validator Set Verification To Use New Validator Set Fetch Helper (#919)" This reverts commit feb8d297876278df974e1b4bdb46b201d4dcad0a. --- lib/pos_blockchain.go | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 2e290261b..37bcd423a 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -577,25 +577,17 @@ func (bc *Blockchain) validateLeaderAndQC(block *MsgDeSoBlock) (_passedSpamPreve if !isValidPartialSig { return false, nil } - - // 2. Fetch the validator set for the current and previous block heights. - validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsByBlockHeights([]uint64{ - block.Header.Height, block.Header.Height - 1, - }) + // 2. Validate QC + validatorsByStake, err := utxoView.GetAllSnapshotValidatorSetEntriesByStake() if err != nil { - return false, errors.Wrap(err, "validateLeaderAndQC: Problem getting validator sets") - } - currentBlockValidatorSet, ok := validatorSetByBlockHeight[block.Header.Height] - if !ok { - return false, errors.Errorf("validateLeaderAndQC: Validator set for block height %d not found", block.Header.Height) - } - prevBlockValidatorSetSet, ok := validatorSetByBlockHeight[block.Header.Height-1] - if !ok { - return false, errors.Errorf("validateLeaderAndQC: Validator set for block height %d not found", block.Header.Height-1) + // This should never happen. 
If the parent is validated and extends from the tip, then we should + // be able to fetch the validator set at its block height for it. This failure can only happen due + // to transient badger issues. We return false for failed spam prevention check and the error. + return false, errors.Wrap(err, "validateLeaderAndQC: Problem getting validator set") } - // 3. Validate the block's QC. If it's invalid, we return true for failed spam prevention check. - if err = bc.isValidPoSQuorumCertificate(block, currentBlockValidatorSet, prevBlockValidatorSetSet); err != nil { + // Validate the block's QC. If it's invalid, we return true for failed spam prevention check. + if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake, validatorsByStake); err != nil { return false, nil } From acc1f3d1b0c4c0c7d71a31aa406e41f205612cb0 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Mon, 22 Jan 2024 13:07:16 -0500 Subject: [PATCH 369/762] Revert "Update processOrphanBlockPoS To Use New Validator Set Fetch Helper (#920)" This reverts commit 2d34f585dff35b1cc85268d1607c2fa2144a839e. --- lib/pos_blockchain.go | 36 +++++++++++++++--------------------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 37bcd423a..0c839874a 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -371,7 +371,7 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // We can't validate the QC without getting the current epoch entry. return errors.Wrap(err, "processOrphanBlockPoS: Problem getting current epoch entry") } - + var validatorsByStake []*ValidatorEntry // If the block is in a previous or future epoch, we need to compute the // proper validator set for the block. We do this by computing the prev/next // epoch entry and then fetching the validator set at the snapshot of the @@ -458,6 +458,13 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // potential leaders. 
As a spam-prevention measure, we simply return nil and throw it away. return nil } + validatorsByStake, err = utxoView.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber( + epochEntrySnapshotAtEpochNumber) + if err != nil { + return errors.Wrapf(err, + "processOrphanBlockPoS: Problem getting validator set at snapshot at epoch number %d", + epochEntrySnapshotAtEpochNumber) + } } else { // This block is in the current epoch! // First we validate the proposer vote partial signature @@ -489,31 +496,19 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // measure, we just throw away this block and don't store it. return nil } + // If we get here, we know we have the correct block proposer. We now fetch the validators ordered by + // stake, so we can validate the QC. + validatorsByStake, err = utxoView.GetAllSnapshotValidatorSetEntriesByStake() + if err != nil { + return errors.Wrap(err, "processOrphanBlockPoS: Problem getting validator set") + } } - - // Get the validator set for the current and previous block heights. - validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsByBlockHeights([]uint64{ - block.Header.Height, block.Header.Height - 1, - }) - if err != nil { - return errors.Wrap(err, "processOrphanBlockPoS: Problem getting validator sets") - } - currentBlockValidatorSet, ok := validatorSetByBlockHeight[block.Header.Height] - if !ok { - return errors.Errorf("processOrphanBlockPoS: Validator set for block height %d not found", block.Header.Height) - } - prevBlockValidatorSet, ok := validatorSetByBlockHeight[block.Header.Height-1] - if !ok { - return errors.Errorf("processOrphanBlockPoS: Validator set for block height %d not found", block.Header.Height-1) - } - // Okay now we have the validator set ordered by stake, we can validate the QC. 
- if err = bc.isValidPoSQuorumCertificate(block, currentBlockValidatorSet, prevBlockValidatorSet); err != nil { + if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake, validatorsByStake); err != nil { // If we hit an error, we know that the QC is invalid, and we'll never accept this block, // As a spam-prevention measure, we just throw away this block and don't store it. return nil } - // All blocks should pass the basic integrity validations, which ensure the block // is not malformed. If the block is malformed, we should store it as ValidateFailed. if err = bc.isProperlyFormedBlockPoS(block); err != nil { @@ -523,7 +518,6 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { } return nil } - // Add to blockIndexByHash with status STORED only as we are not sure if it's valid yet. _, err = bc.storeBlockInBlockIndex(block) return errors.Wrap(err, "processBlockPoS: Problem adding block to block index: ") From 8069dbbbf32e79404e06da890b5f6137b79b43ce Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Mon, 22 Jan 2024 15:16:50 -0500 Subject: [PATCH 370/762] Revert "Consolidate pos_validator_set.go with post_snapshot_entries.go" This reverts commit 2f1f3a890a80d4e3c9d1a5841ee4d2a967b0a248. 
--- lib/pos_consensus.go | 2 +- lib/pos_snapshot_entries.go | 82 +------------------------------------ lib/pos_validator_set.go | 79 +++++++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+), 81 deletions(-) create mode 100644 lib/pos_validator_set.go diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 193c5096e..b0031c289 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -615,7 +615,7 @@ func (cc *FastHotStuffConsensus) fetchValidatorListsForSafeBlocks(blocks []*MsgD } // Fetch the validator set for each block - validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsByBlockHeights(blockHeights) + validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsForBlockHeights(blockHeights) if err != nil { return nil, errors.Errorf("Error fetching validator set for blocks: %v", err) } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index d4987628e..784e2fc96 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -3,11 +3,10 @@ package lib import ( "bytes" "fmt" - "math" - "sort" - "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" + "math" + "sort" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" @@ -242,71 +241,6 @@ type SnapshotValidatorSetMapKey struct { ValidatorPKID PKID } -// GetSnapshotValidatorSetsByBlockHeights returns the validator set for each block height provided. -// It requires all input block heights to be in the previous, current, or next epoch. -func (bav *UtxoView) GetSnapshotValidatorSetsByBlockHeights(blockHeights []uint64) (map[uint64][]*ValidatorEntry, error) { - // Create a map to cache the validator set entries by epoch number. Two blocks in the same epoch will have - // the same validator set, so we can use an in-memory cache to optimize the validator set lookup for them. 
- validatorSetEntriesBySnapshotEpochNumber := make(map[uint64][]*ValidatorEntry) - - // Fetch the current epoch entry - currentEpochEntry, err := bav.GetCurrentEpochEntry() - if err != nil { - return nil, errors.Errorf("Error fetching current epoch entry: %v", err) - } - - // Fetch the previous epoch entry - prevEpochEntry, err := bav.simulatePrevEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) - if err != nil { - return nil, errors.Errorf("Error fetching previous epoch entry: %v", err) - } - - // Fetch the next epoch entry - nextEpochEntry, err := bav.simulateNextEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) - if err != nil { - return nil, errors.Errorf("Error fetching next epoch entry: %v", err) - } - - // The supported block heights can only be part of the previous, current, or next epoch. - possibleEpochEntriesForBlocks := []*EpochEntry{prevEpochEntry, currentEpochEntry, nextEpochEntry} - - // Output map that will hold the validator set for each block height - validatorSetByBlockHeight := map[uint64][]*ValidatorEntry{} - - // Fetch the validator set at each block height - for _, blockHeight := range blockHeights { - epochEntryForBlock, err := findEpochEntryForBlockHeight(blockHeight, possibleEpochEntriesForBlocks) - if err != nil { - return nil, errors.Errorf("Error fetching epoch number for block height %d: %v", blockHeight, err) - } - - // Compute the snapshot epoch number for the block height. This is the epoch number that the validator set - // for the block was snapshotted in. - snapshotEpochNumber, err := bav.ComputeSnapshotEpochNumberForEpoch(epochEntryForBlock.EpochNumber) - if err != nil { - return nil, errors.Errorf("error computing snapshot epoch number for epoch number %d: %v", epochEntryForBlock.EpochNumber, err) - } - - var validatorSet []*ValidatorEntry - var ok bool - - // If the validator set for the block is already cached by the snapshot epoch number, then use it. 
- // Otherwise, fetch it from the UtxoView. - if validatorSet, ok = validatorSetEntriesBySnapshotEpochNumber[snapshotEpochNumber]; !ok { - // We don't have the validator set for the block cached. Fetch it from the UtxoView. - validatorSet, err = bav.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber) - if err != nil { - return nil, errors.Errorf("Error fetching validator set for block: %v", err) - } - } - - validatorSetByBlockHeight[blockHeight] = validatorSet - } - - // Happy path: we fetched the validator lists for all block heights successfully. - return validatorSetByBlockHeight, nil -} - func (bav *UtxoView) GetCurrentSnapshotValidatorSetEntryByPKID(pkid *PKID) (*ValidatorEntry, error) { // Calculate the SnapshotEpochNumber. snapshotAtEpochNumber, err := bav.GetCurrentSnapshotEpochNumber() @@ -315,7 +249,6 @@ func (bav *UtxoView) GetCurrentSnapshotValidatorSetEntryByPKID(pkid *PKID) (*Val } return bav.GetSnapshotValidatorSetEntryByPKIDAtEpochNumber(pkid, snapshotAtEpochNumber) } - func (bav *UtxoView) GetSnapshotValidatorSetEntryByPKIDAtEpochNumber(pkid *PKID, snapshotAtEpochNumber uint64) (*ValidatorEntry, error) { // Check the UtxoView first. mapKey := SnapshotValidatorSetMapKey{SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *pkid} @@ -1457,14 +1390,3 @@ func DBPutSnapshotLeaderScheduleValidatorWithTxn( } return nil } - -// Given a list of epoch entries, this finds the epoch entry for the given block height. 
-func findEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { - for _, epochEntry := range epochEntries { - if epochEntry.ContainsBlockHeight(blockHeight) { - return epochEntry, nil - } - } - - return nil, errors.Errorf("error finding epoch entry for block height: %v", blockHeight) -} diff --git a/lib/pos_validator_set.go b/lib/pos_validator_set.go new file mode 100644 index 000000000..9cd0f9c7c --- /dev/null +++ b/lib/pos_validator_set.go @@ -0,0 +1,79 @@ +package lib + +import "github.com/pkg/errors" + +// GetSnapshotValidatorSetsForBlockHeights returns the validator set for each block height provided. +// It requires all input block heights to be in the previous, current, or next epoch. +func (bav *UtxoView) GetSnapshotValidatorSetsForBlockHeights(blockHeights []uint64) (map[uint64][]*ValidatorEntry, error) { + // Create a map to cache the validator set entries by epoch number. Two blocks in the same epoch will have + // the same validator set, so we can use an in-memory cache to optimize the validator set lookup for them. + validatorSetEntriesBySnapshotEpochNumber := make(map[uint64][]*ValidatorEntry) + + // Fetch the current epoch entry + currentEpochEntry, err := bav.GetCurrentEpochEntry() + if err != nil { + return nil, errors.Errorf("Error fetching current epoch entry: %v", err) + } + + // Fetch the previous epoch entry + prevEpochEntry, err := bav.simulatePrevEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) + if err != nil { + return nil, errors.Errorf("Error fetching previous epoch entry: %v", err) + } + + // Fetch the next epoch entry + nextEpochEntry, err := bav.simulateNextEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) + if err != nil { + return nil, errors.Errorf("Error fetching next epoch entry: %v", err) + } + + // The supported block heights can only be part of the previous, current, or next epoch. 
+ possibleEpochEntriesForBlocks := []*EpochEntry{prevEpochEntry, currentEpochEntry, nextEpochEntry} + + // Output map that will hold the validator set for each block height + validatorSetByBlockHeight := map[uint64][]*ValidatorEntry{} + + // Fetch the validator set at each block height + for _, blockHeight := range blockHeights { + epochEntryForBlock, err := findEpochEntryForBlockHeight(blockHeight, possibleEpochEntriesForBlocks) + if err != nil { + return nil, errors.Errorf("Error fetching epoch number for block height %d: %v", blockHeight, err) + } + + // Compute the snapshot epoch number for the block height. This is the epoch number that the validator set + // for the block was snapshotted in. + snapshotEpochNumber, err := bav.ComputeSnapshotEpochNumberForEpoch(epochEntryForBlock.EpochNumber) + if err != nil { + return nil, errors.Errorf("error computing snapshot epoch number for epoch number %d: %v", epochEntryForBlock.EpochNumber, err) + } + + var validatorSet []*ValidatorEntry + var ok bool + + // If the validator set for the block is already cached by the snapshot epoch number, then use it. + // Otherwise, fetch it from the UtxoView. + if validatorSet, ok = validatorSetEntriesBySnapshotEpochNumber[snapshotEpochNumber]; !ok { + // We don't have the validator set for the block cached. Fetch it from the UtxoView. + validatorSet, err = bav.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber) + if err != nil { + return nil, errors.Errorf("Error fetching validator set for block: %v", err) + } + } + + validatorSetByBlockHeight[blockHeight] = validatorSet + } + + // Happy path: we fetched the validator lists for all block heights successfully. + return validatorSetByBlockHeight, nil +} + +// Given a list of epoch entries, this finds the epoch entry for the given block height. 
+func findEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { + for _, epochEntry := range epochEntries { + if epochEntry.ContainsBlockHeight(blockHeight) { + return epochEntry, nil + } + } + + return nil, errors.Errorf("error finding epoch entry for block height: %v", blockHeight) +} From 0809f8262e28d0f44e927b1a5b4a28cd40e429c6 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Mon, 22 Jan 2024 15:20:44 -0500 Subject: [PATCH 371/762] Revert "Move Validator Set Fetch Helper Function to UtxoView (#918)" This reverts commit 6da150148e42abbb46076b69ddf2421688aa6770. --- lib/pos_consensus.go | 70 ++++++++++++++++++++++++++++------- lib/pos_validator_set.go | 79 ---------------------------------------- 2 files changed, 56 insertions(+), 93 deletions(-) delete mode 100644 lib/pos_validator_set.go diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index b0031c289..482078899 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -602,30 +602,61 @@ func (cc *FastHotStuffConsensus) fetchValidatorListsForSafeBlocks(blocks []*MsgD return nil, nil } - blockHeights := collections.Transform(blocks, func(block *MsgDeSoHeader) uint64 { - // We add 1 to the block height because we need the validator set that results AFTER connecting the - // block to the blockchain, and triggering an epoch transition (if at an epoch boundary). - return block.Height + 1 - }) + // Create a map to cache the validator set entries by epoch number. Two blocks in the same epoch will have + // the same validator set, so we can use an in-memory cache to optimize the validator set lookup for them. + validatorSetEntriesBySnapshotEpochNumber := make(map[uint64][]*ValidatorEntry) - // Create a UtxoView for the committed tip block. We will use this view to simulate all epoch entries. - utxoView, err := NewUtxoView(cc.blockchain.db, cc.blockchain.params, cc.blockchain.postgres, cc.blockchain.snapshot, nil) + // Create a UtxoView for the committed tip block. 
We will use this to fetch the validator set for + // all of the safe blocks. + utxoView, err := NewUtxoView(cc.blockchain.db, cc.params, cc.blockchain.postgres, cc.blockchain.snapshot, nil) if err != nil { return nil, errors.Errorf("Error creating UtxoView: %v", err) } - // Fetch the validator set for each block - validatorSetByBlockHeight, err := utxoView.GetSnapshotValidatorSetsForBlockHeights(blockHeights) + // Fetch the current epoch entry for the committed tip + epochEntryAtCommittedTip, err := utxoView.GetCurrentEpochEntry() + if err != nil { + return nil, errors.Errorf("Error fetching epoch entry for committed tip: %v", err) + } + + // Fetch the next epoch entry + nextEpochEntryAfterCommittedTip, err := utxoView.simulateNextEpochEntry(epochEntryAtCommittedTip.EpochNumber, epochEntryAtCommittedTip.FinalBlockHeight) if err != nil { - return nil, errors.Errorf("Error fetching validator set for blocks: %v", err) + return nil, errors.Errorf("Error fetching next epoch entry after committed tip: %v", err) } - // Extract the validator set at each block + // The input blocks can only be part of the current or next epoch entries. + possibleEpochEntriesForBlocks := []*EpochEntry{epochEntryAtCommittedTip, nextEpochEntryAfterCommittedTip} + + // Fetch the validator set at each block blocksWithValidatorLists := make([]consensus.BlockWithValidatorList, len(blocks)) for ii, block := range blocks { - validatorSetAtBlock, ok := validatorSetByBlockHeight[block.Height+1] - if !ok { - return nil, errors.Errorf("Error fetching validator set for block: %v", block) + // Find the epoch entry for the block. It'll either be the current epoch entry or the next one. + // We add 1 to the block height because we need the validator set that results AFTER connecting the + // block to the blockchain, and triggering an epoch transition (if at an epoch boundary). 
+ epochEntryForBlock, err := getEpochEntryForBlockHeight(block.Height+1, possibleEpochEntriesForBlocks) + if err != nil { + return nil, errors.Errorf("Error fetching epoch number for block: %v", err) + } + + // Compute the snapshot epoch number for the block. This is the epoch number that the validator set + // for the block was snapshotted in. + snapshotEpochNumber, err := utxoView.ComputeSnapshotEpochNumberForEpoch(epochEntryForBlock.EpochNumber) + if err != nil { + return nil, errors.Errorf("error computing snapshot epoch number for epoch: %v", err) + } + + var validatorSetAtBlock []*ValidatorEntry + var ok bool + + // If the validator set for the block is already cached by the snapshot epoch number, then use it. + // Otherwise, fetch it from the UtxoView. + if validatorSetAtBlock, ok = validatorSetEntriesBySnapshotEpochNumber[snapshotEpochNumber]; !ok { + // We don't have the validator set for the block cached. Fetch it from the UtxoView. + validatorSetAtBlock, err = utxoView.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber) + if err != nil { + return nil, errors.Errorf("Error fetching validator set for block: %v", err) + } } blocksWithValidatorLists[ii] = consensus.BlockWithValidatorList{ @@ -652,6 +683,17 @@ func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView) (*PosBlockPr return NewPosBlockProducer(fc.mempool, fc.params, blockProducerPublicKey, blockProducerBlsPublicKey), nil } +// Finds the epoch entry for the block and returns the epoch number. 
+func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { + for _, epochEntry := range epochEntries { + if epochEntry.ContainsBlockHeight(blockHeight) { + return epochEntry, nil + } + } + + return nil, errors.Errorf("error finding epoch number for block height: %v", blockHeight) +} + func isValidBlockProposalEvent(event *consensus.FastHotStuffEvent, expectedEventType consensus.FastHotStuffEventType) bool { // Validate that the expected event type is a block proposal event type possibleExpectedEventTypes := []consensus.FastHotStuffEventType{ diff --git a/lib/pos_validator_set.go b/lib/pos_validator_set.go deleted file mode 100644 index 9cd0f9c7c..000000000 --- a/lib/pos_validator_set.go +++ /dev/null @@ -1,79 +0,0 @@ -package lib - -import "github.com/pkg/errors" - -// GetSnapshotValidatorSetsForBlockHeights returns the validator set for each block height provided. -// It requires all input block heights to be in the previous, current, or next epoch. -func (bav *UtxoView) GetSnapshotValidatorSetsForBlockHeights(blockHeights []uint64) (map[uint64][]*ValidatorEntry, error) { - // Create a map to cache the validator set entries by epoch number. Two blocks in the same epoch will have - // the same validator set, so we can use an in-memory cache to optimize the validator set lookup for them. 
- validatorSetEntriesBySnapshotEpochNumber := make(map[uint64][]*ValidatorEntry) - - // Fetch the current epoch entry - currentEpochEntry, err := bav.GetCurrentEpochEntry() - if err != nil { - return nil, errors.Errorf("Error fetching current epoch entry: %v", err) - } - - // Fetch the previous epoch entry - prevEpochEntry, err := bav.simulatePrevEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) - if err != nil { - return nil, errors.Errorf("Error fetching previous epoch entry: %v", err) - } - - // Fetch the next epoch entry - nextEpochEntry, err := bav.simulateNextEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) - if err != nil { - return nil, errors.Errorf("Error fetching next epoch entry: %v", err) - } - - // The supported block heights can only be part of the previous, current, or next epoch. - possibleEpochEntriesForBlocks := []*EpochEntry{prevEpochEntry, currentEpochEntry, nextEpochEntry} - - // Output map that will hold the validator set for each block height - validatorSetByBlockHeight := map[uint64][]*ValidatorEntry{} - - // Fetch the validator set at each block height - for _, blockHeight := range blockHeights { - epochEntryForBlock, err := findEpochEntryForBlockHeight(blockHeight, possibleEpochEntriesForBlocks) - if err != nil { - return nil, errors.Errorf("Error fetching epoch number for block height %d: %v", blockHeight, err) - } - - // Compute the snapshot epoch number for the block height. This is the epoch number that the validator set - // for the block was snapshotted in. - snapshotEpochNumber, err := bav.ComputeSnapshotEpochNumberForEpoch(epochEntryForBlock.EpochNumber) - if err != nil { - return nil, errors.Errorf("error computing snapshot epoch number for epoch number %d: %v", epochEntryForBlock.EpochNumber, err) - } - - var validatorSet []*ValidatorEntry - var ok bool - - // If the validator set for the block is already cached by the snapshot epoch number, then use it. 
- // Otherwise, fetch it from the UtxoView. - if validatorSet, ok = validatorSetEntriesBySnapshotEpochNumber[snapshotEpochNumber]; !ok { - // We don't have the validator set for the block cached. Fetch it from the UtxoView. - validatorSet, err = bav.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber) - if err != nil { - return nil, errors.Errorf("Error fetching validator set for block: %v", err) - } - } - - validatorSetByBlockHeight[blockHeight] = validatorSet - } - - // Happy path: we fetched the validator lists for all block heights successfully. - return validatorSetByBlockHeight, nil -} - -// Given a list of epoch entries, this finds the epoch entry for the given block height. -func findEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { - for _, epochEntry := range epochEntries { - if epochEntry.ContainsBlockHeight(blockHeight) { - return epochEntry, nil - } - } - - return nil, errors.Errorf("error finding epoch entry for block height: %v", blockHeight) -} From f3bcded99ec649a5b9980b5c5b912db10c58f53c Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 22 Jan 2024 20:36:47 -0800 Subject: [PATCH 372/762] Add block timestamp to ValidateTransaction. --- lib/blockchain.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 6bb232d31..b36241106 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -2867,7 +2867,8 @@ func (bc *Blockchain) ValidateTransaction( txnSize := int64(len(txnBytes)) // We don't care about the utxoOps or the fee it returns. 
_, _, _, _, err = utxoView._connectTransaction( - txnMsg, txHash, txnSize, blockHeight, 0, verifySignatures, false) + txnMsg, txHash, txnSize, blockHeight, time.Now().UnixNano(), + verifySignatures, false) if err != nil { return errors.Wrapf(err, "ValidateTransaction: Problem validating transaction: ") } From f8e423d7be7ce3334b867041b1516a497f7781a6 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 23 Jan 2024 14:56:22 -0500 Subject: [PATCH 373/762] Add Unknown reward method (#904) Co-authored-by: Lazy Nina <> --- lib/block_view_stake.go | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 01ae98998..5b2ba0f0b 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -37,6 +37,7 @@ type StakingRewardMethod = uint8 const ( StakingRewardMethodPayToBalance StakingRewardMethod = 0 StakingRewardMethodRestake StakingRewardMethod = 1 + StakingRewardMethodUnknown StakingRewardMethod = 2 ) type StakeEntry struct { From c69e65943ccbe362627893e5400e3cfcc9426898 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Tue, 23 Jan 2024 15:22:32 -0500 Subject: [PATCH 374/762] Revert function signature change for isValidPoSQuorumCertificate --- lib/pos_blockchain.go | 9 +++-- lib/pos_blockchain_test.go | 68 +++++++++++++++++++------------------- 2 files changed, 38 insertions(+), 39 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 0c839874a..065cdf36e 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -504,7 +504,7 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { } } // Okay now we have the validator set ordered by stake, we can validate the QC. 
- if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake, validatorsByStake); err != nil { + if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake); err != nil { // If we hit an error, we know that the QC is invalid, and we'll never accept this block, // As a spam-prevention measure, we just throw away this block and don't store it. return nil @@ -581,7 +581,7 @@ func (bc *Blockchain) validateLeaderAndQC(block *MsgDeSoBlock) (_passedSpamPreve } // Validate the block's QC. If it's invalid, we return true for failed spam prevention check. - if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake, validatorsByStake); err != nil { + if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake); err != nil { return false, nil } @@ -1123,12 +1123,11 @@ func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBloc // isValidPoSQuorumCertificate validates that the QC of this block is valid, meaning a super majority // of the validator set has voted (or timed out). Assumes ValidatorEntry list is sorted. 
-func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validatorSet []*ValidatorEntry, previousValidatorSet []*ValidatorEntry) error { +func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validatorSet []*ValidatorEntry) error { validators := toConsensusValidators(validatorSet) - previousValidators := toConsensusValidators(previousValidatorSet) if !block.Header.ValidatorsTimeoutAggregateQC.isEmpty() { if !consensus.IsValidSuperMajorityAggregateQuorumCertificate( - block.Header.ValidatorsTimeoutAggregateQC, validators, previousValidators) { + block.Header.ValidatorsTimeoutAggregateQC, validators, validators) { return RuleErrorInvalidTimeoutQC } return nil diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index f9af8ca43..2166f2351 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -912,7 +912,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { }, } // Empty QC for both vote and timeout should fail - err := bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err := bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) @@ -934,11 +934,11 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { }, } desoBlock.Header.ValidatorsVoteQC = voteQC - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.NoError(t, err) // Empty validator set should fail - err = bc.isValidPoSQuorumCertificate(desoBlock, []*ValidatorEntry{}, []*ValidatorEntry{}) + err = bc.isValidPoSQuorumCertificate(desoBlock, []*ValidatorEntry{}) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) @@ -946,13 +946,13 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { { // Zero stake amount validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(0) - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, 
validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil stake amount validatorSet[0].TotalStakeAmountNanos = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) @@ -960,14 +960,14 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(3) // Nil voting public key validatorSet[0].VotingPublicKey = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Reset voting public key validatorSet[0].VotingPublicKey = m1VotingPrivateKey.PublicKey() // Nil validator entry - err = bc.isValidPoSQuorumCertificate(desoBlock, append(validatorSet, nil), append(validatorSet, nil)) + err = bc.isValidPoSQuorumCertificate(desoBlock, append(validatorSet, nil)) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) } @@ -976,41 +976,41 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { // Malformed vote QC should fail // Nil vote QC desoBlock.Header.ValidatorsVoteQC = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // View is 0 desoBlock.Header.ValidatorsVoteQC = voteQC voteQC.ProposedInView = 0 - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil block hash voteQC.ProposedInView = 6 voteQC.BlockHash = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, 
validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil signers list voteQC.ValidatorsVoteAggregatedSignature.SignersList = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil Signature voteQC.ValidatorsVoteAggregatedSignature.SignersList = signersList1And2 voteQC.ValidatorsVoteAggregatedSignature.Signature = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil aggregate signature voteQC.BlockHash = hash1 voteQC.ValidatorsVoteAggregatedSignature = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Reset the ValidatorsVoteAggregatedSignature @@ -1024,21 +1024,21 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { // No supermajority in vote QC voteQC.ValidatorsVoteAggregatedSignature.SignersList = bitset.NewBitset().FromBytes([]byte{0x1}) // 0b0001, which represents validator 1 voteQC.ValidatorsVoteAggregatedSignature.Signature = vote1Signature - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) } { // Only having signature for validator 1 should fail even if signers list has validator 2 voteQC.ValidatorsVoteAggregatedSignature.SignersList = bitset.NewBitset().FromBytes([]byte{0x3}) // 0b0010, which represents validator 1 and 2 - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = 
bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Having 1 and 3 in signers list, but including signature for 2 should fail voteQC.ValidatorsVoteAggregatedSignature.SignersList = bitset.NewBitset().Set(0, true).Set(2, true) // represents validator 1 and 3 voteQC.ValidatorsVoteAggregatedSignature.Signature = aggregateSig - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) @@ -1070,7 +1070,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { desoBlock.Header.ValidatorsVoteQC = nil // Set the timeout qc to the timeout qc constructed above desoBlock.Header.ValidatorsTimeoutAggregateQC = timeoutQC - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.NoError(t, err) { @@ -1079,55 +1079,55 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { // timeout QC is interpreted as empty // View = 0 timeoutQC.TimedOutView = 0 - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil high QC timeoutQC.TimedOutView = 8 timeoutQC.ValidatorsHighQC = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // High QC has view of 0 timeoutQC.ValidatorsHighQC = voteQC timeoutQC.ValidatorsHighQC.ProposedInView = 0 - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // No high QC views 
timeoutQC.ValidatorsHighQC.ProposedInView = 6 timeoutQC.ValidatorsTimeoutHighQCViews = []uint64{} - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil high QC block hash timeoutQC.ValidatorsTimeoutHighQCViews = []uint64{6, 5} timeoutQC.ValidatorsHighQC.BlockHash = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil high QC signers list timeoutQC.ValidatorsHighQC.BlockHash = hash1 timeoutQC.ValidatorsHighQC.ValidatorsVoteAggregatedSignature.SignersList = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil high QC signature timeoutQC.ValidatorsHighQC.ValidatorsVoteAggregatedSignature.SignersList = signersList1And2 timeoutQC.ValidatorsHighQC.ValidatorsVoteAggregatedSignature.Signature = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) // Nil High QC Aggregated signature timeoutQC.ValidatorsHighQC.ValidatorsVoteAggregatedSignature = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) @@ -1137,13 +1137,13 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { Signature: aggregateSig, } - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.NoError(t, err) } { // 
Timed out view is not exactly one greater than high QC view timeoutQC.ValidatorsHighQC.ProposedInView = 7 - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) } @@ -1151,13 +1151,13 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { // Invalid validator set tests // Zero stake amount validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(0) - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) // Nil stake amount validatorSet[0].TotalStakeAmountNanos = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) @@ -1165,14 +1165,14 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(3) // Nil voting public key validatorSet[0].VotingPublicKey = nil - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) // Reset voting public key validatorSet[0].VotingPublicKey = m1VotingPrivateKey.PublicKey() // Nil validator entry - err = bc.isValidPoSQuorumCertificate(desoBlock, append(validatorSet, nil), append(validatorSet, nil)) + err = bc.isValidPoSQuorumCertificate(desoBlock, append(validatorSet, nil)) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) } @@ -1181,7 +1181,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { // No supermajority test timeoutQC.ValidatorsTimeoutAggregatedSignature.SignersList = 
bitset.NewBitset().FromBytes([]byte{0x1}) // 0b0001, which represents validator 1 timeoutQC.ValidatorsTimeoutAggregatedSignature.Signature = timeout1Signature - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) } @@ -1189,14 +1189,14 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { { // Only having signature for validator 1 should fail even if signers list has validator 2 timeoutQC.ValidatorsTimeoutAggregatedSignature.SignersList = bitset.NewBitset().FromBytes([]byte{0x3}) // 0b0010, which represents validator 1 and 2 - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) // Having 1 and 3 in signers list, but including signature for 2 should fail timeoutQC.ValidatorsTimeoutAggregatedSignature.SignersList = bitset.NewBitset().Set(0, true).Set(2, true) // represents validator 1 and 3 timeoutQC.ValidatorsTimeoutAggregatedSignature.Signature = timeoutAggSig - err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet, validatorSet) + err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) } From 43a980f8dbaf2359c82a81d69e2b1a48c3f90070 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 23 Jan 2024 15:35:08 -0500 Subject: [PATCH 375/762] Add Blockchain Locks to FastHotStuffConsensus Struct (#926) --- lib/pos_consensus.go | 31 +++++++++++++++++++++++++++++++ lib/pos_consensus_test.go | 4 ++++ 2 files changed, 35 insertions(+) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 482078899..38169c029 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -49,6 +49,13 @@ func (cc *FastHotStuffConsensus) 
Start() error { // Fetch the current tip of the chain tipBlock := cc.blockchain.BlockTip() + // If the chain is not at the final PoW block height or higher, then we cannot start the PoS consensus. + if tipBlock.Height < cc.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1 { + return errors.Errorf( + "FastHotStuffConsensus.Start: Block tip %d is not at the final PoW block height", tipBlock.Height, + ) + } + // Fetch the validator set at each safe block tipBlockWithValidators, err := cc.fetchValidatorListsForSafeBlocks([]*MsgDeSoHeader{tipBlock.Header}) if err != nil { @@ -99,6 +106,12 @@ func (cc *FastHotStuffConsensus) HandleLocalBlockProposalEvent(event *consensus. return errors.Errorf("FastHotStuffConsensus.HandleLocalBlockProposalEvent: FastHotStuffEventLoop is not running") } + // Hold the blockchain's write lock so that the chain cannot be mutated underneath us. + // In practice, this is a no-op, but it guarantees thread-safety in the event that other + // parts of the codebase change. + cc.blockchain.ChainLock.Lock() + defer cc.blockchain.ChainLock.Unlock() + // Handle the event as a block proposal event for a regular block if err := cc.handleBlockProposerEvent(event, consensus.FastHotStuffEventTypeConstructVoteQC); err != nil { return errors.Wrapf(err, "FastHotStuffConsensus.HandleLocalBlockProposalEvent: ") @@ -121,6 +134,12 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutBlockProposalEvent(event *con return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent: FastHotStuffEventLoop is not running") } + // Hold the blockchain's write lock so that the chain cannot be mutated underneath us. + // In practice, this is a no-op, but it guarantees thread-safety in the event that other + // parts of the codebase change. 
+ cc.blockchain.ChainLock.Lock() + defer cc.blockchain.ChainLock.Unlock() + // Handle the event as a block proposal event for a timeout block if err := cc.handleBlockProposerEvent(event, consensus.FastHotStuffEventTypeConstructTimeoutQC); err != nil { return errors.Wrapf(err, "FastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent: ") @@ -343,6 +362,12 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: FastHotStuffEventLoop is not running") } + // Hold the blockchain's write lock so that the chain cannot be mutated underneath us. + // In practice, this is a no-op, but it guarantees thread-safety in the event that other + // parts of the codebase change. + cc.blockchain.ChainLock.RLock() + defer cc.blockchain.ChainLock.RUnlock() + var err error if !consensus.IsProperlyFormedTimeoutEvent(event) { @@ -424,6 +449,12 @@ func (cc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error return errors.Errorf("FastHotStuffConsensus.HandleBlock: FastHotStuffEventLoop is not running") } + // Hold the blockchain's write lock so that the chain cannot be mutated underneath us. + // In practice, this is a no-op, but it guarantees thread-safety in the event that other + // parts of the codebase change. + cc.blockchain.ChainLock.Lock() + defer cc.blockchain.ChainLock.Unlock() + // Try to apply the block as the new tip of the blockchain. If the block is an orphan, then // we will get back a list of missing ancestor block hashes. We can fetch the missing blocks // from the network and retry. 
diff --git a/lib/pos_consensus_test.go b/lib/pos_consensus_test.go index 17d3c0b6a..542e387e0 100644 --- a/lib/pos_consensus_test.go +++ b/lib/pos_consensus_test.go @@ -8,6 +8,7 @@ import ( "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/consensus" + "github.com/deso-protocol/go-deadlock" "github.com/pkg/errors" "github.com/stretchr/testify/require" ) @@ -104,6 +105,9 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { signer: &BLSSigner{ privateKey: blsPrivateKey, }, + blockchain: &Blockchain{ + ChainLock: deadlock.RWMutex{}, + }, fastHotStuffEventLoop: &consensus.MockFastHotStuffEventLoop{ OnIsInitialized: alwaysReturnTrue, OnIsRunning: alwaysReturnTrue, From d8e282813b1e1d53d843151c861937520083964f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 23 Jan 2024 16:05:23 -0500 Subject: [PATCH 376/762] Create Hard-Coded Validator To Sign PoS Cutover Block (#928) --- lib/pos_constants.go | 30 ++++++++++++++++++++++++++++++ lib/pos_constants_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 lib/pos_constants.go create mode 100644 lib/pos_constants_test.go diff --git a/lib/pos_constants.go b/lib/pos_constants.go new file mode 100644 index 000000000..40045b122 --- /dev/null +++ b/lib/pos_constants.go @@ -0,0 +1,30 @@ +package lib + +import ( + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/consensus" + "github.com/holiman/uint256" + "github.com/pkg/errors" +) + +// The Proof of Work -> Proof of Stake cutover requires a synthetic QC to protect against a timeout +// during the exact point of the cutover. This synthetic QC is built and signed locally by every node +// using a known and consistent private key. 
+const proofOfStakeCutoverValidatorBLSPrivateKeyHex = "0x0570b78ce822f902b203ee075a7e2147d6b9a420a9409c038154589de64eec96" + +func BuildProofOfStakeCutoverValidatorBLSPrivateKey() (*bls.PrivateKey, error) { + return (&bls.PrivateKey{}).FromString(proofOfStakeCutoverValidatorBLSPrivateKeyHex) +} + +func BuildProofOfStakeCutoverValidator() (consensus.Validator, error) { + // Parse the BLS private key + blsPrivateKey, err := BuildProofOfStakeCutoverValidatorBLSPrivateKey() + if err != nil { + return nil, errors.Wrapf(err, "BuildProofOfStakeCutoverValidator: Problem parsing BLS private key") + } + validatorEntry := &ValidatorEntry{ + VotingPublicKey: blsPrivateKey.PublicKey(), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(1e9), + } + return validatorEntry, nil +} diff --git a/lib/pos_constants_test.go b/lib/pos_constants_test.go new file mode 100644 index 000000000..b2075e0b1 --- /dev/null +++ b/lib/pos_constants_test.go @@ -0,0 +1,28 @@ +//go:build relic + +package lib + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBuildProofOfStakeCutoverValidatorBLSPrivateKey(t *testing.T) { + privateKey, err := BuildProofOfStakeCutoverValidatorBLSPrivateKey() + require.NoError(t, err) + + privateKeyString := privateKey.ToString() + require.Equal(t, privateKeyString, "0x0570b78ce822f902b203ee075a7e2147d6b9a420a9409c038154589de64eec96") +} + +func TestBuildProofOfStakeCutoverValidator(t *testing.T) { + validator, err := BuildProofOfStakeCutoverValidator() + require.NoError(t, err) + + validatorPublicKeyString := validator.GetPublicKey().ToString() + require.Equal(t, validatorPublicKeyString, "0x91c92a48cc731489deeaa6752ebeab8410f2f25b4cce5415495d39b7ffdc32e257cdb7eaeefe1c3d6f607a248a057f530c7a55b0755a2c9adb48a5da19cc1ef55c46c4fd4719c1a63224e302b2da5a1d394fe6e516f56a021c8a5a3048a2a794") + + validatorStakeAmount := validator.GetStakeAmount().ToBig().String() + require.Equal(t, validatorStakeAmount, "1000000000") +} From 
cb8fcf20d6e8a2c65212a19d872c88846914dbfa Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 23 Jan 2024 16:14:21 -0500 Subject: [PATCH 377/762] Override QC Validation with ProofOfStakeCutoverValidator (#929) --- lib/pos_blockchain.go | 32 +++++++++++++++++---- lib/pos_blockchain_test.go | 59 ++++++++++++++++++++++++-------------- lib/pos_constants.go | 24 ++++++++++++++++ 3 files changed, 87 insertions(+), 28 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 065cdf36e..3d497ac3b 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1122,19 +1122,39 @@ func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBloc } // isValidPoSQuorumCertificate validates that the QC of this block is valid, meaning a super majority -// of the validator set has voted (or timed out). Assumes ValidatorEntry list is sorted. +// of the validator set has voted (or timed out). It special cases the first block after the PoS cutover +// by overriding the validator set used to validate the vote QC or high QC in the first block after the +// PoS cutover. func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validatorSet []*ValidatorEntry) error { - validators := toConsensusValidators(validatorSet) - if !block.Header.ValidatorsTimeoutAggregateQC.isEmpty() { - if !consensus.IsValidSuperMajorityAggregateQuorumCertificate( - block.Header.ValidatorsTimeoutAggregateQC, validators, validators) { + voteQCValidators := toConsensusValidators(validatorSet) + aggregateQCValidators := voteQCValidators + + voteQC := block.Header.ValidatorsVoteQC + timeoutAggregateQC := block.Header.ValidatorsTimeoutAggregateQC + + // If the block is the first block after the PoS cutover, it must contain a synthetic QC signed by the + // PoS cutover validator. We need to override the vote QC validator set here. 
+ if block.Header.Height == uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + posCutoverValidator, err := BuildProofOfStakeCutoverValidator() + if err != nil { + return errors.Wrapf(err, "isValidPoSQuorumCertificate: Problem building PoS cutover validator") + } + voteQCValidators = []consensus.Validator{posCutoverValidator} + } + + // Validate the timeout aggregate QC. + if !timeoutAggregateQC.isEmpty() { + if !consensus.IsValidSuperMajorityAggregateQuorumCertificate(timeoutAggregateQC, aggregateQCValidators, voteQCValidators) { return RuleErrorInvalidTimeoutQC } return nil } - if !consensus.IsValidSuperMajorityQuorumCertificate(block.Header.ValidatorsVoteQC, validators) { + + // Validate the vote QC. + if !consensus.IsValidSuperMajorityQuorumCertificate(voteQC, voteQCValidators) { return RuleErrorInvalidVoteQC } + return nil } diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 2166f2351..be3618111 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -1812,7 +1812,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { // Set the prev block hash manually on orphan block orphanBlock.Header.PrevBlockHash = dummyParentBlockHash // Create a QC on the dummy parent block - orphanBlock.Header.ValidatorsVoteQC = _getVoteQC(testMeta, testMeta.posMempool.readOnlyLatestBlockView, dummyParentBlockHash, 16) + orphanBlock.Header.ValidatorsVoteQC = _getVoteQC(testMeta, testMeta.posMempool.readOnlyLatestBlockView, orphanBlock.Header.Height, dummyParentBlockHash, 16) updateProposerVotePartialSignatureForBlock(testMeta, orphanBlock) orphanBlockHash, err := orphanBlock.Hash() require.NoError(t, err) @@ -2467,28 +2467,43 @@ func updateProposerVotePartialSignatureForBlock(testMeta *TestMeta, block *MsgDe block.Header.ProposerVotePartialSignature = sig } -func _getVoteQC(testMeta *TestMeta, latestBlockView *UtxoView, qcBlockHash *BlockHash, qcView uint64) *QuorumCertificate { - votePayload := 
consensus.GetVoteSignaturePayload(qcView, qcBlockHash) - allSnapshotValidators, err := latestBlockView.GetAllSnapshotValidatorSetEntriesByStake() - require.NoError(testMeta.t, err) - // QC stuff. +func _getVoteQC(testMeta *TestMeta, latestBlockView *UtxoView, blockHeight uint64, qcBlockHash *BlockHash, qcView uint64) *QuorumCertificate { + var validators []consensus.Validator + var signersList *bitset.Bitset + var aggregatedSignature *bls.Signature - // Get all the bls keys for the validators that aren't the leader. - signersList := bitset.NewBitset() - var signatures []*bls.Signature - require.NoError(testMeta.t, err) - for ii, validatorEntry := range allSnapshotValidators { - validatorPublicKeyBytes := latestBlockView.GetPublicKeyForPKID(validatorEntry.ValidatorPKID) - validatorPublicKey := Base58CheckEncode(validatorPublicKeyBytes, false, testMeta.chain.params) - validatorBLSPrivateKey := testMeta.pubKeyToBLSKeyMap[validatorPublicKey] - sig, err := validatorBLSPrivateKey.Sign(votePayload[:]) + if blockHeight == uint64(testMeta.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + posCutoverValidator, err := BuildProofOfStakeCutoverValidator() + require.NoError(testMeta.t, err) + + aggregatedSignature, signersList, err = BuildQuorumCertificateAsProofOfStakeCutoverValidator(qcView, qcBlockHash) + require.NoError(testMeta.t, err) + validators = []consensus.Validator{posCutoverValidator} + } else { + votePayload := consensus.GetVoteSignaturePayload(qcView, qcBlockHash) + allSnapshotValidators, err := latestBlockView.GetAllSnapshotValidatorSetEntriesByStake() + require.NoError(testMeta.t, err) + validators = toConsensusValidators(allSnapshotValidators) + + // Get all the bls keys for the validators that aren't the leader. 
+ signersList = bitset.NewBitset() + var signatures []*bls.Signature + require.NoError(testMeta.t, err) + for ii, validatorEntry := range allSnapshotValidators { + validatorPublicKeyBytes := latestBlockView.GetPublicKeyForPKID(validatorEntry.ValidatorPKID) + validatorPublicKey := Base58CheckEncode(validatorPublicKeyBytes, false, testMeta.chain.params) + validatorBLSPrivateKey := testMeta.pubKeyToBLSKeyMap[validatorPublicKey] + sig, err := validatorBLSPrivateKey.Sign(votePayload[:]) + require.NoError(testMeta.t, err) + signatures = append(signatures, sig) + signersList = signersList.Set(ii, true) + } + + // Create the aggregated signature. + aggregatedSignature, err = bls.AggregateSignatures(signatures) require.NoError(testMeta.t, err) - signatures = append(signatures, sig) - signersList = signersList.Set(ii, true) } - // Create the aggregated signature. - aggregatedSignature, err := bls.AggregateSignatures(signatures) - require.NoError(testMeta.t, err) + // Create the vote QC. voteQC := &QuorumCertificate{ BlockHash: qcBlockHash, @@ -2499,7 +2514,7 @@ func _getVoteQC(testMeta *TestMeta, latestBlockView *UtxoView, qcBlockHash *Bloc }, } - isValid := consensus.IsValidSuperMajorityQuorumCertificate(voteQC, toConsensusValidators(allSnapshotValidators)) + isValid := consensus.IsValidSuperMajorityQuorumCertificate(voteQC, validators) require.True(testMeta.t, isValid) return voteQC } @@ -2528,7 +2543,7 @@ func _getFullRealBlockTemplate(testMeta *TestMeta, latestBlockView *UtxoView, bl } // Create the vote QC. 
- voteQC := _getVoteQC(testMeta, latestBlockView, chainTipHash, qcView) + voteQC := _getVoteQC(testMeta, latestBlockView, blockHeight, chainTipHash, qcView) if !isTimeout { blockTemplate.Header.ValidatorsVoteQC = voteQC } else { diff --git a/lib/pos_constants.go b/lib/pos_constants.go index 40045b122..a387673f0 100644 --- a/lib/pos_constants.go +++ b/lib/pos_constants.go @@ -2,6 +2,7 @@ package lib import ( "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections/bitset" "github.com/deso-protocol/core/consensus" "github.com/holiman/uint256" "github.com/pkg/errors" @@ -28,3 +29,26 @@ func BuildProofOfStakeCutoverValidator() (consensus.Validator, error) { } return validatorEntry, nil } + +func BuildQuorumCertificateAsProofOfStakeCutoverValidator(view uint64, blockHash *BlockHash) ( + _aggregatedSignature *bls.Signature, + _signersList *bitset.Bitset, + _err error, +) { + // Construct the payload first + votePayload := consensus.GetVoteSignaturePayload(view, blockHash) + + // Build the validator's private key + privateKey, err := BuildProofOfStakeCutoverValidatorBLSPrivateKey() + if err != nil { + return nil, nil, errors.Errorf("BuildQuorumCertificateAsProofOfStakeCutoverValidator: %v", err) + } + + // Sign the payload + signature, err := privateKey.Sign(votePayload[:]) + if err != nil { + return nil, nil, errors.Errorf("BuildQuorumCertificateAsProofOfStakeCutoverValidator: Error signing payload: %v", err) + } + + return signature, bitset.NewBitset().Set(0, true), nil +} From 9f24098b70e410c44cd2e37a4de36d6599ba0d4e Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 24 Jan 2024 11:01:37 -0500 Subject: [PATCH 378/762] Remove StakerPKID from StakeLimitKey (#933) --- lib/block_view.go | 4 +- lib/block_view_stake.go | 49 ++++------------------- lib/block_view_stake_test.go | 77 ++++++------------------------------ lib/network.go | 9 ----- 4 files changed, 21 insertions(+), 118 deletions(-) diff --git 
a/lib/block_view.go b/lib/block_view.go index c46e2164b..23b6f7cf4 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -2568,13 +2568,13 @@ func (bav *UtxoView) _checkAndUpdateDerivedKeySpendingLimit( case TxnTypeUnstake: txnMeta := txn.TxnMeta.(*UnstakeMetadata) if derivedKeyEntry, err = bav._checkUnstakeTxnSpendingLimitAndUpdateDerivedKey( - derivedKeyEntry, txn.PublicKey, txnMeta); err != nil { + derivedKeyEntry, txnMeta); err != nil { return utxoOpsForTxn, err } case TxnTypeUnlockStake: txnMeta := txn.TxnMeta.(*UnlockStakeMetadata) if derivedKeyEntry, err = bav._checkUnlockStakeTxnSpendingLimitAndUpdateDerivedKey( - derivedKeyEntry, txn.PublicKey, txnMeta); err != nil { + derivedKeyEntry, txnMeta); err != nil { return utxoOpsForTxn, err } default: diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 5b2ba0f0b..1acf463dc 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -2917,20 +2917,17 @@ func (bav *UtxoView) CreateUnlockStakeTxindexMetadata(utxoOp *UtxoOperation, txn type StakeLimitKey struct { ValidatorPKID PKID - StakerPKID PKID } -func MakeStakeLimitKey(validatorPKID *PKID, stakerPKID *PKID) StakeLimitKey { +func MakeStakeLimitKey(validatorPKID *PKID) StakeLimitKey { return StakeLimitKey{ ValidatorPKID: *validatorPKID, - StakerPKID: *stakerPKID, } } func (stakeLimitKey *StakeLimitKey) Encode() []byte { var data []byte data = append(data, stakeLimitKey.ValidatorPKID.ToBytes()...) - data = append(data, stakeLimitKey.StakerPKID.ToBytes()...) 
return data } @@ -2944,13 +2941,6 @@ func (stakeLimitKey *StakeLimitKey) Decode(rr *bytes.Reader) error { } stakeLimitKey.ValidatorPKID = *validatorPKID - // StakerPKID - stakerPKID := &PKID{} - if err = stakerPKID.FromBytes(rr); err != nil { - return errors.Wrap(err, "StakeLimitKey.Decode: Problem reading StakerPKID: ") - } - stakeLimitKey.StakerPKID = *stakerPKID - return nil } @@ -2960,15 +2950,9 @@ func (bav *UtxoView) _checkStakeTxnSpendingLimitAndUpdateDerivedKey( txMeta *StakeMetadata, ) (DerivedKeyEntry, error) { // The DerivedKeyEntry.TransactionSpendingLimit for staking maps - // ValidatorPKID || StakerPKID to the amount of stake-able DESO + // ValidatorPKID to the amount of stake-able DESO // nanos allowed for this derived key. - // Convert TransactorPublicKeyBytes to StakerPKID. - stakerPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKeyBytes) - if stakerPKIDEntry == nil || stakerPKIDEntry.isDeleted { - return derivedKeyEntry, errors.Wrapf(RuleErrorInvalidStakerPKID, "UtxoView._checkStakeTxnSpendingLimitAndUpdateDerivedKey: ") - } - // Convert ValidatorPublicKey to ValidatorPKID. validatorEntry, err := bav.GetValidatorByPublicKey(txMeta.ValidatorPublicKey) if err != nil { @@ -2984,7 +2968,7 @@ func (bav *UtxoView) _checkStakeTxnSpendingLimitAndUpdateDerivedKey( for _, validatorPKID := range []*PKID{validatorEntry.ValidatorPKID, &ZeroPKID} { // Retrieve DerivedKeyEntry.TransactionSpendingLimit. - stakeLimitKey := MakeStakeLimitKey(validatorPKID, stakerPKIDEntry.PKID) + stakeLimitKey := MakeStakeLimitKey(validatorPKID) spendingLimit, exists := derivedKeyEntry.TransactionSpendingLimitTracker.StakeLimitMap[stakeLimitKey] if !exists { continue @@ -3036,19 +3020,12 @@ func (bav *UtxoView) _checkStakeTxnSpendingLimitAndUpdateDerivedKey( // to consolidate in the future. 
func (bav *UtxoView) _checkUnstakeTxnSpendingLimitAndUpdateDerivedKey( derivedKeyEntry DerivedKeyEntry, - transactorPublicKeyBytes []byte, txMeta *UnstakeMetadata, ) (DerivedKeyEntry, error) { // The DerivedKeyEntry.TransactionSpendingLimit for unstaking maps - // ValidatorPKID || StakerPKID to the amount of unstake-able DESO + // ValidatorPKID to the amount of unstake-able DESO // nanos allowed for this derived key. - // Convert TransactorPublicKeyBytes to StakerPKID. - stakerPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKeyBytes) - if stakerPKIDEntry == nil || stakerPKIDEntry.isDeleted { - return derivedKeyEntry, errors.Wrapf(RuleErrorInvalidStakerPKID, "UtxoView._checkUnstakeTxnSpendingLimitAndUpdateDerivedKey: ") - } - // Convert ValidatorPublicKey to ValidatorPKID. validatorEntry, err := bav.GetValidatorByPublicKey(txMeta.ValidatorPublicKey) if err != nil { @@ -3064,7 +3041,7 @@ func (bav *UtxoView) _checkUnstakeTxnSpendingLimitAndUpdateDerivedKey( for _, validatorPKID := range []*PKID{validatorEntry.ValidatorPKID, &ZeroPKID} { // Retrieve DerivedKeyEntry.TransactionSpendingLimit. - stakeLimitKey := MakeStakeLimitKey(validatorPKID, stakerPKIDEntry.PKID) + stakeLimitKey := MakeStakeLimitKey(validatorPKID) spendingLimit, exists := derivedKeyEntry.TransactionSpendingLimitTracker.UnstakeLimitMap[stakeLimitKey] if !exists { continue @@ -3112,19 +3089,12 @@ func (bav *UtxoView) _checkUnstakeTxnSpendingLimitAndUpdateDerivedKey( func (bav *UtxoView) _checkUnlockStakeTxnSpendingLimitAndUpdateDerivedKey( derivedKeyEntry DerivedKeyEntry, - transactorPublicKeyBytes []byte, txMeta *UnlockStakeMetadata, ) (DerivedKeyEntry, error) { // The DerivedKeyEntry.TransactionSpendingLimit for unlocking stake maps - // ValidatorPKID || StakerPKID to the number of UnlockStake transactions + // ValidatorPKID to the number of UnlockStake transactions // this derived key is allowed to perform. - // Convert TransactorPublicKeyBytes to StakerPKID. 
- stakerPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKeyBytes) - if stakerPKIDEntry == nil || stakerPKIDEntry.isDeleted { - return derivedKeyEntry, errors.Wrapf(RuleErrorInvalidStakerPKID, "UtxoView._checkUnlockStakeTxnSpendingLimitAndUpdateDerivedKey: ") - } - // Convert ValidatorPublicKey to ValidatorPKID. validatorEntry, err := bav.GetValidatorByPublicKey(txMeta.ValidatorPublicKey) if err != nil { @@ -3138,7 +3108,7 @@ func (bav *UtxoView) _checkUnlockStakeTxnSpendingLimitAndUpdateDerivedKey( // If not found, check spending limit for any validator. for _, validatorPKID := range []*PKID{validatorEntry.ValidatorPKID, &ZeroPKID} { // Retrieve DerivedKeyEntry.TransactionSpendingLimit. - stakeLimitKey := MakeStakeLimitKey(validatorPKID, stakerPKIDEntry.PKID) + stakeLimitKey := MakeStakeLimitKey(validatorPKID) spendingLimit, exists := derivedKeyEntry.TransactionSpendingLimitTracker.UnlockStakeLimitMap[stakeLimitKey] if !exists || spendingLimit <= 0 { continue @@ -3167,11 +3137,6 @@ func (bav *UtxoView) IsValidStakeLimitKey(transactorPublicKeyBytes []byte, stake return errors.Wrapf(RuleErrorTransactionSpendingLimitInvalidStaker, "UtxoView.IsValidStakeLimitKey: ") } - // Verify TransactorPKID == StakerPKID. - if !transactorPKIDEntry.PKID.Eq(&stakeLimitKey.StakerPKID) { - return errors.Wrapf(RuleErrorTransactionSpendingLimitInvalidStaker, "UtxoView.IsValidStakeLimitKey: ") - } - // Verify ValidatorEntry. 
if stakeLimitKey.ValidatorPKID.IsZeroPKID() { // The ZeroPKID is a special case that indicates that the spending limit diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 42c2481f3..4e7c36230 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -967,25 +967,10 @@ func TestStakingWithDerivedKey(t *testing.T) { _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) } - { - // RuleErrorTransactionSpendingLimitInvalidStaker - // sender tries to create a DerivedKey that would allow - // m1 to stake 100 $DESO nanos with m0. Errors. - stakeLimitKey := MakeStakeLimitKey(m0PKID, m1PKID) - txnSpendingLimit := &TransactionSpendingLimit{ - GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit - TransactionCountLimitMap: map[TxnType]uint64{ - TxnTypeAuthorizeDerivedKey: 1, - }, - StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(100)}, - } - derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) - require.Error(t, err) - } { // RuleErrorTransactionSpendingLimitInvalidValidator // sender tries to create a DerivedKey to stake with m2. Validator doesn't exist. Errors. - stakeLimitKey := MakeStakeLimitKey(m2PKID, senderPKID) + stakeLimitKey := MakeStakeLimitKey(m2PKID) txnSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit TransactionCountLimitMap: map[TxnType]uint64{ @@ -996,25 +981,10 @@ func TestStakingWithDerivedKey(t *testing.T) { derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) require.Error(t, err) } - { - // RuleErrorTransactionSpendingLimitInvalidStaker - // sender tries to create a DerivedKey that would allow - // m1 to unstake 100 $DESO nanos from m0. Errors. 
- stakeLimitKey := MakeStakeLimitKey(m0PKID, m1PKID) - txnSpendingLimit := &TransactionSpendingLimit{ - GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit - TransactionCountLimitMap: map[TxnType]uint64{ - TxnTypeAuthorizeDerivedKey: 1, - }, - UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(100)}, - } - derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) - require.Error(t, err) - } { // RuleErrorTransactionSpendingLimitInvalidValidator // sender tries to create a DerivedKey to unstake from m2. Validator doesn't exist. Errors. - stakeLimitKey := MakeStakeLimitKey(m2PKID, senderPKID) + stakeLimitKey := MakeStakeLimitKey(m2PKID) txnSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit TransactionCountLimitMap: map[TxnType]uint64{ @@ -1025,25 +995,10 @@ func TestStakingWithDerivedKey(t *testing.T) { derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) require.Error(t, err) } - { - // RuleErrorTransactionSpendingLimitInvalidStaker - // sender tries to create a DerivedKey that would allow - // m1 to unlock stake from m0. Errors. - stakeLimitKey := MakeStakeLimitKey(m0PKID, m1PKID) - txnSpendingLimit := &TransactionSpendingLimit{ - GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit - TransactionCountLimitMap: map[TxnType]uint64{ - TxnTypeAuthorizeDerivedKey: 1, - }, - UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey: 100}, - } - derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) - require.Error(t, err) - } { // RuleErrorTransactionSpendingLimitInvalidValidator // sender tries to create a DerivedKey to stake with m2. Validator doesn't exist. Errors. 
- stakeLimitKey := MakeStakeLimitKey(m2PKID, senderPKID) + stakeLimitKey := MakeStakeLimitKey(m2PKID) txnSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit TransactionCountLimitMap: map[TxnType]uint64{ @@ -1058,7 +1013,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes with m0 using a DerivedKey. // sender creates a DerivedKey to stake up to 100 $DESO nanos with m0. - stakeLimitKey := MakeStakeLimitKey(m0PKID, senderPKID) + stakeLimitKey := MakeStakeLimitKey(m0PKID) txnSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit TransactionCountLimitMap: map[TxnType]uint64{ @@ -1116,7 +1071,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes from m0 using a DerivedKey. // sender creates a DerivedKey to unstake up to 50 $DESO nanos from m0. - stakeLimitKey := MakeStakeLimitKey(m0PKID, senderPKID) + stakeLimitKey := MakeStakeLimitKey(m0PKID) txnSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit TransactionCountLimitMap: map[TxnType]uint64{ @@ -1207,7 +1162,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unlocks stake using a DerivedKey. // sender creates a DerivedKey to perform 1 unlock stake operation with m0. - stakeLimitKey := MakeStakeLimitKey(m0PKID, senderPKID) + stakeLimitKey := MakeStakeLimitKey(m0PKID) txnSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit TransactionCountLimitMap: map[TxnType]uint64{ @@ -1307,7 +1262,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes, unstakes, and unlocks stake using a DerivedKey scoped to any validator. // sender creates a DerivedKey that can stake, unstake, and unlock stake with any validator. 
- stakeLimitKey := MakeStakeLimitKey(&ZeroPKID, senderPKID) + stakeLimitKey := MakeStakeLimitKey(&ZeroPKID) txnSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit TransactionCountLimitMap: map[TxnType]uint64{ @@ -1461,8 +1416,8 @@ func TestStakingWithDerivedKey(t *testing.T) { // any validator to cover their staking + unstaking + unlocking stake txns. // sender creates a DerivedKey to stake, unstake, and unlock stake with m1 or any validator. - scopedStakeLimitKey := MakeStakeLimitKey(m1PKID, senderPKID) - globalStakeLimitKey := MakeStakeLimitKey(&ZeroPKID, senderPKID) + scopedStakeLimitKey := MakeStakeLimitKey(m1PKID) + globalStakeLimitKey := MakeStakeLimitKey(&ZeroPKID) txnSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit TransactionCountLimitMap: map[TxnType]uint64{ @@ -1545,8 +1500,8 @@ func TestStakingWithDerivedKey(t *testing.T) { } { // Test TransactionSpendingLimit.ToMetamaskString() scoped to one validator. 
- stakeLimitKey1 := MakeStakeLimitKey(m0PKID, senderPKID) - stakeLimitKey2 := MakeStakeLimitKey(m1PKID, senderPKID) + stakeLimitKey1 := MakeStakeLimitKey(m0PKID) + stakeLimitKey2 := MakeStakeLimitKey(m1PKID) txnSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit TransactionCountLimitMap: map[TxnType]uint64{ @@ -1570,36 +1525,31 @@ func TestStakingWithDerivedKey(t *testing.T) { "\tStaking Restrictions:\n"+ "\t\t[\n"+ "\t\t\tValidator PKID: "+m0Pub+"\n"+ - "\t\t\tStaker PKID: "+senderPkString+"\n"+ "\t\t\tStaking Limit: 1.50 $DESO\n"+ "\t\t]\n"+ "\t\t[\n"+ "\t\t\tValidator PKID: "+m1Pub+"\n"+ - "\t\t\tStaker PKID: "+senderPkString+"\n"+ "\t\t\tStaking Limit: 2.00 $DESO\n"+ "\t\t]\n"+ "\tUnstaking Restrictions:\n"+ "\t\t[\n"+ "\t\t\tValidator PKID: "+m0Pub+"\n"+ - "\t\t\tStaker PKID: "+senderPkString+"\n"+ "\t\t\tUnstaking Limit: 3.25 $DESO\n"+ "\t\t]\n"+ "\tUnlocking Stake Restrictions:\n"+ "\t\t[\n"+ "\t\t\tValidator PKID: "+m0Pub+"\n"+ - "\t\t\tStaker PKID: "+senderPkString+"\n"+ "\t\t\tTransaction Count: 2\n"+ "\t\t]\n"+ "\t\t[\n"+ "\t\t\tValidator PKID: "+m1Pub+"\n"+ - "\t\t\tStaker PKID: "+senderPkString+"\n"+ "\t\t\tTransaction Count: 3\n"+ "\t\t]\n", ) } { // Test TransactionSpendingLimit.ToMetamaskString() scoped to any validator. 
- stakeLimitKey := MakeStakeLimitKey(&ZeroPKID, senderPKID) + stakeLimitKey := MakeStakeLimitKey(&ZeroPKID) txnSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit TransactionCountLimitMap: map[TxnType]uint64{ @@ -1622,19 +1572,16 @@ func TestStakingWithDerivedKey(t *testing.T) { "\tStaking Restrictions:\n"+ "\t\t[\n"+ "\t\t\tValidator PKID: Any\n"+ - "\t\t\tStaker PKID: "+senderPkString+"\n"+ "\t\t\tStaking Limit: 0.65 $DESO\n"+ "\t\t]\n"+ "\tUnstaking Restrictions:\n"+ "\t\t[\n"+ "\t\t\tValidator PKID: Any\n"+ - "\t\t\tStaker PKID: "+senderPkString+"\n"+ "\t\t\tUnstaking Limit: 2.10 $DESO\n"+ "\t\t]\n"+ "\tUnlocking Stake Restrictions:\n"+ "\t\t[\n"+ "\t\t\tValidator PKID: Any\n"+ - "\t\t\tStaker PKID: "+senderPkString+"\n"+ "\t\t\tTransaction Count: 1\n"+ "\t\t]\n", ) diff --git a/lib/network.go b/lib/network.go index 15b230a83..83bc4520d 100644 --- a/lib/network.go +++ b/lib/network.go @@ -6180,9 +6180,6 @@ func (tsl *TransactionSpendingLimit) ToMetamaskString(params *DeSoParams) string validatorPublicKeyBase58Check = Base58CheckEncode(limitKey.ValidatorPKID.ToBytes(), false, params) } opString += _indt(indentationCounter) + "Validator PKID: " + validatorPublicKeyBase58Check + "\n" - // StakerPKID - stakerPublicKeyBase58Check := Base58CheckEncode(limitKey.StakerPKID.ToBytes(), false, params) - opString += _indt(indentationCounter) + "Staker PKID: " + stakerPublicKeyBase58Check + "\n" // StakeLimit stakeLimitDESO := NewFloat().Quo( NewFloat().SetInt(limit.ToBig()), NewFloat().SetUint64(NanosPerUnit), @@ -6213,9 +6210,6 @@ func (tsl *TransactionSpendingLimit) ToMetamaskString(params *DeSoParams) string validatorPublicKeyBase58Check = Base58CheckEncode(limitKey.ValidatorPKID.ToBytes(), false, params) } opString += _indt(indentationCounter) + "Validator PKID: " + validatorPublicKeyBase58Check + "\n" - // StakerPKID - stakerPublicKeyBase58Check := Base58CheckEncode(limitKey.StakerPKID.ToBytes(), false, params) - 
opString += _indt(indentationCounter) + "Staker PKID: " + stakerPublicKeyBase58Check + "\n" // UnstakeLimit unstakeLimitDESO := NewFloat().Quo( NewFloat().SetInt(limit.ToBig()), NewFloat().SetUint64(NanosPerUnit), @@ -6246,9 +6240,6 @@ func (tsl *TransactionSpendingLimit) ToMetamaskString(params *DeSoParams) string validatorPublicKeyBase58Check = Base58CheckEncode(limitKey.ValidatorPKID.ToBytes(), false, params) } opString += _indt(indentationCounter) + "Validator PKID: " + validatorPublicKeyBase58Check + "\n" - // StakerPKID - stakerPublicKeyBase58Check := Base58CheckEncode(limitKey.StakerPKID.ToBytes(), false, params) - opString += _indt(indentationCounter) + "Staker PKID: " + stakerPublicKeyBase58Check + "\n" // UnlockStakeLimit opString += _indt(indentationCounter) + "Transaction Count: " + strconv.FormatUint(limit, 10) + "\n" From 07660f78c79568a35c66ae6ac63c3b767b025bc7 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 24 Jan 2024 11:09:14 -0500 Subject: [PATCH 379/762] Use PoS Cutover Validator Only On Timeouts (#936) --- lib/pos_blockchain.go | 8 ++++--- lib/pos_blockchain_test.go | 47 +++++++++++++++----------------------- 2 files changed, 24 insertions(+), 31 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 3d497ac3b..fdf795d87 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1132,9 +1132,11 @@ func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validator voteQC := block.Header.ValidatorsVoteQC timeoutAggregateQC := block.Header.ValidatorsTimeoutAggregateQC - // If the block is the first block after the PoS cutover, it must contain a synthetic QC signed by the - // PoS cutover validator. We need to override the vote QC validator set here. 
- if block.Header.Height == uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + cutoverHeight := uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) + + // If the block is the first block after the PoS cutover and has a timeout aggregate QC, then the + // highQC must be a synthetic QC. We need to override the validator set used to validate the high QC. + if block.Header.Height == cutoverHeight && !timeoutAggregateQC.isEmpty() { posCutoverValidator, err := BuildProofOfStakeCutoverValidator() if err != nil { return errors.Wrapf(err, "isValidPoSQuorumCertificate: Problem building PoS cutover validator") diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index be3618111..b97e6b0fd 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2472,38 +2472,29 @@ func _getVoteQC(testMeta *TestMeta, latestBlockView *UtxoView, blockHeight uint6 var signersList *bitset.Bitset var aggregatedSignature *bls.Signature - if blockHeight == uint64(testMeta.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { - posCutoverValidator, err := BuildProofOfStakeCutoverValidator() - require.NoError(testMeta.t, err) - - aggregatedSignature, signersList, err = BuildQuorumCertificateAsProofOfStakeCutoverValidator(qcView, qcBlockHash) - require.NoError(testMeta.t, err) - validators = []consensus.Validator{posCutoverValidator} - } else { - votePayload := consensus.GetVoteSignaturePayload(qcView, qcBlockHash) - allSnapshotValidators, err := latestBlockView.GetAllSnapshotValidatorSetEntriesByStake() - require.NoError(testMeta.t, err) - validators = toConsensusValidators(allSnapshotValidators) - - // Get all the bls keys for the validators that aren't the leader. 
- signersList = bitset.NewBitset() - var signatures []*bls.Signature - require.NoError(testMeta.t, err) - for ii, validatorEntry := range allSnapshotValidators { - validatorPublicKeyBytes := latestBlockView.GetPublicKeyForPKID(validatorEntry.ValidatorPKID) - validatorPublicKey := Base58CheckEncode(validatorPublicKeyBytes, false, testMeta.chain.params) - validatorBLSPrivateKey := testMeta.pubKeyToBLSKeyMap[validatorPublicKey] - sig, err := validatorBLSPrivateKey.Sign(votePayload[:]) - require.NoError(testMeta.t, err) - signatures = append(signatures, sig) - signersList = signersList.Set(ii, true) - } + votePayload := consensus.GetVoteSignaturePayload(qcView, qcBlockHash) + allSnapshotValidators, err := latestBlockView.GetAllSnapshotValidatorSetEntriesByStake() + require.NoError(testMeta.t, err) + validators = toConsensusValidators(allSnapshotValidators) - // Create the aggregated signature. - aggregatedSignature, err = bls.AggregateSignatures(signatures) + // Get all the bls keys for the validators that aren't the leader. + signersList = bitset.NewBitset() + var signatures []*bls.Signature + require.NoError(testMeta.t, err) + for ii, validatorEntry := range allSnapshotValidators { + validatorPublicKeyBytes := latestBlockView.GetPublicKeyForPKID(validatorEntry.ValidatorPKID) + validatorPublicKey := Base58CheckEncode(validatorPublicKeyBytes, false, testMeta.chain.params) + validatorBLSPrivateKey := testMeta.pubKeyToBLSKeyMap[validatorPublicKey] + sig, err := validatorBLSPrivateKey.Sign(votePayload[:]) require.NoError(testMeta.t, err) + signatures = append(signatures, sig) + signersList = signersList.Set(ii, true) } + // Create the aggregated signature. + aggregatedSignature, err = bls.AggregateSignatures(signatures) + require.NoError(testMeta.t, err) + // Create the vote QC. 
voteQC := &QuorumCertificate{ BlockHash: qcBlockHash, From 60e278bb112b0953459ad81803dc015064952ce8 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 24 Jan 2024 11:35:20 -0500 Subject: [PATCH 380/762] Add Genesis QC to FastHotStuffEventLoop (#935) * Add Trusted Geness QC to FastHotStuffEventLoop * Clearer naming * Address Nina's comments --- consensus/event_loop.go | 7 +++ consensus/event_loop_test.go | 90 +++++++++++++++++++---------- consensus/integration_test.go | 2 +- consensus/integration_test_types.go | 4 +- consensus/mocking.go | 6 +- consensus/types.go | 7 ++- lib/pos_consensus.go | 54 ++++++++++++++++- 7 files changed, 132 insertions(+), 38 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 06f507701..b35fd7659 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -25,6 +25,9 @@ func NewFastHotStuffEventLoop() *fastHotStuffEventLoop { // params: // - crankTimerInterval: crank timer interval duration must be > 0 // - timeoutBaseDuration: timeout base duration must be > 0 +// - genesisQC: quorum certificate used as the genesis for the PoS chain. This QC is a trusted input +// that is used to override the highQC in timeout messages and timeout aggregate QCs when there +// is a timeout at the first block height of the PoS chain. // - tip: the current tip of the blockchain, with the validator list at that block height. This may // be a committed or uncommitted block. 
// - safeBlocks: an unordered slice of blocks including the committed tip, the uncommitted tip, @@ -37,6 +40,7 @@ func NewFastHotStuffEventLoop() *fastHotStuffEventLoop { func (fc *fastHotStuffEventLoop) Init( crankTimerInterval time.Duration, timeoutBaseDuration time.Duration, + genesisQC QuorumCertificate, tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList, ) error { @@ -57,6 +61,9 @@ func (fc *fastHotStuffEventLoop) Init( return errors.New("FastHotStuffEventLoop.Init: Timeout base duration must be > 0") } + // Store the genesis QC + fc.genesisQC = genesisQC + // Validate the safe blocks and validator lists, and store them if err := fc.storeBlocks(tip, safeBlocks); err != nil { return errors.Wrap(err, "FastHotStuffEventLoop.Init: ") diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 8939a7eb4..e233644ec 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -24,9 +24,11 @@ func TestInit(t *testing.T) { // Test Init() function with invalid block construction interval { fc := NewFastHotStuffEventLoop() + genesisBlock := createDummyBlock(2) err := fc.Init(0, 1, - BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip - []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetQC(), // genesisQC + BlockWithValidatorList{genesisBlock, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } @@ -34,9 +36,11 @@ func TestInit(t *testing.T) { // Test Init() function with invalid timeout duration { fc := NewFastHotStuffEventLoop() + genesisBlock := createDummyBlock(2) err := fc.Init(1, 0, - BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip - []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetQC(), // genesisQC + BlockWithValidatorList{genesisBlock, 
createDummyValidatorList()}, // tip + []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } @@ -44,9 +48,11 @@ func TestInit(t *testing.T) { // Test Init() function with malformed tip block { fc := NewFastHotStuffEventLoop() + genesisBlock := createDummyBlock(2) err := fc.Init(1, 1, - BlockWithValidatorList{nil, createDummyValidatorList()}, // tip - []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetQC(), // genesisQC + BlockWithValidatorList{nil, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } @@ -54,9 +60,11 @@ func TestInit(t *testing.T) { // Test Init() function with malformed validator list for tip block { fc := NewFastHotStuffEventLoop() + genesisBlock := createDummyBlock(2) err := fc.Init(1, 1, - BlockWithValidatorList{createDummyBlock(2), nil}, // tip - []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetQC(), // genesisQC + BlockWithValidatorList{genesisBlock, nil}, // tip + []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } @@ -64,9 +72,11 @@ func TestInit(t *testing.T) { // Test Init() function with malformed safe block { fc := NewFastHotStuffEventLoop() + genesisBlock := createDummyBlock(2) err := fc.Init(1, 1, - BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip - []BlockWithValidatorList{{nil, createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetQC(), // genesisQC + BlockWithValidatorList{genesisBlock, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{nil, createDummyValidatorList()}}, // safeBlocks ) require.Error(t, err) } @@ -74,21 +84,23 @@ func TestInit(t *testing.T) { // Test Init() function with malformed validator list for safe block { fc := NewFastHotStuffEventLoop() 
+ genesisBlock := createDummyBlock(2) err := fc.Init(1, 1, - BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip - []BlockWithValidatorList{{createDummyBlock(2), nil}}, // safeBlocks + genesisBlock.GetQC(), // genesisQC + BlockWithValidatorList{genesisBlock, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{genesisBlock, nil}}, // safeBlocks ) require.Error(t, err) } // Test Init() function with valid parameters { - block := createDummyBlock(2) - fc := NewFastHotStuffEventLoop() + genesisBlock := createDummyBlock(2) err := fc.Init(100, 101, - BlockWithValidatorList{block, createDummyValidatorList()}, // tip - []BlockWithValidatorList{{block, createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetQC(), // genesisQC + BlockWithValidatorList{genesisBlock, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks ) require.NoError(t, err) @@ -97,7 +109,7 @@ func TestInit(t *testing.T) { require.NotPanics(t, fc.Stop) // Calling Stop() on an initialized instance should be a no-op require.Equal(t, fc.status, eventLoopStatusInitialized) - require.Equal(t, fc.tip.block.GetBlockHash().GetValue(), block.GetBlockHash().GetValue()) + require.Equal(t, fc.tip.block.GetBlockHash().GetValue(), genesisBlock.GetBlockHash().GetValue()) require.Equal(t, fc.tip.block.GetView(), uint64(2)) require.Equal(t, fc.tip.block.GetHeight(), uint64(2)) @@ -109,7 +121,7 @@ func TestInit(t *testing.T) { require.Equal(t, len(fc.tip.validatorLookup), 2) require.Equal(t, len(fc.safeBlocks), 1) - require.Equal(t, fc.safeBlocks[0].block.GetBlockHash().GetValue(), block.GetBlockHash().GetValue()) + require.Equal(t, fc.safeBlocks[0].block.GetBlockHash().GetValue(), genesisBlock.GetBlockHash().GetValue()) require.Equal(t, fc.safeBlocks[0].block.GetView(), uint64(2)) require.Equal(t, fc.safeBlocks[0].block.GetHeight(), uint64(2)) require.Equal(t, len(fc.safeBlocks[0].validatorList), 2) @@ -124,8 
+136,9 @@ func TestProcessTipBlock(t *testing.T) { // Initialize the event loop { - tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock}) + genesisBlock := createDummyBlock(2) + tipBlock := BlockWithValidatorList{genesisBlock, createDummyValidatorList()} + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, genesisBlock.GetQC(), tipBlock, []BlockWithValidatorList{tipBlock}) require.NoError(t, err) } @@ -257,8 +270,9 @@ func TestAdvanceViewOnTimeout(t *testing.T) { // Init the event loop { // BlockHeight = 1, Current View = 3 - tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock}) + genesisBlock := createDummyBlock(2) + tipBlock := BlockWithValidatorList{genesisBlock, createDummyValidatorList()} + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, genesisBlock.GetQC(), tipBlock, []BlockWithValidatorList{tipBlock}) require.NoError(t, err) } @@ -348,8 +362,9 @@ func TestProcessValidatorVote(t *testing.T) { // Init the event loop { // BlockHeight = 1, Current View = 3 - tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock}) + genesisBlock := createDummyBlock(2) + tipBlock := BlockWithValidatorList{genesisBlock, createDummyValidatorList()} + err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, genesisBlock.GetQC(), tipBlock, []BlockWithValidatorList{tipBlock}) require.NoError(t, err) } @@ -439,9 +454,15 @@ func TestProcessValidatorTimeout(t *testing.T) { // Init the event loop { // BlockHeight = 3, Current View = 4 - genesisBlock := BlockWithValidatorList{createDummyBlock(2), validatorList} - tipBlock := BlockWithValidatorList{createBlockWithParentAndValidators(genesisBlock.Block, 
privateKeys), validatorList} - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock, genesisBlock}) + genesisBlock := createDummyBlock(2) + tipBlock := BlockWithValidatorList{createBlockWithParentAndValidators(genesisBlock, privateKeys), validatorList} + err := fc.Init( + oneHourInNanoSecs, + oneHourInNanoSecs, + genesisBlock.GetQC(), + tipBlock, + []BlockWithValidatorList{tipBlock, {genesisBlock, validatorList}}, + ) require.NoError(t, err) } @@ -552,6 +573,7 @@ func TestTimeoutScheduledTaskExecuted(t *testing.T) { fc := NewFastHotStuffEventLoop() err := fc.Init(oneHourInNanoSecs, oneMilliSecondInNanoSeconds, + dummyBlock.GetQC(), // genesisQC BlockWithValidatorList{dummyBlock, createDummyValidatorList()}, // tip []BlockWithValidatorList{{dummyBlock, createDummyValidatorList()}}, // safeBlocks ) @@ -596,8 +618,9 @@ func TestResetEventLoopSignal(t *testing.T) { fc := NewFastHotStuffEventLoop() // Init the event loop - tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} - err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock}) + genesisBlock := createDummyBlock(2) + tipBlock := BlockWithValidatorList{genesisBlock, createDummyValidatorList()} + err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, genesisBlock.GetQC(), tipBlock, []BlockWithValidatorList{tipBlock}) require.NoError(t, err) // Start the event loop @@ -664,6 +687,7 @@ func TestVoteQCConstructionSignal(t *testing.T) { { fc := NewFastHotStuffEventLoop() err := fc.Init(time.Microsecond, time.Hour, + block.GetQC(), // genesisQC BlockWithValidatorList{block, validatorList}, // tip []BlockWithValidatorList{{block, validatorList}}, // safeBlocks ) @@ -701,6 +725,7 @@ func TestVoteQCConstructionSignal(t *testing.T) { { fc := NewFastHotStuffEventLoop() err := fc.Init(time.Microsecond, time.Hour, + block.GetQC(), // genesisQC BlockWithValidatorList{block, validatorList}, // tip 
[]BlockWithValidatorList{{block, validatorList}}, // safeBlocks ) @@ -786,6 +811,7 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { { fc := NewFastHotStuffEventLoop() err := fc.Init(time.Microsecond, time.Hour, + block1.GetQC(), // genesisQC BlockWithValidatorList{block2, validatorList}, // tip []BlockWithValidatorList{ // safeBlocks {block1, validatorList}, @@ -829,6 +855,7 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { { fc := NewFastHotStuffEventLoop() err := fc.Init(time.Microsecond, time.Hour, + block1.GetQC(), // genesisQC BlockWithValidatorList{block2, validatorList}, // tip []BlockWithValidatorList{ // safeBlocks {block1, validatorList}, @@ -911,8 +938,9 @@ func TestFastHotStuffEventLoopStartStop(t *testing.T) { fc := NewFastHotStuffEventLoop() // Init the event loop - tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} - err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, tipBlock, []BlockWithValidatorList{tipBlock}) + genesisBlock := createDummyBlock(2) + tipBlock := BlockWithValidatorList{genesisBlock, createDummyValidatorList()} + err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, genesisBlock.GetQC(), tipBlock, []BlockWithValidatorList{tipBlock}) require.NoError(t, err) // Start the event loop diff --git a/consensus/integration_test.go b/consensus/integration_test.go index 6f924294e..545fc9a5d 100644 --- a/consensus/integration_test.go +++ b/consensus/integration_test.go @@ -234,7 +234,7 @@ func TestNetworkRecoveryAfterCatastrophicFailure(t *testing.T) { // After a catastrophic network failure, nodes with a super-majority of stake need to somehow // agree on a starting state of the chain. As long as they are able to sync from or to any peer // that eventually becomes a block proposer, the network will recover. 
- node3.Resync(node1.eventLoop.tip.block.(*block), collections.MapValues(node1.safeBlocks)) + node3.Resync(genesisBlock, node1.eventLoop.tip.block.(*block), collections.MapValues(node1.safeBlocks)) node3.Start() // Let all nodes run for 10 seconds. Eventually all nodes will converge on a single view and diff --git a/consensus/integration_test_types.go b/consensus/integration_test_types.go index 3613e63e7..d0a9ce08e 100644 --- a/consensus/integration_test_types.go +++ b/consensus/integration_test_types.go @@ -62,6 +62,7 @@ func (node *validatorNode) Init( return node.eventLoop.Init( crankTimerInterval, timeoutBaseDuration, + genesisBlock.qc, BlockWithValidatorList{genesisBlock, node.getValidators()}, []BlockWithValidatorList{ {genesisBlock, node.getValidators()}, @@ -69,7 +70,7 @@ func (node *validatorNode) Init( ) } -func (node *validatorNode) Resync(tipBlock *block, safeBlocks []*block) error { +func (node *validatorNode) Resync(genesisBlock *block, tipBlock *block, safeBlocks []*block) error { node.lock.Lock() defer node.lock.Unlock() @@ -81,6 +82,7 @@ func (node *validatorNode) Resync(tipBlock *block, safeBlocks []*block) error { return node.eventLoop.Init( node.eventLoop.crankTimerInterval, node.eventLoop.timeoutBaseDuration, + genesisBlock.qc, BlockWithValidatorList{tipBlock, node.getValidators()}, collections.Transform(safeBlocks, func(bb *block) BlockWithValidatorList { return BlockWithValidatorList{bb, node.getValidators()} diff --git a/consensus/mocking.go b/consensus/mocking.go index efc855fc7..6c0b8be82 100644 --- a/consensus/mocking.go +++ b/consensus/mocking.go @@ -4,7 +4,7 @@ import "time" type MockFastHotStuffEventLoop struct { OnGetEvents func() chan *FastHotStuffEvent - OnInit func(time.Duration, time.Duration, BlockWithValidatorList, []BlockWithValidatorList) error + OnInit func(time.Duration, time.Duration, QuorumCertificate, BlockWithValidatorList, []BlockWithValidatorList) error OnGetCurrentView func() uint64 OnAdvanceViewOnTimeout func() 
(uint64, error) OnProcessTipBlock func(BlockWithValidatorList, []BlockWithValidatorList) error @@ -20,8 +20,8 @@ type MockFastHotStuffEventLoop struct { func (fc *MockFastHotStuffEventLoop) GetEvents() chan *FastHotStuffEvent { return fc.OnGetEvents() } -func (fc *MockFastHotStuffEventLoop) Init(crankTimerInterval time.Duration, timeoutBaseDuration time.Duration, tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { - return fc.OnInit(crankTimerInterval, timeoutBaseDuration, tip, safeBlocks) +func (fc *MockFastHotStuffEventLoop) Init(crankTimerInterval time.Duration, timeoutBaseDuration time.Duration, genesisQC QuorumCertificate, tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { + return fc.OnInit(crankTimerInterval, timeoutBaseDuration, genesisQC, tip, safeBlocks) } func (fc *MockFastHotStuffEventLoop) GetCurrentView() uint64 { diff --git a/consensus/types.go b/consensus/types.go index a4714f109..f0967c46f 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -51,7 +51,7 @@ type BlockHashValue = [32]byte type FastHotStuffEventLoop interface { GetEvents() chan *FastHotStuffEvent - Init(time.Duration, time.Duration, BlockWithValidatorList, []BlockWithValidatorList) error + Init(time.Duration, time.Duration, QuorumCertificate, BlockWithValidatorList, []BlockWithValidatorList) error GetCurrentView() uint64 AdvanceViewOnTimeout() (uint64, error) ProcessTipBlock(BlockWithValidatorList, []BlockWithValidatorList) error @@ -189,6 +189,11 @@ type fastHotStuffEventLoop struct { hasCrankTimerRunForCurrentView bool hasConstructedQCInCurrentView bool + // Quorum certificate used as the genesis for the PoS chain. This QC is a trusted input that is used + // to override the highQC in timeout messages and timeout aggregate QCs when there is a timeout at the + // first block height of the PoS chain. + genesisQC QuorumCertificate + // Block hash of the current tip of the block-chain. 
tip blockWithValidatorLookup diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 38169c029..5d555c44c 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -56,6 +56,16 @@ func (cc *FastHotStuffConsensus) Start() error { ) } + finalPoWBlock, err := cc.getFinalCommittedPoWBlock() + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching final PoW block: %v", err) + } + + genesisQC, err := cc.createGenesisQC(finalPoWBlock.Hash, uint64(finalPoWBlock.Height)) + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error creating PoS cutover genesis QC: %v", err) + } + // Fetch the validator set at each safe block tipBlockWithValidators, err := cc.fetchValidatorListsForSafeBlocks([]*MsgDeSoHeader{tipBlock.Header}) if err != nil { @@ -75,7 +85,7 @@ func (cc *FastHotStuffConsensus) Start() error { } // Initialize and start the event loop. TODO: Pass in the crank timer duration and timeout duration - cc.fastHotStuffEventLoop.Init(0, 0, tipBlockWithValidators[0], safeBlocksWithValidators) + cc.fastHotStuffEventLoop.Init(0, 0, genesisQC, tipBlockWithValidators[0], safeBlocksWithValidators) cc.fastHotStuffEventLoop.Start() return nil @@ -714,6 +724,48 @@ func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView) (*PosBlockPr return NewPosBlockProducer(fc.mempool, fc.params, blockProducerPublicKey, blockProducerBlsPublicKey), nil } +func (fc *FastHotStuffConsensus) createGenesisQC(blockHash *BlockHash, view uint64) (*QuorumCertificate, error) { + aggregatedSignature, signersList, err := BuildQuorumCertificateAsProofOfStakeCutoverValidator(view, blockHash) + if err != nil { + return nil, err + } + + qc := &QuorumCertificate{ + BlockHash: blockHash, + ProposedInView: view, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: aggregatedSignature, + SignersList: signersList, + }, + } + + return qc, nil +} + +func (fc *FastHotStuffConsensus) getFinalCommittedPoWBlock() (*BlockNode, error) 
{ + // Fetch the block node for the cutover block + blockNodes, blockNodesExist := + fc.blockchain.blockIndexByHeight[uint64(fc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1)] + if !blockNodesExist { + return nil, errors.Errorf( + "Error fetching cutover block nodes before height %d", + fc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1, + ) + } + + // Fetch the block node with the committed status + for _, blockNode := range blockNodes { + if blockNode.Status == StatusBlockCommitted { + return blockNode, nil + } + } + + return nil, errors.Errorf( + "Error fetching committed cutover block node before height %d", + fc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1, + ) +} + // Finds the epoch entry for the block and returns the epoch number. func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { for _, epochEntry := range epochEntries { From 48c0677ae8ab04c0a3882f436a318da58494c2b0 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Wed, 24 Jan 2024 11:48:38 -0500 Subject: [PATCH 381/762] Add HandshakeController (#861) PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test Simplify indices Simplify remote node indexer; fix compilation Simplify RemoteNodeManager More RemoteNodeManager updates Nits --- integration_testing/blocksync_test.go | 10 +- integration_testing/connection_bridge.go | 64 ++- .../connection_controller_test.go | 451 ++++++++++++++++++ .../connection_controller_utils_test.go | 206 ++++++++ integration_testing/hypersync_test.go | 10 + integration_testing/migrations_test.go | 1 + integration_testing/mining_test.go | 4 +- integration_testing/tools.go | 40 +- integration_testing/txindex_test.go | 1 + lib/connection_controller.go | 329 +++++++++++++ lib/connection_manager.go | 123 +---- lib/constants.go | 8 + ..._controller.go => handshake_controller.go} | 7 + lib/network.go | 11 +- lib/network_test.go | 12 +- lib/peer.go | 5 +- lib/remote_node.go | 45 +- lib/remote_node_manager.go | 59 ++- lib/server.go | 50 +- 19 files changed, 1252 insertions(+), 184 deletions(-) create mode 100644 integration_testing/connection_controller_test.go create mode 100644 integration_testing/connection_controller_utils_test.go create mode 100644 lib/connection_controller.go rename lib/{pos_handshake_controller.go => handshake_controller.go} (96%) diff --git a/integration_testing/blocksync_test.go b/integration_testing/blocksync_test.go index 8be96d735..be87aae3a 100644 --- a/integration_testing/blocksync_test.go +++ 
b/integration_testing/blocksync_test.go @@ -40,9 +40,8 @@ func TestSimpleBlockSync(t *testing.T) { // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) + // TODO: Dial an outbound connection from node2 to node1 + // Fix other integration tests. // wait for node2 to sync blocks. waitForNodeToFullySync(node2) @@ -99,6 +98,7 @@ func TestSimpleSyncRestart(t *testing.T) { compareNodesByDB(t, node1, node2, 0) fmt.Println("Random restart successful! Random height was", randomHeight) fmt.Println("Databases match!") + bridge.Disconnect() node1.Stop() node2.Stop() } @@ -153,7 +153,7 @@ func TestSimpleSyncDisconnectWithSwitchingToNewPeer(t *testing.T) { randomHeight := randomUint32Between(t, 10, config2.MaxSyncBlockHeight) fmt.Println("Random height for a restart (re-use if test failed):", randomHeight) - disconnectAtBlockHeight(t, node2, bridge12, randomHeight) + disconnectAtBlockHeight(node2, bridge12, randomHeight) // bridge the nodes together. bridge23 := NewConnectionBridge(node2, node3) @@ -167,6 +167,8 @@ func TestSimpleSyncDisconnectWithSwitchingToNewPeer(t *testing.T) { compareNodesByDB(t, node3, node2, 0) fmt.Println("Random restart successful! Random height was", randomHeight) fmt.Println("Databases match!") + bridge12.Disconnect() + bridge23.Disconnect() node1.Stop() node2.Stop() node3.Stop() diff --git a/integration_testing/connection_bridge.go b/integration_testing/connection_bridge.go index 4c3b28dde..f6a9897ed 100644 --- a/integration_testing/connection_bridge.go +++ b/integration_testing/connection_bridge.go @@ -13,6 +13,7 @@ import ( "time" ) +// TODO: DEPRECATE // ConnectionBridge is a bidirectional communication channel between two nodes. A bridge creates a pair of inbound and // outbound peers for each of the nodes to handle communication. In total, it creates four peers. 
// @@ -111,13 +112,14 @@ func (bridge *ConnectionBridge) createInboundConnection(node *cmd.Node) *lib.Pee } // This channel is redundant in our setting. - messagesFromPeer := make(chan *lib.ServerMessage) + messagesFromPeer := make(chan *lib.ServerMessage, 100) + newPeerChan := make(chan *lib.Peer, 100) + donePeerChan := make(chan *lib.Peer, 100) // Because it is an inbound Peer of the node, it is simultaneously a "fake" outbound Peer of the bridge. // Hence, we will mark the _isOutbound parameter as "true" in NewPeer. - peer := lib.NewPeer(conn, true, netAddress, true, - 10000, 0, &lib.DeSoMainnetParams, - messagesFromPeer, nil, nil, lib.NodeSyncTypeAny) - peer.ID = uint64(lib.RandInt64(math.MaxInt64)) + peer := lib.NewPeer(uint64(lib.RandInt64(math.MaxInt64)), conn, true, + netAddress, true, 10000, 0, &lib.DeSoMainnetParams, + messagesFromPeer, nil, nil, lib.NodeSyncTypeAny, newPeerChan, donePeerChan) return peer } @@ -139,27 +141,28 @@ func (bridge *ConnectionBridge) createOutboundConnection(node *cmd.Node, otherNo fmt.Println("createOutboundConnection: Got a connection from remote:", conn.RemoteAddr().String(), "on listener:", ll.Addr().String()) - na, err := lib.IPToNetAddr(conn.RemoteAddr().String(), otherNode.Server.GetConnectionManager().AddrMgr, - otherNode.Params) - messagesFromPeer := make(chan *lib.ServerMessage) - peer := lib.NewPeer(conn, false, na, false, - 10000, 0, bridge.nodeB.Params, - messagesFromPeer, nil, nil, lib.NodeSyncTypeAny) - peer.ID = uint64(lib.RandInt64(math.MaxInt64)) + addrMgr := addrmgr.New("", net.LookupIP) + na, err := lib.IPToNetAddr(conn.RemoteAddr().String(), addrMgr, otherNode.Params) + messagesFromPeer := make(chan *lib.ServerMessage, 100) + newPeerChan := make(chan *lib.Peer, 100) + donePeerChan := make(chan *lib.Peer, 100) + peer := lib.NewPeer(uint64(lib.RandInt64(math.MaxInt64)), conn, + false, na, false, 10000, 0, bridge.nodeB.Params, + messagesFromPeer, nil, nil, lib.NodeSyncTypeAny, newPeerChan, donePeerChan) 
bridge.newPeerChan <- peer //} }(ll) // Make the provided node to make an outbound connection to our listener. - netAddress, _ := lib.IPToNetAddr(ll.Addr().String(), addrmgr.New("", net.LookupIP), &lib.DeSoMainnetParams) - fmt.Println("createOutboundConnection: IP:", netAddress.IP, "Port:", netAddress.Port) - go node.Server.GetConnectionManager().ConnectPeer(nil, netAddress) + addrMgr := addrmgr.New("", net.LookupIP) + addr, _ := lib.IPToNetAddr(ll.Addr().String(), addrMgr, node.Params) + go node.Server.GetConnectionManager().DialOutboundConnection(addr, uint64(lib.RandInt64(math.MaxInt64))) } // getVersionMessage simulates a version message that the provided node would have sent. func (bridge *ConnectionBridge) getVersionMessage(node *cmd.Node) *lib.MsgDeSoVersion { ver := lib.NewMessage(lib.MsgTypeVersion).(*lib.MsgDeSoVersion) - ver.Version = node.Params.ProtocolVersion + ver.Version = node.Params.ProtocolVersion.ToUint64() ver.TstampSecs = time.Now().Unix() ver.Nonce = uint64(lib.RandInt64(math.MaxInt64)) ver.UserAgent = node.Params.UserAgent @@ -172,12 +175,29 @@ func (bridge *ConnectionBridge) getVersionMessage(node *cmd.Node) *lib.MsgDeSoVe } if node.Server != nil { - ver.LatestBlockHeight = uint32(node.Server.GetBlockchain().BlockTip().Header.Height) + ver.LatestBlockHeight = node.Server.GetBlockchain().BlockTip().Header.Height } ver.MinFeeRateNanosPerKB = node.Config.MinFeerate return ver } +func ReadWithTimeout(readFunc func() error, readTimeout time.Duration) error { + errChan := make(chan error) + go func() { + errChan <- readFunc() + }() + select { + case err := <-errChan: + { + return err + } + case <-time.After(readTimeout): + { + return fmt.Errorf("ReadWithTimeout: Timed out reading message") + } + } +} + // startConnection starts the connection by performing version and verack exchange with // the provided connection, pretending to be the otherNode. 
func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode *cmd.Node) error { @@ -192,7 +212,7 @@ func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode } // Wait for a response to the version message. - if err := connection.ReadWithTimeout( + if err := ReadWithTimeout( func() error { msg, err := connection.ReadDeSoMessage() if err != nil { @@ -215,7 +235,7 @@ func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode // Now prepare the verack message. verackMsg := lib.NewMessage(lib.MsgTypeVerack) - verackMsg.(*lib.MsgDeSoVerack).Nonce = connection.VersionNonceReceived + verackMsg.(*lib.MsgDeSoVerack).NonceReceived = connection.VersionNonceReceived // And send it to the connection. if err := connection.WriteDeSoMessage(verackMsg); err != nil { @@ -223,7 +243,7 @@ func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode } // And finally wait for connection's response to the verack message. - if err := connection.ReadWithTimeout( + if err := ReadWithTimeout( func() error { msg, err := connection.ReadDeSoMessage() if err != nil { @@ -234,9 +254,9 @@ func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode return fmt.Errorf("message is not verack! 
Type: %v", msg.GetMsgType()) } verackMsg := msg.(*lib.MsgDeSoVerack) - if verackMsg.Nonce != connection.VersionNonceSent { + if verackMsg.NonceReceived != connection.VersionNonceSent { return fmt.Errorf("verack message nonce doesn't match (received: %v, sent: %v)", - verackMsg.Nonce, connection.VersionNonceSent) + verackMsg.NonceReceived, connection.VersionNonceSent) } return nil }, lib.DeSoMainnetParams.VersionNegotiationTimeout); err != nil { diff --git a/integration_testing/connection_controller_test.go b/integration_testing/connection_controller_test.go new file mode 100644 index 000000000..01fb01046 --- /dev/null +++ b/integration_testing/connection_controller_test.go @@ -0,0 +1,451 @@ +package integration_testing + +import ( + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/lib" + "github.com/stretchr/testify/require" + "testing" +) + +func TestConnectionControllerNonValidator(t *testing.T) { + require := require.New(t) + + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1 = startNode(t, node1) + defer node1.Stop() + + // Make sure NonValidator Node1 can create an outbound connection to NonValidator Node2 + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + node2 = startNode(t, node2) + + cc := node1.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + waitForNonValidatorOutboundConnection(t, node1, node2) + waitForNonValidatorInboundConnection(t, node2, node1) + + node2.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #1 passed | Successfully created outbound connection from NonValidator Node1 to NonValidator Node2") + + // Make sure NonValidator Node1 can create an outbound connection to validator Node3 + blsPriv3, err := bls.NewPrivateKey() + require.NoError(err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsPriv3) + node3 = startNode(t, node3) + + cc = node1.Server.GetConnectionController() + 
require.NoError(cc.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + waitForValidatorConnection(t, node1, node3) + waitForNonValidatorInboundConnection(t, node3, node1) + + node3.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #2 passed | Successfully created outbound connection from NonValidator Node1 to Validator Node3") + + // Make sure NonValidator Node1 can create a non-validator connection to validator Node4 + blsPriv4, err := bls.NewPrivateKey() + require.NoError(err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + node4 = startNode(t, node4) + defer node4.Stop() + + cc = node1.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) + waitForValidatorConnection(t, node1, node4) + waitForNonValidatorInboundConnection(t, node4, node1) + t.Logf("Test #3 passed | Successfully created outbound connection from NonValidator Node1 to Validator Node4") +} + +func TestConnectionControllerValidator(t *testing.T) { + require := require.New(t) + + blsPriv1, err := bls.NewPrivateKey() + require.NoError(err) + node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsPriv1) + node1 = startNode(t, node1) + defer node1.Stop() + + // Make sure Validator Node1 can create an outbound connection to Validator Node2 + blsPriv2, err := bls.NewPrivateKey() + blsPub2 := blsPriv2.PublicKey() + require.NoError(err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsPriv2) + node2 = startNode(t, node2) + + cc := node1.Server.GetConnectionController() + require.NoError(cc.CreateValidatorConnection(node2.Listeners[0].Addr().String(), blsPub2)) + waitForValidatorConnection(t, node1, node2) + waitForValidatorConnection(t, node2, node1) + + node2.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #1 passed | Successfully created outbound connection from Validator Node1 to Validator Node2") + + // Make sure Validator Node1 can create 
an outbound connection to NonValidator Node3 + node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") + node3 = startNode(t, node3) + + cc = node1.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + waitForNonValidatorOutboundConnection(t, node1, node3) + waitForValidatorConnection(t, node3, node1) + + node3.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #2 passed | Successfully created outbound connection from Validator Node1 to NonValidator Node3") + + // Make sure Validator Node1 can create an outbound non-validator connection to Validator Node4 + blsPriv4, err := bls.NewPrivateKey() + require.NoError(err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + node4 = startNode(t, node4) + defer node4.Stop() + + cc = node1.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) + waitForValidatorConnection(t, node1, node4) + waitForValidatorConnection(t, node4, node1) + t.Logf("Test #3 passed | Successfully created non-validator outbound connection from Validator Node1 to Validator Node4") +} + +func TestConnectionControllerHandshakeDataErrors(t *testing.T) { + require := require.New(t) + + blsPriv1, err := bls.NewPrivateKey() + require.NoError(err) + node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsPriv1) + + // This node should have ProtocolVersion2, but it has ProtocolVersion1 as we want it to disconnect. 
+ blsPriv2, err := bls.NewPrivateKey() + require.NoError(err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsPriv2) + node2.Params.ProtocolVersion = lib.ProtocolVersion1 + + node1 = startNode(t, node1) + node2 = startNode(t, node2) + defer node1.Stop() + defer node2.Stop() + + cc := node2.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node2) + t.Logf("Test #1 passed | Successfuly disconnected node with SFValidator flag and ProtocolVersion1 mismatch") + + // This node shouldn't have ProtocolVersion3, which is beyond latest ProtocolVersion2, meaning nodes should disconnect. + blsPriv3, err := bls.NewPrivateKey() + require.NoError(err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsPriv3) + node3.Params.ProtocolVersion = lib.ProtocolVersionType(3) + node3 = startNode(t, node3) + defer node3.Stop() + + cc = node1.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node3) + t.Logf("Test #2 passed | Successfuly disconnected node with ProtocolVersion3") + + // This node shouldn't have ProtocolVersion0, which is outdated. + node4 := spawnNonValidatorNodeProtocol2(t, 18003, "node4") + node4.Params.ProtocolVersion = lib.ProtocolVersion0 + node4 = startNode(t, node4) + defer node4.Stop() + + cc = node1.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node4) + t.Logf("Test #3 passed | Successfuly disconnected node with ProtocolVersion0") + + // This node will have a different public key than the one it's supposed to have. 
+ blsPriv5, err := bls.NewPrivateKey() + require.NoError(err) + blsPriv5Wrong, err := bls.NewPrivateKey() + require.NoError(err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsPriv5) + node5 = startNode(t, node5) + defer node5.Stop() + + cc = node1.Server.GetConnectionController() + require.NoError(cc.CreateValidatorConnection(node5.Listeners[0].Addr().String(), blsPriv5Wrong.PublicKey())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node5) + t.Logf("Test #4 passed | Successfuly disconnected node with public key mismatch") + + // This node will be missing SFPosValidator flag while being connected as a validator. + blsPriv6, err := bls.NewPrivateKey() + require.NoError(err) + node6 := spawnNonValidatorNodeProtocol2(t, 18005, "node6") + node6 = startNode(t, node6) + defer node6.Stop() + + cc = node1.Server.GetConnectionController() + require.NoError(cc.CreateValidatorConnection(node6.Listeners[0].Addr().String(), blsPriv6.PublicKey())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node6) + t.Logf("Test #5 passed | Successfuly disconnected supposed validator node with missing SFPosValidator flag") + + // This node will have ProtocolVersion1 and be connected as an outbound non-validator node. 
+ node7 := spawnNonValidatorNodeProtocol2(t, 18006, "node7") + node7.Params.ProtocolVersion = lib.ProtocolVersion1 + node7 = startNode(t, node7) + defer node7.Stop() + + cc = node1.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node7.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node7) + t.Logf("Test #6 passed | Successfuly disconnected outbound non-validator node with ProtocolVersion1") +} + +func TestConnectionControllerHandshakeTimeouts(t *testing.T) { + require := require.New(t) + + // Set version negotiation timeout to 0 to make sure that the node will be disconnected + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Params.VersionNegotiationTimeout = 0 + node1 = startNode(t, node1) + defer node1.Stop() + + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + node2 = startNode(t, node2) + defer node2.Stop() + + cc := node1.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node2) + t.Logf("Test #1 passed | Successfuly disconnected node after version negotiation timeout") + + // Now let's try timing out verack exchange + node1.Params.VersionNegotiationTimeout = lib.DeSoTestnetParams.VersionNegotiationTimeout + node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") + node3.Params.VerackNegotiationTimeout = 0 + node3 = startNode(t, node3) + defer node3.Stop() + + cc = node3.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node3) + t.Logf("Test #2 passed | Successfuly disconnected node after verack exchange timeout") + + // Now let's try timing out handshake between two validators node4 and node5 + blsPriv4, err := 
bls.NewPrivateKey() + require.NoError(err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + node4.Params.HandshakeTimeoutMicroSeconds = 0 + node4 = startNode(t, node4) + defer node4.Stop() + + blsPriv5, err := bls.NewPrivateKey() + require.NoError(err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsPriv5) + node5 = startNode(t, node5) + defer node5.Stop() + + cc = node4.Server.GetConnectionController() + require.NoError(cc.CreateValidatorConnection(node5.Listeners[0].Addr().String(), blsPriv5.PublicKey())) + waitForEmptyRemoteNodeIndexer(t, node4) + waitForEmptyRemoteNodeIndexer(t, node5) + t.Logf("Test #3 passed | Successfuly disconnected validator node after handshake timeout") +} + +func TestConnectionControllerValidatorDuplication(t *testing.T) { + require := require.New(t) + + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1 = startNode(t, node1) + defer node1.Stop() + + // Create a validator Node2 + blsPriv2, err := bls.NewPrivateKey() + require.NoError(err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsPriv2) + node2 = startNode(t, node2) + + // Create a duplicate validator Node3 + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsPriv2) + node3 = startNode(t, node3) + + // Create validator connection from Node1 to Node2 and from Node1 to Node3 + cc := node1.Server.GetConnectionController() + require.NoError(cc.CreateValidatorConnection(node2.Listeners[0].Addr().String(), blsPriv2.PublicKey())) + // This should fail out right because Node3 has a duplicate public key. + require.Error(cc.CreateValidatorConnection(node3.Listeners[0].Addr().String(), blsPriv2.PublicKey())) + waitForValidatorConnection(t, node1, node2) + waitForNonValidatorInboundConnection(t, node2, node1) + + // Now create an outbound connection from Node3 to Node1, which should pass handshake, but then fail because + // Node1 already has a validator connection to Node2 with the same public key. 
+ cc3 := node3.Server.GetConnectionController() + require.NoError(cc3.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node3) + waitForCountRemoteNodeIndexer(t, node1, 1, 1, 0, 0) + t.Logf("Test #1 passed | Successfuly rejected duplicate validator connection with inbound/outbound validators") + + node3.Stop() + node2.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + + // Create two more validators Node4, Node5 with duplicate public keys + blsPriv4, err := bls.NewPrivateKey() + require.NoError(err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + node4 = startNode(t, node4) + defer node4.Stop() + + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsPriv4) + node5 = startNode(t, node5) + defer node5.Stop() + + // Create validator connections from Node4 to Node1 and from Node5 to Node1 + cc4 := node4.Server.GetConnectionController() + require.NoError(cc4.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForValidatorConnection(t, node1, node4) + waitForNonValidatorOutboundConnection(t, node4, node1) + cc5 := node5.Server.GetConnectionController() + require.NoError(cc5.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node5) + waitForCountRemoteNodeIndexer(t, node1, 1, 1, 0, 0) + t.Logf("Test #2 passed | Successfuly rejected duplicate validator connection with multiple outbound validators") +} + +func TestConnectionControllerProtocolDifference(t *testing.T) { + require := require.New(t) + + // Create a ProtocolVersion1 Node1 + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Params.ProtocolVersion = lib.ProtocolVersion1 + node1 = startNode(t, node1) + defer node1.Stop() + + // Create a ProtocolVersion2 NonValidator Node2 + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + node2 = startNode(t, node2) + + // Create non-validator connection from Node1 to Node2 + cc := 
node1.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + waitForNonValidatorOutboundConnection(t, node1, node2) + waitForNonValidatorInboundConnection(t, node2, node1) + t.Logf("Test #1 passed | Successfuly connected to a ProtocolVersion1 node with a ProtocolVersion2 non-validator") + + // Create a ProtocolVersion2 Validator Node3 + blsPriv3, err := bls.NewPrivateKey() + require.NoError(err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsPriv3) + node3 = startNode(t, node3) + + // Create validator connection from Node1 to Node3 + require.NoError(cc.CreateValidatorConnection(node3.Listeners[0].Addr().String(), blsPriv3.PublicKey())) + waitForValidatorConnection(t, node1, node3) + waitForNonValidatorInboundConnection(t, node3, node1) + t.Logf("Test #2 passed | Successfuly connected to a ProtocolVersion1 node with a ProtocolVersion2 validator") + + node2.Stop() + node3.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + + // Create a ProtocolVersion2 validator Node4 + blsPriv4, err := bls.NewPrivateKey() + require.NoError(err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + node4 = startNode(t, node4) + defer node4.Stop() + + // Attempt to create non-validator connection from Node4 to Node1 + cc = node4.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node4) + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #3 passed | Successfuly rejected outbound connection from ProtocolVersion2 node to ProtcolVersion1 node") + + // Attempt to create validator connection from Node4 to Node1 + require.NoError(cc.CreateValidatorConnection(node1.Listeners[0].Addr().String(), blsPriv4.PublicKey())) + waitForEmptyRemoteNodeIndexer(t, node4) + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #4 passed | Successfuly rejected validator connection from 
ProtocolVersion2 node to ProtcolVersion1 node") + + // Create a ProtocolVersion2 non-validator Node5 + node5 := spawnNonValidatorNodeProtocol2(t, 18004, "node5") + node5 = startNode(t, node5) + defer node5.Stop() + + // Attempt to create non-validator connection from Node5 to Node1 + cc = node5.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node5) + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #5 passed | Successfuly rejected outbound connection from ProtocolVersion2 node to ProtcolVersion1 node") +} + +func TestConnectionControllerPersistentConnection(t *testing.T) { + require := require.New(t) + + // Create a NonValidator Node1 + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1 = startNode(t, node1) + + // Create a Validator Node2 + blsPriv2, err := bls.NewPrivateKey() + require.NoError(err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsPriv2) + node2 = startNode(t, node2) + + // Create a persistent connection from Node1 to Node2 + cc := node1.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorPersistentOutboundConnection(node2.Listeners[0].Addr().String())) + waitForValidatorConnection(t, node1, node2) + waitForNonValidatorInboundConnection(t, node2, node1) + node2.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #1 passed | Successfuly created persistent connection from non-validator Node1 to validator Node2") + + // Create a Non-validator Node3 + node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") + node3 = startNode(t, node3) + + // Create a persistent connection from Node1 to Node3 + require.NoError(cc.CreateNonValidatorPersistentOutboundConnection(node3.Listeners[0].Addr().String())) + waitForNonValidatorOutboundConnection(t, node1, node3) + waitForNonValidatorInboundConnection(t, node3, node1) + node3.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + 
node1.Stop() + t.Logf("Test #2 passed | Successfuly created persistent connection from non-validator Node1 to non-validator Node3") + + // Create a Validator Node4 + blsPriv4, err := bls.NewPrivateKey() + require.NoError(err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + node4 = startNode(t, node4) + defer node4.Stop() + + // Create a non-validator Node5 + node5 := spawnNonValidatorNodeProtocol2(t, 18004, "node5") + node5 = startNode(t, node5) + + // Create a persistent connection from Node4 to Node5 + cc = node4.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorPersistentOutboundConnection(node5.Listeners[0].Addr().String())) + waitForNonValidatorOutboundConnection(t, node4, node5) + waitForValidatorConnection(t, node5, node4) + node5.Stop() + waitForEmptyRemoteNodeIndexer(t, node4) + t.Logf("Test #3 passed | Successfuly created persistent connection from validator Node4 to non-validator Node5") + + // Create a Validator Node6 + blsPriv6, err := bls.NewPrivateKey() + require.NoError(err) + node6 := spawnValidatorNodeProtocol2(t, 18005, "node6", blsPriv6) + node6 = startNode(t, node6) + defer node6.Stop() + + // Create a persistent connection from Node4 to Node6 + require.NoError(cc.CreateNonValidatorPersistentOutboundConnection(node6.Listeners[0].Addr().String())) + waitForValidatorConnection(t, node4, node6) + waitForValidatorConnection(t, node6, node4) + t.Logf("Test #4 passed | Successfuly created persistent connection from validator Node4 to validator Node6") +} diff --git a/integration_testing/connection_controller_utils_test.go b/integration_testing/connection_controller_utils_test.go new file mode 100644 index 000000000..4d5594634 --- /dev/null +++ b/integration_testing/connection_controller_utils_test.go @@ -0,0 +1,206 @@ +package integration_testing + +import ( + "fmt" + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/cmd" + "github.com/deso-protocol/core/lib" + "os" + "testing" +) + +func 
waitForValidatorConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) { + userAgentN1 := node1.Params.UserAgent + userAgentN2 := node2.Params.UserAgent + rnManagerN1 := node1.Server.GetConnectionController().GetRemoteNodeManager() + n1ValidatedN2 := func() bool { + if true != checkRemoteNodeIndexerUserAgent(rnManagerN1, userAgentN2, true, false, false) { + return false + } + rnFromN2 := getRemoteNodeWithUserAgent(node1, userAgentN2) + if rnFromN2 == nil { + return false + } + if !rnFromN2.IsHandshakeCompleted() { + return false + } + return true + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to outbound non-validator Node (%s)", userAgentN1, userAgentN2), n1ValidatedN2) +} + +func waitForNonValidatorOutboundConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) { + userAgentN1 := node1.Params.UserAgent + userAgentN2 := node2.Params.UserAgent + rnManagerN1 := node1.Server.GetConnectionController().GetRemoteNodeManager() + n1ValidatedN2 := func() bool { + if true != checkRemoteNodeIndexerUserAgent(rnManagerN1, userAgentN2, false, true, false) { + return false + } + rnFromN2 := getRemoteNodeWithUserAgent(node1, userAgentN2) + if rnFromN2 == nil { + return false + } + if !rnFromN2.IsHandshakeCompleted() { + return false + } + if rnFromN2.GetValidatorPublicKey() != nil { + return false + } + return true + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to outbound non-validator Node (%s)", userAgentN1, userAgentN2), n1ValidatedN2) +} + +func waitForNonValidatorInboundConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) { + userAgentN1 := node1.Params.UserAgent + userAgentN2 := node2.Params.UserAgent + rnManagerN1 := node1.Server.GetConnectionController().GetRemoteNodeManager() + n1ValidatedN2 := func() bool { + if true != checkRemoteNodeIndexerUserAgent(rnManagerN1, userAgentN2, false, false, true) { + return false + } + rnFromN2 := getRemoteNodeWithUserAgent(node1, userAgentN2) + if rnFromN2 == nil { + 
return false + } + if !rnFromN2.IsHandshakeCompleted() { + return false + } + if rnFromN2.GetValidatorPublicKey() != nil { + return false + } + return true + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to inbound non-validator Node (%s)", userAgentN1, userAgentN2), n1ValidatedN2) +} + +func waitForEmptyRemoteNodeIndexer(t *testing.T, node1 *cmd.Node) { + userAgentN1 := node1.Params.UserAgent + rnManagerN1 := node1.Server.GetConnectionController().GetRemoteNodeManager() + n1ValidatedN2 := func() bool { + if true != checkRemoteNodeIndexerEmpty(rnManagerN1) { + return false + } + return true + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to disconnect from all RemoteNodes", userAgentN1), n1ValidatedN2) +} + +func waitForCountRemoteNodeIndexer(t *testing.T, node1 *cmd.Node, allCount int, validatorCount int, + nonValidatorOutboundCount int, nonValidatorInboundCount int) { + + userAgentN1 := node1.Params.UserAgent + rnManagerN1 := node1.Server.GetConnectionController().GetRemoteNodeManager() + n1ValidatedN2 := func() bool { + if true != checkRemoteNodeIndexerCount(rnManagerN1, allCount, validatorCount, nonValidatorOutboundCount, nonValidatorInboundCount) { + return false + } + return true + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to have appropriate RemoteNodes counts", userAgentN1), n1ValidatedN2) +} + +func checkRemoteNodeIndexerUserAgent(manager *lib.RemoteNodeManager, userAgent string, validator bool, + nonValidatorOutbound bool, nonValidatorInbound bool) bool { + + if true != checkUserAgentInRemoteNodeList(userAgent, manager.GetAllRemoteNodes().GetAll()) { + return false + } + if validator != checkUserAgentInRemoteNodeList(userAgent, manager.GetValidatorIndex().GetAll()) { + return false + } + if nonValidatorOutbound != checkUserAgentInRemoteNodeList(userAgent, manager.GetNonValidatorOutboundIndex().GetAll()) { + return false + } + if nonValidatorInbound != checkUserAgentInRemoteNodeList(userAgent, 
manager.GetNonValidatorInboundIndex().GetAll()) { + return false + } + + return true +} + +func checkRemoteNodeIndexerCount(manager *lib.RemoteNodeManager, allCount int, validatorCount int, + nonValidatorOutboundCount int, nonValidatorInboundCount int) bool { + + if allCount != manager.GetAllRemoteNodes().Count() { + return false + } + if validatorCount != manager.GetValidatorIndex().Count() { + return false + } + if nonValidatorOutboundCount != manager.GetNonValidatorOutboundIndex().Count() { + return false + } + if nonValidatorInboundCount != manager.GetNonValidatorInboundIndex().Count() { + return false + } + + return true +} + +func checkRemoteNodeIndexerEmpty(manager *lib.RemoteNodeManager) bool { + if manager.GetAllRemoteNodes().Count() != 0 { + return false + } + if manager.GetValidatorIndex().Count() != 0 { + return false + } + if manager.GetNonValidatorOutboundIndex().Count() != 0 { + return false + } + if manager.GetNonValidatorInboundIndex().Count() != 0 { + return false + } + return true +} + +func checkUserAgentInRemoteNodeList(userAgent string, rnList []*lib.RemoteNode) bool { + for _, rn := range rnList { + if rn == nil { + continue + } + if rn.GetUserAgent() == userAgent { + return true + } + } + return false +} + +func getRemoteNodeWithUserAgent(node *cmd.Node, userAgent string) *lib.RemoteNode { + rnManager := node.Server.GetConnectionController().GetRemoteNodeManager() + rnList := rnManager.GetAllRemoteNodes().GetAll() + for _, rn := range rnList { + if rn.GetUserAgent() == userAgent { + return rn + } + } + return nil +} + +func spawnNonValidatorNodeProtocol2(t *testing.T, port uint32, id string) *cmd.Node { + dbDir := getDirectory(t) + t.Cleanup(func() { + os.RemoveAll(dbDir) + }) + config := generateConfig(t, port, dbDir, 10) + config.SyncType = lib.NodeSyncTypeBlockSync + node := cmd.NewNode(config) + node.Params.UserAgent = id + node.Params.ProtocolVersion = lib.ProtocolVersion2 + return node +} + +func spawnValidatorNodeProtocol2(t 
*testing.T, port uint32, id string, blsPriv *bls.PrivateKey) *cmd.Node { + dbDir := getDirectory(t) + t.Cleanup(func() { + os.RemoveAll(dbDir) + }) + config := generateConfig(t, port, dbDir, 10) + config.SyncType = lib.NodeSyncTypeBlockSync + config.PosValidatorSeed = blsPriv.ToString() + node := cmd.NewNode(config) + node.Params.UserAgent = id + node.Params.ProtocolVersion = lib.ProtocolVersion2 + return node +} diff --git a/integration_testing/hypersync_test.go b/integration_testing/hypersync_test.go index aad90ee0e..bc4c8a7c0 100644 --- a/integration_testing/hypersync_test.go +++ b/integration_testing/hypersync_test.go @@ -53,6 +53,7 @@ func TestSimpleHyperSync(t *testing.T) { //compareNodesByDB(t, node1, node2, 0) compareNodesByChecksum(t, node1, node2) fmt.Println("Databases match!") + bridge.Disconnect() node1.Stop() node2.Stop() } @@ -122,6 +123,8 @@ func TestHyperSyncFromHyperSyncedNode(t *testing.T) { compareNodesByChecksum(t, node2, node3) fmt.Println("Databases match!") + bridge12.Disconnect() + bridge23.Disconnect() node1.Stop() node2.Stop() node3.Stop() @@ -178,6 +181,7 @@ func TestSimpleHyperSyncRestart(t *testing.T) { compareNodesByChecksum(t, node1, node2) fmt.Println("Random restart successful! Random sync prefix was", syncPrefix) fmt.Println("Databases match!") + bridge.Disconnect() node1.Stop() node2.Stop() } @@ -255,6 +259,8 @@ func TestSimpleHyperSyncDisconnectWithSwitchingToNewPeer(t *testing.T) { compareNodesByChecksum(t, node1, node2) fmt.Println("Random restart successful! 
Random sync prefix was", syncPrefix) fmt.Println("Databases match!") + bridge12.Disconnect() + bridge23.Disconnect() node1.Stop() node2.Stop() node3.Stop() @@ -349,6 +355,7 @@ func TestArchivalMode(t *testing.T) { //compareNodesByDB(t, node1, node2, 0) compareNodesByChecksum(t, node1, node2) fmt.Println("Databases match!") + bridge.Disconnect() node1.Stop() node2.Stop() } @@ -406,6 +413,9 @@ func TestBlockSyncFromArchivalModeHyperSync(t *testing.T) { //compareNodesByDB(t, node1, node2, 0) compareNodesByChecksum(t, node1, node2) fmt.Println("Databases match!") + bridge12.Disconnect() + bridge23.Disconnect() node1.Stop() node2.Stop() + node3.Stop() } diff --git a/integration_testing/migrations_test.go b/integration_testing/migrations_test.go index b0a692b52..1419d483e 100644 --- a/integration_testing/migrations_test.go +++ b/integration_testing/migrations_test.go @@ -59,6 +59,7 @@ func TestEncoderMigrations(t *testing.T) { compareNodesByChecksum(t, node1, node2) fmt.Println("Databases match!") + bridge.Disconnect() node1.Stop() node2.Stop() } diff --git a/integration_testing/mining_test.go b/integration_testing/mining_test.go index 49a23333c..88de5e097 100644 --- a/integration_testing/mining_test.go +++ b/integration_testing/mining_test.go @@ -29,9 +29,7 @@ func TestRegtestMiner(t *testing.T) { // wait for node1 to sync blocks mineHeight := uint32(40) - listener := make(chan bool) - listenForBlockHeight(t, node1, mineHeight, listener) - <-listener + <-listenForBlockHeight(node1, mineHeight) node1.Stop() } diff --git a/integration_testing/tools.go b/integration_testing/tools.go index c73b82873..2f97e942d 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -150,7 +150,8 @@ func compareNodesByChecksum(t *testing.T, nodeA *cmd.Node, nodeB *cmd.Node) { // compareNodesByState will look through all state records in nodeA and nodeB databases and will compare them. // The nodes pass this comparison iff they have identical states. 
func compareNodesByState(t *testing.T, nodeA *cmd.Node, nodeB *cmd.Node, verbose int) { - compareNodesByStateWithPrefixList(t, nodeA.ChainDB, nodeB.ChainDB, lib.StatePrefixes.StatePrefixesList, verbose) + compareNodesByStateWithPrefixList(t, nodeA.Server.GetBlockchain().DB(), nodeB.Server.GetBlockchain().DB(), + lib.StatePrefixes.StatePrefixesList, verbose) } // compareNodesByDB will look through all records in nodeA and nodeB databases and will compare them. @@ -164,7 +165,8 @@ func compareNodesByDB(t *testing.T, nodeA *cmd.Node, nodeB *cmd.Node, verbose in } prefixList = append(prefixList, []byte{prefix}) } - compareNodesByStateWithPrefixList(t, nodeA.ChainDB, nodeB.ChainDB, prefixList, verbose) + compareNodesByStateWithPrefixList(t, nodeA.Server.GetBlockchain().DB(), nodeB.Server.GetBlockchain().DB(), + prefixList, verbose) } // compareNodesByDB will look through all records in nodeA and nodeB txindex databases and will compare them. @@ -386,25 +388,25 @@ func restartNode(t *testing.T, node *cmd.Node) *cmd.Node { } // listenForBlockHeight busy-waits until the node's block tip reaches provided height. -func listenForBlockHeight(t *testing.T, node *cmd.Node, height uint32, signal chan<- bool) { +func listenForBlockHeight(node *cmd.Node, height uint32) (_listener chan bool) { + listener := make(chan bool) ticker := time.NewTicker(1 * time.Millisecond) go func() { for { <-ticker.C if node.Server.GetBlockchain().BlockTip().Height >= height { - signal <- true + listener <- true break } } }() + return listener } // disconnectAtBlockHeight busy-waits until the node's block tip reaches provided height, and then disconnects // from the provided bridge. 
-func disconnectAtBlockHeight(t *testing.T, syncingNode *cmd.Node, bridge *ConnectionBridge, height uint32) { - listener := make(chan bool) - listenForBlockHeight(t, syncingNode, height, listener) - <-listener +func disconnectAtBlockHeight(syncingNode *cmd.Node, bridge *ConnectionBridge, height uint32) { + <-listenForBlockHeight(syncingNode, height) bridge.Disconnect() } @@ -414,7 +416,7 @@ func restartAtHeightAndReconnectNode(t *testing.T, node *cmd.Node, source *cmd.N height uint32) (_node *cmd.Node, _bridge *ConnectionBridge) { require := require.New(t) - disconnectAtBlockHeight(t, node, currentBridge, height) + disconnectAtBlockHeight(node, currentBridge, height) newNode := restartNode(t, node) // Wait after the restart. time.Sleep(1 * time.Second) @@ -475,3 +477,23 @@ func randomUint32Between(t *testing.T, min, max uint32) uint32 { randomHeight := uint32(randomNumber) % (max - min) return randomHeight + min } + +func waitForCondition(t *testing.T, id string, condition func() bool) { + signalChan := make(chan struct{}) + go func() { + for { + if condition() { + signalChan <- struct{}{} + return + } + time.Sleep(1 * time.Millisecond) + } + }() + + select { + case <-signalChan: + return + case <-time.After(5 * time.Second): + t.Fatalf("Condition timed out | %s", id) + } +} diff --git a/integration_testing/txindex_test.go b/integration_testing/txindex_test.go index aa13fd265..dfd398557 100644 --- a/integration_testing/txindex_test.go +++ b/integration_testing/txindex_test.go @@ -57,6 +57,7 @@ func TestSimpleTxIndex(t *testing.T) { compareNodesByDB(t, node1, node2, 0) compareNodesByTxIndex(t, node1, node2, 0) fmt.Println("Databases match!") + bridge.Disconnect() node1.Stop() node2.Stop() } diff --git a/lib/connection_controller.go b/lib/connection_controller.go new file mode 100644 index 000000000..fef9fa887 --- /dev/null +++ b/lib/connection_controller.go @@ -0,0 +1,329 @@ +package lib + +import ( + "fmt" + "github.com/btcsuite/btcd/addrmgr" + 
"github.com/btcsuite/btcd/wire" + "github.com/deso-protocol/core/bls" + "github.com/golang/glog" + "github.com/pkg/errors" + "net" + "strconv" +) + +// ConnectionController is a structure that oversees all connections to remote nodes. It is responsible for kicking off +// the initial connections a node makes to the network. It is also responsible for creating RemoteNodes from all +// successful outbound and inbound connections. The ConnectionController also ensures that the node is connected to +// the active validators, once the node reaches Proof of Stake. +// TODO: Document more in later PRs +type ConnectionController struct { + // The parameters we are initialized with. + params *DeSoParams + + cmgr *ConnectionManager + blsKeystore *BLSKeystore + + handshake *HandshakeController + + rnManager *RemoteNodeManager + + // The address manager keeps track of peer addresses we're aware of. When + // we need to connect to a new outbound peer, it chooses one of the addresses + // it's aware of at random and provides it to us. + AddrMgr *addrmgr.AddrManager + + // When --connectips is set, we don't connect to anything from the addrmgr. + connectIps []string + + // The target number of non-validator outbound remote nodes we want to have. We will disconnect remote nodes once + // we've exceeded this number of outbound connections. + targetNonValidatorOutboundRemoteNodes uint32 + // The target number of non-validator inbound remote nodes we want to have. We will disconnect remote nodes once + // we've exceeded this number of inbound connections. + targetNonValidatorInboundRemoteNodes uint32 + // When true, only one connection per IP is allowed. Prevents eclipse attacks + // among other things. 
+ limitOneInboundRemoteNodePerIP bool +} + +func NewConnectionController(params *DeSoParams, cmgr *ConnectionManager, handshakeController *HandshakeController, + rnManager *RemoteNodeManager, blsKeystore *BLSKeystore, addrMgr *addrmgr.AddrManager, targetNonValidatorOutboundRemoteNodes uint32, + targetNonValidatorInboundRemoteNodes uint32, limitOneInboundConnectionPerIP bool) *ConnectionController { + + return &ConnectionController{ + params: params, + cmgr: cmgr, + blsKeystore: blsKeystore, + handshake: handshakeController, + rnManager: rnManager, + AddrMgr: addrMgr, + targetNonValidatorOutboundRemoteNodes: targetNonValidatorOutboundRemoteNodes, + targetNonValidatorInboundRemoteNodes: targetNonValidatorInboundRemoteNodes, + limitOneInboundRemoteNodePerIP: limitOneInboundConnectionPerIP, + } +} + +func (cc *ConnectionController) GetRemoteNodeManager() *RemoteNodeManager { + return cc.rnManager +} + +// ########################### +// ## Handlers (Peer, DeSoMessage) +// ########################### + +func (cc *ConnectionController) _handleDonePeerMessage(origin *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeDisconnectedPeer { + return + } + + cc.rnManager.DisconnectById(NewRemoteNodeId(origin.ID)) +} + +func (cc *ConnectionController) _handleAddrMessage(origin *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeAddr { + return + } + + // TODO +} + +func (cc *ConnectionController) _handleGetAddrMessage(origin *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeGetAddr { + return + } + + // TODO +} + +// _handleNewConnectionMessage is called when a new outbound or inbound connection is established. It is responsible +// for creating a RemoteNode from the connection and initiating the handshake. The incoming DeSoMessage is a control message. 
+func (cc *ConnectionController) _handleNewConnectionMessage(origin *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeNewConnection { + return + } + + msg, ok := desoMsg.(*MsgDeSoNewConnection) + if !ok { + return + } + + var remoteNode *RemoteNode + var err error + switch msg.Connection.GetConnectionType() { + case ConnectionTypeInbound: + remoteNode, err = cc.processInboundConnection(msg.Connection) + if err != nil { + glog.Errorf("ConnectionController.handleNewConnectionMessage: Problem handling inbound connection: %v", err) + msg.Connection.Close() + return + } + case ConnectionTypeOutbound: + remoteNode, err = cc.processOutboundConnection(msg.Connection) + if err != nil { + glog.Errorf("ConnectionController.handleNewConnectionMessage: Problem handling outbound connection: %v", err) + cc.cleanupFailedOutboundConnection(msg.Connection) + return + } + } + + // If we made it here, we have a valid remote node. We will now initiate the handshake. + cc.handshake.InitiateHandshake(remoteNode) +} + +func (cc *ConnectionController) cleanupFailedOutboundConnection(connection Connection) { + oc, ok := connection.(*outboundConnection) + if !ok { + return + } + + id := NewRemoteNodeId(oc.attemptId) + rn := cc.rnManager.GetRemoteNodeById(id) + if rn != nil { + cc.rnManager.Disconnect(rn) + } + cc.cmgr.RemoveAttemptedOutboundAddrs(oc.address) +} + +// ########################### +// ## Connections +// ########################### + +func (cc *ConnectionController) CreateValidatorConnection(ipStr string, publicKey *bls.PublicKey) error { + netAddr, err := cc.ConvertIPStringToNetAddress(ipStr) + if err != nil { + return err + } + return cc.rnManager.CreateValidatorConnection(netAddr, publicKey) +} + +func (cc *ConnectionController) CreateNonValidatorPersistentOutboundConnection(ipStr string) error { + netAddr, err := cc.ConvertIPStringToNetAddress(ipStr) + if err != nil { + return err + } + return 
cc.rnManager.CreateNonValidatorPersistentOutboundConnection(netAddr) +} + +func (cc *ConnectionController) CreateNonValidatorOutboundConnection(ipStr string) error { + netAddr, err := cc.ConvertIPStringToNetAddress(ipStr) + if err != nil { + return err + } + return cc.rnManager.CreateNonValidatorOutboundConnection(netAddr) +} + +func (cc *ConnectionController) SetTargetOutboundPeers(numPeers uint32) { + cc.targetNonValidatorOutboundRemoteNodes = numPeers +} + +func (cc *ConnectionController) enoughNonValidatorInboundConnections() bool { + return uint32(cc.rnManager.GetNonValidatorInboundIndex().Count()) >= cc.targetNonValidatorInboundRemoteNodes +} + +func (cc *ConnectionController) enoughNonValidatorOutboundConnections() bool { + return uint32(cc.rnManager.GetNonValidatorOutboundIndex().Count()) >= cc.targetNonValidatorOutboundRemoteNodes +} + +// processInboundConnection is called when a new inbound connection is established. At this point, the connection is not validated, +// nor is it assigned to a RemoteNode. This function is responsible for validating the connection and creating a RemoteNode from it. +// Once the RemoteNode is created, we will initiate handshake. +func (cc *ConnectionController) processInboundConnection(conn Connection) (*RemoteNode, error) { + var ic *inboundConnection + var ok bool + if ic, ok = conn.(*inboundConnection); !ok { + return nil, fmt.Errorf("ConnectionController.handleInboundConnection: Connection is not an inboundConnection") + } + + // Reject the peer if we have too many inbound connections already. + if cc.enoughNonValidatorInboundConnections() { + return nil, fmt.Errorf("ConnectionController.handleInboundConnection: Rejecting INBOUND peer (%s) due to max "+ + "inbound peers (%d) hit", ic.connection.RemoteAddr().String(), cc.targetNonValidatorInboundRemoteNodes) + } + + // If we want to limit inbound connections to one per IP address, check to make sure this address isn't already connected. 
+ if cc.limitOneInboundRemoteNodePerIP && + cc.isDuplicateInboundIPAddress(ic.connection.RemoteAddr()) { + + return nil, fmt.Errorf("ConnectionController.handleInboundConnection: Rejecting INBOUND peer (%s) due to "+ + "already having an inbound connection from the same IP with limit_one_inbound_connection_per_ip set", + ic.connection.RemoteAddr().String()) + } + + na, err := cc.ConvertIPStringToNetAddress(ic.connection.RemoteAddr().String()) + if err != nil { + return nil, errors.Wrapf(err, "ConnectionController.handleInboundConnection: Problem calling "+ + "ConvertIPStringToNetAddress for addr: (%s)", ic.connection.RemoteAddr().String()) + } + + remoteNode, err := cc.rnManager.AttachInboundConnection(ic.connection, na) + if remoteNode == nil || err != nil { + return nil, errors.Wrapf(err, "ConnectionController.handleInboundConnection: Problem calling "+ + "AttachInboundConnection for addr: (%s)", ic.connection.RemoteAddr().String()) + } + + return remoteNode, nil +} + +// processOutboundConnection is called when a new outbound connection is established. At this point, the connection is not validated, +// nor is it assigned to a RemoteNode. This function is responsible for validating the connection and creating a RemoteNode from it. +// Once the RemoteNode is created, we will initiate handshake. 
+func (cc *ConnectionController) processOutboundConnection(conn Connection) (*RemoteNode, error) { + var oc *outboundConnection + var ok bool + if oc, ok = conn.(*outboundConnection); !ok { + return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Connection is not an outboundConnection") + } + + if oc.failed { + return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Failed to connect to peer (%s)", + oc.address.IP.String()) + } + + if !oc.isPersistent { + cc.AddrMgr.Connected(oc.address) + cc.AddrMgr.Good(oc.address) + } + + // if this is a non-persistent outbound peer, and we already have enough outbound peers, then don't bother adding this one. + if !oc.isPersistent && cc.enoughNonValidatorOutboundConnections() { + return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Connected to maximum number of outbound "+ + "peers (%d)", cc.targetNonValidatorOutboundRemoteNodes) + } + + // If this is a non-persistent outbound peer and the group key overlaps with another peer we're already connected to then + // abort mission. We only connect to one peer per IP group in order to prevent Sybil attacks. 
+ if !oc.isPersistent && cc.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { + return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Rejecting OUTBOUND NON-PERSISTENT "+ + "connection with redundant group key (%s).", addrmgr.GroupKey(oc.address)) + } + + na, err := cc.ConvertIPStringToNetAddress(oc.connection.RemoteAddr().String()) + if err != nil { + return nil, errors.Wrapf(err, "ConnectionController.handleOutboundConnection: Problem calling ipToNetAddr "+ + "for addr: (%s)", oc.connection.RemoteAddr().String()) + } + + remoteNode, err := cc.rnManager.AttachOutboundConnection(oc.connection, na, oc.attemptId, oc.isPersistent) + if remoteNode == nil || err != nil { + return nil, errors.Wrapf(err, "ConnectionController.handleOutboundConnection: Problem calling rnManager.AttachOutboundConnection "+ + "for addr: (%s)", oc.connection.RemoteAddr().String()) + } + return remoteNode, nil +} + +func (cc *ConnectionController) ConvertIPStringToNetAddress(ipStr string) (*wire.NetAddress, error) { + netAddr, err := IPToNetAddr(ipStr, cc.AddrMgr, cc.params) + if err != nil { + return nil, errors.Wrapf(err, + "ConnectionController.ConvertIPStringToNetAddress: Problem parsing "+ + "ipString to wire.NetAddress") + } + if netAddr == nil { + return nil, fmt.Errorf("ConnectionController.ConvertIPStringToNetAddress: " + + "address was nil after parsing") + } + return netAddr, nil +} + +func IPToNetAddr(ipStr string, addrMgr *addrmgr.AddrManager, params *DeSoParams) (*wire.NetAddress, error) { + port := params.DefaultSocketPort + host, portstr, err := net.SplitHostPort(ipStr) + if err != nil { + // No port specified so leave port=default and set + // host to the ipStr. 
+ host = ipStr + } else { + pp, err := strconv.ParseUint(portstr, 10, 16) + if err != nil { + return nil, errors.Wrapf(err, "IPToNetAddr: Can not parse port from %s for ip", ipStr) + } + port = uint16(pp) + } + netAddr, err := addrMgr.HostToNetAddress(host, port, 0) + if err != nil { + return nil, errors.Wrapf(err, "IPToNetAddr: Can not parse port from %s for ip", ipStr) + } + return netAddr, nil +} + +func (cc *ConnectionController) isDuplicateInboundIPAddress(addr net.Addr) bool { + netAddr, err := IPToNetAddr(addr.String(), cc.AddrMgr, cc.params) + if err != nil { + // Return true in case we have an error. We do this because it + // will result in the peer connection not being accepted, which + // is desired in this case. + glog.Warningf(errors.Wrapf(err, + "ConnectionController.isDuplicateInboundIPAddress: Problem parsing "+ + "net.Addr to wire.NetAddress so marking as redundant and not "+ + "making connection").Error()) + return true + } + if netAddr == nil { + glog.Warningf("ConnectionController.isDuplicateInboundIPAddress: " + + "address was nil after parsing so marking as redundant and not " + + "making connection") + return true + } + + return cc.cmgr.IsDuplicateInboundIPAddress(netAddr) +} diff --git a/lib/connection_manager.go b/lib/connection_manager.go index 38924bdf9..7c6f510ac 100644 --- a/lib/connection_manager.go +++ b/lib/connection_manager.go @@ -4,7 +4,6 @@ import ( "fmt" "math" "net" - "strconv" "sync" "sync/atomic" "time" @@ -14,7 +13,6 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/decred/dcrd/lru" "github.com/golang/glog" - "github.com/pkg/errors" ) // connection_manager.go contains most of the logic for creating and managing @@ -36,24 +34,10 @@ type ConnectionManager struct { // doesn't need a reference to the Server object. But for now we keep things lazy. srv *Server - // When --connectips is set, we don't connect to anything from the addrmgr. 
- connectIps []string - - // The address manager keeps track of peer addresses we're aware of. When - // we need to connect to a new outbound peer, it chooses one of the addresses - // it's aware of at random and provides it to us. - AddrMgr *addrmgr.AddrManager // The interfaces we listen on for new incoming connections. listeners []net.Listener // The parameters we are initialized with. params *DeSoParams - // The target number of outbound peers we want to have. - targetOutboundPeers uint32 - // The maximum number of inbound peers we allow. - maxInboundPeers uint32 - // When true, only one connection per IP is allowed. Prevents eclipse attacks - // among other things. - limitOneInboundConnectionPerIP bool // When --hypersync is set to true we will attempt fast block synchronization HyperSync bool @@ -136,10 +120,8 @@ type ConnectionManager struct { } func NewConnectionManager( - _params *DeSoParams, _addrMgr *addrmgr.AddrManager, _listeners []net.Listener, + _params *DeSoParams, _listeners []net.Listener, _connectIps []string, _timeSource chainlib.MedianTimeSource, - _targetOutboundPeers uint32, _maxInboundPeers uint32, - _limitOneInboundConnectionPerIP bool, _hyperSync bool, _syncType NodeSyncType, _stallTimeoutSeconds uint64, @@ -150,16 +132,13 @@ func NewConnectionManager( ValidateHyperSyncFlags(_hyperSync, _syncType) return &ConnectionManager{ - srv: _srv, - params: _params, - AddrMgr: _addrMgr, - listeners: _listeners, - connectIps: _connectIps, + srv: _srv, + params: _params, + listeners: _listeners, // We keep track of the last N nonces we've sent in order to detect // self connections. sentNonces: lru.NewCache(1000), timeSource: _timeSource, - //newestBlock: _newestBlock, // Initialize the peer data structures. 
@@ -176,15 +155,13 @@ func NewConnectionManager( newPeerChan: make(chan *Peer, 100), donePeerChan: make(chan *Peer, 100), outboundConnectionChan: make(chan *outboundConnection, 100), + inboundConnectionChan: make(chan *inboundConnection, 100), - targetOutboundPeers: _targetOutboundPeers, - maxInboundPeers: _maxInboundPeers, - limitOneInboundConnectionPerIP: _limitOneInboundConnectionPerIP, - HyperSync: _hyperSync, - SyncType: _syncType, - serverMessageQueue: _serverMessageQueue, - stallTimeoutSeconds: _stallTimeoutSeconds, - minFeeRateNanosPerKB: _minFeeRateNanosPerKB, + HyperSync: _hyperSync, + SyncType: _syncType, + serverMessageQueue: _serverMessageQueue, + stallTimeoutSeconds: _stallTimeoutSeconds, + minFeeRateNanosPerKB: _minFeeRateNanosPerKB, } } @@ -224,40 +201,6 @@ func (cmgr *ConnectionManager) subFromGroupKey(na *wire.NetAddress) { cmgr.mtxOutboundConnIPGroups.Unlock() } -func (cmgr *ConnectionManager) getRandomAddr() *wire.NetAddress { - for tries := 0; tries < 100; tries++ { - addr := cmgr.AddrMgr.GetAddress() - if addr == nil { - glog.V(2).Infof("ConnectionManager.getRandomAddr: addr from GetAddressWithExclusions was nil") - break - } - - // Lock the address map since multiple threads will be trying to read - // and modify it at the same time. - cmgr.mtxAddrsMaps.RLock() - ok := cmgr.connectedOutboundAddrs[addrmgr.NetAddressKey(addr.NetAddress())] - cmgr.mtxAddrsMaps.RUnlock() - if ok { - glog.V(2).Infof("ConnectionManager.getRandomAddr: Not choosing already connected address %v:%v", addr.NetAddress().IP, addr.NetAddress().Port) - continue - } - - // We can only have one outbound address per /16. This is similar to - // Bitcoin and we do it to prevent Sybil attacks. 
- if cmgr.IsFromRedundantOutboundIPAddress(addr.NetAddress()) { - glog.V(2).Infof("ConnectionManager.getRandomAddr: Not choosing address due to redundant group key %v:%v", addr.NetAddress().IP, addr.NetAddress().Port) - continue - } - - glog.V(2).Infof("ConnectionManager.getRandomAddr: Returning %v:%v at %d iterations", - addr.NetAddress().IP, addr.NetAddress().Port, tries) - return addr.NetAddress() - } - - glog.V(2).Infof("ConnectionManager.getRandomAddr: Returning nil") - return nil -} - func _delayRetry(retryCount uint64, persistentAddrForLogging *wire.NetAddress, unit time.Duration) (_retryDuration time.Duration) { // No delay if we haven't tried yet or if the number of retries isn't positive. if retryCount <= 0 { @@ -276,42 +219,6 @@ func _delayRetry(retryCount uint64, persistentAddrForLogging *wire.NetAddress, u return retryDelay } -func (cmgr *ConnectionManager) enoughOutboundPeers() bool { - val := atomic.LoadUint32(&cmgr.numOutboundPeers) - if val > cmgr.targetOutboundPeers { - glog.Errorf("enoughOutboundPeers: Connected to too many outbound "+ - "peers: (%d). Should be "+ - "no more than (%d).", val, cmgr.targetOutboundPeers) - return true - } - - if val == cmgr.targetOutboundPeers { - return true - } - return false -} - -func IPToNetAddr(ipStr string, addrMgr *addrmgr.AddrManager, params *DeSoParams) (*wire.NetAddress, error) { - port := params.DefaultSocketPort - host, portstr, err := net.SplitHostPort(ipStr) - if err != nil { - // No port specified so leave port=default and set - // host to the ipStr. 
- host = ipStr - } else { - pp, err := strconv.ParseUint(portstr, 10, 16) - if err != nil { - return nil, errors.Wrapf(err, "IPToNetAddr: Can not parse port from %s for ip", ipStr) - } - port = uint16(pp) - } - netAddr, err := addrMgr.HostToNetAddress(host, port, 0) - if err != nil { - return nil, errors.Wrapf(err, "IPToNetAddr: Can not parse port from %s for ip", ipStr) - } - return netAddr, nil -} - func (cmgr *ConnectionManager) IsConnectedOutboundIpAddress(netAddr *wire.NetAddress) bool { cmgr.mtxAddrsMaps.RLock() defer cmgr.mtxAddrsMaps.RUnlock() @@ -338,13 +245,15 @@ func (cmgr *ConnectionManager) RemoveAttemptedOutboundAddrs(netAddr *wire.NetAdd // DialPersistentOutboundConnection attempts to connect to a persistent peer. func (cmgr *ConnectionManager) DialPersistentOutboundConnection(persistentAddr *wire.NetAddress, attemptId uint64) (_attemptId uint64) { - glog.V(2).Infof("ConnectionManager.DialPersistentOutboundConnection: Connecting to peer %v", persistentAddr.IP.String()) + glog.V(2).Infof("ConnectionManager.DialPersistentOutboundConnection: Connecting to peer (IP=%v, Port=%v)", + persistentAddr.IP.String(), persistentAddr.Port) return cmgr._dialOutboundConnection(persistentAddr, attemptId, true) } // DialOutboundConnection attempts to connect to a non-persistent peer. 
func (cmgr *ConnectionManager) DialOutboundConnection(addr *wire.NetAddress, attemptId uint64) { - glog.V(2).Infof("ConnectionManager.ConnectOutboundConnection: Connecting to peer %v", addr.IP.String()) + glog.V(2).Infof("ConnectionManager.ConnectOutboundConnection: Connecting to peer (IP=%v, Port=%v)", + addr.IP.String(), addr.Port) cmgr._dialOutboundConnection(addr, attemptId, false) } @@ -400,7 +309,7 @@ func (cmgr *ConnectionManager) ConnectPeer(id uint64, conn net.Conn, na *wire.Ne return peer } -func (cmgr *ConnectionManager) IsFromRedundantInboundIPAddress(netAddr *wire.NetAddress) bool { +func (cmgr *ConnectionManager) IsDuplicateInboundIPAddress(netAddr *wire.NetAddress) bool { cmgr.mtxPeerMaps.RLock() defer cmgr.mtxPeerMaps.RUnlock() @@ -412,7 +321,7 @@ func (cmgr *ConnectionManager) IsFromRedundantInboundIPAddress(netAddr *wire.Net // nodes on a local machine. // TODO: Should this be a flag? if net.IP([]byte{127, 0, 0, 1}).Equal(netAddr.IP) { - glog.V(1).Infof("ConnectionManager._isFromRedundantInboundIPAddress: Allowing " + + glog.V(1).Infof("ConnectionManager.IsDuplicateInboundIPAddress: Allowing " + "localhost IP address to connect") return false } diff --git a/lib/constants.go b/lib/constants.go index 460b8dadb..0a525332e 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -498,6 +498,10 @@ func (pvt ProtocolVersionType) Before(version ProtocolVersionType) bool { return pvt.ToUint64() < version.ToUint64() } +func (pvt ProtocolVersionType) After(version ProtocolVersionType) bool { + return pvt.ToUint64() > version.ToUint64() +} + // DeSoParams defines the full list of possible parameters for the // DeSo network. type DeSoParams struct { @@ -564,6 +568,8 @@ type DeSoParams struct { DialTimeout time.Duration // The amount of time we wait to receive a version message from a peer. VersionNegotiationTimeout time.Duration + // The amount of time we wait to receive a verack message from a peer. 
+ VerackNegotiationTimeout time.Duration // The maximum number of addresses to broadcast to peers. MaxAddressesToBroadcast uint32 @@ -1025,6 +1031,7 @@ var DeSoMainnetParams = DeSoParams{ DialTimeout: 30 * time.Second, VersionNegotiationTimeout: 30 * time.Second, + VerackNegotiationTimeout: 30 * time.Second, MaxAddressesToBroadcast: 10, @@ -1296,6 +1303,7 @@ var DeSoTestnetParams = DeSoParams{ DialTimeout: 30 * time.Second, VersionNegotiationTimeout: 30 * time.Second, + VerackNegotiationTimeout: 30 * time.Second, MaxAddressesToBroadcast: 10, diff --git a/lib/pos_handshake_controller.go b/lib/handshake_controller.go similarity index 96% rename from lib/pos_handshake_controller.go rename to lib/handshake_controller.go index 6f4804f2e..bde07745a 100644 --- a/lib/pos_handshake_controller.go +++ b/lib/handshake_controller.go @@ -5,12 +5,15 @@ import ( "github.com/decred/dcrd/lru" "github.com/golang/glog" "math" + "sync" ) // HandshakeController is a structure that handles the handshake process with remote nodes. It is the entry point for // initiating a handshake with a remote node. It is also responsible for handling version/verack messages from remote // nodes. And for handling the handshake complete control message. type HandshakeController struct { + mtxHandshakeComplete sync.Mutex + rnManager *RemoteNodeManager usedNonces lru.Cache } @@ -37,6 +40,10 @@ func (hc *HandshakeController) InitiateHandshake(rn *RemoteNode) { // _handleHandshakeCompleteMessage handles HandshakeComplete control messages, sent by RemoteNodes. func (hc *HandshakeController) _handleHandshakeCompleteMessage(origin *Peer, desoMsg DeSoMessage) { + // Prevent race conditions while handling handshake complete messages. 
+ hc.mtxHandshakeComplete.Lock() + defer hc.mtxHandshakeComplete.Unlock() + if desoMsg.GetMsgType() != MsgTypePeerHandshakeComplete { return } diff --git a/lib/network.go b/lib/network.go index 80d412c4f..75474ea7c 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1543,8 +1543,7 @@ func (msg *MsgDeSoPong) FromBytes(data []byte) error { type ServiceFlag uint64 const ( - // SFFullNodeDeprecated is deprecated, and set on all nodes by default - // now. We basically split it into SFHyperSync and SFArchivalMode. + // SFFullNodeDeprecated is deprecated, and set on all nodes by default now. SFFullNodeDeprecated ServiceFlag = 1 << 0 // SFHyperSync is a flag used to indicate that the peer supports hyper sync. SFHyperSync ServiceFlag = 1 << 1 @@ -1555,6 +1554,10 @@ const ( SFPosValidator ServiceFlag = 1 << 3 ) +func (sf ServiceFlag) HasService(serviceFlag ServiceFlag) bool { + return sf&serviceFlag == serviceFlag +} + type MsgDeSoVersion struct { // What is the current version we're on? Version uint64 @@ -1952,10 +1955,6 @@ func (msg *MsgDeSoVerack) EncodeVerackV0() ([]byte, error) { } func (msg *MsgDeSoVerack) EncodeVerackV1() ([]byte, error) { - if msg.PublicKey == nil || msg.Signature == nil { - return nil, fmt.Errorf("MsgDeSoVerack.EncodeVerackV1: PublicKey and Signature must be set for V1 message") - } - retBytes := []byte{} // Version diff --git a/lib/network_test.go b/lib/network_test.go index 8a971f75a..c0f721a99 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -93,12 +93,12 @@ func TestVerackV1(t *testing.T) { require := require.New(t) networkType := NetworkType_MAINNET - var buf bytes.Buffer + var buf1, buf2 bytes.Buffer nonceReceived := uint64(12345678910) nonceSent := nonceReceived + 1 tstamp := uint64(2345678910) - // First, test that nil public key and signature are not allowed. + // First, test that nil public key and signature are allowed. 
msg := &MsgDeSoVerack{ Version: VerackVersion1, NonceReceived: nonceReceived, @@ -107,8 +107,8 @@ func TestVerackV1(t *testing.T) { PublicKey: nil, Signature: nil, } - _, err := WriteMessage(&buf, msg, networkType) - require.Error(err) + _, err := WriteMessage(&buf1, msg, networkType) + require.NoError(err) payload := append(UintToBuf(nonceReceived), UintToBuf(nonceSent)...) payload = append(payload, UintToBuf(tstamp)...) hash := sha3.Sum256(payload) @@ -118,10 +118,10 @@ func TestVerackV1(t *testing.T) { msg.PublicKey = priv.PublicKey() msg.Signature, err = priv.Sign(hash[:]) require.NoError(err) - _, err = WriteMessage(&buf, msg, networkType) + _, err = WriteMessage(&buf2, msg, networkType) require.NoError(err) - verBytes := buf.Bytes() + verBytes := buf2.Bytes() testMsg, _, err := ReadMessage(bytes.NewReader(verBytes), networkType) require.NoError(err) require.Equal(msg, testMsg) diff --git a/lib/peer.go b/lib/peer.go index 98d2c135e..0af9aa0b7 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -1192,11 +1192,12 @@ func (pp *Peer) Start() { // If the address manager needs more addresses, then send a GetAddr message // to the peer. This is best-effort. if pp.cmgr != nil { - if pp.cmgr.AddrMgr.NeedMoreAddresses() { + // TODO: Move this to ConnectionController. + /*if pp.cmgr.AddrMgr.NeedMoreAddresses() { go func() { pp.QueueMessage(&MsgDeSoGetAddr{}) }() - } + }*/ } // Send our verack message now that the IO processing machinery has started. 
diff --git a/lib/remote_node.go b/lib/remote_node.go index a357118a2..f2d849a36 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -191,6 +191,10 @@ func (rn *RemoteNode) GetValidatorPublicKey() *bls.PublicKey { return rn.validatorPublicKey } +func (rn *RemoteNode) GetServiceFlag() ServiceFlag { + return rn.handshakeMetadata.serviceFlag +} + func (rn *RemoteNode) GetUserAgent() string { return rn.handshakeMetadata.userAgent } @@ -223,7 +227,11 @@ func (rn *RemoteNode) IsValidator() bool { if !rn.IsHandshakeCompleted() { return false } - return rn.GetValidatorPublicKey() != nil + return rn.hasValidatorServiceFlag() +} + +func (rn *RemoteNode) hasValidatorServiceFlag() bool { + return rn.GetServiceFlag().HasService(SFPosValidator) } // DialOutboundConnection dials an outbound connection to the provided netAddr. @@ -359,7 +367,7 @@ func (rn *RemoteNode) sendVersionMessage(nonce uint64) error { return nil } -// newVersionMessage returns a new version message that can be sent to a RemoteNode peer. The message will contain the +// newVersionMessage returns a new version message that can be sent to a RemoteNode. The message will contain the // nonce that is passed in as an argument. func (rn *RemoteNode) newVersionMessage(nonce uint64) *MsgDeSoVersion { ver := NewMessage(MsgTypeVersion).(*MsgDeSoVersion) @@ -412,6 +420,12 @@ func (rn *RemoteNode) HandleVersionMessage(verMsg *MsgDeSoVersion, responseNonce vMeta := rn.handshakeMetadata // Record the version the peer is using. vMeta.advertisedProtocolVersion = NewProtocolVersionType(verMsg.Version) + // Make sure the latest supported protocol version is ProtocolVersion2. + if vMeta.advertisedProtocolVersion.After(ProtocolVersion2) { + return fmt.Errorf("RemoteNode.HandleVersionMessage: Requesting disconnect for id: (%v) "+ + "protocol version too high. Peer version: %v, max version: %v", rn.id, verMsg.Version, ProtocolVersion2) + } + // Decide on the protocol version to use for this connection. 
negotiatedVersion := rn.params.ProtocolVersion if verMsg.Version < rn.params.ProtocolVersion.ToUint64() { @@ -430,6 +444,17 @@ func (rn *RemoteNode) HandleVersionMessage(verMsg *MsgDeSoVersion, responseNonce // Record the services the peer is advertising. vMeta.serviceFlag = verMsg.Services + // If the RemoteNode was connected with an expectation of being a validator, make sure that its advertised ServiceFlag + // indicates that it is a validator. + if !rn.hasValidatorServiceFlag() && rn.validatorPublicKey != nil { + return fmt.Errorf("RemoteNode.HandleVersionMessage: Requesting disconnect for id: (%v). "+ + "Expected validator, but received invalid ServiceFlag: %v", rn.id, verMsg.Services) + } + // If the RemoteNode is on ProtocolVersion1, then it must not have the validator service flag set. + if rn.hasValidatorServiceFlag() && vMeta.advertisedProtocolVersion.Before(ProtocolVersion2) { + return fmt.Errorf("RemoteNode.HandleVersionMessage: Requesting disconnect for id: (%v). "+ + "RemoteNode has SFValidator service flag, but doesn't have ProtocolVersion2 or later", rn.id) + } // Record the tstamp sent by the peer and calculate the time offset. timeConnected := time.Unix(verMsg.TstampSecs, 0) @@ -450,7 +475,7 @@ func (rn *RemoteNode) HandleVersionMessage(verMsg *MsgDeSoVersion, responseNonce vMeta.minTxFeeRateNanosPerKB = verMsg.MinFeeRateNanosPerKB // Respond to the version message if this is an inbound peer. - if !rn.IsOutbound() { + if rn.IsInbound() { if err := rn.sendVersionMessage(responseNonce); err != nil { return errors.Wrapf(err, "RemoteNode.HandleVersionMessage: Problem sending version message to peer (id= %d)", rn.id) } @@ -460,7 +485,7 @@ func (rn *RemoteNode) HandleVersionMessage(verMsg *MsgDeSoVersion, responseNonce // peer's verack message even if it is an inbound peer. Instead, we just send the verack message right away. // Set the latest time by which we should receive a verack message from the peer. 
- verackTimeExpected := time.Now().Add(rn.params.VersionNegotiationTimeout) + verackTimeExpected := time.Now().Add(rn.params.VerackNegotiationTimeout) rn.verackTimeExpected = &verackTimeExpected if err := rn.sendVerack(); err != nil { return errors.Wrapf(err, "RemoteNode.HandleVersionMessage: Problem sending verack message to peer (id= %d)", rn.id) @@ -496,7 +521,6 @@ func (rn *RemoteNode) newVerackMessage() (*MsgDeSoVerack, error) { verack.Version = VerackVersion0 verack.NonceReceived = vMeta.versionNonceReceived case ProtocolVersion2: - // FIXME: resolve the non-validator - validator handshake issues on protocol version 2. // For protocol version 2, we need to send the nonce we received from the peer in their version message. // We also need to send our own nonce, which we generate for our version message. In addition, we need to // send a current timestamp (in microseconds). We then sign the tuple of (nonceReceived, nonceSent, tstampMicro) @@ -507,6 +531,10 @@ func (rn *RemoteNode) newVerackMessage() (*MsgDeSoVerack, error) { verack.NonceSent = vMeta.versionNonceSent tstampMicro := uint64(time.Now().UnixMicro()) verack.TstampMicro = tstampMicro + // If the RemoteNode is not a validator, then we don't need to sign the verack message. + if !rn.nodeServices.HasService(SFPosValidator) { + break + } verack.PublicKey = rn.keystore.GetSigner().GetPublicKey() verack.Signature, err = rn.keystore.GetSigner().SignPoSValidatorHandshake(verack.NonceSent, verack.NonceReceived, tstampMicro) if err != nil { @@ -599,6 +627,11 @@ func (rn *RemoteNode) validateVerackPoS(vrkMsg *MsgDeSoVerack) error { "verack timestamp too far in the past. Time now: %v, verack timestamp: %v", rn.id, timeNowMicro, vrkMsg.TstampMicro) } + // If the RemoteNode is not a validator, then we don't need to verify the verack message's signature. + if !rn.hasValidatorServiceFlag() { + return nil + } + // Make sure the verack message's public key and signature are not nil. 
if vrkMsg.PublicKey == nil || vrkMsg.Signature == nil { return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+ @@ -617,7 +650,7 @@ func (rn *RemoteNode) validateVerackPoS(vrkMsg *MsgDeSoVerack) error { "verack signature verification failed", rn.id) } - if rn.validatorPublicKey != nil || rn.validatorPublicKey.Serialize() != vrkMsg.PublicKey.Serialize() { + if rn.validatorPublicKey != nil && rn.validatorPublicKey.Serialize() != vrkMsg.PublicKey.Serialize() { return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+ "verack public key mismatch; message: %v; expected: %v", rn.id, vrkMsg.PublicKey, rn.validatorPublicKey) } diff --git a/lib/remote_node_manager.go b/lib/remote_node_manager.go index a41fe4606..fb269d072 100644 --- a/lib/remote_node_manager.go +++ b/lib/remote_node_manager.go @@ -5,8 +5,10 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" + "github.com/golang/glog" "github.com/pkg/errors" "net" + "sync" "sync/atomic" ) @@ -14,6 +16,8 @@ import ( // and stopping remote node connections. It is also responsible for organizing the remote nodes into indices for easy // access, through the RemoteNodeIndexer. type RemoteNodeManager struct { + mtx sync.Mutex + // remoteNodeIndexer is a structure that stores and indexes all created remote nodes. 
remoteNodeIndexer *RemoteNodeIndexer @@ -62,13 +66,19 @@ func (manager *RemoteNodeManager) ProcessCompletedHandshake(remoteNode *RemoteNo if remoteNode.IsValidator() { manager.SetValidator(remoteNode) + manager.UnsetNonValidator(remoteNode) } else { + manager.UnsetValidator(remoteNode) manager.SetNonValidator(remoteNode) } manager.srv.HandleAcceptedPeer(remoteNode.GetPeer()) } func (manager *RemoteNodeManager) Disconnect(rn *RemoteNode) { + if rn == nil { + return + } + glog.V(2).Infof("RemoteNodeManager.Disconnect: Disconnecting from remote node %v", rn.GetId()) rn.Disconnect() manager.removeRemoteNodeFromIndexer(rn) } @@ -83,17 +93,29 @@ func (manager *RemoteNodeManager) DisconnectById(id RemoteNodeId) { } func (manager *RemoteNodeManager) removeRemoteNodeFromIndexer(rn *RemoteNode) { + manager.mtx.Lock() + defer manager.mtx.Unlock() + if rn == nil { return } indexer := manager.remoteNodeIndexer indexer.GetAllRemoteNodes().Remove(rn.GetId()) - if rn.validatorPublicKey != nil { - indexer.GetValidatorIndex().Remove(rn.validatorPublicKey.Serialize()) - } indexer.GetNonValidatorOutboundIndex().Remove(rn.GetId()) indexer.GetNonValidatorInboundIndex().Remove(rn.GetId()) + + // Try to evict the remote node from the validator index. If the remote node is not a validator, then there is nothing to do. + if rn.GetValidatorPublicKey() == nil { + return + } + // Only remove from the validator index if the fetched remote node is the same as the one we are trying to remove. + // Otherwise, we could have a fun edge-case where a duplicated validator connection ends up removing an + // existing validator connection from the index. 
+ fetchedRn, ok := indexer.GetValidatorIndex().Get(rn.GetValidatorPublicKey().Serialize()) + if ok && fetchedRn.GetId() == rn.GetId() { + indexer.GetValidatorIndex().Remove(rn.GetValidatorPublicKey().Serialize()) + } } func (manager *RemoteNodeManager) SendMessage(rn *RemoteNode, desoMessage DeSoMessage) error { @@ -113,6 +135,10 @@ func (manager *RemoteNodeManager) CreateValidatorConnection(netAddr *wire.NetAdd return fmt.Errorf("RemoteNodeManager.CreateValidatorConnection: netAddr or public key is nil") } + if _, ok := manager.GetValidatorIndex().Get(publicKey.Serialize()); ok { + return fmt.Errorf("RemoteNodeManager.CreateValidatorConnection: RemoteNode already exists for public key: %v", publicKey) + } + remoteNode := manager.newRemoteNode(publicKey) if err := remoteNode.DialPersistentOutboundConnection(netAddr); err != nil { return errors.Wrapf(err, "RemoteNodeManager.CreateValidatorConnection: Problem calling DialPersistentOutboundConnection "+ @@ -190,6 +216,9 @@ func (manager *RemoteNodeManager) AttachOutboundConnection(conn net.Conn, na *wi // ########################### func (manager *RemoteNodeManager) setRemoteNode(rn *RemoteNode) { + manager.mtx.Lock() + defer manager.mtx.Unlock() + if rn == nil { return } @@ -198,36 +227,39 @@ func (manager *RemoteNodeManager) setRemoteNode(rn *RemoteNode) { } func (manager *RemoteNodeManager) SetNonValidator(rn *RemoteNode) { + manager.mtx.Lock() + defer manager.mtx.Unlock() + if rn == nil { return } if rn.IsOutbound() { manager.GetNonValidatorOutboundIndex().Set(rn.GetId(), rn) - } else if rn.IsInbound() { - manager.GetNonValidatorInboundIndex().Set(rn.GetId(), rn) } else { - manager.Disconnect(rn) - return + manager.GetNonValidatorInboundIndex().Set(rn.GetId(), rn) } - - manager.UnsetValidator(rn) } func (manager *RemoteNodeManager) SetValidator(remoteNode *RemoteNode) { + manager.mtx.Lock() + defer manager.mtx.Unlock() + if remoteNode == nil { return } pk := remoteNode.GetValidatorPublicKey() if pk == nil { - 
manager.Disconnect(remoteNode) return } manager.GetValidatorIndex().Set(pk.Serialize(), remoteNode) } func (manager *RemoteNodeManager) UnsetValidator(remoteNode *RemoteNode) { + manager.mtx.Lock() + defer manager.mtx.Unlock() + if remoteNode == nil { return } @@ -240,16 +272,17 @@ func (manager *RemoteNodeManager) UnsetValidator(remoteNode *RemoteNode) { } func (manager *RemoteNodeManager) UnsetNonValidator(rn *RemoteNode) { + manager.mtx.Lock() + defer manager.mtx.Unlock() + if rn == nil { return } if rn.IsOutbound() { manager.GetNonValidatorOutboundIndex().Remove(rn.GetId()) - } else if rn.IsInbound() { - manager.GetNonValidatorInboundIndex().Remove(rn.GetId()) } else { - manager.Disconnect(rn) + manager.GetNonValidatorInboundIndex().Remove(rn.GetId()) } } diff --git a/lib/server.go b/lib/server.go index d1c82e5b3..d4c371955 100644 --- a/lib/server.go +++ b/lib/server.go @@ -62,7 +62,9 @@ type Server struct { eventManager *EventManager TxIndex *TXIndex + handshakeController *HandshakeController // fastHotStuffEventLoop consensus.FastHotStuffEventLoop + connectionController *ConnectionController // posMempool *PosMemPool TODO: Add the mempool later // All messages received from peers get sent from the ConnectionManager to the @@ -175,6 +177,10 @@ func (srv *Server) ResetRequestQueues() { srv.requestedTransactionsMap = make(map[BlockHash]*GetDataRequestInfo) } +func (srv *Server) GetConnectionController() *ConnectionController { + return srv.connectionController +} + // dataLock must be acquired for writing before calling this function. func (srv *Server) _removeRequest(hash *BlockHash) { // Just be lazy and remove the hash from everything indiscriminately to @@ -445,8 +451,7 @@ func NewServer( // Create a new connection manager but note that it won't be initialized until Start(). 
_incomingMessages := make(chan *ServerMessage, (_targetOutboundPeers+_maxInboundPeers)*3) _cmgr := NewConnectionManager( - _params, _desoAddrMgr, _listeners, _connectIps, timesource, - _targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP, + _params, _listeners, _connectIps, timesource, _hyperSync, _syncType, _stallTimeoutSeconds, _minFeeRateNanosPerKB, _incomingMessages, srv) @@ -481,6 +486,22 @@ func NewServer( hex.EncodeToString(_chain.blockTip().Hash[:]), hex.EncodeToString(BigintToHash(_chain.blockTip().CumWork)[:])) + nodeServices := SFFullNodeDeprecated + if _hyperSync { + nodeServices |= SFHyperSync + } + if archivalMode { + nodeServices |= SFArchivalNode + } + if _blsKeystore != nil { + nodeServices |= SFPosValidator + } + rnManager := NewRemoteNodeManager(srv, _chain, _cmgr, _blsKeystore, _params, _minFeeRateNanosPerKB, nodeServices) + + srv.handshakeController = NewHandshakeController(rnManager) + srv.connectionController = NewConnectionController(_params, _cmgr, srv.handshakeController, rnManager, + _blsKeystore, _desoAddrMgr, _targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP) + if srv.stateChangeSyncer != nil { srv.stateChangeSyncer.BlockHeight = uint64(_chain.headerTip().Height) } @@ -2176,7 +2197,9 @@ func (srv *Server) _handleAddrMessage(pp *Peer, msg *MsgDeSoAddr) { netAddrsReceived = append( netAddrsReceived, addrAsNetAddr) } - srv.cmgr.AddrMgr.AddAddresses(netAddrsReceived, pp.netAddr) + // TODO: temporary + addressMgr := addrmgr.New("", net.LookupIP) + addressMgr.AddAddresses(netAddrsReceived, pp.netAddr) // If the message had <= 10 addrs in it, then queue all the addresses for relaying // on the next cycle. @@ -2207,7 +2230,9 @@ func (srv *Server) _handleGetAddrMessage(pp *Peer, msg *MsgDeSoGetAddr) { glog.V(1).Infof("Server._handleGetAddrMessage: Received GetAddr from peer %v", pp) // When we get a GetAddr message, choose MaxAddrsPerMsg from the AddrMgr // and send them back to the peer. 
- netAddrsFound := srv.cmgr.AddrMgr.AddressCache() + // TODO: temporary + addressMgr := addrmgr.New("", net.LookupIP) + netAddrsFound := addressMgr.AddressCache() if len(netAddrsFound) > MaxAddrsPerAddrMsg { netAddrsFound = netAddrsFound[:MaxAddrsPerAddrMsg] } @@ -2230,9 +2255,12 @@ func (srv *Server) _handleControlMessages(serverMessage *ServerMessage) (_should switch serverMessage.Msg.(type) { // Control messages used internally to signal to the server. case *MsgDeSoPeerHandshakeComplete: - break + srv.handshakeController._handleHandshakeCompleteMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoDisconnectedPeer: srv._handleDonePeer(serverMessage.Peer) + srv.connectionController._handleDonePeerMessage(serverMessage.Peer, serverMessage.Msg) + case *MsgDeSoNewConnection: + srv.connectionController._handleNewConnectionMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoQuit: return true } @@ -2244,6 +2272,10 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { // Handle all non-control message types from our Peers. switch msg := serverMessage.Msg.(type) { // Messages sent among peers. 
+ case *MsgDeSoAddr: + srv.connectionController._handleAddrMessage(serverMessage.Peer, serverMessage.Msg) + case *MsgDeSoGetAddr: + srv.connectionController._handleGetAddrMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoGetHeaders: srv._handleGetHeaders(serverMessage.Peer, msg) case *MsgDeSoHeaderBundle: @@ -2266,6 +2298,10 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { srv._handleMempool(serverMessage.Peer, msg) case *MsgDeSoInv: srv._handleInv(serverMessage.Peer, msg) + case *MsgDeSoVersion: + srv.handshakeController._handleVersionMessage(serverMessage.Peer, serverMessage.Msg) + case *MsgDeSoVerack: + srv.handshakeController._handleVerackMessage(serverMessage.Peer, serverMessage.Msg) } } @@ -2443,10 +2479,12 @@ func (srv *Server) _startAddressRelayer() { } // For the first ten minutes after the server starts, relay our address to all // peers. After the first ten minutes, do it once every 24 hours. + // TODO: temporary + addressMgr := addrmgr.New("", net.LookupIP) glog.V(1).Infof("Server.Start._startAddressRelayer: Relaying our own addr to peers") if numMinutesPassed < 10 || numMinutesPassed%(RebroadcastNodeAddrIntervalMinutes) == 0 { for _, pp := range srv.cmgr.GetAllPeers() { - bestAddress := srv.cmgr.AddrMgr.GetBestLocalAddress(pp.netAddr) + bestAddress := addressMgr.GetBestLocalAddress(pp.netAddr) if bestAddress != nil { glog.V(2).Infof("Server.Start._startAddressRelayer: Relaying address %v to "+ "peer %v", bestAddress.IP.String(), pp) From 0194b165225e96b73cb6c758e65163efbb3a514f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 24 Jan 2024 12:04:15 -0500 Subject: [PATCH 382/762] Cross-Validate Genesis QC View with The Tip and Safe Block Views (#937) * Cross-Validate Genesis QC View with The Tip and Safe Block Views * Use strict inequality * Better comments --- consensus/event_loop.go | 11 +++++++++++ consensus/event_loop_test.go | 13 +++++++++++++ 2 files changed, 24 
insertions(+) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index b35fd7659..75e87c408 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -237,6 +237,17 @@ func (fc *fastHotStuffEventLoop) storeBlocks(tip BlockWithValidatorList, safeBlo return errors.New("Invalid safe blocks or validator lists") } + // Sanity check: the tip block and safe blocks must not have lower views than the genesis QC's view. + if tip.Block.GetView() < fc.genesisQC.GetView() { + return errors.New("Tip block view must be greater than or qual to the genesis QC view") + } + + for _, block := range safeBlocks { + if block.Block.GetView() < fc.genesisQC.GetView() { + return errors.New("Safe block view must be greater than or equal to the genesis QC view") + } + } + // Extract the block hashes for the tip block and safe blocks tipBlockHash := tip.Block.GetBlockHash() safeBlockHashes := collections.Transform(safeBlocks, extractBlockHash) diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index e233644ec..0276c2b5b 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -45,6 +45,19 @@ func TestInit(t *testing.T) { require.Error(t, err) } + // Test Init() function with invalid genesis QC and tip block views + { + fc := NewFastHotStuffEventLoop() + genesisBlock := createDummyBlock(2) + fakeGenesisBlock := createDummyBlock(4) + err := fc.Init(1, 1, + fakeGenesisBlock.GetQC(), // genesisQC + BlockWithValidatorList{genesisBlock, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks + ) + require.Error(t, err) + } + // Test Init() function with malformed tip block { fc := NewFastHotStuffEventLoop() From 69247d6ef7e89a072f646c21445bdba1b91c0076 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 24 Jan 2024 13:28:10 -0500 Subject: [PATCH 383/762] Remove QC From FastHotStuffEventLoop Timeout Event (#938) --- 
consensus/event_loop.go | 1 - consensus/event_loop_test.go | 14 +++++++------- consensus/integration_test_types.go | 5 +++-- consensus/types.go | 8 -------- consensus/utils.go | 16 +++++----------- consensus/utils_test.go | 6 ------ lib/pos_consensus.go | 13 +++++++++++-- lib/pos_consensus_test.go | 7 ++++--- 8 files changed, 30 insertions(+), 40 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 75e87c408..ef9a2488c 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -839,7 +839,6 @@ func (fc *fastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uin View: timedOutView, // The view we timed out TipBlockHash: fc.tip.block.GetBlockHash(), // The last block we saw TipBlockHeight: fc.tip.block.GetHeight(), // The last block we saw - QC: fc.tip.block.GetQC(), // The highest QC we have }) // Cancel the timeout task. The server will reschedule it when it advances the view. diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 0276c2b5b..7eba6a3b7 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -465,6 +465,7 @@ func TestProcessValidatorTimeout(t *testing.T) { validatorList := createValidatorListForPrivateKeys(validatorPrivateKey1, validatorPrivateKey2) // Init the event loop + { // BlockHeight = 3, Current View = 4 genesisBlock := createDummyBlock(2) @@ -498,7 +499,7 @@ func TestProcessValidatorTimeout(t *testing.T) { // Test with stale view { - timeout := createTimeoutMessageWithPrivateKeyAndHighQC(3, validatorPrivateKey1, fc.tip.block.GetQC()) + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(3, validatorPrivateKey1, fc.tip.block.(*block).GetQC()) err := fc.ProcessValidatorTimeout(timeout) require.Error(t, err) require.Contains(t, err.Error(), "Timeout has a stale view") @@ -533,7 +534,7 @@ func TestProcessValidatorTimeout(t *testing.T) { // Test unknown high QC { - timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, 
fc.tip.block.GetQC()) + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.(*block).GetQC()) timeout.highQC = createDummyQC(2, createDummyBlockHash()) err := fc.ProcessValidatorTimeout(timeout) require.Error(t, err) @@ -542,7 +543,7 @@ func TestProcessValidatorTimeout(t *testing.T) { // Test unknown public key in timeout message { - timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.GetQC()) + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.(*block).GetQC()) timeout.publicKey = createDummyBLSPublicKey() err := fc.ProcessValidatorTimeout(timeout) require.Error(t, err) @@ -551,7 +552,7 @@ func TestProcessValidatorTimeout(t *testing.T) { // Test invalid signature { - timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.GetQC()) + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.(*block).GetQC()) timeout.signature = createDummyBLSSignature() err := fc.ProcessValidatorTimeout(timeout) require.Error(t, err) @@ -560,7 +561,7 @@ func TestProcessValidatorTimeout(t *testing.T) { // Test invalid high QC { - timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.GetQC()) + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.(*block).GetQC()) timeout.highQC = createDummyQC(timeout.highQC.GetView(), timeout.highQC.GetBlockHash()) err := fc.ProcessValidatorTimeout(timeout) require.Error(t, err) @@ -569,7 +570,7 @@ func TestProcessValidatorTimeout(t *testing.T) { // Test happy path { - timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.GetQC()) + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.(*block).GetQC()) err := fc.ProcessValidatorTimeout(timeout) require.NoError(t, err) } @@ -616,7 +617,6 @@ func 
TestTimeoutScheduledTaskExecuted(t *testing.T) { require.Equal(t, timeoutSignal.EventType, FastHotStuffEventTypeTimeout) require.Equal(t, timeoutSignal.View, dummyBlock.GetView()+2) require.Equal(t, timeoutSignal.TipBlockHash.GetValue(), dummyBlock.GetBlockHash().GetValue()) - require.Equal(t, timeoutSignal.QC.GetBlockHash().GetValue(), dummyBlock.qc.GetBlockHash().GetValue()) // Confirm that the timeout is no longer running require.False(t, fc.nextTimeoutTask.IsScheduled()) diff --git a/consensus/integration_test_types.go b/consensus/integration_test_types.go index d0a9ce08e..dfc2397db 100644 --- a/consensus/integration_test_types.go +++ b/consensus/integration_test_types.go @@ -375,7 +375,8 @@ func (node *validatorNode) handleTimeoutEvent(event *FastHotStuffEvent) { } func (node *validatorNode) broadcastTimeout(event *FastHotStuffEvent) { - payload := GetTimeoutSignaturePayload(event.View, event.QC.GetView()) + highQC := node.safeBlocks[event.TipBlockHash.GetValue()].GetQC() + payload := GetTimeoutSignaturePayload(event.View, highQC.GetView()) signature, err := node.privateKey.Sign(payload[:]) if err != nil { panic(err) @@ -383,7 +384,7 @@ func (node *validatorNode) broadcastTimeout(event *FastHotStuffEvent) { timeout := &timeoutMessage{ view: event.View, - highQC: event.QC, + highQC: highQC, publicKey: node.privateKey.PublicKey(), signature: signature, } diff --git a/consensus/types.go b/consensus/types.go index f0967c46f..44f51c353 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -120,14 +120,6 @@ type Block interface { GetBlockHash() BlockHash GetHeight() uint64 GetView() uint64 - // This is a hybrid function that returns the QC from the block. 
- // - If the block is a normal block, this returns the QC from validators' votes for the previous block - // - If the block contains a timeout QC, this returns the validator high QC aggregated from - // validators' timeout messages - // We are able to simplify the GetQC() to this behavior because this QC is only needed to construct - // a timeout QC for the next block in the event of a timeout. So, this QC will always be the latest QC - // at the current chain's tip that subsequent blocks will build on top of. - GetQC() QuorumCertificate } type BlockWithValidatorList struct { diff --git a/consensus/utils.go b/consensus/utils.go index 44e14bdd7..94cf12304 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -36,7 +36,8 @@ func IsProperlyFormedVoteEvent(event *FastHotStuffEvent) bool { event.EventType == FastHotStuffEventTypeVote && // Event type is vote event.View > 0 && // The view the tip block was proposed in is non-zero event.TipBlockHeight > 0 && // Tip block height voted on is non-zero - !isInterfaceNil(event.TipBlockHash) // Tip block hash voted on is non-nil + !isInterfaceNil(event.TipBlockHash) && // Tip block hash voted on is non-nil + isInterfaceNil(event.QC) // The high QC is nil } func IsProperlyFormedTimeoutEvent(event *FastHotStuffEvent) bool { @@ -45,7 +46,7 @@ func IsProperlyFormedTimeoutEvent(event *FastHotStuffEvent) bool { event.View > 0 && // The view that was timed out is non-zero event.TipBlockHeight > 0 && // Tip block height is non-zero !isInterfaceNil(event.TipBlockHash) && // Tip block hash is non-nil - !isInterfaceNil(event.QC) // The high QC is non-nil + isInterfaceNil(event.QC) // The high QC is nil. The receiver will determine their own high QC. 
} // Given a QC and a sorted validator list, this function returns true if the QC contains a valid @@ -67,9 +68,7 @@ func IsValidSuperMajorityQuorumCertificate(qc QuorumCertificate, validators []Va } // IsValidSuperMajorityAggregateQuorumCertificate validates that the aggregate QC is properly formed and signed -// by a super-majority of validators in the network. It takes in two sets of validator sets because the validator -// set may shift between the view that is timed out and the high QC view. The two validator -// sets are defined as: +// by a super-majority of validators in the network. It takes in two sets of validators defines as: // - aggQCValidators: The validator set that signed the timeouts for the view that has timed out (the view in the aggregate QC) // - highQCValidators: The validator set that signed the high QC (the view in the high QC) func IsValidSuperMajorityAggregateQuorumCertificate(aggQC AggregateQuorumCertificate, aggQCValidators []Validator, highQCValidators []Validator) bool { @@ -184,12 +183,7 @@ func isProperlyFormedBlock(block Block) bool { } // The block hash and QC must be non-nil - if isInterfaceNil(block.GetBlockHash()) || !isProperlyFormedQC(block.GetQC()) { - return false - } - - // The QC's view must be less than the block's view - if block.GetQC().GetView() >= block.GetView() { + if isInterfaceNil(block.GetBlockHash()) { return false } diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 740203b1d..b85971fbe 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -215,12 +215,6 @@ func TestIsProperlyFormedBlock(t *testing.T) { require.False(t, isProperlyFormedBlock(&block)) } - // Test nil QC - { - block := block{height: 1, view: 1, blockHash: createDummyBlockHash(), qc: nil} - require.False(t, isProperlyFormedBlock(&block)) - } - // Test valid block { require.True(t, isProperlyFormedBlock(createDummyBlock(2))) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 5d555c44c..b959d5872 100644 
--- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -403,15 +403,24 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error advancing view on timeout: %v", err) } + // Extract the tip block hash from the timeout message + tipBlockHash := BlockHashFromConsensusInterface(event.TipBlockHash) + + // Fetch the HighQC from the Blockchain struct + tipBlockNode, tipBlockExists := cc.blockchain.blockIndexByHash[*tipBlockHash] + if !tipBlockExists { + return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error fetching tip block: %v", tipBlockHash) + } + // Construct the timeout message timeoutMsg := NewMessage(MsgTypeValidatorTimeout).(*MsgDeSoValidatorTimeout) timeoutMsg.MsgVersion = MsgValidatorTimeoutVersion0 timeoutMsg.TimedOutView = event.View timeoutMsg.VotingPublicKey = cc.signer.GetPublicKey() - timeoutMsg.HighQC = QuorumCertificateFromConsensusInterface(event.QC) + timeoutMsg.HighQC = QuorumCertificateFromConsensusInterface(tipBlockNode.Header.GetQC()) // Sign the timeout message - timeoutMsg.TimeoutPartialSignature, err = cc.signer.SignValidatorTimeout(event.View, event.QC.GetView()) + timeoutMsg.TimeoutPartialSignature, err = cc.signer.SignValidatorTimeout(event.View, tipBlockNode.Header.GetQC().GetView()) if err != nil { // This should never happen as long as the BLS signer is initialized correctly. 
return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error signing validator timeout: %v", err) diff --git a/lib/pos_consensus_test.go b/lib/pos_consensus_test.go index 542e387e0..c22de7109 100644 --- a/lib/pos_consensus_test.go +++ b/lib/pos_consensus_test.go @@ -107,6 +107,9 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { }, blockchain: &Blockchain{ ChainLock: deadlock.RWMutex{}, + blockIndexByHash: map[BlockHash]*BlockNode{ + *blockHash: {Header: blockHeader}, + }, }, fastHotStuffEventLoop: &consensus.MockFastHotStuffEventLoop{ OnIsInitialized: alwaysReturnTrue, @@ -170,7 +173,6 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { View: currentView - 1, TipBlockHeight: currentView - 1, TipBlockHash: blockHash, - QC: blockHeader.ValidatorsVoteQC, } err := fastHotStuffConsensus.HandleLocalTimeoutEvent(event) require.Contains(t, err.Error(), "Stale timeout event") @@ -182,8 +184,7 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { EventType: consensus.FastHotStuffEventTypeTimeout, View: currentView, TipBlockHeight: currentView, - TipBlockHash: blockHeader.ValidatorsVoteQC.GetBlockHash(), - QC: blockHeader.ValidatorsVoteQC, + TipBlockHash: blockHash, } err := fastHotStuffConsensus.HandleLocalTimeoutEvent(event) require.NoError(t, err) From c6ca4c5b0b48951ac7f1565c5330db544b549015 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 24 Jan 2024 13:44:25 -0500 Subject: [PATCH 384/762] Use PoS Genesis QC During Timeout After Cutover (#939) --- lib/pos_consensus.go | 18 ++++++++++++++++-- lib/pos_consensus_test.go | 1 + 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index b959d5872..bfaea1ed6 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -417,10 +417,20 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo timeoutMsg.MsgVersion = 
MsgValidatorTimeoutVersion0 timeoutMsg.TimedOutView = event.View timeoutMsg.VotingPublicKey = cc.signer.GetPublicKey() - timeoutMsg.HighQC = QuorumCertificateFromConsensusInterface(tipBlockNode.Header.GetQC()) + + if cc.isFinalPoWBlockHeight(tipBlockNode.Header.Height) { + // If the tip block is the final block of the PoW chain, then we can use the PoS chain's genesis block + // as the highQC for it. + if timeoutMsg.HighQC, err = cc.createGenesisQC(tipBlockNode.Hash, tipBlockNode.Header.Height); err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error creating PoS cutover genesis QC: %v", err) + } + } else { + // Otherwise, we use the QC from the tip block as the highQC + timeoutMsg.HighQC = QuorumCertificateFromConsensusInterface(tipBlockNode.Header.GetQC()) + } // Sign the timeout message - timeoutMsg.TimeoutPartialSignature, err = cc.signer.SignValidatorTimeout(event.View, tipBlockNode.Header.GetQC().GetView()) + timeoutMsg.TimeoutPartialSignature, err = cc.signer.SignValidatorTimeout(event.View, timeoutMsg.HighQC.GetView()) if err != nil { // This should never happen as long as the BLS signer is initialized correctly. return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error signing validator timeout: %v", err) @@ -775,6 +785,10 @@ func (fc *FastHotStuffConsensus) getFinalCommittedPoWBlock() (*BlockNode, error) ) } +func (fc *FastHotStuffConsensus) isFinalPoWBlockHeight(blockHeight uint64) bool { + return blockHeight == uint64(fc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1) +} + // Finds the epoch entry for the block and returns the epoch number. 
func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { for _, epochEntry := range epochEntries { diff --git a/lib/pos_consensus_test.go b/lib/pos_consensus_test.go index c22de7109..81c4b739c 100644 --- a/lib/pos_consensus_test.go +++ b/lib/pos_consensus_test.go @@ -105,6 +105,7 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { signer: &BLSSigner{ privateKey: blsPrivateKey, }, + params: &DeSoTestnetParams, blockchain: &Blockchain{ ChainLock: deadlock.RWMutex{}, blockIndexByHash: map[BlockHash]*BlockNode{ From 6d7bf24854737d3fa4ac75a0243a944f7ce1be71 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 24 Jan 2024 13:50:56 -0500 Subject: [PATCH 385/762] Support GenesisQC As HighQC in Incoming Timeout Messages (#940) * Use OverrideHighQC in Timeout Messages * Add bitset equality * Add equality check for QC --- collections/bitset/bitset.go | 7 ++++++ collections/bitset/bitset_test.go | 37 +++++++++++++++++++++++++++++++ consensus/event_loop.go | 6 ++--- consensus/utils.go | 19 ++++++++++++++++ consensus/utils_test.go | 36 ++++++++++++++++++++++++++++++ 5 files changed, 102 insertions(+), 3 deletions(-) diff --git a/collections/bitset/bitset.go b/collections/bitset/bitset.go index 085013b67..d97551352 100644 --- a/collections/bitset/bitset.go +++ b/collections/bitset/bitset.go @@ -85,3 +85,10 @@ func (b *Bitset) FromBytes(bytes []byte) *Bitset { b.store.SetBytes(bytes) return b } + +func (b *Bitset) Eq(other *Bitset) bool { + if b == nil || other == nil { + return false + } + return b.store.Cmp(other.store) == 0 +} diff --git a/collections/bitset/bitset_test.go b/collections/bitset/bitset_test.go index 47bb083f6..ff24b2014 100644 --- a/collections/bitset/bitset_test.go +++ b/collections/bitset/bitset_test.go @@ -94,3 +94,40 @@ func TestEmptyBitsetByteEncodeDecode(t *testing.T) { require.Zero(t, decodedBitset.Size()) } + +func TestEquality(t *testing.T) 
{ + // Test nil bitsets + { + var bitset1 *Bitset + var bitset2 *Bitset + + require.False(t, bitset1.Eq(bitset2)) + } + + // Test one nil and one non-nil bitset + { + var bitset1 *Bitset + bitset2 := NewBitset().Set(0, true) + + require.False(t, bitset1.Eq(bitset2)) + require.False(t, bitset2.Eq(bitset1)) + } + + // Test two non-equal non-nil bitsets + { + bitset1 := NewBitset().Set(0, true) + bitset2 := NewBitset().Set(1, true) + + require.False(t, bitset1.Eq(bitset2)) + require.False(t, bitset2.Eq(bitset1)) + } + + // Test two equal non-nil bitsets + { + bitset1 := NewBitset().Set(0, true) + bitset2 := NewBitset().Set(0, true).Set(1, false) + + require.True(t, bitset1.Eq(bitset2)) + require.True(t, bitset2.Eq(bitset1)) + } +} diff --git a/consensus/event_loop.go b/consensus/event_loop.go index ef9a2488c..892bf56f0 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -449,9 +449,9 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) ) } - // Verify the high QC in the timeout message. We can use the validator list at the exact block height of - // the high QC's block hash. - if !IsValidSuperMajorityQuorumCertificate(timeout.GetHighQC(), validatorList) { + // Verify the high QC in the timeout message. The highQC is valid if it exactly matches the genesis QC or it is a + // valid QC signed by a super-majority of validators for a safe block. 
+ if !IsEqualQC(timeout.GetHighQC(), fc.genesisQC) && !IsValidSuperMajorityQuorumCertificate(timeout.GetHighQC(), validatorList) { return errors.Errorf( "FastHotStuffEventLoop.ProcessValidatorTimeout: Invalid high QC received in timeout message from validator %s for view %d", timeout.GetPublicKey().ToString(), diff --git a/consensus/utils.go b/consensus/utils.go index 94cf12304..173af3e10 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -381,6 +381,25 @@ func containsBlockHash(blockHashes []BlockHash, blockHash BlockHash) bool { }) } +func IsEqualQC(qc1 QuorumCertificate, qc2 QuorumCertificate) bool { + if !isProperlyFormedQC(qc1) || !isProperlyFormedQC(qc2) { + return false + } + + return qc1.GetView() == qc2.GetView() && + IsEqualBlockHash(qc1.GetBlockHash(), qc2.GetBlockHash()) && + IsEqualAggregatedSignature(qc1.GetAggregatedSignature(), qc2.GetAggregatedSignature()) +} + +func IsEqualAggregatedSignature(agg1 AggregatedSignature, agg2 AggregatedSignature) bool { + if !isProperlyFormedAggregateSignature(agg1) || !isProperlyFormedAggregateSignature(agg2) { + return false + } + + return agg1.GetSignature().Eq(agg2.GetSignature()) && + agg1.GetSignersList().Eq(agg2.GetSignersList()) +} + func IsEqualBlockHash(hash1 BlockHash, hash2 BlockHash) bool { hash1Value := hash1.GetValue() hash2Value := hash2.GetValue() diff --git a/consensus/utils_test.go b/consensus/utils_test.go index b85971fbe..ae51e7d19 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -402,3 +402,39 @@ func TestIsSuperMajorityStake(t *testing.T) { require.True(t, isSuperMajorityStake(totalStake, totalStake)) } } + +func TestIsEqualQC(t *testing.T) { + // Test nil QCs + { + require.False(t, IsEqualQC(nil, nil)) + } + + // Test one nil and one non-nil QC + { + require.False(t, IsEqualQC(nil, createDummyQC(1, createDummyBlockHash()))) + require.False(t, IsEqualQC(createDummyQC(1, createDummyBlockHash()), nil)) + } + + // Test two non-equal non-nil QCs with different block 
hashes + { + require.False(t, IsEqualQC(createDummyQC(1, createDummyBlockHash()), createDummyQC(1, createDummyBlockHash()))) + } + + // Test two non-equal non-nil QCs with different views + { + blockHash := createDummyBlockHash() + require.False(t, IsEqualQC(createDummyQC(1, blockHash), createDummyQC(2, blockHash))) + } + + // Test two non-equal non-nil QCs with different aggregated signatures + { + blockHash := createDummyBlockHash() + require.False(t, IsEqualQC(createDummyQC(1, blockHash), createDummyQC(1, blockHash))) + } + + // Test two equal QCs + { + qc := createDummyQC(1, createDummyBlockHash()) + require.True(t, IsEqualQC(qc, qc)) + } +} From d3f0ade31d24acc5b01ebafbd2d7431c8737754c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 24 Jan 2024 18:24:59 -0500 Subject: [PATCH 386/762] Consolidate Code For PoW and PoS Block Height Checks (#941) * Consolidate Code For PoW and PoS block Height Checks * Replace usage in server.go * Cleanup * More cleanup * Address Nina's comment --- lib/pos_blockchain.go | 51 +++++++++++++++++++++++++++++++++------ lib/pos_consensus.go | 35 +++------------------------ lib/pos_consensus_test.go | 4 +++ lib/server.go | 28 +++++++++++---------- 4 files changed, 66 insertions(+), 52 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index fdf795d87..65792d7d1 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -52,10 +52,10 @@ func (bc *Blockchain) ProcessHeaderPoS(header *MsgDeSoHeader) (_isMainChain bool func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader) ( _isMainChain bool, _isOrphan bool, _err error, ) { - if header.Height < uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + if !bc.IsPoSBlockHeight(header.Height) { return false, false, errors.Errorf( "processHeaderPoS: Header height %d is less than the ProofOfStake2ConsensusCutoverBlockHeight %d", - header.Height, 
bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight, + header.Height, bc.GetFirstPoSBlockHeight(), ) } @@ -214,10 +214,10 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v _err error, ) { // If the incoming block's height is under the PoS cutover fork height, then we can't process it. Exit early. - if block.Header.Height < uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + if !bc.IsPoSBlockHeight(block.Header.Height) { return false, false, nil, errors.Errorf( "processHeaderPoS: Header height %d is less than the ProofOfStake2ConsensusCutoverBlockHeight %d", - block.Header.Height, bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight, + block.Header.Height, bc.GetFirstPoSBlockHeight(), ) } @@ -940,7 +940,7 @@ func (bc *Blockchain) isProperlyFormedBlockHeaderPoS(header *MsgDeSoHeader) erro // that this block height is exactly one greater than its parent's block height. func (bc *Blockchain) hasValidBlockHeightPoS(header *MsgDeSoHeader) error { blockHeight := header.Height - if blockHeight < uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + if !bc.IsPoSBlockHeight(blockHeight) { return RuleErrorPoSBlockBeforeCutoverHeight } // Validate that the block height is exactly one greater than its parent. @@ -1132,11 +1132,9 @@ func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validator voteQC := block.Header.ValidatorsVoteQC timeoutAggregateQC := block.Header.ValidatorsTimeoutAggregateQC - cutoverHeight := uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) - // If the block is the first block after the PoS cutover and has a timeout aggregate QC, then the // highQC must be a synthetic QC. We need to override the validator set used to validate the high QC. 
- if block.Header.Height == cutoverHeight && !timeoutAggregateQC.isEmpty() { + if block.Header.Height == bc.GetFirstPoSBlockHeight() && !timeoutAggregateQC.isEmpty() { posCutoverValidator, err := BuildProofOfStakeCutoverValidator() if err != nil { return errors.Wrapf(err, "isValidPoSQuorumCertificate: Problem building PoS cutover validator") @@ -1769,6 +1767,43 @@ func (bc *Blockchain) getMaxSequentialBlockHeightAfter(startingHeight uint64) ui return maxSequentialHeightWithBlocks } +func (bc *Blockchain) GetFinalCommittedPoWBlock() (*BlockNode, error) { + // Fetch the block node for the cutover block + blockNodes, blockNodesExist := bc.blockIndexByHeight[bc.GetFinalPoWBlockHeight()] + if !blockNodesExist { + return nil, errors.Errorf("Error fetching cutover block nodes before height %d", bc.GetFinalPoWBlockHeight()) + } + + // Fetch the block node with the committed status + for _, blockNode := range blockNodes { + if blockNode.Status == StatusBlockCommitted { + return blockNode, nil + } + } + + return nil, errors.Errorf("Error fetching committed cutover block node before height %d", bc.GetFinalPoWBlockHeight()) +} + +func (bc *Blockchain) IsPoWBlockHeight(blockHeight uint64) bool { + return !bc.IsPoSBlockHeight(blockHeight) +} + +func (bc *Blockchain) IsPoSBlockHeight(blockHeight uint64) bool { + return blockHeight >= bc.GetFirstPoSBlockHeight() +} + +func (bc *Blockchain) IsFinalPoWBlockHeight(blockHeight uint64) bool { + return blockHeight == bc.GetFinalPoWBlockHeight() +} + +func (bc *Blockchain) GetFinalPoWBlockHeight() uint64 { + return uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight - 1) +} + +func (bc *Blockchain) GetFirstPoSBlockHeight() uint64 { + return uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) +} + const ( RuleErrorNilBlock RuleError = "RuleErrorNilBlock" RuleErrorNilBlockHeader RuleError = "RuleErrorNilBlockHeader" diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index bfaea1ed6..499e40a50 
100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -48,15 +48,16 @@ func (cc *FastHotStuffConsensus) Start() error { // Fetch the current tip of the chain tipBlock := cc.blockchain.BlockTip() + tipHeight := tipBlock.Header.Height // If the chain is not at the final PoW block height or higher, then we cannot start the PoS consensus. - if tipBlock.Height < cc.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1 { + if cc.blockchain.IsPoWBlockHeight(tipHeight) && !cc.blockchain.IsFinalPoWBlockHeight(tipHeight) { return errors.Errorf( "FastHotStuffConsensus.Start: Block tip %d is not at the final PoW block height", tipBlock.Height, ) } - finalPoWBlock, err := cc.getFinalCommittedPoWBlock() + finalPoWBlock, err := cc.blockchain.GetFinalCommittedPoWBlock() if err != nil { return errors.Errorf("FastHotStuffConsensus.Start: Error fetching final PoW block: %v", err) } @@ -418,7 +419,7 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo timeoutMsg.TimedOutView = event.View timeoutMsg.VotingPublicKey = cc.signer.GetPublicKey() - if cc.isFinalPoWBlockHeight(tipBlockNode.Header.Height) { + if cc.blockchain.IsFinalPoWBlockHeight(tipBlockNode.Header.Height) { // If the tip block is the final block of the PoW chain, then we can use the PoS chain's genesis block // as the highQC for it. 
if timeoutMsg.HighQC, err = cc.createGenesisQC(tipBlockNode.Hash, tipBlockNode.Header.Height); err != nil { @@ -761,34 +762,6 @@ func (fc *FastHotStuffConsensus) createGenesisQC(blockHash *BlockHash, view uint return qc, nil } -func (fc *FastHotStuffConsensus) getFinalCommittedPoWBlock() (*BlockNode, error) { - // Fetch the block node for the cutover block - blockNodes, blockNodesExist := - fc.blockchain.blockIndexByHeight[uint64(fc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1)] - if !blockNodesExist { - return nil, errors.Errorf( - "Error fetching cutover block nodes before height %d", - fc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1, - ) - } - - // Fetch the block node with the committed status - for _, blockNode := range blockNodes { - if blockNode.Status == StatusBlockCommitted { - return blockNode, nil - } - } - - return nil, errors.Errorf( - "Error fetching committed cutover block node before height %d", - fc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1, - ) -} - -func (fc *FastHotStuffConsensus) isFinalPoWBlockHeight(blockHeight uint64) bool { - return blockHeight == uint64(fc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1) -} - // Finds the epoch entry for the block and returns the epoch number. 
func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { for _, epochEntry := range epochEntries { diff --git a/lib/pos_consensus_test.go b/lib/pos_consensus_test.go index 81c4b739c..9bdb136a0 100644 --- a/lib/pos_consensus_test.go +++ b/lib/pos_consensus_test.go @@ -27,6 +27,9 @@ func TestFastHotStuffConsensusHandleLocalVoteEvent(t *testing.T) { // Create a mock consensus fastHotStuffConsensus := FastHotStuffConsensus{ lock: sync.RWMutex{}, + blockchain: &Blockchain{ + params: &DeSoTestnetParams, + }, signer: &BLSSigner{ privateKey: blsPrivateKey, }, @@ -111,6 +114,7 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { blockIndexByHash: map[BlockHash]*BlockNode{ *blockHash: {Header: blockHeader}, }, + params: &DeSoTestnetParams, }, fastHotStuffEventLoop: &consensus.MockFastHotStuffEventLoop{ OnIsInitialized: alwaysReturnTrue, diff --git a/lib/server.go b/lib/server.go index d39dfb4a2..3ba60f686 100644 --- a/lib/server.go +++ b/lib/server.go @@ -222,8 +222,7 @@ func (srv *Server) GetMempool() Mempool { srv.blockchain.ChainLock.RLock() defer srv.blockchain.ChainLock.RUnlock() - tip := srv.blockchain.BlockTip() - if tip.Height >= srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + if srv.blockchain.IsPoSBlockHeight(uint64(srv.blockchain.BlockTip().Height)) { return srv.posMempool } return srv.mempool @@ -259,14 +258,17 @@ func (srv *Server) BroadcastTransaction(txn *MsgDeSoTxn) ([]*MsgDeSoTxn, error) func (srv *Server) VerifyAndBroadcastTransaction(txn *MsgDeSoTxn) error { // Grab the block tip and use it as the height for validation. - blockHeight := srv.blockchain.BlockTip().Height + srv.blockchain.ChainLock.RLock() + tipHeight := srv.blockchain.BlockTip().Height + srv.blockchain.ChainLock.RUnlock() + // Only add the txn to the PoW mempool if we are below the PoS cutover height. 
- if blockHeight < srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + if srv.blockchain.IsPoWBlockHeight(uint64(tipHeight)) { err := srv.blockchain.ValidateTransaction( txn, // blockHeight is set to the next block since that's where this // transaction will be mined at the earliest. - blockHeight+1, + tipHeight+1, true, srv.mempool) if err != nil { @@ -1719,11 +1721,11 @@ func (srv *Server) _relayTransactions() { allPeers := srv.cmgr.GetAllPeers() srv.blockchain.ChainLock.RLock() - blockHeight := srv.blockchain.BlockTip().Height + tipHeight := uint64(srv.blockchain.BlockTip().Height) srv.blockchain.ChainLock.RUnlock() // If we're on the PoW protocol, we need to wait for the mempool readOnlyView to regenerate. - if blockHeight < srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + if srv.blockchain.IsPoWBlockHeight(tipHeight) { glog.V(1).Infof("Server._relayTransactions: Waiting for mempool readOnlyView to regenerate") srv.mempool.BlockUntilReadOnlyViewRegenerated() glog.V(1).Infof("Server._relayTransactions: Mempool view has regenerated") @@ -1790,7 +1792,7 @@ func (srv *Server) _addNewTxn( } srv.blockchain.ChainLock.RLock() - blockHeight := srv.blockchain.BlockTip().Height + tipHeight := uint64(srv.blockchain.BlockTip().Height) srv.blockchain.ChainLock.RUnlock() // Only attempt to add the transaction to the PoW mempool if we're on the @@ -1798,7 +1800,7 @@ func (srv *Server) _addNewTxn( // txn validity checks to signal whether the txn has been added or not. The PoW // mempool has stricter txn validity checks than the PoW mempool, so this works // out conveniently, as it allows us to always add a txn to the PoS mempool. 
- if blockHeight < srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + if srv.blockchain.IsPoWBlockHeight(tipHeight) { _, err := srv.mempool.ProcessTransaction( txn, true /*allowUnconnectedTxn*/, rateLimit, peerID, verifySignatures) if err != nil { @@ -2132,8 +2134,8 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { // Exit early if the current tip height is below the PoS cutover height. We are ready to enable // the PoS validator FastHotStuffConsensus once we reach the final block of the PoW protocol. // This requires the block height check to use ProofOfStake2ConsensusCutoverBlockHeight-1 - tipHeight := srv.blockchain.blockTip().Height - if tipHeight < srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1 { + tipHeight := uint64(srv.blockchain.blockTip().Height) + if tipHeight < srv.blockchain.GetFinalPoWBlockHeight() { return } @@ -2182,8 +2184,8 @@ func (srv *Server) ProcessSingleTxnWithChainLock(pp *Peer, txn *MsgDeSoTxn) ([]* // txn validity checks to signal whether the txn has been added or not. The PoW // mempool has stricter txn validity checks than the PoS mempool, so this works // out conveniently, as it allows us to always add a txn to the PoS mempool. 
- blockHeight := srv.blockchain.blockTip().Height - if blockHeight < srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + tipHeight := uint64(srv.blockchain.blockTip().Height) + if srv.blockchain.IsPoWBlockHeight(tipHeight) { _, err := srv.mempool.ProcessTransaction( txn, true, /*allowUnconnectedTxn*/ From 7cd044e152fa43e560c4c938ac6717735758a0d2 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 24 Jan 2024 18:33:54 -0500 Subject: [PATCH 387/762] Use BIP39 Seed Phrase For PoSValidatorSeed (#946) * Use Seed Phrase For PoSValidatorSeed * Create FromSeed function for BLS library * Clean up command line flag description * Better error message --- bls/signature.go | 12 ++++++++++++ bls/signature_no_relic.go | 4 ++++ cmd/config.go | 6 +++--- cmd/node.go | 4 ++-- cmd/run.go | 6 ++---- lib/bls_keystore.go | 15 +++++++++++---- lib/bls_keystore_test.go | 38 ++++++++++++++++++++++++++++++++++++++ 7 files changed, 72 insertions(+), 13 deletions(-) create mode 100644 lib/bls_keystore_test.go diff --git a/bls/signature.go b/bls/signature.go index a86a934f2..673f24307 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -115,6 +115,18 @@ func (privateKey *PrivateKey) ToString() string { return privateKey.flowPrivateKey.String() } +func (privateKey *PrivateKey) FromSeed(seed []byte) (*PrivateKey, error) { + var err error + + if privateKey == nil { + return nil, nil + } + + // Generate a new private key from the seed. 
+ privateKey.flowPrivateKey, err = flowCrypto.GeneratePrivateKey(signingAlgorithm, seed) + return privateKey, err +} + func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { if privateKey == nil || privateKeyString == "" { return nil, nil diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go index 288714278..b3d472739 100644 --- a/bls/signature_no_relic.go +++ b/bls/signature_no_relic.go @@ -38,6 +38,10 @@ func (privateKey *PrivateKey) ToString() string { panic(BLSNoRelicError) } +func (privateKey *PrivateKey) FromSeed(seed []byte) (*PrivateKey, error) { + panic(BLSNoRelicError) +} + func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { panic(BLSNoRelicError) } diff --git a/cmd/config.go b/cmd/config.go index d1807dd62..fac31b6d8 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -18,7 +18,7 @@ type Config struct { TXIndex bool Regtest bool PostgresURI string - PosValidatorSeedHex string + PosValidatorSeed string // Peers ConnectIPs []string @@ -105,7 +105,7 @@ func LoadConfig() *Config { config.TXIndex = viper.GetBool("txindex") config.Regtest = viper.GetBool("regtest") config.PostgresURI = viper.GetString("postgres-uri") - config.PosValidatorSeedHex = viper.GetString("pos-validator-seed-hex") + config.PosValidatorSeed = viper.GetString("pos-validator-seed") config.HyperSync = viper.GetBool("hypersync") config.ForceChecksum = viper.GetBool("force-checksum") config.SyncType = lib.NodeSyncType(viper.GetString("sync-type")) @@ -179,7 +179,7 @@ func (config *Config) Print() { glog.Infof("Postgres URI: %s", config.PostgresURI) } - if config.PosValidatorSeedHex != "" { + if config.PosValidatorSeed != "" { glog.Infof(lib.CLog(lib.Blue, "PoS Validator: ON")) } diff --git a/cmd/node.go b/cmd/node.go index 219cbe843..f6f612a56 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -203,8 +203,8 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { eventManager := lib.NewEventManager() var 
blsKeystore *lib.BLSKeystore - if node.Config.PosValidatorSeedHex != "" { - blsKeystore, err = lib.NewBLSKeystore(node.Config.PosValidatorSeedHex) + if node.Config.PosValidatorSeed != "" { + blsKeystore, err = lib.NewBLSKeystore(node.Config.PosValidatorSeed) if err != nil { panic(err) } diff --git a/cmd/run.go b/cmd/run.go index 6729c375b..a9b8a5dcb 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -58,10 +58,8 @@ func SetupRunFlags(cmd *cobra.Command) { cmd.PersistentFlags().String("postgres-uri", "", "BETA: Use Postgres as the backing store for chain data."+ "When enabled, most data is stored in postgres although badger is still currently used for some state. Run your "+ "Postgres instance on the same machine as your node for optimal performance.") - cmd.PersistentFlags().String("pos-validator-seed-hex", "", "The private key of the Proof of Stake validator. "+ - "The private key should be passed as hex, optionally prefixed with a '0x', and map to a valid BLS12_381 private key. "+ - "The private key must be 32 bytes, or 64 characters, in length (excluding the '0x' prefix). Setting this flag automatically "+ - "makes the node run as a Proof of Stake Validator.") + cmd.PersistentFlags().String("pos-validator-seed", "", "A BIP39 seed phrase used to generate the private key of the "+ + "Proof of Stake validator. 
Setting this flag automatically makes the node run as a Proof of Stake Validator.") cmd.PersistentFlags().Uint32("max-sync-block-height", 0, "Max sync block height") // Hyper Sync diff --git a/lib/bls_keystore.go b/lib/bls_keystore.go index b1356da6c..92d8fc8e8 100644 --- a/lib/bls_keystore.go +++ b/lib/bls_keystore.go @@ -4,6 +4,7 @@ import ( "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/consensus" "github.com/pkg/errors" + "github.com/tyler-smith/go-bip39" ) // BLSSigner is a wrapper for the bls.PrivateKey type, which abstracts away the private key @@ -45,13 +46,19 @@ type BLSKeystore struct { signer *BLSSigner } -func NewBLSKeystore(seed string) (*BLSKeystore, error) { +func NewBLSKeystore(seedPhrase string) (*BLSKeystore, error) { + seedBytes, err := bip39.NewSeedWithErrorChecking(seedPhrase, "") + if err != nil { + return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating seed bytes from seed phrase") + } + privateKey, err := bls.NewPrivateKey() if err != nil { - return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating private key from seed: %s", seed) + return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating private key from seed phrase") } - if _, err = privateKey.FromString(seed); err != nil { - return nil, errors.Wrapf(err, "NewBLSKeystore: Problem retrieving private key from seed: %s", seed) + + if _, err = privateKey.FromSeed(seedBytes); err != nil { + return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating private key from seed phrase") } signer, err := NewBLSSigner(privateKey) diff --git a/lib/bls_keystore_test.go b/lib/bls_keystore_test.go new file mode 100644 index 000000000..a96389389 --- /dev/null +++ b/lib/bls_keystore_test.go @@ -0,0 +1,38 @@ +//go:build relic + +package lib + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewBLSKeystore(t *testing.T) { + // Test empty string + { + _, err := NewBLSKeystore("") + require.Error(t, err) + } + + // Test invalid 
seed phrase + { + _, err := NewBLSKeystore("invalid seed phrase") + require.Error(t, err) + } + + // Test valid 12 word seed phrase + { + keystore, err := NewBLSKeystore("suit three minute series empty virtual snake safe joke gold pear emerge") + require.NoError(t, err) + require.Equal(t, keystore.GetSigner().privateKey.ToString(), "0x2000bd5d14801e3a96f27a25ae4ebd26ec08a67c207b04c21703b40d80b8de71") + } + + // Test valid 24 word seed phrase + { + keystore, err := NewBLSKeystore("vapor educate wood post fiber proof cannon chunk luggage hedgehog merit dove network lemon scorpion job law more salt market excuse auction refuse apart") + require.NoError(t, err) + require.NoError(t, err) + require.Equal(t, keystore.GetSigner().privateKey.ToString(), "0x13b5febb384a3d3dec5c579724872607cd0ddb97adef592efaf144f6d25a70d7") + } +} From 78e8270ab1f996f2f3b089266c7ecaa53dc6cb6b Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 25 Jan 2024 10:33:08 -0500 Subject: [PATCH 388/762] Fix BlockNode Committed Check (#947) --- lib/pos_blockchain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 65792d7d1..290176658 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1776,7 +1776,7 @@ func (bc *Blockchain) GetFinalCommittedPoWBlock() (*BlockNode, error) { // Fetch the block node with the committed status for _, blockNode := range blockNodes { - if blockNode.Status == StatusBlockCommitted { + if blockNode.IsCommitted() { return blockNode, nil } } From 8584aa15d16ad5360cc931ce1cc137e6736c1ee0 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 25 Jan 2024 12:32:31 -0500 Subject: [PATCH 389/762] Initialize Events Channel on FastHotStuffEventLoopConstructor (#948) * Initialize Events Channel on FastHotStuffEventLoopConstructor * Fix merge conf --- consensus/event_loop.go | 4 +--- 1 file changed, 1 insertion(+), 
3 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 892bf56f0..3eba47067 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -17,6 +17,7 @@ func NewFastHotStuffEventLoop() *fastHotStuffEventLoop { status: eventLoopStatusNotInitialized, crankTimerTask: NewScheduledTask[uint64](), nextTimeoutTask: NewScheduledTask[uint64](), + Events: make(chan *FastHotStuffEvent, signalChannelBufferSize), } } @@ -80,9 +81,6 @@ func (fc *fastHotStuffEventLoop) Init( fc.votesSeenByBlockHash = make(map[BlockHashValue]map[string]VoteMessage) fc.timeoutsSeenByView = make(map[uint64]map[string]TimeoutMessage) - // Reset the external channel used for signaling - fc.Events = make(chan *FastHotStuffEvent, signalChannelBufferSize) - // Set the crank timer interval and timeout base duration fc.crankTimerInterval = crankTimerInterval fc.timeoutBaseDuration = timeoutBaseDuration From e99d5ed47595d48b0e1a59851aca2df487918135 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 25 Jan 2024 12:42:07 -0500 Subject: [PATCH 390/762] Pass PoS Mempool Init Params From Command Line Flags (#949) * Pass PoS Mempool Init Params From Command Line Flags * Add configs for past blocks * Address Nina's feedback --- cmd/config.go | 16 ++++++++++++---- cmd/node.go | 4 ++++ cmd/run.go | 10 ++++++++-- lib/server.go | 22 +++++++++++++++------- 4 files changed, 39 insertions(+), 13 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index fac31b6d8..2839868f8 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -18,7 +18,6 @@ type Config struct { TXIndex bool Regtest bool PostgresURI string - PosValidatorSeed string // Peers ConnectIPs []string @@ -44,9 +43,14 @@ type Config struct { DisableEncoderMigrations bool HypersyncMaxQueueSize uint32 + // PoS Validator + PosValidatorSeed string + // Mempool - MempoolBackupIntervalMillis uint64 - MaxMempoolPosSizeBytes uint64 + MempoolBackupIntervalMillis uint64 + 
MaxMempoolPosSizeBytes uint64 + MempoolFeeEstimatorNumMempoolBlocks uint64 + MempoolFeeEstimatorNumPastBlocks uint64 // Mining MinerPublicKeys []string @@ -105,7 +109,6 @@ func LoadConfig() *Config { config.TXIndex = viper.GetBool("txindex") config.Regtest = viper.GetBool("regtest") config.PostgresURI = viper.GetString("postgres-uri") - config.PosValidatorSeed = viper.GetString("pos-validator-seed") config.HyperSync = viper.GetBool("hypersync") config.ForceChecksum = viper.GetBool("force-checksum") config.SyncType = lib.NodeSyncType(viper.GetString("sync-type")) @@ -114,9 +117,14 @@ func LoadConfig() *Config { config.DisableEncoderMigrations = viper.GetBool("disable-encoder-migrations") config.HypersyncMaxQueueSize = viper.GetUint32("hypersync-max-queue-size") + // PoS Validator + config.PosValidatorSeed = viper.GetString("pos-validator-seed") + // Mempool config.MempoolBackupIntervalMillis = viper.GetUint64("mempool-backup-time-millis") config.MaxMempoolPosSizeBytes = viper.GetUint64("max-mempool-pos-size-bytes") + config.MempoolFeeEstimatorNumMempoolBlocks = viper.GetUint64("mempool-fee-estimator-num-mempool-blocks") + config.MempoolFeeEstimatorNumPastBlocks = viper.GetUint64("mempool-fee-estimator-num-past-blocks") // Peers config.ConnectIPs = viper.GetStringSlice("connect-ips") diff --git a/cmd/node.go b/cmd/node.go index f6f612a56..c075b0625 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -253,6 +253,10 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Config.StateChangeDir, node.Config.HypersyncMaxQueueSize, blsKeystore, + node.Config.MaxMempoolPosSizeBytes, + node.Config.MempoolBackupIntervalMillis, + node.Config.MempoolFeeEstimatorNumMempoolBlocks, + node.Config.MempoolFeeEstimatorNumPastBlocks, ) if err != nil { // shouldRestart can be true if, on the previous run, we did not finish flushing all ancestral diff --git a/cmd/run.go b/cmd/run.go index a9b8a5dcb..abf1c5d4d 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -58,8 +58,6 @@ func 
SetupRunFlags(cmd *cobra.Command) { cmd.PersistentFlags().String("postgres-uri", "", "BETA: Use Postgres as the backing store for chain data."+ "When enabled, most data is stored in postgres although badger is still currently used for some state. Run your "+ "Postgres instance on the same machine as your node for optimal performance.") - cmd.PersistentFlags().String("pos-validator-seed", "", "A BIP39 seed phrase used to generate the private key of the "+ - "Proof of Stake validator. Setting this flag automatically makes the node run as a Proof of Stake Validator.") cmd.PersistentFlags().Uint32("max-sync-block-height", 0, "Max sync block height") // Hyper Sync @@ -84,12 +82,20 @@ func SetupRunFlags(cmd *cobra.Command) { - hypersync: Will sync by downloading historical state, and will NOT download historical blocks. Can only be set if HyperSync is true.`) + // PoS Validator + cmd.PersistentFlags().String("pos-validator-seed", "", "A BIP39 seed phrase used to generate the private key of the "+ + "Proof of Stake validator. Setting this flag automatically makes the node run as a Proof of Stake Validator.") + // Mempool cmd.PersistentFlags().Uint64("mempool-backup-time-millis", 30000, "The frequency in milliseconds with which the mempool will persist its state to disk. "+ "The default value is 30 seconds, or 30,000 milliseconds.") cmd.PersistentFlags().Uint64("max-mempool-pos-size-bytes", 3000000000, "The maximum size of the PoS mempool in bytes. 
The default value is 3GB.") + cmd.PersistentFlags().Uint64("mempool-fee-estimator-num-mempool-blocks", 1, + "The number of future blocks to break the PoS mempool into when estimating txn fee for the next block.") + cmd.PersistentFlags().Uint64("mempool-fee-estimator-num-past-blocks", 50, + "The number of past blocks to use when estimating txn fee for the next block from the PoS mempool.") // Peers cmd.PersistentFlags().StringSlice("connect-ips", []string{}, diff --git a/lib/server.go b/lib/server.go index 3ba60f686..820f5a067 100644 --- a/lib/server.go +++ b/lib/server.go @@ -386,8 +386,16 @@ func NewServer( _forceChecksum bool, _stateChangeDir string, _hypersyncMaxQueueSize uint32, - _blsKeystore *BLSKeystore) ( - _srv *Server, _err error, _shouldRestart bool) { + _blsKeystore *BLSKeystore, + _maxMempoolPosSizeBytes uint64, + _mempoolBackupIntervalMillis uint64, + _mempoolFeeEstimatorNumMempoolBlocks uint64, + _mempoolFeeEstimatorNumPastBlocks uint64, +) ( + _srv *Server, + _err error, + _shouldRestart bool, +) { var err error @@ -486,7 +494,6 @@ func NewServer( _mempool := NewDeSoMempool(_chain, _rateLimitFeerateNanosPerKB, _minFeeRateNanosPerKB, _blockCypherAPIKey, _runReadOnlyUtxoViewUpdater, _dataDir, _mempoolDumpDir, false) - _posMempool := NewPosMempool() // Initialize the PoS mempool. We need to initialize a best-effort UtxoView based on the current // known state of the chain. This will all be overwritten as we process blocks later on. 
@@ -500,6 +507,7 @@ func NewServer( if latestBlock == nil { return nil, errors.New("NewServer: Problem getting latest block from chain"), true } + _posMempool := NewPosMempool() err = _posMempool.Init( _params, currentGlobalParamsEntry, @@ -507,11 +515,11 @@ func NewServer( uint64(_chain.blockTip().Height), _mempoolDumpDir, false, - 1024*1024*1024*3, // Max mempool Size = 3GB; TODO make this a flag - 60*1000, // Mempool dumper frequency = 60 seconds; TODO make this a flag - 1, // Fee estimator mempool blocks; TODO make this a flag + _maxMempoolPosSizeBytes, + _mempoolBackupIntervalMillis, + _mempoolFeeEstimatorNumMempoolBlocks, []*MsgDeSoBlock{latestBlock}, - 1, // Fee estimator past blocks; TODO make this a flag + _mempoolFeeEstimatorNumPastBlocks, ) if err != nil { return nil, errors.Wrapf(err, "NewServer: Problem initializing PoS mempool"), true From d049d2774e73e4e242a313e6ad5b99dbf502499f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 25 Jan 2024 12:47:55 -0500 Subject: [PATCH 391/762] Implement DeSoMempool.GetTransactions() Interface Function (#950) --- lib/legacy_mempool.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index bd28e273e..24deed649 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -22,6 +22,7 @@ import ( "github.com/dgraph-io/badger/v3" "github.com/btcsuite/btcd/btcec" + "github.com/deso-protocol/core/collections" "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" "github.com/pkg/errors" @@ -257,8 +258,11 @@ func (mp *DeSoMempool) GetTransaction(txnHash *BlockHash) *MempoolTransaction { } func (mp *DeSoMempool) GetTransactions() []*MempoolTransaction { - //TODO implement me - panic("implement me") + return collections.Transform( + mp.GetOrderedTransactions(), func(mempoolTx *MempoolTx) *MempoolTransaction { + return NewMempoolTransaction(mempoolTx.Tx, uint64(mempoolTx.Added.UnixMicro())) + 
}, + ) } func (mp *DeSoMempool) GetIterator() MempoolIterator { From a86f387e0983183183378c7582050de3315e5216 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 26 Jan 2024 11:45:10 -0500 Subject: [PATCH 392/762] Support Edge Case Where HighQC After PoS Cutover Uses Real Validator Set (#951) --- lib/pos_blockchain.go | 38 +++++++++++++++++++++++++++++++++++--- lib/pos_consensus.go | 27 ++------------------------- 2 files changed, 37 insertions(+), 28 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 290176658..b5716fe65 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1135,11 +1135,20 @@ func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validator // If the block is the first block after the PoS cutover and has a timeout aggregate QC, then the // highQC must be a synthetic QC. We need to override the validator set used to validate the high QC. if block.Header.Height == bc.GetFirstPoSBlockHeight() && !timeoutAggregateQC.isEmpty() { - posCutoverValidator, err := BuildProofOfStakeCutoverValidator() + genesisQC, err := bc.GetProofOfStakeGenesisQuorumCertificate() if err != nil { - return errors.Wrapf(err, "isValidPoSQuorumCertificate: Problem building PoS cutover validator") + return errors.Wrapf(err, "isValidPoSQuorumCertificate: Problem getting PoS genesis QC") + } + + // Only override the validator set if the high QC is the genesis QC. Otherwise, we should use the + // true validator set at the current epoch. + if consensus.IsEqualQC(genesisQC, timeoutAggregateQC.GetHighQC()) { + posCutoverValidator, err := BuildProofOfStakeCutoverValidator() + if err != nil { + return errors.Wrapf(err, "isValidPoSQuorumCertificate: Problem building PoS cutover validator") + } + voteQCValidators = []consensus.Validator{posCutoverValidator} } - voteQCValidators = []consensus.Validator{posCutoverValidator} } // Validate the timeout aggregate QC. 
@@ -1767,6 +1776,29 @@ func (bc *Blockchain) getMaxSequentialBlockHeightAfter(startingHeight uint64) ui return maxSequentialHeightWithBlocks } +func (bc *Blockchain) GetProofOfStakeGenesisQuorumCertificate() (*QuorumCertificate, error) { + finalPoWBlock, err := bc.GetFinalCommittedPoWBlock() + if err != nil { + return nil, err + } + + aggregatedSignature, signersList, err := BuildQuorumCertificateAsProofOfStakeCutoverValidator(finalPoWBlock.Header.Height, finalPoWBlock.Hash) + if err != nil { + return nil, err + } + + qc := &QuorumCertificate{ + BlockHash: finalPoWBlock.Hash, + ProposedInView: finalPoWBlock.Header.Height, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: aggregatedSignature, + SignersList: signersList, + }, + } + + return qc, nil +} + func (bc *Blockchain) GetFinalCommittedPoWBlock() (*BlockNode, error) { // Fetch the block node for the cutover block blockNodes, blockNodesExist := bc.blockIndexByHeight[bc.GetFinalPoWBlockHeight()] diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 499e40a50..57ac26cba 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -57,12 +57,7 @@ func (cc *FastHotStuffConsensus) Start() error { ) } - finalPoWBlock, err := cc.blockchain.GetFinalCommittedPoWBlock() - if err != nil { - return errors.Errorf("FastHotStuffConsensus.Start: Error fetching final PoW block: %v", err) - } - - genesisQC, err := cc.createGenesisQC(finalPoWBlock.Hash, uint64(finalPoWBlock.Height)) + genesisQC, err := cc.blockchain.GetProofOfStakeGenesisQuorumCertificate() if err != nil { return errors.Errorf("FastHotStuffConsensus.Start: Error creating PoS cutover genesis QC: %v", err) } @@ -422,7 +417,7 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo if cc.blockchain.IsFinalPoWBlockHeight(tipBlockNode.Header.Height) { // If the tip block is the final block of the PoW chain, then we can use the PoS chain's genesis block // as the highQC for it. 
- if timeoutMsg.HighQC, err = cc.createGenesisQC(tipBlockNode.Hash, tipBlockNode.Header.Height); err != nil { + if timeoutMsg.HighQC, err = cc.blockchain.GetProofOfStakeGenesisQuorumCertificate(); err != nil { return errors.Errorf("FastHotStuffConsensus.Start: Error creating PoS cutover genesis QC: %v", err) } } else { @@ -744,24 +739,6 @@ func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView) (*PosBlockPr return NewPosBlockProducer(fc.mempool, fc.params, blockProducerPublicKey, blockProducerBlsPublicKey), nil } -func (fc *FastHotStuffConsensus) createGenesisQC(blockHash *BlockHash, view uint64) (*QuorumCertificate, error) { - aggregatedSignature, signersList, err := BuildQuorumCertificateAsProofOfStakeCutoverValidator(view, blockHash) - if err != nil { - return nil, err - } - - qc := &QuorumCertificate{ - BlockHash: blockHash, - ProposedInView: view, - ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ - Signature: aggregatedSignature, - SignersList: signersList, - }, - } - - return qc, nil -} - // Finds the epoch entry for the block and returns the epoch number. 
func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { for _, epochEntry := range epochEntries { From 1c0eefec15ec5a7eed9bc61fef48386480c76395 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 26 Jan 2024 15:49:02 -0500 Subject: [PATCH 393/762] Fix IsProperlyFormedConstructTimeoutQCEvent Check (#952) --- consensus/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/utils.go b/consensus/utils.go index 173af3e10..d0c57253c 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -28,7 +28,7 @@ func IsProperlyFormedConstructTimeoutQCEvent(event *FastHotStuffEvent) bool { event.View > 0 && // The view the block was proposed in is non-zero event.TipBlockHeight > 0 && // Tip block height is non-zero !isInterfaceNil(event.TipBlockHash) && // Tip block hash is non-nil - !isProperlyFormedAggregateQC(event.AggregateQC) // The high QC is non-nil + isProperlyFormedAggregateQC(event.AggregateQC) // The high QC is properly formed } func IsProperlyFormedVoteEvent(event *FastHotStuffEvent) bool { From 4083d42616f2d638337bac6d7a7cba2cdf1cbda3 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 26 Jan 2024 15:58:28 -0500 Subject: [PATCH 394/762] Cleanup Minor Bugs in FastHotStuffConsensus (#953) --- lib/pos_consensus.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 57ac26cba..3c8d17966 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -23,6 +23,7 @@ func NewFastHotStuffConsensus(params *DeSoParams, blockchain *Blockchain, mempoo blockchain: blockchain, fastHotStuffEventLoop: consensus.NewFastHotStuffEventLoop(), mempool: mempool, + params: params, signer: signer, } } @@ -584,12 +585,6 @@ func (cc *FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( event *consensus.FastHotStuffEvent, 
proposerRandomSeedSignature *bls.Signature, ) (*MsgDeSoBlock, error) { - // We need to hold a lock on the blockchain to make sure that it is not mutated underneath as we are trying - // to construct a block based on the UtxoView. In practice, this lock ends up being a no-op but it guarantees - // thread-safety by making no assumptions about how other parts of the codebase operate outside of this struct. - cc.blockchain.ChainLock.RLock() - defer cc.blockchain.ChainLock.RUnlock() - // Get the parent block's hash parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) From c00cee05bb34bd2bf2cccf47ad510750eb683ac7 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 26 Jan 2024 16:27:12 -0500 Subject: [PATCH 395/762] Set BlockReward.PublicKey and TxnConnectStatusByIndexHash in PosBlockProducer (#956) --- lib/pos_block_producer.go | 27 +++++++++++++++++---------- lib/pos_block_producer_test.go | 4 +++- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 090542047..62d2dd0f7 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -1,11 +1,12 @@ package lib import ( + "math" + "time" + "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections/bitset" "github.com/pkg/errors" - "math" - "time" ) // BlockTemplate is a dummy type that is used to label incomplete blocks. The only purpose of this type is to make it @@ -34,11 +35,6 @@ func NewPosBlockProducer(mp Mempool, params *DeSoParams, proposerPublicKey *Publ } } -func (pbp *PosBlockProducer) SignBlock(blockTemplate BlockTemplate, signerPrivateKey *bls.PrivateKey) (*MsgDeSoBlock, error) { - // TODO - return nil, nil -} - // CreateUnsignedBlock constructs an unsigned, PoS block with Fee-Time ordered transactions. This function should be used // during happy path in consensus when a vote QC has been assembled. 
The block is unsigned, so to indicate its incompleteness, // the block is returned as a BlockTemplate. @@ -103,6 +99,9 @@ func (pbp *PosBlockProducer) createBlockTemplate(latestBlockView *UtxoView, newB block.Header.ProposerPublicKey = pbp.proposerPublicKey block.Header.ProposerVotingPublicKey = pbp.proposerVotingPublicKey block.Header.ProposerRandomSeedSignature = proposerRandomSeedSignature + + // Hash the TxnConnectStatusByIndex + block.Header.TxnConnectStatusByIndexHash = HashBitset(block.TxnConnectStatusByIndex) return block, nil } @@ -116,6 +115,7 @@ func (pbp *PosBlockProducer) createBlockWithoutHeader( blockRewardTxn := NewMessage(MsgTypeTxn).(*MsgDeSoTxn) blockRewardOutput := &DeSoOutput{} blockRewardOutput.AmountNanos = math.MaxUint64 + blockRewardOutput.PublicKey = pbp.proposerPublicKey.ToBytes() blockRewardTxn.TxOutputs = append(blockRewardTxn.TxOutputs, blockRewardOutput) blockRewardTxn.TxnMeta = &BlockRewardMetadataa{} blockRewardTxnSizeBytes, err := blockRewardTxn.ToBytes(true) @@ -142,9 +142,16 @@ func (pbp *PosBlockProducer) createBlockWithoutHeader( // getBlockTransactions is used to retrieve fee-time ordered transactions from the mempool. 
func (pbp *PosBlockProducer) getBlockTransactions( - latestBlockView *UtxoView, newBlockHeight uint64, newBlockTimestampNanoSecs uint64, - maxBlockSizeBytes uint64) (_txns []*MsgDeSoTxn, _txnConnectStatusByIndex *bitset.Bitset, - _maxUtilityFee uint64, _err error) { + latestBlockView *UtxoView, + newBlockHeight uint64, + newBlockTimestampNanoSecs uint64, + maxBlockSizeBytes uint64, +) ( + _txns []*MsgDeSoTxn, + _txnConnectStatusByIndex *bitset.Bitset, + _maxUtilityFee uint64, + _err error, +) { // Get Fee-Time ordered transactions from the mempool feeTimeTxns := pbp.mp.GetTransactions() diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 70c7e596d..3148629f2 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -76,6 +76,7 @@ func TestCreateBlockTemplate(t *testing.T) { require.Equal(blockTemplate.Header.ProposerPublicKey, m0Pk) require.Equal(blockTemplate.Header.ProposerVotingPublicKey, pub) require.True(blockTemplate.Header.ProposerRandomSeedSignature.Eq(seedSignature)) + require.Equal(blockTemplate.Header.TxnConnectStatusByIndexHash, HashBitset(blockTemplate.TxnConnectStatusByIndex)) } func TestCreateBlockWithoutHeader(t *testing.T) { @@ -87,6 +88,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { feeMax := uint64(2000) passingTransactions := 50 m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + blsPubKey, _ := _generateValidatorVotingPublicKeyAndSignature(t) params, db := _posTestBlockchainSetupWithBalances(t, 200000, 200000) params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 maxMempoolPosSizeBytes := uint64(3000000000) @@ -115,7 +117,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { _wrappedPosMempoolAddTransaction(t, mempool, txn) } - pbp := NewPosBlockProducer(mempool, params, nil, nil) + pbp := NewPosBlockProducer(mempool, params, NewPublicKey(m0PubBytes), blsPubKey) txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions( latestBlockView, 3, 0, 50000) 
require.NoError(err) From ef54af284382699aa983734be8902e63a9ceae3c Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Fri, 26 Jan 2024 16:36:24 -0500 Subject: [PATCH 396/762] Fix typo --- lib/pos_blockchain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index b5716fe65..6cffcaff2 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1813,7 +1813,7 @@ func (bc *Blockchain) GetFinalCommittedPoWBlock() (*BlockNode, error) { } } - return nil, errors.Errorf("Error fetching committed cutover block node before height %d", bc.GetFinalPoWBlockHeight()) + return nil, errors.Errorf("Error fetching committed cutover block node with height %d", bc.GetFinalPoWBlockHeight()) } func (bc *Blockchain) IsPoWBlockHeight(blockHeight uint64) bool { From 21ba1dd12f8feb401c216424a4f74d6f9514e9ca Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Sun, 28 Jan 2024 22:40:13 -0500 Subject: [PATCH 397/762] Add validator deduplication test (#942) --- .../connection_controller_routines_test.go | 193 ++++++++++++++++++ .../connection_controller_test.go | 12 +- .../connection_controller_utils_test.go | 52 +++-- integration_testing/tools.go | 2 +- lib/connection_controller.go | 125 +++++++++++- lib/connection_manager.go | 26 ++- lib/handshake_controller.go | 2 +- lib/network_connection.go | 8 +- lib/remote_node.go | 4 + lib/remote_node_manager.go | 22 +- lib/server.go | 9 +- 11 files changed, 395 insertions(+), 60 deletions(-) create mode 100644 integration_testing/connection_controller_routines_test.go diff --git a/integration_testing/connection_controller_routines_test.go b/integration_testing/connection_controller_routines_test.go new file mode 100644 index 000000000..582031bd8 --- /dev/null +++ b/integration_testing/connection_controller_routines_test.go @@ -0,0 +1,193 @@ +package integration_testing + +import ( + "fmt" + "github.com/deso-protocol/core/bls" + 
"github.com/deso-protocol/core/cmd" + "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/lib" + "github.com/stretchr/testify/require" + "testing" +) + +func TestConnectionControllerInitiatePersistentConnections(t *testing.T) { + require := require.New(t) + t.Cleanup(func() { + setGetActiveValidatorImpl(lib.BasicGetActiveValidators) + }) + + // NonValidator Node1 will set its --connect-ips to two non-validators node2 and node3, + // and two validators node4 and node5. + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") + blsPriv4, err := bls.NewPrivateKey() + require.NoError(err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + blsPriv5, err := bls.NewPrivateKey() + require.NoError(err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsPriv5) + + node2 = startNode(t, node2) + node3 = startNode(t, node3) + node4 = startNode(t, node4) + node5 = startNode(t, node5) + + setGetActiveValidatorImplWithValidatorNodes(t, node4, node5) + + node1.Config.ConnectIPs = []string{ + node2.Listeners[0].Addr().String(), + node3.Listeners[0].Addr().String(), + node4.Listeners[0].Addr().String(), + node5.Listeners[0].Addr().String(), + } + node1 = startNode(t, node1) + waitForNonValidatorOutboundConnection(t, node1, node2) + waitForNonValidatorOutboundConnection(t, node1, node3) + waitForValidatorConnection(t, node1, node4) + waitForValidatorConnection(t, node1, node5) + waitForValidatorConnection(t, node4, node5) + waitForCountRemoteNodeIndexer(t, node1, 4, 2, 2, 0) + waitForCountRemoteNodeIndexer(t, node2, 1, 0, 0, 1) + waitForCountRemoteNodeIndexer(t, node3, 1, 0, 0, 1) + waitForCountRemoteNodeIndexer(t, node4, 2, 1, 0, 1) + waitForCountRemoteNodeIndexer(t, node5, 2, 1, 0, 1) + node1.Stop() + t.Logf("Test #1 passed | Successfully run non-validator node1 with --connect-ips set to node2, node3, 
node4, node5") + + // Now try again with a validator node6, with connect-ips set to node2, node3, node4, node5. + blsPriv6, err := bls.NewPrivateKey() + require.NoError(err) + node6 := spawnValidatorNodeProtocol2(t, 18005, "node6", blsPriv6) + node6.Config.ConnectIPs = []string{ + node2.Listeners[0].Addr().String(), + node3.Listeners[0].Addr().String(), + node4.Listeners[0].Addr().String(), + node5.Listeners[0].Addr().String(), + } + node6 = startNode(t, node6) + setGetActiveValidatorImplWithValidatorNodes(t, node4, node5, node6) + waitForNonValidatorOutboundConnection(t, node6, node2) + waitForNonValidatorOutboundConnection(t, node6, node3) + waitForValidatorConnection(t, node6, node4) + waitForValidatorConnection(t, node6, node5) + waitForValidatorConnection(t, node4, node5) + waitForCountRemoteNodeIndexer(t, node6, 4, 2, 2, 0) + waitForCountRemoteNodeIndexer(t, node2, 1, 1, 0, 0) + waitForCountRemoteNodeIndexer(t, node3, 1, 1, 0, 0) + waitForCountRemoteNodeIndexer(t, node4, 2, 2, 0, 0) + waitForCountRemoteNodeIndexer(t, node5, 2, 2, 0, 0) + node2.Stop() + node3.Stop() + node4.Stop() + node5.Stop() + node6.Stop() + t.Logf("Test #2 passed | Successfully run validator node6 with --connect-ips set to node2, node3, node4, node5") +} + +func TestConnectionControllerNonValidatorCircularConnectIps(t *testing.T) { + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + + node1.Config.ConnectIPs = []string{"127.0.0.1:18001"} + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + + node1 = startNode(t, node1) + node2 = startNode(t, node2) + defer node1.Stop() + defer node2.Stop() + + waitForCountRemoteNodeIndexer(t, node1, 2, 0, 1, 1) + waitForCountRemoteNodeIndexer(t, node2, 2, 0, 1, 1) +} + +func setGetActiveValidatorImplWithValidatorNodes(t *testing.T, validators ...*cmd.Node) { + require := require.New(t) + + mapping := collections.NewConcurrentMap[bls.SerializedPublicKey, *lib.ValidatorEntry]() + 
for _, validator := range validators { + seed := validator.Config.PosValidatorSeed + if seed == "" { + t.Fatalf("Validator node %s does not have a PosValidatorSeed set", validator.Params.UserAgent) + } + keystore, err := lib.NewBLSKeystore(seed) + require.NoError(err) + mapping.Set(keystore.GetSigner().GetPublicKey().Serialize(), createSimpleValidatorEntry(validator)) + } + setGetActiveValidatorImpl(func() *collections.ConcurrentMap[bls.SerializedPublicKey, *lib.ValidatorEntry] { + return mapping + }) +} + +func setGetActiveValidatorImpl(mapping func() *collections.ConcurrentMap[bls.SerializedPublicKey, *lib.ValidatorEntry]) { + lib.GetActiveValidatorImpl = mapping +} + +func createSimpleValidatorEntry(node *cmd.Node) *lib.ValidatorEntry { + return &lib.ValidatorEntry{ + Domains: [][]byte{[]byte(node.Listeners[0].Addr().String())}, + } +} + +func waitForValidatorFullGraph(t *testing.T, validators ...*cmd.Node) { + for ii := 0; ii < len(validators); ii++ { + waitForValidatorConnectionOneWay(t, validators[ii], validators[ii+1:]...) 
+ } +} + +func waitForValidatorConnectionOneWay(t *testing.T, n *cmd.Node, validators ...*cmd.Node) { + if len(validators) == 0 { + return + } + for _, validator := range validators { + waitForValidatorConnection(t, n, validator) + } +} + +func waitForNonValidatorInboundXOROutboundConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) { + userAgentN1 := node1.Params.UserAgent + userAgentN2 := node2.Params.UserAgent + conditionInbound := conditionNonValidatorInboundConnectionDynamic(t, node1, node2, true) + conditionOutbound := conditionNonValidatorOutboundConnectionDynamic(t, node1, node2, true) + xorCondition := func() bool { + return conditionInbound() != conditionOutbound() + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to inbound XOR outbound non-validator Node (%s)", + userAgentN1, userAgentN2), xorCondition) +} + +func waitForMinNonValidatorCountRemoteNodeIndexer(t *testing.T, node *cmd.Node, allCount int, validatorCount int, + minNonValidatorOutboundCount int, minNonValidatorInboundCount int) { + + userAgent := node.Params.UserAgent + rnManager := node.Server.GetConnectionController().GetRemoteNodeManager() + condition := func() bool { + return checkRemoteNodeIndexerMinNonValidatorCount(rnManager, allCount, validatorCount, + minNonValidatorOutboundCount, minNonValidatorInboundCount) + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to have at least %d non-validator outbound nodes and %d non-validator inbound nodes", + userAgent, minNonValidatorOutboundCount, minNonValidatorInboundCount), condition) +} + +func checkRemoteNodeIndexerMinNonValidatorCount(manager *lib.RemoteNodeManager, allCount int, validatorCount int, + minNonValidatorOutboundCount int, minNonValidatorInboundCount int) bool { + + if allCount != manager.GetAllRemoteNodes().Count() { + return false + } + if validatorCount != manager.GetValidatorIndex().Count() { + return false + } + if minNonValidatorOutboundCount > 
manager.GetNonValidatorOutboundIndex().Count() { + return false + } + if minNonValidatorInboundCount > manager.GetNonValidatorInboundIndex().Count() { + return false + } + if allCount != manager.GetValidatorIndex().Count()+ + manager.GetNonValidatorOutboundIndex().Count()+ + manager.GetNonValidatorInboundIndex().Count() { + return false + } + return true +} diff --git a/integration_testing/connection_controller_test.go b/integration_testing/connection_controller_test.go index 01fb01046..58f4be33b 100644 --- a/integration_testing/connection_controller_test.go +++ b/integration_testing/connection_controller_test.go @@ -396,7 +396,8 @@ func TestConnectionControllerPersistentConnection(t *testing.T) { // Create a persistent connection from Node1 to Node2 cc := node1.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorPersistentOutboundConnection(node2.Listeners[0].Addr().String())) + _, err = cc.CreateNonValidatorPersistentOutboundConnection(node2.Listeners[0].Addr().String()) + require.NoError(err) waitForValidatorConnection(t, node1, node2) waitForNonValidatorInboundConnection(t, node2, node1) node2.Stop() @@ -408,7 +409,8 @@ func TestConnectionControllerPersistentConnection(t *testing.T) { node3 = startNode(t, node3) // Create a persistent connection from Node1 to Node3 - require.NoError(cc.CreateNonValidatorPersistentOutboundConnection(node3.Listeners[0].Addr().String())) + _, err = cc.CreateNonValidatorPersistentOutboundConnection(node3.Listeners[0].Addr().String()) + require.NoError(err) waitForNonValidatorOutboundConnection(t, node1, node3) waitForNonValidatorInboundConnection(t, node3, node1) node3.Stop() @@ -429,7 +431,8 @@ func TestConnectionControllerPersistentConnection(t *testing.T) { // Create a persistent connection from Node4 to Node5 cc = node4.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorPersistentOutboundConnection(node5.Listeners[0].Addr().String())) + _, err = 
cc.CreateNonValidatorPersistentOutboundConnection(node5.Listeners[0].Addr().String()) + require.NoError(err) waitForNonValidatorOutboundConnection(t, node4, node5) waitForValidatorConnection(t, node5, node4) node5.Stop() @@ -444,7 +447,8 @@ func TestConnectionControllerPersistentConnection(t *testing.T) { defer node6.Stop() // Create a persistent connection from Node4 to Node6 - require.NoError(cc.CreateNonValidatorPersistentOutboundConnection(node6.Listeners[0].Addr().String())) + _, err = cc.CreateNonValidatorPersistentOutboundConnection(node6.Listeners[0].Addr().String()) + require.NoError(err) waitForValidatorConnection(t, node4, node6) waitForValidatorConnection(t, node6, node4) t.Logf("Test #4 passed | Successfuly created persistent connection from validator Node4 to validator Node6") diff --git a/integration_testing/connection_controller_utils_test.go b/integration_testing/connection_controller_utils_test.go index 4d5594634..74a33b943 100644 --- a/integration_testing/connection_controller_utils_test.go +++ b/integration_testing/connection_controller_utils_test.go @@ -26,14 +26,24 @@ func waitForValidatorConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) } return true } - waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to outbound non-validator Node (%s)", userAgentN1, userAgentN2), n1ValidatedN2) + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to validator Node (%s)", userAgentN1, userAgentN2), n1ValidatedN2) } func waitForNonValidatorOutboundConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) { userAgentN1 := node1.Params.UserAgent + userAgentN2 := node2.Params.UserAgent + condition := conditionNonValidatorOutboundConnection(t, node1, node2) + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to outbound non-validator Node (%s)", userAgentN1, userAgentN2), condition) +} + +func conditionNonValidatorOutboundConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) func() bool { + return 
conditionNonValidatorOutboundConnectionDynamic(t, node1, node2, false) +} + +func conditionNonValidatorOutboundConnectionDynamic(t *testing.T, node1 *cmd.Node, node2 *cmd.Node, inactiveValidator bool) func() bool { userAgentN2 := node2.Params.UserAgent rnManagerN1 := node1.Server.GetConnectionController().GetRemoteNodeManager() - n1ValidatedN2 := func() bool { + return func() bool { if true != checkRemoteNodeIndexerUserAgent(rnManagerN1, userAgentN2, false, true, false) { return false } @@ -44,19 +54,29 @@ func waitForNonValidatorOutboundConnection(t *testing.T, node1 *cmd.Node, node2 if !rnFromN2.IsHandshakeCompleted() { return false } - if rnFromN2.GetValidatorPublicKey() != nil { - return false + // inactiveValidator should have the public key. + if inactiveValidator { + return rnFromN2.GetValidatorPublicKey() != nil } - return true + return rnFromN2.GetValidatorPublicKey() == nil } - waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to outbound non-validator Node (%s)", userAgentN1, userAgentN2), n1ValidatedN2) } func waitForNonValidatorInboundConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) { userAgentN1 := node1.Params.UserAgent + userAgentN2 := node2.Params.UserAgent + condition := conditionNonValidatorInboundConnection(t, node1, node2) + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to inbound non-validator Node (%s)", userAgentN1, userAgentN2), condition) +} + +func conditionNonValidatorInboundConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) func() bool { + return conditionNonValidatorInboundConnectionDynamic(t, node1, node2, false) +} + +func conditionNonValidatorInboundConnectionDynamic(t *testing.T, node1 *cmd.Node, node2 *cmd.Node, inactiveValidator bool) func() bool { userAgentN2 := node2.Params.UserAgent rnManagerN1 := node1.Server.GetConnectionController().GetRemoteNodeManager() - n1ValidatedN2 := func() bool { + return func() bool { if true != checkRemoteNodeIndexerUserAgent(rnManagerN1, 
userAgentN2, false, false, true) { return false } @@ -67,12 +87,12 @@ func waitForNonValidatorInboundConnection(t *testing.T, node1 *cmd.Node, node2 * if !rnFromN2.IsHandshakeCompleted() { return false } - if rnFromN2.GetValidatorPublicKey() != nil { - return false + // inactiveValidator should have the public key. + if inactiveValidator { + return rnFromN2.GetValidatorPublicKey() != nil } - return true + return rnFromN2.GetValidatorPublicKey() == nil } - waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to inbound non-validator Node (%s)", userAgentN1, userAgentN2), n1ValidatedN2) } func waitForEmptyRemoteNodeIndexer(t *testing.T, node1 *cmd.Node) { @@ -90,15 +110,15 @@ func waitForEmptyRemoteNodeIndexer(t *testing.T, node1 *cmd.Node) { func waitForCountRemoteNodeIndexer(t *testing.T, node1 *cmd.Node, allCount int, validatorCount int, nonValidatorOutboundCount int, nonValidatorInboundCount int) { - userAgentN1 := node1.Params.UserAgent - rnManagerN1 := node1.Server.GetConnectionController().GetRemoteNodeManager() - n1ValidatedN2 := func() bool { - if true != checkRemoteNodeIndexerCount(rnManagerN1, allCount, validatorCount, nonValidatorOutboundCount, nonValidatorInboundCount) { + userAgent := node1.Params.UserAgent + rnManager := node1.Server.GetConnectionController().GetRemoteNodeManager() + condition := func() bool { + if true != checkRemoteNodeIndexerCount(rnManager, allCount, validatorCount, nonValidatorOutboundCount, nonValidatorInboundCount) { return false } return true } - waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to have appropriate RemoteNodes counts", userAgentN1), n1ValidatedN2) + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to have appropriate RemoteNodes counts", userAgent), condition) } func checkRemoteNodeIndexerUserAgent(manager *lib.RemoteNodeManager, userAgent string, validator bool, diff --git a/integration_testing/tools.go b/integration_testing/tools.go index 2f97e942d..4db913136 100644 --- 
a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -69,7 +69,7 @@ func generateConfig(t *testing.T, port uint32, dataDir string, maxPeers uint32) config.MaxSyncBlockHeight = 0 config.ConnectIPs = []string{} config.PrivateMode = true - config.GlogV = 0 + config.GlogV = 2 config.GlogVmodule = "*bitcoin_manager*=0,*balance*=0,*view*=0,*frontend*=0,*peer*=0,*addr*=0,*network*=0,*utils*=0,*connection*=0,*main*=0,*server*=0,*mempool*=0,*miner*=0,*blockchain*=0" config.MaxInboundPeers = maxPeers config.TargetOutboundPeers = maxPeers diff --git a/lib/connection_controller.go b/lib/connection_controller.go index fef9fa887..18d423f46 100644 --- a/lib/connection_controller.go +++ b/lib/connection_controller.go @@ -5,12 +5,23 @@ import ( "github.com/btcsuite/btcd/addrmgr" "github.com/btcsuite/btcd/wire" "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections" "github.com/golang/glog" "github.com/pkg/errors" "net" "strconv" + "sync" + "time" ) +type GetActiveValidatorsFunc func() *collections.ConcurrentMap[bls.SerializedPublicKey, *ValidatorEntry] + +var GetActiveValidatorImpl GetActiveValidatorsFunc = BasicGetActiveValidators + +func BasicGetActiveValidators() *collections.ConcurrentMap[bls.SerializedPublicKey, *ValidatorEntry] { + return collections.NewConcurrentMap[bls.SerializedPublicKey, *ValidatorEntry]() +} + // ConnectionController is a structure that oversees all connections to remote nodes. It is responsible for kicking off // the initial connections a node makes to the network. It is also responsible for creating RemoteNodes from all // successful outbound and inbound connections. The ConnectionController also ensures that the node is connected to @@ -32,8 +43,12 @@ type ConnectionController struct { // it's aware of at random and provides it to us. AddrMgr *addrmgr.AddrManager - // When --connectips is set, we don't connect to anything from the addrmgr. 
+ // When --connect-ips is set, we don't connect to anything from the addrmgr. connectIps []string + // persistentIpToRemoteNodeIdsMap maps persistent IP addresses, like the --connect-ips, to the RemoteNodeIds of the + // corresponding RemoteNodes. This is used to ensure that we don't connect to the same persistent IP address twice. + // And that we can reconnect to the same persistent IP address if we disconnect from it. + persistentIpToRemoteNodeIdsMap map[string]RemoteNodeId // The target number of non-validator outbound remote nodes we want to have. We will disconnect remote nodes once // we've exceeded this number of outbound connections. @@ -44,11 +59,16 @@ type ConnectionController struct { // When true, only one connection per IP is allowed. Prevents eclipse attacks // among other things. limitOneInboundRemoteNodePerIP bool + + startGroup sync.WaitGroup + exitChan chan struct{} + exitGroup sync.WaitGroup } func NewConnectionController(params *DeSoParams, cmgr *ConnectionManager, handshakeController *HandshakeController, - rnManager *RemoteNodeManager, blsKeystore *BLSKeystore, addrMgr *addrmgr.AddrManager, targetNonValidatorOutboundRemoteNodes uint32, - targetNonValidatorInboundRemoteNodes uint32, limitOneInboundConnectionPerIP bool) *ConnectionController { + rnManager *RemoteNodeManager, blsKeystore *BLSKeystore, addrMgr *addrmgr.AddrManager, connectIps []string, + targetNonValidatorOutboundRemoteNodes uint32, targetNonValidatorInboundRemoteNodes uint32, + limitOneInboundConnectionPerIP bool) *ConnectionController { return &ConnectionController{ params: params, @@ -57,16 +77,45 @@ func NewConnectionController(params *DeSoParams, cmgr *ConnectionManager, handsh handshake: handshakeController, rnManager: rnManager, AddrMgr: addrMgr, + connectIps: connectIps, + persistentIpToRemoteNodeIdsMap: make(map[string]RemoteNodeId), targetNonValidatorOutboundRemoteNodes: targetNonValidatorOutboundRemoteNodes, targetNonValidatorInboundRemoteNodes: 
targetNonValidatorInboundRemoteNodes, limitOneInboundRemoteNodePerIP: limitOneInboundConnectionPerIP, + exitChan: make(chan struct{}), } } +func (cc *ConnectionController) Start() { + cc.startGroup.Add(1) + go cc.startPersistentConnector() + + cc.startGroup.Wait() + cc.exitGroup.Add(1) +} + +func (cc *ConnectionController) Stop() { + close(cc.exitChan) + cc.exitGroup.Wait() +} + func (cc *ConnectionController) GetRemoteNodeManager() *RemoteNodeManager { return cc.rnManager } +func (cc *ConnectionController) startPersistentConnector() { + cc.startGroup.Done() + for { + select { + case <-cc.exitChan: + cc.exitGroup.Done() + return + case <-time.After(1 * time.Second): + cc.refreshConnectIps() + } + } +} + // ########################### // ## Handlers (Peer, DeSoMessage) // ########################### @@ -77,6 +126,12 @@ func (cc *ConnectionController) _handleDonePeerMessage(origin *Peer, desoMsg DeS } cc.rnManager.DisconnectById(NewRemoteNodeId(origin.ID)) + // Update the persistentIpToRemoteNodeIdsMap. 
+ for ip, id := range cc.persistentIpToRemoteNodeIdsMap { + if id.ToUint64() == origin.ID { + delete(cc.persistentIpToRemoteNodeIdsMap, ip) + } + } } func (cc *ConnectionController) _handleAddrMessage(origin *Peer, desoMsg DeSoMessage) { @@ -114,7 +169,7 @@ func (cc *ConnectionController) _handleNewConnectionMessage(origin *Peer, desoMs remoteNode, err = cc.processInboundConnection(msg.Connection) if err != nil { glog.Errorf("ConnectionController.handleNewConnectionMessage: Problem handling inbound connection: %v", err) - msg.Connection.Close() + cc.cleanupFailedInboundConnection(remoteNode, msg.Connection) return } case ConnectionTypeOutbound: @@ -130,6 +185,13 @@ func (cc *ConnectionController) _handleNewConnectionMessage(origin *Peer, desoMs cc.handshake.InitiateHandshake(remoteNode) } +func (cc *ConnectionController) cleanupFailedInboundConnection(remoteNode *RemoteNode, connection Connection) { + if remoteNode != nil { + cc.rnManager.Disconnect(remoteNode) + } + connection.Close() +} + func (cc *ConnectionController) cleanupFailedOutboundConnection(connection Connection) { oc, ok := connection.(*outboundConnection) if !ok { @@ -141,13 +203,34 @@ func (cc *ConnectionController) cleanupFailedOutboundConnection(connection Conne if rn != nil { cc.rnManager.Disconnect(rn) } + oc.Close() cc.cmgr.RemoveAttemptedOutboundAddrs(oc.address) } // ########################### -// ## Connections +// ## Persistent Connections // ########################### +func (cc *ConnectionController) refreshConnectIps() { + // Connect to addresses passed via the --connect-ips flag. These addresses are persistent in the sense that if we + // disconnect from one, we will try to reconnect to the same one. 
+ for _, connectIp := range cc.connectIps { + if _, ok := cc.persistentIpToRemoteNodeIdsMap[connectIp]; ok { + continue + } + + glog.Infof("ConnectionController.initiatePersistentConnections: Connecting to connectIp: %v", connectIp) + id, err := cc.CreateNonValidatorPersistentOutboundConnection(connectIp) + if err != nil { + glog.Errorf("ConnectionController.initiatePersistentConnections: Problem connecting "+ + "to connectIp %v: %v", connectIp, err) + continue + } + + cc.persistentIpToRemoteNodeIdsMap[connectIp] = id + } +} + func (cc *ConnectionController) CreateValidatorConnection(ipStr string, publicKey *bls.PublicKey) error { netAddr, err := cc.ConvertIPStringToNetAddress(ipStr) if err != nil { @@ -156,10 +239,10 @@ func (cc *ConnectionController) CreateValidatorConnection(ipStr string, publicKe return cc.rnManager.CreateValidatorConnection(netAddr, publicKey) } -func (cc *ConnectionController) CreateNonValidatorPersistentOutboundConnection(ipStr string) error { +func (cc *ConnectionController) CreateNonValidatorPersistentOutboundConnection(ipStr string) (RemoteNodeId, error) { netAddr, err := cc.ConvertIPStringToNetAddress(ipStr) if err != nil { - return err + return 0, err } return cc.rnManager.CreateNonValidatorPersistentOutboundConnection(netAddr) } @@ -235,8 +318,8 @@ func (cc *ConnectionController) processOutboundConnection(conn Connection) (*Rem } if oc.failed { - return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Failed to connect to peer (%s)", - oc.address.IP.String()) + return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Failed to connect to peer (%s:%v)", + oc.address.IP.String(), oc.address.Port) } if !oc.isPersistent { @@ -263,11 +346,35 @@ func (cc *ConnectionController) processOutboundConnection(conn Connection) (*Rem "for addr: (%s)", oc.connection.RemoteAddr().String()) } + // Attach the connection before additional validation steps because it is already established. 
remoteNode, err := cc.rnManager.AttachOutboundConnection(oc.connection, na, oc.attemptId, oc.isPersistent) if remoteNode == nil || err != nil { return nil, errors.Wrapf(err, "ConnectionController.handleOutboundConnection: Problem calling rnManager.AttachOutboundConnection "+ "for addr: (%s)", oc.connection.RemoteAddr().String()) } + + // If this is a persistent remote node or a validator, we don't need to do any extra connection validation. + if remoteNode.IsPersistent() || remoteNode.GetValidatorPublicKey() != nil { + return remoteNode, nil + } + + // If we get here, it means we're dealing with a non-persistent or non-validator remote node. We perform additional + // connection validation. + + // If we already have enough outbound peers, then don't bother adding this one. + if cc.enoughNonValidatorOutboundConnections() { + return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Connected to maximum number of outbound "+ + "peers (%d)", cc.targetNonValidatorOutboundRemoteNodes) + } + + // If the group key overlaps with another peer we're already connected to then abort mission. We only connect to + // one peer per IP group in order to prevent Sybil attacks. + if cc.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { + return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Rejecting OUTBOUND NON-PERSISTENT "+ + "connection with redundant group key (%s).", addrmgr.GroupKey(oc.address)) + } + cc.cmgr.AddToGroupKey(na) + return remoteNode, nil } diff --git a/lib/connection_manager.go b/lib/connection_manager.go index 7c6f510ac..1ba4bf8f1 100644 --- a/lib/connection_manager.go +++ b/lib/connection_manager.go @@ -168,6 +168,10 @@ func NewConnectionManager( // Check if the address passed shares a group with any addresses already in our data structures. 
func (cmgr *ConnectionManager) IsFromRedundantOutboundIPAddress(na *wire.NetAddress) bool { groupKey := addrmgr.GroupKey(na) + // For the sake of running multiple nodes on the same machine, we allow localhost connections. + if groupKey == "local" { + return false + } cmgr.mtxOutboundConnIPGroups.Lock() numGroupsForKey := cmgr.outboundConnIPGroups[groupKey] @@ -185,7 +189,7 @@ func (cmgr *ConnectionManager) IsFromRedundantOutboundIPAddress(na *wire.NetAddr return true } -func (cmgr *ConnectionManager) addToGroupKey(na *wire.NetAddress) { +func (cmgr *ConnectionManager) AddToGroupKey(na *wire.NetAddress) { groupKey := addrmgr.GroupKey(na) cmgr.mtxOutboundConnIPGroups.Lock() @@ -429,7 +433,6 @@ func (cmgr *ConnectionManager) addPeer(pp *Peer) { // number of outbound peers. Also add the peer's address to // our map. if _, ok := peerList[pp.ID]; !ok { - cmgr.addToGroupKey(pp.netAddr) atomic.AddUint32(&cmgr.numOutboundPeers, 1) cmgr.mtxAddrsMaps.Lock() @@ -528,16 +531,6 @@ func (cmgr *ConnectionManager) _logOutboundPeerData() { numInboundPeers := int(atomic.LoadUint32(&cmgr.numInboundPeers)) numPersistentPeers := int(atomic.LoadUint32(&cmgr.numPersistentPeers)) glog.V(1).Infof("Num peers: OUTBOUND(%d) INBOUND(%d) PERSISTENT(%d)", numOutboundPeers, numInboundPeers, numPersistentPeers) - - cmgr.mtxOutboundConnIPGroups.Lock() - for _, vv := range cmgr.outboundConnIPGroups { - if vv != 0 && vv != 1 { - glog.V(1).Infof("_logOutboundPeerData: Peer group count != (0 or 1). "+ - "Is (%d) instead. 
This "+ - "should never happen.", vv) - } - } - cmgr.mtxOutboundConnIPGroups.Unlock() } func (cmgr *ConnectionManager) AddTimeSample(addrStr string, timeSample time.Time) { @@ -617,8 +610,13 @@ func (cmgr *ConnectionManager) Start() { select { case oc := <-cmgr.outboundConnectionChan: - glog.V(2).Infof("ConnectionManager.Start: Successfully established an outbound connection with "+ - "(addr= %v)", oc.connection.RemoteAddr()) + if oc.failed { + glog.V(2).Infof("ConnectionManager.Start: Failed to establish an outbound connection with "+ + "(id= %v)", oc.attemptId) + } else { + glog.V(2).Infof("ConnectionManager.Start: Successfully established an outbound connection with "+ + "(addr= %v)", oc.connection.RemoteAddr()) + } delete(cmgr.outboundConnectionAttempts, oc.attemptId) cmgr.serverMessageQueue <- &ServerMessage{ Peer: nil, diff --git a/lib/handshake_controller.go b/lib/handshake_controller.go index bde07745a..f355bad93 100644 --- a/lib/handshake_controller.go +++ b/lib/handshake_controller.go @@ -122,7 +122,7 @@ func (hc *HandshakeController) _handleVersionMessage(origin *Peer, desoMsg DeSoM if hc.usedNonces.Contains(msgNonce) { hc.usedNonces.Delete(msgNonce) glog.Errorf("HandshakeController._handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ - "nonce collision", origin.ID) + "nonce collision, nonce (%v)", origin.ID, msgNonce) hc.rnManager.Disconnect(rn) return } diff --git a/lib/network_connection.go b/lib/network_connection.go index eb6d4ab55..ffb0bb1f1 100644 --- a/lib/network_connection.go +++ b/lib/network_connection.go @@ -33,7 +33,9 @@ func (oc *outboundConnection) Close() { if oc.terminated { return } - oc.connection.Close() + if oc.connection != nil { + oc.connection.Close() + } oc.terminated = true } @@ -58,7 +60,9 @@ func (ic *inboundConnection) Close() { return } - ic.connection.Close() + if ic.connection != nil { + ic.connection.Close() + } ic.terminated = true } diff --git a/lib/remote_node.go b/lib/remote_node.go index 
f2d849a36..5ba651f3f 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -223,6 +223,10 @@ func (rn *RemoteNode) IsHandshakeCompleted() bool { return rn.connectionStatus == RemoteNodeStatus_HandshakeCompleted } +func (rn *RemoteNode) IsTerminated() bool { + return rn.connectionStatus == RemoteNodeStatus_Terminated +} + func (rn *RemoteNode) IsValidator() bool { if !rn.IsHandshakeCompleted() { return false diff --git a/lib/remote_node_manager.go b/lib/remote_node_manager.go index fb269d072..02bed8e3e 100644 --- a/lib/remote_node_manager.go +++ b/lib/remote_node_manager.go @@ -140,7 +140,7 @@ func (manager *RemoteNodeManager) CreateValidatorConnection(netAddr *wire.NetAdd } remoteNode := manager.newRemoteNode(publicKey) - if err := remoteNode.DialPersistentOutboundConnection(netAddr); err != nil { + if err := remoteNode.DialOutboundConnection(netAddr); err != nil { return errors.Wrapf(err, "RemoteNodeManager.CreateValidatorConnection: Problem calling DialPersistentOutboundConnection "+ "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) } @@ -149,19 +149,19 @@ func (manager *RemoteNodeManager) CreateValidatorConnection(netAddr *wire.NetAdd return nil } -func (manager *RemoteNodeManager) CreateNonValidatorPersistentOutboundConnection(netAddr *wire.NetAddress) error { +func (manager *RemoteNodeManager) CreateNonValidatorPersistentOutboundConnection(netAddr *wire.NetAddress) (RemoteNodeId, error) { if netAddr == nil { - return fmt.Errorf("RemoteNodeManager.CreateNonValidatorPersistentOutboundConnection: netAddr is nil") + return 0, fmt.Errorf("RemoteNodeManager.CreateNonValidatorPersistentOutboundConnection: netAddr is nil") } remoteNode := manager.newRemoteNode(nil) if err := remoteNode.DialPersistentOutboundConnection(netAddr); err != nil { - return errors.Wrapf(err, "RemoteNodeManager.CreateNonValidatorPersistentOutboundConnection: Problem calling DialPersistentOutboundConnection "+ + return 0, errors.Wrapf(err, 
"RemoteNodeManager.CreateNonValidatorPersistentOutboundConnection: Problem calling DialPersistentOutboundConnection "+ "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) } manager.setRemoteNode(remoteNode) manager.GetNonValidatorOutboundIndex().Set(remoteNode.GetId(), remoteNode) - return nil + return remoteNode.GetId(), nil } func (manager *RemoteNodeManager) CreateNonValidatorOutboundConnection(netAddr *wire.NetAddress) error { @@ -184,7 +184,7 @@ func (manager *RemoteNodeManager) AttachInboundConnection(conn net.Conn, remoteNode := manager.newRemoteNode(nil) if err := remoteNode.AttachInboundConnection(conn, na); err != nil { - return nil, errors.Wrapf(err, "RemoteNodeManager.AttachInboundConnection: Problem calling AttachInboundConnection "+ + return remoteNode, errors.Wrapf(err, "RemoteNodeManager.AttachInboundConnection: Problem calling AttachInboundConnection "+ "for addr: (%s)", conn.RemoteAddr().String()) } @@ -219,7 +219,7 @@ func (manager *RemoteNodeManager) setRemoteNode(rn *RemoteNode) { manager.mtx.Lock() defer manager.mtx.Unlock() - if rn == nil { + if rn == nil || rn.IsTerminated() { return } @@ -230,7 +230,7 @@ func (manager *RemoteNodeManager) SetNonValidator(rn *RemoteNode) { manager.mtx.Lock() defer manager.mtx.Unlock() - if rn == nil { + if rn == nil || rn.IsTerminated() { return } @@ -245,7 +245,7 @@ func (manager *RemoteNodeManager) SetValidator(remoteNode *RemoteNode) { manager.mtx.Lock() defer manager.mtx.Unlock() - if remoteNode == nil { + if remoteNode == nil || remoteNode.IsTerminated() { return } @@ -260,7 +260,7 @@ func (manager *RemoteNodeManager) UnsetValidator(remoteNode *RemoteNode) { manager.mtx.Lock() defer manager.mtx.Unlock() - if remoteNode == nil { + if remoteNode == nil || remoteNode.IsTerminated() { return } @@ -275,7 +275,7 @@ func (manager *RemoteNodeManager) UnsetNonValidator(rn *RemoteNode) { manager.mtx.Lock() defer manager.mtx.Unlock() - if rn == nil { + if rn == nil || rn.IsTerminated() { return } diff --git 
a/lib/server.go b/lib/server.go index d4c371955..a4fa28376 100644 --- a/lib/server.go +++ b/lib/server.go @@ -499,8 +499,8 @@ func NewServer( rnManager := NewRemoteNodeManager(srv, _chain, _cmgr, _blsKeystore, _params, _minFeeRateNanosPerKB, nodeServices) srv.handshakeController = NewHandshakeController(rnManager) - srv.connectionController = NewConnectionController(_params, _cmgr, srv.handshakeController, rnManager, - _blsKeystore, _desoAddrMgr, _targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP) + srv.connectionController = NewConnectionController(_params, _cmgr, srv.handshakeController, rnManager, _blsKeystore, + _desoAddrMgr, _connectIps, _targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP) if srv.stateChangeSyncer != nil { srv.stateChangeSyncer.BlockHeight = uint64(_chain.headerTip().Height) @@ -2547,6 +2547,9 @@ func (srv *Server) Stop() { srv.cmgr.Stop() glog.Infof(CLog(Yellow, "Server.Stop: Closed the ConnectionManger")) + srv.connectionController.Stop() + glog.Infof(CLog(Yellow, "Server.Stop: Closed the ConnectionController")) + // Stop the miner if we have one running. if srv.miner != nil { srv.miner.Stop() @@ -2629,6 +2632,8 @@ func (srv *Server) Start() { if srv.miner != nil && len(srv.miner.PublicKeys) > 0 { go srv.miner.Start() } + + srv.connectionController.Start() } // SyncPrefixProgress keeps track of sync progress on an individual prefix. It is used in From 59af74dff65f3572ad8ac4d45488d6e182e67239 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Sun, 28 Jan 2024 22:42:42 -0500 Subject: [PATCH 398/762] Revert "Code split" (#943) This reverts commit 831096ac1d3008233868ac8b8f0eca4cd2b9553e. 
--- .../connection_controller_routines_test.go | 217 ++++++++++++++++++ .../connection_controller_utils_test.go | 8 + lib/connection_controller.go | 103 ++++++++- 3 files changed, 326 insertions(+), 2 deletions(-) diff --git a/integration_testing/connection_controller_routines_test.go b/integration_testing/connection_controller_routines_test.go index 582031bd8..6eec830dd 100644 --- a/integration_testing/connection_controller_routines_test.go +++ b/integration_testing/connection_controller_routines_test.go @@ -85,6 +85,223 @@ func TestConnectionControllerInitiatePersistentConnections(t *testing.T) { t.Logf("Test #2 passed | Successfully run validator node6 with --connect-ips set to node2, node3, node4, node5") } +func TestConnectionControllerValidatorConnector(t *testing.T) { + require := require.New(t) + t.Cleanup(func() { + setGetActiveValidatorImpl(lib.BasicGetActiveValidators) + }) + + // Spawn 5 validators node1, node2, node3, node4, node5 and two non-validators node6 and node7. + // All the validators are initially in the validator set. And later, node1 and node2 will be removed from the + // validator set. Then, make node3 inactive, and node2 active again. Then, make all the validators inactive. + // Make node6, and node7 connect-ips to all the validators. 
+ + blsPriv1, err := bls.NewPrivateKey() + require.NoError(err) + node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsPriv1) + blsPriv2, err := bls.NewPrivateKey() + require.NoError(err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsPriv2) + blsPriv3, err := bls.NewPrivateKey() + require.NoError(err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsPriv3) + blsPriv4, err := bls.NewPrivateKey() + require.NoError(err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + blsPriv5, err := bls.NewPrivateKey() + require.NoError(err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsPriv5) + + node6 := spawnNonValidatorNodeProtocol2(t, 18005, "node6") + node7 := spawnNonValidatorNodeProtocol2(t, 18006, "node7") + + node1 = startNode(t, node1) + defer node1.Stop() + node2 = startNode(t, node2) + defer node2.Stop() + node3 = startNode(t, node3) + defer node3.Stop() + node4 = startNode(t, node4) + defer node4.Stop() + node5 = startNode(t, node5) + defer node5.Stop() + setGetActiveValidatorImplWithValidatorNodes(t, node1, node2, node3, node4, node5) + + node6.Config.ConnectIPs = []string{ + node1.Listeners[0].Addr().String(), + node2.Listeners[0].Addr().String(), + node3.Listeners[0].Addr().String(), + node4.Listeners[0].Addr().String(), + node5.Listeners[0].Addr().String(), + } + node7.Config.ConnectIPs = node6.Config.ConnectIPs + node6 = startNode(t, node6) + defer node6.Stop() + node7 = startNode(t, node7) + defer node7.Stop() + + // Verify full graph between active validators. + waitForValidatorFullGraph(t, node1, node2, node3, node4, node5) + // Verify connections of non-validators. + for _, nonValidator := range []*cmd.Node{node6, node7} { + waitForValidatorConnectionOneWay(t, nonValidator, node1, node2, node3, node4, node5) + } + // Verify connections of initial validators. 
+ for _, validator := range []*cmd.Node{node1, node2, node3, node4, node5} { + waitForNonValidatorInboundConnection(t, validator, node6) + waitForNonValidatorInboundConnection(t, validator, node7) + } + // Verify connection counts of active validators. + for _, validator := range []*cmd.Node{node1, node2, node3, node4, node5} { + waitForMinNonValidatorCountRemoteNodeIndexer(t, validator, 6, 4, 0, 2) + } + // NOOP Verify connection counts of inactive validators. + // Verify connection counts of non-validators. + waitForCountRemoteNodeIndexer(t, node6, 5, 5, 0, 0) + waitForCountRemoteNodeIndexer(t, node7, 5, 5, 0, 0) + t.Logf("Test #1 passed | Successfully run validators node1, node2, node3, node4, node5; non-validators node6, node7") + + // Remove node1 and node2 from the validator set. + setGetActiveValidatorImplWithValidatorNodes(t, node3, node4, node5) + // Verify full graph between active validators. + waitForValidatorFullGraph(t, node3, node4, node5) + // Verify connections of non-validators. + for _, nonValidator := range []*cmd.Node{node1, node2, node6, node7} { + waitForValidatorConnectionOneWay(t, nonValidator, node3, node4, node5) + } + // Verify connections of initial validators. + for _, validator := range []*cmd.Node{node1, node2, node3, node4, node5} { + waitForNonValidatorInboundConnection(t, validator, node6) + waitForNonValidatorInboundConnection(t, validator, node7) + } + // Verify connections of active validators. + for _, validator := range []*cmd.Node{node3, node4, node5} { + waitForNonValidatorInboundXOROutboundConnection(t, validator, node1) + waitForNonValidatorInboundXOROutboundConnection(t, validator, node2) + waitForMinNonValidatorCountRemoteNodeIndexer(t, validator, 6, 2, 0, 2) + } + // Verify connection counts of inactive validators. + for _, validator := range []*cmd.Node{node1, node2} { + waitForMinNonValidatorCountRemoteNodeIndexer(t, validator, 6, 3, 0, 2) + } + // Verify connection counts of non-validators. 
+ waitForCountRemoteNodeIndexer(t, node6, 5, 3, 2, 0) + waitForCountRemoteNodeIndexer(t, node7, 5, 3, 2, 0) + t.Logf("Test #2 passed | Successfully run validators node3, node4, node5; inactive-validators node1, node2; " + + "non-validators node6, node7") + + // Remove node3 from the validator set. Make node1 active again. + setGetActiveValidatorImplWithValidatorNodes(t, node1, node4, node5) + // Verify full graph between active validators. + waitForValidatorFullGraph(t, node1, node4, node5) + // Verify connections of non-validators. + for _, nonValidator := range []*cmd.Node{node2, node3, node6, node7} { + waitForValidatorConnectionOneWay(t, nonValidator, node1, node4, node5) + } + // Verify connections of initial validators. + for _, validator := range []*cmd.Node{node1, node2, node3, node4, node5} { + waitForNonValidatorInboundConnection(t, validator, node6) + waitForNonValidatorInboundConnection(t, validator, node7) + } + // Verify connections of active validators. + for _, validator := range []*cmd.Node{node1, node4, node5} { + waitForNonValidatorInboundXOROutboundConnection(t, validator, node2) + waitForNonValidatorInboundXOROutboundConnection(t, validator, node3) + waitForMinNonValidatorCountRemoteNodeIndexer(t, validator, 6, 2, 0, 2) + } + // Verify connection counts of inactive validators. + for _, validator := range []*cmd.Node{node2, node3} { + waitForMinNonValidatorCountRemoteNodeIndexer(t, validator, 6, 3, 0, 2) + } + // Verify connection counts of non-validators. + waitForCountRemoteNodeIndexer(t, node6, 5, 3, 2, 0) + waitForCountRemoteNodeIndexer(t, node7, 5, 3, 2, 0) + t.Logf("Test #3 passed | Successfully run validators node1, node4, node5; inactive validators node2, node3; " + + "non-validators node6, node7") + + // Make all validators inactive. + setGetActiveValidatorImplWithValidatorNodes(t) + // NOOP Verify full graph between active validators. + // NOOP Verify connections of non-validators. + // Verify connections of initial validators. 
+ for _, validator := range []*cmd.Node{node1, node2, node3, node4, node5} { + waitForNonValidatorInboundConnection(t, validator, node6) + waitForNonValidatorInboundConnection(t, validator, node7) + } + // NOOP Verify connections of active validators. + // Verify connections and counts of inactive validators. + inactiveValidators := []*cmd.Node{node1, node2, node3, node4, node5} + for ii := 0; ii < len(inactiveValidators); ii++ { + for jj := ii + 1; jj < len(inactiveValidators); jj++ { + waitForNonValidatorInboundXOROutboundConnection(t, inactiveValidators[ii], inactiveValidators[jj]) + } + } + inactiveValidatorsRev := []*cmd.Node{node5, node4, node3, node2, node1} + for ii := 0; ii < len(inactiveValidatorsRev); ii++ { + for jj := ii + 1; jj < len(inactiveValidatorsRev); jj++ { + waitForNonValidatorInboundXOROutboundConnection(t, inactiveValidatorsRev[ii], inactiveValidatorsRev[jj]) + } + } + for _, validator := range inactiveValidators { + waitForMinNonValidatorCountRemoteNodeIndexer(t, validator, 6, 0, 0, 2) + } + // Verify connection counts of non-validators. + waitForCountRemoteNodeIndexer(t, node6, 5, 0, 5, 0) + waitForCountRemoteNodeIndexer(t, node7, 5, 0, 5, 0) + t.Logf("Test #4 passed | Successfully run inactive validators node1, node2, node3, node4, node5; " + + "non-validators node6, node7") +} + +func TestConnectionControllerValidatorInboundDeduplication(t *testing.T) { + require := require.New(t) + t.Cleanup(func() { + setGetActiveValidatorImpl(lib.BasicGetActiveValidators) + }) + + // Spawn a non-validator node1, and two validators node2, node3. The validator nodes will have the same public key. + // Node2 and node3 will not initially be in the validator set. First, node2 will start an outbound connection to + // node1. We wait until the node2 is re-indexed as non-validator by node1, and then we make node3 open an outbound + // connection to node1. We wait until node3 is re-indexed as non-validator by node1. 
Then, we make node2 and node3 + // join the validator set (i.e. add one entry with the duplicated public key). Now, node1 should disconnect from + // either node2 or node3 because of duplicate public key. + + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + blsPriv2, err := bls.NewPrivateKey() + require.NoError(err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsPriv2) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsPriv2) + + node1 = startNode(t, node1) + defer node1.Stop() + node2 = startNode(t, node2) + defer node2.Stop() + node3 = startNode(t, node3) + defer node3.Stop() + + cc2 := node2.Server.GetConnectionController() + require.NoError(cc2.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + // First wait for node2 to be indexed as a validator by node1. + waitForValidatorConnection(t, node1, node2) + // Now wait for node2 to be re-indexed as a non-validator. + waitForNonValidatorInboundConnectionDynamic(t, node1, node2, true) + waitForNonValidatorOutboundConnection(t, node2, node1) + + // Now connect node3 to node1. + cc3 := node3.Server.GetConnectionController() + require.NoError(cc3.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + // First wait for node3 to be indexed as a validator by node1. + waitForValidatorConnection(t, node1, node3) + // Now wait for node3 to be re-indexed as a non-validator. + waitForNonValidatorInboundConnectionDynamic(t, node1, node3, true) + waitForNonValidatorOutboundConnection(t, node3, node1) + + // Now add node2 and node3 to the validator set. + setGetActiveValidatorImplWithValidatorNodes(t, node2) + // Now wait for node1 to disconnect from either node2 or node3. 
+ waitForCountRemoteNodeIndexer(t, node1, 1, 1, 0, 0) + t.Logf("Test #1 passed | Successfully run non-validator node1; validators node2, node3 with duplicate public key") +} + func TestConnectionControllerNonValidatorCircularConnectIps(t *testing.T) { node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") diff --git a/integration_testing/connection_controller_utils_test.go b/integration_testing/connection_controller_utils_test.go index 74a33b943..43cf418bc 100644 --- a/integration_testing/connection_controller_utils_test.go +++ b/integration_testing/connection_controller_utils_test.go @@ -69,6 +69,14 @@ func waitForNonValidatorInboundConnection(t *testing.T, node1 *cmd.Node, node2 * waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to inbound non-validator Node (%s)", userAgentN1, userAgentN2), condition) } +func waitForNonValidatorInboundConnectionDynamic(t *testing.T, node1 *cmd.Node, node2 *cmd.Node, inactiveValidator bool) { + userAgentN1 := node1.Params.UserAgent + userAgentN2 := node2.Params.UserAgent + condition := conditionNonValidatorInboundConnectionDynamic(t, node1, node2, inactiveValidator) + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to inbound non-validator Node (%s), "+ + "inactiveValidator (%v)", userAgentN1, userAgentN2, inactiveValidator), condition) +} + func conditionNonValidatorInboundConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) func() bool { return conditionNonValidatorInboundConnectionDynamic(t, node1, node2, false) } diff --git a/lib/connection_controller.go b/lib/connection_controller.go index 18d423f46..2f6cbb2db 100644 --- a/lib/connection_controller.go +++ b/lib/connection_controller.go @@ -87,11 +87,12 @@ func NewConnectionController(params *DeSoParams, cmgr *ConnectionManager, handsh } func (cc *ConnectionController) Start() { - cc.startGroup.Add(1) + cc.startGroup.Add(2) go cc.startPersistentConnector() + go 
cc.startValidatorConnector() cc.startGroup.Wait() - cc.exitGroup.Add(1) + cc.exitGroup.Add(2) } func (cc *ConnectionController) Stop() { @@ -116,6 +117,26 @@ func (cc *ConnectionController) startPersistentConnector() { } } +// startValidatorConnector is responsible for ensuring that the node is connected to all active validators. It does +// this in two steps. First, it looks through the already established connections and checks if any of these connections +// are validators. If they are, it adds them to the validator index. It also checks if any of the existing validators +// are no longer active and removes them from the validator index. Second, it checks if any of the active validators +// are missing from the validator index. If they are, it attempts to connect to them. +func (cc *ConnectionController) startValidatorConnector() { + cc.startGroup.Done() + for { + select { + case <-cc.exitChan: + cc.exitGroup.Done() + return + case <-time.After(1 * time.Second): + activeValidatorsMap := GetActiveValidatorImpl() + cc.refreshValidatorIndex(activeValidatorsMap) + cc.connectValidators(activeValidatorsMap) + } + } +} + // ########################### // ## Handlers (Peer, DeSoMessage) // ########################### @@ -231,6 +252,84 @@ func (cc *ConnectionController) refreshConnectIps() { } } +// ########################### +// ## Validator Connections +// ########################### + +// refreshValidatorIndex re-indexes validators based on the activeValidatorsMap. It is called periodically by the +// validator connector. +func (cc *ConnectionController) refreshValidatorIndex(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, *ValidatorEntry]) { + // De-index inactive validators. We skip any checks regarding RemoteNodes connection status, nor do we verify whether + // de-indexing the validator would result in an excess number of outbound/inbound connections. Any excess connections + // will be cleaned up by the peer connector. 
+ validatorRemoteNodeMap := cc.rnManager.GetValidatorIndex().Copy() + for pk, rn := range validatorRemoteNodeMap { + // If the validator is no longer active, de-index it. + if _, ok := activeValidatorsMap.Get(pk); !ok { + cc.rnManager.SetNonValidator(rn) + cc.rnManager.UnsetValidator(rn) + } + } + + // Look for validators in our existing outbound / inbound connections. + allNonValidators := cc.rnManager.GetAllNonValidators() + for _, rn := range allNonValidators { + // It is possible for a RemoteNode to be in the non-validator indices, and still have a public key. This can happen + // if the RemoteNode advertised support for the SFValidator service flag during handshake, and provided us + // with a public key, and a corresponding proof of possession signature. + pk := rn.GetValidatorPublicKey() + if pk == nil { + continue + } + // It is possible that through unlikely concurrence, and malevolence, two non-validators happen to have the same + // public key, which goes undetected during handshake. To prevent this from affecting the indexing of the validator + // set, we check that the non-validator's public key is not already present in the validator index. + if _, ok := cc.rnManager.GetValidatorIndex().Get(pk.Serialize()); ok { + cc.rnManager.Disconnect(rn) + continue + } + + // If the RemoteNode turns out to be in the validator set, index it. + if _, ok := activeValidatorsMap.Get(pk.Serialize()); ok { + cc.rnManager.SetValidator(rn) + cc.rnManager.UnsetNonValidator(rn) + } + } +} + +// connectValidators attempts to connect to all active validators that are not already connected. It is called +// periodically by the validator connector. +func (cc *ConnectionController) connectValidators(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, *ValidatorEntry]) { + // Look through the active validators and connect to any that we're not already connected to. 
+ if cc.blsKeystore == nil { + return + } + + validators := activeValidatorsMap.Copy() + for pk, validator := range validators { + _, exists := cc.rnManager.GetValidatorIndex().Get(pk) + // If we're already connected to the validator, continue. + if exists { + continue + } + if cc.blsKeystore.GetSigner().GetPublicKey().Serialize() == pk { + continue + } + + publicKey, err := pk.Deserialize() + if err != nil { + continue + } + + // For now, we only dial the first domain in the validator's domain list. + address := string(validator.Domains[0]) + if err := cc.CreateValidatorConnection(address, publicKey); err != nil { + glog.V(2).Infof("ConnectionController.connectValidators: Problem connecting to validator %v: %v", address, err) + continue + } + } +} + func (cc *ConnectionController) CreateValidatorConnection(ipStr string, publicKey *bls.PublicKey) error { netAddr, err := cc.ConvertIPStringToNetAddress(ipStr) if err != nil { From 390156459693f82a8f42757c1931cd599f0db1dc Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Sun, 28 Jan 2024 22:43:59 -0500 Subject: [PATCH 399/762] Revert "Code split" (#944) This reverts commit 0604b6d3fc155177a2bb295e6635ed21b20dd947. 
--- .../connection_controller_routines_test.go | 43 +++++ lib/block_view_types.go | 2 +- lib/connection_controller.go | 150 +++++++++++++++++- 3 files changed, 192 insertions(+), 3 deletions(-) diff --git a/integration_testing/connection_controller_routines_test.go b/integration_testing/connection_controller_routines_test.go index 6eec830dd..95f289aba 100644 --- a/integration_testing/connection_controller_routines_test.go +++ b/integration_testing/connection_controller_routines_test.go @@ -302,6 +302,49 @@ func TestConnectionControllerValidatorInboundDeduplication(t *testing.T) { t.Logf("Test #1 passed | Successfully run non-validator node1; validators node2, node3 with duplicate public key") } +func TestConnectionControllerNonValidatorConnector(t *testing.T) { + require := require.New(t) + + // Spawn 6 non-validators node1, node2, node3, node4, node5, node6. Set node1's targetOutboundPeers to 3. Then make + // node1 create persistent outbound connections to node2, node3, and node4, as well as non-validator connections to + // node5 and node6. 
+ node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Config.TargetOutboundPeers = 0 + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") + node4 := spawnNonValidatorNodeProtocol2(t, 18003, "node4") + node5 := spawnNonValidatorNodeProtocol2(t, 18004, "node5") + node6 := spawnNonValidatorNodeProtocol2(t, 18005, "node6") + + node2 = startNode(t, node2) + defer node2.Stop() + node3 = startNode(t, node3) + defer node3.Stop() + node4 = startNode(t, node4) + defer node4.Stop() + node5 = startNode(t, node5) + defer node5.Stop() + node6 = startNode(t, node6) + defer node6.Stop() + + node1.Config.ConnectIPs = []string{ + node2.Listeners[0].Addr().String(), + node3.Listeners[0].Addr().String(), + node4.Listeners[0].Addr().String(), + } + node1 = startNode(t, node1) + defer node1.Stop() + + cc := node1.Server.GetConnectionController() + require.NoError(cc.CreateNonValidatorOutboundConnection(node5.Listeners[0].Addr().String())) + require.NoError(cc.CreateNonValidatorOutboundConnection(node6.Listeners[0].Addr().String())) + + waitForCountRemoteNodeIndexer(t, node1, 3, 0, 3, 0) + waitForNonValidatorOutboundConnection(t, node1, node2) + waitForNonValidatorOutboundConnection(t, node1, node3) + waitForNonValidatorOutboundConnection(t, node1, node4) +} + func TestConnectionControllerNonValidatorCircularConnectIps(t *testing.T) { node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") diff --git a/lib/block_view_types.go b/lib/block_view_types.go index b216804df..073eb4c2f 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -1331,7 +1331,7 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada data = append(data, EncodeToBytes(blockHeight, op.PrevLockupYieldCurvePoint, skipMetadata...)...) 
data = append(data, byte(op.PrevLockupTransferRestriction)) - // PrevSenderLockedBalanceEntry, PrevReceiverLockedBalanceEntry + // PrevSenderLockedBalanceEntry, PrevReceiverL*ockedBalanceEntry data = append(data, EncodeToBytes(blockHeight, op.PrevSenderLockedBalanceEntry, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, op.PrevReceiverLockedBalanceEntry, skipMetadata...)...) diff --git a/lib/connection_controller.go b/lib/connection_controller.go index 2f6cbb2db..4bda85507 100644 --- a/lib/connection_controller.go +++ b/lib/connection_controller.go @@ -87,12 +87,13 @@ func NewConnectionController(params *DeSoParams, cmgr *ConnectionManager, handsh } func (cc *ConnectionController) Start() { - cc.startGroup.Add(2) + cc.startGroup.Add(3) go cc.startPersistentConnector() go cc.startValidatorConnector() + go cc.startNonValidatorConnector() cc.startGroup.Wait() - cc.exitGroup.Add(2) + cc.exitGroup.Add(3) } func (cc *ConnectionController) Stop() { @@ -137,6 +138,26 @@ func (cc *ConnectionController) startValidatorConnector() { } } +// startNonValidatorConnector is responsible for ensuring that the node is connected to the target number of outbound +// and inbound remote nodes. To do this, it periodically checks the number of outbound and inbound remote nodes, and +// if the number is above the target number, it disconnects the excess remote nodes. If the number is below the target +// number, it attempts to connect to new remote nodes. 
+func (cc *ConnectionController) startNonValidatorConnector() { + cc.startGroup.Done() + + for { + select { + case <-cc.exitChan: + cc.exitGroup.Done() + return + case <-time.After(1 * time.Second): + cc.refreshNonValidatorOutboundIndex() + cc.refreshNonValidatorInboundIndex() + cc.connectNonValidators() + } + } +} + // ########################### // ## Handlers (Peer, DeSoMessage) // ########################### @@ -330,6 +351,131 @@ func (cc *ConnectionController) connectValidators(activeValidatorsMap *collectio } } +// ########################### +// ## NonValidator Connections +// ########################### + +// refreshNonValidatorOutboundIndex is called periodically by the peer connector. It is responsible for disconnecting excess +// outbound remote nodes. +func (cc *ConnectionController) refreshNonValidatorOutboundIndex() { + // There are three categories of outbound remote nodes: attempted, connected, and persistent. All of these + // remote nodes are stored in the same non-validator outbound index. We want to disconnect excess remote nodes that + // are not persistent, starting with the attempted nodes first. + + // First let's run a quick check to see if the number of our non-validator remote nodes exceeds our target. Note that + // this number will include the persistent nodes. + numOutboundRemoteNodes := uint32(cc.rnManager.GetNonValidatorOutboundIndex().Count()) + if numOutboundRemoteNodes <= cc.targetNonValidatorOutboundRemoteNodes { + return + } + + // If we get here, it means that we should potentially disconnect some remote nodes. Let's first separate the + // attempted and connected remote nodes, ignoring the persistent ones. + allOutboundRemoteNodes := cc.rnManager.GetNonValidatorOutboundIndex().GetAll() + var attemptedOutboundRemoteNodes, connectedOutboundRemoteNodes []*RemoteNode + for _, rn := range allOutboundRemoteNodes { + if rn.IsPersistent() { + // We do nothing for persistent remote nodes. 
+ continue + } else if rn.IsHandshakeCompleted() { + connectedOutboundRemoteNodes = append(connectedOutboundRemoteNodes, rn) + } else { + attemptedOutboundRemoteNodes = append(attemptedOutboundRemoteNodes, rn) + } + } + + // Having separated the attempted and connected remote nodes, we can now find the actual number of attempted and + // connected remote nodes. We can then find out how many remote nodes we need to disconnect. + numOutboundRemoteNodes = uint32(len(attemptedOutboundRemoteNodes) + len(connectedOutboundRemoteNodes)) + excessiveOutboundRemoteNodes := uint32(0) + if numOutboundRemoteNodes > cc.targetNonValidatorOutboundRemoteNodes { + excessiveOutboundRemoteNodes = numOutboundRemoteNodes - cc.targetNonValidatorOutboundRemoteNodes + } + + // First disconnect the attempted remote nodes. + for _, rn := range attemptedOutboundRemoteNodes { + if excessiveOutboundRemoteNodes == 0 { + break + } + cc.rnManager.Disconnect(rn) + excessiveOutboundRemoteNodes-- + } + // Now disconnect the connected remote nodes, if we still have too many remote nodes. + for _, rn := range connectedOutboundRemoteNodes { + if excessiveOutboundRemoteNodes == 0 { + break + } + cc.rnManager.Disconnect(rn) + excessiveOutboundRemoteNodes-- + } +} + +// refreshNonValidatorInboundIndex is called periodically by the non-validator connector. It is responsible for +// disconnecting excess inbound remote nodes. +func (cc *ConnectionController) refreshNonValidatorInboundIndex() { + // First let's check if we have an excess number of inbound remote nodes. If we do, we'll disconnect some of them. + numConnectedInboundRemoteNodes := uint32(cc.rnManager.GetNonValidatorInboundIndex().Count()) + excessiveInboundRemoteNodes := uint32(0) + if numConnectedInboundRemoteNodes > cc.targetNonValidatorInboundRemoteNodes { + excessiveInboundRemoteNodes = numConnectedInboundRemoteNodes - cc.targetNonValidatorInboundRemoteNodes + } + // Disconnect random inbound non-validators if we have too many of them. 
+ inboundRemoteNodes := cc.rnManager.GetNonValidatorInboundIndex().GetAll() + for _, rn := range inboundRemoteNodes { + if excessiveInboundRemoteNodes == 0 { + break + } + cc.rnManager.Disconnect(rn) + excessiveInboundRemoteNodes-- + } +} + +func (cc *ConnectionController) connectNonValidators() { + numOutboundPeers := uint32(cc.rnManager.GetNonValidatorOutboundIndex().Count()) + + remainingOutboundPeers := uint32(0) + if numOutboundPeers < cc.targetNonValidatorOutboundRemoteNodes { + remainingOutboundPeers = cc.targetNonValidatorOutboundRemoteNodes - numOutboundPeers + } + for ii := uint32(0); ii < remainingOutboundPeers; ii++ { + addr := cc.getRandomUnconnectedAddress() + if addr == nil { + break + } + cc.AddrMgr.Attempt(addr) + if err := cc.rnManager.CreateNonValidatorOutboundConnection(addr); err != nil { + glog.V(2).Infof("ConnectionController.connectNonValidators: Problem connecting to addr %v: %v", addr, err) + } + } +} + +func (cc *ConnectionController) getRandomUnconnectedAddress() *wire.NetAddress { + for tries := 0; tries < 100; tries++ { + addr := cc.AddrMgr.GetAddress() + if addr == nil { + break + } + + if cc.cmgr.IsConnectedOutboundIpAddress(addr.NetAddress()) { + continue + } + + if cc.cmgr.IsAttemptedOutboundIpAddress(addr.NetAddress()) { + continue + } + + // We can only have one outbound address per /16. This is similar to + // Bitcoin and we do it to prevent Sybil attacks. 
+ if cc.cmgr.IsFromRedundantOutboundIPAddress(addr.NetAddress()) { + continue + } + + return addr.NetAddress() + } + + return nil +} + func (cc *ConnectionController) CreateValidatorConnection(ipStr string, publicKey *bls.PublicKey) error { netAddr, err := cc.ConvertIPStringToNetAddress(ipStr) if err != nil { From a44e4d0be926fec4544b3e4ee86efd9bdd97def4 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Mon, 29 Jan 2024 12:23:40 -0500 Subject: [PATCH 400/762] Cleanup for BLSKeyStore --- lib/bls_keystore.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/lib/bls_keystore.go b/lib/bls_keystore.go index 92d8fc8e8..64778d835 100644 --- a/lib/bls_keystore.go +++ b/lib/bls_keystore.go @@ -16,9 +16,9 @@ import ( // - PoS Validator Connection Handshakes // - PoS Random Seed Signature // -// TODO: We will likely need to associate individual op-codes for each message type that can be signed, -// so that there is no risk of signature collisions between different message types. Ex: the payload -// signed per message type must be made up of the following tuples: +// We need to associate individual op-codes for each message type that can be signed, so that there is no risk +// of signature collisions between different message types. 
The payload signed per message type must be made +// up of the following tuples: // - Validator Vote: (0x01, view uint64, blockHash consensus.BlockHash) // - Validator Timeout: (0x02, view uint64, highQCView uint64) // - PoS Block Proposal: (0x03, view uint64, blockHash consensus.BlockHash) @@ -116,8 +116,6 @@ func (signer *BLSSigner) SignRandomSeedHash(randomSeedHash *RandomSeedHash) (*bl return SignRandomSeedHash(signer.privateKey, randomSeedHash) } -// TODO: Add signing function for PoS blocks - func (signer *BLSSigner) SignPoSValidatorHandshake(nonceSent uint64, nonceReceived uint64, tstampMicro uint64) (*bls.Signature, error) { // FIXME payload := []byte{} @@ -143,11 +141,13 @@ func BLSVerifyValidatorTimeout(view uint64, highQCView uint64, signature *bls.Si return _blsVerify(BLSSignatureOpCodeValidatorTimeout, payload[:], signature, publicKey) } -// TODO: Add Verifier function for PoS blocks - -func BLSVerifyPoSValidatorHandshake(nonceSent uint64, nonceReceived uint64, tstampMicro uint64, - signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { - +func BLSVerifyPoSValidatorHandshake( + nonceSent uint64, + nonceReceived uint64, + tstampMicro uint64, + signature *bls.Signature, + publicKey *bls.PublicKey, +) (bool, error) { // FIXME payload := []byte{} return _blsVerify(BLSSignatureOpCodePoSValidatorHandshake, payload[:], signature, publicKey) From 5f8d08a54722b51e808f14094564ce19ecb462ae Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 29 Jan 2024 14:10:11 -0500 Subject: [PATCH 401/762] Move BLSKeystore implementation to feature/proof-of-stake (#961) --- bls/signature.go | 12 +++ bls/signature_no_relic.go | 4 + lib/bls_keystore.go | 154 ++++++++++++++++++++++++++++++++++++++ lib/bls_keystore_test.go | 38 ++++++++++ 4 files changed, 208 insertions(+) create mode 100644 lib/bls_keystore.go create mode 100644 lib/bls_keystore_test.go diff --git a/bls/signature.go b/bls/signature.go index 
a86a934f2..673f24307 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -115,6 +115,18 @@ func (privateKey *PrivateKey) ToString() string { return privateKey.flowPrivateKey.String() } +func (privateKey *PrivateKey) FromSeed(seed []byte) (*PrivateKey, error) { + var err error + + if privateKey == nil { + return nil, nil + } + + // Generate a new private key from the seed. + privateKey.flowPrivateKey, err = flowCrypto.GeneratePrivateKey(signingAlgorithm, seed) + return privateKey, err +} + func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { if privateKey == nil || privateKeyString == "" { return nil, nil diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go index 288714278..b3d472739 100644 --- a/bls/signature_no_relic.go +++ b/bls/signature_no_relic.go @@ -38,6 +38,10 @@ func (privateKey *PrivateKey) ToString() string { panic(BLSNoRelicError) } +func (privateKey *PrivateKey) FromSeed(seed []byte) (*PrivateKey, error) { + panic(BLSNoRelicError) +} + func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { panic(BLSNoRelicError) } diff --git a/lib/bls_keystore.go b/lib/bls_keystore.go new file mode 100644 index 000000000..64778d835 --- /dev/null +++ b/lib/bls_keystore.go @@ -0,0 +1,154 @@ +package lib + +import ( + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/consensus" + "github.com/pkg/errors" + "github.com/tyler-smith/go-bip39" +) + +// BLSSigner is a wrapper for the bls.PrivateKey type, which abstracts away the private key +// and only exposes protected methods for signing a select set of message types needed for +// Proof of Stake. 
It allows signing for: +// - PoS Validator Votes Messages +// - PoS Validator Timeout Messages +// - PoS Block Proposals +// - PoS Validator Connection Handshakes +// - PoS Random Seed Signature +// +// We need to associate individual op-codes for each message type that can be signed, so that there is no risk +// of signature collisions between different message types. The payload signed per message type must be made +// up of the following tuples: +// - Validator Vote: (0x01, view uint64, blockHash consensus.BlockHash) +// - Validator Timeout: (0x02, view uint64, highQCView uint64) +// - PoS Block Proposal: (0x03, view uint64, blockHash consensus.BlockHash) +// - PoS Validator Handshake: (0x04, peer's random nonce, our node's random nonce) +// - PoS Random Seed Signature: (previous block's random seed hash) + +type BLSSignatureOpCode byte + +const ( + BLSSignatureOpCodeValidatorVote BLSSignatureOpCode = 0 + BLSSignatureOpCodeValidatorTimeout BLSSignatureOpCode = 1 + BLSSignatureOpCodePoSBlockProposal BLSSignatureOpCode = 2 + BLSSignatureOpCodePoSValidatorHandshake BLSSignatureOpCode = 3 +) + +func (opCode BLSSignatureOpCode) Bytes() []byte { + return []byte{byte(opCode)} +} + +////////////////////////////////////////////////////////// +// BLSKeystore +////////////////////////////////////////////////////////// + +type BLSKeystore struct { + signer *BLSSigner +} + +func NewBLSKeystore(seedPhrase string) (*BLSKeystore, error) { + seedBytes, err := bip39.NewSeedWithErrorChecking(seedPhrase, "") + if err != nil { + return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating seed bytes from seed phrase") + } + + privateKey, err := bls.NewPrivateKey() + if err != nil { + return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating private key from seed phrase") + } + + if _, err = privateKey.FromSeed(seedBytes); err != nil { + return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating private key from seed phrase") + } + + signer, err := 
NewBLSSigner(privateKey) + if err != nil { + return nil, err + } + return &BLSKeystore{signer: signer}, nil +} + +func (keystore *BLSKeystore) GetSigner() *BLSSigner { + return keystore.signer +} + +////////////////////////////////////////////////////////// +// BLSSigner +////////////////////////////////////////////////////////// + +type BLSSigner struct { + privateKey *bls.PrivateKey +} + +func NewBLSSigner(privateKey *bls.PrivateKey) (*BLSSigner, error) { + if privateKey == nil { + return nil, errors.New("NewBLSSigner: privateKey cannot be nil") + } + return &BLSSigner{privateKey: privateKey}, nil +} + +func (signer *BLSSigner) sign(opCode BLSSignatureOpCode, payload []byte) (*bls.Signature, error) { + newPayload := append(opCode.Bytes(), payload...) + return signer.privateKey.Sign(newPayload) +} + +func (signer *BLSSigner) GetPublicKey() *bls.PublicKey { + return signer.privateKey.PublicKey() +} + +func (signer *BLSSigner) SignBlockProposal(view uint64, blockHash consensus.BlockHash) (*bls.Signature, error) { + // A block proposer's signature on a block is just its partial vote signature. This allows us to aggregate + // signatures from the proposer and validators into a single aggregated signature to build a QC. 
+ return signer.SignValidatorVote(view, blockHash) +} + +func (signer *BLSSigner) SignValidatorVote(view uint64, blockHash consensus.BlockHash) (*bls.Signature, error) { + payload := consensus.GetVoteSignaturePayload(view, blockHash) + return signer.sign(BLSSignatureOpCodeValidatorVote, payload[:]) +} + +func (signer *BLSSigner) SignValidatorTimeout(view uint64, highQCView uint64) (*bls.Signature, error) { + payload := consensus.GetTimeoutSignaturePayload(view, highQCView) + return signer.sign(BLSSignatureOpCodeValidatorTimeout, payload[:]) +} + +func (signer *BLSSigner) SignRandomSeedHash(randomSeedHash *RandomSeedHash) (*bls.Signature, error) { + return SignRandomSeedHash(signer.privateKey, randomSeedHash) +} + +func (signer *BLSSigner) SignPoSValidatorHandshake(nonceSent uint64, nonceReceived uint64, tstampMicro uint64) (*bls.Signature, error) { + // FIXME + payload := []byte{} + return signer.sign(BLSSignatureOpCodePoSValidatorHandshake, payload[:]) +} + +////////////////////////////////////////////////////////// +// BLS Verification +////////////////////////////////////////////////////////// + +func _blsVerify(opCode BLSSignatureOpCode, payload []byte, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { + newPayload := append(opCode.Bytes(), payload...) 
+ return publicKey.Verify(signature, newPayload) +} + +func BLSVerifyValidatorVote(view uint64, blockHash consensus.BlockHash, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { + payload := consensus.GetVoteSignaturePayload(view, blockHash) + return _blsVerify(BLSSignatureOpCodeValidatorVote, payload[:], signature, publicKey) +} + +func BLSVerifyValidatorTimeout(view uint64, highQCView uint64, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { + payload := consensus.GetTimeoutSignaturePayload(view, highQCView) + return _blsVerify(BLSSignatureOpCodeValidatorTimeout, payload[:], signature, publicKey) +} + +func BLSVerifyPoSValidatorHandshake( + nonceSent uint64, + nonceReceived uint64, + tstampMicro uint64, + signature *bls.Signature, + publicKey *bls.PublicKey, +) (bool, error) { + // FIXME + payload := []byte{} + return _blsVerify(BLSSignatureOpCodePoSValidatorHandshake, payload[:], signature, publicKey) +} diff --git a/lib/bls_keystore_test.go b/lib/bls_keystore_test.go new file mode 100644 index 000000000..a96389389 --- /dev/null +++ b/lib/bls_keystore_test.go @@ -0,0 +1,38 @@ +//go:build relic + +package lib + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewBLSKeystore(t *testing.T) { + // Test empty string + { + _, err := NewBLSKeystore("") + require.Error(t, err) + } + + // Test invalid seed phrase + { + _, err := NewBLSKeystore("invalid seed phrase") + require.Error(t, err) + } + + // Test valid 12 word seed phrase + { + keystore, err := NewBLSKeystore("suit three minute series empty virtual snake safe joke gold pear emerge") + require.NoError(t, err) + require.Equal(t, keystore.GetSigner().privateKey.ToString(), "0x2000bd5d14801e3a96f27a25ae4ebd26ec08a67c207b04c21703b40d80b8de71") + } + + // Test valid 24 word seed phrase + { + keystore, err := NewBLSKeystore("vapor educate wood post fiber proof cannon chunk luggage hedgehog merit dove network lemon scorpion job law more salt market excuse 
auction refuse apart") + require.NoError(t, err) + require.NoError(t, err) + require.Equal(t, keystore.GetSigner().privateKey.ToString(), "0x13b5febb384a3d3dec5c579724872607cd0ddb97adef592efaf144f6d25a70d7") + } +} From 30aa77c6e005897b43df53ca2f81034ab595d3c3 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 29 Jan 2024 14:12:50 -0500 Subject: [PATCH 402/762] Mutate bestHeaderChain along with bestChain (#962) --- lib/pos_blockchain.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 6cffcaff2..9c5a1435c 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1488,6 +1488,8 @@ func (bc *Blockchain) shouldReorg(blockNode *BlockNode, currentView uint64) bool func (bc *Blockchain) addTipBlockToBestChain(blockNode *BlockNode) { bc.bestChain = append(bc.bestChain, blockNode) bc.bestChainMap[*blockNode.Hash] = blockNode + bc.bestHeaderChain = append(bc.bestHeaderChain, blockNode) + bc.bestHeaderChainMap[*blockNode.Hash] = blockNode } // removeTipBlockFromBestChain removes the current tip from the best chain. 
It @@ -1499,6 +1501,8 @@ func (bc *Blockchain) removeTipBlockFromBestChain() *BlockNode { lastBlock := bc.bestChain[len(bc.bestChain)-1] delete(bc.bestChainMap, *lastBlock.Hash) bc.bestChain = bc.bestChain[:len(bc.bestChain)-1] + bc.bestHeaderChain = bc.bestHeaderChain[:len(bc.bestChain)] + delete(bc.bestHeaderChainMap, *lastBlock.Hash) return lastBlock } From dc2e84ec081aad05e1cdf517723e9dfe3044315f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 29 Jan 2024 15:15:44 -0500 Subject: [PATCH 403/762] Add Comments to FastHotStuffConsensus For Future Optimizations (#963) --- lib/pos_consensus.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 3c8d17966..5486c9d66 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -493,6 +493,12 @@ func (cc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error // If there are missing block hashes, then we need to fetch the missing blocks from the network // and retry processing the block as a new tip. We'll request the blocks from the same peer. + // + // If we need to optimize this in the future, we can additionally send the block hash of our + // current committed tip. The peer can then send us all of the blocks that are missing starting + // from our current committed tip all the way through to the requested block hashes. + // + // See https://github.com/deso-protocol/core/pull/875#discussion_r1460183510 for more details. 
if len(missingBlockHashes) > 0 { pp.QueueMessage(&MsgDeSoGetBlocks{ HashList: missingBlockHashes, From 3543f2c79e7b7d0e708f144ffa087b14d7065053 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 29 Jan 2024 13:13:51 -0800 Subject: [PATCH 404/762] Nits --- lib/block_view_types.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 55777718c..77918b209 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -1370,7 +1370,7 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada data = append(data, EncodeToBytes(blockHeight, op.PrevLockupYieldCurvePoint, skipMetadata...)...) data = append(data, byte(op.PrevLockupTransferRestriction)) - // PrevSenderLockedBalanceEntry, PrevReceiverL*ockedBalanceEntry + // PrevSenderLockedBalanceEntry, PrevReceiverLockedBalanceEntry data = append(data, EncodeToBytes(blockHeight, op.PrevSenderLockedBalanceEntry, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, op.PrevReceiverLockedBalanceEntry, skipMetadata...)...) 
From 4398573b3e1bb228cf1bc46a3cf3b0a7bb3ce1f1 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 29 Jan 2024 17:12:06 -0500 Subject: [PATCH 405/762] Reorder encoder type consts so that existing encoder types come first (#965) --- lib/block_view_types.go | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 77918b209..806d79f8c 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -112,23 +112,25 @@ const ( EncoderTypeDmThreadEntry EncoderType = 37 EncoderTypeDeSoNonce EncoderType = 38 EncoderTypeTransactorNonceEntry EncoderType = 39 - EncoderTypeValidatorEntry EncoderType = 40 - EncoderTypeStakeEntry EncoderType = 41 - EncoderTypeLockedStakeEntry EncoderType = 42 - EncoderTypeEpochEntry EncoderType = 43 - EncoderTypeLockedBalanceEntry EncoderType = 44 - EncoderTypeLockupYieldCurvePoint EncoderType = 45 // EncoderTypeStateChangeEntry represents a state change to a DeSo encoder entry. - EncoderTypeStateChangeEntry EncoderType = 46 + EncoderTypeStateChangeEntry EncoderType = 40 // EncoderTypeFollowEntry represents a follow relationship between two pkids. - EncoderTypeFollowEntry EncoderType = 47 + EncoderTypeFollowEntry EncoderType = 41 // EncoderTypeDeSoBalanceEntry represents a balance of DeSo for a particular public key. - EncoderTypeDeSoBalanceEntry EncoderType = 48 + EncoderTypeDeSoBalanceEntry EncoderType = 42 // EncoderTypeBlock represents a block in the blockchain, including all transactions in said block. - EncoderTypeBlock EncoderType = 49 + EncoderTypeBlock EncoderType = 43 // EncoderTypeTxn represents a transaction in the blockchain. 
- EncoderTypeTxn EncoderType = 50 + EncoderTypeTxn EncoderType = 44 + + EncoderTypeValidatorEntry EncoderType = 45 + EncoderTypeStakeEntry EncoderType = 46 + EncoderTypeLockedStakeEntry EncoderType = 47 + EncoderTypeEpochEntry EncoderType = 48 + EncoderTypeLockedBalanceEntry EncoderType = 49 + EncoderTypeLockupYieldCurvePoint EncoderType = 50 + // EncoderTypeBLSPublicKeyPKIDPairEntry represents a BLS public key <> PKID mapping. EncoderTypeBLSPublicKeyPKIDPairEntry EncoderType = 51 From 08b502cd7be5d1c56fc9540b0311888506c75723 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 29 Jan 2024 18:15:03 -0500 Subject: [PATCH 406/762] Fix typo in handleBlockProposalEvent function name (#966) --- lib/pos_consensus.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 5486c9d66..5d53880db 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -120,7 +120,7 @@ func (cc *FastHotStuffConsensus) HandleLocalBlockProposalEvent(event *consensus. 
defer cc.blockchain.ChainLock.Unlock() // Handle the event as a block proposal event for a regular block - if err := cc.handleBlockProposerEvent(event, consensus.FastHotStuffEventTypeConstructVoteQC); err != nil { + if err := cc.handleBlockProposalEvent(event, consensus.FastHotStuffEventTypeConstructVoteQC); err != nil { return errors.Wrapf(err, "FastHotStuffConsensus.HandleLocalBlockProposalEvent: ") } @@ -148,7 +148,7 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutBlockProposalEvent(event *con defer cc.blockchain.ChainLock.Unlock() // Handle the event as a block proposal event for a timeout block - if err := cc.handleBlockProposerEvent(event, consensus.FastHotStuffEventTypeConstructTimeoutQC); err != nil { + if err := cc.handleBlockProposalEvent(event, consensus.FastHotStuffEventTypeConstructTimeoutQC); err != nil { return errors.Wrapf(err, "FastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent: ") } @@ -156,7 +156,7 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutBlockProposalEvent(event *con return nil } -// handleBlockProposerEvent is a helper function that can process a block proposal event for either +// handleBlockProposalEvent is a helper function that can process a block proposal event for either // a regular block or a timeout block. It can be called with a expectedEventType param that toggles // whether the event should be validated and processed as normal block or timeout block proposal. // @@ -169,7 +169,7 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutBlockProposalEvent(event *con // - This will connect the block to the blockchain, remove the transactions from the // mempool, and process the vote in the FastHotStuffEventLoop // 6. 
Broadcast the block to the network -func (cc *FastHotStuffConsensus) handleBlockProposerEvent( +func (cc *FastHotStuffConsensus) handleBlockProposalEvent( event *consensus.FastHotStuffEvent, expectedEventType consensus.FastHotStuffEventType, ) error { From 927df23540ba4bb8b5510460666e89aab290a9a1 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 29 Jan 2024 19:51:14 -0800 Subject: [PATCH 407/762] PoS NetworkManager RemoteNode Cleanup (#945) * Revert "Code split" This reverts commit c0c32f3943ead0e06fdfb3343954a6b5273ea887. * Review * Sync trunk * Rename --- collections/concurrent_map.go | 13 +- collections/concurrent_map_test.go | 2 +- consensus/integration_test_types.go | 4 + consensus/types.go | 1 + consensus/types_internal.go | 4 + .../connection_controller_routines_test.go | 347 ++++++++++++------ .../connection_controller_test.go | 265 ++++++------- .../connection_controller_utils_test.go | 53 ++- integration_testing/tools.go | 2 +- lib/block_view_validator.go | 4 + lib/connection_controller.go | 152 +++++--- lib/connection_manager.go | 6 +- lib/constants.go | 9 + lib/handshake_controller.go | 16 +- lib/network.go | 17 - lib/network_connection.go | 10 +- lib/peer.go | 12 +- lib/remote_node.go | 50 ++- lib/remote_node_manager.go | 37 +- lib/server.go | 11 +- 20 files changed, 652 insertions(+), 363 deletions(-) diff --git a/collections/concurrent_map.go b/collections/concurrent_map.go index 7e3b1b09f..e16d64dc7 100644 --- a/collections/concurrent_map.go +++ b/collections/concurrent_map.go @@ -39,7 +39,18 @@ func (cm *ConcurrentMap[Key, Value]) Get(key Key) (Value, bool) { return val, ok } -func (cm *ConcurrentMap[Key, Value]) Copy() map[Key]Value { +func (cm *ConcurrentMap[Key, Value]) Clone() *ConcurrentMap[Key, Value] { + cm.mtx.RLock() + defer cm.mtx.RUnlock() + + clone := NewConcurrentMap[Key, Value]() + for key, val := range cm.m { + clone.Set(key, val) + } + return clone +} + +func (cm 
*ConcurrentMap[Key, Value]) ToMap() map[Key]Value { cm.mtx.RLock() defer cm.mtx.RUnlock() diff --git a/collections/concurrent_map_test.go b/collections/concurrent_map_test.go index 46fc0c9ba..aac89b2fb 100644 --- a/collections/concurrent_map_test.go +++ b/collections/concurrent_map_test.go @@ -36,7 +36,7 @@ func TestConcurrentMap(t *testing.T) { } // test copy - copy := m.Copy() + copy := m.ToMap() for key, val := range control { if mVal, ok := copy[key]; !ok || mVal != val { t.Errorf("Expected %d, got %d", val, m.m[key]) diff --git a/consensus/integration_test_types.go b/consensus/integration_test_types.go index 3613e63e7..f674332c6 100644 --- a/consensus/integration_test_types.go +++ b/consensus/integration_test_types.go @@ -102,6 +102,10 @@ func (node *validatorNode) GetStakeAmount() *uint256.Int { return node.stake } +func (node *validatorNode) GetDomains() [][]byte { + return [][]byte{} +} + func (node *validatorNode) ProcessBlock(incomingBlock *block) { node.lock.Lock() defer node.lock.Unlock() diff --git a/consensus/types.go b/consensus/types.go index a4714f109..de134453e 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -76,6 +76,7 @@ type BlockHash interface { type Validator interface { GetPublicKey() *bls.PublicKey GetStakeAmount() *uint256.Int + GetDomains() [][]byte } type AggregateQuorumCertificate interface { diff --git a/consensus/types_internal.go b/consensus/types_internal.go index 7b98ce1a2..1eb40eeb7 100644 --- a/consensus/types_internal.go +++ b/consensus/types_internal.go @@ -35,6 +35,10 @@ func (v *validator) GetStakeAmount() *uint256.Int { return v.stakeAmount } +func (v *validator) GetDomains() [][]byte { + return [][]byte{} +} + //////////////////////////////////////////////////////////////////////// // AggregateQuorumCertificate interface implementation for internal use. 
// We use this type for unit tests, and to construct timeout QCs for diff --git a/integration_testing/connection_controller_routines_test.go b/integration_testing/connection_controller_routines_test.go index 95f289aba..1f30e22b5 100644 --- a/integration_testing/connection_controller_routines_test.go +++ b/integration_testing/connection_controller_routines_test.go @@ -5,36 +5,32 @@ import ( "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/cmd" "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/consensus" "github.com/deso-protocol/core/lib" "github.com/stretchr/testify/require" + "github.com/tyler-smith/go-bip39" "testing" + "time" ) func TestConnectionControllerInitiatePersistentConnections(t *testing.T) { - require := require.New(t) - t.Cleanup(func() { - setGetActiveValidatorImpl(lib.BasicGetActiveValidators) - }) - // NonValidator Node1 will set its --connect-ips to two non-validators node2 and node3, // and two validators node4 and node5. node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") - blsPriv4, err := bls.NewPrivateKey() - require.NoError(err) - node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) - blsPriv5, err := bls.NewPrivateKey() - require.NoError(err) - node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsPriv5) + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + blsSeedPhrase5, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsSeedPhrase5) node2 = startNode(t, node2) node3 = startNode(t, node3) node4 = startNode(t, node4) node5 = startNode(t, node5) - setGetActiveValidatorImplWithValidatorNodes(t, node4, node5) - node1.Config.ConnectIPs = []string{ 
node2.Listeners[0].Addr().String(), node3.Listeners[0].Addr().String(), @@ -42,23 +38,25 @@ func TestConnectionControllerInitiatePersistentConnections(t *testing.T) { node5.Listeners[0].Addr().String(), } node1 = startNode(t, node1) + activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node4, node5) + setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5) waitForNonValidatorOutboundConnection(t, node1, node2) waitForNonValidatorOutboundConnection(t, node1, node3) waitForValidatorConnection(t, node1, node4) waitForValidatorConnection(t, node1, node5) waitForValidatorConnection(t, node4, node5) - waitForCountRemoteNodeIndexer(t, node1, 4, 2, 2, 0) - waitForCountRemoteNodeIndexer(t, node2, 1, 0, 0, 1) - waitForCountRemoteNodeIndexer(t, node3, 1, 0, 0, 1) - waitForCountRemoteNodeIndexer(t, node4, 2, 1, 0, 1) - waitForCountRemoteNodeIndexer(t, node5, 2, 1, 0, 1) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 4, 2, 2, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node2, 1, 0, 0, 1) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node3, 1, 0, 0, 1) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node4, 2, 1, 0, 1) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node5, 2, 1, 0, 1) node1.Stop() t.Logf("Test #1 passed | Successfully run non-validator node1 with --connect-ips set to node2, node3, node4, node5") // Now try again with a validator node6, with connect-ips set to node2, node3, node4, node5. 
- blsPriv6, err := bls.NewPrivateKey() - require.NoError(err) - node6 := spawnValidatorNodeProtocol2(t, 18005, "node6", blsPriv6) + blsSeedPhrase6, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node6 := spawnValidatorNodeProtocol2(t, 18005, "node6", blsSeedPhrase6) node6.Config.ConnectIPs = []string{ node2.Listeners[0].Addr().String(), node3.Listeners[0].Addr().String(), @@ -66,66 +64,118 @@ func TestConnectionControllerInitiatePersistentConnections(t *testing.T) { node5.Listeners[0].Addr().String(), } node6 = startNode(t, node6) - setGetActiveValidatorImplWithValidatorNodes(t, node4, node5, node6) + activeValidatorsMap = getActiveValidatorsMapWithValidatorNodes(t, node4, node5, node6) + setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6) waitForNonValidatorOutboundConnection(t, node6, node2) waitForNonValidatorOutboundConnection(t, node6, node3) waitForValidatorConnection(t, node6, node4) waitForValidatorConnection(t, node6, node5) waitForValidatorConnection(t, node4, node5) - waitForCountRemoteNodeIndexer(t, node6, 4, 2, 2, 0) - waitForCountRemoteNodeIndexer(t, node2, 1, 1, 0, 0) - waitForCountRemoteNodeIndexer(t, node3, 1, 1, 0, 0) - waitForCountRemoteNodeIndexer(t, node4, 2, 2, 0, 0) - waitForCountRemoteNodeIndexer(t, node5, 2, 2, 0, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node6, 4, 2, 2, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node2, 1, 1, 0, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node3, 1, 1, 0, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node4, 2, 2, 0, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node5, 2, 2, 0, 0) + t.Logf("Test #2 passed | Successfully run validator node6 with --connect-ips set to node2, node3, node4, node5") +} + +func TestConnectionControllerNonValidatorCircularConnectIps(t *testing.T) { + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") 
+ + node1.Config.ConnectIPs = []string{"127.0.0.1:18001"} + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + + node1 = startNode(t, node1) + node2 = startNode(t, node2) + + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 2, 0, 1, 1) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node2, 2, 0, 1, 1) +} + +func TestNetworkManagerPersistentConnectorReconnect(t *testing.T) { + // Ensure that a node that is disconnected from a persistent connection will be reconnected to. + // Spawn three nodes: a non-validator node1, and node2, and a validator node3. Then set node1 connectIps + // to node2, node3, as well as a non-existing ip. Then we will stop node2, and wait for node1 to drop the + // connection. Then we will restart node2, and wait for node1 to reconnect to node2. We will repeat this + // process for node3. + + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + // Set TargetOutboundPeers to 0 to ensure the non-validator connector doesn't interfere. + node1.Config.TargetOutboundPeers = 0 + + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + blsSeedPhrase3, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase3) + + node2 = startNode(t, node2) + node3 = startNode(t, node3) + + node1.Config.ConnectIPs = []string{ + node2.Listeners[0].Addr().String(), + node3.Listeners[0].Addr().String(), + "127.0.0.1:18003", + } + node1 = startNode(t, node1) + activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node3) + setActiveValidators(activeValidatorsMap, node1, node2, node3) + + waitForNonValidatorOutboundConnection(t, node1, node2) + waitForValidatorConnection(t, node1, node3) + waitForCountRemoteNodeIndexer(t, node1, 3, 1, 2, 0) + node2.Stop() + waitForCountRemoteNodeIndexer(t, node1, 2, 1, 1, 0) + // node1 should reopen the connection to node2, and it should be re-indexed as a non-validator (attempted). 
+ waitForCountRemoteNodeIndexer(t, node1, 3, 1, 2, 0) + node2 = startNode(t, node2) + setActiveValidators(activeValidatorsMap, node2) + waitForCountRemoteNodeIndexer(t, node1, 3, 1, 2, 0) + t.Logf("Test #1 passed | Successfully run reconnect test with non-validator node1 with --connect-ips for node2") + + // Now we will do the same for node3. node3.Stop() - node4.Stop() - node5.Stop() - node6.Stop() - t.Logf("Test #2 passed | Successfully run validator node6 with --connect-ips set to node2, node3, node4, node5") + waitForCountRemoteNodeIndexer(t, node1, 2, 0, 2, 0) + // node1 should reopen the connection to node3, and it should be re-indexed as a non-validator (attempted). + waitForCountRemoteNodeIndexer(t, node1, 3, 0, 3, 0) + node3 = startNode(t, node3) + setActiveValidators(activeValidatorsMap, node3) + waitForValidatorConnection(t, node1, node3) + waitForCountRemoteNodeIndexer(t, node1, 3, 1, 2, 0) + t.Logf("Test #2 passed | Successfully run reconnect test with non-validator node1 with --connect-ips for node3") } func TestConnectionControllerValidatorConnector(t *testing.T) { - require := require.New(t) - t.Cleanup(func() { - setGetActiveValidatorImpl(lib.BasicGetActiveValidators) - }) - // Spawn 5 validators node1, node2, node3, node4, node5 and two non-validators node6 and node7. // All the validators are initially in the validator set. And later, node1 and node2 will be removed from the // validator set. Then, make node3 inactive, and node2 active again. Then, make all the validators inactive. // Make node6, and node7 connect-ips to all the validators. 
- blsPriv1, err := bls.NewPrivateKey() - require.NoError(err) - node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsPriv1) - blsPriv2, err := bls.NewPrivateKey() - require.NoError(err) - node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsPriv2) - blsPriv3, err := bls.NewPrivateKey() - require.NoError(err) - node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsPriv3) - blsPriv4, err := bls.NewPrivateKey() - require.NoError(err) - node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) - blsPriv5, err := bls.NewPrivateKey() - require.NoError(err) - node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsPriv5) + blsSeedPhrase1, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsSeedPhrase1) + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + blsSeedPhrase3, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase3) + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + blsSeedPhrase5, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsSeedPhrase5) node6 := spawnNonValidatorNodeProtocol2(t, 18005, "node6") node7 := spawnNonValidatorNodeProtocol2(t, 18006, "node7") node1 = startNode(t, node1) - defer node1.Stop() node2 = startNode(t, node2) - defer node2.Stop() node3 = startNode(t, node3) - defer node3.Stop() node4 = startNode(t, node4) - defer node4.Stop() node5 = startNode(t, node5) - defer node5.Stop() - setGetActiveValidatorImplWithValidatorNodes(t, node1, node2, node3, node4, node5) node6.Config.ConnectIPs = []string{ node1.Listeners[0].Addr().String(), @@ -136,9 
+186,9 @@ func TestConnectionControllerValidatorConnector(t *testing.T) { } node7.Config.ConnectIPs = node6.Config.ConnectIPs node6 = startNode(t, node6) - defer node6.Stop() node7 = startNode(t, node7) - defer node7.Stop() + activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node1, node2, node3, node4, node5) + setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6, node7) // Verify full graph between active validators. waitForValidatorFullGraph(t, node1, node2, node3, node4, node5) @@ -162,7 +212,8 @@ func TestConnectionControllerValidatorConnector(t *testing.T) { t.Logf("Test #1 passed | Successfully run validators node1, node2, node3, node4, node5; non-validators node6, node7") // Remove node1 and node2 from the validator set. - setGetActiveValidatorImplWithValidatorNodes(t, node3, node4, node5) + activeValidatorsMap = getActiveValidatorsMapWithValidatorNodes(t, node3, node4, node5) + setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6, node7) // Verify full graph between active validators. waitForValidatorFullGraph(t, node3, node4, node5) // Verify connections of non-validators. @@ -185,13 +236,14 @@ func TestConnectionControllerValidatorConnector(t *testing.T) { waitForMinNonValidatorCountRemoteNodeIndexer(t, validator, 6, 3, 0, 2) } // Verify connection counts of non-validators. - waitForCountRemoteNodeIndexer(t, node6, 5, 3, 2, 0) - waitForCountRemoteNodeIndexer(t, node7, 5, 3, 2, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node6, 5, 3, 2, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node7, 5, 3, 2, 0) t.Logf("Test #2 passed | Successfully run validators node3, node4, node5; inactive-validators node1, node2; " + "non-validators node6, node7") // Remove node3 from the validator set. Make node1 active again. 
- setGetActiveValidatorImplWithValidatorNodes(t, node1, node4, node5) + activeValidatorsMap = getActiveValidatorsMapWithValidatorNodes(t, node1, node4, node5) + setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6, node7) // Verify full graph between active validators. waitForValidatorFullGraph(t, node1, node4, node5) // Verify connections of non-validators. @@ -214,13 +266,14 @@ func TestConnectionControllerValidatorConnector(t *testing.T) { waitForMinNonValidatorCountRemoteNodeIndexer(t, validator, 6, 3, 0, 2) } // Verify connection counts of non-validators. - waitForCountRemoteNodeIndexer(t, node6, 5, 3, 2, 0) - waitForCountRemoteNodeIndexer(t, node7, 5, 3, 2, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node6, 5, 3, 2, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node7, 5, 3, 2, 0) t.Logf("Test #3 passed | Successfully run validators node1, node4, node5; inactive validators node2, node3; " + "non-validators node6, node7") // Make all validators inactive. - setGetActiveValidatorImplWithValidatorNodes(t) + activeValidatorsMap = getActiveValidatorsMapWithValidatorNodes(t) + setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6, node7) // NOOP Verify full graph between active validators. // NOOP Verify connections of non-validators. // Verify connections of initial validators. @@ -246,18 +299,13 @@ func TestConnectionControllerValidatorConnector(t *testing.T) { waitForMinNonValidatorCountRemoteNodeIndexer(t, validator, 6, 0, 0, 2) } // Verify connection counts of non-validators. 
- waitForCountRemoteNodeIndexer(t, node6, 5, 0, 5, 0) - waitForCountRemoteNodeIndexer(t, node7, 5, 0, 5, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node6, 5, 0, 5, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node7, 5, 0, 5, 0) t.Logf("Test #4 passed | Successfully run inactive validators node1, node2, node3, node4, node5; " + "non-validators node6, node7") } func TestConnectionControllerValidatorInboundDeduplication(t *testing.T) { - require := require.New(t) - t.Cleanup(func() { - setGetActiveValidatorImpl(lib.BasicGetActiveValidators) - }) - // Spawn a non-validator node1, and two validators node2, node3. The validator nodes will have the same public key. // Node2 and node3 will not initially be in the validator set. First, node2 will start an outbound connection to // node1. We wait until the node2 is re-indexed as non-validator by node1, and then we make node3 open an outbound @@ -266,20 +314,17 @@ func TestConnectionControllerValidatorInboundDeduplication(t *testing.T) { // either node2 or node3 because of duplicate public key. 
node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") - blsPriv2, err := bls.NewPrivateKey() - require.NoError(err) - node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsPriv2) - node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsPriv2) + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase2) node1 = startNode(t, node1) - defer node1.Stop() node2 = startNode(t, node2) - defer node2.Stop() node3 = startNode(t, node3) - defer node3.Stop() cc2 := node2.Server.GetConnectionController() - require.NoError(cc2.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + require.NoError(t, cc2.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) // First wait for node2 to be indexed as a validator by node1. waitForValidatorConnection(t, node1, node2) // Now wait for node2 to be re-indexed as a non-validator. @@ -288,7 +333,7 @@ func TestConnectionControllerValidatorInboundDeduplication(t *testing.T) { // Now connect node3 to node1. cc3 := node3.Server.GetConnectionController() - require.NoError(cc3.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + require.NoError(t, cc3.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) // First wait for node3 to be indexed as a validator by node1. waitForValidatorConnection(t, node1, node3) // Now wait for node3 to be re-indexed as a non-validator. @@ -296,15 +341,14 @@ func TestConnectionControllerValidatorInboundDeduplication(t *testing.T) { waitForNonValidatorOutboundConnection(t, node3, node1) // Now add node2 and node3 to the validator set. 
- setGetActiveValidatorImplWithValidatorNodes(t, node2) + activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node2) + setActiveValidators(activeValidatorsMap, node1, node2, node3) // Now wait for node1 to disconnect from either node2 or node3. - waitForCountRemoteNodeIndexer(t, node1, 1, 1, 0, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 1, 1, 0, 0) t.Logf("Test #1 passed | Successfully run non-validator node1; validators node2, node3 with duplicate public key") } -func TestConnectionControllerNonValidatorConnector(t *testing.T) { - require := require.New(t) - +func TestConnectionControllerNonValidatorConnectorOutbound(t *testing.T) { // Spawn 6 non-validators node1, node2, node3, node4, node5, node6. Set node1's targetOutboundPeers to 3. Then make // node1 create persistent outbound connections to node2, node3, and node4, as well as non-validator connections to // node5 and node6. @@ -317,15 +361,10 @@ func TestConnectionControllerNonValidatorConnector(t *testing.T) { node6 := spawnNonValidatorNodeProtocol2(t, 18005, "node6") node2 = startNode(t, node2) - defer node2.Stop() node3 = startNode(t, node3) - defer node3.Stop() node4 = startNode(t, node4) - defer node4.Stop() node5 = startNode(t, node5) - defer node5.Stop() node6 = startNode(t, node6) - defer node6.Stop() node1.Config.ConnectIPs = []string{ node2.Listeners[0].Addr().String(), @@ -333,54 +372,134 @@ func TestConnectionControllerNonValidatorConnector(t *testing.T) { node4.Listeners[0].Addr().String(), } node1 = startNode(t, node1) - defer node1.Stop() cc := node1.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node5.Listeners[0].Addr().String())) - require.NoError(cc.CreateNonValidatorOutboundConnection(node6.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node5.Listeners[0].Addr().String())) + require.NoError(t, 
cc.CreateNonValidatorOutboundConnection(node6.Listeners[0].Addr().String())) - waitForCountRemoteNodeIndexer(t, node1, 3, 0, 3, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 3, 0, 3, 0) waitForNonValidatorOutboundConnection(t, node1, node2) waitForNonValidatorOutboundConnection(t, node1, node3) waitForNonValidatorOutboundConnection(t, node1, node4) } -func TestConnectionControllerNonValidatorCircularConnectIps(t *testing.T) { - node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") - node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") +func TestConnectionControllerNonValidatorConnectorInbound(t *testing.T) { + // Spawn validators node1, node2, node3, node4, node5, node6. Also spawn non-validators node7, node8, node9, node10. + // Set node1's targetOutboundPeers to 0 and targetInboundPeers to 1. Then make node1 create outbound connections to + // node2, node3, and make node4, node5, node6 create inbound connections to node1. Then make node1 create outbound + // connections to node7, node8, and make node9, node10 create inbound connections to node1. 
+ blsSeedPhrase1, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsSeedPhrase1) + node1.Config.TargetOutboundPeers = 0 + node1.Config.MaxInboundPeers = 1 + node1.Params.DialTimeout = 1 * time.Second + node1.Params.VerackNegotiationTimeout = 1 * time.Second + node1.Params.VersionNegotiationTimeout = 1 * time.Second + + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + node2.Config.GlogV = 0 + blsSeedPhrase3, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase3) + node3.Config.GlogV = 0 + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Config.GlogV = 0 + blsSeedPhrase5, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsSeedPhrase5) + node5.Config.GlogV = 0 + blsSeedPhrase6, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node6 := spawnValidatorNodeProtocol2(t, 18005, "node6", blsSeedPhrase6) + node6.Config.GlogV = 0 - node1.Config.ConnectIPs = []string{"127.0.0.1:18001"} - node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node7 := spawnNonValidatorNodeProtocol2(t, 18006, "node7") + node8 := spawnNonValidatorNodeProtocol2(t, 18007, "node8") + node9 := spawnNonValidatorNodeProtocol2(t, 18008, "node9") + node10 := spawnNonValidatorNodeProtocol2(t, 18009, "node10") node1 = startNode(t, node1) node2 = startNode(t, node2) - defer node1.Stop() - defer node2.Stop() + node3 = startNode(t, node3) + node4 = startNode(t, node4) + node5 = startNode(t, node5) + node6 = startNode(t, node6) + node7 = startNode(t, node7) + node8 = startNode(t, node8) + node9 = 
startNode(t, node9) + node10 = startNode(t, node10) + + // Connect node1 to node2, node3, node7, and node8. + cc1 := node1.Server.GetConnectionController() + require.NoError(t, cc1.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + require.NoError(t, cc1.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + require.NoError(t, cc1.CreateNonValidatorOutboundConnection(node7.Listeners[0].Addr().String())) + require.NoError(t, cc1.CreateNonValidatorOutboundConnection(node8.Listeners[0].Addr().String())) + // Connect node4, node5, node6 to node1. + cc4 := node4.Server.GetConnectionController() + require.NoError(t, cc4.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + cc5 := node5.Server.GetConnectionController() + require.NoError(t, cc5.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + cc6 := node6.Server.GetConnectionController() + require.NoError(t, cc6.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + + // Connect node9, node10 to node1. 
+ cc9 := node9.Server.GetConnectionController() + require.NoError(t, cc9.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + cc10 := node10.Server.GetConnectionController() + require.NoError(t, cc10.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + + activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node1, node2, node3, node4, node5, node6) + setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6, node7, node8, node9, node10) - waitForCountRemoteNodeIndexer(t, node1, 2, 0, 1, 1) - waitForCountRemoteNodeIndexer(t, node2, 2, 0, 1, 1) + waitForValidatorConnection(t, node1, node2) + waitForValidatorConnection(t, node1, node3) + waitForValidatorConnection(t, node1, node4) + waitForValidatorConnection(t, node1, node5) + waitForValidatorConnection(t, node1, node6) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 6, 5, 0, 1) } -func setGetActiveValidatorImplWithValidatorNodes(t *testing.T, validators ...*cmd.Node) { - require := require.New(t) +func TestConnectionControllerNonValidatorConnectorAddressMgr(t *testing.T) { + // Spawn a non-validator node1. Set node1's targetOutboundPeers to 2 and targetInboundPeers to 0. Then + // add two ip addresses to AddrMgr. Make sure that node1 creates outbound connections to these nodes. 
+ node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Config.TargetOutboundPeers = 2 + node1.Config.MaxInboundPeers = 0 + + node1 = startNode(t, node1) + cc := node1.Server.GetConnectionController() + na1, err := cc.ConvertIPStringToNetAddress("deso-seed-2.io:17000") + na2, err := cc.ConvertIPStringToNetAddress("deso-seed-3.io:17000") + require.NoError(t, err) + cc.AddrMgr.AddAddress(na1, na1) + cc.AddrMgr.AddAddress(na2, na2) + waitForCountRemoteNodeIndexer(t, node1, 2, 0, 2, 0) +} - mapping := collections.NewConcurrentMap[bls.SerializedPublicKey, *lib.ValidatorEntry]() +func getActiveValidatorsMapWithValidatorNodes(t *testing.T, validators ...*cmd.Node) *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator] { + mapping := collections.NewConcurrentMap[bls.SerializedPublicKey, consensus.Validator]() for _, validator := range validators { seed := validator.Config.PosValidatorSeed if seed == "" { t.Fatalf("Validator node %s does not have a PosValidatorSeed set", validator.Params.UserAgent) } keystore, err := lib.NewBLSKeystore(seed) - require.NoError(err) + require.NoError(t, err) mapping.Set(keystore.GetSigner().GetPublicKey().Serialize(), createSimpleValidatorEntry(validator)) } - setGetActiveValidatorImpl(func() *collections.ConcurrentMap[bls.SerializedPublicKey, *lib.ValidatorEntry] { - return mapping - }) + return mapping } -func setGetActiveValidatorImpl(mapping func() *collections.ConcurrentMap[bls.SerializedPublicKey, *lib.ValidatorEntry]) { - lib.GetActiveValidatorImpl = mapping +func setActiveValidators(validatorMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator], nodes ...*cmd.Node) { + for _, node := range nodes { + node.Server.GetConnectionController().SetActiveValidatorsMap(validatorMap) + } } func createSimpleValidatorEntry(node *cmd.Node) *lib.ValidatorEntry { diff --git a/integration_testing/connection_controller_test.go b/integration_testing/connection_controller_test.go index 
58f4be33b..63976fbd4 100644 --- a/integration_testing/connection_controller_test.go +++ b/integration_testing/connection_controller_test.go @@ -4,22 +4,22 @@ import ( "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/lib" "github.com/stretchr/testify/require" + "github.com/tyler-smith/go-bip39" "testing" ) func TestConnectionControllerNonValidator(t *testing.T) { - require := require.New(t) - node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Params.DisableNetworkManagerRoutines = true node1 = startNode(t, node1) - defer node1.Stop() // Make sure NonValidator Node1 can create an outbound connection to NonValidator Node2 node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + node2.Params.DisableNetworkManagerRoutines = true node2 = startNode(t, node2) cc := node1.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) waitForNonValidatorOutboundConnection(t, node1, node2) waitForNonValidatorInboundConnection(t, node2, node1) @@ -28,13 +28,14 @@ func TestConnectionControllerNonValidator(t *testing.T) { t.Logf("Test #1 passed | Successfully created outbound connection from NonValidator Node1 to NonValidator Node2") // Make sure NonValidator Node1 can create an outbound connection to validator Node3 - blsPriv3, err := bls.NewPrivateKey() - require.NoError(err) - node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsPriv3) + blsSeedPhrase3, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase3) + node3.Params.DisableNetworkManagerRoutines = true node3 = startNode(t, node3) cc = node1.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + require.NoError(t, 
cc.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) waitForValidatorConnection(t, node1, node3) waitForNonValidatorInboundConnection(t, node3, node1) @@ -43,37 +44,38 @@ func TestConnectionControllerNonValidator(t *testing.T) { t.Logf("Test #2 passed | Successfully created outbound connection from NonValidator Node1 to Validator Node3") // Make sure NonValidator Node1 can create a non-validator connection to validator Node4 - blsPriv4, err := bls.NewPrivateKey() - require.NoError(err) - node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Params.DisableNetworkManagerRoutines = true node4 = startNode(t, node4) - defer node4.Stop() cc = node1.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) waitForValidatorConnection(t, node1, node4) waitForNonValidatorInboundConnection(t, node4, node1) t.Logf("Test #3 passed | Successfully created outbound connection from NonValidator Node1 to Validator Node4") } func TestConnectionControllerValidator(t *testing.T) { - require := require.New(t) - - blsPriv1, err := bls.NewPrivateKey() - require.NoError(err) - node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsPriv1) + blsSeedPhrase1, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsSeedPhrase1) + node1.Params.DisableNetworkManagerRoutines = true node1 = startNode(t, node1) - defer node1.Stop() // Make sure Validator Node1 can create an outbound connection to Validator Node2 - blsPriv2, err := bls.NewPrivateKey() - blsPub2 := blsPriv2.PublicKey() - require.NoError(err) - node2 := spawnValidatorNodeProtocol2(t, 
18001, "node2", blsPriv2) + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + blsKeyStore2, err := lib.NewBLSKeystore(blsSeedPhrase2) + require.NoError(t, err) + blsPub2 := blsKeyStore2.GetSigner().GetPublicKey() + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + node2.Params.DisableNetworkManagerRoutines = true node2 = startNode(t, node2) cc := node1.Server.GetConnectionController() - require.NoError(cc.CreateValidatorConnection(node2.Listeners[0].Addr().String(), blsPub2)) + require.NoError(t, cc.CreateValidatorConnection(node2.Listeners[0].Addr().String(), blsPub2)) waitForValidatorConnection(t, node1, node2) waitForValidatorConnection(t, node2, node1) @@ -83,10 +85,11 @@ func TestConnectionControllerValidator(t *testing.T) { // Make sure Validator Node1 can create an outbound connection to NonValidator Node3 node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") + node3.Params.DisableNetworkManagerRoutines = true node3 = startNode(t, node3) cc = node1.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) waitForNonValidatorOutboundConnection(t, node1, node3) waitForValidatorConnection(t, node3, node1) @@ -95,125 +98,123 @@ func TestConnectionControllerValidator(t *testing.T) { t.Logf("Test #2 passed | Successfully created outbound connection from Validator Node1 to NonValidator Node3") // Make sure Validator Node1 can create an outbound non-validator connection to Validator Node4 - blsPriv4, err := bls.NewPrivateKey() - require.NoError(err) - node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Params.DisableNetworkManagerRoutines = true node4 
= startNode(t, node4) - defer node4.Stop() cc = node1.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) waitForValidatorConnection(t, node1, node4) waitForValidatorConnection(t, node4, node1) t.Logf("Test #3 passed | Successfully created non-validator outbound connection from Validator Node1 to Validator Node4") } func TestConnectionControllerHandshakeDataErrors(t *testing.T) { - require := require.New(t) - - blsPriv1, err := bls.NewPrivateKey() - require.NoError(err) - node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsPriv1) + blsSeedPhrase1, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsSeedPhrase1) + node1.Params.DisableNetworkManagerRoutines = true // This node should have ProtocolVersion2, but it has ProtocolVersion1 as we want it to disconnect. 
- blsPriv2, err := bls.NewPrivateKey() - require.NoError(err) - node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsPriv2) + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + node2.Params.DisableNetworkManagerRoutines = true node2.Params.ProtocolVersion = lib.ProtocolVersion1 node1 = startNode(t, node1) node2 = startNode(t, node2) - defer node1.Stop() - defer node2.Stop() cc := node2.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node2) t.Logf("Test #1 passed | Successfuly disconnected node with SFValidator flag and ProtocolVersion1 mismatch") // This node shouldn't have ProtocolVersion3, which is beyond latest ProtocolVersion2, meaning nodes should disconnect. - blsPriv3, err := bls.NewPrivateKey() - require.NoError(err) - node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsPriv3) + blsSeedPhrase3, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase3) + node3.Params.DisableNetworkManagerRoutines = true node3.Params.ProtocolVersion = lib.ProtocolVersionType(3) node3 = startNode(t, node3) - defer node3.Stop() cc = node1.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node3) t.Logf("Test #2 passed | Successfuly disconnected node with ProtocolVersion3") // This node shouldn't have ProtocolVersion0, which is outdated. 
node4 := spawnNonValidatorNodeProtocol2(t, 18003, "node4") + node4.Params.DisableNetworkManagerRoutines = true node4.Params.ProtocolVersion = lib.ProtocolVersion0 node4 = startNode(t, node4) - defer node4.Stop() cc = node1.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node4) t.Logf("Test #3 passed | Successfuly disconnected node with ProtocolVersion0") // This node will have a different public key than the one it's supposed to have. - blsPriv5, err := bls.NewPrivateKey() - require.NoError(err) - blsPriv5Wrong, err := bls.NewPrivateKey() - require.NoError(err) - node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsPriv5) + blsSeedPhrase5, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + blsSeedPhrase5Wrong, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + blsKeyStore5Wrong, err := lib.NewBLSKeystore(blsSeedPhrase5Wrong) + require.NoError(t, err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsSeedPhrase5) + node5.Params.DisableNetworkManagerRoutines = true node5 = startNode(t, node5) - defer node5.Stop() cc = node1.Server.GetConnectionController() - require.NoError(cc.CreateValidatorConnection(node5.Listeners[0].Addr().String(), blsPriv5Wrong.PublicKey())) + require.NoError(t, cc.CreateValidatorConnection(node5.Listeners[0].Addr().String(), blsKeyStore5Wrong.GetSigner().GetPublicKey())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node5) t.Logf("Test #4 passed | Successfuly disconnected node with public key mismatch") // This node will be missing SFPosValidator flag while being connected as a validator. 
blsPriv6, err := bls.NewPrivateKey() - require.NoError(err) + require.NoError(t, err) node6 := spawnNonValidatorNodeProtocol2(t, 18005, "node6") + node6.Params.DisableNetworkManagerRoutines = true node6 = startNode(t, node6) - defer node6.Stop() cc = node1.Server.GetConnectionController() - require.NoError(cc.CreateValidatorConnection(node6.Listeners[0].Addr().String(), blsPriv6.PublicKey())) + require.NoError(t, cc.CreateValidatorConnection(node6.Listeners[0].Addr().String(), blsPriv6.PublicKey())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node6) t.Logf("Test #5 passed | Successfuly disconnected supposed validator node with missing SFPosValidator flag") // This node will have ProtocolVersion1 and be connected as an outbound non-validator node. node7 := spawnNonValidatorNodeProtocol2(t, 18006, "node7") + node7.Params.DisableNetworkManagerRoutines = true node7.Params.ProtocolVersion = lib.ProtocolVersion1 node7 = startNode(t, node7) - defer node7.Stop() cc = node1.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node7.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node7.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node7) t.Logf("Test #6 passed | Successfuly disconnected outbound non-validator node with ProtocolVersion1") } func TestConnectionControllerHandshakeTimeouts(t *testing.T) { - require := require.New(t) - // Set version negotiation timeout to 0 to make sure that the node will be disconnected node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Params.DisableNetworkManagerRoutines = true node1.Params.VersionNegotiationTimeout = 0 node1 = startNode(t, node1) - defer node1.Stop() node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + node2.Params.DisableNetworkManagerRoutines = true node2 = startNode(t, node2) - defer node2.Stop() cc := 
node1.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node2) t.Logf("Test #1 passed | Successfuly disconnected node after version negotiation timeout") @@ -221,66 +222,70 @@ func TestConnectionControllerHandshakeTimeouts(t *testing.T) { // Now let's try timing out verack exchange node1.Params.VersionNegotiationTimeout = lib.DeSoTestnetParams.VersionNegotiationTimeout node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") + node3.Params.DisableNetworkManagerRoutines = true node3.Params.VerackNegotiationTimeout = 0 node3 = startNode(t, node3) - defer node3.Stop() cc = node3.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node3) t.Logf("Test #2 passed | Successfuly disconnected node after verack exchange timeout") // Now let's try timing out handshake between two validators node4 and node5 - blsPriv4, err := bls.NewPrivateKey() - require.NoError(err) - node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Params.DisableNetworkManagerRoutines = true node4.Params.HandshakeTimeoutMicroSeconds = 0 node4 = startNode(t, node4) - defer node4.Stop() - blsPriv5, err := bls.NewPrivateKey() - require.NoError(err) - node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsPriv5) + blsSeedPhrase5, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + blsKeyStore5, err := 
lib.NewBLSKeystore(blsSeedPhrase5) + require.NoError(t, err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsSeedPhrase5) + node5.Params.DisableNetworkManagerRoutines = true node5 = startNode(t, node5) - defer node5.Stop() cc = node4.Server.GetConnectionController() - require.NoError(cc.CreateValidatorConnection(node5.Listeners[0].Addr().String(), blsPriv5.PublicKey())) + require.NoError(t, cc.CreateValidatorConnection(node5.Listeners[0].Addr().String(), blsKeyStore5.GetSigner().GetPublicKey())) waitForEmptyRemoteNodeIndexer(t, node4) waitForEmptyRemoteNodeIndexer(t, node5) t.Logf("Test #3 passed | Successfuly disconnected validator node after handshake timeout") } func TestConnectionControllerValidatorDuplication(t *testing.T) { - require := require.New(t) - node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Params.DisableNetworkManagerRoutines = true node1 = startNode(t, node1) - defer node1.Stop() // Create a validator Node2 - blsPriv2, err := bls.NewPrivateKey() - require.NoError(err) - node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsPriv2) + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + blsKeyStore2, err := lib.NewBLSKeystore(blsSeedPhrase2) + require.NoError(t, err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + node2.Params.DisableNetworkManagerRoutines = true node2 = startNode(t, node2) // Create a duplicate validator Node3 - node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsPriv2) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase2) + node3.Params.DisableNetworkManagerRoutines = true node3 = startNode(t, node3) // Create validator connection from Node1 to Node2 and from Node1 to Node3 cc := node1.Server.GetConnectionController() - require.NoError(cc.CreateValidatorConnection(node2.Listeners[0].Addr().String(), blsPriv2.PublicKey())) + require.NoError(t, 
cc.CreateValidatorConnection(node2.Listeners[0].Addr().String(), blsKeyStore2.GetSigner().GetPublicKey())) // This should fail out right because Node3 has a duplicate public key. - require.Error(cc.CreateValidatorConnection(node3.Listeners[0].Addr().String(), blsPriv2.PublicKey())) + require.Error(t, cc.CreateValidatorConnection(node3.Listeners[0].Addr().String(), blsKeyStore2.GetSigner().GetPublicKey())) waitForValidatorConnection(t, node1, node2) waitForNonValidatorInboundConnection(t, node2, node1) // Now create an outbound connection from Node3 to Node1, which should pass handshake, but then fail because // Node1 already has a validator connection to Node2 with the same public key. cc3 := node3.Server.GetConnectionController() - require.NoError(cc3.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + require.NoError(t, cc3.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node3) waitForCountRemoteNodeIndexer(t, node1, 1, 1, 0, 0) t.Logf("Test #1 passed | Successfuly rejected duplicate validator connection with inbound/outbound validators") @@ -290,56 +295,58 @@ func TestConnectionControllerValidatorDuplication(t *testing.T) { waitForEmptyRemoteNodeIndexer(t, node1) // Create two more validators Node4, Node5 with duplicate public keys - blsPriv4, err := bls.NewPrivateKey() - require.NoError(err) - node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Params.DisableNetworkManagerRoutines = true node4 = startNode(t, node4) - defer node4.Stop() - node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsPriv4) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsSeedPhrase4) + node5.Params.DisableNetworkManagerRoutines = true node5 = startNode(t, node5) - defer node5.Stop() // Create validator 
connections from Node4 to Node1 and from Node5 to Node1 cc4 := node4.Server.GetConnectionController() - require.NoError(cc4.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + require.NoError(t, cc4.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForValidatorConnection(t, node1, node4) waitForNonValidatorOutboundConnection(t, node4, node1) cc5 := node5.Server.GetConnectionController() - require.NoError(cc5.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + require.NoError(t, cc5.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node5) waitForCountRemoteNodeIndexer(t, node1, 1, 1, 0, 0) t.Logf("Test #2 passed | Successfuly rejected duplicate validator connection with multiple outbound validators") } func TestConnectionControllerProtocolDifference(t *testing.T) { - require := require.New(t) - // Create a ProtocolVersion1 Node1 node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Params.DisableNetworkManagerRoutines = true node1.Params.ProtocolVersion = lib.ProtocolVersion1 node1 = startNode(t, node1) - defer node1.Stop() // Create a ProtocolVersion2 NonValidator Node2 node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + node2.Params.DisableNetworkManagerRoutines = true node2 = startNode(t, node2) // Create non-validator connection from Node1 to Node2 cc := node1.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) waitForNonValidatorOutboundConnection(t, node1, node2) waitForNonValidatorInboundConnection(t, node2, node1) t.Logf("Test #1 passed | Successfuly connected to a ProtocolVersion1 node with a ProtocolVersion2 non-validator") // Create a ProtocolVersion2 Validator Node3 - blsPriv3, err := bls.NewPrivateKey() - require.NoError(err) - 
node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsPriv3) + blsSeedPhrase3, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + blsKeyStore3, err := lib.NewBLSKeystore(blsSeedPhrase3) + require.NoError(t, err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase3) + node3.Params.DisableNetworkManagerRoutines = true node3 = startNode(t, node3) // Create validator connection from Node1 to Node3 - require.NoError(cc.CreateValidatorConnection(node3.Listeners[0].Addr().String(), blsPriv3.PublicKey())) + require.NoError(t, cc.CreateValidatorConnection(node3.Listeners[0].Addr().String(), blsKeyStore3.GetSigner().GetPublicKey())) waitForValidatorConnection(t, node1, node3) waitForNonValidatorInboundConnection(t, node3, node1) t.Logf("Test #2 passed | Successfuly connected to a ProtocolVersion1 node with a ProtocolVersion2 validator") @@ -349,55 +356,57 @@ func TestConnectionControllerProtocolDifference(t *testing.T) { waitForEmptyRemoteNodeIndexer(t, node1) // Create a ProtocolVersion2 validator Node4 - blsPriv4, err := bls.NewPrivateKey() - require.NoError(err) - node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + blsKeyStore4, err := lib.NewBLSKeystore(blsSeedPhrase4) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Params.DisableNetworkManagerRoutines = true node4 = startNode(t, node4) - defer node4.Stop() // Attempt to create non-validator connection from Node4 to Node1 cc = node4.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node4) waitForEmptyRemoteNodeIndexer(t, node1) t.Logf("Test #3 passed | Successfuly rejected outbound connection from 
ProtocolVersion2 node to ProtcolVersion1 node") // Attempt to create validator connection from Node4 to Node1 - require.NoError(cc.CreateValidatorConnection(node1.Listeners[0].Addr().String(), blsPriv4.PublicKey())) + require.NoError(t, cc.CreateValidatorConnection(node1.Listeners[0].Addr().String(), blsKeyStore4.GetSigner().GetPublicKey())) waitForEmptyRemoteNodeIndexer(t, node4) waitForEmptyRemoteNodeIndexer(t, node1) t.Logf("Test #4 passed | Successfuly rejected validator connection from ProtocolVersion2 node to ProtcolVersion1 node") // Create a ProtocolVersion2 non-validator Node5 node5 := spawnNonValidatorNodeProtocol2(t, 18004, "node5") + node5.Params.DisableNetworkManagerRoutines = true node5 = startNode(t, node5) - defer node5.Stop() // Attempt to create non-validator connection from Node5 to Node1 cc = node5.Server.GetConnectionController() - require.NoError(cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + require.NoError(t, cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node5) waitForEmptyRemoteNodeIndexer(t, node1) t.Logf("Test #5 passed | Successfuly rejected outbound connection from ProtocolVersion2 node to ProtcolVersion1 node") } func TestConnectionControllerPersistentConnection(t *testing.T) { - require := require.New(t) - // Create a NonValidator Node1 node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Params.DisableNetworkManagerRoutines = true node1 = startNode(t, node1) // Create a Validator Node2 - blsPriv2, err := bls.NewPrivateKey() - require.NoError(err) - node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsPriv2) + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + node2.Params.DisableNetworkManagerRoutines = true node2 = startNode(t, node2) // Create a persistent connection from Node1 to Node2 cc := 
node1.Server.GetConnectionController() _, err = cc.CreateNonValidatorPersistentOutboundConnection(node2.Listeners[0].Addr().String()) - require.NoError(err) + require.NoError(t, err) waitForValidatorConnection(t, node1, node2) waitForNonValidatorInboundConnection(t, node2, node1) node2.Stop() @@ -406,11 +415,12 @@ func TestConnectionControllerPersistentConnection(t *testing.T) { // Create a Non-validator Node3 node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") + node3.Params.DisableNetworkManagerRoutines = true node3 = startNode(t, node3) // Create a persistent connection from Node1 to Node3 _, err = cc.CreateNonValidatorPersistentOutboundConnection(node3.Listeners[0].Addr().String()) - require.NoError(err) + require.NoError(t, err) waitForNonValidatorOutboundConnection(t, node1, node3) waitForNonValidatorInboundConnection(t, node3, node1) node3.Stop() @@ -419,20 +429,21 @@ func TestConnectionControllerPersistentConnection(t *testing.T) { t.Logf("Test #2 passed | Successfuly created persistent connection from non-validator Node1 to non-validator Node3") // Create a Validator Node4 - blsPriv4, err := bls.NewPrivateKey() - require.NoError(err) - node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsPriv4) + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Params.DisableNetworkManagerRoutines = true node4 = startNode(t, node4) - defer node4.Stop() // Create a non-validator Node5 node5 := spawnNonValidatorNodeProtocol2(t, 18004, "node5") + node5.Params.DisableNetworkManagerRoutines = true node5 = startNode(t, node5) // Create a persistent connection from Node4 to Node5 cc = node4.Server.GetConnectionController() _, err = cc.CreateNonValidatorPersistentOutboundConnection(node5.Listeners[0].Addr().String()) - require.NoError(err) + require.NoError(t, err) waitForNonValidatorOutboundConnection(t, node4, node5) waitForValidatorConnection(t, 
node5, node4) node5.Stop() @@ -440,15 +451,15 @@ func TestConnectionControllerPersistentConnection(t *testing.T) { t.Logf("Test #3 passed | Successfuly created persistent connection from validator Node4 to non-validator Node5") // Create a Validator Node6 - blsPriv6, err := bls.NewPrivateKey() - require.NoError(err) - node6 := spawnValidatorNodeProtocol2(t, 18005, "node6", blsPriv6) + blsSeedPhrase6, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node6 := spawnValidatorNodeProtocol2(t, 18005, "node6", blsSeedPhrase6) + node6.Params.DisableNetworkManagerRoutines = true node6 = startNode(t, node6) - defer node6.Stop() // Create a persistent connection from Node4 to Node6 _, err = cc.CreateNonValidatorPersistentOutboundConnection(node6.Listeners[0].Addr().String()) - require.NoError(err) + require.NoError(t, err) waitForValidatorConnection(t, node4, node6) waitForValidatorConnection(t, node6, node4) t.Logf("Test #4 passed | Successfuly created persistent connection from validator Node4 to validator Node6") diff --git a/integration_testing/connection_controller_utils_test.go b/integration_testing/connection_controller_utils_test.go index 43cf418bc..94f4702c6 100644 --- a/integration_testing/connection_controller_utils_test.go +++ b/integration_testing/connection_controller_utils_test.go @@ -2,7 +2,6 @@ package integration_testing import ( "fmt" - "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/cmd" "github.com/deso-protocol/core/lib" "os" @@ -129,6 +128,18 @@ func waitForCountRemoteNodeIndexer(t *testing.T, node1 *cmd.Node, allCount int, waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to have appropriate RemoteNodes counts", userAgent), condition) } +func waitForCountRemoteNodeIndexerHandshakeCompleted(t *testing.T, node1 *cmd.Node, allCount, validatorCount int, + nonValidatorOutboundCount int, nonValidatorInboundCount int) { + + userAgent := node1.Params.UserAgent + rnManager := 
node1.Server.GetConnectionController().GetRemoteNodeManager() + condition := func() bool { + return checkRemoteNodeIndexerCountHandshakeCompleted(rnManager, allCount, validatorCount, + nonValidatorOutboundCount, nonValidatorInboundCount) + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to have appropriate RemoteNodes counts", userAgent), condition) +} + func checkRemoteNodeIndexerUserAgent(manager *lib.RemoteNodeManager, userAgent string, validator bool, nonValidatorOutbound bool, nonValidatorInbound bool) bool { @@ -167,6 +178,42 @@ func checkRemoteNodeIndexerCount(manager *lib.RemoteNodeManager, allCount int, v return true } +func checkRemoteNodeIndexerCountHandshakeCompleted(manager *lib.RemoteNodeManager, allCount int, validatorCount int, + nonValidatorOutboundCount int, nonValidatorInboundCount int) bool { + + if allCount != manager.GetAllRemoteNodes().Count() { + return false + } + if validatorCount != manager.GetValidatorIndex().Count() { + return false + } + for _, rn := range manager.GetValidatorIndex().GetAll() { + if !rn.IsHandshakeCompleted() { + return false + } + } + + if nonValidatorOutboundCount != manager.GetNonValidatorOutboundIndex().Count() { + return false + } + for _, rn := range manager.GetNonValidatorOutboundIndex().GetAll() { + if !rn.IsHandshakeCompleted() { + return false + } + } + + if nonValidatorInboundCount != manager.GetNonValidatorInboundIndex().Count() { + return false + } + for _, rn := range manager.GetNonValidatorInboundIndex().GetAll() { + if !rn.IsHandshakeCompleted() { + return false + } + } + + return true +} + func checkRemoteNodeIndexerEmpty(manager *lib.RemoteNodeManager) bool { if manager.GetAllRemoteNodes().Count() != 0 { return false @@ -219,14 +266,14 @@ func spawnNonValidatorNodeProtocol2(t *testing.T, port uint32, id string) *cmd.N return node } -func spawnValidatorNodeProtocol2(t *testing.T, port uint32, id string, blsPriv *bls.PrivateKey) *cmd.Node { +func spawnValidatorNodeProtocol2(t *testing.T, port 
uint32, id string, blsSeedPhrase string) *cmd.Node { dbDir := getDirectory(t) t.Cleanup(func() { os.RemoveAll(dbDir) }) config := generateConfig(t, port, dbDir, 10) config.SyncType = lib.NodeSyncTypeBlockSync - config.PosValidatorSeed = blsPriv.ToString() + config.PosValidatorSeed = blsSeedPhrase node := cmd.NewNode(config) node.Params.UserAgent = id node.Params.ProtocolVersion = lib.ProtocolVersion2 diff --git a/integration_testing/tools.go b/integration_testing/tools.go index 4db913136..df9aad581 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -486,7 +486,7 @@ func waitForCondition(t *testing.T, id string, condition func() bool) { signalChan <- struct{}{} return } - time.Sleep(1 * time.Millisecond) + time.Sleep(100 * time.Millisecond) } }() diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index b6a662676..623a67ae2 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -93,6 +93,10 @@ func (validatorEntry *ValidatorEntry) GetStakeAmount() *uint256.Int { return validatorEntry.TotalStakeAmountNanos } +func (validatorEntry *ValidatorEntry) GetDomains() [][]byte { + return validatorEntry.Domains +} + func (validatorEntry *ValidatorEntry) Status() ValidatorStatus { // ValidatorEntry.Status() is a virtual/derived field that is not stored in // the database, but instead constructed from other ValidatorEntry fields. 
diff --git a/lib/connection_controller.go b/lib/connection_controller.go index 4bda85507..ac8467b09 100644 --- a/lib/connection_controller.go +++ b/lib/connection_controller.go @@ -6,6 +6,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/consensus" "github.com/golang/glog" "github.com/pkg/errors" "net" @@ -14,14 +15,6 @@ import ( "time" ) -type GetActiveValidatorsFunc func() *collections.ConcurrentMap[bls.SerializedPublicKey, *ValidatorEntry] - -var GetActiveValidatorImpl GetActiveValidatorsFunc = BasicGetActiveValidators - -func BasicGetActiveValidators() *collections.ConcurrentMap[bls.SerializedPublicKey, *ValidatorEntry] { - return collections.NewConcurrentMap[bls.SerializedPublicKey, *ValidatorEntry]() -} - // ConnectionController is a structure that oversees all connections to remote nodes. It is responsible for kicking off // the initial connections a node makes to the network. It is also responsible for creating RemoteNodes from all // successful outbound and inbound connections. The ConnectionController also ensures that the node is connected to @@ -48,7 +41,12 @@ type ConnectionController struct { // persistentIpToRemoteNodeIdsMap maps persistent IP addresses, like the --connect-ips, to the RemoteNodeIds of the // corresponding RemoteNodes. This is used to ensure that we don't connect to the same persistent IP address twice. // And that we can reconnect to the same persistent IP address if we disconnect from it. - persistentIpToRemoteNodeIdsMap map[string]RemoteNodeId + persistentIpToRemoteNodeIdsMap *collections.ConcurrentMap[string, RemoteNodeId] + + activeValidatorsMapLock sync.RWMutex + // activeValidatorsMap is a map of all currently active validators registered in consensus. It will be updated + // periodically by the owner of the ConnectionController. 
+ activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator] // The target number of non-validator outbound remote nodes we want to have. We will disconnect remote nodes once // we've exceeded this number of outbound connections. @@ -78,7 +76,8 @@ func NewConnectionController(params *DeSoParams, cmgr *ConnectionManager, handsh rnManager: rnManager, AddrMgr: addrMgr, connectIps: connectIps, - persistentIpToRemoteNodeIdsMap: make(map[string]RemoteNodeId), + persistentIpToRemoteNodeIdsMap: collections.NewConcurrentMap[string, RemoteNodeId](), + activeValidatorsMap: collections.NewConcurrentMap[bls.SerializedPublicKey, consensus.Validator](), targetNonValidatorOutboundRemoteNodes: targetNonValidatorOutboundRemoteNodes, targetNonValidatorInboundRemoteNodes: targetNonValidatorInboundRemoteNodes, limitOneInboundRemoteNodePerIP: limitOneInboundConnectionPerIP, @@ -87,18 +86,26 @@ func NewConnectionController(params *DeSoParams, cmgr *ConnectionManager, handsh } func (cc *ConnectionController) Start() { - cc.startGroup.Add(3) + if cc.params.DisableNetworkManagerRoutines { + return + } + + cc.startGroup.Add(4) go cc.startPersistentConnector() go cc.startValidatorConnector() go cc.startNonValidatorConnector() + go cc.startRemoteNodeCleanup() cc.startGroup.Wait() - cc.exitGroup.Add(3) + cc.exitGroup.Add(4) } func (cc *ConnectionController) Stop() { - close(cc.exitChan) - cc.exitGroup.Wait() + if !cc.params.DisableNetworkManagerRoutines { + close(cc.exitChan) + cc.exitGroup.Wait() + } + cc.rnManager.DisconnectAll() } func (cc *ConnectionController) GetRemoteNodeManager() *RemoteNodeManager { @@ -131,7 +138,7 @@ func (cc *ConnectionController) startValidatorConnector() { cc.exitGroup.Done() return case <-time.After(1 * time.Second): - activeValidatorsMap := GetActiveValidatorImpl() + activeValidatorsMap := cc.getActiveValidatorsMap() cc.refreshValidatorIndex(activeValidatorsMap) cc.connectValidators(activeValidatorsMap) } @@ -158,6 +165,21 @@ 
func (cc *ConnectionController) startNonValidatorConnector() { } } +func (cc *ConnectionController) startRemoteNodeCleanup() { + cc.startGroup.Done() + + for { + select { + case <-cc.exitChan: + cc.exitGroup.Done() + return + case <-time.After(1 * time.Second): + cc.rnManager.Cleanup() + } + } + +} + // ########################### // ## Handlers (Peer, DeSoMessage) // ########################### @@ -167,11 +189,14 @@ func (cc *ConnectionController) _handleDonePeerMessage(origin *Peer, desoMsg DeS return } + glog.V(2).Infof("ConnectionController.handleDonePeerMessage: Handling disconnected peer message for "+ + "id=%v", origin.ID) cc.rnManager.DisconnectById(NewRemoteNodeId(origin.ID)) // Update the persistentIpToRemoteNodeIdsMap. - for ip, id := range cc.persistentIpToRemoteNodeIdsMap { + ipRemoteNodeIdMap := cc.persistentIpToRemoteNodeIdsMap.ToMap() + for ip, id := range ipRemoteNodeIdMap { if id.ToUint64() == origin.ID { - delete(cc.persistentIpToRemoteNodeIdsMap, ip) + cc.persistentIpToRemoteNodeIdsMap.Remove(ip) } } } @@ -228,6 +253,7 @@ func (cc *ConnectionController) _handleNewConnectionMessage(origin *Peer, desoMs } func (cc *ConnectionController) cleanupFailedInboundConnection(remoteNode *RemoteNode, connection Connection) { + glog.V(2).Infof("ConnectionController.cleanupFailedInboundConnection: Cleaning up failed inbound connection") if remoteNode != nil { cc.rnManager.Disconnect(remoteNode) } @@ -239,6 +265,7 @@ func (cc *ConnectionController) cleanupFailedOutboundConnection(connection Conne if !ok { return } + glog.V(2).Infof("ConnectionController.cleanupFailedOutboundConnection: Cleaning up failed outbound connection") id := NewRemoteNodeId(oc.attemptId) rn := cc.rnManager.GetRemoteNodeById(id) @@ -257,7 +284,7 @@ func (cc *ConnectionController) refreshConnectIps() { // Connect to addresses passed via the --connect-ips flag. These addresses are persistent in the sense that if we // disconnect from one, we will try to reconnect to the same one. 
for _, connectIp := range cc.connectIps { - if _, ok := cc.persistentIpToRemoteNodeIdsMap[connectIp]; ok { + if _, ok := cc.persistentIpToRemoteNodeIdsMap.Get(connectIp); ok { continue } @@ -269,7 +296,7 @@ func (cc *ConnectionController) refreshConnectIps() { continue } - cc.persistentIpToRemoteNodeIdsMap[connectIp] = id + cc.persistentIpToRemoteNodeIdsMap.Set(connectIp, id) } } @@ -277,13 +304,26 @@ func (cc *ConnectionController) refreshConnectIps() { // ## Validator Connections // ########################### +func (cc *ConnectionController) SetActiveValidatorsMap(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { + cc.activeValidatorsMapLock.Lock() + defer cc.activeValidatorsMapLock.Unlock() + cc.activeValidatorsMap = activeValidatorsMap.Clone() + +} + +func (cc *ConnectionController) getActiveValidatorsMap() *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator] { + cc.activeValidatorsMapLock.RLock() + defer cc.activeValidatorsMapLock.RUnlock() + return cc.activeValidatorsMap.Clone() +} + // refreshValidatorIndex re-indexes validators based on the activeValidatorsMap. It is called periodically by the // validator connector. -func (cc *ConnectionController) refreshValidatorIndex(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, *ValidatorEntry]) { +func (cc *ConnectionController) refreshValidatorIndex(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { // De-index inactive validators. We skip any checks regarding RemoteNodes connection status, nor do we verify whether // de-indexing the validator would result in an excess number of outbound/inbound connections. Any excess connections // will be cleaned up by the peer connector. 
- validatorRemoteNodeMap := cc.rnManager.GetValidatorIndex().Copy() + validatorRemoteNodeMap := cc.rnManager.GetValidatorIndex().ToMap() for pk, rn := range validatorRemoteNodeMap { // If the validator is no longer active, de-index it. if _, ok := activeValidatorsMap.Get(pk); !ok { @@ -306,6 +346,8 @@ func (cc *ConnectionController) refreshValidatorIndex(activeValidatorsMap *colle // public key, which goes undetected during handshake. To prevent this from affecting the indexing of the validator // set, we check that the non-validator's public key is not already present in the validator index. if _, ok := cc.rnManager.GetValidatorIndex().Get(pk.Serialize()); ok { + glog.V(2).Infof("ConnectionController.refreshValidatorIndex: Disconnecting Validator RemoteNode "+ + "(%v) has validator public key (%v) that is already present in validator index", rn, pk) cc.rnManager.Disconnect(rn) continue } @@ -320,13 +362,13 @@ func (cc *ConnectionController) refreshValidatorIndex(activeValidatorsMap *colle // connectValidators attempts to connect to all active validators that are not already connected. It is called // periodically by the validator connector. -func (cc *ConnectionController) connectValidators(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, *ValidatorEntry]) { +func (cc *ConnectionController) connectValidators(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { // Look through the active validators and connect to any that we're not already connected to. if cc.blsKeystore == nil { return } - validators := activeValidatorsMap.Copy() + validators := activeValidatorsMap.ToMap() for pk, validator := range validators { _, exists := cc.rnManager.GetValidatorIndex().Get(pk) // If we're already connected to the validator, continue. @@ -343,7 +385,10 @@ func (cc *ConnectionController) connectValidators(activeValidatorsMap *collectio } // For now, we only dial the first domain in the validator's domain list. 
- address := string(validator.Domains[0]) + if len(validator.GetDomains()) == 0 { + continue + } + address := string(validator.GetDomains()[0]) if err := cc.CreateValidatorConnection(address, publicKey); err != nil { glog.V(2).Infof("ConnectionController.connectValidators: Problem connecting to validator %v: %v", address, err) continue @@ -374,8 +419,8 @@ func (cc *ConnectionController) refreshNonValidatorOutboundIndex() { allOutboundRemoteNodes := cc.rnManager.GetNonValidatorOutboundIndex().GetAll() var attemptedOutboundRemoteNodes, connectedOutboundRemoteNodes []*RemoteNode for _, rn := range allOutboundRemoteNodes { - if rn.IsPersistent() { - // We do nothing for persistent remote nodes. + if rn.IsPersistent() || rn.IsExpectedValidator() { + // We do nothing for persistent remote nodes or expected validators. continue } else if rn.IsHandshakeCompleted() { connectedOutboundRemoteNodes = append(connectedOutboundRemoteNodes, rn) @@ -397,6 +442,8 @@ func (cc *ConnectionController) refreshNonValidatorOutboundIndex() { if excessiveOutboundRemoteNodes == 0 { break } + glog.V(2).Infof("ConnectionController.refreshNonValidatorOutboundIndex: Disconnecting attempted remote "+ + "node (id=%v) due to excess outbound peers", rn.GetId()) cc.rnManager.Disconnect(rn) excessiveOutboundRemoteNodes-- } @@ -405,6 +452,8 @@ func (cc *ConnectionController) refreshNonValidatorOutboundIndex() { if excessiveOutboundRemoteNodes == 0 { break } + glog.V(2).Infof("ConnectionController.refreshNonValidatorOutboundIndex: Disconnecting connected remote "+ + "node (id=%v) due to excess outbound peers", rn.GetId()) cc.rnManager.Disconnect(rn) excessiveOutboundRemoteNodes-- } @@ -415,16 +464,30 @@ func (cc *ConnectionController) refreshNonValidatorOutboundIndex() { func (cc *ConnectionController) refreshNonValidatorInboundIndex() { // First let's check if we have an excess number of inbound remote nodes. If we do, we'll disconnect some of them. 
numConnectedInboundRemoteNodes := uint32(cc.rnManager.GetNonValidatorInboundIndex().Count()) - excessiveInboundRemoteNodes := uint32(0) - if numConnectedInboundRemoteNodes > cc.targetNonValidatorInboundRemoteNodes { - excessiveInboundRemoteNodes = numConnectedInboundRemoteNodes - cc.targetNonValidatorInboundRemoteNodes + if numConnectedInboundRemoteNodes <= cc.targetNonValidatorInboundRemoteNodes { + return } + // Disconnect random inbound non-validators if we have too many of them. inboundRemoteNodes := cc.rnManager.GetNonValidatorInboundIndex().GetAll() + var connectedInboundRemoteNodes []*RemoteNode for _, rn := range inboundRemoteNodes { + // We only want to disconnect remote nodes that have completed handshake. + if rn.IsHandshakeCompleted() { + connectedInboundRemoteNodes = append(connectedInboundRemoteNodes, rn) + } + } + + excessiveInboundRemoteNodes := uint32(0) + if numConnectedInboundRemoteNodes > cc.targetNonValidatorInboundRemoteNodes { + excessiveInboundRemoteNodes = numConnectedInboundRemoteNodes - cc.targetNonValidatorInboundRemoteNodes + } + for _, rn := range connectedInboundRemoteNodes { if excessiveInboundRemoteNodes == 0 { break } + glog.V(2).Infof("ConnectionController.refreshNonValidatorInboundIndex: Disconnecting inbound remote "+ + "node (id=%v) due to excess inbound peers", rn.GetId()) cc.rnManager.Disconnect(rn) excessiveInboundRemoteNodes-- } @@ -444,7 +507,8 @@ func (cc *ConnectionController) connectNonValidators() { } cc.AddrMgr.Attempt(addr) if err := cc.rnManager.CreateNonValidatorOutboundConnection(addr); err != nil { - glog.V(2).Infof("ConnectionController.connectNonValidators: Problem connecting to addr %v: %v", addr, err) + glog.V(2).Infof("ConnectionController.connectNonValidators: Problem creating non-validator outbound "+ + "connection to addr: %v; err: %v", addr, err) } } } @@ -504,14 +568,6 @@ func (cc *ConnectionController) SetTargetOutboundPeers(numPeers uint32) { cc.targetNonValidatorOutboundRemoteNodes = numPeers } -func 
(cc *ConnectionController) enoughNonValidatorInboundConnections() bool { - return uint32(cc.rnManager.GetNonValidatorInboundIndex().Count()) >= cc.targetNonValidatorInboundRemoteNodes -} - -func (cc *ConnectionController) enoughNonValidatorOutboundConnections() bool { - return uint32(cc.rnManager.GetNonValidatorOutboundIndex().Count()) >= cc.targetNonValidatorOutboundRemoteNodes -} - // processInboundConnection is called when a new inbound connection is established. At this point, the connection is not validated, // nor is it assigned to a RemoteNode. This function is responsible for validating the connection and creating a RemoteNode from it. // Once the RemoteNode is created, we will initiate handshake. @@ -522,12 +578,6 @@ func (cc *ConnectionController) processInboundConnection(conn Connection) (*Remo return nil, fmt.Errorf("ConnectionController.handleInboundConnection: Connection is not an inboundConnection") } - // Reject the peer if we have too many inbound connections already. - if cc.enoughNonValidatorInboundConnections() { - return nil, fmt.Errorf("ConnectionController.handleInboundConnection: Rejecting INBOUND peer (%s) due to max "+ - "inbound peers (%d) hit", ic.connection.RemoteAddr().String(), cc.targetNonValidatorInboundRemoteNodes) - } - // If we want to limit inbound connections to one per IP address, check to make sure this address isn't already connected. if cc.limitOneInboundRemoteNodePerIP && cc.isDuplicateInboundIPAddress(ic.connection.RemoteAddr()) { @@ -572,12 +622,6 @@ func (cc *ConnectionController) processOutboundConnection(conn Connection) (*Rem cc.AddrMgr.Good(oc.address) } - // if this is a non-persistent outbound peer, and we already have enough outbound peers, then don't bother adding this one. 
- if !oc.isPersistent && cc.enoughNonValidatorOutboundConnections() { - return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Connected to maximum number of outbound "+ - "peers (%d)", cc.targetNonValidatorOutboundRemoteNodes) - } - // If this is a non-persistent outbound peer and the group key overlaps with another peer we're already connected to then // abort mission. We only connect to one peer per IP group in order to prevent Sybil attacks. if !oc.isPersistent && cc.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { @@ -599,19 +643,13 @@ func (cc *ConnectionController) processOutboundConnection(conn Connection) (*Rem } // If this is a persistent remote node or a validator, we don't need to do any extra connection validation. - if remoteNode.IsPersistent() || remoteNode.GetValidatorPublicKey() != nil { + if remoteNode.IsPersistent() || remoteNode.IsExpectedValidator() { return remoteNode, nil } // If we get here, it means we're dealing with a non-persistent or non-validator remote node. We perform additional // connection validation. - // If we already have enough outbound peers, then don't bother adding this one. - if cc.enoughNonValidatorOutboundConnections() { - return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Connected to maximum number of outbound "+ - "peers (%d)", cc.targetNonValidatorOutboundRemoteNodes) - } - // If the group key overlaps with another peer we're already connected to then abort mission. We only connect to // one peer per IP group in order to prevent Sybil attacks. 
if cc.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { diff --git a/lib/connection_manager.go b/lib/connection_manager.go index 1ba4bf8f1..761fb048e 100644 --- a/lib/connection_manager.go +++ b/lib/connection_manager.go @@ -615,9 +615,11 @@ func (cmgr *ConnectionManager) Start() { "(id= %v)", oc.attemptId) } else { glog.V(2).Infof("ConnectionManager.Start: Successfully established an outbound connection with "+ - "(addr= %v)", oc.connection.RemoteAddr()) + "(addr= %v) (id= %v)", oc.connection.RemoteAddr(), oc.attemptId) } + cmgr.mtxConnectionAttempts.Lock() delete(cmgr.outboundConnectionAttempts, oc.attemptId) + cmgr.mtxConnectionAttempts.Unlock() cmgr.serverMessageQueue <- &ServerMessage{ Peer: nil, Msg: &MsgDeSoNewConnection{ @@ -639,7 +641,7 @@ func (cmgr *ConnectionManager) Start() { // has already been called, since that is what's responsible for adding the peer // to this queue in the first place. - glog.V(1).Infof("Done with peer (%v).", pp) + glog.V(1).Infof("Done with peer (id=%v).", pp.ID) // Remove the peer from our data structures. cmgr.removePeer(pp) diff --git a/lib/constants.go b/lib/constants.go index f92e05f1e..42645283e 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -745,6 +745,9 @@ type DeSoParams struct { // HandshakeTimeoutMicroSeconds is the timeout for the peer handshake certificate. The default value is 15 minutes. HandshakeTimeoutMicroSeconds uint64 + // DisableNetworkManagerRoutines is a testing flag that disables the network manager routines. + DisableNetworkManagerRoutines bool + ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -1164,6 +1167,9 @@ var DeSoMainnetParams = DeSoParams{ // The peer handshake certificate timeout. HandshakeTimeoutMicroSeconds: uint64(900000000), + // DisableNetworkManagerRoutines is a testing flag that disables the network manager routines. 
+ DisableNetworkManagerRoutines: false, + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1437,6 +1443,9 @@ var DeSoTestnetParams = DeSoParams{ // The peer handshake certificate timeout. HandshakeTimeoutMicroSeconds: uint64(900000000), + // DisableNetworkManagerRoutines is a testing flag that disables the network manager routines. + DisableNetworkManagerRoutines: false, + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), diff --git a/lib/handshake_controller.go b/lib/handshake_controller.go index f355bad93..385e36275 100644 --- a/lib/handshake_controller.go +++ b/lib/handshake_controller.go @@ -38,18 +38,13 @@ func (hc *HandshakeController) InitiateHandshake(rn *RemoteNode) { hc.usedNonces.Add(nonce) } -// _handleHandshakeCompleteMessage handles HandshakeComplete control messages, sent by RemoteNodes. -func (hc *HandshakeController) _handleHandshakeCompleteMessage(origin *Peer, desoMsg DeSoMessage) { +// handleHandshakeComplete handles HandshakeComplete control messages, sent by RemoteNodes. +func (hc *HandshakeController) handleHandshakeComplete(remoteNode *RemoteNode) { // Prevent race conditions while handling handshake complete messages. hc.mtxHandshakeComplete.Lock() defer hc.mtxHandshakeComplete.Unlock() - if desoMsg.GetMsgType() != MsgTypePeerHandshakeComplete { - return - } - // Get the handshake information of this peer. 
- remoteNode := hc.rnManager.GetRemoteNodeFromPeer(origin) if remoteNode == nil { return } @@ -60,7 +55,8 @@ func (hc *HandshakeController) _handleHandshakeCompleteMessage(origin *Peer, des } if err := hc.handleHandshakeCompletePoSMessage(remoteNode); err != nil { - glog.Errorf("HandshakeController._handleHandshakeCompleteMessage: Error handling PoS handshake peer message: %v", err) + glog.Errorf("HandshakeController.handleHandshakeComplete: Error handling PoS handshake peer message: %v, "+ + "remoteNodePk (%s)", err, remoteNode.GetValidatorPublicKey().Serialize()) hc.rnManager.Disconnect(remoteNode) return } @@ -165,6 +161,8 @@ func (hc *HandshakeController) _handleVerackMessage(origin *Peer, desoMsg DeSoMe glog.Errorf("HandshakeController._handleVerackMessage: Requesting PeerDisconnect for id: (%v) "+ "error handling verack message: %v", origin.ID, err) hc.rnManager.Disconnect(rn) + return } - return + + hc.handleHandshakeComplete(rn) } diff --git a/lib/network.go b/lib/network.go index 23bc86765..120348c6c 100644 --- a/lib/network.go +++ b/lib/network.go @@ -854,23 +854,6 @@ func (msg *MsgDeSoDisconnectedPeer) FromBytes(data []byte) error { return fmt.Errorf("MsgDeSoDisconnectedPeer.FromBytes not implemented") } -// MsgDeSoPeerHandshakeComplete is a control message that is used to internally signal when a peer has -// connected and completed the Version + Verack handshake and authentication process. 
-type MsgDeSoPeerHandshakeComplete struct { -} - -func (msg *MsgDeSoPeerHandshakeComplete) GetMsgType() MsgType { - return MsgTypePeerHandshakeComplete -} - -func (msg *MsgDeSoPeerHandshakeComplete) ToBytes(preSignature bool) ([]byte, error) { - return nil, fmt.Errorf("MsgDeSoPeerHandshakeComplete.ToBytes: Not implemented") -} - -func (msg *MsgDeSoPeerHandshakeComplete) FromBytes(data []byte) error { - return fmt.Errorf("MsgDeSoPeerHandshakeComplete.FromBytes not implemented") -} - type ConnectionType uint8 const ( diff --git a/lib/network_connection.go b/lib/network_connection.go index ffb0bb1f1..4d50d22a8 100644 --- a/lib/network_connection.go +++ b/lib/network_connection.go @@ -91,8 +91,9 @@ type OutboundConnectionAttempt struct { // connectionChan is used to send the result of the connection attempt to the caller thread. connectionChan chan *outboundConnection - exitChan chan bool - status outboundConnectionAttemptStatus + startGroup sync.WaitGroup + exitChan chan bool + status outboundConnectionAttemptStatus } type outboundConnectionAttemptStatus int @@ -126,11 +127,14 @@ func (oca *OutboundConnectionAttempt) Start() { return } + oca.startGroup.Add(1) go oca.start() + oca.startGroup.Wait() oca.status = outboundConnectionAttemptRunning } func (oca *OutboundConnectionAttempt) start() { + oca.startGroup.Done() oca.retryCount = 0 out: @@ -198,7 +202,7 @@ func (oca *OutboundConnectionAttempt) SetTimeoutUnit(timeoutUnit time.Duration) // Otherwise, it will return nil. func (oca *OutboundConnectionAttempt) attemptOutboundConnection() net.Conn { // If the peer is not persistent, update the addrmgr. 
- glog.V(1).Infof("Attempting to connect to addr: %v", oca.netAddr.IP.String()) + glog.V(1).Infof("Attempting to connect to addr: %v:%v", oca.netAddr.IP.String(), oca.netAddr.Port) var err error tcpAddr := net.TCPAddr{ diff --git a/lib/peer.go b/lib/peer.go index 0af9aa0b7..e0aae7e77 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -5,6 +5,7 @@ import ( "github.com/decred/dcrd/lru" "net" "sort" + "sync" "sync/atomic" "time" @@ -140,6 +141,9 @@ type Peer struct { // SyncType indicates whether blocksync should not be requested for this peer. If set to true // then we'll only hypersync from this peer. syncType NodeSyncType + + // startGroup ensures that all the Peer's go routines are started when we call Start(). + startGroup sync.WaitGroup } func (pp *Peer) GetId() uint64 { @@ -550,6 +554,7 @@ func (pp *Peer) cleanupMessageProcessor() { } func (pp *Peer) StartDeSoMessageProcessor() { + pp.startGroup.Done() glog.Infof("StartDeSoMessageProcessor: Starting for peer %v", pp) for { if pp.disconnected != 0 { @@ -739,6 +744,7 @@ func (pp *Peer) HandlePongMsg(msg *MsgDeSoPong) { } func (pp *Peer) PingHandler() { + pp.startGroup.Done() glog.V(1).Infof("Peer.PingHandler: Starting ping handler for Peer %v", pp) pingTicker := time.NewTicker(pingInterval) defer pingTicker.Stop() @@ -908,6 +914,7 @@ func (pp *Peer) _setKnownAddressesMap(key string, val bool) { } func (pp *Peer) outHandler() { + pp.startGroup.Done() glog.V(1).Infof("Peer.outHandler: Starting outHandler for Peer %v", pp) stallTicker := time.NewTicker(time.Second) out: @@ -1087,6 +1094,7 @@ func (pp *Peer) _handleInExpectedResponse(rmsg DeSoMessage) error { // inHandler handles all incoming messages for the peer. It must be run as a // goroutine. 
func (pp *Peer) inHandler() { + pp.startGroup.Done() glog.V(1).Infof("Peer.inHandler: Starting inHandler for Peer %v", pp) // The timer is stopped when a new message is received and reset after it @@ -1184,10 +1192,12 @@ func (pp *Peer) Start() { glog.Infof("Peer.Start: Starting peer %v", pp) // The protocol has been negotiated successfully so start processing input // and output messages. + pp.startGroup.Add(4) go pp.PingHandler() go pp.outHandler() go pp.inHandler() go pp.StartDeSoMessageProcessor() + pp.startGroup.Wait() // If the address manager needs more addresses, then send a GetAddr message // to the peer. This is best-effort. @@ -1290,7 +1300,7 @@ func (pp *Peer) Disconnect() { } atomic.AddInt32(&pp.disconnected, 1) - glog.V(1).Infof("Peer.Disconnect: Running Disconnect for the first time for Peer %v", pp) + glog.V(2).Infof("Peer.Disconnect: Running Disconnect for the first time for Peer %v", pp) // Close the connection object. pp.Conn.Close() diff --git a/lib/remote_node.go b/lib/remote_node.go index 5ba651f3f..b74ba9e23 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -63,6 +63,12 @@ type RemoteNode struct { // the RemoteNode is instantiated. And for inbound validator nodes, the validatorPublicKey will be set when the // handshake is completed. validatorPublicKey *bls.PublicKey + // isPersistent identifies whether the RemoteNode is persistent or not. Persistent RemoteNodes is a sub-category of + // outbound RemoteNodes. They are different from non-persistent RemoteNodes from the very moment they are created. + // Initially, an outbound RemoteNode is in an "attempted" state, meaning we dial the connection to the peer. The + // non-persistent RemoteNode is terminated after the first failed dial, while a persistent RemoteNode will keep + // trying to dial the peer indefinitely until the connection is established, or the node stops. 
+ isPersistent bool connectionStatus RemoteNodeStatus @@ -128,11 +134,13 @@ func NewHandshakeMetadata() *HandshakeMetadata { return &HandshakeMetadata{} } -func NewRemoteNode(id RemoteNodeId, validatorPublicKey *bls.PublicKey, srv *Server, cmgr *ConnectionManager, keystore *BLSKeystore, - params *DeSoParams, minTxFeeRateNanosPerKB uint64, latestBlockHeight uint64, nodeServices ServiceFlag) *RemoteNode { +func NewRemoteNode(id RemoteNodeId, validatorPublicKey *bls.PublicKey, isPersistent bool, srv *Server, + cmgr *ConnectionManager, keystore *BLSKeystore, params *DeSoParams, minTxFeeRateNanosPerKB uint64, + latestBlockHeight uint64, nodeServices ServiceFlag) *RemoteNode { return &RemoteNode{ id: id, validatorPublicKey: validatorPublicKey, + isPersistent: isPersistent, connectionStatus: RemoteNodeStatus_NotConnected, handshakeMetadata: NewHandshakeMetadata(), srv: srv, @@ -208,7 +216,7 @@ func (rn *RemoteNode) IsOutbound() bool { } func (rn *RemoteNode) IsPersistent() bool { - return rn.peer != nil && rn.peer.IsPersistent() + return rn.isPersistent } func (rn *RemoteNode) IsNotConnected() bool { @@ -219,6 +227,14 @@ func (rn *RemoteNode) IsConnected() bool { return rn.connectionStatus == RemoteNodeStatus_Connected } +func (rn *RemoteNode) IsVersionSent() bool { + return rn.connectionStatus == RemoteNodeStatus_VersionSent +} + +func (rn *RemoteNode) IsVerackSent() bool { + return rn.connectionStatus == RemoteNodeStatus_VerackSent +} + func (rn *RemoteNode) IsHandshakeCompleted() bool { return rn.connectionStatus == RemoteNodeStatus_HandshakeCompleted } @@ -234,6 +250,10 @@ func (rn *RemoteNode) IsValidator() bool { return rn.hasValidatorServiceFlag() } +func (rn *RemoteNode) IsExpectedValidator() bool { + return rn.GetValidatorPublicKey() != nil +} + func (rn *RemoteNode) hasValidatorServiceFlag() bool { return rn.GetServiceFlag().HasService(SFPosValidator) } @@ -304,6 +324,8 @@ func (rn *RemoteNode) Disconnect() { if rn.connectionStatus == RemoteNodeStatus_Terminated 
{ return } + glog.V(2).Infof("RemoteNode.Disconnect: Disconnecting from peer (id= %d, status= %v)", + rn.id, rn.connectionStatus) id := rn.GetId().ToUint64() switch rn.connectionStatus { @@ -344,9 +366,9 @@ func (rn *RemoteNode) InitiateHandshake(nonce uint64) error { return fmt.Errorf("InitiateHandshake: Remote node is not connected") } + versionTimeExpected := time.Now().Add(rn.params.VersionNegotiationTimeout) + rn.versionTimeExpected = &versionTimeExpected if rn.GetPeer().IsOutbound() { - versionTimeExpected := time.Now().Add(rn.params.VersionNegotiationTimeout) - rn.versionTimeExpected = &versionTimeExpected if err := rn.sendVersionMessage(nonce); err != nil { return fmt.Errorf("InitiateHandshake: Problem sending version message to peer (id= %d): %v", rn.id, err) } @@ -397,6 +419,19 @@ func (rn *RemoteNode) newVersionMessage(nonce uint64) *MsgDeSoVersion { return ver } +func (rn *RemoteNode) IsTimedOut() bool { + if rn.IsTerminated() { + return true + } + if rn.IsConnected() || rn.IsVersionSent() { + return rn.versionTimeExpected.Before(time.Now()) + } + if rn.IsVerackSent() { + return rn.verackTimeExpected.Before(time.Now()) + } + return false +} + // HandleVersionMessage is called upon receiving a version message from the RemoteNode's peer. The peer may be the one // initiating the handshake, in which case, we should respond with our own version message. To do this, we pass the // responseNonce to this function, which we will use in our response version message. 
@@ -404,7 +439,7 @@ func (rn *RemoteNode) HandleVersionMessage(verMsg *MsgDeSoVersion, responseNonce rn.mtx.Lock() defer rn.mtx.Unlock() - if rn.connectionStatus != RemoteNodeStatus_Connected && rn.connectionStatus != RemoteNodeStatus_VersionSent { + if !rn.IsConnected() && !rn.IsVersionSent() { return fmt.Errorf("HandleVersionMessage: RemoteNode is not connected or version exchange has already "+ "been completed, connectionStatus: %v", rn.connectionStatus) } @@ -416,7 +451,7 @@ func (rn *RemoteNode) HandleVersionMessage(verMsg *MsgDeSoVersion, responseNonce } // Verify that the peer's version message is sent within the version negotiation timeout. - if rn.versionTimeExpected != nil && rn.versionTimeExpected.Before(time.Now()) { + if rn.versionTimeExpected.Before(time.Now()) { return fmt.Errorf("RemoteNode.HandleVersionMessage: Requesting disconnect for id: (%v) "+ "version timeout. Time expected: %v, now: %v", rn.id, rn.versionTimeExpected.UnixMicro(), time.Now().UnixMicro()) } @@ -580,7 +615,6 @@ func (rn *RemoteNode) HandleVerackMessage(vrkMsg *MsgDeSoVerack) error { vMeta.versionNegotiated = true rn._logVersionSuccess() rn.setStatusHandshakeCompleted() - rn.srv.NotifyHandshakePeerMessage(rn.peer) return nil } diff --git a/lib/remote_node_manager.go b/lib/remote_node_manager.go index 02bed8e3e..2457af58f 100644 --- a/lib/remote_node_manager.go +++ b/lib/remote_node_manager.go @@ -51,12 +51,19 @@ func NewRemoteNodeManager(srv *Server, bc *Blockchain, cmgr *ConnectionManager, } } -func (manager *RemoteNodeManager) newRemoteNode(validatorPublicKey *bls.PublicKey) *RemoteNode { +func (manager *RemoteNodeManager) DisconnectAll() { + for _, rn := range manager.GetAllRemoteNodes().GetAll() { + glog.V(2).Infof("RemoteNodeManager.DisconnectAll: Disconnecting from remote node (id=%v)", rn.GetId()) + manager.Disconnect(rn) + } +} + +func (manager *RemoteNodeManager) newRemoteNode(validatorPublicKey *bls.PublicKey, isPersistent bool) *RemoteNode { id := 
atomic.AddUint64(&manager.remoteNodeIndex, 1) remoteNodeId := NewRemoteNodeId(id) latestBlockHeight := uint64(manager.bc.BlockTip().Height) - return NewRemoteNode(remoteNodeId, validatorPublicKey, manager.srv, manager.cmgr, manager.keystore, manager.params, - manager.minTxFeeRateNanosPerKB, latestBlockHeight, manager.nodeServices) + return NewRemoteNode(remoteNodeId, validatorPublicKey, isPersistent, manager.srv, manager.cmgr, manager.keystore, + manager.params, manager.minTxFeeRateNanosPerKB, latestBlockHeight, manager.nodeServices) } func (manager *RemoteNodeManager) ProcessCompletedHandshake(remoteNode *RemoteNode) { @@ -78,7 +85,7 @@ func (manager *RemoteNodeManager) Disconnect(rn *RemoteNode) { if rn == nil { return } - glog.V(2).Infof("RemoteNodeManager.Disconnect: Disconnecting from remote node %v", rn.GetId()) + glog.V(2).Infof("RemoteNodeManager.Disconnect: Disconnecting from remote node id=%v", rn.GetId()) rn.Disconnect() manager.removeRemoteNodeFromIndexer(rn) } @@ -126,6 +133,18 @@ func (manager *RemoteNodeManager) SendMessage(rn *RemoteNode, desoMessage DeSoMe return rn.SendMessage(desoMessage) } +func (manager *RemoteNodeManager) Cleanup() { + manager.mtx.Lock() + defer manager.mtx.Unlock() + + for _, rn := range manager.GetAllRemoteNodes().GetAll() { + if rn.IsTimedOut() { + glog.V(2).Infof("RemoteNodeManager.Cleanup: Disconnecting from remote node (id=%v)", rn.GetId()) + manager.Disconnect(rn) + } + } +} + // ########################### // ## Create RemoteNode // ########################### @@ -139,7 +158,7 @@ func (manager *RemoteNodeManager) CreateValidatorConnection(netAddr *wire.NetAdd return fmt.Errorf("RemoteNodeManager.CreateValidatorConnection: RemoteNode already exists for public key: %v", publicKey) } - remoteNode := manager.newRemoteNode(publicKey) + remoteNode := manager.newRemoteNode(publicKey, false) if err := remoteNode.DialOutboundConnection(netAddr); err != nil { return errors.Wrapf(err, "RemoteNodeManager.CreateValidatorConnection: 
Problem calling DialPersistentOutboundConnection "+ "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) @@ -154,7 +173,7 @@ func (manager *RemoteNodeManager) CreateNonValidatorPersistentOutboundConnection return 0, fmt.Errorf("RemoteNodeManager.CreateNonValidatorPersistentOutboundConnection: netAddr is nil") } - remoteNode := manager.newRemoteNode(nil) + remoteNode := manager.newRemoteNode(nil, true) if err := remoteNode.DialPersistentOutboundConnection(netAddr); err != nil { return 0, errors.Wrapf(err, "RemoteNodeManager.CreateNonValidatorPersistentOutboundConnection: Problem calling DialPersistentOutboundConnection "+ "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) @@ -169,7 +188,7 @@ func (manager *RemoteNodeManager) CreateNonValidatorOutboundConnection(netAddr * return fmt.Errorf("RemoteNodeManager.CreateNonValidatorOutboundConnection: netAddr is nil") } - remoteNode := manager.newRemoteNode(nil) + remoteNode := manager.newRemoteNode(nil, false) if err := remoteNode.DialOutboundConnection(netAddr); err != nil { return errors.Wrapf(err, "RemoteNodeManager.CreateNonValidatorOutboundConnection: Problem calling DialOutboundConnection "+ "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) @@ -182,7 +201,7 @@ func (manager *RemoteNodeManager) CreateNonValidatorOutboundConnection(netAddr * func (manager *RemoteNodeManager) AttachInboundConnection(conn net.Conn, na *wire.NetAddress) (*RemoteNode, error) { - remoteNode := manager.newRemoteNode(nil) + remoteNode := manager.newRemoteNode(nil, false) if err := remoteNode.AttachInboundConnection(conn, na); err != nil { return remoteNode, errors.Wrapf(err, "RemoteNodeManager.AttachInboundConnection: Problem calling AttachInboundConnection "+ "for addr: (%s)", conn.RemoteAddr().String()) @@ -205,7 +224,7 @@ func (manager *RemoteNodeManager) AttachOutboundConnection(conn net.Conn, na *wi if err := remoteNode.AttachOutboundConnection(conn, na, isPersistent); err != nil { manager.Disconnect(remoteNode) return 
nil, errors.Wrapf(err, "RemoteNodeManager.AttachOutboundConnection: Problem calling AttachOutboundConnection "+ - "for addr: (%s)", conn.RemoteAddr().String()) + "for addr: (%s). Disconnecting remote node (id=%v)", conn.RemoteAddr().String(), remoteNode.GetId()) } return remoteNode, nil diff --git a/lib/server.go b/lib/server.go index 1641770cc..0ef8afae5 100644 --- a/lib/server.go +++ b/lib/server.go @@ -450,7 +450,7 @@ func NewServer( timesource := chainlib.NewMedianTime() // Create a new connection manager but note that it won't be initialized until Start(). - _incomingMessages := make(chan *ServerMessage, (_targetOutboundPeers+_maxInboundPeers)*3) + _incomingMessages := make(chan *ServerMessage, 100+(_targetOutboundPeers+_maxInboundPeers)*3) _cmgr := NewConnectionManager( _params, _listeners, _connectIps, timesource, _hyperSync, _syncType, _stallTimeoutSeconds, _minFeeRateNanosPerKB, @@ -739,13 +739,6 @@ func (srv *Server) GetSnapshot(pp *Peer) { "with Prefix (%v) and SnapshotStartEntry (%v)", pp, prefix, lastReceivedKey) } -func (srv *Server) NotifyHandshakePeerMessage(peer *Peer) { - srv.incomingMessages <- &ServerMessage{ - Peer: peer, - Msg: &MsgDeSoPeerHandshakeComplete{}, - } -} - // GetBlocksToStore is part of the archival mode, which makes the node download all historical blocks after completing // hypersync. We will go through all blocks corresponding to the snapshot and download the blocks. func (srv *Server) GetBlocksToStore(pp *Peer) { @@ -2264,8 +2257,6 @@ func (srv *Server) _handleGetAddrMessage(pp *Peer, msg *MsgDeSoGetAddr) { func (srv *Server) _handleControlMessages(serverMessage *ServerMessage) (_shouldQuit bool) { switch serverMessage.Msg.(type) { // Control messages used internally to signal to the server. 
- case *MsgDeSoPeerHandshakeComplete: - srv.handshakeController._handleHandshakeCompleteMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoDisconnectedPeer: srv._handleDonePeer(serverMessage.Peer) srv.connectionController._handleDonePeerMessage(serverMessage.Peer, serverMessage.Msg) From f60ec17b3029d59d39664969cd2afee4cfadb0b6 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 29 Jan 2024 21:13:07 -0800 Subject: [PATCH 408/762] PoS NetworkManager Address (#957) * Revert "Another split" This reverts commit eaeec5875a84621b4888fc2a6104e9904e7ef53d. * Revert routine stops * gofmt * Add addrMgr to Server * Review --- integration_testing/connection_bridge.go | 6 +- lib/connection_controller.go | 18 +-- lib/connection_manager.go | 12 +- lib/peer.go | 13 +- lib/remote_node.go | 7 ++ lib/server.go | 154 ++++++++++++++--------- 6 files changed, 114 insertions(+), 96 deletions(-) diff --git a/integration_testing/connection_bridge.go b/integration_testing/connection_bridge.go index f6a9897ed..139c7cafb 100644 --- a/integration_testing/connection_bridge.go +++ b/integration_testing/connection_bridge.go @@ -113,13 +113,12 @@ func (bridge *ConnectionBridge) createInboundConnection(node *cmd.Node) *lib.Pee // This channel is redundant in our setting. messagesFromPeer := make(chan *lib.ServerMessage, 100) - newPeerChan := make(chan *lib.Peer, 100) donePeerChan := make(chan *lib.Peer, 100) // Because it is an inbound Peer of the node, it is simultaneously a "fake" outbound Peer of the bridge. // Hence, we will mark the _isOutbound parameter as "true" in NewPeer. 
peer := lib.NewPeer(uint64(lib.RandInt64(math.MaxInt64)), conn, true, netAddress, true, 10000, 0, &lib.DeSoMainnetParams, - messagesFromPeer, nil, nil, lib.NodeSyncTypeAny, newPeerChan, donePeerChan) + messagesFromPeer, nil, nil, lib.NodeSyncTypeAny, donePeerChan) return peer } @@ -144,11 +143,10 @@ func (bridge *ConnectionBridge) createOutboundConnection(node *cmd.Node, otherNo addrMgr := addrmgr.New("", net.LookupIP) na, err := lib.IPToNetAddr(conn.RemoteAddr().String(), addrMgr, otherNode.Params) messagesFromPeer := make(chan *lib.ServerMessage, 100) - newPeerChan := make(chan *lib.Peer, 100) donePeerChan := make(chan *lib.Peer, 100) peer := lib.NewPeer(uint64(lib.RandInt64(math.MaxInt64)), conn, false, na, false, 10000, 0, bridge.nodeB.Params, - messagesFromPeer, nil, nil, lib.NodeSyncTypeAny, newPeerChan, donePeerChan) + messagesFromPeer, nil, nil, lib.NodeSyncTypeAny, donePeerChan) bridge.newPeerChan <- peer //} }(ll) diff --git a/lib/connection_controller.go b/lib/connection_controller.go index ac8467b09..ef021e100 100644 --- a/lib/connection_controller.go +++ b/lib/connection_controller.go @@ -97,11 +97,11 @@ func (cc *ConnectionController) Start() { go cc.startRemoteNodeCleanup() cc.startGroup.Wait() - cc.exitGroup.Add(4) } func (cc *ConnectionController) Stop() { if !cc.params.DisableNetworkManagerRoutines { + cc.exitGroup.Add(4) close(cc.exitChan) cc.exitGroup.Wait() } @@ -201,22 +201,6 @@ func (cc *ConnectionController) _handleDonePeerMessage(origin *Peer, desoMsg DeS } } -func (cc *ConnectionController) _handleAddrMessage(origin *Peer, desoMsg DeSoMessage) { - if desoMsg.GetMsgType() != MsgTypeAddr { - return - } - - // TODO -} - -func (cc *ConnectionController) _handleGetAddrMessage(origin *Peer, desoMsg DeSoMessage) { - if desoMsg.GetMsgType() != MsgTypeGetAddr { - return - } - - // TODO -} - // _handleNewConnectionMessage is called when a new outbound or inbound connection is established. 
It is responsible // for creating a RemoteNode from the connection and initiating the handshake. The incoming DeSoMessage is a control message. func (cc *ConnectionController) _handleNewConnectionMessage(origin *Peer, desoMsg DeSoMessage) { diff --git a/lib/connection_manager.go b/lib/connection_manager.go index 761fb048e..ffc56aa25 100644 --- a/lib/connection_manager.go +++ b/lib/connection_manager.go @@ -105,9 +105,8 @@ type ConnectionManager struct { // peers' time. timeSource chainlib.MedianTimeSource - // Events that can happen to a peer. - newPeerChan chan *Peer - donePeerChan chan *Peer + // peerDisconnectedChan is notified whenever a peer exits. + peerDisconnectedChan chan *Peer // stallTimeoutSeconds is how long we wait to receive responses from Peers // for certain types of messages. @@ -152,8 +151,7 @@ func NewConnectionManager( attemptedOutboundAddrs: make(map[string]bool), // Initialize the channels. - newPeerChan: make(chan *Peer, 100), - donePeerChan: make(chan *Peer, 100), + peerDisconnectedChan: make(chan *Peer, 100), outboundConnectionChan: make(chan *outboundConnection, 100), inboundConnectionChan: make(chan *inboundConnection, 100), @@ -301,7 +299,7 @@ func (cmgr *ConnectionManager) ConnectPeer(id uint64, conn net.Conn, na *wire.Ne cmgr.minFeeRateNanosPerKB, cmgr.params, cmgr.srv.incomingMessages, cmgr, cmgr.srv, cmgr.SyncType, - cmgr.newPeerChan, cmgr.donePeerChan) + cmgr.peerDisconnectedChan) // Now we can add the peer to our data structures. 
peer._logAddPeer() @@ -635,7 +633,7 @@ func (cmgr *ConnectionManager) Start() { Connection: ic, }, } - case pp := <-cmgr.donePeerChan: + case pp := <-cmgr.peerDisconnectedChan: { // By the time we get here, it can be assumed that the Peer's Disconnect function // has already been called, since that is what's responsible for adding the peer diff --git a/lib/peer.go b/lib/peer.go index e0aae7e77..c59d77921 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -100,9 +100,8 @@ type Peer struct { knownAddressesMap map[string]bool // Output queue for messages that need to be sent to the peer. - outputQueueChan chan DeSoMessage - newPeerChan chan *Peer - donePeerChan chan *Peer + outputQueueChan chan DeSoMessage + peerDisconnectedChan chan *Peer // Set to zero until Disconnect has been called on the Peer. Used to make it // so that the logic in Disconnect will only be executed once. @@ -625,8 +624,7 @@ func NewPeer(_id uint64, _conn net.Conn, _isOutbound bool, _netAddr *wire.NetAdd messageChan chan *ServerMessage, _cmgr *ConnectionManager, _srv *Server, _syncType NodeSyncType, - newPeerChan chan *Peer, - donePeerChan chan *Peer) *Peer { + peerDisconnectedChan chan *Peer) *Peer { pp := Peer{ ID: _id, @@ -638,8 +636,7 @@ func NewPeer(_id uint64, _conn net.Conn, _isOutbound bool, _netAddr *wire.NetAdd isOutbound: _isOutbound, isPersistent: _isPersistent, outputQueueChan: make(chan DeSoMessage), - newPeerChan: newPeerChan, - donePeerChan: donePeerChan, + peerDisconnectedChan: peerDisconnectedChan, quit: make(chan interface{}), knownInventory: lru.NewCache(maxKnownInventory), blocksToSend: make(map[BlockHash]bool), @@ -1310,7 +1307,7 @@ func (pp *Peer) Disconnect() { // Add the Peer to donePeers so that the ConnectionManager and Server can do any // cleanup they need to do. 
- pp.donePeerChan <- pp + pp.peerDisconnectedChan <- pp } func (pp *Peer) _logVersionSuccess() { diff --git a/lib/remote_node.go b/lib/remote_node.go index b74ba9e23..42fe21521 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -207,6 +207,13 @@ func (rn *RemoteNode) GetUserAgent() string { return rn.handshakeMetadata.userAgent } +func (rn *RemoteNode) GetNetAddress() *wire.NetAddress { + if !rn.IsHandshakeCompleted() || rn.GetPeer() == nil { + return nil + } + return rn.GetPeer().NetAddress() +} + func (rn *RemoteNode) IsInbound() bool { return rn.peer != nil && !rn.peer.IsOutbound() } diff --git a/lib/server.go b/lib/server.go index 0ef8afae5..987431af8 100644 --- a/lib/server.go +++ b/lib/server.go @@ -68,6 +68,8 @@ type Server struct { connectionController *ConnectionController // posMempool *PosMemPool TODO: Add the mempool later + params *DeSoParams + // All messages received from peers get sent from the ConnectionManager to the // Server through this channel. // @@ -130,7 +132,9 @@ type Server struct { // It is organized in this way so that we can limit the number of addresses we // are distributing for a single peer to avoid a DOS attack. addrsToBroadcastLock deadlock.RWMutex - addrsToBroadcastt map[string][]*SingleAddr + addrsToBroadcast map[string][]*SingleAddr + + AddrMgr *addrmgr.AddrManager // When set to true, we disable the ConnectionManager DisableNetworking bool @@ -438,6 +442,7 @@ func NewServer( snapshot: _snapshot, nodeMessageChannel: _nodeMessageChan, forceChecksum: _forceChecksum, + AddrMgr: _desoAddrMgr, } if stateChangeSyncer != nil { @@ -590,7 +595,7 @@ func NewServer( } // Initialize the addrs to broadcast map. - srv.addrsToBroadcastt = make(map[string][]*SingleAddr) + srv.addrsToBroadcast = make(map[string][]*SingleAddr) // This will initialize the request queues. 
srv.ResetRequestQueues() @@ -2171,20 +2176,33 @@ func (srv *Server) StartStatsdReporter() { }() } -func (srv *Server) _handleAddrMessage(pp *Peer, msg *MsgDeSoAddr) { +func (srv *Server) _handleAddrMessage(pp *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeAddr { + return + } + + id := NewRemoteNodeId(pp.ID) + var msg *MsgDeSoAddr + var ok bool + if msg, ok = desoMsg.(*MsgDeSoAddr); !ok { + glog.Errorf("Server._handleAddrMessage: Problem decoding MsgDeSoAddr: %v", spew.Sdump(desoMsg)) + srv.connectionController.rnManager.DisconnectById(id) + return + } + srv.addrsToBroadcastLock.Lock() defer srv.addrsToBroadcastLock.Unlock() - glog.V(1).Infof("Server._handleAddrMessage: Received Addr from peer %v with addrs %v", pp, spew.Sdump(msg.AddrList)) + glog.V(1).Infof("Server._handleAddrMessage: Received Addr from peer id=%v with addrs %v", pp.ID, spew.Sdump(msg.AddrList)) // If this addr message contains more than the maximum allowed number of addresses // then disconnect this peer. 
if len(msg.AddrList) > MaxAddrsPerAddrMsg { glog.Errorf(fmt.Sprintf("Server._handleAddrMessage: Disconnecting "+ - "Peer %v for sending us an addr message with %d transactions, which exceeds "+ + "Peer id=%v for sending us an addr message with %d transactions, which exceeds "+ "the max allowed %d", - pp, len(msg.AddrList), MaxAddrsPerAddrMsg)) - pp.Disconnect() + pp.ID, len(msg.AddrList), MaxAddrsPerAddrMsg)) + srv.connectionController.rnManager.DisconnectById(id) return } @@ -2193,19 +2211,16 @@ func (srv *Server) _handleAddrMessage(pp *Peer, msg *MsgDeSoAddr) { for _, addr := range msg.AddrList { addrAsNetAddr := wire.NewNetAddressIPPort(addr.IP, addr.Port, (wire.ServiceFlag)(addr.Services)) if !addrmgr.IsRoutable(addrAsNetAddr) { - glog.V(1).Infof("Dropping address %v from peer %v because it is not routable", addr, pp) + glog.V(1).Infof("Server._handleAddrMessage: Dropping address %v from peer %v because it is not routable", addr, pp) continue } netAddrsReceived = append( netAddrsReceived, addrAsNetAddr) } - // TODO: temporary - addressMgr := addrmgr.New("", net.LookupIP) - addressMgr.AddAddresses(netAddrsReceived, pp.netAddr) + srv.AddrMgr.AddAddresses(netAddrsReceived, pp.netAddr) - // If the message had <= 10 addrs in it, then queue all the addresses for relaying - // on the next cycle. + // If the message had <= 10 addrs in it, then queue all the addresses for relaying on the next cycle. 
if len(msg.AddrList) <= 10 { glog.V(1).Infof("Server._handleAddrMessage: Queueing %d addrs for forwarding from "+ "peer %v", len(msg.AddrList), pp) @@ -2215,7 +2230,7 @@ func (srv *Server) _handleAddrMessage(pp *Peer, msg *MsgDeSoAddr) { Port: pp.netAddr.Port, Services: pp.serviceFlags, } - listToAddTo, hasSeenSource := srv.addrsToBroadcastt[sourceAddr.StringWithPort(false /*includePort*/)] + listToAddTo, hasSeenSource := srv.addrsToBroadcast[sourceAddr.StringWithPort(false /*includePort*/)] if !hasSeenSource { listToAddTo = []*SingleAddr{} } @@ -2225,17 +2240,27 @@ func (srv *Server) _handleAddrMessage(pp *Peer, msg *MsgDeSoAddr) { listToAddTo = listToAddTo[:MaxAddrsPerAddrMsg/2] } listToAddTo = append(listToAddTo, msg.AddrList...) - srv.addrsToBroadcastt[sourceAddr.StringWithPort(false /*includePort*/)] = listToAddTo + srv.addrsToBroadcast[sourceAddr.StringWithPort(false /*includePort*/)] = listToAddTo } } -func (srv *Server) _handleGetAddrMessage(pp *Peer, msg *MsgDeSoGetAddr) { +func (srv *Server) _handleGetAddrMessage(pp *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeGetAddr { + return + } + + id := NewRemoteNodeId(pp.ID) + if _, ok := desoMsg.(*MsgDeSoGetAddr); !ok { + glog.Errorf("Server._handleAddrMessage: Problem decoding "+ + "MsgDeSoAddr: %v", spew.Sdump(desoMsg)) + srv.connectionController.rnManager.DisconnectById(id) + return + } + glog.V(1).Infof("Server._handleGetAddrMessage: Received GetAddr from peer %v", pp) // When we get a GetAddr message, choose MaxAddrsPerMsg from the AddrMgr // and send them back to the peer. 
- // TODO: temporary - addressMgr := addrmgr.New("", net.LookupIP) - netAddrsFound := addressMgr.AddressCache() + netAddrsFound := srv.AddrMgr.AddressCache() if len(netAddrsFound) > MaxAddrsPerAddrMsg { netAddrsFound = netAddrsFound[:MaxAddrsPerAddrMsg] } @@ -2251,7 +2276,12 @@ func (srv *Server) _handleGetAddrMessage(pp *Peer, msg *MsgDeSoGetAddr) { } res.AddrList = append(res.AddrList, singleAddr) } - pp.AddDeSoMessage(res, false) + rn := srv.connectionController.rnManager.GetRemoteNodeById(id) + if err := srv.connectionController.rnManager.SendMessage(rn, res); err != nil { + glog.Errorf("Server._handleGetAddrMessage: Problem sending addr message to peer %v: %v", pp, err) + srv.connectionController.rnManager.DisconnectById(id) + return + } } func (srv *Server) _handleControlMessages(serverMessage *ServerMessage) (_shouldQuit bool) { @@ -2274,9 +2304,9 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { switch msg := serverMessage.Msg.(type) { // Messages sent among peers. case *MsgDeSoAddr: - srv.connectionController._handleAddrMessage(serverMessage.Peer, serverMessage.Msg) + srv._handleAddrMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoGetAddr: - srv.connectionController._handleGetAddrMessage(serverMessage.Peer, serverMessage.Msg) + srv._handleGetAddrMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoGetHeaders: srv._handleGetHeaders(serverMessage.Peer, msg) case *MsgDeSoHeaderBundle: @@ -2399,20 +2429,6 @@ func (srv *Server) _startConsensus() { glog.V(2).Infof("Server._startConsensus: Handling message of type %v from Peer %v", serverMessage.Msg.GetMsgType(), serverMessage.Peer) - - // If the message is an addr message we handle it independent of whether or - // not the BitcoinManager is synced. 
- if serverMessage.Msg.GetMsgType() == MsgTypeAddr { - srv._handleAddrMessage(serverMessage.Peer, serverMessage.Msg.(*MsgDeSoAddr)) - continue - } - // If the message is a GetAddr message we handle it independent of whether or - // not the BitcoinManager is synced. - if serverMessage.Msg.GetMsgType() == MsgTypeGetAddr { - srv._handleGetAddrMessage(serverMessage.Peer, serverMessage.Msg.(*MsgDeSoGetAddr)) - continue - } - srv._handlePeerMessages(serverMessage) // Always check for and handle control messages regardless of whether the @@ -2433,35 +2449,36 @@ func (srv *Server) _startConsensus() { glog.V(2).Info("Server.Start: Server done") } -func (srv *Server) _getAddrsToBroadcast() []*SingleAddr { +func (srv *Server) getAddrsToBroadcast() []*SingleAddr { srv.addrsToBroadcastLock.Lock() defer srv.addrsToBroadcastLock.Unlock() // If there's nothing in the map, return. - if len(srv.addrsToBroadcastt) == 0 { + if len(srv.addrsToBroadcast) == 0 { return []*SingleAddr{} } // If we get here then we have some addresses to broadcast. addrsToBroadcast := []*SingleAddr{} - for len(addrsToBroadcast) < 10 && len(srv.addrsToBroadcastt) > 0 { + for uint32(len(addrsToBroadcast)) < srv.params.MaxAddressesToBroadcast && + len(srv.addrsToBroadcast) > 0 { // Choose a key at random. This works because map iteration is random in golang. bucket := "" - for kk := range srv.addrsToBroadcastt { + for kk := range srv.addrsToBroadcast { bucket = kk break } // Remove the last element from the slice for the given bucket. 
- currentAddrList := srv.addrsToBroadcastt[bucket] + currentAddrList := srv.addrsToBroadcast[bucket] if len(currentAddrList) > 0 { lastIndex := len(currentAddrList) - 1 currentAddr := currentAddrList[lastIndex] currentAddrList = currentAddrList[:lastIndex] if len(currentAddrList) == 0 { - delete(srv.addrsToBroadcastt, bucket) + delete(srv.addrsToBroadcast, bucket) } else { - srv.addrsToBroadcastt[bucket] = currentAddrList + srv.addrsToBroadcast[bucket] = currentAddrList } addrsToBroadcast = append(addrsToBroadcast, currentAddr) @@ -2478,18 +2495,24 @@ func (srv *Server) _startAddressRelayer() { if atomic.LoadInt32(&srv.shutdown) >= 1 { break } - // For the first ten minutes after the server starts, relay our address to all + // For the first ten minutes after the connection controller starts, relay our address to all // peers. After the first ten minutes, do it once every 24 hours. - // TODO: temporary - addressMgr := addrmgr.New("", net.LookupIP) - glog.V(1).Infof("Server.Start._startAddressRelayer: Relaying our own addr to peers") + glog.V(1).Infof("Server.startAddressRelayer: Relaying our own addr to peers") + remoteNodes := srv.connectionController.rnManager.GetAllRemoteNodes().GetAll() if numMinutesPassed < 10 || numMinutesPassed%(RebroadcastNodeAddrIntervalMinutes) == 0 { - for _, pp := range srv.cmgr.GetAllPeers() { - bestAddress := addressMgr.GetBestLocalAddress(pp.netAddr) + for _, rn := range remoteNodes { + if !rn.IsHandshakeCompleted() { + continue + } + netAddr := rn.GetNetAddress() + if netAddr == nil { + continue + } + bestAddress := srv.AddrMgr.GetBestLocalAddress(netAddr) if bestAddress != nil { - glog.V(2).Infof("Server.Start._startAddressRelayer: Relaying address %v to "+ - "peer %v", bestAddress.IP.String(), pp) - pp.AddDeSoMessage(&MsgDeSoAddr{ + glog.V(2).Infof("Server.startAddressRelayer: Relaying address %v to "+ + "RemoteNode (id= %v)", bestAddress.IP.String(), rn.GetId()) + addrMsg := &MsgDeSoAddr{ AddrList: []*SingleAddr{ { Timestamp: 
time.Now(), @@ -2498,27 +2521,38 @@ func (srv *Server) _startAddressRelayer() { Services: (ServiceFlag)(bestAddress.Services), }, }, - }, false) + } + if err := rn.SendMessage(addrMsg); err != nil { + glog.Errorf("Server.startAddressRelayer: Problem sending "+ + "MsgDeSoAddr to RemoteNode (id= %v): %v", rn.GetId(), err) + } } } } - glog.V(2).Infof("Server.Start._startAddressRelayer: Seeing if there are addrs to relay...") + glog.V(2).Infof("Server.startAddressRelayer: Seeing if there are addrs to relay...") // Broadcast the addrs we have to all of our peers. - addrsToBroadcast := srv._getAddrsToBroadcast() + addrsToBroadcast := srv.getAddrsToBroadcast() if len(addrsToBroadcast) == 0 { - glog.V(2).Infof("Server.Start._startAddressRelayer: No addrs to relay.") + glog.V(2).Infof("Server.startAddressRelayer: No addrs to relay.") time.Sleep(AddrRelayIntervalSeconds * time.Second) continue } - glog.V(2).Infof("Server.Start._startAddressRelayer: Found %d addrs to "+ + glog.V(2).Infof("Server.startAddressRelayer: Found %d addrs to "+ "relay: %v", len(addrsToBroadcast), spew.Sdump(addrsToBroadcast)) // Iterate over all our peers and broadcast the addrs to all of them. 
- for _, pp := range srv.cmgr.GetAllPeers() { - pp.AddDeSoMessage(&MsgDeSoAddr{ + for _, rn := range remoteNodes { + if !rn.IsHandshakeCompleted() { + continue + } + addrMsg := &MsgDeSoAddr{ AddrList: addrsToBroadcast, - }, false) + } + if err := rn.SendMessage(addrMsg); err != nil { + glog.Errorf("Server.startAddressRelayer: Problem sending "+ + "MsgDeSoAddr to RemoteNode (id= %v): %v", rn.GetId(), err) + } } time.Sleep(AddrRelayIntervalSeconds * time.Second) continue From baa91ae3140abb76e0667c95e6affba0fda934d9 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 29 Jan 2024 21:16:46 -0800 Subject: [PATCH 409/762] PoS NetworkManager Rename and Nits (#959) * Renames * nits * More renames * Review --- ...st.go => network_manager_routines_test.go} | 58 +-- ...roller_test.go => network_manager_test.go} | 106 +++--- ..._test.go => network_manager_utils_test.go} | 14 +- lib/handshake_controller.go | 78 ++-- ...ction_controller.go => network_manager.go} | 360 +++++++++--------- lib/peer.go | 11 - lib/remote_node.go | 6 +- lib/remote_node_manager.go | 1 + lib/server.go | 60 +-- 9 files changed, 355 insertions(+), 339 deletions(-) rename integration_testing/{connection_controller_routines_test.go => network_manager_routines_test.go} (93%) rename integration_testing/{connection_controller_test.go => network_manager_test.go} (86%) rename integration_testing/{connection_controller_utils_test.go => network_manager_utils_test.go} (94%) rename lib/{connection_controller.go => network_manager.go} (60%) diff --git a/integration_testing/connection_controller_routines_test.go b/integration_testing/network_manager_routines_test.go similarity index 93% rename from integration_testing/connection_controller_routines_test.go rename to integration_testing/network_manager_routines_test.go index 1f30e22b5..141e36026 100644 --- a/integration_testing/connection_controller_routines_test.go +++ 
b/integration_testing/network_manager_routines_test.go @@ -323,8 +323,8 @@ func TestConnectionControllerValidatorInboundDeduplication(t *testing.T) { node2 = startNode(t, node2) node3 = startNode(t, node3) - cc2 := node2.Server.GetConnectionController() - require.NoError(t, cc2.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm2 := node2.Server.GetNetworkManager() + require.NoError(t, nm2.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) // First wait for node2 to be indexed as a validator by node1. waitForValidatorConnection(t, node1, node2) // Now wait for node2 to be re-indexed as a non-validator. @@ -332,8 +332,8 @@ func TestConnectionControllerValidatorInboundDeduplication(t *testing.T) { waitForNonValidatorOutboundConnection(t, node2, node1) // Now connect node3 to node1. - cc3 := node3.Server.GetConnectionController() - require.NoError(t, cc3.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm3 := node3.Server.GetNetworkManager() + require.NoError(t, nm3.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) // First wait for node3 to be indexed as a validator by node1. waitForValidatorConnection(t, node1, node3) // Now wait for node3 to be re-indexed as a non-validator. 
@@ -373,9 +373,9 @@ func TestConnectionControllerNonValidatorConnectorOutbound(t *testing.T) { } node1 = startNode(t, node1) - cc := node1.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node5.Listeners[0].Addr().String())) - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node6.Listeners[0].Addr().String())) + nm := node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node5.Listeners[0].Addr().String())) + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node6.Listeners[0].Addr().String())) waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 3, 0, 3, 0) waitForNonValidatorOutboundConnection(t, node1, node2) @@ -435,24 +435,24 @@ func TestConnectionControllerNonValidatorConnectorInbound(t *testing.T) { node10 = startNode(t, node10) // Connect node1 to node2, node3, node7, and node8. - cc1 := node1.Server.GetConnectionController() - require.NoError(t, cc1.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) - require.NoError(t, cc1.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) - require.NoError(t, cc1.CreateNonValidatorOutboundConnection(node7.Listeners[0].Addr().String())) - require.NoError(t, cc1.CreateNonValidatorOutboundConnection(node8.Listeners[0].Addr().String())) + nm1 := node1.Server.GetNetworkManager() + require.NoError(t, nm1.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + require.NoError(t, nm1.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + require.NoError(t, nm1.CreateNonValidatorOutboundConnection(node7.Listeners[0].Addr().String())) + require.NoError(t, nm1.CreateNonValidatorOutboundConnection(node8.Listeners[0].Addr().String())) // Connect node4, node5, node6 to node1. 
- cc4 := node4.Server.GetConnectionController() - require.NoError(t, cc4.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) - cc5 := node5.Server.GetConnectionController() - require.NoError(t, cc5.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) - cc6 := node6.Server.GetConnectionController() - require.NoError(t, cc6.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm4 := node4.Server.GetNetworkManager() + require.NoError(t, nm4.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm5 := node5.Server.GetNetworkManager() + require.NoError(t, nm5.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm6 := node6.Server.GetNetworkManager() + require.NoError(t, nm6.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) // Connect node9, node10 to node1. - cc9 := node9.Server.GetConnectionController() - require.NoError(t, cc9.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) - cc10 := node10.Server.GetConnectionController() - require.NoError(t, cc10.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm9 := node9.Server.GetNetworkManager() + require.NoError(t, nm9.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm10 := node10.Server.GetNetworkManager() + require.NoError(t, nm10.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node1, node2, node3, node4, node5, node6) setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6, node7, node8, node9, node10) @@ -473,12 +473,12 @@ func TestConnectionControllerNonValidatorConnectorAddressMgr(t *testing.T) { node1.Config.MaxInboundPeers = 0 node1 = startNode(t, node1) - cc := node1.Server.GetConnectionController() - na1, err := cc.ConvertIPStringToNetAddress("deso-seed-2.io:17000") - na2, err := 
cc.ConvertIPStringToNetAddress("deso-seed-3.io:17000") + nm := node1.Server.GetNetworkManager() + na1, err := nm.ConvertIPStringToNetAddress("deso-seed-2.io:17000") + na2, err := nm.ConvertIPStringToNetAddress("deso-seed-3.io:17000") require.NoError(t, err) - cc.AddrMgr.AddAddress(na1, na1) - cc.AddrMgr.AddAddress(na2, na2) + nm.AddrMgr.AddAddress(na1, na1) + nm.AddrMgr.AddAddress(na2, na2) waitForCountRemoteNodeIndexer(t, node1, 2, 0, 2, 0) } @@ -498,7 +498,7 @@ func getActiveValidatorsMapWithValidatorNodes(t *testing.T, validators ...*cmd.N func setActiveValidators(validatorMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator], nodes ...*cmd.Node) { for _, node := range nodes { - node.Server.GetConnectionController().SetActiveValidatorsMap(validatorMap) + node.Server.GetNetworkManager().SetActiveValidatorsMap(validatorMap) } } @@ -539,7 +539,7 @@ func waitForMinNonValidatorCountRemoteNodeIndexer(t *testing.T, node *cmd.Node, minNonValidatorOutboundCount int, minNonValidatorInboundCount int) { userAgent := node.Params.UserAgent - rnManager := node.Server.GetConnectionController().GetRemoteNodeManager() + rnManager := node.Server.GetNetworkManager().GetRemoteNodeManager() condition := func() bool { return checkRemoteNodeIndexerMinNonValidatorCount(rnManager, allCount, validatorCount, minNonValidatorOutboundCount, minNonValidatorInboundCount) diff --git a/integration_testing/connection_controller_test.go b/integration_testing/network_manager_test.go similarity index 86% rename from integration_testing/connection_controller_test.go rename to integration_testing/network_manager_test.go index 63976fbd4..8c883b973 100644 --- a/integration_testing/connection_controller_test.go +++ b/integration_testing/network_manager_test.go @@ -18,8 +18,8 @@ func TestConnectionControllerNonValidator(t *testing.T) { node2.Params.DisableNetworkManagerRoutines = true node2 = startNode(t, node2) - cc := node1.Server.GetConnectionController() - require.NoError(t, 
cc.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + nm := node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) waitForNonValidatorOutboundConnection(t, node1, node2) waitForNonValidatorInboundConnection(t, node2, node1) @@ -34,8 +34,8 @@ func TestConnectionControllerNonValidator(t *testing.T) { node3.Params.DisableNetworkManagerRoutines = true node3 = startNode(t, node3) - cc = node1.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) waitForValidatorConnection(t, node1, node3) waitForNonValidatorInboundConnection(t, node3, node1) @@ -50,8 +50,8 @@ func TestConnectionControllerNonValidator(t *testing.T) { node4.Params.DisableNetworkManagerRoutines = true node4 = startNode(t, node4) - cc = node1.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) waitForValidatorConnection(t, node1, node4) waitForNonValidatorInboundConnection(t, node4, node1) t.Logf("Test #3 passed | Successfully created outbound connection from NonValidator Node1 to Validator Node4") @@ -74,8 +74,8 @@ func TestConnectionControllerValidator(t *testing.T) { node2.Params.DisableNetworkManagerRoutines = true node2 = startNode(t, node2) - cc := node1.Server.GetConnectionController() - require.NoError(t, cc.CreateValidatorConnection(node2.Listeners[0].Addr().String(), blsPub2)) + nm := node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateValidatorConnection(node2.Listeners[0].Addr().String(), blsPub2)) waitForValidatorConnection(t, node1, node2) 
waitForValidatorConnection(t, node2, node1) @@ -88,8 +88,8 @@ func TestConnectionControllerValidator(t *testing.T) { node3.Params.DisableNetworkManagerRoutines = true node3 = startNode(t, node3) - cc = node1.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) waitForNonValidatorOutboundConnection(t, node1, node3) waitForValidatorConnection(t, node3, node1) @@ -104,8 +104,8 @@ func TestConnectionControllerValidator(t *testing.T) { node4.Params.DisableNetworkManagerRoutines = true node4 = startNode(t, node4) - cc = node1.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) waitForValidatorConnection(t, node1, node4) waitForValidatorConnection(t, node4, node1) t.Logf("Test #3 passed | Successfully created non-validator outbound connection from Validator Node1 to Validator Node4") @@ -127,8 +127,8 @@ func TestConnectionControllerHandshakeDataErrors(t *testing.T) { node1 = startNode(t, node1) node2 = startNode(t, node2) - cc := node2.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm := node2.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node2) t.Logf("Test #1 passed | Successfuly disconnected node with SFValidator flag and ProtocolVersion1 mismatch") @@ -141,8 +141,8 @@ func TestConnectionControllerHandshakeDataErrors(t *testing.T) { node3.Params.ProtocolVersion = lib.ProtocolVersionType(3) node3 = startNode(t, 
node3) - cc = node1.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node3) t.Logf("Test #2 passed | Successfuly disconnected node with ProtocolVersion3") @@ -153,8 +153,8 @@ func TestConnectionControllerHandshakeDataErrors(t *testing.T) { node4.Params.ProtocolVersion = lib.ProtocolVersion0 node4 = startNode(t, node4) - cc = node1.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node4) t.Logf("Test #3 passed | Successfuly disconnected node with ProtocolVersion0") @@ -170,8 +170,8 @@ func TestConnectionControllerHandshakeDataErrors(t *testing.T) { node5.Params.DisableNetworkManagerRoutines = true node5 = startNode(t, node5) - cc = node1.Server.GetConnectionController() - require.NoError(t, cc.CreateValidatorConnection(node5.Listeners[0].Addr().String(), blsKeyStore5Wrong.GetSigner().GetPublicKey())) + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateValidatorConnection(node5.Listeners[0].Addr().String(), blsKeyStore5Wrong.GetSigner().GetPublicKey())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node5) t.Logf("Test #4 passed | Successfuly disconnected node with public key mismatch") @@ -183,8 +183,8 @@ func TestConnectionControllerHandshakeDataErrors(t *testing.T) { node6.Params.DisableNetworkManagerRoutines = true node6 = startNode(t, node6) - cc = node1.Server.GetConnectionController() - require.NoError(t, 
cc.CreateValidatorConnection(node6.Listeners[0].Addr().String(), blsPriv6.PublicKey())) + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateValidatorConnection(node6.Listeners[0].Addr().String(), blsPriv6.PublicKey())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node6) t.Logf("Test #5 passed | Successfuly disconnected supposed validator node with missing SFPosValidator flag") @@ -195,8 +195,8 @@ func TestConnectionControllerHandshakeDataErrors(t *testing.T) { node7.Params.ProtocolVersion = lib.ProtocolVersion1 node7 = startNode(t, node7) - cc = node1.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node7.Listeners[0].Addr().String())) + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node7.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node7) t.Logf("Test #6 passed | Successfuly disconnected outbound non-validator node with ProtocolVersion1") @@ -213,8 +213,8 @@ func TestConnectionControllerHandshakeTimeouts(t *testing.T) { node2.Params.DisableNetworkManagerRoutines = true node2 = startNode(t, node2) - cc := node1.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + nm := node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node2) t.Logf("Test #1 passed | Successfuly disconnected node after version negotiation timeout") @@ -226,8 +226,8 @@ func TestConnectionControllerHandshakeTimeouts(t *testing.T) { node3.Params.VerackNegotiationTimeout = 0 node3 = startNode(t, node3) - cc = node3.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm = 
node3.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node1) waitForEmptyRemoteNodeIndexer(t, node3) t.Logf("Test #2 passed | Successfuly disconnected node after verack exchange timeout") @@ -248,8 +248,8 @@ func TestConnectionControllerHandshakeTimeouts(t *testing.T) { node5.Params.DisableNetworkManagerRoutines = true node5 = startNode(t, node5) - cc = node4.Server.GetConnectionController() - require.NoError(t, cc.CreateValidatorConnection(node5.Listeners[0].Addr().String(), blsKeyStore5.GetSigner().GetPublicKey())) + nm = node4.Server.GetNetworkManager() + require.NoError(t, nm.CreateValidatorConnection(node5.Listeners[0].Addr().String(), blsKeyStore5.GetSigner().GetPublicKey())) waitForEmptyRemoteNodeIndexer(t, node4) waitForEmptyRemoteNodeIndexer(t, node5) t.Logf("Test #3 passed | Successfuly disconnected validator node after handshake timeout") @@ -275,17 +275,17 @@ func TestConnectionControllerValidatorDuplication(t *testing.T) { node3 = startNode(t, node3) // Create validator connection from Node1 to Node2 and from Node1 to Node3 - cc := node1.Server.GetConnectionController() - require.NoError(t, cc.CreateValidatorConnection(node2.Listeners[0].Addr().String(), blsKeyStore2.GetSigner().GetPublicKey())) + nm := node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateValidatorConnection(node2.Listeners[0].Addr().String(), blsKeyStore2.GetSigner().GetPublicKey())) // This should fail out right because Node3 has a duplicate public key. 
- require.Error(t, cc.CreateValidatorConnection(node3.Listeners[0].Addr().String(), blsKeyStore2.GetSigner().GetPublicKey())) + require.Error(t, nm.CreateValidatorConnection(node3.Listeners[0].Addr().String(), blsKeyStore2.GetSigner().GetPublicKey())) waitForValidatorConnection(t, node1, node2) waitForNonValidatorInboundConnection(t, node2, node1) // Now create an outbound connection from Node3 to Node1, which should pass handshake, but then fail because // Node1 already has a validator connection to Node2 with the same public key. - cc3 := node3.Server.GetConnectionController() - require.NoError(t, cc3.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm3 := node3.Server.GetNetworkManager() + require.NoError(t, nm3.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node3) waitForCountRemoteNodeIndexer(t, node1, 1, 1, 0, 0) t.Logf("Test #1 passed | Successfuly rejected duplicate validator connection with inbound/outbound validators") @@ -306,12 +306,12 @@ func TestConnectionControllerValidatorDuplication(t *testing.T) { node5 = startNode(t, node5) // Create validator connections from Node4 to Node1 and from Node5 to Node1 - cc4 := node4.Server.GetConnectionController() - require.NoError(t, cc4.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm4 := node4.Server.GetNetworkManager() + require.NoError(t, nm4.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForValidatorConnection(t, node1, node4) waitForNonValidatorOutboundConnection(t, node4, node1) - cc5 := node5.Server.GetConnectionController() - require.NoError(t, cc5.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm5 := node5.Server.GetNetworkManager() + require.NoError(t, nm5.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node5) waitForCountRemoteNodeIndexer(t, node1, 1, 1, 0, 0) 
t.Logf("Test #2 passed | Successfuly rejected duplicate validator connection with multiple outbound validators") @@ -330,8 +330,8 @@ func TestConnectionControllerProtocolDifference(t *testing.T) { node2 = startNode(t, node2) // Create non-validator connection from Node1 to Node2 - cc := node1.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + nm := node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) waitForNonValidatorOutboundConnection(t, node1, node2) waitForNonValidatorInboundConnection(t, node2, node1) t.Logf("Test #1 passed | Successfuly connected to a ProtocolVersion1 node with a ProtocolVersion2 non-validator") @@ -346,7 +346,7 @@ func TestConnectionControllerProtocolDifference(t *testing.T) { node3 = startNode(t, node3) // Create validator connection from Node1 to Node3 - require.NoError(t, cc.CreateValidatorConnection(node3.Listeners[0].Addr().String(), blsKeyStore3.GetSigner().GetPublicKey())) + require.NoError(t, nm.CreateValidatorConnection(node3.Listeners[0].Addr().String(), blsKeyStore3.GetSigner().GetPublicKey())) waitForValidatorConnection(t, node1, node3) waitForNonValidatorInboundConnection(t, node3, node1) t.Logf("Test #2 passed | Successfuly connected to a ProtocolVersion1 node with a ProtocolVersion2 validator") @@ -365,14 +365,14 @@ func TestConnectionControllerProtocolDifference(t *testing.T) { node4 = startNode(t, node4) // Attempt to create non-validator connection from Node4 to Node1 - cc = node4.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm = node4.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node4) waitForEmptyRemoteNodeIndexer(t, node1) t.Logf("Test #3 passed | Successfuly 
rejected outbound connection from ProtocolVersion2 node to ProtcolVersion1 node") // Attempt to create validator connection from Node4 to Node1 - require.NoError(t, cc.CreateValidatorConnection(node1.Listeners[0].Addr().String(), blsKeyStore4.GetSigner().GetPublicKey())) + require.NoError(t, nm.CreateValidatorConnection(node1.Listeners[0].Addr().String(), blsKeyStore4.GetSigner().GetPublicKey())) waitForEmptyRemoteNodeIndexer(t, node4) waitForEmptyRemoteNodeIndexer(t, node1) t.Logf("Test #4 passed | Successfuly rejected validator connection from ProtocolVersion2 node to ProtcolVersion1 node") @@ -383,8 +383,8 @@ func TestConnectionControllerProtocolDifference(t *testing.T) { node5 = startNode(t, node5) // Attempt to create non-validator connection from Node5 to Node1 - cc = node5.Server.GetConnectionController() - require.NoError(t, cc.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm = node5.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) waitForEmptyRemoteNodeIndexer(t, node5) waitForEmptyRemoteNodeIndexer(t, node1) t.Logf("Test #5 passed | Successfuly rejected outbound connection from ProtocolVersion2 node to ProtcolVersion1 node") @@ -404,8 +404,8 @@ func TestConnectionControllerPersistentConnection(t *testing.T) { node2 = startNode(t, node2) // Create a persistent connection from Node1 to Node2 - cc := node1.Server.GetConnectionController() - _, err = cc.CreateNonValidatorPersistentOutboundConnection(node2.Listeners[0].Addr().String()) + nm := node1.Server.GetNetworkManager() + _, err = nm.CreateNonValidatorPersistentOutboundConnection(node2.Listeners[0].Addr().String()) require.NoError(t, err) waitForValidatorConnection(t, node1, node2) waitForNonValidatorInboundConnection(t, node2, node1) @@ -419,7 +419,7 @@ func TestConnectionControllerPersistentConnection(t *testing.T) { node3 = startNode(t, node3) // Create a persistent connection from Node1 to Node3 
- _, err = cc.CreateNonValidatorPersistentOutboundConnection(node3.Listeners[0].Addr().String()) + _, err = nm.CreateNonValidatorPersistentOutboundConnection(node3.Listeners[0].Addr().String()) require.NoError(t, err) waitForNonValidatorOutboundConnection(t, node1, node3) waitForNonValidatorInboundConnection(t, node3, node1) @@ -441,8 +441,8 @@ func TestConnectionControllerPersistentConnection(t *testing.T) { node5 = startNode(t, node5) // Create a persistent connection from Node4 to Node5 - cc = node4.Server.GetConnectionController() - _, err = cc.CreateNonValidatorPersistentOutboundConnection(node5.Listeners[0].Addr().String()) + nm = node4.Server.GetNetworkManager() + _, err = nm.CreateNonValidatorPersistentOutboundConnection(node5.Listeners[0].Addr().String()) require.NoError(t, err) waitForNonValidatorOutboundConnection(t, node4, node5) waitForValidatorConnection(t, node5, node4) @@ -458,7 +458,7 @@ func TestConnectionControllerPersistentConnection(t *testing.T) { node6 = startNode(t, node6) // Create a persistent connection from Node4 to Node6 - _, err = cc.CreateNonValidatorPersistentOutboundConnection(node6.Listeners[0].Addr().String()) + _, err = nm.CreateNonValidatorPersistentOutboundConnection(node6.Listeners[0].Addr().String()) require.NoError(t, err) waitForValidatorConnection(t, node4, node6) waitForValidatorConnection(t, node6, node4) diff --git a/integration_testing/connection_controller_utils_test.go b/integration_testing/network_manager_utils_test.go similarity index 94% rename from integration_testing/connection_controller_utils_test.go rename to integration_testing/network_manager_utils_test.go index 94f4702c6..f14cb39d9 100644 --- a/integration_testing/connection_controller_utils_test.go +++ b/integration_testing/network_manager_utils_test.go @@ -11,7 +11,7 @@ import ( func waitForValidatorConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) { userAgentN1 := node1.Params.UserAgent userAgentN2 := node2.Params.UserAgent - rnManagerN1 := 
node1.Server.GetConnectionController().GetRemoteNodeManager() + rnManagerN1 := node1.Server.GetNetworkManager().GetRemoteNodeManager() n1ValidatedN2 := func() bool { if true != checkRemoteNodeIndexerUserAgent(rnManagerN1, userAgentN2, true, false, false) { return false @@ -41,7 +41,7 @@ func conditionNonValidatorOutboundConnection(t *testing.T, node1 *cmd.Node, node func conditionNonValidatorOutboundConnectionDynamic(t *testing.T, node1 *cmd.Node, node2 *cmd.Node, inactiveValidator bool) func() bool { userAgentN2 := node2.Params.UserAgent - rnManagerN1 := node1.Server.GetConnectionController().GetRemoteNodeManager() + rnManagerN1 := node1.Server.GetNetworkManager().GetRemoteNodeManager() return func() bool { if true != checkRemoteNodeIndexerUserAgent(rnManagerN1, userAgentN2, false, true, false) { return false @@ -82,7 +82,7 @@ func conditionNonValidatorInboundConnection(t *testing.T, node1 *cmd.Node, node2 func conditionNonValidatorInboundConnectionDynamic(t *testing.T, node1 *cmd.Node, node2 *cmd.Node, inactiveValidator bool) func() bool { userAgentN2 := node2.Params.UserAgent - rnManagerN1 := node1.Server.GetConnectionController().GetRemoteNodeManager() + rnManagerN1 := node1.Server.GetNetworkManager().GetRemoteNodeManager() return func() bool { if true != checkRemoteNodeIndexerUserAgent(rnManagerN1, userAgentN2, false, false, true) { return false @@ -104,7 +104,7 @@ func conditionNonValidatorInboundConnectionDynamic(t *testing.T, node1 *cmd.Node func waitForEmptyRemoteNodeIndexer(t *testing.T, node1 *cmd.Node) { userAgentN1 := node1.Params.UserAgent - rnManagerN1 := node1.Server.GetConnectionController().GetRemoteNodeManager() + rnManagerN1 := node1.Server.GetNetworkManager().GetRemoteNodeManager() n1ValidatedN2 := func() bool { if true != checkRemoteNodeIndexerEmpty(rnManagerN1) { return false @@ -118,7 +118,7 @@ func waitForCountRemoteNodeIndexer(t *testing.T, node1 *cmd.Node, allCount int, nonValidatorOutboundCount int, nonValidatorInboundCount int) { 
userAgent := node1.Params.UserAgent - rnManager := node1.Server.GetConnectionController().GetRemoteNodeManager() + rnManager := node1.Server.GetNetworkManager().GetRemoteNodeManager() condition := func() bool { if true != checkRemoteNodeIndexerCount(rnManager, allCount, validatorCount, nonValidatorOutboundCount, nonValidatorInboundCount) { return false @@ -132,7 +132,7 @@ func waitForCountRemoteNodeIndexerHandshakeCompleted(t *testing.T, node1 *cmd.No nonValidatorOutboundCount int, nonValidatorInboundCount int) { userAgent := node1.Params.UserAgent - rnManager := node1.Server.GetConnectionController().GetRemoteNodeManager() + rnManager := node1.Server.GetNetworkManager().GetRemoteNodeManager() condition := func() bool { return checkRemoteNodeIndexerCountHandshakeCompleted(rnManager, allCount, validatorCount, nonValidatorOutboundCount, nonValidatorInboundCount) @@ -243,7 +243,7 @@ func checkUserAgentInRemoteNodeList(userAgent string, rnList []*lib.RemoteNode) } func getRemoteNodeWithUserAgent(node *cmd.Node, userAgent string) *lib.RemoteNode { - rnManager := node.Server.GetConnectionController().GetRemoteNodeManager() + rnManager := node.Server.GetNetworkManager().GetRemoteNodeManager() rnList := rnManager.GetAllRemoteNodes().GetAll() for _, rn := range rnList { if rn.GetUserAgent() == userAgent { diff --git a/lib/handshake_controller.go b/lib/handshake_controller.go index 385e36275..2d558317f 100644 --- a/lib/handshake_controller.go +++ b/lib/handshake_controller.go @@ -8,19 +8,19 @@ import ( "sync" ) -// HandshakeController is a structure that handles the handshake process with remote nodes. It is the entry point for +// HandshakeManager is a structure that handles the handshake process with remote nodes. It is the entry point for // initiating a handshake with a remote node. It is also responsible for handling version/verack messages from remote // nodes. And for handling the handshake complete control message. 
-type HandshakeController struct { +type HandshakeManager struct { mtxHandshakeComplete sync.Mutex rnManager *RemoteNodeManager usedNonces lru.Cache } -func NewHandshakeController(rnManager *RemoteNodeManager) *HandshakeController { +func NewHandshakeController(rnManager *RemoteNodeManager) *HandshakeManager { - vm := &HandshakeController{ + vm := &HandshakeManager{ rnManager: rnManager, usedNonces: lru.NewCache(1000), } @@ -29,20 +29,20 @@ func NewHandshakeController(rnManager *RemoteNodeManager) *HandshakeController { } // InitiateHandshake kicks off handshake with a remote node. -func (hc *HandshakeController) InitiateHandshake(rn *RemoteNode) { +func (hm *HandshakeManager) InitiateHandshake(rn *RemoteNode) { nonce := uint64(RandInt64(math.MaxInt64)) if err := rn.InitiateHandshake(nonce); err != nil { glog.Errorf("RemoteNode.InitiateHandshake: Error initiating handshake: %v", err) - hc.rnManager.Disconnect(rn) + hm.rnManager.Disconnect(rn) } - hc.usedNonces.Add(nonce) + hm.usedNonces.Add(nonce) } // handleHandshakeComplete handles HandshakeComplete control messages, sent by RemoteNodes. -func (hc *HandshakeController) handleHandshakeComplete(remoteNode *RemoteNode) { +func (hm *HandshakeManager) handleHandshakeComplete(remoteNode *RemoteNode) { // Prevent race conditions while handling handshake complete messages. - hc.mtxHandshakeComplete.Lock() - defer hc.mtxHandshakeComplete.Unlock() + hm.mtxHandshakeComplete.Lock() + defer hm.mtxHandshakeComplete.Unlock() // Get the handshake information of this peer. 
if remoteNode == nil { @@ -50,20 +50,20 @@ func (hc *HandshakeController) handleHandshakeComplete(remoteNode *RemoteNode) { } if remoteNode.GetNegotiatedProtocolVersion().Before(ProtocolVersion2) { - hc.rnManager.ProcessCompletedHandshake(remoteNode) + hm.rnManager.ProcessCompletedHandshake(remoteNode) return } - if err := hc.handleHandshakeCompletePoSMessage(remoteNode); err != nil { - glog.Errorf("HandshakeController.handleHandshakeComplete: Error handling PoS handshake peer message: %v, "+ + if err := hm.handleHandshakeCompletePoSMessage(remoteNode); err != nil { + glog.Errorf("HandshakeManager.handleHandshakeComplete: Error handling PoS handshake peer message: %v, "+ "remoteNodePk (%s)", err, remoteNode.GetValidatorPublicKey().Serialize()) - hc.rnManager.Disconnect(remoteNode) + hm.rnManager.Disconnect(remoteNode) return } - hc.rnManager.ProcessCompletedHandshake(remoteNode) + hm.rnManager.ProcessCompletedHandshake(remoteNode) } -func (hc *HandshakeController) handleHandshakeCompletePoSMessage(remoteNode *RemoteNode) error { +func (hm *HandshakeManager) handleHandshakeCompletePoSMessage(remoteNode *RemoteNode) error { validatorPk := remoteNode.GetValidatorPublicKey() // If the remote node is not a potential validator, we don't need to do anything. @@ -72,11 +72,11 @@ func (hc *HandshakeController) handleHandshakeCompletePoSMessage(remoteNode *Rem } // Lookup the validator in the ValidatorIndex with the same public key. - existingValidator, ok := hc.rnManager.GetValidatorIndex().Get(validatorPk.Serialize()) + existingValidator, ok := hm.rnManager.GetValidatorIndex().Get(validatorPk.Serialize()) // For inbound RemoteNodes, we should ensure that there isn't an existing validator connected with the same public key. // Inbound nodes are not initiated by us, so we shouldn't have added the RemoteNode to the ValidatorIndex yet. 
if remoteNode.IsInbound() && ok { - return fmt.Errorf("HandshakeController.handleHandshakeCompletePoSMessage: Inbound RemoteNode with duplicate validator public key") + return fmt.Errorf("HandshakeManager.handleHandshakeCompletePoSMessage: Inbound RemoteNode with duplicate validator public key") } // For outbound RemoteNodes, we have two possible scenarios. Either the RemoteNode has been initiated as a validator, // in which case it should already be in the ValidatorIndex. Or the RemoteNode has been initiated as a regular node, @@ -85,20 +85,20 @@ func (hc *HandshakeController) handleHandshakeCompletePoSMessage(remoteNode *Rem // with the RemoteNode's public key. If there is one, we want to ensure that these two RemoteNodes have identical ids. if remoteNode.IsOutbound() && ok { if remoteNode.GetId() != existingValidator.GetId() { - return fmt.Errorf("HandshakeController.handleHandshakeCompletePoSMessage: Outbound RemoteNode with duplicate validator public key. "+ + return fmt.Errorf("HandshakeManager.handleHandshakeCompletePoSMessage: Outbound RemoteNode with duplicate validator public key. "+ "Existing validator id: %v, new validator id: %v", existingValidator.GetId().ToUint64(), remoteNode.GetId().ToUint64()) } } return nil } -// _handleVersionMessage handles version messages, sent by RemoteNodes. -func (hc *HandshakeController) _handleVersionMessage(origin *Peer, desoMsg DeSoMessage) { +// handleVersionMessage handles version messages, sent by RemoteNodes. +func (hm *HandshakeManager) handleVersionMessage(origin *Peer, desoMsg DeSoMessage) { if desoMsg.GetMsgType() != MsgTypeVersion { return } - rn := hc.rnManager.GetRemoteNodeFromPeer(origin) + rn := hm.rnManager.GetRemoteNodeFromPeer(origin) if rn == nil { // This should never happen. 
return @@ -107,41 +107,41 @@ func (hc *HandshakeController) _handleVersionMessage(origin *Peer, desoMsg DeSoM var verMsg *MsgDeSoVersion var ok bool if verMsg, ok = desoMsg.(*MsgDeSoVersion); !ok { - glog.Errorf("HandshakeController._handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ + glog.Errorf("HandshakeManager.handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ "error casting version message", origin.ID) - hc.rnManager.Disconnect(rn) + hm.rnManager.Disconnect(rn) return } // If we've seen this nonce before then return an error since this is a connection from ourselves. msgNonce := verMsg.Nonce - if hc.usedNonces.Contains(msgNonce) { - hc.usedNonces.Delete(msgNonce) - glog.Errorf("HandshakeController._handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ + if hm.usedNonces.Contains(msgNonce) { + hm.usedNonces.Delete(msgNonce) + glog.Errorf("HandshakeManager.handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ "nonce collision, nonce (%v)", origin.ID, msgNonce) - hc.rnManager.Disconnect(rn) + hm.rnManager.Disconnect(rn) return } // Call HandleVersionMessage on the RemoteNode. responseNonce := uint64(RandInt64(math.MaxInt64)) if err := rn.HandleVersionMessage(verMsg, responseNonce); err != nil { - glog.Errorf("HandshakeController._handleVersionMessage: Requesting PeerDisconnect for id: (%v) "+ + glog.Errorf("HandshakeManager.handleVersionMessage: Requesting PeerDisconnect for id: (%v) "+ "error handling version message: %v", origin.ID, err) - hc.rnManager.Disconnect(rn) + hm.rnManager.Disconnect(rn) return } - hc.usedNonces.Add(responseNonce) + hm.usedNonces.Add(responseNonce) } -// _handleVerackMessage handles verack messages, sent by RemoteNodes. -func (hc *HandshakeController) _handleVerackMessage(origin *Peer, desoMsg DeSoMessage) { +// handleVerackMessage handles verack messages, sent by RemoteNodes. 
+func (hm *HandshakeManager) handleVerackMessage(origin *Peer, desoMsg DeSoMessage) { if desoMsg.GetMsgType() != MsgTypeVerack { return } - rn := hc.rnManager.GetRemoteNodeFromPeer(origin) + rn := hm.rnManager.GetRemoteNodeFromPeer(origin) if rn == nil { // This should never happen. return @@ -150,19 +150,19 @@ func (hc *HandshakeController) _handleVerackMessage(origin *Peer, desoMsg DeSoMe var vrkMsg *MsgDeSoVerack var ok bool if vrkMsg, ok = desoMsg.(*MsgDeSoVerack); !ok { - glog.Errorf("HandshakeController._handleVerackMessage: Disconnecting RemoteNode with id: (%v) "+ + glog.Errorf("HandshakeManager.handleVerackMessage: Disconnecting RemoteNode with id: (%v) "+ "error casting verack message", origin.ID) - hc.rnManager.Disconnect(rn) + hm.rnManager.Disconnect(rn) return } // Call HandleVerackMessage on the RemoteNode. if err := rn.HandleVerackMessage(vrkMsg); err != nil { - glog.Errorf("HandshakeController._handleVerackMessage: Requesting PeerDisconnect for id: (%v) "+ + glog.Errorf("HandshakeManager.handleVerackMessage: Requesting PeerDisconnect for id: (%v) "+ "error handling verack message: %v", origin.ID, err) - hc.rnManager.Disconnect(rn) + hm.rnManager.Disconnect(rn) return } - hc.handleHandshakeComplete(rn) + hm.handleHandshakeComplete(rn) } diff --git a/lib/connection_controller.go b/lib/network_manager.go similarity index 60% rename from lib/connection_controller.go rename to lib/network_manager.go index ef021e100..0227a6ac9 100644 --- a/lib/connection_controller.go +++ b/lib/network_manager.go @@ -15,19 +15,19 @@ import ( "time" ) -// ConnectionController is a structure that oversees all connections to remote nodes. It is responsible for kicking off +// NetworkManager is a structure that oversees all connections to remote nodes. It is responsible for kicking off // the initial connections a node makes to the network. It is also responsible for creating RemoteNodes from all -// successful outbound and inbound connections. 
The ConnectionController also ensures that the node is connected to +// successful outbound and inbound connections. The NetworkManager also ensures that the node is connected to // the active validators, once the node reaches Proof of Stake. // TODO: Document more in later PRs -type ConnectionController struct { +type NetworkManager struct { // The parameters we are initialized with. params *DeSoParams cmgr *ConnectionManager blsKeystore *BLSKeystore - handshake *HandshakeController + handshake *HandshakeManager rnManager *RemoteNodeManager @@ -45,7 +45,7 @@ type ConnectionController struct { activeValidatorsMapLock sync.RWMutex // activeValidatorsMap is a map of all currently active validators registered in consensus. It will be updated - // periodically by the owner of the ConnectionController. + // periodically by the owner of the NetworkManager. activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator] // The target number of non-validator outbound remote nodes we want to have. 
We will disconnect remote nodes once @@ -63,16 +63,16 @@ type ConnectionController struct { exitGroup sync.WaitGroup } -func NewConnectionController(params *DeSoParams, cmgr *ConnectionManager, handshakeController *HandshakeController, - rnManager *RemoteNodeManager, blsKeystore *BLSKeystore, addrMgr *addrmgr.AddrManager, connectIps []string, +func NewConnectionController(params *DeSoParams, cmgr *ConnectionManager, rnManager *RemoteNodeManager, + blsKeystore *BLSKeystore, addrMgr *addrmgr.AddrManager, connectIps []string, targetNonValidatorOutboundRemoteNodes uint32, targetNonValidatorInboundRemoteNodes uint32, - limitOneInboundConnectionPerIP bool) *ConnectionController { + limitOneInboundConnectionPerIP bool) *NetworkManager { - return &ConnectionController{ + return &NetworkManager{ params: params, cmgr: cmgr, blsKeystore: blsKeystore, - handshake: handshakeController, + handshake: NewHandshakeController(rnManager), rnManager: rnManager, AddrMgr: addrMgr, connectIps: connectIps, @@ -85,42 +85,42 @@ func NewConnectionController(params *DeSoParams, cmgr *ConnectionManager, handsh } } -func (cc *ConnectionController) Start() { - if cc.params.DisableNetworkManagerRoutines { +func (nm *NetworkManager) Start() { + if nm.params.DisableNetworkManagerRoutines { return } - cc.startGroup.Add(4) - go cc.startPersistentConnector() - go cc.startValidatorConnector() - go cc.startNonValidatorConnector() - go cc.startRemoteNodeCleanup() + nm.startGroup.Add(4) + go nm.startPersistentConnector() + go nm.startValidatorConnector() + go nm.startNonValidatorConnector() + go nm.startRemoteNodeCleanup() - cc.startGroup.Wait() + nm.startGroup.Wait() } -func (cc *ConnectionController) Stop() { - if !cc.params.DisableNetworkManagerRoutines { - cc.exitGroup.Add(4) - close(cc.exitChan) - cc.exitGroup.Wait() +func (nm *NetworkManager) Stop() { + if !nm.params.DisableNetworkManagerRoutines { + nm.exitGroup.Add(4) + close(nm.exitChan) + nm.exitGroup.Wait() } - cc.rnManager.DisconnectAll() + 
nm.rnManager.DisconnectAll() } -func (cc *ConnectionController) GetRemoteNodeManager() *RemoteNodeManager { - return cc.rnManager +func (nm *NetworkManager) GetRemoteNodeManager() *RemoteNodeManager { + return nm.rnManager } -func (cc *ConnectionController) startPersistentConnector() { - cc.startGroup.Done() +func (nm *NetworkManager) startPersistentConnector() { + nm.startGroup.Done() for { select { - case <-cc.exitChan: - cc.exitGroup.Done() + case <-nm.exitChan: + nm.exitGroup.Done() return case <-time.After(1 * time.Second): - cc.refreshConnectIps() + nm.refreshConnectIps() } } } @@ -130,17 +130,17 @@ func (cc *ConnectionController) startPersistentConnector() { // are validators. If they are, it adds them to the validator index. It also checks if any of the existing validators // are no longer active and removes them from the validator index. Second, it checks if any of the active validators // are missing from the validator index. If they are, it attempts to connect to them. -func (cc *ConnectionController) startValidatorConnector() { - cc.startGroup.Done() +func (nm *NetworkManager) startValidatorConnector() { + nm.startGroup.Done() for { select { - case <-cc.exitChan: - cc.exitGroup.Done() + case <-nm.exitChan: + nm.exitGroup.Done() return case <-time.After(1 * time.Second): - activeValidatorsMap := cc.getActiveValidatorsMap() - cc.refreshValidatorIndex(activeValidatorsMap) - cc.connectValidators(activeValidatorsMap) + activeValidatorsMap := nm.getActiveValidatorsMap() + nm.refreshValidatorIndex(activeValidatorsMap) + nm.connectValidators(activeValidatorsMap) } } } @@ -149,32 +149,32 @@ func (cc *ConnectionController) startValidatorConnector() { // and inbound remote nodes. To do this, it periodically checks the number of outbound and inbound remote nodes, and // if the number is above the target number, it disconnects the excess remote nodes. If the number is below the target // number, it attempts to connect to new remote nodes. 
-func (cc *ConnectionController) startNonValidatorConnector() { - cc.startGroup.Done() +func (nm *NetworkManager) startNonValidatorConnector() { + nm.startGroup.Done() for { select { - case <-cc.exitChan: - cc.exitGroup.Done() + case <-nm.exitChan: + nm.exitGroup.Done() return case <-time.After(1 * time.Second): - cc.refreshNonValidatorOutboundIndex() - cc.refreshNonValidatorInboundIndex() - cc.connectNonValidators() + nm.refreshNonValidatorOutboundIndex() + nm.refreshNonValidatorInboundIndex() + nm.connectNonValidators() } } } -func (cc *ConnectionController) startRemoteNodeCleanup() { - cc.startGroup.Done() +func (nm *NetworkManager) startRemoteNodeCleanup() { + nm.startGroup.Done() for { select { - case <-cc.exitChan: - cc.exitGroup.Done() + case <-nm.exitChan: + nm.exitGroup.Done() return case <-time.After(1 * time.Second): - cc.rnManager.Cleanup() + nm.rnManager.Cleanup() } } @@ -184,26 +184,34 @@ func (cc *ConnectionController) startRemoteNodeCleanup() { // ## Handlers (Peer, DeSoMessage) // ########################### -func (cc *ConnectionController) _handleDonePeerMessage(origin *Peer, desoMsg DeSoMessage) { +func (nm *NetworkManager) _handleVersionMessage(origin *Peer, desoMsg DeSoMessage) { + nm.handshake.handleVersionMessage(origin, desoMsg) +} + +func (nm *NetworkManager) _handleVerackMessage(origin *Peer, desoMsg DeSoMessage) { + nm.handshake.handleVerackMessage(origin, desoMsg) +} + +func (nm *NetworkManager) _handleDonePeerMessage(origin *Peer, desoMsg DeSoMessage) { if desoMsg.GetMsgType() != MsgTypeDisconnectedPeer { return } - glog.V(2).Infof("ConnectionController.handleDonePeerMessage: Handling disconnected peer message for "+ + glog.V(2).Infof("NetworkManager.handleDonePeerMessage: Handling disconnected peer message for "+ "id=%v", origin.ID) - cc.rnManager.DisconnectById(NewRemoteNodeId(origin.ID)) + nm.rnManager.DisconnectById(NewRemoteNodeId(origin.ID)) // Update the persistentIpToRemoteNodeIdsMap. 
- ipRemoteNodeIdMap := cc.persistentIpToRemoteNodeIdsMap.ToMap() + ipRemoteNodeIdMap := nm.persistentIpToRemoteNodeIdsMap.ToMap() for ip, id := range ipRemoteNodeIdMap { if id.ToUint64() == origin.ID { - cc.persistentIpToRemoteNodeIdsMap.Remove(ip) + nm.persistentIpToRemoteNodeIdsMap.Remove(ip) } } } // _handleNewConnectionMessage is called when a new outbound or inbound connection is established. It is responsible // for creating a RemoteNode from the connection and initiating the handshake. The incoming DeSoMessage is a control message. -func (cc *ConnectionController) _handleNewConnectionMessage(origin *Peer, desoMsg DeSoMessage) { +func (nm *NetworkManager) _handleNewConnectionMessage(origin *Peer, desoMsg DeSoMessage) { if desoMsg.GetMsgType() != MsgTypeNewConnection { return } @@ -217,70 +225,70 @@ func (cc *ConnectionController) _handleNewConnectionMessage(origin *Peer, desoMs var err error switch msg.Connection.GetConnectionType() { case ConnectionTypeInbound: - remoteNode, err = cc.processInboundConnection(msg.Connection) + remoteNode, err = nm.processInboundConnection(msg.Connection) if err != nil { - glog.Errorf("ConnectionController.handleNewConnectionMessage: Problem handling inbound connection: %v", err) - cc.cleanupFailedInboundConnection(remoteNode, msg.Connection) + glog.Errorf("NetworkManager.handleNewConnectionMessage: Problem handling inbound connection: %v", err) + nm.cleanupFailedInboundConnection(remoteNode, msg.Connection) return } case ConnectionTypeOutbound: - remoteNode, err = cc.processOutboundConnection(msg.Connection) + remoteNode, err = nm.processOutboundConnection(msg.Connection) if err != nil { - glog.Errorf("ConnectionController.handleNewConnectionMessage: Problem handling outbound connection: %v", err) - cc.cleanupFailedOutboundConnection(msg.Connection) + glog.Errorf("NetworkManager.handleNewConnectionMessage: Problem handling outbound connection: %v", err) + nm.cleanupFailedOutboundConnection(msg.Connection) return } } // If we 
made it here, we have a valid remote node. We will now initiate the handshake. - cc.handshake.InitiateHandshake(remoteNode) + nm.handshake.InitiateHandshake(remoteNode) } -func (cc *ConnectionController) cleanupFailedInboundConnection(remoteNode *RemoteNode, connection Connection) { - glog.V(2).Infof("ConnectionController.cleanupFailedInboundConnection: Cleaning up failed inbound connection") +func (nm *NetworkManager) cleanupFailedInboundConnection(remoteNode *RemoteNode, connection Connection) { + glog.V(2).Infof("NetworkManager.cleanupFailedInboundConnection: Cleaning up failed inbound connection") if remoteNode != nil { - cc.rnManager.Disconnect(remoteNode) + nm.rnManager.Disconnect(remoteNode) } connection.Close() } -func (cc *ConnectionController) cleanupFailedOutboundConnection(connection Connection) { +func (nm *NetworkManager) cleanupFailedOutboundConnection(connection Connection) { oc, ok := connection.(*outboundConnection) if !ok { return } - glog.V(2).Infof("ConnectionController.cleanupFailedOutboundConnection: Cleaning up failed outbound connection") + glog.V(2).Infof("NetworkManager.cleanupFailedOutboundConnection: Cleaning up failed outbound connection") id := NewRemoteNodeId(oc.attemptId) - rn := cc.rnManager.GetRemoteNodeById(id) + rn := nm.rnManager.GetRemoteNodeById(id) if rn != nil { - cc.rnManager.Disconnect(rn) + nm.rnManager.Disconnect(rn) } oc.Close() - cc.cmgr.RemoveAttemptedOutboundAddrs(oc.address) + nm.cmgr.RemoveAttemptedOutboundAddrs(oc.address) } // ########################### // ## Persistent Connections // ########################### -func (cc *ConnectionController) refreshConnectIps() { +func (nm *NetworkManager) refreshConnectIps() { // Connect to addresses passed via the --connect-ips flag. These addresses are persistent in the sense that if we // disconnect from one, we will try to reconnect to the same one. 
- for _, connectIp := range cc.connectIps { - if _, ok := cc.persistentIpToRemoteNodeIdsMap.Get(connectIp); ok { + for _, connectIp := range nm.connectIps { + if _, ok := nm.persistentIpToRemoteNodeIdsMap.Get(connectIp); ok { continue } - glog.Infof("ConnectionController.initiatePersistentConnections: Connecting to connectIp: %v", connectIp) - id, err := cc.CreateNonValidatorPersistentOutboundConnection(connectIp) + glog.Infof("NetworkManager.initiatePersistentConnections: Connecting to connectIp: %v", connectIp) + id, err := nm.CreateNonValidatorPersistentOutboundConnection(connectIp) if err != nil { - glog.Errorf("ConnectionController.initiatePersistentConnections: Problem connecting "+ + glog.Errorf("NetworkManager.initiatePersistentConnections: Problem connecting "+ "to connectIp %v: %v", connectIp, err) continue } - cc.persistentIpToRemoteNodeIdsMap.Set(connectIp, id) + nm.persistentIpToRemoteNodeIdsMap.Set(connectIp, id) } } @@ -288,36 +296,36 @@ func (cc *ConnectionController) refreshConnectIps() { // ## Validator Connections // ########################### -func (cc *ConnectionController) SetActiveValidatorsMap(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { - cc.activeValidatorsMapLock.Lock() - defer cc.activeValidatorsMapLock.Unlock() - cc.activeValidatorsMap = activeValidatorsMap.Clone() +func (nm *NetworkManager) SetActiveValidatorsMap(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { + nm.activeValidatorsMapLock.Lock() + defer nm.activeValidatorsMapLock.Unlock() + nm.activeValidatorsMap = activeValidatorsMap.Clone() } -func (cc *ConnectionController) getActiveValidatorsMap() *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator] { - cc.activeValidatorsMapLock.RLock() - defer cc.activeValidatorsMapLock.RUnlock() - return cc.activeValidatorsMap.Clone() +func (nm *NetworkManager) getActiveValidatorsMap() 
*collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator] { + nm.activeValidatorsMapLock.RLock() + defer nm.activeValidatorsMapLock.RUnlock() + return nm.activeValidatorsMap.Clone() } // refreshValidatorIndex re-indexes validators based on the activeValidatorsMap. It is called periodically by the // validator connector. -func (cc *ConnectionController) refreshValidatorIndex(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { +func (nm *NetworkManager) refreshValidatorIndex(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { // De-index inactive validators. We skip any checks regarding RemoteNodes connection status, nor do we verify whether // de-indexing the validator would result in an excess number of outbound/inbound connections. Any excess connections // will be cleaned up by the peer connector. - validatorRemoteNodeMap := cc.rnManager.GetValidatorIndex().ToMap() + validatorRemoteNodeMap := nm.rnManager.GetValidatorIndex().ToMap() for pk, rn := range validatorRemoteNodeMap { // If the validator is no longer active, de-index it. if _, ok := activeValidatorsMap.Get(pk); !ok { - cc.rnManager.SetNonValidator(rn) - cc.rnManager.UnsetValidator(rn) + nm.rnManager.SetNonValidator(rn) + nm.rnManager.UnsetValidator(rn) } } // Look for validators in our existing outbound / inbound connections. - allNonValidators := cc.rnManager.GetAllNonValidators() + allNonValidators := nm.rnManager.GetAllNonValidators() for _, rn := range allNonValidators { // It is possible for a RemoteNode to be in the non-validator indices, and still have a public key. 
This can happen // if the RemoteNode advertised support for the SFValidator service flag during handshake, and provided us @@ -329,37 +337,37 @@ func (cc *ConnectionController) refreshValidatorIndex(activeValidatorsMap *colle // It is possible that through unlikely concurrence, and malevolence, two non-validators happen to have the same // public key, which goes undetected during handshake. To prevent this from affecting the indexing of the validator // set, we check that the non-validator's public key is not already present in the validator index. - if _, ok := cc.rnManager.GetValidatorIndex().Get(pk.Serialize()); ok { - glog.V(2).Infof("ConnectionController.refreshValidatorIndex: Disconnecting Validator RemoteNode "+ + if _, ok := nm.rnManager.GetValidatorIndex().Get(pk.Serialize()); ok { + glog.V(2).Infof("NetworkManager.refreshValidatorIndex: Disconnecting Validator RemoteNode "+ "(%v) has validator public key (%v) that is already present in validator index", rn, pk) - cc.rnManager.Disconnect(rn) + nm.rnManager.Disconnect(rn) continue } // If the RemoteNode turns out to be in the validator set, index it. if _, ok := activeValidatorsMap.Get(pk.Serialize()); ok { - cc.rnManager.SetValidator(rn) - cc.rnManager.UnsetNonValidator(rn) + nm.rnManager.SetValidator(rn) + nm.rnManager.UnsetNonValidator(rn) } } } // connectValidators attempts to connect to all active validators that are not already connected. It is called // periodically by the validator connector. -func (cc *ConnectionController) connectValidators(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { +func (nm *NetworkManager) connectValidators(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { // Look through the active validators and connect to any that we're not already connected to. 
- if cc.blsKeystore == nil { + if nm.blsKeystore == nil { return } validators := activeValidatorsMap.ToMap() for pk, validator := range validators { - _, exists := cc.rnManager.GetValidatorIndex().Get(pk) + _, exists := nm.rnManager.GetValidatorIndex().Get(pk) // If we're already connected to the validator, continue. if exists { continue } - if cc.blsKeystore.GetSigner().GetPublicKey().Serialize() == pk { + if nm.blsKeystore.GetSigner().GetPublicKey().Serialize() == pk { continue } @@ -373,8 +381,8 @@ func (cc *ConnectionController) connectValidators(activeValidatorsMap *collectio continue } address := string(validator.GetDomains()[0]) - if err := cc.CreateValidatorConnection(address, publicKey); err != nil { - glog.V(2).Infof("ConnectionController.connectValidators: Problem connecting to validator %v: %v", address, err) + if err := nm.CreateValidatorConnection(address, publicKey); err != nil { + glog.V(2).Infof("NetworkManager.connectValidators: Problem connecting to validator %v: %v", address, err) continue } } @@ -386,21 +394,21 @@ func (cc *ConnectionController) connectValidators(activeValidatorsMap *collectio // refreshNonValidatorOutboundIndex is called periodically by the peer connector. It is responsible for disconnecting excess // outbound remote nodes. -func (cc *ConnectionController) refreshNonValidatorOutboundIndex() { +func (nm *NetworkManager) refreshNonValidatorOutboundIndex() { // There are three categories of outbound remote nodes: attempted, connected, and persistent. All of these // remote nodes are stored in the same non-validator outbound index. We want to disconnect excess remote nodes that // are not persistent, starting with the attempted nodes first. // First let's run a quick check to see if the number of our non-validator remote nodes exceeds our target. Note that // this number will include the persistent nodes. 
- numOutboundRemoteNodes := uint32(cc.rnManager.GetNonValidatorOutboundIndex().Count()) - if numOutboundRemoteNodes <= cc.targetNonValidatorOutboundRemoteNodes { + numOutboundRemoteNodes := uint32(nm.rnManager.GetNonValidatorOutboundIndex().Count()) + if numOutboundRemoteNodes <= nm.targetNonValidatorOutboundRemoteNodes { return } // If we get here, it means that we should potentially disconnect some remote nodes. Let's first separate the // attempted and connected remote nodes, ignoring the persistent ones. - allOutboundRemoteNodes := cc.rnManager.GetNonValidatorOutboundIndex().GetAll() + allOutboundRemoteNodes := nm.rnManager.GetNonValidatorOutboundIndex().GetAll() var attemptedOutboundRemoteNodes, connectedOutboundRemoteNodes []*RemoteNode for _, rn := range allOutboundRemoteNodes { if rn.IsPersistent() || rn.IsExpectedValidator() { @@ -417,8 +425,8 @@ func (cc *ConnectionController) refreshNonValidatorOutboundIndex() { // connected remote nodes. We can then find out how many remote nodes we need to disconnect. numOutboundRemoteNodes = uint32(len(attemptedOutboundRemoteNodes) + len(connectedOutboundRemoteNodes)) excessiveOutboundRemoteNodes := uint32(0) - if numOutboundRemoteNodes > cc.targetNonValidatorOutboundRemoteNodes { - excessiveOutboundRemoteNodes = numOutboundRemoteNodes - cc.targetNonValidatorOutboundRemoteNodes + if numOutboundRemoteNodes > nm.targetNonValidatorOutboundRemoteNodes { + excessiveOutboundRemoteNodes = numOutboundRemoteNodes - nm.targetNonValidatorOutboundRemoteNodes } // First disconnect the attempted remote nodes. 
@@ -426,9 +434,9 @@ func (cc *ConnectionController) refreshNonValidatorOutboundIndex() { if excessiveOutboundRemoteNodes == 0 { break } - glog.V(2).Infof("ConnectionController.refreshNonValidatorOutboundIndex: Disconnecting attempted remote "+ + glog.V(2).Infof("NetworkManager.refreshNonValidatorOutboundIndex: Disconnecting attempted remote "+ "node (id=%v) due to excess outbound peers", rn.GetId()) - cc.rnManager.Disconnect(rn) + nm.rnManager.Disconnect(rn) excessiveOutboundRemoteNodes-- } // Now disconnect the connected remote nodes, if we still have too many remote nodes. @@ -436,24 +444,24 @@ func (cc *ConnectionController) refreshNonValidatorOutboundIndex() { if excessiveOutboundRemoteNodes == 0 { break } - glog.V(2).Infof("ConnectionController.refreshNonValidatorOutboundIndex: Disconnecting connected remote "+ + glog.V(2).Infof("NetworkManager.refreshNonValidatorOutboundIndex: Disconnecting connected remote "+ "node (id=%v) due to excess outbound peers", rn.GetId()) - cc.rnManager.Disconnect(rn) + nm.rnManager.Disconnect(rn) excessiveOutboundRemoteNodes-- } } // refreshNonValidatorInboundIndex is called periodically by the non-validator connector. It is responsible for // disconnecting excess inbound remote nodes. -func (cc *ConnectionController) refreshNonValidatorInboundIndex() { +func (nm *NetworkManager) refreshNonValidatorInboundIndex() { // First let's check if we have an excess number of inbound remote nodes. If we do, we'll disconnect some of them. - numConnectedInboundRemoteNodes := uint32(cc.rnManager.GetNonValidatorInboundIndex().Count()) - if numConnectedInboundRemoteNodes <= cc.targetNonValidatorInboundRemoteNodes { + numConnectedInboundRemoteNodes := uint32(nm.rnManager.GetNonValidatorInboundIndex().Count()) + if numConnectedInboundRemoteNodes <= nm.targetNonValidatorInboundRemoteNodes { return } // Disconnect random inbound non-validators if we have too many of them. 
- inboundRemoteNodes := cc.rnManager.GetNonValidatorInboundIndex().GetAll() + inboundRemoteNodes := nm.rnManager.GetNonValidatorInboundIndex().GetAll() var connectedInboundRemoteNodes []*RemoteNode for _, rn := range inboundRemoteNodes { // We only want to disconnect remote nodes that have completed handshake. @@ -463,58 +471,58 @@ func (cc *ConnectionController) refreshNonValidatorInboundIndex() { } excessiveInboundRemoteNodes := uint32(0) - if numConnectedInboundRemoteNodes > cc.targetNonValidatorInboundRemoteNodes { - excessiveInboundRemoteNodes = numConnectedInboundRemoteNodes - cc.targetNonValidatorInboundRemoteNodes + if numConnectedInboundRemoteNodes > nm.targetNonValidatorInboundRemoteNodes { + excessiveInboundRemoteNodes = numConnectedInboundRemoteNodes - nm.targetNonValidatorInboundRemoteNodes } for _, rn := range connectedInboundRemoteNodes { if excessiveInboundRemoteNodes == 0 { break } - glog.V(2).Infof("ConnectionController.refreshNonValidatorInboundIndex: Disconnecting inbound remote "+ + glog.V(2).Infof("NetworkManager.refreshNonValidatorInboundIndex: Disconnecting inbound remote "+ "node (id=%v) due to excess inbound peers", rn.GetId()) - cc.rnManager.Disconnect(rn) + nm.rnManager.Disconnect(rn) excessiveInboundRemoteNodes-- } } -func (cc *ConnectionController) connectNonValidators() { - numOutboundPeers := uint32(cc.rnManager.GetNonValidatorOutboundIndex().Count()) +func (nm *NetworkManager) connectNonValidators() { + numOutboundPeers := uint32(nm.rnManager.GetNonValidatorOutboundIndex().Count()) remainingOutboundPeers := uint32(0) - if numOutboundPeers < cc.targetNonValidatorOutboundRemoteNodes { - remainingOutboundPeers = cc.targetNonValidatorOutboundRemoteNodes - numOutboundPeers + if numOutboundPeers < nm.targetNonValidatorOutboundRemoteNodes { + remainingOutboundPeers = nm.targetNonValidatorOutboundRemoteNodes - numOutboundPeers } for ii := uint32(0); ii < remainingOutboundPeers; ii++ { - addr := cc.getRandomUnconnectedAddress() + addr := 
nm.getRandomUnconnectedAddress() if addr == nil { break } - cc.AddrMgr.Attempt(addr) - if err := cc.rnManager.CreateNonValidatorOutboundConnection(addr); err != nil { - glog.V(2).Infof("ConnectionController.connectNonValidators: Problem creating non-validator outbound "+ + nm.AddrMgr.Attempt(addr) + if err := nm.rnManager.CreateNonValidatorOutboundConnection(addr); err != nil { + glog.V(2).Infof("NetworkManager.connectNonValidators: Problem creating non-validator outbound "+ "connection to addr: %v; err: %v", addr, err) } } } -func (cc *ConnectionController) getRandomUnconnectedAddress() *wire.NetAddress { +func (nm *NetworkManager) getRandomUnconnectedAddress() *wire.NetAddress { for tries := 0; tries < 100; tries++ { - addr := cc.AddrMgr.GetAddress() + addr := nm.AddrMgr.GetAddress() if addr == nil { break } - if cc.cmgr.IsConnectedOutboundIpAddress(addr.NetAddress()) { + if nm.cmgr.IsConnectedOutboundIpAddress(addr.NetAddress()) { continue } - if cc.cmgr.IsAttemptedOutboundIpAddress(addr.NetAddress()) { + if nm.cmgr.IsAttemptedOutboundIpAddress(addr.NetAddress()) { continue } // We can only have one outbound address per /16. This is similar to // Bitcoin and we do it to prevent Sybil attacks. 
- if cc.cmgr.IsFromRedundantOutboundIPAddress(addr.NetAddress()) { + if nm.cmgr.IsFromRedundantOutboundIPAddress(addr.NetAddress()) { continue } @@ -524,62 +532,62 @@ func (cc *ConnectionController) getRandomUnconnectedAddress() *wire.NetAddress { return nil } -func (cc *ConnectionController) CreateValidatorConnection(ipStr string, publicKey *bls.PublicKey) error { - netAddr, err := cc.ConvertIPStringToNetAddress(ipStr) +func (nm *NetworkManager) CreateValidatorConnection(ipStr string, publicKey *bls.PublicKey) error { + netAddr, err := nm.ConvertIPStringToNetAddress(ipStr) if err != nil { return err } - return cc.rnManager.CreateValidatorConnection(netAddr, publicKey) + return nm.rnManager.CreateValidatorConnection(netAddr, publicKey) } -func (cc *ConnectionController) CreateNonValidatorPersistentOutboundConnection(ipStr string) (RemoteNodeId, error) { - netAddr, err := cc.ConvertIPStringToNetAddress(ipStr) +func (nm *NetworkManager) CreateNonValidatorPersistentOutboundConnection(ipStr string) (RemoteNodeId, error) { + netAddr, err := nm.ConvertIPStringToNetAddress(ipStr) if err != nil { return 0, err } - return cc.rnManager.CreateNonValidatorPersistentOutboundConnection(netAddr) + return nm.rnManager.CreateNonValidatorPersistentOutboundConnection(netAddr) } -func (cc *ConnectionController) CreateNonValidatorOutboundConnection(ipStr string) error { - netAddr, err := cc.ConvertIPStringToNetAddress(ipStr) +func (nm *NetworkManager) CreateNonValidatorOutboundConnection(ipStr string) error { + netAddr, err := nm.ConvertIPStringToNetAddress(ipStr) if err != nil { return err } - return cc.rnManager.CreateNonValidatorOutboundConnection(netAddr) + return nm.rnManager.CreateNonValidatorOutboundConnection(netAddr) } -func (cc *ConnectionController) SetTargetOutboundPeers(numPeers uint32) { - cc.targetNonValidatorOutboundRemoteNodes = numPeers +func (nm *NetworkManager) SetTargetOutboundPeers(numPeers uint32) { + nm.targetNonValidatorOutboundRemoteNodes = numPeers } // 
processInboundConnection is called when a new inbound connection is established. At this point, the connection is not validated, // nor is it assigned to a RemoteNode. This function is responsible for validating the connection and creating a RemoteNode from it. // Once the RemoteNode is created, we will initiate handshake. -func (cc *ConnectionController) processInboundConnection(conn Connection) (*RemoteNode, error) { +func (nm *NetworkManager) processInboundConnection(conn Connection) (*RemoteNode, error) { var ic *inboundConnection var ok bool if ic, ok = conn.(*inboundConnection); !ok { - return nil, fmt.Errorf("ConnectionController.handleInboundConnection: Connection is not an inboundConnection") + return nil, fmt.Errorf("NetworkManager.handleInboundConnection: Connection is not an inboundConnection") } // If we want to limit inbound connections to one per IP address, check to make sure this address isn't already connected. - if cc.limitOneInboundRemoteNodePerIP && - cc.isDuplicateInboundIPAddress(ic.connection.RemoteAddr()) { + if nm.limitOneInboundRemoteNodePerIP && + nm.isDuplicateInboundIPAddress(ic.connection.RemoteAddr()) { - return nil, fmt.Errorf("ConnectionController.handleInboundConnection: Rejecting INBOUND peer (%s) due to "+ + return nil, fmt.Errorf("NetworkManager.handleInboundConnection: Rejecting INBOUND peer (%s) due to "+ "already having an inbound connection from the same IP with limit_one_inbound_connection_per_ip set", ic.connection.RemoteAddr().String()) } - na, err := cc.ConvertIPStringToNetAddress(ic.connection.RemoteAddr().String()) + na, err := nm.ConvertIPStringToNetAddress(ic.connection.RemoteAddr().String()) if err != nil { - return nil, errors.Wrapf(err, "ConnectionController.handleInboundConnection: Problem calling "+ + return nil, errors.Wrapf(err, "NetworkManager.handleInboundConnection: Problem calling "+ "ConvertIPStringToNetAddress for addr: (%s)", ic.connection.RemoteAddr().String()) } - remoteNode, err := 
cc.rnManager.AttachInboundConnection(ic.connection, na) + remoteNode, err := nm.rnManager.AttachInboundConnection(ic.connection, na) if remoteNode == nil || err != nil { - return nil, errors.Wrapf(err, "ConnectionController.handleInboundConnection: Problem calling "+ + return nil, errors.Wrapf(err, "NetworkManager.handleInboundConnection: Problem calling "+ "AttachInboundConnection for addr: (%s)", ic.connection.RemoteAddr().String()) } @@ -589,40 +597,40 @@ func (cc *ConnectionController) processInboundConnection(conn Connection) (*Remo // processOutboundConnection is called when a new outbound connection is established. At this point, the connection is not validated, // nor is it assigned to a RemoteNode. This function is responsible for validating the connection and creating a RemoteNode from it. // Once the RemoteNode is created, we will initiate handshake. -func (cc *ConnectionController) processOutboundConnection(conn Connection) (*RemoteNode, error) { +func (nm *NetworkManager) processOutboundConnection(conn Connection) (*RemoteNode, error) { var oc *outboundConnection var ok bool if oc, ok = conn.(*outboundConnection); !ok { - return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Connection is not an outboundConnection") + return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Connection is not an outboundConnection") } if oc.failed { - return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Failed to connect to peer (%s:%v)", + return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Failed to connect to peer (%s:%v)", oc.address.IP.String(), oc.address.Port) } if !oc.isPersistent { - cc.AddrMgr.Connected(oc.address) - cc.AddrMgr.Good(oc.address) + nm.AddrMgr.Connected(oc.address) + nm.AddrMgr.Good(oc.address) } // If this is a non-persistent outbound peer and the group key overlaps with another peer we're already connected to then // abort mission. 
We only connect to one peer per IP group in order to prevent Sybil attacks. - if !oc.isPersistent && cc.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { - return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Rejecting OUTBOUND NON-PERSISTENT "+ + if !oc.isPersistent && nm.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { + return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Rejecting OUTBOUND NON-PERSISTENT "+ "connection with redundant group key (%s).", addrmgr.GroupKey(oc.address)) } - na, err := cc.ConvertIPStringToNetAddress(oc.connection.RemoteAddr().String()) + na, err := nm.ConvertIPStringToNetAddress(oc.connection.RemoteAddr().String()) if err != nil { - return nil, errors.Wrapf(err, "ConnectionController.handleOutboundConnection: Problem calling ipToNetAddr "+ + return nil, errors.Wrapf(err, "NetworkManager.handleOutboundConnection: Problem calling ipToNetAddr "+ "for addr: (%s)", oc.connection.RemoteAddr().String()) } // Attach the connection before additional validation steps because it is already established. - remoteNode, err := cc.rnManager.AttachOutboundConnection(oc.connection, na, oc.attemptId, oc.isPersistent) + remoteNode, err := nm.rnManager.AttachOutboundConnection(oc.connection, na, oc.attemptId, oc.isPersistent) if remoteNode == nil || err != nil { - return nil, errors.Wrapf(err, "ConnectionController.handleOutboundConnection: Problem calling rnManager.AttachOutboundConnection "+ + return nil, errors.Wrapf(err, "NetworkManager.handleOutboundConnection: Problem calling rnManager.AttachOutboundConnection "+ "for addr: (%s)", oc.connection.RemoteAddr().String()) } @@ -636,24 +644,24 @@ func (cc *ConnectionController) processOutboundConnection(conn Connection) (*Rem // If the group key overlaps with another peer we're already connected to then abort mission. We only connect to // one peer per IP group in order to prevent Sybil attacks. 
- if cc.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { - return nil, fmt.Errorf("ConnectionController.handleOutboundConnection: Rejecting OUTBOUND NON-PERSISTENT "+ + if nm.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { + return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Rejecting OUTBOUND NON-PERSISTENT "+ "connection with redundant group key (%s).", addrmgr.GroupKey(oc.address)) } - cc.cmgr.AddToGroupKey(na) + nm.cmgr.AddToGroupKey(na) return remoteNode, nil } -func (cc *ConnectionController) ConvertIPStringToNetAddress(ipStr string) (*wire.NetAddress, error) { - netAddr, err := IPToNetAddr(ipStr, cc.AddrMgr, cc.params) +func (nm *NetworkManager) ConvertIPStringToNetAddress(ipStr string) (*wire.NetAddress, error) { + netAddr, err := IPToNetAddr(ipStr, nm.AddrMgr, nm.params) if err != nil { return nil, errors.Wrapf(err, - "ConnectionController.ConvertIPStringToNetAddress: Problem parsing "+ + "NetworkManager.ConvertIPStringToNetAddress: Problem parsing "+ "ipString to wire.NetAddress") } if netAddr == nil { - return nil, fmt.Errorf("ConnectionController.ConvertIPStringToNetAddress: " + + return nil, fmt.Errorf("NetworkManager.ConvertIPStringToNetAddress: " + "address was nil after parsing") } return netAddr, nil @@ -680,24 +688,24 @@ func IPToNetAddr(ipStr string, addrMgr *addrmgr.AddrManager, params *DeSoParams) return netAddr, nil } -func (cc *ConnectionController) isDuplicateInboundIPAddress(addr net.Addr) bool { - netAddr, err := IPToNetAddr(addr.String(), cc.AddrMgr, cc.params) +func (nm *NetworkManager) isDuplicateInboundIPAddress(addr net.Addr) bool { + netAddr, err := IPToNetAddr(addr.String(), nm.AddrMgr, nm.params) if err != nil { // Return true in case we have an error. We do this because it // will result in the peer connection not being accepted, which // is desired in this case. 
glog.Warningf(errors.Wrapf(err, - "ConnectionController.isDuplicateInboundIPAddress: Problem parsing "+ + "NetworkManager.isDuplicateInboundIPAddress: Problem parsing "+ "net.Addr to wire.NetAddress so marking as redundant and not "+ "making connection").Error()) return true } if netAddr == nil { - glog.Warningf("ConnectionController.isDuplicateInboundIPAddress: " + + glog.Warningf("NetworkManager.isDuplicateInboundIPAddress: " + "address was nil after parsing so marking as redundant and not " + "making connection") return true } - return cc.cmgr.IsDuplicateInboundIPAddress(netAddr) + return nm.cmgr.IsDuplicateInboundIPAddress(netAddr) } diff --git a/lib/peer.go b/lib/peer.go index c59d77921..996d2632d 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -1196,17 +1196,6 @@ func (pp *Peer) Start() { go pp.StartDeSoMessageProcessor() pp.startGroup.Wait() - // If the address manager needs more addresses, then send a GetAddr message - // to the peer. This is best-effort. - if pp.cmgr != nil { - // TODO: Move this to ConnectionController. - /*if pp.cmgr.AddrMgr.NeedMoreAddresses() { - go func() { - pp.QueueMessage(&MsgDeSoGetAddr{}) - }() - }*/ - } - // Send our verack message now that the IO processing machinery has started. 
} diff --git a/lib/remote_node.go b/lib/remote_node.go index 42fe21521..b8f378d07 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -304,6 +304,8 @@ func (rn *RemoteNode) AttachInboundConnection(conn net.Conn, na *wire.NetAddress id := rn.GetId().ToUint64() rn.peer = rn.cmgr.ConnectPeer(id, conn, na, false, false) + versionTimeExpected := time.Now().Add(rn.params.VersionNegotiationTimeout) + rn.versionTimeExpected = &versionTimeExpected rn.setStatusConnected() return nil } @@ -319,6 +321,8 @@ func (rn *RemoteNode) AttachOutboundConnection(conn net.Conn, na *wire.NetAddres id := rn.GetId().ToUint64() rn.peer = rn.cmgr.ConnectPeer(id, conn, na, true, isPersistent) + versionTimeExpected := time.Now().Add(rn.params.VersionNegotiationTimeout) + rn.versionTimeExpected = &versionTimeExpected rn.setStatusConnected() return nil } @@ -373,8 +377,6 @@ func (rn *RemoteNode) InitiateHandshake(nonce uint64) error { return fmt.Errorf("InitiateHandshake: Remote node is not connected") } - versionTimeExpected := time.Now().Add(rn.params.VersionNegotiationTimeout) - rn.versionTimeExpected = &versionTimeExpected if rn.GetPeer().IsOutbound() { if err := rn.sendVersionMessage(nonce); err != nil { return fmt.Errorf("InitiateHandshake: Problem sending version message to peer (id= %d): %v", rn.id, err) diff --git a/lib/remote_node_manager.go b/lib/remote_node_manager.go index 2457af58f..bd23908bf 100644 --- a/lib/remote_node_manager.go +++ b/lib/remote_node_manager.go @@ -79,6 +79,7 @@ func (manager *RemoteNodeManager) ProcessCompletedHandshake(remoteNode *RemoteNo manager.SetNonValidator(remoteNode) } manager.srv.HandleAcceptedPeer(remoteNode.GetPeer()) + manager.srv.maybeRequestAddresses(remoteNode) } func (manager *RemoteNodeManager) Disconnect(rn *RemoteNode) { diff --git a/lib/server.go b/lib/server.go index 987431af8..e54296739 100644 --- a/lib/server.go +++ b/lib/server.go @@ -63,9 +63,8 @@ type Server struct { eventManager *EventManager TxIndex *TXIndex - 
handshakeController *HandshakeController // fastHotStuffEventLoop consensus.FastHotStuffEventLoop - connectionController *ConnectionController + networkManager *NetworkManager // posMempool *PosMemPool TODO: Add the mempool later params *DeSoParams @@ -182,8 +181,8 @@ func (srv *Server) ResetRequestQueues() { srv.requestedTransactionsMap = make(map[BlockHash]*GetDataRequestInfo) } -func (srv *Server) GetConnectionController() *ConnectionController { - return srv.connectionController +func (srv *Server) GetNetworkManager() *NetworkManager { + return srv.networkManager } // dataLock must be acquired for writing before calling this function. @@ -503,10 +502,8 @@ func NewServer( nodeServices |= SFPosValidator } rnManager := NewRemoteNodeManager(srv, _chain, _cmgr, _blsKeystore, _params, _minFeeRateNanosPerKB, nodeServices) - - srv.handshakeController = NewHandshakeController(rnManager) - srv.connectionController = NewConnectionController(_params, _cmgr, srv.handshakeController, rnManager, _blsKeystore, - _desoAddrMgr, _connectIps, _targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP) + srv.networkManager = NewConnectionController(_params, _cmgr, rnManager, _blsKeystore, _desoAddrMgr, + _connectIps, _targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP) if srv.stateChangeSyncer != nil { srv.stateChangeSyncer.BlockHeight = uint64(_chain.headerTip().Height) @@ -1625,6 +1622,22 @@ func (srv *Server) HandleAcceptedPeer(pp *Peer) { } } +func (srv *Server) maybeRequestAddresses(remoteNode *RemoteNode) { + if remoteNode == nil { + return + } + // If the address manager needs more addresses, then send a GetAddr message + // to the peer. This is best-effort. 
+ if !srv.AddrMgr.NeedMoreAddresses() { + return + } + + if err := remoteNode.SendMessage(&MsgDeSoGetAddr{}); err != nil { + glog.Errorf("Server.maybeRequestAddresses: Problem sending GetAddr message to "+ + "remoteNode (id= %v); err: %v", remoteNode, err) + } +} + func (srv *Server) _cleanupDonePeerState(pp *Peer) { // Grab the dataLock since we'll be modifying requestedBlocks srv.dataLock.Lock() @@ -2186,7 +2199,7 @@ func (srv *Server) _handleAddrMessage(pp *Peer, desoMsg DeSoMessage) { var ok bool if msg, ok = desoMsg.(*MsgDeSoAddr); !ok { glog.Errorf("Server._handleAddrMessage: Problem decoding MsgDeSoAddr: %v", spew.Sdump(desoMsg)) - srv.connectionController.rnManager.DisconnectById(id) + srv.networkManager.rnManager.DisconnectById(id) return } @@ -2202,7 +2215,7 @@ func (srv *Server) _handleAddrMessage(pp *Peer, desoMsg DeSoMessage) { "Peer id=%v for sending us an addr message with %d transactions, which exceeds "+ "the max allowed %d", pp.ID, len(msg.AddrList), MaxAddrsPerAddrMsg)) - srv.connectionController.rnManager.DisconnectById(id) + srv.networkManager.rnManager.DisconnectById(id) return } @@ -2253,7 +2266,7 @@ func (srv *Server) _handleGetAddrMessage(pp *Peer, desoMsg DeSoMessage) { if _, ok := desoMsg.(*MsgDeSoGetAddr); !ok { glog.Errorf("Server._handleAddrMessage: Problem decoding "+ "MsgDeSoAddr: %v", spew.Sdump(desoMsg)) - srv.connectionController.rnManager.DisconnectById(id) + srv.networkManager.rnManager.DisconnectById(id) return } @@ -2261,6 +2274,9 @@ func (srv *Server) _handleGetAddrMessage(pp *Peer, desoMsg DeSoMessage) { // When we get a GetAddr message, choose MaxAddrsPerMsg from the AddrMgr // and send them back to the peer. 
netAddrsFound := srv.AddrMgr.AddressCache() + if len(netAddrsFound) == 0 { + return + } if len(netAddrsFound) > MaxAddrsPerAddrMsg { netAddrsFound = netAddrsFound[:MaxAddrsPerAddrMsg] } @@ -2276,10 +2292,10 @@ func (srv *Server) _handleGetAddrMessage(pp *Peer, desoMsg DeSoMessage) { } res.AddrList = append(res.AddrList, singleAddr) } - rn := srv.connectionController.rnManager.GetRemoteNodeById(id) - if err := srv.connectionController.rnManager.SendMessage(rn, res); err != nil { + rn := srv.networkManager.rnManager.GetRemoteNodeById(id) + if err := srv.networkManager.rnManager.SendMessage(rn, res); err != nil { glog.Errorf("Server._handleGetAddrMessage: Problem sending addr message to peer %v: %v", pp, err) - srv.connectionController.rnManager.DisconnectById(id) + srv.networkManager.rnManager.DisconnectById(id) return } } @@ -2289,9 +2305,9 @@ func (srv *Server) _handleControlMessages(serverMessage *ServerMessage) (_should // Control messages used internally to signal to the server. case *MsgDeSoDisconnectedPeer: srv._handleDonePeer(serverMessage.Peer) - srv.connectionController._handleDonePeerMessage(serverMessage.Peer, serverMessage.Msg) + srv.networkManager._handleDonePeerMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoNewConnection: - srv.connectionController._handleNewConnectionMessage(serverMessage.Peer, serverMessage.Msg) + srv.networkManager._handleNewConnectionMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoQuit: return true } @@ -2330,9 +2346,9 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { case *MsgDeSoInv: srv._handleInv(serverMessage.Peer, msg) case *MsgDeSoVersion: - srv.handshakeController._handleVersionMessage(serverMessage.Peer, serverMessage.Msg) + srv.networkManager._handleVersionMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoVerack: - srv.handshakeController._handleVerackMessage(serverMessage.Peer, serverMessage.Msg) + srv.networkManager._handleVerackMessage(serverMessage.Peer, 
serverMessage.Msg) } } @@ -2498,7 +2514,7 @@ func (srv *Server) _startAddressRelayer() { // For the first ten minutes after the connection controller starts, relay our address to all // peers. After the first ten minutes, do it once every 24 hours. glog.V(1).Infof("Server.startAddressRelayer: Relaying our own addr to peers") - remoteNodes := srv.connectionController.rnManager.GetAllRemoteNodes().GetAll() + remoteNodes := srv.networkManager.rnManager.GetAllRemoteNodes().GetAll() if numMinutesPassed < 10 || numMinutesPassed%(RebroadcastNodeAddrIntervalMinutes) == 0 { for _, rn := range remoteNodes { if !rn.IsHandshakeCompleted() { @@ -2582,8 +2598,8 @@ func (srv *Server) Stop() { srv.cmgr.Stop() glog.Infof(CLog(Yellow, "Server.Stop: Closed the ConnectionManger")) - srv.connectionController.Stop() - glog.Infof(CLog(Yellow, "Server.Stop: Closed the ConnectionController")) + srv.networkManager.Stop() + glog.Infof(CLog(Yellow, "Server.Stop: Closed the NetworkManager")) // Stop the miner if we have one running. if srv.miner != nil { @@ -2668,7 +2684,7 @@ func (srv *Server) Start() { go srv.miner.Start() } - srv.connectionController.Start() + srv.networkManager.Start() } // SyncPrefixProgress keeps track of sync progress on an individual prefix. 
It is used in From a619f90fee0e287858c334122db9679cd68d44de Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 30 Jan 2024 08:15:50 -0500 Subject: [PATCH 410/762] Support Server Event Loop When FastHotStuffConsensus Channel Isn't Initialized (#967) --- lib/server.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/server.go b/lib/server.go index 6c45e577d..e1448b927 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2522,7 +2522,7 @@ func (srv *Server) _startConsensus() { } select { - case consensusEvent := <-srv.fastHotStuffConsensus.fastHotStuffEventLoop.GetEvents(): + case consensusEvent := <-srv._getFastHotStuffConsensusEventChannel(): { glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.TipBlockHeight) srv._handleFastHostStuffConsensusEvent(consensusEvent) @@ -2658,6 +2658,13 @@ func (srv *Server) _startAddressRelayer() { } } +func (srv *Server) _getFastHotStuffConsensusEventChannel() chan *consensus.FastHotStuffEvent { + if srv.fastHotStuffConsensus == nil { + return nil + } + return srv.fastHotStuffConsensus.fastHotStuffEventLoop.GetEvents() +} + func (srv *Server) _startTransactionRelayer() { // If we've set a maximum sync height, we will not relay transactions. 
if srv.blockchain.MaxSyncBlockHeight > 0 { From c44e0fd9447f62ada190cf952957860ac5f0cab9 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 30 Jan 2024 08:25:39 -0500 Subject: [PATCH 411/762] Use Blockchain TimeSource For Block Production (#968) --- lib/pos_block_producer.go | 14 +++++++++++--- lib/pos_block_producer_test.go | 7 ++++--- lib/pos_blockchain_test.go | 4 ++-- lib/pos_consensus.go | 9 ++++++++- 4 files changed, 25 insertions(+), 9 deletions(-) diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 62d2dd0f7..19f39e655 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -2,8 +2,8 @@ package lib import ( "math" - "time" + chainlib "github.com/btcsuite/btcd/blockchain" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections/bitset" "github.com/pkg/errors" @@ -24,14 +24,22 @@ type PosBlockProducer struct { params *DeSoParams proposerPublicKey *PublicKey proposerVotingPublicKey *bls.PublicKey + timeSource chainlib.MedianTimeSource } -func NewPosBlockProducer(mp Mempool, params *DeSoParams, proposerPublicKey *PublicKey, proposerVotingPublicKey *bls.PublicKey) *PosBlockProducer { +func NewPosBlockProducer( + mp Mempool, + params *DeSoParams, + timeSource chainlib.MedianTimeSource, + proposerPublicKey *PublicKey, + proposerVotingPublicKey *bls.PublicKey, +) *PosBlockProducer { return &PosBlockProducer{ mp: mp, params: params, proposerPublicKey: proposerPublicKey, proposerVotingPublicKey: proposerVotingPublicKey, + timeSource: timeSource, } } @@ -75,7 +83,7 @@ func (pbp *PosBlockProducer) CreateUnsignedTimeoutBlock(latestBlockView *UtxoVie func (pbp *PosBlockProducer) createBlockTemplate(latestBlockView *UtxoView, newBlockHeight uint64, view uint64, proposerRandomSeedSignature *bls.Signature) (BlockTemplate, error) { // First get the block without the header. 
- currentTimestamp := uint64(time.Now().UnixNano()) + currentTimestamp := uint64(pbp.timeSource.AdjustedTime().UnixNano()) block, err := pbp.createBlockWithoutHeader(latestBlockView, newBlockHeight, currentTimestamp) if err != nil { return nil, errors.Wrapf(err, "PosBlockProducer.CreateBlockTemplate: Problem creating block without header") diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 3148629f2..d2d340244 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + chainlib "github.com/btcsuite/btcd/blockchain" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections/bitset" "github.com/stretchr/testify/require" @@ -59,7 +60,7 @@ func TestCreateBlockTemplate(t *testing.T) { _, err = seedSignature.FromBytes(Sha256DoubleHash([]byte("seed")).ToBytes()) require.NoError(err) m0Pk := NewPublicKey(m0PubBytes) - pbp := NewPosBlockProducer(mempool, params, m0Pk, pub) + pbp := NewPosBlockProducer(mempool, params, chainlib.NewMedianTime(), m0Pk, pub) blockTemplate, err := pbp.createBlockTemplate(latestBlockView, 3, 10, seedSignature) require.NoError(err) @@ -117,7 +118,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { _wrappedPosMempoolAddTransaction(t, mempool, txn) } - pbp := NewPosBlockProducer(mempool, params, NewPublicKey(m0PubBytes), blsPubKey) + pbp := NewPosBlockProducer(mempool, params, chainlib.NewMedianTime(), NewPublicKey(m0PubBytes), blsPubKey) txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions( latestBlockView, 3, 0, 50000) require.NoError(err) @@ -174,7 +175,7 @@ func TestGetBlockTransactions(t *testing.T) { _wrappedPosMempoolAddTransaction(t, mempool, txn) } - pbp := NewPosBlockProducer(mempool, params, nil, nil) + pbp := NewPosBlockProducer(mempool, params, chainlib.NewMedianTime(), nil, nil) _testProduceBlockNoSizeLimit(t, mempool, pbp, latestBlockView, 3, len(passingTxns), 0, 0) diff --git 
a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index b97e6b0fd..69f84018c 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2702,7 +2702,7 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { require.True(t, mempool.IsRunning()) priv := _generateRandomBLSPrivateKey(t) m0Pk := NewPublicKey(m0PubBytes) - posBlockProducer := NewPosBlockProducer(mempool, params, m0Pk, priv.PublicKey()) + posBlockProducer := NewPosBlockProducer(mempool, params, chain.timeSource, m0Pk, priv.PublicKey()) // TODO: do we need to update the encoder migration stuff for global params. Probably. testMeta.mempool = nil testMeta.posMempool = mempool @@ -2773,7 +2773,7 @@ func NewTestPoSBlockchain(t *testing.T) *TestMeta { require.True(t, mempool.IsRunning()) priv := _generateRandomBLSPrivateKey(t) m0Pk := NewPublicKey(m0PubBytes) - posBlockProducer := NewPosBlockProducer(mempool, params, m0Pk, priv.PublicKey()) + posBlockProducer := NewPosBlockProducer(mempool, params, chain.timeSource, m0Pk, priv.PublicKey()) testMeta := &TestMeta{ t: t, chain: chain, diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 5d53880db..dfa6707c3 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -737,7 +737,14 @@ func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView) (*PosBlockPr if blockProducerPublicKey == nil { return nil, errors.Errorf("Error fetching public key for block producer: %v", err) } - return NewPosBlockProducer(fc.mempool, fc.params, blockProducerPublicKey, blockProducerBlsPublicKey), nil + blockProducer := NewPosBlockProducer( + fc.mempool, + fc.params, + fc.blockchain.timeSource, + blockProducerPublicKey, + blockProducerBlsPublicKey, + ) + return blockProducer, nil } // Finds the epoch entry for the block and returns the epoch number. 
From c7317f531a6e23f939678405683dadc57293fe66 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Tue, 30 Jan 2024 12:20:17 -0800 Subject: [PATCH 412/762] PoS NetworkManager Fix Integration Tests (#960) * Some fixes * Fixes * Fix another integration test * Fix integration tests * Fix RegtestMiner --- integration_testing/blocksync_test.go | 135 ++------ integration_testing/connection_bridge.go | 9 - integration_testing/hypersync_test.go | 302 +++++------------- integration_testing/migrations_test.go | 56 +--- integration_testing/mining_test.go | 27 +- .../network_manager_utils_test.go | 13 + integration_testing/rollback_test.go | 3 + integration_testing/tools.go | 26 +- integration_testing/txindex_test.go | 49 +-- lib/peer.go | 47 ++- lib/remote_node.go | 4 + lib/remote_node_manager.go | 2 +- lib/server.go | 17 +- 13 files changed, 227 insertions(+), 463 deletions(-) diff --git a/integration_testing/blocksync_test.go b/integration_testing/blocksync_test.go index be87aae3a..e03aef152 100644 --- a/integration_testing/blocksync_test.go +++ b/integration_testing/blocksync_test.go @@ -2,10 +2,6 @@ package integration_testing import ( "fmt" - "github.com/deso-protocol/core/cmd" - "github.com/deso-protocol/core/lib" - "github.com/stretchr/testify/require" - "os" "testing" ) @@ -16,40 +12,22 @@ import ( // 4. node2 syncs MaxSyncBlockHeight blocks from node1. // 5. compare node1 db matches node2 db. 
func TestSimpleBlockSync(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeBlockSync - - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // TODO: Dial an outbound connection from node2 to node1 - // Fix other integration tests. + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) // wait for node2 to sync blocks. waitForNodeToFullySync(node2) compareNodesByDB(t, node1, node2, 0) fmt.Println("Databases match!") - node1.Stop() - node2.Stop() } // TestSimpleSyncRestart tests if a node can successfully restart while syncing blocks. @@ -61,46 +39,26 @@ func TestSimpleBlockSync(t *testing.T) { // 6. node2 reconnects with node1 and syncs remaining blocks. // 7. compare node1 db matches node2 db. 
func TestSimpleSyncRestart(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeBlockSync - - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) - randomHeight := randomUint32Between(t, 10, config2.MaxSyncBlockHeight) - fmt.Println("Random height for a restart (re-use if test failed):", randomHeight) + randomHeight := randomUint32Between(t, 10, node2.Config.MaxSyncBlockHeight) + t.Logf("Random height for a restart (re-use if test failed): %v", randomHeight) // Reboot node2 at a specific height and reconnect it with node1 - node2, bridge = restartAtHeightAndReconnectNode(t, node2, node1, bridge, randomHeight) + node2 = restartAtHeight(t, node2, randomHeight) waitForNodeToFullySync(node2) compareNodesByDB(t, node1, node2, 0) - fmt.Println("Random restart successful! Random height was", randomHeight) - fmt.Println("Databases match!") - bridge.Disconnect() - node1.Stop() - node2.Stop() + t.Logf("Random restart successful! 
Random height was: %v", randomHeight) + t.Logf("Databases match!") } // TestSimpleSyncDisconnectWithSwitchingToNewPeer tests if a node can successfully restart while syncing blocks, and @@ -114,62 +72,35 @@ func TestSimpleSyncRestart(t *testing.T) { // 7. compare node1 state matches node2 state. // 8. compare node3 state matches node2 state. func TestSimpleSyncDisconnectWithSwitchingToNewPeer(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - dbDir3 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - defer os.RemoveAll(dbDir3) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeBlockSync - config3 := generateConfig(t, 18002, dbDir3, 10) - config3.SyncType = lib.NodeSyncTypeBlockSync - - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - config3.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - node3 := cmd.NewNode(config3) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) - node3 = startNode(t, node3) // wait for node1 to sync blocks waitForNodeToFullySync(node1) + + node3 := spawnNodeProtocol1(t, 18002, "node3") + node3.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} + node3 = startNode(t, node3) + // wait for node3 to sync blocks waitForNodeToFullySync(node3) - // bridge the nodes together. 
- bridge12 := NewConnectionBridge(node1, node2) - require.NoError(bridge12.Start()) - - randomHeight := randomUint32Between(t, 10, config2.MaxSyncBlockHeight) - fmt.Println("Random height for a restart (re-use if test failed):", randomHeight) - disconnectAtBlockHeight(node2, bridge12, randomHeight) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) - // bridge the nodes together. - bridge23 := NewConnectionBridge(node2, node3) - require.NoError(bridge23.Start()) + randomHeight := randomUint32Between(t, 10, node2.Config.MaxSyncBlockHeight) + t.Logf("Random height for a restart (re-use if test failed): %v", randomHeight) - // Reboot node2 at a specific height and reconnect it with node1 - //node2, bridge12 = restartAtHeightAndReconnectNode(t, node2, node1, bridge12, randomHeight) + // Reboot node2 at a specific height and reconnect it with node3 + node2 = shutdownAtHeight(t, node2, randomHeight) + node2.Config.ConnectIPs = []string{"127.0.0.1:18002"} + node2 = startNode(t, node2) waitForNodeToFullySync(node2) compareNodesByDB(t, node1, node2, 0) compareNodesByDB(t, node3, node2, 0) - fmt.Println("Random restart successful! Random height was", randomHeight) - fmt.Println("Databases match!") - bridge12.Disconnect() - bridge23.Disconnect() - node1.Stop() - node2.Stop() - node3.Stop() + t.Logf("Random restart successful! Random height was %v", randomHeight) + t.Logf("Databases match!") } diff --git a/integration_testing/connection_bridge.go b/integration_testing/connection_bridge.go index 139c7cafb..b93fabac5 100644 --- a/integration_testing/connection_bridge.go +++ b/integration_testing/connection_bridge.go @@ -201,7 +201,6 @@ func ReadWithTimeout(readFunc func() error, readTimeout time.Duration) error { func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode *cmd.Node) error { // Prepare the version message. 
versionMessage := bridge.getVersionMessage(otherNode) - connection.VersionNonceSent = versionMessage.Nonce // Send the version message. fmt.Println("Sending version message:", versionMessage, versionMessage.LatestBlockHeight) @@ -222,7 +221,6 @@ func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode return err } - connection.VersionNonceReceived = verMsg.Nonce connection.TimeConnected = time.Unix(verMsg.TstampSecs, 0) connection.TimeOffsetSecs = verMsg.TstampSecs - time.Now().Unix() return nil @@ -233,7 +231,6 @@ func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode // Now prepare the verack message. verackMsg := lib.NewMessage(lib.MsgTypeVerack) - verackMsg.(*lib.MsgDeSoVerack).NonceReceived = connection.VersionNonceReceived // And send it to the connection. if err := connection.WriteDeSoMessage(verackMsg); err != nil { @@ -251,17 +248,11 @@ func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode if msg.GetMsgType() != lib.MsgTypeVerack { return fmt.Errorf("message is not verack! Type: %v", msg.GetMsgType()) } - verackMsg := msg.(*lib.MsgDeSoVerack) - if verackMsg.NonceReceived != connection.VersionNonceSent { - return fmt.Errorf("verack message nonce doesn't match (received: %v, sent: %v)", - verackMsg.NonceReceived, connection.VersionNonceSent) - } return nil }, lib.DeSoMainnetParams.VersionNegotiationTimeout); err != nil { return err } - connection.VersionNegotiated = true return nil } diff --git a/integration_testing/hypersync_test.go b/integration_testing/hypersync_test.go index bc4c8a7c0..b76b1db48 100644 --- a/integration_testing/hypersync_test.go +++ b/integration_testing/hypersync_test.go @@ -1,11 +1,7 @@ package integration_testing import ( - "fmt" - "github.com/deso-protocol/core/cmd" "github.com/deso-protocol/core/lib" - "github.com/stretchr/testify/require" - "os" "testing" ) @@ -16,35 +12,19 @@ import ( // 4. node2 hypersyncs from node1 // 5. 
once done, compare node1 state, db, and checksum matches node2. func TestSimpleHyperSync(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeHyperSync - - config1.HyperSync = true - config2.HyperSync = true - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSync + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) // wait for node2 to sync blocks. waitForNodeToFullySync(node2) @@ -52,10 +32,7 @@ func TestSimpleHyperSync(t *testing.T) { compareNodesByState(t, node1, node2, 0) //compareNodesByDB(t, node1, node2, 0) compareNodesByChecksum(t, node1, node2) - fmt.Println("Databases match!") - bridge.Disconnect() - node1.Stop() - node2.Stop() + t.Logf("Databases match!") } // TestHyperSyncFromHyperSyncedNode test if a node can successfully hypersync from another hypersynced node: @@ -66,49 +43,28 @@ func TestSimpleHyperSync(t *testing.T) { // 5. once done, bridge node3 and node2 so that node3 hypersyncs from node2. // 6. compare node1 state, db, and checksum matches node2, and node3. 
func TestHyperSyncFromHyperSyncedNode(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - dbDir3 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - defer os.RemoveAll(dbDir3) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeHyperSyncArchival - config3 := generateConfig(t, 18002, dbDir3, 10) - config3.SyncType = lib.NodeSyncTypeHyperSyncArchival - - config1.HyperSync = true - config2.HyperSync = true - config3.HyperSync = true - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - node3 := cmd.NewNode(config3) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) - node3 = startNode(t, node3) // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge12 := NewConnectionBridge(node1, node2) - require.NoError(bridge12.Start()) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) // wait for node2 to sync blocks. waitForNodeToFullySync(node2) - // bridge node3 to node2 to kick off hyper sync from a hyper synced node - bridge23 := NewConnectionBridge(node2, node3) - require.NoError(bridge23.Start()) + node3 := spawnNodeProtocol1(t, 18002, "node3") + node3.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node3.Config.HyperSync = true + node3.Config.ConnectIPs = []string{"127.0.0.1:18001"} + node3 = startNode(t, node3) // wait for node2 to sync blocks. 
waitForNodeToFullySync(node3) @@ -122,12 +78,7 @@ func TestHyperSyncFromHyperSyncedNode(t *testing.T) { //compareNodesByDB(t, node2, node3, 0) compareNodesByChecksum(t, node2, node3) - fmt.Println("Databases match!") - bridge12.Disconnect() - bridge23.Disconnect() - node1.Stop() - node2.Stop() - node3.Stop() + t.Logf("Databases match!") } // TestSimpleHyperSyncRestart test if a node can successfully hyper sync from another node: @@ -138,52 +89,34 @@ func TestHyperSyncFromHyperSyncedNode(t *testing.T) { // 5. node2 reconnects to node1 and hypersyncs again. // 6. Once node2 finishes sync, compare node1 state, db, and checksum matches node2. func TestSimpleHyperSyncRestart(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config2 := generateConfig(t, 18001, dbDir2, 10) - - config1.HyperSync = true - config1.SyncType = lib.NodeSyncTypeBlockSync - config2.HyperSync = true - config2.SyncType = lib.NodeSyncTypeHyperSyncArchival - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. 
- bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) syncIndex := randomUint32Between(t, 0, uint32(len(lib.StatePrefixes.StatePrefixesList))) syncPrefix := lib.StatePrefixes.StatePrefixesList[syncIndex] - fmt.Println("Random sync prefix for a restart (re-use if test failed):", syncPrefix) + t.Logf("Random sync prefix for a restart (re-use if test failed): %v", syncPrefix) + // Reboot node2 at a specific sync prefix and reconnect it with node1 - node2, bridge = restartAtSyncPrefixAndReconnectNode(t, node2, node1, bridge, syncPrefix) + node2 = restartAtSyncPrefix(t, node2, syncPrefix) // wait for node2 to sync blocks. waitForNodeToFullySync(node2) compareNodesByState(t, node1, node2, 0) //compareNodesByDB(t, node1, node2, 0) compareNodesByChecksum(t, node1, node2) - fmt.Println("Random restart successful! Random sync prefix was", syncPrefix) - fmt.Println("Databases match!") - bridge.Disconnect() - node1.Stop() - node2.Stop() + t.Logf("Random restart successful! Random sync prefix was: %v", syncPrefix) + t.Logf("Databases match!") } // TestSimpleHyperSyncDisconnectWithSwitchingToNewPeer tests if a node can successfully restart while hypersyncing. @@ -194,57 +127,34 @@ func TestSimpleHyperSyncRestart(t *testing.T) { // 5. after restart, bridge node2 with node3 and resume hypersync. // 6. once node2 finishes, compare node1, node2, node3 state, db, and checksums are identical. 
func TestSimpleHyperSyncDisconnectWithSwitchingToNewPeer(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - dbDir3 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - defer os.RemoveAll(dbDir3) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeHyperSyncArchival - config3 := generateConfig(t, 18002, dbDir3, 10) - config3.SyncType = lib.NodeSyncTypeBlockSync - - config1.HyperSync = true - config2.HyperSync = true - config3.HyperSync = true - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - config3.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - node3 := cmd.NewNode(config3) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) - node3 = startNode(t, node3) - // wait for node1 to sync blocks waitForNodeToFullySync(node1) + + node3 := spawnNodeProtocol1(t, 18002, "node3") + node3.Config.HyperSync = true + node3.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node3 = startNode(t, node3) // wait for node3 to sync blocks waitForNodeToFullySync(node3) - // bridge the nodes together. 
- bridge12 := NewConnectionBridge(node1, node2) - require.NoError(bridge12.Start()) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) + // Reboot node2 at a specific height and reconnect it with node1 syncIndex := randomUint32Between(t, 0, uint32(len(lib.StatePrefixes.StatePrefixesList))) syncPrefix := lib.StatePrefixes.StatePrefixesList[syncIndex] - fmt.Println("Random prefix for a restart (re-use if test failed):", syncPrefix) - disconnectAtSyncPrefix(t, node2, bridge12, syncPrefix) - - // bridge the nodes together. - bridge23 := NewConnectionBridge(node2, node3) - require.NoError(bridge23.Start()) + t.Logf("Random prefix for a restart (re-use if test failed): %v", syncPrefix) + node2 = shutdownAtSyncPrefix(t, node2, syncPrefix) + node2.Config.ConnectIPs = []string{"127.0.0.1:18002"} + node2 = startNode(t, node2) - // Reboot node2 at a specific height and reconnect it with node1 - //node2, bridge12 = restartAtHeightAndReconnectNode(t, node2, node1, bridge12, randomHeight) // wait for node2 to sync blocks. waitForNodeToFullySync(node2) @@ -257,13 +167,8 @@ func TestSimpleHyperSyncDisconnectWithSwitchingToNewPeer(t *testing.T) { compareNodesByState(t, node1, node2, 0) //compareNodesByDB(t, node1, node2, 0) compareNodesByChecksum(t, node1, node2) - fmt.Println("Random restart successful! Random sync prefix was", syncPrefix) - fmt.Println("Databases match!") - bridge12.Disconnect() - bridge23.Disconnect() - node1.Stop() - node2.Stop() - node3.Stop() + t.Logf("Random restart successful! Random sync prefix was: %v", syncPrefix) + t.Logf("Databases match!") } // TODO: disconnecting the provider peer during hypersync doesn't work. 
@@ -317,93 +222,49 @@ func TestSimpleHyperSyncDisconnectWithSwitchingToNewPeer(t *testing.T) { //} func TestArchivalMode(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config2 := generateConfig(t, 18001, dbDir2, 10) - - config1.HyperSync = true - config2.HyperSync = true - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - config1.SyncType = lib.NodeSyncTypeBlockSync - config2.SyncType = lib.NodeSyncTypeHyperSyncArchival - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) // wait for node2 to sync blocks. 
waitForNodeToFullySync(node2) compareNodesByDB(t, node1, node2, 0) - - //compareNodesByDB(t, node1, node2, 0) compareNodesByChecksum(t, node1, node2) - fmt.Println("Databases match!") - bridge.Disconnect() - node1.Stop() - node2.Stop() + t.Logf("Databases match!") } func TestBlockSyncFromArchivalModeHyperSync(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - dbDir3 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - defer os.RemoveAll(dbDir3) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config2 := generateConfig(t, 18001, dbDir2, 10) - config3 := generateConfig(t, 18002, dbDir3, 10) - - config1.HyperSync = true - config1.SyncType = lib.NodeSyncTypeBlockSync - config2.HyperSync = true - config2.SyncType = lib.NodeSyncTypeHyperSyncArchival - config3.HyperSync = false - config3.SyncType = lib.NodeSyncTypeBlockSync - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - node3 := cmd.NewNode(config3) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) - node3 = startNode(t, node3) - // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge12 := NewConnectionBridge(node1, node2) - require.NoError(bridge12.Start()) - + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) // wait for node2 to sync blocks. 
waitForNodeToFullySync(node2) - bridge23 := NewConnectionBridge(node2, node3) - require.NoError(bridge23.Start()) - + node3 := spawnNodeProtocol1(t, 18002, "node3") + node3.Config.SyncType = lib.NodeSyncTypeBlockSync + node3.Config.HyperSync = true + node3.Config.ConnectIPs = []string{"127.0.0.1:18001"} + node3 = startNode(t, node3) // wait for node3 to sync blocks. waitForNodeToFullySync(node3) @@ -412,10 +273,5 @@ func TestBlockSyncFromArchivalModeHyperSync(t *testing.T) { //compareNodesByDB(t, node1, node2, 0) compareNodesByChecksum(t, node1, node2) - fmt.Println("Databases match!") - bridge12.Disconnect() - bridge23.Disconnect() - node1.Stop() - node2.Stop() - node3.Stop() + t.Logf("Databases match!") } diff --git a/integration_testing/migrations_test.go b/integration_testing/migrations_test.go index 1419d483e..067a2f3b6 100644 --- a/integration_testing/migrations_test.go +++ b/integration_testing/migrations_test.go @@ -1,65 +1,39 @@ package integration_testing import ( - "fmt" - "github.com/deso-protocol/core/cmd" - "github.com/deso-protocol/core/lib" "github.com/stretchr/testify/require" - "os" "testing" ) // TODO: Add an encoder migration height in constants.go then modify some // random struct like UtxoEntry. Until we have a migration, we can't fully test this. 
func TestEncoderMigrations(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeHyperSync - - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - config1.HyperSync = true - config2.HyperSync = true - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) - // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) - + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) // wait for node2 to sync blocks. 
waitForNodeToFullySync(node2) - fmt.Println("Chain state and operation channel", node2.Server.GetBlockchain().ChainState(), + t.Logf("Chain state and operation channel (state: %v), (len: %v)", node2.Server.GetBlockchain().ChainState(), len(node2.Server.GetBlockchain().Snapshot().OperationChannel.OperationChannel)) compareNodesByState(t, node1, node2, 0) - fmt.Println("node1 checksum:", computeNodeStateChecksum(t, node1, 1500)) - fmt.Println("node2 checksum:", computeNodeStateChecksum(t, node2, 1500)) + t.Logf("node1 checksum: %v", computeNodeStateChecksum(t, node1, 1500)) + t.Logf("node2 checksum: %v", computeNodeStateChecksum(t, node2, 1500)) checksum1, err := node1.Server.GetBlockchain().Snapshot().Checksum.ToBytes() - require.NoError(err) + require.NoError(t, err) checksum2, err := node2.Server.GetBlockchain().Snapshot().Checksum.ToBytes() - require.NoError(err) - fmt.Println("node1 server checksum:", checksum1) - fmt.Println("node2 server checksum:", checksum2) + require.NoError(t, err) + t.Logf("node1 server checksum: %v", checksum1) + t.Logf("node2 server checksum: %v", checksum2) compareNodesByChecksum(t, node1, node2) - fmt.Println("Databases match!") - bridge.Disconnect() - node1.Stop() - node2.Stop() + t.Logf("Databases match!") } diff --git a/integration_testing/mining_test.go b/integration_testing/mining_test.go index 88de5e097..facbce226 100644 --- a/integration_testing/mining_test.go +++ b/integration_testing/mining_test.go @@ -1,35 +1,22 @@ package integration_testing import ( - "github.com/deso-protocol/core/cmd" "github.com/deso-protocol/core/lib" - "github.com/stretchr/testify/require" - "os" "testing" ) // TestSimpleBlockSync test if a node can mine blocks on regtest func TestRegtestMiner(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - defer os.RemoveAll(dbDir1) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config1.Params = &lib.DeSoTestnetParams - 
config1.MaxSyncBlockHeight = 0 - config1.MinerPublicKeys = []string{"tBCKVERmG9nZpHTk2AVPqknWc1Mw9HHAnqrTpW1RnXpXMQ4PsQgnmV"} - - config1.Regtest = true - - node1 := cmd.NewNode(config1) + node1 := spawnNodeProtocol1(t, 18000, "node1") + params := lib.DeSoTestnetParams + node1.Config.Params = ¶ms + node1.Params = ¶ms + node1.Config.MaxSyncBlockHeight = 0 + node1.Config.MinerPublicKeys = []string{"tBCKVERmG9nZpHTk2AVPqknWc1Mw9HHAnqrTpW1RnXpXMQ4PsQgnmV"} + node1.Config.Regtest = true node1 = startNode(t, node1) // wait for node1 to sync blocks mineHeight := uint32(40) <-listenForBlockHeight(node1, mineHeight) - - node1.Stop() } diff --git a/integration_testing/network_manager_utils_test.go b/integration_testing/network_manager_utils_test.go index f14cb39d9..d46b5a22f 100644 --- a/integration_testing/network_manager_utils_test.go +++ b/integration_testing/network_manager_utils_test.go @@ -253,6 +253,19 @@ func getRemoteNodeWithUserAgent(node *cmd.Node, userAgent string) *lib.RemoteNod return nil } +func spawnNodeProtocol1(t *testing.T, port uint32, id string) *cmd.Node { + dbDir := getDirectory(t) + t.Cleanup(func() { + os.RemoveAll(dbDir) + }) + config := generateConfig(t, port, dbDir, 10) + config.SyncType = lib.NodeSyncTypeBlockSync + node := cmd.NewNode(config) + node.Params.UserAgent = id + node.Params.ProtocolVersion = lib.ProtocolVersion1 + return node +} + func spawnNonValidatorNodeProtocol2(t *testing.T, port uint32, id string) *cmd.Node { dbDir := getDirectory(t) t.Cleanup(func() { diff --git a/integration_testing/rollback_test.go b/integration_testing/rollback_test.go index 8028866ac..c7b440b2b 100644 --- a/integration_testing/rollback_test.go +++ b/integration_testing/rollback_test.go @@ -10,7 +10,10 @@ import ( ) // Start blocks to height 5000 and then disconnect +// TODO: This test won't work now. 
func TestStateRollback(t *testing.T) { + t.Skipf("DisconnectBlocksToHeight doesn't work in PoS") + require := require.New(t) _ = require diff --git a/integration_testing/tools.go b/integration_testing/tools.go index df9aad581..42cb6c11f 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -69,7 +69,7 @@ func generateConfig(t *testing.T, port uint32, dataDir string, maxPeers uint32) config.MaxSyncBlockHeight = 0 config.ConnectIPs = []string{} config.PrivateMode = true - config.GlogV = 2 + config.GlogV = 0 config.GlogVmodule = "*bitcoin_manager*=0,*balance*=0,*view*=0,*frontend*=0,*peer*=0,*addr*=0,*network*=0,*utils*=0,*connection*=0,*main*=0,*server*=0,*mempool*=0,*miner*=0,*blockchain*=0" config.MaxInboundPeers = maxPeers config.TargetOutboundPeers = maxPeers @@ -427,6 +427,16 @@ func restartAtHeightAndReconnectNode(t *testing.T, node *cmd.Node, source *cmd.N return newNode, bridge } +func restartAtHeight(t *testing.T, node *cmd.Node, height uint32) *cmd.Node { + <-listenForBlockHeight(node, height) + return restartNode(t, node) +} + +func shutdownAtHeight(t *testing.T, node *cmd.Node, height uint32) *cmd.Node { + <-listenForBlockHeight(node, height) + return shutdownNode(t, node) +} + // listenForSyncPrefix will wait until the node starts downloading the provided syncPrefix in hypersync, and then sends // a message to the provided signal channel. 
func listenForSyncPrefix(t *testing.T, node *cmd.Node, syncPrefix []byte, signal chan<- bool) { @@ -470,6 +480,20 @@ func restartAtSyncPrefixAndReconnectNode(t *testing.T, node *cmd.Node, source *c return newNode, bridge } +func restartAtSyncPrefix(t *testing.T, node *cmd.Node, syncPrefix []byte) *cmd.Node { + listener := make(chan bool) + listenForSyncPrefix(t, node, syncPrefix, listener) + <-listener + return restartNode(t, node) +} + +func shutdownAtSyncPrefix(t *testing.T, node *cmd.Node, syncPrefix []byte) *cmd.Node { + listener := make(chan bool) + listenForSyncPrefix(t, node, syncPrefix, listener) + <-listener + return shutdownNode(t, node) +} + func randomUint32Between(t *testing.T, min, max uint32) uint32 { require := require.New(t) randomNumber, err := wire.RandomUint64() diff --git a/integration_testing/txindex_test.go b/integration_testing/txindex_test.go index dfd398557..702e63c10 100644 --- a/integration_testing/txindex_test.go +++ b/integration_testing/txindex_test.go @@ -1,11 +1,7 @@ package integration_testing import ( - "fmt" - "github.com/deso-protocol/core/cmd" "github.com/deso-protocol/core/lib" - "github.com/stretchr/testify/require" - "os" "testing" ) @@ -16,39 +12,21 @@ import ( // 4. node2 syncs MaxSyncBlockHeight blocks from node1, and builds txindex afterwards. // 5. compare node1 db and txindex matches node2. 
func TestSimpleTxIndex(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.HyperSync = true - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.HyperSync = true - config2.SyncType = lib.NodeSyncTypeHyperSyncArchival - - config1.TXIndex = true - config2.TXIndex = true - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} + node1.Config.HyperSync = true + node1.Config.TXIndex = true node1 = startNode(t, node1) - node2 = startNode(t, node2) - // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) - - // wait for node2 to sync blocks. + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2.Config.HyperSync = true + node2.Config.TXIndex = true + node2 = startNode(t, node2) + // wait for node1 to sync blocks waitForNodeToFullySync(node2) waitForNodeToFullySyncTxIndex(node1) @@ -56,8 +34,5 @@ func TestSimpleTxIndex(t *testing.T) { compareNodesByDB(t, node1, node2, 0) compareNodesByTxIndex(t, node1, node2, 0) - fmt.Println("Databases match!") - bridge.Disconnect() - node1.Stop() - node2.Stop() + t.Logf("Databases match!") } diff --git a/lib/peer.go b/lib/peer.go index 996d2632d..6bc683382 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -48,7 +48,6 @@ type Peer struct { StatsMtx deadlock.RWMutex TimeOffsetSecs int64 TimeConnected time.Time - startingHeight uint32 ID uint64 // Ping-related fields. 
LastPingNonce uint64 @@ -64,32 +63,16 @@ type Peer struct { Params *DeSoParams MessageChan chan *ServerMessage - // In order to complete a version negotiation successfully, the peer must - // reply to the initial version message we send them with a verack message - // containing the nonce from that initial version message. This ensures that - // the peer's IP isn't being spoofed since the only way to actually produce - // a verack with the appropriate response is to actually own the IP that - // the peer claims it has. As such, we maintain the version nonce we sent - // the peer and the version nonce they sent us here. - // - // TODO: The way we synchronize the version nonce is currently a bit - // messy; ideally we could do it without keeping global state. - VersionNonceSent uint64 - VersionNonceReceived uint64 - // A pointer to the Server srv *Server // Basic state. - PeerInfoMtx deadlock.Mutex - serviceFlags ServiceFlag - addrStr string - netAddr *wire.NetAddress - userAgent string - advertisedProtocolVersion uint64 - negotiatedProtocolVersion uint64 - VersionNegotiated bool - minTxFeeRateNanosPerKB uint64 + PeerInfoMtx deadlock.Mutex + serviceFlags ServiceFlag + latestHeight uint64 + addrStr string + netAddr *wire.NetAddress + minTxFeeRateNanosPerKB uint64 // Messages for which we are expecting a reply within a fixed // amount of time. This list is always sorted by ExpectedTime, // with the item having the earliest time at the front. @@ -682,10 +665,10 @@ func (pp *Peer) MinFeeRateNanosPerKB() uint64 { } // StartingBlockHeight is the height of the peer's blockchain tip. 
-func (pp *Peer) StartingBlockHeight() uint32 { +func (pp *Peer) StartingBlockHeight() uint64 { pp.StatsMtx.RLock() defer pp.StatsMtx.RUnlock() - return pp.startingHeight + return pp.latestHeight } // NumBlocksToSend is the number of blocks the Peer has requested from @@ -910,6 +893,20 @@ func (pp *Peer) _setKnownAddressesMap(key string, val bool) { pp.knownAddressesMap[key] = val } +func (pp *Peer) SetLatestBlockHeight(height uint64) { + pp.StatsMtx.Lock() + defer pp.StatsMtx.Unlock() + + pp.latestHeight = height +} + +func (pp *Peer) SetServiceFlag(sf ServiceFlag) { + pp.PeerInfoMtx.Lock() + defer pp.PeerInfoMtx.Unlock() + + pp.serviceFlags = sf +} + func (pp *Peer) outHandler() { pp.startGroup.Done() glog.V(1).Infof("Peer.outHandler: Starting outHandler for Peer %v", pp) diff --git a/lib/remote_node.go b/lib/remote_node.go index b8f378d07..6fb5e13ba 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -203,6 +203,10 @@ func (rn *RemoteNode) GetServiceFlag() ServiceFlag { return rn.handshakeMetadata.serviceFlag } +func (rn *RemoteNode) GetLatestBlockHeight() uint64 { + return rn.handshakeMetadata.latestBlockHeight +} + func (rn *RemoteNode) GetUserAgent() string { return rn.handshakeMetadata.userAgent } diff --git a/lib/remote_node_manager.go b/lib/remote_node_manager.go index bd23908bf..2dfb77431 100644 --- a/lib/remote_node_manager.go +++ b/lib/remote_node_manager.go @@ -78,7 +78,7 @@ func (manager *RemoteNodeManager) ProcessCompletedHandshake(remoteNode *RemoteNo manager.UnsetValidator(remoteNode) manager.SetNonValidator(remoteNode) } - manager.srv.HandleAcceptedPeer(remoteNode.GetPeer()) + manager.srv.HandleAcceptedPeer(remoteNode) manager.srv.maybeRequestAddresses(remoteNode) } diff --git a/lib/server.go b/lib/server.go index e54296739..5efd755f6 100644 --- a/lib/server.go +++ b/lib/server.go @@ -442,6 +442,7 @@ func NewServer( nodeMessageChannel: _nodeMessageChan, forceChecksum: _forceChecksum, AddrMgr: _desoAddrMgr, + params: _params, } if 
stateChangeSyncer != nil { @@ -837,8 +838,8 @@ func (srv *Server) GetBlocks(pp *Peer, maxHeight int) { func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { printHeight := pp.StartingBlockHeight() - if srv.blockchain.headerTip().Height > printHeight { - printHeight = srv.blockchain.headerTip().Height + if uint64(srv.blockchain.headerTip().Height) > printHeight { + printHeight = uint64(srv.blockchain.headerTip().Height) } glog.Infof(CLog(Yellow, fmt.Sprintf("Received header bundle with %v headers "+ "in state %s from peer %v. Downloaded ( %v / %v ) total headers", @@ -1545,6 +1546,7 @@ func (srv *Server) _startSync() { // Find a peer with StartingHeight bigger than our best header tip. var bestPeer *Peer for _, peer := range srv.cmgr.GetAllPeers() { + if !peer.IsSyncCandidate() { glog.Infof("Peer is not sync candidate: %v (isOutbound: %v)", peer, peer.isOutbound) continue @@ -1552,7 +1554,7 @@ func (srv *Server) _startSync() { // Choose the peer with the best height out of everyone who's a // valid sync candidate. 
- if peer.StartingBlockHeight() < bestHeight { + if peer.StartingBlockHeight() < uint64(bestHeight) { continue } @@ -1602,7 +1604,14 @@ func (srv *Server) _startSync() { } -func (srv *Server) HandleAcceptedPeer(pp *Peer) { +func (srv *Server) HandleAcceptedPeer(rn *RemoteNode) { + if rn == nil || rn.GetPeer() == nil { + return + } + pp := rn.GetPeer() + pp.SetServiceFlag(rn.GetServiceFlag()) + pp.SetLatestBlockHeight(rn.GetLatestBlockHeight()) + isSyncCandidate := pp.IsSyncCandidate() isSyncing := srv.blockchain.isSyncing() chainState := srv.blockchain.chainState() From edaf1749cdb0bef82dcbe709417054f248ffb386 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Tue, 30 Jan 2024 12:26:17 -0800 Subject: [PATCH 413/762] Fix fmt (#973) --- integration_testing/blocksync_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/integration_testing/blocksync_test.go b/integration_testing/blocksync_test.go index e03aef152..cf077f2c1 100644 --- a/integration_testing/blocksync_test.go +++ b/integration_testing/blocksync_test.go @@ -1,7 +1,6 @@ package integration_testing import ( - "fmt" "testing" ) @@ -27,7 +26,7 @@ func TestSimpleBlockSync(t *testing.T) { waitForNodeToFullySync(node2) compareNodesByDB(t, node1, node2, 0) - fmt.Println("Databases match!") + t.Logf("Databases match!") } // TestSimpleSyncRestart tests if a node can successfully restart while syncing blocks. 
From 970e9a44968c9905935ffb0c76940575afd1b606 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 30 Jan 2024 15:28:23 -0500 Subject: [PATCH 414/762] Check txn connect status by index in connect block (#970) --- lib/block_view.go | 19 +++++++- lib/block_view_lockups_test.go | 9 ++-- lib/block_view_test.go | 23 +++++++--- lib/block_view_types.go | 4 +- lib/pos_block_producer_test.go | 2 +- lib/pos_blockchain_test.go | 69 ++++++++++++++++++++++------- lib/pos_epoch_complete_hook_test.go | 12 ++--- 7 files changed, 98 insertions(+), 40 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 23b6f7cf4..2d179188d 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3886,7 +3886,7 @@ func (bav *UtxoView) _connectFailingTransaction(txn *MsgDeSoTxn, blockHeight uin _utxoOps []*UtxoOperation, _burnFee uint64, _utilityFee uint64, _err error) { // Failing transactions are only allowed after ProofOfStake2ConsensusCutoverBlockHeight. - if blockHeight <= bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { + if blockHeight < bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { return nil, 0, 0, fmt.Errorf("_connectFailingTransaction: Failing transactions " + "not allowed before ProofOfStake2ConsensusCutoverBlockHeight") } @@ -4053,8 +4053,23 @@ func (bav *UtxoView) ConnectBlock( // which a miner is trying to spam the network, which should generally never happen. utxoOpsForTxn, totalInput, totalOutput, currentFees, err := bav.ConnectTransaction(txn, txHash, 0, uint32(blockHeader.Height), int64(blockHeader.TstampNanoSecs), verifySignatures, false) _, _ = totalInput, totalOutput // A bit surprising we don't use these - if err != nil { + // After the PoS cutover, we need to check if the transaction is a failing transaction. 
+ txnConnects := blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) || + (txIndex == 0 && txn.TxnMeta.GetTxnType() == TxnTypeBlockReward) || + desoBlock.TxnConnectStatusByIndex.Get(txIndex-1) + if txnConnects && err != nil { return nil, errors.Wrapf(err, "ConnectBlock: error connecting txn #%d", txIndex) + } else if !txnConnects { + if err == nil { + return nil, errors.Wrapf(err, "ConnectBlock: txn #%d should not connect based on "+ + "TxnConnectStatusByIndex but err is nil", txIndex) + } + var burnFee, utilityFee uint64 + utxoOpsForTxn, burnFee, utilityFee, err = bav._connectFailingTransaction(txn, uint32(blockHeader.Height), verifySignatures) + if err != nil { + return nil, errors.Wrapf(err, "ConnectBlock: error connecting failing txn #%d", txIndex) + } + _, _ = burnFee, utilityFee // TODO: figure out what we're supposed to do with these ones. } // After the block reward patch block height, we only include fees from transactions diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 96d9f72d3..776941fe1 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -3032,6 +3032,9 @@ func _setUpMinerAndTestMetaForTimestampBasedLockupTests(t *testing.T) *TestMeta // Initialize balance model fork heights. setBalanceModelBlockHeights(t) + // Initialize pos fork heights. + setPoSBlockHeights(t, 11, 100) + // Initialize test chain and miner. chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) @@ -3039,12 +3042,6 @@ func _setUpMinerAndTestMetaForTimestampBasedLockupTests(t *testing.T) *TestMeta // Ensure DAO coins are enabled (a pre-requisite for lockups) params.ForkHeights.DAOCoinBlockHeight = uint32(0) - // Initialize PoS fork heights. 
- params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) - params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = uint32(1) - GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) - GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - // Mine a few blocks to give the senderPkString some money. for ii := 0; ii < 10; ii++ { _, err := miner.MineAndProcessSingleBlock(0, mempool) diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 32881709a..d19d0af36 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -79,8 +79,6 @@ func setBalanceModelBlockHeights(t *testing.T) { DeSoTestnetParams.ForkHeights.ExtraDataOnEntriesBlockHeight = 0 DeSoTestnetParams.ForkHeights.AssociationsAndAccessGroupsBlockHeight = 0 DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = 1 - DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = 1 - DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) GlobalDeSoParams = DeSoTestnetParams @@ -96,14 +94,27 @@ func resetBalanceModelBlockHeights() { DeSoTestnetParams.ForkHeights.ExtraDataOnEntriesBlockHeight = uint32(304087) DeSoTestnetParams.ForkHeights.AssociationsAndAccessGroupsBlockHeight = uint32(596555) DeSoTestnetParams.ForkHeights.BalanceModelBlockHeight = uint32(683058) - DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(math.MaxUint32) - DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = uint32(math.MaxUint32) - DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = uint32(math.MaxUint32) DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) 
DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) GlobalDeSoParams = DeSoTestnetParams } +func setPoSBlockHeights(t *testing.T, posSetupHeight uint32, posCutoverHeight uint32) { + DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = posSetupHeight + DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = posCutoverHeight + DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) + DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) + GlobalDeSoParams = DeSoTestnetParams + + t.Cleanup(resetPoSBlockHeights) +} + +func resetPoSBlockHeights() { + DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(math.MaxUint32) + DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = uint32(math.MaxUint32) + GlobalDeSoParams = DeSoTestnetParams +} + func GetConditionalBalanceFunc(chain *Blockchain, params *DeSoParams) func(uint64, uint64) uint64 { return func(deso uint64, balanceModelBalance uint64) uint64 { if chain.blockTip().Height >= params.ForkHeights.BalanceModelBlockHeight { @@ -2208,7 +2219,7 @@ func TestBlockRewardPatch(t *testing.T) { func TestConnectFailingTransaction(t *testing.T) { setBalanceModelBlockHeights(t) - + setPoSBlockHeights(t, 3, 3) require := require.New(t) seed := int64(1011) rand := rand.New(rand.NewSource(seed)) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 806d79f8c..c8a8f1762 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3728,8 +3728,10 @@ func (key *DerivedKeyEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *byt } func (key *DerivedKeyEntry) GetVersionByte(blockHeight uint64) byte { + // Remember to update this every time there an encoder migration that impacts + // the TransactionSpendingLimit struct. 
return GetMigrationVersion(blockHeight, UnlimitedDerivedKeysMigration, AssociationsAndAccessGroupsMigration, - BalanceModelMigration) + BalanceModelMigration, ProofOfStake1StateSetupMigration) } func (key *DerivedKeyEntry) GetEncoderType() EncoderType { diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 3148629f2..dffa64664 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -272,7 +272,7 @@ func _testProduceBlockNoSizeLimit(t *testing.T, mp *PosMempool, pbp *PosBlockPro require.NoError(err) require.Equal(latestBlockViewCopy, latestBlockView) require.Equal(totalAcceptedTxns, len(txns)) - require.Equal(true, totalAcceptedTxns >= txnConnectStatus.Size()) + require.True(true, totalAcceptedTxns >= txnConnectStatus.Size()) numConnected := 0 for ii := range txns { if txnConnectStatus.Get(ii) { diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 263a65017..5cde8eebc 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -496,6 +496,7 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { // greater than its parent's view. func TestHasValidBlockViewPoS(t *testing.T) { setBalanceModelBlockHeights(t) + setPoSBlockHeights(t, 1, 1) bc, _, _ := NewTestBlockchain(t) hash1 := NewBlockHash(RandomBytes(32)) hash2 := NewBlockHash(RandomBytes(32)) @@ -587,6 +588,8 @@ func TestHasValidBlockViewPoS(t *testing.T) { func TestHasValidBlockProposerPoS(t *testing.T) { // Initialize balance model fork heights. setBalanceModelBlockHeights(t) + // Initialize PoS fork heights. + setPoSBlockHeights(t, 11, 12) // Initialize test chain, miner, and testMeta testMeta := _setUpMinerAndTestMetaForEpochCompleteTest(t) @@ -1743,11 +1746,13 @@ func TestProcessBlockPoS(t *testing.T) { require.True(t, exists) require.False(t, timeoutBlockNode.IsCommitted()) } + var dummyParentBlockHash, orphanBlockHash *BlockHash { // Let's process an orphan block. 
var dummyParentBlock *MsgDeSoBlock + var err error dummyParentBlock = _generateRealBlock(testMeta, 16, 16, 272, reorgBlockHash, false) - dummyParentBlockHash, err := dummyParentBlock.Hash() + dummyParentBlockHash, err = dummyParentBlock.Hash() require.NoError(t, err) var orphanBlock *MsgDeSoBlock orphanBlock = _generateRealBlock(testMeta, 17, 17, 9273, reorgBlockHash, false) @@ -1757,7 +1762,7 @@ func TestProcessBlockPoS(t *testing.T) { // Create a QC on the dummy parent block orphanBlock.Header.ValidatorsVoteQC = _getVoteQC(testMeta, testMeta.posMempool.readOnlyLatestBlockView, dummyParentBlockHash, 16) updateProposerVotePartialSignatureForBlock(testMeta, orphanBlock) - orphanBlockHash, err := orphanBlock.Hash() + orphanBlockHash, err = orphanBlock.Hash() require.NoError(t, err) success, isOrphan, missingBlockHashes, err := testMeta.chain.ProcessBlockPoS(orphanBlock, 17, true) require.False(t, success) @@ -1781,6 +1786,8 @@ func TestProcessBlockPoS(t *testing.T) { require.NotNil(t, orphanBlockInIndex) require.True(t, orphanBlockInIndex.IsStored()) require.True(t, orphanBlockInIndex.IsValidated()) + _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2, blockHash3, reorgBlockHash}, + []*BlockHash{dummyParentBlockHash, orphanBlockHash}, reorgBlockHash) } { // Let's process a block that is an orphan, but is malformed. 
@@ -1813,6 +1820,18 @@ func TestProcessBlockPoS(t *testing.T) { require.Len(t, missingBlockHashes, 0) require.Error(t, err) } + var blockWithFailingTxnHash *BlockHash + { + var blockWithFailingTxn *MsgDeSoBlock + blockWithFailingTxn = _generateRealBlockWithFailingTxn(testMeta, 18, 18, 123722, orphanBlockHash, false, 1) + require.Equal(t, blockWithFailingTxn.TxnConnectStatusByIndex.Get(len(blockWithFailingTxn.Txns)-1), false) + success, _, _, err := testMeta.chain.ProcessBlockPoS(blockWithFailingTxn, 18, true) + require.True(t, success) + blockWithFailingTxnHash, err = blockWithFailingTxn.Hash() + require.NoError(t, err) + _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2, blockHash3, reorgBlockHash, dummyParentBlockHash}, + []*BlockHash{orphanBlockHash, blockWithFailingTxnHash}, dummyParentBlockHash) + } } // TestGetSafeBlocks tests the GetSafeBlocks function to make sure it returns the correct blocks. @@ -1851,7 +1870,7 @@ func TestGetSafeBlocks(t *testing.T) { require.True(t, bn3.Hash.IsEqual(block3Hash)) // Add block 3' only as stored var block3Prime *MsgDeSoBlock - block3Prime = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+2), uint64(testMeta.savedHeight+3), 13717, block2Hash, false) + block3Prime = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+2), uint64(testMeta.savedHeight+3), 137175, block2Hash, false) bn3Prime, err := testMeta.chain.storeBlockInBlockIndex(block3Prime) require.NoError(t, err) block3PrimeHash, err := block3Prime.Hash() @@ -2277,6 +2296,11 @@ func TestHasValidProposerRandomSeedSignaturePoS(t *testing.T) { // PosMempool, generating a RandomSeedHash, updating the latestBlockView in the PosBlockProducer, and calling _getFullRealBlockTemplate. // It can be used to generate a block w/ either a vote or timeout QC. 
func _generateRealBlock(testMeta *TestMeta, blockHeight uint64, view uint64, seed int64, prevBlockHash *BlockHash, isTimeout bool) BlockTemplate { + return _generateRealBlockWithFailingTxn(testMeta, blockHeight, view, seed, prevBlockHash, isTimeout, 0) +} + +func _generateRealBlockWithFailingTxn(testMeta *TestMeta, blockHeight uint64, view uint64, seed int64, + prevBlockHash *BlockHash, isTimeout bool, numFailingTxns uint64) BlockTemplate { globalParams := _testGetDefaultGlobalParams() randSource := rand.New(rand.NewSource(seed)) passingTxns := []*MsgDeSoTxn{} @@ -2293,6 +2317,17 @@ func _generateRealBlock(testMeta *TestMeta, blockHeight uint64, view uint64, see _wrappedPosMempoolAddTransaction(testMeta.t, testMeta.posMempool, txn) } + failingTxns := []*MsgDeSoTxn{} + for jj := 0; jj < int(numFailingTxns); jj++ { + // make a like on a non-existent post + txn, _, _, _, err := testMeta.chain.CreateLikeTxn( + m0PubBytes, ZeroBlockHash, false, feeMax, nil, []*DeSoOutput{}) + failingTxns = append(failingTxns, txn) + require.NoError(testMeta.t, err) + _signTxn(testMeta.t, txn, m0Priv) + _wrappedPosMempoolAddTransaction(testMeta.t, testMeta.posMempool, txn) + } + // TODO: Get real seed signature. prevBlock, exists := testMeta.chain.blockIndexByHash[*prevBlockHash] require.True(testMeta.t, exists) @@ -2302,7 +2337,16 @@ func _generateRealBlock(testMeta *TestMeta, blockHeight uint64, view uint64, see require.NoError(testMeta.t, err) latestBlockHeight := testMeta.chain.blockIndexByHash[*prevBlockHash].Height testMeta.posMempool.UpdateLatestBlock(latestBlockView, uint64(latestBlockHeight)) - return _getFullRealBlockTemplate(testMeta, testMeta.posMempool.readOnlyLatestBlockView, blockHeight, view, seedSignature, isTimeout) + fullBlockTemplate := _getFullRealBlockTemplate(testMeta, testMeta.posMempool.readOnlyLatestBlockView, blockHeight, view, seedSignature, isTimeout) + // Remove the transactions from this block from the mempool. 
+ // This prevents nonce reuse issues when trying to make reorg blocks. + for _, txn := range passingTxns { + testMeta.posMempool.RemoveTransaction(txn.Hash()) + } + for _, txn := range failingTxns { + testMeta.posMempool.RemoveTransaction(txn.Hash()) + } + return fullBlockTemplate } // _generateDummyBlock generates a BlockTemplate with dummy data by adding 50 test transactions to the @@ -2579,12 +2623,8 @@ func _generateRandomBLSPrivateKey(t *testing.T) *bls.PrivateKey { // PoS Cutover height to 12. func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { setBalanceModelBlockHeights(t) - // Set the PoS Setup Height to block 11. - DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = 11 - DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 12 - DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) - DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) - GlobalDeSoParams = DeSoTestnetParams + // Set the PoS Setup Height to block 11 and cutover to 12. + setPoSBlockHeights(t, 11, 12) chain, params, db := NewLowDifficultyBlockchain(t) oldPool, miner := NewTestMiner(t, chain, params, true) @@ -2629,7 +2669,7 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) require.NoError(t, err) - maxMempoolPosSizeBytes := uint64(500) + maxMempoolPosSizeBytes := uint64(1024 * 1024 * 1000) mempoolBackupIntervalMillis := uint64(30000) mempool := NewPosMempool() require.NoError(t, mempool.Init( @@ -2675,11 +2715,8 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { // block height is set to 9 and the cutover is set to 11. 
func NewTestPoSBlockchain(t *testing.T) *TestMeta { setBalanceModelBlockHeights(t) - DeSoTestnetParams.ForkHeights.ProofOfStake1StateSetupBlockHeight = 9 - DeSoTestnetParams.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 11 - DeSoTestnetParams.EncoderMigrationHeights = GetEncoderMigrationHeights(&DeSoTestnetParams.ForkHeights) - DeSoTestnetParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(&DeSoTestnetParams.ForkHeights) - GlobalDeSoParams = DeSoTestnetParams + // Set the PoS Setup Height to block 9 and cutover to 11. + setPoSBlockHeights(t, 9, 11) chain, params, db := NewLowDifficultyBlockchain(t) params.ForkHeights.BalanceModelBlockHeight = 1 oldPool, miner := NewTestMiner(t, chain, params, true) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 5bfe740cf..e3c7b3b67 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -57,7 +57,8 @@ func TestIsLastBlockInCurrentEpoch(t *testing.T) { func TestRunEpochCompleteHook(t *testing.T) { // Initialize balance model fork heights. setBalanceModelBlockHeights(t) - + // Initialize PoS fork heights. + setPoSBlockHeights(t, 11, 11) // Initialize test chain, miner, and testMeta testMeta := _setUpMinerAndTestMetaForEpochCompleteTest(t) @@ -520,7 +521,8 @@ func TestStakingRewardDistribution(t *testing.T) { defer resetDefaultEpochDurationNumBlocks() // Initialize balance model fork heights. setBalanceModelBlockHeights(t) - + // Initialize PoS fork heights. + setPoSBlockHeights(t, 11, 11) // Initialize test chain, miner, and testMeta testMeta := _setUpMinerAndTestMetaForEpochCompleteTest(t) @@ -806,12 +808,6 @@ func _setUpMinerAndTestMetaForEpochCompleteTest(t *testing.T) *TestMeta { chain, params, db := NewLowDifficultyBlockchain(t) mempool, miner := NewTestMiner(t, chain, params, true) - // Initialize PoS fork heights. 
- params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(1) - params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = uint32(1) - GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) - GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - // Mine a few blocks to give the senderPkString some money. for ii := 0; ii < 10; ii++ { _, err := miner.MineAndProcessSingleBlock(0, mempool) From bc7de6f31a00e084bf17c30c3eec43c39d3b8229 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 30 Jan 2024 15:48:13 -0500 Subject: [PATCH 415/762] Simplify require.True call (#975) --- lib/pos_block_producer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index dffa64664..2cf32d4ab 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -272,7 +272,7 @@ func _testProduceBlockNoSizeLimit(t *testing.T, mp *PosMempool, pbp *PosBlockPro require.NoError(err) require.Equal(latestBlockViewCopy, latestBlockView) require.Equal(totalAcceptedTxns, len(txns)) - require.True(true, totalAcceptedTxns >= txnConnectStatus.Size()) + require.True(totalAcceptedTxns >= txnConnectStatus.Size()) numConnected := 0 for ii := range txns { if txnConnectStatus.Get(ii) { From 6d2cb13a851823df76c26bd2722d28336c1a6e80 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 30 Jan 2024 16:56:14 -0500 Subject: [PATCH 416/762] Set LastActiveAtEpochNumber on validator entry in connect block (#971) --- lib/block_view.go | 64 +++++++++++++++++++++- lib/block_view_types.go | 101 +++++++++++++++++----------------- lib/pos_blockchain_test.go | 107 +++++++++++++------------------------ 3 files changed, 149 insertions(+), 123 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 2d179188d..3c1690af6 100644 --- 
a/lib/block_view.go +++ b/lib/block_view.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "fmt" "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections/bitset" "math" "math/big" "reflect" @@ -1655,7 +1656,8 @@ func (bav *UtxoView) DisconnectBlock( case OperationTypeAddBalance: // We don't allow add balance utxo operations unless it's the end of an epoch. if !isLastBlockInEpoch { - return fmt.Errorf("DisconnectBlock: Found add balance operation in block %d that is not the end of an epoch", desoBlock.Header.Height) + return fmt.Errorf("DisconnectBlock: Found add balance operation in block %d that is not the end "+ + "of an epoch", desoBlock.Header.Height) } // We need to revert the add balance operation. if err = bav._unAddBalance(utxoOp.BalanceAmountNanos, utxoOp.BalancePublicKey); err != nil { @@ -1664,7 +1666,8 @@ func (bav *UtxoView) DisconnectBlock( case OperationTypeStakeDistribution: // We don't allow stake distribution utxo operations unless it's the end of an epoch. 
if !isLastBlockInEpoch { - return fmt.Errorf("DisconnectBlock: Found add balance operation in block %d that is not the end of an epoch", desoBlock.Header.Height) + return fmt.Errorf("DisconnectBlock: Found add balance operation in block %d that is not the end "+ + "of an epoch", desoBlock.Header.Height) } if len(utxoOp.PrevStakeEntries) != 1 { return fmt.Errorf("DisconnectBlock: Expected exactly one prev stake entry for stake distribution op") @@ -1674,6 +1677,12 @@ func (bav *UtxoView) DisconnectBlock( } bav._setStakeEntryMappings(utxoOp.PrevStakeEntries[0]) bav._setValidatorEntryMappings(utxoOp.PrevValidatorEntry) + case OperationTypeSetValidatorLastActiveAtEpoch: + if utxoOp.PrevValidatorEntry == nil { + return fmt.Errorf("DisconnectBlock: Expected prev validator entry for set validator last active " + + "at epoch op") + } + bav._setValidatorEntryMappings(utxoOp.PrevValidatorEntry) } } } @@ -4160,6 +4169,57 @@ func (bav *UtxoView) ConnectBlock( } } + // If we're past the PoS cutover, we need to track which validators were active. + if blockHeight >= uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + // Get the active validators for the block. 
+ var signersList *bitset.Bitset + if !desoBlock.Header.ValidatorsVoteQC.isEmpty() { + signersList = desoBlock.Header.ValidatorsVoteQC.ValidatorsVoteAggregatedSignature.SignersList + } else { + signersList = desoBlock.Header.ValidatorsTimeoutAggregateQC.ValidatorsTimeoutAggregatedSignature.SignersList + } + allSnapshotValidators, err := bav.GetAllSnapshotValidatorSetEntriesByStake() + if err != nil { + return nil, errors.Wrapf(err, "ConnectBlock: error getting all snapshot validator set entries by stake") + } + currentEpochNumber, err := bav.GetCurrentEpochNumber() + if err != nil { + return nil, errors.Wrapf(err, "ConnectBlock: error getting current epoch number") + } + for ii, validator := range allSnapshotValidators { + // Skip validators who didn't sign + if !signersList.Get(ii) { + continue + } + // Get the current validator entry + validatorEntry, err := bav.GetValidatorByPKID(validator.ValidatorPKID) + if err != nil { + return nil, errors.Wrapf(err, "ConnectBlock: error getting validator by PKID") + } + // It's possible for the validator to have unregistered since two epochs ago, but is continuing + // to vote. If the validatorEntry is nil or IsDeleted, we skip it here. + if validatorEntry == nil || validatorEntry.IsDeleted() { + continue + } + // It's possible for the validator to be in the snapshot validator set, but to have been jailed + // in the previous epoch due to inactivity. In the edge case where the validator now comes back + // online, we maintain its jailed status until it unjails itself explicitly again. + if validatorEntry.Status() == ValidatorStatusJailed { + continue + } + if validatorEntry.LastActiveAtEpochNumber != currentEpochNumber { + blockLevelUtxoOps = append(blockLevelUtxoOps, &UtxoOperation{ + Type: OperationTypeSetValidatorLastActiveAtEpoch, + PrevValidatorEntry: validatorEntry.Copy(), + }) + // Set the last active at epoch number to the current epoch number + // and set the validator entry on the view. 
+ validatorEntry.LastActiveAtEpochNumber = currentEpochNumber + bav._setValidatorEntryMappings(validatorEntry) + } + } + } + // If we're past the PoS Setup Fork Height, check if we should run the end of epoch hook. if blockHeight >= uint64(bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight) { isLastBlockInEpoch, err := bav.IsLastBlockInCurrentEpoch(blockHeight) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index c8a8f1762..73727eae5 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -620,56 +620,57 @@ const ( // used when rolling back a txn to determine what kind of operations need // to be performed. For example, rolling back a BitcoinExchange may require // rolling back an AddUtxo operation. - OperationTypeAddUtxo OperationType = 0 - OperationTypeSpendUtxo OperationType = 1 - OperationTypeBitcoinExchange OperationType = 2 - OperationTypePrivateMessage OperationType = 3 - OperationTypeSubmitPost OperationType = 4 - OperationTypeUpdateProfile OperationType = 5 - OperationTypeDeletePost OperationType = 7 - OperationTypeUpdateBitcoinUSDExchangeRate OperationType = 8 - OperationTypeFollow OperationType = 9 - OperationTypeLike OperationType = 10 - OperationTypeCreatorCoin OperationType = 11 - OperationTypeSwapIdentity OperationType = 12 - OperationTypeUpdateGlobalParams OperationType = 13 - OperationTypeCreatorCoinTransfer OperationType = 14 - OperationTypeCreateNFT OperationType = 15 - OperationTypeUpdateNFT OperationType = 16 - OperationTypeAcceptNFTBid OperationType = 17 - OperationTypeNFTBid OperationType = 18 - OperationTypeDeSoDiamond OperationType = 19 - OperationTypeNFTTransfer OperationType = 20 - OperationTypeAcceptNFTTransfer OperationType = 21 - OperationTypeBurnNFT OperationType = 22 - OperationTypeAuthorizeDerivedKey OperationType = 23 - OperationTypeMessagingKey OperationType = 24 - OperationTypeDAOCoin OperationType = 25 - OperationTypeDAOCoinTransfer OperationType = 26 - OperationTypeSpendingLimitAccounting 
OperationType = 27 - OperationTypeDAOCoinLimitOrder OperationType = 28 - OperationTypeCreateUserAssociation OperationType = 29 - OperationTypeDeleteUserAssociation OperationType = 30 - OperationTypeCreatePostAssociation OperationType = 31 - OperationTypeDeletePostAssociation OperationType = 32 - OperationTypeAccessGroup OperationType = 33 - OperationTypeAccessGroupMembers OperationType = 34 - OperationTypeNewMessage OperationType = 35 - OperationTypeAddBalance OperationType = 36 - OperationTypeSpendBalance OperationType = 37 - OperationTypeDeleteExpiredNonces OperationType = 38 - OperationTypeRegisterAsValidator OperationType = 39 - OperationTypeUnregisterAsValidator OperationType = 40 - OperationTypeStake OperationType = 41 - OperationTypeUnstake OperationType = 42 - OperationTypeUnlockStake OperationType = 43 - OperationTypeUnjailValidator OperationType = 44 - OperationTypeCoinLockup OperationType = 45 - OperationTypeCoinLockupTransfer OperationType = 46 - OperationTypeCoinUnlock OperationType = 47 - OperationTypeUpdateCoinLockupParams OperationType = 48 - OperationTypeStakeDistribution OperationType = 49 - // NEXT_TAG = 50 + OperationTypeAddUtxo OperationType = 0 + OperationTypeSpendUtxo OperationType = 1 + OperationTypeBitcoinExchange OperationType = 2 + OperationTypePrivateMessage OperationType = 3 + OperationTypeSubmitPost OperationType = 4 + OperationTypeUpdateProfile OperationType = 5 + OperationTypeDeletePost OperationType = 7 + OperationTypeUpdateBitcoinUSDExchangeRate OperationType = 8 + OperationTypeFollow OperationType = 9 + OperationTypeLike OperationType = 10 + OperationTypeCreatorCoin OperationType = 11 + OperationTypeSwapIdentity OperationType = 12 + OperationTypeUpdateGlobalParams OperationType = 13 + OperationTypeCreatorCoinTransfer OperationType = 14 + OperationTypeCreateNFT OperationType = 15 + OperationTypeUpdateNFT OperationType = 16 + OperationTypeAcceptNFTBid OperationType = 17 + OperationTypeNFTBid OperationType = 18 + 
OperationTypeDeSoDiamond OperationType = 19 + OperationTypeNFTTransfer OperationType = 20 + OperationTypeAcceptNFTTransfer OperationType = 21 + OperationTypeBurnNFT OperationType = 22 + OperationTypeAuthorizeDerivedKey OperationType = 23 + OperationTypeMessagingKey OperationType = 24 + OperationTypeDAOCoin OperationType = 25 + OperationTypeDAOCoinTransfer OperationType = 26 + OperationTypeSpendingLimitAccounting OperationType = 27 + OperationTypeDAOCoinLimitOrder OperationType = 28 + OperationTypeCreateUserAssociation OperationType = 29 + OperationTypeDeleteUserAssociation OperationType = 30 + OperationTypeCreatePostAssociation OperationType = 31 + OperationTypeDeletePostAssociation OperationType = 32 + OperationTypeAccessGroup OperationType = 33 + OperationTypeAccessGroupMembers OperationType = 34 + OperationTypeNewMessage OperationType = 35 + OperationTypeAddBalance OperationType = 36 + OperationTypeSpendBalance OperationType = 37 + OperationTypeDeleteExpiredNonces OperationType = 38 + OperationTypeRegisterAsValidator OperationType = 39 + OperationTypeUnregisterAsValidator OperationType = 40 + OperationTypeStake OperationType = 41 + OperationTypeUnstake OperationType = 42 + OperationTypeUnlockStake OperationType = 43 + OperationTypeUnjailValidator OperationType = 44 + OperationTypeCoinLockup OperationType = 45 + OperationTypeCoinLockupTransfer OperationType = 46 + OperationTypeCoinUnlock OperationType = 47 + OperationTypeUpdateCoinLockupParams OperationType = 48 + OperationTypeStakeDistribution OperationType = 49 + OperationTypeSetValidatorLastActiveAtEpoch OperationType = 50 + // NEXT_TAG = 51 ) func (op OperationType) String() string { diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 5cde8eebc..4ce9885ea 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -1533,10 +1533,10 @@ func TestCanCommitGrandparent(t *testing.T) { // 5. 
Adding a fifth block (block5) w/ parent (block4) and block5's view = block4's view + 1 to the best chain does not result in block3 being committed. // 6. Adding a sixth block (block6) w/ parent (block5) and block6's view = block5's view + 1 to the best chain results in block3 and block4 being committed. func TestRunCommitRuleOnBestChain(t *testing.T) { - testMeta := NewTestPoSBlockchain(t) + testMeta := NewTestPoSBlockchainWithValidators(t) // Create a single block and add it to the best chain. - blockTemplate1 := _generateBlockAndAddToBestChain(testMeta, 11, 11, 887) + blockTemplate1 := _generateBlockAndAddToBestChain(testMeta, 12, 12, 887) // Okay now try to run the commit rule. Nothing will happen. // We expect the block to be uncommitted. err := testMeta.chain.runCommitRuleOnBestChain() @@ -1548,7 +1548,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { _verifyCommitRuleHelper(testMeta, []*BlockHash{}, []*BlockHash{blockHash1}, nil) // Add one more block to best chain. Should still not trigger commit rule - blockTemplate2 := _generateBlockAndAddToBestChain(testMeta, 12, 12, 813) + blockTemplate2 := _generateBlockAndAddToBestChain(testMeta, 13, 13, 813) // Run commit rule again. Nothing should happen. // We expect both block 1 and block 2 to be uncommitted. @@ -1561,7 +1561,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { _verifyCommitRuleHelper(testMeta, []*BlockHash{}, []*BlockHash{blockHash1, blockHash2}, nil) // Okay add one MORE block to the best chain. This should trigger the commit rule. - blockTemplate3 := _generateBlockAndAddToBestChain(testMeta, 13, 13, 513) + blockTemplate3 := _generateBlockAndAddToBestChain(testMeta, 14, 14, 513) // Run the commit rule again. This time we expect block 1 to be committed. err = testMeta.chain.runCommitRuleOnBestChain() @@ -1653,6 +1653,23 @@ func _verifyCommitRuleHelper(testMeta *TestMeta, committedBlocks []*BlockHash, u require.False(testMeta.t, blockNode.IsCommitted()) // TODO: Verify DB results?? 
Kinda silly to make sure everything is missing. } + utxoView, err := testMeta.chain.GetUncommittedTipView() + require.NoError(testMeta.t, err) + currentEpoch, err := utxoView.GetCurrentEpochEntry() + require.NoError(testMeta.t, err) + currentEpochNumber := currentEpoch.EpochNumber + prevEpoch, err := utxoView.simulatePrevEpochEntry(currentEpochNumber, currentEpoch.InitialBlockHeight) + require.NoError(testMeta.t, err) + prevEpochNumber := prevEpoch.EpochNumber + for pubKeyString := range testMeta.pubKeyToBLSKeyMap { + publicKeyBytes := MustBase58CheckDecode(pubKeyString) + validatorEntry, err := utxoView.GetValidatorByPublicKey(NewPublicKey(publicKeyBytes)) + require.NoError(testMeta.t, err) + // Validator should be active in either the last epoch or the current epoch + // since the epoch turns over at every other block. + require.True(testMeta.t, validatorEntry.LastActiveAtEpochNumber == prevEpochNumber || + validatorEntry.LastActiveAtEpochNumber == currentEpochNumber) + } } // Test the following series of blocks to make sure that ProcessBlockPoS properly handles all cases as expected during the steady state @@ -2331,12 +2348,12 @@ func _generateRealBlockWithFailingTxn(testMeta *TestMeta, blockHeight uint64, vi // TODO: Get real seed signature. 
prevBlock, exists := testMeta.chain.blockIndexByHash[*prevBlockHash] require.True(testMeta.t, exists) - seedSignature := getRandomSeedSignature(testMeta, blockHeight, view, prevBlock.Header.ProposerRandomSeedSignature) // Always update the testMeta latestBlockView latestBlockView, err := testMeta.chain.getUtxoViewAtBlockHash(*prevBlockHash) require.NoError(testMeta.t, err) latestBlockHeight := testMeta.chain.blockIndexByHash[*prevBlockHash].Height testMeta.posMempool.UpdateLatestBlock(latestBlockView, uint64(latestBlockHeight)) + seedSignature := getRandomSeedSignature(testMeta, blockHeight, view, prevBlock.Header.ProposerRandomSeedSignature) fullBlockTemplate := _getFullRealBlockTemplate(testMeta, testMeta.posMempool.readOnlyLatestBlockView, blockHeight, view, seedSignature, isTimeout) // Remove the transactions from this block from the mempool. // This prevents nonce reuse issues when trying to make reorg blocks. @@ -2394,14 +2411,19 @@ func _generateDummyBlock(testMeta *TestMeta, blockHeight uint64, view uint64, se return blockTemplate } -// _generateBlockAndAddToBestChain generates a dummy BlockTemplate by calling _generateDummyBlock and then adds it to the best chain. -// Finally it updates the PosMempool's latest block view. +// _generateBlockAndAddToBestChain generates a BlockTemplate by calling _generateRealBlock and then adds it to the +// best chain. Finally it updates the PosMempool's latest block view. func _generateBlockAndAddToBestChain(testMeta *TestMeta, blockHeight uint64, view uint64, seed int64) *MsgDeSoBlock { - blockTemplate := _generateDummyBlock(testMeta, blockHeight, view, seed) + blockTemplate := _generateRealBlock(testMeta, blockHeight, view, seed, testMeta.chain.BlockTip().Hash, false) var msgDesoBlock *MsgDeSoBlock msgDesoBlock = blockTemplate newBlockHash, err := msgDesoBlock.Hash() require.NoError(testMeta.t, err) + // Add block to block index. 
+ blockNode, err := testMeta.chain.storeValidatedBlockInBlockIndex(msgDesoBlock) + require.NoError(testMeta.t, err) + require.True(testMeta.t, blockNode.IsStored()) + require.True(testMeta.t, blockNode.IsValidated()) newBlockNode, exists := testMeta.chain.blockIndexByHash[*newBlockHash] require.True(testMeta.t, exists) testMeta.chain.addBlockToBestChain(newBlockNode) @@ -2625,6 +2647,12 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { setBalanceModelBlockHeights(t) // Set the PoS Setup Height to block 11 and cutover to 12. setPoSBlockHeights(t, 11, 12) + // Set Epoch length to 2 block for testing. + DeSoTestnetParams.DefaultEpochDurationNumBlocks = 2 + t.Cleanup(func() { + DeSoTestnetParams.DefaultEpochDurationNumBlocks = 3600 + GlobalDeSoParams = DeSoTestnetParams + }) chain, params, db := NewLowDifficultyBlockchain(t) oldPool, miner := NewTestMiner(t, chain, params, true) @@ -2707,66 +2735,3 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { }) return testMeta } - -// NewTestPoSBlockchain creates a new low-difficulty Blockchain for use in tests. -// It first creates a new Blockchain, then mines 10 blocks to give the senderPkString -// some DESO to send to m0 and m1. Then it stops the miner and PoW Mempool. Finally, -// it creates a PoSMempool and PoSBlockProducer and sets the PoS fork heights. The setup -// block height is set to 9 and the cutover is set to 11. -func NewTestPoSBlockchain(t *testing.T) *TestMeta { - setBalanceModelBlockHeights(t) - // Set the PoS Setup Height to block 9 and cutover to 11. - setPoSBlockHeights(t, 9, 11) - chain, params, db := NewLowDifficultyBlockchain(t) - params.ForkHeights.BalanceModelBlockHeight = 1 - oldPool, miner := NewTestMiner(t, chain, params, true) - // Mine a few blocks to give the senderPkString some money. 
- for ii := 0; ii < 10; ii++ { - _, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, oldPool) - require.NoError(t, err) - } - - m0PubBytes, _, _ := Base58CheckDecode(m0Pub) - publicKeys := []string{m0Pub, m1Pub, m2Pub, m3Pub, m4Pub, m5Pub, m6Pub} - for _, publicKey := range publicKeys { - _, _, _ = _doBasicTransferWithViewFlush( - t, chain, db, params, senderPkString, publicKey, - senderPrivString, 1e9, 1000) - } - oldPool.Stop() - miner.Stop() - latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) - require.NoError(t, err) - maxMempoolPosSizeBytes := uint64(500) - mempoolBackupIntervalMillis := uint64(30000) - mempool := NewPosMempool() - require.NoError(t, mempool.Init( - params, _testGetDefaultGlobalParams(), latestBlockView, 10, _dbDirSetup(t), false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, - )) - require.NoError(t, mempool.Start()) - require.True(t, mempool.IsRunning()) - priv := _generateRandomBLSPrivateKey(t) - m0Pk := NewPublicKey(m0PubBytes) - posBlockProducer := NewPosBlockProducer(mempool, params, m0Pk, priv.PublicKey()) - testMeta := &TestMeta{ - t: t, - chain: chain, - db: db, - params: params, - posMempool: mempool, - posBlockProducer: posBlockProducer, - // TODO: what else do we need here? 
- feeRateNanosPerKb: 1000, - savedHeight: 10, - //miner: nil, - //txnOps: nil, - //txns: nil, - //expectedSenderBalances: nil, - //savedHeight: 0, - //feeRateNanosPerKb: 0, - } - t.Cleanup(func() { - mempool.Stop() - }) - return testMeta -} From 96eeaf7f36e5136832433392e01734fba223db39 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 30 Jan 2024 17:14:52 -0500 Subject: [PATCH 417/762] Validate BMF in connect block (#976) --- lib/block_view.go | 31 +++++++++++++++++++++++++------ lib/errors.go | 1 + lib/pos_block_producer.go | 10 ++++++++-- 3 files changed, 34 insertions(+), 8 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 3c1690af6..51005476d 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3971,6 +3971,10 @@ func (bav *UtxoView) _connectFailingTransaction(txn *MsgDeSoTxn, blockHeight uin // // burnFee := fee - log_2(fee), utilityFee := log_2(fee). func computeBMF(fee uint64) (_burnFee uint64, _utilityFee uint64) { + // If no fee, burn and utility fee are both 0. + if fee == 0 { + return 0, 0 + } // Compute the utility fee as log_2(fee). We can find it by taking the bit length of fee. // Alternatively: uint64(bits.Len64(fee)) utilityFee, _ := BigFloatLog2(NewFloat().SetUint64(fee)).Uint64() @@ -4049,6 +4053,7 @@ func (bav *UtxoView) ConnectBlock( // keep track of the total fees throughout. 
var totalFees uint64 utxoOps := [][]*UtxoOperation{} + var maxUtilityFee uint64 for txIndex, txn := range desoBlock.Txns { txHash := txHashes[txIndex] @@ -4066,19 +4071,21 @@ func (bav *UtxoView) ConnectBlock( txnConnects := blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) || (txIndex == 0 && txn.TxnMeta.GetTxnType() == TxnTypeBlockReward) || desoBlock.TxnConnectStatusByIndex.Get(txIndex-1) - if txnConnects && err != nil { - return nil, errors.Wrapf(err, "ConnectBlock: error connecting txn #%d", txIndex) - } else if !txnConnects { + var utilityFee uint64 + if txnConnects { + if err != nil { + return nil, errors.Wrapf(err, "ConnectBlock: error connecting txn #%d", txIndex) + } + _, utilityFee = computeBMF(currentFees) + } else { if err == nil { return nil, errors.Wrapf(err, "ConnectBlock: txn #%d should not connect based on "+ "TxnConnectStatusByIndex but err is nil", txIndex) } - var burnFee, utilityFee uint64 - utxoOpsForTxn, burnFee, utilityFee, err = bav._connectFailingTransaction(txn, uint32(blockHeader.Height), verifySignatures) + utxoOpsForTxn, _, utilityFee, err = bav._connectFailingTransaction(txn, uint32(blockHeader.Height), verifySignatures) if err != nil { return nil, errors.Wrapf(err, "ConnectBlock: error connecting failing txn #%d", txIndex) } - _, _ = burnFee, utilityFee // TODO: figure out what we're supposed to do with these ones. } // After the block reward patch block height, we only include fees from transactions @@ -4103,6 +4110,15 @@ func (bav *UtxoView) ConnectBlock( return nil, RuleErrorTxnOutputWithInvalidAmount } totalFees += currentFees + + // For PoS, the maximum block reward is based on the maximum utility fee. + // Add the utility fees to the max utility fees. If any overflow + // occurs mark the block as invalid and return a rule error. 
+ maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee) + if err != nil { + return nil, errors.Wrapf(RuleErrorPoSBlockRewardWithInvalidAmount, + "ConnectBlock: error computing maxUtilityFee: %v", err) + } } // Add the utxo operations to our list for all the txns. @@ -4146,6 +4162,9 @@ func (bav *UtxoView) ConnectBlock( return nil, RuleErrorBlockRewardOverflow } maxBlockReward := blockReward + totalFees + if blockHeight >= uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + maxBlockReward = maxUtilityFee + } // If the outputs of the block reward txn exceed the max block reward // allowed then mark the block as invalid and return an error. if blockRewardOutput > maxBlockReward { diff --git a/lib/errors.go b/lib/errors.go index a187dcaac..a8d68188f 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -37,6 +37,7 @@ const ( RuleErrorInputSpendsImmatureBlockReward RuleError = "RuleErrorInputSpendsImmatureBlockReward" RuleErrorInputSpendsOutputWithInvalidAmount RuleError = "RuleErrorInputSpendsOutputWithInvalidAmount" RuleErrorTxnOutputWithInvalidAmount RuleError = "RuleErrorTxnOutputWithInvalidAmount" + RuleErrorPoSBlockRewardWithInvalidAmount RuleError = "RuleErrorPoSBlockRewardWithInvalidAmount" RuleErrorTxnOutputExceedsInput RuleError = "RuleErrorTxnOutputExceedsInput" RuleErrorTxnFeeBelowNetworkMinimum RuleError = "RuleErrorTxnFeeBelowNetworkMinimum" RuleErrorOverflowDetectedInFeeRateCalculation RuleError = "RuleErrorOverflowDetectedInFeeRateCalculation" diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 62d2dd0f7..d5bc3a610 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -190,7 +190,10 @@ func (pbp *PosBlockProducer) getBlockTransactions( currentBlockSize += uint64(len(txnBytes)) // Compute BMF for the transaction. 
_, utilityFee := computeBMF(fees) - maxUtilityFee += utilityFee + maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee) + if err != nil { + return nil, nil, 0, errors.Wrapf(err, "Error computing max utility fee: ") + } continue } // If the transaction didn't connect, we will try to add it as a failing transaction. @@ -211,7 +214,10 @@ func (pbp *PosBlockProducer) getBlockTransactions( txnConnectStatusByIndex.Set(len(blocksTxns), false) blocksTxns = append(blocksTxns, txn.GetTxn()) currentBlockSize += uint64(len(txnBytes)) - maxUtilityFee += utilityFee + maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee) + if err != nil { + return nil, nil, 0, errors.Wrapf(err, "Error computing max utility fee: ") + } } return blocksTxns, txnConnectStatusByIndex, maxUtilityFee, nil From 17370dfb1ce0bd2f8b22ee848e2f78c7c5e6ce40 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 30 Jan 2024 21:00:11 -0500 Subject: [PATCH 418/762] Convert MsgDeSoHeader TstampNanoSecs to int64 (#972) * Convert MsgDeSoHeader TstampNanoSecs to int64 * Rename * Fix typo * Rebase * Cleanup --- lib/block_producer.go | 8 ++++---- lib/block_view.go | 7 ++++--- lib/block_view_bitcoin_test.go | 2 +- lib/block_view_lockups_test.go | 7 ++++--- lib/blockchain_test.go | 14 +++++++------- lib/constants.go | 19 +++++++++++++------ lib/network.go | 16 ++++++++-------- lib/pos_block_producer.go | 8 ++++---- lib/pos_block_producer_test.go | 2 +- lib/pos_blockchain.go | 2 +- lib/pos_blockchain_test.go | 16 ++++++++-------- lib/pos_epoch.go | 6 +++--- lib/pos_epoch_complete_hook.go | 8 ++++---- lib/pos_epoch_complete_hook_test.go | 5 +++-- lib/pos_epoch_test.go | 2 +- lib/pos_fee_estimator.go | 4 ++-- lib/pos_staking_rewards.go | 6 +++--- lib/postgres.go | 4 ++-- 18 files changed, 73 insertions(+), 63 deletions(-) diff --git a/lib/block_producer.go b/lib/block_producer.go index 122aead36..00d3ab0c3 100644 --- a/lib/block_producer.go +++ 
b/lib/block_producer.go @@ -114,11 +114,11 @@ func (desoBlockProducer *DeSoBlockProducer) _updateBlockTimestamp(blk *MsgDeSoBl // the timestamp set in the last block then set the time based on the last // block's timestamp instead. We do this because consensus rules require a // monotonically increasing timestamp. - blockTstamp := uint32(desoBlockProducer.chain.timeSource.AdjustedTime().Unix()) - if blockTstamp <= uint32(lastNode.Header.GetTstampSecs()) { - blockTstamp = uint32(lastNode.Header.GetTstampSecs()) + 1 + blockTstamp := desoBlockProducer.chain.timeSource.AdjustedTime().Unix() + if blockTstamp <= lastNode.Header.GetTstampSecs() { + blockTstamp = lastNode.Header.GetTstampSecs() + 1 } - blk.Header.SetTstampSecs(uint64(blockTstamp)) + blk.Header.SetTstampSecs(blockTstamp) } func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) ( diff --git a/lib/block_view.go b/lib/block_view.go index 51005476d..141a9fa0e 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4,14 +4,15 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/deso-protocol/core/bls" - "github.com/deso-protocol/core/collections/bitset" "math" "math/big" "reflect" "strings" "time" + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections/bitset" + "github.com/btcsuite/btcd/wire" "github.com/holiman/uint256" @@ -4065,7 +4066,7 @@ func (bav *UtxoView) ConnectBlock( // would slow down block processing significantly. We should figure out a way to // enforce this check in the future, but for now the only attack vector is one in // which a miner is trying to spam the network, which should generally never happen. 
- utxoOpsForTxn, totalInput, totalOutput, currentFees, err := bav.ConnectTransaction(txn, txHash, 0, uint32(blockHeader.Height), int64(blockHeader.TstampNanoSecs), verifySignatures, false) + utxoOpsForTxn, totalInput, totalOutput, currentFees, err := bav.ConnectTransaction(txn, txHash, 0, uint32(blockHeader.Height), blockHeader.TstampNanoSecs, verifySignatures, false) _, _ = totalInput, totalOutput // A bit surprising we don't use these // After the PoS cutover, we need to check if the transaction is a failing transaction. txnConnects := blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) || diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index 6e3671156..a428bd5af 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -59,7 +59,7 @@ func GetTestParamsCopy( big.NewInt(0), // We are bastardizing the DeSo header to store Bitcoin information here. &MsgDeSoHeader{ - TstampNanoSecs: SecondsToNanoSeconds(uint64(startHeader.Timestamp.Unix())), + TstampNanoSecs: SecondsToNanoSeconds(startHeader.Timestamp.Unix()), Height: 0, }, StatusBitcoinHeaderValidated, diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 776941fe1..09120db93 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -1,13 +1,14 @@ package lib import ( + "testing" + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v3" "github.com/holiman/uint256" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" ) func TestCoinLockupsForkHeight(t *testing.T) { @@ -2752,7 +2753,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { blk1Root, _, err := ComputeMerkleRoot(blk1.Txns) require.NoError(t, err) blk1.Header.TransactionMerkleRoot = blk1Root - blk1.Header.TstampNanoSecs = uint64(tipTimestamp + 1e9) + blk1.Header.TstampNanoSecs = tipTimestamp + 1e9 // Mine the first block to ensure the 
difficulty is sufficient for ProcessBlock // NOTE: 10000 iterations is presumed sufficient for testing as seen in TestBasicTransfer. @@ -2809,7 +2810,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { blk2Root, _, err := ComputeMerkleRoot(blk2.Txns) require.NoError(t, err) blk2.Header.TransactionMerkleRoot = blk2Root - blk2.Header.TstampNanoSecs = uint64(tipTimestamp + 3e9) + blk2.Header.TstampNanoSecs = tipTimestamp + 3e9 // Mine the second block to ensure the difficulty is sufficient for ProcessBlock // NOTE: 10000 iterations is presumed sufficient for testing as seen in TestBasicTransfer. diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index eda5838cf..78303f327 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -1250,7 +1250,7 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 1 second, which is 2x too fast. - TstampNanoSecs: SecondsToNanoSeconds(uint64(ii)), + TstampNanoSecs: SecondsToNanoSeconds(int64(ii)), }, StatusNone, )) @@ -1287,7 +1287,7 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 4 second, which is 2x too slow. - TstampNanoSecs: SecondsToNanoSeconds(uint64(ii * 4)), + TstampNanoSecs: SecondsToNanoSeconds(int64(ii * 4)), }, StatusNone, )) @@ -1346,7 +1346,7 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 1 second, which is 2x too fast. - TstampNanoSecs: SecondsToNanoSeconds(uint64(ii)), + TstampNanoSecs: SecondsToNanoSeconds(int64(ii)), }, StatusNone, )) @@ -1383,7 +1383,7 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 8 second, which is >2x too slow. 
- TstampNanoSecs: SecondsToNanoSeconds(uint64(ii * 4)), + TstampNanoSecs: SecondsToNanoSeconds(int64(ii * 4)), }, StatusNone, )) @@ -1497,7 +1497,7 @@ func TestCalcNextDifficultyTargetJustRight(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 2 second, which is under the limit. - TstampNanoSecs: SecondsToNanoSeconds(uint64(ii * 2)), + TstampNanoSecs: SecondsToNanoSeconds(int64(ii * 2)), }, StatusNone, )) @@ -1552,7 +1552,7 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 1 second, which is 2x too fast. - TstampNanoSecs: SecondsToNanoSeconds(uint64(ii)), + TstampNanoSecs: SecondsToNanoSeconds(int64(ii)), }, StatusNone, )) @@ -1589,7 +1589,7 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { nil, &MsgDeSoHeader{ // Blocks generating every 3 seconds, which is slow but under the limit. - TstampNanoSecs: SecondsToNanoSeconds(uint64(float32(ii) * 3)), + TstampNanoSecs: SecondsToNanoSeconds(int64(ii) * 3), }, StatusNone, )) diff --git a/lib/constants.go b/lib/constants.go index 15710f80d..3ead46483 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -52,17 +52,24 @@ const ( // Time constants const ( - NanoSecondsPerSecond = uint64(1000000000) + NanoSecondsPerSecond = int64(1000000000) ) -func SecondsToNanoSeconds(secs uint64) uint64 { +func SecondsToNanoSeconds(secs int64) int64 { return secs * NanoSecondsPerSecond } -func NanoSecondsToSeconds(nanos uint64) uint64 { +func NanoSecondsToSeconds(nanos int64) int64 { return nanos / NanoSecondsPerSecond } +func NanoSecondsToUint64MicroSeconds(nanos int64) uint64 { + if nanos < 0 { + return 0 + } + return uint64(nanos / 1000) +} + // Snapshot constants const ( // GetSnapshotTimeout is used in Peer when we fetch a snapshot chunk, and we need to retry. @@ -702,7 +709,7 @@ type DeSoParams struct { // TODO: add support for putting the drift in global params. 
// DefaultBlockTimestampDriftNanoSecs is the default number of nanoseconds // from the current timestamp that we will allow a PoS block to be submitted. - DefaultBlockTimestampDriftNanoSecs uint64 + DefaultBlockTimestampDriftNanoSecs int64 // DefaultFeeBucketGrowthRateBasisPoints is the rate of growth of the fee bucket ranges. The multiplier is given // as basis points. For example a value of 1000 means that the fee bucket ranges will grow by 10% each time. @@ -1118,7 +1125,7 @@ var DeSoMainnetParams = DeSoParams{ DefaultJailInactiveValidatorGracePeriodEpochs: uint64(48), // The number of nanoseconds from the current timestamp that we will allow a PoS block to be submitted. - DefaultBlockTimestampDriftNanoSecs: uint64((time.Minute * 10).Nanoseconds()), + DefaultBlockTimestampDriftNanoSecs: (time.Minute * 10).Nanoseconds(), // The rate of growth of the fee bucket ranges. DefaultFeeBucketGrowthRateBasisPoints: uint64(1000), @@ -1385,7 +1392,7 @@ var DeSoTestnetParams = DeSoParams{ DefaultJailInactiveValidatorGracePeriodEpochs: uint64(48), // The number of nanoseconds from the current timestamp that we will allow a PoS block to be submitted. - DefaultBlockTimestampDriftNanoSecs: uint64((time.Minute * 10).Nanoseconds()), + DefaultBlockTimestampDriftNanoSecs: (time.Minute * 10).Nanoseconds(), // The rate of growth of the fee bucket ranges. DefaultFeeBucketGrowthRateBasisPoints: uint64(1000), diff --git a/lib/network.go b/lib/network.go index 83bc4520d..5c27ac775 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1931,7 +1931,7 @@ type MsgDeSoHeader struct { // use the SetTstampSecs() and GetTstampSecs() public methods. // The unix timestamp (in nanoseconds) specifying when this block was produced. - TstampNanoSecs uint64 + TstampNanoSecs int64 // The height of the block this header corresponds to. 
Height uint64 @@ -2040,11 +2040,11 @@ func HeaderSizeBytes() int { return len(headerBytes) } -func (msg *MsgDeSoHeader) SetTstampSecs(tstampSecs uint64) { +func (msg *MsgDeSoHeader) SetTstampSecs(tstampSecs int64) { msg.TstampNanoSecs = SecondsToNanoSeconds(tstampSecs) } -func (msg *MsgDeSoHeader) GetTstampSecs() uint64 { +func (msg *MsgDeSoHeader) GetTstampSecs() int64 { return NanoSecondsToSeconds(msg.TstampNanoSecs) } @@ -2124,7 +2124,7 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion1(preSignature bool) ([]byte, error // TstampSecs { scratchBytes := [8]byte{} - binary.BigEndian.PutUint64(scratchBytes[:], msg.GetTstampSecs()) + binary.BigEndian.PutUint64(scratchBytes[:], uint64(msg.GetTstampSecs())) retBytes = append(retBytes, scratchBytes[:]...) // TODO: Don't allow this field to exceed 32-bits for now. This will @@ -2194,7 +2194,7 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error // TstampNanosSecs: this field can be encoded to take up to the full 64 bits now // that MsgDeSoHeader version 2 does not need to be backwards compatible. - retBytes = append(retBytes, UintToBuf(msg.TstampNanoSecs)...) + retBytes = append(retBytes, IntToBuf(msg.TstampNanoSecs)...) 
// Height: similar to the field above, this field can be encoded to take // up to the full 64 bits now that MsgDeSoHeader version 2 does not need to @@ -2309,7 +2309,7 @@ func DecodeHeaderVersion0(rr io.Reader) (*MsgDeSoHeader, error) { if err != nil { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TstampSecs") } - retHeader.SetTstampSecs(uint64(binary.LittleEndian.Uint32(scratchBytes[:]))) + retHeader.SetTstampSecs(int64(binary.LittleEndian.Uint32(scratchBytes[:]))) } // Height @@ -2357,7 +2357,7 @@ func DecodeHeaderVersion1(rr io.Reader) (*MsgDeSoHeader, error) { if err != nil { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TstampSecs") } - retHeader.SetTstampSecs(binary.BigEndian.Uint64(scratchBytes[:])) + retHeader.SetTstampSecs(int64(binary.BigEndian.Uint64(scratchBytes[:]))) } // Height @@ -2409,7 +2409,7 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { } // TstampNanoSecs - retHeader.TstampNanoSecs, err = ReadUvarint(rr) + retHeader.TstampNanoSecs, err = ReadVarint(rr) if err != nil { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TstampNanoSecs") } diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index d5bc3a610..81665b46e 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -75,7 +75,7 @@ func (pbp *PosBlockProducer) CreateUnsignedTimeoutBlock(latestBlockView *UtxoVie func (pbp *PosBlockProducer) createBlockTemplate(latestBlockView *UtxoView, newBlockHeight uint64, view uint64, proposerRandomSeedSignature *bls.Signature) (BlockTemplate, error) { // First get the block without the header. 
- currentTimestamp := uint64(time.Now().UnixNano()) + currentTimestamp := time.Now().UnixNano() block, err := pbp.createBlockWithoutHeader(latestBlockView, newBlockHeight, currentTimestamp) if err != nil { return nil, errors.Wrapf(err, "PosBlockProducer.CreateBlockTemplate: Problem creating block without header") @@ -108,7 +108,7 @@ func (pbp *PosBlockProducer) createBlockTemplate(latestBlockView *UtxoView, newB // createBlockWithoutHeader is a helper function used by createBlockTemplate. It constructs a partially filled out // block with Fee-Time ordered transactions. The returned block all its contents filled, except for the header. func (pbp *PosBlockProducer) createBlockWithoutHeader( - latestBlockView *UtxoView, newBlockHeight uint64, newBlockTimestampNanoSecs uint64) (BlockTemplate, error) { + latestBlockView *UtxoView, newBlockHeight uint64, newBlockTimestampNanoSecs int64) (BlockTemplate, error) { block := NewMessage(MsgTypeBlock).(*MsgDeSoBlock) // Create the block reward transaction. @@ -144,7 +144,7 @@ func (pbp *PosBlockProducer) createBlockWithoutHeader( func (pbp *PosBlockProducer) getBlockTransactions( latestBlockView *UtxoView, newBlockHeight uint64, - newBlockTimestampNanoSecs uint64, + newBlockTimestampNanoSecs int64, maxBlockSizeBytes uint64, ) ( _txns []*MsgDeSoTxn, @@ -179,7 +179,7 @@ func (pbp *PosBlockProducer) getBlockTransactions( return nil, nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") } _, _, _, fees, err := blockUtxoViewCopy._connectTransaction( - txn.GetTxn(), txn.Hash(), int64(len(txnBytes)), uint32(newBlockHeight), int64(newBlockTimestampNanoSecs), + txn.GetTxn(), txn.Hash(), int64(len(txnBytes)), uint32(newBlockHeight), newBlockTimestampNanoSecs, true, false) // Check if the transaction connected. 
diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 2cf32d4ab..e0c534876 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -70,7 +70,7 @@ func TestCreateBlockTemplate(t *testing.T) { root, _, err := ComputeMerkleRoot(blockTemplate.Txns) require.NoError(err) require.Equal(blockTemplate.Header.TransactionMerkleRoot, root) - require.Equal(true, blockTemplate.Header.TstampNanoSecs < uint64(time.Now().UnixNano())) + require.Equal(true, blockTemplate.Header.TstampNanoSecs < time.Now().UnixNano()) require.Equal(blockTemplate.Header.Height, uint64(3)) require.Equal(blockTemplate.Header.ProposedInView, uint64(10)) require.Equal(blockTemplate.Header.ProposerPublicKey, m0Pk) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 111c22a3a..8a03d6d0f 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -595,7 +595,7 @@ func (bc *Blockchain) isProperlyFormedBlockPoS(block *MsgDeSoBlock) error { // Timestamp validation // TODO: Add support for putting the drift into global params. - if block.Header.TstampNanoSecs > uint64(time.Now().UnixNano())+bc.params.DefaultBlockTimestampDriftNanoSecs { + if block.Header.TstampNanoSecs > time.Now().UnixNano()+bc.params.DefaultBlockTimestampDriftNanoSecs { return RuleErrorPoSBlockTstampNanoSecsInFuture } diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 4ce9885ea..df1b4540f 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -240,7 +240,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * require.Equal(t, err, RuleErrorPoSBlockTstampNanoSecsTooOld) // Block timestamps can't be in the future. 
- block.Header.TstampNanoSecs = uint64(time.Now().UnixNano() + (11 * time.Minute).Nanoseconds()) + block.Header.TstampNanoSecs = time.Now().UnixNano() + (11 * time.Minute).Nanoseconds() err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorPoSBlockTstampNanoSecsInFuture) @@ -278,10 +278,10 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * func TestHasValidBlockHeight(t *testing.T) { bc, _, _ := NewTestBlockchain(t) hash := NewBlockHash(RandomBytes(32)) - nowTimestamp := uint64(time.Now().UnixNano()) + nowTimestamp := time.Now().UnixNano() genesisBlock := NewBlockNode(nil, hash, 1, nil, nil, &MsgDeSoHeader{ Version: 2, - TstampNanoSecs: nowTimestamp - uint64(time.Minute.Nanoseconds()), + TstampNanoSecs: nowTimestamp - time.Minute.Nanoseconds(), Height: 1, ProposedInView: 1, ValidatorsVoteQC: nil, @@ -298,7 +298,7 @@ func TestHasValidBlockHeight(t *testing.T) { Header: &MsgDeSoHeader{ PrevBlockHash: genesisBlock.Hash, Version: 2, - TstampNanoSecs: uint64(time.Now().UnixNano()) - 10, + TstampNanoSecs: time.Now().UnixNano() - 10, Height: 2, ProposedInView: 1, ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ @@ -386,7 +386,7 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { Header: &MsgDeSoHeader{ PrevBlockHash: hash2, Version: 2, - TstampNanoSecs: uint64(time.Now().UnixNano()) - 10, + TstampNanoSecs: time.Now().UnixNano() - 10, Height: 2, ProposedInView: 1, ProposerPublicKey: NewPublicKey(RandomBytes(33)), @@ -536,7 +536,7 @@ func TestHasValidBlockViewPoS(t *testing.T) { Header: &MsgDeSoHeader{ PrevBlockHash: hash2, Version: 2, - TstampNanoSecs: uint64(time.Now().UnixNano()) - 10, + TstampNanoSecs: time.Now().UnixNano() - 10, Height: 2, ProposedInView: 1, ValidatorsTimeoutAggregateQC: &TimeoutAggregateQuorumCertificate{ @@ -2575,11 +2575,11 @@ func _getFullRealBlockTemplate(testMeta *TestMeta, latestBlockView *UtxoView, bl blockTemplate.Header.ProposerPublicKey = 
NewPublicKey(leaderPublicKeyBytes) blockTemplate.Header.ProposerVotingPublicKey = leaderVotingPrivateKey.PublicKey() // Ugh we need to adjust the timestamp. - blockTemplate.Header.TstampNanoSecs = uint64(time.Now().UnixNano()) + blockTemplate.Header.TstampNanoSecs = time.Now().UnixNano() if chainTip.Header.TstampNanoSecs > blockTemplate.Header.TstampNanoSecs { blockTemplate.Header.TstampNanoSecs = chainTip.Header.TstampNanoSecs + 1 } - require.Less(testMeta.t, blockTemplate.Header.TstampNanoSecs, uint64(time.Now().UnixNano())+testMeta.chain.params.DefaultBlockTimestampDriftNanoSecs) + require.Less(testMeta.t, blockTemplate.Header.TstampNanoSecs, time.Now().UnixNano()+testMeta.chain.params.DefaultBlockTimestampDriftNanoSecs) var proposerVotePartialSignature *bls.Signature // Just hack it so the leader gets the block reward. blockTemplate.Txns[0].TxOutputs[0].PublicKey = leaderPublicKeyBytes diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index 25b55aa22..438377777 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -21,7 +21,7 @@ type EpochEntry struct { // This captures the on-chain timestamp when this epoch entry was created. This does not // represent the timestamp for first block of the epoch, but rather when this epoch entry // was created during that epoch transition at the end of the previous epoch. - CreatedAtBlockTimestampNanoSecs uint64 + CreatedAtBlockTimestampNanoSecs int64 } func (epochEntry *EpochEntry) Copy() *EpochEntry { @@ -40,7 +40,7 @@ func (epochEntry *EpochEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipM data = append(data, UintToBuf(epochEntry.InitialBlockHeight)...) data = append(data, UintToBuf(epochEntry.InitialView)...) data = append(data, UintToBuf(epochEntry.FinalBlockHeight)...) - data = append(data, UintToBuf(epochEntry.CreatedAtBlockTimestampNanoSecs)...) + data = append(data, IntToBuf(epochEntry.CreatedAtBlockTimestampNanoSecs)...) 
return data } @@ -72,7 +72,7 @@ func (epochEntry *EpochEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *b } // CreatedAtBlockTimestampNanoSecs - epochEntry.CreatedAtBlockTimestampNanoSecs, err = ReadUvarint(rr) + epochEntry.CreatedAtBlockTimestampNanoSecs, err = ReadVarint(rr) if err != nil { return errors.Wrapf(err, "EpochEntry.Decode: Problem reading CreatedAtBlockTimestampNanoSecs: ") } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 83b06d598..3cc2b0ab9 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -50,7 +50,7 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) // - Compute the start block height and view number for the next epoch. // - Compute the final block height for the next epoch. // - Update CurrentEpochEntry to the next epoch's. -func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, view uint64, blockTimestampNanoSecs uint64) ([]*UtxoOperation, error) { +func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, view uint64, blockTimestampNanoSecs int64) ([]*UtxoOperation, error) { // Sanity-check that the current block is the last block in the current epoch. // // Note that this will also return true if we're currently at the ProofOfStake1StateSetupBlockHeight @@ -94,7 +94,7 @@ func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, view uint64, block } // Runs all state-mutating operations required when completing an epoch. -func (bav *UtxoView) runEpochCompleteStateTransition(blockHeight uint64, blockTimestampNanoSecs uint64) ([]*UtxoOperation, error) { +func (bav *UtxoView) runEpochCompleteStateTransition(blockHeight uint64, blockTimestampNanoSecs int64) ([]*UtxoOperation, error) { // Jail all inactive validators from the current snapshot validator set. This is an O(n) operation // that loops through all active unjailed validators from current epoch's snapshot validator set // and jails them if they have been inactive. 
@@ -140,7 +140,7 @@ func (bav *UtxoView) runEpochCompleteSnapshotGeneration(epochNumber uint64) erro return nil } -func (bav *UtxoView) computeNextEpochEntry(currentEpochNumber uint64, currentEpochFinalBlockHeight uint64, currentEpochFinalView uint64, nextEpochBlockTimestampNanoSecs uint64) (*EpochEntry, error) { +func (bav *UtxoView) computeNextEpochEntry(currentEpochNumber uint64, currentEpochFinalBlockHeight uint64, currentEpochFinalView uint64, nextEpochBlockTimestampNanoSecs int64) (*EpochEntry, error) { // Retrieve the SnapshotGlobalParamsEntry to determine the next epoch's final block height. We use the // snapshot global params here because the next epoch begin immediately, and its length is used in the PoS // consensus. The validator set for the next epoch needs to be in agreement on the length of the epoch @@ -199,7 +199,7 @@ func (bav *UtxoView) simulatePrevEpochEntry(currentEpochNumber uint64, currentEp } // Updates the currentEpochEntry to the next epoch's. -func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeight uint64, view uint64, blockTimestampNanoSecs uint64) error { +func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeight uint64, view uint64, blockTimestampNanoSecs int64) error { nextEpochEntry, err := bav.computeNextEpochEntry(epochNumber, blockHeight, view, blockTimestampNanoSecs) if err != nil { return errors.Wrap(err, "runEpochCompleteEpochRollover: ") diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index e3c7b3b67..6188a51d0 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -4,10 +4,11 @@ package lib import ( "fmt" - "github.com/deso-protocol/core/bls" "sort" "testing" + "github.com/deso-protocol/core/bls" + "github.com/holiman/uint256" "github.com/stretchr/testify/require" ) @@ -896,7 +897,7 @@ func _runOnEpochCompleteHook(testMeta *TestMeta, blockHeight uint64, viewNumber tmpUtxoView := 
_newUtxoView(testMeta) // Set blockTimestampNanoSecs to 1 year * block height. Every time the block height increments, // the timestamp increases by 1 year - blockTimestampNanoSecs := blockHeight * 365 * 24 * 3600 * 1e9 + blockTimestampNanoSecs := int64(blockHeight) * 365 * 24 * 3600 * 1e9 _, err := tmpUtxoView.RunEpochCompleteHook(blockHeight, viewNumber, blockTimestampNanoSecs) require.NoError(testMeta.t, err) require.NoError(testMeta.t, tmpUtxoView.FlushToDb(blockHeight)) diff --git a/lib/pos_epoch_test.go b/lib/pos_epoch_test.go index 558b558f7..5fb9cd3f9 100644 --- a/lib/pos_epoch_test.go +++ b/lib/pos_epoch_test.go @@ -13,7 +13,7 @@ func TestCurrentEpoch(t *testing.T) { // Initialize blockchain. chain, params, db := NewLowDifficultyBlockchain(t) blockHeight := uint64(chain.blockTip().Height) + 1 - blockTimestampNanoSecs := uint64(chain.blockTip().Header.TstampNanoSecs) + 1e9 + blockTimestampNanoSecs := chain.blockTip().Header.TstampNanoSecs + 1e9 utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, err) diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index 75294eb5d..dff3bfe6e 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -126,7 +126,7 @@ func addBlockToTransactionRegister(txnRegister *TransactionRegister, block *MsgD if txn.TxnMeta.GetTxnType() == TxnTypeBlockReward { continue } - mtxn, err := NewMempoolTx(txn, block.Header.TstampNanoSecs, block.Header.Height) + mtxn, err := NewMempoolTx(txn, NanoSecondsToUint64MicroSeconds(block.Header.TstampNanoSecs), block.Header.Height) if err != nil { return errors.Wrap(err, "PoSFeeEstimator.addBlockToTransactionRegister: error creating MempoolTx") } @@ -159,7 +159,7 @@ func (posFeeEstimator *PoSFeeEstimator) removeBlockNoLock(block *MsgDeSoBlock) e if txn.TxnMeta.GetTxnType() == TxnTypeBlockReward { continue } - mtxn, err := NewMempoolTx(txn, block.Header.TstampNanoSecs, block.Header.Height) + mtxn, err := 
NewMempoolTx(txn, NanoSecondsToUint64MicroSeconds(block.Header.TstampNanoSecs), block.Header.Height) if err != nil { return errors.Wrap(err, "PoSFeeEstimator.RemoveBlock: error creating MempoolTx") } diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index 36db8ba95..4492f7568 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -7,7 +7,7 @@ import ( "github.com/pkg/errors" ) -func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64, blockTimestampNanoSecs uint64) ([]*UtxoOperation, error) { +func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64, blockTimestampNanoSecs int64) ([]*UtxoOperation, error) { // Check if we have switched from PoW to PoS yet. If we have not, then the PoS consensus // has not started yet. We don't want to distribute any staking rewards until the PoS consensus begins. if blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { @@ -266,8 +266,8 @@ func convertAPYBasisPointsToFloat(apyBasisPoints uint64) *big.Float { return NewFloat().Quo(apyBasisPointsAsFloat, _basisPointsAsFloat) } -func computeFractionOfYearAsFloat(nanoSecs uint64) *big.Float { - nanoSecsAsFloat := NewFloat().SetUint64(nanoSecs) +func computeFractionOfYearAsFloat(nanoSecs int64) *big.Float { + nanoSecsAsFloat := NewFloat().SetInt64(nanoSecs) return NewFloat().Quo(nanoSecsAsFloat, _nanoSecsPerYearAsFloat) } diff --git a/lib/postgres.go b/lib/postgres.go index 1b38f820f..fae62bc16 100644 --- a/lib/postgres.go +++ b/lib/postgres.go @@ -1289,7 +1289,7 @@ func (postgres *Postgres) UpsertBlockTx(tx *pg.Tx, blockNode *BlockNode) error { TxMerkleRoot: blockNode.Header.TransactionMerkleRoot, Version: blockNode.Header.Version, - Timestamp: blockNode.Header.TstampNanoSecs, + Timestamp: uint64(blockNode.Header.TstampNanoSecs), Nonce: blockNode.Header.Nonce, ExtraNonce: blockNode.Header.ExtraNonce, } @@ -1322,7 +1322,7 @@ func (postgres *Postgres) GetBlockIndex() 
(map[BlockHash]*BlockNode, error) { Version: block.Version, PrevBlockHash: block.ParentHash, TransactionMerkleRoot: block.TxMerkleRoot, - TstampNanoSecs: block.Timestamp, + TstampNanoSecs: int64(block.Timestamp), Height: block.Height, Nonce: block.Nonce, ExtraNonce: block.ExtraNonce, From 76bf439e148d2d0fba010b3e1b3f68fa3b967db4 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Tue, 30 Jan 2024 22:15:51 -0800 Subject: [PATCH 419/762] PoS Networking and Syncing Documentation (#974) * noop * NetworkManager documentation * gofmt --- lib/network_manager.go | 311 +++++++++++++++++++++++++---------------- lib/server.go | 8 +- 2 files changed, 197 insertions(+), 122 deletions(-) diff --git a/lib/network_manager.go b/lib/network_manager.go index 0227a6ac9..9a83676bc 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -15,11 +15,25 @@ import ( "time" ) -// NetworkManager is a structure that oversees all connections to remote nodes. It is responsible for kicking off -// the initial connections a node makes to the network. It is also responsible for creating RemoteNodes from all -// successful outbound and inbound connections. The NetworkManager also ensures that the node is connected to -// the active validators, once the node reaches Proof of Stake. -// TODO: Document more in later PRs +// NetworkManager is a structure that oversees all connections to RemoteNodes. NetworkManager has the following +// responsibilities in regard to the lifecycle of RemoteNodes: +// - Maintain a list of all RemoteNodes that the node is connected to through the RemoteNodeManager. +// - Initialize RemoteNodes from established outbound and inbound peer connections. +// - Initiate and handle the communication of the handshake process with RemoteNodes. +// +// The NetworkManager is also responsible for opening and closing connections. 
It does this by running a set of +// goroutines that periodically check the state of different categories of RemoteNodes, and disconnects or connects +// RemoteNodes as needed. These categories of RemoteNodes include: +// - Persistent RemoteNodes: These are RemoteNodes that we want to maintain a persistent (constant) connection to. +// These are specified by the --connect-ips flag. +// - Validators: These are RemoteNodes that are in the active validators set. We want to maintain a connection to +// all active validators. We also want to disconnect from any validators that are no longer active. +// - Non-Validators: These are RemoteNodes that are not in the active validators set. We want to maintain a connection +// to at most a target number of outbound and inbound non-validators. If we have more than the target number of +// outbound or inbound non-validators, we will disconnect the excess RemoteNodes. +// +// The NetworkManager also runs an auxiliary goroutine that periodically cleans up RemoteNodes that may have timed out +// the handshake process, or became invalid for some other reason. type NetworkManager struct { // The parameters we are initialized with. params *DeSoParams @@ -28,7 +42,6 @@ type NetworkManager struct { blsKeystore *BLSKeystore handshake *HandshakeManager - rnManager *RemoteNodeManager // The address manager keeps track of peer addresses we're aware of. When @@ -86,10 +99,13 @@ func NewConnectionController(params *DeSoParams, cmgr *ConnectionManager, rnMana } func (nm *NetworkManager) Start() { + // If the NetworkManager routines are disabled, we do nothing. if nm.params.DisableNetworkManagerRoutines { return } + // Start the NetworkManager goroutines. The startGroup is used to ensure that all goroutines have started before + // exiting the context of this function. 
nm.startGroup.Add(4) go nm.startPersistentConnector() go nm.startValidatorConnector() @@ -112,6 +128,17 @@ func (nm *NetworkManager) GetRemoteNodeManager() *RemoteNodeManager { return nm.rnManager } +func (nm *NetworkManager) SetTargetOutboundPeers(numPeers uint32) { + nm.targetNonValidatorOutboundRemoteNodes = numPeers +} + +// ########################### +// ## NetworkManager Routines +// ########################### + +// startPersistentConnector is responsible for ensuring that the node is connected to all persistent IP addresses. It +// does this by periodically checking the persistentIpToRemoteNodeIdsMap, and connecting to any persistent IP addresses +// that are not already connected. func (nm *NetworkManager) startPersistentConnector() { nm.startGroup.Done() for { @@ -165,6 +192,8 @@ func (nm *NetworkManager) startNonValidatorConnector() { } } +// startRemoteNodeCleanup is responsible for cleaning up RemoteNodes that may have timed out the handshake process, +// or became invalid for some other reason. func (nm *NetworkManager) startRemoteNodeCleanup() { nm.startGroup.Done() @@ -184,23 +213,29 @@ func (nm *NetworkManager) startRemoteNodeCleanup() { // ## Handlers (Peer, DeSoMessage) // ########################### +// _handleVersionMessage is called when a new version message is received. It is a wrapper around the handshake's +// handleVersionMessage function. func (nm *NetworkManager) _handleVersionMessage(origin *Peer, desoMsg DeSoMessage) { nm.handshake.handleVersionMessage(origin, desoMsg) } +// _handleVerackMessage is called when a new verack message is received. It is a wrapper around the handshake's +// handleVerackMessage function. func (nm *NetworkManager) _handleVerackMessage(origin *Peer, desoMsg DeSoMessage) { nm.handshake.handleVerackMessage(origin, desoMsg) } -func (nm *NetworkManager) _handleDonePeerMessage(origin *Peer, desoMsg DeSoMessage) { +// _handleDisconnectedPeerMessage is called when a peer is disconnected. 
It is responsible for cleaning up the +// RemoteNode associated with the peer. +func (nm *NetworkManager) _handleDisconnectedPeerMessage(origin *Peer, desoMsg DeSoMessage) { if desoMsg.GetMsgType() != MsgTypeDisconnectedPeer { return } - glog.V(2).Infof("NetworkManager.handleDonePeerMessage: Handling disconnected peer message for "+ + glog.V(2).Infof("NetworkManager._handleDisconnectedPeerMessage: Handling disconnected peer message for "+ "id=%v", origin.ID) nm.rnManager.DisconnectById(NewRemoteNodeId(origin.ID)) - // Update the persistentIpToRemoteNodeIdsMap. + // Update the persistentIpToRemoteNodeIdsMap, in case the disconnected peer was a persistent peer. ipRemoteNodeIdMap := nm.persistentIpToRemoteNodeIdsMap.ToMap() for ip, id := range ipRemoteNodeIdMap { if id.ToUint64() == origin.ID { @@ -223,6 +258,7 @@ func (nm *NetworkManager) _handleNewConnectionMessage(origin *Peer, desoMsg DeSo var remoteNode *RemoteNode var err error + // We create the RemoteNode differently depending on whether the connection is inbound or outbound. switch msg.Connection.GetConnectionType() { case ConnectionTypeInbound: remoteNode, err = nm.processInboundConnection(msg.Connection) @@ -244,6 +280,102 @@ func (nm *NetworkManager) _handleNewConnectionMessage(origin *Peer, desoMsg DeSo nm.handshake.InitiateHandshake(remoteNode) } +// processInboundConnection is called when a new inbound connection is established. At this point, the connection is not validated, +// nor is it assigned to a RemoteNode. This function is responsible for validating the connection and creating a RemoteNode from it. +// Once the RemoteNode is created, we will initiate handshake. 
+func (nm *NetworkManager) processInboundConnection(conn Connection) (*RemoteNode, error) { + var ic *inboundConnection + var ok bool + if ic, ok = conn.(*inboundConnection); !ok { + return nil, fmt.Errorf("NetworkManager.handleInboundConnection: Connection is not an inboundConnection") + } + + // If we want to limit inbound connections to one per IP address, check to make sure this address isn't already connected. + if nm.limitOneInboundRemoteNodePerIP && + nm.isDuplicateInboundIPAddress(ic.connection.RemoteAddr()) { + + return nil, fmt.Errorf("NetworkManager.handleInboundConnection: Rejecting INBOUND peer (%s) due to "+ + "already having an inbound connection from the same IP with limit_one_inbound_connection_per_ip set", + ic.connection.RemoteAddr().String()) + } + + na, err := nm.ConvertIPStringToNetAddress(ic.connection.RemoteAddr().String()) + if err != nil { + return nil, errors.Wrapf(err, "NetworkManager.handleInboundConnection: Problem calling "+ + "ConvertIPStringToNetAddress for addr: (%s)", ic.connection.RemoteAddr().String()) + } + + remoteNode, err := nm.rnManager.AttachInboundConnection(ic.connection, na) + if remoteNode == nil || err != nil { + return nil, errors.Wrapf(err, "NetworkManager.handleInboundConnection: Problem calling "+ + "AttachInboundConnection for addr: (%s)", ic.connection.RemoteAddr().String()) + } + + return remoteNode, nil +} + +// processOutboundConnection is called when a new outbound connection is established. At this point, the connection is not validated, +// nor is it assigned to a RemoteNode. This function is responsible for validating the connection and creating a RemoteNode from it. +// Once the RemoteNode is created, we will initiate handshake. 
+func (nm *NetworkManager) processOutboundConnection(conn Connection) (*RemoteNode, error) { + var oc *outboundConnection + var ok bool + if oc, ok = conn.(*outboundConnection); !ok { + return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Connection is not an outboundConnection") + } + + if oc.failed { + return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Failed to connect to peer (%s:%v)", + oc.address.IP.String(), oc.address.Port) + } + + if !oc.isPersistent { + nm.AddrMgr.Connected(oc.address) + nm.AddrMgr.Good(oc.address) + } + + // If this is a non-persistent outbound peer and the group key overlaps with another peer we're already connected to then + // abort mission. We only connect to one peer per IP group in order to prevent Sybil attacks. + if !oc.isPersistent && nm.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { + return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Rejecting OUTBOUND NON-PERSISTENT "+ + "connection with redundant group key (%s).", addrmgr.GroupKey(oc.address)) + } + + na, err := nm.ConvertIPStringToNetAddress(oc.connection.RemoteAddr().String()) + if err != nil { + return nil, errors.Wrapf(err, "NetworkManager.handleOutboundConnection: Problem calling ipToNetAddr "+ + "for addr: (%s)", oc.connection.RemoteAddr().String()) + } + + // Attach the connection before additional validation steps because it is already established. + remoteNode, err := nm.rnManager.AttachOutboundConnection(oc.connection, na, oc.attemptId, oc.isPersistent) + if remoteNode == nil || err != nil { + return nil, errors.Wrapf(err, "NetworkManager.handleOutboundConnection: Problem calling rnManager.AttachOutboundConnection "+ + "for addr: (%s)", oc.connection.RemoteAddr().String()) + } + + // If this is a persistent remote node or a validator, we don't need to do any extra connection validation. 
+ if remoteNode.IsPersistent() || remoteNode.IsExpectedValidator() { + return remoteNode, nil + } + + // If we get here, it means we're dealing with a non-persistent or non-validator remote node. We perform additional + // connection validation. + + // If the group key overlaps with another peer we're already connected to then abort mission. We only connect to + // one peer per IP group in order to prevent Sybil attacks. + if nm.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { + return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Rejecting OUTBOUND NON-PERSISTENT "+ + "connection with redundant group key (%s).", addrmgr.GroupKey(oc.address)) + } + nm.cmgr.AddToGroupKey(na) + + return remoteNode, nil +} + +// cleanupFailedInboundConnection is called when an inbound connection fails to be processed. It is responsible for +// cleaning up the RemoteNode and the connection. Most of the time, the RemoteNode will be nil, but if the RemoteNode +// was successfully created, we will disconnect it. func (nm *NetworkManager) cleanupFailedInboundConnection(remoteNode *RemoteNode, connection Connection) { glog.V(2).Infof("NetworkManager.cleanupFailedInboundConnection: Cleaning up failed inbound connection") if remoteNode != nil { @@ -252,6 +384,8 @@ func (nm *NetworkManager) cleanupFailedInboundConnection(remoteNode *RemoteNode, connection.Close() } +// cleanupFailedOutboundConnection is called when an outbound connection fails to be processed. It is responsible for +// cleaning up the RemoteNode and the connection. func (nm *NetworkManager) cleanupFailedOutboundConnection(connection Connection) { oc, ok := connection.(*outboundConnection) if !ok { @@ -259,6 +393,8 @@ func (nm *NetworkManager) cleanupFailedOutboundConnection(connection Connection) } glog.V(2).Infof("NetworkManager.cleanupFailedOutboundConnection: Cleaning up failed outbound connection") + // Find the RemoteNode associated with the connection. 
It should almost always exist, since we create the RemoteNode + // as we're attempting to connect to the address. id := NewRemoteNodeId(oc.attemptId) rn := nm.rnManager.GetRemoteNodeById(id) if rn != nil { @@ -272,6 +408,8 @@ func (nm *NetworkManager) cleanupFailedOutboundConnection(connection Connection) // ## Persistent Connections // ########################### +// refreshConnectIps is called periodically by the persistent connector. It is responsible for connecting to all +// persistent IP addresses that we are not already connected to. func (nm *NetworkManager) refreshConnectIps() { // Connect to addresses passed via the --connect-ips flag. These addresses are persistent in the sense that if we // disconnect from one, we will try to reconnect to the same one. @@ -296,6 +434,8 @@ func (nm *NetworkManager) refreshConnectIps() { // ## Validator Connections // ########################### +// SetActiveValidatorsMap is called by the owner of the NetworkManager to update the activeValidatorsMap. This should +// generally be done whenever the active validators set changes. func (nm *NetworkManager) SetActiveValidatorsMap(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { nm.activeValidatorsMapLock.Lock() defer nm.activeValidatorsMapLock.Unlock() @@ -314,7 +454,7 @@ func (nm *NetworkManager) getActiveValidatorsMap() *collections.ConcurrentMap[bl func (nm *NetworkManager) refreshValidatorIndex(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { // De-index inactive validators. We skip any checks regarding RemoteNodes connection status, nor do we verify whether // de-indexing the validator would result in an excess number of outbound/inbound connections. Any excess connections - // will be cleaned up by the peer connector. + // will be cleaned up by the NonValidator connector. 
validatorRemoteNodeMap := nm.rnManager.GetValidatorIndex().ToMap() for pk, rn := range validatorRemoteNodeMap { // If the validator is no longer active, de-index it. @@ -367,6 +507,7 @@ func (nm *NetworkManager) connectValidators(activeValidatorsMap *collections.Con if exists { continue } + // If the validator is our node, continue. if nm.blsKeystore.GetSigner().GetPublicKey().Serialize() == pk { continue } @@ -392,8 +533,8 @@ func (nm *NetworkManager) connectValidators(activeValidatorsMap *collections.Con // ## NonValidator Connections // ########################### -// refreshNonValidatorOutboundIndex is called periodically by the peer connector. It is responsible for disconnecting excess -// outbound remote nodes. +// refreshNonValidatorOutboundIndex is called periodically by the NonValidator connector. It is responsible for +// disconnecting excess outbound remote nodes. func (nm *NetworkManager) refreshNonValidatorOutboundIndex() { // There are three categories of outbound remote nodes: attempted, connected, and persistent. All of these // remote nodes are stored in the same non-validator outbound index. 
We want to disconnect excess remote nodes that @@ -435,7 +576,7 @@ func (nm *NetworkManager) refreshNonValidatorOutboundIndex() { break } glog.V(2).Infof("NetworkManager.refreshNonValidatorOutboundIndex: Disconnecting attempted remote "+ - "node (id=%v) due to excess outbound peers", rn.GetId()) + "node (id=%v) due to excess outbound RemoteNodes", rn.GetId()) nm.rnManager.Disconnect(rn) excessiveOutboundRemoteNodes-- } @@ -445,7 +586,7 @@ func (nm *NetworkManager) refreshNonValidatorOutboundIndex() { break } glog.V(2).Infof("NetworkManager.refreshNonValidatorOutboundIndex: Disconnecting connected remote "+ - "node (id=%v) due to excess outbound peers", rn.GetId()) + "node (id=%v) due to excess outbound RemoteNodes", rn.GetId()) nm.rnManager.Disconnect(rn) excessiveOutboundRemoteNodes-- } @@ -464,12 +605,18 @@ func (nm *NetworkManager) refreshNonValidatorInboundIndex() { inboundRemoteNodes := nm.rnManager.GetNonValidatorInboundIndex().GetAll() var connectedInboundRemoteNodes []*RemoteNode for _, rn := range inboundRemoteNodes { - // We only want to disconnect remote nodes that have completed handshake. + // We only want to disconnect remote nodes that have completed handshake. RemoteNodes that don't have the + // handshake completed status could be validators, in which case we don't want to disconnect them. It is also + // possible that the RemoteNodes without completed handshake will end up never finishing it, in which case + // they will be removed by the cleanup goroutine, once the handshake timeout is reached. if rn.IsHandshakeCompleted() { connectedInboundRemoteNodes = append(connectedInboundRemoteNodes, rn) } } + // Having separated the connected remote nodes, we can now find the actual number of connected inbound remote nodes + // that have completed handshake. We can then find out how many remote nodes we need to disconnect. 
+ numConnectedInboundRemoteNodes = uint32(len(connectedInboundRemoteNodes)) excessiveInboundRemoteNodes := uint32(0) if numConnectedInboundRemoteNodes > nm.targetNonValidatorInboundRemoteNodes { excessiveInboundRemoteNodes = numConnectedInboundRemoteNodes - nm.targetNonValidatorInboundRemoteNodes @@ -479,24 +626,40 @@ func (nm *NetworkManager) refreshNonValidatorInboundIndex() { break } glog.V(2).Infof("NetworkManager.refreshNonValidatorInboundIndex: Disconnecting inbound remote "+ - "node (id=%v) due to excess inbound peers", rn.GetId()) + "node (id=%v) due to excess inbound RemoteNodes", rn.GetId()) nm.rnManager.Disconnect(rn) excessiveInboundRemoteNodes-- } } +// connectNonValidators attempts to connect to new outbound nonValidator remote nodes. It is called periodically by the +// nonValidator connector. func (nm *NetworkManager) connectNonValidators() { - numOutboundPeers := uint32(nm.rnManager.GetNonValidatorOutboundIndex().Count()) - - remainingOutboundPeers := uint32(0) - if numOutboundPeers < nm.targetNonValidatorOutboundRemoteNodes { - remainingOutboundPeers = nm.targetNonValidatorOutboundRemoteNodes - numOutboundPeers + // First, find all nonValidator outbound remote nodes that are not persistent. + allOutboundRemoteNodes := nm.rnManager.GetNonValidatorOutboundIndex().GetAll() + var nonValidatorOutboundRemoteNodes []*RemoteNode + for _, rn := range allOutboundRemoteNodes { + if rn.IsPersistent() || rn.IsExpectedValidator() { + // We do nothing for persistent remote nodes or expected validators. + continue + } else { + nonValidatorOutboundRemoteNodes = append(nonValidatorOutboundRemoteNodes, rn) + } + } + // Now find the number of nonValidator, non-persistent outbound remote nodes. + numOutboundRemoteNodes := uint32(len(nonValidatorOutboundRemoteNodes)) + remainingOutboundRemoteNodes := uint32(0) + // Check if we need to connect to more nonValidator outbound remote nodes. 
+ if numOutboundRemoteNodes < nm.targetNonValidatorOutboundRemoteNodes { + remainingOutboundRemoteNodes = nm.targetNonValidatorOutboundRemoteNodes - numOutboundRemoteNodes } - for ii := uint32(0); ii < remainingOutboundPeers; ii++ { + for ii := uint32(0); ii < remainingOutboundRemoteNodes; ii++ { + // Get a random unconnected address from the address manager. If we can't find one, we break out of the loop. addr := nm.getRandomUnconnectedAddress() if addr == nil { break } + // Attempt to connect to the address. nm.AddrMgr.Attempt(addr) if err := nm.rnManager.CreateNonValidatorOutboundConnection(addr); err != nil { glog.V(2).Infof("NetworkManager.connectNonValidators: Problem creating non-validator outbound "+ @@ -505,6 +668,7 @@ func (nm *NetworkManager) connectNonValidators() { } } +// getRandomUnconnectedAddress returns a random address from the address manager that we are not already connected to. func (nm *NetworkManager) getRandomUnconnectedAddress() *wire.NetAddress { for tries := 0; tries < 100; tries++ { addr := nm.AddrMgr.GetAddress() @@ -532,6 +696,10 @@ func (nm *NetworkManager) getRandomUnconnectedAddress() *wire.NetAddress { return nil } +// ########################### +// ## RemoteNode Dial Functions +// ########################### + func (nm *NetworkManager) CreateValidatorConnection(ipStr string, publicKey *bls.PublicKey) error { netAddr, err := nm.ConvertIPStringToNetAddress(ipStr) if err != nil { @@ -556,102 +724,9 @@ func (nm *NetworkManager) CreateNonValidatorOutboundConnection(ipStr string) err return nm.rnManager.CreateNonValidatorOutboundConnection(netAddr) } -func (nm *NetworkManager) SetTargetOutboundPeers(numPeers uint32) { - nm.targetNonValidatorOutboundRemoteNodes = numPeers -} - -// processInboundConnection is called when a new inbound connection is established. At this point, the connection is not validated, -// nor is it assigned to a RemoteNode. 
This function is responsible for validating the connection and creating a RemoteNode from it. -// Once the RemoteNode is created, we will initiate handshake. -func (nm *NetworkManager) processInboundConnection(conn Connection) (*RemoteNode, error) { - var ic *inboundConnection - var ok bool - if ic, ok = conn.(*inboundConnection); !ok { - return nil, fmt.Errorf("NetworkManager.handleInboundConnection: Connection is not an inboundConnection") - } - - // If we want to limit inbound connections to one per IP address, check to make sure this address isn't already connected. - if nm.limitOneInboundRemoteNodePerIP && - nm.isDuplicateInboundIPAddress(ic.connection.RemoteAddr()) { - - return nil, fmt.Errorf("NetworkManager.handleInboundConnection: Rejecting INBOUND peer (%s) due to "+ - "already having an inbound connection from the same IP with limit_one_inbound_connection_per_ip set", - ic.connection.RemoteAddr().String()) - } - - na, err := nm.ConvertIPStringToNetAddress(ic.connection.RemoteAddr().String()) - if err != nil { - return nil, errors.Wrapf(err, "NetworkManager.handleInboundConnection: Problem calling "+ - "ConvertIPStringToNetAddress for addr: (%s)", ic.connection.RemoteAddr().String()) - } - - remoteNode, err := nm.rnManager.AttachInboundConnection(ic.connection, na) - if remoteNode == nil || err != nil { - return nil, errors.Wrapf(err, "NetworkManager.handleInboundConnection: Problem calling "+ - "AttachInboundConnection for addr: (%s)", ic.connection.RemoteAddr().String()) - } - - return remoteNode, nil -} - -// processOutboundConnection is called when a new outbound connection is established. At this point, the connection is not validated, -// nor is it assigned to a RemoteNode. This function is responsible for validating the connection and creating a RemoteNode from it. -// Once the RemoteNode is created, we will initiate handshake. 
-func (nm *NetworkManager) processOutboundConnection(conn Connection) (*RemoteNode, error) { - var oc *outboundConnection - var ok bool - if oc, ok = conn.(*outboundConnection); !ok { - return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Connection is not an outboundConnection") - } - - if oc.failed { - return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Failed to connect to peer (%s:%v)", - oc.address.IP.String(), oc.address.Port) - } - - if !oc.isPersistent { - nm.AddrMgr.Connected(oc.address) - nm.AddrMgr.Good(oc.address) - } - - // If this is a non-persistent outbound peer and the group key overlaps with another peer we're already connected to then - // abort mission. We only connect to one peer per IP group in order to prevent Sybil attacks. - if !oc.isPersistent && nm.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { - return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Rejecting OUTBOUND NON-PERSISTENT "+ - "connection with redundant group key (%s).", addrmgr.GroupKey(oc.address)) - } - - na, err := nm.ConvertIPStringToNetAddress(oc.connection.RemoteAddr().String()) - if err != nil { - return nil, errors.Wrapf(err, "NetworkManager.handleOutboundConnection: Problem calling ipToNetAddr "+ - "for addr: (%s)", oc.connection.RemoteAddr().String()) - } - - // Attach the connection before additional validation steps because it is already established. - remoteNode, err := nm.rnManager.AttachOutboundConnection(oc.connection, na, oc.attemptId, oc.isPersistent) - if remoteNode == nil || err != nil { - return nil, errors.Wrapf(err, "NetworkManager.handleOutboundConnection: Problem calling rnManager.AttachOutboundConnection "+ - "for addr: (%s)", oc.connection.RemoteAddr().String()) - } - - // If this is a persistent remote node or a validator, we don't need to do any extra connection validation. 
- if remoteNode.IsPersistent() || remoteNode.IsExpectedValidator() { - return remoteNode, nil - } - - // If we get here, it means we're dealing with a non-persistent or non-validator remote node. We perform additional - // connection validation. - - // If the group key overlaps with another peer we're already connected to then abort mission. We only connect to - // one peer per IP group in order to prevent Sybil attacks. - if nm.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { - return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Rejecting OUTBOUND NON-PERSISTENT "+ - "connection with redundant group key (%s).", addrmgr.GroupKey(oc.address)) - } - nm.cmgr.AddToGroupKey(na) - - return remoteNode, nil -} +// ########################### +// ## Helper Functions +// ########################### func (nm *NetworkManager) ConvertIPStringToNetAddress(ipStr string) (*wire.NetAddress, error) { netAddr, err := IPToNetAddr(ipStr, nm.AddrMgr, nm.params) diff --git a/lib/server.go b/lib/server.go index 5efd755f6..9eff29d98 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1711,8 +1711,8 @@ func (srv *Server) _cleanupDonePeerState(pp *Peer) { }, false) } -func (srv *Server) _handleDonePeer(pp *Peer) { - glog.V(1).Infof("Server._handleDonePeer: Processing DonePeer: %v", pp) +func (srv *Server) _handleDisconnectedPeerMessage(pp *Peer) { + glog.V(1).Infof("Server._handleDisconnectedPeerMessage: Processing DonePeer: %v", pp) srv._cleanupDonePeerState(pp) @@ -2313,8 +2313,8 @@ func (srv *Server) _handleControlMessages(serverMessage *ServerMessage) (_should switch serverMessage.Msg.(type) { // Control messages used internally to signal to the server. 
case *MsgDeSoDisconnectedPeer: - srv._handleDonePeer(serverMessage.Peer) - srv.networkManager._handleDonePeerMessage(serverMessage.Peer, serverMessage.Msg) + srv._handleDisconnectedPeerMessage(serverMessage.Peer) + srv.networkManager._handleDisconnectedPeerMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoNewConnection: srv.networkManager._handleNewConnectionMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoQuit: From 539059d71092d0a9d53392f6b43cf669a1d49d8f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 31 Jan 2024 09:42:21 -0500 Subject: [PATCH 420/762] Exclude Block Proposer Utility Fee from Block Reward (#978) --- lib/pos_block_producer.go | 27 +++++++++++++++++-- lib/pos_block_producer_test.go | 48 ++++++++++++++++++++++++---------- 2 files changed, 59 insertions(+), 16 deletions(-) diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 72344a50e..835f214e9 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -133,8 +133,12 @@ func (pbp *PosBlockProducer) createBlockWithoutHeader( // Get block transactions from the mempool. feeTimeTxns, txnConnectStatusByIndex, maxUtilityFee, err := pbp.getBlockTransactions( - latestBlockView, newBlockHeight, newBlockTimestampNanoSecs, - pbp.params.MinerMaxBlockSizeBytes-uint64(len(blockRewardTxnSizeBytes))) + pbp.proposerPublicKey, + latestBlockView, + newBlockHeight, + newBlockTimestampNanoSecs, + pbp.params.MinerMaxBlockSizeBytes-uint64(len(blockRewardTxnSizeBytes)), + ) if err != nil { return nil, errors.Wrapf(err, "PosBlockProducer.createBlockWithoutHeader: Problem retrieving block transactions: ") } @@ -150,6 +154,7 @@ func (pbp *PosBlockProducer) createBlockWithoutHeader( // getBlockTransactions is used to retrieve fee-time ordered transactions from the mempool. 
func (pbp *PosBlockProducer) getBlockTransactions( + blockProducerPublicKey *PublicKey, latestBlockView *UtxoView, newBlockHeight uint64, newBlockTimestampNanoSecs uint64, @@ -177,6 +182,7 @@ func (pbp *PosBlockProducer) getBlockTransactions( if err != nil { return nil, nil, 0, errors.Wrapf(err, "Error getting transaction size: ") } + // Skip over transactions that are too big. if currentBlockSize+uint64(len(txnBytes)) > maxBlockSizeBytes { continue @@ -196,6 +202,13 @@ func (pbp *PosBlockProducer) getBlockTransactions( txnConnectStatusByIndex.Set(len(blocksTxns), true) blocksTxns = append(blocksTxns, txn.GetTxn()) currentBlockSize += uint64(len(txnBytes)) + + // If the transactor is the block producer, then they won't receive the utility + // fee. + if blockProducerPublicKey.Equal(*NewPublicKey(txn.PublicKey)) { + continue + } + // Compute BMF for the transaction. _, utilityFee := computeBMF(fees) maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee) @@ -204,11 +217,13 @@ func (pbp *PosBlockProducer) getBlockTransactions( } continue } + // If the transaction didn't connect, we will try to add it as a failing transaction. blockUtxoViewCopy, err = blockUtxoView.CopyUtxoView() if err != nil { return nil, nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") } + _, _, utilityFee, err := blockUtxoViewCopy._connectFailingTransaction(txn.GetTxn(), uint32(newBlockHeight), true) if err != nil { // If the transaction still doesn't connect, this means we encountered an invalid transaction. We will skip @@ -216,12 +231,20 @@ func (pbp *PosBlockProducer) getBlockTransactions( // process, so we don't need to worry about it here. continue } + // If we get to this point, it means the transaction didn't connect but it was a valid transaction. We will // add it to the block as a failing transaction. 
blockUtxoView = blockUtxoViewCopy txnConnectStatusByIndex.Set(len(blocksTxns), false) blocksTxns = append(blocksTxns, txn.GetTxn()) currentBlockSize += uint64(len(txnBytes)) + + // If the transactor is the block producer, then they won't receive the utility + // fee. + if blockProducerPublicKey.Equal(*NewPublicKey(txn.PublicKey)) { + continue + } + maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee) if err != nil { return nil, nil, 0, errors.Wrapf(err, "Error computing max utility fee: ") diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 7ae190ed0..acee06f72 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -89,6 +89,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { feeMax := uint64(2000) passingTransactions := 50 m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + m1PubBytes, _, _ := Base58CheckDecode(m1Pub) blsPubKey, _ := _generateValidatorVotingPublicKeyAndSignature(t) params, db := _posTestBlockchainSetupWithBalances(t, 200000, 200000) params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 @@ -118,18 +119,37 @@ func TestCreateBlockWithoutHeader(t *testing.T) { _wrappedPosMempoolAddTransaction(t, mempool, txn) } - pbp := NewPosBlockProducer(mempool, params, chainlib.NewMedianTime(), NewPublicKey(m0PubBytes), blsPubKey) - txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions( - latestBlockView, 3, 0, 50000) - require.NoError(err) + // Test cases where the block producer is the transactor for the mempool txns + { + pbp := NewPosBlockProducer(mempool, params, chainlib.NewMedianTime(), NewPublicKey(m0PubBytes), blsPubKey) + txns, txnConnectStatus, _, err := pbp.getBlockTransactions( + NewPublicKey(m0PubBytes), latestBlockView, 3, 0, 50000) + require.NoError(err) - blockTemplate, err := pbp.createBlockWithoutHeader(latestBlockView, 3, 0) - require.NoError(err) - require.Equal(txns, blockTemplate.Txns[1:]) - require.Equal(txnConnectStatus, 
blockTemplate.TxnConnectStatusByIndex) - require.Equal(maxUtilityFee, blockTemplate.Txns[0].TxOutputs[0].AmountNanos) - require.Equal(NewMessage(MsgTypeHeader).(*MsgDeSoHeader), blockTemplate.Header) - require.Nil(blockTemplate.BlockProducerInfo) + blockTemplate, err := pbp.createBlockWithoutHeader(latestBlockView, 3, 0) + require.NoError(err) + require.Equal(txns, blockTemplate.Txns[1:]) + require.Equal(txnConnectStatus, blockTemplate.TxnConnectStatusByIndex) + require.Equal(uint64(0), blockTemplate.Txns[0].TxOutputs[0].AmountNanos) + require.Equal(NewMessage(MsgTypeHeader).(*MsgDeSoHeader), blockTemplate.Header) + require.Nil(blockTemplate.BlockProducerInfo) + } + + // Test cases where the block producer is not the transactor for the mempool txns + { + pbp := NewPosBlockProducer(mempool, params, chainlib.NewMedianTime(), NewPublicKey(m1PubBytes), blsPubKey) + txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions( + NewPublicKey(m1PubBytes), latestBlockView, 3, 0, 50000) + require.NoError(err) + + blockTemplate, err := pbp.createBlockWithoutHeader(latestBlockView, 3, 0) + require.NoError(err) + require.Equal(txns, blockTemplate.Txns[1:]) + require.Equal(txnConnectStatus, blockTemplate.TxnConnectStatusByIndex) + require.Equal(maxUtilityFee, blockTemplate.Txns[0].TxOutputs[0].AmountNanos) + require.Equal(NewMessage(MsgTypeHeader).(*MsgDeSoHeader), blockTemplate.Header) + require.Nil(blockTemplate.BlockProducerInfo) + } } func TestGetBlockTransactions(t *testing.T) { @@ -175,7 +195,7 @@ func TestGetBlockTransactions(t *testing.T) { _wrappedPosMempoolAddTransaction(t, mempool, txn) } - pbp := NewPosBlockProducer(mempool, params, chainlib.NewMedianTime(), nil, nil) + pbp := NewPosBlockProducer(mempool, params, chainlib.NewMedianTime(), NewPublicKey(m1PubBytes), nil) _testProduceBlockNoSizeLimit(t, mempool, pbp, latestBlockView, 3, len(passingTxns), 0, 0) @@ -225,7 +245,7 @@ func TestGetBlockTransactions(t *testing.T) { latestBlockViewCopy, err := 
latestBlockView.CopyUtxoView() require.NoError(err) - txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions(latestBlockView, 3, 0, 1000) + txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions(NewPublicKey(m1PubBytes), latestBlockView, 3, 0, 1000) require.NoError(err) require.Equal(latestBlockViewCopy, latestBlockView) require.Equal(true, len(passingTxns) > len(txns)) @@ -269,7 +289,7 @@ func _testProduceBlockNoSizeLimit(t *testing.T, mp *PosMempool, pbp *PosBlockPro latestBlockViewCopy, err := latestBlockView.CopyUtxoView() require.NoError(err) - txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions(latestBlockView, blockHeight, 0, math.MaxUint64) + txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions(pbp.proposerPublicKey, latestBlockView, blockHeight, 0, math.MaxUint64) require.NoError(err) require.Equal(latestBlockViewCopy, latestBlockView) require.Equal(totalAcceptedTxns, len(txns)) From 4c327dfabc5e951141e55e4ec4caaeba572970f8 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 31 Jan 2024 11:30:54 -0500 Subject: [PATCH 421/762] Reject PoS Blocks in processBlockPoW and processHeaderPoW (#980) * Reject PoS blocks in processBlockPoW and processHeaderPoW * Fix isTipCurrent --- lib/blockchain.go | 31 ++++++++++++++++++------------- lib/errors.go | 2 ++ 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index dc19b77a5..364e0ec53 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1154,7 +1154,7 @@ func (bc *Blockchain) isTipCurrent(tip *BlockNode) bool { minChainWorkBytes, _ := hex.DecodeString(bc.params.MinChainWorkHex) // Not current if the cumulative work is below the threshold. 
- if tip.CumWork.Cmp(BytesToBigint(minChainWorkBytes)) < 0 { + if bc.IsPoWBlockHeight(uint64(tip.Height)) && tip.CumWork.Cmp(BytesToBigint(minChainWorkBytes)) < 0 { //glog.V(2).Infof("Blockchain.isTipCurrent: Tip not current because "+ //"CumWork (%v) is less than minChainWorkBytes (%v)", //tip.CumWork, BytesToBigint(minChainWorkBytes)) @@ -1708,6 +1708,11 @@ func updateBestChainInMemory(mainChainList []*BlockNode, mainChainMap map[BlockH // Caller must acquire the ChainLock for writing prior to calling this. func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *BlockHash) (_isMainChain bool, _isOrphan bool, _err error) { + // Only accept the header if its height is below the PoS cutover height. + if !bc.IsPoWBlockHeight(blockHeader.Height) { + return false, false, HeaderErrorBlockHeightAfterProofOfStakeCutover + } + // Start by checking if the header already exists in our node // index. If it does, then return an error. We should generally // expect that processHeaderPoW will only be called on headers we @@ -1889,11 +1894,10 @@ func (bc *Blockchain) ProcessHeader(blockHeader *MsgDeSoHeader, headerHash *Bloc return false, false, fmt.Errorf("ProcessHeader: Header is nil") } - // If the header's height is after the PoS cut-over fork height, then we use the PoS header processing logic. Otherwise, fall back - // to the PoW logic. - if blockHeader.Height >= uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { - // TODO: call bc.processHeaderPoS(blockHeader, headerHash) instead - return bc.processHeaderPoW(blockHeader, headerHash) + // If the header's height is after the PoS cut-over fork height, then we use the PoS header processing logic. + // Otherwise, fall back to the PoW logic. 
+ if bc.IsPoSBlockHeight(blockHeader.Height) { + return bc.processHeaderPoS(blockHeader) } return bc.processHeaderPoW(blockHeader, headerHash) @@ -1908,12 +1912,10 @@ func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures boo return false, false, nil, fmt.Errorf("ProcessBlock: Block is nil") } - // If the block's height is after the PoS cut-over fork height, then we use the PoS block processing logic. Otherwise, fall back - // to the PoW logic. - if desoBlock.Header.Height >= uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { - // TODO: call bc.processBlockPoS(desoBlock, verifySignatures) instead - isMainChain, isOrphan, err := bc.processBlockPoW(desoBlock, verifySignatures) - return isMainChain, isOrphan, nil, err + // If the block's height is after the PoS cut-over fork height, then we use the PoS block processing logic. + // Otherwise, fall back to the PoW logic. + if bc.IsPoSBlockHeight(desoBlock.Header.Height) { + return bc.processBlockPoS(desoBlock, 1, verifySignatures) } isMainChain, isOrphan, err := bc.processBlockPoW(desoBlock, verifySignatures) @@ -1921,7 +1923,10 @@ func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures boo } func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, err error) { - // TODO: Move this to be more isolated. + // Only accept the block if its height is below the PoS cutover height. 
+ if !bc.IsPoWBlockHeight(desoBlock.Header.Height) { + return false, false, RuleErrorBlockHeightAfterProofOfStakeCutover + } blockHeight := uint64(bc.BlockTip().Height + 1) diff --git a/lib/errors.go b/lib/errors.go index a8d68188f..64211b118 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -61,6 +61,7 @@ const ( RuleErrorInputsWithDifferingSpendKeys RuleError = "RuleErrorInputsWithDifferingSpendKeys" RuleErrorInvalidTransactionSignature RuleError = "RuleErrorInvalidTransactionSignature" RuleErrorBlockRewardTxnMustHaveOneOutput RuleError = "RuleErrorBlockRewardTxnMustHaveOneOutput" + RuleErrorBlockHeightAfterProofOfStakeCutover RuleError = "RuleErrorBlockHeightAfterProofOfStakeCutover" RuleErrorMissingBlockProducerSignature RuleError = "RuleErrorMissingBlockProducerSignature" RuleErrorInvalidBlockProducerPublicKey RuleError = "RuleErrorInvalidBlockProducerPublicKey" @@ -538,6 +539,7 @@ const ( HeaderErrorBlockDifficultyAboveTarget RuleError = "HeaderErrorBlockDifficultyAboveTarget" HeaderErrorHeightInvalid RuleError = "HeaderErrorHeightInvalid" HeaderErrorDifficultyBitsNotConsistentWithTargetDifficultyComputedFromParent RuleError = "HeaderErrorDifficultyBitsNotConsistentWithTargetDifficultyComputedFromParent" + HeaderErrorBlockHeightAfterProofOfStakeCutover RuleError = "HeaderErrorBlockHeightAfterProofOfStakeCutover" TxErrorTooLarge RuleError = "TxErrorTooLarge" TxErrorDuplicate RuleError = "TxErrorDuplicate" From a6b26b73aa11ae3ac01865adbec791f38135dc9b Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Wed, 31 Jan 2024 09:35:25 -0800 Subject: [PATCH 422/762] Rewording (#981) --- lib/remote_node.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/remote_node.go b/lib/remote_node.go index 6fb5e13ba..07610f8ec 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -35,8 +35,8 @@ func (id RemoteNodeId) ToUint64() uint64 { return uint64(id) } -// RemoteNode is a consensus-aware wrapper 
around the network Peer object. It is used to manage the lifecycle of a peer -// and to store consensus-related metadata about the peer. The RemoteNode can wrap around either an inbound or outbound +// RemoteNode is a chain-aware wrapper around the network Peer object. It is used to manage the lifecycle of a peer +// and to store blockchain-related metadata about the peer. The RemoteNode can wrap around either an inbound or outbound // peer connection. For outbound peers, the RemoteNode is created prior to the connection being established. In this case, // the RemoteNode will be first used to initiate an OutboundConnectionAttempt, and then store the resulting connected peer. // For inbound peers, the RemoteNode is created after the connection is established in ConnectionManager. From 3434c9a4ff56534d63f9b5e97bb24e0a6be1b63e Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 31 Jan 2024 13:04:43 -0500 Subject: [PATCH 423/762] Simple approach to connecting failing transactions properly (#979) --- lib/block_view.go | 43 ++++++++++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 141a9fa0e..4e2e6d279 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4058,35 +4058,52 @@ func (bav *UtxoView) ConnectBlock( for txIndex, txn := range desoBlock.Txns { txHash := txHashes[txIndex] - // ConnectTransaction validates all of the transactions in the block and - // is responsible for verifying signatures. - // - // TODO: We currently don't check that the min transaction fee is satisfied when - // connecting blocks. We skip this check because computing the transaction's size - // would slow down block processing significantly. We should figure out a way to - // enforce this check in the future, but for now the only attack vector is one in - // which a miner is trying to spam the network, which should generally never happen. 
- utxoOpsForTxn, totalInput, totalOutput, currentFees, err := bav.ConnectTransaction(txn, txHash, 0, uint32(blockHeader.Height), blockHeader.TstampNanoSecs, verifySignatures, false) - _, _ = totalInput, totalOutput // A bit surprising we don't use these // After the PoS cutover, we need to check if the transaction is a failing transaction. txnConnects := blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) || (txIndex == 0 && txn.TxnMeta.GetTxnType() == TxnTypeBlockReward) || desoBlock.TxnConnectStatusByIndex.Get(txIndex-1) + var utilityFee uint64 + var utxoOpsForTxn []*UtxoOperation + var err error + var currentFees uint64 if txnConnects { + // ConnectTransaction validates all of the transactions in the block and + // is responsible for verifying signatures. + // + // TODO: We currently don't check that the min transaction fee is satisfied when + // connecting blocks. We skip this check because computing the transaction's size + // would slow down block processing significantly. We should figure out a way to + // enforce this check in the future, but for now the only attack vector is one in + // which a miner is trying to spam the network, which should generally never happen. + utxoOpsForTxn, _, _, currentFees, err = bav.ConnectTransaction( + txn, txHash, 0, uint32(blockHeader.Height), blockHeader.TstampNanoSecs, verifySignatures, false) if err != nil { return nil, errors.Wrapf(err, "ConnectBlock: error connecting txn #%d", txIndex) } _, utilityFee = computeBMF(currentFees) } else { + // If the transaction is not supposed to connect, we need to verify that it won't connect. + // We need to construct a copy of the view to verify that the transaction won't connect + // without side effects. 
+ var utxoViewCopy *UtxoView + utxoViewCopy, err = bav.CopyUtxoView() + if err != nil { + return nil, errors.Wrapf(err, "ConnectBlock: error copying UtxoView") + } + _, _, _, _, err = utxoViewCopy.ConnectTransaction( + txn, txHash, 0, uint32(blockHeader.Height), blockHeader.TstampNanoSecs, verifySignatures, false) if err == nil { - return nil, errors.Wrapf(err, "ConnectBlock: txn #%d should not connect based on "+ - "TxnConnectStatusByIndex but err is nil", txIndex) + return nil, errors.Wrapf(err, "ConnectBlock: txn #%d should not connect but err is nil", txIndex) } - utxoOpsForTxn, _, utilityFee, err = bav._connectFailingTransaction(txn, uint32(blockHeader.Height), verifySignatures) + var burnFee uint64 + // Connect the failing transaction to get the fees and utility fee. + utxoOpsForTxn, burnFee, utilityFee, err = bav._connectFailingTransaction( + txn, uint32(blockHeader.Height), verifySignatures) if err != nil { return nil, errors.Wrapf(err, "ConnectBlock: error connecting failing txn #%d", txIndex) } + currentFees = burnFee + utilityFee } // After the block reward patch block height, we only include fees from transactions From e22b81e18f367d3b65b4df648f924260e4220814 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 31 Jan 2024 10:16:14 -0800 Subject: [PATCH 424/762] Resolve timestamp issues in TxIndex for e2e testing. 
--- lib/block_producer.go | 2 +- lib/block_view_lockups.go | 4 ++++ lib/legacy_mempool.go | 5 +++-- lib/txindex.go | 3 ++- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/lib/block_producer.go b/lib/block_producer.go index 122aead36..cf2e5df3c 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -295,7 +295,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) var feeNanos uint64 _, _, _, feeNanos, err = feesUtxoView._connectTransaction( txnInBlock, txnInBlock.Hash(), 0, uint32(blockRet.Header.Height), - 0, false, false) + int64(blockRet.Header.TstampNanoSecs), false, false) if err != nil { return nil, nil, nil, fmt.Errorf( "DeSoBlockProducer._getBlockTemplate: Error attaching txn to UtxoView for computed block: %v", err) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index a43302b98..3e5e7c08b 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -135,6 +135,10 @@ func (lockedBalanceEntry *LockedBalanceEntry) ToMapKey() LockedBalanceEntryKey { } } +func (lockedBalanceEntry *LockedBalanceEntry) IsDeleted() bool { + return lockedBalanceEntry.isDeleted +} + // DeSoEncoder Interface Implementation for LockedBalanceEntry func (lockedBalanceEntry *LockedBalanceEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 22ab50ac3..970c27a37 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2124,12 +2124,13 @@ func _computeBitcoinExchangeFields(params *DeSoParams, func ConnectTxnAndComputeTransactionMetadata( txn *MsgDeSoTxn, utxoView *UtxoView, blockHash *BlockHash, - blockHeight uint32, txnIndexInBlock uint64) (*TransactionMetadata, error) { + blockHeight uint32, blockTimestampNanoSecs int64, txnIndexInBlock uint64) (*TransactionMetadata, error) { totalNanosPurchasedBefore := utxoView.NanosPurchased usdCentsPerBitcoinBefore := utxoView.GetCurrentUSDCentsPerBitcoin() 
utxoOps, totalInput, totalOutput, fees, err := utxoView._connectTransaction( - txn, txn.Hash(), 0, blockHeight, 0, false, false) + txn, txn.Hash(), 0, blockHeight, blockTimestampNanoSecs, + false, false) if err != nil { return nil, fmt.Errorf( "UpdateTxindex: Error connecting txn to UtxoView: %v", err) diff --git a/lib/txindex.go b/lib/txindex.go index ae9d01b19..8cca493f7 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -419,7 +419,8 @@ func (txi *TXIndex) Update() error { // - add all its mappings to the db. for txnIndexInBlock, txn := range blockMsg.Txns { txnMeta, err := ConnectTxnAndComputeTransactionMetadata( - txn, utxoView, blockToAttach.Hash, blockToAttach.Height, uint64(txnIndexInBlock)) + txn, utxoView, blockToAttach.Hash, blockToAttach.Height, + int64(blockToAttach.Header.TstampNanoSecs), uint64(txnIndexInBlock)) if err != nil { return fmt.Errorf("Update: Problem connecting txn %v to txindex: %v", txn, err) From 7e3b9a6a87d50877a6c13a098a6e1b0606e5b0aa Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 31 Jan 2024 13:32:48 -0500 Subject: [PATCH 425/762] Support Signature OpCodes in consensus Package (#982) * Support Signature OpCodes in consensus Package * Naming cleanup * Add back Sign function * Addres Nina's comments --- consensus/types.go | 16 +++++++++++++++ consensus/utils.go | 11 ++++++++-- lib/bls_keystore.go | 44 ++++++++++++++++++++-------------------- lib/bls_keystore_test.go | 16 +++++++++++++++ 4 files changed, 63 insertions(+), 24 deletions(-) diff --git a/consensus/types.go b/consensus/types.go index 44f51c353..b724b062e 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -36,6 +36,22 @@ type FastHotStuffEvent struct { AggregateQC AggregateQuorumCertificate } +// SignatureOpCode is a way for the FastHotStuffEventLoop to differentiate between different types of +// BLS signatures. 
This is used to ensure that the event loop doesn't accidentally sign two different +// message types with the same signature. +// - SignatureOpCodeValidatorVote: The BLS signature is for a validator vote message +// - SignatureOpCodeValidatorTimeout: The BLS signature is for a validator timeout message +type SignatureOpCode byte + +const ( + SignatureOpCodeValidatorVote SignatureOpCode = 1 + SignatureOpCodeValidatorTimeout SignatureOpCode = 2 +) + +func (opCode SignatureOpCode) ToBytes() []byte { + return []byte{byte(opCode)} +} + // The maximum number of consecutive timeouts that can occur before the event loop stops // its exponential back-off. This is a safety valve that helps ensure that the event loop // doesn't get stuck in a near indefinite back-off state. diff --git a/consensus/utils.go b/consensus/utils.go index d0c57253c..810992925 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -146,9 +146,13 @@ func isSuperMajorityStakeSignersList(signersList *bitset.Bitset, validators []Va func GetVoteSignaturePayload(view uint64, blockHash BlockHash) [32]byte { viewBytes := make([]byte, 8) binary.BigEndian.PutUint64(viewBytes, view) + blockHashBytes := blockHash.GetValue() - return sha3.Sum256(append(viewBytes, blockHashBytes[:]...)) + payload := append(SignatureOpCodeValidatorVote.ToBytes(), viewBytes...) + payload = append(payload, blockHashBytes[:]...) + + return sha3.Sum256(payload) } // When timing out for a view, validators sign the payload sha3-256(View, HighQCView) with their BLS @@ -161,7 +165,10 @@ func GetTimeoutSignaturePayload(view uint64, highQCView uint64) [32]byte { highQCViewBytes := make([]byte, 8) binary.BigEndian.PutUint64(highQCViewBytes, highQCView) - return sha3.Sum256(append(viewBytes, highQCViewBytes...)) + payload := append(SignatureOpCodeValidatorTimeout.ToBytes(), viewBytes...) + payload = append(payload, highQCViewBytes...) 
+ + return sha3.Sum256(payload) } func isProperlyFormedBlockWithValidatorList(block BlockWithValidatorList) bool { diff --git a/lib/bls_keystore.go b/lib/bls_keystore.go index 64778d835..cd0ec28f8 100644 --- a/lib/bls_keystore.go +++ b/lib/bls_keystore.go @@ -19,23 +19,25 @@ import ( // We need to associate individual op-codes for each message type that can be signed, so that there is no risk // of signature collisions between different message types. The payload signed per message type must be made // up of the following tuples: -// - Validator Vote: (0x01, view uint64, blockHash consensus.BlockHash) -// - Validator Timeout: (0x02, view uint64, highQCView uint64) -// - PoS Block Proposal: (0x03, view uint64, blockHash consensus.BlockHash) +// - PoS Validator Vote: (0x01, view uint64, blockHash consensus.BlockHash) +// - PoS Validator Timeout: (0x02, view uint64, highQCView uint64) // - PoS Validator Handshake: (0x04, peer's random nonce, our node's random nonce) // - PoS Random Seed Signature: (previous block's random seed hash) type BLSSignatureOpCode byte const ( - BLSSignatureOpCodeValidatorVote BLSSignatureOpCode = 0 - BLSSignatureOpCodeValidatorTimeout BLSSignatureOpCode = 1 - BLSSignatureOpCodePoSBlockProposal BLSSignatureOpCode = 2 + BLSSignatureOpCodeValidatorVote BLSSignatureOpCode = BLSSignatureOpCode(consensus.SignatureOpCodeValidatorVote) + BLSSignatureOpCodeValidatorTimeout BLSSignatureOpCode = BLSSignatureOpCode(consensus.SignatureOpCodeValidatorTimeout) BLSSignatureOpCodePoSValidatorHandshake BLSSignatureOpCode = 3 ) -func (opCode BLSSignatureOpCode) Bytes() []byte { - return []byte{byte(opCode)} +func GetAllBLSSignatureOpCodes() []BLSSignatureOpCode { + return []BLSSignatureOpCode{ + BLSSignatureOpCodeValidatorVote, + BLSSignatureOpCodeValidatorTimeout, + BLSSignatureOpCodePoSValidatorHandshake, + } } ////////////////////////////////////////////////////////// @@ -87,15 +89,14 @@ func NewBLSSigner(privateKey *bls.PrivateKey) (*BLSSigner, error) { 
return &BLSSigner{privateKey: privateKey}, nil } -func (signer *BLSSigner) sign(opCode BLSSignatureOpCode, payload []byte) (*bls.Signature, error) { - newPayload := append(opCode.Bytes(), payload...) - return signer.privateKey.Sign(newPayload) -} - func (signer *BLSSigner) GetPublicKey() *bls.PublicKey { return signer.privateKey.PublicKey() } +func (signer *BLSSigner) Sign(payload []byte) (*bls.Signature, error) { + return signer.privateKey.Sign(payload) +} + func (signer *BLSSigner) SignBlockProposal(view uint64, blockHash consensus.BlockHash) (*bls.Signature, error) { // A block proposer's signature on a block is just its partial vote signature. This allows us to aggregate // signatures from the proposer and validators into a single aggregated signature to build a QC. @@ -104,12 +105,12 @@ func (signer *BLSSigner) SignBlockProposal(view uint64, blockHash consensus.Bloc func (signer *BLSSigner) SignValidatorVote(view uint64, blockHash consensus.BlockHash) (*bls.Signature, error) { payload := consensus.GetVoteSignaturePayload(view, blockHash) - return signer.sign(BLSSignatureOpCodeValidatorVote, payload[:]) + return signer.privateKey.Sign(payload[:]) } func (signer *BLSSigner) SignValidatorTimeout(view uint64, highQCView uint64) (*bls.Signature, error) { payload := consensus.GetTimeoutSignaturePayload(view, highQCView) - return signer.sign(BLSSignatureOpCodeValidatorTimeout, payload[:]) + return signer.privateKey.Sign(payload[:]) } func (signer *BLSSigner) SignRandomSeedHash(randomSeedHash *RandomSeedHash) (*bls.Signature, error) { @@ -119,26 +120,25 @@ func (signer *BLSSigner) SignRandomSeedHash(randomSeedHash *RandomSeedHash) (*bl func (signer *BLSSigner) SignPoSValidatorHandshake(nonceSent uint64, nonceReceived uint64, tstampMicro uint64) (*bls.Signature, error) { // FIXME payload := []byte{} - return signer.sign(BLSSignatureOpCodePoSValidatorHandshake, payload[:]) + return signer.privateKey.Sign(payload[:]) } 
////////////////////////////////////////////////////////// // BLS Verification ////////////////////////////////////////////////////////// -func _blsVerify(opCode BLSSignatureOpCode, payload []byte, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { - newPayload := append(opCode.Bytes(), payload...) - return publicKey.Verify(signature, newPayload) +func _blsVerify(payload []byte, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { + return publicKey.Verify(signature, payload) } func BLSVerifyValidatorVote(view uint64, blockHash consensus.BlockHash, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { payload := consensus.GetVoteSignaturePayload(view, blockHash) - return _blsVerify(BLSSignatureOpCodeValidatorVote, payload[:], signature, publicKey) + return _blsVerify(payload[:], signature, publicKey) } func BLSVerifyValidatorTimeout(view uint64, highQCView uint64, signature *bls.Signature, publicKey *bls.PublicKey) (bool, error) { payload := consensus.GetTimeoutSignaturePayload(view, highQCView) - return _blsVerify(BLSSignatureOpCodeValidatorTimeout, payload[:], signature, publicKey) + return _blsVerify(payload[:], signature, publicKey) } func BLSVerifyPoSValidatorHandshake( @@ -150,5 +150,5 @@ func BLSVerifyPoSValidatorHandshake( ) (bool, error) { // FIXME payload := []byte{} - return _blsVerify(BLSSignatureOpCodePoSValidatorHandshake, payload[:], signature, publicKey) + return _blsVerify(payload[:], signature, publicKey) } diff --git a/lib/bls_keystore_test.go b/lib/bls_keystore_test.go index a96389389..6fee62124 100644 --- a/lib/bls_keystore_test.go +++ b/lib/bls_keystore_test.go @@ -36,3 +36,19 @@ func TestNewBLSKeystore(t *testing.T) { require.Equal(t, keystore.GetSigner().privateKey.ToString(), "0x13b5febb384a3d3dec5c579724872607cd0ddb97adef592efaf144f6d25a70d7") } } + +func TestUniqueBLSSignatureOpCodes(t *testing.T) { + opCodes := GetAllBLSSignatureOpCodes() + require.Len(t, opCodes, 3) + require.Contains(t, 
opCodes, BLSSignatureOpCodeValidatorVote) + require.Contains(t, opCodes, BLSSignatureOpCodeValidatorTimeout) + require.Contains(t, opCodes, BLSSignatureOpCodePoSValidatorHandshake) + + // Ensure no duplicates + uniqueOpCodes := make(map[BLSSignatureOpCode]struct{}) + for _, opCode := range opCodes { + _, exists := uniqueOpCodes[opCode] + require.False(t, exists) + uniqueOpCodes[opCode] = struct{}{} + } +} From 20f2eb4cfcc11d80d88233bd2443d0ec0c2b2d2a Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Wed, 31 Jan 2024 11:31:34 -0800 Subject: [PATCH 426/762] Rename (#986) --- lib/{handshake_controller.go => handshake_manager.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename lib/{handshake_controller.go => handshake_manager.go} (100%) diff --git a/lib/handshake_controller.go b/lib/handshake_manager.go similarity index 100% rename from lib/handshake_controller.go rename to lib/handshake_manager.go From 8e7608bdb29787ea54ba45dccd6bdf0c733297fa Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 31 Jan 2024 17:38:33 -0500 Subject: [PATCH 427/762] Log Block Proposal To stdout (#983) Log Block Proposal To stdout Fix BlockHash log line --- lib/pos_consensus.go | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index dfa6707c3..9b89cef2a 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -1,11 +1,13 @@ package lib import ( + "fmt" "sync" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" + "github.com/golang/glog" "github.com/pkg/errors" ) @@ -265,6 +267,7 @@ func (cc *FastHotStuffConsensus) handleBlockProposalEvent( // TODO: Broadcast the block proposal to the network + cc.logBlockProposal(unsignedBlock, blockHash) return nil } @@ -789,3 +792,36 @@ func isProperlyFormedBlockProposalEvent(event 
*consensus.FastHotStuffEvent) bool return false } + +////////////////////////////////////////// Logging Helper Functions /////////////////////////////////////////////// + +func (fc *FastHotStuffConsensus) logBlockProposal(block *MsgDeSoBlock, blockHash *BlockHash) { + aggQCView := uint64(0) + aggQCNumValidators := 0 + aggQCHighQCViews := "[]" + + if !block.Header.ValidatorsTimeoutAggregateQC.isEmpty() { + aggQCView = block.Header.ValidatorsTimeoutAggregateQC.GetView() + aggQCNumValidators = block.Header.ValidatorsTimeoutAggregateQC.GetAggregatedSignature().GetSignersList().Size() + aggQCHighQCViews = fmt.Sprint(block.Header.ValidatorsTimeoutAggregateQC.GetHighQCViews()) + } + + glog.Infof( + "\n==================================== YOU PROPOSED A NEW FAST-HOTSTUFF BLOCK! ===================================="+ + "\n Timestamp: %d, View: %d, Height: %d, BlockHash: %v"+ + "\n Proposer PKey: %s"+ + "\n Proposer Voting PKey: %s"+ + "\n Proposer Signature: %s"+ + "\n High QC View: %d, High QC Num Validators: %d, High QC BlockHash: %s"+ + "\n Timeout Agg QC View: %d, Timeout Agg QC Num Validators: %d, Timeout High QC Views: %s"+ + "\n Num Block Transactions: %d, Num Transactions Remaining In Mempool: %d"+ + "\n=================================================================================================================", + block.Header.GetTstampSecs(), block.Header.GetView(), block.Header.Height, blockHash.String(), + PkToString(block.Header.ProposerPublicKey.ToBytes(), fc.params), + block.Header.ProposerVotingPublicKey.ToString(), + block.Header.ProposerVotePartialSignature.ToString(), + block.Header.GetQC().GetView(), block.Header.GetQC().GetAggregatedSignature().GetSignersList().Size(), block.Header.PrevBlockHash.String(), + aggQCView, aggQCNumValidators, aggQCHighQCViews, + len(block.Txns), len(fc.mempool.GetTransactions()), + ) +} From 82c47893c21c6301814071ebe3e528ddf29fffa2 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> 
Date: Wed, 31 Jan 2024 17:40:33 -0500 Subject: [PATCH 428/762] Define FasHotStuffConsensus Timer Values as Configs (#984) --- cmd/config.go | 6 +++++- cmd/node.go | 2 ++ cmd/run.go | 5 +++++ lib/pos_consensus.go | 44 ++++++++++++++++++++++++++++++-------------- lib/server.go | 11 ++++++++++- 5 files changed, 52 insertions(+), 16 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 2839868f8..3e337c24c 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -44,7 +44,9 @@ type Config struct { HypersyncMaxQueueSize uint32 // PoS Validator - PosValidatorSeed string + PosValidatorSeed string + PosBlockProductionIntervalMilliseconds uint64 + PosTimeoutBaseDurationMilliseconds uint64 // Mempool MempoolBackupIntervalMillis uint64 @@ -119,6 +121,8 @@ func LoadConfig() *Config { // PoS Validator config.PosValidatorSeed = viper.GetString("pos-validator-seed") + config.PosBlockProductionIntervalMilliseconds = viper.GetUint64("pos-block-production-interval-milliseconds") + config.PosTimeoutBaseDurationMilliseconds = viper.GetUint64("pos-timeout-base-duration-milliseconds") // Mempool config.MempoolBackupIntervalMillis = viper.GetUint64("mempool-backup-time-millis") diff --git a/cmd/node.go b/cmd/node.go index ca332c581..42d22733b 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -279,6 +279,8 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Config.MempoolBackupIntervalMillis, node.Config.MempoolFeeEstimatorNumMempoolBlocks, node.Config.MempoolFeeEstimatorNumPastBlocks, + node.Config.PosBlockProductionIntervalMilliseconds, + node.Config.PosTimeoutBaseDurationMilliseconds, ) if err != nil { // shouldRestart can be true if, on the previous run, we did not finish flushing all ancestral diff --git a/cmd/run.go b/cmd/run.go index abf1c5d4d..af35deac1 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -85,6 +85,11 @@ func SetupRunFlags(cmd *cobra.Command) { // PoS Validator cmd.PersistentFlags().String("pos-validator-seed", "", "A BIP39 seed phrase used to generate the 
private key of the "+ "Proof of Stake validator. Setting this flag automatically makes the node run as a Proof of Stake Validator.") + cmd.PersistentFlags().Uint64("pos-block-production-interval-milliseconds", 1500, "The interval in milliseconds "+ + "between blocks produced by the Proof of Stake block proposer. The default value is 1.5 seconds, or 1500 milliseconds.") + cmd.PersistentFlags().Uint64("pos-timeout-base-duration-milliseconds", 30000, "The base duration in milliseconds "+ + "that the Proof of Stake block proposer waits before timing out and producing a block. The default value is "+ + "30 seconds, or 30000 milliseconds.") // Mempool cmd.PersistentFlags().Uint64("mempool-backup-time-millis", 30000, diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 9b89cef2a..b884e6617 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -3,6 +3,7 @@ package lib import ( "fmt" "sync" + "time" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" @@ -12,21 +13,32 @@ import ( ) type FastHotStuffConsensus struct { - lock sync.RWMutex - blockchain *Blockchain - fastHotStuffEventLoop consensus.FastHotStuffEventLoop - mempool Mempool - params *DeSoParams - signer *BLSSigner + lock sync.RWMutex + blockchain *Blockchain + fastHotStuffEventLoop consensus.FastHotStuffEventLoop + mempool Mempool + params *DeSoParams + signer *BLSSigner + blockProductionIntervalMilliseconds uint64 + timeoutBaseDurationMilliseconds uint64 } -func NewFastHotStuffConsensus(params *DeSoParams, blockchain *Blockchain, mempool Mempool, signer *BLSSigner) *FastHotStuffConsensus { +func NewFastHotStuffConsensus( + params *DeSoParams, + blockchain *Blockchain, + mempool Mempool, + signer *BLSSigner, + blockProductionIntervalMilliseconds uint64, + timeoutBaseDurationMilliseconds uint64, +) *FastHotStuffConsensus { return &FastHotStuffConsensus{ - blockchain: blockchain, - fastHotStuffEventLoop: consensus.NewFastHotStuffEventLoop(), - mempool: mempool, - 
params: params, - signer: signer, + blockchain: blockchain, + fastHotStuffEventLoop: consensus.NewFastHotStuffEventLoop(), + mempool: mempool, + params: params, + signer: signer, + blockProductionIntervalMilliseconds: blockProductionIntervalMilliseconds, + timeoutBaseDurationMilliseconds: timeoutBaseDurationMilliseconds, } } @@ -83,8 +95,12 @@ func (cc *FastHotStuffConsensus) Start() error { return errors.Errorf("FastHotStuffConsensus.Start: Error fetching validator lists for safe blocks: %v", err) } - // Initialize and start the event loop. TODO: Pass in the crank timer duration and timeout duration - cc.fastHotStuffEventLoop.Init(0, 0, genesisQC, tipBlockWithValidators[0], safeBlocksWithValidators) + // Compute the block production internal and timeout base duration as time.Duration + blockProductionInterval := time.Millisecond * time.Duration(cc.blockProductionIntervalMilliseconds) + timeoutBaseDuration := time.Millisecond * time.Duration(cc.timeoutBaseDurationMilliseconds) + + // Initialize and start the event loop + cc.fastHotStuffEventLoop.Init(blockProductionInterval, timeoutBaseDuration, genesisQC, tipBlockWithValidators[0], safeBlocksWithValidators) cc.fastHotStuffEventLoop.Start() return nil diff --git a/lib/server.go b/lib/server.go index e1448b927..deff36178 100644 --- a/lib/server.go +++ b/lib/server.go @@ -392,6 +392,8 @@ func NewServer( _mempoolBackupIntervalMillis uint64, _mempoolFeeEstimatorNumMempoolBlocks uint64, _mempoolFeeEstimatorNumPastBlocks uint64, + _posBlockProductionIntervalMilliseconds uint64, + _posTimeoutBaseDurationMilliseconds uint64, ) ( _srv *Server, _err error, @@ -582,7 +584,14 @@ func NewServer( // Only initialize the FastHotStuffConsensus if the node is a validator with a BLS keystore if _blsKeystore != nil { - srv.fastHotStuffConsensus = NewFastHotStuffConsensus(_params, _chain, _posMempool, _blsKeystore.GetSigner()) + srv.fastHotStuffConsensus = NewFastHotStuffConsensus( + _params, + _chain, + _posMempool, + 
_blsKeystore.GetSigner(), + _posBlockProductionIntervalMilliseconds, + _posTimeoutBaseDurationMilliseconds, + ) } // Set all the fields on the Server object. From 7a8ebd9d73526ab47b04fc2dd817baf2fca79ed4 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 31 Jan 2024 17:43:59 -0500 Subject: [PATCH 429/762] augmented utxo view for steady state & syncing stack (#987) --- cmd/config.go | 10 ++-- cmd/node.go | 1 + cmd/run.go | 3 + lib/pos_block_producer_test.go | 12 ++-- lib/pos_blockchain_test.go | 36 +++++++---- lib/pos_fee_estimator_test.go | 7 ++- lib/pos_mempool.go | 106 ++++++++++++++++++++++++++++++++- lib/pos_mempool_test.go | 23 ++++--- lib/server.go | 10 ++-- 9 files changed, 171 insertions(+), 37 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 3e337c24c..3e3a47f97 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -49,10 +49,11 @@ type Config struct { PosTimeoutBaseDurationMilliseconds uint64 // Mempool - MempoolBackupIntervalMillis uint64 - MaxMempoolPosSizeBytes uint64 - MempoolFeeEstimatorNumMempoolBlocks uint64 - MempoolFeeEstimatorNumPastBlocks uint64 + MempoolBackupIntervalMillis uint64 + MaxMempoolPosSizeBytes uint64 + MempoolFeeEstimatorNumMempoolBlocks uint64 + MempoolFeeEstimatorNumPastBlocks uint64 + AugmentedBlockViewRefreshIntervalMillis uint64 // Mining MinerPublicKeys []string @@ -129,6 +130,7 @@ func LoadConfig() *Config { config.MaxMempoolPosSizeBytes = viper.GetUint64("max-mempool-pos-size-bytes") config.MempoolFeeEstimatorNumMempoolBlocks = viper.GetUint64("mempool-fee-estimator-num-mempool-blocks") config.MempoolFeeEstimatorNumPastBlocks = viper.GetUint64("mempool-fee-estimator-num-past-blocks") + config.AugmentedBlockViewRefreshIntervalMillis = viper.GetUint64("augmented-block-view-refresh-interval-millis") // Peers config.ConnectIPs = viper.GetStringSlice("connect-ips") diff --git a/cmd/node.go b/cmd/node.go index 42d22733b..4e64870f4 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ 
-279,6 +279,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Config.MempoolBackupIntervalMillis, node.Config.MempoolFeeEstimatorNumMempoolBlocks, node.Config.MempoolFeeEstimatorNumPastBlocks, + node.Config.AugmentedBlockViewRefreshIntervalMillis, node.Config.PosBlockProductionIntervalMilliseconds, node.Config.PosTimeoutBaseDurationMilliseconds, ) diff --git a/cmd/run.go b/cmd/run.go index af35deac1..585328abf 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -101,6 +101,9 @@ func SetupRunFlags(cmd *cobra.Command) { "The number of future blocks to break the PoS mempool into when estimating txn fee for the next block.") cmd.PersistentFlags().Uint64("mempool-fee-estimator-num-past-blocks", 50, "The number of past blocks to use when estimating txn fee for the next block from the PoS mempool.") + cmd.PersistentFlags().Uint64("augmented-block-view-refresh-interval-millis", 100, + "The frequency in milliseconds with which the augmented block view will be refreshed. "+ + "The default value is 100 milliseconds.") // Peers cmd.PersistentFlags().StringSlice("connect-ips", []string{}, diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 0c85c02db..fef7549de 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -36,7 +36,8 @@ func TestCreateBlockTemplate(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, + nil, 1, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -102,7 +103,8 @@ func TestCreateBlockWithoutHeader(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + params, globalParams, latestBlockView, 
2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, + nil, 1, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -178,7 +180,8 @@ func TestGetBlockTransactions(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, + nil, 1, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -262,7 +265,8 @@ func TestGetBlockTransactions(t *testing.T) { // be returned in the same order as the transaction from getBlockTransactions. testMempool := NewPosMempool() testMempool.Init( - params, globalParams, latestBlockView, 2, "", true, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + params, globalParams, latestBlockView, 2, "", true, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, + nil, 1, 100, ) require.NoError(testMempool.Start()) defer testMempool.Stop() diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 468c22d45..51960f461 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -1834,7 +1834,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { // Set the prev block hash manually on orphan block orphanBlock.Header.PrevBlockHash = dummyParentBlockHash // Create a QC on the dummy parent block - orphanBlock.Header.ValidatorsVoteQC = _getVoteQC(testMeta, testMeta.posMempool.readOnlyLatestBlockView, orphanBlock.Header.Height, dummyParentBlockHash, 16) + orphanBlock.Header.ValidatorsVoteQC = _getVoteQC(testMeta, orphanBlock.Header.Height, dummyParentBlockHash, 16) updateProposerVotePartialSignatureForBlock(testMeta, orphanBlock) orphanBlockHash, err = orphanBlock.Hash() require.NoError(t, err) @@ -2411,7 +2411,7 @@ func _generateRealBlockWithFailingTxn(testMeta *TestMeta, blockHeight uint64, vi latestBlockHeight 
:= testMeta.chain.blockIndexByHash[*prevBlockHash].Height testMeta.posMempool.UpdateLatestBlock(latestBlockView, uint64(latestBlockHeight)) seedSignature := getRandomSeedSignature(testMeta, blockHeight, view, prevBlock.Header.ProposerRandomSeedSignature) - fullBlockTemplate := _getFullRealBlockTemplate(testMeta, testMeta.posMempool.readOnlyLatestBlockView, blockHeight, view, seedSignature, isTimeout) + fullBlockTemplate := _getFullRealBlockTemplate(testMeta, blockHeight, view, seedSignature, isTimeout) // Remove the transactions from this block from the mempool. // This prevents nonce reuse issues when trying to make reorg blocks. for _, txn := range passingTxns { @@ -2465,6 +2465,11 @@ func _generateDummyBlock(testMeta *TestMeta, blockHeight uint64, view uint64, se require.True(testMeta.t, blockNode.IsStored()) _, exists := testMeta.chain.blockIndexByHash[*newBlockHash] require.True(testMeta.t, exists) + // Remove the transactions from this block from the mempool. + // This prevents nonce reuse issues when trying to make failing blocks. 
+ for _, txn := range passingTxns { + testMeta.posMempool.RemoveTransaction(txn.Hash()) + } return blockTemplate } @@ -2492,7 +2497,10 @@ func _generateBlockAndAddToBestChain(testMeta *TestMeta, blockHeight uint64, vie return blockTemplate } -func getLeaderForBlockHeightAndView(testMeta *TestMeta, latestBlockView *UtxoView, blockHeight uint64, view uint64) (string, []byte) { +func getLeaderForBlockHeightAndView(testMeta *TestMeta, blockHeight uint64, view uint64) (string, []byte) { + testMeta.posMempool.Lock() + defer testMeta.posMempool.Unlock() + latestBlockView := testMeta.posMempool.readOnlyLatestBlockView currentEpochEntry, err := latestBlockView.GetCurrentEpochEntry() require.NoError(testMeta.t, err) leaders, err := latestBlockView.GetCurrentSnapshotLeaderSchedule() @@ -2510,7 +2518,7 @@ func getLeaderForBlockHeightAndView(testMeta *TestMeta, latestBlockView *UtxoVie } func getRandomSeedSignature(testMeta *TestMeta, height uint64, view uint64, prevRandomSeedSignature *bls.Signature) *bls.Signature { - leaderPublicKey, _ := getLeaderForBlockHeightAndView(testMeta, testMeta.posMempool.readOnlyLatestBlockView, height, view) + leaderPublicKey, _ := getLeaderForBlockHeightAndView(testMeta, height, view) leaderBLSPrivKey := testMeta.pubKeyToBLSKeyMap[leaderPublicKey] prevRandomSeedHashSHA256 := sha256.Sum256(prevRandomSeedSignature.ToBytes()) newRandomSeedSignature, err := leaderBLSPrivKey.Sign(prevRandomSeedHashSHA256[:]) @@ -2525,7 +2533,7 @@ func updateRandomSeedSignature(testMeta *TestMeta, block *MsgDeSoBlock, prevRand func updateProposerVotePartialSignatureForBlock(testMeta *TestMeta, block *MsgDeSoBlock) { blockHash, err := block.Hash() require.NoError(testMeta.t, err) - leaderPublicKey, _ := getLeaderForBlockHeightAndView(testMeta, testMeta.posMempool.readOnlyLatestBlockView, block.Header.Height, block.Header.ProposedInView) + leaderPublicKey, _ := getLeaderForBlockHeightAndView(testMeta, block.Header.Height, block.Header.ProposedInView) leaderBlsPrivKey := 
testMeta.pubKeyToBLSKeyMap[leaderPublicKey] partialSigPayload := consensus.GetVoteSignaturePayload(block.Header.ProposedInView, blockHash) sig, err := leaderBlsPrivKey.Sign(partialSigPayload[:]) @@ -2533,12 +2541,14 @@ func updateProposerVotePartialSignatureForBlock(testMeta *TestMeta, block *MsgDe block.Header.ProposerVotePartialSignature = sig } -func _getVoteQC(testMeta *TestMeta, latestBlockView *UtxoView, blockHeight uint64, qcBlockHash *BlockHash, qcView uint64) *QuorumCertificate { +func _getVoteQC(testMeta *TestMeta, blockHeight uint64, qcBlockHash *BlockHash, qcView uint64) *QuorumCertificate { var validators []consensus.Validator var signersList *bitset.Bitset var aggregatedSignature *bls.Signature - votePayload := consensus.GetVoteSignaturePayload(qcView, qcBlockHash) + testMeta.posMempool.Lock() + defer testMeta.posMempool.Unlock() + latestBlockView := testMeta.posMempool.readOnlyLatestBlockView allSnapshotValidators, err := latestBlockView.GetAllSnapshotValidatorSetEntriesByStake() require.NoError(testMeta.t, err) validators = toConsensusValidators(allSnapshotValidators) @@ -2579,14 +2589,15 @@ func _getVoteQC(testMeta *TestMeta, latestBlockView *UtxoView, blockHeight uint6 // _getFullRealBlockTemplate is a helper function that generates a block template with a valid TxnConnectStatusByIndexHash // and a valid TxnConnectStatusByIndex, a valid vote or timeout QC, does all the required signing by validators, // and generates the proper ProposerVotePartialSignature. 
-func _getFullRealBlockTemplate(testMeta *TestMeta, latestBlockView *UtxoView, blockHeight uint64, view uint64, seedSignature *bls.Signature, isTimeout bool) BlockTemplate { - blockTemplate, err := testMeta.posBlockProducer.createBlockTemplate(latestBlockView, blockHeight, view, seedSignature) +func _getFullRealBlockTemplate(testMeta *TestMeta, blockHeight uint64, view uint64, seedSignature *bls.Signature, isTimeout bool) BlockTemplate { + blockTemplate, err := testMeta.posBlockProducer.createBlockTemplate( + testMeta.posMempool.readOnlyLatestBlockView, blockHeight, view, seedSignature) require.NoError(testMeta.t, err) require.NotNil(testMeta.t, blockTemplate) blockTemplate.Header.TxnConnectStatusByIndexHash = HashBitset(blockTemplate.TxnConnectStatusByIndex) // Figure out who the leader is supposed to be. - leaderPublicKey, leaderPublicKeyBytes := getLeaderForBlockHeightAndView(testMeta, latestBlockView, blockHeight, view) + leaderPublicKey, leaderPublicKeyBytes := getLeaderForBlockHeightAndView(testMeta, blockHeight, view) // Get leader voting private key. leaderVotingPrivateKey := testMeta.pubKeyToBLSKeyMap[leaderPublicKey] // Get hash of last block @@ -2600,7 +2611,7 @@ func _getFullRealBlockTemplate(testMeta *TestMeta, latestBlockView *UtxoView, bl } // Create the vote QC. 
- voteQC := _getVoteQC(testMeta, latestBlockView, blockHeight, chainTipHash, qcView) + voteQC := _getVoteQC(testMeta, blockHeight, chainTipHash, qcView) if !isTimeout { blockTemplate.Header.ValidatorsVoteQC = voteQC } else { @@ -2764,7 +2775,8 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { mempoolBackupIntervalMillis := uint64(30000) mempool := NewPosMempool() require.NoError(t, mempool.Init( - params, _testGetDefaultGlobalParams(), latestBlockView, 11, _dbDirSetup(t), false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + params, _testGetDefaultGlobalParams(), latestBlockView, 11, _dbDirSetup(t), false, maxMempoolPosSizeBytes, + mempoolBackupIntervalMillis, 1, nil, 1, 100, )) require.NoError(t, mempool.Start()) require.True(t, mempool.IsRunning()) diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index bfd12054a..08e1a18ee 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -24,11 +24,14 @@ func TestFeeEstimator(t *testing.T) { dir := _dbDirSetup(t) mempool := NewPosMempool() - mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + err = mempool.Init( + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, + nil, 1, 100, ) + require.NoError(t, err) require.NoError(t, mempool.Start()) require.True(t, mempool.IsRunning()) + defer mempool.Stop() minFeeBucketMin, minFeeBucketMax := computeFeeTimeBucketRangeFromFeeNanosPerKB( globalParams.MinimumNetworkFeeNanosPerKB, big.NewFloat(float64(globalParams.MinimumNetworkFeeNanosPerKB)), diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index d3de0c6c8..997801567 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -5,6 +5,8 @@ import ( "path/filepath" "strings" "sync" + "sync/atomic" + "time" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" @@ -34,6 +36,7 @@ type Mempool 
interface { GetAugmentedUniversalView() (*UtxoView, error) GetAugmentedUtxoViewForPublicKey(pk []byte, optionalTx *MsgDeSoTxn) (*UtxoView, error) + BlockUntilReadOnlyViewRegenerated() CheckSpend(op UtxoKey) *MsgDeSoTxn GetOrderedTransactions() []*MempoolTx IsTransactionInPool(txHash *BlockHash) bool @@ -118,6 +121,14 @@ type PosMempool struct { // PosMempool only needs read-access to the block view. It isn't necessary to copy the block view before passing it // to the mempool. readOnlyLatestBlockView *UtxoView + // augmentedLatestBlockView is a copy of the latest block view with all the transactions in the mempool applied to + // it. This allows the backend to display the current state of the blockchain including the mempool. + // The augmentedLatestBlockView is updated every 100 milliseconds to reflect the latest state of the mempool. + augmentedLatestBlockView *UtxoView + // augmentedLatestBlockViewMutex is used to protect the augmentedLatestBlockView from concurrent access. + augmentedLatestBlockViewMutex sync.RWMutex + // Signals that the mempool is now in the stopped state. + quit chan interface{} // latestBlockNode is used to infer the latest block height. The latestBlockNode should be updated whenever a new // block is added to the blockchain via UpdateLatestBlock. latestBlockHeight uint64 @@ -129,6 +140,14 @@ type PosMempool struct { // feeEstimator is used to estimate the fee required for a transaction to be included in the next block // based off the current state of the mempool and the most n recent blocks. feeEstimator *PoSFeeEstimator + + // augmentedBlockViewRefreshIntervalMillis is the frequency with which the augmentedLatestBlockView is updated. + augmentedBlockViewRefreshIntervalMillis uint64 + + // augmentedLatestBlockViewSequenceNumber is the sequence number of the readOnlyLatestBlockView. It is incremented + // every time augmentedLatestBlockView is updated. 
It can be used by obtainers of the augmentedLatestBlockView to + // wait until a particular transaction has been connected. + augmentedLatestBlockViewSequenceNumber int64 } // PosMempoolIterator is a wrapper around FeeTimeIterator, modified to return MsgDeSoTxn instead of MempoolTx. @@ -164,6 +183,7 @@ func NewPosMempool() *PosMempool { feeEstimator: NewPoSFeeEstimator(), ledger: NewBalanceLedger(), nonceTracker: NewNonceTracker(), + quit: make(chan interface{}), } } @@ -179,6 +199,7 @@ func (mp *PosMempool) Init( feeEstimatorNumMempoolBlocks uint64, feeEstimatorPastBlocks []*MsgDeSoBlock, feeEstimatorNumPastBlocks uint64, + augmentedBlockViewRefreshIntervalMillis uint64, ) error { if mp.status != PosMempoolStatusNotInitialized { return errors.New("PosMempool.Init: PosMempool already initialized") @@ -188,14 +209,22 @@ func (mp *PosMempool) Init( mp.params = params mp.globalParams = globalParams mp.readOnlyLatestBlockView = readOnlyLatestBlockView + var err error + if readOnlyLatestBlockView != nil { + mp.augmentedLatestBlockView, err = readOnlyLatestBlockView.CopyUtxoView() + if err != nil { + return errors.Wrapf(err, "PosMempool.Init: Problem copying utxo view") + } + } mp.latestBlockHeight = latestBlockHeight mp.dir = dir mp.inMemoryOnly = inMemoryOnly mp.maxMempoolPosSizeBytes = maxMempoolPosSizeBytes mp.mempoolBackupIntervalMillis = mempoolBackupIntervalMillis + mp.augmentedBlockViewRefreshIntervalMillis = augmentedBlockViewRefreshIntervalMillis // TODO: parameterize num blocks. Also, how to pass in blocks. 
- err := mp.feeEstimator.Init( + err = mp.feeEstimator.Init( mp.txnRegister, feeEstimatorNumMempoolBlocks, feeEstimatorPastBlocks, @@ -241,11 +270,70 @@ func (mp *PosMempool) Start() error { return errors.Wrapf(err, "PosMempool.Start: Problem loading persisted transactions") } } + mp.startAugmentedViewRefreshRoutine() mp.status = PosMempoolStatusRunning return nil } +func (mp *PosMempool) startAugmentedViewRefreshRoutine() { + go func() { + for { + select { + case <-time.After(time.Duration(mp.augmentedBlockViewRefreshIntervalMillis) * time.Millisecond): + // If we're not within 10 blocks of the PoS cutover, we don't need to update the + // augmentedLatestBlockView. + if mp.latestBlockHeight+10 < uint64(mp.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + continue + } + // Update the augmentedLatestBlockView with the latest block view. + mp.RLock() + newView, err := mp.readOnlyLatestBlockView.CopyUtxoView() + if err != nil { + glog.Errorf("PosMempool.startAugmentedViewRefreshRoutine: Problem copying utxo view: %v", err) + continue + } + mp.RUnlock() + for _, txn := range mp.GetTransactions() { + copiedView, err := newView.CopyUtxoView() + if err != nil { + glog.Errorf("PosMempool.startAugmentedViewRefreshRoutine: Problem copying utxo view: %v", err) + continue + } + _, _, _, _, err = copiedView.ConnectTransaction( + txn.GetTxn(), txn.Hash(), 0, uint32(mp.latestBlockHeight)+1, time.Now().UnixNano(), false, + false) + // If the transaction successfully connects, we set the newView to the copiedView + // and proceed to the next transaction. + if err == nil { + newView = copiedView + continue + } + // If the transaction failed to connect, we connect the transaction as a failed txn + // directly on newView. 
+ if mp.latestBlockHeight+1 >= uint64(mp.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + // Try to connect as failing txn directly to newView + _, _, _, err = newView._connectFailingTransaction( + txn.GetTxn(), uint32(mp.latestBlockHeight), false) + if err != nil { + glog.Errorf( + "PosMempool.startAugmentedViewRefreshRoutine: Problem connecting transaction: %v", err) + } + } + } + // Grab the augmentedLatestBlockViewMutex write lock and update the augmentedLatestBlockView. + mp.augmentedLatestBlockViewMutex.Lock() + mp.augmentedLatestBlockView = newView + mp.augmentedLatestBlockViewMutex.Unlock() + // Increment the augmentedLatestBlockViewSequenceNumber. + atomic.AddInt64(&mp.augmentedLatestBlockViewSequenceNumber, 1) + case <-mp.quit: + return + } + } + }() +} + func (mp *PosMempool) Stop() { mp.Lock() defer mp.Unlock() @@ -269,7 +357,7 @@ func (mp *PosMempool) Stop() { mp.ledger.Reset() mp.nonceTracker.Reset() mp.feeEstimator = NewPoSFeeEstimator() - + close(mp.quit) mp.status = PosMempoolStatusNotInitialized } @@ -650,6 +738,7 @@ func (mp *PosMempool) refreshNoLock() error { mp.feeEstimator.numMempoolBlocks, mp.feeEstimator.cachedBlocks, mp.feeEstimator.numPastBlocks, + mp.augmentedBlockViewRefreshIntervalMillis, ) if err != nil { return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem initializing temp pool") @@ -751,7 +840,9 @@ func (mp *PosMempool) GetAugmentedUniversalView() (*UtxoView, error) { if !mp.IsRunning() { return nil, errors.Wrapf(MempoolErrorNotRunning, "PosMempool.GetAugmentedUniversalView: ") } - newView, err := mp.readOnlyLatestBlockView.CopyUtxoView() + mp.augmentedLatestBlockViewMutex.RLock() + defer mp.augmentedLatestBlockViewMutex.RUnlock() + newView, err := mp.augmentedLatestBlockView.CopyUtxoView() if err != nil { return nil, errors.Wrapf(err, "PosMempool.GetAugmentedUniversalView: Problem copying utxo view") } @@ -760,6 +851,15 @@ func (mp *PosMempool) GetAugmentedUniversalView() (*UtxoView, error) { func (mp 
*PosMempool) GetAugmentedUtxoViewForPublicKey(pk []byte, optionalTx *MsgDeSoTxn) (*UtxoView, error) { return mp.GetAugmentedUniversalView() } +func (mp *PosMempool) BlockUntilReadOnlyViewRegenerated() { + oldSeqNum := atomic.LoadInt64(&mp.augmentedLatestBlockViewSequenceNumber) + newSeqNum := oldSeqNum + for newSeqNum == oldSeqNum { + // Check fairly often. Not too often. + time.Sleep(25 * time.Millisecond) + newSeqNum = atomic.LoadInt64(&mp.augmentedLatestBlockViewSequenceNumber) + } +} func (mp *PosMempool) CheckSpend(op UtxoKey) *MsgDeSoTxn { panic("implement me") } diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index b1d02d841..b95478c92 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -23,7 +23,7 @@ func TestPosMempoolStart(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - ¶ms, globalParams, nil, 0, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + ¶ms, globalParams, nil, 0, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -52,7 +52,8 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, + nil, 1, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -70,7 +71,8 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { require.False(mempool.IsRunning()) newPool := NewPosMempool() - require.NoError(newPool.Init(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1)) + require.NoError(newPool.Init(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, + 
mempoolBackupIntervalMillis, 1, nil, 1, 100)) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) newPoolTxns := newPool.GetTransactions() @@ -105,7 +107,8 @@ func TestPosMempoolPrune(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, + nil, 1, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -141,7 +144,8 @@ func TestPosMempoolPrune(t *testing.T) { newPool := NewPosMempool() require.NoError(newPool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, + nil, 1, 100, )) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) @@ -201,7 +205,8 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, + nil, 1, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -229,7 +234,8 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { newPool := NewPosMempool() require.NoError(newPool.Init( - params, newGlobalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + params, newGlobalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, + nil, 1, 100, )) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) @@ -261,7 +267,8 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) 
{ mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, + nil, 1, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) diff --git a/lib/server.go b/lib/server.go index deff36178..e66bc2a37 100644 --- a/lib/server.go +++ b/lib/server.go @@ -392,6 +392,7 @@ func NewServer( _mempoolBackupIntervalMillis uint64, _mempoolFeeEstimatorNumMempoolBlocks uint64, _mempoolFeeEstimatorNumPastBlocks uint64, + _augmentedBlockViewRefreshIntervalMillis uint64, _posBlockProductionIntervalMilliseconds uint64, _posTimeoutBaseDurationMilliseconds uint64, ) ( @@ -523,6 +524,7 @@ func NewServer( _mempoolFeeEstimatorNumMempoolBlocks, []*MsgDeSoBlock{latestBlock}, _mempoolFeeEstimatorNumPastBlocks, + _augmentedBlockViewRefreshIntervalMillis, ) if err != nil { return nil, errors.Wrapf(err, "NewServer: Problem initializing PoS mempool"), true @@ -1834,10 +1836,6 @@ func (srv *Server) _addNewTxn( return nil, errors.Wrapf(err, "Server._addNewTxn: Problem adding transaction to mempool: ") } - // At this point, we know the transaction has been run through the mempool. - // Now wait for an update of the ReadOnlyUtxoView so we don't break anything. - srv.mempool.BlockUntilReadOnlyViewRegenerated() - glog.V(1).Infof("Server._addNewTxn: newly accepted txn: %v, Peer: %v", txn, pp) } @@ -1848,6 +1846,10 @@ func (srv *Server) _addNewTxn( return nil, errors.Wrapf(err, "Server._addNewTxn: problem adding txn to pos mempool") } + // At this point, we know the transaction has been run through the mempool. + // Now wait for an update of the ReadOnlyUtxoView so we don't break anything. 
+ srv.GetMempool().BlockUntilReadOnlyViewRegenerated() + return []*MsgDeSoTxn{txn}, nil } From f9117f2d171caac7b53ddfe2fa9f8605becd2691 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 31 Jan 2024 17:46:48 -0500 Subject: [PATCH 430/762] Update db handle for pos mempool's db and utxo views in dirtyHackUpdateDbOpts (#989) --- lib/server.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lib/server.go b/lib/server.go index e66bc2a37..a610f3011 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1551,6 +1551,11 @@ func (srv *Server) dirtyHackUpdateDbOpts(opts badger.Options) { // Make sure that a mempool process doesn't try to access the DB while we're closing and re-opening it. srv.mempool.mtx.Lock() defer srv.mempool.mtx.Unlock() + // Make sure that the pos mempool process doesn't try to access the DB while we're closing and re-opening it. + srv.posMempool.Lock() + defer srv.posMempool.Unlock() + srv.posMempool.augmentedLatestBlockViewMutex.Lock() + defer srv.posMempool.augmentedLatestBlockViewMutex.Unlock() // Make sure that a server process doesn't try to access the DB while we're closing and re-opening it. srv.DbMutex.Lock() defer srv.DbMutex.Unlock() @@ -1565,6 +1570,13 @@ func (srv *Server) dirtyHackUpdateDbOpts(opts badger.Options) { srv.mempool.bc.db = srv.blockchain.db srv.mempool.backupUniversalUtxoView.Handle = srv.blockchain.db srv.mempool.universalUtxoView.Handle = srv.blockchain.db + srv.posMempool.db = srv.blockchain.db + if srv.posMempool.readOnlyLatestBlockView != nil { + srv.posMempool.readOnlyLatestBlockView.Handle = srv.blockchain.db + } + if srv.posMempool.augmentedLatestBlockView != nil { + srv.posMempool.augmentedLatestBlockView.Handle = srv.blockchain.db + } // Save the new options to the DB so that we know what to use if the node restarts. 
isPerformanceOptions := DbOptsArePerformance(&opts) From 25e01e3a59526737a551989138c28a7648e63106 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 31 Jan 2024 17:48:33 -0500 Subject: [PATCH 431/762] Add script to generate bls voting auth for validator registration (#990) --- scripts/pos/validator_registration_utils.go | 39 +++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 scripts/pos/validator_registration_utils.go diff --git a/scripts/pos/validator_registration_utils.go b/scripts/pos/validator_registration_utils.go new file mode 100644 index 000000000..c5a811970 --- /dev/null +++ b/scripts/pos/validator_registration_utils.go @@ -0,0 +1,39 @@ +//go:build relic + +package main + +import ( + "fmt" + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/lib" +) + +func getBLSVotingAuthorizationAndPublicKey(blsKeyStore *lib.BLSKeystore, transactorPublicKey *lib.PublicKey) ( + *bls.PublicKey, *bls.Signature, +) { + votingAuthPayload := lib.CreateValidatorVotingAuthorizationPayload(transactorPublicKey.ToBytes()) + votingAuthorization, err := blsKeyStore.GetSigner().Sign(votingAuthPayload) + if err != nil { + panic(err) + } + return blsKeyStore.GetSigner().GetPublicKey(), votingAuthorization +} + +// You must have relic installed to run this code. +// To install relic, use the install-relic.sh script in the scripts directory. 
+// go run -tags relic validator_registration_utils.go +func main() { + // Replace with your own BIP39 Validator seed phrase + keystore, err := lib.NewBLSKeystore("...") + if err != nil { + panic(err) + } + // Replace with your DeSo Public Key + pubKeyBytes, _, err := lib.Base58CheckDecode("...") + if err != nil { + panic(err) + } + publicKey, votingAuthorization := getBLSVotingAuthorizationAndPublicKey(keystore, lib.NewPublicKey(pubKeyBytes)) + fmt.Println("Validator BLS PublicKey: ", publicKey.ToString()) + fmt.Println("Validator Voting Authorization: ", votingAuthorization.ToString()) +} From fe63c525b7ae0cbe4ba33a41196b3701f8793e62 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 31 Jan 2024 15:08:12 -0800 Subject: [PATCH 432/762] Remove locked DESO from lockups spec. --- lib/block_view_lockups.go | 387 ++++++++-------------- lib/block_view_lockups_test.go | 588 ++------------------------------- lib/block_view_types.go | 11 - lib/errors.go | 6 +- 4 files changed, 161 insertions(+), 831 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 3e5e7c08b..98c220746 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -8,7 +8,6 @@ import ( "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" - "math/big" "reflect" "sort" ) @@ -1039,38 +1038,37 @@ func (bav *UtxoView) _connectCoinLockup( // Check that the target profile public key is valid and that a profile corresponding to that public key exists. // We also go ahead and fetch the profile PKID as we will use it later. 
- var profileEntry *ProfileEntry - var profilePKID *PKID if len(txMeta.ProfilePublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupInvalidProfilePubKey, "_connectCoinLockup") } - if !txMeta.ProfilePublicKey.IsZeroPublicKey() { - profileEntry = bav.GetProfileEntryForPublicKey(txMeta.ProfilePublicKey.ToBytes()) - if profileEntry == nil || profileEntry.isDeleted { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupOnNonExistentProfile, "_connectCoinLockup") - } - profilePKIDEntry := bav.GetPKIDForPublicKey(txMeta.ProfilePublicKey.ToBytes()) - if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupNonExistentProfile, "_connectCoinLockup") - } - profilePKID = profilePKIDEntry.PKID.NewPKID() - } else { - profilePKID = ZeroPKID.NewPKID() + + // NOTE: The zero key could be used to enable locking up DESO. + // If this feature is desired, it can be restored with the following PR: + // TODO: Add relevant PR here. + if txMeta.ProfilePublicKey.IsZeroPublicKey() { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupCannotLockupZeroKey, "_connectCoinLockup") + } - // Validate the lockup amount as non-zero. This is meant to prevent wasteful "no-op" transactions. - if txMeta.LockupAmountBaseUnits.IsZero() { + // Check that the profile specified exists. 
+ profileEntry := bav.GetProfileEntryForPublicKey(txMeta.ProfilePublicKey.ToBytes()) + if profileEntry == nil || profileEntry.isDeleted { return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupOfAmountZero, "_connectCoinLockup") + errors.Wrap(RuleErrorCoinLockupOnNonExistentProfile, "_connectCoinLockup") } + profilePKIDEntry := bav.GetPKIDForPublicKey(txMeta.ProfilePublicKey.ToBytes()) + if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupNonExistentProfile, "_connectCoinLockup") + } + profilePKID := profilePKIDEntry.PKID.NewPKID() - // If this is a DeSo lockup, ensure the amount is less than 2**64 (maximum DeSo balance). - if txMeta.ProfilePublicKey.IsZeroPublicKey() && !txMeta.LockupAmountBaseUnits.IsUint64() { + // Validate the lockup amount as non-zero. This is meant to prevent wasteful "no-op" transactions. + if txMeta.LockupAmountBaseUnits.IsZero() { return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupExcessiveDeSoLockup, "_connectCoinLockup") + errors.Wrap(RuleErrorCoinLockupOfAmountZero, "_connectCoinLockup") } // Validate the lockup expires in the future. @@ -1110,73 +1108,42 @@ func (bav *UtxoView) _connectCoinLockup( } hodlerPKID := recipientPKIDEntry.PKID - // Validate the transactor as having sufficient DAO Coin or DESO balance for the transaction. - var transactorBalanceNanos256 *uint256.Int - var prevTransactorBalanceEntry *BalanceEntry - var prevCoinEntry *CoinEntry - if profilePKID.IsZeroPKID() { - // NOTE: When spending balances, we need to check for immature block rewards. Since we don't have - // the block rewards yet for the current block, we subtract one from the current block height - // when spending balances. - - // Check the DeSo balance of the user. 
- transactorBalanceNanos, err := bav.GetSpendableDeSoBalanceNanosForPublicKey(txn.PublicKey, blockHeight-1) - if err != nil { - return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup") - } - - // Construct a uint256 balance and validate the transactor as having sufficient DeSo. - transactorBalanceNanos256, _ = uint256.FromBig(big.NewInt(0).SetUint64(transactorBalanceNanos)) - if txMeta.LockupAmountBaseUnits.Gt(transactorBalanceNanos256) { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupInsufficientDeSo, "_connectCoinLockup") - } - - // Spend the transactor's DeSo balance. - lockupAmount64 := txMeta.LockupAmountBaseUnits.Uint64() - newUtxoOp, err := bav._spendBalance(lockupAmount64, txn.PublicKey, blockHeight-1) - if err != nil { - return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup") - } - utxoOpsForTxn = append(utxoOpsForTxn, newUtxoOp) - } else { - // Check the BalanceEntry of the user. - transactorBalanceEntry, _, _ := bav.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( - txn.PublicKey, - txMeta.ProfilePublicKey.ToBytes(), - true) - if transactorBalanceEntry == nil || transactorBalanceEntry.isDeleted { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupBalanceEntryDoesNotExist, "_connectCoinLockup") - } + // Check the BalanceEntry of the user. + transactorBalanceEntry, _, _ := bav.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( + txn.PublicKey, + txMeta.ProfilePublicKey.ToBytes(), + true) + if transactorBalanceEntry == nil || transactorBalanceEntry.isDeleted { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupBalanceEntryDoesNotExist, "_connectCoinLockup") + } - // Validate the balance entry as having sufficient funds. - transactorBalanceNanos256 = transactorBalanceEntry.BalanceNanos.Clone() - if txMeta.LockupAmountBaseUnits.Gt(transactorBalanceNanos256) { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupInsufficientCoins, "_connectCoinLockup") - } + // Validate the balance entry as having sufficient funds. 
+ transactorBalanceNanos256 := transactorBalanceEntry.BalanceNanos.Clone() + if txMeta.LockupAmountBaseUnits.Gt(transactorBalanceNanos256) { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupInsufficientCoins, "_connectCoinLockup") + } - // We store the previous transactor balance entry in the event we need to revert the transaction. - prevTransactorBalanceEntry = transactorBalanceEntry.Copy() + // We store the previous transactor balance entry in the event we need to revert the transaction. + prevTransactorBalanceEntry := transactorBalanceEntry.Copy() - // Spend the transactor's DAO coin balance. - transactorBalanceEntry.BalanceNanos = - *uint256.NewInt().Sub(&transactorBalanceEntry.BalanceNanos, txMeta.LockupAmountBaseUnits) - bav._setDAOCoinBalanceEntryMappings(transactorBalanceEntry) + // Spend the transactor's DAO coin balance. + transactorBalanceEntry.BalanceNanos = + *uint256.NewInt().Sub(&transactorBalanceEntry.BalanceNanos, txMeta.LockupAmountBaseUnits) + bav._setDAOCoinBalanceEntryMappings(transactorBalanceEntry) - // Create a copy of the associated CoinEntry in the event we must roll back the transaction. - prevCoinEntry = profileEntry.DAOCoinEntry.Copy() + // Create a copy of the associated CoinEntry in the event we must roll back the transaction. + prevCoinEntry := profileEntry.DAOCoinEntry.Copy() - // Update CoinsInCirculation and NumberOfHolders associated with the DAO coin balance. - profileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().Sub( - &profileEntry.DAOCoinEntry.CoinsInCirculationNanos, - txMeta.LockupAmountBaseUnits) - if transactorBalanceEntry.BalanceNanos.IsZero() && !prevTransactorBalanceEntry.BalanceNanos.IsZero() { - profileEntry.DAOCoinEntry.NumberOfHolders-- - } - bav._setProfileEntryMappings(profileEntry) + // Update CoinsInCirculation and NumberOfHolders associated with the DAO coin balance. 
+ profileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().Sub( + &profileEntry.DAOCoinEntry.CoinsInCirculationNanos, + txMeta.LockupAmountBaseUnits) + if transactorBalanceEntry.BalanceNanos.IsZero() && !prevTransactorBalanceEntry.BalanceNanos.IsZero() { + profileEntry.DAOCoinEntry.NumberOfHolders-- } + bav._setProfileEntryMappings(profileEntry) // If this is an unvested lockup, compute any accrued yield. // In the vested lockup case, the yield earned is always zero. @@ -1277,12 +1244,6 @@ func (bav *UtxoView) _connectCoinLockup( errors.Wrap(RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry, "_connectCoinLockup: New Locked Balance Entry Balance") } - // Ensure in the case of DESO the resulting locked amount is less than 2**64. - if profilePKID.IsZeroPKID() && !newLockedBalanceEntryBalance.IsUint64() { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupYieldCausesOverflow, "_connectCoinLockup: "+ - "New DESO Locked Balance Entry Balance") - } // (4) Set the new locked balance entry in the view lockedBalanceEntry.BalanceBaseUnits = *newLockedBalanceEntryBalance @@ -1912,20 +1873,12 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( txMeta := txn.TxnMeta.(*UpdateCoinLockupParamsMetadata) // Get the profilePKID from the transactor public key. - var profilePKID *PKID - _, updaterIsParamUpdater := GetParamUpdaterPublicKeys(blockHeight, bav.Params)[MakePkMapKey(txn.PublicKey)] - if updaterIsParamUpdater { - // NOTE: The implication here is ParamUpdaters share write access to the DeSo lockup parameters. - // As further implication, this means ParamUpdaters cannot specify their own coin's lockup parameters. 
- profilePKID = ZeroPKID.NewPKID() - } else { - profilePKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) - if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { - return 0, 0, nil, errors.Wrap(RuleErrorUpdateCoinLockupParamsOnInvalidPKID, - "_connectUpdateCoinLockupParams") - } - profilePKID = profilePKIDEntry.PKID + profilePKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) + if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { + return 0, 0, nil, errors.Wrap(RuleErrorUpdateCoinLockupParamsOnInvalidPKID, + "_connectUpdateCoinLockupParams") } + profilePKID := profilePKIDEntry.PKID // Sanity check the lockup duration as valid. if txMeta.LockupYieldDurationNanoSecs < 0 { @@ -1975,7 +1928,7 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( // Check if we're updating transfer restriction. var prevLockupTransferRestriction TransferRestrictionStatus - if txMeta.NewLockupTransferRestrictions && !profilePKID.IsZeroPKID() { + if txMeta.NewLockupTransferRestrictions { // Fetch the profile entry and LockupTransferRestriction status. profileEntry := bav.GetProfileEntryForPKID(profilePKID) if profileEntry == nil || profileEntry.isDeleted { @@ -1990,13 +1943,6 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus = txMeta.LockupTransferRestrictionStatus bav._setProfileEntryMappings(profileEntry) } - if txMeta.NewLockupTransferRestrictions && profilePKID.IsZeroPKID() { - // Store a copy of the previous TransferRestrictionStatus. - prevLockupTransferRestriction = bav.GlobalParamsEntry.LockedDESOTransferRestrictions - - // Update the transfer restrictions in global params. - bav.GlobalParamsEntry.LockedDESOTransferRestrictions = txMeta.LockupTransferRestrictionStatus - } // Check that the new transfer restriction is valid. 
if txMeta.NewLockupTransferRestrictions { @@ -2051,18 +1997,12 @@ func (bav *UtxoView) _disconnectUpdateCoinLockupParams( txMeta := currentTxn.TxnMeta.(*UpdateCoinLockupParamsMetadata) // Fetch the profilePKID for the transactor. - var profilePKID *PKID - _, updaterIsParamUpdater := GetParamUpdaterPublicKeys(blockHeight, bav.Params)[MakePkMapKey(currentTxn.PublicKey)] - if updaterIsParamUpdater { - profilePKID = ZeroPKID.NewPKID() - } else { - profilePKIDEntry := bav.GetPKIDForPublicKey(currentTxn.PublicKey) - if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { - return errors.Wrap(RuleErrorUpdateCoinLockupParamsOnInvalidPKID, - "_connectUpdateCoinLockupParams") - } - profilePKID = profilePKIDEntry.PKID + profilePKIDEntry := bav.GetPKIDForPublicKey(currentTxn.PublicKey) + if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { + return errors.Wrap(RuleErrorUpdateCoinLockupParamsOnInvalidPKID, + "_connectUpdateCoinLockupParams") } + profilePKID := profilePKIDEntry.PKID // Check if the transaction added a yield curve point. If it did, we restore the previous point. // If the previous point is nil meaning this point didn't have a previous, then we simply delete the current point. @@ -2096,7 +2036,7 @@ func (bav *UtxoView) _disconnectUpdateCoinLockupParams( } // Check if the transaction updated transfer restrictions. If it did, we reset the previous transfer restrictions. - if txMeta.NewLockupTransferRestrictions && !profilePKID.IsZeroPKID() { + if txMeta.NewLockupTransferRestrictions { // Fetch the profile entry and LockupTransferRestriction status. 
profileEntry := bav.GetProfileEntryForPKID(profilePKID) if profileEntry == nil || profileEntry.isDeleted { @@ -2108,9 +2048,6 @@ func (bav *UtxoView) _disconnectUpdateCoinLockupParams( profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus = operationData.PrevLockupTransferRestriction bav._setProfileEntryMappings(profileEntry) } - if txMeta.NewLockupTransferRestrictions && profilePKID.IsZeroPKID() { - bav.GlobalParamsEntry.LockedDESOTransferRestrictions = operationData.PrevLockupTransferRestriction - } // Decrement the operationIndex. We expect to find the basic transfer UtxoOps next. operationIndex-- @@ -2173,12 +2110,6 @@ func (bav *UtxoView) _connectCoinLockupTransfer( "_connectCoinLockupTransfer") } - // If this is a DeSo lockup, ensure the amount is less than 2**64. - if txMeta.ProfilePublicKey.IsZeroPublicKey() && !txMeta.LockedCoinsToTransferBaseUnits.IsUint64() { - return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupTransferOfDeSoCausesOverflow, - "_connectCoinLockupTransfer") - } - // Validate recipient and profile public keys as valid. var profileEntry *ProfileEntry if len(txMeta.RecipientPublicKey) != btcec.PubKeyBytesLenCompressed { @@ -2189,12 +2120,12 @@ func (bav *UtxoView) _connectCoinLockupTransfer( return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupTransferInvalidProfilePubKey, "_connectCoinLockupTransfer") } - if !txMeta.ProfilePublicKey.IsZeroPublicKey() { - profileEntry = bav.GetProfileEntryForPublicKey(txMeta.ProfilePublicKey.ToBytes()) - if profileEntry == nil || profileEntry.isDeleted { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupTransferOnNonExistentProfile, "_connectCoinLockupTransfer") - } + + // Ensure the locked profile exists. + profileEntry = bav.GetProfileEntryForPublicKey(txMeta.ProfilePublicKey.ToBytes()) + if profileEntry == nil || profileEntry.isDeleted { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupTransferOnNonExistentProfile, "_connectCoinLockupTransfer") } // Fetch PKIDs for the recipient, sender, and profile. 
@@ -2267,12 +2198,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( prevReceiverLockedBalanceEntry := receiverLockedBalanceEntry.Copy() // Fetch the transfer restrictions attached to the transfer. - var transferRestrictionStatus TransferRestrictionStatus - if profilePKID.IsZeroPKID() { - transferRestrictionStatus = bav.GlobalParamsEntry.LockedDESOTransferRestrictions - } else { - transferRestrictionStatus = profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus - } + transferRestrictionStatus := profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus // Check if transfers are limited to profile owner only. if transferRestrictionStatus == TransferRestrictionStatusProfileOwnerOnly && !profilePKID.Eq(senderPKID) { @@ -2449,12 +2375,18 @@ func (bav *UtxoView) _connectCoinUnlock( return 0, 0, nil, errors.Wrap(RuleErrorDAOCoinInvalidPubKey, "_connectCoinUnlock") } - if !txMeta.ProfilePublicKey.IsZeroPublicKey() { - profileEntry = bav.GetProfileEntryForPublicKey(txMeta.ProfilePublicKey.ToBytes()) - if profileEntry == nil || profileEntry.isDeleted { - return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockOnNonExistentProfile, - "_connectCoinUnlock") - } + + // Check that we're not unlocking the zero public key. + if txMeta.ProfilePublicKey.IsZeroPublicKey() { + return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockCannotUnlockZeroPublicKey, + "_connectCoinUnlock") + } + + // Check that the associated public key exists. + profileEntry = bav.GetProfileEntryForPublicKey(txMeta.ProfilePublicKey.ToBytes()) + if profileEntry == nil || profileEntry.isDeleted { + return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockOnNonExistentProfile, + "_connectCoinUnlock") } // Convert the TransactorPublicKey to HODLerPKID @@ -2466,17 +2398,12 @@ func (bav *UtxoView) _connectCoinUnlock( hodlerPKID := transactorPKIDEntry.PKID // Convert the ProfilePublicKey to ProfilePKID. 
- var profilePKID *PKID - if txMeta.ProfilePublicKey.IsZeroPublicKey() { - profilePKID = ZeroPKID.NewPKID() - } else { - profilePKIDEntry := bav.GetPKIDForPublicKey(txMeta.ProfilePublicKey.ToBytes()) - if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { - return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockInvalidProfilePKID, - "_connectCoinUnlock") - } - profilePKID = profilePKIDEntry.PKID + profilePKIDEntry := bav.GetPKIDForPublicKey(txMeta.ProfilePublicKey.ToBytes()) + if profilePKIDEntry == nil || profilePKIDEntry.isDeleted { + return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockInvalidProfilePKID, + "_connectCoinUnlock") } + profilePKID := profilePKIDEntry.PKID // Retrieve unlockable locked balance entries. unvestedUnlockableLockedBalanceEntries, vestedUnlockableLockedBalanceEntries, err := @@ -2558,52 +2485,33 @@ func (bav *UtxoView) _connectCoinUnlock( } // Credit the transactor with either DAO coins or DeSo for this unlock. - var prevTransactorBalanceEntry *BalanceEntry - var prevCoinEntry *CoinEntry - if profilePKID.IsZeroPKID() { - // Ensure the uint256 can be properly represented as a uint64. - if !unlockedBalance.IsUint64() { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinUnlockUnlockableDeSoOverflow, "_connectCoinUnlock") - } + prevTransactorBalanceEntry := + bav._getBalanceEntryForHODLerPKIDAndCreatorPKID(hodlerPKID, profilePKID, true) - // Add the unlockedBalance to the transactors DeSo balance. - // NOTE: _addBalance checks for balance overflow. - utxoOp, err := bav._addBalance(unlockedBalance.Uint64(), txn.PublicKey) - if err != nil { - return 0, 0, nil, errors.Wrap(err, "_connectCoinUnlock: error"+ - "adding CoinToUnlockBaseUnits to the transactor balance: ") - } - utxoOpsForTxn = append(utxoOpsForTxn, utxoOp) - } else { - prevTransactorBalanceEntry = - bav._getBalanceEntryForHODLerPKIDAndCreatorPKID(hodlerPKID, profilePKID, true) - - // Credit the transactor with the unlock amount. 
- newTransactorBalanceEntry := prevTransactorBalanceEntry.Copy() - newTransactorBalanceNanos, err := SafeUint256().Add(&newTransactorBalanceEntry.BalanceNanos, unlockedBalance) - if err != nil { - return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockCausesBalanceOverflow, - "_connectCoinUnlock") - } - newTransactorBalanceEntry.BalanceNanos = *newTransactorBalanceNanos - bav._setBalanceEntryMappings(newTransactorBalanceEntry, true) + // Credit the transactor with the unlock amount. + newTransactorBalanceEntry := prevTransactorBalanceEntry.Copy() + newTransactorBalanceNanos, err := SafeUint256().Add(&newTransactorBalanceEntry.BalanceNanos, unlockedBalance) + if err != nil { + return 0, 0, nil, errors.Wrap(RuleErrorCoinUnlockCausesBalanceOverflow, + "_connectCoinUnlock") + } + newTransactorBalanceEntry.BalanceNanos = *newTransactorBalanceNanos + bav._setBalanceEntryMappings(newTransactorBalanceEntry, true) - // Update CoinsInCirculation and NumberOfHolders to accurately reflect the changing balance. - prevCoinEntry = profileEntry.DAOCoinEntry.Copy() - newCoinsInCirculationNanos, err := SafeUint256().Add( - &profileEntry.DAOCoinEntry.CoinsInCirculationNanos, - unlockedBalance) - if err != nil { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinUnlockCausesCoinsInCirculationOverflow, "_connectCoinUnlock") - } - profileEntry.DAOCoinEntry.CoinsInCirculationNanos = *newCoinsInCirculationNanos - if prevTransactorBalanceEntry.BalanceNanos.IsZero() && !newTransactorBalanceEntry.BalanceNanos.IsZero() { - profileEntry.DAOCoinEntry.NumberOfHolders++ - } - bav._setProfileEntryMappings(profileEntry) + // Update CoinsInCirculation and NumberOfHolders to accurately reflect the changing balance. 
+ prevCoinEntry := profileEntry.DAOCoinEntry.Copy() + newCoinsInCirculationNanos, err := SafeUint256().Add( + &profileEntry.DAOCoinEntry.CoinsInCirculationNanos, + unlockedBalance) + if err != nil { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinUnlockCausesCoinsInCirculationOverflow, "_connectCoinUnlock") + } + profileEntry.DAOCoinEntry.CoinsInCirculationNanos = *newCoinsInCirculationNanos + if prevTransactorBalanceEntry.BalanceNanos.IsZero() && !newTransactorBalanceEntry.BalanceNanos.IsZero() { + profileEntry.DAOCoinEntry.NumberOfHolders++ } + bav._setProfileEntryMappings(profileEntry) // Create a UtxoOp for the operation. utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ @@ -2734,60 +2642,37 @@ func (bav *UtxoView) _disconnectCoinUnlock( bav._setLockedBalanceEntry(prevLockedBalanceEntry) } - // Reverting the BalanceEntry (if applicable) should not result in more coins. + // Reverting the BalanceEntry should not result in more coins. profilePKID := operationData.PrevLockedBalanceEntries[0].ProfilePKID hodlerPKID := operationData.PrevLockedBalanceEntries[0].HODLerPKID - if !profilePKID.IsZeroPKID() { - balanceEntry := bav._getBalanceEntryForHODLerPKIDAndCreatorPKID(hodlerPKID, profilePKID, true) - if operationData.PrevTransactorBalanceEntry == nil || operationData.PrevTransactorBalanceEntry.isDeleted { - return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + - "but found nil or deleted previous balance entry") - } - if operationData.PrevTransactorBalanceEntry.BalanceNanos.Gt(&balanceEntry.BalanceNanos) { - return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + - "would cause balance entry balance to increase") - } - if operationData.PrevTransactorBalanceEntry.BalanceNanos.IsZero() { - bav._deleteBalanceEntryMappingsWithPKIDs(operationData.PrevTransactorBalanceEntry, - hodlerPKID, profilePKID, true) - } else { - bav._setBalanceEntryMappings(operationData.PrevTransactorBalanceEntry, true) - } + 
balanceEntry := bav._getBalanceEntryForHODLerPKIDAndCreatorPKID(hodlerPKID, profilePKID, true) + if operationData.PrevTransactorBalanceEntry == nil || operationData.PrevTransactorBalanceEntry.isDeleted { + return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + + "but found nil or deleted previous balance entry") } - - // Reverting the CoinEntry (if applicable) should not result in more coins in circulation. - if !profilePKID.IsZeroPKID() { - profileEntry := bav.GetProfileEntryForPKID(profilePKID) - if profileEntry == nil || profileEntry.isDeleted { - return fmt.Errorf("_disconnectCoinUnlock: Trying to revert coin unlock " + - "but found nil profile entry; this shouldn't be possible") - } - if operationData.PrevCoinEntry.CoinsInCirculationNanos.Gt(&profileEntry.DAOCoinEntry.CoinsInCirculationNanos) { - return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + - "would cause profile entry coin entry balance to increase") - } - profileEntry.DAOCoinEntry = *operationData.PrevCoinEntry - bav._setProfileEntryMappings(profileEntry) + if operationData.PrevTransactorBalanceEntry.BalanceNanos.Gt(&balanceEntry.BalanceNanos) { + return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + + "would cause balance entry balance to increase") + } + if operationData.PrevTransactorBalanceEntry.BalanceNanos.IsZero() { + bav._deleteBalanceEntryMappingsWithPKIDs(operationData.PrevTransactorBalanceEntry, + hodlerPKID, profilePKID, true) + } else { + bav._setBalanceEntryMappings(operationData.PrevTransactorBalanceEntry, true) } - // Reverting the DeSo addition should not result in more coins. - if profilePKID.IsZeroPKID() { - // Revert the DeSo add. 
- operationData = utxoOpsForTxn[operationIndex] - if operationData.Type != OperationTypeAddBalance { - return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeAddBalance "+ - "but found type %v", operationData.Type) - } - if !bytes.Equal(operationData.BalancePublicKey, currentTxn.PublicKey) { - return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeAddBalance " + - "but found mismatched public keys") - } - err := bav._unAddBalance(operationData.BalanceAmountNanos, operationData.BalancePublicKey) - if err != nil { - return errors.Wrapf(err, "_disconnectCoinLockup: Problem unAdding balance of %v for the "+ - "transactor", operationData.BalanceAmountNanos) - } + // Reverting the CoinEntry should not result in more coins in circulation. + profileEntry := bav.GetProfileEntryForPKID(profilePKID) + if profileEntry == nil || profileEntry.isDeleted { + return fmt.Errorf("_disconnectCoinUnlock: Trying to revert coin unlock " + + "but found nil profile entry; this shouldn't be possible") + } + if operationData.PrevCoinEntry.CoinsInCirculationNanos.Gt(&profileEntry.DAOCoinEntry.CoinsInCirculationNanos) { + return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + + "would cause profile entry coin entry balance to increase") } + profileEntry.DAOCoinEntry = *operationData.PrevCoinEntry + bav._setProfileEntryMappings(profileEntry) // By here we only need to disconnect the basic transfer associated with the transaction. basicTransferOps := utxoOpsForTxn[:operationIndex] diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index bea15b4a5..befc736b3 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -250,15 +250,14 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { require.Contains(t, err.Error(), RuleErrorCoinLockupOnNonExistentProfile) } - // Attempt to perform an excessive DESO lockup (more than 2**64 DESO). 
- // (This should fail -- RuleErrorCoinLockupExcessiveDeSoLockup) - // NOTE: This also checks that DESO lockups do not require an associated profile. + // Attempt to perform a lockup with the zero public key as the profile. + // (This should fail -- RuleErrorCoinLockupCannotLockupZeroKey) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m0Pub, - 0, 0, MaxUint256, 0) - require.Contains(t, err.Error(), RuleErrorCoinLockupExcessiveDeSoLockup) + 0, 0, uint256.NewInt().SetUint64(1), 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupCannotLockupZeroKey) } // Attempt to perform a lockup with zero lockup duration. @@ -266,8 +265,8 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, - m0Pub, m0Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), - m0Pub, 0, 0, uint256.NewInt().SetUint64(1), 0) + m0Pub, m0Priv, m0Pub, m0Pub, + 0, 0, uint256.NewInt().SetUint64(1), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidLockupDuration) } @@ -276,7 +275,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, - m0Pub, m0Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m0Pub, + m0Pub, m0Priv, m0Pub, m0Pub, 0, 0, uint256.NewInt().SetUint64(1), 1) require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidLockupDuration) } @@ -286,7 +285,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, - m0Pub, m0Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m0Pub, + m0Pub, 
m0Priv, m0Pub, m0Pub, 1000, 900, uint256.NewInt().SetUint64(1), 950) require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidVestingEndTimestamp) } @@ -301,16 +300,6 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { require.Contains(t, err.Error(), RuleErrorCoinLockupZeroPublicKeyAsRecipient) } - // Attempt to perform a lockup in excess of the user's DESO balance. - // (This should fail -- RuleErrorCoinLockupInsufficientDeSo) - { - _, _, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, - m0Pub, m0Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m0Pub, - 1, 1, uint256.NewInt().SetUint64(1e10), 0) - require.Contains(t, err.Error(), RuleErrorCoinLockupInsufficientDeSo) - } - // Attempt to perform a lockup in excess of the user's coin balance. // (This should fail -- RuleErrorCoinLockupInsufficientCoins) { @@ -482,21 +471,6 @@ func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { require.Contains(t, err.Error(), RuleErrorCoinLockupTransferOfAmountZero) } - // Attempt to perform a locked DESO transfer in excess of 2**64. - // (This should fail -- RuleErrorCoinLockupTransferOfDeSoCausesOverflow) - { - _, _, _, err := _coinLockupTransfer( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - NewPublicKey(m3PkBytes), - &ZeroPublicKey, - 0, - MaxUint256) - require.Contains(t, err.Error(), RuleErrorCoinLockupTransferOfDeSoCausesOverflow) - } - // Attempt to perform a coin lockup transfer on coins from a non-existent profile. // (This should fail -- RuleErrorCoinLockupTransferOnNonExistentProfile) { @@ -660,6 +634,19 @@ func TestCoinUnlockTxnRuleErrors(t *testing.T) { require.Contains(t, err.Error(), RuleErrorCoinUnlockOnNonExistentProfile) } + // Attempt to unlock coins with the zero public key as the profile. 
+ // (This should fail -- RuleErrorCoinUnlockCannotUnlockZeroPublicKey) + { + _, _, _, err := _coinUnlockWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), + 0) + require.Contains(t, err.Error(), RuleErrorCoinUnlockCannotUnlockZeroPublicKey) + } + // Attempt to unlock locked coins which do not exist. // (This should fail -- RuleErrorCoinUnlockNoUnlockableCoinsFound) { @@ -1207,266 +1194,6 @@ func TestLockupStandardProfileFlows(t *testing.T) { } } -func TestLockupStandardDeSoFlows(t *testing.T) { - // Initialize test chain, miner, and testMeta - testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) - - // Initialize m0, m1, m2, m3, m4, and paramUpdater - _setUpProfilesAndMintM0M1DAOCoins(testMeta) - - // Ensure that paramUpdater is set in the testMeta - testMeta.params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - - // Have paramUpdater create a yield curve which consists of: - // 1 year @ 5% yield - // 2 years @ 10% yield - // Remove the yield curve point. - { - _updateCoinLockupParamsWithTestMeta( - testMeta, - testMeta.feeRateNanosPerKb, - paramUpdaterPub, - paramUpdaterPriv, - 365*24*60*60*1e9, - 500, - false, - false, - TransferRestrictionStatusUnrestricted, - ) - _updateCoinLockupParamsWithTestMeta( - testMeta, - testMeta.feeRateNanosPerKb, - paramUpdaterPub, - paramUpdaterPriv, - 2*365*24*60*60*1e9, - 1000, - false, - true, - TransferRestrictionStatusProfileOwnerOnly, - ) - } - - // Have m1 lockup 500 nDESO for half of a year. - // We set the connecting block timestamp to 1 year from UNIX start to give it a non-zero value. - // We expect this to create a locked balance entry with 500 base units locked inside. - { - // Get m1's DESO balance. 
- utxoView, err := NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m1OriginalBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) - require.NoError(t, err) - - _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, testMeta.feeRateNanosPerKb, - m1Pub, m1Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m1Pub, - 365*24*60*60*1e9+365*12*60*60*1e9, - 365*24*60*60*1e9+365*12*60*60*1e9, - uint256.NewInt().SetUint64(500), - 365*24*60*60*1e9) - - // Check to ensure the resulting locked balance entry has 525 base units. - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) - m1PKID := m1PKIDEntry.PKID - lockedBalanceEntry, err := - utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( - m1PKID, - ZeroPKID.NewPKID(), - 365*24*60*60*1e9+365*12*60*60*1e9, - 365*24*60*60*1e9+365*12*60*60*1e9) - require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits) - - // Check that m1's DESO balance has decreased by more than 500 nDESO (some extra was spent on fees). - m1NewBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) - require.NoError(t, err) - require.Greater(t, m1OriginalBalance, m1NewBalance) - require.Greater(t, m1OriginalBalance-m1NewBalance, uint64(500)) - } - - // Have m1 lockup 500 nDESO for one year. - // We set the connecting block timestamp to 1 year from UNIX start to give it a non-zero value. - // We expect this to create a locked balance entry with 525 base units locked inside. - { - // Get m1's DESO balance. 
- utxoView, err := NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m1OriginalBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) - require.NoError(t, err) - - _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, testMeta.feeRateNanosPerKb, - m1Pub, m1Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m1Pub, - 2*365*24*60*60*1e9, - 2*365*24*60*60*1e9, - uint256.NewInt().SetUint64(500), - 365*24*60*60*1e9) - - // Check to ensure the resulting locked balance entry has 525 base units. - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) - m1PKID := m1PKIDEntry.PKID - lockedBalanceEntry, err := - utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( - m1PKID, - ZeroPKID.NewPKID(), - 2*365*24*60*60*1e9, - 2*365*24*60*60*1e9) - require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(525), lockedBalanceEntry.BalanceBaseUnits) - - // Check that m1's DESO balance has decreased by more than 500 nDESO (some extra was spent on fees). - m1NewBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) - require.NoError(t, err) - require.Greater(t, m1OriginalBalance, m1NewBalance) - require.Greater(t, m1OriginalBalance-m1NewBalance, uint64(500)) - } - - // Have m1 lockup 500 nDESO for one and a half years. - // We set the connecting block timestamp to 1 year from UNIX start to give it a non-zero value. - // We expect this to create a locked balance entry with 525 base units locked inside. - { - // Get m1's DESO balance. 
- utxoView, err := NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m1OriginalBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) - require.NoError(t, err) - - _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, testMeta.feeRateNanosPerKb, - m1Pub, m1Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m1Pub, - 2*365*24*60*60*1e9+365*12*60*60*1e9, - 2*365*24*60*60*1e9+365*12*60*60*1e9, - uint256.NewInt().SetUint64(500), - 365*24*60*60*1e9) - - // Check to ensure the resulting locked balance entry has 525 base units. - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) - m1PKID := m1PKIDEntry.PKID - lockedBalanceEntry, err := - utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( - m1PKID, - ZeroPKID.NewPKID(), - 2*365*24*60*60*1e9+365*12*60*60*1e9, - 2*365*24*60*60*1e9+365*12*60*60*1e9) - require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(525), lockedBalanceEntry.BalanceBaseUnits) - - // Check that m1's DESO balance has decreased by more than 500 nDESO (some extra was spent on fees). - m1NewBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) - require.NoError(t, err) - require.Greater(t, m1OriginalBalance, m1NewBalance) - require.Greater(t, m1OriginalBalance-m1NewBalance, uint64(500)) - } - - // Have m1 lockup 500 nDESO for two years. - // We set the connecting block timestamp to 1 year from UNIX start to give it a non-zero value. - // We expect this to create a locked balance entry with 600 base units locked inside. - { - // Get m1's DESO balance. 
- utxoView, err := NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m1OriginalBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) - require.NoError(t, err) - - _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, testMeta.feeRateNanosPerKb, - m1Pub, m1Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m1Pub, - 3*365*24*60*60*1e9, - 3*365*24*60*60*1e9, - uint256.NewInt().SetUint64(500), - 365*24*60*60*1e9) - - // Check to ensure the resulting locked balance entry has 525 base units. - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) - m1PKID := m1PKIDEntry.PKID - lockedBalanceEntry, err := - utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( - m1PKID, - ZeroPKID.NewPKID(), - 3*365*24*60*60*1e9, - 3*365*24*60*60*1e9) - require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(600), lockedBalanceEntry.BalanceBaseUnits) - - // Check that m1's DESO balance has decreased by more than 500 nDESO (some extra was spent on fees). - m1NewBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) - require.NoError(t, err) - require.Greater(t, m1OriginalBalance, m1NewBalance) - require.Greater(t, m1OriginalBalance-m1NewBalance, uint64(500)) - } - - // Check to make sure locked DESO is not liquid. - { - _, _, _, err := _coinLockupTransfer( - t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, - m1Pub, - m1Priv, - NewPublicKey(m3PkBytes), - &ZeroPublicKey, - 2*365*24*60*60*1e9, - uint256.NewInt().SetUint64(100), - ) - require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToProfileOwner) - } - - // Check to make sure tokens can be unlocked following a year. 
- // Ensure that the associated balance entry increases by less than 1025 on unlock. - // One locked balance entry exists at half a year and has 500 nDESO, the other at one year - // and has 525 nDESO. - { - // Get m1's DESO balance. - utxoView, err := NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m1OriginalBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) - require.NoError(t, err) - - _, _, _, err = _coinUnlockWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, - m1Pub, - m1Priv, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), - 2*365*24*60*60*1e9+1, - ) - require.NoError(t, err) - - // Check to ensure the resulting locked balance entry is deleted. - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) - m1PKID := m1PKIDEntry.PKID - lockedBalanceEntry, err := - utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( - m1PKID, - ZeroPKID.NewPKID(), - 2*365*24*60*60*1e9, - 2*365*24*60*60*1e9) - require.True(t, lockedBalanceEntry == nil) - - // Check that m1's DESO balance has increased by less than 1025 nDESO (some extra was spent on fees). 
- m1NewBalance, err := utxoView.GetDeSoBalanceNanosForPublicKey(m1PkBytes) - require.NoError(t, err) - require.Greater(t, m1NewBalance, m1OriginalBalance) - require.Less(t, m1NewBalance-m1OriginalBalance, uint64(1025)) - } -} - func TestLockupWithDerivedKey(t *testing.T) { var derivedKeyPriv string var derivedKeyPub string @@ -2220,80 +1947,6 @@ func TestLockupDisconnects(t *testing.T) { balanceEntry, _, _ = utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) require.Equal(t, *uint256.NewInt().SetUint64(1000000), balanceEntry.BalanceNanos) - // - // Test Coin Lockup for DESO - // - - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - originalBalance, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( - m2PkBytes, testMeta.chain.BlockTip().Height) - require.NoError(t, err) - utxoOps1, txn1, _, err = _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, - m2Pub, m2Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m2Pub, - 2*365*24*60*60*1e9, - 2*365*24*60*60*1e9, - uint256.NewInt().SetUint64(500), - 365*24*60*60*1e9) - require.NoError(t, err) - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - intermediateBalance, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( - m2PkBytes, testMeta.chain.BlockTip().Height) - require.NoError(t, err) - utxoOps2, txn2, _, err = _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, - m2Pub, m2Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m2Pub, - 2*365*24*60*60*1e9, - 2*365*24*60*60*1e9, - uint256.NewInt().SetUint64(500), - 365*24*60*60*1e9) - require.NoError(t, err) - txHash = txn2.Hash() - blockHeight = testMeta.chain.BlockTip().Height + 
1 - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - err = utxoView.DisconnectTransaction(txn2, txHash, utxoOps2, blockHeight) - require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) - require.NoError(t, err) - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m2PKID := utxoView.GetPKIDForPublicKey(m2PkBytes).PKID - lockedBalanceEntry, err = - utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( - m2PKID, - &ZeroPKID, - 2*365*24*60*60*1e9, - 2*365*24*60*60*1e9) - require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits) - currentBalance, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( - m2PkBytes, testMeta.chain.BlockTip().Height) - require.NoError(t, err) - require.Equal(t, currentBalance, intermediateBalance) - err = utxoView.DisconnectTransaction(txn1, txn1.Hash(), utxoOps1, blockHeight) - require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) - require.NoError(t, err) - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - lockedBalanceEntry, err = - utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( - m2PKID, - &ZeroPKID, - 2*365*24*60*60*1e9, - 2*365*24*60*60*1e9) - require.True(t, lockedBalanceEntry == nil) - currentBalance, err = utxoView.GetSpendableDeSoBalanceNanosForPublicKey( - m2PkBytes, testMeta.chain.BlockTip().Height) - require.NoError(t, err) - require.Equal(t, currentBalance, originalBalance) - // // Test Update Coin Lockup Params for Profiles // @@ -2383,92 +2036,6 @@ func TestLockupDisconnects(t *testing.T) { profileEntry = utxoView.GetProfileEntryForPKID(m1PKID) require.Equal(t, 
profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus, TransferRestrictionStatusProfileOwnerOnly) - // - // Test Update Coin Lockup Params for DESO - // - - // Test adding a lockup curve point and modifying lockup transfer restrictions. - // Ensure upon disconnect the original point and restrictions remain. - _, _, _, err = _updateCoinLockupParams( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - paramUpdaterPub, - paramUpdaterPriv, - 365*24*60*60*1e9, - 1000, - false, - true, - TransferRestrictionStatusProfileOwnerOnly, - ) - require.NoError(t, err) - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - utxoOps, txn, _, err = _updateCoinLockupParams( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - paramUpdaterPub, - paramUpdaterPriv, - 365*24*60*60*1e9, - 2500, - false, - true, - TransferRestrictionStatusPermanentlyUnrestricted, - ) - require.NoError(t, err) - txHash = txn.Hash() - blockHeight = testMeta.chain.BlockTip().Height + 1 - err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) - require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) - require.NoError(t, err) - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - leftYieldCurvePoint, rightYieldCurvePoint, err = - utxoView.GetLocalYieldCurvePoints(&ZeroPKID, 365*24*60*60*1e9) - require.NoError(t, err) - require.True(t, leftYieldCurvePoint == nil) - require.Equal(t, rightYieldCurvePoint.LockupYieldAPYBasisPoints, uint64(1000)) - require.Equal(t, rightYieldCurvePoint.LockupDurationNanoSecs, int64(365*24*60*60*1e9)) - require.Equal(t, utxoView.GlobalParamsEntry.LockedDESOTransferRestrictions, TransferRestrictionStatusProfileOwnerOnly) - - // Test Deleting a Yield Curve Point and Reverting Said Transaction - utxoView, err = NewUtxoView( 
- testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - utxoOps, txn, _, err = _updateCoinLockupParams( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - paramUpdaterPub, - paramUpdaterPriv, - 365*24*60*60*1e9, - 0, - true, - false, - TransferRestrictionStatusUnrestricted, - ) - require.NoError(t, err) - leftYieldCurvePoint, rightYieldCurvePoint, err = - utxoView.GetLocalYieldCurvePoints(&ZeroPKID, 365*24*60*60*1e9) - require.NoError(t, err) - require.True(t, leftYieldCurvePoint == nil) - require.True(t, rightYieldCurvePoint == nil) - txHash = txn.Hash() - blockHeight = testMeta.chain.BlockTip().Height + 1 - err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) - require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) - require.NoError(t, err) - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - leftYieldCurvePoint, rightYieldCurvePoint, err = - utxoView.GetLocalYieldCurvePoints(&ZeroPKID, 365*24*60*60*1e9) - require.NoError(t, err) - require.True(t, leftYieldCurvePoint == nil) - require.Equal(t, rightYieldCurvePoint.LockupYieldAPYBasisPoints, uint64(1000)) - require.Equal(t, rightYieldCurvePoint.LockupDurationNanoSecs, int64(365*24*60*60*1e9)) - require.Equal(t, utxoView.GlobalParamsEntry.LockedDESOTransferRestrictions, TransferRestrictionStatusProfileOwnerOnly) - // // Test Coin Lockup Transfers // @@ -2656,87 +2223,6 @@ func TestLockupDisconnects(t *testing.T) { m4be, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m4PkBytes, m4PkBytes) require.Equal(t, *uint256.NewInt(), m4be.BalanceNanos) require.Equal(t, *MaxUint256, m4LockedBalanceEntry.BalanceBaseUnits) - - // - // Test Coin Unlocks for DESO - // - - // Lockup 500 nDESO with m4. Check to ensure balances are accurately updated. 
- utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - startingBalance, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( - m4PkBytes, testMeta.chain.BlockTip().Height) - require.NoError(t, err) - _coinLockupWithTestMetaAndConnectTimestamp( - testMeta, testMeta.feeRateNanosPerKb, - m4Pub, m4Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m4Pub, - 1000, 1000, uint256.NewInt().SetUint64(500), 0) - - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m4LockedBalanceEntry, err = - utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( - m4PKID, - &ZeroPKID, - 1000, - 1000) - preUnlockBalance, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( - m4PkBytes, testMeta.chain.BlockTip().Height) - require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(500), m4LockedBalanceEntry.BalanceBaseUnits) - require.Greater(t, startingBalance, preUnlockBalance) - require.Greater(t, startingBalance-preUnlockBalance, uint64(500)) - - utxoOps, txn, _, err = _coinUnlockWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, - testMeta.feeRateNanosPerKb, - m4Pub, - m4Priv, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), - 1001) - - // Ensure unlock functioned properly - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m4LockedBalanceEntry, err = - utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( - m4PKID, - &ZeroPKID, - 1000, - 1000) - currentBalance, err = utxoView.GetSpendableDeSoBalanceNanosForPublicKey( - m4PkBytes, testMeta.chain.BlockTip().Height) - require.NoError(t, err) - require.True(t, m4LockedBalanceEntry 
== nil) - require.Greater(t, startingBalance, currentBalance) - require.Less(t, startingBalance-currentBalance, uint64(500)) - - // Execute the disconnect and ensure it functions correctly - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - txHash = txn.Hash() - blockHeight = testMeta.chain.BlockTip().Height + 1 - err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) - require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) - require.NoError(t, err) - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - m4LockedBalanceEntry, err = - utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( - m4PKID, - &ZeroPKID, - 1000, - 1000) - currentBalance, err = utxoView.GetSpendableDeSoBalanceNanosForPublicKey( - m4PkBytes, testMeta.chain.BlockTip().Height) - require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(500), m4LockedBalanceEntry.BalanceBaseUnits) - require.Equal(t, preUnlockBalance, currentBalance) } func TestLockupBlockConnectsAndDisconnects(t *testing.T) { @@ -3103,24 +2589,6 @@ func TestCoinLockupIndirectRecipients(t *testing.T) { require.True(t, m0LockedBalanceEntry == nil) } -func TestVestedDeSoLockupAsInvalid(t *testing.T) { - // Initialize test chain, miner, and testMeta - testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) - - // Initialize m0, m1, m2, m3, m4, and paramUpdater - _setUpProfilesAndMintM0M1DAOCoins(testMeta) - - // Ensure that it's impossible for m0 to lockup deso in a vested lockup. 
- { - zeroPkString := Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params) - _, _, _, err := _coinLockupWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, - m0Pub, m0Priv, zeroPkString, m0Pub, - 1050, 1100, uint256.NewInt().SetUint64(1000), 0) - require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidVestedTransactor) - } -} - func TestSimpleVestedLockup(t *testing.T) { // Initialize test chain, miner, and testMeta testMeta := _setUpMinerAndTestMetaForTimestampBasedLockupTests(t) @@ -4480,12 +3948,7 @@ func _coinLockupWithConnectTimestamp( // Check that UtxoOps following connection have the correct type require.Equal(OperationTypeSpendBalance, utxoOps[0].Type) - if NewPublicKey(profilePkBytes).IsZeroPublicKey() { - require.Equal(OperationTypeSpendBalance, utxoOps[1].Type) - require.Equal(OperationTypeCoinLockup, utxoOps[2].Type) - } else { - require.Equal(OperationTypeCoinLockup, utxoOps[1].Type) - } + require.Equal(OperationTypeCoinLockup, utxoOps[1].Type) // Ensure the transaction can be flushed without issue before returning require.NoError(utxoView.FlushToDb(uint64(blockHeight))) @@ -4723,12 +4186,7 @@ func _coinUnlockWithConnectTimestamp(t *testing.T, chain *Blockchain, db *badger // Check that UtxoOps following connection have the correct type require.Equal(OperationTypeSpendBalance, utxoOps[0].Type) - if NewPublicKey(profilePkBytes).IsZeroPublicKey() { - require.Equal(OperationTypeAddBalance, utxoOps[1].Type) - require.Equal(OperationTypeCoinUnlock, utxoOps[2].Type) - } else { - require.Equal(OperationTypeCoinUnlock, utxoOps[1].Type) - } + require.Equal(OperationTypeCoinUnlock, utxoOps[1].Type) // Ensure the transaction can be flushed without issue before returning require.NoError(utxoView.FlushToDb(uint64(blockHeight))) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 43e0bc009..7de06f83a 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -4083,10 
+4083,6 @@ type GlobalParamsEntry struct { // blocks) before they are jailed. JailInactiveValidatorGracePeriodEpochs uint64 - // LockedDESOTransferRestrictions specifies the transfer restriction status - // of locked unvested DESO. - LockedDESOTransferRestrictions TransferRestrictionStatus - // MaximumVestedIntersectionsPerLockupTransaction is used to limit the computational complexity of // vested lockup transactions. Essentially, vested lockups may overlap in time creating either // significant complexity on the lockup transaction or the unlock transaction. As a simple example, @@ -4153,7 +4149,6 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { StakingRewardsAPYBasisPoints: gp.StakingRewardsAPYBasisPoints, EpochDurationNumBlocks: gp.EpochDurationNumBlocks, JailInactiveValidatorGracePeriodEpochs: gp.JailInactiveValidatorGracePeriodEpochs, - LockedDESOTransferRestrictions: gp.LockedDESOTransferRestrictions, MaximumVestedIntersectionsPerLockupTransaction: gp.MaximumVestedIntersectionsPerLockupTransaction, FeeBucketGrowthRateBasisPoints: gp.FeeBucketGrowthRateBasisPoints, FailingTransactionBMFMultiplierBasisPoints: gp.FailingTransactionBMFMultiplierBasisPoints, @@ -4180,7 +4175,6 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.StakingRewardsAPYBasisPoints)...) data = append(data, UintToBuf(gp.EpochDurationNumBlocks)...) data = append(data, UintToBuf(gp.JailInactiveValidatorGracePeriodEpochs)...) - data = append(data, byte(gp.LockedDESOTransferRestrictions)) data = append(data, IntToBuf(int64(gp.MaximumVestedIntersectionsPerLockupTransaction))...) data = append(data, UintToBuf(gp.FeeBucketGrowthRateBasisPoints)...) data = append(data, UintToBuf(gp.FailingTransactionBMFMultiplierBasisPoints)...) 
@@ -4250,11 +4244,6 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading JailInactiveValidatorGracePeriodEpochs: ") } - lockedDESOTransferRestrictions, err := rr.ReadByte() - if err != nil { - return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading LockedDESOTransferRestrictions: ") - } - gp.LockedDESOTransferRestrictions = TransferRestrictionStatus(lockedDESOTransferRestrictions) maximumVestedIntersectionsPerLockupTransaction, err := ReadVarint(rr) if err != nil { return errors.Wrapf(err, diff --git a/lib/errors.go b/lib/errors.go index 05fefd8b0..14309c4ad 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -498,20 +498,18 @@ const ( RuleErrorCoinLockupInvalidVestedTransactor RuleError = "RuleErrorCoinLockupInvalidVestedTransactor" RuleErrorCoinLockupBalanceEntryDoesNotExist RuleError = "RuleErrorCoinLockupBalanceEntryDoesNotExist" RuleErrorCoinLockupInsufficientCoins RuleError = "RuleErrorCoinLockupInsufficientCoins" - RuleErrorCoinLockupInsufficientDeSo RuleError = "RuleErrorCoinLockupInsufficientDeSo" RuleErrorCoinLockupInvalidProfilePubKey RuleError = "RuleErrorCoinLockupInvalidProfilePubKey" + RuleErrorCoinLockupCannotLockupZeroKey RuleError = "RuleErrorCoinLockupCannotLockupZeroKey" RuleErrorCoinLockupInvalidRecipientPubKey RuleError = "RuleErrorCoinLockupInvalidRecipientPubKey" RuleErrorCoinLockupZeroPublicKeyAsRecipient RuleError = "RuleErrorCoinLockupZeroPublicKeyAsRecipient" RuleErrorCoinLockupOnNonExistentProfile RuleError = "RuleErrorCoinLockupOnNonExistentProfile" RuleErrorCoinLockupOfAmountZero RuleError = "RuleErrorCoinLockupOfAmountZero" - RuleErrorCoinLockupExcessiveDeSoLockup RuleError = "RuleErrorCoinLockupExcessiveDeSoLockup" RuleErrorCoinLockupNonExistentProfile RuleError = "RuleErrorCoinLockupNonExistentProfile" RuleErrorCoinLockupCoinYieldOverflow RuleError = "RuleErrorCoinLockupCoinYieldOverflow" 
RuleErrorCoinLockupYieldCausesOverflow RuleError = "RuleErrorCoinLockupYieldCausesOverflow" RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry RuleError = "RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry" RuleErrorCoinLockupViolatesVestingIntersectionLimit RuleError = "RuleErrorCoinLockupViolatesVestingIntersectionLimit" RuleErrorCoinLockupTransferOfAmountZero RuleError = "RuleErrorCoinLockupTransferOfAmountZero" - RuleErrorCoinLockupTransferOfDeSoCausesOverflow RuleError = "RuleErrorCoinLockupTransferOfDeSoCausesOverflow" RuleErrorCoinLockupTransferRestrictedToProfileOwner RuleError = "RuleErrorCoinLockupTransferRestrictedToCreator" RuleErrorCoinLockupTransferRestrictedToDAOMembers RuleError = "RuleErrorCoinLockupTransferRestrictedToDAOMembers" RuleErrorCoinLockupTransferSenderEqualsReceiver RuleError = "RuleErrorCoinLockupTransferSenderEqualsReceiver" @@ -522,11 +520,11 @@ const ( RuleErrorCoinLockupTransferOnNonExistentProfile RuleError = "RuleErrorCoinLockupTransferOnNonExistentProfile" RuleErrorCoinLockupTransferToZeroPublicKey RuleError = "RuleErrorCoinLockupTransferToZeroPublicKey" RuleErrorCoinUnlockOnNonExistentProfile RuleError = "RuleErrorCoinUnlockOnNonExistentProfile" + RuleErrorCoinUnlockCannotUnlockZeroPublicKey RuleError = "RuleErrorCoinUnlockCannotUnlockZeroPublicKey" RuleErrorCoinUnlockInvalidHODLerPKID RuleError = "RuleErrorCoinUnlockInvalidHODLerPKID" RuleErrorCoinUnlockInvalidProfilePKID RuleError = "RuleErrorCoinUnlockInvalidProfilePKID" RuleErrorCoinUnlockNoUnlockableCoinsFound RuleError = "RuleErrorCoinUnlockNoUnlockableCoinsFound" RuleErrorCoinUnlockUnlockableCoinsOverflow RuleError = "RuleErrorCoinUnlockUnlockableCoinsOverflow" - RuleErrorCoinUnlockUnlockableDeSoOverflow RuleError = "RuleErrorCoinUnlockUnlockableCoinsOverflow" RuleErrorCoinUnlockCausesBalanceOverflow RuleError = "RuleErrorCoinUnlockCausesBalanceOverflow" RuleErrorCoinUnlockCausesCoinsInCirculationOverflow RuleError = 
"RuleErrorCoinUnlockCausesCoinsInCirculationOverflow" RuleErrorUpdateCoinLockupParamsOnInvalidPKID RuleError = "RuleErrorUpdateCoinLockupParamsOnInvalidPKID" From 123aa62f82d5ff0b2073f1eaee34df895b6086ac Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 31 Jan 2024 15:15:53 -0800 Subject: [PATCH 433/762] Add lockup DESO pull-request link. --- lib/block_view_lockups.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 98c220746..0c99ce789 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1045,7 +1045,7 @@ func (bav *UtxoView) _connectCoinLockup( // NOTE: The zero key could be used to enable locking up DESO. // If this feature is desired, it can be restored with the following PR: - // TODO: Add relevant PR here. + // https://github.com/deso-protocol/core/pull/991 if txMeta.ProfilePublicKey.IsZeroPublicKey() { return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupCannotLockupZeroKey, "_connectCoinLockup") From e2050501a83dea09e0902dc56de4b68f83a662cf Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 1 Feb 2024 11:34:08 -0500 Subject: [PATCH 434/762] Override MsgDesoHeader.GetView() For PoW Blocks (#992) * Override MsgDesoHeader.GetView() For PoW Blocks * Address Nina's comments --- lib/network.go | 23 ------------------ lib/pos_network_message_interface.go | 36 ++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 23 deletions(-) diff --git a/lib/network.go b/lib/network.go index 5c27ac775..ccfc8dfee 100644 --- a/lib/network.go +++ b/lib/network.go @@ -22,8 +22,6 @@ import ( "github.com/decred/dcrd/dcrec/secp256k1/v4" - "github.com/deso-protocol/core/consensus" - "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" @@ -2009,31 +2007,10 @@ type MsgDeSoHeader struct { ProposerVotePartialSignature *bls.Signature } -func (msg *MsgDeSoHeader) GetBlockHash() 
consensus.BlockHash { - hash, err := msg.Hash() - if err != nil { - glog.Errorf("MsgDeSoHeader.GetBlockHash: Problem hashing header: %v", err) - // TODO: Should we return nil? - return &BlockHash{} - } - return hash -} - func (msg *MsgDeSoHeader) GetHeight() uint64 { return msg.Height } -func (msg *MsgDeSoHeader) GetView() uint64 { - return msg.ProposedInView -} - -func (msg *MsgDeSoHeader) GetQC() consensus.QuorumCertificate { - if msg.ValidatorsTimeoutAggregateQC.isEmpty() { - return msg.ValidatorsVoteQC - } - return msg.ValidatorsTimeoutAggregateQC.ValidatorsHighQC -} - func HeaderSizeBytes() int { header := NewMessage(MsgTypeHeader) headerBytes, _ := header.ToBytes(false) diff --git a/lib/pos_network_message_interface.go b/lib/pos_network_message_interface.go index 5a89c3520..96d19a9cb 100644 --- a/lib/pos_network_message_interface.go +++ b/lib/pos_network_message_interface.go @@ -4,6 +4,7 @@ import ( "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections/bitset" "github.com/deso-protocol/core/consensus" + "github.com/golang/glog" "github.com/holiman/uint256" ) @@ -14,6 +15,41 @@ import ( // place all translations between types defined in lib and consensus packages. // ////////////////////////////////////////////////////////////////////////////////// +// MsgDeSoHeader struct <-> consensus.Block interface translation + +func (msg *MsgDeSoHeader) GetBlockHash() consensus.BlockHash { + hash, err := msg.Hash() + if err != nil { + glog.Errorf("MsgDeSoHeader.GetBlockHash: Problem hashing header: %v", err) + // If we can't generate the block hash, return an empty hash. We return a non-nil + // value to avoid panics. An empty block hash will always have a value that can + // be compared to other block hashes. + return &BlockHash{} + } + return hash +} + +func (msg *MsgDeSoHeader) GetView() uint64 { + // Header version 0 and 1 are Proof of Work block header versions, and do not have a view. 
+ // In order to be able to extend from a PoW block once the PoS chain begins, we need some + // way to attach a "view" to it. Using the PoW block's height as its view allows us to do + // this in a simple and self-contained way. + // + // With this change, PoW blocks can be considered to always have their view = height. As + // a result, PoS blocks will always have their view >= height. + if msg.Version < HeaderVersion2 { + return msg.Height + } + return msg.ProposedInView +} + +func (msg *MsgDeSoHeader) GetQC() consensus.QuorumCertificate { + if msg.ValidatorsTimeoutAggregateQC.isEmpty() { + return msg.ValidatorsVoteQC + } + return msg.ValidatorsTimeoutAggregateQC.ValidatorsHighQC +} + // MsgDeSoValidatorVote struct <-> consensus.VoteMessage interface translation func (msg *MsgDeSoValidatorVote) GetPublicKey() *bls.PublicKey { From 5ffc022f9506be27fd4dd612448943c53da5ab77 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Thu, 1 Feb 2024 10:03:24 -0800 Subject: [PATCH 435/762] PoS NetworkManager Fix Deadlock and Test AddIps (#996) * Fix Deadlock and Test AddIps * Glog fix --- .../network_manager_routines_test.go | 18 +++++++++++++++--- lib/remote_node_manager.go | 9 ++++----- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/integration_testing/network_manager_routines_test.go b/integration_testing/network_manager_routines_test.go index 141e36026..31d53e211 100644 --- a/integration_testing/network_manager_routines_test.go +++ b/integration_testing/network_manager_routines_test.go @@ -468,17 +468,29 @@ func TestConnectionControllerNonValidatorConnectorInbound(t *testing.T) { func TestConnectionControllerNonValidatorConnectorAddressMgr(t *testing.T) { // Spawn a non-validator node1. Set node1's targetOutboundPeers to 2 and targetInboundPeers to 0. Then // add two ip addresses to AddrMgr. Make sure that node1 creates outbound connections to these nodes. 
- node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1 := spawnNodeProtocol1(t, 18000, "node1") node1.Config.TargetOutboundPeers = 2 node1.Config.MaxInboundPeers = 0 + node1.Config.MaxSyncBlockHeight = 1 node1 = startNode(t, node1) nm := node1.Server.GetNetworkManager() na1, err := nm.ConvertIPStringToNetAddress("deso-seed-2.io:17000") - na2, err := nm.ConvertIPStringToNetAddress("deso-seed-3.io:17000") require.NoError(t, err) nm.AddrMgr.AddAddress(na1, na1) - nm.AddrMgr.AddAddress(na2, na2) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 1, 0, 1, 0) +} + +func TestConnectionControllerNonValidatorConnectorAddIps(t *testing.T) { + // Spawn a non-validator node1. Set node1's targetOutboundPeers to 2 and targetInboundPeers to 0. Then + // add two ip addresses to the ConnectIPs. Make sure that node1 creates outbound connections to these nodes. + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.TargetOutboundPeers = 2 + node1.Config.MaxInboundPeers = 0 + node1.Config.MaxSyncBlockHeight = 1 + node1.Config.AddIPs = []string{"deso-seed-2.io", "deso-seed-3.io"} + + node1 = startNode(t, node1) waitForCountRemoteNodeIndexer(t, node1, 2, 0, 2, 0) } diff --git a/lib/remote_node_manager.go b/lib/remote_node_manager.go index 2dfb77431..3300d5d62 100644 --- a/lib/remote_node_manager.go +++ b/lib/remote_node_manager.go @@ -52,7 +52,8 @@ func NewRemoteNodeManager(srv *Server, bc *Blockchain, cmgr *ConnectionManager, } func (manager *RemoteNodeManager) DisconnectAll() { - for _, rn := range manager.GetAllRemoteNodes().GetAll() { + allRemoteNodes := manager.GetAllRemoteNodes().GetAll() + for _, rn := range allRemoteNodes { glog.V(2).Infof("RemoteNodeManager.DisconnectAll: Disconnecting from remote node (id=%v)", rn.GetId()) manager.Disconnect(rn) } @@ -135,10 +136,8 @@ func (manager *RemoteNodeManager) SendMessage(rn *RemoteNode, desoMessage DeSoMe } func (manager *RemoteNodeManager) Cleanup() { - manager.mtx.Lock() - defer manager.mtx.Unlock() - 
- for _, rn := range manager.GetAllRemoteNodes().GetAll() { + allRemoteNodes := manager.GetAllRemoteNodes().GetAll() + for _, rn := range allRemoteNodes { if rn.IsTimedOut() { glog.V(2).Infof("RemoteNodeManager.Cleanup: Disconnecting from remote node (id=%v)", rn.GetId()) manager.Disconnect(rn) From 1b0bc01171b94b790e8799a1519fa1d98ba86147 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Thu, 1 Feb 2024 10:06:58 -0800 Subject: [PATCH 436/762] PoS NetworkManager Comment Nit (#997) --- integration_testing/network_manager_routines_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/integration_testing/network_manager_routines_test.go b/integration_testing/network_manager_routines_test.go index 31d53e211..368c419c8 100644 --- a/integration_testing/network_manager_routines_test.go +++ b/integration_testing/network_manager_routines_test.go @@ -466,10 +466,10 @@ func TestConnectionControllerNonValidatorConnectorInbound(t *testing.T) { } func TestConnectionControllerNonValidatorConnectorAddressMgr(t *testing.T) { - // Spawn a non-validator node1. Set node1's targetOutboundPeers to 2 and targetInboundPeers to 0. Then - // add two ip addresses to AddrMgr. Make sure that node1 creates outbound connections to these nodes. + // Spawn a non-validator node1. Set node1's targetOutboundPeers to 1 and targetInboundPeers to 0. Then + // add one ip address to AddrMgr. Make sure that node1 creates outbound connections to this node. node1 := spawnNodeProtocol1(t, 18000, "node1") - node1.Config.TargetOutboundPeers = 2 + node1.Config.TargetOutboundPeers = 1 node1.Config.MaxInboundPeers = 0 node1.Config.MaxSyncBlockHeight = 1 @@ -483,7 +483,7 @@ func TestConnectionControllerNonValidatorConnectorAddressMgr(t *testing.T) { func TestConnectionControllerNonValidatorConnectorAddIps(t *testing.T) { // Spawn a non-validator node1. Set node1's targetOutboundPeers to 2 and targetInboundPeers to 0. 
Then - // add two ip addresses to the ConnectIPs. Make sure that node1 creates outbound connections to these nodes. + // add two ip addresses to AddIps. Make sure that node1 creates outbound connections to these nodes. node1 := spawnNodeProtocol1(t, 18000, "node1") node1.Config.TargetOutboundPeers = 2 node1.Config.MaxInboundPeers = 0 From ca38b019ea24cd7bbab43b54443ee3d6b3317ca0 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:43:39 -0500 Subject: [PATCH 437/762] Stop DeSoMiner When Block Height is Past PoS Cutover (#993) --- lib/miner.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/miner.go b/lib/miner.go index 6a2453486..0b341b601 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -190,6 +190,10 @@ func (desoMiner *DeSoMiner) MineAndProcessSingleBlock(threadIndex uint32, mempoo return nil, fmt.Errorf("DeSoMiner._startThread: _mineSingleBlock returned nil; should only happen if we're stopping") } + if blockToMine.Header.Height >= uint64(desoMiner.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + return nil, fmt.Errorf("DeSoMiner._startThread: _mineSingleBlock returned a block that is past the Proof of Stake Cutover") + } + // Log information on the block we just mined. bestHash, _ := blockToMine.Hash() glog.Infof("================== YOU MINED A NEW BLOCK! 
================== Height: %d, Hash: %s", blockToMine.Header.Height, hex.EncodeToString(bestHash[:])) From 297b35acbdf2692e421b70f02c6553d3a0b86b38 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:45:28 -0500 Subject: [PATCH 438/762] Create ConcurrentList Data Structure (#994) --- collections/concurrent_list.go | 28 ++++++++++++++++++++++++++++ collections/concurrent_list_test.go | 23 +++++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 collections/concurrent_list.go create mode 100644 collections/concurrent_list_test.go diff --git a/collections/concurrent_list.go b/collections/concurrent_list.go new file mode 100644 index 000000000..b935a4630 --- /dev/null +++ b/collections/concurrent_list.go @@ -0,0 +1,28 @@ +package collections + +import "sync" + +type ConcurrentList[T any] struct { + mtx sync.RWMutex + list []T +} + +func NewConcurrentList[T any]() *ConcurrentList[T] { + return &ConcurrentList[T]{ + list: []T{}, + } +} + +func (cl *ConcurrentList[T]) Add(item T) { + cl.mtx.Lock() + defer cl.mtx.Unlock() + cl.list = append(cl.list, item) +} + +func (cl *ConcurrentList[T]) GetAll() []T { + cl.mtx.RLock() + defer cl.mtx.RUnlock() + tmp := make([]T, len(cl.list)) + copy(tmp, cl.list) + return tmp +} diff --git a/collections/concurrent_list_test.go b/collections/concurrent_list_test.go new file mode 100644 index 000000000..28ca915bf --- /dev/null +++ b/collections/concurrent_list_test.go @@ -0,0 +1,23 @@ +package collections + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestConcurrentList(t *testing.T) { + cl := NewConcurrentList[int]() + cl.Add(1) + cl.Add(2) + cl.Add(3) + + listSnapshot := cl.GetAll() + require.Equal(t, []int{1, 2, 3}, listSnapshot) + + cl.Add(4) + + listSnapshot2 := cl.GetAll() + require.Equal(t, []int{1, 2, 3}, listSnapshot) + require.Equal(t, []int{1, 2, 3, 4}, listSnapshot2) +} From 44f1b33d32cd4eaf7d267df12223c2e980049920 
Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:47:30 -0500 Subject: [PATCH 439/762] Add OnBlockMined Listeners to miner.go For Regtest (#995) Add OnBlockMined Listeners to miner.go For Regtest Initialize blockMinedListeners --- lib/miner.go | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/lib/miner.go b/lib/miner.go index 0b341b601..5e332ba50 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -14,6 +14,7 @@ import ( "time" "github.com/btcsuite/btcd/wire" + "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/desohash" "github.com/btcsuite/btcd/btcec" @@ -31,6 +32,8 @@ type DeSoMiner struct { BlockProducer *DeSoBlockProducer params *DeSoParams + blockMinedListeners *collections.ConcurrentList[func(*MsgDeSoBlock)] + stopping int32 } @@ -52,13 +55,18 @@ func NewDeSoMiner(_minerPublicKeys []string, _numThreads uint32, } return &DeSoMiner{ - PublicKeys: _pubKeys, - numThreads: _numThreads, - BlockProducer: _blockProducer, - params: _params, + PublicKeys: _pubKeys, + numThreads: _numThreads, + BlockProducer: _blockProducer, + params: _params, + blockMinedListeners: collections.NewConcurrentList[func(*MsgDeSoBlock)](), }, nil } +func (desoMiner *DeSoMiner) AddBlockMinedListener(ff func(*MsgDeSoBlock)) { + desoMiner.blockMinedListeners.Add(ff) +} + func (desoMiner *DeSoMiner) Stop() { atomic.AddInt32(&desoMiner.stopping, 1) } @@ -289,10 +297,16 @@ func (desoMiner *DeSoMiner) _startThread(threadIndex uint32) { if err != nil { glog.Errorf(err.Error()) } + isFinished := (newBlock == nil) if isFinished { return } + + blockMinedListeners := desoMiner.blockMinedListeners.GetAll() + for _, listener := range blockMinedListeners { + listener(newBlock) + } } } From 043991251c372574dd5fff9a5ffc264322a12165 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:50:35 -0500 Subject: [PATCH 440/762] 
Attach Listeners To Register Node As Validator on Testnet (#998) --- lib/pos_server_regtest.go | 13 +++++++++++++ lib/server.go | 8 ++++++++ 2 files changed, 21 insertions(+) create mode 100644 lib/pos_server_regtest.go diff --git a/lib/pos_server_regtest.go b/lib/pos_server_regtest.go new file mode 100644 index 000000000..506f45077 --- /dev/null +++ b/lib/pos_server_regtest.go @@ -0,0 +1,13 @@ +package lib + +func (srv *Server) submitRegtestRegisterAsValidatorTxn(block *MsgDeSoBlock) { + +} + +func (srv *Server) submitRegtestStakeTxn(block *MsgDeSoBlock) { + +} + +func (srv *Server) startRegtestFastHotStuffConsensus(block *MsgDeSoBlock) { + +} diff --git a/lib/server.go b/lib/server.go index a610f3011..5ccb7a495 100644 --- a/lib/server.go +++ b/lib/server.go @@ -594,6 +594,14 @@ func NewServer( _posBlockProductionIntervalMilliseconds, _posTimeoutBaseDurationMilliseconds, ) + // On testnet, if the node is configured to be a PoW block producer, and it is configured + // to be also a PoS validator, then we attach block mined listeners to the miner to kick + // off the PoS consensus once the miner is done. + if _params.NetworkType == NetworkType_TESTNET && _miner != nil && _blockProducer != nil { + _miner.AddBlockMinedListener(srv.submitRegtestRegisterAsValidatorTxn) + _miner.AddBlockMinedListener(srv.submitRegtestStakeTxn) + _miner.AddBlockMinedListener(srv.startRegtestFastHotStuffConsensus) + } } // Set all the fields on the Server object. 
From 731dae852dc2b7086ba96eebcb0926c92d432d6b Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:52:31 -0500 Subject: [PATCH 441/762] Implement Listeners to Register Node As Validator on Testnet (#999) Implement Listeneres to Register Node As Validator on Testnet Cleanup --- lib/pos_server_regtest.go | 99 +++++++++++++++++++++++++++++++++++++-- lib/server.go | 3 +- 2 files changed, 97 insertions(+), 5 deletions(-) diff --git a/lib/pos_server_regtest.go b/lib/pos_server_regtest.go index 506f45077..8fc8738f1 100644 --- a/lib/pos_server_regtest.go +++ b/lib/pos_server_regtest.go @@ -1,13 +1,106 @@ package lib -func (srv *Server) submitRegtestRegisterAsValidatorTxn(block *MsgDeSoBlock) { +import ( + "github.com/golang/glog" + "github.com/holiman/uint256" +) -} +func (srv *Server) submitRegtestValidatorRegistrationTxns(block *MsgDeSoBlock) { + if block.Header.Height != uint64(srv.blockchain.params.ForkHeights.ProofOfStake1StateSetupBlockHeight+5) { + return + } + + glog.Infof(CLog(Yellow, "Reached ProofOfStake1StateSetupMigration.Height. 
Setting Up PoS Validator")) + + blsSigner := srv.fastHotStuffConsensus.signer + privKey := srv.blockProducer.blockProducerPrivateKey + transactorPubKey := privKey.PubKey().SerializeCompressed() + + // Register as a validator + { + votingAuthorizationPayload := CreateValidatorVotingAuthorizationPayload(transactorPubKey) + votingAuthorization, err := blsSigner.Sign(votingAuthorizationPayload) + if err != nil { + panic(err) + } + + txnMeta := RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("https://deso.com")}, + DisableDelegatedStake: false, + DelegatedStakeCommissionBasisPoints: 100, + VotingPublicKey: blsSigner.GetPublicKey(), + VotingAuthorization: votingAuthorization, + } + + txn, _, _, _, err := srv.blockchain.CreateRegisterAsValidatorTxn( + transactorPubKey, + &txnMeta, + make(map[string][]byte), + 1000, + srv.mempool, + []*DeSoOutput{}, + ) + if err != nil { + panic(err) + } + + txnSignature, err := txn.Sign(privKey) + if err != nil { + panic(err) + } -func (srv *Server) submitRegtestStakeTxn(block *MsgDeSoBlock) { + txn.Signature.SetSignature(txnSignature) + err = srv.VerifyAndBroadcastTransaction(txn) + if err != nil { + panic(err) + } + } + + // Stake DESO to the validator + { + stakeTxnMeta := StakeMetadata{ + ValidatorPublicKey: NewPublicKey(transactorPubKey), + RewardMethod: StakingRewardMethodPayToBalance, + StakeAmountNanos: uint256.NewInt().SetUint64(10), + } + + stakeTxn, _, _, _, err := srv.blockProducer.chain.CreateStakeTxn( + transactorPubKey, + &stakeTxnMeta, + make(map[string][]byte), + 1000, + srv.mempool, + []*DeSoOutput{}, + ) + if err != nil { + panic(err) + } + + stakeTxnSignature, err := stakeTxn.Sign(privKey) + if err != nil { + panic(err) + } + + stakeTxn.Signature.SetSignature(stakeTxnSignature) + + err = srv.VerifyAndBroadcastTransaction(stakeTxn) + if err != nil { + panic(err) + } + } } func (srv *Server) startRegtestFastHotStuffConsensus(block *MsgDeSoBlock) { + if block.Header.Height != 
uint64(srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1) { + return + } + + if srv.fastHotStuffConsensus == nil || srv.fastHotStuffConsensus.IsRunning() { + return + } + if err := srv.fastHotStuffConsensus.Start(); err != nil { + glog.Errorf(CLog(Yellow, "DeSoMiner._startThread: Error starting fast hotstuff consensus: %v"), err) + } } diff --git a/lib/server.go b/lib/server.go index 5ccb7a495..7bbb119f1 100644 --- a/lib/server.go +++ b/lib/server.go @@ -598,8 +598,7 @@ func NewServer( // to be also a PoS validator, then we attach block mined listeners to the miner to kick // off the PoS consensus once the miner is done. if _params.NetworkType == NetworkType_TESTNET && _miner != nil && _blockProducer != nil { - _miner.AddBlockMinedListener(srv.submitRegtestRegisterAsValidatorTxn) - _miner.AddBlockMinedListener(srv.submitRegtestStakeTxn) + _miner.AddBlockMinedListener(srv.submitRegtestValidatorRegistrationTxns) _miner.AddBlockMinedListener(srv.startRegtestFastHotStuffConsensus) } } From f60eaaaf18715154510401088796ef68f53579ee Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:54:41 -0500 Subject: [PATCH 442/762] Update Constants and Params for PoS Regtest (#1000) --- lib/constants.go | 8 ++++++-- lib/pos_transaction_register.go | 31 +++++++++++++++++++++++++++++-- 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/lib/constants.go b/lib/constants.go index 3ead46483..e32f5a4af 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -755,8 +755,9 @@ var RegtestForkHeights = ForkHeights{ BalanceModelBlockHeight: uint32(1), ProofOfStake1StateSetupBlockHeight: uint32(1), - // FIXME: set to real block height when ready - ProofOfStake2ConsensusCutoverBlockHeight: uint32(math.MaxUint32), + // For convenience, we set the PoS cutover block height to 50 + // so that enough DESO is minted to allow for testing. 
+ ProofOfStake2ConsensusCutoverBlockHeight: uint32(50), BlockRewardPatchBlockHeight: uint32(0), @@ -790,6 +791,9 @@ func (params *DeSoParams) EnableRegtest() { // Allow block rewards to be spent instantly params.BlockRewardMaturity = 0 + // Set the PoS epoch duration to 10 blocks + params.DefaultEpochDurationNumBlocks = 10 + // In regtest, we start all the fork heights at zero. These can be adjusted // for testing purposes to ensure that a transition does not cause issues. params.ForkHeights = RegtestForkHeights diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index 7873705cf..b1f77d3a8 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -42,6 +42,7 @@ type TransactionRegister struct { func NewTransactionRegister() *TransactionRegister { feeTimeBucketSet := treeset.NewWith(feeTimeBucketComparator) + minimumNetworkFeeNanosPerKB, feeBucketMultiplier := _getFallbackSafeMinimumFeeAndMultiplier() return &TransactionRegister{ feeTimeBucketSet: feeTimeBucketSet, feeTimeBucketsByMinFeeMap: make(map[uint64]*FeeTimeBucket), @@ -49,13 +50,21 @@ func NewTransactionRegister() *TransactionRegister { totalTxnsSizeBytes: 0, // Set default values for the uninitialized fields. This is safe because any transactions // added to the register will be re-bucketed once the params are updated. 
- minimumNetworkFeeNanosPerKB: big.NewFloat(1), // Default to 1 nanos per KB - feeBucketGrowthRateBasisPoints: big.NewFloat(float64(MaxBasisPoints)), // Default to 100% + minimumNetworkFeeNanosPerKB: minimumNetworkFeeNanosPerKB, // Default to 1000 nanos per KB + feeBucketGrowthRateBasisPoints: feeBucketMultiplier, // Default to 10% } } func (tr *TransactionRegister) Init(globalParams *GlobalParamsEntry) { minNetworkFee, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() + if !_isValidMinimumFeeAndMultiplier(minNetworkFee, bucketMultiplier) { + minNetworkFee, bucketMultiplier = _getFallbackSafeMinimumFeeAndMultiplier() + glog.Warningf( + "TransactionRegister.Init: Invalid minimumNetworkFeeNanosPerKB or feeBucketMultiplier. "+ + "Using fallback values: minimumNetworkFeeNanosPerKB: %v, feeBucketMultiplier: %v", + minNetworkFee, bucketMultiplier, + ) + } tr.minimumNetworkFeeNanosPerKB = minNetworkFee tr.feeBucketGrowthRateBasisPoints = bucketMultiplier } @@ -674,3 +683,21 @@ func computeFeeTimeBucketExponentFromFeeNanosPerKB(feeNanosPerKB uint64, minimum // If we get here, then the computed exponent is correct. 
return feeTimeBucketExponent } + +func _isValidMinimumFeeAndMultiplier(minimumNetworkFeeNanosPerKB *big.Float, feeBucketMultiplier *big.Float) bool { + if minimumNetworkFeeNanosPerKB == nil || feeBucketMultiplier == nil { + return false + } + + if minimumNetworkFeeNanosPerKB.Sign() <= 0 || feeBucketMultiplier.Sign() <= 0 { + return false + } + + return true +} + +func _getFallbackSafeMinimumFeeAndMultiplier() (*big.Float, *big.Float) { + minimumNetworkFeeNanosPerKB := big.NewFloat(1000) // Default to 1000 nanos per KB + feeBucketMultiplier := big.NewFloat(1000) // Default to 10% + return minimumNetworkFeeNanosPerKB, feeBucketMultiplier +} From 25db6a84ad834d76235e88fab37d6bb79d55cfc4 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:56:27 -0500 Subject: [PATCH 443/762] Enforce txn size check in _connectTransaction (#1001) --- lib/block_producer.go | 6 +-- lib/block_view.go | 14 +++---- lib/block_view_access_group_members_test.go | 2 +- lib/block_view_access_group_test.go | 2 +- lib/block_view_association_test.go | 4 +- lib/block_view_bitcoin_test.go | 42 ++++++++------------- lib/block_view_creator_coin_test.go | 29 +++++++------- lib/block_view_dao_coin_limit_order_test.go | 4 +- lib/block_view_dao_coin_test.go | 4 +- lib/block_view_derived_key_test.go | 12 +++--- lib/block_view_follow_test.go | 4 +- lib/block_view_like_test.go | 4 +- lib/block_view_lockups_test.go | 10 ++--- lib/block_view_message_test.go | 8 ++-- lib/block_view_nft_test.go | 14 +++---- lib/block_view_post_test.go | 10 ++--- lib/block_view_profile_test.go | 9 ++--- lib/block_view_stake_test.go | 8 ++-- lib/block_view_test.go | 21 +++++------ lib/block_view_types_test.go | 6 +-- lib/block_view_validator_test.go | 10 ++--- lib/blockchain.go | 9 +---- lib/db_utils.go | 2 +- lib/legacy_mempool.go | 11 +++--- lib/load_test.go | 2 +- lib/pos_block_producer.go | 2 +- lib/pos_mempool.go | 2 +- lib/state_change_syncer.go | 2 +- 28 files changed, 
114 insertions(+), 139 deletions(-) diff --git a/lib/block_producer.go b/lib/block_producer.go index 00d3ab0c3..51daadd1a 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -225,7 +225,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) if err != nil { return nil, nil, nil, errors.Wrapf(err, "Error copying UtxoView: ") } - _, _, _, _, err = utxoViewCopy._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), + _, _, _, _, err = utxoViewCopy._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, uint32(blockRet.Header.Height), int64(blockRet.Header.TstampNanoSecs), true, false) if err != nil { // Skip failing txns. This should happen super rarely. @@ -236,7 +236,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) continue } // At this point, we know the transaction isn't going to break our view so attach it. - _, _, _, _, err = utxoView._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), + _, _, _, _, err = utxoView._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, uint32(blockRet.Header.Height), int64(blockRet.Header.TstampNanoSecs), true, false) if err != nil { // We should never get an error here since we just attached a txn to an indentical @@ -294,7 +294,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) for _, txnInBlock := range blockRet.Txns[1:] { var feeNanos uint64 _, _, _, feeNanos, err = feesUtxoView._connectTransaction( - txnInBlock, txnInBlock.Hash(), 0, uint32(blockRet.Header.Height), + txnInBlock, txnInBlock.Hash(), uint32(blockRet.Header.Height), 0, false, false) if err != nil { return nil, nil, nil, fmt.Errorf( diff --git a/lib/block_view.go b/lib/block_view.go index 4e2e6d279..2ee4fb001 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3429,16 +3429,15 @@ func (bav *UtxoView) ValidateDiamondsAndGetNumDeSoNanos( } func (bav *UtxoView) ConnectTransaction( - txn *MsgDeSoTxn, txHash 
*BlockHash, txnSizeBytes int64, + txn *MsgDeSoTxn, txHash *BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, verifySignatures bool, ignoreUtxos bool) (_utxoOps []*UtxoOperation, _totalInput uint64, _totalOutput uint64, _fees uint64, _err error) { - return bav._connectTransaction(txn, txHash, txnSizeBytes, - blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) + return bav._connectTransaction(txn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) } func (bav *UtxoView) _connectTransaction( - txn *MsgDeSoTxn, txHash *BlockHash, txnSizeBytes int64, blockHeight uint32, + txn *MsgDeSoTxn, txHash *BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, verifySignatures bool, ignoreUtxos bool) (_utxoOps []*UtxoOperation, _totalInput uint64, _totalOutput uint64, _fees uint64, _err error) { // Do a quick sanity check before trying to connect. @@ -3452,7 +3451,8 @@ func (bav *UtxoView) _connectTransaction( return nil, 0, 0, 0, errors.Wrapf( err, "_connectTransaction: Problem serializing transaction: ") } - if len(txnBytes) > int(bav.Params.MaxBlockSizeBytes/2) { + txnSizeBytes := uint64(len(txnBytes)) + if txnSizeBytes > bav.Params.MaxBlockSizeBytes/2 { return nil, 0, 0, 0, RuleErrorTxnTooBig } @@ -4077,7 +4077,7 @@ func (bav *UtxoView) ConnectBlock( // enforce this check in the future, but for now the only attack vector is one in // which a miner is trying to spam the network, which should generally never happen. 
utxoOpsForTxn, _, _, currentFees, err = bav.ConnectTransaction( - txn, txHash, 0, uint32(blockHeader.Height), blockHeader.TstampNanoSecs, verifySignatures, false) + txn, txHash, uint32(blockHeader.Height), blockHeader.TstampNanoSecs, verifySignatures, false) if err != nil { return nil, errors.Wrapf(err, "ConnectBlock: error connecting txn #%d", txIndex) } @@ -4092,7 +4092,7 @@ func (bav *UtxoView) ConnectBlock( return nil, errors.Wrapf(err, "ConnectBlock: error copying UtxoView") } _, _, _, _, err = utxoViewCopy.ConnectTransaction( - txn, txHash, 0, uint32(blockHeader.Height), blockHeader.TstampNanoSecs, verifySignatures, false) + txn, txHash, uint32(blockHeader.Height), blockHeader.TstampNanoSecs, verifySignatures, false) if err == nil { return nil, errors.Wrapf(err, "ConnectBlock: txn #%d should not connect but err is nil", txIndex) } diff --git a/lib/block_view_access_group_members_test.go b/lib/block_view_access_group_members_test.go index 81d8453c5..d3becd56d 100644 --- a/lib/block_view_access_group_members_test.go +++ b/lib/block_view_access_group_members_test.go @@ -1764,7 +1764,7 @@ func TestAccessGroupMembersTxnWithDerivedKey(t *testing.T) { // Sign txn. _signTxnWithDerivedKey(t, txn, derivedKeyPrivBase58Check) // Connect txn. - utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, + utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { return err diff --git a/lib/block_view_access_group_test.go b/lib/block_view_access_group_test.go index 1a9427126..4afd40857 100644 --- a/lib/block_view_access_group_test.go +++ b/lib/block_view_access_group_test.go @@ -613,7 +613,7 @@ func TestAccessGroupTxnWithDerivedKey(t *testing.T) { // Sign txn. _signTxnWithDerivedKey(t, txn, derivedKeyPrivBase58Check) // Connect txn. 
- utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, + utxoOps, _, _, _, err := utxoView.ConnectTransaction(txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { return err diff --git a/lib/block_view_association_test.go b/lib/block_view_association_test.go index 38033ee75..81496d9dc 100644 --- a/lib/block_view_association_test.go +++ b/lib/block_view_association_test.go @@ -2173,7 +2173,7 @@ func _submitAssociationTxn( // Connect the transaction. utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( - txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -2341,7 +2341,7 @@ func _testAssociationsWithDerivedKey(t *testing.T) { _signTxnWithDerivedKey(t, txn, derivedKeyPrivBase58Check) // Connect txn. utxoOps, _, _, _, err := utxoView.ConnectTransaction( - txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { return err } diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index a428bd5af..b482aecd1 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -202,7 +202,7 @@ func _updateUSDCentsPerBitcoinExchangeRate(t *testing.T, chain *Blockchain, db * // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. 
if err != nil { @@ -480,10 +480,9 @@ func TestBitcoinExchange(t *testing.T) { bitcoinExchangeTxns[ii] = rateUpdateTxn burnTxn := bitcoinExchangeTxns[ii] - burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), blockHeight, 0, true, false) _, _, _ = totalInput, totalOutput, fees require.NoError(err) utxoOpsList = append(utxoOpsList, utxoOps) @@ -491,10 +490,9 @@ func TestBitcoinExchange(t *testing.T) { } burnTxn := bitcoinExchangeTxns[ii] - burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), blockHeight, 0, true, false) require.NoError(err) require.Equal(2, len(utxoOps)) @@ -644,9 +642,8 @@ func TestBitcoinExchange(t *testing.T) { require.NoError(err) for ii, burnTxn := range bitcoinExchangeTxns { blockHeight := chain.blockTip().Height + 1 - burnTxnSize := getTxnSize(*burnTxn) utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), blockHeight, 0, true, false) require.NoError(err) if ii < len(expectedBitcoinBurnAmounts) { @@ -1082,7 +1079,6 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { //// Validating the first Bitcoin burn transaction via a UtxoView should //// fail because the block corresponding to it is not yet in the BitcoinManager. 
burnTxn1 := bitcoinExchangeTxns[0] - burnTxn1Size := getTxnSize(*burnTxn1) txHash1 := burnTxn1.Hash() burnTxn2 := bitcoinExchangeTxns[1] @@ -1165,13 +1161,13 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { { utxoView, _ := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 - utxoView.ConnectTransaction(burnTxn1, txHash1, burnTxn1Size, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn1, txHash1, blockHeight, 0, true, false) } { utxoView, _ := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 - utxoView.ConnectTransaction(burnTxn1, txHash1, burnTxn1Size, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn1, txHash1, blockHeight, 0, true, false) } // The transaction should pass now @@ -1179,7 +1175,7 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { utxoView, _ := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := - utxoView.ConnectTransaction(burnTxn1, txHash1, burnTxn1Size, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn1, txHash1, blockHeight, 0, true, false) require.NoError(err) } @@ -1214,10 +1210,9 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { bitcoinExchangeTxns[ii] = rateUpdateTxn burnTxn := bitcoinExchangeTxns[ii] - burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), blockHeight, 0, true, false) _, _, _ = totalInput, totalOutput, fees require.NoError(err) utxoOpsList = append(utxoOpsList, utxoOps) @@ -1225,10 +1220,9 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { } burnTxn := bitcoinExchangeTxns[ii] - burnTxnSize := getTxnSize(*burnTxn) blockHeight := 
chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), blockHeight, 0, true, false) require.NoError(err) require.Equal(2, len(utxoOps)) @@ -1378,9 +1372,8 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { require.NoError(err) for ii, burnTxn := range bitcoinExchangeTxns { blockHeight := chain.blockTip().Height + 1 - burnTxnSize := getTxnSize(*burnTxn) utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), blockHeight, 0, true, false) require.NoError(err) if ii < len(expectedBitcoinBurnAmounts) { @@ -1813,7 +1806,6 @@ func TestSpendOffOfUnminedTxnsBitcoinExchange(t *testing.T) { // The amount of work on the first burn transaction should be zero. burnTxn1 := bitcoinExchangeTxns[0] - burnTxn1Size := getTxnSize(*burnTxn1) burnTxn2 := bitcoinExchangeTxns[1] txHash1 := burnTxn1.Hash() @@ -2062,7 +2054,7 @@ func TestSpendOffOfUnminedTxnsBitcoinExchange(t *testing.T) { blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := - utxoView.ConnectTransaction(burnTxn1, txHash1, burnTxn1Size, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn1, txHash1, blockHeight, 0, true, false) require.NoError(err) } @@ -2515,10 +2507,9 @@ func TestBitcoinExchangeWithAmountNanosNonZeroAtGenesis(t *testing.T) { bitcoinExchangeTxns[ii] = rateUpdateTxn burnTxn := bitcoinExchangeTxns[ii] - burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), blockHeight, 0, true, false) _, _, _ = totalInput, totalOutput, fees require.NoError(err) 
utxoOpsList = append(utxoOpsList, utxoOps) @@ -2526,10 +2517,9 @@ func TestBitcoinExchangeWithAmountNanosNonZeroAtGenesis(t *testing.T) { } burnTxn := bitcoinExchangeTxns[ii] - burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), blockHeight, 0, true, false) require.NoError(err) require.Equal(2, len(utxoOps)) @@ -2679,9 +2669,8 @@ func TestBitcoinExchangeWithAmountNanosNonZeroAtGenesis(t *testing.T) { require.NoError(err) for ii, burnTxn := range bitcoinExchangeTxns { blockHeight := chain.blockTip().Height + 1 - burnTxnSize := getTxnSize(*burnTxn) utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), blockHeight, 0, true, false) require.NoError(err) if ii < len(expectedBitcoinBurnAmounts) { @@ -2984,10 +2973,9 @@ func TestUpdateExchangeRate(t *testing.T) { utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) require.NoError(err) - txnSize := getTxnSize(*updateExchangeRateTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(updateExchangeRateTxn, updateExchangeRateTxn.Hash(), txnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(updateExchangeRateTxn, updateExchangeRateTxn.Hash(), blockHeight, 0, true, false) require.NoError(err) _, _, _, _ = utxoOps, totalInput, totalOutput, fees require.NoError(utxoView.FlushToDb(0)) diff --git a/lib/block_view_creator_coin_test.go b/lib/block_view_creator_coin_test.go index 645c10f4e..8707b9a48 100644 --- a/lib/block_view_creator_coin_test.go +++ b/lib/block_view_creator_coin_test.go @@ -608,9 +608,8 @@ func _helpTestCreatorCoinBuySell( 
fmt.Printf("Applying test index: %v\n", testIndex) txn := testTxns[testIndex] blockHeight := chain.blockTip().Height + 1 - txnSize := getTxnSize(*txn) _, _, _, _, err := - utxoView.ConnectTransaction(txn, txn.Hash(), txnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txn.Hash(), blockHeight, 0, true, false) require.NoError(err) _checkTestData(testData, fmt.Sprintf("SimpleConnect: Index: %v", testIndex), utxoView, nil) } @@ -1140,7 +1139,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCreatorCoinTransferHasDiamondPostHashWithoutDiamondLevel) } @@ -1174,7 +1173,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), "level 15 not allowed") } @@ -1207,7 +1206,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), "level 0 not allowed") } @@ -1240,7 +1239,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCreatorCoinTransferCantSendDiamondsForOtherProfiles) } @@ -1273,7 +1272,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCoinTransferCannotTransferToSelf) } @@ -1307,7 +1306,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCreatorCoinTransferDiamondPostEntryDoesNotExist) } @@ -1340,7 +1339,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCreatorCoinTransferInsufficientCreatorCoinsForDiamondLevel) } @@ -1373,7 +1372,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.NoError(err) _, err = mempool.processTransaction(txn, false, false, 0, false) @@ -1405,7 +1404,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCreatorCoinTransferPostAlreadyHasSufficientDiamonds) } @@ -1536,7 +1535,7 @@ func TestCreatorCoinDiamondAfterDeSoDiamondsBlockHeight(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorCreatorCoinTransferHasDiamondsAfterDeSoBlockHeight) } @@ -4351,7 +4350,7 @@ func _creatorCoinTxn(t *testing.T, chain *Blockchain, db *badger.DB, txHash := txn.Hash() utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -4455,7 +4454,7 @@ func _doCreatorCoinTransferTxnWithDiamonds(t *testing.T, chain *Blockchain, db * // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -4528,7 +4527,7 @@ func _doCreatorCoinTransferTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { diff --git a/lib/block_view_dao_coin_limit_order_test.go b/lib/block_view_dao_coin_limit_order_test.go index d29be88cc..026b8eaad 100644 --- a/lib/block_view_dao_coin_limit_order_test.go +++ b/lib/block_view_dao_coin_limit_order_test.go @@ -4095,7 +4095,7 @@ func _connectDAOCoinLimitOrderTxn( // Always use savedHeight (blockHeight+1) for validation since it's // assumed the transaction will get mined into the next block. utxoOps, totalInput, totalOutput, fees, err := currentUtxoView.ConnectTransaction( - txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { // If error, remove most-recent expected sender balance added for this txn. testMeta.expectedSenderBalances = testMeta.expectedSenderBalances[:len(testMeta.expectedSenderBalances)-1] @@ -4168,7 +4168,7 @@ func _doDAOCoinLimitOrderTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } diff --git a/lib/block_view_dao_coin_test.go b/lib/block_view_dao_coin_test.go index d8b8bea6c..41d8c277b 100644 --- a/lib/block_view_dao_coin_test.go +++ b/lib/block_view_dao_coin_test.go @@ -51,7 +51,7 @@ func _daoCoinTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -132,7 +132,7 @@ func _daoCoinTransferTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index db77f44b5..7b9c8fdde 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -92,7 +92,7 @@ func _derivedKeyBasicTransfer(t *testing.T, db *badger.DB, chain *Blockchain, pa txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 utxoOps, _, _, _, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) return utxoOps, txn, err } @@ -519,7 +519,7 @@ func _doTxnWithBlockHeight( txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -837,7 +837,7 @@ func _doAuthorizeTxnWithExtraDataAndSpendingLimits(testMeta *TestMeta, utxoView txHash := txn.Hash() utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. 
if err != nil { @@ -1148,9 +1148,8 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { for testIndex, txn := range testTxns { fmt.Printf("Applying test index: %v\n", testIndex) blockHeight := chain.blockTip().Height + 1 - txnSize := getTxnSize(*txn) _, _, _, _, err := - utxoView.ConnectTransaction(txn, txn.Hash(), txnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txn.Hash(), blockHeight, 0, true, false) require.NoError(err) } @@ -2001,9 +2000,8 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { for testIndex, txn := range testTxns { fmt.Printf("Applying test index: %v\n", testIndex) blockHeight := chain.blockTip().Height + 1 - txnSize := getTxnSize(*txn) _, _, _, _, err := - utxoView.ConnectTransaction(txn, txn.Hash(), txnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txn.Hash(), blockHeight, 0, true, false) require.NoError(err) } diff --git a/lib/block_view_follow_test.go b/lib/block_view_follow_test.go index aa308aa62..8b73727b1 100644 --- a/lib/block_view_follow_test.go +++ b/lib/block_view_follow_test.go @@ -43,7 +43,7 @@ func _doFollowTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -653,7 +653,7 @@ func TestFollowTxns(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.NoError(err) } // Flush the utxoView after having added all the transactions. 
diff --git a/lib/block_view_like_test.go b/lib/block_view_like_test.go index d7856607a..19d3ab260 100644 --- a/lib/block_view_like_test.go +++ b/lib/block_view_like_test.go @@ -40,7 +40,7 @@ func _doLikeTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -576,7 +576,7 @@ func TestLikeTxns(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.NoError(err) } // Flush the utxoView after having added all the transactions. diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 09120db93..0b0e82579 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -1618,7 +1618,7 @@ func TestLockupWithDerivedKey(t *testing.T) { transactorPublicKeyBase58Check := Base58CheckEncode(transactorPkBytes, false, testMeta.params) prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) // Connect txn. 
- utxoOps, _, _, fees, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), + utxoOps, _, _, fees, err := utxoView.ConnectTransaction(txn, txn.Hash(), testMeta.savedHeight, blockTimestamp, true, false) if err != nil { return 0, err @@ -3133,7 +3133,7 @@ func _coinLockupWithConnectTimestamp(t *testing.T, chain *Blockchain, db *badger blockHeight := chain.BlockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := utxoView.ConnectTransaction( - txn, txHash, getTxnSize(*txn), blockHeight, connectTimestamp, true, false) + txn, txHash, blockHeight, connectTimestamp, true, false) if err != nil { return nil, nil, 0, err } @@ -3220,7 +3220,7 @@ func _updateCoinLockupParams(t *testing.T, chain *Blockchain, db *badger.DB, blockHeight := chain.BlockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := utxoView.ConnectTransaction( - txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -3299,7 +3299,7 @@ func _coinLockupTransfer(t *testing.T, chain *Blockchain, db *badger.DB, blockHeight := chain.BlockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := utxoView.ConnectTransaction( - txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -3376,7 +3376,7 @@ func _coinUnlockWithConnectTimestamp(t *testing.T, chain *Blockchain, db *badger blockHeight := chain.BlockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := utxoView.ConnectTransaction( - txn, txHash, getTxnSize(*txn), blockHeight, connectTimestamp, true, false) + txn, txHash, blockHeight, connectTimestamp, true, false) if err != nil { return nil, nil, 0, err } diff --git a/lib/block_view_message_test.go b/lib/block_view_message_test.go index fa1efd69e..b79931981 100644 --- a/lib/block_view_message_test.go +++ b/lib/block_view_message_test.go @@ -88,7 +88,7 @@ func 
_privateMessageWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -476,7 +476,7 @@ func TestPrivateMessage(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.NoError(err) } // Flush the utxoView after having added all the transactions. @@ -659,7 +659,7 @@ func _messagingKeyWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB, txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, err } @@ -1688,7 +1688,7 @@ func _connectPrivateMessageWithPartyWithExtraData(testMeta *TestMeta, senderPkBy utxoView, err := NewUtxoView(testMeta.db, testMeta.params, nil, testMeta.chain.snapshot, nil) blockHeight := testMeta.chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the output. 
if expectedError != nil { assert.Equal(true, strings.Contains(err.Error(), expectedError.Error())) diff --git a/lib/block_view_nft_test.go b/lib/block_view_nft_test.go index cad187c76..bc3d6efcd 100644 --- a/lib/block_view_nft_test.go +++ b/lib/block_view_nft_test.go @@ -92,7 +92,7 @@ func _createNFTWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB, par // Always use height+1 for validation since it's assumed the transaction will // get mined into the next block. utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -309,7 +309,7 @@ func _createNFTBid(t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoP // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -403,7 +403,7 @@ func _acceptNFTBid(t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoP // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -517,7 +517,7 @@ func _updateNFT(t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoPara // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -613,7 +613,7 @@ func _transferNFT(t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoPa // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -700,7 +700,7 @@ func _acceptNFTTransfer(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -783,7 +783,7 @@ func _burnNFT(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. 
blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } diff --git a/lib/block_view_post_test.go b/lib/block_view_post_test.go index 794ddafe1..1f8574d05 100644 --- a/lib/block_view_post_test.go +++ b/lib/block_view_post_test.go @@ -72,7 +72,7 @@ func _submitPost(t *testing.T, chain *Blockchain, db *badger.DB, blockHeight := chain.blockTip().Height + 1 utxoView.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB = 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -170,7 +170,7 @@ func _giveDeSoDiamonds(t *testing.T, chain *Blockchain, db *badger.DB, params *D // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return nil, nil, 0, err } @@ -266,7 +266,7 @@ func _doSubmitPostTxn(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. 
if err != nil { @@ -1552,7 +1552,7 @@ func TestSubmitPost(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.NoError(err) // Assert "after" comment counts are correct at a few different spots @@ -1983,7 +1983,7 @@ func TestDeSoDiamondErrorCases(t *testing.T) { // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) if err != nil { return err } diff --git a/lib/block_view_profile_test.go b/lib/block_view_profile_test.go index 70ef92452..3f3c0d9d8 100644 --- a/lib/block_view_profile_test.go +++ b/lib/block_view_profile_test.go @@ -70,7 +70,7 @@ func _swapIdentity(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -153,7 +153,7 @@ func _updateProfileWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB, // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. 
if err != nil { @@ -1147,7 +1147,7 @@ func TestUpdateProfile(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.NoError(err) } // Flush the utxoView after having added all the transactions. @@ -3410,8 +3410,7 @@ func TestUpdateProfileChangeBack(t *testing.T) { // This ensure that the read-only version of the utxoView accurately reflects the current set of profile names taken. utxoViewCopy, err := mempool.universalUtxoView.CopyUtxoView() require.NoError(err) - txnSize := getTxnSize(*txn) - _, _, _, _, err = utxoViewCopy.ConnectTransaction(txn, txn.Hash(), txnSize, chain.blockTip().Height+1, 0, false, false) + _, _, _, _, err = utxoViewCopy.ConnectTransaction(txn, txn.Hash(), chain.blockTip().Height+1, 0, false, false) require.NoError(err) mempoolTxsAdded, err := mempool.processTransaction( diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 4e7c36230..4253c1cea 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -640,7 +640,7 @@ func _submitStakeTxn( _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) // Connect the transaction. - utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } @@ -692,7 +692,7 @@ func _submitUnstakeTxn( _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) // Connect the transaction. 
- utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } @@ -744,7 +744,7 @@ func _submitUnlockStakeTxn( _signTxn(testMeta.t, txn, transactorPrivateKeyBase58Check) // Connect the transaction. - utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction(txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } @@ -918,7 +918,7 @@ func TestStakingWithDerivedKey(t *testing.T) { transactorPublicKeyBase58Check := Base58CheckEncode(transactorPkBytes, false, params) prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) // Connect txn. 
- utxoOps, _, _, fees, err := utxoView.ConnectTransaction(txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + utxoOps, _, _, fees, err := utxoView.ConnectTransaction(txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } diff --git a/lib/block_view_test.go b/lib/block_view_test.go index d19d0af36..1bfe3d3f1 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -892,7 +892,7 @@ func _doBasicTransferWithViewFlush(t *testing.T, chain *Blockchain, db *badger.D txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.NoError(err) require.GreaterOrEqual(totalOutput, amountNanos) require.Equal(totalInput, totalOutput+fees) @@ -1001,7 +1001,7 @@ func _updateGlobalParamsEntryWithMempool(t *testing.T, chain *Blockchain, db *ba // get mined into the next block. blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) // ConnectTransaction should treat the amount locked as contributing to the // output. if err != nil { @@ -1210,7 +1210,7 @@ func _applyTestMetaTxnsToViewAndFlush(testMeta *TestMeta) { txHash := txn.Hash() blockHeight := testMeta.chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.NoError(testMeta.t, err) } // Flush the utxoView after having added all the transactions. 
@@ -1342,10 +1342,9 @@ func TestUpdateGlobalParams(t *testing.T) { utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) require.NoError(err) - txnSize := getTxnSize(*updateGlobalParamsTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(updateGlobalParamsTxn, updateGlobalParamsTxn.Hash(), txnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(updateGlobalParamsTxn, updateGlobalParamsTxn.Hash(), blockHeight, 0, true, false) require.NoError(err) _, _, _, _ = utxoOps, totalInput, totalOutput, fees require.NoError(utxoView.FlushToDb(0)) @@ -1543,7 +1542,7 @@ func TestBasicTransfer(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) if blockHeight < params.ForkHeights.BalanceModelBlockHeight { require.Contains(err.Error(), RuleErrorInputWithPublicKeyDifferentFromTxnPublicKey) @@ -1585,7 +1584,7 @@ func TestBasicTransfer(t *testing.T) { _signTxn(t, txn, recipientPrivString) txHash := txn.Hash() _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorInvalidTransactionSignature) } @@ -1611,7 +1610,7 @@ func TestBasicTransfer(t *testing.T) { txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorBlockRewardTxnNotAllowedToHaveSignature) } @@ -1645,7 +1644,7 @@ func TestBasicTransfer(t *testing.T) { txHash := 
txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) if blockHeight < params.ForkHeights.BalanceModelBlockHeight { require.Contains(err.Error(), RuleErrorBlockRewardTxnNotAllowedToHaveInputs) @@ -1734,7 +1733,7 @@ func TestBasicTransfer(t *testing.T) { utxoView, _ := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) txHash := txn.Hash() _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, getTxnSize(*txn), blockHeight, 0, true, false) + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.NoError(err) senderBalance, _ := utxoView.GetDeSoBalanceNanosForPublicKey(senderPkBytes) @@ -2190,7 +2189,7 @@ func TestBlockRewardPatch(t *testing.T) { _signTxn(t, txn, senderPrivString) utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) require.NoError(t, err) - _, _, _, fees, err := utxoView._connectTransaction(txn, txn.Hash(), getTxnSize(*txn), chain.blockTip().Height+1, 0, true, false) + _, _, _, fees, err := utxoView._connectTransaction(txn, txn.Hash(), chain.blockTip().Height+1, 0, true, false) require.NoError(t, err) blkToMine, _, _, err := miner._getBlockToMine(0) require.NoError(t, err) diff --git a/lib/block_view_types_test.go b/lib/block_view_types_test.go index 48c209dc4..43bddeaae 100644 --- a/lib/block_view_types_test.go +++ b/lib/block_view_types_test.go @@ -375,10 +375,9 @@ func TestUtxoEntryEncodeDecode(t *testing.T) { bitcoinExchangeTxns[ii] = rateUpdateTxn burnTxn := bitcoinExchangeTxns[ii] - burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), blockHeight, 0, true, false) 
_, _, _ = totalInput, totalOutput, fees require.NoError(err) utxoOpsList = append(utxoOpsList, utxoOps) @@ -386,10 +385,9 @@ func TestUtxoEntryEncodeDecode(t *testing.T) { } burnTxn := bitcoinExchangeTxns[ii] - burnTxnSize := getTxnSize(*burnTxn) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := - utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), burnTxnSize, blockHeight, 0, true, false) + utxoView.ConnectTransaction(burnTxn, burnTxn.Hash(), blockHeight, 0, true, false) require.NoError(err) require.Equal(2, len(utxoOps)) diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 35c48d203..5ab35076b 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -377,7 +377,7 @@ func _submitRegisterAsValidatorTxn( // Connect the transaction. utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( - txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } @@ -428,7 +428,7 @@ func _submitUnregisterAsValidatorTxn( // Connect the transaction. utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( - txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } @@ -582,7 +582,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) // Connect txn. 
utxoOps, _, _, _, err := utxoView.ConnectTransaction( - txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { return err } @@ -1842,7 +1842,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { prevBalance := _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, transactorPublicKeyBase58Check) // Connect txn. utxoOps, _, _, _, err := utxoView.ConnectTransaction( - txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { return err } @@ -1995,7 +1995,7 @@ func _submitUnjailValidatorTxn( // Connect the transaction. utxoOps, totalInput, totalOutput, fees, err := testMeta.mempool.universalUtxoView.ConnectTransaction( - txn, txn.Hash(), getTxnSize(*txn), testMeta.savedHeight, 0, true, false) + txn, txn.Hash(), testMeta.savedHeight, 0, true, false) if err != nil { return 0, err } diff --git a/lib/blockchain.go b/lib/blockchain.go index 364e0ec53..6d7c9c4de 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -2866,14 +2866,9 @@ func (bc *Blockchain) ValidateTransaction( // Hash the transaction. txHash := txnMsg.Hash() - txnBytes, err := txnMsg.ToBytes(false) - if err != nil { - return errors.Wrapf(err, "ValidateTransaction: Error serializing txn: %v", err) - } - txnSize := int64(len(txnBytes)) // We don't care about the utxoOps or the fee it returns. 
_, _, _, _, err = utxoView._connectTransaction( - txnMsg, txHash, txnSize, blockHeight, 0, verifySignatures, false) + txnMsg, txHash, blockHeight, 0, verifySignatures, false) if err != nil { return errors.Wrapf(err, "ValidateTransaction: Problem validating transaction: ") } @@ -5264,7 +5259,7 @@ func (bc *Blockchain) EstimateDefaultFeeRateNanosPerKB( } numBytesInTxn := len(txnBytes) _, _, _, fees, err := utxoView.ConnectTransaction( - txn, txn.Hash(), int64(numBytesInTxn), tipNode.Height, int64(tipNode.Header.TstampNanoSecs), + txn, txn.Hash(), tipNode.Height, tipNode.Header.TstampNanoSecs, false, false) if err != nil { return minFeeRateNanosPerKB diff --git a/lib/db_utils.go b/lib/db_utils.go index d77f4c71e..de825edd1 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -5365,7 +5365,7 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, // Set txnSizeBytes to 0 here as the minimum network fee is 0 at genesis block, so there is no need to serialize // these transactions to check if they meet the minimum network fee requirement. var utxoOpsForTxn []*UtxoOperation - utxoOpsForTxn, _, _, _, err = utxoView.ConnectTransaction(txn, txn.Hash(), 0, 0, 0, false, true) + utxoOpsForTxn, _, _, _, err = utxoView.ConnectTransaction(txn, txn.Hash(), 0, 0, false, true) if err != nil { return fmt.Errorf( "InitDbWithDeSoGenesisBlock: Error connecting transaction: %v, "+ diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 24deed649..977e622e0 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -905,7 +905,7 @@ func (mp *DeSoMempool) addTransaction( // Add it to the universal view. We assume the txn was already added to the // backup view. 
_, _, _, _, err = mp.universalUtxoView._connectTransaction( - mempoolTx.Tx, mempoolTx.Hash, int64(mempoolTx.TxSizeBytes), height, + mempoolTx.Tx, mempoolTx.Hash, height, timestamp, false, false) if err != nil { return nil, errors.Wrap(err, "ERROR addTransaction: _connectTransaction "+ @@ -915,7 +915,7 @@ func (mp *DeSoMempool) addTransaction( mp.universalTransactionList = append(mp.universalTransactionList, mempoolTx) if updateBackupView { _, _, _, _, err = mp.backupUniversalUtxoView._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, - int64(mempoolTx.TxSizeBytes), height, timestamp, false, false) + height, timestamp, false, false) if err != nil { return nil, errors.Wrap(err, "ERROR addTransaction: _connectTransaction "+ "failed on backupUniversalUtxoView; this is a HUGE problem and should never happen") @@ -996,7 +996,7 @@ func (mp *DeSoMempool) _quickCheckBitcoinExchangeTxn( // has the block corresponding to the transaction. // We skip verifying txn size for bitcoin exchange transactions. _, _, _, txFee, err := utxoView._connectTransaction( - tx, txHash, 0, bestHeight, 0, false, false) + tx, txHash, bestHeight, 0, false, false) if err != nil { // Note this can happen in odd cases where a transaction's dependency was removed // but the transaction depending on it was not. See the comment on @@ -1088,9 +1088,8 @@ func (mp *DeSoMempool) tryAcceptTransaction( usdCentsPerBitcoinBefore := mp.backupUniversalUtxoView.GetCurrentUSDCentsPerBitcoin() bestHeight := uint32(mp.bc.blockTip().Height + 1) bestTimestamp := time.Now().UnixNano() - // We can skip verifying the transaction size as related to the minimum fee here. 
utxoOps, totalInput, totalOutput, txFee, err := mp.backupUniversalUtxoView._connectTransaction( - tx, txHash, 0, bestHeight, bestTimestamp, verifySignatures, false) + tx, txHash, bestHeight, bestTimestamp, verifySignatures, false) if err != nil { mp.rebuildBackupView() return nil, nil, errors.Wrapf(err, "tryAcceptTransaction: Problem "+ @@ -2137,7 +2136,7 @@ func ConnectTxnAndComputeTransactionMetadata( totalNanosPurchasedBefore := utxoView.NanosPurchased usdCentsPerBitcoinBefore := utxoView.GetCurrentUSDCentsPerBitcoin() utxoOps, totalInput, totalOutput, fees, err := utxoView._connectTransaction( - txn, txn.Hash(), 0, blockHeight, 0, false, false) + txn, txn.Hash(), blockHeight, 0, false, false) if err != nil { return nil, fmt.Errorf( "UpdateTxindex: Error connecting txn to UtxoView: %v", err) diff --git a/lib/load_test.go b/lib/load_test.go index 00cec0e6f..88c54543c 100644 --- a/lib/load_test.go +++ b/lib/load_test.go @@ -134,7 +134,7 @@ func TestComputeMaxTPS(t *testing.T) { timeStart := time.Now() for _, tx := range txns { - _, _, _, _, err := utxoView.ConnectTransaction(tx, tx.Hash(), 0, 1, 0, false, false) + _, _, _, _, err := utxoView.ConnectTransaction(tx, tx.Hash(), 1, 0, false, false) require.NoError(err) } //require.NoError(utxoView.FlushToDb()) diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 469939814..aba26dacc 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -193,7 +193,7 @@ func (pbp *PosBlockProducer) getBlockTransactions( return nil, nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") } _, _, _, fees, err := blockUtxoViewCopy._connectTransaction( - txn.GetTxn(), txn.Hash(), int64(len(txnBytes)), uint32(newBlockHeight), newBlockTimestampNanoSecs, + txn.GetTxn(), txn.Hash(), uint32(newBlockHeight), newBlockTimestampNanoSecs, true, false) // Check if the transaction connected. 
diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 997801567..4c75be2fe 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -301,7 +301,7 @@ func (mp *PosMempool) startAugmentedViewRefreshRoutine() { continue } _, _, _, _, err = copiedView.ConnectTransaction( - txn.GetTxn(), txn.Hash(), 0, uint32(mp.latestBlockHeight)+1, time.Now().UnixNano(), false, + txn.GetTxn(), txn.Hash(), uint32(mp.latestBlockHeight)+1, time.Now().UnixNano(), false, false) // If the transaction successfully connects, we set the newView to the copiedView // and proceed to the next transaction. diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index ec754aa12..a5a9f1777 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -688,7 +688,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser currentTimestamp := time.Now().UnixNano() for _, mempoolTx := range mempoolTxns { utxoOpsForTxn, _, _, _, err := mempoolTxUtxoView.ConnectTransaction( - mempoolTx.Tx, mempoolTx.Hash, 0, uint32(blockHeight+1), + mempoolTx.Tx, mempoolTx.Hash, uint32(blockHeight+1), currentTimestamp, false, false /*ignoreUtxos*/) if err != nil { return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer ConnectTransaction: ") From c40575816f7e5f79bceb9e3e67333dc379657515 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:58:30 -0500 Subject: [PATCH 444/762] Fix Unexpected nil Return Value from errors.Wrapf (#1002) --- lib/block_view.go | 2 +- lib/pos_blockchain.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 2ee4fb001..aa415e745 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4094,7 +4094,7 @@ func (bav *UtxoView) ConnectBlock( _, _, _, _, err = utxoViewCopy.ConnectTransaction( txn, txHash, uint32(blockHeader.Height), blockHeader.TstampNanoSecs, verifySignatures, false) if err == nil { 
- return nil, errors.Wrapf(err, "ConnectBlock: txn #%d should not connect but err is nil", txIndex) + return nil, errors.Errorf("ConnectBlock: txn #%d should not connect but err is nil", txIndex) } var burnFee uint64 // Connect the failing transaction to get the fees and utility fee. diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 8a5f916bd..2dc89bdd7 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1123,8 +1123,7 @@ func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBloc // isValidPoSQuorumCertificate validates that the QC of this block is valid, meaning a super majority // of the validator set has voted (or timed out). It special cases the first block after the PoS cutover -// by overriding the validator set used to validate the vote QC or high QC in the first block after the -// PoS cutover. +// by overriding the validator set used to validate the high QC in the first block after the PoS cutover. func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validatorSet []*ValidatorEntry) error { voteQCValidators := toConsensusValidators(validatorSet) aggregateQCValidators := voteQCValidators From b5c5d3272d728fc1ce3134aea4893855711e846e Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Thu, 1 Feb 2024 16:12:26 -0800 Subject: [PATCH 445/762] Add comments and simplify CalculateVestedEarnings while keeping sanity checks. --- lib/block_view_lockups.go | 111 ++++++++++++++++++++++++++------------ 1 file changed, 78 insertions(+), 33 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 0c99ce789..bbe89a1ec 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -46,16 +46,20 @@ type LockedBalanceEntry struct { isDeleted bool } -// LockedBalanceEntryKey is a very crucial struct in the design of lockups. 
+// LockedBalanceEntryKey is a very crucial struct in the design of lockup // // Consider a naive utilization of LockedBalanceEntryMapKey in the context of two subsequent vested unlocks -// WITHOUT flushing to db in-between: +// WITHOUT flushing to db in-between. Assume that there exists a vested locked balance entry in the db +// and that the view is empty at the start. We will step through how the code would read from disk and the +// UtxoView when performing these two unlocks: // // (1st Unlock, blockTimestampNanoSecs=x+100) // (1) We read from disk the LockedBalanceEntry with UnlockTimestampNanoSecs=x // (2) We cache the entry found in-memory (i.e. in the UtxoView) // (3) We read from the in-memory cache the LockedBalanceEntry with UnlockTimestampNanoSecs=x -// (4) We update the in-memory entry to have UnlockTimestampNanoSecs=x+100 +// (4) We update the in-memory entry to have UnlockTimestampNanoSecs=x+100. +// NOTE: At this step, there is no entry in the UtxoView view with an UnlockTimestampNanoSecs=x. +// This is an issue as we will see in the next unlock as we will read duplicate entries from the db. // (2nd Unlock, blockTimestampNanoSecs=x+150) // (1) We read from disk the LockedBalanceEntry with UnlockTimestampNanoSecs=x // (2) Because no other entry in-memory has UnlockTimestampNanoSecs=x, we cache a duplicate entry in-memory @@ -753,11 +757,29 @@ func (bav *UtxoView) GetAllYieldCurvePoints( // type CoinLockupMetadata struct { - ProfilePublicKey *PublicKey - RecipientPublicKey *PublicKey - UnlockTimestampNanoSecs int64 + // The profile public key is the profile who's associated DAO coins we wish to lockup. + ProfilePublicKey *PublicKey + + // The recipient of the locked DAO coins following execution of the transaction. + RecipientPublicKey *PublicKey + + // The UnlockTimestampNanoSecs specifies when the recipient should begin to be able to unlock + // their locked DAO coins. 
+ UnlockTimestampNanoSecs int64 + + // If VestingEndTimestampNanoSecs is equal to UnlockTimestampNanoSecs, the user will be able to unlock + // all locked DAO coins associated with this transaction once a block whose header timestamp is + // greater to or equal to UnlockTimestampNanoSecs. This is the "unvested" or "point" case. + // + // If not equal to UnlockTimestampNanoSecs, the user can unlock the associated DAO coins once + // a block with header timestamp greater to or equal to UnlockTimestampNanoSecs, but will only + // receive tokens in proportion to the amount of time that has passed between UnlockTimestampNanoSecs + // and VestingEndTimestampNanoSecs. This is the "vested" case. VestingEndTimestampNanoSecs int64 - LockupAmountBaseUnits *uint256.Int + + // LockupAmountBaseUnits specifies The amount of locked ProfilePublicKey DAO coins to be + // placed in a LockedBalanceEntry and given to RecipientPublicKey. + LockupAmountBaseUnits *uint256.Int } func (txnData *CoinLockupMetadata) GetTxnType() TxnType { @@ -1084,6 +1106,14 @@ func (bav *UtxoView) _connectCoinLockup( } // In the vested case, validate that the underlying profile is the transactor. + // NOTE: This check exists because there's several attack vectors that exist in letting + // any user perform vested lockups and send them to other users. For example, in the + // current implementation we rely on consolidation in the lockup transaction to provide + // quick unlock transactions by users. A malicious user could knowingly send vested + // lockups with small durations in the attempt to fragment the targeted users locked balance + // entries. This would result in the user being unable to easily receive future vested lockups. + // Attack vectors exist in various vested lockup designs and as a result it was decided + // best to only allow the transacting public key to perform vested lockups. 
if txMeta.VestingEndTimestampNanoSecs > txMeta.UnlockTimestampNanoSecs && !reflect.DeepEqual(txn.PublicKey, txMeta.ProfilePublicKey.ToBytes()) { return 0, 0, nil, @@ -1145,10 +1175,31 @@ func (bav *UtxoView) _connectCoinLockup( } bav._setProfileEntryMappings(profileEntry) + // SAFEGUARD: We perform a redundant check if the profile has ANY yield curve points. + // This could be removed for added performance, but it adds an extra sanity check before we compute + // any associated yield. Specifically this helps protect against unforeseen issues with GetLocalYieldCurvePoints + // which is meant to be an optimized DB implementation capable of quickly fetching the yield + profileEnablesYield := false + if txMeta.UnlockTimestampNanoSecs == txMeta.VestingEndTimestampNanoSecs { + // Fetch ALL yield curve points associated with the profilePKID. + yieldCurvePointsMap, err := bav.GetAllYieldCurvePoints(profilePKID) + if err != nil { + return 0, 0, nil, + errors.Wrap(err, "_connectCoinLockup failed to perform yield curve safeguard check") + } + + // Check if any yield curve points exist, updating profileEnablesYield if so. + for _, yieldCurvePoint := range yieldCurvePointsMap { + if !yieldCurvePoint.isDeleted { + profileEnablesYield = true + } + } + } + // If this is an unvested lockup, compute any accrued yield. // In the vested lockup case, the yield earned is always zero. yieldFromTxn := uint256.NewInt() - if txMeta.UnlockTimestampNanoSecs == txMeta.VestingEndTimestampNanoSecs { + if profileEnablesYield && txMeta.UnlockTimestampNanoSecs == txMeta.VestingEndTimestampNanoSecs { // Compute the lockup duration in nanoseconds. 
lockupDurationNanoSeconds := txMeta.UnlockTimestampNanoSecs - blockTimestampNanoSecs @@ -1246,6 +1297,9 @@ func (bav *UtxoView) _connectCoinLockup( } // (4) Set the new locked balance entry in the view + // NOTE: An astute reader may have noticed the comment on the LockedBalanceEntryKey definition + // and be confused why we are not deleting then setting the lockedBalanceEntry below. This is because + // we do not modify the key in this case, making it safe to just set the lockedBalanceEntry. lockedBalanceEntry.BalanceBaseUnits = *newLockedBalanceEntryBalance bav._setLockedBalanceEntry(lockedBalanceEntry) @@ -1293,10 +1347,9 @@ func (bav *UtxoView) _connectCoinLockup( VestingEndTimestampNanoSecs: txMeta.VestingEndTimestampNanoSecs, BalanceBaseUnits: *lockupValue, }) - } + } else if len(lockedBalanceEntries) > 0 { + // (3b) Go through each existing locked balance entry and consolidate - // (3b) Go through each existing locked balance entry and consolidate - if len(lockedBalanceEntries) > 0 { // Construct a "proposed" locked balance entry from the transaction's metadata. proposedLockedBalanceEntry := &LockedBalanceEntry{ HODLerPKID: hodlerPKID, @@ -1488,6 +1541,15 @@ func (bav *UtxoView) _connectCoinLockup( return totalInput, totalOutput, utxoOpsForTxn, nil } +// SplitVestedLockedBalanceEntry is used for splitting a vested locked balance entry into two pieces. +// It is assumed that the startSplitTimestamp lines up with the UnlockTimestampNanoSecs of the lockedBalanceEntry +// passed or endSplitTimestampNanoSecs lines up with the VestingEndTimestampNanoSecs of +// the lockedBalanceEntry passed. +// +// On return a splitLockedBalanceEntry will be returned with UnlockTimestampNanoSecs=startSplitTimestampNanoSecs +// and VestingEndTimestampNanoSecs=endSplitTimestampNanoSecs. 
+// In addition, a remainingLockedBalanceEntry will be returned whose UnlockTimestampNanoSecs and +// VestingEndTimestampNanoSecs is whatever remains of the lockedBalanceEntry passed minus the splitLockedBalanceEntry. func SplitVestedLockedBalanceEntry( lockedBalanceEntry *LockedBalanceEntry, startSplitTimestampNanoSecs int64, @@ -2541,30 +2603,13 @@ func CalculateVestedEarnings( return &lockedBalanceEntry.BalanceBaseUnits, nil } - // Here we know that: - // UnlockTimestampNanoSecs < blockTimestampNanoSecs < VestingEndTimestampNanoSecs - // Now we compute the fraction of time that's passed. - numerator := uint256.NewInt().SetUint64( - uint64(blockTimestampNanoSecs - lockedBalanceEntry.UnlockTimestampNanoSecs)) - denominator := uint256.NewInt().SetUint64( - uint64(lockedBalanceEntry.VestingEndTimestampNanoSecs - lockedBalanceEntry.UnlockTimestampNanoSecs)) - - // Compute the numerator (lockedBalanceEntry.BalanceBaseUnits * numerator). - var err error - numerator, err = SafeUint256().Mul( - &lockedBalanceEntry.BalanceBaseUnits, - numerator) - if err != nil { - return uint256.NewInt(), - errors.Wrap(err, "ComputeVestedEarnings failed to compute multiplication (time elapsed * balance)") - } - - // Compute the vested earnings. 
- vestedEarnings, err := SafeUint256().Div(numerator, denominator) + // Compute the vested earnings using CalculateLockupValueOverElapsedDuration + vestedEarnings, err := CalculateLockupValueOverElapsedDuration( + lockedBalanceEntry, + blockTimestampNanoSecs-lockedBalanceEntry.UnlockTimestampNanoSecs) if err != nil { return uint256.NewInt(), - errors.Wrap(err, "ComputeVestedEarnings failed to compute division "+ - "((time elapsed * balance) / total time)") + errors.Wrap(err, "CalculateVestedEarnings failed to compute vestedEarnings") } // Sanity check that vestedEarnings < BalanceBaseUnits From c2864d83a12e77a74159f4c8de74132e8dacc35e Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Thu, 1 Feb 2024 16:23:58 -0800 Subject: [PATCH 446/762] Update DefaultMaximumVestedIntersectionsPerLockupTransaction to 1000. --- lib/block_view_lockups_test.go | 23 ++++++++++++++++++++--- lib/block_view_types.go | 2 +- lib/constants.go | 4 ++-- 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index befc736b3..089c03e3f 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -3493,7 +3493,7 @@ func TestVestingIntersectionLimit(t *testing.T) { utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - require.Equal(t, utxoView.GetCurrentGlobalParamsEntry().MaximumVestedIntersectionsPerLockupTransaction, 100) + require.Equal(t, utxoView.GetCurrentGlobalParamsEntry().MaximumVestedIntersectionsPerLockupTransaction, 1000) // Generate consecutive vested locked balance entries equal to this limit. for ii := 0; ii < utxoView.GetCurrentGlobalParamsEntry().MaximumVestedIntersectionsPerLockupTransaction; ii++ { @@ -3506,7 +3506,24 @@ func TestVestingIntersectionLimit(t *testing.T) { } } - // Ensure we can consolidate on top of . + // Create 1,000,000,000 m0 dao coins held by m0. 
+ // We require this otherwise we will hit downstream (correct) RuleErrorCoinLockupInsufficientCoins errors. + { + _daoCoinTxnWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + DAOCoinMetadata{ + ProfilePublicKey: m0PkBytes, + OperationType: DAOCoinOperationTypeMint, + CoinsToMintNanos: *uint256.NewInt().SetUint64(1e9), + CoinsToBurnNanos: uint256.Int{}, + TransferRestrictionStatus: 0, + }) + } + + // Ensure we can consolidate on top of all these locked balance entries. maxIntersections := utxoView.GetCurrentGlobalParamsEntry().MaximumVestedIntersectionsPerLockupTransaction { _, _, _, err := _coinLockupWithConnectTimestamp( @@ -3735,7 +3752,7 @@ func TestRealWorldLockupsUseCase(t *testing.T) { // paramUpdaterPub - 10,000 nDESO func _setUpProfilesAndMintM0M1DAOCoins(testMeta *TestMeta) { // Create on-chain public keys with DESO sent from miner - _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 10000) + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 100000) _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 10000) _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 10000) _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 10000) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 7de06f83a..7d40002e3 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -4113,7 +4113,7 @@ type GlobalParamsEntry struct { // // Notice that this is functionally identical but at any given point in time we hit exactly one vested // locked balance entry. This consolidation on the lockup transaction operation could be computationally expensive. - // Hence, we limit this complexity with the MaximumVestedIntersectionsPerLockupTransactions (default: 100). 
+ // Hence, we limit this complexity with the MaximumVestedIntersectionsPerLockupTransactions (default: 1000). // When connecting a lockup transaction we check to see how many existing vested locked balance entries // we would intersect with. If we exceed the MaximumVestedIntersectionsPerLockupTransaction, we reject // the transaction. A user must split their single transaction into multiple disjoint time intervals which diff --git a/lib/constants.go b/lib/constants.go index ac92d02ae..eb8244476 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1151,7 +1151,7 @@ var DeSoMainnetParams = DeSoParams{ DefaultFailingTransactionBMFMultiplierBasisPoints: uint64(2500), // The maximum number of vested lockup intersections in a lockup transaction. - DefaultMaximumVestedIntersectionsPerLockupTransaction: 100, + DefaultMaximumVestedIntersectionsPerLockupTransaction: 1000, ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), @@ -1424,7 +1424,7 @@ var DeSoTestnetParams = DeSoParams{ DefaultFailingTransactionBMFMultiplierBasisPoints: uint64(2500), // The maximum number of vested lockup intersections in a lockup transaction. 
- DefaultMaximumVestedIntersectionsPerLockupTransaction: 100, + DefaultMaximumVestedIntersectionsPerLockupTransaction: 1000, ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), From 6aab3863d41785cf4a21aa8b418f5a22aadd365a Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Fri, 2 Feb 2024 07:13:11 -0800 Subject: [PATCH 447/762] PoS Aggregate QC Verify HighQCViews (#1003) --- consensus/utils.go | 14 +++++++++++--- consensus/utils_test.go | 25 +++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/consensus/utils.go b/consensus/utils.go index 810992925..c241bc87f 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -292,11 +292,19 @@ func isProperlyFormedAggregateQC(aggQC AggregateQuorumCertificate) bool { return false } - // Validate that all of the high QC views are non-zero - for _, view := range aggQC.GetHighQCViews() { - if view == 0 { + // Verify that AggregateSignature's HighQC view is the highest view in the HighQCViews. + // Also validate that all of the high QC views are non-zero + highestView := uint64(0) + for _, highQCView := range aggQC.GetHighQCViews() { + if highQCView == 0 { return false } + if highQCView > highestView { + highestView = highQCView + } + } + if highestView != aggQC.GetHighQC().GetView() { + return false } // Happy path diff --git a/consensus/utils_test.go b/consensus/utils_test.go index ae51e7d19..afe419589 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -189,6 +189,31 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { } require.True(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators, validators)) } + + // Test highQC with view lower than the highest view in the highQCViews. 
+ { + // Compute the aggregate signature payload + validator1TimeoutPayload := GetTimeoutSignaturePayload(view+3, view) + validator1TimeoutSignature, err := validatorPrivateKey1.Sign(validator1TimeoutPayload[:]) + require.NoError(t, err) + // Let's have validator 2 sign a timeout payload where high QC is for higher view. + validator2TimeoutPayload := GetTimeoutSignaturePayload(view+3, view+1) + validator2TimeoutSignature, err := validatorPrivateKey2.Sign(validator2TimeoutPayload[:]) + require.NoError(t, err) + + timeoutAggSig, err := bls.AggregateSignatures([]*bls.Signature{validator1TimeoutSignature, validator2TimeoutSignature}) + require.NoError(t, err) + qc := aggregateQuorumCertificate{ + view: view + 3, + highQC: &highQC, + highQCViews: []uint64{view, view + 1}, + aggregatedSignature: &aggregatedSignature{ + signersList: bitset.NewBitset().FromBytes([]byte{0x3}), // 0b0011, which represents validators 1 and 2 + signature: timeoutAggSig, + }, + } + require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators, validators)) + } } func TestIsProperlyFormedBlock(t *testing.T) { From 0d747013a3da38db91b9f179b5cbfa50dab920c6 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 2 Feb 2024 07:34:03 -0800 Subject: [PATCH 448/762] Add coin printing safeguard to CoinLockupTranfser and add additional comments. --- lib/block_view_lockups.go | 61 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 57 insertions(+), 4 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index bbe89a1ec..decfd2eed 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -931,9 +931,16 @@ func (txnData *UpdateCoinLockupParamsMetadata) New() DeSoTxnMetadata { // type CoinLockupTransferMetadata struct { - RecipientPublicKey *PublicKey - ProfilePublicKey *PublicKey - UnlockTimestampNanoSecs int64 + // The recipient of the locked coins. + RecipientPublicKey *PublicKey + + // The profile whose locked coins are being transferred. 
+ ProfilePublicKey *PublicKey + + // The UnlockTimestampNanoSecs to source the locked coins from. + UnlockTimestampNanoSecs int64 + + // The amount of locked coins to transfer. LockedCoinsToTransferBaseUnits *uint256.Int } @@ -991,6 +998,7 @@ func (txnData *CoinLockupTransferMetadata) New() DeSoTxnMetadata { // type CoinUnlockMetadata struct { + // The public key whose associated locked coins should be unlocked. ProfilePublicKey *PublicKey } @@ -1418,6 +1426,11 @@ func (bav *UtxoView) _connectCoinLockup( // ^ ^ // right overhang // We will break any overhang off into its own separate locked balance entry. + // + // NOTE: Because in the previous portion of the code we trim any locked balance entries that + // have "left overhang" we know the UnlockTimestampNanoSecs to be lined up between + // both the existing and proposed LockedBalanceEntry. This is important as it means after + // we remove any existing right overhang the two locked balance entries will be perfectly lined up. // Check for right overhang by the existing locked balance entry if existingLockedBalanceEntry.VestingEndTimestampNanoSecs > @@ -2214,7 +2227,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( senderPKID, profilePKID, txMeta.UnlockTimestampNanoSecs, txMeta.UnlockTimestampNanoSecs) if err != nil { return 0, 0, nil, - errors.Wrap(err, "connectCoinLockupTransfer failed to fetch senderLockedBalanceEntry:w") + errors.Wrap(err, "connectCoinLockupTransfer failed to fetch senderLockedBalanceEntry") } if senderLockedBalanceEntry == nil || senderLockedBalanceEntry.isDeleted { senderLockedBalanceEntry = &LockedBalanceEntry{ @@ -2291,6 +2304,46 @@ func (bav *UtxoView) _connectCoinLockupTransfer( bav._setLockedBalanceEntry(senderLockedBalanceEntry) bav._setLockedBalanceEntry(receiverLockedBalanceEntry) + // SAFEGUARD: Ensure no locked coins were printed by accident. 
+ prevTotalBalance, err := SafeUint256().Add( + &prevSenderLockedBalanceEntry.BalanceBaseUnits, + &prevReceiverLockedBalanceEntry.BalanceBaseUnits) + if err != nil { + return 0, 0, nil, errors.New("_connectCoinLockupTransfer" + + " cannot verify balance change safeguard check due to previous balance overflow") + } + newSenderLockedBalanceEntry, err := + bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + senderPKID, + profilePKID, + txMeta.UnlockTimestampNanoSecs, + txMeta.UnlockTimestampNanoSecs) + if err != nil { + return 0, 0, nil, errors.New("_connectCoinLockupTransfer" + + " cannot verify balance change safeguard check; cannot fetch new sender locked balance entry") + } + newReceiverLockedBalanceEntry, err := + bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + receiverPKID, + profilePKID, + txMeta.UnlockTimestampNanoSecs, + txMeta.UnlockTimestampNanoSecs) + if err != nil { + return 0, 0, nil, errors.New("_connectCoinLockupTransfer" + + " cannot verify balance change safeguard check; cannot fetch new receiver locked balance entry") + } + newTotalBalance, err := SafeUint256().Add( + &newSenderLockedBalanceEntry.BalanceBaseUnits, + &newReceiverLockedBalanceEntry.BalanceBaseUnits) + if err != nil { + return 0, 0, nil, errors.New("_connectCoinLockupTransfer" + + " cannot verify balance change safeguard check due to new balance overflow") + } + if !prevTotalBalance.Eq(newTotalBalance) { + return 0, 0, nil, errors.New("_connectCoinLockupTransfer" + + " failed coin printing safeguard check; this should not be possible") + } + // Create a UtxoOperation for easily disconnecting the transaction. 
utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ Type: OperationTypeCoinLockupTransfer, From 4ac0a2c4380afc9791e06214598c546da915a762 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 2 Feb 2024 08:05:42 -0800 Subject: [PATCH 449/762] Remove unused snapshot from DB gets for lockups. --- lib/block_view.go | 4 ++++ lib/block_view_lockups.go | 8 +++----- lib/db_utils.go | 19 +++++-------------- 3 files changed, 12 insertions(+), 19 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index d1ab441bb..b6b15e164 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -135,6 +135,10 @@ type UtxoView struct { LockedBalanceEntryKeyToLockedBalanceEntry map[LockedBalanceEntryKey]*LockedBalanceEntry // Lockup yield curve points. + // NOTE: While the nested map does break convention, this enables us to quickly read, scan, and modify + // lockup yield curve points without needing to traverse yield curve points held by other PKIDs. + // This enables us to have a high performance means of computing yield during lockup transactions without + // having to scan all yield curve points for all users stored in the view. 
PKIDToLockupYieldCurvePointKeyToLockupYieldCurvePoints map[PKID]map[LockupYieldCurvePointKey]*LockupYieldCurvePoint // Current EpochEntry diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index decfd2eed..55203cd40 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -124,7 +124,7 @@ func (lockedBalanceEntry *LockedBalanceEntry) Copy() *LockedBalanceEntry { ProfilePKID: lockedBalanceEntry.ProfilePKID.NewPKID(), UnlockTimestampNanoSecs: lockedBalanceEntry.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: lockedBalanceEntry.VestingEndTimestampNanoSecs, - BalanceBaseUnits: lockedBalanceEntry.BalanceBaseUnits, + BalanceBaseUnits: *lockedBalanceEntry.BalanceBaseUnits.Clone(), isDeleted: lockedBalanceEntry.isDeleted, } } @@ -238,8 +238,7 @@ func (bav *UtxoView) GetAllLockedBalanceEntriesForHodlerPKID( _err error, ) { // Pull entries from db. - dbLockedBalanceEntries, err := DBGetAllLockedBalanceEntriesForHodlerPKID( - bav.Handle, bav.Snapshot, hodlerPKID) + dbLockedBalanceEntries, err := DBGetAllLockedBalanceEntriesForHodlerPKID(bav.Handle, hodlerPKID) if err != nil { return nil, errors.Wrap(err, "GetLockedBalanceEntryForLockedBalanceEntryKey") @@ -321,7 +320,6 @@ func (bav *UtxoView) GetLimitedVestedLockedBalanceEntriesOverTimeInterval( // as a result of over-reading the db or from other db errors. vestedLockedBalanceEntries, err := DBGetLimitedVestedLockedBalanceEntries( bav.Handle, - bav.Snapshot, hodlerPKID, profilePKID, unlockTimestampNanoSecs, @@ -449,7 +447,7 @@ func (bav *UtxoView) GetUnlockableLockedBalanceEntries( // First, pull unlockable LockedBalanceEntries from the db and cache them in the UtxoView. 
dbUnvestedUnlockableLockedBalanceEntries, dbVestedUnlockableLockedBalanceEntries, err := - DBGetUnlockableLockedBalanceEntries(bav.Handle, bav.Snapshot, hodlerPKID, profilePKID, currentTimestampNanoSecs) + DBGetUnlockableLockedBalanceEntries(bav.Handle, hodlerPKID, profilePKID, currentTimestampNanoSecs) if err != nil { return nil, nil, errors.Wrap(err, "UtxoView.GetUnlockableLockedBalanceEntries") diff --git a/lib/db_utils.go b/lib/db_utils.go index 587ecfe3e..031234cbb 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -11020,7 +11020,6 @@ func DBGetLockedBalanceEntryForLockedBalanceEntryKeyWithTxn( func DBGetAllLockedBalanceEntriesForHodlerPKID( handle *badger.DB, - snap *Snapshot, hodlerPKID *PKID, ) ( _lockedBalanceEntries []*LockedBalanceEntry, @@ -11029,8 +11028,7 @@ func DBGetAllLockedBalanceEntriesForHodlerPKID( var lockedBalanceEntries []*LockedBalanceEntry var err error handle.View(func(txn *badger.Txn) error { - lockedBalanceEntries, err = DBGetAllLockedBalanceEntriesForHodlerPKIDWithTxn( - txn, snap, hodlerPKID) + lockedBalanceEntries, err = DBGetAllLockedBalanceEntriesForHodlerPKIDWithTxn(txn, hodlerPKID) return nil }) return lockedBalanceEntries, err @@ -11038,7 +11036,6 @@ func DBGetAllLockedBalanceEntriesForHodlerPKID( func DBGetAllLockedBalanceEntriesForHodlerPKIDWithTxn( txn *badger.Txn, - snap *Snapshot, hodlerPKID *PKID, ) ( _lockedBalanceEntries []*LockedBalanceEntry, @@ -11098,7 +11095,6 @@ func DBGetAllLockedBalanceEntriesForHodlerPKIDWithTxn( func DBGetUnlockableLockedBalanceEntries( handle *badger.DB, - snap *Snapshot, hodlerPKID *PKID, profilePKID *PKID, currentTimestampUnixNanoSecs int64, @@ -11113,7 +11109,7 @@ func DBGetUnlockableLockedBalanceEntries( var err error handle.View(func(txn *badger.Txn) error { unvested, vested, err = DBGetUnlockableLockedBalanceEntriesWithTxn( - txn, snap, hodlerPKID, profilePKID, currentTimestampUnixNanoSecs) + txn, hodlerPKID, profilePKID, currentTimestampUnixNanoSecs) return nil }) return unvested, 
vested, err @@ -11121,7 +11117,6 @@ func DBGetUnlockableLockedBalanceEntries( func DBGetUnlockableLockedBalanceEntriesWithTxn( txn *badger.Txn, - snap *Snapshot, hodlerPKID *PKID, profilePKID *PKID, currentTimestampUnixNanoSecs int64, @@ -11132,7 +11127,7 @@ func DBGetUnlockableLockedBalanceEntriesWithTxn( ) { // Get vested unlockable locked balance entries. unlockableVestedLockedBalanceEntries, err := DBGetUnlockableVestedLockedBalanceEntriesWithTxn( - txn, snap, hodlerPKID, profilePKID, currentTimestampUnixNanoSecs) + txn, hodlerPKID, profilePKID, currentTimestampUnixNanoSecs) if err != nil { return nil, nil, errors.Wrap(err, "DBGetUnlockableLockedBalanceEntriesWithTxn") @@ -11140,7 +11135,7 @@ func DBGetUnlockableLockedBalanceEntriesWithTxn( // Get unvested unlockable locked balance entries. unlockableUnvestedLockedBalanceEntries, err := DBGetUnlockableUnvestedLockedBalanceEntriesWithTxn( - txn, snap, hodlerPKID, profilePKID, currentTimestampUnixNanoSecs) + txn, hodlerPKID, profilePKID, currentTimestampUnixNanoSecs) if err != nil { return nil, nil, errors.Wrap(err, "DBGetUnlockableLockedBalanceEntriesWithTxn") @@ -11151,7 +11146,6 @@ func DBGetUnlockableLockedBalanceEntriesWithTxn( func DBGetUnlockableVestedLockedBalanceEntriesWithTxn( txn *badger.Txn, - snap *Snapshot, hodlerPKID *PKID, profilePKID *PKID, currentTimestampUnixNanoSecs int64, @@ -11223,7 +11217,6 @@ func DBGetUnlockableVestedLockedBalanceEntriesWithTxn( func DBGetUnlockableUnvestedLockedBalanceEntriesWithTxn( txn *badger.Txn, - snap *Snapshot, hodlerPKID *PKID, profilePKID *PKID, currentTimestampUnixNanoSecs int64, @@ -11293,7 +11286,6 @@ func DBGetUnlockableUnvestedLockedBalanceEntriesWithTxn( func DBGetLimitedVestedLockedBalanceEntries( handle *badger.DB, - snap *Snapshot, hodlerPKID *PKID, profilePKID *PKID, unlockTimestampNanoSecs int64, @@ -11320,7 +11312,7 @@ func DBGetLimitedVestedLockedBalanceEntries( var err error handle.View(func(txn *badger.Txn) error { lockedBalanceEntries, err = 
DBGetLimitedVestedLockedBalanceEntriesWithTxn( - txn, snap, hodlerPKID, profilePKID, unlockTimestampNanoSecs, vestingEndTimestampNanoSecs, limitToFetch) + txn, hodlerPKID, profilePKID, unlockTimestampNanoSecs, vestingEndTimestampNanoSecs, limitToFetch) return nil }) return lockedBalanceEntries, err @@ -11328,7 +11320,6 @@ func DBGetLimitedVestedLockedBalanceEntries( func DBGetLimitedVestedLockedBalanceEntriesWithTxn( txn *badger.Txn, - snap *Snapshot, hodlerPKID *PKID, profilePKID *PKID, unlockTimestampNanoSecs int64, From b2f03f46b75003000cf2b7310bcea5380b0a52e4 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 2 Feb 2024 11:16:29 -0500 Subject: [PATCH 450/762] Address Piotr Nits (#1004) --- consensus/event_loop.go | 2 +- consensus/utils.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 3eba47067..27168c054 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -237,7 +237,7 @@ func (fc *fastHotStuffEventLoop) storeBlocks(tip BlockWithValidatorList, safeBlo // Sanity check: the tip block and safe blocks must not have lower views than the genesis QC's view. if tip.Block.GetView() < fc.genesisQC.GetView() { - return errors.New("Tip block view must be greater than or qual to the genesis QC view") + return errors.New("Tip block view must be greater than or equal to the genesis QC view") } for _, block := range safeBlocks { diff --git a/consensus/utils.go b/consensus/utils.go index c241bc87f..32d8e7ca7 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -68,7 +68,7 @@ func IsValidSuperMajorityQuorumCertificate(qc QuorumCertificate, validators []Va } // IsValidSuperMajorityAggregateQuorumCertificate validates that the aggregate QC is properly formed and signed -// by a super-majority of validators in the network. It takes in two sets of validators defines as: +// by a super-majority of validators in the network. 
It takes in two sets of validators defined as: // - aggQCValidators: The validator set that signed the timeouts for the view that has timed out (the view in the aggregate QC) // - highQCValidators: The validator set that signed the high QC (the view in the high QC) func IsValidSuperMajorityAggregateQuorumCertificate(aggQC AggregateQuorumCertificate, aggQCValidators []Validator, highQCValidators []Validator) bool { From e84dfadef5e47019bb296846db4c436f7201b8bf Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 2 Feb 2024 08:26:43 -0800 Subject: [PATCH 451/762] Switch to passing LockedBalanceEntryKey by value in DB operations. --- lib/block_view_lockups.go | 8 ++-- lib/block_view_lockups_test.go | 84 +++++++++++++++++----------------- lib/db_utils.go | 46 ++++++++++++++++++- 3 files changed, 90 insertions(+), 48 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 55203cd40..f70b2fd71 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -269,14 +269,14 @@ func (bav *UtxoView) GetAllLockedBalanceEntriesForHodlerPKID( } func (bav *UtxoView) GetLockedBalanceEntryForLockedBalanceEntryKey( - lockedBalanceEntryKey *LockedBalanceEntryKey, + lockedBalanceEntryKey LockedBalanceEntryKey, ) ( _lockedBalanceEntry *LockedBalanceEntry, _err error, ) { // Check if the key exists in the view. if viewEntry, viewEntryExists := - bav.LockedBalanceEntryKeyToLockedBalanceEntry[*lockedBalanceEntryKey]; viewEntryExists { + bav.LockedBalanceEntryKeyToLockedBalanceEntry[lockedBalanceEntryKey]; viewEntryExists { if viewEntry == nil || viewEntry.isDeleted { return nil, nil } @@ -411,7 +411,7 @@ func (bav *UtxoView) GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestam // No mapping exists in the view, check for an entry in the DB. 
lockedBalanceEntry, err := - DBGetLockedBalanceEntryForLockedBalanceEntryKey(bav.Handle, bav.Snapshot, &lockedBalanceEntryKey) + DBGetLockedBalanceEntryForLockedBalanceEntryKey(bav.Handle, bav.Snapshot, lockedBalanceEntryKey) if err != nil { return nil, errors.Wrap(err, @@ -1271,7 +1271,7 @@ func (bav *UtxoView) _connectCoinLockup( // (1) Check for a locked balance entry with the same unlock time lockedBalanceEntry, err := bav.GetLockedBalanceEntryForLockedBalanceEntryKey( - &LockedBalanceEntryKey{ + LockedBalanceEntryKey{ HODLerPKID: *hodlerPKID, ProfilePKID: *profilePKID, UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 089c03e3f..baf34ac87 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -2538,7 +2538,7 @@ func TestCoinLockupIndirectRecipients(t *testing.T) { m0PKID := m0PKIDEntry.PKID // Check m3 LockedBalanceEntry - m3LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m3LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m3PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1000, @@ -2549,7 +2549,7 @@ func TestCoinLockupIndirectRecipients(t *testing.T) { require.True(t, m3LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) // Check the m0 LockedBalanceEntry as non-existent - m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1000, @@ -2568,7 +2568,7 @@ func TestCoinLockupIndirectRecipients(t *testing.T) { } // Check m3 LockedBalanceEntry - m3LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + 
m3LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m3PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1050, @@ -2579,7 +2579,7 @@ func TestCoinLockupIndirectRecipients(t *testing.T) { require.True(t, m3LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) // Check the m0 LockedBalanceEntry as non-existent - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1050, @@ -2630,7 +2630,7 @@ func TestSimpleVestedLockup(t *testing.T) { require.NoError(t, err) // Check m0 LockedBalanceEntry - m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1000, @@ -2638,7 +2638,7 @@ func TestSimpleVestedLockup(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry == nil) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1500, @@ -2676,7 +2676,7 @@ func TestSimpleVestedLockup(t *testing.T) { require.NoError(t, err) // Check m0 LockedBalanceEntry - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1500, @@ -2684,7 +2684,7 @@ func TestSimpleVestedLockup(t 
*testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry == nil) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1750, @@ -2722,7 +2722,7 @@ func TestSimpleVestedLockup(t *testing.T) { require.NoError(t, err) // Check m0 LockedBalanceEntry - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1750, @@ -2730,7 +2730,7 @@ func TestSimpleVestedLockup(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry == nil) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 2000, @@ -2787,7 +2787,7 @@ func TestNoOverlapVestedLockupConsolidation(t *testing.T) { // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) m0PKID := m0PKIDEntry.PKID - m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1000, @@ -2796,7 +2796,7 @@ func TestNoOverlapVestedLockupConsolidation(t *testing.T) { require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) - m0LockedBalanceEntry, err = 
utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 3000, @@ -2840,7 +2840,7 @@ func TestPerfectOverlapVestedLockupConsolidation(t *testing.T) { // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) m0PKID := m0PKIDEntry.PKID - m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1000, @@ -2891,7 +2891,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) m0PKID := m0PKIDEntry.PKID - m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1000, @@ -2900,7 +2900,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1500, @@ -2938,7 +2938,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { // Check m0 LockedBalanceEntry m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID - 
m1LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m1LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, UnlockTimestampNanoSecs: 1000, @@ -2947,7 +2947,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) - m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, UnlockTimestampNanoSecs: 1500, @@ -2998,7 +2998,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) m0PKID := m0PKIDEntry.PKID - m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1000, @@ -3007,7 +3007,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1499))) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1500, @@ -3045,7 +3045,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { // Check m1 LockedBalanceEntry m1PKIDEntry := 
utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID - m1LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m1LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, UnlockTimestampNanoSecs: 1000, @@ -3054,7 +3054,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) - m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, UnlockTimestampNanoSecs: 1500, @@ -3104,7 +3104,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) m0PKID := m0PKIDEntry.PKID - m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1000, @@ -3113,7 +3113,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1501, @@ -3122,7 +3122,7 @@ func TestExternalThreeWayLockupConsolidation(t 
*testing.T) { require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1250, @@ -3131,7 +3131,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1250, @@ -3167,7 +3167,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { // Check m1 LockedBalanceEntry m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID - m1LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m1LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, UnlockTimestampNanoSecs: 1000, @@ -3176,7 +3176,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) - m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, 
UnlockTimestampNanoSecs: 1250, @@ -3185,7 +3185,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1002))) - m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, UnlockTimestampNanoSecs: 1501, @@ -3235,7 +3235,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) m0PKID := m0PKIDEntry.PKID - m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1000, @@ -3244,7 +3244,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1250, @@ -3253,7 +3253,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = 
utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1751, @@ -3290,7 +3290,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { // Check m1 LockedBalanceEntry m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID - m1LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m1LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, UnlockTimestampNanoSecs: 1000, @@ -3299,7 +3299,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) - m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, UnlockTimestampNanoSecs: 1250, @@ -3308,7 +3308,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1501))) - m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, UnlockTimestampNanoSecs: 1751, @@ -3367,7 +3367,7 @@ func TestSimpleJointExistingVestedLockups(t *testing.T) { // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) m0PKID := m0PKIDEntry.PKID - m0LockedBalanceEntry, err := 
utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1000, @@ -3376,7 +3376,7 @@ func TestSimpleJointExistingVestedLockups(t *testing.T) { require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1501))) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1501, @@ -3435,7 +3435,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) m0PKID := m0PKIDEntry.PKID - m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1000, @@ -3444,7 +3444,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 1500, @@ -3453,7 +3453,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) require.True(t, 
m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(750))) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 2001, @@ -3462,7 +3462,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(499))) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 3000, @@ -3471,7 +3471,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(751))) - m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, UnlockTimestampNanoSecs: 3501, @@ -3541,7 +3541,7 @@ func TestVestingIntersectionLimit(t *testing.T) { NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) for ii := 0; ii < utxoView.GetCurrentGlobalParamsEntry().MaximumVestedIntersectionsPerLockupTransaction; ii++ { - m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(&LockedBalanceEntryKey{ + m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: 
*m0PKID, UnlockTimestampNanoSecs: int64(ii*1000) + 1, diff --git a/lib/db_utils.go b/lib/db_utils.go index 031234cbb..80d9ae768 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -10974,7 +10974,7 @@ func DbDeleteLockedBalanceEntryWithTxn( func DBGetLockedBalanceEntryForLockedBalanceEntryKey( handle *badger.DB, snap *Snapshot, - lockedBalanceEntryKey *LockedBalanceEntryKey, + lockedBalanceEntryKey LockedBalanceEntryKey, ) ( _lockedBalanceEntry *LockedBalanceEntry, _err error, @@ -10991,7 +10991,7 @@ func DBGetLockedBalanceEntryForLockedBalanceEntryKey( func DBGetLockedBalanceEntryForLockedBalanceEntryKeyWithTxn( txn *badger.Txn, snap *Snapshot, - lockedBalanceEntryKey *LockedBalanceEntryKey, + lockedBalanceEntryKey LockedBalanceEntryKey, ) ( _lockedBalanceEntry *LockedBalanceEntry, _err error, @@ -11027,6 +11027,13 @@ func DBGetAllLockedBalanceEntriesForHodlerPKID( ) { var lockedBalanceEntries []*LockedBalanceEntry var err error + + // Validate profilePKID is not nil. + if hodlerPKID == nil { + return nil, errors.New("DBGetAllLockedBalanceEntriesForHodlerPKID: " + + "called with nil hodlerPKID; this shouldn't happen") + } + handle.View(func(txn *badger.Txn) error { lockedBalanceEntries, err = DBGetAllLockedBalanceEntriesForHodlerPKIDWithTxn(txn, hodlerPKID) return nil @@ -11103,6 +11110,17 @@ func DBGetUnlockableLockedBalanceEntries( _vestedUnlockableLockedEntries []*LockedBalanceEntry, _err error, ) { + // Validate profilePKID and holderPKID are not nil. 
+ if profilePKID == nil { + return nil, nil, + errors.New("DBGetUnlockableLockedBalanceEntries: " + + "called with nil profilePKID; this shouldn't happen") + } + if hodlerPKID == nil { + return nil, nil, + errors.New("DBGetUnlockableLockedBalanceEntries: " + + "called with nil hodlerPKID; this shouldn't happen") + } var unvested []*LockedBalanceEntry var vested []*LockedBalanceEntry @@ -11308,6 +11326,16 @@ func DBGetLimitedVestedLockedBalanceEntries( // iteration on the specified unlockTimestampNanoSecs, check for an overlapping // entry, and move on. + // Validate profilePKID and holderPKID are not nil. + if profilePKID == nil { + return nil, errors.New("DBGetLimitedVestedLockedBalanceEntries: " + + "called with nil profilePKID; this shouldn't happen") + } + if hodlerPKID == nil { + return nil, errors.New("DBGetLimitedVestedLockedBalanceEntries: " + + "called with nil hodlerPKID; this shouldn't happen") + } + var lockedBalanceEntries []*LockedBalanceEntry var err error handle.View(func(txn *badger.Txn) error { @@ -11518,6 +11546,13 @@ func DbDeleteLockupYieldCurvePointWithTxn(txn *badger.Txn, snap *Snapshot, func DBGetAllYieldCurvePointsByProfilePKID(handle *badger.DB, snap *Snapshot, profilePKID *PKID) (_lockupYieldCurvePoints []*LockupYieldCurvePoint, _err error) { var lockupYieldCurvePoints []*LockupYieldCurvePoint + + // Validate profilePKID is not nil. 
+ if profilePKID == nil { + return nil, errors.New("DBGetAllYieldCurvePointsByProfilePKID: " + + "called with nil profilePKID; this shouldn't happen") + } + err := handle.View(func(txn *badger.Txn) error { var err error lockupYieldCurvePoints, err = DBGetAllYieldCurvePointsByProfilePKIDWithTxn( @@ -11571,6 +11606,13 @@ func DBGetAllYieldCurvePointsByProfilePKIDWithTxn(txn *badger.Txn, snap *Snapsho func DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecs(handle *badger.DB, snap *Snapshot, profilePKID *PKID, lockupDurationNanoSecs int64) (_lockupYieldCurvePoint *LockupYieldCurvePoint, _err error) { var lockupYieldCurvePoint *LockupYieldCurvePoint + + // Validate profilePKID is not nil. + if profilePKID == nil { + return nil, errors.New("DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecs: " + + "called with nil profilePKID; this shouldn't happen") + } + err := handle.View(func(txn *badger.Txn) error { var err error lockupYieldCurvePoint, err = DBGetYieldCurvePointsByProfilePKIDAndDurationNanoSecsWithTxn( From eeb5946c99e8a57ca92a2c13f4d01f43e305fa92 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 2 Feb 2024 08:35:14 -0800 Subject: [PATCH 452/762] Add GetLocalYieldCurvePoints comment. --- lib/block_view_lockups.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index f70b2fd71..1090ba061 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -651,6 +651,17 @@ func (bav *UtxoView) GetYieldCurvePointByProfilePKIDAndDurationNanoSecs(profileP return lockupYieldCurvePoint, nil } +// GetLocalYieldCurvePoints is used when trying to figure out what yield to award a user for a coin lockup +// transaction. Consider a profile who has generated the following yield curve: {0.5 years: 5%, 2 years: 10%} +// While this yield curve is simple, what should happen in the event where a lockup of length 1 year occurs? 
+// In this case it's convenient to provide the "local" points meaning those points on the yield curve closest +// to the one year lockup duration. If GetLocalYieldCurvePoints was called in this case, it would return +// 0.5 years @ 5% as the leftLockupPoint and 2 years @ 10% as the rightLockupPoint. +// +// To be more specific, the leftLockupPoint returned will always be greatest yield curve point with a +// LockupDurationNanoSecs less than the lockupDuration provided. The rightLockupPoint returned will +// always be the least yield curve point with a LockupDurationNanoSecs greater than or equal to the lockupDuration +// provided. func (bav *UtxoView) GetLocalYieldCurvePoints(profilePKID *PKID, lockupDuration int64) ( _leftLockupPoint *LockupYieldCurvePoint, _rightLockupPoint *LockupYieldCurvePoint, _err error) { var leftLockupPoint *LockupYieldCurvePoint From aa50e86ceef641780d62d8a1b83fa2e28dc43c42 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 2 Feb 2024 08:56:53 -0800 Subject: [PATCH 453/762] Add isDeleted check to previous transactor balance entry in CoinUnlock. --- lib/block_view_lockups.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 1090ba061..0e6f01de0 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -2611,6 +2611,14 @@ func (bav *UtxoView) _connectCoinUnlock( // Credit the transactor with either DAO coins or DeSo for this unlock. prevTransactorBalanceEntry := bav._getBalanceEntryForHODLerPKIDAndCreatorPKID(hodlerPKID, profilePKID, true) + if prevTransactorBalanceEntry == nil || prevTransactorBalanceEntry.isDeleted { + prevTransactorBalanceEntry = &BalanceEntry{ + HODLerPKID: hodlerPKID, + CreatorPKID: profilePKID, + BalanceNanos: uint256.Int{}, + HasPurchased: false, + } + } // Credit the transactor with the unlock amount. 
newTransactorBalanceEntry := prevTransactorBalanceEntry.Copy() From 8a18ecc67b1a1ec0bdf24c19b8dd069cdfdbc9f8 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 2 Feb 2024 09:12:22 -0800 Subject: [PATCH 454/762] Add UpdateCoinLockupParams no-op check. --- lib/block_view_lockups.go | 7 +++++++ lib/errors.go | 1 + 2 files changed, 8 insertions(+) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 0e6f01de0..ea51baa7e 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1970,6 +1970,13 @@ func (bav *UtxoView) _connectUpdateCoinLockupParams( "_connectUpdateCoinLockupParams") } + // Check to ensure this transaction is not a no-op. + if !txMeta.NewLockupTransferRestrictions && txMeta.LockupYieldDurationNanoSecs == 0 { + return 0, 0, nil, errors.Wrap(RuleErrorUpdateCoinLockupParamsIsNoOp, + "_connectUpdateCoinLockupParams") + + } + // Fetch the previous yield curve point associated with this pair. prevLockupYieldCurvePoint, err := bav.GetYieldCurvePointByProfilePKIDAndDurationNanoSecs(profilePKID, txMeta.LockupYieldDurationNanoSecs) diff --git a/lib/errors.go b/lib/errors.go index 14309c4ad..468f357a5 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -530,6 +530,7 @@ const ( RuleErrorUpdateCoinLockupParamsOnInvalidPKID RuleError = "RuleErrorUpdateCoinLockupParamsOnInvalidPKID" RuleErrorUpdateCoinLockupParamsInvalidRestrictions RuleError = "RuleErrorUpdateCoinLockupParamsInvalidRestrictions" RuleErrorUpdateCoinLockupParamsNegativeDuration RuleError = "RuleErrorUpdateCoinLockupParamsInvalidRestrictions" + RuleErrorUpdateCoinLockupParamsIsNoOp RuleError = "RuleErrorUpdateCoinLockupParamsIsNoOp" RuleErrorUpdateCoinLockupParamsDeletingNonExistentPoint RuleError = "RuleErrorUpdateCoinLockupParamsDeletingNonExistentPoint" RuleErrorUpdateCoinLockupParamsUpdatingNonExistentProfile RuleError = "RuleErrorUpdateCoinLockupParamsUpdatingNonExistentProfile" RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction RuleError = 
"RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction" From cf91ed3d93f2ff2c685ea4073dd97df5c324124b Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 2 Feb 2024 09:17:17 -0800 Subject: [PATCH 455/762] Use Encode and DecodeByteArray for LockedBalanceEntry to save byte. --- lib/block_view_lockups.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index ea51baa7e..6d9f61e05 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -150,7 +150,7 @@ func (lockedBalanceEntry *LockedBalanceEntry) RawEncodeWithoutMetadata(blockHeig data = append(data, EncodeToBytes(blockHeight, lockedBalanceEntry.ProfilePKID, skipMetadata...)...) data = append(data, IntToBuf(lockedBalanceEntry.UnlockTimestampNanoSecs)...) data = append(data, IntToBuf(lockedBalanceEntry.VestingEndTimestampNanoSecs)...) - data = append(data, VariableEncodeUint256(&lockedBalanceEntry.BalanceBaseUnits)...) + data = append(data, EncodeByteArray(lockedBalanceEntry.BalanceBaseUnits.Bytes())...) return data } @@ -182,11 +182,11 @@ func (lockedBalanceEntry *LockedBalanceEntry) RawDecodeWithoutMetadata(blockHeig } // BalanceBaseUnits - balanceBaseUnits, err := VariableDecodeUint256(rr) + balanceBaseUnitsBytes, err := DecodeByteArray(rr) if err != nil { return errors.Wrap(err, "LockedBalanceEntry.Decode: Problem reading BalanceBaseUnits") } - lockedBalanceEntry.BalanceBaseUnits = *balanceBaseUnits + lockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt().SetBytes(balanceBaseUnitsBytes) return nil } From 9385544d88cd3611df82e9294f404bd4e875a033 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 2 Feb 2024 10:05:15 -0800 Subject: [PATCH 456/762] Add ModifiedLockedBalanceEntry for modified keys in disconnectCoinUnlock. 
--- lib/block_view_lockups.go | 22 ++++++++++++++++++---- lib/block_view_types.go | 16 ++++++++++++++-- 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 6d9f61e05..94dc2d336 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -2570,7 +2570,7 @@ func (bav *UtxoView) _connectCoinUnlock( // Unlock all vested locked balance entries. // NOTE: See the comment on LockedBalanceEntryKey for how we deal with modified vested locked balance entries. - var prevVestedLockedBalanceEntries []*LockedBalanceEntry + var modifiedLockedBalanceEntry *LockedBalanceEntry for _, unlockableLockedBalanceEntry := range vestedUnlockableLockedBalanceEntries { // Depending on the time of unlock, compute how much from the balance can be unlocked. amountToUnlock, err := CalculateVestedEarnings(unlockableLockedBalanceEntry, blockTimestampNanoSecs) @@ -2588,7 +2588,7 @@ func (bav *UtxoView) _connectCoinUnlock( } // Append the original LockedBalanceEntry in the event we rollback the transaction. - prevVestedLockedBalanceEntries = append(prevVestedLockedBalanceEntries, unlockableLockedBalanceEntry.Copy()) + prevLockedBalanceEntries = append(prevLockedBalanceEntries, unlockableLockedBalanceEntry.Copy()) // Depending on when the unlock occurs, we either DELETE or MODIFY the locked balance entry. if blockTimestampNanoSecs >= unlockableLockedBalanceEntry.VestingEndTimestampNanoSecs { @@ -2598,7 +2598,7 @@ func (bav *UtxoView) _connectCoinUnlock( bav._deleteLockedBalanceEntry(unlockableLockedBalanceEntry) // Create and modify a copy to prevent pointer reuse. 
- modifiedLockedBalanceEntry := unlockableLockedBalanceEntry.Copy() + modifiedLockedBalanceEntry = unlockableLockedBalanceEntry.Copy() modifiedLockedBalanceEntry.UnlockTimestampNanoSecs = blockTimestampNanoSecs newBalanceBaseUnits, err := SafeUint256().Sub( &modifiedLockedBalanceEntry.BalanceBaseUnits, @@ -2657,6 +2657,7 @@ func (bav *UtxoView) _connectCoinUnlock( Type: OperationTypeCoinUnlock, PrevTransactorBalanceEntry: prevTransactorBalanceEntry, PrevLockedBalanceEntries: prevLockedBalanceEntries, + ModifiedLockedBalanceEntry: modifiedLockedBalanceEntry, PrevCoinEntry: prevCoinEntry, }) @@ -2737,8 +2738,14 @@ func (bav *UtxoView) _disconnectCoinUnlock( } // Sanity check the data within the CoinUnlock. - // Reverting an unlock of LockedBalanceEntry should not result in less coins. + // Reverting an unlock of LockedBalanceEntry for unvested lockups should not result in less coins. for _, prevLockedBalanceEntry := range operationData.PrevLockedBalanceEntries { + // Skip the balance decrease check for vested lockups. + if prevLockedBalanceEntry.UnlockTimestampNanoSecs < prevLockedBalanceEntry.VestingEndTimestampNanoSecs { + bav._setLockedBalanceEntry(prevLockedBalanceEntry) + continue + } + lockedBalanceEntry, err := bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( prevLockedBalanceEntry.HODLerPKID, @@ -2764,6 +2771,13 @@ func (bav *UtxoView) _disconnectCoinUnlock( bav._setLockedBalanceEntry(prevLockedBalanceEntry) } + // If a modified vested locked balance entry exists, we must delete this from the view to ensure proper reversion. + // This is because the underlying key for the vested lockup may have changed, and we + // would otherwise leave this lingering in the view. + if operationData.ModifiedLockedBalanceEntry != nil { + bav._deleteLockedBalanceEntry(operationData.ModifiedLockedBalanceEntry) + } + // Reverting the BalanceEntry should not result in more coins. 
profilePKID := operationData.PrevLockedBalanceEntries[0].ProfilePKID hodlerPKID := operationData.PrevLockedBalanceEntries[0].HODLerPKID diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 7d40002e3..e9fc47069 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -994,7 +994,13 @@ type UtxoOperation struct { PrevReceiverLockedBalanceEntry *LockedBalanceEntry // PrevLockedBalanceEntries is a slice of LockedBalanceEntry prior to a coin unlock. - PrevLockedBalanceEntries []*LockedBalanceEntry + // ModifiedLockedBalanceEntry is required due to the dynamic nature of the LockedBalanceEntryKey + // in the coin unlock transaction. Essentially we need to know what LockedBalanceEntryKey + // did not exist prior to the coin unlock to ensure it is properly deleted during a disconnect. + // There is at most one modified locked balance entry per unlock (a vested locked balance entry + // that has not fully expired yet). + PrevLockedBalanceEntries []*LockedBalanceEntry + ModifiedLockedBalanceEntry *LockedBalanceEntry // StakeAmountNanosDiff is used by Rosetta to return the amount of DESO that was added // to a StakeEntry during the end-of-epoch hook. It's needed @@ -1377,8 +1383,9 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada data = append(data, EncodeToBytes(blockHeight, op.PrevSenderLockedBalanceEntry, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, op.PrevReceiverLockedBalanceEntry, skipMetadata...)...) - // PrevLockedBalanceEntries + // PrevLockedBalanceEntries, ModifiedLockedBalanceEntry data = append(data, EncodeDeSoEncoderSlice(op.PrevLockedBalanceEntries, blockHeight, skipMetadata...)...) + data = append(data, EncodeToBytes(blockHeight, op.ModifiedLockedBalanceEntry, skipMetadata...)...) // StakeAmountNanosDiff data = append(data, UintToBuf(op.StakeAmountNanosDiff)...) @@ -2063,6 +2070,11 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. 
if op.PrevLockedBalanceEntries, err = DecodeDeSoEncoderSlice[*LockedBalanceEntry](rr); err != nil { return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockedBalanceEntry: ") } + // ModifiedLockedBalanceEntry + if op.ModifiedLockedBalanceEntry, err = DecodeDeSoEncoder(&LockedBalanceEntry{}, rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem Reading ModifiedLockedBalanceEntry: ") + } + // StakeAmountNanosDiff if op.StakeAmountNanosDiff, err = ReadUvarint(rr); err != nil { return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading StakeAmountNanosDiff: ") From e660841835fe248caf9d9e54ff2e16ddea1a909f Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 2 Feb 2024 10:25:14 -0800 Subject: [PATCH 457/762] Introduce SetLockedBalanceEntries to UtxoOps for disconnecting vested lockups. --- lib/block_view_lockups.go | 132 ++++++++++++++++++++------------------ lib/block_view_types.go | 16 ++++- 2 files changed, 81 insertions(+), 67 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 94dc2d336..a64204958 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1277,6 +1277,7 @@ func (bav *UtxoView) _connectCoinLockup( // In the vested case we must make careful modifications to the existing locked balance entry/entries. 
var previousLockedBalanceEntry *LockedBalanceEntry var previousLockedBalanceEntries []*LockedBalanceEntry + var setLockedBalanceEntries []*LockedBalanceEntry if txMeta.UnlockTimestampNanoSecs == txMeta.VestingEndTimestampNanoSecs { // Unvested consolidation case: @@ -1342,9 +1343,9 @@ func (bav *UtxoView) _connectCoinLockup( errors.Wrap(err, "_connectCoinLockup failed to fetch vested locked balance entries") } - // (2a) Store the previous locked balance entries + // (2a) Store the previous locked balance entries in the event of disconnect for _, lockedBalanceEntry := range lockedBalanceEntries { - previousLockedBalanceEntries = append(previousLockedBalanceEntries, lockedBalanceEntry) + previousLockedBalanceEntries = append(previousLockedBalanceEntries, lockedBalanceEntry.Copy()) } // (2b) Delete the previous locked balance entries in the view @@ -1357,13 +1358,15 @@ func (bav *UtxoView) _connectCoinLockup( // (3a) First check if there's no existing vested locked balance entries, this is the no-consolidation case if len(lockedBalanceEntries) == 0 { - bav._setLockedBalanceEntry(&LockedBalanceEntry{ + newLockedBalanceEntry := &LockedBalanceEntry{ HODLerPKID: hodlerPKID, ProfilePKID: profilePKID, UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: txMeta.VestingEndTimestampNanoSecs, BalanceBaseUnits: *lockupValue, - }) + } + bav._setLockedBalanceEntry(newLockedBalanceEntry) + setLockedBalanceEntries = append(setLockedBalanceEntries, newLockedBalanceEntry.Copy()) } else if len(lockedBalanceEntries) > 0 { // (3b) Go through each existing locked balance entry and consolidate @@ -1405,6 +1408,7 @@ func (bav *UtxoView) _connectCoinLockup( // balance entry in half it's impossible to intersect an existing // vested locked balance entry. 
bav._setLockedBalanceEntry(splitLockedBalanceEntry) + setLockedBalanceEntries = append(setLockedBalanceEntries, splitLockedBalanceEntry.Copy()) // We update the existingLockedBalanceEntry as broke the left overhanging portion off. existingLockedBalanceEntry = remainingLockedBalanceEntry @@ -1424,6 +1428,7 @@ func (bav *UtxoView) _connectCoinLockup( // Set the splitLockedBalanceEntry into the view. bav._setLockedBalanceEntry(splitLockedBalanceEntry) + setLockedBalanceEntries = append(setLockedBalanceEntries, splitLockedBalanceEntry.Copy()) // We update the proposedLockedBalanceEntry as the left overhanging portion was broken off. proposedLockedBalanceEntry = remainingLockedBalanceEntry @@ -1455,6 +1460,7 @@ func (bav *UtxoView) _connectCoinLockup( // Set the splitLockedBalanceEntry into the view. bav._setLockedBalanceEntry(splitLockedBalanceEntry) + setLockedBalanceEntries = append(setLockedBalanceEntries, splitLockedBalanceEntry.Copy()) // We update the existingLockedBalanceEntry as broke the right overhanging portion off. existingLockedBalanceEntry = remainingLockedBalanceEntry @@ -1516,6 +1522,7 @@ func (bav *UtxoView) _connectCoinLockup( // Set the now combined splitLockedBalanceEntry into the view. bav._setLockedBalanceEntry(splitLockedBalanceEntry) + setLockedBalanceEntries = append(setLockedBalanceEntries, splitLockedBalanceEntry.Copy()) // Update the proposed locked balance entry with the remaining portion. proposedLockedBalanceEntry = remainingLockedBalanceEntry @@ -1524,6 +1531,7 @@ func (bav *UtxoView) _connectCoinLockup( // is the only vesting schedule left. if ii == len(lockedBalanceEntries)-1 { bav._setLockedBalanceEntry(proposedLockedBalanceEntry) + setLockedBalanceEntries = append(setLockedBalanceEntries, proposedLockedBalanceEntry.Copy()) } } @@ -1546,6 +1554,7 @@ func (bav *UtxoView) _connectCoinLockup( // Update the remaining entry. 
proposedLockedBalanceEntry.BalanceBaseUnits = *combinedBalanceBaseUnits bav._setLockedBalanceEntry(proposedLockedBalanceEntry) + setLockedBalanceEntries = append(setLockedBalanceEntries, proposedLockedBalanceEntry.Copy()) } } } @@ -1556,6 +1565,8 @@ func (bav *UtxoView) _connectCoinLockup( Type: OperationTypeCoinLockup, PrevTransactorBalanceEntry: prevTransactorBalanceEntry, PrevLockedBalanceEntry: previousLockedBalanceEntry, + PrevLockedBalanceEntries: previousLockedBalanceEntries, + SetLockedBalanceEntries: setLockedBalanceEntries, PrevCoinEntry: prevCoinEntry, }) @@ -1841,76 +1852,69 @@ func (bav *UtxoView) _disconnectCoinLockup( "but malformed utxoOpsForTxn") } - // Sanity check the data within the CoinLockup. Reverting a lockup should not result in more coins. - lockedBalanceEntry, err := - bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( - operationData.PrevLockedBalanceEntry.HODLerPKID, - operationData.PrevLockedBalanceEntry.ProfilePKID, - operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs, - operationData.PrevLockedBalanceEntry.VestingEndTimestampNanoSecs) - if err != nil { - return errors.Wrap(err, "_disconnectCoinLockup failed to fetch current lockedBalanceEntry") - } - if lockedBalanceEntry == nil || lockedBalanceEntry.isDeleted { - lockedBalanceEntry = &LockedBalanceEntry{ - HODLerPKID: operationData.PrevLockedBalanceEntry.HODLerPKID, - ProfilePKID: operationData.PrevLockedBalanceEntry.ProfilePKID, - UnlockTimestampNanoSecs: operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs, - VestingEndTimestampNanoSecs: operationData.PrevLockedBalanceEntry.VestingEndTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), - } - } - if lockedBalanceEntry.BalanceBaseUnits.Lt(&operationData.PrevLockedBalanceEntry.BalanceBaseUnits) { - return fmt.Errorf("_disconnectCoinLockup: Reversion of coin lockup would result in " + - "more coins in the lockup") - } - - // Reset the transactor's 
LockedBalanceEntry to what it was previously. - bav._setLockedBalanceEntry(operationData.PrevLockedBalanceEntry) - - // Depending on whether the lockup dealt with DeSo, we should have either a UtxoOp or a PrevTransactorBalanceEntry. - isDeSoLockup := operationData.PrevLockedBalanceEntry.ProfilePKID.IsZeroPKID() - if isDeSoLockup { - // Revert the spent DeSo. - operationData = utxoOpsForTxn[operationIndex] - if operationData.Type != OperationTypeSpendBalance { - return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeSpendBalance "+ - "but found type %v", operationData.Type) + // Depending on whether this was a vested or unvested lockup, we disconnect differently. + if operationData.PrevLockedBalanceEntry != nil { + // Sanity check the data within the CoinLockup. Reverting an unvested lockup should not result in more coins. + lockedBalanceEntry, err := + bav.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( + operationData.PrevLockedBalanceEntry.HODLerPKID, + operationData.PrevLockedBalanceEntry.ProfilePKID, + operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs, + operationData.PrevLockedBalanceEntry.VestingEndTimestampNanoSecs) + if err != nil { + return errors.Wrap(err, "_disconnectCoinLockup failed to fetch current lockedBalanceEntry") } - if !bytes.Equal(operationData.BalancePublicKey, currentTxn.PublicKey) { - return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeSpendBalance but found " + - "mismatched public keys") + if lockedBalanceEntry == nil || lockedBalanceEntry.isDeleted { + lockedBalanceEntry = &LockedBalanceEntry{ + HODLerPKID: operationData.PrevLockedBalanceEntry.HODLerPKID, + ProfilePKID: operationData.PrevLockedBalanceEntry.ProfilePKID, + UnlockTimestampNanoSecs: operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs, + VestingEndTimestampNanoSecs: operationData.PrevLockedBalanceEntry.VestingEndTimestampNanoSecs, + BalanceBaseUnits: *uint256.NewInt(), + 
} } - err := bav._unSpendBalance(operationData.BalanceAmountNanos, currentTxn.PublicKey) - if err != nil { - return errors.Wrapf(err, "_disconnectCoinLockup: Problem unSpending balance of %v "+ - "for the transactor", operationData.BalanceAmountNanos) + if lockedBalanceEntry.BalanceBaseUnits.Lt(&operationData.PrevLockedBalanceEntry.BalanceBaseUnits) { + return fmt.Errorf("_disconnectCoinLockup: Reversion of coin lockup would result in " + + "more coins in the lockup") } - } else { - // Revert the transactor's DAO coin balance. - bav._setBalanceEntryMappings(operationData.PrevTransactorBalanceEntry, true) - // Fetch the profile entry associated with the lockup. - profileEntry := bav.GetProfileEntryForPKID(operationData.PrevLockedBalanceEntry.ProfilePKID) - if profileEntry == nil || profileEntry.isDeleted { - return fmt.Errorf("_disconnectCoinLockup: Trying to revert coin entry " + - "update but found nil profile entry; this shouldn't be possible") + // Reset the transactor's LockedBalanceEntry to what it was previously. + bav._setLockedBalanceEntry(operationData.PrevLockedBalanceEntry) + } else { + // Delete any set locked balance entries. + for _, setLockedBalanceEntry := range operationData.SetLockedBalanceEntries { + bav._setLockedBalanceEntry(setLockedBalanceEntry) } - // Ensure the PrevCoinEntry is not nil. This shouldn't be possible. - if operationData.PrevCoinEntry == nil { - return fmt.Errorf("_disconnectCoinLockup: Trying to revert coin entry " + - "update but found nil prev coin entry; this shouldn't be possible") + // Set any previous locked balance entries. + for _, prevLockedBalanceEntry := range operationData.PrevLockedBalanceEntries { + bav._setLockedBalanceEntry(prevLockedBalanceEntry) } + } - // Revert the coin entry. - profileEntry.DAOCoinEntry = *operationData.PrevCoinEntry - bav._setProfileEntryMappings(profileEntry) + // Revert the transactor's DAO coin balance. 
+ bav._setBalanceEntryMappings(operationData.PrevTransactorBalanceEntry, true) + + // Fetch the profile entry associated with the lockup. + profileEntry := bav.GetProfileEntryForPKID(operationData.PrevLockedBalanceEntry.ProfilePKID) + if profileEntry == nil || profileEntry.isDeleted { + return fmt.Errorf("_disconnectCoinLockup: Trying to revert coin entry " + + "update but found nil profile entry; this shouldn't be possible") + } + + // Ensure the PrevCoinEntry is not nil. This shouldn't be possible. + if operationData.PrevCoinEntry == nil { + return fmt.Errorf("_disconnectCoinLockup: Trying to revert coin entry " + + "update but found nil prev coin entry; this shouldn't be possible") } + // Revert the coin entry. + profileEntry.DAOCoinEntry = *operationData.PrevCoinEntry + bav._setProfileEntryMappings(profileEntry) + // By here we only need to disconnect the basic transfer associated with the transaction. basicTransferOps := utxoOpsForTxn[:operationIndex] - err = bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) + err := bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) if err != nil { return errors.Wrap(err, "_disconnectCoinLockup") } @@ -2740,7 +2744,7 @@ func (bav *UtxoView) _disconnectCoinUnlock( // Sanity check the data within the CoinUnlock. // Reverting an unlock of LockedBalanceEntry for unvested lockups should not result in less coins. for _, prevLockedBalanceEntry := range operationData.PrevLockedBalanceEntries { - // Skip the balance decrease check for vested lockups. + // Skip the balance decrease check for vested lockups -- the changing map key makes this an inaccurate test. 
if prevLockedBalanceEntry.UnlockTimestampNanoSecs < prevLockedBalanceEntry.VestingEndTimestampNanoSecs { bav._setLockedBalanceEntry(prevLockedBalanceEntry) continue @@ -2814,7 +2818,7 @@ func (bav *UtxoView) _disconnectCoinUnlock( basicTransferOps := utxoOpsForTxn[:operationIndex] err := bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) if err != nil { - return errors.Wrap(err, "_disconnectCoinLockup") + return errors.Wrap(err, "_disconnectCoinUnlock") } return nil } diff --git a/lib/block_view_types.go b/lib/block_view_types.go index e9fc47069..d93dd2aac 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -980,7 +980,13 @@ type UtxoOperation struct { // PrevLockedBalanceEntry is the previous LockedBalanceEntry prior // to a DAO coin lockup. PrevCoinEntry defined above stores the // CoinsInCirculation and NumberOfHolders prior to a lockup transaction. - PrevLockedBalanceEntry *LockedBalanceEntry + // + // Vested lockups are a bit more confusing as we delete then set numerous locked balance entries. + // To revert this we must know what locked balance entries were set as well as what locked + // balance entries were deleted. We use PrevLockedBalanceEntries below and SetLockedBalanceEntries to convey + // these two pieces of state change for disconnects. + PrevLockedBalanceEntry *LockedBalanceEntry + SetLockedBalanceEntries []*LockedBalanceEntry // PrevLockupYieldCurvePoint and PrevLockupTransferRestriction are // the previous yield curve and transfer restrictions associated @@ -1372,8 +1378,9 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada // Lockup Fields - // PrevLockedBalanceEntry + // PrevLockedBalanceEntry, SetLockedBalanceEntries data = append(data, EncodeToBytes(blockHeight, op.PrevLockedBalanceEntry, skipMetadata...)...) + data = append(data, EncodeDeSoEncoderSlice(op.SetLockedBalanceEntries, blockHeight, skipMetadata...)...) 
// PrevLockupYieldCurvePoint, PrevLockupTransferRestrictions data = append(data, EncodeToBytes(blockHeight, op.PrevLockupYieldCurvePoint, skipMetadata...)...) @@ -2043,10 +2050,13 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. // Lockup Fields - // PrevLockedBalanceEntry + // PrevLockedBalanceEntry, SetLockedBalanceEntries if op.PrevLockedBalanceEntry, err = DecodeDeSoEncoder(&LockedBalanceEntry{}, rr); err != nil { return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading PrevLockedBalanceEntry: ") } + if op.SetLockedBalanceEntries, err = DecodeDeSoEncoderSlice[*LockedBalanceEntry](rr); err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading SetLockedBalanceEntries: ") + } // PrevLockupYieldCurvePoint, PrevLockupTransferRestriction if op.PrevLockupYieldCurvePoint, err = DecodeDeSoEncoder(&LockupYieldCurvePoint{}, rr); err != nil { From fff16d5b9471cc7f77713b2abce669712ed2168a Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 2 Feb 2024 10:50:04 -0800 Subject: [PATCH 458/762] Fix indexing issue around basic transfer disconnects in lockups. --- lib/block_view_lockups.go | 22 ---------------------- lib/block_view_lockups_test.go | 5 +++-- 2 files changed, 3 insertions(+), 24 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index a64204958..e79ccae2b 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1846,11 +1846,6 @@ func (bav *UtxoView) _disconnectCoinLockup( return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeCoinLockup " + "but found nil or deleted previous locked balance entry") } - operationIndex-- - if operationIndex < 0 { - return fmt.Errorf("_disconnectCoinLockup: Trying to revert OperationTypeCoinLockup " + - "but malformed utxoOpsForTxn") - } // Depending on whether this was a vested or unvested lockup, we disconnect differently. 
if operationData.PrevLockedBalanceEntry != nil { @@ -2144,13 +2139,6 @@ func (bav *UtxoView) _disconnectUpdateCoinLockupParams( bav._setProfileEntryMappings(profileEntry) } - // Decrement the operationIndex. We expect to find the basic transfer UtxoOps next. - operationIndex-- - if operationIndex < 0 { - return fmt.Errorf("_disconnectUpdateCoinLockupParams: Trying to revert OperationTypeUpdateCoinLockupParams " + - "but found malformed utxoOpsForTxn") - } - // By here we only need to disconnect the basic transfer associated with the transaction. basicTransferOps := utxoOpsForTxn[:operationIndex] err := bav._disconnectBasicTransfer(currentTxn, txnHash, basicTransferOps, blockHeight) @@ -2393,11 +2381,6 @@ func (bav *UtxoView) _disconnectCoinLockupTransfer( // Sanity check the OperationTypeCoinLockupTransfer exists. operationData := utxoOpsForTxn[operationIndex] - operationIndex-- - if operationIndex < 0 { - return fmt.Errorf("_disconnectCoinLockupTransfer: Trying to revert OperationTypeCoinLockupTransfer " + - "but malformed utxoOpsForTxn") - } if operationData.PrevSenderLockedBalanceEntry == nil || operationData.PrevSenderLockedBalanceEntry.isDeleted { return fmt.Errorf("_disconnectCoinLockupTransfer: Trying to revert OperationTypeCoinLockupTransfer " + "but found nil or deleted PrevSenderLockedBalanceEntry") @@ -2735,11 +2718,6 @@ func (bav *UtxoView) _disconnectCoinUnlock( "but found nil or deleted previous locked balance entry") } } - operationIndex-- - if operationIndex < 0 { - return fmt.Errorf("_disconnectCoinUnlock: Trying to revert OperationTypeCoinUnlock " + - "but found malformed utxoOpsForTxn") - } // Sanity check the data within the CoinUnlock. // Reverting an unlock of LockedBalanceEntry for unvested lockups should not result in less coins. 
diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index baf34ac87..05cbb9d1c 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -1173,8 +1173,9 @@ func TestLockupStandardProfileFlows(t *testing.T) { originalBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( m2PkBytes, m1PkBytes, true) - _, _, _, err = _coinUnlockWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + _coinUnlockWithTestMetaAndConnectTimestamp( + testMeta, + testMeta.feeRateNanosPerKb, m2Pub, m2Priv, m1Pub, From 303a17cfb82afb4999dfc31366fa104957fc575e Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 2 Feb 2024 14:46:56 -0500 Subject: [PATCH 459/762] Catch and Log Error from FastHotStuffConsensus.HandleBlock (#1005) --- lib/server.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/server.go b/lib/server.go index 7bbb119f1..43a63ec4a 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2009,7 +2009,9 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { // If the node is running a Fast-HotStuff validator and the consensus is running, // in the steady-state, then we handle the block according to the consensus rules. 
if srv.fastHotStuffConsensus != nil && srv.fastHotStuffConsensus.IsRunning() { - srv.fastHotStuffConsensus.HandleBlock(pp, blk) + if err := srv.fastHotStuffConsensus.HandleBlock(pp, blk); err != nil { + glog.Errorf("Server._handleBlock: Problem handling block with FastHotStuffConsensus: %v", err) + } return } From 045131a01980af70c0a6b5ce775f3ec3afc9c044 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 2 Feb 2024 14:48:35 -0500 Subject: [PATCH 460/762] Use MsgDeSoHeader.GetView() To Define View for PoW Epochs (#1007) --- lib/block_view.go | 2 +- lib/pos_blockchain.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index aa415e745..0890e3a2d 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4265,7 +4265,7 @@ func (bav *UtxoView) ConnectBlock( } if isLastBlockInEpoch { var utxoOperations []*UtxoOperation - utxoOperations, err = bav.RunEpochCompleteHook(blockHeight, blockHeader.ProposedInView, blockHeader.TstampNanoSecs) + utxoOperations, err = bav.RunEpochCompleteHook(blockHeight, blockHeader.GetView(), blockHeader.TstampNanoSecs) if err != nil { return nil, errors.Wrapf(err, "ConnectBlock: error running epoch complete hook") } diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 2dc89bdd7..e11a2bc64 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1792,7 +1792,7 @@ func (bc *Blockchain) GetProofOfStakeGenesisQuorumCertificate() (*QuorumCertific qc := &QuorumCertificate{ BlockHash: finalPoWBlock.Hash, - ProposedInView: finalPoWBlock.Header.Height, + ProposedInView: finalPoWBlock.Header.GetView(), ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ Signature: aggregatedSignature, SignersList: signersList, From f7ab9f21e5e74ee4401b7037455f116fe02dbd84 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 2 Feb 2024 14:50:27 -0500 Subject: [PATCH 461/762] Update 
getLineageFromCommittedTip To Use MsgDeSoHeader.GetView() (#1008) --- lib/pos_blockchain.go | 10 +++++----- lib/pos_blockchain_test.go | 1 + 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index e11a2bc64..f0b80c3a9 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1177,7 +1177,7 @@ func (bc *Blockchain) getLineageFromCommittedTip(block *MsgDeSoBlock) ([]*BlockN currentHash := block.Header.PrevBlockHash.NewBlockHash() ancestors := []*BlockNode{} prevHeight := block.Header.Height - prevView := block.Header.ProposedInView + prevView := block.Header.GetView() for { currentBlock, exists := bc.blockIndexByHash[*currentHash] if !exists { @@ -1192,16 +1192,16 @@ func (bc *Blockchain) getLineageFromCommittedTip(block *MsgDeSoBlock) ([]*BlockN if currentBlock.IsValidateFailed() { return nil, RuleErrorAncestorBlockValidationFailed } - if currentBlock.Header.ProposedInView >= prevView { - return nil, RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock - } if uint64(currentBlock.Header.Height)+1 != prevHeight { return nil, RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight } + if currentBlock.Header.GetView() >= prevView { + return nil, RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock + } ancestors = append(ancestors, currentBlock) currentHash = currentBlock.Header.PrevBlockHash prevHeight = currentBlock.Header.Height - prevView = currentBlock.Header.ProposedInView + prevView = currentBlock.Header.GetView() } return collections.Reverse(ancestors), nil } diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 51960f461..760250852 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -803,6 +803,7 @@ func TestGetLineageFromCommittedTip(t *testing.T) { } block := &MsgDeSoBlock{ Header: &MsgDeSoHeader{ + Version: HeaderVersion2, PrevBlockHash: hash1, ProposedInView: 3, Height: 3, From af407af676788869377ceb87e4fe40549cc03bba Mon Sep 17 00:00:00 2001 
From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Fri, 2 Feb 2024 12:06:00 -0800 Subject: [PATCH 462/762] PoS Change ForkHeight Checks to Use DeSoParams and Simplify BlockView TxnConnects condition (#1009) * PoS Change ForkHeight Checks to Use DeSoParams; Simplify BlockView TxnConnects condition * nit --- lib/block_view.go | 21 +++++++++++++++++---- lib/blockchain.go | 10 +++++----- lib/constants.go | 20 ++++++++++++++++++++ lib/miner.go | 2 +- lib/pos_blockchain.go | 38 +++++++++----------------------------- lib/pos_consensus.go | 4 ++-- lib/server.go | 14 ++++++++------ 7 files changed, 62 insertions(+), 47 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 0890e3a2d..b90980863 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4058,10 +4058,23 @@ func (bav *UtxoView) ConnectBlock( for txIndex, txn := range desoBlock.Txns { txHash := txHashes[txIndex] - // After the PoS cutover, we need to check if the transaction is a failing transaction. - txnConnects := blockHeight < uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) || - (txIndex == 0 && txn.TxnMeta.GetTxnType() == TxnTypeBlockReward) || - desoBlock.TxnConnectStatusByIndex.Get(txIndex-1) + // PoS introduced a concept of a failing transaction, or transactions that fail UtxoView's ConnectTransaction. + // In PoS, these failing transactions are included in the block and their fees are burned. + + // To determine if we're dealing with a connecting or failing transaction, we first check if we're on a PoS block + // height. Otherwise, the transaction is expected to connect. + hasPoWBlockHeight := bav.Params.IsPoWBlockHeight(blockHeight) + // Also, the first transaction in the block, the block reward transaction, should always be a connecting transaction. 
+ isBlockRewardTxn := (txIndex == 0) && (txn.TxnMeta.GetTxnType() == TxnTypeBlockReward) + // Finally, if the transaction is not the first in the block, we check the TxnConnectStatusByIndex to see if + // it's marked by the block producer as a connecting transaction. PoS blocks should reflect this in TxnConnectStatusByIndex. + hasConnectingPoSTxnStatus := false + if bav.Params.IsPoSBlockHeight(blockHeight) && (txIndex > 0) && (desoBlock.TxnConnectStatusByIndex != nil) { + // Note that TxnConnectStatusByIndex doesn't include the first block reward transaction. + hasConnectingPoSTxnStatus = desoBlock.TxnConnectStatusByIndex.Get(txIndex - 1) + } + // Now, we can determine if the transaction is expected to connect. + txnConnects := hasPoWBlockHeight || isBlockRewardTxn || hasConnectingPoSTxnStatus var utilityFee uint64 var utxoOpsForTxn []*UtxoOperation diff --git a/lib/blockchain.go b/lib/blockchain.go index 6d7c9c4de..e2fb3a7e7 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1154,7 +1154,7 @@ func (bc *Blockchain) isTipCurrent(tip *BlockNode) bool { minChainWorkBytes, _ := hex.DecodeString(bc.params.MinChainWorkHex) // Not current if the cumulative work is below the threshold. - if bc.IsPoWBlockHeight(uint64(tip.Height)) && tip.CumWork.Cmp(BytesToBigint(minChainWorkBytes)) < 0 { + if bc.params.IsPoWBlockHeight(uint64(tip.Height)) && tip.CumWork.Cmp(BytesToBigint(minChainWorkBytes)) < 0 { //glog.V(2).Infof("Blockchain.isTipCurrent: Tip not current because "+ //"CumWork (%v) is less than minChainWorkBytes (%v)", //tip.CumWork, BytesToBigint(minChainWorkBytes)) @@ -1709,7 +1709,7 @@ func updateBestChainInMemory(mainChainList []*BlockNode, mainChainMap map[BlockH // Caller must acquire the ChainLock for writing prior to calling this. func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *BlockHash) (_isMainChain bool, _isOrphan bool, _err error) { // Only accept the header if its height is below the PoS cutover height. 
- if !bc.IsPoWBlockHeight(blockHeader.Height) { + if !bc.params.IsPoWBlockHeight(blockHeader.Height) { return false, false, HeaderErrorBlockHeightAfterProofOfStakeCutover } @@ -1896,7 +1896,7 @@ func (bc *Blockchain) ProcessHeader(blockHeader *MsgDeSoHeader, headerHash *Bloc // If the header's height is after the PoS cut-over fork height, then we use the PoS header processing logic. // Otherwise, fall back to the PoW logic. - if bc.IsPoSBlockHeight(blockHeader.Height) { + if bc.params.IsPoSBlockHeight(blockHeader.Height) { return bc.processHeaderPoS(blockHeader) } @@ -1914,7 +1914,7 @@ func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures boo // If the block's height is after the PoS cut-over fork height, then we use the PoS block processing logic. // Otherwise, fall back to the PoW logic. - if bc.IsPoSBlockHeight(desoBlock.Header.Height) { + if bc.params.IsPoSBlockHeight(desoBlock.Header.Height) { return bc.processBlockPoS(desoBlock, 1, verifySignatures) } @@ -1924,7 +1924,7 @@ func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures boo func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures bool) (_isMainChain bool, _isOrphan bool, err error) { // Only accept the block if its height is below the PoS cutover height. 
- if !bc.IsPoWBlockHeight(desoBlock.Header.Height) { + if !bc.params.IsPoWBlockHeight(desoBlock.Header.Height) { return false, false, RuleErrorBlockHeightAfterProofOfStakeCutover } diff --git a/lib/constants.go b/lib/constants.go index e32f5a4af..c4154368c 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -801,6 +801,26 @@ func (params *DeSoParams) EnableRegtest() { params.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) } +func (params *DeSoParams) IsPoWBlockHeight(blockHeight uint64) bool { + return !params.IsPoSBlockHeight(blockHeight) +} + +func (params *DeSoParams) IsPoSBlockHeight(blockHeight uint64) bool { + return blockHeight >= params.GetFirstPoSBlockHeight() +} + +func (params *DeSoParams) IsFinalPoWBlockHeight(blockHeight uint64) bool { + return blockHeight == params.GetFinalPoWBlockHeight() +} + +func (params *DeSoParams) GetFinalPoWBlockHeight() uint64 { + return uint64(params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight - 1) +} + +func (params *DeSoParams) GetFirstPoSBlockHeight() uint64 { + return uint64(params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) +} + // GenesisBlock defines the genesis block used for the DeSo mainnet and testnet var ( ArchitectPubKeyBase58Check = "BC1YLg3oh6Boj8e2boCo1vQCYHLk1rjsHF6jthBdvSw79bixQvKK6Qa" diff --git a/lib/miner.go b/lib/miner.go index 5e332ba50..0590facbb 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -198,7 +198,7 @@ func (desoMiner *DeSoMiner) MineAndProcessSingleBlock(threadIndex uint32, mempoo return nil, fmt.Errorf("DeSoMiner._startThread: _mineSingleBlock returned nil; should only happen if we're stopping") } - if blockToMine.Header.Height >= uint64(desoMiner.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + if desoMiner.params.IsPoSBlockHeight(blockToMine.Header.Height) { return nil, fmt.Errorf("DeSoMiner._startThread: _mineSingleBlock returned a block that is past the Proof of Stake Cutover") } diff --git a/lib/pos_blockchain.go 
b/lib/pos_blockchain.go index f0b80c3a9..c51f50bd5 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -52,10 +52,10 @@ func (bc *Blockchain) ProcessHeaderPoS(header *MsgDeSoHeader) (_isMainChain bool func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader) ( _isMainChain bool, _isOrphan bool, _err error, ) { - if !bc.IsPoSBlockHeight(header.Height) { + if !bc.params.IsPoSBlockHeight(header.Height) { return false, false, errors.Errorf( "processHeaderPoS: Header height %d is less than the ProofOfStake2ConsensusCutoverBlockHeight %d", - header.Height, bc.GetFirstPoSBlockHeight(), + header.Height, bc.params.GetFirstPoSBlockHeight(), ) } @@ -214,10 +214,10 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v _err error, ) { // If the incoming block's height is under the PoS cutover fork height, then we can't process it. Exit early. - if !bc.IsPoSBlockHeight(block.Header.Height) { + if !bc.params.IsPoSBlockHeight(block.Header.Height) { return false, false, nil, errors.Errorf( "processHeaderPoS: Header height %d is less than the ProofOfStake2ConsensusCutoverBlockHeight %d", - block.Header.Height, bc.GetFirstPoSBlockHeight(), + block.Header.Height, bc.params.GetFirstPoSBlockHeight(), ) } @@ -940,7 +940,7 @@ func (bc *Blockchain) isProperlyFormedBlockHeaderPoS(header *MsgDeSoHeader) erro // that this block height is exactly one greater than its parent's block height. func (bc *Blockchain) hasValidBlockHeightPoS(header *MsgDeSoHeader) error { blockHeight := header.Height - if !bc.IsPoSBlockHeight(blockHeight) { + if !bc.params.IsPoSBlockHeight(blockHeight) { return RuleErrorPoSBlockBeforeCutoverHeight } // Validate that the block height is exactly one greater than its parent. @@ -1133,7 +1133,7 @@ func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validator // If the block is the first block after the PoS cutover and has a timeout aggregate QC, then the // highQC must be a synthetic QC. 
We need to override the validator set used to validate the high QC. - if block.Header.Height == bc.GetFirstPoSBlockHeight() && !timeoutAggregateQC.isEmpty() { + if block.Header.Height == bc.params.GetFirstPoSBlockHeight() && !timeoutAggregateQC.isEmpty() { genesisQC, err := bc.GetProofOfStakeGenesisQuorumCertificate() if err != nil { return errors.Wrapf(err, "isValidPoSQuorumCertificate: Problem getting PoS genesis QC") @@ -1804,9 +1804,9 @@ func (bc *Blockchain) GetProofOfStakeGenesisQuorumCertificate() (*QuorumCertific func (bc *Blockchain) GetFinalCommittedPoWBlock() (*BlockNode, error) { // Fetch the block node for the cutover block - blockNodes, blockNodesExist := bc.blockIndexByHeight[bc.GetFinalPoWBlockHeight()] + blockNodes, blockNodesExist := bc.blockIndexByHeight[bc.params.GetFinalPoWBlockHeight()] if !blockNodesExist { - return nil, errors.Errorf("Error fetching cutover block nodes before height %d", bc.GetFinalPoWBlockHeight()) + return nil, errors.Errorf("Error fetching cutover block nodes before height %d", bc.params.GetFinalPoWBlockHeight()) } // Fetch the block node with the committed status @@ -1816,27 +1816,7 @@ func (bc *Blockchain) GetFinalCommittedPoWBlock() (*BlockNode, error) { } } - return nil, errors.Errorf("Error fetching committed cutover block node with height %d", bc.GetFinalPoWBlockHeight()) -} - -func (bc *Blockchain) IsPoWBlockHeight(blockHeight uint64) bool { - return !bc.IsPoSBlockHeight(blockHeight) -} - -func (bc *Blockchain) IsPoSBlockHeight(blockHeight uint64) bool { - return blockHeight >= bc.GetFirstPoSBlockHeight() -} - -func (bc *Blockchain) IsFinalPoWBlockHeight(blockHeight uint64) bool { - return blockHeight == bc.GetFinalPoWBlockHeight() -} - -func (bc *Blockchain) GetFinalPoWBlockHeight() uint64 { - return uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight - 1) -} - -func (bc *Blockchain) GetFirstPoSBlockHeight() uint64 { - return uint64(bc.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) 
+ return nil, errors.Errorf("Error fetching committed cutover block node with height %d", bc.params.GetFinalPoWBlockHeight()) } const ( diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index b884e6617..86e18adde 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -66,7 +66,7 @@ func (cc *FastHotStuffConsensus) Start() error { tipHeight := tipBlock.Header.Height // If the chain is not at the final PoW block height or higher, then we cannot start the PoS consensus. - if cc.blockchain.IsPoWBlockHeight(tipHeight) && !cc.blockchain.IsFinalPoWBlockHeight(tipHeight) { + if cc.params.IsPoWBlockHeight(tipHeight) && !cc.params.IsFinalPoWBlockHeight(tipHeight) { return errors.Errorf( "FastHotStuffConsensus.Start: Block tip %d is not at the final PoW block height", tipBlock.Height, ) @@ -434,7 +434,7 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo timeoutMsg.TimedOutView = event.View timeoutMsg.VotingPublicKey = cc.signer.GetPublicKey() - if cc.blockchain.IsFinalPoWBlockHeight(tipBlockNode.Header.Height) { + if cc.params.IsFinalPoWBlockHeight(tipBlockNode.Header.Height) { // If the tip block is the final block of the PoW chain, then we can use the PoS chain's genesis block // as the highQC for it. 
if timeoutMsg.HighQC, err = cc.blockchain.GetProofOfStakeGenesisQuorumCertificate(); err != nil { diff --git a/lib/server.go b/lib/server.go index 43a63ec4a..0b0ce0057 100644 --- a/lib/server.go +++ b/lib/server.go @@ -62,6 +62,7 @@ type Server struct { blockProducer *DeSoBlockProducer eventManager *EventManager TxIndex *TXIndex + params *DeSoParams fastHotStuffConsensus *FastHotStuffConsensus @@ -223,7 +224,7 @@ func (srv *Server) GetMempool() Mempool { srv.blockchain.ChainLock.RLock() defer srv.blockchain.ChainLock.RUnlock() - if srv.blockchain.IsPoSBlockHeight(uint64(srv.blockchain.BlockTip().Height)) { + if srv.params.IsPoSBlockHeight(uint64(srv.blockchain.BlockTip().Height)) { return srv.posMempool } return srv.mempool @@ -264,7 +265,7 @@ func (srv *Server) VerifyAndBroadcastTransaction(txn *MsgDeSoTxn) error { srv.blockchain.ChainLock.RUnlock() // Only add the txn to the PoW mempool if we are below the PoS cutover height. - if srv.blockchain.IsPoWBlockHeight(uint64(tipHeight)) { + if srv.params.IsPoWBlockHeight(uint64(tipHeight)) { err := srv.blockchain.ValidateTransaction( txn, // blockHeight is set to the next block since that's where this @@ -439,6 +440,7 @@ func NewServer( snapshot: _snapshot, nodeMessageChannel: _nodeMessageChan, forceChecksum: _forceChecksum, + params: _params, } if stateChangeSyncer != nil { @@ -1773,7 +1775,7 @@ func (srv *Server) _relayTransactions() { srv.blockchain.ChainLock.RUnlock() // If we're on the PoW protocol, we need to wait for the mempool readOnlyView to regenerate. - if srv.blockchain.IsPoWBlockHeight(tipHeight) { + if srv.params.IsPoWBlockHeight(tipHeight) { glog.V(1).Infof("Server._relayTransactions: Waiting for mempool readOnlyView to regenerate") srv.mempool.BlockUntilReadOnlyViewRegenerated() glog.V(1).Infof("Server._relayTransactions: Mempool view has regenerated") @@ -1848,7 +1850,7 @@ func (srv *Server) _addNewTxn( // txn validity checks to signal whether the txn has been added or not. 
The PoW // mempool has stricter txn validity checks than the PoW mempool, so this works // out conveniently, as it allows us to always add a txn to the PoS mempool. - if srv.blockchain.IsPoWBlockHeight(tipHeight) { + if srv.params.IsPoWBlockHeight(tipHeight) { _, err := srv.mempool.ProcessTransaction( txn, true /*allowUnconnectedTxn*/, rateLimit, peerID, verifySignatures) if err != nil { @@ -2185,7 +2187,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { // the PoS validator FastHotStuffConsensus once we reach the final block of the PoW protocol. // This requires the block height check to use ProofOfStake2ConsensusCutoverBlockHeight-1 tipHeight := uint64(srv.blockchain.blockTip().Height) - if tipHeight < srv.blockchain.GetFinalPoWBlockHeight() { + if tipHeight < srv.params.GetFinalPoWBlockHeight() { return } @@ -2235,7 +2237,7 @@ func (srv *Server) ProcessSingleTxnWithChainLock(pp *Peer, txn *MsgDeSoTxn) ([]* // mempool has stricter txn validity checks than the PoS mempool, so this works // out conveniently, as it allows us to always add a txn to the PoS mempool. 
tipHeight := uint64(srv.blockchain.blockTip().Height) - if srv.blockchain.IsPoWBlockHeight(tipHeight) { + if srv.params.IsPoWBlockHeight(tipHeight) { _, err := srv.mempool.ProcessTransaction( txn, true, /*allowUnconnectedTxn*/ From e5aec9a79df78008fcb5ed5c9f814001fd04a7c2 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 2 Feb 2024 15:24:01 -0500 Subject: [PATCH 463/762] Clarify Block Height Condition For Starting FastHotStuffConsensus (#1010) * Clarify Block Height Condition For Starting FastHotStuffConsensus * Clean up comment --- lib/server.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/server.go b/lib/server.go index 0b0ce0057..aed1f6345 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2183,9 +2183,10 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { return } - // Exit early if the current tip height is below the PoS cutover height. We are ready to enable - // the PoS validator FastHotStuffConsensus once we reach the final block of the PoW protocol. - // This requires the block height check to use ProofOfStake2ConsensusCutoverBlockHeight-1 + // Exit early if the current tip height is below the final PoW block's height. We are ready to + // enable the FastHotStuffConsensus once we reach the final block of the PoW protocol. 
+ // + // Enable the FastHotStuffConsensus once the tipHeight >= ProofOfStake2ConsensusCutoverBlockHeight-1 tipHeight := uint64(srv.blockchain.blockTip().Height) if tipHeight < srv.params.GetFinalPoWBlockHeight() { return From a42896545f2ff6d1cffc11e279c9d9e56446fdbc Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Fri, 2 Feb 2024 13:30:45 -0800 Subject: [PATCH 464/762] PoS Validate Header Sanity Check (#1011) --- lib/pos_blockchain.go | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index c51f50bd5..d7c7b464f 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -12,19 +12,6 @@ import ( "github.com/pkg/errors" ) -// ProcessHeaderPoS simply acquires the chain lock and calls processHeaderPoS. -func (bc *Blockchain) ProcessHeaderPoS(header *MsgDeSoHeader) (_isMainChain bool, _isOrphan bool, _err error) { - // Grab the chain lock - bc.ChainLock.Lock() - defer bc.ChainLock.Unlock() - - if header == nil { - return false, false, fmt.Errorf("ProcessHeaderPoS: Header is nil") - } - - return bc.processHeaderPoS(header) -} - // processHeaderPoS validates and stores an incoming block header to build // the PoS version of the header chain. It requires callers to call it with // headers in order of increasing block height. If called with an orphan header, @@ -125,6 +112,12 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas return nil, true, nil } + // Sanity-check that the parent block is an ancestor of the current block. + if parentBlockNode.Height+1 != blockNode.Height { + return nil, false, errors.New("validateAndIndexHeaderPoS: Parent header has " + + "greater or equal height compared to the current header.") + } + // ---------------------------------- Recursive Case ---------------------------------- // // Recursively call validateAndIndexHeaderPoS on the header's ancestors. 
It's possible for From 6f4341024a9e8f88b15f02c874d10baa9059d6b3 Mon Sep 17 00:00:00 2001 From: diamondhands Date: Sat, 3 Feb 2024 23:04:33 -0800 Subject: [PATCH 465/762] Small updates to the mempool updating --- lib/pos_mempool.go | 50 +++++++++++++++++++++++++++++----------------- lib/server.go | 8 ++++---- 2 files changed, 36 insertions(+), 22 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 4c75be2fe..70d6aa2d3 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -121,12 +121,12 @@ type PosMempool struct { // PosMempool only needs read-access to the block view. It isn't necessary to copy the block view before passing it // to the mempool. readOnlyLatestBlockView *UtxoView - // augmentedLatestBlockView is a copy of the latest block view with all the transactions in the mempool applied to + // augmentedReadOnlyLatestBlockView is a copy of the latest block view with all the transactions in the mempool applied to // it. This allows the backend to display the current state of the blockchain including the mempool. - // The augmentedLatestBlockView is updated every 100 milliseconds to reflect the latest state of the mempool. - augmentedLatestBlockView *UtxoView - // augmentedLatestBlockViewMutex is used to protect the augmentedLatestBlockView from concurrent access. - augmentedLatestBlockViewMutex sync.RWMutex + // The augmentedReadOnlyLatestBlockView is updated every 10 milliseconds to reflect the latest state of the mempool. + augmentedReadOnlyLatestBlockView *UtxoView + // augmentedReadOnlyLatestBlockViewMutex is used to protect the augmentedLatestBlockView from concurrent access. + augmentedReadOnlyLatestBlockViewMutex sync.RWMutex // Signals that the mempool is now in the stopped state. quit chan interface{} // latestBlockNode is used to infer the latest block height. 
The latestBlockNode should be updated whenever a new @@ -211,7 +211,7 @@ func (mp *PosMempool) Init( mp.readOnlyLatestBlockView = readOnlyLatestBlockView var err error if readOnlyLatestBlockView != nil { - mp.augmentedLatestBlockView, err = readOnlyLatestBlockView.CopyUtxoView() + mp.augmentedReadOnlyLatestBlockView, err = readOnlyLatestBlockView.CopyUtxoView() if err != nil { return errors.Wrapf(err, "PosMempool.Init: Problem copying utxo view") } @@ -288,12 +288,13 @@ func (mp *PosMempool) startAugmentedViewRefreshRoutine() { } // Update the augmentedLatestBlockView with the latest block view. mp.RLock() - newView, err := mp.readOnlyLatestBlockView.CopyUtxoView() + readOnlyViewPointer := mp.readOnlyLatestBlockView + mp.RUnlock() + newView, err := readOnlyViewPointer.CopyUtxoView() if err != nil { - glog.Errorf("PosMempool.startAugmentedViewRefreshRoutine: Problem copying utxo view: %v", err) + glog.Errorf("PosMempool.startAugmentedViewRefreshRoutine: Problem copying utxo view outer: %v", err) continue } - mp.RUnlock() for _, txn := range mp.GetTransactions() { copiedView, err := newView.CopyUtxoView() if err != nil { @@ -312,19 +313,27 @@ func (mp *PosMempool) startAugmentedViewRefreshRoutine() { // If the transaction failed to connect, we connect the transaction as a failed txn // directly on newView. if mp.latestBlockHeight+1 >= uint64(mp.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + // Copy the view again in case we hit an error. 
+ copiedView, err = newView.CopyUtxoView() + if err != nil { + glog.Errorf("PosMempool.startAugmentedViewRefreshRoutine: Problem copying utxo view inner: %v", err) + continue + } // Try to connect as failing txn directly to newView - _, _, _, err = newView._connectFailingTransaction( + _, _, _, err = copiedView._connectFailingTransaction( txn.GetTxn(), uint32(mp.latestBlockHeight), false) if err != nil { glog.Errorf( "PosMempool.startAugmentedViewRefreshRoutine: Problem connecting transaction: %v", err) + continue } + newView = copiedView } } // Grab the augmentedLatestBlockViewMutex write lock and update the augmentedLatestBlockView. - mp.augmentedLatestBlockViewMutex.Lock() - mp.augmentedLatestBlockView = newView - mp.augmentedLatestBlockViewMutex.Unlock() + mp.augmentedReadOnlyLatestBlockViewMutex.Lock() + mp.augmentedReadOnlyLatestBlockView = newView + mp.augmentedReadOnlyLatestBlockViewMutex.Unlock() // Increment the augmentedLatestBlockViewSequenceNumber. atomic.AddInt64(&mp.augmentedLatestBlockViewSequenceNumber, 1) case <-mp.quit: @@ -840,9 +849,10 @@ func (mp *PosMempool) GetAugmentedUniversalView() (*UtxoView, error) { if !mp.IsRunning() { return nil, errors.Wrapf(MempoolErrorNotRunning, "PosMempool.GetAugmentedUniversalView: ") } - mp.augmentedLatestBlockViewMutex.RLock() - defer mp.augmentedLatestBlockViewMutex.RUnlock() - newView, err := mp.augmentedLatestBlockView.CopyUtxoView() + mp.augmentedReadOnlyLatestBlockViewMutex.RLock() + readOnlyViewPointer := mp.augmentedReadOnlyLatestBlockView + mp.augmentedReadOnlyLatestBlockViewMutex.RUnlock() + newView, err := readOnlyViewPointer.CopyUtxoView() if err != nil { return nil, errors.Wrapf(err, "PosMempool.GetAugmentedUniversalView: Problem copying utxo view") } @@ -854,9 +864,13 @@ func (mp *PosMempool) GetAugmentedUtxoViewForPublicKey(pk []byte, optionalTx *Ms func (mp *PosMempool) BlockUntilReadOnlyViewRegenerated() { oldSeqNum := atomic.LoadInt64(&mp.augmentedLatestBlockViewSequenceNumber) newSeqNum := 
oldSeqNum + // Check fairly often. Not too often. + checkIntervalMillis := mp.augmentedBlockViewRefreshIntervalMillis / 5 + if checkIntervalMillis == 0 { + checkIntervalMillis = 1 + } for newSeqNum == oldSeqNum { - // Check fairly often. Not too often. - time.Sleep(25 * time.Millisecond) + time.Sleep(time.Duration(checkIntervalMillis) * time.Millisecond) newSeqNum = atomic.LoadInt64(&mp.augmentedLatestBlockViewSequenceNumber) } } diff --git a/lib/server.go b/lib/server.go index aed1f6345..245c07d56 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1563,8 +1563,8 @@ func (srv *Server) dirtyHackUpdateDbOpts(opts badger.Options) { // Make sure that the pos mempool process doesn't try to access the DB while we're closing and re-opening it. srv.posMempool.Lock() defer srv.posMempool.Unlock() - srv.posMempool.augmentedLatestBlockViewMutex.Lock() - defer srv.posMempool.augmentedLatestBlockViewMutex.Unlock() + srv.posMempool.augmentedReadOnlyLatestBlockViewMutex.Lock() + defer srv.posMempool.augmentedReadOnlyLatestBlockViewMutex.Unlock() // Make sure that a server process doesn't try to access the DB while we're closing and re-opening it. srv.DbMutex.Lock() defer srv.DbMutex.Unlock() @@ -1583,8 +1583,8 @@ func (srv *Server) dirtyHackUpdateDbOpts(opts badger.Options) { if srv.posMempool.readOnlyLatestBlockView != nil { srv.posMempool.readOnlyLatestBlockView.Handle = srv.blockchain.db } - if srv.posMempool.augmentedLatestBlockView != nil { - srv.posMempool.augmentedLatestBlockView.Handle = srv.blockchain.db + if srv.posMempool.augmentedReadOnlyLatestBlockView != nil { + srv.posMempool.augmentedReadOnlyLatestBlockView.Handle = srv.blockchain.db } // Save the new options to the DB so that we know what to use if the node restarts. 
From 0a4150c1fdc3d80dd7496c4e42021898addf2cba Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 5 Feb 2024 10:58:03 -0500 Subject: [PATCH 466/762] Minor Cleanup to PoS Mempool (#1012) --- lib/pos_mempool.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 70d6aa2d3..f2825c37d 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -312,7 +312,7 @@ func (mp *PosMempool) startAugmentedViewRefreshRoutine() { } // If the transaction failed to connect, we connect the transaction as a failed txn // directly on newView. - if mp.latestBlockHeight+1 >= uint64(mp.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + if mp.params.IsPoSBlockHeight(mp.latestBlockHeight + 1) { // Copy the view again in case we hit an error. copiedView, err = newView.CopyUtxoView() if err != nil { @@ -321,7 +321,7 @@ func (mp *PosMempool) startAugmentedViewRefreshRoutine() { } // Try to connect as failing txn directly to newView _, _, _, err = copiedView._connectFailingTransaction( - txn.GetTxn(), uint32(mp.latestBlockHeight), false) + txn.GetTxn(), uint32(mp.latestBlockHeight+1), false) if err != nil { glog.Errorf( "PosMempool.startAugmentedViewRefreshRoutine: Problem connecting transaction: %v", err) From 305d7904d618ee24f4be8c1072d75251febe7a6b Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 5 Feb 2024 11:14:36 -0500 Subject: [PATCH 467/762] Move BlockUntilReadOnlyViewRegenerated Outside of _addTxn (#1013) * Move BlockUntilReadOnlyViewRegenerated Outside of _addTxn * Update mempool augment block view refresh frequency --- cmd/run.go | 2 +- lib/server.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 585328abf..8b8965d6a 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -101,7 +101,7 @@ func SetupRunFlags(cmd *cobra.Command) { "The number of future blocks to 
break the PoS mempool into when estimating txn fee for the next block.") cmd.PersistentFlags().Uint64("mempool-fee-estimator-num-past-blocks", 50, "The number of past blocks to use when estimating txn fee for the next block from the PoS mempool.") - cmd.PersistentFlags().Uint64("augmented-block-view-refresh-interval-millis", 100, + cmd.PersistentFlags().Uint64("augmented-block-view-refresh-interval-millis", 10, "The frequency in milliseconds with which the augmented block view will be refreshed. "+ "The default value is 100 milliseconds.") diff --git a/lib/server.go b/lib/server.go index 245c07d56..23a821fa5 100644 --- a/lib/server.go +++ b/lib/server.go @@ -255,6 +255,10 @@ func (srv *Server) BroadcastTransaction(txn *MsgDeSoTxn) ([]*MsgDeSoTxn, error) return nil, errors.Wrapf(err, "BroadcastTransaction: ") } + // At this point, we know the transaction has been run through the mempool. + // Now wait for an update of the ReadOnlyUtxoView so we don't break anything. + srv.GetMempool().BlockUntilReadOnlyViewRegenerated() + return mempoolTxs, nil } @@ -1867,10 +1871,6 @@ func (srv *Server) _addNewTxn( return nil, errors.Wrapf(err, "Server._addNewTxn: problem adding txn to pos mempool") } - // At this point, we know the transaction has been run through the mempool. - // Now wait for an update of the ReadOnlyUtxoView so we don't break anything. 
- srv.GetMempool().BlockUntilReadOnlyViewRegenerated() - return []*MsgDeSoTxn{txn}, nil } From 818711ad80ac2ef641945064aa3214abc3cee1cd Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 5 Feb 2024 16:40:47 -0500 Subject: [PATCH 468/762] Replace TimeSource With Strictly Increasing Block Timestamps (#1014) --- lib/pos_block_producer.go | 33 ++++++++++++++++++++------------- lib/pos_block_producer_test.go | 9 ++++----- lib/pos_blockchain_test.go | 2 +- lib/pos_consensus.go | 12 +++++++++--- 4 files changed, 34 insertions(+), 22 deletions(-) diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index aba26dacc..57bc129b1 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -2,8 +2,8 @@ package lib import ( "math" + "time" - chainlib "github.com/btcsuite/btcd/blockchain" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections/bitset" "github.com/pkg/errors" @@ -20,26 +20,26 @@ type BlockTemplate *MsgDeSoBlock // CreateUnsignedTimeoutBlock methods. As such, PosBlockProducer exists primarily for the purpose of cleaner separation of // concerns. Instantiating the PosBlockProducer can also be optional for nodes who do not wish to produce blocks. 
type PosBlockProducer struct { - mp Mempool - params *DeSoParams - proposerPublicKey *PublicKey - proposerVotingPublicKey *bls.PublicKey - timeSource chainlib.MedianTimeSource + mp Mempool + params *DeSoParams + proposerPublicKey *PublicKey + proposerVotingPublicKey *bls.PublicKey + previousBlockTimestampNanoSecs int64 } func NewPosBlockProducer( mp Mempool, params *DeSoParams, - timeSource chainlib.MedianTimeSource, proposerPublicKey *PublicKey, proposerVotingPublicKey *bls.PublicKey, + previousBlockTimestampNanoSecs int64, ) *PosBlockProducer { return &PosBlockProducer{ - mp: mp, - params: params, - proposerPublicKey: proposerPublicKey, - proposerVotingPublicKey: proposerVotingPublicKey, - timeSource: timeSource, + mp: mp, + params: params, + proposerPublicKey: proposerPublicKey, + proposerVotingPublicKey: proposerVotingPublicKey, + previousBlockTimestampNanoSecs: previousBlockTimestampNanoSecs, } } @@ -83,7 +83,7 @@ func (pbp *PosBlockProducer) CreateUnsignedTimeoutBlock(latestBlockView *UtxoVie func (pbp *PosBlockProducer) createBlockTemplate(latestBlockView *UtxoView, newBlockHeight uint64, view uint64, proposerRandomSeedSignature *bls.Signature) (BlockTemplate, error) { // First get the block without the header. 
- currentTimestamp := pbp.timeSource.AdjustedTime().UnixNano() + currentTimestamp := _maxInt64(time.Now().UnixNano(), pbp.previousBlockTimestampNanoSecs+1) block, err := pbp.createBlockWithoutHeader(latestBlockView, newBlockHeight, currentTimestamp) if err != nil { return nil, errors.Wrapf(err, "PosBlockProducer.CreateBlockTemplate: Problem creating block without header") @@ -253,3 +253,10 @@ func (pbp *PosBlockProducer) getBlockTransactions( return blocksTxns, txnConnectStatusByIndex, maxUtilityFee, nil } + +func _maxInt64(a, b int64) int64 { + if a > b { + return a + } + return b +} diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index fef7549de..896d4a060 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -8,7 +8,6 @@ import ( "testing" "time" - chainlib "github.com/btcsuite/btcd/blockchain" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections/bitset" "github.com/stretchr/testify/require" @@ -61,7 +60,7 @@ func TestCreateBlockTemplate(t *testing.T) { _, err = seedSignature.FromBytes(Sha256DoubleHash([]byte("seed")).ToBytes()) require.NoError(err) m0Pk := NewPublicKey(m0PubBytes) - pbp := NewPosBlockProducer(mempool, params, chainlib.NewMedianTime(), m0Pk, pub) + pbp := NewPosBlockProducer(mempool, params, m0Pk, pub, time.Now().UnixNano()) blockTemplate, err := pbp.createBlockTemplate(latestBlockView, 3, 10, seedSignature) require.NoError(err) @@ -123,7 +122,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { // Test cases where the block producer is the transactor for the mempool txns { - pbp := NewPosBlockProducer(mempool, params, chainlib.NewMedianTime(), NewPublicKey(m0PubBytes), blsPubKey) + pbp := NewPosBlockProducer(mempool, params, NewPublicKey(m0PubBytes), blsPubKey, time.Now().UnixNano()) txns, txnConnectStatus, _, err := pbp.getBlockTransactions( NewPublicKey(m0PubBytes), latestBlockView, 3, 0, 50000) require.NoError(err) @@ -139,7 +138,7 @@ func 
TestCreateBlockWithoutHeader(t *testing.T) { // Test cases where the block producer is not the transactor for the mempool txns { - pbp := NewPosBlockProducer(mempool, params, chainlib.NewMedianTime(), NewPublicKey(m1PubBytes), blsPubKey) + pbp := NewPosBlockProducer(mempool, params, NewPublicKey(m1PubBytes), blsPubKey, time.Now().UnixNano()) txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions( NewPublicKey(m1PubBytes), latestBlockView, 3, 0, 50000) require.NoError(err) @@ -198,7 +197,7 @@ func TestGetBlockTransactions(t *testing.T) { _wrappedPosMempoolAddTransaction(t, mempool, txn) } - pbp := NewPosBlockProducer(mempool, params, chainlib.NewMedianTime(), NewPublicKey(m1PubBytes), nil) + pbp := NewPosBlockProducer(mempool, params, NewPublicKey(m1PubBytes), nil, time.Now().UnixNano()) _testProduceBlockNoSizeLimit(t, mempool, pbp, latestBlockView, 3, len(passingTxns), 0, 0) diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 760250852..a43f6517b 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2783,7 +2783,7 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { require.True(t, mempool.IsRunning()) priv := _generateRandomBLSPrivateKey(t) m0Pk := NewPublicKey(m0PubBytes) - posBlockProducer := NewPosBlockProducer(mempool, params, chain.timeSource, m0Pk, priv.PublicKey()) + posBlockProducer := NewPosBlockProducer(mempool, params, m0Pk, priv.PublicKey(), time.Now().UnixNano()) // TODO: do we need to update the encoder migration stuff for global params. Probably. 
testMeta.mempool = nil testMeta.posMempool = mempool diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 86e18adde..5948b8b51 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -613,6 +613,12 @@ func (cc *FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( // Get the parent block's hash parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) + // Fetch the parent block + parentBlock, parentBlockExists := cc.blockchain.blockIndexByHash[*parentBlockHash] + if !parentBlockExists { + return nil, errors.Errorf("Error fetching parent block: %v", parentBlockHash) + } + // Build a UtxoView at the parent block utxoViewAtParent, err := cc.blockchain.getUtxoViewAtBlockHash(*parentBlockHash) if err != nil { @@ -621,7 +627,7 @@ func (cc *FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( } // Dynamically create a new block producer at the current block height - blockProducer, err := cc.createBlockProducer(utxoViewAtParent) + blockProducer, err := cc.createBlockProducer(utxoViewAtParent, parentBlock.Header.TstampNanoSecs) if err != nil { return nil, errors.Errorf("Error creating block producer: %v", err) } @@ -745,7 +751,7 @@ func (cc *FastHotStuffConsensus) fetchValidatorListsForSafeBlocks(blocks []*MsgD return blocksWithValidatorLists, nil } -func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView) (*PosBlockProducer, error) { +func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView, previousBlockTimestampNanoSecs int64) (*PosBlockProducer, error) { blockProducerBlsPublicKey := fc.signer.GetPublicKey() blockProducerValidatorEntry, err := bav.GetCurrentSnapshotValidatorBLSPublicKeyPKIDPairEntry(blockProducerBlsPublicKey) if err != nil { @@ -759,9 +765,9 @@ func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView) (*PosBlockPr blockProducer := NewPosBlockProducer( fc.mempool, fc.params, - fc.blockchain.timeSource, blockProducerPublicKey, blockProducerBlsPublicKey, + 
previousBlockTimestampNanoSecs, ) return blockProducer, nil } From cb0fdbf0a5a6c8ae9fe060542c393df60e06ea95 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 5 Feb 2024 18:33:59 -0500 Subject: [PATCH 469/762] Minor Comment Cleanup (#1015) --- lib/pos_consensus.go | 2 +- lib/pos_constants.go | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 5948b8b51..84665e664 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -66,7 +66,7 @@ func (cc *FastHotStuffConsensus) Start() error { tipHeight := tipBlock.Header.Height // If the chain is not at the final PoW block height or higher, then we cannot start the PoS consensus. - if cc.params.IsPoWBlockHeight(tipHeight) && !cc.params.IsFinalPoWBlockHeight(tipHeight) { + if tipHeight < cc.params.GetFinalPoWBlockHeight() { return errors.Errorf( "FastHotStuffConsensus.Start: Block tip %d is not at the final PoW block height", tipBlock.Height, ) diff --git a/lib/pos_constants.go b/lib/pos_constants.go index a387673f0..3a19e8caf 100644 --- a/lib/pos_constants.go +++ b/lib/pos_constants.go @@ -10,7 +10,13 @@ import ( // The Proof of Work -> Proof of Stake cutover requires a synthetic QC to protect against a timeout // during the exact point of the cutover. This synthetic QC is built and signed locally by every node -// using a known and consistent private key. +// using a known and hard-coded private key. +// +// We need a synthetic QC for the final PoW block in case we have a timeout during the first 1-2 block +// heights of the PoS chain. If we have such timeouts, validators need to be able to send a high QC +// for the final PoW block. To guarantee that validators can do this, we have a synthetic QC that +// can only be built and signed by this hard-coded private key. The synthetic QC can only be applied +// to the final PoW block. Every validator has access to this private key and can build this QC. 
const proofOfStakeCutoverValidatorBLSPrivateKeyHex = "0x0570b78ce822f902b203ee075a7e2147d6b9a420a9409c038154589de64eec96" func BuildProofOfStakeCutoverValidatorBLSPrivateKey() (*bls.PrivateKey, error) { From 25722abdfa9d2c8e77cd59661a70d780589b2ac7 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Wed, 7 Feb 2024 07:00:27 -0800 Subject: [PATCH 470/762] PoS SHA3 Random Seed (#1017) * PoS SHA3 Random Seed * Fix Test --- lib/pos_blockchain_test.go | 3 ++- lib/pos_random_seed.go | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index a43f6517b..6f0187c93 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -5,6 +5,7 @@ package lib import ( "bytes" "fmt" + "golang.org/x/crypto/sha3" "math" "math/rand" "testing" @@ -2521,7 +2522,7 @@ func getLeaderForBlockHeightAndView(testMeta *TestMeta, blockHeight uint64, view func getRandomSeedSignature(testMeta *TestMeta, height uint64, view uint64, prevRandomSeedSignature *bls.Signature) *bls.Signature { leaderPublicKey, _ := getLeaderForBlockHeightAndView(testMeta, height, view) leaderBLSPrivKey := testMeta.pubKeyToBLSKeyMap[leaderPublicKey] - prevRandomSeedHashSHA256 := sha256.Sum256(prevRandomSeedSignature.ToBytes()) + prevRandomSeedHashSHA256 := sha3.Sum256(prevRandomSeedSignature.ToBytes()) newRandomSeedSignature, err := leaderBLSPrivKey.Sign(prevRandomSeedHashSHA256[:]) require.NoError(testMeta.t, err) return newRandomSeedSignature diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index fab607fbd..2d69340a2 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -2,7 +2,7 @@ package lib import ( "bytes" - "crypto/sha256" + "golang.org/x/crypto/sha3" "io" "github.com/deso-protocol/core/bls" @@ -114,7 +114,7 @@ func verifySignatureOnRandomSeedHash( func HashRandomSeedSignature(randomSeedSignature *bls.Signature) (*RandomSeedHash, error) { // This function 
takes in a random seed signature and computes the random seed hash for it // Convert the RandomSeedSignature to a RandomSeedHash. - randomSeedSHA256 := sha256.Sum256(randomSeedSignature.ToBytes()) + randomSeedSHA256 := sha3.Sum256(randomSeedSignature.ToBytes()) newRandomSeedHash, err := (&RandomSeedHash{}).FromBytes(randomSeedSHA256[:]) if err != nil { return nil, errors.Wrapf(err, "hashRandomSeedSignature: problem hashing RandomSeedSignature: ") From d45ab17c0645b136a05a78093c9509c9a9fdb4fe Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 7 Feb 2024 14:20:38 -0500 Subject: [PATCH 471/762] Fix Txn Fee Validation in UtxoView._connectFailingTransaction (#1018) * Fix Txn Fee Validation in UtxoView._connectFailingTransaction * Cleanup --- lib/block_view.go | 30 ++++++-- lib/block_view_test.go | 153 +++++++++++++++++++++++++++++------------ 2 files changed, 135 insertions(+), 48 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index b90980863..39e33f628 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3923,10 +3923,14 @@ func (bav *UtxoView) _connectFailingTransaction(txn *MsgDeSoTxn, blockHeight uin failingTransactionRate := uint256.NewInt().SetUint64(gp.FailingTransactionBMFMultiplierBasisPoints) failingTransactionFee := uint256.NewInt().SetUint64(txn.TxnFeeNanos) - basisPointsAsUint256 := uint256.NewInt().SetUint64(10000) + basisPointsAsUint256 := uint256.NewInt().SetUint64(MaxBasisPoints) - effectiveFeeU256 := failingTransactionRate.Mul(failingTransactionRate, failingTransactionFee) + effectiveFeeU256 := uint256.NewInt() + if effectiveFeeU256.MulOverflow(failingTransactionRate, failingTransactionFee) { + return nil, 0, 0, fmt.Errorf("_connectFailingTransaction: Problem computing effective fee") + } effectiveFeeU256.Div(effectiveFeeU256, basisPointsAsUint256) + // We should never overflow on the effective fee, since FailingTransactionBMFMultiplierBasisPoints is <= 10000. 
// But if for some magical reason we do, we set the effective fee to the max uint64. We don't error, and // instead let _spendBalance handle the overflow. @@ -3934,10 +3938,26 @@ func (bav *UtxoView) _connectFailingTransaction(txn *MsgDeSoTxn, blockHeight uin effectiveFeeU256.SetUint64(math.MaxUint64) } effectiveFee := effectiveFeeU256.Uint64() - // If the effective fee is less than the minimum network fee, we set it to the minimum network fee. - if effectiveFee < gp.MinimumNetworkFeeNanosPerKB { - effectiveFee = gp.MinimumNetworkFeeNanosPerKB + + // Serialize the transaction to bytes so we can compute its size. + txnBytes, err := txn.ToBytes(false) + if err != nil { + return nil, 0, 0, errors.Wrapf(err, "_connectFailingTransaction: Problem serializing transaction: ") } + txnSizeBytes := uint64(len(txnBytes)) + + // If the effective fee rate per KB is less than the minimum network fee rate per KB, we set it to the minimum + // network fee rate per KB. We multiply by 1000 and divide by the txn bytes to convert the txn's total effective + // fee to a fee rate per KB. + // + // The effectiveFee * 1000 computation is guaranteed to not overflow because an overflow check is already + // performed in ValidateDeSoTxnSanityBalanceModel above. + effectiveFeeRateNanosPerKB := (effectiveFee * 1000) / txnSizeBytes + if effectiveFeeRateNanosPerKB < gp.MinimumNetworkFeeNanosPerKB { + // The minimum effective fee for the txn is the txn size * the minimum network fee rate per KB. 
+ effectiveFee = (gp.MinimumNetworkFeeNanosPerKB * txnSizeBytes) / 1000 + } + burnFee, utilityFee := computeBMF(effectiveFee) var utxoOps []*UtxoOperation diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 1bfe3d3f1..ee52947bf 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -4,13 +4,14 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/deso-protocol/core/bls" "math" _ "net/http/pprof" "reflect" "sort" "testing" + "github.com/deso-protocol/core/bls" + "math/rand" "github.com/btcsuite/btcd/btcec" @@ -2243,19 +2244,8 @@ func TestConnectFailingTransaction(t *testing.T) { senderPrivString, 200000, 11) blockHeight := chain.BlockTip().Height + 1 - blockView, err := NewUtxoView(db, params, nil, nil, chain.eventManager) - require.NoError(err) - txn1 := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) - utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn1, blockHeight, true) - require.NoError(err) - require.Equal(1, len(utxoOps)) - expectedBurnFee, expectedUtilityFee := _getBMFForTxn(txn1, globalParams) - require.Equal(expectedBurnFee, burnFee) - require.Equal(expectedUtilityFee, utilityFee) - - err = blockView.FlushToDb(uint64(blockHeight)) - // Also test updating the global params for FailingTransactionBMFMultiplierBasisPoints and FeeBucketGrowthRateBasisPoints. + // Set up the test meta. testMeta := &TestMeta{ t: t, chain: chain, @@ -2268,42 +2258,119 @@ func TestConnectFailingTransaction(t *testing.T) { } // Allow m0 to update global params. params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(m0PubBytes)] = true + + // Test failing txn with default global params { - // Set FailingTransactionBMFMultiplierBasisPoints=7000 or 70%. 
- _updateGlobalParamsEntryWithExtraData( - testMeta, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - map[string][]byte{FailingTransactionBMFMultiplierBasisPointsKey: UintToBuf(7000)}, - ) + blockView, err := NewUtxoView(db, params, nil, nil, chain.eventManager) + require.NoError(err) + txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) + utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn, blockHeight, true) + require.NoError(err) + require.Equal(1, len(utxoOps)) + expectedBurnFee, expectedUtilityFee := _getBMFForTxn(txn, globalParams) + require.Equal(expectedBurnFee, burnFee) + require.Equal(expectedUtilityFee, utilityFee) + + err = blockView.FlushToDb(uint64(blockHeight)) + require.NoError(err) } + + // Test case where the failing txn fee rate is applied as expected. { - // Set FeeBucketGrowthRateBasisPoints=7000 or 70%. - _updateGlobalParamsEntryWithExtraData( - testMeta, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - map[string][]byte{FeeBucketGrowthRateBasisPointsKey: UintToBuf(7000)}, - ) + + { + // Set FailingTransactionBMFMultiplierBasisPoints=7000 or 70%. + _updateGlobalParamsEntryWithExtraData( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + map[string][]byte{FailingTransactionBMFMultiplierBasisPointsKey: UintToBuf(7000)}, + ) + } + blockView, err := NewUtxoView(db, params, nil, nil, chain.eventManager) + require.NoError(err) + + newParams := blockView.GetCurrentGlobalParamsEntry() + require.Equal(uint64(7000), newParams.FailingTransactionBMFMultiplierBasisPoints) + + startingBalance, err := blockView.GetDeSoBalanceNanosForPublicKey(m0PubBytes) + require.NoError(err) + + // Try connecting another failing transaction, and make sure the burn and utility fees are computed accurately. 
+ txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) + + utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn, blockHeight, true) + require.NoError(err) + require.Equal(1, len(utxoOps)) + + // The final balance is m0's starting balance minus the failing txn fee paid. + finalBalance, err := blockView.GetDeSoBalanceNanosForPublicKey(m0PubBytes) + require.NoError(err) + + // Recompute the failing txn fee, which is expected to use the minimum network fee rate because + // the failing txn fee rate is too low on its own. + expectedFailingTxnFee := txn.TxnFeeNanos * newParams.FailingTransactionBMFMultiplierBasisPoints / MaxBasisPoints + require.Equal(startingBalance, finalBalance+expectedFailingTxnFee) + + expectedBurnFee, expectedUtilityFee := _getBMFForTxn(txn, newParams) + require.Equal(expectedBurnFee, burnFee) + require.Equal(expectedUtilityFee, utilityFee) + + err = blockView.FlushToDb(uint64(blockHeight)) + require.NoError(err) } - blockView, err = NewUtxoView(db, params, nil, nil, chain.eventManager) - require.NoError(err) - newParams := blockView.GetCurrentGlobalParamsEntry() - require.Equal(uint64(7000), newParams.FailingTransactionBMFMultiplierBasisPoints) - require.Equal(uint64(7000), newParams.FeeBucketGrowthRateBasisPoints) - // Try connecting another failing transaction, and make sure the burn and utility fees are computed accurately. - txn2 := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) - utxoOps, burnFee, utilityFee, err = blockView._connectFailingTransaction(txn2, blockHeight, true) - require.NoError(err) - require.Equal(1, len(utxoOps)) - expectedBurnFee, expectedUtilityFee = _getBMFForTxn(txn2, newParams) - require.Equal(expectedBurnFee, burnFee) - require.Equal(expectedUtilityFee, utilityFee) + // Test case where the failing txn fee rate is too low and replaced by the minimum network fee. + { + { + // Set FailingTransactionBMFMultiplierBasisPoints=1 or 0.01%. 
+ _updateGlobalParamsEntryWithExtraData( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + map[string][]byte{FailingTransactionBMFMultiplierBasisPointsKey: UintToBuf(1)}, + ) + } + + // Set the txn fee to ~1000 nanos, which guarantees that the effective failing txn fee rate is too low. + feeMin := uint64(1000) + feeMax := uint64(1001) + + blockView, err := NewUtxoView(db, params, nil, nil, chain.eventManager) + require.NoError(err) - err = blockView.FlushToDb(uint64(blockHeight)) + newParams := blockView.GetCurrentGlobalParamsEntry() + require.Equal(uint64(1), newParams.FailingTransactionBMFMultiplierBasisPoints) + + startingBalance, err := blockView.GetDeSoBalanceNanosForPublicKey(m0PubBytes) + require.NoError(err) + + txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) + utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn, blockHeight, true) + require.NoError(err) + require.Equal(1, len(utxoOps)) + + // The final balance is m0's starting balance minus the failing txn fee paid. + finalBalance, err := blockView.GetDeSoBalanceNanosForPublicKey(m0PubBytes) + require.NoError(err) + + txnBytes, err := txn.ToBytes(false) + require.NoError(err) + + // Recompute the failing txn fee, which is expected to use the minimum network fee rate because + // the failing txn fee rate is too low on its own. 
+ expectedFailingTxnFee := uint64(len(txnBytes)) * newParams.MinimumNetworkFeeNanosPerKB / 1000 + require.Equal(startingBalance, finalBalance+expectedFailingTxnFee) + + expectedBurnFee, expectedUtilityFee := computeBMF(expectedFailingTxnFee) + require.Equal(expectedBurnFee, burnFee) + require.Equal(expectedUtilityFee, utilityFee) + + err = blockView.FlushToDb(uint64(blockHeight)) + require.NoError(err) + } } func _getBMFForTxn(txn *MsgDeSoTxn, gp *GlobalParamsEntry) (_burnFee uint64, _utilityFee uint64) { From 6b98b0f8b1d57749e49d1abca21807e916eb3dd6 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 8 Feb 2024 16:42:52 -0500 Subject: [PATCH 472/762] Mempool Tweaks To Fix Backend CI (#1019) --- lib/pos_transaction_register.go | 6 +++--- lib/server.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index b1f77d3a8..8a89fc8db 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -50,7 +50,7 @@ func NewTransactionRegister() *TransactionRegister { totalTxnsSizeBytes: 0, // Set default values for the uninitialized fields. This is safe because any transactions // added to the register will be re-bucketed once the params are updated. 
- minimumNetworkFeeNanosPerKB: minimumNetworkFeeNanosPerKB, // Default to 1000 nanos per KB + minimumNetworkFeeNanosPerKB: minimumNetworkFeeNanosPerKB, // Default to 100 nanos per KB feeBucketGrowthRateBasisPoints: feeBucketMultiplier, // Default to 10% } } @@ -697,7 +697,7 @@ func _isValidMinimumFeeAndMultiplier(minimumNetworkFeeNanosPerKB *big.Float, fee } func _getFallbackSafeMinimumFeeAndMultiplier() (*big.Float, *big.Float) { - minimumNetworkFeeNanosPerKB := big.NewFloat(1000) // Default to 1000 nanos per KB - feeBucketMultiplier := big.NewFloat(1000) // Default to 10% + minimumNetworkFeeNanosPerKB := big.NewFloat(100) // Default to 100 nanos per KB + feeBucketMultiplier := big.NewFloat(1000) // Default to 10% return minimumNetworkFeeNanosPerKB, feeBucketMultiplier } diff --git a/lib/server.go b/lib/server.go index 23a821fa5..dc5231f55 100644 --- a/lib/server.go +++ b/lib/server.go @@ -524,7 +524,7 @@ func NewServer( currentUtxoView, uint64(_chain.blockTip().Height), _mempoolDumpDir, - false, + _mempoolDumpDir == "", // If no mempool dump dir is set, then the mempool will be in memory only _maxMempoolPosSizeBytes, _mempoolBackupIntervalMillis, _mempoolFeeEstimatorNumMempoolBlocks, From 6bd9ec82389514496d34af35bca132510c44a29b Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 12 Feb 2024 16:27:30 -0500 Subject: [PATCH 473/762] add core_state flag to all relevant (non-snapshot) db prefixes (#1021) --- lib/db_utils.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/db_utils.go b/lib/db_utils.go index 13a28e2aa..9e6c87c0c 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -493,7 +493,7 @@ type DBPrefixes struct { // PrefixValidatorByPKID: Retrieve a validator by PKID. 
// Prefix, -> ValidatorEntry - PrefixValidatorByPKID []byte `prefix_id:"[80]" is_state:"true"` + PrefixValidatorByPKID []byte `prefix_id:"[80]" is_state:"true" core_state:"true"` // PrefixValidatorByStatusAndStakeAmount: Retrieve the top N active validators by stake. // Prefix, , , -> nil @@ -502,7 +502,7 @@ type DBPrefixes struct { // PrefixStakeByValidatorAndStaker: Retrieve a StakeEntry. // Prefix, , -> StakeEntry - PrefixStakeByValidatorAndStaker []byte `prefix_id:"[82]" is_state:"true"` + PrefixStakeByValidatorAndStaker []byte `prefix_id:"[82]" is_state:"true" core_state:"true"` // PrefixStakeByStakeAmount: Retrieve the top N stake entries by stake amount. // Prefix, , , -> nil @@ -533,11 +533,11 @@ type DBPrefixes struct { // (CurrentEpoch - LockedAtEpochNumber) = 133 - 123 = 10, which is greater than // cooldown=3. Thus the UnlockStake will succeed, which will result in the // LockedStakeEntry being deleted and 25 DESO being added to the user's balance. - PrefixLockedStakeByValidatorAndStakerAndLockedAt []byte `prefix_id:"[84]" is_state:"true"` + PrefixLockedStakeByValidatorAndStakerAndLockedAt []byte `prefix_id:"[84]" is_state:"true" core_state:"true"` // PrefixCurrentEpoch: Retrieve the current EpochEntry. // Prefix -> EpochEntry - PrefixCurrentEpoch []byte `prefix_id:"[85]" is_state:"true"` + PrefixCurrentEpoch []byte `prefix_id:"[85]" is_state:"true" core_state:"true"` // PrefixCurrentRandomSeedHash: Retrieve the current RandomSeedHash. // Prefix -> . @@ -579,18 +579,18 @@ type DBPrefixes struct { // among the vested and unvested locked balance entries without separate indexes. // Prefix, , , , // , -> - PrefixLockedBalanceEntry []byte `prefix_id:"[93]" is_state:"true"` + PrefixLockedBalanceEntry []byte `prefix_id:"[93]" is_state:"true" core_state:"true"` // PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs: // Retrieves a LockupYieldCurvePoint. 
// The structure of the key enables quick lookups for a (ProfilePKID, Duration) pair as well // as quick construction of yield curve plots over time. // Prefix, , -> - PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs []byte `prefix_id:"[94]" is_state:"true"` + PrefixLockupYieldCurvePointByProfilePKIDAndDurationNanoSecs []byte `prefix_id:"[94]" is_state:"true" core_state:"true"` // PrefixValidatorBLSPublicKeyPKIDPairEntry: Retrieve a BLSPublicKeyPKIDPairEntry by BLS public key. // Prefix, -> *BLSPublicKeyPKIDPairEntry - PrefixValidatorBLSPublicKeyPKIDPairEntry []byte `prefix_id:"[95]" is_state:"true"` + PrefixValidatorBLSPublicKeyPKIDPairEntry []byte `prefix_id:"[95]" is_state:"true" core_state:"true"` // PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry: Retrieve a snapshotted BLSPublicKeyPKIDPairEntry // by BLS Public Key and SnapshotAtEpochNumber. From 2a006ddb3f38e6b261f4162497ccca55ae79efb2 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 12 Feb 2024 20:25:22 -0500 Subject: [PATCH 474/762] Add StateChangeMetadata to utxo operations for staking rewards (#1022) --- lib/block_view.go | 14 ++++++++-- lib/block_view_types.go | 15 ++++++---- lib/pos_staking_rewards.go | 34 +++++++++++++++++++---- lib/state_change_metadata.go | 53 ++++++++++++++++++++++++++++++++++++ 4 files changed, 103 insertions(+), 13 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 4d75fd0b2..641adf357 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -1001,6 +1001,16 @@ func (bav *UtxoView) _addBalance(amountNanos uint64, balancePublicKey []byte, }, nil } +func (bav *UtxoView) _addBalanceForStakeReward(amountNanos uint64, balancePublicKey []byte, +) (*UtxoOperation, error) { + utxoOp, err := bav._addBalance(amountNanos, balancePublicKey) + if err != nil { + return nil, errors.Wrapf(err, "_addBalanceForStakeReward: ") + } + utxoOp.Type = OperationTypeStakeDistributionPayToBalance + return utxoOp, nil +} + func 
(bav *UtxoView) _addDESO(amountNanos uint64, publicKey []byte, utxoEntry *UtxoEntry, blockHeight uint32, ) (*UtxoOperation, error) { if blockHeight >= bav.Params.ForkHeights.BalanceModelBlockHeight { @@ -1659,7 +1669,7 @@ func (bav *UtxoView) DisconnectBlock( for _, nonceEntry := range utxoOp.PrevNonceEntries { bav.SetTransactorNonceEntry(nonceEntry) } - case OperationTypeAddBalance: + case OperationTypeStakeDistributionPayToBalance: // We don't allow add balance utxo operations unless it's the end of an epoch. if !isLastBlockInEpoch { return fmt.Errorf("DisconnectBlock: Found add balance operation in block %d that is not the end "+ @@ -1669,7 +1679,7 @@ func (bav *UtxoView) DisconnectBlock( if err = bav._unAddBalance(utxoOp.BalanceAmountNanos, utxoOp.BalancePublicKey); err != nil { return errors.Wrapf(err, "DisconnectBlock: Problem unAdding balance %v: ", utxoOp.BalanceAmountNanos) } - case OperationTypeStakeDistribution: + case OperationTypeStakeDistributionRestake: // We don't allow stake distribution utxo operations unless it's the end of an epoch. 
if !isLastBlockInEpoch { return fmt.Errorf("DisconnectBlock: Found add balance operation in block %d that is not the end "+ diff --git a/lib/block_view_types.go b/lib/block_view_types.go index d93dd2aac..aee4cbc2d 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -668,9 +668,10 @@ const ( OperationTypeCoinLockupTransfer OperationType = 46 OperationTypeCoinUnlock OperationType = 47 OperationTypeUpdateCoinLockupParams OperationType = 48 - OperationTypeStakeDistribution OperationType = 49 - OperationTypeSetValidatorLastActiveAtEpoch OperationType = 50 - // NEXT_TAG = 51 + OperationTypeStakeDistributionRestake OperationType = 49 + OperationTypeStakeDistributionPayToBalance OperationType = 50 + OperationTypeSetValidatorLastActiveAtEpoch OperationType = 51 + // NEXT_TAG = 52 ) func (op OperationType) String() string { @@ -771,8 +772,12 @@ func (op OperationType) String() string { return "OperationTypeCoinLockupTransfer" case OperationTypeCoinUnlock: return "OperationTypeCoinUnlock" - case OperationTypeStakeDistribution: - return "OperationTypeStakeDistribution" + case OperationTypeStakeDistributionRestake: + return "OperationTypeStakeDistributionRestake" + case OperationTypeSetValidatorLastActiveAtEpoch: + return "OperationTypeSetValidatorLastActiveAtEpoch" + case OperationTypeStakeDistributionPayToBalance: + return "OperationTypeStakeDistributionPayToBalance" } return "OperationTypeUNKNOWN" } diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index 4492f7568..40f2083ce 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -87,7 +87,9 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64 // Reward the staker their portion of the staking reward. 
if stakerRewardNanos > 0 { var utxoOperation *UtxoOperation - if utxoOperation, err = bav.distributeStakingReward(snapshotStakeEntry.ValidatorPKID, snapshotStakeEntry.StakerPKID, stakerRewardNanos); err != nil { + if utxoOperation, err = bav.distributeStakingReward( + snapshotStakeEntry.ValidatorPKID, snapshotStakeEntry.StakerPKID, stakerRewardNanos, false, + ); err != nil { return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem distributing staker reward: ") } utxoOperations = append(utxoOperations, utxoOperation) @@ -96,7 +98,8 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64 // Reward the validator their commission from the staking reward. if validatorCommissionNanos > 0 { var utxoOperation *UtxoOperation - if utxoOperation, err = bav.distributeValidatorCommission(snapshotStakeEntry.ValidatorPKID, validatorCommissionNanos); err != nil { + if utxoOperation, err = bav.distributeValidatorCommission( + snapshotStakeEntry.ValidatorPKID, validatorCommissionNanos); err != nil { return nil, errors.Wrapf(err, "DistributeStakingRewardsToSnapshotStakes: problem distributing validator commission reward: ") } utxoOperations = append(utxoOperations, utxoOperation) @@ -182,7 +185,12 @@ func (bav *UtxoView) computeStakerRewardAndValidatorCommission( return stakerRewardNanos.Uint64(), validatorCommissionNanos.Uint64(), nil } -func (bav *UtxoView) distributeStakingReward(validatorPKID *PKID, stakerPKID *PKID, rewardNanos uint64) (*UtxoOperation, error) { +func (bav *UtxoView) distributeStakingReward( + validatorPKID *PKID, + stakerPKID *PKID, + rewardNanos uint64, + isValidatorCommission bool, +) (*UtxoOperation, error) { // Fetch the staker's latest StakeEntry. 
stakeEntry, err := bav.GetStakeEntry(validatorPKID, stakerPKID) if err != nil { @@ -202,10 +210,17 @@ func (bav *UtxoView) distributeStakingReward(validatorPKID *PKID, stakerPKID *PK return nil, errors.Wrapf(err, "distributeStakingReward: problem fetching validator entry: ") } utxoOperation = &UtxoOperation{ - Type: OperationTypeStakeDistribution, + Type: OperationTypeStakeDistributionRestake, PrevStakeEntries: []*StakeEntry{stakeEntry.Copy()}, PrevValidatorEntry: validatorEntry.Copy(), StakeAmountNanosDiff: rewardNanos, + StateChangeMetadata: &StakeRewardStateChangeMetadata{ + ValidatorPKID: validatorPKID, + StakerPKID: stakerPKID, + RewardNanos: rewardNanos, + StakingRewardMethod: StakingRewardMethodRestake, + IsValidatorCommission: isValidatorCommission, + }, } stakeEntry.StakeAmountNanos = uint256.NewInt().Add(stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(rewardNanos)) bav._setStakeEntryMappings(stakeEntry) @@ -219,9 +234,16 @@ func (bav *UtxoView) distributeStakingReward(validatorPKID *PKID, stakerPKID *PK // the rewards directly to the staker's wallet. stakerPublicKey := bav.GetPublicKeyForPKID(stakerPKID) - if utxoOperation, err = bav._addBalance(rewardNanos, stakerPublicKey); err != nil { + if utxoOperation, err = bav._addBalanceForStakeReward(rewardNanos, stakerPublicKey); err != nil { return nil, errors.Wrapf(err, "distributeStakingReward: problem adding rewards to staker's DESO balance: ") } + utxoOperation.StateChangeMetadata = &StakeRewardStateChangeMetadata{ + ValidatorPKID: validatorPKID, + StakerPKID: stakerPKID, + RewardNanos: rewardNanos, + StakingRewardMethod: StakingRewardMethodPayToBalance, + IsValidatorCommission: isValidatorCommission, + } return utxoOperation, nil } @@ -247,7 +269,7 @@ func (bav *UtxoView) distributeValidatorCommission(validatorPKID *PKID, commissi // they can stake to themselves using a separate wallet and only enable reward restaking for that StakeEntry. 
// // If the above isn't desired the behavior, then we can alternatively always pay out validator's commission directly to their wallet. - return bav.distributeStakingReward(validatorPKID, validatorPKID, commissionNanos) + return bav.distributeStakingReward(validatorPKID, validatorPKID, commissionNanos, true) } var ( diff --git a/lib/state_change_metadata.go b/lib/state_change_metadata.go index 84622715b..3fb11057a 100644 --- a/lib/state_change_metadata.go +++ b/lib/state_change_metadata.go @@ -21,6 +21,7 @@ const ( EncoderTypeDeleteUserAssociationStateChangeMetadata EncoderType = 2000012 EncoderTypeCreatePostAssociationStateChangeMetadata EncoderType = 2000013 EncoderTypeDeletePostAssociationStateChangeMetadata EncoderType = 2000014 + EncoderTypeStakeRewardStateChangeMetadata EncoderType = 2000015 ) func GetStateChangeMetadataFromOpType(opType OperationType) DeSoEncoder { @@ -55,6 +56,8 @@ func GetStateChangeMetadataFromOpType(opType OperationType) DeSoEncoder { return &CreatePostAssociationStateChangeMetadata{} case OperationTypeDeletePostAssociation: return &DeletePostAssociationStateChangeMetadata{} + case OperationTypeStakeDistributionRestake, OperationTypeStakeDistributionPayToBalance: + return &StakeRewardStateChangeMetadata{} default: return nil } @@ -542,3 +545,53 @@ func (deletePostAssociationSCM *DeletePostAssociationStateChangeMetadata) GetVer func (deletePostAssociationSCM *DeletePostAssociationStateChangeMetadata) GetEncoderType() EncoderType { return EncoderTypeDeletePostAssociationStateChangeMetadata } + +type StakeRewardStateChangeMetadata struct { + ValidatorPKID *PKID + StakerPKID *PKID + RewardNanos uint64 + StakingRewardMethod StakingRewardMethod + IsValidatorCommission bool +} + +func (stakeRewardSCM *StakeRewardStateChangeMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, EncodeToBytes(blockHeight, stakeRewardSCM.ValidatorPKID, skipMetadata...)...) 
+ data = append(data, EncodeToBytes(blockHeight, stakeRewardSCM.StakerPKID, skipMetadata...)...) + data = append(data, UintToBuf(stakeRewardSCM.RewardNanos)...) + data = append(data, UintToBuf(uint64(stakeRewardSCM.StakingRewardMethod))...) + data = append(data, BoolToByte(stakeRewardSCM.IsValidatorCommission)) + return data +} + +func (stakeRewardSCM *StakeRewardStateChangeMetadata) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + if stakeRewardSCM.ValidatorPKID, err = DecodeDeSoEncoder(&PKID{}, rr); err != nil { + return errors.Wrapf(err, "StakeRewardStateChangeMetadata.Decode: Problem reading ValidatorPKID") + } + if stakeRewardSCM.StakerPKID, err = DecodeDeSoEncoder(&PKID{}, rr); err != nil { + return errors.Wrapf(err, "StakeRewardStateChangeMetadata.Decode: Problem reading StakerPKID") + } + stakeRewardSCM.RewardNanos, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "StakeRewardStateChangeMetadata.Decode: Problem reading RewardNanos") + } + stakingRewardMethod, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "StakeRewardStateChangeMetadata.Decode: Problem reading StakingRewardMethod") + } + stakeRewardSCM.StakingRewardMethod = StakingRewardMethod(stakingRewardMethod) + stakeRewardSCM.IsValidatorCommission, err = ReadBoolByte(rr) + if err != nil { + return errors.Wrapf(err, "StakeRewardStateChangeMetadata.Decode: Problem reading IsValidatorCommission") + } + return nil +} + +func (stakeRewardSCM *StakeRewardStateChangeMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (stakeRewardSCM *StakeRewardStateChangeMetadata) GetEncoderType() EncoderType { + return EncoderTypeStakeRewardStateChangeMetadata +} From 3d46bc9fca95140f62848a164e38bf33eff6f953 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 13 Feb 2024 12:15:35 -0500 Subject: [PATCH 475/762] Add call to signal state syncer event manager when committing blocks in 
PoS (#1023) --- lib/pos_blockchain.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index d7c7b464f..265e1b04b 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -2,6 +2,7 @@ package lib import ( "fmt" + "github.com/google/uuid" "math" "time" @@ -1639,6 +1640,15 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash) error { UtxoView: utxoView, UtxoOps: utxoOpsForBlock, }) + // TODO: check w/ Z if this is right.... + // Signal the state syncer that we've flushed to the DB so state syncer + // will pick up the latest changes after committing this block. + if !bc.eventManager.isMempoolManager { + bc.eventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ + FlushId: uuid.Nil, + Succeeded: true, + }) + } } // TODO: What else do we need to do in here? return nil From 33aa5788b041c4271775961e0a167ee984694ccf Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 13 Feb 2024 12:16:33 -0500 Subject: [PATCH 476/762] Add nonce to BlockRewardMetadata in pos block producer, update pos regtest conf (#1024) --- lib/block_view_types.go | 3 +-- lib/constants.go | 1 + lib/pos_block_producer.go | 9 ++++++++- lib/pos_server_regtest.go | 4 ++-- 4 files changed, 12 insertions(+), 5 deletions(-) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index aee4cbc2d..59d81cd3f 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -12,12 +12,11 @@ import ( "sort" "strings" - "github.com/spf13/viper" - "github.com/btcsuite/btcd/btcec" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" + "github.com/spf13/viper" ) type UtxoType uint8 diff --git a/lib/constants.go b/lib/constants.go index 1c88f2b8c..b0f9d86e9 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -813,6 +813,7 @@ func (params *DeSoParams) EnableRegtest() { params.ForkHeights = RegtestForkHeights params.EncoderMigrationHeights = 
GetEncoderMigrationHeights(¶ms.ForkHeights) params.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + params.DefaultStakingRewardsAPYBasisPoints = 10 * 100 // 10% for regtest } func (params *DeSoParams) IsPoWBlockHeight(blockHeight uint64) bool { diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 57bc129b1..61aaac55b 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -1,6 +1,7 @@ package lib import ( + "github.com/btcsuite/btcd/wire" "math" "time" @@ -125,7 +126,13 @@ func (pbp *PosBlockProducer) createBlockWithoutHeader( blockRewardOutput.AmountNanos = math.MaxUint64 blockRewardOutput.PublicKey = pbp.proposerPublicKey.ToBytes() blockRewardTxn.TxOutputs = append(blockRewardTxn.TxOutputs, blockRewardOutput) - blockRewardTxn.TxnMeta = &BlockRewardMetadataa{} + extraNonce, err := wire.RandomUint64() + if err != nil { + return nil, errors.Wrapf(err, "Error generating random nonce: ") + } + blockRewardTxn.TxnMeta = &BlockRewardMetadataa{ + ExtraData: UintToBuf(extraNonce), + } blockRewardTxnSizeBytes, err := blockRewardTxn.ToBytes(true) if err != nil { return nil, errors.Wrapf(err, "Error computing block reward txn size: ") diff --git a/lib/pos_server_regtest.go b/lib/pos_server_regtest.go index 8fc8738f1..e4a339416 100644 --- a/lib/pos_server_regtest.go +++ b/lib/pos_server_regtest.go @@ -6,7 +6,7 @@ import ( ) func (srv *Server) submitRegtestValidatorRegistrationTxns(block *MsgDeSoBlock) { - if block.Header.Height != uint64(srv.blockchain.params.ForkHeights.ProofOfStake1StateSetupBlockHeight+5) { + if block.Header.Height != uint64(srv.blockchain.params.ForkHeights.ProofOfStake1StateSetupBlockHeight+15) { return } @@ -62,7 +62,7 @@ func (srv *Server) submitRegtestValidatorRegistrationTxns(block *MsgDeSoBlock) { stakeTxnMeta := StakeMetadata{ ValidatorPublicKey: NewPublicKey(transactorPubKey), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt().SetUint64(10), + 
StakeAmountNanos: uint256.NewInt().SetUint64(10 * 1e9), } stakeTxn, _, _, _, err := srv.blockProducer.chain.CreateStakeTxn( From d9103545511eb658511c979efb4fc837f044a5ab Mon Sep 17 00:00:00 2001 From: Jon Pollock <135658176+poolcoke@users.noreply.github.com> Date: Tue, 13 Feb 2024 13:08:54 -0800 Subject: [PATCH 477/762] _connectCoinLockup transfer restrictions (#1025) * Add TransferRestrictionStatus logic to coin lockup txn. * Transfer restriction tests for coin lockups. * Consolidate repeated logic in CheckLockupTransferRestrictions helper function. --- lib/block_view_dao_coin_test.go | 4 +- lib/block_view_lockups.go | 90 +++++++++++++++++++++++++++------ lib/block_view_lockups_test.go | 83 +++++++++++++++++++++++++++++- lib/errors.go | 1 + 4 files changed, 160 insertions(+), 18 deletions(-) diff --git a/lib/block_view_dao_coin_test.go b/lib/block_view_dao_coin_test.go index 41d8c277b..1050a8b9b 100644 --- a/lib/block_view_dao_coin_test.go +++ b/lib/block_view_dao_coin_test.go @@ -71,7 +71,7 @@ func _daoCoinTxn(t *testing.T, chain *Blockchain, db *badger.DB, require.Equal(OperationTypeDAOCoin, utxoOps[len(utxoOps)-1].Type) - require.NoError(utxoView.FlushToDb(0)) + require.NoError(utxoView.FlushToDb(uint64(blockHeight))) return utxoOps, txn, blockHeight, nil } @@ -151,7 +151,7 @@ func _daoCoinTransferTxn(t *testing.T, chain *Blockchain, db *badger.DB, } require.Equal(OperationTypeDAOCoinTransfer, utxoOps[len(utxoOps)-1].Type) - require.NoError(utxoView.FlushToDb(0)) + require.NoError(utxoView.FlushToDb(uint64(blockHeight))) return utxoOps, txn, blockHeight, nil } diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index e79ccae2b..c2651d428 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -1303,6 +1303,30 @@ func (bav *UtxoView) _connectCoinLockup( } } + // (1.5) Verify transfer restriction statuses as being respected + + // Fetch the transfer restrictions attached to the transfer. 
+ transferRestrictionStatus := profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus + + // Fetch the "sender" (transactor's) PKID entry. + senderPKIDEntry := bav.GetPKIDForPublicKey(txn.PublicKey) + if senderPKIDEntry == nil || senderPKIDEntry.isDeleted { + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupInvalidSenderPKID, "_connectCoinLockup") + } + senderPKID := senderPKIDEntry.PKID + + // Validate transfer restriction rules. + err = bav.CheckLockupTransferRestrictions( + transferRestrictionStatus, + profilePKID, + senderPKID, + hodlerPKID, + lockedBalanceEntry) + if err != nil { + return 0, 0, nil, errors.Wrap(err, "_connectCoinLockup") + } + // (2) Store the previous locked balance entry previousLockedBalanceEntry = lockedBalanceEntry.Copy() @@ -1574,6 +1598,46 @@ func (bav *UtxoView) _connectCoinLockup( return totalInput, totalOutput, utxoOpsForTxn, nil } +// CheckLockupTransferRestrictions is a helper function meant to consolidate the transfer +// checks present in both _connectCoinLockup and _connectCoinLockupTransfer in one place. +// The check takes several expected arguments including the transfer restriction status +// in question (transferRestrictionStatus), the profile being transferred (profilePKID), +// who's initiating the transfer (senderPKID), and who's receiving the transfer (receiverPKID). +// +// In addition, there's a receiverLockedBalanceEntry as there's a context dependent +// check we do where we enable a "DAO" member to be someone who already possesses a +// non-zero unvested locked balance entry with the associated timestamp. +func (bav *UtxoView) CheckLockupTransferRestrictions( + transferRestrictionStatus TransferRestrictionStatus, + profilePKID *PKID, + senderPKID *PKID, + receiverPKID *PKID, + receiverLockedBalanceEntry *LockedBalanceEntry, +) ( + _ruleError error, +) { + // Check if profile owner only transfer restrictions are respected. 
+ if transferRestrictionStatus == TransferRestrictionStatusProfileOwnerOnly && !profilePKID.Eq(senderPKID) { + return RuleErrorCoinLockupTransferRestrictedToProfileOwner + } + + // Check if the DAO member only restrictions are respected. + // Here, a "DAO member" is anyone who holds either unlocked or locked DAO coins associated with the profile. + if transferRestrictionStatus == TransferRestrictionStatusDAOMembersOnly { + // NOTE: It's not possible for the receiverBalanceEntry below to be nil as + // the function will return an empty balance entry with the specified (hodler, profile) + // pair instead of returning nil if there's no entries in the db. + receiverBalanceEntry := + bav._getBalanceEntryForHODLerPKIDAndCreatorPKID(receiverPKID, profilePKID, true) + if receiverBalanceEntry.BalanceNanos.IsZero() && receiverLockedBalanceEntry.BalanceBaseUnits.IsZero() { + return RuleErrorCoinLockupTransferRestrictedToDAOMembers + } + } + + // If we reach here, the lockup transfer is valid. + return nil +} + // SplitVestedLockedBalanceEntry is used for splitting a vested locked balance entry into two pieces. // It is assumed that the startSplitTimestamp lines up with the UnlockTimestampNanoSecs of the lockedBalanceEntry // passed or endSplitTimestampNanoSecs lines up with the VestingEndTimestampNanoSecs of @@ -2283,28 +2347,24 @@ func (bav *UtxoView) _connectCoinLockupTransfer( // Fetch the transfer restrictions attached to the transfer. transferRestrictionStatus := profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus - // Check if transfers are limited to profile owner only. - if transferRestrictionStatus == TransferRestrictionStatusProfileOwnerOnly && !profilePKID.Eq(senderPKID) { + // Validate transfer restriction rules. 
+ err = bav.CheckLockupTransferRestrictions( + transferRestrictionStatus, + profilePKID, + senderPKID, + receiverPKID, + receiverLockedBalanceEntry) + if err != nil { return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupTransferRestrictedToProfileOwner, "_connectCoinLockupTransfer") - } - - // Check if the transfers are limited to DAO members only. - // Here, a "DAO member" is anyone who holds either unlocked or locked DAO coins associated with the profile. - if transferRestrictionStatus == TransferRestrictionStatusDAOMembersOnly { - receiverBalanceEntry := bav._getBalanceEntryForHODLerPKIDAndCreatorPKID(receiverPKID, profilePKID, true) - if receiverBalanceEntry.BalanceNanos.IsZero() && receiverLockedBalanceEntry.BalanceBaseUnits.IsZero() { - return 0, 0, nil, - errors.Wrap(RuleErrorCoinLockupTransferRestrictedToDAOMembers, "_connectCoinLockupTransfer") - } + errors.Wrap(err, "_connectCoinLockupTransfer") } // Add to the recipient's balance entry, checking for overflow. newRecipientBalanceBaseUnits, err := SafeUint256().Add(&receiverLockedBalanceEntry.BalanceBaseUnits, txMeta.LockedCoinsToTransferBaseUnits) if err != nil { - return 0, 0, nil, errors.Wrap(RuleErrorCoinLockupTransferBalanceOverflowAtReceiver, - "_connectCoinLockupTransfer") + return 0, 0, nil, + errors.Wrap(RuleErrorCoinLockupTransferBalanceOverflowAtReceiver, "_connectCoinLockupTransfer") } receiverLockedBalanceEntry.BalanceBaseUnits = *newRecipientBalanceBaseUnits diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 4fc0a5bbe..7680c5dd6 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -310,6 +310,86 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { require.Contains(t, err.Error(), RuleErrorCoinLockupInsufficientCoins) } + // Set m0 locked transfer restrictions to profile owner only and send m1, m2 unlocked m0 tokens. + // We do this to test transfer restriction status on lockups. 
+ _updateCoinLockupParamsWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + 0, + 0, + false, + true, + TransferRestrictionStatusProfileOwnerOnly, + ) + _daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, DAOCoinTransferMetadata{ + ProfilePublicKey: m0PkBytes, + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), + ReceiverPublicKey: m1PkBytes, + }) + _daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, DAOCoinTransferMetadata{ + ProfilePublicKey: m0PkBytes, + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), + ReceiverPublicKey: m2PkBytes, + }) + + // Attempt to perform a m0 lockup where the transactor is not m0. + // (This should fail -- RuleErrorCoinLockupRestrictedToProfileOwner) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m0Pub, m0Pub, + 1000, 1000, uint256.NewInt().SetUint64(1000), 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToProfileOwner) + } + + // Update transfer restrictions to DAO members only. + _updateCoinLockupParamsWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + 0, + 0, + false, + true, + TransferRestrictionStatusDAOMembersOnly, + ) + + // Attempt to perform a m0 lockup where the receiver m3 is not in the DAO. + // (This should fail -- RuleErrorCoinLockupRestrictedToDAOMembers) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m0Pub, m3Pub, + 1000, 1000, uint256.NewInt().SetUint64(1000), 0) + require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToDAOMembers) + } + + // Try the same m0 lockup but where DAO member m2 is the recipient. 
+ // This should succeed :) + { + _, _, _, err := _coinLockupWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, + m1Pub, m1Priv, m0Pub, m2Pub, + 1000, 1000, uint256.NewInt().SetUint64(1000), 0) + require.NoError(t, err) + } + + // Restore default transfer restrictions. + _updateCoinLockupParamsWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + 0, + 0, + false, + true, + TransferRestrictionStatusUnrestricted, + ) + // NOTE: The only other rule errors for coin lockup txns are related to yield curve overflows. // This is tested separately and more comprehensively in a different test. @@ -3842,7 +3922,8 @@ func _setUpMinerAndTestMetaForTimestampBasedLockupTests(t *testing.T) *TestMeta params.ForkHeights.DAOCoinBlockHeight = uint32(0) // Initialize lockups block height. - params.ForkHeights.LockupsBlockHeight = uint32(1) + params.ForkHeights.LockupsBlockHeight = uint32(11) + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(11) GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) diff --git a/lib/errors.go b/lib/errors.go index b7eea0896..0bae19fbd 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -493,6 +493,7 @@ const ( // Lockups RuleErrorLockupTxnBeforeBlockHeight RuleError = "RuleErrorLockupTxnBeforeBlockHeight" + RuleErrorCoinLockupInvalidSenderPKID RuleError = "RuleErrorCoinLockupInvalidSenderPKID" RuleErrorCoinLockupInvalidRecipientPKID RuleError = "RuleErrorCoinLockupInvalidRecipientPKID" RuleErrorCoinLockupInvalidLockupDuration RuleError = "RuleErrorCoinLockupInvalidLockupDuration" RuleErrorCoinLockupInvalidVestingEndTimestamp RuleError = "RuleErrorCoinLockupInvalidVestingEndTimestamp" From be323d02a0443cdddae6d0b2aef5e385abc7217d Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 13 Feb 2024 16:45:36 
-0500 Subject: [PATCH 478/762] Fix txindex w/ PoS (#1026) --- lib/blockchain.go | 8 ++++++++ lib/pos_blockchain.go | 13 +++++++++++-- lib/txindex.go | 9 +++++---- 3 files changed, 24 insertions(+), 6 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 856e3f274..746d4dc2d 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -96,6 +96,14 @@ func (nn *BlockNode) IsStored() bool { return nn.Status&StatusBlockStored != 0 } +// IsProcessed returns true if the BlockNode has been processed and is not an orphan. +// This status is effectively replaced with IsStored for PoS, but is applied to +// blocks once validated to ensure checks for the processed status behave as expected +// in other portions of the codebase. +func (nn *BlockNode) IsProcessed() bool { + return nn.Status&StatusBlockProcessed != 0 +} + // IsValidated returns true if a BlockNode has passed all validations. A BlockNode that is validated is // generally always stored first. func (nn *BlockNode) IsValidated() bool { diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 265e1b04b..7d7b4d137 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -307,7 +307,9 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v glog.Errorf("processBlockPoS: Problem getting disconnected block %v", disconnectedBlockHashes[ii]) continue } - bc.eventManager.blockDisconnected(&BlockEvent{Block: disconnectedBlock}) + if bc.eventManager != nil { + bc.eventManager.blockDisconnected(&BlockEvent{Block: disconnectedBlock}) + } } for ii := 0; ii < len(connectedBlockHashes); ii++ { connectedBlock := bc.GetBlock(&connectedBlockHashes[ii]) @@ -315,7 +317,9 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v glog.Errorf("processBlockPoS: Problem getting connected block %v", connectedBlockHashes[ii]) continue } - bc.eventManager.blockConnected(&BlockEvent{Block: connectedBlock}) + if bc.eventManager != nil { + 
bc.eventManager.blockConnected(&BlockEvent{Block: connectedBlock}) + } } // Now that we've processed this block, we check for any blocks that were previously @@ -1312,6 +1316,11 @@ func (bc *Blockchain) storeValidatedBlockInBlockIndex(block *MsgDeSoBlock) (*Blo if !blockNode.IsStored() { blockNode.Status |= StatusBlockStored } + // If the BlockNode is not already processed, we should set its status to processed. + // This ensures that bc.IsFullyStored will return true for this block. + if !blockNode.IsProcessed() { + blockNode.Status |= StatusBlockProcessed + } // If the DB update fails, then we should return an error. if err = bc.upsertBlockAndBlockNodeToDB(block, blockNode, true); err != nil { return nil, errors.Wrapf(err, "storeValidatedBlockInBlockIndex: Problem upserting block and block node to DB") diff --git a/lib/txindex.go b/lib/txindex.go index 8cca493f7..61d0a7966 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -223,21 +223,22 @@ func (txi *TXIndex) GetTxindexUpdateBlockNodes() ( blockIndexByHashCopy, _ := txi.TXIndexChain.CopyBlockIndexes() txindexTipNode := blockIndexByHashCopy[*txindexTipHash.Hash] + // Get the committed tip. + committedTip, _ := txi.CoreChain.getCommittedTip() if txindexTipNode == nil { glog.Info("GetTxindexUpdateBlockNodes: Txindex tip was not found; building txindex starting at genesis block") newTxIndexBestChain, _ := txi.TXIndexChain.CopyBestChain() newBlockchainBestChain, _ := txi.CoreChain.CopyBestChain() - return txindexTipNode, txi.CoreChain.BlockTip(), nil, newTxIndexBestChain, newBlockchainBestChain + return txindexTipNode, committedTip, nil, newTxIndexBestChain, newBlockchainBestChain } // At this point, we know our txindex tip is in our block index so // there must be a common ancestor between the tip and the block tip. 
- blockTip := txi.CoreChain.BlockTip() - commonAncestor, detachBlocks, attachBlocks := GetReorgBlocks(txindexTipNode, blockTip) + commonAncestor, detachBlocks, attachBlocks := GetReorgBlocks(txindexTipNode, committedTip) - return txindexTipNode, blockTip, commonAncestor, detachBlocks, attachBlocks + return txindexTipNode, committedTip, commonAncestor, detachBlocks, attachBlocks } // Update syncs the transaction index with the blockchain. From 67b250c6c60bd5b90b1174deba9186e24448302c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 13 Feb 2024 19:09:16 -0500 Subject: [PATCH 479/762] Add collections.Filter Function (#1028) Add collections.Filter Function Add unit tests --- collections/slice.go | 10 ++++++++++ collections/slice_test.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/collections/slice.go b/collections/slice.go index 9f787d8d2..d83f6eb3c 100644 --- a/collections/slice.go +++ b/collections/slice.go @@ -86,3 +86,13 @@ func RemoveDuplicates[T comparable](slice1 []T, slice2 []T) (_slice1Unique []T, return slice1Unique, slice2Unique } + +func Filter[T any](slice []T, predicate func(T) bool) []T { + var result []T + for _, val := range slice { + if predicate(val) { + result = append(result, val) + } + } + return result +} diff --git a/collections/slice_test.go b/collections/slice_test.go index 31f191ab4..480b57aef 100644 --- a/collections/slice_test.go +++ b/collections/slice_test.go @@ -155,3 +155,33 @@ func TestRemoveDuplicates(t *testing.T) { require.Equal(t, slice2Unique, []int{6, 7, 8, 9, 10}) } } + +func TestFilter(t *testing.T) { + // Predicate: all values > 0 + predicate := func(val int) bool { + return val > 0 + } + + // Test example where no values are > 0 + { + slice := []int{-1, -2, -3, -4, -5} + result := Filter(slice, predicate) + require.Equal(t, 0, len(result)) + } + + // Test example where some values are > 0 + { + slice := []int{-1, 2, 3, 4, 5} + result := 
Filter(slice, predicate) + require.Equal(t, 4, len(result)) + require.Equal(t, []int{2, 3, 4, 5}, result) + } + + // Test example where all values are > 0 + { + slice := []int{1, 2, 3, 4, 5} + result := Filter(slice, predicate) + require.Equal(t, 5, len(result)) + require.Equal(t, []int{1, 2, 3, 4, 5}, result) + } +} From 0e43c78f2a60dbc25c9c2fcf10f5a0100762e2b1 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 13 Feb 2024 19:10:35 -0500 Subject: [PATCH 480/762] Clean up Logging for FastHotStuffEvents (#1029) --- consensus/serialization.go | 26 ++++++++++++++++++++++++++ lib/server.go | 2 +- 2 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 consensus/serialization.go diff --git a/consensus/serialization.go b/consensus/serialization.go new file mode 100644 index 000000000..37538ece4 --- /dev/null +++ b/consensus/serialization.go @@ -0,0 +1,26 @@ +package consensus + +import "fmt" + +func (eventType FastHotStuffEventType) ToString() string { + switch eventType { + case FastHotStuffEventTypeVote: + return "VOTE" + case FastHotStuffEventTypeTimeout: + return "TIMEOUT" + case FastHotStuffEventTypeConstructVoteQC: + return "VOTE_QC" + case FastHotStuffEventTypeConstructTimeoutQC: + return "TIMEOUT_QC" + } + return "UNKNOWN" +} + +func (event *FastHotStuffEvent) ToString() string { + return fmt.Sprintf( + "{Type: %s, View: %d, Height: %d}", + event.EventType.ToString(), + event.View, + event.TipBlockHeight, + ) +} diff --git a/lib/server.go b/lib/server.go index dc5231f55..2e99a07c4 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2559,7 +2559,7 @@ func (srv *Server) _startConsensus() { select { case consensusEvent := <-srv._getFastHotStuffConsensusEventChannel(): { - glog.Infof("Server._startConsensus: Received consensus event for block height: %v", consensusEvent.TipBlockHeight) + glog.Infof("Server._startConsensus: Received consensus event: %s", consensusEvent.ToString()) 
srv._handleFastHostStuffConsensusEvent(consensusEvent) } From 1b6dac3209700dfb190a78e323bdc5ed84b7294f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 13 Feb 2024 19:12:31 -0500 Subject: [PATCH 481/762] Tweak pos_blockchain Helpers to Return BlockNodes (#1030) --- lib/pos_blockchain.go | 31 ++++++++++++++++++++++--------- lib/pos_blockchain_test.go | 23 ++++++++++++----------- 2 files changed, 34 insertions(+), 20 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 7d7b4d137..977aec912 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -2,10 +2,11 @@ package lib import ( "fmt" - "github.com/google/uuid" "math" "time" + "github.com/google/uuid" + "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" "github.com/dgraph-io/badger/v3" @@ -223,7 +224,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // is an orphan, then we store it after performing basic validations. // If the block extends from any committed block other than the committed tip, // then we throw it away. - lineageFromCommittedTip, err := bc.getLineageFromCommittedTip(block) + lineageFromCommittedTip, err := bc.getLineageFromCommittedTip(block.Header) if err == RuleErrorDoesNotExtendCommittedTip || err == RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock || err == RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight || @@ -1167,15 +1168,15 @@ func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validator // getLineageFromCommittedTip returns the ancestors of the block provided up to, but not // including the committed tip. The first block in the returned slice is the first uncommitted // ancestor. 
-func (bc *Blockchain) getLineageFromCommittedTip(block *MsgDeSoBlock) ([]*BlockNode, error) { +func (bc *Blockchain) getLineageFromCommittedTip(header *MsgDeSoHeader) ([]*BlockNode, error) { highestCommittedBlock, idx := bc.getCommittedTip() if idx == -1 || highestCommittedBlock == nil { return nil, errors.New("getLineageFromCommittedTip: No committed blocks found") } - currentHash := block.Header.PrevBlockHash.NewBlockHash() + currentHash := header.PrevBlockHash.NewBlockHash() ancestors := []*BlockNode{} - prevHeight := block.Header.Height - prevView := block.Header.GetView() + prevHeight := header.Height + prevView := header.GetView() for { currentBlock, exists := bc.blockIndexByHash[*currentHash] if !exists { @@ -1746,13 +1747,25 @@ func (bc *Blockchain) getCommittedTip() (*BlockNode, int) { // This function is not thread-safe. The caller needs to hold the chain lock before // calling this function. func (bc *Blockchain) GetSafeBlocks() ([]*MsgDeSoHeader, error) { + safeBlocks, err := bc.getSafeBlockNodes() + if err != nil { + return nil, errors.Wrapf(err, "GetSafeBlocks: Problem getting safe block nodes") + } + headers := []*MsgDeSoHeader{} + for _, blockNode := range safeBlocks { + headers = append(headers, blockNode.Header) + } + return headers, nil +} + +func (bc *Blockchain) getSafeBlockNodes() ([]*BlockNode, error) { // First get committed tip. committedTip, idx := bc.getCommittedTip() if idx == -1 || committedTip == nil { - return nil, errors.New("GetSafeBlocks: No committed blocks found") + return nil, errors.New("getSafeBlockNodes: No committed blocks found") } // Now get all blocks from the committed tip to the best chain tip. 
- safeBlocks := []*MsgDeSoHeader{committedTip.Header} + safeBlocks := []*BlockNode{committedTip} maxHeightWithSafeBlocks := bc.getMaxSequentialBlockHeightAfter(uint64(committedTip.Height)) for ii := uint64(committedTip.Height + 1); ii < maxHeightWithSafeBlocks+1; ii++ { // If we don't have any blocks at this height, we know that any blocks at a later height are not safe blocks. @@ -1765,7 +1778,7 @@ func (bc *Blockchain) GetSafeBlocks() ([]*MsgDeSoHeader, error) { // TODO: Are there other conditions we should consider? if blockNode.IsValidated() { hasSeenValidatedBlockAtThisHeight = true - safeBlocks = append(safeBlocks, blockNode.Header) + safeBlocks = append(safeBlocks, blockNode) } } // If we didn't see any validated blocks at this height, we know diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 6f0187c93..79badad10 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -5,12 +5,13 @@ package lib import ( "bytes" "fmt" - "golang.org/x/crypto/sha3" "math" "math/rand" "testing" "time" + "golang.org/x/crypto/sha3" + "crypto/sha256" "github.com/deso-protocol/core/bls" @@ -811,13 +812,13 @@ func TestGetLineageFromCommittedTip(t *testing.T) { }, } // If parent is committed tip, we'll have 0 ancestors. 
- ancestors, err := bc.getLineageFromCommittedTip(block) + ancestors, err := bc.getLineageFromCommittedTip(block.Header) require.NoError(t, err) require.Len(t, ancestors, 0) // If parent block is not in block index, we should get an error block.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) - ancestors, err = bc.getLineageFromCommittedTip(block) + ancestors, err = bc.getLineageFromCommittedTip(block.Header) require.Error(t, err) require.Equal(t, err, RuleErrorMissingAncestorBlock) require.Nil(t, ancestors) @@ -834,7 +835,7 @@ func TestGetLineageFromCommittedTip(t *testing.T) { }, StatusBlockStored|StatusBlockValidated|StatusBlockCommitted) bc.bestChain = append(bc.bestChain, block2) bc.blockIndexByHash[*hash2] = block2 - ancestors, err = bc.getLineageFromCommittedTip(block) + ancestors, err = bc.getLineageFromCommittedTip(block.Header) require.Error(t, err) require.Equal(t, err, RuleErrorDoesNotExtendCommittedTip) @@ -842,14 +843,14 @@ func TestGetLineageFromCommittedTip(t *testing.T) { block2.Status = StatusBlockStored | StatusBlockValidated // set new block's parent as block 2. 
block.Header.PrevBlockHash = hash2 - ancestors, err = bc.getLineageFromCommittedTip(block) + ancestors, err = bc.getLineageFromCommittedTip(block.Header) require.NoError(t, err) require.Len(t, ancestors, 1) // Testing error cases // Set block 2 to be ValidateFailed block2.Status = StatusBlockStored | StatusBlockValidateFailed - ancestors, err = bc.getLineageFromCommittedTip(block) + ancestors, err = bc.getLineageFromCommittedTip(block.Header) require.Error(t, err) require.Equal(t, err, RuleErrorAncestorBlockValidationFailed) @@ -857,13 +858,13 @@ func TestGetLineageFromCommittedTip(t *testing.T) { block2.Status = StatusBlockStored | StatusBlockValidated // Set block's height to be <= block2's height block.Header.Height = 2 - ancestors, err = bc.getLineageFromCommittedTip(block) + ancestors, err = bc.getLineageFromCommittedTip(block.Header) require.Error(t, err) require.Equal(t, err, RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight) // Revert block 2's height and set block's view to be <= block2's view block.Header.Height = 3 block.Header.ProposedInView = 2 - ancestors, err = bc.getLineageFromCommittedTip(block) + ancestors, err = bc.getLineageFromCommittedTip(block.Header) require.Error(t, err) require.Equal(t, err, RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock) } @@ -1314,7 +1315,7 @@ func TestTryApplyNewTip(t *testing.T) { newBlockHash, err := newBlock.Hash() require.NoError(t, err) - ancestors, err := bc.getLineageFromCommittedTip(newBlock) + ancestors, err := bc.getLineageFromCommittedTip(newBlock.Header) require.NoError(t, err) checkBestChainForHash := func(hash *BlockHash) bool { return collections.Any(bc.bestChain, func(bn *BlockNode) bool { @@ -1392,7 +1393,7 @@ func TestTryApplyNewTip(t *testing.T) { newBlockNode.Header.Height = 7 newBlockNode.Height = 7 require.NoError(t, err) - ancestors, err = bc.getLineageFromCommittedTip(newBlock) + ancestors, err = bc.getLineageFromCommittedTip(newBlock.Header) require.NoError(t, err) // Try to 
apply newBlock as tip. @@ -1451,7 +1452,7 @@ func TestTryApplyNewTip(t *testing.T) { newBlockNode.Header.Height = 5 newBlockNode.Height = 5 require.NoError(t, err) - ancestors, err = bc.getLineageFromCommittedTip(newBlock) + ancestors, err = bc.getLineageFromCommittedTip(newBlock.Header) require.NoError(t, err) appliedNewTip, connectedBlockHashes, disconnectedBlockHashes, err = bc.tryApplyNewTip(newBlockNode, 6, ancestors) require.True(t, appliedNewTip) From 6d72503972614a52ed171fc619990dfa7740ce43 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 13 Feb 2024 19:14:39 -0500 Subject: [PATCH 482/762] Initialize PoS Chain At Startup To Continue Existing Chain (#1031) --- lib/blockchain.go | 46 ++++++++++++++++++++++++++++++++++++++++++++++ lib/server.go | 12 ++++++++++++ 2 files changed, 58 insertions(+) diff --git a/lib/blockchain.go b/lib/blockchain.go index 746d4dc2d..0d2caf9a8 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -700,6 +700,47 @@ func (bc *Blockchain) _initChain() error { return nil } +func (bc *Blockchain) _applyUncommittedBlocksToBestChain() error { + // For Proof of Stake, we need to update the in-memory data structures to + // include uncommitted blocks that are part of the best chain. This is because + // the initialization above only includes blocks that have been committed. + safeBlockNodes, err := bc.getSafeBlockNodes() + if err != nil { + return errors.Wrapf(err, "_applyUncommittedBlocksToBestChain: ") + } + + // Filter out the committed tip from the safe block nodes. + safeBlockNodes = collections.Filter(safeBlockNodes, func(node *BlockNode) bool { + return !node.IsCommitted() + }) + + // If there are no uncommitted blocks, we're done. + if len(safeBlockNodes) == 0 { + return nil + } + + // Find the safe block with the highest view. That block is the uncommitted tip. 
+ uncommittedTipBlockNode := safeBlockNodes[0] + for _, blockNode := range safeBlockNodes { + if blockNode.Header.ProposedInView > uncommittedTipBlockNode.Header.ProposedInView { + uncommittedTipBlockNode = blockNode + } + } + + // Fetch the lineage of blocks from the committed tip through the uncommitted tip. + lineageFromCommittedTip, err := bc.getLineageFromCommittedTip(uncommittedTipBlockNode.Header) + if err != nil { + return errors.Wrapf(err, "_applyUncommittedBlocksToBestChain: ") + } + + // Add the uncommitted blocks to the in-memory data structures. + if _, _, _, err := bc.tryApplyNewTip(uncommittedTipBlockNode, 0, lineageFromCommittedTip); err != nil { + return errors.Wrapf(err, "_applyUncommittedBlocksToBestChain: ") + } + + return nil +} + // NewBlockchain returns a new blockchain object. It initializes some in-memory // data structures by reading from the db. It also initializes the db if it hasn't // been initialized in the past. This function should only be called once per @@ -763,6 +804,11 @@ func NewBlockchain( return nil, errors.Wrapf(err, "NewBlockchain: ") } + // Update the best chain and best header chain to include uncommitted blocks. + if err := bc._applyUncommittedBlocksToBestChain(); err != nil { + return nil, errors.Wrapf(err, "NewBlockchain: ") + } + return bc, nil } diff --git a/lib/server.go b/lib/server.go index 2e99a07c4..f263b3742 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2815,6 +2815,18 @@ func (srv *Server) Start() { if srv.miner != nil && len(srv.miner.PublicKeys) > 0 { go srv.miner.Start() } + + // On testnet, if the node is configured to be a PoW block producer, and it is configured + // to be also a PoS validator, then we attach block mined listeners to the miner to kick + // off the PoS consensus once the miner is done. 
+ if srv.params.NetworkType == NetworkType_TESTNET && srv.fastHotStuffConsensus != nil { + tipHeight := uint64(srv.blockchain.blockTip().Height) + if srv.params.IsFinalPoWBlockHeight(tipHeight) || srv.params.IsPoSBlockHeight(tipHeight) { + if err := srv.fastHotStuffConsensus.Start(); err != nil { + glog.Errorf("NewServer: Error starting fast hotstuff consensus %v", err) + } + } + } } // SyncPrefixProgress keeps track of sync progress on an individual prefix. It is used in From 7eb28c9b333e8b4433df102062b047396fcb43ff Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 13 Feb 2024 20:03:10 -0500 Subject: [PATCH 483/762] Add Safety Check to validateAndIndexHeaderPoS Base Case (#1032) --- lib/pos_blockchain.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 977aec912..20dd69b07 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -89,17 +89,17 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas _headerBlockNode *BlockNode, _isOrphan bool, _err error, ) { // Look up the header in the block index to check if it has already been validated and indexed. - blockNode, exists := bc.blockIndexByHash[*headerHash] + blockNode, blockNodeExists := bc.blockIndexByHash[*headerHash] // ------------------------------------ Base Cases ----------------------------------- // // The header is already validated. Exit early. - if exists && blockNode.IsHeaderValidated() { + if blockNodeExists && blockNode.IsHeaderValidated() { return blockNode, false, nil } // The header has already failed validations. Exit early. 
- if exists && blockNode.IsHeaderValidateFailed() { + if blockNodeExists && blockNode.IsHeaderValidateFailed() { return nil, false, errors.New("validateAndIndexHeaderPoS: Header already failed validation") } @@ -115,7 +115,7 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas } // Sanity-check that the parent block is an ancestor of the current block. - if parentBlockNode.Height+1 != blockNode.Height { + if blockNodeExists && (parentBlockNode.Height+1 != blockNode.Height) { return nil, false, errors.New("validateAndIndexHeaderPoS: Parent header has " + "greater or equal height compared to the current header.") } From 03d3162c274b8d558548bee822a7243927784e5e Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 13 Feb 2024 20:12:11 -0500 Subject: [PATCH 484/762] Move BlockTimestampDriftNanoSecs to GlobalParams (#1027) --- lib/block_view.go | 22 ++++-------- lib/block_view_types.go | 10 ++++++ lib/constants.go | 2 +- lib/pos_blockchain.go | 68 +++++++++++++++++++++++++++---------- lib/pos_blockchain_test.go | 52 +++++++++++++++------------- lib/pos_snapshot_entries.go | 3 ++ 6 files changed, 99 insertions(+), 58 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 641adf357..fcd13e2f6 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3322,29 +3322,21 @@ func (bav *UtxoView) _connectUpdateGlobalParams( ) } } - } - - if blockHeight >= bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { - var bytesRead int - if len(extraData[FeeBucketGrowthRateBasisPointsKey]) > 0 { - newGlobalParamsEntry.FeeBucketGrowthRateBasisPoints, bytesRead = Uvarint( - extraData[FeeBucketGrowthRateBasisPointsKey], + if len(extraData[BlockTimestampDriftNanoSecsKey]) > 0 { + val, bytesRead := Varint( + extraData[BlockTimestampDriftNanoSecsKey], ) if bytesRead <= 0 { return 0, 0, nil, fmt.Errorf( - "_connectUpdateGlobalParams: unable to decode FeeBucketGrowthRateBasisPoints as uint64", 
+ "_connectUpdateGlobalParams: unable to decode BlockTimestampDriftNanoSecs as int64", ) } - } - if len(extraData[FailingTransactionBMFMultiplierBasisPointsKey]) > 0 { - newGlobalParamsEntry.FailingTransactionBMFMultiplierBasisPoints, bytesRead = Uvarint( - extraData[FailingTransactionBMFMultiplierBasisPointsKey], - ) - if bytesRead <= 0 { + if val < 0 { return 0, 0, nil, fmt.Errorf( - "_connectUpdateGlobalParams: unable to decode FailingTransactionBMFMultiplierBasisPoints as uint64", + "_connectUpdateGlobalParams: BlockTimestampDriftNanoSecs must be >= 0", ) } + newGlobalParamsEntry.BlockTimestampDriftNanoSecs = val } } diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 59d81cd3f..c2a437990 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -4157,6 +4157,10 @@ type GlobalParamsEntry struct { // BMF. The value is expressed in basis points. For example a value of 2500 means that 25% of the fee will be // failing transaction fee will be used in the BMF algorithm. FailingTransactionBMFMultiplierBasisPoints uint64 + + // BlockTimestampDriftNanoSecs is the maximum number of nanoseconds from the current timestamp that + // we will allow a PoS block to be submitted. + BlockTimestampDriftNanoSecs int64 } func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { @@ -4178,6 +4182,7 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { MaximumVestedIntersectionsPerLockupTransaction: gp.MaximumVestedIntersectionsPerLockupTransaction, FeeBucketGrowthRateBasisPoints: gp.FeeBucketGrowthRateBasisPoints, FailingTransactionBMFMultiplierBasisPoints: gp.FailingTransactionBMFMultiplierBasisPoints, + BlockTimestampDriftNanoSecs: gp.BlockTimestampDriftNanoSecs, } } @@ -4204,6 +4209,7 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, IntToBuf(int64(gp.MaximumVestedIntersectionsPerLockupTransaction))...) data = append(data, UintToBuf(gp.FeeBucketGrowthRateBasisPoints)...) 
data = append(data, UintToBuf(gp.FailingTransactionBMFMultiplierBasisPoints)...) + data = append(data, IntToBuf(gp.BlockTimestampDriftNanoSecs)...) } return data } @@ -4284,6 +4290,10 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading FailingTransactionBMFMultiplierBasisPoints") } + gp.BlockTimestampDriftNanoSecs, err = ReadVarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading BlockTimestampDriftNanoSecs") + } } return nil } diff --git a/lib/constants.go b/lib/constants.go index b0f9d86e9..f5d25ab2d 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -713,7 +713,6 @@ type DeSoParams struct { // before they are jailed. DefaultJailInactiveValidatorGracePeriodEpochs uint64 - // TODO: add support for putting the drift in global params. // DefaultBlockTimestampDriftNanoSecs is the default number of nanoseconds // from the current timestamp that we will allow a PoS block to be submitted. 
DefaultBlockTimestampDriftNanoSecs int64 @@ -1506,6 +1505,7 @@ const ( MaximumVestedIntersectionsPerLockupTransactionKey = "MaximumVestedIntersectionsPerLockupTransaction" FeeBucketGrowthRateBasisPointsKey = "FeeBucketGrowthRateBasisPointsKey" FailingTransactionBMFMultiplierBasisPointsKey = "FailingTransactionBMFMultiplierBasisPoints" + BlockTimestampDriftNanoSecsKey = "BlockTimestampDriftNanoSecs" DiamondLevelKey = "DiamondLevel" DiamondPostHashKey = "DiamondPostHash" diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 20dd69b07..e4715a3ea 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -145,9 +145,18 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas header, errors.New("validateAndIndexHeaderPoS: Parent header failed validations"), ) } - + utxoView, err := bc.getUtxoViewAtBlockHash(*header.PrevBlockHash) + if err != nil { + return nil, false, errors.Wrap(err, "validateAndIndexHeaderPoS: Problem getting UtxoView") + } + // We use the snapshot global params to make the drift timestamp check work properly, + // specifically for orphan blocks. + snapshotGlobalParams, err := utxoView.GetCurrentSnapshotGlobalParamsEntry() + if err != nil { + return nil, false, errors.Wrap(err, "validateAndIndexHeaderPoS: Problem getting snapshot global params") + } // Verify that the header is properly formed. 
- if err := bc.isValidBlockHeaderPoS(header); err != nil { + if err := bc.isValidBlockHeaderPoS(header, snapshotGlobalParams); err != nil { return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( header, errors.New("validateAndIndexHeaderPoS: Header failed validations"), ) @@ -371,6 +380,7 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { return errors.Wrap(err, "processOrphanBlockPoS: Problem getting current epoch entry") } var validatorsByStake []*ValidatorEntry + var snapshotGlobalParams *GlobalParamsEntry // If the block is in a previous or future epoch, we need to compute the // proper validator set for the block. We do this by computing the prev/next // epoch entry and then fetching the validator set at the snapshot of the @@ -464,6 +474,13 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { "processOrphanBlockPoS: Problem getting validator set at snapshot at epoch number %d", epochEntrySnapshotAtEpochNumber) } + // Get the snapshot global params based on the snapshot at epoch number for this orphan block. + snapshotGlobalParams, err = utxoView.GetSnapshotGlobalParamsEntryByEpochNumber(epochEntrySnapshotAtEpochNumber) + if err != nil { + return errors.Wrapf(err, + "processOrphanBlockPoS: Problem getting snapshot global params at snapshot at epoch number %d", + epochEntrySnapshotAtEpochNumber) + } } else { // This block is in the current epoch! // First we validate the proposer vote partial signature @@ -501,6 +518,11 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { if err != nil { return errors.Wrap(err, "processOrphanBlockPoS: Problem getting validator set") } + // Get the snapshot global params based on the current snapshot epoch number. 
+ snapshotGlobalParams, err = utxoView.GetCurrentSnapshotGlobalParamsEntry() + if err != nil { + return errors.Wrap(err, "processOrphanBlockPoS: Problem getting snapshot global params") + } } // Okay now we have the validator set ordered by stake, we can validate the QC. if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake); err != nil { @@ -508,9 +530,12 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // As a spam-prevention measure, we just throw away this block and don't store it. return nil } + if err != nil { + return errors.Wrap(err, "processOrphanBlockPoS: Problem getting snapshot global params") + } // All blocks should pass the basic integrity validations, which ensure the block // is not malformed. If the block is malformed, we should store it as ValidateFailed. - if err = bc.isProperlyFormedBlockPoS(block); err != nil { + if err = bc.isProperlyFormedBlockPoS(block, snapshotGlobalParams); err != nil { if _, innerErr := bc.storeValidateFailedBlockInBlockIndex(block); innerErr != nil { return errors.Wrapf(innerErr, "processOrphanBlockPoS: Problem adding validate failed block to block index: %v", err) @@ -651,11 +676,6 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock) (*BlockNode, return bc.storeBlockInBlockIndex(block) } - // Check if the block is properly formed and passes all basic validations. 
- if err = bc.isValidBlockPoS(block); err != nil { - return bc.storeValidateFailedBlockWithWrappedError(block, err) - } - // Validate the block's random seed signature isValidRandomSeedSignature, err := bc.hasValidProposerRandomSeedSignaturePoS(block.Header) if err != nil { @@ -685,6 +705,16 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock) (*BlockNode, return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem getting UtxoView") } + snapshotGlobalParams, err := utxoView.GetCurrentSnapshotGlobalParamsEntry() + if err != nil { + return nil, errors.Wrap(err, "validateAndIndexBlockPoS: Problem getting snapshot global params") + } + + // Check if the block is properly formed and passes all basic validations. + if err = bc.isValidBlockPoS(block, snapshotGlobalParams); err != nil { + return bc.storeValidateFailedBlockWithWrappedError(block, err) + } + // Connect this block to the parent block's UtxoView. txHashes := collections.Transform(block.Txns, func(txn *MsgDeSoTxn) *BlockHash { return txn.Hash() @@ -754,9 +784,9 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS(blockHash *BlockHash) (* // isValidBlockPoS performs all basic block integrity checks. Any error // resulting from this function implies that the block is invalid. -func (bc *Blockchain) isValidBlockPoS(block *MsgDeSoBlock) error { +func (bc *Blockchain) isValidBlockPoS(block *MsgDeSoBlock, snapshotGlobalParams *GlobalParamsEntry) error { // Surface Level validation of the block - if err := bc.isProperlyFormedBlockPoS(block); err != nil { + if err := bc.isProperlyFormedBlockPoS(block, snapshotGlobalParams); err != nil { return err } if err := bc.isBlockTimestampValidRelativeToParentPoS(block.Header); err != nil { @@ -775,9 +805,9 @@ func (bc *Blockchain) isValidBlockPoS(block *MsgDeSoBlock) error { // isValidBlockHeaderPoS performs all basic block header integrity checks. Any // error resulting from this function implies that the block header is invalid. 
-func (bc *Blockchain) isValidBlockHeaderPoS(header *MsgDeSoHeader) error { +func (bc *Blockchain) isValidBlockHeaderPoS(header *MsgDeSoHeader, snapshotGlobalParams *GlobalParamsEntry) error { // Surface Level validation of the block header - if err := bc.isProperlyFormedBlockHeaderPoS(header); err != nil { + if err := bc.isProperlyFormedBlockHeaderPoS(header, snapshotGlobalParams); err != nil { return err } if err := bc.isBlockTimestampValidRelativeToParentPoS(header); err != nil { @@ -813,14 +843,15 @@ func (bc *Blockchain) isBlockTimestampValidRelativeToParentPoS(header *MsgDeSoHe // isProperlyFormedBlockPoS validates the block at a surface level and makes // sure that all fields are populated in a valid manner. It does not verify // signatures nor validate the blockchain state resulting from the block. -func (bc *Blockchain) isProperlyFormedBlockPoS(block *MsgDeSoBlock) error { +func (bc *Blockchain) isProperlyFormedBlockPoS(block *MsgDeSoBlock, snapshotGlobalParams *GlobalParamsEntry, +) error { // First, make sure we have a non-nil block if block == nil { return RuleErrorNilBlock } // Make sure the header is properly formed by itself - if err := bc.isProperlyFormedBlockHeaderPoS(block.Header); err != nil { + if err := bc.isProperlyFormedBlockHeaderPoS(block.Header, snapshotGlobalParams); err != nil { return err } @@ -865,7 +896,8 @@ func (bc *Blockchain) isProperlyFormedBlockPoS(block *MsgDeSoBlock) error { // contents alone, and makes sure that all fields are populated in a valid manner. // It does not verify signatures in the header, nor cross-validate the block with // past blocks in the block index. 
-func (bc *Blockchain) isProperlyFormedBlockHeaderPoS(header *MsgDeSoHeader) error { +func (bc *Blockchain) isProperlyFormedBlockHeaderPoS(header *MsgDeSoHeader, snapshotGlobalParams *GlobalParamsEntry, +) error { // First make sure we have a non-nil header if header == nil { return RuleErrorNilBlockHeader @@ -876,9 +908,9 @@ func (bc *Blockchain) isProperlyFormedBlockHeaderPoS(header *MsgDeSoHeader) erro return RuleErrorNilPrevBlockHash } - // Timestamp validation - // TODO: Add support for putting the drift into global params. - if header.TstampNanoSecs > time.Now().UnixNano()+bc.params.DefaultBlockTimestampDriftNanoSecs { + // Timestamp validation. We use the snapshotted global params to validate the timestamp, specifically + // so that the drift timestamp check behaves properly even for orphan blocks. + if header.TstampNanoSecs > time.Now().UnixNano()+snapshotGlobalParams.BlockTimestampDriftNanoSecs { return RuleErrorPoSBlockTstampNanoSecsInFuture } diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 79badad10..382cbd486 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -83,15 +83,19 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * Txns: txns, TxnConnectStatusByIndex: bitset.NewBitset().Set(0, true), } + utxoView, err := NewUtxoView(bc.db, bc.params, nil, nil, nil) + require.NoError(t, err) + globalParams, err := utxoView.GetCurrentSnapshotGlobalParamsEntry() + require.NoError(t, err) // Validate the block with a valid timeout QC and header. - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) // There should be no error. require.Nil(t, err) // Timeout QC must have at least one transaction and that transaction must be a block reward txn. 
block.Txns = nil - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorBlockWithNoTxns) block.Txns = []*MsgDeSoTxn{ @@ -99,7 +103,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * TxnMeta: &BasicTransferMetadata{}, }, } - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorBlockDoesNotStartWithRewardTxn) // Revert txns to be valid. block.Txns = []*MsgDeSoTxn{ @@ -110,7 +114,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Header's Proposed in view must be exactly one greater than the timeout QC's timed out view block.Header.ProposedInView = 2 - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorPoSTimeoutBlockViewNotOneGreaterThanValidatorsTimeoutQCView) // Revert proposed in view @@ -118,7 +122,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Timeout QC also must have a merkle root block.Header.TransactionMerkleRoot = nil - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorNilMerkleRoot) // Make sure block can't have both timeout and vote QC. @@ -131,13 +135,13 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * }, } block.Header.ValidatorsVoteQC = validatorVoteQC - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorBothTimeoutAndVoteQC) // Make sure block has either timeout or vote QC. 
block.Header.ValidatorsTimeoutAggregateQC = nil block.Header.ValidatorsVoteQC = nil - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorNoTimeoutOrVoteQC) // Reset validator vote QC. @@ -156,12 +160,12 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * require.NoError(t, err) block.Header.TransactionMerkleRoot = merkleRoot // There should be no error. - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Nil(t, err) // Vote QC must have Header's Proposed in view exactly one greater than vote QC's proposed in view. block.Header.ProposedInView = 2 - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorPoSVoteBlockViewNotOneGreaterThanValidatorsVoteQCView) // Revert proposed in view @@ -169,12 +173,12 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Block must have non-nil Merkle root if we have non-zero transactions block.Header.TransactionMerkleRoot = nil - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorNilMerkleRoot) // Block must have a matching merkle root block.Header.TransactionMerkleRoot = &ZeroBlockHash - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorInvalidMerkleRoot) // Reset transactions @@ -187,27 +191,27 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // TxnConnectStatusByIndex tests // TxnConnectStatusByIndex must be non-nil block.TxnConnectStatusByIndex = nil - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorNilTxnConnectStatusByIndex) // TxnConnectStatusByIndexHash must be non-nil 
block.TxnConnectStatusByIndex = bitset.NewBitset().Set(0, true) block.Header.TxnConnectStatusByIndexHash = nil - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorNilTxnConnectStatusByIndexHash) // The hashed version of TxnConnectStatusByIndex must match the actual TxnConnectStatusByIndexHash block.Header.TxnConnectStatusByIndexHash = HashBitset(bitset.NewBitset().Set(0, false)) - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorTxnConnectStatusByIndexHashMismatch) // Reset TxnConnectStatusByIndexHash block.Header.TxnConnectStatusByIndexHash = HashBitset(block.TxnConnectStatusByIndex) // Block must have valid proposer voting public key block.Header.ProposerVotingPublicKey = nil - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorInvalidProposerVotingPublicKey) block.Header.ProposerVotingPublicKey = &bls.PublicKey{} - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorInvalidProposerVotingPublicKey) // Reset proposer voting public key @@ -215,22 +219,22 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Block must have valid proposer public key block.Header.ProposerPublicKey = nil - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorInvalidProposerPublicKey) block.Header.ProposerPublicKey = &ZeroPublicKey - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorInvalidProposerPublicKey) block.Header.ProposerPublicKey = NewPublicKey(RandomBytes(33)) // Block must have valid proposer random seed hash block.Header.ProposerRandomSeedSignature = nil - err = 
bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorInvalidProposerRandomSeedSignature) block.Header.ProposerRandomSeedSignature = &bls.Signature{} - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorInvalidProposerRandomSeedSignature) block.Header.ProposerRandomSeedSignature = signature @@ -243,7 +247,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Block timestamps can't be in the future. block.Header.TstampNanoSecs = time.Now().UnixNano() + (11 * time.Minute).Nanoseconds() - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorPoSBlockTstampNanoSecsInFuture) // Revert the Header's timestamp @@ -251,7 +255,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Block Header version must be 2 block.Header.Version = 1 - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorInvalidPoSBlockHeaderVersion) // Revert block header version @@ -259,7 +263,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Nil prev block hash not allowed block.Header.PrevBlockHash = nil - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorNilPrevBlockHash) // Parent must exist in the block index. 
@@ -269,7 +273,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Nil block header not allowed block.Header = nil - err = bc.isProperlyFormedBlockPoS(block) + err = bc.isProperlyFormedBlockPoS(block, globalParams) require.Equal(t, err, RuleErrorNilBlockHeader) } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 0c7eb8488..37b2bebeb 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -148,6 +148,9 @@ func _mergeGlobalParamEntryDefaults(bav *UtxoView, globalParamsEntry *GlobalPara globalParamsEntryCopy.MaximumVestedIntersectionsPerLockupTransaction = bav.Params.DefaultMaximumVestedIntersectionsPerLockupTransaction } + if globalParamsEntryCopy.BlockTimestampDriftNanoSecs == 0 { + globalParamsEntryCopy.BlockTimestampDriftNanoSecs = bav.Params.DefaultBlockTimestampDriftNanoSecs + } // Return the merged result. return globalParamsEntryCopy From 93d06c03c0c242ec90f586f5865ef3a328d0628f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 14 Feb 2024 11:56:19 -0500 Subject: [PATCH 485/762] Add Mempool Params to GlobalParams (#1033) * Add Mempool Params to GlobalParams * Add setter --- lib/block_view.go | 48 +++++++++++++++++++++++++++++++++++++ lib/block_view_types.go | 29 ++++++++++++++++++++++ lib/constants.go | 35 +++++++++++++++++++++++++++ lib/pos_snapshot_entries.go | 14 +++++++++-- 4 files changed, 124 insertions(+), 2 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index fcd13e2f6..f3f352706 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3338,6 +3338,54 @@ func (bav *UtxoView) _connectUpdateGlobalParams( } newGlobalParamsEntry.BlockTimestampDriftNanoSecs = val } + if len(extraData[MempoolMaxSizeBytesKey]) > 0 { + val, bytesRead := Uvarint( + extraData[MempoolMaxSizeBytesKey], + ) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode MempoolMaxSizeBytes as 
uint64", + ) + } + if val <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: MempoolMaxSizeBytes must be > 0", + ) + } + newGlobalParamsEntry.MempoolMaxSizeBytes = val + } + if len(extraData[MempoolFeeEstimatorNumMempoolBlocksKey]) > 0 { + val, bytesRead := Uvarint( + extraData[MempoolFeeEstimatorNumMempoolBlocksKey], + ) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode MempoolFeeEstimatorNumMempoolBlocks as uint64", + ) + } + if val <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: MempoolFeeEstimatorNumMempoolBlocks must be > 0", + ) + } + newGlobalParamsEntry.MempoolFeeEstimatorNumMempoolBlocks = val + } + if len(extraData[MempoolFeeEstimatorNumPastBlocksKey]) > 0 { + val, bytesRead := Uvarint( + extraData[MempoolFeeEstimatorNumPastBlocksKey], + ) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode MempoolFeeEstimatorNumPastBlocks as uint64", + ) + } + if val <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: MempoolFeeEstimatorNumPastBlocks must be > 0", + ) + } + newGlobalParamsEntry.MempoolFeeEstimatorNumPastBlocks = val + } } var newForbiddenPubKeyEntry *ForbiddenPubKeyEntry diff --git a/lib/block_view_types.go b/lib/block_view_types.go index c2a437990..c0b4a9ad4 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -4161,6 +4161,17 @@ type GlobalParamsEntry struct { // BlockTimestampDriftNanoSecs is the maximum number of nanoseconds from the current timestamp that // we will allow a PoS block to be submitted. BlockTimestampDriftNanoSecs int64 + + // MempoolMaxSizeBytes is the maximum size of the mempool in bytes. + MempoolMaxSizeBytes uint64 + + // MempoolFeeEstimatorNumMempoolBlocks is the number of possible future blocks to a txn may be placed + // into when consider when estimating the fee for a new txn. 
+ MempoolFeeEstimatorNumMempoolBlocks uint64 + + // MempoolFeeEstimatorNumPastBlocks is the number of past blocks to reference txn fees from when estimating + // the fee for a new txn. + MempoolFeeEstimatorNumPastBlocks uint64 } func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { @@ -4183,6 +4194,9 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { FeeBucketGrowthRateBasisPoints: gp.FeeBucketGrowthRateBasisPoints, FailingTransactionBMFMultiplierBasisPoints: gp.FailingTransactionBMFMultiplierBasisPoints, BlockTimestampDriftNanoSecs: gp.BlockTimestampDriftNanoSecs, + MempoolMaxSizeBytes: gp.MempoolMaxSizeBytes, + MempoolFeeEstimatorNumMempoolBlocks: gp.MempoolFeeEstimatorNumMempoolBlocks, + MempoolFeeEstimatorNumPastBlocks: gp.MempoolFeeEstimatorNumPastBlocks, } } @@ -4210,6 +4224,9 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.FeeBucketGrowthRateBasisPoints)...) data = append(data, UintToBuf(gp.FailingTransactionBMFMultiplierBasisPoints)...) data = append(data, IntToBuf(gp.BlockTimestampDriftNanoSecs)...) + data = append(data, UintToBuf(gp.MempoolMaxSizeBytes)...) + data = append(data, UintToBuf(gp.MempoolFeeEstimatorNumMempoolBlocks)...) + data = append(data, UintToBuf(gp.MempoolFeeEstimatorNumPastBlocks)...) 
} return data } @@ -4294,6 +4311,18 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading BlockTimestampDriftNanoSecs") } + gp.MempoolMaxSizeBytes, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MempoolMaxSizeBytes") + } + gp.MempoolFeeEstimatorNumMempoolBlocks, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MempoolFeeEstimatorNumMempoolBlocks") + } + gp.MempoolFeeEstimatorNumPastBlocks, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MempoolFeeEstimatorNumPastBlocks") + } } return nil } diff --git a/lib/constants.go b/lib/constants.go index f5d25ab2d..b6b46daf3 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -731,6 +731,20 @@ type DeSoParams struct { // in GlobalParamsEntry for a detailed description of its usage. DefaultMaximumVestedIntersectionsPerLockupTransaction int + // DefaultMempoolMaxSizeBytes is the default value for GlobalParamsEntry.MempoolMaxSizeBytes. + // See the comment in GlobalParamsEntry for a description of its usage. + DefaultMempoolMaxSizeBytes uint64 + + // DefaultMempoolFeeEstimatorNumMempoolBlocks is the default value for + // GlobalParamsEntry.MempoolFeeEstimatorNumMempoolBlocks. See the comment in GlobalParamsEntry + // for a description of its usage. + DefaultMempoolFeeEstimatorNumMempoolBlocks uint64 + + // DefaultMempoolFeeEstimatorNumPastBlocks is the default value for + // GlobalParamsEntry.MempoolFeeEstimatorNumPastBlocks. See the comment in GlobalParamsEntry + // for a description of its usage. 
+ DefaultMempoolFeeEstimatorNumPastBlocks uint64 + ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -1177,6 +1191,15 @@ var DeSoMainnetParams = DeSoParams{ // The maximum number of vested lockup intersections in a lockup transaction. DefaultMaximumVestedIntersectionsPerLockupTransaction: 1000, + // The maximum size of the mempool in bytes. + DefaultMempoolMaxSizeBytes: 3 * 1024 * 1024 * 1024, // 3GB + + // The number of future blocks to consider when estimating the mempool fee. + DefaultMempoolFeeEstimatorNumMempoolBlocks: 1, + + // The number of past blocks to consider when estimating the mempool fee. + DefaultMempoolFeeEstimatorNumPastBlocks: 50, + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1450,6 +1473,15 @@ var DeSoTestnetParams = DeSoParams{ // The maximum number of vested lockup intersections in a lockup transaction. DefaultMaximumVestedIntersectionsPerLockupTransaction: 1000, + // The maximum size of the mempool in bytes. + DefaultMempoolMaxSizeBytes: 3 * 1024 * 1024 * 1024, // 3GB + + // The number of future blocks to consider when estimating the mempool fee. + DefaultMempoolFeeEstimatorNumMempoolBlocks: 1, + + // The number of past blocks to consider when estimating the mempool fee. 
+ DefaultMempoolFeeEstimatorNumPastBlocks: 50, + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), @@ -1506,6 +1538,9 @@ const ( FeeBucketGrowthRateBasisPointsKey = "FeeBucketGrowthRateBasisPointsKey" FailingTransactionBMFMultiplierBasisPointsKey = "FailingTransactionBMFMultiplierBasisPoints" BlockTimestampDriftNanoSecsKey = "BlockTimestampDriftNanoSecs" + MempoolMaxSizeBytesKey = "MempoolMaxSizeBytes" + MempoolFeeEstimatorNumMempoolBlocksKey = "MempoolFeeEstimatorNumMempoolBlocks" + MempoolFeeEstimatorNumPastBlocksKey = "MempoolFeeEstimatorNumPastBlocks" DiamondLevelKey = "DiamondLevel" DiamondPostHashKey = "DiamondPostHash" diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 37b2bebeb..24ff7ba48 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -3,11 +3,12 @@ package lib import ( "bytes" "fmt" - "github.com/deso-protocol/core/bls" - "github.com/deso-protocol/core/collections" "math" "sort" + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" @@ -151,6 +152,15 @@ func _mergeGlobalParamEntryDefaults(bav *UtxoView, globalParamsEntry *GlobalPara if globalParamsEntryCopy.BlockTimestampDriftNanoSecs == 0 { globalParamsEntryCopy.BlockTimestampDriftNanoSecs = bav.Params.DefaultBlockTimestampDriftNanoSecs } + if globalParamsEntryCopy.MempoolMaxSizeBytes == 0 { + globalParamsEntryCopy.MempoolMaxSizeBytes = bav.Params.DefaultMempoolMaxSizeBytes + } + if globalParamsEntryCopy.MempoolFeeEstimatorNumMempoolBlocks == 0 { + globalParamsEntryCopy.MempoolFeeEstimatorNumMempoolBlocks = bav.Params.DefaultMempoolFeeEstimatorNumMempoolBlocks + } + if globalParamsEntryCopy.MempoolFeeEstimatorNumPastBlocks == 0 { + globalParamsEntryCopy.MempoolFeeEstimatorNumPastBlocks = 
bav.Params.DefaultMempoolFeeEstimatorNumPastBlocks + } // Return the merged result. return globalParamsEntryCopy From d6f2350fb5e3ffdaaac308cb0871e6753255477f Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Wed, 14 Feb 2024 18:55:23 -0500 Subject: [PATCH 486/762] Renamed unsigned block in pos_consensus.go --- lib/pos_consensus.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 84665e664..9b611a63b 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -251,23 +251,23 @@ func (cc *FastHotStuffConsensus) handleBlockProposalEvent( } // Construct the unsigned block - unsignedBlock, err := cc.produceUnsignedBlockForBlockProposalEvent(event, proposerRandomSeedSignature) + blockProposal, err := cc.produceUnsignedBlockForBlockProposalEvent(event, proposerRandomSeedSignature) if err != nil { return errors.Wrapf(err, "Error producing unsigned block for proposal at height %d", event.TipBlockHeight+1) } // Sign the block - blockHash, err := unsignedBlock.Header.Hash() + blockHash, err := blockProposal.Header.Hash() if err != nil { return errors.Errorf("Error hashing block: %v", err) } - unsignedBlock.Header.ProposerVotePartialSignature, err = cc.signer.SignBlockProposal(unsignedBlock.Header.ProposedInView, blockHash) + blockProposal.Header.ProposerVotePartialSignature, err = cc.signer.SignBlockProposal(blockProposal.Header.ProposedInView, blockHash) if err != nil { return errors.Errorf("Error signing block: %v", err) } // Process the block locally - missingBlockHashes, err := cc.tryProcessBlockAsNewTip(unsignedBlock) + missingBlockHashes, err := cc.tryProcessBlockAsNewTip(blockProposal) if err != nil { return errors.Errorf("Error processing block locally: %v", err) } @@ -283,7 +283,7 @@ func (cc *FastHotStuffConsensus) handleBlockProposalEvent( // TODO: Broadcast the block proposal to the network - cc.logBlockProposal(unsignedBlock, blockHash) + cc.logBlockProposal(blockProposal, 
blockHash) return nil } From aa0fb853e0ef12dc2bd016cfcfb6aabf00f71b23 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 14 Feb 2024 19:09:05 -0500 Subject: [PATCH 487/762] Add ProtocolVersion To Regtest Params (#1035) --- lib/constants.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/constants.go b/lib/constants.go index 543e5bbc4..cd3e08995 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -844,6 +844,9 @@ func (params *DeSoParams) EnableRegtest() { // Clear the seeds params.DNSSeeds = []string{} + // Set the protocol version + params.ProtocolVersion = ProtocolVersion2 + // Mine blocks incredibly quickly params.TimeBetweenBlocks = 2 * time.Second params.TimeBetweenDifficultyRetargets = 6 * time.Second From 0def028bc3bf40ca77578a1200e41fa9b938e0a3 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Wed, 14 Feb 2024 19:19:04 -0500 Subject: [PATCH 488/762] Fix naming of FastHotStuffConsensus receiver --- lib/pos_consensus.go | 174 +++++++++++++++++++++---------------------- 1 file changed, 87 insertions(+), 87 deletions(-) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 9b611a63b..6c3382899 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -45,69 +45,69 @@ func NewFastHotStuffConsensus( // FastHotStuffConsensus.Start initializes and starts the FastHotStuffEventLoop based on the // blockchain state. This should only be called once the blockchain has synced, the node is // ready to join the validator network, and the node is able to validate blocks in the steady state. -func (cc *FastHotStuffConsensus) Start() error { +func (fc *FastHotStuffConsensus) Start() error { // Hold the consensus' write lock for thread-safety. 
- cc.lock.Lock() - defer cc.lock.Unlock() + fc.lock.Lock() + defer fc.lock.Unlock() // The consensus can only be kicked off with an uninitialized event loop - if cc.fastHotStuffEventLoop.IsInitialized() { + if fc.fastHotStuffEventLoop.IsInitialized() { return errors.New("FastHotStuffConsensus.Start: FastHotStuffEventLoop is already initialized") } // Hold the blockchain's read lock so that the chain cannot be mutated underneath us. In practice, // this is a no-op, but it guarantees thread-safety in the event that other parts of the codebase // change. - cc.blockchain.ChainLock.RLock() - defer cc.blockchain.ChainLock.RUnlock() + fc.blockchain.ChainLock.RLock() + defer fc.blockchain.ChainLock.RUnlock() // Fetch the current tip of the chain - tipBlock := cc.blockchain.BlockTip() + tipBlock := fc.blockchain.BlockTip() tipHeight := tipBlock.Header.Height // If the chain is not at the final PoW block height or higher, then we cannot start the PoS consensus. - if tipHeight < cc.params.GetFinalPoWBlockHeight() { + if tipHeight < fc.params.GetFinalPoWBlockHeight() { return errors.Errorf( "FastHotStuffConsensus.Start: Block tip %d is not at the final PoW block height", tipBlock.Height, ) } - genesisQC, err := cc.blockchain.GetProofOfStakeGenesisQuorumCertificate() + genesisQC, err := fc.blockchain.GetProofOfStakeGenesisQuorumCertificate() if err != nil { return errors.Errorf("FastHotStuffConsensus.Start: Error creating PoS cutover genesis QC: %v", err) } // Fetch the validator set at each safe block - tipBlockWithValidators, err := cc.fetchValidatorListsForSafeBlocks([]*MsgDeSoHeader{tipBlock.Header}) + tipBlockWithValidators, err := fc.fetchValidatorListsForSafeBlocks([]*MsgDeSoHeader{tipBlock.Header}) if err != nil { return errors.Errorf("FastHotStuffConsensus.Start: Error fetching validator list for tip blocks: %v", err) } // Fetch the safe blocks that are eligible to be extended from by the next incoming tip block - safeBlocks, err := cc.blockchain.GetSafeBlocks() + 
safeBlocks, err := fc.blockchain.GetSafeBlocks() if err != nil { return errors.Errorf("FastHotStuffConsensus.Start: Error fetching safe blocks: %v", err) } // Fetch the validator set at each safe block - safeBlocksWithValidators, err := cc.fetchValidatorListsForSafeBlocks(safeBlocks) + safeBlocksWithValidators, err := fc.fetchValidatorListsForSafeBlocks(safeBlocks) if err != nil { return errors.Errorf("FastHotStuffConsensus.Start: Error fetching validator lists for safe blocks: %v", err) } // Compute the block production internal and timeout base duration as time.Duration - blockProductionInterval := time.Millisecond * time.Duration(cc.blockProductionIntervalMilliseconds) - timeoutBaseDuration := time.Millisecond * time.Duration(cc.timeoutBaseDurationMilliseconds) + blockProductionInterval := time.Millisecond * time.Duration(fc.blockProductionIntervalMilliseconds) + timeoutBaseDuration := time.Millisecond * time.Duration(fc.timeoutBaseDurationMilliseconds) // Initialize and start the event loop - cc.fastHotStuffEventLoop.Init(blockProductionInterval, timeoutBaseDuration, genesisQC, tipBlockWithValidators[0], safeBlocksWithValidators) - cc.fastHotStuffEventLoop.Start() + fc.fastHotStuffEventLoop.Init(blockProductionInterval, timeoutBaseDuration, genesisQC, tipBlockWithValidators[0], safeBlocksWithValidators) + fc.fastHotStuffEventLoop.Start() return nil } -func (cc *FastHotStuffConsensus) IsRunning() bool { - return cc.fastHotStuffEventLoop.IsRunning() +func (fc *FastHotStuffConsensus) IsRunning() bool { + return fc.fastHotStuffEventLoop.IsRunning() } func (fc *FastHotStuffConsensus) Stop() { @@ -121,24 +121,24 @@ func (fc *FastHotStuffConsensus) Stop() { // HandleLocalBlockProposalEvent is called when FastHotStuffEventLoop has signaled that it can // construct a block at a certain block height. This function validates the block proposal signal, // constructs, processes locally, and then broadcasts the block. 
-func (cc *FastHotStuffConsensus) HandleLocalBlockProposalEvent(event *consensus.FastHotStuffEvent) error { +func (fc *FastHotStuffConsensus) HandleLocalBlockProposalEvent(event *consensus.FastHotStuffEvent) error { // Hold a read and write lock on the consensus. This is because we need to check // the current view of the consensus event loop, and to update the blockchain. - cc.lock.Lock() - defer cc.lock.Unlock() + fc.lock.Lock() + defer fc.lock.Unlock() - if !cc.fastHotStuffEventLoop.IsRunning() { + if !fc.fastHotStuffEventLoop.IsRunning() { return errors.Errorf("FastHotStuffConsensus.HandleLocalBlockProposalEvent: FastHotStuffEventLoop is not running") } // Hold the blockchain's write lock so that the chain cannot be mutated underneath us. // In practice, this is a no-op, but it guarantees thread-safety in the event that other // parts of the codebase change. - cc.blockchain.ChainLock.Lock() - defer cc.blockchain.ChainLock.Unlock() + fc.blockchain.ChainLock.Lock() + defer fc.blockchain.ChainLock.Unlock() // Handle the event as a block proposal event for a regular block - if err := cc.handleBlockProposalEvent(event, consensus.FastHotStuffEventTypeConstructVoteQC); err != nil { + if err := fc.handleBlockProposalEvent(event, consensus.FastHotStuffEventTypeConstructVoteQC); err != nil { return errors.Wrapf(err, "FastHotStuffConsensus.HandleLocalBlockProposalEvent: ") } @@ -149,24 +149,24 @@ func (cc *FastHotStuffConsensus) HandleLocalBlockProposalEvent(event *consensus. // HandleLocalTimeoutBlockProposalEvent is called when FastHotStuffEventLoop has signaled that it can // construct a timeout block at a certain block height. This function validates the timeout block proposal // signal, constructs, processes locally, and then broadcasts the block. 
-func (cc *FastHotStuffConsensus) HandleLocalTimeoutBlockProposalEvent(event *consensus.FastHotStuffEvent) error { +func (fc *FastHotStuffConsensus) HandleLocalTimeoutBlockProposalEvent(event *consensus.FastHotStuffEvent) error { // Hold a read and write lock on the consensus. This is because we need to check // the current view of the consensus event loop, and to update the blockchain. - cc.lock.Lock() - defer cc.lock.Unlock() + fc.lock.Lock() + defer fc.lock.Unlock() - if !cc.fastHotStuffEventLoop.IsRunning() { + if !fc.fastHotStuffEventLoop.IsRunning() { return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent: FastHotStuffEventLoop is not running") } // Hold the blockchain's write lock so that the chain cannot be mutated underneath us. // In practice, this is a no-op, but it guarantees thread-safety in the event that other // parts of the codebase change. - cc.blockchain.ChainLock.Lock() - defer cc.blockchain.ChainLock.Unlock() + fc.blockchain.ChainLock.Lock() + defer fc.blockchain.ChainLock.Unlock() // Handle the event as a block proposal event for a timeout block - if err := cc.handleBlockProposalEvent(event, consensus.FastHotStuffEventTypeConstructTimeoutQC); err != nil { + if err := fc.handleBlockProposalEvent(event, consensus.FastHotStuffEventTypeConstructTimeoutQC); err != nil { return errors.Wrapf(err, "FastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent: ") } @@ -187,7 +187,7 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutBlockProposalEvent(event *con // - This will connect the block to the blockchain, remove the transactions from the // mempool, and process the vote in the FastHotStuffEventLoop // 6. 
Broadcast the block to the network -func (cc *FastHotStuffConsensus) handleBlockProposalEvent( +func (fc *FastHotStuffConsensus) handleBlockProposalEvent( event *consensus.FastHotStuffEvent, expectedEventType consensus.FastHotStuffEventType, ) error { @@ -203,7 +203,7 @@ func (cc *FastHotStuffConsensus) handleBlockProposalEvent( // Fetch the parent block parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) - parentBlock, parentBlockExists := cc.blockchain.blockIndexByHash[*parentBlockHash] + parentBlock, parentBlockExists := fc.blockchain.blockIndexByHash[*parentBlockHash] if !parentBlockExists { return errors.Errorf("Error fetching parent block: %v", parentBlockHash) } @@ -229,7 +229,7 @@ func (cc *FastHotStuffConsensus) handleBlockProposalEvent( // Validate that the event's view is not stale. If the view is stale, then it means that the consensus // has advanced to the next view after queuing this block proposal event. This is normal and an expected // race condition in the steady-state. - currentView := cc.fastHotStuffEventLoop.GetCurrentView() + currentView := fc.fastHotStuffEventLoop.GetCurrentView() if currentView > event.View { return errors.Errorf( "Error constructing block at height %d. 
Stale view %d", @@ -245,13 +245,13 @@ func (cc *FastHotStuffConsensus) handleBlockProposalEvent( } // Compute the next proposer random seed signature - proposerRandomSeedSignature, err := cc.signer.SignRandomSeedHash(parentBlockRandomSeedHash) + proposerRandomSeedSignature, err := fc.signer.SignRandomSeedHash(parentBlockRandomSeedHash) if err != nil { return errors.Wrapf(err, "Error signing random seed hash for block at height %d: ", event.TipBlockHeight+1) } // Construct the unsigned block - blockProposal, err := cc.produceUnsignedBlockForBlockProposalEvent(event, proposerRandomSeedSignature) + blockProposal, err := fc.produceUnsignedBlockForBlockProposalEvent(event, proposerRandomSeedSignature) if err != nil { return errors.Wrapf(err, "Error producing unsigned block for proposal at height %d", event.TipBlockHeight+1) } @@ -261,13 +261,13 @@ func (cc *FastHotStuffConsensus) handleBlockProposalEvent( if err != nil { return errors.Errorf("Error hashing block: %v", err) } - blockProposal.Header.ProposerVotePartialSignature, err = cc.signer.SignBlockProposal(blockProposal.Header.ProposedInView, blockHash) + blockProposal.Header.ProposerVotePartialSignature, err = fc.signer.SignBlockProposal(blockProposal.Header.ProposedInView, blockHash) if err != nil { return errors.Errorf("Error signing block: %v", err) } // Process the block locally - missingBlockHashes, err := cc.tryProcessBlockAsNewTip(blockProposal) + missingBlockHashes, err := fc.tryProcessBlockAsNewTip(blockProposal) if err != nil { return errors.Errorf("Error processing block locally: %v", err) } @@ -283,7 +283,7 @@ func (cc *FastHotStuffConsensus) handleBlockProposalEvent( // TODO: Broadcast the block proposal to the network - cc.logBlockProposal(blockProposal, blockHash) + fc.logBlockProposal(blockProposal, blockHash) return nil } @@ -296,13 +296,13 @@ func (cc *FastHotStuffConsensus) handleBlockProposalEvent( // 2. Construct the vote message // 3. Process the vote in the consensus module // 4. 
Broadcast the vote msg to the network -func (cc *FastHotStuffConsensus) HandleLocalVoteEvent(event *consensus.FastHotStuffEvent) error { +func (fc *FastHotStuffConsensus) HandleLocalVoteEvent(event *consensus.FastHotStuffEvent) error { // Hold a read lock on the consensus. This is because we need to check the // current view and block height of the consensus module. - cc.lock.Lock() - defer cc.lock.Unlock() + fc.lock.Lock() + defer fc.lock.Unlock() - if !cc.fastHotStuffEventLoop.IsRunning() { + if !fc.fastHotStuffEventLoop.IsRunning() { return errors.Errorf("FastHotStuffConsensus.HandleLocalVoteEvent: FastHotStuffEventLoop is not running") } @@ -326,20 +326,20 @@ func (cc *FastHotStuffConsensus) HandleLocalVoteEvent(event *consensus.FastHotSt voteMsg := NewMessage(MsgTypeValidatorVote).(*MsgDeSoValidatorVote) voteMsg.MsgVersion = MsgValidatorVoteVersion0 voteMsg.ProposedInView = event.View - voteMsg.VotingPublicKey = cc.signer.GetPublicKey() + voteMsg.VotingPublicKey = fc.signer.GetPublicKey() // Get the block hash voteMsg.BlockHash = BlockHashFromConsensusInterface(event.TipBlockHash) // Sign the vote message - voteMsg.VotePartialSignature, err = cc.signer.SignValidatorVote(event.View, event.TipBlockHash) + voteMsg.VotePartialSignature, err = fc.signer.SignValidatorVote(event.View, event.TipBlockHash) if err != nil { // This should never happen as long as the BLS signer is initialized correctly. return errors.Errorf("FastHotStuffConsensus.HandleLocalVoteEvent: Error signing validator vote: %v", err) } // Process the vote message locally in the FastHotStuffEventLoop - if err := cc.fastHotStuffEventLoop.ProcessValidatorVote(voteMsg); err != nil { + if err := fc.fastHotStuffEventLoop.ProcessValidatorVote(voteMsg); err != nil { // If we can't process the vote locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. Something is very wrong. We should not // broadcast it to the network. 
@@ -354,12 +354,12 @@ func (cc *FastHotStuffConsensus) HandleLocalVoteEvent(event *consensus.FastHotSt // HandleValidatorVote is called when we receive a validator vote message from a peer. This function processes // the vote locally in the FastHotStuffEventLoop. -func (cc *FastHotStuffConsensus) HandleValidatorVote(pp *Peer, msg *MsgDeSoValidatorVote) error { +func (fc *FastHotStuffConsensus) HandleValidatorVote(pp *Peer, msg *MsgDeSoValidatorVote) error { // No need to hold a lock on the consensus because this function is a pass-through // for the FastHotStuffEventLoop which guarantees thread-safety for its callers // Process the vote message locally in the FastHotStuffEventLoop - if err := cc.fastHotStuffEventLoop.ProcessValidatorVote(msg); err != nil { + if err := fc.fastHotStuffEventLoop.ProcessValidatorVote(msg); err != nil { // If we can't process the vote locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. return errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorVote: Error processing vote: ") @@ -378,21 +378,21 @@ func (cc *FastHotStuffConsensus) HandleValidatorVote(pp *Peer, msg *MsgDeSoValid // 2. Construct the timeout message // 3. Process the timeout in the consensus module // 4. Broadcast the timeout msg to the network -func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHotStuffEvent) error { +func (fc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHotStuffEvent) error { // Hold a read lock on the consensus. This is because we need to check the // current view and block height of the consensus module. 
- cc.lock.Lock() - defer cc.lock.Unlock() + fc.lock.Lock() + defer fc.lock.Unlock() - if !cc.fastHotStuffEventLoop.IsRunning() { + if !fc.fastHotStuffEventLoop.IsRunning() { return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: FastHotStuffEventLoop is not running") } // Hold the blockchain's write lock so that the chain cannot be mutated underneath us. // In practice, this is a no-op, but it guarantees thread-safety in the event that other // parts of the codebase change. - cc.blockchain.ChainLock.RLock() - defer cc.blockchain.ChainLock.RUnlock() + fc.blockchain.ChainLock.RLock() + defer fc.blockchain.ChainLock.RUnlock() var err error @@ -401,7 +401,7 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Received improperly formed timeout event: %v", event) } - if event.View != cc.fastHotStuffEventLoop.GetCurrentView() { + if event.View != fc.fastHotStuffEventLoop.GetCurrentView() { // It's possible that the event loop signaled to timeout, but at the same time, we // received a block proposal from the network and advanced the view. This is normal // and an expected race condition in the steady-state. @@ -413,7 +413,7 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo // Locally advance the event loop's view so that the node is locally running the Fast-HotStuff // protocol correctly. Any errors below related to broadcasting the timeout message should not // affect the correctness of the protocol's local execution. - if _, err := cc.fastHotStuffEventLoop.AdvanceViewOnTimeout(); err != nil { + if _, err := fc.fastHotStuffEventLoop.AdvanceViewOnTimeout(); err != nil { // This should never happen as long as the event loop is running. If it happens, we return // the error and let the caller handle it. 
return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error advancing view on timeout: %v", err) @@ -423,7 +423,7 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo tipBlockHash := BlockHashFromConsensusInterface(event.TipBlockHash) // Fetch the HighQC from the Blockchain struct - tipBlockNode, tipBlockExists := cc.blockchain.blockIndexByHash[*tipBlockHash] + tipBlockNode, tipBlockExists := fc.blockchain.blockIndexByHash[*tipBlockHash] if !tipBlockExists { return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error fetching tip block: %v", tipBlockHash) } @@ -432,12 +432,12 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo timeoutMsg := NewMessage(MsgTypeValidatorTimeout).(*MsgDeSoValidatorTimeout) timeoutMsg.MsgVersion = MsgValidatorTimeoutVersion0 timeoutMsg.TimedOutView = event.View - timeoutMsg.VotingPublicKey = cc.signer.GetPublicKey() + timeoutMsg.VotingPublicKey = fc.signer.GetPublicKey() - if cc.params.IsFinalPoWBlockHeight(tipBlockNode.Header.Height) { + if fc.params.IsFinalPoWBlockHeight(tipBlockNode.Header.Height) { // If the tip block is the final block of the PoW chain, then we can use the PoS chain's genesis block // as the highQC for it. 
- if timeoutMsg.HighQC, err = cc.blockchain.GetProofOfStakeGenesisQuorumCertificate(); err != nil { + if timeoutMsg.HighQC, err = fc.blockchain.GetProofOfStakeGenesisQuorumCertificate(); err != nil { return errors.Errorf("FastHotStuffConsensus.Start: Error creating PoS cutover genesis QC: %v", err) } } else { @@ -446,14 +446,14 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo } // Sign the timeout message - timeoutMsg.TimeoutPartialSignature, err = cc.signer.SignValidatorTimeout(event.View, timeoutMsg.HighQC.GetView()) + timeoutMsg.TimeoutPartialSignature, err = fc.signer.SignValidatorTimeout(event.View, timeoutMsg.HighQC.GetView()) if err != nil { // This should never happen as long as the BLS signer is initialized correctly. return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error signing validator timeout: %v", err) } // Process the timeout message locally in the FastHotStuffEventLoop - if err := cc.fastHotStuffEventLoop.ProcessValidatorTimeout(timeoutMsg); err != nil { + if err := fc.fastHotStuffEventLoop.ProcessValidatorTimeout(timeoutMsg); err != nil { // This should never happen. If we error here, it means that the timeout message is stale // beyond the committed tip, the timeout message is malformed, or the timeout message is // is duplicated for the same view. In any case, something is very wrong. We should not @@ -469,12 +469,12 @@ func (cc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo // HandleValidatorTimeout is called when we receive a validator timeout message from a peer. This function // processes the timeout locally in the FastHotStuffEventLoop. 
-func (cc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeout) error { +func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeout) error { // No need to hold a lock on the consensus because this function is a pass-through // for the FastHotStuffEventLoop which guarantees thread-safety for its callers. // Process the timeout message locally in the FastHotStuffEventLoop - if err := cc.fastHotStuffEventLoop.ProcessValidatorTimeout(msg); err != nil { + if err := fc.fastHotStuffEventLoop.ProcessValidatorTimeout(msg); err != nil { // If we can't process the timeout locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. return errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout: ") @@ -484,26 +484,26 @@ func (cc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoVa return nil } -func (cc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error { +func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error { // Hold a lock on the consensus, because we will need to mutate the Blockchain // and the FastHotStuffEventLoop data structures. - cc.lock.Lock() - defer cc.lock.Unlock() + fc.lock.Lock() + defer fc.lock.Unlock() - if !cc.fastHotStuffEventLoop.IsRunning() { + if !fc.fastHotStuffEventLoop.IsRunning() { return errors.Errorf("FastHotStuffConsensus.HandleBlock: FastHotStuffEventLoop is not running") } // Hold the blockchain's write lock so that the chain cannot be mutated underneath us. // In practice, this is a no-op, but it guarantees thread-safety in the event that other // parts of the codebase change. - cc.blockchain.ChainLock.Lock() - defer cc.blockchain.ChainLock.Unlock() + fc.blockchain.ChainLock.Lock() + defer fc.blockchain.ChainLock.Unlock() // Try to apply the block as the new tip of the blockchain. 
If the block is an orphan, then // we will get back a list of missing ancestor block hashes. We can fetch the missing blocks // from the network and retry. - missingBlockHashes, err := cc.tryProcessBlockAsNewTip(msg) + missingBlockHashes, err := fc.tryProcessBlockAsNewTip(msg) if err != nil { // If we get an error here, it means something went wrong with the block processing algorithm. // Nothing we can do to recover here. @@ -533,11 +533,11 @@ func (cc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error // // Reference Implementation: // https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L573 -func (cc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([]*BlockHash, error) { +func (fc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([]*BlockHash, error) { // Try to apply the block locally as the new tip of the blockchain - successfullyAppliedNewTip, _, missingBlockHashes, err := cc.blockchain.processBlockPoS( + successfullyAppliedNewTip, _, missingBlockHashes, err := fc.blockchain.processBlockPoS( block, // Pass in the block itself - cc.fastHotStuffEventLoop.GetCurrentView(), // Pass in the current view to ensure we don't process a stale block + fc.fastHotStuffEventLoop.GetCurrentView(), // Pass in the current view to ensure we don't process a stale block true, // Make sure we verify signatures in the block ) if err != nil { @@ -556,13 +556,13 @@ func (cc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ // safe blocks // Fetch the safe blocks that are eligible to be extended from by the next incoming tip block - safeBlocks, err := cc.blockchain.GetSafeBlocks() + safeBlocks, err := fc.blockchain.GetSafeBlocks() if err != nil { return nil, errors.Errorf("error fetching safe blocks: %v", err) } // Fetch the validator set at each safe block - safeBlocksWithValidators, err := 
cc.fetchValidatorListsForSafeBlocks(safeBlocks) + safeBlocksWithValidators, err := fc.fetchValidatorListsForSafeBlocks(safeBlocks) if err != nil { return nil, errors.Errorf("error fetching validator lists for safe blocks: %v", err) } @@ -572,7 +572,7 @@ func (cc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ // it did not result in a new tip. if !successfullyAppliedNewTip { // Update the safe blocks to the FastHotStuffEventLoop - if err = cc.fastHotStuffEventLoop.UpdateSafeBlocks(safeBlocksWithValidators); err != nil { + if err = fc.fastHotStuffEventLoop.UpdateSafeBlocks(safeBlocksWithValidators); err != nil { return nil, errors.Errorf("Error processing safe blocks locally: %v", err) } @@ -586,16 +586,16 @@ func (cc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ // Fetch the new tip from the blockchain. Note: the new tip may or may not be the input block itself. // It's possible that there was a descendant of the tip block that was previously stored as an orphan // in the Blockchain, and was applied as the new tip. 
- tipBlock := cc.blockchain.BlockTip().Header + tipBlock := fc.blockchain.BlockTip().Header // Fetch the validator set at the new tip block - tipBlockWithValidators, err := cc.fetchValidatorListsForSafeBlocks([]*MsgDeSoHeader{tipBlock}) + tipBlockWithValidators, err := fc.fetchValidatorListsForSafeBlocks([]*MsgDeSoHeader{tipBlock}) if err != nil { return nil, errors.Errorf("Error fetching validator lists for tip block: %v", err) } // Pass the new tip and safe blocks to the FastHotStuffEventLoop - if err = cc.fastHotStuffEventLoop.ProcessTipBlock(tipBlockWithValidators[0], safeBlocksWithValidators); err != nil { + if err = fc.fastHotStuffEventLoop.ProcessTipBlock(tipBlockWithValidators[0], safeBlocksWithValidators); err != nil { return nil, errors.Errorf("Error processing tip block locally: %v", err) } @@ -606,7 +606,7 @@ func (cc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ // produceUnsignedBlockForBlockProposalEvent is a helper function that can produce a new block for proposal based // on Fast-HotStuff block proposal event. This function expects the event to have been pre-validated by the caller. // If the event is malformed or invalid, then the behavior of this function is undefined. 
-func (cc *FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( +func (fc *FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( event *consensus.FastHotStuffEvent, proposerRandomSeedSignature *bls.Signature, ) (*MsgDeSoBlock, error) { @@ -614,20 +614,20 @@ func (cc *FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) // Fetch the parent block - parentBlock, parentBlockExists := cc.blockchain.blockIndexByHash[*parentBlockHash] + parentBlock, parentBlockExists := fc.blockchain.blockIndexByHash[*parentBlockHash] if !parentBlockExists { return nil, errors.Errorf("Error fetching parent block: %v", parentBlockHash) } // Build a UtxoView at the parent block - utxoViewAtParent, err := cc.blockchain.getUtxoViewAtBlockHash(*parentBlockHash) + utxoViewAtParent, err := fc.blockchain.getUtxoViewAtBlockHash(*parentBlockHash) if err != nil { // This should never happen as long as the parent block is a descendant of the committed tip. return nil, errors.Errorf("Error fetching UtxoView for parent block: %v", parentBlockHash) } // Dynamically create a new block producer at the current block height - blockProducer, err := cc.createBlockProducer(utxoViewAtParent, parentBlock.Header.TstampNanoSecs) + blockProducer, err := fc.createBlockProducer(utxoViewAtParent, parentBlock.Header.TstampNanoSecs) if err != nil { return nil, errors.Errorf("Error creating block producer: %v", err) } @@ -675,7 +675,7 @@ func (cc *FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( // the current or next epoch after the committed tip, then this function returns an error. Note: it is not possible // for safe blocks to precede the committed tip or to belong to an epoch that is more than one epoch ahead of the // committed tip. 
-func (cc *FastHotStuffConsensus) fetchValidatorListsForSafeBlocks(blocks []*MsgDeSoHeader) ( +func (fc *FastHotStuffConsensus) fetchValidatorListsForSafeBlocks(blocks []*MsgDeSoHeader) ( []consensus.BlockWithValidatorList, error, ) { @@ -690,7 +690,7 @@ func (cc *FastHotStuffConsensus) fetchValidatorListsForSafeBlocks(blocks []*MsgD // Create a UtxoView for the committed tip block. We will use this to fetch the validator set for // all of the safe blocks. - utxoView, err := NewUtxoView(cc.blockchain.db, cc.params, cc.blockchain.postgres, cc.blockchain.snapshot, nil) + utxoView, err := NewUtxoView(fc.blockchain.db, fc.params, fc.blockchain.postgres, fc.blockchain.snapshot, nil) if err != nil { return nil, errors.Errorf("Error creating UtxoView: %v", err) } From f0c7b5d6a23174b450d0e69c25ef266d548e5954 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Wed, 14 Feb 2024 19:53:29 -0500 Subject: [PATCH 489/762] Fix Constructor Name for NetworkManager --- lib/network_manager.go | 11 ++++++----- lib/server.go | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/lib/network_manager.go b/lib/network_manager.go index 9a83676bc..f3b138c33 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -2,6 +2,11 @@ package lib import ( "fmt" + "net" + "strconv" + "sync" + "time" + "github.com/btcsuite/btcd/addrmgr" "github.com/btcsuite/btcd/wire" "github.com/deso-protocol/core/bls" @@ -9,10 +14,6 @@ import ( "github.com/deso-protocol/core/consensus" "github.com/golang/glog" "github.com/pkg/errors" - "net" - "strconv" - "sync" - "time" ) // NetworkManager is a structure that oversees all connections to RemoteNodes. 
NetworkManager has the following @@ -76,7 +77,7 @@ type NetworkManager struct { exitGroup sync.WaitGroup } -func NewConnectionController(params *DeSoParams, cmgr *ConnectionManager, rnManager *RemoteNodeManager, +func NewNetworkManager(params *DeSoParams, cmgr *ConnectionManager, rnManager *RemoteNodeManager, blsKeystore *BLSKeystore, addrMgr *addrmgr.AddrManager, connectIps []string, targetNonValidatorOutboundRemoteNodes uint32, targetNonValidatorInboundRemoteNodes uint32, limitOneInboundConnectionPerIP bool) *NetworkManager { diff --git a/lib/server.go b/lib/server.go index dd13023b2..76017f8a0 100644 --- a/lib/server.go +++ b/lib/server.go @@ -515,7 +515,7 @@ func NewServer( nodeServices |= SFPosValidator } rnManager := NewRemoteNodeManager(srv, _chain, _cmgr, _blsKeystore, _params, _minFeeRateNanosPerKB, nodeServices) - srv.networkManager = NewConnectionController(_params, _cmgr, rnManager, _blsKeystore, _desoAddrMgr, + srv.networkManager = NewNetworkManager(_params, _cmgr, rnManager, _blsKeystore, _desoAddrMgr, _connectIps, _targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP) if srv.stateChangeSyncer != nil { From 365e30838e722af183b4ca75859b21e27cccfd79 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 15 Feb 2024 11:34:30 -0500 Subject: [PATCH 490/762] Remove Block Timestamp Drift Check From processHeaderPoS (#1038) --- lib/pos_blockchain.go | 34 ++++++++++++---------------------- 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index e4715a3ea..0d3093e76 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -145,18 +145,9 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas header, errors.New("validateAndIndexHeaderPoS: Parent header failed validations"), ) } - utxoView, err := bc.getUtxoViewAtBlockHash(*header.PrevBlockHash) - if err != nil { - return nil, false, errors.Wrap(err, 
"validateAndIndexHeaderPoS: Problem getting UtxoView") - } - // We use the snapshot global params to make the drift timestamp check work properly, - // specifically for orphan blocks. - snapshotGlobalParams, err := utxoView.GetCurrentSnapshotGlobalParamsEntry() - if err != nil { - return nil, false, errors.Wrap(err, "validateAndIndexHeaderPoS: Problem getting snapshot global params") - } + // Verify that the header is properly formed. - if err := bc.isValidBlockHeaderPoS(header, snapshotGlobalParams); err != nil { + if err := bc.isValidBlockHeaderPoS(header); err != nil { return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( header, errors.New("validateAndIndexHeaderPoS: Header failed validations"), ) @@ -805,9 +796,9 @@ func (bc *Blockchain) isValidBlockPoS(block *MsgDeSoBlock, snapshotGlobalParams // isValidBlockHeaderPoS performs all basic block header integrity checks. Any // error resulting from this function implies that the block header is invalid. -func (bc *Blockchain) isValidBlockHeaderPoS(header *MsgDeSoHeader, snapshotGlobalParams *GlobalParamsEntry) error { +func (bc *Blockchain) isValidBlockHeaderPoS(header *MsgDeSoHeader) error { // Surface Level validation of the block header - if err := bc.isProperlyFormedBlockHeaderPoS(header, snapshotGlobalParams); err != nil { + if err := bc.isProperlyFormedBlockHeaderPoS(header); err != nil { return err } if err := bc.isBlockTimestampValidRelativeToParentPoS(header); err != nil { @@ -851,10 +842,16 @@ func (bc *Blockchain) isProperlyFormedBlockPoS(block *MsgDeSoBlock, snapshotGlob } // Make sure the header is properly formed by itself - if err := bc.isProperlyFormedBlockHeaderPoS(block.Header, snapshotGlobalParams); err != nil { + if err := bc.isProperlyFormedBlockHeaderPoS(block.Header); err != nil { return err } + // Timestamp validation. 
We use the snapshotted global params to validate the timestamp, specifically + // so that the drift timestamp check behaves properly even for orphan blocks. + if block.Header.TstampNanoSecs > time.Now().UnixNano()+snapshotGlobalParams.BlockTimestampDriftNanoSecs { + return RuleErrorPoSBlockTstampNanoSecsInFuture + } + // If the header is properly formed, we can check the rest of the block. // All blocks must have at least one txn @@ -896,8 +893,7 @@ func (bc *Blockchain) isProperlyFormedBlockPoS(block *MsgDeSoBlock, snapshotGlob // contents alone, and makes sure that all fields are populated in a valid manner. // It does not verify signatures in the header, nor cross-validate the block with // past blocks in the block index. -func (bc *Blockchain) isProperlyFormedBlockHeaderPoS(header *MsgDeSoHeader, snapshotGlobalParams *GlobalParamsEntry, -) error { +func (bc *Blockchain) isProperlyFormedBlockHeaderPoS(header *MsgDeSoHeader) error { // First make sure we have a non-nil header if header == nil { return RuleErrorNilBlockHeader @@ -908,12 +904,6 @@ func (bc *Blockchain) isProperlyFormedBlockHeaderPoS(header *MsgDeSoHeader, snap return RuleErrorNilPrevBlockHash } - // Timestamp validation. We use the snapshotted global params to validate the timestamp, specifically - // so that the drift timestamp check behaves properly even for orphan blocks. 
- if header.TstampNanoSecs > time.Now().UnixNano()+snapshotGlobalParams.BlockTimestampDriftNanoSecs { - return RuleErrorPoSBlockTstampNanoSecsInFuture - } - // Header validation if header.Version != HeaderVersion2 { return RuleErrorInvalidPoSBlockHeaderVersion From b6311d3b7d1e110003f9b8eff44eb7856a24c6fe Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 15 Feb 2024 16:55:26 -0500 Subject: [PATCH 491/762] CalcBlockRewardNanos returns 0 if block height is a PoS block height (#1039) --- lib/block_producer.go | 14 ++++++++++---- lib/block_view.go | 2 +- lib/block_view_test.go | 2 +- lib/blockchain_test.go | 10 ++++------ lib/miner.go | 2 +- lib/supply.go | 6 +++++- lib/supply_test.go | 41 ++++++++++++++++++----------------------- 7 files changed, 40 insertions(+), 37 deletions(-) diff --git a/lib/block_producer.go b/lib/block_producer.go index e968d5463..a4763abf6 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -330,7 +330,8 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) // Now that the total fees have been computed, set the value of the block reward // output. - blockRewardOutput.AmountNanos = CalcBlockRewardNanos(uint32(blockRet.Header.Height)) + totalFeeNanos + blockRewardOutput.AmountNanos = CalcBlockRewardNanos(uint32(blockRet.Header.Height), desoBlockProducer.params) + + totalFeeNanos // Compute the merkle root for the block now that all of the transactions have // been added. 
@@ -434,7 +435,11 @@ func (desoBlockProducer *DeSoBlockProducer) AddBlockTemplate(block *MsgDeSoBlock } } -func RecomputeBlockRewardWithBlockRewardOutputPublicKey(block *MsgDeSoBlock, blockRewardOutputPublicKeyBytes []byte) (*MsgDeSoBlock, error) { +func RecomputeBlockRewardWithBlockRewardOutputPublicKey( + block *MsgDeSoBlock, + blockRewardOutputPublicKeyBytes []byte, + params *DeSoParams, +) (*MsgDeSoBlock, error) { blockRewardOutputPublicKey, err := btcec.ParsePubKey(blockRewardOutputPublicKeyBytes, btcec.S256()) if err != nil { return nil, errors.Wrap( @@ -458,7 +463,7 @@ func RecomputeBlockRewardWithBlockRewardOutputPublicKey(block *MsgDeSoBlock, blo } } } - block.Txns[0].TxOutputs[0].AmountNanos = CalcBlockRewardNanos(uint32(block.Header.Height)) + totalFees + block.Txns[0].TxOutputs[0].AmountNanos = CalcBlockRewardNanos(uint32(block.Header.Height), params) + totalFees return block, nil } @@ -490,7 +495,8 @@ func (blockProducer *DeSoBlockProducer) GetHeadersAndExtraDatas( // Swap out the public key in the block latestBLockCopy.Txns[0].TxOutputs[0].PublicKey = publicKeyBytes - latestBLockCopy, err = RecomputeBlockRewardWithBlockRewardOutputPublicKey(latestBLockCopy, publicKeyBytes) + latestBLockCopy, err = RecomputeBlockRewardWithBlockRewardOutputPublicKey( + latestBLockCopy, publicKeyBytes, blockProducer.params) if err != nil { return "", nil, nil, nil, errors.Wrap( fmt.Errorf("GetBlockTemplate: Problem recomputing block reward: %v", err), "") diff --git a/lib/block_view.go b/lib/block_view.go index f3f352706..b24641777 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4273,7 +4273,7 @@ func (bav *UtxoView) ConnectBlock( } // Verify that the block reward does not overflow when added to // the block's fees. 
- blockReward := CalcBlockRewardNanos(uint32(blockHeader.Height)) + blockReward := CalcBlockRewardNanos(uint32(blockHeader.Height), bav.Params) if totalFees > MaxNanos || blockReward > (math.MaxUint64-totalFees) { diff --git a/lib/block_view_test.go b/lib/block_view_test.go index ee52947bf..7d911a215 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -1657,7 +1657,7 @@ func TestBasicTransfer(t *testing.T) { } // A block with too much block reward should fail. - allowedBlockReward := CalcBlockRewardNanos(chain.blockTip().Height) + allowedBlockReward := CalcBlockRewardNanos(chain.blockTip().Height, params) assert.Equal(int64(allowedBlockReward), int64(1*NanosPerUnit)) blockToMine, _, _, err := miner._getBlockToMine(0 /*threadIndex*/) require.NoError(err) diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index 6d384d5ec..90d7adbe4 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -995,8 +995,7 @@ func TestAddInputsAndChangeToTransaction(t *testing.T) { _ = assert _ = require - chain, _, db := NewLowDifficultyBlockchain(t) - _ = db + chain, params, _ := NewLowDifficultyBlockchain(t) _, _, blockB1, blockB2, blockB3, _, _ := getForkedChain(t) @@ -1038,7 +1037,7 @@ func TestAddInputsAndChangeToTransaction(t *testing.T) { } // Save the block reward in the first block to use it for testing. - firstBlockReward := CalcBlockRewardNanos(1) + firstBlockReward := CalcBlockRewardNanos(1, params) // Connect a block. The sender address should have mined some DeSo but // it should be unspendable until the block after this one. See @@ -1111,13 +1110,12 @@ func TestValidateBasicTransfer(t *testing.T) { _ = assert _ = require - chain, _, db := NewLowDifficultyBlockchain(t) - _ = db + chain, params, _ := NewLowDifficultyBlockchain(t) _, _, blockB1, blockB2, _, _, _ := getForkedChain(t) // Save the block reward in the first block to use it for testing. 
- firstBlockReward := CalcBlockRewardNanos(1) + firstBlockReward := CalcBlockRewardNanos(1, params) // Connect a block. The sender address should have mined some DeSo but // it should be unspendable until the block after this one. See diff --git a/lib/miner.go b/lib/miner.go index 0590facbb..06272da42 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -166,7 +166,7 @@ func (desoMiner *DeSoMiner) _mineSingleBlock(threadIndex uint32) (_diffTarget *B // the header we were just mining on. blockToMine.Txns[0].TxOutputs[0].PublicKey = publicKey blockToMine.Txns[0].TxnMeta.(*BlockRewardMetadataa).ExtraData = UintToBuf(extraNonces[0]) - blockToMine, err = RecomputeBlockRewardWithBlockRewardOutputPublicKey(blockToMine, publicKey) + blockToMine, err = RecomputeBlockRewardWithBlockRewardOutputPublicKey(blockToMine, publicKey, desoMiner.params) if err != nil { glog.Errorf("DeSoMiner._startThread: Error recomputing block reward: %v", err) time.Sleep(1 * time.Second) diff --git a/lib/supply.go b/lib/supply.go index b662f357e..9c6f2630f 100644 --- a/lib/supply.go +++ b/lib/supply.go @@ -93,11 +93,15 @@ var ( ) // CalcBlockRewardNanos computes the block reward for a given block height. -func CalcBlockRewardNanos(blockHeight uint32) uint64 { +func CalcBlockRewardNanos(blockHeight uint32, params *DeSoParams) uint64 { if blockHeight == 0 { return MiningSupplyIntervals[0].BlockRewardNanos } + if params.IsPoSBlockHeight(uint64(blockHeight)) { + return 0 + } + // Skip the first interval since we know we're past block height zero. 
for intervalIndex, intervalStart := range MiningSupplyIntervals { if intervalIndex == 0 { diff --git a/lib/supply_test.go b/lib/supply_test.go index 7f282fd51..e3ae7a022 100644 --- a/lib/supply_test.go +++ b/lib/supply_test.go @@ -39,36 +39,31 @@ func TestCalcBlockReward(t *testing.T) { blocksPerYear := (time.Hour * 24 * 365 / DeSoMainnetParams.TimeBetweenBlocks) require.Equal(int64(blocksPerYear), int64(BlocksPerYear)) + setPoSBlockHeights(t, 1, 10*BlocksPerYear) - require.Equal(1*NanosPerUnit, CalcBlockRewardNanos(0)) - require.Equal(1*NanosPerUnit, CalcBlockRewardNanos(1)) + require.Equal(1*NanosPerUnit, CalcBlockRewardNanos(0, &GlobalDeSoParams)) + require.Equal(1*NanosPerUnit, CalcBlockRewardNanos(1, &GlobalDeSoParams)) // .75 - require.Equal(1*NanosPerUnit, CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight-1)) - require.Equal(int64(float64(NanosPerUnit)*.75), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight))) + require.Equal(1*NanosPerUnit, CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight-1, &GlobalDeSoParams)) + require.Equal(int64(float64(NanosPerUnit)*.75), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight, &GlobalDeSoParams))) // .5 - require.Equal(int64(float64(NanosPerUnit)*.75), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+288-1))) - require.Equal(int64(float64(NanosPerUnit)*.5), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+288))) + require.Equal(int64(float64(NanosPerUnit)*.75), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+288-1, &GlobalDeSoParams))) + require.Equal(int64(float64(NanosPerUnit)*.5), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+288, &GlobalDeSoParams))) // .25 - require.Equal(int64(float64(NanosPerUnit)*.5), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+2*288-1))) - require.Equal(int64(float64(NanosPerUnit)*.25), 
int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+2*288))) + require.Equal(int64(float64(NanosPerUnit)*.5), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+2*288-1, &GlobalDeSoParams))) + require.Equal(int64(float64(NanosPerUnit)*.25), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+2*288, &GlobalDeSoParams))) // .125 - require.Equal(int64(float64(NanosPerUnit)*.25), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+3*288-1))) - require.Equal(int64(float64(NanosPerUnit)*.125), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+3*288))) + require.Equal(int64(float64(NanosPerUnit)*.25), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+3*288-1, &GlobalDeSoParams))) + require.Equal(int64(float64(NanosPerUnit)*.125), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+3*288, &GlobalDeSoParams))) // .1 - require.Equal(int64(float64(NanosPerUnit)*.125), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+4*288-1))) - require.Equal(int64(float64(NanosPerUnit)*.1), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+4*288))) - - // .05 - require.Equal(int64(1*NanosPerUnit/10), int64(CalcBlockRewardNanos(15*BlocksPerYear-1))) - require.Equal(NanosPerUnit/20, CalcBlockRewardNanos(15*BlocksPerYear)) - require.Equal(NanosPerUnit/20, CalcBlockRewardNanos(15*BlocksPerYear+1)) - // 0 - require.Equal(NanosPerUnit/20, CalcBlockRewardNanos(32*BlocksPerYear-1)) - require.Equal(uint64(0), CalcBlockRewardNanos(32*BlocksPerYear)) - require.Equal(uint64(0), CalcBlockRewardNanos(32*BlocksPerYear+1)) - require.Equal(uint64(0), CalcBlockRewardNanos(35*BlocksPerYear+1)) - require.Equal(uint64(0), CalcBlockRewardNanos(math.MaxUint32)) + require.Equal(int64(float64(NanosPerUnit)*.125), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+4*288-1, &GlobalDeSoParams))) + 
require.Equal(int64(float64(NanosPerUnit)*.1), int64(CalcBlockRewardNanos(DeflationBombBlockRewardAdjustmentBlockHeight+4*288, &GlobalDeSoParams))) + + // Once we move over to PoS, the block reward should be zero. + require.Equal(uint64(0), CalcBlockRewardNanos(10*BlocksPerYear, &GlobalDeSoParams)) + require.Equal(uint64(0), CalcBlockRewardNanos(10*BlocksPerYear+1, &GlobalDeSoParams)) + require.Equal(uint64(0), CalcBlockRewardNanos(math.MaxUint32, &GlobalDeSoParams)) } func TestGetPrice(t *testing.T) { From 7e3ea47cbad4174c3a9bb958f542464851bed4f1 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 15 Feb 2024 16:56:40 -0500 Subject: [PATCH 492/762] Add new blockchain param for max tip age for pos (#1041) --- lib/blockchain.go | 11 +++++++++-- lib/constants.go | 19 ++++++++++++++----- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 0d2caf9a8..3a8b9484c 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1200,6 +1200,13 @@ func (bc *Blockchain) isTipMaxed(tip *BlockNode) bool { return false } +func (bc *Blockchain) getMaxTipAge(tip *BlockNode) time.Duration { + if bc.params.IsPoSBlockHeight(uint64(tip.Height)) { + return bc.params.MaxTipAgePoS + } + return bc.params.MaxTipAgePoW +} + func (bc *Blockchain) isTipCurrent(tip *BlockNode) bool { if bc.MaxSyncBlockHeight > 0 { return tip.Height >= bc.MaxSyncBlockHeight @@ -1217,8 +1224,8 @@ func (bc *Blockchain) isTipCurrent(tip *BlockNode) bool { // Not current if the tip has a timestamp older than the maximum // tip age. 
- tipTime := time.Unix(int64(tip.Header.GetTstampSecs()), 0) - oldestAllowedTipTime := bc.timeSource.AdjustedTime().Add(-1 * bc.params.MaxTipAge) + tipTime := time.Unix(tip.Header.GetTstampSecs(), 0) + oldestAllowedTipTime := bc.timeSource.AdjustedTime().Add(-1 * bc.getMaxTipAge(tip)) return !tipTime.Before(oldestAllowedTipTime) } diff --git a/lib/constants.go b/lib/constants.go index b6b46daf3..5494dfb4e 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -579,9 +579,15 @@ type DeSoParams struct { // disk-fill attacks, among other things. MinChainWorkHex string - // This is used for determining whether we are still in initial block download. + // This is used for determining whether we are still in initial block download + // when the chain is running PoW. // If our tip is older than this, we continue with IBD. - MaxTipAge time.Duration + MaxTipAgePoW time.Duration + + // This is used for determining whether we are still in initial block download + // when the chain is running PoS. + // If our tip is older than this, we continue with initial block download. + MaxTipAgePoS time.Duration // Do not allow the difficulty to change by more than a factor of this // variable during each adjustment period. @@ -813,7 +819,8 @@ func (params *DeSoParams) EnableRegtest() { params.TimeBetweenBlocks = 2 * time.Second params.TimeBetweenDifficultyRetargets = 6 * time.Second // Make sure we don't care about blockchain tip age. 
- params.MaxTipAge = 1000000 * time.Hour + params.MaxTipAgePoW = 1000000 * time.Hour + params.MaxTipAgePoS = 1000000 * time.Hour // Allow block rewards to be spent instantly params.BlockRewardMaturity = 0 @@ -1009,7 +1016,8 @@ var DeSoMainnetParams = DeSoParams{ // Run with --v=2 and look for "cum work" output from miner.go MinChainWorkHex: "000000000000000000000000000000000000000000000000006314f9a85a949b", - MaxTipAge: 24 * time.Hour, + MaxTipAgePoW: 24 * time.Hour, + MaxTipAgePoS: time.Hour, // =================================================================================== // Mainnet Bitcoin config @@ -1359,7 +1367,8 @@ var DeSoTestnetParams = DeSoParams{ // TODO: Set to one day when we launch the testnet. In the meantime this value // is more useful for local testing. - MaxTipAge: time.Hour * 24, + MaxTipAgePoW: time.Hour * 24, + MaxTipAgePoS: time.Hour, // Difficulty can't decrease to below 50% of its previous value or increase // to above 200% of its previous value. From c5ad2fc06b4a60b977e925ac005c4cbc2a567efb Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 15 Feb 2024 18:01:26 -0500 Subject: [PATCH 493/762] Mark PoS Blocks as STORED If Block Timestamp Drift Check Fails (#1044) * Mark PoS Blocks as STORED If Block Timestamp Drift Check Fails * Better comment * Address Nina's feedback * Cleanup --- lib/pos_blockchain.go | 106 ++++++++++++++++++++++---------- lib/pos_blockchain_test.go | 109 +++++++++++++++++++++------------ lib/pos_epoch_complete_hook.go | 38 ++++++++++++ 3 files changed, 180 insertions(+), 73 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 0d3093e76..2a7c9a263 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -260,8 +260,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v return false, false, nil, errors.New("processBlockPoS: Block failed spam prevention check") } - // TODO: Is there any error that would 
require special handling? If that's the case, we should - // probably push that logic in validateAndIndexBlockPoS anyway. + // Validate the block and store it in the block index. The block is guaranteed to not be an orphan. blockNode, err := bc.validateAndIndexBlockPoS(block) if err != nil { return false, false, nil, errors.Wrap(err, @@ -371,7 +370,6 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { return errors.Wrap(err, "processOrphanBlockPoS: Problem getting current epoch entry") } var validatorsByStake []*ValidatorEntry - var snapshotGlobalParams *GlobalParamsEntry // If the block is in a previous or future epoch, we need to compute the // proper validator set for the block. We do this by computing the prev/next // epoch entry and then fetching the validator set at the snapshot of the @@ -465,13 +463,6 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { "processOrphanBlockPoS: Problem getting validator set at snapshot at epoch number %d", epochEntrySnapshotAtEpochNumber) } - // Get the snapshot global params based on the snapshot at epoch number for this orphan block. - snapshotGlobalParams, err = utxoView.GetSnapshotGlobalParamsEntryByEpochNumber(epochEntrySnapshotAtEpochNumber) - if err != nil { - return errors.Wrapf(err, - "processOrphanBlockPoS: Problem getting snapshot global params at snapshot at epoch number %d", - epochEntrySnapshotAtEpochNumber) - } } else { // This block is in the current epoch! // First we validate the proposer vote partial signature @@ -509,11 +500,6 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { if err != nil { return errors.Wrap(err, "processOrphanBlockPoS: Problem getting validator set") } - // Get the snapshot global params based on the current snapshot epoch number. 
- snapshotGlobalParams, err = utxoView.GetCurrentSnapshotGlobalParamsEntry() - if err != nil { - return errors.Wrap(err, "processOrphanBlockPoS: Problem getting snapshot global params") - } } // Okay now we have the validator set ordered by stake, we can validate the QC. if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake); err != nil { @@ -526,7 +512,7 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { } // All blocks should pass the basic integrity validations, which ensure the block // is not malformed. If the block is malformed, we should store it as ValidateFailed. - if err = bc.isProperlyFormedBlockPoS(block, snapshotGlobalParams); err != nil { + if err = bc.isProperlyFormedBlockPoS(block); err != nil { if _, innerErr := bc.storeValidateFailedBlockInBlockIndex(block); innerErr != nil { return errors.Wrapf(innerErr, "processOrphanBlockPoS: Problem adding validate failed block to block index: %v", err) @@ -628,7 +614,8 @@ func (bc *Blockchain) validateLeaderAndQC(block *MsgDeSoBlock) (_passedSpamPreve // block. // // The recursive function's invariant is described as follows: -// - Base case: If block is VALIDATED or VALIDATE_FAILED, return the BlockNode as-is. +// - Base case: If block is VALIDATED or VALIDATE_FAILED, return the BlockNode as-is. If the block is STORED and +// has a timestamp too far in the future, we also return the BlockNode as-is. // - Recursive case: If the block is not VALIDATED or VALIDATE_FAILED in the blockIndexByHash, we will perform all // validations and add the block to the block index with the appropriate status (VALIDATED OR VALIDATE_FAILED) and // return the new BlockNode. @@ -646,6 +633,19 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock) (*BlockNode, return blockNode, nil } + // Base case - Check if the block has already been stored and fails the timestamp drift check. + // If it fails the check, then we leave it as stored and return early. 
+ if exists && blockNode.IsStored() { + // If the block is too far in the future, we leave it as STORED and return early. + failsTimestampDriftCheck, err := bc.isBlockTimestampTooFarInFuturePoS(block.Header) + if err != nil { + return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem checking block timestamp") + } + if failsTimestampDriftCheck { + return blockNode, nil + } + } + // Run the validation for the parent and update the block index with the parent's status. We first // check if the parent has a cached status. If so, we use the cached status. Otherwise, we run // the full validation algorithm on it, then index it and use the result. @@ -696,13 +696,8 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock) (*BlockNode, return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem getting UtxoView") } - snapshotGlobalParams, err := utxoView.GetCurrentSnapshotGlobalParamsEntry() - if err != nil { - return nil, errors.Wrap(err, "validateAndIndexBlockPoS: Problem getting snapshot global params") - } - // Check if the block is properly formed and passes all basic validations. - if err = bc.isValidBlockPoS(block, snapshotGlobalParams); err != nil { + if err = bc.isValidBlockPoS(block); err != nil { return bc.storeValidateFailedBlockWithWrappedError(block, err) } @@ -717,6 +712,15 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock) (*BlockNode, return bc.storeValidateFailedBlockWithWrappedError(block, err) } + // If the block is too far in the future, we leave it as STORED and return early. + failsTimestampDriftCheck, err := bc.isBlockTimestampTooFarInFuturePoS(block.Header) + if err != nil { + return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem checking block timestamp") + } + if failsTimestampDriftCheck { + return bc.storeBlockInBlockIndex(block) + } + // We can now add this block to the block index since we have performed all basic validations. 
blockNode, err = bc.storeValidatedBlockInBlockIndex(block)
 	if err != nil {
@@ -775,9 +779,9 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS(blockHash *BlockHash) (*
 
 // isValidBlockPoS performs all basic block integrity checks. Any error
 // resulting from this function implies that the block is invalid.
-func (bc *Blockchain) isValidBlockPoS(block *MsgDeSoBlock, snapshotGlobalParams *GlobalParamsEntry) error {
+func (bc *Blockchain) isValidBlockPoS(block *MsgDeSoBlock) error {
 	// Surface Level validation of the block
-	if err := bc.isProperlyFormedBlockPoS(block, snapshotGlobalParams); err != nil {
+	if err := bc.isProperlyFormedBlockPoS(block); err != nil {
 		return err
 	}
 	if err := bc.isBlockTimestampValidRelativeToParentPoS(block.Header); err != nil {
@@ -831,11 +835,53 @@ func (bc *Blockchain) isBlockTimestampValidRelativeToParentPoS(header *MsgDeSoHe
 	return nil
 }
 
+// isBlockTimestampTooFarInFuturePoS validates that the block's timestamp is not too far in the future based
+// on the configured block timestamp drift.
+//
+// We use the snapshotted global params to validate that the block's timestamp isn't too far ahead in the
+// future. We use the snapshotted global params specifically so that the drift timestamp check behaves
+// consistently even for orphan blocks that are 1 epoch in the future.
+func (bc *Blockchain) isBlockTimestampTooFarInFuturePoS(header *MsgDeSoHeader) (bool, error) {
+	// If the block's timestamp is lower than the current time, then there's no reason to check for
+	// timestamp drift. The check is guaranteed to pass.
+	currentTstampNanoSecs := time.Now().UnixNano()
+	if header.TstampNanoSecs <= currentTstampNanoSecs {
+		return false, nil
+	}
+
+	// We use NewUtxoView here, which generates a UtxoView at the current committed tip. We can use the view
+	// to fetch the snapshot global params for the previous epoch, current epoch, and next epoch. 
As long as
+	// the block's height is within 3600 blocks of the committed tip, this will always work. In practice,
+	// the incoming block will never be more than 3600 blocks behind or ahead of the tip, while also failing the
+	// above header.TstampNanoSecs <= currentTstampNanoSecs check.
+	utxoView, err := NewUtxoView(bc.db, bc.params, nil, bc.snapshot, nil)
+	if err != nil {
+		return false, errors.Wrap(err, "isBlockTimestampTooFarInFuturePoS: Problem initializing UtxoView")
+	}
+
+	simulatedEpochEntryForBlock, err := utxoView.SimulateAdjacentEpochEntryForBlockHeight(header.Height)
+	if err != nil {
+		return false, errors.Wrapf(err, "isBlockTimestampTooFarInFuturePoS: Problem simulating epoch entry")
+	}
+
+	snapshotEpochNumber, err := utxoView.ComputeSnapshotEpochNumberForEpoch(simulatedEpochEntryForBlock.EpochNumber)
+	if err != nil {
+		return false, errors.Wrapf(err, "isBlockTimestampTooFarInFuturePoS: Problem getting snapshot epoch number for epoch #%d",
+			simulatedEpochEntryForBlock.EpochNumber)
+	}
+
+	snapshotGlobalParams, err := utxoView.GetSnapshotGlobalParamsEntryByEpochNumber(snapshotEpochNumber)
+	if err != nil {
+		return false, errors.Wrapf(err, "isBlockTimestampTooFarInFuturePoS: Problem getting snapshot global params")
+	}
+
+	return header.TstampNanoSecs > time.Now().UnixNano()+snapshotGlobalParams.BlockTimestampDriftNanoSecs, nil
+}
+
 // isProperlyFormedBlockPoS validates the block at a surface level and makes
 // sure that all fields are populated in a valid manner. It does not verify
 // signatures nor validate the blockchain state resulting from the block. 
-func (bc *Blockchain) isProperlyFormedBlockPoS(block *MsgDeSoBlock, snapshotGlobalParams *GlobalParamsEntry, -) error { +func (bc *Blockchain) isProperlyFormedBlockPoS(block *MsgDeSoBlock) error { // First, make sure we have a non-nil block if block == nil { return RuleErrorNilBlock @@ -846,12 +892,6 @@ func (bc *Blockchain) isProperlyFormedBlockPoS(block *MsgDeSoBlock, snapshotGlob return err } - // Timestamp validation. We use the snapshotted global params to validate the timestamp, specifically - // so that the drift timestamp check behaves properly even for orphan blocks. - if block.Header.TstampNanoSecs > time.Now().UnixNano()+snapshotGlobalParams.BlockTimestampDriftNanoSecs { - return RuleErrorPoSBlockTstampNanoSecsInFuture - } - // If the header is properly formed, we can check the rest of the block. // All blocks must have at least one txn diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 382cbd486..61508d909 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -83,19 +83,15 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * Txns: txns, TxnConnectStatusByIndex: bitset.NewBitset().Set(0, true), } - utxoView, err := NewUtxoView(bc.db, bc.params, nil, nil, nil) - require.NoError(t, err) - globalParams, err := utxoView.GetCurrentSnapshotGlobalParamsEntry() - require.NoError(t, err) // Validate the block with a valid timeout QC and header. - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) // There should be no error. require.Nil(t, err) // Timeout QC must have at least one transaction and that transaction must be a block reward txn. 
block.Txns = nil - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorBlockWithNoTxns) block.Txns = []*MsgDeSoTxn{ @@ -103,7 +99,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * TxnMeta: &BasicTransferMetadata{}, }, } - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorBlockDoesNotStartWithRewardTxn) // Revert txns to be valid. block.Txns = []*MsgDeSoTxn{ @@ -114,7 +110,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Header's Proposed in view must be exactly one greater than the timeout QC's timed out view block.Header.ProposedInView = 2 - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorPoSTimeoutBlockViewNotOneGreaterThanValidatorsTimeoutQCView) // Revert proposed in view @@ -122,7 +118,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Timeout QC also must have a merkle root block.Header.TransactionMerkleRoot = nil - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorNilMerkleRoot) // Make sure block can't have both timeout and vote QC. @@ -135,13 +131,13 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * }, } block.Header.ValidatorsVoteQC = validatorVoteQC - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorBothTimeoutAndVoteQC) // Make sure block has either timeout or vote QC. 
block.Header.ValidatorsTimeoutAggregateQC = nil block.Header.ValidatorsVoteQC = nil - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorNoTimeoutOrVoteQC) // Reset validator vote QC. @@ -160,12 +156,12 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * require.NoError(t, err) block.Header.TransactionMerkleRoot = merkleRoot // There should be no error. - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Nil(t, err) // Vote QC must have Header's Proposed in view exactly one greater than vote QC's proposed in view. block.Header.ProposedInView = 2 - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorPoSVoteBlockViewNotOneGreaterThanValidatorsVoteQCView) // Revert proposed in view @@ -173,12 +169,12 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Block must have non-nil Merkle root if we have non-zero transactions block.Header.TransactionMerkleRoot = nil - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorNilMerkleRoot) // Block must have a matching merkle root block.Header.TransactionMerkleRoot = &ZeroBlockHash - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorInvalidMerkleRoot) // Reset transactions @@ -191,27 +187,27 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // TxnConnectStatusByIndex tests // TxnConnectStatusByIndex must be non-nil block.TxnConnectStatusByIndex = nil - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorNilTxnConnectStatusByIndex) // TxnConnectStatusByIndexHash must be non-nil 
block.TxnConnectStatusByIndex = bitset.NewBitset().Set(0, true) block.Header.TxnConnectStatusByIndexHash = nil - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorNilTxnConnectStatusByIndexHash) // The hashed version of TxnConnectStatusByIndex must match the actual TxnConnectStatusByIndexHash block.Header.TxnConnectStatusByIndexHash = HashBitset(bitset.NewBitset().Set(0, false)) - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorTxnConnectStatusByIndexHashMismatch) // Reset TxnConnectStatusByIndexHash block.Header.TxnConnectStatusByIndexHash = HashBitset(block.TxnConnectStatusByIndex) // Block must have valid proposer voting public key block.Header.ProposerVotingPublicKey = nil - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorInvalidProposerVotingPublicKey) block.Header.ProposerVotingPublicKey = &bls.PublicKey{} - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorInvalidProposerVotingPublicKey) // Reset proposer voting public key @@ -219,22 +215,22 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Block must have valid proposer public key block.Header.ProposerPublicKey = nil - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorInvalidProposerPublicKey) block.Header.ProposerPublicKey = &ZeroPublicKey - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorInvalidProposerPublicKey) block.Header.ProposerPublicKey = NewPublicKey(RandomBytes(33)) // Block must have valid proposer random seed hash block.Header.ProposerRandomSeedSignature = nil - err = 
bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorInvalidProposerRandomSeedSignature) block.Header.ProposerRandomSeedSignature = &bls.Signature{} - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorInvalidProposerRandomSeedSignature) block.Header.ProposerRandomSeedSignature = signature @@ -245,17 +241,12 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * err = bc.isBlockTimestampValidRelativeToParentPoS(block.Header) require.Equal(t, err, RuleErrorPoSBlockTstampNanoSecsTooOld) - // Block timestamps can't be in the future. - block.Header.TstampNanoSecs = time.Now().UnixNano() + (11 * time.Minute).Nanoseconds() - err = bc.isProperlyFormedBlockPoS(block, globalParams) - require.Equal(t, err, RuleErrorPoSBlockTstampNanoSecsInFuture) - // Revert the Header's timestamp block.Header.TstampNanoSecs = bc.BlockTip().Header.TstampNanoSecs + 10 // Block Header version must be 2 block.Header.Version = 1 - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorInvalidPoSBlockHeaderVersion) // Revert block header version @@ -263,7 +254,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Nil prev block hash not allowed block.Header.PrevBlockHash = nil - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorNilPrevBlockHash) // Parent must exist in the block index. 
@@ -273,7 +264,7 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Nil block header not allowed block.Header = nil - err = bc.isProperlyFormedBlockPoS(block, globalParams) + err = bc.isProperlyFormedBlockPoS(block) require.Equal(t, err, RuleErrorNilBlockHeader) } @@ -1772,7 +1763,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { _verifyCommitRuleHelper(testMeta, []*BlockHash{}, []*BlockHash{blockHash1}, nil) } - var blockHash2, blockHash3 *BlockHash + var blockHash2, blockHash3, futureBlockHash *BlockHash { // Now let's try adding two more blocks on top of this one to make sure commit rule works properly. var realBlock2 *MsgDeSoBlock @@ -1792,6 +1783,26 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.NoError(t, err) _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1}, []*BlockHash{blockHash2, blockHash3}, blockHash1) + + // Now let's try adding a block that has a timestamp too far in the future, and make sure it's stored. 
+ var futureBlock *MsgDeSoBlock + futureBlock = _generateRealBlockWithTimestampOffset(testMeta, 15, 15, 870, blockHash3, false, time.Hour) + + success, isOrphan, missingBlockHashes, err := testMeta.chain.ProcessBlockPoS(futureBlock, 15, true) + require.False(t, success) + require.False(t, isOrphan) + require.Len(t, missingBlockHashes, 0) + require.Error(t, err) + + futureBlockHash, err = futureBlock.Hash() + require.NoError(t, err) + + futureBlockNode, exists := testMeta.chain.blockIndexByHash[*futureBlockHash] + require.True(t, exists) + require.False(t, futureBlockNode.IsCommitted()) + require.True(t, futureBlockNode.IsStored()) + require.False(t, futureBlockNode.IsValidated()) + require.False(t, futureBlockNode.IsValidateFailed()) } var timeoutBlockHash *BlockHash @@ -1904,7 +1915,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { var blockWithFailingTxnHash *BlockHash { var blockWithFailingTxn *MsgDeSoBlock - blockWithFailingTxn = _generateRealBlockWithFailingTxn(testMeta, 18, 18, 123722, orphanBlockHash, false, 1) + blockWithFailingTxn = _generateRealBlockWithFailingTxn(testMeta, 18, 18, 123722, orphanBlockHash, false, 1, 0) require.Equal(t, blockWithFailingTxn.TxnConnectStatusByIndex.Get(len(blockWithFailingTxn.Txns)-1), false) success, _, _, err := testMeta.chain.ProcessBlockPoS(blockWithFailingTxn, 18, true) require.True(t, success) @@ -2377,11 +2388,23 @@ func TestHasValidProposerRandomSeedSignaturePoS(t *testing.T) { // PosMempool, generating a RandomSeedHash, updating the latestBlockView in the PosBlockProducer, and calling _getFullRealBlockTemplate. // It can be used to generate a block w/ either a vote or timeout QC. 
func _generateRealBlock(testMeta *TestMeta, blockHeight uint64, view uint64, seed int64, prevBlockHash *BlockHash, isTimeout bool) BlockTemplate { - return _generateRealBlockWithFailingTxn(testMeta, blockHeight, view, seed, prevBlockHash, isTimeout, 0) + return _generateRealBlockWithFailingTxn(testMeta, blockHeight, view, seed, prevBlockHash, isTimeout, 0, 0) +} + +func _generateRealBlockWithTimestampOffset( + testMeta *TestMeta, + blockHeight uint64, + view uint64, + seed int64, + prevBlockHash *BlockHash, + isTimeout bool, + blockTimestampOffset time.Duration, +) BlockTemplate { + return _generateRealBlockWithFailingTxn(testMeta, blockHeight, view, seed, prevBlockHash, isTimeout, 0, blockTimestampOffset) } func _generateRealBlockWithFailingTxn(testMeta *TestMeta, blockHeight uint64, view uint64, seed int64, - prevBlockHash *BlockHash, isTimeout bool, numFailingTxns uint64) BlockTemplate { + prevBlockHash *BlockHash, isTimeout bool, numFailingTxns uint64, blockTimestampOffset time.Duration) BlockTemplate { globalParams := _testGetDefaultGlobalParams() randSource := rand.New(rand.NewSource(seed)) passingTxns := []*MsgDeSoTxn{} @@ -2418,7 +2441,7 @@ func _generateRealBlockWithFailingTxn(testMeta *TestMeta, blockHeight uint64, vi latestBlockHeight := testMeta.chain.blockIndexByHash[*prevBlockHash].Height testMeta.posMempool.UpdateLatestBlock(latestBlockView, uint64(latestBlockHeight)) seedSignature := getRandomSeedSignature(testMeta, blockHeight, view, prevBlock.Header.ProposerRandomSeedSignature) - fullBlockTemplate := _getFullRealBlockTemplate(testMeta, blockHeight, view, seedSignature, isTimeout) + fullBlockTemplate := _getFullRealBlockTemplate(testMeta, blockHeight, view, seedSignature, isTimeout, blockTimestampOffset) // Remove the transactions from this block from the mempool. // This prevents nonce reuse issues when trying to make reorg blocks. 
for _, txn := range passingTxns { @@ -2596,7 +2619,14 @@ func _getVoteQC(testMeta *TestMeta, blockHeight uint64, qcBlockHash *BlockHash, // _getFullRealBlockTemplate is a helper function that generates a block template with a valid TxnConnectStatusByIndexHash // and a valid TxnConnectStatusByIndex, a valid vote or timeout QC, does all the required signing by validators, // and generates the proper ProposerVotePartialSignature. -func _getFullRealBlockTemplate(testMeta *TestMeta, blockHeight uint64, view uint64, seedSignature *bls.Signature, isTimeout bool) BlockTemplate { +func _getFullRealBlockTemplate( + testMeta *TestMeta, + blockHeight uint64, + view uint64, + seedSignature *bls.Signature, + isTimeout bool, + blockTimestampOffset time.Duration, +) BlockTemplate { blockTemplate, err := testMeta.posBlockProducer.createBlockTemplate( testMeta.posMempool.readOnlyLatestBlockView, blockHeight, view, seedSignature) require.NoError(testMeta.t, err) @@ -2656,11 +2686,10 @@ func _getFullRealBlockTemplate(testMeta *TestMeta, blockHeight uint64, view uint blockTemplate.Header.ProposerPublicKey = NewPublicKey(leaderPublicKeyBytes) blockTemplate.Header.ProposerVotingPublicKey = leaderVotingPrivateKey.PublicKey() // Ugh we need to adjust the timestamp. - blockTemplate.Header.TstampNanoSecs = time.Now().UnixNano() + blockTemplate.Header.TstampNanoSecs = time.Now().UnixNano() + blockTimestampOffset.Nanoseconds() if chainTip.Header.TstampNanoSecs > blockTemplate.Header.TstampNanoSecs { blockTemplate.Header.TstampNanoSecs = chainTip.Header.TstampNanoSecs + 1 } - require.Less(testMeta.t, blockTemplate.Header.TstampNanoSecs, time.Now().UnixNano()+testMeta.chain.params.DefaultBlockTimestampDriftNanoSecs) var proposerVotePartialSignature *bls.Signature // Just hack it so the leader gets the block reward. 
blockTemplate.Txns[0].TxOutputs[0].PublicKey = leaderPublicKeyBytes diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 37d45de8b..c299015ba 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -140,6 +140,44 @@ func (bav *UtxoView) runEpochCompleteSnapshotGeneration(epochNumber uint64) erro return nil } +// SimulateEpochEntryForBlockHeight returns a simulated for the given block height. It only supports block +// heights within the current, previous, and next epochs. The view and timestamp for the simulated epoch +// entries are left empty since they can't be easily simulated, so DO NOT USE CreatedAtBlockTimestampNanoSecs +// or InitialView from the returned EpochEntry. +// +// We use this function to simulate epoch entries so we can perform block validations for blocks within the +// previous, current, and next epochs. +func (bav *UtxoView) SimulateAdjacentEpochEntryForBlockHeight(blockHeight uint64) (*EpochEntry, error) { + currentEpochEntry, err := bav.GetCurrentEpochEntry() + if err != nil { + return nil, errors.Wrap(err, "Problem getting current epoch entry") + } + + if currentEpochEntry.ContainsBlockHeight(blockHeight) { + return currentEpochEntry, nil + } + + var adjacentEpochEntry *EpochEntry + + if blockHeight > currentEpochEntry.FinalBlockHeight { + adjacentEpochEntry, err = bav.simulateNextEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight) + if err != nil { + return nil, errors.Wrap(err, "Problem simulating next epoch entry") + } + } else { + adjacentEpochEntry, err = bav.simulatePrevEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.InitialBlockHeight) + if err != nil { + return nil, errors.Wrap(err, "Problem simulating prev epoch entry") + } + } + + if adjacentEpochEntry.ContainsBlockHeight(blockHeight) { + return adjacentEpochEntry, nil + } + + return nil, errors.Errorf("Block height %d is not within the current, previous, or next epoch", blockHeight) +} 
+ // simulateNextEpochEntry simulates the block range for the next epoch given the current epoch's final // block height and epoch number. The view and timestamp for the simulated epoch are left empty since they can't // be easily simulated, so DO NOT USE CreatedAtBlockTimestampNanoSecs or InitialView from the returned EpochEntry. From aad2b94ba48fa41a84ebd2ee52c67d095a47ddc0 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 15 Feb 2024 18:32:16 -0500 Subject: [PATCH 494/762] Add SnapshotLeaderSchedule to core_state (#1043) --- lib/db_utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/db_utils.go b/lib/db_utils.go index 9e6c87c0c..b90fa1fe3 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -565,7 +565,7 @@ type DBPrefixes struct { // PrefixSnapshotLeaderSchedule: Retrieve a ValidatorPKID by . // Prefix, , -> ValidatorPKID - PrefixSnapshotLeaderSchedule []byte `prefix_id:"[91]" is_state:"true"` + PrefixSnapshotLeaderSchedule []byte `prefix_id:"[91]" is_state:"true" core_state:"true"` // PrefixSnapshotStakeToRewardByValidatorAndStaker: Retrieves snapshotted StakeEntries that are eligible to // receive staking rewards for an epoch. StakeEntries can be retrieved by ValidatorPKID and StakerPKID. 
From f155e1cbd624ed0c3869a69ef69e6ff45844bcbe Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 15 Feb 2024 19:09:07 -0500 Subject: [PATCH 495/762] Use time.Time instead of uint64 for MempoolTransaction (#1042) --- lib/constants.go | 4 ++++ lib/legacy_mempool.go | 4 ++-- lib/peer.go | 2 +- lib/pos_block_producer_test.go | 4 ++-- lib/pos_fee_estimator.go | 4 ++-- lib/pos_mempool.go | 21 ++++++++++----------- lib/pos_mempool_test.go | 4 ++-- lib/pos_mempool_transaction.go | 12 ++++++++---- lib/server.go | 4 ++-- 9 files changed, 33 insertions(+), 26 deletions(-) diff --git a/lib/constants.go b/lib/constants.go index 5494dfb4e..f95892f2e 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -70,6 +70,10 @@ func NanoSecondsToUint64MicroSeconds(nanos int64) uint64 { return uint64(nanos / 1000) } +func NanoSecondsToTime(nanos int64) time.Time { + return time.Unix(0, nanos) +} + // Snapshot constants const ( // GetSnapshotTimeout is used in Peer when we fetch a snapshot chunk, and we need to retry. 
diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index ecd6a2a53..c7e3bb770 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -254,13 +254,13 @@ func (mp *DeSoMempool) GetTransaction(txnHash *BlockHash) *MempoolTransaction { if !exists { return nil } - return NewMempoolTransaction(mempoolTx.Tx, uint64(mempoolTx.Added.UnixMicro())) + return NewMempoolTransaction(mempoolTx.Tx, mempoolTx.Added) } func (mp *DeSoMempool) GetTransactions() []*MempoolTransaction { return collections.Transform( mp.GetOrderedTransactions(), func(mempoolTx *MempoolTx) *MempoolTransaction { - return NewMempoolTransaction(mempoolTx.Tx, uint64(mempoolTx.Added.UnixMicro())) + return NewMempoolTransaction(mempoolTx.Tx, mempoolTx.Added) }, ) } diff --git a/lib/peer.go b/lib/peer.go index cba2a6620..780f72f62 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -204,7 +204,7 @@ func (pp *Peer) HandleGetTransactionsMsg(getTxnMsg *MsgDeSoGetTransactions) { // Doing this helps the Peer when they go to add the transactions by reducing // unconnectedTxns and transactions being rejected due to missing dependencies. sort.Slice(mempoolTxs, func(ii, jj int) bool { - return mempoolTxs[ii].TimestampUnixMicro < mempoolTxs[jj].TimestampUnixMicro + return mempoolTxs[ii].TimestampUnixMicro.Before(mempoolTxs[jj].TimestampUnixMicro) }) // Create a list of the fetched transactions to a response. diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 896d4a060..adc21e593 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -269,10 +269,10 @@ func TestGetBlockTransactions(t *testing.T) { ) require.NoError(testMempool.Start()) defer testMempool.Stop() - currentTime := uint64(time.Now().UnixMicro()) + currentTime := time.Now() for ii, txn := range txns { // Use the Simulated Transaction Timestamp. 
- mtxn := NewMempoolTransaction(txn, currentTime+uint64(ii)) + mtxn := NewMempoolTransaction(txn, currentTime.Add(time.Duration(ii)*time.Microsecond)) require.NoError(testMempool.AddTransaction(mtxn, false)) } newTxns := testMempool.GetTransactions() diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index dff3bfe6e..aae60d662 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -126,7 +126,7 @@ func addBlockToTransactionRegister(txnRegister *TransactionRegister, block *MsgD if txn.TxnMeta.GetTxnType() == TxnTypeBlockReward { continue } - mtxn, err := NewMempoolTx(txn, NanoSecondsToUint64MicroSeconds(block.Header.TstampNanoSecs), block.Header.Height) + mtxn, err := NewMempoolTx(txn, NanoSecondsToTime(block.Header.TstampNanoSecs), block.Header.Height) if err != nil { return errors.Wrap(err, "PoSFeeEstimator.addBlockToTransactionRegister: error creating MempoolTx") } @@ -159,7 +159,7 @@ func (posFeeEstimator *PoSFeeEstimator) removeBlockNoLock(block *MsgDeSoBlock) e if txn.TxnMeta.GetTxnType() == TxnTypeBlockReward { continue } - mtxn, err := NewMempoolTx(txn, NanoSecondsToUint64MicroSeconds(block.Header.TstampNanoSecs), block.Header.Height) + mtxn, err := NewMempoolTx(txn, NanoSecondsToTime(block.Header.TstampNanoSecs), block.Header.Height) if err != nil { return errors.Wrap(err, "PoSFeeEstimator.RemoveBlock: error creating MempoolTx") } diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index f2825c37d..864b78ed2 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -62,13 +62,13 @@ type MempoolIterator interface { // MempoolTransaction is a simple wrapper around MsgDeSoTxn that adds a timestamp field. 
type MempoolTransaction struct { *MsgDeSoTxn - TimestampUnixMicro uint64 + TimestampUnixMicro time.Time } -func NewMempoolTransaction(txn *MsgDeSoTxn, timestampUnixMicro uint64) *MempoolTransaction { +func NewMempoolTransaction(txn *MsgDeSoTxn, timestamp time.Time) *MempoolTransaction { return &MempoolTransaction{ MsgDeSoTxn: txn, - TimestampUnixMicro: timestampUnixMicro, + TimestampUnixMicro: timestamp, } } @@ -76,7 +76,7 @@ func (mtxn *MempoolTransaction) GetTxn() *MsgDeSoTxn { return mtxn.MsgDeSoTxn } -func (mtxn *MempoolTransaction) GetTimestampUnixMicro() uint64 { +func (mtxn *MempoolTransaction) GetTimestamp() time.Time { return mtxn.TimestampUnixMicro } @@ -164,8 +164,7 @@ func (it *PosMempoolIterator) Value() (*MempoolTransaction, bool) { if txn == nil || txn.Tx == nil { return nil, ok } - added := uint64(txn.Added.UnixMicro()) - return NewMempoolTransaction(txn.Tx, added), ok + return NewMempoolTransaction(txn.Tx, txn.Added), ok } func (it *PosMempoolIterator) Initialized() bool { @@ -440,7 +439,7 @@ func (mp *PosMempool) OnBlockDisconnected(block *MsgDeSoBlock) { // Add all transactions in the block to the mempool. // Construct the MempoolTx from the MsgDeSoTxn. - mempoolTx, err := NewMempoolTx(txn, NanoSecondsToUint64MicroSeconds(block.Header.TstampNanoSecs), mp.latestBlockHeight) + mempoolTx, err := NewMempoolTx(txn, NanoSecondsToTime(block.Header.TstampNanoSecs), mp.latestBlockHeight) if err != nil { continue } @@ -487,7 +486,7 @@ func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction, verifySignature b } // Construct the MempoolTx from the MsgDeSoTxn. 
- mempoolTx, err := NewMempoolTx(mtxn.GetTxn(), mtxn.GetTimestampUnixMicro(), mp.latestBlockHeight) + mempoolTx, err := NewMempoolTx(mtxn.GetTxn(), mtxn.GetTimestamp(), mp.latestBlockHeight) if err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem constructing MempoolTx") } @@ -664,7 +663,7 @@ func (mp *PosMempool) GetTransaction(txnHash *BlockHash) *MempoolTransaction { return nil } - return NewMempoolTransaction(txn.Tx, uint64(txn.Added.UnixMicro())) + return NewMempoolTransaction(txn.Tx, txn.Added) } // GetTransactions returns all transactions in the mempool ordered by the Fee-Time algorithm. This function is thread-safe. @@ -683,7 +682,7 @@ func (mp *PosMempool) GetTransactions() []*MempoolTransaction { continue } - mtxn := NewMempoolTransaction(txn.Tx, uint64(txn.Added.UnixMicro())) + mtxn := NewMempoolTransaction(txn.Tx, txn.Added) mempoolTxns = append(mempoolTxns, mtxn) } return mempoolTxns @@ -761,7 +760,7 @@ func (mp *PosMempool) refreshNoLock() error { var txnsToRemove []*MempoolTx txns := mp.getTransactionsNoLock() for _, txn := range txns { - mtxn := NewMempoolTransaction(txn.Tx, uint64(txn.Added.UnixMicro())) + mtxn := NewMempoolTransaction(txn.Tx, txn.Added) err := tempPool.AddTransaction(mtxn, false) if err == nil { continue diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index b95478c92..9afd41694 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -299,7 +299,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { txn2Low.TxnFeeNanos = txn2.TxnFeeNanos - 1000 *txn2Low.TxnNonce = *txn2.TxnNonce _signTxn(t, txn2Low, m1Priv) - added2Low := uint64(time.Now().UnixMicro()) + added2Low := time.Now() mtxn2Low := NewMempoolTransaction(txn2Low, added2Low) err = mempool.AddTransaction(mtxn2Low, true) require.Contains(err.Error(), MempoolFailedReplaceByHigherFee) @@ -391,7 +391,7 @@ func _generateTestTxn(t *testing.T, rand *rand.Rand, feeMin uint64, feeMax uint6 } func _wrappedPosMempoolAddTransaction(t 
*testing.T, mp *PosMempool, txn *MsgDeSoTxn) { - added := uint64(time.Now().UnixMicro()) + added := time.Now() mtxn := NewMempoolTransaction(txn, added) require.NoError(t, mp.AddTransaction(mtxn, true)) require.Equal(t, true, _checkPosMempoolIntegrity(t, mp)) diff --git a/lib/pos_mempool_transaction.go b/lib/pos_mempool_transaction.go index 7759a7ed0..ceef43aaa 100644 --- a/lib/pos_mempool_transaction.go +++ b/lib/pos_mempool_transaction.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "github.com/pkg/errors" + "math" "time" ) @@ -39,7 +40,7 @@ type MempoolTx struct { index int } -func NewMempoolTx(txn *MsgDeSoTxn, addedUnixMicro uint64, blockHeight uint64) (*MempoolTx, error) { +func NewMempoolTx(txn *MsgDeSoTxn, addedUnixMicro time.Time, blockHeight uint64) (*MempoolTx, error) { txnBytes, err := txn.ToBytes(false) if err != nil { return nil, errors.Wrapf(err, "PosMempool.GetMempoolTx: Problem serializing txn") @@ -54,13 +55,12 @@ func NewMempoolTx(txn *MsgDeSoTxn, addedUnixMicro uint64, blockHeight uint64) (* if err != nil { return nil, errors.Wrapf(err, "PosMempool.GetMempoolTx: Problem computing fee per KB") } - added := time.UnixMicro(int64(addedUnixMicro)) return &MempoolTx{ Tx: txn, Hash: txnHash, TxSizeBytes: serializedLen, - Added: added, + Added: addedUnixMicro, Height: uint32(blockHeight), Fee: txn.TxnFeeNanos, FeePerKB: feePerKb, @@ -115,9 +115,13 @@ func (mempoolTx *MempoolTx) FromBytes(rr *bytes.Reader) error { if err != nil { return errors.Wrapf(err, "MempoolTx.Decode: Problem reading timestamp") } + if timestampUnixMicro > math.MaxInt64 { + return errors.Errorf("MempoolTx.Decode: Invalid timestamp %d exceeds max int64 %d", + timestampUnixMicro, math.MaxInt64) + } // Create a new MempoolTx - newTxn, err := NewMempoolTx(txn, timestampUnixMicro, height) + newTxn, err := NewMempoolTx(txn, time.UnixMicro(int64(timestampUnixMicro)), height) *mempoolTx = *newTxn return nil } diff --git a/lib/server.go b/lib/server.go index f263b3742..d0940cc03 100644 --- 
a/lib/server.go +++ b/lib/server.go @@ -1866,7 +1866,7 @@ func (srv *Server) _addNewTxn( // Always add the txn to the PoS mempool. This should always succeed if the txn // addition into the PoW mempool succeeded above. - mempoolTxn := NewMempoolTransaction(txn, uint64(time.Now().UnixMicro())) + mempoolTxn := NewMempoolTransaction(txn, time.Now()) if err := srv.posMempool.AddTransaction(mempoolTxn, true /*verifySignatures*/); err != nil { return nil, errors.Wrapf(err, "Server._addNewTxn: problem adding txn to pos mempool") } @@ -2257,7 +2257,7 @@ func (srv *Server) ProcessSingleTxnWithChainLock(pp *Peer, txn *MsgDeSoTxn) ([]* // Regardless of the consensus protocol we're running (PoW or PoS), we use the PoS mempool's to house all // mempool txns. If a txn can't make it into the PoS mempool, which uses a looser unspent balance check for // the the transactor, then it must be invalid. - if err := srv.posMempool.AddTransaction(NewMempoolTransaction(txn, uint64(time.Now().UnixMicro())), true); err != nil { + if err := srv.posMempool.AddTransaction(NewMempoolTransaction(txn, time.Now()), true); err != nil { return nil, errors.Wrapf(err, "Server.ProcessSingleTxnWithChainLock: Problem adding transaction to PoS mempool: ") } From bc95c03a01ede2b2d0d7f0cce6e4eacd79c09fe1 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 16 Feb 2024 10:40:43 -0500 Subject: [PATCH 496/762] Broadcast Votes, Timeouts, and Block Proposals To Validators (#1036) * Broadcast Votes and Timeouts To Validators * Broadcast Block Proposals To Network * Fix failed unit tests * Address Nina's comments --- lib/pos_consensus.go | 34 +++++++++++++++++++++++++++++----- lib/pos_consensus_test.go | 20 ++++++++++++++++++-- lib/server.go | 1 + 3 files changed, 48 insertions(+), 7 deletions(-) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 6c3382899..209665b5e 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -14,6 +14,7 @@ import ( 
type FastHotStuffConsensus struct { lock sync.RWMutex + networkManager *NetworkManager blockchain *Blockchain fastHotStuffEventLoop consensus.FastHotStuffEventLoop mempool Mempool @@ -25,6 +26,7 @@ type FastHotStuffConsensus struct { func NewFastHotStuffConsensus( params *DeSoParams, + networkManager *NetworkManager, blockchain *Blockchain, mempool Mempool, signer *BLSSigner, @@ -32,6 +34,7 @@ func NewFastHotStuffConsensus( timeoutBaseDurationMilliseconds uint64, ) *FastHotStuffConsensus { return &FastHotStuffConsensus{ + networkManager: networkManager, blockchain: blockchain, fastHotStuffEventLoop: consensus.NewFastHotStuffEventLoop(), mempool: mempool, @@ -281,7 +284,18 @@ func (fc *FastHotStuffConsensus) handleBlockProposalEvent( ) } - // TODO: Broadcast the block proposal to the network + // Broadcast the block to the validator network + validators := fc.networkManager.rnManager.GetValidatorIndex().GetAll() + for _, validator := range validators { + sendMessageToRemoteNodeAsync(validator, blockProposal) + } + + // Broadcast the block to all inbound non-validator peers. This allows them to sync + // blocks from us. 
+ nonValidators := fc.networkManager.rnManager.GetNonValidatorInboundIndex().GetAll() + for _, nonValidator := range nonValidators { + sendMessageToRemoteNodeAsync(nonValidator, blockProposal) + } fc.logBlockProposal(blockProposal, blockHash) return nil @@ -346,8 +360,11 @@ func (fc *FastHotStuffConsensus) HandleLocalVoteEvent(event *consensus.FastHotSt return errors.Errorf("FastHotStuffConsensus.HandleLocalVoteEvent: Error processing vote locally: %v", err) } - // Broadcast the vote message to the network - // TODO: Broadcast the vote message to the network or alternatively to just the block proposer + // Broadcast the block to the validator network + validators := fc.networkManager.rnManager.GetValidatorIndex().GetAll() + for _, validator := range validators { + sendMessageToRemoteNodeAsync(validator, voteMsg) + } return nil } @@ -461,8 +478,11 @@ func (fc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error processing timeout locally: %v", err) } - // Broadcast the timeout message to the network - // TODO: Broadcast the timeout message to the network or alternatively to just the block proposer + // Broadcast the block to the validator network + validators := fc.networkManager.rnManager.GetValidatorIndex().GetAll() + for _, validator := range validators { + sendMessageToRemoteNodeAsync(validator, timeoutMsg) + } return nil } @@ -815,6 +835,10 @@ func isProperlyFormedBlockProposalEvent(event *consensus.FastHotStuffEvent) bool return false } +func sendMessageToRemoteNodeAsync(remoteNode *RemoteNode, msg DeSoMessage) { + go func(rn *RemoteNode, m DeSoMessage) { rn.SendMessage(m) }(remoteNode, msg) +} + ////////////////////////////////////////// Logging Helper Functions /////////////////////////////////////////////// func (fc *FastHotStuffConsensus) logBlockProposal(block *MsgDeSoBlock, blockHash *BlockHash) { diff --git a/lib/pos_consensus_test.go 
b/lib/pos_consensus_test.go index 9bdb136a0..99d8c4e9d 100644 --- a/lib/pos_consensus_test.go +++ b/lib/pos_consensus_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" "github.com/deso-protocol/go-deadlock" "github.com/pkg/errors" @@ -26,7 +27,8 @@ func TestFastHotStuffConsensusHandleLocalVoteEvent(t *testing.T) { // Create a mock consensus fastHotStuffConsensus := FastHotStuffConsensus{ - lock: sync.RWMutex{}, + lock: sync.RWMutex{}, + networkManager: _createMockNetworkManagerForConsensus(), blockchain: &Blockchain{ params: &DeSoTestnetParams, }, @@ -104,7 +106,8 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { // Create a mock consensus fastHotStuffConsensus := FastHotStuffConsensus{ - lock: sync.RWMutex{}, + lock: sync.RWMutex{}, + networkManager: _createMockNetworkManagerForConsensus(), signer: &BLSSigner{ privateKey: blsPrivateKey, }, @@ -200,3 +203,16 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { func alwaysReturnTrue() bool { return true } + +func _createMockNetworkManagerForConsensus() *NetworkManager { + return &NetworkManager{ + rnManager: &RemoteNodeManager{ + remoteNodeIndexer: &RemoteNodeIndexer{ + AllRemoteNodes: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + ValidatorIndex: collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode](), + NonValidatorOutboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + NonValidatorInboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + }, + }, + } +} diff --git a/lib/server.go b/lib/server.go index 76017f8a0..86b999fd4 100644 --- a/lib/server.go +++ b/lib/server.go @@ -617,6 +617,7 @@ func NewServer( if _blsKeystore != nil { srv.fastHotStuffConsensus = NewFastHotStuffConsensus( _params, + srv.networkManager, _chain, _posMempool, _blsKeystore.GetSigner(), From 
99d0a46845a7c01805e9f9d9d500a0fd62aef694 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 16 Feb 2024 17:00:52 -0500 Subject: [PATCH 497/762] Use RemoteNode To Fetch Missing PoS Blocks (#1045) --- lib/pos_consensus.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 209665b5e..6a78ad92e 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -539,9 +539,11 @@ func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error // // See https://github.com/deso-protocol/core/pull/875#discussion_r1460183510 for more details. if len(missingBlockHashes) > 0 { - pp.QueueMessage(&MsgDeSoGetBlocks{ - HashList: missingBlockHashes, - }) + remoteNode := fc.networkManager.rnManager.GetRemoteNodeFromPeer(pp) + if remoteNode == nil { + return errors.Errorf("FastHotStuffConsensus.HandleBlock: RemoteNode not found for peer: %v", pp) + } + sendMessageToRemoteNodeAsync(remoteNode, &MsgDeSoGetBlocks{HashList: missingBlockHashes}) } return nil From 434628b0b44a2a65f44cc5e32ff2807e20405cdb Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 20 Feb 2024 13:23:27 -0500 Subject: [PATCH 498/762] Add UnjailValidatorStateChangeMetadata (#1047) --- lib/block_view_validator.go | 5 +++++ lib/state_change_metadata.go | 39 ++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 8314e4366..124541ae9 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1666,6 +1666,11 @@ func (bav *UtxoView) _connectUnjailValidator( utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ Type: OperationTypeUnjailValidator, PrevValidatorEntry: prevValidatorEntry, + StateChangeMetadata: &UnjailValidatorStateChangeMetadata{ + ValidatorPKID: transactorPKIDEntry.PKID, + JailedAtEpochNumber: 
prevValidatorEntry.JailedAtEpochNumber, + UnjailedAtEpochNumber: currentEpochNumber, + }, }) return totalInput, totalOutput, utxoOpsForTxn, nil } diff --git a/lib/state_change_metadata.go b/lib/state_change_metadata.go index 3fb11057a..150174e6b 100644 --- a/lib/state_change_metadata.go +++ b/lib/state_change_metadata.go @@ -22,6 +22,7 @@ const ( EncoderTypeCreatePostAssociationStateChangeMetadata EncoderType = 2000013 EncoderTypeDeletePostAssociationStateChangeMetadata EncoderType = 2000014 EncoderTypeStakeRewardStateChangeMetadata EncoderType = 2000015 + EncoderTypeUnjailValidatorStateChangeMetadata EncoderType = 2000016 ) func GetStateChangeMetadataFromOpType(opType OperationType) DeSoEncoder { @@ -595,3 +596,41 @@ func (stakeRewardSCM *StakeRewardStateChangeMetadata) GetVersionByte(blockHeight func (stakeRewardSCM *StakeRewardStateChangeMetadata) GetEncoderType() EncoderType { return EncoderTypeStakeRewardStateChangeMetadata } + +type UnjailValidatorStateChangeMetadata struct { + ValidatorPKID *PKID + JailedAtEpochNumber uint64 + UnjailedAtEpochNumber uint64 +} + +func (metadata *UnjailValidatorStateChangeMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + data = append(data, EncodeToBytes(blockHeight, metadata.ValidatorPKID, skipMetadata...)...) + data = append(data, UintToBuf(metadata.JailedAtEpochNumber)...) + data = append(data, UintToBuf(metadata.UnjailedAtEpochNumber)...) 
+ return data +} + +func (metadata *UnjailValidatorStateChangeMetadata) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + if metadata.ValidatorPKID, err = DecodeDeSoEncoder(&PKID{}, rr); err != nil { + return errors.Wrapf(err, "UnjailValidatorStateChangeMetadata.Decode: Problem reading ValidatorPKID") + } + metadata.JailedAtEpochNumber, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "UnjailValidatorStateChangeMetadata.Decode: Problem reading JailedAtEpochNumber") + } + metadata.UnjailedAtEpochNumber, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "UnjailValidatorStateChangeMetadata.Decode: Problem reading UnjailedAtEpochNumber") + } + return nil +} + +func (metadata *UnjailValidatorStateChangeMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (metadata *UnjailValidatorStateChangeMetadata) GetEncoderType() EncoderType { + return EncoderTypeUnjailValidatorStateChangeMetadata +} From d047d517c779c99b898b2a7d55a3bc2dec04997f Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 20 Feb 2024 13:24:29 -0500 Subject: [PATCH 499/762] Expose MergeGlobalParamEntryDefaults so state syncer can use it, update func to take in params instead of utxo view (#1048) --- lib/db_utils.go | 2 +- lib/pos_snapshot_entries.go | 38 ++++++++++++++++++------------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/lib/db_utils.go b/lib/db_utils.go index b90fa1fe3..8fef569b0 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -106,7 +106,7 @@ type DBPrefixes struct { // How much Bitcoin is work in USD cents. PrefixUSDCentsPerBitcoinExchangeRate []byte `prefix_id:"[27]" is_state:"true"` // -> - PrefixGlobalParams []byte `prefix_id:"[40]" is_state:"true"` + PrefixGlobalParams []byte `prefix_id:"[40]" is_state:"true" core_state:"true"` // The prefix for the Bitcoin TxID map. 
If a key is set for a TxID that means this // particular TxID has been processed as part of a BitcoinExchange transaction. If diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 24ff7ba48..69b34c20e 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -56,7 +56,7 @@ func (bav *UtxoView) ComputeSnapshotEpochNumberForEpoch(epochNumber uint64) (uin // func (bav *UtxoView) GetCurrentGlobalParamsEntry() *GlobalParamsEntry { - return _mergeGlobalParamEntryDefaults(bav, bav.GlobalParamsEntry) + return MergeGlobalParamEntryDefaults(bav.GlobalParamsEntry, bav.Params) } // GetCurrentSnapshotGlobalParamsEntry retrieves a snapshot of the GlobalParamsEntry from n epochs ago. If a snapshot @@ -86,7 +86,7 @@ func (bav *UtxoView) GetCurrentSnapshotGlobalParamsEntry() (*GlobalParamsEntry, func (bav *UtxoView) GetSnapshotGlobalParamsEntryByEpochNumber(snapshotAtEpochNumber uint64) (*GlobalParamsEntry, error) { // Check the UtxoView first. if globalParamsEntry, exists := bav.SnapshotGlobalParamEntries[snapshotAtEpochNumber]; exists { - return _mergeGlobalParamEntryDefaults(bav, globalParamsEntry), nil + return MergeGlobalParamEntryDefaults(globalParamsEntry, bav.Params), nil } // If we don't have it in the UtxoView, check the db. globalParamsEntry, err := DBGetSnapshotGlobalParamsEntry(bav.Handle, bav.Snapshot, snapshotAtEpochNumber) @@ -100,10 +100,10 @@ func (bav *UtxoView) GetSnapshotGlobalParamsEntryByEpochNumber(snapshotAtEpochNu // Cache the result in the UtxoView. 
bav._setSnapshotGlobalParamsEntry(globalParamsEntry, snapshotAtEpochNumber) } - return _mergeGlobalParamEntryDefaults(bav, globalParamsEntry), nil + return MergeGlobalParamEntryDefaults(globalParamsEntry, bav.Params), nil } -func _mergeGlobalParamEntryDefaults(bav *UtxoView, globalParamsEntry *GlobalParamsEntry) *GlobalParamsEntry { +func MergeGlobalParamEntryDefaults(globalParamsEntry *GlobalParamsEntry, params *DeSoParams) *GlobalParamsEntry { // Merge the input GlobalParamsEntry with the default param values. if globalParamsEntry == nil { // This could happen before we have any SnapshotGlobalParamEntries set. @@ -116,50 +116,50 @@ func _mergeGlobalParamEntryDefaults(bav *UtxoView, globalParamsEntry *GlobalPara // Merge the default values. if globalParamsEntryCopy.StakeLockupEpochDuration == 0 { - globalParamsEntryCopy.StakeLockupEpochDuration = bav.Params.DefaultStakeLockupEpochDuration + globalParamsEntryCopy.StakeLockupEpochDuration = params.DefaultStakeLockupEpochDuration } if globalParamsEntryCopy.ValidatorJailEpochDuration == 0 { - globalParamsEntryCopy.ValidatorJailEpochDuration = bav.Params.DefaultValidatorJailEpochDuration + globalParamsEntryCopy.ValidatorJailEpochDuration = params.DefaultValidatorJailEpochDuration } if globalParamsEntryCopy.LeaderScheduleMaxNumValidators == 0 { - globalParamsEntryCopy.LeaderScheduleMaxNumValidators = bav.Params.DefaultLeaderScheduleMaxNumValidators + globalParamsEntryCopy.LeaderScheduleMaxNumValidators = params.DefaultLeaderScheduleMaxNumValidators } if globalParamsEntryCopy.ValidatorSetMaxNumValidators == 0 { - globalParamsEntryCopy.ValidatorSetMaxNumValidators = bav.Params.DefaultValidatorSetMaxNumValidators + globalParamsEntryCopy.ValidatorSetMaxNumValidators = params.DefaultValidatorSetMaxNumValidators } if globalParamsEntryCopy.StakingRewardsMaxNumStakes == 0 { - globalParamsEntryCopy.StakingRewardsMaxNumStakes = bav.Params.DefaultStakingRewardsMaxNumStakes + globalParamsEntryCopy.StakingRewardsMaxNumStakes = 
params.DefaultStakingRewardsMaxNumStakes } if globalParamsEntryCopy.StakingRewardsAPYBasisPoints == 0 { - globalParamsEntryCopy.StakingRewardsAPYBasisPoints = bav.Params.DefaultStakingRewardsAPYBasisPoints + globalParamsEntryCopy.StakingRewardsAPYBasisPoints = params.DefaultStakingRewardsAPYBasisPoints } if globalParamsEntryCopy.EpochDurationNumBlocks == 0 { - globalParamsEntryCopy.EpochDurationNumBlocks = bav.Params.DefaultEpochDurationNumBlocks + globalParamsEntryCopy.EpochDurationNumBlocks = params.DefaultEpochDurationNumBlocks } if globalParamsEntryCopy.JailInactiveValidatorGracePeriodEpochs == 0 { - globalParamsEntryCopy.JailInactiveValidatorGracePeriodEpochs = bav.Params.DefaultJailInactiveValidatorGracePeriodEpochs + globalParamsEntryCopy.JailInactiveValidatorGracePeriodEpochs = params.DefaultJailInactiveValidatorGracePeriodEpochs } if globalParamsEntryCopy.FeeBucketGrowthRateBasisPoints == 0 { - globalParamsEntryCopy.FeeBucketGrowthRateBasisPoints = bav.Params.DefaultFeeBucketGrowthRateBasisPoints + globalParamsEntryCopy.FeeBucketGrowthRateBasisPoints = params.DefaultFeeBucketGrowthRateBasisPoints } if globalParamsEntryCopy.FailingTransactionBMFMultiplierBasisPoints == 0 { - globalParamsEntryCopy.FailingTransactionBMFMultiplierBasisPoints = bav.Params.DefaultFailingTransactionBMFMultiplierBasisPoints + globalParamsEntryCopy.FailingTransactionBMFMultiplierBasisPoints = params.DefaultFailingTransactionBMFMultiplierBasisPoints } if globalParamsEntryCopy.MaximumVestedIntersectionsPerLockupTransaction == 0 { globalParamsEntryCopy.MaximumVestedIntersectionsPerLockupTransaction = - bav.Params.DefaultMaximumVestedIntersectionsPerLockupTransaction + params.DefaultMaximumVestedIntersectionsPerLockupTransaction } if globalParamsEntryCopy.BlockTimestampDriftNanoSecs == 0 { - globalParamsEntryCopy.BlockTimestampDriftNanoSecs = bav.Params.DefaultBlockTimestampDriftNanoSecs + globalParamsEntryCopy.BlockTimestampDriftNanoSecs = params.DefaultBlockTimestampDriftNanoSecs } 
if globalParamsEntryCopy.MempoolMaxSizeBytes == 0 { - globalParamsEntryCopy.MempoolMaxSizeBytes = bav.Params.DefaultMempoolMaxSizeBytes + globalParamsEntryCopy.MempoolMaxSizeBytes = params.DefaultMempoolMaxSizeBytes } if globalParamsEntryCopy.MempoolFeeEstimatorNumMempoolBlocks == 0 { - globalParamsEntryCopy.MempoolFeeEstimatorNumMempoolBlocks = bav.Params.DefaultMempoolFeeEstimatorNumMempoolBlocks + globalParamsEntryCopy.MempoolFeeEstimatorNumMempoolBlocks = params.DefaultMempoolFeeEstimatorNumMempoolBlocks } if globalParamsEntryCopy.MempoolFeeEstimatorNumPastBlocks == 0 { - globalParamsEntryCopy.MempoolFeeEstimatorNumPastBlocks = bav.Params.DefaultMempoolFeeEstimatorNumPastBlocks + globalParamsEntryCopy.MempoolFeeEstimatorNumPastBlocks = params.DefaultMempoolFeeEstimatorNumPastBlocks } // Return the merged result. From 80b5d89b9d4731ac286851b59f722f1d32c75d58 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 20 Feb 2024 13:26:30 -0500 Subject: [PATCH 500/762] Add stubbed txindex metadata structs for lockups (#1049) --- lib/block_view_lockups.go | 82 +++++++++++++++++++++++++++++++++++ lib/block_view_types.go | 84 ++++++++++++++++++++---------------- lib/db_utils.go | 90 +++++++++++++++++++++++++-------------- 3 files changed, 187 insertions(+), 69 deletions(-) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index c2651d428..9051421c2 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -3176,3 +3176,85 @@ func _checkLimitKeyAndUpdateDerivedKeyEntry(key LockupLimitKey, derivedKeyEntry // Return true because we found the key and decremented the remaining operations return true } + +// TXINDEX STUBS + +// TYPES: CoinLockupTxindexMetadata +type CoinLockupTxindexMetadata struct { +} + +func (txindexMetadata *CoinLockupTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + return []byte{} +} + +func (txindexMetadata *CoinLockupTxindexMetadata) 
RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + return nil +} + +func (txindexMetadata *CoinLockupTxindexMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (txindexMetadata *CoinLockupTxindexMetadata) GetEncoderType() EncoderType { + return EncoderTypeCoinLockupTxindexMetadata +} + +// TYPES: UpdateCoinLockupParamsTxindexMetadata +type UpdateCoinLockupParamsTxindexMetadata struct { +} + +func (txindexMetadata *UpdateCoinLockupParamsTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + return []byte{} +} + +func (txindexMetadata *UpdateCoinLockupParamsTxindexMetadata) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + return nil +} + +func (txindexMetadata *UpdateCoinLockupParamsTxindexMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (txindexMetadata *UpdateCoinLockupParamsTxindexMetadata) GetEncoderType() EncoderType { + return EncoderTypeUpdateCoinLockupParamsTxindexMetadata +} + +// TYPES: CoinLockupTransferTxindexMetadata +type CoinLockupTransferTxindexMetadata struct { +} + +func (txindexMetadata *CoinLockupTransferTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + return []byte{} +} + +func (txindexMetadata *CoinLockupTransferTxindexMetadata) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + return nil +} + +func (txindexMetadata *CoinLockupTransferTxindexMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (txindexMetadata *CoinLockupTransferTxindexMetadata) GetEncoderType() EncoderType { + return EncoderTypeCoinLockupTransferTxindexMetadata +} + +// TYPES: CoinUnlockTxindexMetadata +type CoinUnlockTxindexMetadata struct { +} + +func (txindexMetadata *CoinUnlockTxindexMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + return []byte{} +} + +func (txindexMetadata *CoinUnlockTxindexMetadata) 
RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + return nil +} + +func (txindexMetadata *CoinUnlockTxindexMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (txindexMetadata *CoinUnlockTxindexMetadata) GetEncoderType() EncoderType { + return EncoderTypeCoinUnlockTxindexMetadata +} diff --git a/lib/block_view_types.go b/lib/block_view_types.go index c0b4a9ad4..04d52e1ae 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -139,42 +139,46 @@ const ( // Txindex encoder types. const ( - EncoderTypeTransactionMetadata EncoderType = 1000000 - EncoderTypeBasicTransferTxindexMetadata EncoderType = 1000001 - EncoderTypeBitcoinExchangeTxindexMetadata EncoderType = 1000002 - EncoderTypeCreatorCoinTxindexMetadata EncoderType = 1000003 - EncoderTypeCreatorCoinTransferTxindexMetadata EncoderType = 1000004 - EncoderTypeDAOCoinTransferTxindexMetadata EncoderType = 1000005 - EncoderTypeFilledDAOCoinLimitOrderMetadata EncoderType = 1000006 - EncoderTypeDAOCoinLimitOrderTxindexMetadata EncoderType = 1000007 - EncoderTypeUpdateProfileTxindexMetadata EncoderType = 1000008 - EncoderTypeSubmitPostTxindexMetadata EncoderType = 1000009 - EncoderTypeLikeTxindexMetadata EncoderType = 1000010 - EncoderTypeFollowTxindexMetadata EncoderType = 1000011 - EncoderTypePrivateMessageTxindexMetadata EncoderType = 1000012 - EncoderTypeSwapIdentityTxindexMetadata EncoderType = 1000013 - EncoderTypeNFTRoyaltiesMetadata EncoderType = 1000014 - EncoderTypeNFTBidTxindexMetadata EncoderType = 1000015 - EncoderTypeAcceptNFTBidTxindexMetadata EncoderType = 1000016 - EncoderTypeNFTTransferTxindexMetadata EncoderType = 1000017 - EncoderTypeAcceptNFTTransferTxindexMetadata EncoderType = 1000018 - EncoderTypeBurnNFTTxindexMetadata EncoderType = 1000019 - EncoderTypeDAOCoinTxindexMetadata EncoderType = 1000020 - EncoderTypeCreateNFTTxindexMetadata EncoderType = 1000021 - EncoderTypeUpdateNFTTxindexMetadata EncoderType = 1000022 - 
EncoderTypeCreateUserAssociationTxindexMetadata EncoderType = 1000023 - EncoderTypeDeleteUserAssociationTxindexMetadata EncoderType = 1000024 - EncoderTypeCreatePostAssociationTxindexMetadata EncoderType = 1000025 - EncoderTypeDeletePostAssociationTxindexMetadata EncoderType = 1000026 - EncoderTypeAccessGroupTxindexMetadata EncoderType = 1000027 - EncoderTypeAccessGroupMembersTxindexMetadata EncoderType = 1000028 - EncoderTypeNewMessageTxindexMetadata EncoderType = 1000029 - EncoderTypeRegisterAsValidatorTxindexMetadata EncoderType = 1000030 - EncoderTypeUnregisterAsValidatorTxindexMetadata EncoderType = 1000031 - EncoderTypeStakeTxindexMetadata EncoderType = 1000032 - EncoderTypeUnstakeTxindexMetadata EncoderType = 1000033 - EncoderTypeUnlockStakeTxindexMetadata EncoderType = 1000034 - EncoderTypeUnjailValidatorTxindexMetadata EncoderType = 1000035 + EncoderTypeTransactionMetadata EncoderType = 1000000 + EncoderTypeBasicTransferTxindexMetadata EncoderType = 1000001 + EncoderTypeBitcoinExchangeTxindexMetadata EncoderType = 1000002 + EncoderTypeCreatorCoinTxindexMetadata EncoderType = 1000003 + EncoderTypeCreatorCoinTransferTxindexMetadata EncoderType = 1000004 + EncoderTypeDAOCoinTransferTxindexMetadata EncoderType = 1000005 + EncoderTypeFilledDAOCoinLimitOrderMetadata EncoderType = 1000006 + EncoderTypeDAOCoinLimitOrderTxindexMetadata EncoderType = 1000007 + EncoderTypeUpdateProfileTxindexMetadata EncoderType = 1000008 + EncoderTypeSubmitPostTxindexMetadata EncoderType = 1000009 + EncoderTypeLikeTxindexMetadata EncoderType = 1000010 + EncoderTypeFollowTxindexMetadata EncoderType = 1000011 + EncoderTypePrivateMessageTxindexMetadata EncoderType = 1000012 + EncoderTypeSwapIdentityTxindexMetadata EncoderType = 1000013 + EncoderTypeNFTRoyaltiesMetadata EncoderType = 1000014 + EncoderTypeNFTBidTxindexMetadata EncoderType = 1000015 + EncoderTypeAcceptNFTBidTxindexMetadata EncoderType = 1000016 + EncoderTypeNFTTransferTxindexMetadata EncoderType = 1000017 + 
EncoderTypeAcceptNFTTransferTxindexMetadata EncoderType = 1000018 + EncoderTypeBurnNFTTxindexMetadata EncoderType = 1000019 + EncoderTypeDAOCoinTxindexMetadata EncoderType = 1000020 + EncoderTypeCreateNFTTxindexMetadata EncoderType = 1000021 + EncoderTypeUpdateNFTTxindexMetadata EncoderType = 1000022 + EncoderTypeCreateUserAssociationTxindexMetadata EncoderType = 1000023 + EncoderTypeDeleteUserAssociationTxindexMetadata EncoderType = 1000024 + EncoderTypeCreatePostAssociationTxindexMetadata EncoderType = 1000025 + EncoderTypeDeletePostAssociationTxindexMetadata EncoderType = 1000026 + EncoderTypeAccessGroupTxindexMetadata EncoderType = 1000027 + EncoderTypeAccessGroupMembersTxindexMetadata EncoderType = 1000028 + EncoderTypeNewMessageTxindexMetadata EncoderType = 1000029 + EncoderTypeRegisterAsValidatorTxindexMetadata EncoderType = 1000030 + EncoderTypeUnregisterAsValidatorTxindexMetadata EncoderType = 1000031 + EncoderTypeStakeTxindexMetadata EncoderType = 1000032 + EncoderTypeUnstakeTxindexMetadata EncoderType = 1000033 + EncoderTypeUnlockStakeTxindexMetadata EncoderType = 1000034 + EncoderTypeUnjailValidatorTxindexMetadata EncoderType = 1000035 + EncoderTypeCoinLockupTxindexMetadata EncoderType = 1000036 + EncoderTypeUpdateCoinLockupParamsTxindexMetadata EncoderType = 1000037 + EncoderTypeCoinLockupTransferTxindexMetadata EncoderType = 1000038 + EncoderTypeCoinUnlockTxindexMetadata EncoderType = 1000039 // EncoderTypeEndTxIndex encoder type should be at the end and is used for automated tests. 
EncoderTypeEndTxIndex EncoderType = 1000036 @@ -364,6 +368,14 @@ func (encoderType EncoderType) New() DeSoEncoder { return &UnlockStakeTxindexMetadata{} case EncoderTypeUnjailValidatorTxindexMetadata: return &UnjailValidatorTxindexMetadata{} + case EncoderTypeCoinLockupTxindexMetadata: + return &CoinLockupTxindexMetadata{} + case EncoderTypeUpdateCoinLockupParamsTxindexMetadata: + return &UpdateCoinLockupParamsTxindexMetadata{} + case EncoderTypeCoinLockupTransferTxindexMetadata: + return &CoinLockupTransferTxindexMetadata{} + case EncoderTypeCoinUnlockTxindexMetadata: + return &CoinUnlockTxindexMetadata{} default: return nil } diff --git a/lib/db_utils.go b/lib/db_utils.go index 8fef569b0..e0aa9f9de 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -7001,39 +7001,43 @@ type TransactionMetadata struct { // when looking up output amounts TxnOutputs []*DeSoOutput - BasicTransferTxindexMetadata *BasicTransferTxindexMetadata `json:",omitempty"` - BitcoinExchangeTxindexMetadata *BitcoinExchangeTxindexMetadata `json:",omitempty"` - CreatorCoinTxindexMetadata *CreatorCoinTxindexMetadata `json:",omitempty"` - CreatorCoinTransferTxindexMetadata *CreatorCoinTransferTxindexMetadata `json:",omitempty"` - UpdateProfileTxindexMetadata *UpdateProfileTxindexMetadata `json:",omitempty"` - SubmitPostTxindexMetadata *SubmitPostTxindexMetadata `json:",omitempty"` - LikeTxindexMetadata *LikeTxindexMetadata `json:",omitempty"` - FollowTxindexMetadata *FollowTxindexMetadata `json:",omitempty"` - PrivateMessageTxindexMetadata *PrivateMessageTxindexMetadata `json:",omitempty"` - SwapIdentityTxindexMetadata *SwapIdentityTxindexMetadata `json:",omitempty"` - NFTBidTxindexMetadata *NFTBidTxindexMetadata `json:",omitempty"` - AcceptNFTBidTxindexMetadata *AcceptNFTBidTxindexMetadata `json:",omitempty"` - NFTTransferTxindexMetadata *NFTTransferTxindexMetadata `json:",omitempty"` - AcceptNFTTransferTxindexMetadata *AcceptNFTTransferTxindexMetadata `json:",omitempty"` - BurnNFTTxindexMetadata 
*BurnNFTTxindexMetadata `json:",omitempty"` - DAOCoinTxindexMetadata *DAOCoinTxindexMetadata `json:",omitempty"` - DAOCoinTransferTxindexMetadata *DAOCoinTransferTxindexMetadata `json:",omitempty"` - CreateNFTTxindexMetadata *CreateNFTTxindexMetadata `json:",omitempty"` - UpdateNFTTxindexMetadata *UpdateNFTTxindexMetadata `json:",omitempty"` - DAOCoinLimitOrderTxindexMetadata *DAOCoinLimitOrderTxindexMetadata `json:",omitempty"` - CreateUserAssociationTxindexMetadata *CreateUserAssociationTxindexMetadata `json:",omitempty"` - DeleteUserAssociationTxindexMetadata *DeleteUserAssociationTxindexMetadata `json:",omitempty"` - CreatePostAssociationTxindexMetadata *CreatePostAssociationTxindexMetadata `json:",omitempty"` - DeletePostAssociationTxindexMetadata *DeletePostAssociationTxindexMetadata `json:",omitempty"` - AccessGroupTxindexMetadata *AccessGroupTxindexMetadata `json:",omitempty"` - AccessGroupMembersTxindexMetadata *AccessGroupMembersTxindexMetadata `json:",omitempty"` - NewMessageTxindexMetadata *NewMessageTxindexMetadata `json:",omitempty"` - RegisterAsValidatorTxindexMetadata *RegisterAsValidatorTxindexMetadata `json:",omitempty"` - UnregisterAsValidatorTxindexMetadata *UnregisterAsValidatorTxindexMetadata `json:",omitempty"` - StakeTxindexMetadata *StakeTxindexMetadata `json:",omitempty"` - UnstakeTxindexMetadata *UnstakeTxindexMetadata `json:",omitempty"` - UnlockStakeTxindexMetadata *UnlockStakeTxindexMetadata `json:",omitempty"` - UnjailValidatorTxindexMetadata *UnjailValidatorTxindexMetadata `json:",omitempty"` + BasicTransferTxindexMetadata *BasicTransferTxindexMetadata `json:",omitempty"` + BitcoinExchangeTxindexMetadata *BitcoinExchangeTxindexMetadata `json:",omitempty"` + CreatorCoinTxindexMetadata *CreatorCoinTxindexMetadata `json:",omitempty"` + CreatorCoinTransferTxindexMetadata *CreatorCoinTransferTxindexMetadata `json:",omitempty"` + UpdateProfileTxindexMetadata *UpdateProfileTxindexMetadata `json:",omitempty"` + SubmitPostTxindexMetadata 
*SubmitPostTxindexMetadata `json:",omitempty"` + LikeTxindexMetadata *LikeTxindexMetadata `json:",omitempty"` + FollowTxindexMetadata *FollowTxindexMetadata `json:",omitempty"` + PrivateMessageTxindexMetadata *PrivateMessageTxindexMetadata `json:",omitempty"` + SwapIdentityTxindexMetadata *SwapIdentityTxindexMetadata `json:",omitempty"` + NFTBidTxindexMetadata *NFTBidTxindexMetadata `json:",omitempty"` + AcceptNFTBidTxindexMetadata *AcceptNFTBidTxindexMetadata `json:",omitempty"` + NFTTransferTxindexMetadata *NFTTransferTxindexMetadata `json:",omitempty"` + AcceptNFTTransferTxindexMetadata *AcceptNFTTransferTxindexMetadata `json:",omitempty"` + BurnNFTTxindexMetadata *BurnNFTTxindexMetadata `json:",omitempty"` + DAOCoinTxindexMetadata *DAOCoinTxindexMetadata `json:",omitempty"` + DAOCoinTransferTxindexMetadata *DAOCoinTransferTxindexMetadata `json:",omitempty"` + CreateNFTTxindexMetadata *CreateNFTTxindexMetadata `json:",omitempty"` + UpdateNFTTxindexMetadata *UpdateNFTTxindexMetadata `json:",omitempty"` + DAOCoinLimitOrderTxindexMetadata *DAOCoinLimitOrderTxindexMetadata `json:",omitempty"` + CreateUserAssociationTxindexMetadata *CreateUserAssociationTxindexMetadata `json:",omitempty"` + DeleteUserAssociationTxindexMetadata *DeleteUserAssociationTxindexMetadata `json:",omitempty"` + CreatePostAssociationTxindexMetadata *CreatePostAssociationTxindexMetadata `json:",omitempty"` + DeletePostAssociationTxindexMetadata *DeletePostAssociationTxindexMetadata `json:",omitempty"` + AccessGroupTxindexMetadata *AccessGroupTxindexMetadata `json:",omitempty"` + AccessGroupMembersTxindexMetadata *AccessGroupMembersTxindexMetadata `json:",omitempty"` + NewMessageTxindexMetadata *NewMessageTxindexMetadata `json:",omitempty"` + RegisterAsValidatorTxindexMetadata *RegisterAsValidatorTxindexMetadata `json:",omitempty"` + UnregisterAsValidatorTxindexMetadata *UnregisterAsValidatorTxindexMetadata `json:",omitempty"` + StakeTxindexMetadata *StakeTxindexMetadata `json:",omitempty"` + 
UnstakeTxindexMetadata *UnstakeTxindexMetadata `json:",omitempty"` + UnlockStakeTxindexMetadata *UnlockStakeTxindexMetadata `json:",omitempty"` + UnjailValidatorTxindexMetadata *UnjailValidatorTxindexMetadata `json:",omitempty"` + CoinLockupTxindexMetadata *CoinLockupTxindexMetadata `json:",omitempty"` + UpdateCoinLockupParamsTxindexMetadata *UpdateCoinLockupParamsTxindexMetadata `json:",omitempty"` + CoinLockupTransferTxindexMetadata *CoinLockupTransferTxindexMetadata `json:",omitempty"` + CoinUnlockTxindexMetadata *CoinUnlockTxindexMetadata `json:",omitempty"` } func (txnMeta *TransactionMetadata) GetEncoderForTxType(txnType TxnType) DeSoEncoder { @@ -7092,6 +7096,26 @@ func (txnMeta *TransactionMetadata) GetEncoderForTxType(txnType TxnType) DeSoEnc return txnMeta.AccessGroupMembersTxindexMetadata case TxnTypeNewMessage: return txnMeta.NewMessageTxindexMetadata + case TxnTypeRegisterAsValidator: + return txnMeta.RegisterAsValidatorTxindexMetadata + case TxnTypeUnregisterAsValidator: + return txnMeta.UnregisterAsValidatorTxindexMetadata + case TxnTypeStake: + return txnMeta.StakeTxindexMetadata + case TxnTypeUnstake: + return txnMeta.UnstakeTxindexMetadata + case TxnTypeUnlockStake: + return txnMeta.UnlockStakeTxindexMetadata + case TxnTypeUnjailValidator: + return txnMeta.UnjailValidatorTxindexMetadata + case TxnTypeCoinLockup: + return txnMeta.CoinLockupTxindexMetadata + case TxnTypeUpdateCoinLockupParams: + return txnMeta.UpdateCoinLockupParamsTxindexMetadata + case TxnTypeCoinLockupTransfer: + return txnMeta.CoinLockupTransferTxindexMetadata + case TxnTypeCoinUnlock: + return txnMeta.CoinUnlockTxindexMetadata default: return nil } From 5b0eb4045e31b6ea48a1c9fe9469f176e8b78d7b Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 20 Feb 2024 13:27:32 -0500 Subject: [PATCH 501/762] All pos-validator-seed flag to be a hex string (#1046) --- cmd/run.go | 5 +++-- lib/bls_keystore.go | 26 +++++++++++++++++--------- 
lib/bls_keystore_test.go | 5 +++++ 3 files changed, 25 insertions(+), 11 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index 8b8965d6a..c5ebe87a8 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -83,8 +83,9 @@ func SetupRunFlags(cmd *cobra.Command) { download historical blocks. Can only be set if HyperSync is true.`) // PoS Validator - cmd.PersistentFlags().String("pos-validator-seed", "", "A BIP39 seed phrase used to generate the private key of the "+ - "Proof of Stake validator. Setting this flag automatically makes the node run as a Proof of Stake Validator.") + cmd.PersistentFlags().String("pos-validator-seed", "", "A BIP39 seed phrase or seed hex used to generate the "+ + "private key of the Proof of Stake validator. Setting this flag automatically makes the node run as a Proof "+ + "of Stake Validator.") cmd.PersistentFlags().Uint64("pos-block-production-interval-milliseconds", 1500, "The interval in milliseconds "+ "between blocks produced by the Proof of Stake block proposer. The default value is 1.5 seconds, or 1500 milliseconds.") cmd.PersistentFlags().Uint64("pos-timeout-base-duration-milliseconds", 30000, "The base duration in milliseconds "+ diff --git a/lib/bls_keystore.go b/lib/bls_keystore.go index cd0ec28f8..106348e4f 100644 --- a/lib/bls_keystore.go +++ b/lib/bls_keystore.go @@ -5,6 +5,7 @@ import ( "github.com/deso-protocol/core/consensus" "github.com/pkg/errors" "github.com/tyler-smith/go-bip39" + "strings" ) // BLSSigner is a wrapper for the bls.PrivateKey type, which abstracts away the private key @@ -48,19 +49,26 @@ type BLSKeystore struct { signer *BLSSigner } -func NewBLSKeystore(seedPhrase string) (*BLSKeystore, error) { - seedBytes, err := bip39.NewSeedWithErrorChecking(seedPhrase, "") - if err != nil { - return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating seed bytes from seed phrase") - } - +// NewBLSKeystore creates a new BLSKeystore from either a seed phrase or a seed hex. 
+// If the seed begins with 0x, it is assumed to be a hex seed. Otherwise, it is assumed to be a seed phrase. +func NewBLSKeystore(seed string) (*BLSKeystore, error) { privateKey, err := bls.NewPrivateKey() if err != nil { return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating private key from seed phrase") } - - if _, err = privateKey.FromSeed(seedBytes); err != nil { - return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating private key from seed phrase") + if strings.HasPrefix(seed, "0x") { + if _, err = privateKey.FromString(seed); err != nil { + return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating private key from seed hex") + } + } else { + var seedBytes []byte + seedBytes, err = bip39.NewSeedWithErrorChecking(seed, "") + if err != nil { + return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating seed bytes from seed phrase") + } + if _, err = privateKey.FromSeed(seedBytes); err != nil { + return nil, errors.Wrapf(err, "NewBLSKeystore: Problem generating private key from seed phrase") + } } signer, err := NewBLSSigner(privateKey) diff --git a/lib/bls_keystore_test.go b/lib/bls_keystore_test.go index 6fee62124..0f424e710 100644 --- a/lib/bls_keystore_test.go +++ b/lib/bls_keystore_test.go @@ -32,6 +32,11 @@ func TestNewBLSKeystore(t *testing.T) { { keystore, err := NewBLSKeystore("vapor educate wood post fiber proof cannon chunk luggage hedgehog merit dove network lemon scorpion job law more salt market excuse auction refuse apart") require.NoError(t, err) + require.Equal(t, keystore.GetSigner().privateKey.ToString(), "0x13b5febb384a3d3dec5c579724872607cd0ddb97adef592efaf144f6d25a70d7") + } + // Test valid seed hex + { + keystore, err := NewBLSKeystore("0x13b5febb384a3d3dec5c579724872607cd0ddb97adef592efaf144f6d25a70d7") require.NoError(t, err) require.Equal(t, keystore.GetSigner().privateKey.ToString(), "0x13b5febb384a3d3dec5c579724872607cd0ddb97adef592efaf144f6d25a70d7") } From 69655543ed7dd8dc51d7db83f29280ea23c75d96 
Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 20 Feb 2024 15:25:44 -0500 Subject: [PATCH 502/762] Remove Noisy Warning Message in TransactionRegister (#1050) --- lib/pos_transaction_register.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index 8a89fc8db..99cf30134 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -59,11 +59,6 @@ func (tr *TransactionRegister) Init(globalParams *GlobalParamsEntry) { minNetworkFee, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() if !_isValidMinimumFeeAndMultiplier(minNetworkFee, bucketMultiplier) { minNetworkFee, bucketMultiplier = _getFallbackSafeMinimumFeeAndMultiplier() - glog.Warningf( - "TransactionRegister.Init: Invalid minimumNetworkFeeNanosPerKB or feeBucketMultiplier. "+ - "Using fallback values: minimumNetworkFeeNanosPerKB: %v, feeBucketMultiplier: %v", - minNetworkFee, bucketMultiplier, - ) } tr.minimumNetworkFeeNanosPerKB = minNetworkFee tr.feeBucketGrowthRateBasisPoints = bucketMultiplier From ed997c961c5006a12d288e5d9042c6372b55d7e8 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 20 Feb 2024 15:27:30 -0500 Subject: [PATCH 503/762] Remove ECDSA PublicKey From Vote and Timeout Messages (#1051) These public key fields are redundant. They can be verified through the on-chain bls.PublicKey -> PKID mapping on receipt. --- lib/pos_network.go | 33 --------------------------------- lib/pos_network_test.go | 8 ++------ 2 files changed, 2 insertions(+), 39 deletions(-) diff --git a/lib/pos_network.go b/lib/pos_network.go index 868d302f6..891315937 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -26,11 +26,6 @@ type MsgDeSoValidatorVote struct { // encode and decode the message. 
MsgVersion MsgValidatorVoteVersion - // The ECDSA public key for the validator who constructed this vote message. - // Given the validator's ECDSA public key, we can look up their Validator PKID - // and their stake in consensus. This allows us to verify that the vote message - // was sent by a registered validator. - PublicKey *PublicKey // The BLS voting public key for the validator who constructed this vote message. // The BLS public key is included in the vote message because it allows us to // easily verify if the BLS VotePartialSignature is correctly formed, without having @@ -64,12 +59,6 @@ func (msg *MsgDeSoValidatorVote) ToBytes(bool) ([]byte, error) { // MsgVersion retBytes = append(retBytes, msg.MsgVersion) - // PublicKey - if msg.PublicKey == nil { - return nil, errors.New("MsgDeSoValidatorVote.ToBytes: PublicKey must not be nil") - } - retBytes = append(retBytes, msg.PublicKey.ToBytes()...) - // VotingPublicKey if msg.VotingPublicKey == nil { return nil, errors.New("MsgDeSoValidatorVote.ToBytes: VotingPublicKey must not be nil") @@ -107,12 +96,6 @@ func (msg *MsgDeSoValidatorVote) FromBytes(data []byte) error { } msg.MsgVersion = msgVersion - // PublicKey - msg.PublicKey, err = ReadPublicKey(rr) - if err != nil { - return errors.Wrapf(err, "MsgDeSoValidatorVote.FromBytes: Error decoding PublicKey") - } - // VotingPublicKey msg.VotingPublicKey, err = DecodeBLSPublicKey(rr) if err != nil { @@ -153,10 +136,6 @@ type MsgDeSoValidatorTimeout struct { // encode and decode the message. MsgVersion MsgValidatorTimeoutVersion - // The ECDSA public key for the validator who constructed this timeout message. - // Given the validator's ECDSA public key, we can look up their Validator PKID. - // This allows us to verify that the timeout originated from a registered validator. - PublicKey *PublicKey // The BLS voting public key for the validator who constructed this timeout. 
The BLS // public key is included in the timeout message because it allows us to easily // verify that the BLS TimeoutPartialSignature is correctly formed, without having to @@ -193,12 +172,6 @@ func (msg *MsgDeSoValidatorTimeout) ToBytes(bool) ([]byte, error) { // MsgVersion retBytes = append(retBytes, msg.MsgVersion) - // PublicKey - if msg.PublicKey == nil { - return nil, errors.New("MsgDeSoValidatorTimeout.ToBytes: PublicKey must not be nil") - } - retBytes = append(retBytes, msg.PublicKey.ToBytes()...) - // VotingPublicKey if msg.VotingPublicKey == nil { return nil, errors.New("MsgDeSoValidatorTimeout.ToBytes: VotingPublicKey must not be nil") @@ -240,12 +213,6 @@ func (msg *MsgDeSoValidatorTimeout) FromBytes(data []byte) error { } msg.MsgVersion = msgVersion - // PublicKey - msg.PublicKey, err = ReadPublicKey(rr) - if err != nil { - return errors.Wrapf(err, "MsgDeSoValidatorTimeout.FromBytes: Error decoding PublicKey") - } - // VotingPublicKey msg.VotingPublicKey, err = DecodeBLSPublicKey(rr) if err != nil { diff --git a/lib/pos_network_test.go b/lib/pos_network_test.go index 0c093fb33..8e3f262e3 100644 --- a/lib/pos_network_test.go +++ b/lib/pos_network_test.go @@ -15,7 +15,6 @@ func TestValidatorVoteEncodeDecode(t *testing.T) { originalMsg := MsgDeSoValidatorVote{ MsgVersion: MsgValidatorVoteVersion0, - PublicKey: &PublicKey{}, VotingPublicKey: validatorVotingPublicKey, BlockHash: &BlockHash{}, ProposedInView: 9910, @@ -25,7 +24,7 @@ func TestValidatorVoteEncodeDecode(t *testing.T) { // Encode the message and verify the length is correct. encodedMsgBytes, err := originalMsg.ToBytes(false) require.NoError(t, err) - require.Equal(t, 214, len(encodedMsgBytes)) + require.Equal(t, 181, len(encodedMsgBytes)) // Decode the message. decodedMsg := &MsgDeSoValidatorVote{} @@ -34,7 +33,6 @@ func TestValidatorVoteEncodeDecode(t *testing.T) { // Check that the message bodies are the same. 
require.Equal(t, originalMsg.MsgVersion, decodedMsg.MsgVersion) - require.True(t, originalMsg.PublicKey.Equal(*decodedMsg.PublicKey)) require.True(t, originalMsg.VotingPublicKey.Eq(decodedMsg.VotingPublicKey)) require.Equal(t, originalMsg.BlockHash, decodedMsg.BlockHash) require.Equal(t, originalMsg.ProposedInView, decodedMsg.ProposedInView) @@ -52,7 +50,6 @@ func TestValidatorTimeoutEncodeDecode(t *testing.T) { originalMsg := MsgDeSoValidatorTimeout{ MsgVersion: MsgValidatorTimeoutVersion0, - PublicKey: &PublicKey{}, VotingPublicKey: validatorVotingPublicKey, TimedOutView: 999912, HighQC: &QuorumCertificate{ @@ -69,7 +66,7 @@ func TestValidatorTimeoutEncodeDecode(t *testing.T) { // Encode the message and verify the length is correct. encodedMsgBytes, err := originalMsg.ToBytes(false) require.NoError(t, err) - require.Equal(t, 269, len(encodedMsgBytes)) + require.Equal(t, 236, len(encodedMsgBytes)) // Decode the message. decodedMsg := &MsgDeSoValidatorTimeout{} @@ -78,7 +75,6 @@ func TestValidatorTimeoutEncodeDecode(t *testing.T) { // Check that the message bodies are the same. require.Equal(t, originalMsg.MsgVersion, decodedMsg.MsgVersion) - require.True(t, originalMsg.PublicKey.Equal(*decodedMsg.PublicKey)) require.True(t, originalMsg.VotingPublicKey.Eq(decodedMsg.VotingPublicKey)) require.Equal(t, originalMsg.TimedOutView, decodedMsg.TimedOutView) require.True(t, originalMsg.TimeoutPartialSignature.Eq(decodedMsg.TimeoutPartialSignature)) From 5659f731088092ee455bff2713645490a8bae6aa Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 20 Feb 2024 16:29:01 -0500 Subject: [PATCH 504/762] Return UnjailValidatorStateChangeMetadata from GetStateChangeMetadataFromOpType. 
Reduce jail grace period to 3 epochs on regtest (#1053) --- lib/constants.go | 2 ++ lib/state_change_metadata.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/lib/constants.go b/lib/constants.go index f95892f2e..9c92a47a3 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -831,6 +831,8 @@ func (params *DeSoParams) EnableRegtest() { // Set the PoS epoch duration to 10 blocks params.DefaultEpochDurationNumBlocks = 10 + // Set the PoS default jail inactive validator grace period epochs to 3. + params.DefaultJailInactiveValidatorGracePeriodEpochs = 3 // In regtest, we start all the fork heights at zero. These can be adjusted // for testing purposes to ensure that a transition does not cause issues. diff --git a/lib/state_change_metadata.go b/lib/state_change_metadata.go index 150174e6b..73f6cec10 100644 --- a/lib/state_change_metadata.go +++ b/lib/state_change_metadata.go @@ -59,6 +59,8 @@ func GetStateChangeMetadataFromOpType(opType OperationType) DeSoEncoder { return &DeletePostAssociationStateChangeMetadata{} case OperationTypeStakeDistributionRestake, OperationTypeStakeDistributionPayToBalance: return &StakeRewardStateChangeMetadata{} + case OperationTypeUnjailValidator: + return &UnjailValidatorStateChangeMetadata{} default: return nil } From 2a2376b8307b3ac0e247bfb23f1a6e723b64a5df Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 20 Feb 2024 16:49:14 -0500 Subject: [PATCH 505/762] Remove ProposerPublicKey From MsgDeSoHeader (#1052) * Remove ProposerPublicKey From MsgDeSoHeader * Address Nina's comments * Fix incorrect map lookup --- lib/block_view_test.go | 1 + lib/network.go | 18 --------- lib/network_test.go | 3 +- lib/pos_block_producer.go | 4 +- lib/pos_block_producer_test.go | 1 - lib/pos_blockchain.go | 5 --- lib/pos_blockchain_test.go | 59 +++++------------------------ lib/pos_consensus.go | 2 - lib/pos_epoch_complete_hook_test.go | 4 ++ 9 files changed, 17 insertions(+), 80 deletions(-) diff 
--git a/lib/block_view_test.go b/lib/block_view_test.go index 7d911a215..0056fbbed 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -1144,6 +1144,7 @@ type TestMeta struct { posMempool *PosMempool posBlockProducer *PosBlockProducer pubKeyToBLSKeyMap map[string]*bls.PrivateKey + blsPubKeyToBLSKeyMap map[string]*bls.PrivateKey } func _executeAllTestRollbackAndFlush(testMeta *TestMeta) { diff --git a/lib/network.go b/lib/network.go index ccfc8dfee..efcc1772b 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1959,12 +1959,6 @@ type MsgDeSoHeader struct { // proposer. The full index is stored in the block to offload space complexity. TxnConnectStatusByIndexHash *BlockHash - // ProposerPublicKey is only used for Proof of Stake blocks, starting with MsgDeSoHeader - // version 2. For all earlier versions, this field will default to nil. - // - // The ECDSA public key of the validator who proposed this block. - ProposerPublicKey *PublicKey - // ProposerVotingPublicKey is only used for Proof of Stake blocks, starting with // MsgDeSoHeader version 2. For all earlier versions, this field will default to nil. // @@ -2187,12 +2181,6 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error } retBytes = append(retBytes, msg.TxnConnectStatusByIndexHash[:]...) - // ProposerPublicKey - if msg.ProposerPublicKey == nil { - return nil, fmt.Errorf("EncodeHeaderVersion2: ProposerPublicKey must be non-nil") - } - retBytes = append(retBytes, msg.ProposerPublicKey.ToBytes()...) 
- // ProposerVotingPublicKey if msg.ProposerVotingPublicKey == nil { return nil, fmt.Errorf("EncodeHeaderVersion2: ProposerVotingPublicKey must be non-nil") @@ -2409,12 +2397,6 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TxnConnectStatusByIndexHash") } - // ProposerPublicKey - retHeader.ProposerPublicKey, err = ReadPublicKey(rr) - if err != nil { - return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding ProposerPublicKey") - } - // ProposerVotingPublicKey retHeader.ProposerVotingPublicKey, err = DecodeBLSPublicKey(rr) if err != nil { diff --git a/lib/network_test.go b/lib/network_test.go index fbd9932e2..85dc3d85e 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -148,7 +148,6 @@ func createTestBlockHeaderVersion2(t *testing.T, includeTimeoutQC bool) *MsgDeSo Nonce: uint64(0), ExtraNonce: uint64(0), TxnConnectStatusByIndexHash: &testTxnConnectStatusByIndex, - ProposerPublicKey: NewPublicKey(pkForTesting1), ProposerVotingPublicKey: testBLSPublicKey, ProposerRandomSeedSignature: testBLSSignature, ProposedInView: uint64(1432101234), @@ -211,7 +210,7 @@ func TestHeaderConversionAndReadWriteMessage(t *testing.T) { require.NoError(err) require.Equal(hdrPayload, data) - require.Equalf(15, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), + require.Equalf(14, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), "Number of fields in HEADER message is different from expected. "+ "Did you add a new field? 
If so, make sure the serialization code "+ "works, add the new field to the test case, and fix this error.") diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 61aaac55b..d216a1153 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -1,10 +1,11 @@ package lib import ( - "github.com/btcsuite/btcd/wire" "math" "time" + "github.com/btcsuite/btcd/wire" + "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections/bitset" "github.com/pkg/errors" @@ -105,7 +106,6 @@ func (pbp *PosBlockProducer) createBlockTemplate(latestBlockView *UtxoView, newB block.Header.ProposedInView = view // Set the proposer information. - block.Header.ProposerPublicKey = pbp.proposerPublicKey block.Header.ProposerVotingPublicKey = pbp.proposerVotingPublicKey block.Header.ProposerRandomSeedSignature = proposerRandomSeedSignature diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index adc21e593..a5f90c519 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -74,7 +74,6 @@ func TestCreateBlockTemplate(t *testing.T) { require.Equal(true, blockTemplate.Header.TstampNanoSecs < time.Now().UnixNano()) require.Equal(blockTemplate.Header.Height, uint64(3)) require.Equal(blockTemplate.Header.ProposedInView, uint64(10)) - require.Equal(blockTemplate.Header.ProposerPublicKey, m0Pk) require.Equal(blockTemplate.Header.ProposerVotingPublicKey, pub) require.True(blockTemplate.Header.ProposerRandomSeedSignature.Eq(seedSignature)) require.Equal(blockTemplate.Header.TxnConnectStatusByIndexHash, HashBitset(blockTemplate.TxnConnectStatusByIndex)) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 2a7c9a263..65ade5b80 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -969,10 +969,6 @@ func (bc *Blockchain) isProperlyFormedBlockHeaderPoS(header *MsgDeSoHeader) erro return RuleErrorInvalidProposerVotingPublicKey } - if header.ProposerPublicKey == nil || 
header.ProposerPublicKey.IsZeroPublicKey() { - return RuleErrorInvalidProposerPublicKey - } - if header.ProposerRandomSeedSignature.IsEmpty() { return RuleErrorInvalidProposerRandomSeedSignature } @@ -1930,7 +1926,6 @@ const ( RuleErrorNilMerkleRoot RuleError = "RuleErrorNilMerkleRoot" RuleErrorInvalidMerkleRoot RuleError = "RuleErrorInvalidMerkleRoot" RuleErrorInvalidProposerVotingPublicKey RuleError = "RuleErrorInvalidProposerVotingPublicKey" - RuleErrorInvalidProposerPublicKey RuleError = "RuleErrorInvalidProposerPublicKey" RuleErrorInvalidProposerRandomSeedSignature RuleError = "RuleErrorInvalidProposerRandomSeedSignature" RuleErrorInvalidPoSBlockHeight RuleError = "RuleErrorInvalidPoSBlockHeight" diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 61508d909..c6b808c18 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -75,7 +75,6 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * }, }, ProposerRandomSeedSignature: signature, - ProposerPublicKey: NewPublicKey(RandomBytes(33)), ProposerVotingPublicKey: randomBLSPrivateKey.PublicKey(), TransactionMerkleRoot: merkleRoot, TxnConnectStatusByIndexHash: HashBitset(bitset.NewBitset().Set(0, true)), @@ -213,17 +212,6 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * // Reset proposer voting public key block.Header.ProposerVotingPublicKey = randomBLSPrivateKey.PublicKey() - // Block must have valid proposer public key - block.Header.ProposerPublicKey = nil - err = bc.isProperlyFormedBlockPoS(block) - require.Equal(t, err, RuleErrorInvalidProposerPublicKey) - - block.Header.ProposerPublicKey = &ZeroPublicKey - err = bc.isProperlyFormedBlockPoS(block) - require.Equal(t, err, RuleErrorInvalidProposerPublicKey) - - block.Header.ProposerPublicKey = NewPublicKey(RandomBytes(33)) - // Block must have valid proposer random seed hash block.Header.ProposerRandomSeedSignature = nil err = 
bc.isProperlyFormedBlockPoS(block) @@ -386,7 +374,6 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { TstampNanoSecs: time.Now().UnixNano() - 10, Height: 2, ProposedInView: 1, - ProposerPublicKey: NewPublicKey(RandomBytes(33)), ProposerVotingPublicKey: blsPrivKey.PublicKey(), ProposerRandomSeedSignature: signature, ProposerVotePartialSignature: signature, @@ -481,7 +468,7 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { // If we're missing a field in the header, we should get an error // as we can't compute the hash. - block.Header.ProposerPublicKey = nil + block.Header.ProposerVotingPublicKey = nil _, err = bc.storeBlockInBlockIndex(block) require.Error(t, err) } @@ -705,13 +692,11 @@ func TestHasValidBlockProposerPoS(t *testing.T) { // First block, we should have the first leader. leader0PKID := leaderSchedule[0] leader0Entry := validatorPKIDToValidatorEntryMap[*leader0PKID] - leader0PublicKey := utxoView.GetPublicKeyForPKID(leader0PKID) dummyBlock := &MsgDeSoBlock{ Header: &MsgDeSoHeader{ PrevBlockHash: testMeta.chain.BlockTip().Hash, ProposedInView: viewNumber + 1, Height: blockHeight + 1, - ProposerPublicKey: NewPublicKey(leader0PublicKey), ProposerVotingPublicKey: leader0Entry.VotingPublicKey, }, } @@ -2062,13 +2047,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { require.NoError(t, err) require.True(t, currentEpochEntry.ContainsBlockHeight(12)) // Change the block proposer to some any other validator's public key. 
- wrongBlockProposer := NewPublicKey(m0PkBytes) - if wrongBlockProposer.Equal(*realBlock.Header.ProposerPublicKey) { - wrongBlockProposer = NewPublicKey(m1PkBytes) - } - wrongBlockProposerVotingPublicKey := testMeta.pubKeyToBLSKeyMap[Base58CheckEncode(wrongBlockProposer.ToBytes(), false, testMeta.chain.params)].PublicKey() - realBlock.Header.ProposerPublicKey = wrongBlockProposer - realBlock.Header.ProposerVotingPublicKey = wrongBlockProposerVotingPublicKey + realBlock.Header.ProposerVotingPublicKey = _generateRandomBLSPrivateKey(t).PublicKey() updateProposerVotePartialSignatureForBlock(testMeta, realBlock) // There should be no error, but the block should be marked as ValidateFailed. err = testMeta.chain.processOrphanBlockPoS(realBlock) @@ -2280,30 +2259,7 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { require.NoError(t, err) require.True(t, isValid) - realProposerPublicKey := realBlock.Header.ProposerPublicKey - realProposerPublicKeyBase58Check := Base58CheckEncode(realProposerPublicKey.ToBytes(), false, testMeta.params) - wrongProposerPublicKey := NewPublicKey(m1PkBytes) - if wrongProposerPublicKey.Equal(*realProposerPublicKey) { - wrongProposerPublicKey = NewPublicKey(m2PkBytes) - } - wrongProposerPublicKeyBase58Check := Base58CheckEncode(wrongProposerPublicKey.ToBytes(), false, testMeta.params) - // Using a different validator's public key as ProposerPublicKey should fail - { - realBlock.Header.ProposerPublicKey = wrongProposerPublicKey - isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) - require.NoError(t, err) - require.False(t, isValid) - } - // Using a non-validator's public key as ProposerPublicKey should fail. 
- { - realBlock.Header.ProposerPublicKey = NewPublicKey(paramUpdaterPkBytes) - isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) - require.NoError(t, err) - require.False(t, isValid) - // Reset the proposer public key - realBlock.Header.ProposerPublicKey = realProposerPublicKey - } - // If the block proposer's voting public key doesn't match the snapshot, it should fail. + // If the block proposer's voting public key doesn't match the signature, it should fail. realVotingPublicKey := realBlock.Header.ProposerVotingPublicKey { realBlock.Header.ProposerVotingPublicKey = _generateRandomBLSPrivateKey(t).PublicKey() @@ -2313,21 +2269,25 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { // Reset the proposer voting public key realBlock.Header.ProposerVotingPublicKey = realVotingPublicKey } + // Signature on incorrect payload should fail. { incorrectPayload := consensus.GetVoteSignaturePayload(13, testMeta.chain.BlockTip().Hash) - realBlock.Header.ProposerVotePartialSignature, err = testMeta.pubKeyToBLSKeyMap[realProposerPublicKeyBase58Check].Sign(incorrectPayload[:]) + realBlock.Header.ProposerVotePartialSignature, err = + testMeta.blsPubKeyToBLSKeyMap[realBlock.Header.ProposerVotingPublicKey.ToString()].Sign(incorrectPayload[:]) isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) require.NoError(t, err) require.False(t, isValid) } + // Signature on correct payload from wrong public key should fail. 
{ var realBlockHash *BlockHash realBlockHash, err = realBlock.Hash() require.NoError(t, err) correctPayload := consensus.GetVoteSignaturePayload(12, realBlockHash) - realBlock.Header.ProposerVotePartialSignature, err = testMeta.pubKeyToBLSKeyMap[wrongProposerPublicKeyBase58Check].Sign(correctPayload[:]) + wrongPrivateKey := _generateRandomBLSPrivateKey(t) + realBlock.Header.ProposerVotePartialSignature, err = wrongPrivateKey.Sign(correctPayload[:]) isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) require.NoError(t, err) require.False(t, isValid) @@ -2683,7 +2643,6 @@ func _getFullRealBlockTemplate( } blockTemplate.Header.ValidatorsTimeoutAggregateQC = timeoutQC } - blockTemplate.Header.ProposerPublicKey = NewPublicKey(leaderPublicKeyBytes) blockTemplate.Header.ProposerVotingPublicKey = leaderVotingPrivateKey.PublicKey() // Ugh we need to adjust the timestamp. blockTemplate.Header.TstampNanoSecs = time.Now().UnixNano() + blockTimestampOffset.Nanoseconds() diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 6c3382899..b05ed2fda 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -831,7 +831,6 @@ func (fc *FastHotStuffConsensus) logBlockProposal(block *MsgDeSoBlock, blockHash glog.Infof( "\n==================================== YOU PROPOSED A NEW FAST-HOTSTUFF BLOCK! 
===================================="+ "\n Timestamp: %d, View: %d, Height: %d, BlockHash: %v"+ - "\n Proposer PKey: %s"+ "\n Proposer Voting PKey: %s"+ "\n Proposer Signature: %s"+ "\n High QC View: %d, High QC Num Validators: %d, High QC BlockHash: %s"+ @@ -839,7 +838,6 @@ func (fc *FastHotStuffConsensus) logBlockProposal(block *MsgDeSoBlock, blockHash "\n Num Block Transactions: %d, Num Transactions Remaining In Mempool: %d"+ "\n=================================================================================================================", block.Header.GetTstampSecs(), block.Header.GetView(), block.Header.Height, blockHash.String(), - PkToString(block.Header.ProposerPublicKey.ToBytes(), fc.params), block.Header.ProposerVotingPublicKey.ToString(), block.Header.ProposerVotePartialSignature.ToString(), block.Header.GetQC().GetView(), block.Header.GetQC().GetAggregatedSignature().GetSignersList().Size(), block.Header.PrevBlockHash.String(), diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 6188a51d0..c574eef64 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -847,8 +847,12 @@ func _registerValidatorAndStake( if testMeta.pubKeyToBLSKeyMap == nil { testMeta.pubKeyToBLSKeyMap = make(map[string]*bls.PrivateKey) } + if testMeta.blsPubKeyToBLSKeyMap == nil { + testMeta.blsPubKeyToBLSKeyMap = make(map[string]*bls.PrivateKey) + } // Stash the voting private key in testmeta for convenience testMeta.pubKeyToBLSKeyMap[publicKey] = votingPrivateKey + testMeta.blsPubKeyToBLSKeyMap[votingPublicKey.ToString()] = votingPrivateKey registerMetadata := &RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, VotingPublicKey: votingPublicKey, From d2f2cdd7815c98f5dcdfc16a99b6a92c1f5653f9 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 20 Feb 2024 18:55:57 -0500 Subject: [PATCH 506/762] Update Active 
Validator Connections In PoS Consensus (#1054) --- lib/pos_blockchain.go | 5 +++ lib/pos_consensus.go | 75 +++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 78 insertions(+), 2 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 65ade5b80..ad1a8a172 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1722,6 +1722,11 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash) error { return nil } +// GetCommittedTipView builds a UtxoView to the committed tip. +func (bc *Blockchain) GetCommittedTipView() (*UtxoView, error) { + return NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, nil) +} + // GetUncommittedTipView builds a UtxoView to the uncommitted tip. func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index ebb6e3b86..376b6afa0 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -102,10 +102,22 @@ func (fc *FastHotStuffConsensus) Start() error { blockProductionInterval := time.Millisecond * time.Duration(fc.blockProductionIntervalMilliseconds) timeoutBaseDuration := time.Millisecond * time.Duration(fc.timeoutBaseDurationMilliseconds) - // Initialize and start the event loop - fc.fastHotStuffEventLoop.Init(blockProductionInterval, timeoutBaseDuration, genesisQC, tipBlockWithValidators[0], safeBlocksWithValidators) + // Initialize the event loop. This should never fail. If it does, we return the error to the caller. + // The caller handle the error and decide when to retry. 
+ err = fc.fastHotStuffEventLoop.Init(blockProductionInterval, timeoutBaseDuration, genesisQC, tipBlockWithValidators[0], safeBlocksWithValidators) + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error initializing FastHotStuffEventLoop: %v", err) + } + + // Start the event loop fc.fastHotStuffEventLoop.Start() + // Update the validator connections in the NetworkManager. This is a best effort operation. If it fails, + // we log the error and continue. + if err = fc.updateActiveValidatorConnections(); err != nil { + glog.Errorf("FastHotStuffConsensus.tryProcessBlockAsNewTip: Error updating validator connections: %v", err) + } + return nil } @@ -621,6 +633,12 @@ func (fc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ return nil, errors.Errorf("Error processing tip block locally: %v", err) } + // Update the validator connections in the NetworkManager. This is a best effort operation. If it fails, + // we log the error and continue. + if err = fc.updateActiveValidatorConnections(); err != nil { + glog.Errorf("FastHotStuffConsensus.tryProcessBlockAsNewTip: Error updating validator connections: %v", err) + } + // Happy path. The block was processed successfully and applied as the new tip. Nothing left to do. return nil, nil } @@ -794,6 +812,59 @@ func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView, previousBloc return blockProducer, nil } +func (fc *FastHotStuffConsensus) updateActiveValidatorConnections() error { + // Fetch the committed tip view. This ends up being as good as using the uncommitted tip view + // but without the overhead of connecting at least two blocks' worth of txns to the view. + utxoView, err := fc.blockchain.GetCommittedTipView() + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching uncommitted tip view: %v", err) + } + + // Get the current snapshot epoch number from the committed tip. 
This will be behind the uncommitted tip + // by up to two blocks, but this is fine since we fetch both the current epoch's and next epoch's validator + // sets. + snapshotEpochNumber, err := utxoView.GetCurrentSnapshotEpochNumber() + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching snapshot epoch number: %v", err) + } + + // Fetch the current snapshot epoch's validator set. + currentValidatorList, err := utxoView.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber) + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching validator list: %v", err) + } + + // Fetch the next snapshot epoch's validator set. This is useful when we're close to epoch transitions and + // allows us to pre-connect to the next epoch's validator set. In the event that there is a timeout at + // the epoch transition, reverting us to the previous epoch, this allows us to maintain connections to the + // next epoch's validators. + // + // TODO: There is an optimization we can add here to only fetch the next epoch's validator list once we're + // within 300 blocks of the next epoch. This way, we don't prematurely attempt connections to the next + // epoch's validators. In production, this will reduce the lead time with which we connect to the next epoch's + // validator set from 1 hour to 5 minutes. + nextValidatorList, err := utxoView.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber + 1) + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching validator list: %v", err) + } + + // Merge the current and next validator lists. Place the current epoch's validators last so that they override + // the next epoch's validators in the event of a conflict. + mergedValidatorList := append(nextValidatorList, currentValidatorList...) 
+ validatorsMap := collections.NewConcurrentMap[bls.SerializedPublicKey, consensus.Validator]() + for _, validator := range mergedValidatorList { + if validator.VotingPublicKey.Eq(fc.signer.GetPublicKey()) { + continue + } + validatorsMap.Set(validator.VotingPublicKey.Serialize(), validator) + } + + // Update the active validators map in the network manager + fc.networkManager.SetActiveValidatorsMap(validatorsMap) + + return nil +} + // Finds the epoch entry for the block and returns the epoch number. func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { for _, epochEntry := range epochEntries { From 50db24d40eff8decb94dd493dfa26d1baa7797ae Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 20 Feb 2024 19:12:41 -0500 Subject: [PATCH 507/762] Remove Rollbacks To bestHeaderChain When bestChain is Mutated (#1055) --- lib/pos_blockchain.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index ad1a8a172..aa9d6b79d 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -53,6 +53,12 @@ func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader) ( return false, false, errors.Wrapf(err, "processHeaderPoS: Problem hashing header") } + // If the incoming header is already part of the best header chain, then we can exit early. + // The header is not part of a fork, and is already an ancestor of the current header chain tip. + if _, isInBestHeaderChain := bc.bestHeaderChainMap[*headerHash]; isInBestHeaderChain { + return true, false, nil + } + // Validate the header and index it in the block index. 
blockNode, isOrphan, err := bc.validateAndIndexHeaderPoS(header, headerHash) if err != nil { @@ -1549,8 +1555,6 @@ func (bc *Blockchain) shouldReorg(blockNode *BlockNode, currentView uint64) bool func (bc *Blockchain) addTipBlockToBestChain(blockNode *BlockNode) { bc.bestChain = append(bc.bestChain, blockNode) bc.bestChainMap[*blockNode.Hash] = blockNode - bc.bestHeaderChain = append(bc.bestHeaderChain, blockNode) - bc.bestHeaderChainMap[*blockNode.Hash] = blockNode } // removeTipBlockFromBestChain removes the current tip from the best chain. It @@ -1562,8 +1566,6 @@ func (bc *Blockchain) removeTipBlockFromBestChain() *BlockNode { lastBlock := bc.bestChain[len(bc.bestChain)-1] delete(bc.bestChainMap, *lastBlock.Hash) bc.bestChain = bc.bestChain[:len(bc.bestChain)-1] - bc.bestHeaderChain = bc.bestHeaderChain[:len(bc.bestChain)] - delete(bc.bestHeaderChainMap, *lastBlock.Hash) return lastBlock } From e87c72b2527e4ddb1ee22277986bff611c63d0b3 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 20 Feb 2024 19:14:32 -0500 Subject: [PATCH 508/762] PoW Miner Event Loop Cleanup (#1056) --- lib/miner.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/miner.go b/lib/miner.go index 06272da42..d3bab8b62 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -198,10 +198,6 @@ func (desoMiner *DeSoMiner) MineAndProcessSingleBlock(threadIndex uint32, mempoo return nil, fmt.Errorf("DeSoMiner._startThread: _mineSingleBlock returned nil; should only happen if we're stopping") } - if desoMiner.params.IsPoSBlockHeight(blockToMine.Header.Height) { - return nil, fmt.Errorf("DeSoMiner._startThread: _mineSingleBlock returned a block that is past the Proof of Stake Cutover") - } - // Log information on the block we just mined. bestHash, _ := blockToMine.Hash() glog.Infof("================== YOU MINED A NEW BLOCK! 
================== Height: %d, Hash: %s", blockToMine.Header.Height, hex.EncodeToString(bestHash[:])) @@ -293,6 +289,12 @@ func (desoMiner *DeSoMiner) _startThread(threadIndex uint32) { continue } + // Exit if blockchain has connected a block at the final PoW block height. + currentTip := desoMiner.BlockProducer.chain.blockTip() + if currentTip.Header.Height >= desoMiner.params.GetFinalPoWBlockHeight() { + return + } + newBlock, err := desoMiner.MineAndProcessSingleBlock(threadIndex, nil /*mempoolToUpdate*/) if err != nil { glog.Errorf(err.Error()) From cdd15ae90f8a00b78771d94fb5719ccc9459b717 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:06:18 -0500 Subject: [PATCH 509/762] Set RandomSeedHash in ConnectBlock (#1059) --- lib/block_view.go | 11 +++++++++++ lib/pos_blockchain_test.go | 19 +++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/lib/block_view.go b/lib/block_view.go index b24641777..168f2a997 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4111,6 +4111,17 @@ func (bav *UtxoView) ConnectBlock( return nil, fmt.Errorf("ConnectBlock: Parent hash of block being connected does not match tip") } + // If the block height is past the Proof of Stake cutover, then we update the random seed hash. + // We do this first before connecting any transactions so that the latest seed hash is used for + // transactions that use on-chain randomness. 
+ if blockHeight >= uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + randomSeedHash, err := HashRandomSeedSignature(desoBlock.Header.ProposerRandomSeedSignature) + if err != nil { + return nil, errors.Wrapf(err, "ConnectBlock: Problem hashing random seed signature") + } + bav._setCurrentRandomSeedHash(randomSeedHash) + } + blockHeader := desoBlock.Header var blockRewardOutputPublicKey *btcec.PublicKey // If the block height is greater than or equal to the block reward patch height, diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index c6b808c18..8f40597e8 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -1669,6 +1669,22 @@ func _verifyCommitRuleHelper(testMeta *TestMeta, committedBlocks []*BlockHash, u } } +// _verifyRandomSeedHashHelper is a helper function that verifies the random seed hash is set +// after connecting a new tip block. +func _verifyRandomSeedHashHelper(testMeta *TestMeta, tipBlock *MsgDeSoBlock) { + // Get the utxo view for the tip block. + utxoView, err := testMeta.chain.GetUncommittedTipView() + require.NoError(testMeta.t, err) + // Verify that the random seed hash is set. + randomSeedHash, err := utxoView.GetCurrentRandomSeedHash() + require.NoError(testMeta.t, err) + + // Verify that the random seed hash is set based on the random seed signature on the block. + expectedRandomSeedHash, err := HashRandomSeedSignature(tipBlock.Header.ProposerRandomSeedSignature) + require.NoError(testMeta.t, err) + require.True(testMeta.t, expectedRandomSeedHash.Eq(randomSeedHash)) +} + func TestProcessHeaderPoS(t *testing.T) { // Initialize the chain and test metadata. 
testMeta := NewTestPoSBlockchainWithValidators(t) @@ -1746,6 +1762,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { blockHash1, err = realBlock.Hash() require.NoError(t, err) _verifyCommitRuleHelper(testMeta, []*BlockHash{}, []*BlockHash{blockHash1}, nil) + _verifyRandomSeedHashHelper(testMeta, realBlock) } var blockHash2, blockHash3, futureBlockHash *BlockHash @@ -1768,6 +1785,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.NoError(t, err) _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1}, []*BlockHash{blockHash2, blockHash3}, blockHash1) + _verifyRandomSeedHashHelper(testMeta, realBlock3) // Now let's try adding a block that has a timestamp too far in the future, and make sure it's stored. var futureBlock *MsgDeSoBlock @@ -1816,6 +1834,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { // We expect blockHash1 and blockHash2 to be committed, but blockHash3 and reorgBlockHash to not be committed. // Timeout block will no longer be in best chain, and will still be in an uncommitted state in the block index _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2}, []*BlockHash{blockHash3, reorgBlockHash}, blockHash2) + _verifyRandomSeedHashHelper(testMeta, reorgBlock) _, exists := testMeta.chain.bestChainMap[*timeoutBlockHash] require.False(t, exists) From 3dc58f301235dfc663577c8182b1f85aa0bf0f95 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:08:31 -0500 Subject: [PATCH 510/762] Add Nil Check for Block Producer's Validator Entry Lookup (#1060) --- lib/pos_consensus.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index b05ed2fda..99c582d71 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -757,6 +757,9 @@ func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView, previousBloc if err != nil { return nil, errors.Errorf("Error fetching validator entry 
for block producer: %v", err) } + if blockProducerValidatorEntry == nil { + return nil, errors.New("Error fetching validator entry for block producer") + } blockProducerPublicKeyBytes := bav.GetPublicKeyForPKID(blockProducerValidatorEntry.PKID) blockProducerPublicKey := NewPublicKey(blockProducerPublicKeyBytes) if blockProducerPublicKey == nil { From 4239b8e16fd94fd2d0675cbbf516c4a2e85c9902 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:37:06 -0500 Subject: [PATCH 511/762] Log Peer Validator Vote Messages (#1057) --- bls/signature.go | 16 ++++++++++++++++ bls/signature_no_relic.go | 8 ++++++++ lib/block_view.go | 11 +++++++++++ lib/pos_blockchain_test.go | 19 +++++++++++++++++++ lib/pos_consensus.go | 10 ++++++++-- lib/pos_network.go | 11 +++++++++++ 6 files changed, 73 insertions(+), 2 deletions(-) diff --git a/bls/signature.go b/bls/signature.go index 673f24307..83fdb7d2d 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -227,6 +227,14 @@ func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, erro return publicKey, err } +func (publicKey *PublicKey) ToAbbreviatedString() string { + str := publicKey.ToString() + if len(str) <= 8 { + return str + } + return str[:8] + "..." + str[len(str)-8:] +} + func (publicKey *PublicKey) MarshalJSON() ([]byte, error) { // This is called automatically by the JSON library when converting a // bls.PublicKey to JSON. This is useful when passing a bls.PublicKey @@ -324,6 +332,14 @@ func (signature *Signature) FromString(signatureString string) (*Signature, erro return signature, nil } +func (signature *Signature) ToAbbreviatedString() string { + str := signature.ToString() + if len(str) <= 8 { + return str + } + return str[:8] + "..." + str[len(str)-8:] +} + func (signature *Signature) MarshalJSON() ([]byte, error) { // This is called automatically by the JSON library when converting a // bls.Signature to JSON. 
This is useful when passing a bls.Signature diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go index b3d472739..0f3988253 100644 --- a/bls/signature_no_relic.go +++ b/bls/signature_no_relic.go @@ -84,6 +84,10 @@ func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, erro panic(BLSNoRelicError) } +func (publicKey *PublicKey) ToAbbreviatedString() string { + panic(BLSNoRelicError) +} + func (publicKey *PublicKey) MarshalJSON() ([]byte, error) { panic(BLSNoRelicError) } @@ -136,6 +140,10 @@ func (signature *Signature) FromString(signatureString string) (*Signature, erro panic(BLSNoRelicError) } +func (signature *Signature) ToAbbreviatedString() string { + panic(BLSNoRelicError) +} + func (signature *Signature) MarshalJSON() ([]byte, error) { panic(BLSNoRelicError) } diff --git a/lib/block_view.go b/lib/block_view.go index b24641777..168f2a997 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4111,6 +4111,17 @@ func (bav *UtxoView) ConnectBlock( return nil, fmt.Errorf("ConnectBlock: Parent hash of block being connected does not match tip") } + // If the block height is past the Proof of Stake cutover, then we update the random seed hash. + // We do this first before connecting any transactions so that the latest seed hash is used for + // transactions that use on-chain randomness. 
+ if blockHeight >= uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + randomSeedHash, err := HashRandomSeedSignature(desoBlock.Header.ProposerRandomSeedSignature) + if err != nil { + return nil, errors.Wrapf(err, "ConnectBlock: Problem hashing random seed signature") + } + bav._setCurrentRandomSeedHash(randomSeedHash) + } + blockHeader := desoBlock.Header var blockRewardOutputPublicKey *btcec.PublicKey // If the block height is greater than or equal to the block reward patch height, diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index c6b808c18..8f40597e8 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -1669,6 +1669,22 @@ func _verifyCommitRuleHelper(testMeta *TestMeta, committedBlocks []*BlockHash, u } } +// _verifyRandomSeedHashHelper is a helper function that verifies the random seed hash is set +// after connecting a new tip block. +func _verifyRandomSeedHashHelper(testMeta *TestMeta, tipBlock *MsgDeSoBlock) { + // Get the utxo view for the tip block. + utxoView, err := testMeta.chain.GetUncommittedTipView() + require.NoError(testMeta.t, err) + // Verify that the random seed hash is set. + randomSeedHash, err := utxoView.GetCurrentRandomSeedHash() + require.NoError(testMeta.t, err) + + // Verify that the random seed hash is set based on the random seed signature on the block. + expectedRandomSeedHash, err := HashRandomSeedSignature(tipBlock.Header.ProposerRandomSeedSignature) + require.NoError(testMeta.t, err) + require.True(testMeta.t, expectedRandomSeedHash.Eq(randomSeedHash)) +} + func TestProcessHeaderPoS(t *testing.T) { // Initialize the chain and test metadata. 
testMeta := NewTestPoSBlockchainWithValidators(t) @@ -1746,6 +1762,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { blockHash1, err = realBlock.Hash() require.NoError(t, err) _verifyCommitRuleHelper(testMeta, []*BlockHash{}, []*BlockHash{blockHash1}, nil) + _verifyRandomSeedHashHelper(testMeta, realBlock) } var blockHash2, blockHash3, futureBlockHash *BlockHash @@ -1768,6 +1785,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.NoError(t, err) _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1}, []*BlockHash{blockHash2, blockHash3}, blockHash1) + _verifyRandomSeedHashHelper(testMeta, realBlock3) // Now let's try adding a block that has a timestamp too far in the future, and make sure it's stored. var futureBlock *MsgDeSoBlock @@ -1816,6 +1834,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { // We expect blockHash1 and blockHash2 to be committed, but blockHash3 and reorgBlockHash to not be committed. // Timeout block will no longer be in best chain, and will still be in an uncommitted state in the block index _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2}, []*BlockHash{blockHash3, reorgBlockHash}, blockHash2) + _verifyRandomSeedHashHelper(testMeta, reorgBlock) _, exists := testMeta.chain.bestChainMap[*timeoutBlockHash] require.False(t, exists) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 376b6afa0..624efce88 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -384,6 +384,8 @@ func (fc *FastHotStuffConsensus) HandleLocalVoteEvent(event *consensus.FastHotSt // HandleValidatorVote is called when we receive a validator vote message from a peer. This function processes // the vote locally in the FastHotStuffEventLoop. 
func (fc *FastHotStuffConsensus) HandleValidatorVote(pp *Peer, msg *MsgDeSoValidatorVote) error { + glog.V(2).Infof("FastHotStuffConsensus.HandleValidatorVote: Received vote msg %s", msg.ToString()) + // No need to hold a lock on the consensus because this function is a pass-through // for the FastHotStuffEventLoop which guarantees thread-safety for its callers @@ -391,7 +393,8 @@ func (fc *FastHotStuffConsensus) HandleValidatorVote(pp *Peer, msg *MsgDeSoValid if err := fc.fastHotStuffEventLoop.ProcessValidatorVote(msg); err != nil { // If we can't process the vote locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. - return errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorVote: Error processing vote: ") + glog.Errorf("FastHotStuffConsensus.HandleValidatorVote: Error processing vote msg: %v", err) + return errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorVote: Error processing vote msg: ") } // Happy path @@ -797,6 +800,9 @@ func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView, previousBloc if err != nil { return nil, errors.Errorf("Error fetching validator entry for block producer: %v", err) } + if blockProducerValidatorEntry == nil { + return nil, errors.New("Error fetching validator entry for block producer") + } blockProducerPublicKeyBytes := bav.GetPublicKeyForPKID(blockProducerValidatorEntry.PKID) blockProducerPublicKey := NewPublicKey(blockProducerPublicKeyBytes) if blockProducerPublicKey == nil { @@ -933,7 +939,7 @@ func (fc *FastHotStuffConsensus) logBlockProposal(block *MsgDeSoBlock, blockHash "\n High QC View: %d, High QC Num Validators: %d, High QC BlockHash: %s"+ "\n Timeout Agg QC View: %d, Timeout Agg QC Num Validators: %d, Timeout High QC Views: %s"+ "\n Num Block Transactions: %d, Num Transactions Remaining In Mempool: %d"+ - "\n=================================================================================================================", + 
"\n=================================================================================================================\n", block.Header.GetTstampSecs(), block.Header.GetView(), block.Header.Height, blockHash.String(), block.Header.ProposerVotingPublicKey.ToString(), block.Header.ProposerVotePartialSignature.ToString(), diff --git a/lib/pos_network.go b/lib/pos_network.go index 891315937..324cbb9d1 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -123,6 +123,17 @@ func (msg *MsgDeSoValidatorVote) FromBytes(data []byte) error { return nil } +func (msg *MsgDeSoValidatorVote) ToString() string { + return fmt.Sprintf( + "{MsgVersion: %d, VotingPublicKey: %s, BlockHash: %v, ProposedInView: %d, VotePartialSignature: %v}", + msg.MsgVersion, + msg.VotingPublicKey.ToAbbreviatedString(), + msg.BlockHash, + msg.ProposedInView, + msg.VotePartialSignature.ToAbbreviatedString(), + ) +} + // ================================================================== // Proof of Stake Timeout Message // ================================================================== From 3d3b8165b3e2d9d8888ef30c36dfca4f054f633b Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:38:29 -0500 Subject: [PATCH 512/762] Log Peer Validator Timeout Messages (#1058) --- lib/pos_consensus.go | 7 ++++++- lib/pos_network.go | 12 ++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 624efce88..b700f6a88 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -505,6 +505,8 @@ func (fc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo // HandleValidatorTimeout is called when we receive a validator timeout message from a peer. This function // processes the timeout locally in the FastHotStuffEventLoop. 
func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeout) error { + glog.V(2).Infof("FastHotStuffConsensus.HandleLocalTimeoutEvent: Received timeout msg: %s", msg.ToString()) + // No need to hold a lock on the consensus because this function is a pass-through // for the FastHotStuffEventLoop which guarantees thread-safety for its callers. @@ -512,7 +514,8 @@ func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoVa if err := fc.fastHotStuffEventLoop.ProcessValidatorTimeout(msg); err != nil { // If we can't process the timeout locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. - return errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout: ") + glog.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout msg: %v", err) + return errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout msg: ") } // Happy path @@ -936,6 +939,7 @@ func (fc *FastHotStuffConsensus) logBlockProposal(block *MsgDeSoBlock, blockHash "\n Timestamp: %d, View: %d, Height: %d, BlockHash: %v"+ "\n Proposer Voting PKey: %s"+ "\n Proposer Signature: %s"+ + "\n Proposer Random Seed Signature: %s"+ "\n High QC View: %d, High QC Num Validators: %d, High QC BlockHash: %s"+ "\n Timeout Agg QC View: %d, Timeout Agg QC Num Validators: %d, Timeout High QC Views: %s"+ "\n Num Block Transactions: %d, Num Transactions Remaining In Mempool: %d"+ @@ -943,6 +947,7 @@ func (fc *FastHotStuffConsensus) logBlockProposal(block *MsgDeSoBlock, blockHash block.Header.GetTstampSecs(), block.Header.GetView(), block.Header.Height, blockHash.String(), block.Header.ProposerVotingPublicKey.ToString(), block.Header.ProposerVotePartialSignature.ToString(), + block.Header.ProposerRandomSeedSignature.ToString(), block.Header.GetQC().GetView(), block.Header.GetQC().GetAggregatedSignature().GetSignersList().Size(), 
block.Header.PrevBlockHash.String(), aggQCView, aggQCNumValidators, aggQCHighQCViews, len(block.Txns), len(fc.mempool.GetTransactions()), diff --git a/lib/pos_network.go b/lib/pos_network.go index 324cbb9d1..08aae1bbc 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -251,6 +251,18 @@ func (msg *MsgDeSoValidatorTimeout) FromBytes(data []byte) error { return nil } +func (msg *MsgDeSoValidatorTimeout) ToString() string { + return fmt.Sprintf( + "{MsgVersion: %d, VotingPublicKey: %s, TimedOutView: %d, HighQCView: %v, HighQCBlockHash: %v, TimeoutPartialSignature: %s}", + msg.MsgVersion, + msg.VotingPublicKey.ToAbbreviatedString(), + msg.TimedOutView, + msg.HighQC.ProposedInView, + msg.HighQC.BlockHash, + msg.TimeoutPartialSignature.ToAbbreviatedString(), + ) +} + // A QuorumCertificate contains an aggregated signature from 2/3rds of the validators // on the network, weighted by stake. The signatures are associated with a block hash // and a view, both of which are identified in the certificate. 
From f10d6dcaf5805cd8d35850445aaf00acf196c376 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 22 Feb 2024 14:48:59 -0500 Subject: [PATCH 513/762] Request Missing HighQC Block For Timeout Message (#1061) * Request Missing HighQC Block For Timeout Message * Address Nina's comments --- lib/blockchain.go | 8 ++++++++ lib/pos_consensus.go | 30 ++++++++++++++++++++++++++++-- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 3a8b9484c..764a53681 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1146,6 +1146,14 @@ func (bc *Blockchain) HasBlock(blockHash *BlockHash) bool { return true } +func (bc *Blockchain) HasBlockInBlockIndex(blockHash *BlockHash) bool { + bc.ChainLock.RLock() + defer bc.ChainLock.RUnlock() + + _, exists := bc.blockIndexByHash[*blockHash] + return exists +} + // This needs to hold a lock on the blockchain because it read from an in-memory map that is // not thread-safe. func (bc *Blockchain) GetBlockHeaderFromIndex(blockHash *BlockHash) *MsgDeSoHeader { diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index b700f6a88..fac24e887 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -507,8 +507,23 @@ func (fc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeout) error { glog.V(2).Infof("FastHotStuffConsensus.HandleLocalTimeoutEvent: Received timeout msg: %s", msg.ToString()) - // No need to hold a lock on the consensus because this function is a pass-through - // for the FastHotStuffEventLoop which guarantees thread-safety for its callers. + // Hold a write lock on the consensus, since we need to update the timeout message in the + // FastHotStuffEventLoop. 
+ fc.lock.Lock() + defer fc.lock.Unlock() + + if !fc.fastHotStuffEventLoop.IsRunning() { + return errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: FastHotStuffEventLoop is not running") + } + + // If we don't have the highQC's block on hand, then we need to request it from the peer. We do + // that first before storing the timeout message locally in the FastHotStuffEventLoop. This + // prevents spamming of timeout messages by peers. + if !fc.blockchain.HasBlockInBlockIndex(msg.HighQC.BlockHash) { + fc.trySendMessageToPeer(pp, &MsgDeSoGetBlocks{HashList: []*BlockHash{msg.HighQC.BlockHash}}) + glog.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Requesting missing highQC's block: %v", msg.HighQC.BlockHash) + return errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Missing highQC's block: %v", msg.HighQC.BlockHash) + } // Process the timeout message locally in the FastHotStuffEventLoop if err := fc.fastHotStuffEventLoop.ProcessValidatorTimeout(msg); err != nil { @@ -874,6 +889,17 @@ func (fc *FastHotStuffConsensus) updateActiveValidatorConnections() error { return nil } +func (fc *FastHotStuffConsensus) trySendMessageToPeer(pp *Peer, msg DeSoMessage) { + remoteNode := fc.networkManager.rnManager.GetRemoteNodeFromPeer(pp) + if remoteNode == nil { + glog.Errorf("FastHotStuffConsensus.trySendMessageToPeer: RemoteNode not found for peer: %v", pp) + return + } + + // Send the message to the peer + remoteNode.SendMessage(msg) +} + // Finds the epoch entry for the block and returns the epoch number. 
func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { for _, epochEntry := range epochEntries { From 5d1bb335153d6b19651c6387e5393514d5310054 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 22 Feb 2024 16:34:05 -0500 Subject: [PATCH 514/762] Remove Peer Disconnects in Server._handleBlock (#1062) * Remove Peer Disconnects in Server._handleBlock * Warning for unrequested blocks * Catch case for spam prevention check * Address Nina's comment --- lib/pos_blockchain.go | 3 ++- lib/server.go | 39 ++++++++++++++++++++++----------------- 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index aa9d6b79d..e0bea2745 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -263,7 +263,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v } if !passedSpamPreventionCheck { // If the block fails the spam prevention check, we throw it away. - return false, false, nil, errors.New("processBlockPoS: Block failed spam prevention check") + return false, false, nil, errors.Wrapf(RuleErrorFailedSpamPreventionsCheck, "processBlockPoS: Block failed spam prevention check: ") } // Validate the block and store it in the block index. The block is guaranteed to not be an orphan. 
@@ -1929,6 +1929,7 @@ const ( RuleErrorAncestorBlockValidationFailed RuleError = "RuleErrorAncestorBlockValidationFailed" RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock RuleError = "RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock" RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight RuleError = "RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight" + RuleErrorFailedSpamPreventionsCheck RuleError = "RuleErrorFailedSpamPreventionsCheck" RuleErrorNilMerkleRoot RuleError = "RuleErrorNilMerkleRoot" RuleErrorInvalidMerkleRoot RuleError = "RuleErrorInvalidMerkleRoot" diff --git a/lib/server.go b/lib/server.go index 38da289cf..185f7a75f 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2102,15 +2102,14 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { return } - if pp != nil { - if _, exists := pp.requestedBlocks[*blockHash]; !exists { - glog.Errorf("_handleBlock: Getting a block that we haven't requested before, "+ - "block hash (%v)", *blockHash) - } - delete(pp.requestedBlocks, *blockHash) - } else { - glog.Errorf("_handleBlock: Called with nil peer, this should never happen.") + // Log a warning if we receive a block we haven't requested yet. It is still possible to receive + // a block in this case if we're connected directly to the block producer and they send us a block + // directly. + if _, exists := pp.requestedBlocks[*blockHash]; !exists { + glog.Warningf("_handleBlock: Getting a block that we haven't requested before, "+ + "block hash (%v)", *blockHash) } + delete(pp.requestedBlocks, *blockHash) // Check that the mempool has not received a transaction that would forbid this block's signature pubkey. // This is a minimal check, a more thorough check is made in the ProcessBlock function. This check is @@ -2157,20 +2156,28 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { // headers comment above but in the future we should probably try and figure // out a way to be more strict about things. 
glog.Warningf("Got duplicate block %v from peer %v", blk, pp) + } else if strings.Contains(err.Error(), RuleErrorFailedSpamPreventionsCheck.Error()) { + // If the block fails the spam prevention check, then it must be signed by the + // bad block proposer signature or it has a bad QC. In either case, we should + // disconnect the peer. + srv._logAndDisconnectPeer(pp, blk, errors.Wrapf(err, "Error while processing block: ").Error()) + return } else { - srv._logAndDisconnectPeer( - pp, blk, - errors.Wrapf(err, "Error while processing block: ").Error()) + // For any other error, we log the error and continue. + glog.Errorf("Server._handleBlock: Error while processing block: %v", err) return } } + if isOrphan { - // We should generally never receive orphan blocks. It indicates something - // went wrong in our headers syncing. - glog.Errorf("ERROR: Received orphan block with hash %v height %v. "+ + // It's possible to receive an orphan block if we're connected directly to the + // block producer, and they are broadcasting blocks in the steady state. We log + // a warning in this case and move on. + glog.Warningf("ERROR: Received orphan block with hash %v height %v. "+ "This should never happen", blockHash, blk.Header.Height) return } + srv.timer.End("Server._handleBlock: Process Block") srv.timer.Print("Server._handleBlock: General") @@ -2178,9 +2185,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { // We shouldn't be receiving blocks while syncing headers. 
if srv.blockchain.chainState() == SyncStateSyncingHeaders { - srv._logAndDisconnectPeer( - pp, blk, - "We should never get blocks when we're syncing headers") + glog.Warningf("Server._handleBlock: Received block while syncing headers: %v", blk) return } From ac60b950e900bc4f201fcce985530fbcddb4e17c Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Thu, 22 Feb 2024 17:12:03 -0500 Subject: [PATCH 515/762] Change log level for consensus events --- lib/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/server.go b/lib/server.go index 185f7a75f..19caef905 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2658,7 +2658,7 @@ func (srv *Server) _startConsensus() { select { case consensusEvent := <-srv._getFastHotStuffConsensusEventChannel(): { - glog.Infof("Server._startConsensus: Received consensus event: %s", consensusEvent.ToString()) + glog.V(2).Infof("Server._startConsensus: Received consensus event: %s", consensusEvent.ToString()) srv._handleFastHostStuffConsensusEvent(consensusEvent) } From c1a4cbb2d5f88220c44912dc177d3b273327a1f3 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Thu, 22 Feb 2024 17:14:39 -0500 Subject: [PATCH 516/762] Update regtest block producer domain --- lib/pos_server_regtest.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/pos_server_regtest.go b/lib/pos_server_regtest.go index e4a339416..bd74780d6 100644 --- a/lib/pos_server_regtest.go +++ b/lib/pos_server_regtest.go @@ -25,7 +25,7 @@ func (srv *Server) submitRegtestValidatorRegistrationTxns(block *MsgDeSoBlock) { } txnMeta := RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://deso.com")}, + Domains: [][]byte{[]byte("http://localhost:18000")}, DisableDelegatedStake: false, DelegatedStakeCommissionBasisPoints: 100, VotingPublicKey: blsSigner.GetPublicKey(), From 19e8511a9c4ac115d2d66947140b56921624ebe4 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 22 Feb 2024 17:38:03 -0500 
Subject: [PATCH 517/762] Prevent Commited Tip Reorgs in ProcessHeaderPoS (#1063) * Prevent Commited Tip Reorgs in ProcessHeaderPoS * Clean up error message --- lib/pos_blockchain.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index e0bea2745..45a5f2a69 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -59,6 +59,13 @@ func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader) ( return true, false, nil } + // If the incoming header is part of a reorg that uncommits the committed tip from the best chain, + // then we exit early. Such headers are invalid and should not be synced. + committedBlockchainTip, _ := bc.getCommittedTip() + if committedBlockchainTip != nil && committedBlockchainTip.Header.Height >= header.Height { + return false, false, errors.New("processHeaderPoS: Header conflicts with committed tip") + } + // Validate the header and index it in the block index. blockNode, isOrphan, err := bc.validateAndIndexHeaderPoS(header, headerHash) if err != nil { From 53cf485de88685f6684a4eeca356b31e18613b49 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 23 Feb 2024 16:50:03 -0800 Subject: [PATCH 518/762] Atomics Initial Framework --- lib/block_view_atomic_txns.go | 174 ++++++++++++++++++++++++++++++++++ lib/constants.go | 5 + 2 files changed, 179 insertions(+) create mode 100644 lib/block_view_atomic_txns.go diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go new file mode 100644 index 000000000..2006243e9 --- /dev/null +++ b/lib/block_view_atomic_txns.go @@ -0,0 +1,174 @@ +package lib + +import ( + "bytes" + "github.com/pkg/errors" + "io" +) + +// +// TYPES: AtomicTxnsMetadata +// + +type AtomicTxnsMetadata struct { + // The AtomicTxnsMetadata represents the transaction structure for the + // TxnTypeAtomicTxns transaction type. The transactions in the + // AtomicTxnsMetadata.Txns slice are committed atomically in-order on the + // blockchain. 
This means either all the transactions with be executed + // on the blockchain in the order specified or none of the transactions + // will be executed. + // + // The AtomicTxnsMetadata.Txns field must be a specially formed + // slice of DeSo transactions to ensure their atomic execution on the blockchain. + // If this field is not properly structured, the AtomicTxns 'wrapper' transaction + // will be rejected. The transactions in AtomicTxnsMetadata.Txns and their corresponding + // ExtraData must form a circular doubly linked list. The links are embedded in the extra data map as follows: + // ** Take special note of the encoding schema for the AtomicTxnsChainLength ** + // + // For the first transaction: + // AtomicTxnsMetadata.Txns[0].ExtraData = { + // AtomicTxnsChainLength: UintToBuf(uint64(len(AtomicTxnsMetadata.Txns)))... + // NextAtomicTxnPreHash: AtomicTxnsMetadata.Txns[1].AtomicHash() + // PreviousAtomicTxnPreHash: AtomicTxnsMetadata.Txns[len(AtomicTxnsMetadata.Txns)-1].AtomicHash() + // } + // + // For the ith transaction where 0 < i < len(AtomicTxnsMetadata.Txns)-1: + // AtomicTxnsMetadata.Txns[i].ExtraData = { + // NextAtomicTxnPreHash: AtomicTxnsMetadata.Txns[i+1].AtomicHash() + // PreviousAtomicTxnPreHash: AtomicTxnsMetadata.Txns[i-1].AtomicHash() + // } + // + // For the last transaction: + // AtomicTxnsMetadata.Txns[len(AtomicTxnsMetadata.Txns)-1].ExtraData = { + // NextAtomicTxnPreHash: AtomicTxnsMetadata.Txns[0].AtomicHash() + // PreviousAtomicTxnPreHash: AtomicTxnsMetadata.Txns[len(AtomicTxnsMetadata.Txns)-2].AtomicHash() + // } + // + // The "AtomicHash()" function is a special transaction hash taken without consideration for the signature + // on a transaction as well as certain extra data fields. Otherwise, constructing an atomic transaction + // would be impossible as deriving the links using MsgDeSoTxn.Hash() would have circular dependencies. 
+ // The purpose of using the AtomicHash for links is to prevent a malicious 3rd party from injecting or + // modifying the transactions included in the atomic transaction. This helps ensure the atomicity of the + // atomic transactions. NOTE: The MsgDeSoTxn.AtomicHash() operation DOES keep the AtomicTxnsChainLength + // key in the ExtraData map to ensure that start of the chain is not compromised. + // + // The AtomicTxnsChainLength key is crucial for pinning the start of the atomic transaction. It's + // arbitrary and redundant that we use the chains length, but it adds an extra sanity check when + // connecting the transaction to the blockchain. Without a key representing the starting transaction, + // a malicious entity could reorder the transactions while still preserving the validity of the hashes + // in the circularly linked list. The AtomicTxnsChainLength included in the first transaction ensures + // the transactions are atomically executed in the order specified. + // NOTE: Technically, multiple transactions can include a AtomicTxnsChainLength key in their extra data + // which would enable the atomic transactions to be possibly reordered. While this is possible, + // it's not necessarily recommended. + Txns []*MsgDeSoTxn +} + +func (msg *MsgDeSoTxn) IsAtomicTxn() bool { + // An atomic transaction is qualified by the existence of the NextAtomicTxnPreHash + // and PreviousAtomicTxnPreHash keys in the ExtraData map. + if _, keyExists := msg.ExtraData[NextAtomicTxnPreHash]; !keyExists { + return false + } + if _, keyExists := msg.ExtraData[PreviousAtomicTxnPreHash]; !keyExists { + return false + } + return true +} + +func (msg *MsgDeSoTxn) AtomicHash() (*BlockHash, error) { + // Create a duplicate of the transaction to ensure we don't edit the existing transaction. 
+ msgDuplicate, err := msg.Copy() + if err != nil { + return nil, errors.Wrap(err, "MsgDeSoTxn.AtomicHash: Cannot create duplicate transaction") + } + + // Sanity check that the transaction includes the necessary extra data to be included in an atomic transaction. + if !msgDuplicate.IsAtomicTxn() { + return nil, errors.New("MsgDeSoTxn.AtomicHash: Cannot compute atomic hash on non-atomic transaction") + } + + // Delete the NextAtomicTxnPreHash and PreviousAtomicTxnPreHash from the ExtraData map. + delete(msgDuplicate.ExtraData, NextAtomicTxnPreHash) + delete(msgDuplicate.ExtraData, PreviousAtomicTxnPreHash) + + // Convert the transaction to bytes but do NOT encode the transaction signature. + preSignature := true + txBytes, err := msgDuplicate.ToBytes(preSignature) + if err != nil { + return nil, errors.Wrap(err, "MsgDeSoTxn.AtomicHash: cannot convert modified transaction to bytes") + } + + // Return the SHA256 double hash of the resulting bytes. + return Sha256DoubleHash(txBytes), nil +} + +func (txnData *AtomicTxnsMetadata) GetTxnType() TxnType { + return TxnTypeAtomicTxns +} + +func (txnData *AtomicTxnsMetadata) ToBytes(preSignature bool) ([]byte, error) { + var data []byte + data = append(data, UintToBuf(uint64(len(txnData.Txns)))...) + for _, txn := range txnData.Txns { + txnBytes, err := txn.ToBytes(preSignature) + if err != nil { + return nil, errors.Wrap(err, + "AtomicTxnsMetadata.ToBytes: Problem serializing txn") + } + data = append(data, UintToBuf(uint64(len(txnBytes)))...) + data = append(data, txnBytes...) + } + return data, nil +} + +func (txnData *AtomicTxnsMetadata) FromBytes(data []byte) error { + rr := bytes.NewReader(data) + + // Read the number of transactions within the atomic transaction. 
+ numTxns, err := ReadUvarint(rr) + if err != nil { + return errors.Wrap(err, + "AtomicTxnsMetadata.FromBytes: Problem reading numTxns") + } + txnData.Txns, err = SafeMakeSliceWithLength[*MsgDeSoTxn](numTxns) + if err != nil { + return errors.Wrap(err, "AtomicTxnsMetadata.FromBytes: Problem allocating txnData.Txns") + } + + // Read the transactions. + for ii := uint64(0); ii < numTxns; ii++ { + txnData.Txns[ii] = &MsgDeSoTxn{} + + // Figure out how many bytes are associated with the ith transaction. + numTxnBytes, err := ReadUvarint(rr) + if err != nil { + return errors.Wrap(err, + "AtomicTxnsMetadata.FromBytes: Problem reading number of bytes in transaction") + } + + // Allocate memory for the transaction bytes to be read into. + txnBytes, err := SafeMakeSliceWithLength[byte](numTxnBytes) + if err != nil { + return errors.Wrap(err, + "AtomicTxnsMetadata.FromBytes: Problem allocating bytes for transaction") + } + + // Read the transaction into the txnBytes memory buffer. + if _, err = io.ReadFull(rr, txnBytes); err != nil { + return errors.Wrap(err, + "AtomicTxnsMetadata.FromBytes: Problem reading bytes for transaction") + } + + // Convert the txnBytes buffer to a MsgDeSoTxn struct. + if err = txnData.Txns[ii].FromBytes(txnBytes); err != nil { + return errors.Wrap(err, + "AtomicTxnsMetadata.FromBytes: Problem parsing transaction bytes") + } + } + return nil +} + +func (txnData *AtomicTxnsMetadata) New() DeSoTxnMetadata { + return &AtomicTxnsMetadata{} +} diff --git a/lib/constants.go b/lib/constants.go index 9c92a47a3..ebe528d69 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1560,6 +1560,11 @@ const ( DiamondLevelKey = "DiamondLevel" DiamondPostHashKey = "DiamondPostHash" + // Atomic Transaction Keys + AtomicTxnsChainLength = "AtomicTxnsChainLength" + NextAtomicTxnPreHash = "NextAtomicTxnPreHash" + PreviousAtomicTxnPreHash = "PreviousAtomicTxnPreHash" + // Key in transaction's extra data map containing the derived key used in signing the txn. 
DerivedPublicKey = "DerivedPublicKey" From cf47baabc11ca353c7624afd2022cb5e6e920e06 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 26 Feb 2024 15:25:48 -0500 Subject: [PATCH 519/762] Add Estimate fee rate to mempool interface (#1066) --- lib/legacy_mempool.go | 16 +++++++++++++++- lib/pos_fee_estimator.go | 14 ++++++++------ lib/pos_mempool.go | 21 ++++++++++++++++++++- 3 files changed, 43 insertions(+), 8 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index c7e3bb770..9617ab0dc 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2446,7 +2446,21 @@ func EstimateMaxTxnFeeV1(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) uint64 { func (mp *DeSoMempool) EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64, _ uint64, _ uint64, _ uint64, _ uint64, _ uint64) (uint64, error) { - return EstimateMaxTxnFeeV1(txn, minFeeRateNanosPerKB), nil + feeRate, _ := mp.EstimateFeeRate(minFeeRateNanosPerKB, 0, 0, 0, 0, 0) + return EstimateMaxTxnFeeV1(txn, feeRate), nil +} + +func (mp *DeSoMempool) EstimateFeeRate( + minFeeRateNanosPerKB uint64, + _ uint64, + _ uint64, + _ uint64, + _ uint64, + _ uint64) (uint64, error) { + if minFeeRateNanosPerKB < mp.readOnlyUtxoView.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB { + return mp.readOnlyUtxoView.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB, nil + } + return minFeeRateNanosPerKB, nil } func convertMempoolTxsToSummaryStats(mempoolTxs []*MempoolTx) map[string]*SummaryStats { diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index aae60d662..ad43ca405 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -230,16 +230,18 @@ func (posFeeEstimator *PoSFeeEstimator) sortCachedBlocks() { // and past blocks using the congestionFactorBasisPoints, priorityPercentileBasisPoints, and // maxBlockSize params. 
func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( - congestionFactorBasisPoints uint64, - priorityPercentileBasisPoints uint64, + mempoolCongestionFactorBasisPoints uint64, + mempoolPriorityPercentileBasisPoints uint64, + pastBlocksCongestionFactorBasisPoints uint64, + pastBlocksPriorityPercentileBasisPoints uint64, maxBlockSize uint64, ) (uint64, error) { posFeeEstimator.rwLock.RLock() defer posFeeEstimator.rwLock.RUnlock() pastBlockFeeRate, err := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.pastBlocksTransactionRegister, - congestionFactorBasisPoints, - priorityPercentileBasisPoints, + pastBlocksCongestionFactorBasisPoints, + pastBlocksPriorityPercentileBasisPoints, posFeeEstimator.numPastBlocks, maxBlockSize, ) @@ -248,8 +250,8 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( } mempoolFeeRate, err := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.mempoolTransactionRegister, - congestionFactorBasisPoints, - priorityPercentileBasisPoints, + mempoolCongestionFactorBasisPoints, + mempoolPriorityPercentileBasisPoints, posFeeEstimator.numMempoolBlocks, maxBlockSize, ) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 864b78ed2..7aaceb81c 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -51,6 +51,14 @@ type Mempool interface { pastBlocksPriorityPercentileBasisPoints uint64, maxBlockSize uint64, ) (uint64, error) + EstimateFeeRate( + minFeeRateNanosPerKB uint64, + mempoolCongestionFactorBasisPoints uint64, + mempoolPriorityPercentileBasisPoints uint64, + pastBlocksCongestionFactorBasisPoints uint64, + pastBlocksPriorityPercentileBasisPoints uint64, + maxBlockSize uint64, + ) (uint64, error) } type MempoolIterator interface { @@ -911,8 +919,19 @@ func (mp *PosMempool) EstimateFee(txn *MsgDeSoTxn, pastBlocksCongestionFactorBasisPoints uint64, pastBlocksPriorityPercentileBasisPoints uint64, maxBlockSize uint64) (uint64, error) { - // TODO: 
replace MaxBasisPoints with variables configured by flags. return mp.feeEstimator.EstimateFee( txn, mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, pastBlocksCongestionFactorBasisPoints, pastBlocksPriorityPercentileBasisPoints, maxBlockSize) } + +func (mp *PosMempool) EstimateFeeRate( + _ uint64, + mempoolCongestionFactorBasisPoints uint64, + mempoolPriorityPercentileBasisPoints uint64, + pastBlocksCongestionFactorBasisPoints uint64, + pastBlocksPriorityPercentileBasisPoints uint64, + maxBlockSize uint64) (uint64, error) { + return mp.feeEstimator.EstimateFeeRateNanosPerKB( + mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, + pastBlocksCongestionFactorBasisPoints, pastBlocksPriorityPercentileBasisPoints, maxBlockSize) +} From 7ab45bb5d8d27e0e043c9063cf43a4c44787d326 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 26 Feb 2024 15:46:41 -0500 Subject: [PATCH 520/762] Add BLS Public Key PKID Pair Entry Snapshot to core state (#1068) --- lib/db_utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/db_utils.go b/lib/db_utils.go index e0aa9f9de..5ee4e649a 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -595,7 +595,7 @@ type DBPrefixes struct { // PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry: Retrieve a snapshotted BLSPublicKeyPKIDPairEntry // by BLS Public Key and SnapshotAtEpochNumber. 
// Prefix, , -> *BLSPublicKeyPKIDPairEntry - PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry []byte `prefix_id:"[96]" is_state:"true"` + PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry []byte `prefix_id:"[96]" is_state:"true" core_state:"true"` // NEXT_TAG: 97 } From 5a20c5cdf00a8ced1ed5227eee9d936649a49550 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 27 Feb 2024 19:58:57 -0500 Subject: [PATCH 521/762] Fix txindex for failing txns (#1069) --- lib/legacy_mempool.go | 18 ------------------ lib/txindex.go | 43 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 42 insertions(+), 19 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 9617ab0dc..6fc918ed7 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2129,24 +2129,6 @@ func _computeBitcoinExchangeFields(params *DeSoParams, }, PkToString(publicKey.SerializeCompressed(), params), nil } -func ConnectTxnAndComputeTransactionMetadata( - txn *MsgDeSoTxn, utxoView *UtxoView, blockHash *BlockHash, - blockHeight uint32, blockTimestampNanoSecs int64, txnIndexInBlock uint64) (*TransactionMetadata, error) { - - totalNanosPurchasedBefore := utxoView.NanosPurchased - usdCentsPerBitcoinBefore := utxoView.GetCurrentUSDCentsPerBitcoin() - utxoOps, totalInput, totalOutput, fees, err := utxoView._connectTransaction( - txn, txn.Hash(), blockHeight, blockTimestampNanoSecs, false, false, - ) - if err != nil { - return nil, fmt.Errorf( - "UpdateTxindex: Error connecting txn to UtxoView: %v", err) - } - - return ComputeTransactionMetadata(txn, utxoView, blockHash, totalNanosPurchasedBefore, - usdCentsPerBitcoinBefore, totalInput, totalOutput, fees, txnIndexInBlock, utxoOps, uint64(blockHeight)), nil -} - // This is the main function used for adding a new txn to the pool. It will // run all needed validation on the txn before adding it, and it will only // accept the txn if these validations pass. 
diff --git a/lib/txindex.go b/lib/txindex.go index 61d0a7966..97715a876 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -419,9 +419,21 @@ func (txi *TXIndex) Update() error { // - Compute its mapping values, which may include custom metadata fields // - add all its mappings to the db. for txnIndexInBlock, txn := range blockMsg.Txns { + hasPoWBlockHeight := txi.Params.IsPoWBlockHeight(blockMsg.Header.Height) + // Also, the first transaction in the block, the block reward transaction, should always be a connecting transaction. + isBlockRewardTxn := (txnIndexInBlock == 0) && (txn.TxnMeta.GetTxnType() == TxnTypeBlockReward) + // Finally, if the transaction is not the first in the block, we check the TxnConnectStatusByIndex to see if + // it's marked by the block producer as a connecting transaction. PoS blocks should reflect this in TxnConnectStatusByIndex. + hasConnectingPoSTxnStatus := false + if txi.Params.IsPoSBlockHeight(blockHeight) && (txnIndexInBlock > 0) && (blockMsg.TxnConnectStatusByIndex != nil) { + // Note that TxnConnectStatusByIndex doesn't include the first block reward transaction. 
+ hasConnectingPoSTxnStatus = blockMsg.TxnConnectStatusByIndex.Get(txnIndexInBlock - 1) + } + connects := hasPoWBlockHeight || isBlockRewardTxn || hasConnectingPoSTxnStatus + txnMeta, err := ConnectTxnAndComputeTransactionMetadata( txn, utxoView, blockToAttach.Hash, blockToAttach.Height, - int64(blockToAttach.Header.TstampNanoSecs), uint64(txnIndexInBlock)) + blockToAttach.Header.TstampNanoSecs, uint64(txnIndexInBlock), connects) if err != nil { return fmt.Errorf("Update: Problem connecting txn %v to txindex: %v", txn, err) @@ -454,3 +466,32 @@ func (txi *TXIndex) Update() error { return nil } + +func ConnectTxnAndComputeTransactionMetadata( + txn *MsgDeSoTxn, utxoView *UtxoView, blockHash *BlockHash, + blockHeight uint32, blockTimestampNanoSecs int64, txnIndexInBlock uint64, connects bool) (*TransactionMetadata, error) { + + totalNanosPurchasedBefore := utxoView.NanosPurchased + usdCentsPerBitcoinBefore := utxoView.GetCurrentUSDCentsPerBitcoin() + + var utxoOps []*UtxoOperation + var totalInput, totalOutput, fees, burnFee, utilityFee uint64 + var err error + if connects { + utxoOps, totalInput, totalOutput, fees, err = utxoView._connectTransaction( + txn, txn.Hash(), blockHeight, blockTimestampNanoSecs, false, false, + ) + } else { + utxoOps, burnFee, utilityFee, err = utxoView._connectFailingTransaction( + txn, blockHeight, false) + fees = burnFee + utilityFee + } + + if err != nil { + return nil, fmt.Errorf( + "UpdateTxindex: Error connecting txn to UtxoView: %v", err) + } + + return ComputeTransactionMetadata(txn, utxoView, blockHash, totalNanosPurchasedBefore, + usdCentsPerBitcoinBefore, totalInput, totalOutput, fees, txnIndexInBlock, utxoOps, uint64(blockHeight)), nil +} From ae9e118d2e5362296da1e0c59f5218f7ab0b44fe Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 27 Feb 2024 20:00:28 -0500 Subject: [PATCH 522/762] Introduce failing txn type so state syncer understand failing txns (#1070) --- lib/block_view.go | 
1 + lib/block_view_types.go | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/block_view.go b/lib/block_view.go index 168f2a997..5f56c6c22 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4037,6 +4037,7 @@ func (bav *UtxoView) _connectFailingTransaction(txn *MsgDeSoTxn, blockHeight uin "spending balance") } utxoOps = append(utxoOps, feeUtxoOp) + utxoOps = append(utxoOps, &UtxoOperation{Type: OperationTypeFailingTxn}) // If verifySignatures is passed, we check transaction signature. if verifySignatures { diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 04d52e1ae..b487c91ec 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -682,7 +682,8 @@ const ( OperationTypeStakeDistributionRestake OperationType = 49 OperationTypeStakeDistributionPayToBalance OperationType = 50 OperationTypeSetValidatorLastActiveAtEpoch OperationType = 51 - // NEXT_TAG = 52 + OperationTypeFailingTxn OperationType = 52 + // NEXT_TAG = 53 ) func (op OperationType) String() string { From 0eff225436335212b39476f23ca3813f5b3439bd Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 27 Feb 2024 20:22:25 -0500 Subject: [PATCH 523/762] Support uncommitted blocks in state syncer mempool flush (#1065) --- lib/legacy_mempool.go | 2 + lib/pos_blockchain.go | 39 ++++++++++++++ lib/server.go | 4 ++ lib/state_change_syncer.go | 108 ++++++++++++++++++++++++++++++++----- 4 files changed, 140 insertions(+), 13 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 6fc918ed7..6a3756d37 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -286,6 +286,8 @@ func (mp *DeSoMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { } func (mp *DeSoMempool) GetOrderedTransactions() []*MempoolTx { + mp.mtx.RLock() + defer mp.mtx.RUnlock() orderedTxns, _, _ := mp.GetTransactionsOrderedByTimeAdded() return orderedTxns } diff --git a/lib/pos_blockchain.go 
b/lib/pos_blockchain.go index 45a5f2a69..2782df9e8 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1731,6 +1731,45 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash) error { return nil } +// GetUncommittedFullBlocks is a helper that the state syncer uses to fetch all uncommitted +// blocks, so it can flush them just like we would with mempool transactions. It returns +// all uncommitted blocks from the specified tip to the last uncommitted block. +// Note: it would be more efficient if we cached these results. +func (bc *Blockchain) GetUncommittedFullBlocks(tipHash *BlockHash) ([]*MsgDeSoBlock, error) { + if tipHash == nil { + tipHash = bc.BlockTip().Hash + } + bc.ChainLock.RLock() + defer bc.ChainLock.RUnlock() + tipBlock, exists := bc.bestChainMap[*tipHash] + if !exists { + return nil, errors.Errorf("GetUncommittedFullBlocks: Block %v not found in best chain map", tipHash.String()) + } + // If the tip block is committed, we can't get uncommitted blocks from it so we return an empty slice. + if tipBlock.IsCommitted() { + return []*MsgDeSoBlock{}, nil + } + var uncommittedBlocks []*MsgDeSoBlock + currentBlock := tipBlock + for !currentBlock.IsCommitted() { + fullBlock, err := GetBlock(currentBlock.Hash, bc.db, bc.snapshot) + if err != nil { + return nil, errors.Wrapf(err, "GetUncommittedFullBlocks: Problem fetching block %v", + currentBlock.Hash.String()) + } + uncommittedBlocks = append(uncommittedBlocks, fullBlock) + currentParentHash := currentBlock.Header.PrevBlockHash + if currentParentHash == nil { + return nil, errors.Errorf("GetUncommittedFullBlocks: Block %v has nil PrevBlockHash", currentBlock.Hash) + } + currentBlock = bc.blockIndexByHash[*currentParentHash] + if currentBlock == nil { + return nil, errors.Errorf("GetUncommittedFullBlocks: Block %v not found in block index", currentBlock.Hash) + } + } + return collections.Reverse(uncommittedBlocks), nil +} + // GetCommittedTipView builds a UtxoView to the committed tip. 
func (bc *Blockchain) GetCommittedTipView() (*UtxoView, error) { return NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, nil) diff --git a/lib/server.go b/lib/server.go index 19caef905..b911ae3ad 100644 --- a/lib/server.go +++ b/lib/server.go @@ -667,6 +667,10 @@ func NewServer( timer.Initialize() srv.timer = timer + if srv.stateChangeSyncer != nil { + srv.stateChangeSyncer.StartMempoolSyncRoutine(srv) + } + // If shouldRestart is true, it means that the state checksum is likely corrupted, and we need to enter a recovery mode. // This can happen if the node was terminated mid-operation last time it was running. The recovery process rolls back // blocks to the beginning of the current snapshot epoch and resets to the state checksum to the epoch checksum. diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index a5a9f1777..421e2ef34 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" "fmt" + "github.com/deso-protocol/core/collections" "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" "github.com/google/uuid" @@ -624,10 +625,9 @@ func createMempoolTxKey(operationType StateSyncerOperationType, keyBytes []byte) // in the mempool state change file. It also loops through all unconnected transactions and their associated // utxo ops and adds them to the mempool state change file. func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Server) (bool, error) { - originalCommittedFlushId := stateChangeSyncer.BlockSyncFlushId - if server.mempool.stopped { + if !server.GetMempool().IsRunning() { return true, nil } @@ -655,7 +655,9 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser // Kill the snapshot so that it doesn't affect the original snapshot. 
mempoolUtxoView.Snapshot = nil + server.blockchain.ChainLock.RLock() mempoolUtxoView.TipHash = server.blockchain.bestChain[len(server.blockchain.bestChain)-1].Hash + server.blockchain.ChainLock.RUnlock() // A new transaction is created so that we can simulate writes to the db without actually writing to the db. // Using the transaction here rather than a stubbed badger db allows the process to query the db for any entries @@ -672,10 +674,10 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser } // Loop through all the transactions in the mempool and connect them and their utxo ops to the mempool view. - server.mempool.mtx.RLock() - mempoolTxns, _, err := server.mempool._getTransactionsOrderedByTimeAdded() - server.mempool.mtx.RUnlock() + mempoolTxns := server.GetMempool().GetOrderedTransactions() + // Get the uncommitted blocks from the chain. + uncommittedBlocks, err := server.blockchain.GetUncommittedFullBlocks(mempoolUtxoView.TipHash) if err != nil { mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ FlushId: uuid.Nil, @@ -685,13 +687,93 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer: ") } - currentTimestamp := time.Now().UnixNano() - for _, mempoolTx := range mempoolTxns { - utxoOpsForTxn, _, _, _, err := mempoolTxUtxoView.ConnectTransaction( - mempoolTx.Tx, mempoolTx.Hash, uint32(blockHeight+1), - currentTimestamp, false, false /*ignoreUtxos*/) + // First connect the uncommitted blocks to the mempool view. + for _, uncommittedBlock := range uncommittedBlocks { + var utxoOpsForBlock [][]*UtxoOperation + txHashes := collections.Transform(uncommittedBlock.Txns, func(txn *MsgDeSoTxn) *BlockHash { + return txn.Hash() + }) + // TODO: there is a slight performance enhancement we could make here + // by rewriting the ConnectBlock logic to avoid unnecessary UtxoView copying + // for failing transactions. 
However, we'd also need to rewrite the end-of-epoch + // logic here which would make this function a bit long. + // Connect this block to the mempoolTxUtxoView so we can get the utxo ops. + utxoOpsForBlock, err = mempoolTxUtxoView.ConnectBlock( + uncommittedBlock, txHashes, false, nil, uncommittedBlock.Header.Height) if err != nil { - return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer ConnectTransaction: ") + mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ + FlushId: uuid.Nil, + Succeeded: false, + IsMempoolFlush: true, + }) + return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer ConnectBlock uncommitted block: ") + } + blockHash, _ := uncommittedBlock.Hash() + // Emit the UtxoOps event. + mempoolUtxoView.EventManager.stateSyncerOperation(&StateSyncerOperationEvent{ + StateChangeEntry: &StateChangeEntry{ + OperationType: DbOperationTypeUpsert, + KeyBytes: _DbKeyForUtxoOps(blockHash), + EncoderBytes: EncodeToBytes(blockHeight, &UtxoOperationBundle{ + UtxoOpBundle: utxoOpsForBlock, + }, false), + Block: uncommittedBlock, + }, + FlushId: uuid.Nil, + IsMempoolTxn: true, + }) + } + + currentTimestamp := time.Now().UnixNano() + // TODO: introduce flag to control the number of mempool txns to sync to the state change file. + numMempoolTxLimit := 10000 + for ii, mempoolTx := range mempoolTxns { + if server.params.IsPoSBlockHeight(blockHeight) && ii > numMempoolTxLimit { + break + } + var utxoOpsForTxn []*UtxoOperation + if server.params.IsPoSBlockHeight(blockHeight + 1) { + // We need to create a copy of the view in the event that the transaction fails to + // connect. If it fails to connect, we need to reset the view to its original state. + // and try to connect it as a failing transaction. If that fails as well, we just continue + // and the mempoolTxUtxoView is unmodified. 
+ var copiedView *UtxoView + copiedView, err = mempoolTxUtxoView.CopyUtxoView() + if err != nil { + return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer CopyUtxoView: ") + } + utxoOpsForTxn, _, _, _, err = copiedView.ConnectTransaction( + mempoolTx.Tx, mempoolTx.Hash, uint32(blockHeight+1), + currentTimestamp, false, false /*ignoreUtxos*/) + // If the transaction successfully connected, we update mempoolTxUtxoView to the copied view. + if err == nil { + mempoolTxUtxoView = copiedView + } else { + // If the transaction fails to connect, we need to reset the view to its original state + // and connect it as a failing transaction. + copiedView, err = mempoolTxUtxoView.CopyUtxoView() + if err != nil { + return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer CopyUtxoView: ") + } + utxoOpsForTxn, _, _, err = copiedView._connectFailingTransaction( + mempoolTx.Tx, uint32(blockHeight+1), false) + // If we fail to connect the transaction as a failing transaction, we just continue and the + // mempoolTxUtxoView is unmodified. + if err != nil { + glog.V(2).Infof("StateChangeSyncer.SyncMempoolToStateSyncer "+ + "ConnectFailingTransaction for mempool tx: %v", err) + continue + } + mempoolTxUtxoView = copiedView + } + } else { + // For PoW block heights, we can just connect the transaction to the mempool view. + utxoOpsForTxn, _, _, _, err = mempoolTxUtxoView.ConnectTransaction( + mempoolTx.Tx, mempoolTx.Hash, uint32(blockHeight+1), + currentTimestamp, false, false /*ignoreUtxos*/) + if err != nil { + return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer ConnectTransaction: ") + } } // Emit transaction state change. @@ -747,7 +829,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser func (stateChangeSyncer *StateChangeSyncer) StartMempoolSyncRoutine(server *Server) { go func() { // Wait for mempool to be initialized. 
- for server.mempool == nil || server.blockchain.chainState() != SyncStateFullyCurrent { + for server.GetMempool() == nil || server.blockchain.chainState() != SyncStateFullyCurrent { time.Sleep(15000 * time.Millisecond) } if !stateChangeSyncer.BlocksyncCompleteEntriesFlushed && stateChangeSyncer.SyncType == NodeSyncTypeBlockSync { @@ -757,7 +839,7 @@ func (stateChangeSyncer *StateChangeSyncer) StartMempoolSyncRoutine(server *Serv fmt.Printf("StateChangeSyncer.StartMempoolSyncRoutine: Error flushing all entries to file: %v", err) } } - mempoolClosed := server.mempool.stopped + mempoolClosed := !server.GetMempool().IsRunning() for !mempoolClosed { // Sleep for a short while to avoid a tight loop. time.Sleep(100 * time.Millisecond) From bf848886af214955d2fb4fe9b33e1d7fba3a18c7 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 27 Feb 2024 20:47:13 -0500 Subject: [PATCH 524/762] Fix Connect failing txn test (#1071) --- lib/block_view_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 0056fbbed..6cfbe72b4 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -2267,7 +2267,7 @@ func TestConnectFailingTransaction(t *testing.T) { txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn, blockHeight, true) require.NoError(err) - require.Equal(1, len(utxoOps)) + require.Equal(2, len(utxoOps)) expectedBurnFee, expectedUtilityFee := _getBMFForTxn(txn, globalParams) require.Equal(expectedBurnFee, burnFee) require.Equal(expectedUtilityFee, utilityFee) @@ -2303,7 +2303,7 @@ func TestConnectFailingTransaction(t *testing.T) { utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn, blockHeight, true) require.NoError(err) - require.Equal(1, len(utxoOps)) + require.Equal(2, len(utxoOps)) // The final balance is m0's starting 
balance minus the failing txn fee paid. finalBalance, err := blockView.GetDeSoBalanceNanosForPublicKey(m0PubBytes) @@ -2351,7 +2351,7 @@ func TestConnectFailingTransaction(t *testing.T) { txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn, blockHeight, true) require.NoError(err) - require.Equal(1, len(utxoOps)) + require.Equal(2, len(utxoOps)) // The final balance is m0's starting balance minus the failing txn fee paid. finalBalance, err := blockView.GetDeSoBalanceNanosForPublicKey(m0PubBytes) From 1f644e855a20b807951c5ed0fa11582b51121d84 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 28 Feb 2024 18:21:03 -0500 Subject: [PATCH 525/762] Add support for seed hex in block producer seed (#1072) --- lib/block_producer.go | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/lib/block_producer.go b/lib/block_producer.go index a4763abf6..b5ad66cb0 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -4,6 +4,7 @@ import ( "encoding/hex" "fmt" "math" + "strings" "sync" "sync/atomic" "time" @@ -80,15 +81,26 @@ func NewDeSoBlockProducer( var privKey *btcec.PrivateKey if blockProducerSeed != "" { - seedBytes, err := bip39.NewSeedWithErrorChecking(blockProducerSeed, "") - if err != nil { - return nil, fmt.Errorf("NewDeSoBlockProducer: Error converting mnemonic: %+v", err) - } + // If a blockProducerSeed is provided then we use it to generate a private key. + // If the block producer seed beings with 0x, we treat it as a hex seed. Otherwise, + // we treat it as a seed phrase. 
+ if strings.HasPrefix(blockProducerSeed, "0x") { + privKeyBytes, err := hex.DecodeString(blockProducerSeed[2:]) + if err != nil { + return nil, fmt.Errorf("NewDeSoBlockProducer: Error decoding hex seed: %+v", err) + } + privKey, _ = btcec.PrivKeyFromBytes(btcec.S256(), privKeyBytes) + } else { + seedBytes, err := bip39.NewSeedWithErrorChecking(blockProducerSeed, "") + if err != nil { + return nil, fmt.Errorf("NewDeSoBlockProducer: Error converting mnemonic: %+v", err) + } - _, privKey, _, err = ComputeKeysFromSeed(seedBytes, 0, params) - if err != nil { - return nil, fmt.Errorf( - "NewDeSoBlockProducer: Error computing keys from seed: %+v", err) + _, privKey, _, err = ComputeKeysFromSeed(seedBytes, 0, params) + if err != nil { + return nil, fmt.Errorf( + "NewDeSoBlockProducer: Error computing keys from seed: %+v", err) + } } } From 0c40ac87b021fb1b54f9b39fe366ba1356e82b68 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 29 Feb 2024 09:33:08 -0500 Subject: [PATCH 526/762] Make GetCommittedTip public (#1073) --- lib/pos_blockchain.go | 14 +++++++------- lib/txindex.go | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 2782df9e8..56bb473fc 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -61,7 +61,7 @@ func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader) ( // If the incoming header is part of a reorg that uncommits the committed tip from the best chain, // then we exit early. Such headers are invalid and should not be synced. 
- committedBlockchainTip, _ := bc.getCommittedTip() + committedBlockchainTip, _ := bc.GetCommittedTip() if committedBlockchainTip != nil && committedBlockchainTip.Header.Height >= header.Height { return false, false, errors.New("processHeaderPoS: Header conflicts with committed tip") } @@ -1240,7 +1240,7 @@ func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validator // including the committed tip. The first block in the returned slice is the first uncommitted // ancestor. func (bc *Blockchain) getLineageFromCommittedTip(header *MsgDeSoHeader) ([]*BlockNode, error) { - highestCommittedBlock, idx := bc.getCommittedTip() + highestCommittedBlock, idx := bc.GetCommittedTip() if idx == -1 || highestCommittedBlock == nil { return nil, errors.New("getLineageFromCommittedTip: No committed blocks found") } @@ -1588,7 +1588,7 @@ func (bc *Blockchain) runCommitRuleOnBestChain() error { return nil } // Find all uncommitted ancestors of block to commit - _, idx := bc.getCommittedTip() + _, idx := bc.GetCommittedTip() if idx == -1 { // This is an edge case we'll never hit in practice since all the PoW blocks // are committed. @@ -1792,7 +1792,7 @@ func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, er // If the provided block is committed, we need to make sure it's the committed tip. // Otherwise, we return an error. if currentBlock.IsCommitted() { - highestCommittedBlock, _ := bc.getCommittedTip() + highestCommittedBlock, _ := bc.GetCommittedTip() if highestCommittedBlock == nil { return nil, errors.Errorf("getUtxoViewAtBlockHash: No committed blocks found") } @@ -1839,8 +1839,8 @@ func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, er return utxoView, nil } -// getCommittedTip returns the highest committed block and its index in the best chain. -func (bc *Blockchain) getCommittedTip() (*BlockNode, int) { +// GetCommittedTip returns the highest committed block and its index in the best chain. 
+func (bc *Blockchain) GetCommittedTip() (*BlockNode, int) { for ii := len(bc.bestChain) - 1; ii >= 0; ii-- { if bc.bestChain[ii].IsCommitted() { return bc.bestChain[ii], ii @@ -1871,7 +1871,7 @@ func (bc *Blockchain) GetSafeBlocks() ([]*MsgDeSoHeader, error) { func (bc *Blockchain) getSafeBlockNodes() ([]*BlockNode, error) { // First get committed tip. - committedTip, idx := bc.getCommittedTip() + committedTip, idx := bc.GetCommittedTip() if idx == -1 || committedTip == nil { return nil, errors.New("getSafeBlockNodes: No committed blocks found") } diff --git a/lib/txindex.go b/lib/txindex.go index 97715a876..61110f098 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -224,7 +224,7 @@ func (txi *TXIndex) GetTxindexUpdateBlockNodes() ( txindexTipNode := blockIndexByHashCopy[*txindexTipHash.Hash] // Get the committed tip. - committedTip, _ := txi.CoreChain.getCommittedTip() + committedTip, _ := txi.CoreChain.GetCommittedTip() if txindexTipNode == nil { glog.Info("GetTxindexUpdateBlockNodes: Txindex tip was not found; building txindex starting at genesis block") From 9445f7738dfe07b04f6ac016b36eacb641275cd5 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 29 Feb 2024 09:35:30 -0500 Subject: [PATCH 527/762] Assorted small enhancements for node restart w/ pos (#1079) --- lib/miner.go | 11 ++++++++++- lib/server.go | 14 ++++++++++++-- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/lib/miner.go b/lib/miner.go index d3bab8b62..5bc1e45d1 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -319,8 +319,12 @@ func (desoMiner *DeSoMiner) Start() { "start the miner") return } - glog.Infof("DeSoMiner.Start: Starting miner with difficulty target %s", desoMiner.params.MinDifficultyTargetHex) blockTip := desoMiner.BlockProducer.chain.blockTip() + if desoMiner.params.IsPoSBlockHeight(blockTip.Header.Height) { + glog.Infof("DeSoMiner.Start: NOT starting miner because we are at a PoS block height %d", blockTip.Header.Height) 
+ return + } + glog.Infof("DeSoMiner.Start: Starting miner with difficulty target %s", desoMiner.params.MinDifficultyTargetHex) glog.Infof("DeSoMiner.Start: Block tip height %d, cum work %v, and difficulty %v", blockTip.Header.Height, BigintToHash(blockTip.CumWork), blockTip.DifficultyTarget) // Start a bunch of threads to mine for blocks. @@ -402,6 +406,10 @@ func HashToBigint(hash *BlockHash) *big.Int { } func BigintToHash(bigint *big.Int) *BlockHash { + if bigint == nil { + glog.Errorf("BigintToHash: Bigint is nil") + return nil + } hexStr := bigint.Text(16) if len(hexStr)%2 != 0 { // If we have an odd number of bytes add one to the beginning (remember @@ -412,6 +420,7 @@ func BigintToHash(bigint *big.Int) *BlockHash { if err != nil { glog.Errorf("Failed in converting bigint (%#v) with hex "+ "string (%s) to hash.", bigint, hexStr) + return nil } if len(hexBytes) > HashSizeBytes { glog.Errorf("BigintToHash: Bigint %v overflows the hash size %d", bigint, HashSizeBytes) diff --git a/lib/server.go b/lib/server.go index b911ae3ad..c8cf4b731 100644 --- a/lib/server.go +++ b/lib/server.go @@ -496,13 +496,23 @@ func NewServer( return nil, errors.Wrapf(err, "NewServer: Problem initializing blockchain"), true } + headerCumWorkStr := "" + headerCumWork := BigintToHash(_chain.headerTip().CumWork) + if headerCumWork != nil { + headerCumWorkStr = hex.EncodeToString(headerCumWork[:]) + } + blockCumWorkStr := "" + blockCumWork := BigintToHash(_chain.blockTip().CumWork) + if blockCumWork != nil { + blockCumWorkStr = hex.EncodeToString(blockCumWork[:]) + } glog.V(1).Infof("Initialized chain: Best Header Height: %d, Header Hash: %s, Header CumWork: %s, Best Block Height: %d, Block Hash: %s, Block CumWork: %s", _chain.headerTip().Height, hex.EncodeToString(_chain.headerTip().Hash[:]), - hex.EncodeToString(BigintToHash(_chain.headerTip().CumWork)[:]), + headerCumWorkStr, _chain.blockTip().Height, hex.EncodeToString(_chain.blockTip().Hash[:]), - 
hex.EncodeToString(BigintToHash(_chain.blockTip().CumWork)[:])) + blockCumWorkStr) nodeServices := SFFullNodeDeprecated if _hyperSync { From e1312f94831e5bbbbed8daf7b1003209a04f258c Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 1 Mar 2024 15:00:21 -0500 Subject: [PATCH 528/762] Fix txindex dependent transaction within uncommitted blocks (#1082) --- lib/txindex.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/lib/txindex.go b/lib/txindex.go index 61110f098..7dad5db4a 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -149,7 +149,11 @@ func NewTXIndex(coreChain *Blockchain, params *DeSoParams, dataDirectory string) } func (txi *TXIndex) FinishedSyncing() bool { - return txi.TXIndexChain.BlockTip().Height == txi.CoreChain.BlockTip().Height + committedTip, idx := txi.CoreChain.GetCommittedTip() + if idx == -1 { + return false + } + return txi.TXIndexChain.BlockTip().Height == committedTip.Height } func (txi *TXIndex) Start() { @@ -408,10 +412,16 @@ func (txi *TXIndex) Update() error { return fmt.Errorf( "Update: Error initializing UtxoView: %v", err) } + if blockToAttach.Header.PrevBlockHash != nil { + utxoView, err = txi.TXIndexChain.getUtxoViewAtBlockHash(*blockToAttach.Header.PrevBlockHash) + if err != nil { + return fmt.Errorf("Update: Problem getting UtxoView at block hash %v: %v", + blockToAttach.Header.PrevBlockHash, err) + } + } // Do each block update in a single transaction so we're safe in case the node // restarts. - blockHeight := uint64(txi.CoreChain.BlockTip().Height) err = txi.TXIndexChain.DB().Update(func(dbTxn *badger.Txn) error { // Iterate through each transaction in the block and do the following: @@ -425,7 +435,7 @@ func (txi *TXIndex) Update() error { // Finally, if the transaction is not the first in the block, we check the TxnConnectStatusByIndex to see if // it's marked by the block producer as a connecting transaction. 
PoS blocks should reflect this in TxnConnectStatusByIndex. hasConnectingPoSTxnStatus := false - if txi.Params.IsPoSBlockHeight(blockHeight) && (txnIndexInBlock > 0) && (blockMsg.TxnConnectStatusByIndex != nil) { + if txi.Params.IsPoSBlockHeight(blockMsg.Header.Height) && (txnIndexInBlock > 0) && (blockMsg.TxnConnectStatusByIndex != nil) { // Note that TxnConnectStatusByIndex doesn't include the first block reward transaction. hasConnectingPoSTxnStatus = blockMsg.TxnConnectStatusByIndex.Get(txnIndexInBlock - 1) } @@ -439,7 +449,7 @@ func (txi *TXIndex) Update() error { txn, err) } - err = DbPutTxindexTransactionMappingsWithTxn(dbTxn, nil, blockHeight, + err = DbPutTxindexTransactionMappingsWithTxn(dbTxn, nil, blockMsg.Header.Height, txn, txi.Params, txnMeta, txi.CoreChain.eventManager) if err != nil { return fmt.Errorf("Update: Problem adding txn %v to txindex: %v", From ea2db8f6f8ef34fdbab4a2edb5e55dc0853818fb Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 1 Mar 2024 15:57:26 -0500 Subject: [PATCH 529/762] Add flag to control number of mempool txns to sync in state syncer routine (#1083) --- cmd/config.go | 4 +++- cmd/node.go | 1 + cmd/run.go | 2 ++ lib/server.go | 3 ++- lib/state_change_syncer.go | 10 ++++++---- 5 files changed, 14 insertions(+), 6 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 3e3a47f97..4d47aad55 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -80,7 +80,8 @@ type Config struct { TimeEvents bool // State Syncer - StateChangeDir string + StateChangeDir string + StateSyncerMempoolTxnSyncLimit uint64 } func LoadConfig() *Config { @@ -176,6 +177,7 @@ func LoadConfig() *Config { // State Syncer config.StateChangeDir = viper.GetString("state-change-dir") + config.StateSyncerMempoolTxnSyncLimit = viper.GetUint64("state-syncer-mempool-txn-sync-limit") return &config } diff --git a/cmd/node.go b/cmd/node.go index 74dc9afce..18f6e3b5e 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -282,6 
+282,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Config.AugmentedBlockViewRefreshIntervalMillis, node.Config.PosBlockProductionIntervalMilliseconds, node.Config.PosTimeoutBaseDurationMilliseconds, + node.Config.StateSyncerMempoolTxnSyncLimit, ) if err != nil { // shouldRestart can be true if, on the previous run, we did not finish flushing all ancestral diff --git a/cmd/run.go b/cmd/run.go index c5ebe87a8..2eaf1fa67 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -222,6 +222,8 @@ func SetupRunFlags(cmd *cobra.Command) { cmd.PersistentFlags().Bool("time-events", false, "Enable simple event timer, helpful in hands-on performance testing") cmd.PersistentFlags().String("state-change-dir", "", "The directory for state change logs. WARNING: Changing this "+ "from an empty string to a non-empty string (or from a non-empty string to the empty string) requires a resync.") + cmd.PersistentFlags().Uint("state-syncer-mempool-txn-sync-limit", 10000, "The maximum number of transactions to "+ + "process in the mempool tx state syncer at a time.") cmd.PersistentFlags().VisitAll(func(flag *pflag.Flag) { viper.BindPFlag(flag.Name, flag) }) diff --git a/lib/server.go b/lib/server.go index c8cf4b731..cc04defef 100644 --- a/lib/server.go +++ b/lib/server.go @@ -409,6 +409,7 @@ func NewServer( _augmentedBlockViewRefreshIntervalMillis uint64, _posBlockProductionIntervalMilliseconds uint64, _posTimeoutBaseDurationMilliseconds uint64, + _stateSyncerMempoolTxnSyncLimit uint64, ) ( _srv *Server, _err error, @@ -422,7 +423,7 @@ func NewServer( if _stateChangeDir != "" { // Create the state change syncer to handle syncing state changes to disk, and assign some of its methods // to the event manager. 
- stateChangeSyncer = NewStateChangeSyncer(_stateChangeDir, _syncType) + stateChangeSyncer = NewStateChangeSyncer(_stateChangeDir, _syncType, _stateSyncerMempoolTxnSyncLimit) eventManager.OnStateSyncerOperation(stateChangeSyncer._handleStateSyncerOperation) eventManager.OnStateSyncerFlushed(stateChangeSyncer._handleStateSyncerFlush) } diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 421e2ef34..02ed421f7 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -259,6 +259,8 @@ type StateChangeSyncer struct { // of each entry, the consumer only has to sync the most recent version of each entry. // BlocksyncCompleteEntriesFlushed is used to track whether this one time flush has been completed. BlocksyncCompleteEntriesFlushed bool + + MempoolTxnSyncLimit uint64 } // Open a file, create if it doesn't exist. @@ -276,7 +278,8 @@ func openOrCreateLogFile(filePath string) (*os.File, error) { } // NewStateChangeSyncer initializes necessary log files and returns a StateChangeSyncer. 
-func NewStateChangeSyncer(stateChangeDir string, nodeSyncType NodeSyncType) *StateChangeSyncer { +func NewStateChangeSyncer(stateChangeDir string, nodeSyncType NodeSyncType, mempoolTxnSyncLimit uint64, +) *StateChangeSyncer { stateChangeFilePath := filepath.Join(stateChangeDir, StateChangeFileName) stateChangeIndexFilePath := filepath.Join(stateChangeDir, StateChangeIndexFileName) stateChangeMempoolFilePath := filepath.Join(stateChangeDir, StateChangeMempoolFileName) @@ -323,6 +326,7 @@ func NewStateChangeSyncer(stateChangeDir string, nodeSyncType NodeSyncType) *Sta StateSyncerMutex: &sync.Mutex{}, SyncType: nodeSyncType, BlocksyncCompleteEntriesFlushed: blocksyncCompleteEntriesFlushed, + MempoolTxnSyncLimit: mempoolTxnSyncLimit, } } @@ -725,10 +729,8 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser } currentTimestamp := time.Now().UnixNano() - // TODO: introduce flag to control the number of mempool txns to sync to the state change file. - numMempoolTxLimit := 10000 for ii, mempoolTx := range mempoolTxns { - if server.params.IsPoSBlockHeight(blockHeight) && ii > numMempoolTxLimit { + if server.params.IsPoSBlockHeight(blockHeight) && uint64(ii) > stateChangeSyncer.MempoolTxnSyncLimit { break } var utxoOpsForTxn []*UtxoOperation From 0c9f9d9d6f2c325c16adaf2c4cc066929082ad04 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 4 Mar 2024 08:58:26 -0800 Subject: [PATCH 530/762] wip: atomic connection logic --- lib/block_view.go | 4 + lib/block_view_atomic_txns.go | 340 +++++++++++++++++++++++++++++----- lib/block_view_types.go | 52 +++++- lib/errors.go | 13 ++ lib/network.go | 10 + 5 files changed, 376 insertions(+), 43 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 168f2a997..1c56cbce4 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -1539,6 +1539,8 @@ func (bav *UtxoView) DisconnectTransaction(currentTxn *MsgDeSoTxn, txnHash *Bloc OperationTypeCoinLockupTransfer, currentTxn, txnHash, utxoOpsForTxn, 
blockHeight) case TxnTypeCoinUnlock: return bav._disconnectCoinUnlock(OperationTypeCoinUnlock, currentTxn, txnHash, utxoOpsForTxn, blockHeight) + case TxnTypeAtomicTxns: + return bav._disconnectAtomicTxns(OperationTypeAtomicTxns, currentTxn, txnHash, utxoOpsForTxn, blockHeight) } @@ -3753,6 +3755,8 @@ func (bav *UtxoView) _connectTransaction( totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinLockupTransfer(txn, txHash, blockHeight, verifySignatures) case TxnTypeCoinUnlock: totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinUnlock(txn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures) + case TxnTypeAtomicTxns: + totalInput, totalOutput, utxoOpsForTxn, err = bav._connectAtomicTxns(txn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures) default: err = fmt.Errorf("ConnectTransaction: Unimplemented txn type %v", txn.TxnMeta.GetTxnType().String()) diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index 2006243e9..283e035fd 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -2,8 +2,10 @@ package lib import ( "bytes" + "fmt" "github.com/pkg/errors" "io" + "reflect" ) // @@ -58,51 +60,11 @@ type AtomicTxnsMetadata struct { // a malicious entity could reorder the transactions while still preserving the validity of the hashes // in the circularly linked list. The AtomicTxnsChainLength included in the first transaction ensures // the transactions are atomically executed in the order specified. - // NOTE: Technically, multiple transactions can include a AtomicTxnsChainLength key in their extra data - // which would enable the atomic transactions to be possibly reordered. While this is possible, - // it's not necessarily recommended. + // NOTE: As a measure to reduce potential mempool divergences, only one transaction can have an + // AtomicTxnsChainLength key. 
Txns []*MsgDeSoTxn } -func (msg *MsgDeSoTxn) IsAtomicTxn() bool { - // An atomic transaction is qualified by the existence of the NextAtomicTxnPreHash - // and PreviousAtomicTxnPreHash keys in the ExtraData map. - if _, keyExists := msg.ExtraData[NextAtomicTxnPreHash]; !keyExists { - return false - } - if _, keyExists := msg.ExtraData[PreviousAtomicTxnPreHash]; !keyExists { - return false - } - return true -} - -func (msg *MsgDeSoTxn) AtomicHash() (*BlockHash, error) { - // Create a duplicate of the transaction to ensure we don't edit the existing transaction. - msgDuplicate, err := msg.Copy() - if err != nil { - return nil, errors.Wrap(err, "MsgDeSoTxn.AtomicHash: Cannot create duplicate transaction") - } - - // Sanity check that the transaction includes the necessary extra data to be included in an atomic transaction. - if !msgDuplicate.IsAtomicTxn() { - return nil, errors.New("MsgDeSoTxn.AtomicHash: Cannot compute atomic hash on non-atomic transaction") - } - - // Delete the NextAtomicTxnPreHash and PreviousAtomicTxnPreHash from the ExtraData map. - delete(msgDuplicate.ExtraData, NextAtomicTxnPreHash) - delete(msgDuplicate.ExtraData, PreviousAtomicTxnPreHash) - - // Convert the transaction to bytes but do NOT encode the transaction signature. - preSignature := true - txBytes, err := msgDuplicate.ToBytes(preSignature) - if err != nil { - return nil, errors.Wrap(err, "MsgDeSoTxn.AtomicHash: cannot convert modified transaction to bytes") - } - - // Return the SHA256 double hash of the resulting bytes. 
- return Sha256DoubleHash(txBytes), nil -} - func (txnData *AtomicTxnsMetadata) GetTxnType() TxnType { return TxnTypeAtomicTxns } @@ -172,3 +134,297 @@ func (txnData *AtomicTxnsMetadata) FromBytes(data []byte) error { func (txnData *AtomicTxnsMetadata) New() DeSoTxnMetadata { return &AtomicTxnsMetadata{} } + +// +// HELPER FUNCTIONS: MsgDeSoTxn +// + +func (msg *MsgDeSoTxn) IsAtomicTxn() bool { + // An atomic transaction is qualified by the existence of the NextAtomicTxnPreHash + // and PreviousAtomicTxnPreHash keys in the ExtraData map. + if _, keyExists := msg.ExtraData[NextAtomicTxnPreHash]; !keyExists { + return false + } + if _, keyExists := msg.ExtraData[PreviousAtomicTxnPreHash]; !keyExists { + return false + } + return true +} + +func (msg *MsgDeSoTxn) AtomicHash() (*BlockHash, error) { + // Create a duplicate of the transaction to ensure we don't edit the existing transaction. + msgDuplicate, err := msg.Copy() + if err != nil { + return nil, errors.Wrap(err, "MsgDeSoTxn.AtomicHash: Cannot create duplicate transaction") + } + + // Sanity check that the transaction includes the necessary extra data to be included in an atomic transaction. + if !msgDuplicate.IsAtomicTxn() { + return nil, errors.New("MsgDeSoTxn.AtomicHash: Cannot compute atomic hash on non-atomic transaction") + } + + // Delete the NextAtomicTxnPreHash and PreviousAtomicTxnPreHash from the ExtraData map. + delete(msgDuplicate.ExtraData, NextAtomicTxnPreHash) + delete(msgDuplicate.ExtraData, PreviousAtomicTxnPreHash) + + // Convert the transaction to bytes but do NOT encode the transaction signature. + preSignature := true + txBytes, err := msgDuplicate.ToBytes(preSignature) + if err != nil { + return nil, errors.Wrap(err, "MsgDeSoTxn.AtomicHash: cannot convert modified transaction to bytes") + } + + // Return the SHA256 double hash of the resulting bytes. 
+ return Sha256DoubleHash(txBytes), nil +} + +// +// Connect and Disconnect Atomic Txn Logic +// + +func (bav *UtxoView) _connectAtomicTxns( + txn *MsgDeSoTxn, + txHash *BlockHash, + blockHeight uint32, + blockTimestampNanoSecs int64, + verifySignature bool, +) ( + _totalInput uint64, + _totalOutput uint64, + _utxoOps []*UtxoOperation, + _err error, +) { + var utxoOpsForTxn []*UtxoOperation + + // Validate the connecting block height. + if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { + return 0, 0, nil, + errors.Wrap(RuleErrorAtomicTxnBeforeBlockHeight, "_connectAtomicTxns") + } + + // Validate the transaction metadata type. + if txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxns { + return 0, 0, nil, + fmt.Errorf("_connectAtomicTxns: TxnMeta type: %v", txn.TxnMeta.GetTxnType().GetTxnString()) + } + + // Verify the wrapper of the transaction. This does not verify the txn.TxnMeta contents. + if err := _verifyAtomicTxnsWrapper(txn); err != nil { + return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") + } + + // Extract the metadata from the transaction. + txMeta := txn.TxnMeta.(*AtomicTxnsMetadata) + + // Verify the chain of transactions as being not tampered with. This verifies the txn.TxnMeta contents. + if err := _verifyAtomicTxnsChain(txMeta); err != nil { + return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") + } + + // Connect the inner atomic transactions. + var innerUtxoOps [][]*UtxoOperation + var totalInput, totalOutput, totalFees uint64 + for _, innerTxn := range txMeta.Txns { + // TODO: Verify TxnTypeSubmitPost and TxnTypeUpdateProfile to ensure ignoreUtxos is always safe to set as false. + innerTxnUtxoOps, innerTxnInput, innerTxnOutput, txnFees, err := bav._connectTransaction( + innerTxn, txHash, blockHeight, blockTimestampNanoSecs, verifySignature, false) + if err != nil { + return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") + } + + // Collect the inner txn utxo ops. We will use these if we ever disconnect. 
+ innerUtxoOps = append(innerUtxoOps, innerTxnUtxoOps) + + // Collect the input/output/fees to ensure fees are being paid properly. + // + // NOTE: There's two design options that can be utilized here. The first + // involves checking that every transaction covers their own fees. + // The second involves checking that the cumulative fees paid across all + // transactions satisfies the fees for the entire atomic transaction wrapper. + // The second design has two key advantages and hence why we use it here: + // (1) It's easier to implement and doesn't require a fee check within _connectAtomicTxns + // (2) It enables a special app layer use case where an atomic transaction + // could be used to subsidize user transactions (likes, comments, update profiles, etc) + // without directly sending the user DESO (the no crypto faucet use cases). In effect, + // the user can utilize apps without needing DESO if the app is willing to subsidize + // their transaction fees. + totalInput, err = SafeUint64().Add(totalInput, innerTxnInput) + if err != nil { + return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") + } + totalOutput, err = SafeUint64().Add(totalOutput, innerTxnOutput) + if err != nil { + return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") + } + totalFees, err = SafeUint64().Add(totalFees, txnFees) + if err != nil { + return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") + } + } + + // Construct a UtxoOp for the transaction. + utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ + Type: OperationTypeAtomicTxns, + AtomicTxnsInnerUtxoOps: innerUtxoOps, + }) + + return totalInput, totalOutput, utxoOpsForTxn, nil +} + +func _verifyAtomicTxnsWrapper(txn *MsgDeSoTxn) error { + // An atomic transaction consists of two parts: (1) a 'wrapper' transaction and (2) a sequence of internal + // transactions that abide by the rules specified in AtomicTxnsMetadata. Both parts must be properly formed + // for the atomic transaction to be connected to the blockchain. 
+ + // + // (1) Validate the wrapper transaction. + // + + // Since the wrapper does not require a public key nor a corresponding signature, we force both + // the transaction public key to be the ZeroPublicKey and the signature to be nil. + if !NewPublicKey(txn.PublicKey).IsZeroPublicKey() { + return RuleErrorAtomicTxnsWrapperPublicKeyMustBeZero + } + if txn.Signature.Sign != nil { + return RuleErrorAtomicTxnsWrapperSignatureMustBeNil + } + + // Since the transaction is "signed" by the ZeroPublicKey which has non-zero balance + // due to DESO burns, we must verify the transaction fees to be zero to ensure validators + // are not un-burning DESO via fees. + // TODO: Figure out and comment why TxOutputs must be zero + // TODO: Figure out and comment if TxInputs must also be zero + if txn.TxnFeeNanos != 0 { + return RuleErrorAtomicTxnsWrapperMustHaveZeroFee + } + if len(txn.TxOutputs) != 0 { + return RuleErrorAtomicTxnsWrapperMustHaveZeroOutputs + } + + // To ensure consistent hashing, we check that the + if txn.TxnNonce.ExpirationBlockHeight != 0 || txn.TxnNonce.PartialID != 0 { + return RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce + } + + // Since the wrapper is free, we check to ensure the associated ExtraData is empty to prevent + // free storage on the blockchain. + if len(txn.ExtraData) != 0 { + return RuleErrorAtomicTxnsWrapperMustHaveZeroExtraData + } + + return nil +} + +func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsMetadata) error { + // Validate: + // (1) The inner transactions are meant to be included in an atomic transaction. + // (2) The start point is the first inner transaction and there's only one start point. + // We also collect the atomic hash of each inner transaction here for convenience. + var atomicHashes []*BlockHash + for ii, innerTxn := range txnMeta.Txns { + // Validate the inner transaction as meant to be included in an atomic transaction. 
+ if !innerTxn.IsAtomicTxn() { + return RuleErrorAtomicTxnsHasNonAtomicInnerTxn + } + + // Validate the starting point of the atomic transactions chain. + _, keyExists := innerTxn.ExtraData[AtomicTxnsChainLength] + if keyExists && ii == 0 { + return RuleErrorAtomicTxnsMustStartWithChainLength + } + if keyExists && ii > 0 { + return RuleErrorAtomicTxnsHasMoreThanOneStartPoint + } + + // The error check in AtomicHash() is almost redundant, but we must keep it in the event + // that the byte buffer for the Sha256 hash fails to allocate. This should almost never + // occur, and there's more serious issues if it does. + innerTxnAtomicHash, err := innerTxn.AtomicHash() + if err != nil { + return errors.Wrap(err, "_verifyAtomicTxnsChain") + } + atomicHashes = append(atomicHashes, innerTxnAtomicHash) + } + + // Construct special helper functions for circular doubly linked list indexing. + nextIndex := func(currentIndex int, chainLength int) int { + // Check for the special case of an atomic chain of length 1. + if chainLength == 1 { + return currentIndex + } + return (currentIndex + 1) % chainLength + } + prevIndex := func(currentIndex int, chainLength int) int { + // Check for the special case of an atomic chain of length 1. + if chainLength == 1 { + return currentIndex + } + + // Check for the wrap around case. + if currentIndex == 0 { + return chainLength - 1 + } + return currentIndex - 1 + } + + // Validate the chain sequence specified. + for ii, innerTxn := range txnMeta.Txns { + // Check the next transaction. 
+	if !reflect.DeepEqual( +		innerTxn.ExtraData[NextAtomicTxnPreHash], +		atomicHashes[nextIndex(ii, len(txnMeta.Txns))]) { +		return RuleErrorAtomicTxnsHasBrokenChain +	} + +	// Check the previous transaction +	if !reflect.DeepEqual( +		innerTxn.ExtraData[PreviousAtomicTxnPreHash], +		atomicHashes[prevIndex(ii, len(txnMeta.Txns))]) { +		return RuleErrorAtomicTxnsHasBrokenChain +	} + } + return nil +} + +func (bav *UtxoView) _disconnectAtomicTxns( + operationType OperationType, + currentTxn *MsgDeSoTxn, + txnHash *BlockHash, + utxoOpsForTxn []*UtxoOperation, + blockHeight uint32, +) error { + if len(utxoOpsForTxn) == 0 { + return fmt.Errorf("_disconnectAtomicTxns: utxoOperations are missing") + } + operationIndex := len(utxoOpsForTxn) - 1 + + // Verify the last operation as being of type OperationTypeAtomicTxns. + if utxoOpsForTxn[operationIndex].Type != OperationTypeAtomicTxns { + return fmt.Errorf("_disconnectAtomicTxns: Trying to revert "+ + "OperationTypeAtomicTxns but found type %v", utxoOpsForTxn[operationIndex].Type) + } + + // Gather the transaction metadata so we know the internal transactions. + txMeta := currentTxn.TxnMeta.(*AtomicTxnsMetadata) + + // Sanity check the AtomicTxns operation exists. + operationData := utxoOpsForTxn[operationIndex] + if operationData.AtomicTxnsInnerUtxoOps == nil || + len(operationData.AtomicTxnsInnerUtxoOps) != len(txMeta.Txns) { + return fmt.Errorf("_disconnectAtomicTxns: Trying to revert OperationTypeAtomicTxns " + + "but found nil or mismatched number of UtxoOps for inner transactions") + } + + // Disconnect the internal transactions in reverse.
+ for ii := len(txMeta.Txns) - 1; ii >= 0; ii-- { + innerTxn := txMeta.Txns[ii] + + if err := bav.DisconnectTransaction( + innerTxn, + innerTxn.Hash(), + operationData.AtomicTxnsInnerUtxoOps[ii], + blockHeight); err != nil { + return errors.Wrapf(err, "_disconnectAtomicTxns") + } + } +} diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 04d52e1ae..f58d4aa97 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -682,7 +682,8 @@ const ( OperationTypeStakeDistributionRestake OperationType = 49 OperationTypeStakeDistributionPayToBalance OperationType = 50 OperationTypeSetValidatorLastActiveAtEpoch OperationType = 51 - // NEXT_TAG = 52 + OperationTypeAtomicTxns OperationType = 52 + // NEXT_TAG = 53 ) func (op OperationType) String() string { @@ -1043,6 +1044,21 @@ type UtxoOperation struct { // transaction metadata itself doesn't specify the information we need to return to // rosetta. LockedAtEpochNumber uint64 + + // AtomicTxnsInnerUtxoOps maintains a 2D slice of all UtxoOps collected from transactions + // who were executed atomically. The 2D array allows us to easily disconnect transactions + // who are part of an atomic transaction as we hold each of their UtxoOps separately. + // + // NOTE: While it may seem erroneous to have a field within the UtxoOperation struct of + // type UtxoOperation, this is valid because the size of the pointer is always known at + // compile time. Hence, there's no circular dependency as is the case if we were to use + // [][]UtxoOperation for this field instead. This could equivalently be a 2D array of + // void pointers from the compiler's perspective. In addition, it may seem as though + // there's a recursive issue in RawEncodeWithoutMetadata resulting from cyclic dependencies, + // this is not the case as we only call RawEncodeWithoutMetadata if the length of the + // AtomicTxnsInnerUtxoOps transaction is non-zero.
This will always occur, meaning we + // can deterministically encode and decode AtomicTxnsInnerUtxoOps. + AtomicTxnsInnerUtxoOps [][]*UtxoOperation } // FIXME: This hackIsRunningStateSyncer() call is a hack to get around the fact that @@ -1415,6 +1431,15 @@ func (op *UtxoOperation) RawEncodeWithoutMetadata(blockHeight uint64, skipMetada // LockedAtEpochNumber data = append(data, UintToBuf(op.LockedAtEpochNumber)...) + + // AtomicTxnsInnerUtxoOps + data = append(data, UintToBuf(uint64(len(op.AtomicTxnsInnerUtxoOps)))...) + for _, entry := range op.AtomicTxnsInnerUtxoOps { + data = append(data, UintToBuf(uint64(len(entry)))...) + for _, utxoOps := range entry { + data = append(data, EncodeToBytes(blockHeight, utxoOps, skipMetadata...)...) + } + } } return data @@ -2110,6 +2135,31 @@ func (op *UtxoOperation) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes. if op.LockedAtEpochNumber, err = ReadUvarint(rr); err != nil { return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading LockedAtEpochNumber: ") } + + // AtomicTxnsInnerUtxoOps + lenAtomicTnxInnerUtxoOps, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "UtxoOperation.Decode: Problem reading len of AtomicTxnsInnerUtxoOps") + } + for ii := uint64(0); ii < lenAtomicTnxInnerUtxoOps; ii++ { + lenInnerOperations, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, + "UtxoOperation.Decode: Problem reading len of AtomicTxnsInnerUtxoOps[%d]", ii) + } + + var innerOperations []*UtxoOperation + for jj := uint64(0); jj < lenInnerOperations; jj++ { + innerOperation := &UtxoOperation{} + if exist, err := DecodeFromBytes(innerOperation, rr); exist && err == nil { + innerOperations = append(innerOperations, innerOperation) + } else { + return errors.Wrapf(err, + "UtxoOperation.Decode: Problem decoding AtomicTxnsInnerUtxoOps[%d][%d]", ii, jj) + } + } + op.AtomicTxnsInnerUtxoOps = append(op.AtomicTxnsInnerUtxoOps, innerOperations) + } } return nil diff --git a/lib/errors.go 
b/lib/errors.go index 0bae19fbd..5debc5cb9 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -537,6 +537,19 @@ const ( RuleErrorUpdateCoinLockupParamsUpdatingNonExistentProfile RuleError = "RuleErrorUpdateCoinLockupParamsUpdatingNonExistentProfile" RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction RuleError = "RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction" + // Atomic Transactions + RuleErrorAtomicTxnBeforeBlockHeight RuleError = "RuleErrorAtomicTxnBeforeBlockHeight" + RuleErrorAtomicTxnsWrapperPublicKeyMustBeZero RuleError = "RuleErrorAtomicTxnPublicKeyMustBeZero" + RuleErrorAtomicTxnsWrapperMustHaveZeroOutputs RuleError = "RuleErrorAtomicTxnMustHaveZeroOutputs" + RuleErrorAtomicTxnsWrapperMustHaveZeroFee RuleError = "RuleErrorAtomicTxnMustHaveZeroFee" + RuleErrorAtomicTxnsWrapperMustHaveZeroExtraData RuleError = "RuleErrorAtomicTxnMustHaveZeroExtraData" + RuleErrorAtomicTxnsWrapperSignatureMustBeNil RuleError = "RuleErrorAtomicTxnSignatureMustBeNil" + RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce" + RuleErrorAtomicTxnsMustStartWithChainLength RuleError = "RuleErrorAtomicTxnsMustStartWithChainLength" + RuleErrorAtomicTxnsHasMoreThanOneStartPoint RuleError = "RuleErrorAtomicTxnsHasMoreThanOneStartPoint" + RuleErrorAtomicTxnsHasNonAtomicInnerTxn RuleError = "RuleErrorAtomicTxnsHasNonAtomicInnerTxn" + RuleErrorAtomicTxnsHasBrokenChain RuleError = "RuleErrorAtomicTxnsHasBrokenChain" + HeaderErrorDuplicateHeader RuleError = "HeaderErrorDuplicateHeader" HeaderErrorNilPrevHash RuleError = "HeaderErrorNilPrevHash" HeaderErrorInvalidParent RuleError = "HeaderErrorInvalidParent" diff --git a/lib/network.go b/lib/network.go index efcc1772b..467d76fb6 100644 --- a/lib/network.go +++ b/lib/network.go @@ -262,6 +262,7 @@ const ( TxnTypeUpdateCoinLockupParams TxnType = 41 TxnTypeCoinLockupTransfer TxnType = 42 TxnTypeCoinUnlock TxnType = 43 + TxnTypeAtomicTxns TxnType = 44 // 
NEXT_ID = 44 ) @@ -313,6 +314,7 @@ const ( TxnStringUpdateCoinLockupParams TxnString = "UPDATE_COIN_LOCKUP_PARAMS" TxnStringCoinLockupTransfer TxnString = "COIN_LOCKUP_TRANSFER" TxnStringCoinUnlock TxnString = "COIN_UNLOCK" + TxnStringAtomicTxns TxnString = "ATOMIC_TXNS" ) var ( @@ -327,6 +329,7 @@ var ( TxnTypeAccessGroup, TxnTypeAccessGroupMembers, TxnTypeNewMessage, TxnTypeRegisterAsValidator, TxnTypeUnregisterAsValidator, TxnTypeStake, TxnTypeUnstake, TxnTypeUnlockStake, TxnTypeUnjailValidator, TxnTypeCoinLockup, TxnTypeUpdateCoinLockupParams, TxnTypeCoinLockupTransfer, TxnTypeCoinUnlock, + TxnTypeAtomicTxns, } AllTxnString = []TxnString{ TxnStringUnset, TxnStringBlockReward, TxnStringBasicTransfer, TxnStringBitcoinExchange, TxnStringPrivateMessage, @@ -339,6 +342,7 @@ var ( TxnStringAccessGroup, TxnStringAccessGroupMembers, TxnStringNewMessage, TxnStringRegisterAsValidator, TxnStringUnregisterAsValidator, TxnStringStake, TxnStringUnstake, TxnStringUnlockStake, TxnStringUnjailValidator, TxnStringCoinLockup, TxnStringUpdateCoinLockupParams, TxnStringCoinLockupTransfer, TxnStringCoinUnlock, + TxnStringAtomicTxns, } ) @@ -438,6 +442,8 @@ func (txnType TxnType) GetTxnString() TxnString { return TxnStringCoinLockupTransfer case TxnTypeCoinUnlock: return TxnStringCoinUnlock + case TxnTypeAtomicTxns: + return TxnStringAtomicTxns default: return TxnStringUndefined } @@ -531,6 +537,8 @@ func GetTxnTypeFromString(txnString TxnString) TxnType { return TxnTypeCoinLockupTransfer case TxnStringCoinUnlock: return TxnTypeCoinUnlock + case TxnStringAtomicTxns: + return TxnTypeAtomicTxns default: // TxnTypeUnset means we couldn't find a matching txn type return TxnTypeUnset @@ -632,6 +640,8 @@ func NewTxnMetadata(txType TxnType) (DeSoTxnMetadata, error) { return (&CoinLockupTransferMetadata{}).New(), nil case TxnTypeCoinUnlock: return (&CoinUnlockMetadata{}).New(), nil + case TxnTypeAtomicTxns: + return (&AtomicTxnsMetadata{}).New(), nil default: return nil, 
fmt.Errorf("NewTxnMetadata: Unrecognized TxnType: %v; make sure you add the new type of transaction to NewTxnMetadata", txType) } From 62cae9d9c9840fd12abaae844f290afdd0eabc28 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 4 Mar 2024 12:34:00 -0800 Subject: [PATCH 531/762] Wrapper verification and connection logic for atomic transactions. --- lib/block_view_atomic_txns.go | 75 +++++++++++++++++++++-------------- lib/errors.go | 24 ++++++----- 2 files changed, 58 insertions(+), 41 deletions(-) diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index 283e035fd..b4614a0dd 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -209,6 +209,9 @@ func (bav *UtxoView) _connectAtomicTxns( } // Verify the wrapper of the transaction. This does not verify the txn.TxnMeta contents. + // NOTE: The intentional lack of a _connectBasicTransfer or _connectBasicTransferWithExtraSpend + // operation here skips a signature check on the wrapper. Each internal transaction + // will have its signatures checked when it's connected via _connectTransaction below. if err := _verifyAtomicTxnsWrapper(txn); err != nil { return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") } @@ -223,10 +226,10 @@ func (bav *UtxoView) _connectAtomicTxns( // Connect the inner atomic transactions. var innerUtxoOps [][]*UtxoOperation - var totalInput, totalOutput, totalFees uint64 + var totalFees uint64 for _, innerTxn := range txMeta.Txns { // TODO: Verify TxnTypeSubmitPost and TxnTypeUpdateProfile to ensure ignoreUtxos is always safe to set as false. - innerTxnUtxoOps, innerTxnInput, innerTxnOutput, txnFees, err := bav._connectTransaction( + innerTxnUtxoOps, _, _, txnFees, err := bav._connectTransaction( innerTxn, txHash, blockHeight, blockTimestampNanoSecs, verifySignature, false) if err != nil { return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") @@ -235,7 +238,7 @@ func (bav *UtxoView) _connectAtomicTxns( // Collect the inner txn utxo ops. 
We will use these if we ever disconnect. innerUtxoOps = append(innerUtxoOps, innerTxnUtxoOps) - // Collect the input/output/fees to ensure fees are being paid properly. + // Collect the fees to ensure fees are being paid properly. // // NOTE: There's two design options that can be utilized here. The first // involves checking that every transaction covers their own fees. @@ -248,14 +251,6 @@ func (bav *UtxoView) _connectAtomicTxns( // without directly sending the user DESO (the no crypto faucet use cases). In effect, // the user can utilize apps without needing DESO if the app is willing to subsidize // their transaction fees. - totalInput, err = SafeUint64().Add(totalInput, innerTxnInput) - if err != nil { - return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") - } - totalOutput, err = SafeUint64().Add(totalOutput, innerTxnOutput) - if err != nil { - return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") - } totalFees, err = SafeUint64().Add(totalFees, txnFees) if err != nil { return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") @@ -268,18 +263,10 @@ func (bav *UtxoView) _connectAtomicTxns( AtomicTxnsInnerUtxoOps: innerUtxoOps, }) - return totalInput, totalOutput, utxoOpsForTxn, nil + return 0, 0, utxoOpsForTxn, nil } func _verifyAtomicTxnsWrapper(txn *MsgDeSoTxn) error { - // An atomic transaction consists of two parts: (1) a 'wrapper' transaction and (2) a sequence of internal - // transactions that abide by the rules specified in AtomicTxnsMetadata. Both parts must be properly formed - // for the atomic transaction to be connected to the blockchain. - - // - // (1) Validate the wrapper transaction. - // - // Since the wrapper does not require a public key nor a corresponding signature, we force both // the transaction public key to be the ZeroPublicKey and the signature to be nil. 
if !NewPublicKey(txn.PublicKey).IsZeroPublicKey() { @@ -289,25 +276,51 @@ func _verifyAtomicTxnsWrapper(txn *MsgDeSoTxn) error { return RuleErrorAtomicTxnsWrapperSignatureMustBeNil } - // Since the transaction is "signed" by the ZeroPublicKey which has non-zero balance - // due to DESO burns, we must verify the transaction fees to be zero to ensure validators - // are not un-burning DESO via fees. - // TODO: Figure out and comment why TxOutputs must be zero - // TODO: Figure out and comment if TxInputs must also be zero - if txn.TxnFeeNanos != 0 { - return RuleErrorAtomicTxnsWrapperMustHaveZeroFee + // We force TxInputs on the wrapper to be empty for several reasons: + // (1) This is consistent with the logic found in _connectBasicTransferWithExtraSpend() + // that forces TxInputs to be empty following the balance model fork. + // (2) Allowing TxInputs to not be empty would lead to an attack vector where the transaction + // size may be bloated with random TxInputs that do nothing. + // (3) Leads to consistent hashing for the same atomic transaction wrapper and its inner transactions. + // (4) It's generally safer to be more restrictive on the transaction structure. + if len(txn.TxInputs) != 0 { + return RuleErrorAtomicTxnsWrapperMustHaveZeroInputs + } + + // We force TxOutputs on the wrapper to be empty even though this field is still used post balance model fork. + // The reason is this transaction is effectively "signed" by the ZeroPublicKey which is a potential + // burn address. Hence, allowing TxOutputs to be populated would enable un-burning DESO which we do not want. if len(txn.TxOutputs) != 0 { return RuleErrorAtomicTxnsWrapperMustHaveZeroOutputs } - // To ensure consistent hashing, we check that the + // There exist three design options for txn.TxnFeeNanos rules in atomic transaction wrappers: + // (1) Force txn.TxnFeeNanos to equal zero. + // (2) Force txn.TxnFeeNanos to equal the sum of the internal transaction's txn.TxnFeeNanos fields. 
+ // (3) Ignore txn.TxnFeeNanos entirely. + // + // Because txn.TxnFeeNanos gets used in several places for non-connection logic (e.g. BMF), + // it's important to use design option (2) to be consistent across core. This check as a result + // becomes extremely important in _connectAtomicTxns(). + var totalInnerTxnFees uint64 + var err error + for _, innerTxn := range txn.TxnMeta.(*AtomicTxnsMetadata).Txns { + totalInnerTxnFees, err = SafeUint64().Add(totalInnerTxnFees, innerTxn.TxnFeeNanos) + if err != nil { + return RuleErrorAtomicTxnsWrapperHasInteralFeeOverflow + } + } + if txn.TxnFeeNanos != totalInnerTxnFees { + return RuleErrorAtomicTxnsWrapperMustHaveEqualFeeToInternalTxns + } + + // Technically, the txn.TxnNonce field could be set to anything, but we force it to be zeroed for consistent hashing of the wrapper. if txn.TxnNonce.ExpirationBlockHeight != 0 || txn.TxnNonce.PartialID != 0 { return RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce } - // Since the wrapper is free, we check to ensure the associated ExtraData is empty to prevent - // free storage on the blockchain. + // Since the wrapper is free and modifiable by anyone, we check to ensure the + // associated ExtraData is empty to prevent free storage on the blockchain. 
if len(txn.ExtraData) != 0 { return RuleErrorAtomicTxnsWrapperMustHaveZeroExtraData } @@ -427,4 +440,6 @@ func (bav *UtxoView) _disconnectAtomicTxns( return errors.Wrapf(err, "_disconnectAtomicTxns") } } + + return nil } diff --git a/lib/errors.go b/lib/errors.go index 5debc5cb9..59126891a 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -538,17 +538,19 @@ const ( RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction RuleError = "RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction" // Atomic Transactions - RuleErrorAtomicTxnBeforeBlockHeight RuleError = "RuleErrorAtomicTxnBeforeBlockHeight" - RuleErrorAtomicTxnsWrapperPublicKeyMustBeZero RuleError = "RuleErrorAtomicTxnPublicKeyMustBeZero" - RuleErrorAtomicTxnsWrapperMustHaveZeroOutputs RuleError = "RuleErrorAtomicTxnMustHaveZeroOutputs" - RuleErrorAtomicTxnsWrapperMustHaveZeroFee RuleError = "RuleErrorAtomicTxnMustHaveZeroFee" - RuleErrorAtomicTxnsWrapperMustHaveZeroExtraData RuleError = "RuleErrorAtomicTxnMustHaveZeroExtraData" - RuleErrorAtomicTxnsWrapperSignatureMustBeNil RuleError = "RuleErrorAtomicTxnSignatureMustBeNil" - RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce" - RuleErrorAtomicTxnsMustStartWithChainLength RuleError = "RuleErrorAtomicTxnsMustStartWithChainLength" - RuleErrorAtomicTxnsHasMoreThanOneStartPoint RuleError = "RuleErrorAtomicTxnsHasMoreThanOneStartPoint" - RuleErrorAtomicTxnsHasNonAtomicInnerTxn RuleError = "RuleErrorAtomicTxnsHasNonAtomicInnerTxn" - RuleErrorAtomicTxnsHasBrokenChain RuleError = "RuleErrorAtomicTxnsHasBrokenChain" + RuleErrorAtomicTxnBeforeBlockHeight RuleError = "RuleErrorAtomicTxnBeforeBlockHeight" + RuleErrorAtomicTxnsWrapperPublicKeyMustBeZero RuleError = "RuleErrorAtomicTxnsWrapperPublicKeyMustBeZero" + RuleErrorAtomicTxnsWrapperMustHaveZeroInputs RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroInputs" + RuleErrorAtomicTxnsWrapperMustHaveZeroOutputs RuleError = 
"RuleErrorAtomicTxnsWrapperMustHaveZeroOutputs" + RuleErrorAtomicTxnsWrapperHasInteralFeeOverflow RuleError = "RuleErrorAtomicTxnsWrapperHasInternalFeeOverflow" + RuleErrorAtomicTxnsWrapperMustHaveEqualFeeToInternalTxns RuleError = "RuleErrorAtomicTxnsWrapperMustHaveEqualFeeToInternalTxns" + RuleErrorAtomicTxnsWrapperMustHaveZeroExtraData RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroExtraData" + RuleErrorAtomicTxnsWrapperSignatureMustBeNil RuleError = "RuleErrorAtomicTxnsWrapperSignatureMustBeNil" + RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce" + RuleErrorAtomicTxnsMustStartWithChainLength RuleError = "RuleErrorAtomicTxnsMustStartWithChainLength" + RuleErrorAtomicTxnsHasMoreThanOneStartPoint RuleError = "RuleErrorAtomicTxnsHasMoreThanOneStartPoint" + RuleErrorAtomicTxnsHasNonAtomicInnerTxn RuleError = "RuleErrorAtomicTxnsHasNonAtomicInnerTxn" + RuleErrorAtomicTxnsHasBrokenChain RuleError = "RuleErrorAtomicTxnsHasBrokenChain" HeaderErrorDuplicateHeader RuleError = "HeaderErrorDuplicateHeader" HeaderErrorNilPrevHash RuleError = "HeaderErrorNilPrevHash" From 8937613edc65e10bb7d35e03671d8fa58ab5424e Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 4 Mar 2024 12:43:54 -0800 Subject: [PATCH 532/762] Add rule error check for nested atomic transactions. 
--- lib/block_view_atomic_txns.go | 12 +++++++++--- lib/errors.go | 3 ++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index b4614a0dd..1557ff216 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -307,7 +307,7 @@ func _verifyAtomicTxnsWrapper(txn *MsgDeSoTxn) error { for _, innerTxn := range txn.TxnMeta.(*AtomicTxnsMetadata).Txns { totalInnerTxnFees, err = SafeUint64().Add(totalInnerTxnFees, innerTxn.TxnFeeNanos) if err != nil { - return RuleErrorAtomicTxnsWrapperHasInteralFeeOverflow + return RuleErrorAtomicTxnsWrapperHasInternalFeeOverflow } } if txn.TxnFeeNanos != totalInnerTxnFees { @@ -330,11 +330,17 @@ func _verifyAtomicTxnsWrapper(txn *MsgDeSoTxn) error { func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsMetadata) error { // Validate: - // (1) The inner transactions are meant to be included in an atomic transaction. - // (2) The start point is the first inner transaction and there's only one start point. + // (1) The inner transactions are not additional redundant atomic transactions wrappers. + // (2) The inner transactions are meant to be included in an atomic transaction. + // (3) The start point is the first inner transaction and there's only one start point. // We also collect the atomic hash of each inner transaction here for convenience. var atomicHashes []*BlockHash for ii, innerTxn := range txnMeta.Txns { + // Validate this transaction is not another redundant atomic transaction. + if innerTxn.TxnMeta.GetTxnType() == TxnTypeAtomicTxns { + return RuleErrorAtomicTxnsHasAtomicTxnsInnerTxn + } + // Validate the inner transaction as meant to be included in an atomic transaction. 
if !innerTxn.IsAtomicTxn() { return RuleErrorAtomicTxnsHasNonAtomicInnerTxn diff --git a/lib/errors.go b/lib/errors.go index 59126891a..9f084cbe7 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -542,13 +542,14 @@ const ( RuleErrorAtomicTxnsWrapperPublicKeyMustBeZero RuleError = "RuleErrorAtomicTxnsWrapperPublicKeyMustBeZero" RuleErrorAtomicTxnsWrapperMustHaveZeroInputs RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroInputs" RuleErrorAtomicTxnsWrapperMustHaveZeroOutputs RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroOutputs" - RuleErrorAtomicTxnsWrapperHasInteralFeeOverflow RuleError = "RuleErrorAtomicTxnsWrapperHasInternalFeeOverflow" + RuleErrorAtomicTxnsWrapperHasInternalFeeOverflow RuleError = "RuleErrorAtomicTxnsWrapperHasInternalFeeOverflow" RuleErrorAtomicTxnsWrapperMustHaveEqualFeeToInternalTxns RuleError = "RuleErrorAtomicTxnsWrapperMustHaveEqualFeeToInternalTxns" RuleErrorAtomicTxnsWrapperMustHaveZeroExtraData RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroExtraData" RuleErrorAtomicTxnsWrapperSignatureMustBeNil RuleError = "RuleErrorAtomicTxnsWrapperSignatureMustBeNil" RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce" RuleErrorAtomicTxnsMustStartWithChainLength RuleError = "RuleErrorAtomicTxnsMustStartWithChainLength" RuleErrorAtomicTxnsHasMoreThanOneStartPoint RuleError = "RuleErrorAtomicTxnsHasMoreThanOneStartPoint" + RuleErrorAtomicTxnsHasAtomicTxnsInnerTxn RuleError = "RuleErrorAtomicTxnsHasAtomicTxnsInnerTxn" RuleErrorAtomicTxnsHasNonAtomicInnerTxn RuleError = "RuleErrorAtomicTxnsHasNonAtomicInnerTxn" RuleErrorAtomicTxnsHasBrokenChain RuleError = "RuleErrorAtomicTxnsHasBrokenChain" From b492efeb34503a39edc7c09659de1100e45783cb Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 4 Mar 2024 13:36:38 -0800 Subject: [PATCH 533/762] PoS NetworkManager Consolidation (#1078) * NetworkManager consolidation * Nits --- 
.../network_manager_routines_test.go | 6 +- .../network_manager_utils_test.go | 36 +- lib/handshake_manager.go | 168 ------ lib/network_manager.go | 559 ++++++++++++++++-- lib/pos_consensus.go | 12 +- lib/pos_consensus_test.go | 12 +- lib/remote_node_indexer.go | 46 -- lib/remote_node_manager.go | 349 ----------- lib/server.go | 20 +- 9 files changed, 542 insertions(+), 666 deletions(-) delete mode 100644 lib/handshake_manager.go delete mode 100644 lib/remote_node_indexer.go delete mode 100644 lib/remote_node_manager.go diff --git a/integration_testing/network_manager_routines_test.go b/integration_testing/network_manager_routines_test.go index 368c419c8..f89bb2465 100644 --- a/integration_testing/network_manager_routines_test.go +++ b/integration_testing/network_manager_routines_test.go @@ -551,16 +551,16 @@ func waitForMinNonValidatorCountRemoteNodeIndexer(t *testing.T, node *cmd.Node, minNonValidatorOutboundCount int, minNonValidatorInboundCount int) { userAgent := node.Params.UserAgent - rnManager := node.Server.GetNetworkManager().GetRemoteNodeManager() + nm := node.Server.GetNetworkManager() condition := func() bool { - return checkRemoteNodeIndexerMinNonValidatorCount(rnManager, allCount, validatorCount, + return checkRemoteNodeIndexerMinNonValidatorCount(nm, allCount, validatorCount, minNonValidatorOutboundCount, minNonValidatorInboundCount) } waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to have at least %d non-validator outbound nodes and %d non-validator inbound nodes", userAgent, minNonValidatorOutboundCount, minNonValidatorInboundCount), condition) } -func checkRemoteNodeIndexerMinNonValidatorCount(manager *lib.RemoteNodeManager, allCount int, validatorCount int, +func checkRemoteNodeIndexerMinNonValidatorCount(manager *lib.NetworkManager, allCount int, validatorCount int, minNonValidatorOutboundCount int, minNonValidatorInboundCount int) bool { if allCount != manager.GetAllRemoteNodes().Count() { diff --git 
a/integration_testing/network_manager_utils_test.go b/integration_testing/network_manager_utils_test.go index d46b5a22f..6c1e95010 100644 --- a/integration_testing/network_manager_utils_test.go +++ b/integration_testing/network_manager_utils_test.go @@ -11,9 +11,9 @@ import ( func waitForValidatorConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) { userAgentN1 := node1.Params.UserAgent userAgentN2 := node2.Params.UserAgent - rnManagerN1 := node1.Server.GetNetworkManager().GetRemoteNodeManager() + nmN1 := node1.Server.GetNetworkManager() n1ValidatedN2 := func() bool { - if true != checkRemoteNodeIndexerUserAgent(rnManagerN1, userAgentN2, true, false, false) { + if true != checkRemoteNodeIndexerUserAgent(nmN1, userAgentN2, true, false, false) { return false } rnFromN2 := getRemoteNodeWithUserAgent(node1, userAgentN2) @@ -41,9 +41,9 @@ func conditionNonValidatorOutboundConnection(t *testing.T, node1 *cmd.Node, node func conditionNonValidatorOutboundConnectionDynamic(t *testing.T, node1 *cmd.Node, node2 *cmd.Node, inactiveValidator bool) func() bool { userAgentN2 := node2.Params.UserAgent - rnManagerN1 := node1.Server.GetNetworkManager().GetRemoteNodeManager() + nmN1 := node1.Server.GetNetworkManager() return func() bool { - if true != checkRemoteNodeIndexerUserAgent(rnManagerN1, userAgentN2, false, true, false) { + if true != checkRemoteNodeIndexerUserAgent(nmN1, userAgentN2, false, true, false) { return false } rnFromN2 := getRemoteNodeWithUserAgent(node1, userAgentN2) @@ -82,9 +82,9 @@ func conditionNonValidatorInboundConnection(t *testing.T, node1 *cmd.Node, node2 func conditionNonValidatorInboundConnectionDynamic(t *testing.T, node1 *cmd.Node, node2 *cmd.Node, inactiveValidator bool) func() bool { userAgentN2 := node2.Params.UserAgent - rnManagerN1 := node1.Server.GetNetworkManager().GetRemoteNodeManager() + nmN1 := node1.Server.GetNetworkManager() return func() bool { - if true != checkRemoteNodeIndexerUserAgent(rnManagerN1, userAgentN2, false, false, 
true) { + if true != checkRemoteNodeIndexerUserAgent(nmN1, userAgentN2, false, false, true) { return false } rnFromN2 := getRemoteNodeWithUserAgent(node1, userAgentN2) @@ -104,9 +104,9 @@ func conditionNonValidatorInboundConnectionDynamic(t *testing.T, node1 *cmd.Node func waitForEmptyRemoteNodeIndexer(t *testing.T, node1 *cmd.Node) { userAgentN1 := node1.Params.UserAgent - rnManagerN1 := node1.Server.GetNetworkManager().GetRemoteNodeManager() + nmN1 := node1.Server.GetNetworkManager() n1ValidatedN2 := func() bool { - if true != checkRemoteNodeIndexerEmpty(rnManagerN1) { + if true != checkRemoteNodeIndexerEmpty(nmN1) { return false } return true @@ -118,9 +118,9 @@ func waitForCountRemoteNodeIndexer(t *testing.T, node1 *cmd.Node, allCount int, nonValidatorOutboundCount int, nonValidatorInboundCount int) { userAgent := node1.Params.UserAgent - rnManager := node1.Server.GetNetworkManager().GetRemoteNodeManager() + nm := node1.Server.GetNetworkManager() condition := func() bool { - if true != checkRemoteNodeIndexerCount(rnManager, allCount, validatorCount, nonValidatorOutboundCount, nonValidatorInboundCount) { + if true != checkRemoteNodeIndexerCount(nm, allCount, validatorCount, nonValidatorOutboundCount, nonValidatorInboundCount) { return false } return true @@ -132,15 +132,15 @@ func waitForCountRemoteNodeIndexerHandshakeCompleted(t *testing.T, node1 *cmd.No nonValidatorOutboundCount int, nonValidatorInboundCount int) { userAgent := node1.Params.UserAgent - rnManager := node1.Server.GetNetworkManager().GetRemoteNodeManager() + nm := node1.Server.GetNetworkManager() condition := func() bool { - return checkRemoteNodeIndexerCountHandshakeCompleted(rnManager, allCount, validatorCount, + return checkRemoteNodeIndexerCountHandshakeCompleted(nm, allCount, validatorCount, nonValidatorOutboundCount, nonValidatorInboundCount) } waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to have appropriate RemoteNodes counts", userAgent), condition) } -func 
checkRemoteNodeIndexerUserAgent(manager *lib.RemoteNodeManager, userAgent string, validator bool, +func checkRemoteNodeIndexerUserAgent(manager *lib.NetworkManager, userAgent string, validator bool, nonValidatorOutbound bool, nonValidatorInbound bool) bool { if true != checkUserAgentInRemoteNodeList(userAgent, manager.GetAllRemoteNodes().GetAll()) { @@ -159,7 +159,7 @@ func checkRemoteNodeIndexerUserAgent(manager *lib.RemoteNodeManager, userAgent s return true } -func checkRemoteNodeIndexerCount(manager *lib.RemoteNodeManager, allCount int, validatorCount int, +func checkRemoteNodeIndexerCount(manager *lib.NetworkManager, allCount int, validatorCount int, nonValidatorOutboundCount int, nonValidatorInboundCount int) bool { if allCount != manager.GetAllRemoteNodes().Count() { @@ -178,7 +178,7 @@ func checkRemoteNodeIndexerCount(manager *lib.RemoteNodeManager, allCount int, v return true } -func checkRemoteNodeIndexerCountHandshakeCompleted(manager *lib.RemoteNodeManager, allCount int, validatorCount int, +func checkRemoteNodeIndexerCountHandshakeCompleted(manager *lib.NetworkManager, allCount int, validatorCount int, nonValidatorOutboundCount int, nonValidatorInboundCount int) bool { if allCount != manager.GetAllRemoteNodes().Count() { @@ -214,7 +214,7 @@ func checkRemoteNodeIndexerCountHandshakeCompleted(manager *lib.RemoteNodeManage return true } -func checkRemoteNodeIndexerEmpty(manager *lib.RemoteNodeManager) bool { +func checkRemoteNodeIndexerEmpty(manager *lib.NetworkManager) bool { if manager.GetAllRemoteNodes().Count() != 0 { return false } @@ -243,8 +243,8 @@ func checkUserAgentInRemoteNodeList(userAgent string, rnList []*lib.RemoteNode) } func getRemoteNodeWithUserAgent(node *cmd.Node, userAgent string) *lib.RemoteNode { - rnManager := node.Server.GetNetworkManager().GetRemoteNodeManager() - rnList := rnManager.GetAllRemoteNodes().GetAll() + nm := node.Server.GetNetworkManager() + rnList := nm.GetAllRemoteNodes().GetAll() for _, rn := range rnList { if 
rn.GetUserAgent() == userAgent { return rn diff --git a/lib/handshake_manager.go b/lib/handshake_manager.go deleted file mode 100644 index 2d558317f..000000000 --- a/lib/handshake_manager.go +++ /dev/null @@ -1,168 +0,0 @@ -package lib - -import ( - "fmt" - "github.com/decred/dcrd/lru" - "github.com/golang/glog" - "math" - "sync" -) - -// HandshakeManager is a structure that handles the handshake process with remote nodes. It is the entry point for -// initiating a handshake with a remote node. It is also responsible for handling version/verack messages from remote -// nodes. And for handling the handshake complete control message. -type HandshakeManager struct { - mtxHandshakeComplete sync.Mutex - - rnManager *RemoteNodeManager - usedNonces lru.Cache -} - -func NewHandshakeController(rnManager *RemoteNodeManager) *HandshakeManager { - - vm := &HandshakeManager{ - rnManager: rnManager, - usedNonces: lru.NewCache(1000), - } - - return vm -} - -// InitiateHandshake kicks off handshake with a remote node. -func (hm *HandshakeManager) InitiateHandshake(rn *RemoteNode) { - nonce := uint64(RandInt64(math.MaxInt64)) - if err := rn.InitiateHandshake(nonce); err != nil { - glog.Errorf("RemoteNode.InitiateHandshake: Error initiating handshake: %v", err) - hm.rnManager.Disconnect(rn) - } - hm.usedNonces.Add(nonce) -} - -// handleHandshakeComplete handles HandshakeComplete control messages, sent by RemoteNodes. -func (hm *HandshakeManager) handleHandshakeComplete(remoteNode *RemoteNode) { - // Prevent race conditions while handling handshake complete messages. - hm.mtxHandshakeComplete.Lock() - defer hm.mtxHandshakeComplete.Unlock() - - // Get the handshake information of this peer. 
- if remoteNode == nil { - return - } - - if remoteNode.GetNegotiatedProtocolVersion().Before(ProtocolVersion2) { - hm.rnManager.ProcessCompletedHandshake(remoteNode) - return - } - - if err := hm.handleHandshakeCompletePoSMessage(remoteNode); err != nil { - glog.Errorf("HandshakeManager.handleHandshakeComplete: Error handling PoS handshake peer message: %v, "+ - "remoteNodePk (%s)", err, remoteNode.GetValidatorPublicKey().Serialize()) - hm.rnManager.Disconnect(remoteNode) - return - } - hm.rnManager.ProcessCompletedHandshake(remoteNode) -} - -func (hm *HandshakeManager) handleHandshakeCompletePoSMessage(remoteNode *RemoteNode) error { - - validatorPk := remoteNode.GetValidatorPublicKey() - // If the remote node is not a potential validator, we don't need to do anything. - if validatorPk == nil { - return nil - } - - // Lookup the validator in the ValidatorIndex with the same public key. - existingValidator, ok := hm.rnManager.GetValidatorIndex().Get(validatorPk.Serialize()) - // For inbound RemoteNodes, we should ensure that there isn't an existing validator connected with the same public key. - // Inbound nodes are not initiated by us, so we shouldn't have added the RemoteNode to the ValidatorIndex yet. - if remoteNode.IsInbound() && ok { - return fmt.Errorf("HandshakeManager.handleHandshakeCompletePoSMessage: Inbound RemoteNode with duplicate validator public key") - } - // For outbound RemoteNodes, we have two possible scenarios. Either the RemoteNode has been initiated as a validator, - // in which case it should already be in the ValidatorIndex. Or the RemoteNode has been initiated as a regular node, - // in which case it should not be in the ValidatorIndex, but in the NonValidatorOutboundIndex. So to ensure there is - // no duplicate connection with the same public key, we only check whether there is a validator in the ValidatorIndex - // with the RemoteNode's public key. If there is one, we want to ensure that these two RemoteNodes have identical ids. 
- if remoteNode.IsOutbound() && ok { - if remoteNode.GetId() != existingValidator.GetId() { - return fmt.Errorf("HandshakeManager.handleHandshakeCompletePoSMessage: Outbound RemoteNode with duplicate validator public key. "+ - "Existing validator id: %v, new validator id: %v", existingValidator.GetId().ToUint64(), remoteNode.GetId().ToUint64()) - } - } - return nil -} - -// handleVersionMessage handles version messages, sent by RemoteNodes. -func (hm *HandshakeManager) handleVersionMessage(origin *Peer, desoMsg DeSoMessage) { - if desoMsg.GetMsgType() != MsgTypeVersion { - return - } - - rn := hm.rnManager.GetRemoteNodeFromPeer(origin) - if rn == nil { - // This should never happen. - return - } - - var verMsg *MsgDeSoVersion - var ok bool - if verMsg, ok = desoMsg.(*MsgDeSoVersion); !ok { - glog.Errorf("HandshakeManager.handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ - "error casting version message", origin.ID) - hm.rnManager.Disconnect(rn) - return - } - - // If we've seen this nonce before then return an error since this is a connection from ourselves. - msgNonce := verMsg.Nonce - if hm.usedNonces.Contains(msgNonce) { - hm.usedNonces.Delete(msgNonce) - glog.Errorf("HandshakeManager.handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ - "nonce collision, nonce (%v)", origin.ID, msgNonce) - hm.rnManager.Disconnect(rn) - return - } - - // Call HandleVersionMessage on the RemoteNode. - responseNonce := uint64(RandInt64(math.MaxInt64)) - if err := rn.HandleVersionMessage(verMsg, responseNonce); err != nil { - glog.Errorf("HandshakeManager.handleVersionMessage: Requesting PeerDisconnect for id: (%v) "+ - "error handling version message: %v", origin.ID, err) - hm.rnManager.Disconnect(rn) - return - - } - hm.usedNonces.Add(responseNonce) -} - -// handleVerackMessage handles verack messages, sent by RemoteNodes. 
-func (hm *HandshakeManager) handleVerackMessage(origin *Peer, desoMsg DeSoMessage) { - if desoMsg.GetMsgType() != MsgTypeVerack { - return - } - - rn := hm.rnManager.GetRemoteNodeFromPeer(origin) - if rn == nil { - // This should never happen. - return - } - - var vrkMsg *MsgDeSoVerack - var ok bool - if vrkMsg, ok = desoMsg.(*MsgDeSoVerack); !ok { - glog.Errorf("HandshakeManager.handleVerackMessage: Disconnecting RemoteNode with id: (%v) "+ - "error casting verack message", origin.ID) - hm.rnManager.Disconnect(rn) - return - } - - // Call HandleVerackMessage on the RemoteNode. - if err := rn.HandleVerackMessage(vrkMsg); err != nil { - glog.Errorf("HandshakeManager.handleVerackMessage: Requesting PeerDisconnect for id: (%v) "+ - "error handling verack message: %v", origin.ID, err) - hm.rnManager.Disconnect(rn) - return - } - - hm.handleHandshakeComplete(rn) -} diff --git a/lib/network_manager.go b/lib/network_manager.go index f3b138c33..d48a281e0 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -2,18 +2,20 @@ package lib import ( "fmt" - "net" - "strconv" - "sync" - "time" - "github.com/btcsuite/btcd/addrmgr" "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/lru" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" "github.com/golang/glog" "github.com/pkg/errors" + "math" + "net" + "strconv" + "sync" + "sync/atomic" + "time" ) // NetworkManager is a structure that oversees all connections to RemoteNodes. NetworkManager has the following @@ -36,14 +38,33 @@ import ( // The NetworkManager also runs an auxiliary goroutine that periodically cleans up RemoteNodes that may have timed out // the handshake process, or became invalid for some other reason. type NetworkManager struct { + mtx sync.Mutex + mtxHandshakeComplete sync.Mutex + // The parameters we are initialized with. 
params *DeSoParams - cmgr *ConnectionManager - blsKeystore *BLSKeystore + srv *Server + bc *Blockchain + cmgr *ConnectionManager + keystore *BLSKeystore + + // configs + minTxFeeRateNanosPerKB uint64 + nodeServices ServiceFlag + + // Used to set remote node ids. Must be incremented atomically. + remoteNodeIndex uint64 + // AllRemoteNodes is a map storing all remote nodes by their IDs. + AllRemoteNodes *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] + + // Indices for various types of remote nodes. + ValidatorIndex *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] + NonValidatorOutboundIndex *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] + NonValidatorInboundIndex *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] - handshake *HandshakeManager - rnManager *RemoteNodeManager + // Cache of nonces used during handshake. + usedNonces lru.Cache // The address manager keeps track of peer addresses we're aware of. When // we need to connect to a new outbound peer, it chooses one of the addresses @@ -77,18 +98,25 @@ type NetworkManager struct { exitGroup sync.WaitGroup } -func NewNetworkManager(params *DeSoParams, cmgr *ConnectionManager, rnManager *RemoteNodeManager, +func NewNetworkManager(params *DeSoParams, srv *Server, bc *Blockchain, cmgr *ConnectionManager, blsKeystore *BLSKeystore, addrMgr *addrmgr.AddrManager, connectIps []string, targetNonValidatorOutboundRemoteNodes uint32, targetNonValidatorInboundRemoteNodes uint32, - limitOneInboundConnectionPerIP bool) *NetworkManager { + limitOneInboundConnectionPerIP bool, minTxFeeRateNanosPerKB uint64, nodeServices ServiceFlag) *NetworkManager { return &NetworkManager{ params: params, + srv: srv, + bc: bc, cmgr: cmgr, - blsKeystore: blsKeystore, - handshake: NewHandshakeController(rnManager), - rnManager: rnManager, + keystore: blsKeystore, AddrMgr: addrMgr, + minTxFeeRateNanosPerKB: minTxFeeRateNanosPerKB, + nodeServices: nodeServices, + AllRemoteNodes: 
collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + ValidatorIndex: collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode](), + NonValidatorOutboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + NonValidatorInboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + usedNonces: lru.NewCache(1000), connectIps: connectIps, persistentIpToRemoteNodeIdsMap: collections.NewConcurrentMap[string, RemoteNodeId](), activeValidatorsMap: collections.NewConcurrentMap[bls.SerializedPublicKey, consensus.Validator](), @@ -122,11 +150,7 @@ func (nm *NetworkManager) Stop() { close(nm.exitChan) nm.exitGroup.Wait() } - nm.rnManager.DisconnectAll() -} - -func (nm *NetworkManager) GetRemoteNodeManager() *RemoteNodeManager { - return nm.rnManager + nm.DisconnectAll() } func (nm *NetworkManager) SetTargetOutboundPeers(numPeers uint32) { @@ -204,7 +228,7 @@ func (nm *NetworkManager) startRemoteNodeCleanup() { nm.exitGroup.Done() return case <-time.After(1 * time.Second): - nm.rnManager.Cleanup() + nm.Cleanup() } } @@ -214,16 +238,79 @@ func (nm *NetworkManager) startRemoteNodeCleanup() { // ## Handlers (Peer, DeSoMessage) // ########################### -// _handleVersionMessage is called when a new version message is received. It is a wrapper around the handshake's -// handleVersionMessage function. +// _handleVersionMessage is called when a new version message is received. func (nm *NetworkManager) _handleVersionMessage(origin *Peer, desoMsg DeSoMessage) { - nm.handshake.handleVersionMessage(origin, desoMsg) + if desoMsg.GetMsgType() != MsgTypeVersion { + return + } + + rn := nm.GetRemoteNodeFromPeer(origin) + if rn == nil { + // This should never happen. 
+ return + } + + var verMsg *MsgDeSoVersion + var ok bool + if verMsg, ok = desoMsg.(*MsgDeSoVersion); !ok { + glog.Errorf("NetworkManager.handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ + "error casting version message", origin.ID) + nm.Disconnect(rn) + return + } + + // If we've seen this nonce before then return an error since this is a connection from ourselves. + msgNonce := verMsg.Nonce + if nm.usedNonces.Contains(msgNonce) { + nm.usedNonces.Delete(msgNonce) + glog.Errorf("NetworkManager.handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ + "nonce collision, nonce (%v)", origin.ID, msgNonce) + nm.Disconnect(rn) + return + } + + // Call HandleVersionMessage on the RemoteNode. + responseNonce := uint64(RandInt64(math.MaxInt64)) + if err := rn.HandleVersionMessage(verMsg, responseNonce); err != nil { + glog.Errorf("NetworkManager.handleVersionMessage: Requesting PeerDisconnect for id: (%v) "+ + "error handling version message: %v", origin.ID, err) + nm.Disconnect(rn) + return + + } + nm.usedNonces.Add(responseNonce) } -// _handleVerackMessage is called when a new verack message is received. It is a wrapper around the handshake's -// handleVerackMessage function. +// _handleVerackMessage is called when a new verack message is received. func (nm *NetworkManager) _handleVerackMessage(origin *Peer, desoMsg DeSoMessage) { - nm.handshake.handleVerackMessage(origin, desoMsg) + if desoMsg.GetMsgType() != MsgTypeVerack { + return + } + + rn := nm.GetRemoteNodeFromPeer(origin) + if rn == nil { + // This should never happen. + return + } + + var vrkMsg *MsgDeSoVerack + var ok bool + if vrkMsg, ok = desoMsg.(*MsgDeSoVerack); !ok { + glog.Errorf("NetworkManager.handleVerackMessage: Disconnecting RemoteNode with id: (%v) "+ + "error casting verack message", origin.ID) + nm.Disconnect(rn) + return + } + + // Call HandleVerackMessage on the RemoteNode. 
+ if err := rn.HandleVerackMessage(vrkMsg); err != nil { + glog.Errorf("NetworkManager.handleVerackMessage: Requesting PeerDisconnect for id: (%v) "+ + "error handling verack message: %v", origin.ID, err) + nm.Disconnect(rn) + return + } + + nm.handleHandshakeComplete(rn) } // _handleDisconnectedPeerMessage is called when a peer is disconnected. It is responsible for cleaning up the @@ -235,7 +322,7 @@ func (nm *NetworkManager) _handleDisconnectedPeerMessage(origin *Peer, desoMsg D glog.V(2).Infof("NetworkManager._handleDisconnectedPeerMessage: Handling disconnected peer message for "+ "id=%v", origin.ID) - nm.rnManager.DisconnectById(NewRemoteNodeId(origin.ID)) + nm.DisconnectById(NewRemoteNodeId(origin.ID)) // Update the persistentIpToRemoteNodeIdsMap, in case the disconnected peer was a persistent peer. ipRemoteNodeIdMap := nm.persistentIpToRemoteNodeIdsMap.ToMap() for ip, id := range ipRemoteNodeIdMap { @@ -278,7 +365,7 @@ func (nm *NetworkManager) _handleNewConnectionMessage(origin *Peer, desoMsg DeSo } // If we made it here, we have a valid remote node. We will now initiate the handshake. - nm.handshake.InitiateHandshake(remoteNode) + nm.InitiateHandshake(remoteNode) } // processInboundConnection is called when a new inbound connection is established. 
At this point, the connection is not validated, @@ -306,7 +393,7 @@ func (nm *NetworkManager) processInboundConnection(conn Connection) (*RemoteNode "ConvertIPStringToNetAddress for addr: (%s)", ic.connection.RemoteAddr().String()) } - remoteNode, err := nm.rnManager.AttachInboundConnection(ic.connection, na) + remoteNode, err := nm.AttachInboundConnection(ic.connection, na) if remoteNode == nil || err != nil { return nil, errors.Wrapf(err, "NetworkManager.handleInboundConnection: Problem calling "+ "AttachInboundConnection for addr: (%s)", ic.connection.RemoteAddr().String()) @@ -349,9 +436,9 @@ func (nm *NetworkManager) processOutboundConnection(conn Connection) (*RemoteNod } // Attach the connection before additional validation steps because it is already established. - remoteNode, err := nm.rnManager.AttachOutboundConnection(oc.connection, na, oc.attemptId, oc.isPersistent) + remoteNode, err := nm.AttachOutboundConnection(oc.connection, na, oc.attemptId, oc.isPersistent) if remoteNode == nil || err != nil { - return nil, errors.Wrapf(err, "NetworkManager.handleOutboundConnection: Problem calling rnManager.AttachOutboundConnection "+ + return nil, errors.Wrapf(err, "NetworkManager.handleOutboundConnection: Problem calling AttachOutboundConnection "+ "for addr: (%s)", oc.connection.RemoteAddr().String()) } @@ -380,7 +467,7 @@ func (nm *NetworkManager) processOutboundConnection(conn Connection) (*RemoteNod func (nm *NetworkManager) cleanupFailedInboundConnection(remoteNode *RemoteNode, connection Connection) { glog.V(2).Infof("NetworkManager.cleanupFailedInboundConnection: Cleaning up failed inbound connection") if remoteNode != nil { - nm.rnManager.Disconnect(remoteNode) + nm.Disconnect(remoteNode) } connection.Close() } @@ -397,9 +484,9 @@ func (nm *NetworkManager) cleanupFailedOutboundConnection(connection Connection) // Find the RemoteNode associated with the connection. 
It should almost always exist, since we create the RemoteNode // as we're attempting to connect to the address. id := NewRemoteNodeId(oc.attemptId) - rn := nm.rnManager.GetRemoteNodeById(id) + rn := nm.GetRemoteNodeById(id) if rn != nil { - nm.rnManager.Disconnect(rn) + nm.Disconnect(rn) } oc.Close() nm.cmgr.RemoveAttemptedOutboundAddrs(oc.address) @@ -456,17 +543,17 @@ func (nm *NetworkManager) refreshValidatorIndex(activeValidatorsMap *collections // De-index inactive validators. We skip any checks regarding RemoteNodes connection status, nor do we verify whether // de-indexing the validator would result in an excess number of outbound/inbound connections. Any excess connections // will be cleaned up by the NonValidator connector. - validatorRemoteNodeMap := nm.rnManager.GetValidatorIndex().ToMap() + validatorRemoteNodeMap := nm.GetValidatorIndex().ToMap() for pk, rn := range validatorRemoteNodeMap { // If the validator is no longer active, de-index it. if _, ok := activeValidatorsMap.Get(pk); !ok { - nm.rnManager.SetNonValidator(rn) - nm.rnManager.UnsetValidator(rn) + nm.SetNonValidator(rn) + nm.UnsetValidator(rn) } } // Look for validators in our existing outbound / inbound connections. - allNonValidators := nm.rnManager.GetAllNonValidators() + allNonValidators := nm.GetAllNonValidators() for _, rn := range allNonValidators { // It is possible for a RemoteNode to be in the non-validator indices, and still have a public key. This can happen // if the RemoteNode advertised support for the SFValidator service flag during handshake, and provided us @@ -478,17 +565,17 @@ func (nm *NetworkManager) refreshValidatorIndex(activeValidatorsMap *collections // It is possible that through unlikely concurrence, and malevolence, two non-validators happen to have the same // public key, which goes undetected during handshake. 
To prevent this from affecting the indexing of the validator // set, we check that the non-validator's public key is not already present in the validator index. - if _, ok := nm.rnManager.GetValidatorIndex().Get(pk.Serialize()); ok { + if _, ok := nm.GetValidatorIndex().Get(pk.Serialize()); ok { glog.V(2).Infof("NetworkManager.refreshValidatorIndex: Disconnecting Validator RemoteNode "+ "(%v) has validator public key (%v) that is already present in validator index", rn, pk) - nm.rnManager.Disconnect(rn) + nm.Disconnect(rn) continue } // If the RemoteNode turns out to be in the validator set, index it. if _, ok := activeValidatorsMap.Get(pk.Serialize()); ok { - nm.rnManager.SetValidator(rn) - nm.rnManager.UnsetNonValidator(rn) + nm.SetValidator(rn) + nm.UnsetNonValidator(rn) } } } @@ -497,19 +584,19 @@ func (nm *NetworkManager) refreshValidatorIndex(activeValidatorsMap *collections // periodically by the validator connector. func (nm *NetworkManager) connectValidators(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { // Look through the active validators and connect to any that we're not already connected to. - if nm.blsKeystore == nil { + if nm.keystore == nil { return } validators := activeValidatorsMap.ToMap() for pk, validator := range validators { - _, exists := nm.rnManager.GetValidatorIndex().Get(pk) + _, exists := nm.GetValidatorIndex().Get(pk) // If we're already connected to the validator, continue. if exists { continue } // If the validator is our node, continue. - if nm.blsKeystore.GetSigner().GetPublicKey().Serialize() == pk { + if nm.keystore.GetSigner().GetPublicKey().Serialize() == pk { continue } @@ -543,14 +630,14 @@ func (nm *NetworkManager) refreshNonValidatorOutboundIndex() { // First let's run a quick check to see if the number of our non-validator remote nodes exceeds our target. Note that // this number will include the persistent nodes. 
- numOutboundRemoteNodes := uint32(nm.rnManager.GetNonValidatorOutboundIndex().Count()) + numOutboundRemoteNodes := uint32(nm.GetNonValidatorOutboundIndex().Count()) if numOutboundRemoteNodes <= nm.targetNonValidatorOutboundRemoteNodes { return } // If we get here, it means that we should potentially disconnect some remote nodes. Let's first separate the // attempted and connected remote nodes, ignoring the persistent ones. - allOutboundRemoteNodes := nm.rnManager.GetNonValidatorOutboundIndex().GetAll() + allOutboundRemoteNodes := nm.GetNonValidatorOutboundIndex().GetAll() var attemptedOutboundRemoteNodes, connectedOutboundRemoteNodes []*RemoteNode for _, rn := range allOutboundRemoteNodes { if rn.IsPersistent() || rn.IsExpectedValidator() { @@ -578,7 +665,7 @@ func (nm *NetworkManager) refreshNonValidatorOutboundIndex() { } glog.V(2).Infof("NetworkManager.refreshNonValidatorOutboundIndex: Disconnecting attempted remote "+ "node (id=%v) due to excess outbound RemoteNodes", rn.GetId()) - nm.rnManager.Disconnect(rn) + nm.Disconnect(rn) excessiveOutboundRemoteNodes-- } // Now disconnect the connected remote nodes, if we still have too many remote nodes. @@ -588,7 +675,7 @@ func (nm *NetworkManager) refreshNonValidatorOutboundIndex() { } glog.V(2).Infof("NetworkManager.refreshNonValidatorOutboundIndex: Disconnecting connected remote "+ "node (id=%v) due to excess outbound RemoteNodes", rn.GetId()) - nm.rnManager.Disconnect(rn) + nm.Disconnect(rn) excessiveOutboundRemoteNodes-- } } @@ -597,13 +684,13 @@ func (nm *NetworkManager) refreshNonValidatorOutboundIndex() { // disconnecting excess inbound remote nodes. func (nm *NetworkManager) refreshNonValidatorInboundIndex() { // First let's check if we have an excess number of inbound remote nodes. If we do, we'll disconnect some of them. 
- numConnectedInboundRemoteNodes := uint32(nm.rnManager.GetNonValidatorInboundIndex().Count()) + numConnectedInboundRemoteNodes := uint32(nm.GetNonValidatorInboundIndex().Count()) if numConnectedInboundRemoteNodes <= nm.targetNonValidatorInboundRemoteNodes { return } // Disconnect random inbound non-validators if we have too many of them. - inboundRemoteNodes := nm.rnManager.GetNonValidatorInboundIndex().GetAll() + inboundRemoteNodes := nm.GetNonValidatorInboundIndex().GetAll() var connectedInboundRemoteNodes []*RemoteNode for _, rn := range inboundRemoteNodes { // We only want to disconnect remote nodes that have completed handshake. RemoteNodes that don't have the @@ -628,7 +715,7 @@ func (nm *NetworkManager) refreshNonValidatorInboundIndex() { } glog.V(2).Infof("NetworkManager.refreshNonValidatorInboundIndex: Disconnecting inbound remote "+ "node (id=%v) due to excess inbound RemoteNodes", rn.GetId()) - nm.rnManager.Disconnect(rn) + nm.Disconnect(rn) excessiveInboundRemoteNodes-- } } @@ -637,7 +724,7 @@ func (nm *NetworkManager) refreshNonValidatorInboundIndex() { // nonValidator connector. func (nm *NetworkManager) connectNonValidators() { // First, find all nonValidator outbound remote nodes that are not persistent. - allOutboundRemoteNodes := nm.rnManager.GetNonValidatorOutboundIndex().GetAll() + allOutboundRemoteNodes := nm.GetNonValidatorOutboundIndex().GetAll() var nonValidatorOutboundRemoteNodes []*RemoteNode for _, rn := range allOutboundRemoteNodes { if rn.IsPersistent() || rn.IsExpectedValidator() { @@ -662,7 +749,7 @@ func (nm *NetworkManager) connectNonValidators() { } // Attempt to connect to the address. 
nm.AddrMgr.Attempt(addr) - if err := nm.rnManager.CreateNonValidatorOutboundConnection(addr); err != nil { + if err := nm.createNonValidatorOutboundConnection(addr); err != nil { glog.V(2).Infof("NetworkManager.connectNonValidators: Problem creating non-validator outbound "+ "connection to addr: %v; err: %v", addr, err) } @@ -698,7 +785,7 @@ func (nm *NetworkManager) getRandomUnconnectedAddress() *wire.NetAddress { } // ########################### -// ## RemoteNode Dial Functions +// ## Create RemoteNode Functions // ########################### func (nm *NetworkManager) CreateValidatorConnection(ipStr string, publicKey *bls.PublicKey) error { @@ -706,7 +793,22 @@ func (nm *NetworkManager) CreateValidatorConnection(ipStr string, publicKey *bls if err != nil { return err } - return nm.rnManager.CreateValidatorConnection(netAddr, publicKey) + if netAddr == nil || publicKey == nil { + return fmt.Errorf("NetworkManager.CreateValidatorConnection: netAddr or public key is nil") + } + + if _, ok := nm.GetValidatorIndex().Get(publicKey.Serialize()); ok { + return fmt.Errorf("NetworkManager.CreateValidatorConnection: RemoteNode already exists for public key: %v", publicKey) + } + + remoteNode := nm.newRemoteNode(publicKey, false) + if err := remoteNode.DialOutboundConnection(netAddr); err != nil { + return errors.Wrapf(err, "NetworkManager.CreateValidatorConnection: Problem calling DialPersistentOutboundConnection "+ + "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) + } + nm.setRemoteNode(remoteNode) + nm.GetValidatorIndex().Set(publicKey.Serialize(), remoteNode) + return nil } func (nm *NetworkManager) CreateNonValidatorPersistentOutboundConnection(ipStr string) (RemoteNodeId, error) { @@ -714,7 +816,18 @@ func (nm *NetworkManager) CreateNonValidatorPersistentOutboundConnection(ipStr s if err != nil { return 0, err } - return nm.rnManager.CreateNonValidatorPersistentOutboundConnection(netAddr) + if netAddr == nil { + return 0, 
fmt.Errorf("NetworkManager.CreateNonValidatorPersistentOutboundConnection: netAddr is nil") + } + + remoteNode := nm.newRemoteNode(nil, true) + if err := remoteNode.DialPersistentOutboundConnection(netAddr); err != nil { + return 0, errors.Wrapf(err, "NetworkManager.CreateNonValidatorPersistentOutboundConnection: Problem calling DialPersistentOutboundConnection "+ + "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) + } + nm.setRemoteNode(remoteNode) + nm.GetNonValidatorOutboundIndex().Set(remoteNode.GetId(), remoteNode) + return remoteNode.GetId(), nil } func (nm *NetworkManager) CreateNonValidatorOutboundConnection(ipStr string) error { @@ -722,7 +835,337 @@ func (nm *NetworkManager) CreateNonValidatorOutboundConnection(ipStr string) err if err != nil { return err } - return nm.rnManager.CreateNonValidatorOutboundConnection(netAddr) + return nm.createNonValidatorOutboundConnection(netAddr) +} + +func (nm *NetworkManager) createNonValidatorOutboundConnection(netAddr *wire.NetAddress) error { + if netAddr == nil { + return fmt.Errorf("NetworkManager.CreateNonValidatorOutboundConnection: netAddr is nil") + } + + remoteNode := nm.newRemoteNode(nil, false) + if err := remoteNode.DialOutboundConnection(netAddr); err != nil { + return errors.Wrapf(err, "NetworkManager.CreateNonValidatorOutboundConnection: Problem calling DialOutboundConnection "+ + "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) + } + nm.setRemoteNode(remoteNode) + nm.GetNonValidatorOutboundIndex().Set(remoteNode.GetId(), remoteNode) + return nil +} + +func (nm *NetworkManager) AttachInboundConnection(conn net.Conn, + na *wire.NetAddress) (*RemoteNode, error) { + + remoteNode := nm.newRemoteNode(nil, false) + if err := remoteNode.AttachInboundConnection(conn, na); err != nil { + return remoteNode, errors.Wrapf(err, "NetworkManager.AttachInboundConnection: Problem calling AttachInboundConnection "+ + "for addr: (%s)", conn.RemoteAddr().String()) + } + + nm.setRemoteNode(remoteNode) + return 
remoteNode, nil +} + +func (nm *NetworkManager) AttachOutboundConnection(conn net.Conn, na *wire.NetAddress, + remoteNodeId uint64, isPersistent bool) (*RemoteNode, error) { + + id := NewRemoteNodeId(remoteNodeId) + remoteNode := nm.GetRemoteNodeById(id) + if remoteNode == nil { + return nil, fmt.Errorf("NetworkManager.AttachOutboundConnection: Problem getting remote node by id (%d)", + id.ToUint64()) + } + + if err := remoteNode.AttachOutboundConnection(conn, na, isPersistent); err != nil { + nm.Disconnect(remoteNode) + return nil, errors.Wrapf(err, "NetworkManager.AttachOutboundConnection: Problem calling AttachOutboundConnection "+ + "for addr: (%s). Disconnecting remote node (id=%v)", conn.RemoteAddr().String(), remoteNode.GetId()) + } + + return remoteNode, nil +} + +// ########################### +// ## RemoteNode Management +// ########################### + +func (nm *NetworkManager) DisconnectAll() { + allRemoteNodes := nm.GetAllRemoteNodes().GetAll() + for _, rn := range allRemoteNodes { + glog.V(2).Infof("NetworkManager.DisconnectAll: Disconnecting from remote node (id=%v)", rn.GetId()) + nm.Disconnect(rn) + } +} + +func (nm *NetworkManager) newRemoteNode(validatorPublicKey *bls.PublicKey, isPersistent bool) *RemoteNode { + id := atomic.AddUint64(&nm.remoteNodeIndex, 1) + remoteNodeId := NewRemoteNodeId(id) + latestBlockHeight := uint64(nm.bc.BlockTip().Height) + return NewRemoteNode(remoteNodeId, validatorPublicKey, isPersistent, nm.srv, nm.cmgr, nm.keystore, + nm.params, nm.minTxFeeRateNanosPerKB, latestBlockHeight, nm.nodeServices) +} + +func (nm *NetworkManager) ProcessCompletedHandshake(remoteNode *RemoteNode) { + if remoteNode == nil { + return + } + + if remoteNode.IsValidator() { + nm.SetValidator(remoteNode) + nm.UnsetNonValidator(remoteNode) + } else { + nm.UnsetValidator(remoteNode) + nm.SetNonValidator(remoteNode) + } + nm.srv.HandleAcceptedPeer(remoteNode) + nm.srv.maybeRequestAddresses(remoteNode) +} + +func (nm *NetworkManager) 
Disconnect(rn *RemoteNode) { + if rn == nil { + return + } + glog.V(2).Infof("NetworkManager.Disconnect: Disconnecting from remote node id=%v", rn.GetId()) + rn.Disconnect() + nm.removeRemoteNodeFromIndexer(rn) +} + +func (nm *NetworkManager) DisconnectById(id RemoteNodeId) { + rn := nm.GetRemoteNodeById(id) + if rn == nil { + return + } + + nm.Disconnect(rn) +} + +func (nm *NetworkManager) SendMessage(rn *RemoteNode, desoMessage DeSoMessage) error { + if rn == nil { + return fmt.Errorf("NetworkManager.SendMessage: RemoteNode is nil") + } + + return rn.SendMessage(desoMessage) +} + +func (nm *NetworkManager) removeRemoteNodeFromIndexer(rn *RemoteNode) { + nm.mtx.Lock() + defer nm.mtx.Unlock() + + if rn == nil { + return + } + + nm.GetAllRemoteNodes().Remove(rn.GetId()) + nm.GetNonValidatorOutboundIndex().Remove(rn.GetId()) + nm.GetNonValidatorInboundIndex().Remove(rn.GetId()) + + // Try to evict the remote node from the validator index. If the remote node is not a validator, then there is nothing to do. + if rn.GetValidatorPublicKey() == nil { + return + } + // Only remove from the validator index if the fetched remote node is the same as the one we are trying to remove. + // Otherwise, we could have a fun edge-case where a duplicated validator connection ends up removing an + // existing validator connection from the index. 
+ fetchedRn, ok := nm.GetValidatorIndex().Get(rn.GetValidatorPublicKey().Serialize()) + if ok && fetchedRn.GetId() == rn.GetId() { + nm.GetValidatorIndex().Remove(rn.GetValidatorPublicKey().Serialize()) + } +} + +func (nm *NetworkManager) Cleanup() { + allRemoteNodes := nm.GetAllRemoteNodes().GetAll() + for _, rn := range allRemoteNodes { + if rn.IsTimedOut() { + glog.V(2).Infof("NetworkManager.Cleanup: Disconnecting from remote node (id=%v)", rn.GetId()) + nm.Disconnect(rn) + } + } +} + +// ########################### +// ## RemoteNode Setters +// ########################### + +func (nm *NetworkManager) setRemoteNode(rn *RemoteNode) { + nm.mtx.Lock() + defer nm.mtx.Unlock() + + if rn == nil || rn.IsTerminated() { + return + } + + nm.GetAllRemoteNodes().Set(rn.GetId(), rn) +} + +func (nm *NetworkManager) SetNonValidator(rn *RemoteNode) { + nm.mtx.Lock() + defer nm.mtx.Unlock() + + if rn == nil || rn.IsTerminated() { + return + } + + if rn.IsOutbound() { + nm.GetNonValidatorOutboundIndex().Set(rn.GetId(), rn) + } else { + nm.GetNonValidatorInboundIndex().Set(rn.GetId(), rn) + } +} + +func (nm *NetworkManager) SetValidator(remoteNode *RemoteNode) { + nm.mtx.Lock() + defer nm.mtx.Unlock() + + if remoteNode == nil || remoteNode.IsTerminated() { + return + } + + pk := remoteNode.GetValidatorPublicKey() + if pk == nil { + return + } + nm.GetValidatorIndex().Set(pk.Serialize(), remoteNode) +} + +func (nm *NetworkManager) UnsetValidator(remoteNode *RemoteNode) { + nm.mtx.Lock() + defer nm.mtx.Unlock() + + if remoteNode == nil || remoteNode.IsTerminated() { + return + } + + pk := remoteNode.GetValidatorPublicKey() + if pk == nil { + return + } + nm.GetValidatorIndex().Remove(pk.Serialize()) +} + +func (nm *NetworkManager) UnsetNonValidator(rn *RemoteNode) { + nm.mtx.Lock() + defer nm.mtx.Unlock() + + if rn == nil || rn.IsTerminated() { + return + } + + if rn.IsOutbound() { + nm.GetNonValidatorOutboundIndex().Remove(rn.GetId()) + } else { + 
nm.GetNonValidatorInboundIndex().Remove(rn.GetId()) + } +} + +// ########################### +// ## RemoteNode Getters +// ########################### + +func (nm *NetworkManager) GetAllRemoteNodes() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { + return nm.AllRemoteNodes +} + +func (nm *NetworkManager) GetValidatorIndex() *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] { + return nm.ValidatorIndex +} + +func (nm *NetworkManager) GetNonValidatorOutboundIndex() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { + return nm.NonValidatorOutboundIndex +} + +func (nm *NetworkManager) GetNonValidatorInboundIndex() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { + return nm.NonValidatorInboundIndex +} + +func (nm *NetworkManager) GetRemoteNodeFromPeer(peer *Peer) *RemoteNode { + if peer == nil { + return nil + } + id := NewRemoteNodeId(peer.GetId()) + rn, _ := nm.GetAllRemoteNodes().Get(id) + return rn +} + +func (nm *NetworkManager) GetRemoteNodeById(id RemoteNodeId) *RemoteNode { + rn, ok := nm.GetAllRemoteNodes().Get(id) + if !ok { + return nil + } + return rn +} + +func (nm *NetworkManager) GetAllNonValidators() []*RemoteNode { + outboundRemoteNodes := nm.GetNonValidatorOutboundIndex().GetAll() + inboundRemoteNodes := nm.GetNonValidatorInboundIndex().GetAll() + return append(outboundRemoteNodes, inboundRemoteNodes...) +} + +// ########################### +// ## RemoteNode Handshake +// ########################### + +// InitiateHandshake kicks off handshake with a remote node. +func (nm *NetworkManager) InitiateHandshake(rn *RemoteNode) { + nonce := uint64(RandInt64(math.MaxInt64)) + if err := rn.InitiateHandshake(nonce); err != nil { + glog.Errorf("NetworkManager.InitiateHandshake: Error initiating handshake: %v", err) + nm.Disconnect(rn) + } + nm.usedNonces.Add(nonce) +} + +// handleHandshakeComplete is called on a completed handshake with a RemoteNodes. 
+func (nm *NetworkManager) handleHandshakeComplete(remoteNode *RemoteNode) { + // Prevent race conditions while handling handshake complete messages. + nm.mtxHandshakeComplete.Lock() + defer nm.mtxHandshakeComplete.Unlock() + + // Get the handshake information of this peer. + if remoteNode == nil { + return + } + + if remoteNode.GetNegotiatedProtocolVersion().Before(ProtocolVersion2) { + nm.ProcessCompletedHandshake(remoteNode) + return + } + + if err := nm.handleHandshakeCompletePoSMessage(remoteNode); err != nil { + glog.Errorf("NetworkManager.handleHandshakeComplete: Error handling PoS handshake peer message: %v, "+ + "remoteNodePk (%s)", err, remoteNode.GetValidatorPublicKey().Serialize()) + nm.Disconnect(remoteNode) + return + } + nm.ProcessCompletedHandshake(remoteNode) +} + +func (nm *NetworkManager) handleHandshakeCompletePoSMessage(remoteNode *RemoteNode) error { + + validatorPk := remoteNode.GetValidatorPublicKey() + // If the remote node is not a potential validator, we don't need to do anything. + if validatorPk == nil { + return nil + } + + // Lookup the validator in the ValidatorIndex with the same public key. + existingValidator, ok := nm.GetValidatorIndex().Get(validatorPk.Serialize()) + // For inbound RemoteNodes, we should ensure that there isn't an existing validator connected with the same public key. + // Inbound nodes are not initiated by us, so we shouldn't have added the RemoteNode to the ValidatorIndex yet. + if remoteNode.IsInbound() && ok { + return fmt.Errorf("NetworkManager.handleHandshakeCompletePoSMessage: Inbound RemoteNode with duplicate validator public key") + } + // For outbound RemoteNodes, we have two possible scenarios. Either the RemoteNode has been initiated as a validator, + // in which case it should already be in the ValidatorIndex. Or the RemoteNode has been initiated as a regular node, + // in which case it should not be in the ValidatorIndex, but in the NonValidatorOutboundIndex. 
So to ensure there is + // no duplicate connection with the same public key, we only check whether there is a validator in the ValidatorIndex + // with the RemoteNode's public key. If there is one, we want to ensure that these two RemoteNodes have identical ids. + if remoteNode.IsOutbound() && ok { + if remoteNode.GetId() != existingValidator.GetId() { + return fmt.Errorf("NetworkManager.handleHandshakeCompletePoSMessage: Outbound RemoteNode with duplicate validator public key. "+ + "Existing validator id: %v, new validator id: %v", existingValidator.GetId().ToUint64(), remoteNode.GetId().ToUint64()) + } + } + return nil } // ########################### diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index fac24e887..18ca73e26 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -297,14 +297,14 @@ func (fc *FastHotStuffConsensus) handleBlockProposalEvent( } // Broadcast the block to the validator network - validators := fc.networkManager.rnManager.GetValidatorIndex().GetAll() + validators := fc.networkManager.GetValidatorIndex().GetAll() for _, validator := range validators { sendMessageToRemoteNodeAsync(validator, blockProposal) } // Broadcast the block to all inbound non-validator peers. This allows them to sync // blocks from us. 
- nonValidators := fc.networkManager.rnManager.GetNonValidatorInboundIndex().GetAll() + nonValidators := fc.networkManager.GetNonValidatorInboundIndex().GetAll() for _, nonValidator := range nonValidators { sendMessageToRemoteNodeAsync(nonValidator, blockProposal) } @@ -373,7 +373,7 @@ func (fc *FastHotStuffConsensus) HandleLocalVoteEvent(event *consensus.FastHotSt } // Broadcast the block to the validator network - validators := fc.networkManager.rnManager.GetValidatorIndex().GetAll() + validators := fc.networkManager.GetValidatorIndex().GetAll() for _, validator := range validators { sendMessageToRemoteNodeAsync(validator, voteMsg) } @@ -494,7 +494,7 @@ func (fc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo } // Broadcast the block to the validator network - validators := fc.networkManager.rnManager.GetValidatorIndex().GetAll() + validators := fc.networkManager.GetValidatorIndex().GetAll() for _, validator := range validators { sendMessageToRemoteNodeAsync(validator, timeoutMsg) } @@ -572,7 +572,7 @@ func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error // // See https://github.com/deso-protocol/core/pull/875#discussion_r1460183510 for more details. 
if len(missingBlockHashes) > 0 { - remoteNode := fc.networkManager.rnManager.GetRemoteNodeFromPeer(pp) + remoteNode := fc.networkManager.GetRemoteNodeFromPeer(pp) if remoteNode == nil { return errors.Errorf("FastHotStuffConsensus.HandleBlock: RemoteNode not found for peer: %v", pp) } @@ -890,7 +890,7 @@ func (fc *FastHotStuffConsensus) updateActiveValidatorConnections() error { } func (fc *FastHotStuffConsensus) trySendMessageToPeer(pp *Peer, msg DeSoMessage) { - remoteNode := fc.networkManager.rnManager.GetRemoteNodeFromPeer(pp) + remoteNode := fc.networkManager.GetRemoteNodeFromPeer(pp) if remoteNode == nil { glog.Errorf("FastHotStuffConsensus.trySendMessageToPeer: RemoteNode not found for peer: %v", pp) return diff --git a/lib/pos_consensus_test.go b/lib/pos_consensus_test.go index 99d8c4e9d..b5d1f3bb1 100644 --- a/lib/pos_consensus_test.go +++ b/lib/pos_consensus_test.go @@ -206,13 +206,9 @@ func alwaysReturnTrue() bool { func _createMockNetworkManagerForConsensus() *NetworkManager { return &NetworkManager{ - rnManager: &RemoteNodeManager{ - remoteNodeIndexer: &RemoteNodeIndexer{ - AllRemoteNodes: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), - ValidatorIndex: collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode](), - NonValidatorOutboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), - NonValidatorInboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), - }, - }, + AllRemoteNodes: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + ValidatorIndex: collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode](), + NonValidatorOutboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + NonValidatorInboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), } } diff --git a/lib/remote_node_indexer.go b/lib/remote_node_indexer.go deleted file mode 100644 index 834dbda51..000000000 --- a/lib/remote_node_indexer.go +++ /dev/null @@ -1,46 +0,0 @@ -package lib - 
-import ( - "github.com/deso-protocol/core/bls" - "github.com/deso-protocol/core/collections" -) - -// RemoteNodeIndexer is a structure that holds information about all remote nodes and their indices. -type RemoteNodeIndexer struct { - // AllRemoteNodes is a map storing all remote nodes by their IDs. - AllRemoteNodes *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] - - // Indices for various types of remote nodes. - ValidatorIndex *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] - NonValidatorOutboundIndex *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] - NonValidatorInboundIndex *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] -} - -// NewRemoteNodeIndexer initializes and returns a new instance of RemoteNodeIndexer. -func NewRemoteNodeIndexer() *RemoteNodeIndexer { - rni := &RemoteNodeIndexer{ - AllRemoteNodes: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), - ValidatorIndex: collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode](), - NonValidatorOutboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), - NonValidatorInboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), - } - - return rni -} - -// Getter methods for accessing the different indices. 
-func (rni *RemoteNodeIndexer) GetAllRemoteNodes() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { - return rni.AllRemoteNodes -} - -func (rni *RemoteNodeIndexer) GetValidatorIndex() *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] { - return rni.ValidatorIndex -} - -func (rni *RemoteNodeIndexer) GetNonValidatorOutboundIndex() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { - return rni.NonValidatorOutboundIndex -} - -func (rni *RemoteNodeIndexer) GetNonValidatorInboundIndex() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { - return rni.NonValidatorInboundIndex -} diff --git a/lib/remote_node_manager.go b/lib/remote_node_manager.go deleted file mode 100644 index 3300d5d62..000000000 --- a/lib/remote_node_manager.go +++ /dev/null @@ -1,349 +0,0 @@ -package lib - -import ( - "fmt" - "github.com/btcsuite/btcd/wire" - "github.com/deso-protocol/core/bls" - "github.com/deso-protocol/core/collections" - "github.com/golang/glog" - "github.com/pkg/errors" - "net" - "sync" - "sync/atomic" -) - -// RemoteNodeManager manages all the RemoteNode that the node is connected to. It is responsible for starting, maintaining, -// and stopping remote node connections. It is also responsible for organizing the remote nodes into indices for easy -// access, through the RemoteNodeIndexer. -type RemoteNodeManager struct { - mtx sync.Mutex - - // remoteNodeIndexer is a structure that stores and indexes all created remote nodes. - remoteNodeIndexer *RemoteNodeIndexer - - params *DeSoParams - srv *Server - bc *Blockchain - cmgr *ConnectionManager - - // keystore is a reference to the node's BLS private key storage. - keystore *BLSKeystore - - // configs - minTxFeeRateNanosPerKB uint64 - nodeServices ServiceFlag - - // Used to set remote node ids. Must be incremented atomically. 
- remoteNodeIndex uint64 -} - -func NewRemoteNodeManager(srv *Server, bc *Blockchain, cmgr *ConnectionManager, keystore *BLSKeystore, params *DeSoParams, - minTxFeeRateNanosPerKB uint64, nodeServices ServiceFlag) *RemoteNodeManager { - return &RemoteNodeManager{ - remoteNodeIndexer: NewRemoteNodeIndexer(), - params: params, - srv: srv, - bc: bc, - cmgr: cmgr, - keystore: keystore, - minTxFeeRateNanosPerKB: minTxFeeRateNanosPerKB, - nodeServices: nodeServices, - } -} - -func (manager *RemoteNodeManager) DisconnectAll() { - allRemoteNodes := manager.GetAllRemoteNodes().GetAll() - for _, rn := range allRemoteNodes { - glog.V(2).Infof("RemoteNodeManager.DisconnectAll: Disconnecting from remote node (id=%v)", rn.GetId()) - manager.Disconnect(rn) - } -} - -func (manager *RemoteNodeManager) newRemoteNode(validatorPublicKey *bls.PublicKey, isPersistent bool) *RemoteNode { - id := atomic.AddUint64(&manager.remoteNodeIndex, 1) - remoteNodeId := NewRemoteNodeId(id) - latestBlockHeight := uint64(manager.bc.BlockTip().Height) - return NewRemoteNode(remoteNodeId, validatorPublicKey, isPersistent, manager.srv, manager.cmgr, manager.keystore, - manager.params, manager.minTxFeeRateNanosPerKB, latestBlockHeight, manager.nodeServices) -} - -func (manager *RemoteNodeManager) ProcessCompletedHandshake(remoteNode *RemoteNode) { - if remoteNode == nil { - return - } - - if remoteNode.IsValidator() { - manager.SetValidator(remoteNode) - manager.UnsetNonValidator(remoteNode) - } else { - manager.UnsetValidator(remoteNode) - manager.SetNonValidator(remoteNode) - } - manager.srv.HandleAcceptedPeer(remoteNode) - manager.srv.maybeRequestAddresses(remoteNode) -} - -func (manager *RemoteNodeManager) Disconnect(rn *RemoteNode) { - if rn == nil { - return - } - glog.V(2).Infof("RemoteNodeManager.Disconnect: Disconnecting from remote node id=%v", rn.GetId()) - rn.Disconnect() - manager.removeRemoteNodeFromIndexer(rn) -} - -func (manager *RemoteNodeManager) DisconnectById(id RemoteNodeId) { - rn := 
manager.GetRemoteNodeById(id) - if rn == nil { - return - } - - manager.Disconnect(rn) -} - -func (manager *RemoteNodeManager) removeRemoteNodeFromIndexer(rn *RemoteNode) { - manager.mtx.Lock() - defer manager.mtx.Unlock() - - if rn == nil { - return - } - - indexer := manager.remoteNodeIndexer - indexer.GetAllRemoteNodes().Remove(rn.GetId()) - indexer.GetNonValidatorOutboundIndex().Remove(rn.GetId()) - indexer.GetNonValidatorInboundIndex().Remove(rn.GetId()) - - // Try to evict the remote node from the validator index. If the remote node is not a validator, then there is nothing to do. - if rn.GetValidatorPublicKey() == nil { - return - } - // Only remove from the validator index if the fetched remote node is the same as the one we are trying to remove. - // Otherwise, we could have a fun edge-case where a duplicated validator connection ends up removing an - // existing validator connection from the index. - fetchedRn, ok := indexer.GetValidatorIndex().Get(rn.GetValidatorPublicKey().Serialize()) - if ok && fetchedRn.GetId() == rn.GetId() { - indexer.GetValidatorIndex().Remove(rn.GetValidatorPublicKey().Serialize()) - } -} - -func (manager *RemoteNodeManager) SendMessage(rn *RemoteNode, desoMessage DeSoMessage) error { - if rn == nil { - return fmt.Errorf("RemoteNodeManager.SendMessage: RemoteNode is nil") - } - - return rn.SendMessage(desoMessage) -} - -func (manager *RemoteNodeManager) Cleanup() { - allRemoteNodes := manager.GetAllRemoteNodes().GetAll() - for _, rn := range allRemoteNodes { - if rn.IsTimedOut() { - glog.V(2).Infof("RemoteNodeManager.Cleanup: Disconnecting from remote node (id=%v)", rn.GetId()) - manager.Disconnect(rn) - } - } -} - -// ########################### -// ## Create RemoteNode -// ########################### - -func (manager *RemoteNodeManager) CreateValidatorConnection(netAddr *wire.NetAddress, publicKey *bls.PublicKey) error { - if netAddr == nil || publicKey == nil { - return fmt.Errorf("RemoteNodeManager.CreateValidatorConnection: 
netAddr or public key is nil") - } - - if _, ok := manager.GetValidatorIndex().Get(publicKey.Serialize()); ok { - return fmt.Errorf("RemoteNodeManager.CreateValidatorConnection: RemoteNode already exists for public key: %v", publicKey) - } - - remoteNode := manager.newRemoteNode(publicKey, false) - if err := remoteNode.DialOutboundConnection(netAddr); err != nil { - return errors.Wrapf(err, "RemoteNodeManager.CreateValidatorConnection: Problem calling DialPersistentOutboundConnection "+ - "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) - } - manager.setRemoteNode(remoteNode) - manager.GetValidatorIndex().Set(publicKey.Serialize(), remoteNode) - return nil -} - -func (manager *RemoteNodeManager) CreateNonValidatorPersistentOutboundConnection(netAddr *wire.NetAddress) (RemoteNodeId, error) { - if netAddr == nil { - return 0, fmt.Errorf("RemoteNodeManager.CreateNonValidatorPersistentOutboundConnection: netAddr is nil") - } - - remoteNode := manager.newRemoteNode(nil, true) - if err := remoteNode.DialPersistentOutboundConnection(netAddr); err != nil { - return 0, errors.Wrapf(err, "RemoteNodeManager.CreateNonValidatorPersistentOutboundConnection: Problem calling DialPersistentOutboundConnection "+ - "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) - } - manager.setRemoteNode(remoteNode) - manager.GetNonValidatorOutboundIndex().Set(remoteNode.GetId(), remoteNode) - return remoteNode.GetId(), nil -} - -func (manager *RemoteNodeManager) CreateNonValidatorOutboundConnection(netAddr *wire.NetAddress) error { - if netAddr == nil { - return fmt.Errorf("RemoteNodeManager.CreateNonValidatorOutboundConnection: netAddr is nil") - } - - remoteNode := manager.newRemoteNode(nil, false) - if err := remoteNode.DialOutboundConnection(netAddr); err != nil { - return errors.Wrapf(err, "RemoteNodeManager.CreateNonValidatorOutboundConnection: Problem calling DialOutboundConnection "+ - "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) - } - 
manager.setRemoteNode(remoteNode) - manager.GetNonValidatorOutboundIndex().Set(remoteNode.GetId(), remoteNode) - return nil -} - -func (manager *RemoteNodeManager) AttachInboundConnection(conn net.Conn, - na *wire.NetAddress) (*RemoteNode, error) { - - remoteNode := manager.newRemoteNode(nil, false) - if err := remoteNode.AttachInboundConnection(conn, na); err != nil { - return remoteNode, errors.Wrapf(err, "RemoteNodeManager.AttachInboundConnection: Problem calling AttachInboundConnection "+ - "for addr: (%s)", conn.RemoteAddr().String()) - } - - manager.setRemoteNode(remoteNode) - return remoteNode, nil -} - -func (manager *RemoteNodeManager) AttachOutboundConnection(conn net.Conn, na *wire.NetAddress, - remoteNodeId uint64, isPersistent bool) (*RemoteNode, error) { - - id := NewRemoteNodeId(remoteNodeId) - remoteNode := manager.GetRemoteNodeById(id) - if remoteNode == nil { - return nil, fmt.Errorf("RemoteNodeManager.AttachOutboundConnection: Problem getting remote node by id (%d)", - id.ToUint64()) - } - - if err := remoteNode.AttachOutboundConnection(conn, na, isPersistent); err != nil { - manager.Disconnect(remoteNode) - return nil, errors.Wrapf(err, "RemoteNodeManager.AttachOutboundConnection: Problem calling AttachOutboundConnection "+ - "for addr: (%s). 
Disconnecting remote node (id=%v)", conn.RemoteAddr().String(), remoteNode.GetId()) - } - - return remoteNode, nil -} - -// ########################### -// ## Setters -// ########################### - -func (manager *RemoteNodeManager) setRemoteNode(rn *RemoteNode) { - manager.mtx.Lock() - defer manager.mtx.Unlock() - - if rn == nil || rn.IsTerminated() { - return - } - - manager.GetAllRemoteNodes().Set(rn.GetId(), rn) -} - -func (manager *RemoteNodeManager) SetNonValidator(rn *RemoteNode) { - manager.mtx.Lock() - defer manager.mtx.Unlock() - - if rn == nil || rn.IsTerminated() { - return - } - - if rn.IsOutbound() { - manager.GetNonValidatorOutboundIndex().Set(rn.GetId(), rn) - } else { - manager.GetNonValidatorInboundIndex().Set(rn.GetId(), rn) - } -} - -func (manager *RemoteNodeManager) SetValidator(remoteNode *RemoteNode) { - manager.mtx.Lock() - defer manager.mtx.Unlock() - - if remoteNode == nil || remoteNode.IsTerminated() { - return - } - - pk := remoteNode.GetValidatorPublicKey() - if pk == nil { - return - } - manager.GetValidatorIndex().Set(pk.Serialize(), remoteNode) -} - -func (manager *RemoteNodeManager) UnsetValidator(remoteNode *RemoteNode) { - manager.mtx.Lock() - defer manager.mtx.Unlock() - - if remoteNode == nil || remoteNode.IsTerminated() { - return - } - - pk := remoteNode.GetValidatorPublicKey() - if pk == nil { - return - } - manager.GetValidatorIndex().Remove(pk.Serialize()) -} - -func (manager *RemoteNodeManager) UnsetNonValidator(rn *RemoteNode) { - manager.mtx.Lock() - defer manager.mtx.Unlock() - - if rn == nil || rn.IsTerminated() { - return - } - - if rn.IsOutbound() { - manager.GetNonValidatorOutboundIndex().Remove(rn.GetId()) - } else { - manager.GetNonValidatorInboundIndex().Remove(rn.GetId()) - } -} - -// ########################### -// ## Getters -// ########################### - -func (manager *RemoteNodeManager) GetAllRemoteNodes() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { - return 
manager.remoteNodeIndexer.GetAllRemoteNodes() -} - -func (manager *RemoteNodeManager) GetValidatorIndex() *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] { - return manager.remoteNodeIndexer.GetValidatorIndex() -} - -func (manager *RemoteNodeManager) GetNonValidatorOutboundIndex() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { - return manager.remoteNodeIndexer.GetNonValidatorOutboundIndex() -} - -func (manager *RemoteNodeManager) GetNonValidatorInboundIndex() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { - return manager.remoteNodeIndexer.GetNonValidatorInboundIndex() -} - -func (manager *RemoteNodeManager) GetRemoteNodeFromPeer(peer *Peer) *RemoteNode { - if peer == nil { - return nil - } - id := NewRemoteNodeId(peer.GetId()) - rn, _ := manager.GetAllRemoteNodes().Get(id) - return rn -} - -func (manager *RemoteNodeManager) GetRemoteNodeById(id RemoteNodeId) *RemoteNode { - rn, ok := manager.GetAllRemoteNodes().Get(id) - if !ok { - return nil - } - return rn -} - -func (manager *RemoteNodeManager) GetAllNonValidators() []*RemoteNode { - outboundRemoteNodes := manager.GetNonValidatorOutboundIndex().GetAll() - inboundRemoteNodes := manager.GetNonValidatorInboundIndex().GetAll() - return append(outboundRemoteNodes, inboundRemoteNodes...) 
-} diff --git a/lib/server.go b/lib/server.go index cc04defef..ec9a35bee 100644 --- a/lib/server.go +++ b/lib/server.go @@ -525,9 +525,9 @@ func NewServer( if _blsKeystore != nil { nodeServices |= SFPosValidator } - rnManager := NewRemoteNodeManager(srv, _chain, _cmgr, _blsKeystore, _params, _minFeeRateNanosPerKB, nodeServices) - srv.networkManager = NewNetworkManager(_params, _cmgr, rnManager, _blsKeystore, _desoAddrMgr, - _connectIps, _targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP) + srv.networkManager = NewNetworkManager(_params, srv, _chain, _cmgr, _blsKeystore, _desoAddrMgr, + _connectIps, _targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP, + _minFeeRateNanosPerKB, nodeServices) if srv.stateChangeSyncer != nil { srv.stateChangeSyncer.BlockHeight = uint64(_chain.headerTip().Height) @@ -2443,7 +2443,7 @@ func (srv *Server) _handleAddrMessage(pp *Peer, desoMsg DeSoMessage) { var ok bool if msg, ok = desoMsg.(*MsgDeSoAddr); !ok { glog.Errorf("Server._handleAddrMessage: Problem decoding MsgDeSoAddr: %v", spew.Sdump(desoMsg)) - srv.networkManager.rnManager.DisconnectById(id) + srv.networkManager.DisconnectById(id) return } @@ -2459,7 +2459,7 @@ func (srv *Server) _handleAddrMessage(pp *Peer, desoMsg DeSoMessage) { "Peer id=%v for sending us an addr message with %d transactions, which exceeds "+ "the max allowed %d", pp.ID, len(msg.AddrList), MaxAddrsPerAddrMsg)) - srv.networkManager.rnManager.DisconnectById(id) + srv.networkManager.DisconnectById(id) return } @@ -2510,7 +2510,7 @@ func (srv *Server) _handleGetAddrMessage(pp *Peer, desoMsg DeSoMessage) { if _, ok := desoMsg.(*MsgDeSoGetAddr); !ok { glog.Errorf("Server._handleAddrMessage: Problem decoding "+ "MsgDeSoAddr: %v", spew.Sdump(desoMsg)) - srv.networkManager.rnManager.DisconnectById(id) + srv.networkManager.DisconnectById(id) return } @@ -2536,10 +2536,10 @@ func (srv *Server) _handleGetAddrMessage(pp *Peer, desoMsg DeSoMessage) { } res.AddrList = 
append(res.AddrList, singleAddr) } - rn := srv.networkManager.rnManager.GetRemoteNodeById(id) - if err := srv.networkManager.rnManager.SendMessage(rn, res); err != nil { + rn := srv.networkManager.GetRemoteNodeById(id) + if err := srv.networkManager.SendMessage(rn, res); err != nil { glog.Errorf("Server._handleGetAddrMessage: Problem sending addr message to peer %v: %v", pp, err) - srv.networkManager.rnManager.DisconnectById(id) + srv.networkManager.DisconnectById(id) return } } @@ -2752,7 +2752,7 @@ func (srv *Server) _startAddressRelayer() { // For the first ten minutes after the connection controller starts, relay our address to all // peers. After the first ten minutes, do it once every 24 hours. glog.V(1).Infof("Server.startAddressRelayer: Relaying our own addr to peers") - remoteNodes := srv.networkManager.rnManager.GetAllRemoteNodes().GetAll() + remoteNodes := srv.networkManager.GetAllRemoteNodes().GetAll() if numMinutesPassed < 10 || numMinutesPassed%(RebroadcastNodeAddrIntervalMinutes) == 0 { for _, rn := range remoteNodes { if !rn.IsHandshakeCompleted() { From c507ea183a5315528da3625b00b830d692d02f91 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Thu, 7 Mar 2024 12:52:16 -0800 Subject: [PATCH 534/762] Updated plumbing architecture for atomic transactions. 
--- lib/block_view.go | 102 +++++++++++++++--- lib/block_view_atomic_txns.go | 189 +++++++++++++++++++--------------- lib/block_view_types.go | 2 +- lib/errors.go | 1 + lib/network.go | 20 ++-- 5 files changed, 209 insertions(+), 105 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 1c56cbce4..bedda1dc8 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -1362,6 +1362,17 @@ func (bav *UtxoView) _disconnectUpdateGlobalParams( func (bav *UtxoView) DisconnectTransaction(currentTxn *MsgDeSoTxn, txnHash *BlockHash, utxoOpsForTxn []*UtxoOperation, blockHeight uint32) error { + // Atomic transactions must have their inner transactions disconnected in series, while the + // wrapper must skip the nonce resetting mentioned below. + if currentTxn.TxnMeta.GetTxnType() == TxnTypeAtomicTxnsWrapper { + return bav._disconnectAtomicTxnsWrapper( + OperationTypeAtomicTxnsWrapper, + currentTxn, + txnHash, + utxoOpsForTxn, + blockHeight, + ) + } // Start by resetting the expected nonce for this txn's public key. 
if blockHeight >= bav.Params.ForkHeights.BalanceModelBlockHeight && currentTxn.TxnMeta.GetTxnType() != TxnTypeBlockReward { @@ -1539,8 +1550,6 @@ func (bav *UtxoView) DisconnectTransaction(currentTxn *MsgDeSoTxn, txnHash *Bloc OperationTypeCoinLockupTransfer, currentTxn, txnHash, utxoOpsForTxn, blockHeight) case TxnTypeCoinUnlock: return bav._disconnectCoinUnlock(OperationTypeCoinUnlock, currentTxn, txnHash, utxoOpsForTxn, blockHeight) - case TxnTypeAtomicTxns: - return bav._disconnectAtomicTxns(OperationTypeAtomicTxns, currentTxn, txnHash, utxoOpsForTxn, blockHeight) } @@ -3498,17 +3507,88 @@ func (bav *UtxoView) ValidateDiamondsAndGetNumDeSoNanos( } func (bav *UtxoView) ConnectTransaction( - txn *MsgDeSoTxn, txHash *BlockHash, - blockHeight uint32, blockTimestampNanoSecs int64, verifySignatures bool, - ignoreUtxos bool) (_utxoOps []*UtxoOperation, _totalInput uint64, _totalOutput uint64, _fees uint64, _err error) { - return bav._connectTransaction(txn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) - + txn *MsgDeSoTxn, + txHash *BlockHash, + blockHeight uint32, + blockTimestampNanoSecs int64, + verifySignatures bool, + ignoreUtxos bool, +) ( + _utxoOps []*UtxoOperation, + _totalInput uint64, + _totalOutput uint64, + _fees uint64, + _err error, +) { + return bav._connectTransaction( + txn, + txHash, + blockHeight, + blockTimestampNanoSecs, + verifySignatures, + ignoreUtxos, + ) } func (bav *UtxoView) _connectTransaction( - txn *MsgDeSoTxn, txHash *BlockHash, blockHeight uint32, - blockTimestampNanoSecs int64, verifySignatures bool, - ignoreUtxos bool) (_utxoOps []*UtxoOperation, _totalInput uint64, _totalOutput uint64, _fees uint64, _err error) { + txn *MsgDeSoTxn, + txHash *BlockHash, + blockHeight uint32, + blockTimestampNanoSecs int64, + verifySignatures bool, + ignoreUtxos bool, +) ( + _utxoOps []*UtxoOperation, + _totalInput uint64, + _totalOutput uint64, + _fees uint64, + _err error, +) { + // If the transaction is actually a 
series of atomic transactions, we process the transaction via + // _connectAtomicTransactionsWrapper which will recursively call each inner transaction as + // well as provide cumulative fee checking for the atomic transactions. + if txn.TxnMeta.GetTxnType() == TxnTypeAtomicTxnsWrapper { + return bav._connectAtomicTxnsWrapper( + txn, + txHash, + blockHeight, + blockTimestampNanoSecs, + verifySignatures, + ignoreUtxos, + ) + } + + // Check that we're not trying to commit a transaction meant to be part of a series of atomic transactions + // outside an atomic transactions wrapper. + if txn.IsAtomicTxnsInnerTxn() { + return nil, 0, 0, 0, RuleErrorAtomicTxnsRequiresWrapper + } + + // By here, we should know the transaction to be non-atomic. + return bav._connectNonAtomicTransaction( + txn, + txHash, + blockHeight, + blockTimestampNanoSecs, + verifySignatures, + ignoreUtxos, + ) +} + +func (bav *UtxoView) _connectNonAtomicTransaction( + txn *MsgDeSoTxn, + txHash *BlockHash, + blockHeight uint32, + blockTimestampNanoSecs int64, + verifySignatures bool, + ignoreUtxos bool, +) ( + _utxoOps []*UtxoOperation, + _totalInput uint64, + _totalOutput uint64, + _fees uint64, + _err error, +) { // Do a quick sanity check before trying to connect. 
if err := CheckTransactionSanity(txn, blockHeight, bav.Params); err != nil { return nil, 0, 0, 0, errors.Wrapf(err, "_connectTransaction: ") @@ -3755,8 +3835,6 @@ func (bav *UtxoView) _connectTransaction( totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinLockupTransfer(txn, txHash, blockHeight, verifySignatures) case TxnTypeCoinUnlock: totalInput, totalOutput, utxoOpsForTxn, err = bav._connectCoinUnlock(txn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures) - case TxnTypeAtomicTxns: - totalInput, totalOutput, utxoOpsForTxn, err = bav._connectAtomicTxns(txn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures) default: err = fmt.Errorf("ConnectTransaction: Unimplemented txn type %v", txn.TxnMeta.GetTxnType().String()) diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index 1557ff216..8046da958 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -9,41 +9,41 @@ import ( ) // -// TYPES: AtomicTxnsMetadata +// TYPES: AtomicTxnsWrapperMetadata // -type AtomicTxnsMetadata struct { - // The AtomicTxnsMetadata represents the transaction structure for the - // TxnTypeAtomicTxns transaction type. The transactions in the - // AtomicTxnsMetadata.Txns slice are committed atomically in-order on the +type AtomicTxnsWrapperMetadata struct { + // The AtomicTxnsWrapperMetadata represents the transaction structure for the + // TxnTypeAtomicTxnsWrapper transaction type. The transactions in the + // AtomicTxnsWrapperMetadata.Txns slice are committed atomically in-order on the // blockchain. This means either all the transactions with be executed // on the blockchain in the order specified or none of the transactions // will be executed. // - // The AtomicTxnsMetadata.Txns field must be a specially formed + // The AtomicTxnsWrapperMetadata.Txns field must be a specially formed // slice of DeSo transactions to ensure their atomic execution on the blockchain. 
// If this field is not properly structured, the AtomicTxns 'wrapper' transaction - // will be rejected. The transactions in AtomicTxnsMetadata.Txns and their corresponding + // will be rejected. The transactions in AtomicTxnsWrapperMetadata.Txns and their corresponding // ExtraData must form a circular doubly linked list. The links are embedded in the extra data map as follows: // ** Take special note of the encoding schema for the AtomicTxnsChainLength ** // // For the first transaction: - // AtomicTxnsMetadata.Txns[0].ExtraData = { - // AtomicTxnsChainLength: UintToBuf(uint64(len(AtomicTxnsMetadata.Txns)))... - // NextAtomicTxnPreHash: AtomicTxnsMetadata.Txns[1].AtomicHash() - // PreviousAtomicTxnPreHash: AtomicTxnsMetadata.Txns[len(AtomicTxnsMetadata.Txns)-1].AtomicHash() + // AtomicTxnsWrapperMetadata.Txns[0].ExtraData = { + // AtomicTxnsChainLength: UintToBuf(uint64(len(AtomicTxnsWrapperMetadata.Txns)))... + // NextAtomicTxnPreHash: AtomicTxnsWrapperMetadata.Txns[1].AtomicHash() + // PreviousAtomicTxnPreHash: AtomicTxnsWrapperMetadata.Txns[len(AtomicTxnsWrapperMetadata.Txns)-1].AtomicHash() // } // - // For the ith transaction where 0 < i < len(AtomicTxnsMetadata.Txns)-1: - // AtomicTxnsMetadata.Txns[i].ExtraData = { - // NextAtomicTxnPreHash: AtomicTxnsMetadata.Txns[i+1].AtomicHash() - // PreviousAtomicTxnPreHash: AtomicTxnsMetadata.Txns[i-1].AtomicHash() + // For the ith transaction where 0 < i < len(AtomicTxnsWrapperMetadata.Txns)-1: + // AtomicTxnsWrapperMetadata.Txns[i].ExtraData = { + // NextAtomicTxnPreHash: AtomicTxnsWrapperMetadata.Txns[i+1].AtomicHash() + // PreviousAtomicTxnPreHash: AtomicTxnsWrapperMetadata.Txns[i-1].AtomicHash() // } // // For the last transaction: - // AtomicTxnsMetadata.Txns[len(AtomicTxnsMetadata.Txns)-1].ExtraData = { - // NextAtomicTxnPreHash: AtomicTxnsMetadata.Txns[0].AtomicHash() - // PreviousAtomicTxnPreHash: AtomicTxnsMetadata.Txns[len(AtomicTxnsMetadata.Txns)-2].AtomicHash() + // 
AtomicTxnsWrapperMetadata.Txns[len(AtomicTxnsWrapperMetadata.Txns)-1].ExtraData = { + // NextAtomicTxnPreHash: AtomicTxnsWrapperMetadata.Txns[0].AtomicHash() + // PreviousAtomicTxnPreHash: AtomicTxnsWrapperMetadata.Txns[len(AtomicTxnsWrapperMetadata.Txns)-2].AtomicHash() // } // // The "AtomicHash()" function is a special transaction hash taken without consideration for the signature @@ -65,18 +65,18 @@ type AtomicTxnsMetadata struct { Txns []*MsgDeSoTxn } -func (txnData *AtomicTxnsMetadata) GetTxnType() TxnType { - return TxnTypeAtomicTxns +func (txnData *AtomicTxnsWrapperMetadata) GetTxnType() TxnType { + return TxnTypeAtomicTxnsWrapper } -func (txnData *AtomicTxnsMetadata) ToBytes(preSignature bool) ([]byte, error) { +func (txnData *AtomicTxnsWrapperMetadata) ToBytes(preSignature bool) ([]byte, error) { var data []byte data = append(data, UintToBuf(uint64(len(txnData.Txns)))...) for _, txn := range txnData.Txns { txnBytes, err := txn.ToBytes(preSignature) if err != nil { return nil, errors.Wrap(err, - "AtomicTxnsMetadata.ToBytes: Problem serializing txn") + "AtomicTxnsWrapperMetadata.ToBytes: Problem serializing txn") } data = append(data, UintToBuf(uint64(len(txnBytes)))...) data = append(data, txnBytes...) @@ -84,18 +84,18 @@ func (txnData *AtomicTxnsMetadata) ToBytes(preSignature bool) ([]byte, error) { return data, nil } -func (txnData *AtomicTxnsMetadata) FromBytes(data []byte) error { +func (txnData *AtomicTxnsWrapperMetadata) FromBytes(data []byte) error { rr := bytes.NewReader(data) // Read the number of transactions within the atomic transaction. 
numTxns, err := ReadUvarint(rr) if err != nil { return errors.Wrap(err, - "AtomicTxnsMetadata.FromBytes: Problem reading numTxns") + "AtomicTxnsWrapperMetadata.FromBytes: Problem reading numTxns") } txnData.Txns, err = SafeMakeSliceWithLength[*MsgDeSoTxn](numTxns) if err != nil { - return errors.Wrap(err, "AtomicTxnsMetadata.FromBytes: Problem allocating txnData.Txns") + return errors.Wrap(err, "AtomicTxnsWrapperMetadata.FromBytes: Problem allocating txnData.Txns") } // Read the transactions. @@ -106,40 +106,40 @@ func (txnData *AtomicTxnsMetadata) FromBytes(data []byte) error { numTxnBytes, err := ReadUvarint(rr) if err != nil { return errors.Wrap(err, - "AtomicTxnsMetadata.FromBytes: Problem reading number of bytes in transaction") + "AtomicTxnsWrapperMetadata.FromBytes: Problem reading number of bytes in transaction") } // Allocate memory for the transaction bytes to be read into. txnBytes, err := SafeMakeSliceWithLength[byte](numTxnBytes) if err != nil { return errors.Wrap(err, - "AtomicTxnsMetadata.FromBytes: Problem allocating bytes for transaction") + "AtomicTxnsWrapperMetadata.FromBytes: Problem allocating bytes for transaction") } // Read the transaction into the txnBytes memory buffer. if _, err = io.ReadFull(rr, txnBytes); err != nil { return errors.Wrap(err, - "AtomicTxnsMetadata.FromBytes: Problem reading bytes for transaction") + "AtomicTxnsWrapperMetadata.FromBytes: Problem reading bytes for transaction") } // Convert the txnBytes buffer to a MsgDeSoTxn struct. 
if err = txnData.Txns[ii].FromBytes(txnBytes); err != nil { return errors.Wrap(err, - "AtomicTxnsMetadata.FromBytes: Problem parsing transaction bytes") + "AtomicTxnsWrapperMetadata.FromBytes: Problem parsing transaction bytes") } } return nil } -func (txnData *AtomicTxnsMetadata) New() DeSoTxnMetadata { - return &AtomicTxnsMetadata{} +func (txnData *AtomicTxnsWrapperMetadata) New() DeSoTxnMetadata { + return &AtomicTxnsWrapperMetadata{} } // // HELPER FUNCTIONS: MsgDeSoTxn // -func (msg *MsgDeSoTxn) IsAtomicTxn() bool { +func (msg *MsgDeSoTxn) IsAtomicTxnsInnerTxn() bool { // An atomic transaction is qualified by the existence of the NextAtomicTxnPreHash // and PreviousAtomicTxnPreHash keys in the ExtraData map. if _, keyExists := msg.ExtraData[NextAtomicTxnPreHash]; !keyExists { @@ -159,7 +159,7 @@ func (msg *MsgDeSoTxn) AtomicHash() (*BlockHash, error) { } // Sanity check that the transaction includes the necessary extra data to be included in an atomic transaction. - if !msgDuplicate.IsAtomicTxn() { + if !msgDuplicate.IsAtomicTxnsInnerTxn() { return nil, errors.New("MsgDeSoTxn.AtomicHash: Cannot compute atomic hash on non-atomic transaction") } @@ -182,88 +182,113 @@ func (msg *MsgDeSoTxn) AtomicHash() (*BlockHash, error) { // Connect and Disconnect Atomic Txn Logic // -func (bav *UtxoView) _connectAtomicTxns( +func (bav *UtxoView) _connectAtomicTxnsWrapper( txn *MsgDeSoTxn, txHash *BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, - verifySignature bool, + verifySignatures bool, + ignoreUtxos bool, ) ( + _utxoOps []*UtxoOperation, _totalInput uint64, _totalOutput uint64, - _utxoOps []*UtxoOperation, + _fees uint64, _err error, ) { var utxoOpsForTxn []*UtxoOperation + // Don't allow the atomic transactions and the wrapper to take up more than half of the block. 
+ txnBytes, err := txn.ToBytes(false) + if err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "_connectTransaction: Problem serializing transaction: ") + } + txnSizeBytes := uint64(len(txnBytes)) + if txnSizeBytes > bav.Params.MaxBlockSizeBytes/2 { + return nil, 0, 0, 0, RuleErrorTxnTooBig + } + // Validate the connecting block height. if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { - return 0, 0, nil, - errors.Wrap(RuleErrorAtomicTxnBeforeBlockHeight, "_connectAtomicTxns") + return nil, 0, 0, 0, + errors.Wrap(RuleErrorAtomicTxnBeforeBlockHeight, "_connectAtomicTxnsWrapper") } // Validate the transaction metadata type. - if txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxns { - return 0, 0, nil, - fmt.Errorf("_connectAtomicTxns: TxnMeta type: %v", txn.TxnMeta.GetTxnType().GetTxnString()) + if txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { + return nil, 0, 0, 0, + fmt.Errorf("_connectAtomicTxnsWrapper: TxnMeta type: %v", txn.TxnMeta.GetTxnType().GetTxnString()) } - // Verify the wrapper of the transaction. This does not verify the txn.TxnMeta contents. - // NOTE: The intentional lack of a _connectBasicTransfer or _connectBasicTransferWithExtraSpend - // operation here skips a signature check on the wrapper. Each internal transaction - // will have its signatures checked when it's connected via _connectTransaction below. + // Verify the wrapper of the transaction. This does not verify the txn.TxnMeta contents, just that + // the wrapper is well formatted. if err := _verifyAtomicTxnsWrapper(txn); err != nil { - return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") + return nil, 0, 0, 0, errors.Wrap(err, "_connectAtomicTxnsWrapper") } // Extract the metadata from the transaction. - txMeta := txn.TxnMeta.(*AtomicTxnsMetadata) + txMeta := txn.TxnMeta.(*AtomicTxnsWrapperMetadata) // Verify the chain of transactions as being not tampered with. This verifies the txn.TxnMeta contents. 
if err := _verifyAtomicTxnsChain(txMeta); err != nil { - return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") + return nil, 0, 0, 0, errors.Wrap(err, "_connectAtomicTxnsWrapper") } - // Connect the inner atomic transactions. + // Execute the internal transactions. var innerUtxoOps [][]*UtxoOperation - var totalFees uint64 + var totalInput, totalOutput, totalFees uint64 for _, innerTxn := range txMeta.Txns { - // TODO: Verify TxnTypeSubmitPost and TxnTypeUpdateProfile to ensure ignoreUtxos is always safe to set as false. - innerTxnUtxoOps, _, _, txnFees, err := bav._connectTransaction( - innerTxn, txHash, blockHeight, blockTimestampNanoSecs, verifySignature, false) + // NOTE: By recursively calling _connectNonAtomicTransaction, each inner transaction is checked that + // it is capable of paying for its own fees as well as having a valid signature. + innerTxnUtxoOps, txnInput, txnOutput, txnFees, err := bav._connectNonAtomicTransaction( + innerTxn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) if err != nil { - return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") + return nil, 0, 0, 0, + errors.Wrap(err, "_connectAtomicTxnsWrapper") } // Collect the inner txn utxo ops. We will use these if we ever disconnect. innerUtxoOps = append(innerUtxoOps, innerTxnUtxoOps) - // Collect the fees to ensure fees are being paid properly. - // - // NOTE: There's two design options that can be utilized here. The first - // involves checking that every transaction covers their own fees. - // The second involves checking that the cumulative fees paid across all - // transactions satisfies the fees for the entire atomic transaction wrapper. 
- // The second design has two key advantages and hence why we use it here: - // (1) It's easier to implement and doesn't require a fee check within _connectAtomicTxns - // (2) It enables a special app layer use case where an atomic transaction - // could be used to subsidize user transactions (likes, comments, update profiles, etc) - // without directly sending the user DESO (the no crypto faucet use cases). In effect, - // the user can utilize apps without needing DESO if the app is willing to subsidize - // their transaction fees. + // Collect the input/output/fees to ensure fees are being paid properly. + totalInput, err = SafeUint64().Add(totalInput, txnInput) + if err != nil { + return nil, 0, 0, 0, + errors.Wrap(err, "_connectAtomicTxnsWrapper") + } + totalOutput, err = SafeUint64().Add(totalOutput, txnOutput) + if err != nil { + return nil, 0, 0, 0, + errors.Wrap(err, "_connectAtomicTxnsWrapper") + } totalFees, err = SafeUint64().Add(totalFees, txnFees) if err != nil { - return 0, 0, nil, errors.Wrap(err, "_connectAtomicTxns") + return nil, 0, 0, 0, + errors.Wrap(err, "_connectAtomicTxnsWrapper") + } + } + + // Validate that the internal transactions cumulatively pay enough in fees to + // cover the atomic transactions AS WELL AS the wrapper. + if txnSizeBytes != 0 && bav.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB != 0 { + // Make sure there isn't overflow in the fee. + if txn.TxnFeeNanos != ((txn.TxnFeeNanos * 1000) / 1000) { + return nil, 0, 0, 0, RuleErrorOverflowDetectedInFeeRateCalculation + } + // If the fee is less than the minimum network fee per KB, return an error. + if (txn.TxnFeeNanos*1000)/uint64(txnSizeBytes) < bav.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB { + return nil, 0, 0, 0, RuleErrorTxnFeeBelowNetworkMinimum } } - // Construct a UtxoOp for the transaction. + // Construct a UtxoOp for the atomic transactions wrapper. 
utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ - Type: OperationTypeAtomicTxns, + Type: OperationTypeAtomicTxnsWrapper, AtomicTxnsInnerUtxoOps: innerUtxoOps, }) - return 0, 0, utxoOpsForTxn, nil + return utxoOpsForTxn, totalInput, totalOutput, totalFees, nil } func _verifyAtomicTxnsWrapper(txn *MsgDeSoTxn) error { @@ -304,7 +329,7 @@ func _verifyAtomicTxnsWrapper(txn *MsgDeSoTxn) error { // becomes extremely important in _connectAtomicTxns(). var totalInnerTxnFees uint64 var err error - for _, innerTxn := range txn.TxnMeta.(*AtomicTxnsMetadata).Txns { + for _, innerTxn := range txn.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns { totalInnerTxnFees, err = SafeUint64().Add(totalInnerTxnFees, innerTxn.TxnFeeNanos) if err != nil { return RuleErrorAtomicTxnsWrapperHasInternalFeeOverflow @@ -328,7 +353,7 @@ func _verifyAtomicTxnsWrapper(txn *MsgDeSoTxn) error { return nil } -func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsMetadata) error { +func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsWrapperMetadata) error { // Validate: // (1) The inner transactions are not additional redundant atomic transactions wrappers. // (2) The inner transactions are meant to be included in an atomic transaction. @@ -337,12 +362,12 @@ func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsMetadata) error { var atomicHashes []*BlockHash for ii, innerTxn := range txnMeta.Txns { // Validate this transaction is not another redundant atomic transaction. - if innerTxn.TxnMeta.GetTxnType() == TxnTypeAtomicTxns { + if innerTxn.TxnMeta.GetTxnType() == TxnTypeAtomicTxnsWrapper { return RuleErrorAtomicTxnsHasAtomicTxnsInnerTxn } // Validate the inner transaction as meant to be included in an atomic transaction. 
- if !innerTxn.IsAtomicTxn() { + if !innerTxn.IsAtomicTxnsInnerTxn() { return RuleErrorAtomicTxnsHasNonAtomicInnerTxn } @@ -405,7 +430,7 @@ func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsMetadata) error { return nil } -func (bav *UtxoView) _disconnectAtomicTxns( +func (bav *UtxoView) _disconnectAtomicTxnsWrapper( operationType OperationType, currentTxn *MsgDeSoTxn, txnHash *BlockHash, @@ -413,24 +438,24 @@ func (bav *UtxoView) _disconnectAtomicTxns( blockHeight uint32, ) error { if len(utxoOpsForTxn) == 0 { - return fmt.Errorf("_disconnectAtomicTxns: utxoOperations are missing") + return fmt.Errorf("_disconnectAtomicTxnsWrapper: utxoOperations are missing") } operationIndex := len(utxoOpsForTxn) - 1 // Verify the last operation as being of type OperationTypeAtomicTxns. - if utxoOpsForTxn[operationIndex].Type != OperationTypeAtomicTxns { - return fmt.Errorf("_disconnectAtomicTxns: Trying to revert "+ + if utxoOpsForTxn[operationIndex].Type != OperationTypeAtomicTxnsWrapper { + return fmt.Errorf("_disconnectAtomicTxnsWrapper: Trying to revert "+ "OperationTypeAtomicTxns but found type %v", utxoOpsForTxn[operationIndex].Type) } // Gather the transaction metadata so we know the internal transactions. - txMeta := currentTxn.TxnMeta.(*AtomicTxnsMetadata) + txMeta := currentTxn.TxnMeta.(*AtomicTxnsWrapperMetadata) // Sanity check the AtomicTxns operation exists. 
operationData := utxoOpsForTxn[operationIndex] if operationData.AtomicTxnsInnerUtxoOps == nil || len(operationData.AtomicTxnsInnerUtxoOps) != len(txMeta.Txns) { - return fmt.Errorf("_disconnectAtomicTxns: Trying to revert OperationTypeAtomicTxns " + + return fmt.Errorf("_disconnectAtomicTxnsWrapper: Trying to revert OperationTypeAtomicTxns " + "but found nil or mistmatched number of UtxoOps for inner transactions") } @@ -443,7 +468,7 @@ func (bav *UtxoView) _disconnectAtomicTxns( innerTxn.Hash(), operationData.AtomicTxnsInnerUtxoOps[ii], blockHeight); err != nil { - return errors.Wrapf(err, "_disconnectAtomicTxns") + return errors.Wrapf(err, "_disconnectAtomicTxnsWrapper") } } diff --git a/lib/block_view_types.go b/lib/block_view_types.go index f58d4aa97..81bc63068 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -682,7 +682,7 @@ const ( OperationTypeStakeDistributionRestake OperationType = 49 OperationTypeStakeDistributionPayToBalance OperationType = 50 OperationTypeSetValidatorLastActiveAtEpoch OperationType = 51 - OperationTypeAtomicTxns OperationType = 52 + OperationTypeAtomicTxnsWrapper OperationType = 52 // NEXT_TAG = 53 ) diff --git a/lib/errors.go b/lib/errors.go index 9f084cbe7..75a6eb36f 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -538,6 +538,7 @@ const ( RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction RuleError = "RuleErrorUpdateCoinLockupParamsUpdatingPermanentTransferRestriction" // Atomic Transactions + RuleErrorAtomicTxnsRequiresWrapper RuleError = "RuleErrorAtomicTxnsRequiresWrapper" RuleErrorAtomicTxnBeforeBlockHeight RuleError = "RuleErrorAtomicTxnBeforeBlockHeight" RuleErrorAtomicTxnsWrapperPublicKeyMustBeZero RuleError = "RuleErrorAtomicTxnsWrapperPublicKeyMustBeZero" RuleErrorAtomicTxnsWrapperMustHaveZeroInputs RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroInputs" diff --git a/lib/network.go b/lib/network.go index 467d76fb6..c066fc660 100644 --- a/lib/network.go +++ b/lib/network.go @@ -262,7 
+262,7 @@ const ( TxnTypeUpdateCoinLockupParams TxnType = 41 TxnTypeCoinLockupTransfer TxnType = 42 TxnTypeCoinUnlock TxnType = 43 - TxnTypeAtomicTxns TxnType = 44 + TxnTypeAtomicTxnsWrapper TxnType = 44 // NEXT_ID = 44 ) @@ -314,7 +314,7 @@ const ( TxnStringUpdateCoinLockupParams TxnString = "UPDATE_COIN_LOCKUP_PARAMS" TxnStringCoinLockupTransfer TxnString = "COIN_LOCKUP_TRANSFER" TxnStringCoinUnlock TxnString = "COIN_UNLOCK" - TxnStringAtomicTxns TxnString = "ATOMIC_TXNS" + TxnStringAtomicTxnsWrapper TxnString = "ATOMIC_TXNS_WRAPPER" ) var ( @@ -329,7 +329,7 @@ var ( TxnTypeAccessGroup, TxnTypeAccessGroupMembers, TxnTypeNewMessage, TxnTypeRegisterAsValidator, TxnTypeUnregisterAsValidator, TxnTypeStake, TxnTypeUnstake, TxnTypeUnlockStake, TxnTypeUnjailValidator, TxnTypeCoinLockup, TxnTypeUpdateCoinLockupParams, TxnTypeCoinLockupTransfer, TxnTypeCoinUnlock, - TxnTypeAtomicTxns, + TxnTypeAtomicTxnsWrapper, } AllTxnString = []TxnString{ TxnStringUnset, TxnStringBlockReward, TxnStringBasicTransfer, TxnStringBitcoinExchange, TxnStringPrivateMessage, @@ -342,7 +342,7 @@ var ( TxnStringAccessGroup, TxnStringAccessGroupMembers, TxnStringNewMessage, TxnStringRegisterAsValidator, TxnStringUnregisterAsValidator, TxnStringStake, TxnStringUnstake, TxnStringUnlockStake, TxnStringUnjailValidator, TxnStringCoinLockup, TxnStringUpdateCoinLockupParams, TxnStringCoinLockupTransfer, TxnStringCoinUnlock, - TxnStringAtomicTxns, + TxnStringAtomicTxnsWrapper, } ) @@ -442,8 +442,8 @@ func (txnType TxnType) GetTxnString() TxnString { return TxnStringCoinLockupTransfer case TxnTypeCoinUnlock: return TxnStringCoinUnlock - case TxnTypeAtomicTxns: - return TxnStringAtomicTxns + case TxnTypeAtomicTxnsWrapper: + return TxnStringAtomicTxnsWrapper default: return TxnStringUndefined } @@ -537,8 +537,8 @@ func GetTxnTypeFromString(txnString TxnString) TxnType { return TxnTypeCoinLockupTransfer case TxnStringCoinUnlock: return TxnTypeCoinUnlock - case TxnStringAtomicTxns: - return TxnTypeAtomicTxns + 
case TxnStringAtomicTxnsWrapper: + return TxnTypeAtomicTxnsWrapper default: // TxnTypeUnset means we couldn't find a matching txn type return TxnTypeUnset @@ -640,8 +640,8 @@ func NewTxnMetadata(txType TxnType) (DeSoTxnMetadata, error) { return (&CoinLockupTransferMetadata{}).New(), nil case TxnTypeCoinUnlock: return (&CoinUnlockMetadata{}).New(), nil - case TxnTypeAtomicTxns: - return (&AtomicTxnsMetadata{}).New(), nil + case TxnTypeAtomicTxnsWrapper: + return (&AtomicTxnsWrapperMetadata{}).New(), nil default: return nil, fmt.Errorf("NewTxnMetadata: Unrecognized TxnType: %v; make sure you add the new type of transaction to NewTxnMetadata", txType) } From 3cd83f7535c5175c565f933092c4d72a32389b69 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 8 Mar 2024 15:36:30 -0800 Subject: [PATCH 535/762] Review comment changes. --- lib/block_view_atomic_txns.go | 38 ++++++++++++++++------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index 8046da958..fe0b7df79 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/pkg/errors" "io" - "reflect" ) // @@ -158,11 +157,6 @@ func (msg *MsgDeSoTxn) AtomicHash() (*BlockHash, error) { return nil, errors.Wrap(err, "MsgDeSoTxn.AtomicHash: Cannot create duplicate transaction") } - // Sanity check that the transaction includes the necessary extra data to be included in an atomic transaction. - if !msgDuplicate.IsAtomicTxnsInnerTxn() { - return nil, errors.New("MsgDeSoTxn.AtomicHash: Cannot compute atomic hash on non-atomic transaction") - } - // Delete the NextAtomicTxnPreHash and PreviousAtomicTxnPreHash from the ExtraData map. delete(msgDuplicate.ExtraData, NextAtomicTxnPreHash) delete(msgDuplicate.ExtraData, PreviousAtomicTxnPreHash) @@ -224,7 +218,8 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( // Verify the wrapper of the transaction. 
This does not verify the txn.TxnMeta contents, just that // the wrapper is well formatted. if err := _verifyAtomicTxnsWrapper(txn); err != nil { - return nil, 0, 0, 0, errors.Wrap(err, "_connectAtomicTxnsWrapper") + return nil, 0, 0, 0, + errors.Wrap(err, "_connectAtomicTxnsWrapper: failed to verify wrapper transaction") } // Extract the metadata from the transaction. @@ -232,7 +227,8 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( // Verify the chain of transactions as being not tampered with. This verifies the txn.TxnMeta contents. if err := _verifyAtomicTxnsChain(txMeta); err != nil { - return nil, 0, 0, 0, errors.Wrap(err, "_connectAtomicTxnsWrapper") + return nil, 0, 0, 0, + errors.Wrap(err, "_connectAtomicTxnsWrapper: failed to verify transaction chain") } // Execute the internal transactions. @@ -245,7 +241,7 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( innerTxn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) if err != nil { return nil, 0, 0, 0, - errors.Wrap(err, "_connectAtomicTxnsWrapper") + errors.Wrap(err, "_connectAtomicTxnsWrapper: failed to connect non-atomic transaction") } // Collect the inner txn utxo ops. We will use these if we ever disconnect. @@ -326,7 +322,7 @@ func _verifyAtomicTxnsWrapper(txn *MsgDeSoTxn) error { // // Because txn.TxnFeeNanos gets used in several places for non-connection logic (e.g. BMF), // it's important to use design option (2) to be consistent across core. This check as a result - // becomes extremely important in _connectAtomicTxns(). + // becomes extremely important in _connectAtomicTxnsWrapper(). 
var totalInnerTxnFees uint64 var err error for _, innerTxn := range txn.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns { @@ -339,16 +335,16 @@ func _verifyAtomicTxnsWrapper(txn *MsgDeSoTxn) error { return RuleErrorAtomicTxnsWrapperMustHaveEqualFeeToInternalTxns } - // Technically, the txn.TxnNonce field could be + // Technically the txn.TxnNonce field could be anything but for consistent + // hashing we force all fields of the nonce to be zero. This also makes + // it more consistent with the rest of the rules regarding atomic transactions + // wrappers. if txn.TxnNonce.ExpirationBlockHeight != 0 || txn.TxnNonce.PartialID != 0 { return RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce } - // Since the wrapper is free and modifiable by anyone, we check to ensure the - // associated ExtraData is empty to prevent free storage on the blockchain. - if len(txn.ExtraData) != 0 { - return RuleErrorAtomicTxnsWrapperMustHaveZeroExtraData - } + // NOTE: We do not enforce rules on txn.ExtraData as it's both useful + // for app developers and is being paid for via txn.TxnFeeNanos. return nil } @@ -373,7 +369,7 @@ func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsWrapperMetadata) error { // Validate the starting point of the atomic transactions chain. _, keyExists := innerTxn.ExtraData[AtomicTxnsChainLength] - if keyExists && ii == 0 { + if !keyExists && ii == 0 { return RuleErrorAtomicTxnsMustStartWithChainLength } if keyExists && ii > 0 { @@ -414,16 +410,16 @@ func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsWrapperMetadata) error { // Validate the chain sequence specified. for ii, innerTxn := range txnMeta.Txns { // Check the next transaction. 
- if !reflect.DeepEqual( + if !bytes.Equal( innerTxn.ExtraData[NextAtomicTxnPreHash], - atomicHashes[nextIndex(ii, len(txnMeta.Txns))]) { + atomicHashes[nextIndex(ii, len(txnMeta.Txns))].ToBytes()) { return RuleErrorAtomicTxnsHasBrokenChain } // Check the previous transaction - if !reflect.DeepEqual( + if !bytes.Equal( innerTxn.ExtraData[PreviousAtomicTxnPreHash], - atomicHashes[prevIndex(ii, len(txnMeta.Txns))]) { + atomicHashes[prevIndex(ii, len(txnMeta.Txns))].ToBytes()) { return RuleErrorAtomicTxnsHasBrokenChain } } From 0e8865508ac2d92c162e7d56d03a6f35611f131c Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Fri, 8 Mar 2024 17:08:54 -0800 Subject: [PATCH 536/762] Added support for CreateAtomicTxnsWrapper along with explainer comment. --- lib/blockchain.go | 123 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 123 insertions(+) diff --git a/lib/blockchain.go b/lib/blockchain.go index 3a8b9484c..96bdd4a45 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5747,3 +5747,126 @@ func (bc *Blockchain) CreateCoinUnlockTxn( return txn, totalInput, 0, fees, nil } + +// ------------------------------------------------- +// Atomic Transaction Creation Function +// ------------------------------------------------- + +// CreateAtomicTxnsWrapper is unlike other Create... transaction creation tools. +// CreateAtomicTxnsWrapper is passed a list of UNSIGNED transactions who are then +// chained together, wrapped, and converted into a single transaction of type +// TxnTypeAtomicTxnsWrapper. It is then the responsibility of the calling parties +// to verify the response and sign the transactions which now reside within the atomic +// wrapper transaction. +// +// A full example can be used to illustrate how CreateAtomicTxnsWrapper is meant to +// be used in a production application. Consider an application which wants +// to subsidize all likes for users using atomic transactions. 
The following is +// the step-by-step procedure by which the app can do so: +// +// (1) The user creates an unsigned LIKE transaction as though they have DESO to pay for it. +// The LIKE transaction has no special metadata and is no different from an unsubsidized +// LIKE transaction up and to this point. +// (2) The user submits the unsigned LIKE transaction to the application to be subsidized +// as they cannot pay for the LIKE transaction themselves. +// (3) The application creates an unsigned BASIC_TRANSFER transaction to transfer the user enough +// DESO to cover their LIKE transaction. The transfer amount can be computed exactly +// using the TxnFeeNanos field of the provided LIKE transaction. +// (4) Rather than returning the BASIC_TRANSFER to the user, the application then calls +// CreateAtomicTxnsWrapper where unsignedTransactions = [BASIC_TRANSFER, LIKE]. This +// returns an atomic transaction that forces the user to use the BASIC_TRANSFER for this +// specific LIKE transaction. Note that this adds extra data to both the BASIC_TRANSFER +// and the LIKE transactions to ensure their atomic execution. +// (5) Before returning the atomic transaction to the user, the application now signs +// the BASIC_TRANSFER which resides within the atomic transaction wrapper on the server side. +// At this point, only the LIKE transaction remains unsigned within the atomic transaction. +// (6) The atomic transaction is returned to the user NOT the raw BASIC_TRANSFER transaction +// created in step 3. +// (7) The user signs the LIKE transaction using their private key on the client side. +// At this point, all transactions which reside within the AtomicTxns wrapper are signed. +// (8) The user submits the atomic transaction containing both the signed BASIC_TRANSFER +// transaction and the signed LIKE transaction to the blockchain. 
+func (bc *Blockchain) CreateAtomicTxnsWrapper( + unsignedTransactions []*MsgDeSoTxn, + extraData map[string][]byte, +) ( + _txn *MsgDeSoTxn, + _fees uint64, + _err error, +) { + // First we must convert the unsigned transactions into a doubly linked list via the + // transaction extra data. We create a copy of the transactions to ensure we do not + // modify the caller's data. + + // Construct special helper functions for circular doubly linked list indexing. + nextIndex := func(currentIndex int, chainLength int) int { + // Check for the special case of an atomic chain of length 1. + if chainLength == 1 { + return currentIndex + } + return (currentIndex + 1) % chainLength + } + prevIndex := func(currentIndex int, chainLength int) int { + // Check for the special case of an atomic chain of length 1. + if chainLength == 1 { + return currentIndex + } + + // Check for the wrap around case. + if currentIndex == 0 { + return chainLength - 1 + } + return currentIndex - 1 + } + + // Construct the chained transactions and keep track of the total fees paid. + var chainedUnsignedTransactions []*MsgDeSoTxn + var totalFees uint64 + for ii, txn := range unsignedTransactions { + // Copy the transaction to prevent pointer reuse. + txnDuplicate, err := txn.Copy() + if err != nil { + return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to copy transaction") + } + + // Compute the atomic hashes. + nextHash, err := unsignedTransactions[nextIndex(ii, len(unsignedTransactions))].AtomicHash() + if err != nil { + return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to compute next hash") + } + prevHash, err := unsignedTransactions[prevIndex(ii, len(unsignedTransactions))].AtomicHash() + if err != nil { + return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to copy prev hash") + } + + // Set the transaction extra data and append to the chained list. 
+ txnDuplicate.ExtraData[NextAtomicTxnPreHash] = nextHash.ToBytes() + txnDuplicate.ExtraData[PreviousAtomicTxnPreHash] = prevHash.ToBytes() + chainedUnsignedTransactions = append(chainedUnsignedTransactions, txnDuplicate) + + // Track the total fees paid. + totalFees, err = SafeUint64().Add(totalFees, txnDuplicate.TxnFeeNanos) + if err != nil { + return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: total fee overflow") + } + } + + // Set the starting point for the atomic transactions. + chainedUnsignedTransactions[0].ExtraData[AtomicTxnsChainLength] = UintToBuf(uint64(len(unsignedTransactions))) + + // Create an atomic transactions wrapper taking special care to the rules specified in _verifyAtomicTxnsWrapper. + // Because we do not call AddInputsAndChangeToTransaction on the wrapper, we must specify ALL fields exactly. + txn := &MsgDeSoTxn{ + TxnVersion: 1, + TxInputs: nil, + TxOutputs: nil, + TxnFeeNanos: totalFees, + TxnNonce: &DeSoNonce{ExpirationBlockHeight: 0, PartialID: 0}, + TxnMeta: &AtomicTxnsWrapperMetadata{Txns: chainedUnsignedTransactions}, + PublicKey: ZeroPublicKey.ToBytes(), + ExtraData: extraData, + Signature: DeSoSignature{}, + } + + return txn, totalFees, nil +} From 4afebad7727165104081fb0fa882ccb131356304 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 11 Mar 2024 14:02:54 -0700 Subject: [PATCH 537/762] Atomic transaction testing scaffolding and helper functions. 
--- lib/block_view_atomic_txns_test.go | 388 +++++++++++++++++++++++++++++ 1 file changed, 388 insertions(+) create mode 100644 lib/block_view_atomic_txns_test.go diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go new file mode 100644 index 000000000..8489de21f --- /dev/null +++ b/lib/block_view_atomic_txns_test.go @@ -0,0 +1,388 @@ +package lib + +import ( + "github.com/btcsuite/btcd/btcec" + "github.com/dgraph-io/badger/v3" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "math" + "testing" +) + +func TestDependentAtomicTransactionGeneration(t *testing.T) { + // Initialize test chain, miner, and testMeta. + testMeta := _setUpMinerAndTestMetaForAtomicTransactionTests(t) + + // Initialize m0, m1, m2, m3, m4. + _setUpUsersForAtomicTransactionsTesting(testMeta) + + // Generate 1,000 dependent atomic transactions. + atomicTxns := _generateDependentAtomicTransactions(testMeta, 100) + + // Construct a new view to connect the transactions to. + utxoView, err := NewUtxoView( + testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + blockHeight := testMeta.chain.BlockTip().Height + 1 + + // Connect the transactions to ensure they can actually be connected. + for _, txn := range atomicTxns { + // Connect the transaction. + txHash := txn.Hash() + _, _, _, _, err := utxoView.ConnectTransaction( + txn, txHash, blockHeight, 0, true, false) + require.NoError(t, err) + } + + // Flush the view to ensure everything is working properly. + require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) + + // + // Now we test that the transactions are truly dependent on each-other by reorganizing them. + // + + // Reorganize the transactions. + initialTxn := atomicTxns[0] + atomicTxns[0] = atomicTxns[len(atomicTxns)-1] + atomicTxns[0] = initialTxn + + // Initialize test chain, miner, and testMeta for failing use. 
+ testMetaFail := _setUpMinerAndTestMetaForAtomicTransactionTests(t) + + // Initialize m0, m1, m2, m3, m4. + _setUpUsersForAtomicTransactionsTesting(testMetaFail) + + // Construct a new view to connect the transactions to. + utxoView, err = NewUtxoView( + testMetaFail.db, testMetaFail.params, testMetaFail.chain.postgres, testMetaFail.chain.snapshot, nil) + require.NoError(t, err) + blockHeight = testMetaFail.chain.BlockTip().Height + 1 + + // Connect the transactions to ensure they can actually be connected. + for _, txn := range atomicTxns { + // Connect the transaction. + txHash := txn.Hash() + _, _, _, _, err := utxoView.ConnectTransaction( + txn, txHash, blockHeight, 0, true, false) + if err != nil { + require.Contains(t, err.Error(), RuleErrorInsufficientBalance) + } + } +} + +//---------------------------------------------------------- +// (Testing) Atomic Transactions Setup Helper Functions +//---------------------------------------------------------- + +// The goal of _generateDependentAtomicTransactions is to generate +// a sequence of transactions who CANNOT be reordered meaning they +// must be executed in the sequence returned. This mean transaction +// with position ii in atomicTransactions CANNOT be placed in an +// index jj of atomicTransactions such that jj < ii +// +// How can we generate "dependent atomic transactions" algorithmically using +// TestMeta initialized with _setUpUsersForAtomicTransactionTesting? +// +// (1) Choose an arbitrary starter public key with DESO (m0PkBytes) +// (2) For ii in [0, numberOfTransactions): +// (2a) Generate a new public/private key pair (pub_ii, priv_ii) +// (2b) Have pub_(ii-1) do a max DESO transfer to pub_ii. Use m0PkBytes as pub_(-1). +// (3) Have pub_numberOfTransactions perform a max transfer back to m0PkBytes +// +// Notice that because pub_ii only has DESO at following the transaction with +// the iith index in numberOfTransactions, it's impossible to reorder the transactions +// in any other order. 
Hence, these transactions are dependent on each other. +// +// The length of the returned list of transactions is specified by numberOfTransactions. +func _generateDependentAtomicTransactions( + testMeta *TestMeta, + numberOfTransactions int, +) ( + _atomicTransactions []*MsgDeSoTxn, +) { + var atomicTransactions []*MsgDeSoTxn + var receiverPublicKeysBase58 []string + var receiverPrivateKeysBase58 []string + var receiverBalancesNanos []uint64 + + // Get the initial balance of m0. + m0InitialBalanceNanos := _getBalance( + testMeta.t, testMeta.chain, testMeta.mempool, m0Pub) + + // Generate the atomic transactions. + for ii := 0; ii < numberOfTransactions-1; ii++ { + // Generate a new public/private key pair. + privKey_ii, err := btcec.NewPrivateKey(btcec.S256()) + require.NoError(testMeta.t, err) + pubKey_ii := privKey_ii.PubKey() + receiverPrivateKeysBase58 = append(receiverPrivateKeysBase58, Base58CheckEncode( + privKey_ii.Serialize(), true, testMeta.params)) + receiverPublicKeysBase58 = append(receiverPublicKeysBase58, Base58CheckEncode( + pubKey_ii.SerializeCompressed(), false, testMeta.params)) + + // Determine the sender. + var senderPubKeyBase58 string + var senderPrivKeyBase58 string + var senderBalanceNanos uint64 + if ii == 0 { + senderPubKeyBase58 = m0Pub + senderPrivKeyBase58 = m0Priv + senderBalanceNanos = m0InitialBalanceNanos + } else { + senderPubKeyBase58 = receiverPublicKeysBase58[ii-1] + senderPrivKeyBase58 = receiverPrivateKeysBase58[ii-1] + senderBalanceNanos = receiverBalancesNanos[ii-1] + } + + // Generate a max atomic transfer. + maxTransferTxn, receiverBalanceNanos, err := _generateMaxBasicTransfer( + testMeta, senderPubKeyBase58, senderPrivKeyBase58, senderBalanceNanos, receiverPublicKeysBase58[ii]) + require.NoError(testMeta.t, err) + atomicTransactions = append(atomicTransactions, maxTransferTxn) + + // Store the receiver balance as blockchain state is not updated yet. 
+ receiverBalancesNanos = append(receiverBalancesNanos, receiverBalanceNanos) + } + + // Perform a max transfer back to m0. + maxTransferTxn, _, err := _generateMaxBasicTransfer( + testMeta, + receiverPublicKeysBase58[len(receiverPublicKeysBase58)-1], + receiverPrivateKeysBase58[len(receiverPrivateKeysBase58)-1], + receiverBalancesNanos[len(receiverBalancesNanos)-1], + m0Pub) + require.NoError(testMeta.t, err) + atomicTransactions = append(atomicTransactions, maxTransferTxn) + + return atomicTransactions +} + +func _generateMaxBasicTransfer( + testMeta *TestMeta, + senderPubKeyBase58 string, + senderPrivKeyBase58 string, + senderBalanceNanos uint64, + receiverPubKeyBase58 string, +) ( + _maxTransferTransaction *MsgDeSoTxn, + _transferredAmount uint64, + _err error, +) { + // Convert the public keys to bytes. + senderPubKeyBytes, _, err := Base58CheckDecode(senderPubKeyBase58) + if err != nil { + return nil, 0, err + } + receiverPubKeyBytes, _, err := Base58CheckDecode(receiverPubKeyBase58) + if err != nil { + return nil, 0, err + } + + // Construct a UtxoView for fetching state. + utxoView, err := NewUtxoView( + testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(testMeta.t, err) + + // Construct a transfer template. + // NOTE: Because of variable encoding of txn.TxOutputs[0].AmountNanos, we use MaxUint64 to ensure + // the computed transaction fees are sufficient. + txnNonce, err := utxoView.ConstructNonceForPublicKey(senderPubKeyBytes, uint64(testMeta.chain.BlockTip().Height)) + require.NoError(testMeta.t, err) + txn := &MsgDeSoTxn{ + TxnVersion: 1, + TxInputs: []*DeSoInput{}, + TxOutputs: []*DeSoOutput{ + { + PublicKey: receiverPubKeyBytes, + AmountNanos: math.MaxUint64, + }, + }, + TxnMeta: &BasicTransferMetadata{}, + TxnNonce: txnNonce, + PublicKey: senderPubKeyBytes, + } + + // Compute the fees and update the template to reflect the accurate transfer amount. 
+ txn.TxnFeeNanos = EstimateMaxTxnFeeV1(txn, testMeta.feeRateNanosPerKb) + if txn.TxnFeeNanos > senderBalanceNanos { + return nil, 0, + errors.New("_generateMaxBasicTransfer: transaction fees more than sender balance.") + } + txn.TxOutputs[0].AmountNanos = senderBalanceNanos - txn.TxnFeeNanos + + // Sign and return the transaction. + _signTxn(testMeta.t, txn, senderPrivKeyBase58) + return txn, txn.TxOutputs[0].AmountNanos, nil +} + +// _setUpUsersForAtomicTransactionsTesting is a simple helper function which takes +// with a miner who has a DESO balance equivalent to 10 block rewards assumed +// to be assigned the public key senderPkString. After running _setUpUsersForAtomicTransactionsTesting +// we expect the following test state: +// +// m0Pub - 1e9 nDESO, m0 profile +// m1Pub - 1e6 nDESO, m1 profile +// m2Pub - 1e6 nDESO +// m3Pub - 1e6 nDESO +// m4Pub - 1e6 nDESO +func _setUpUsersForAtomicTransactionsTesting(testMeta *TestMeta) { + // Create on-chain public keys with DESO sent from miner + _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e9) + _registerOrTransferWithTestMeta(testMeta, "m1", senderPkString, m1Pub, senderPrivString, 1e6) + _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e6) + _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e6) + _registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 1e6) + + // Create profile for m0 and m1. 
+ { + _updateProfileWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m0Pub, + m0Priv, + []byte{}, + "m0", + "i am the m0", + shortPic, + 10*100, + 1.25*100*100, + false, + ) + } + { + _updateProfileWithTestMeta( + testMeta, + testMeta.feeRateNanosPerKb, + m1Pub, + m1Priv, + []byte{}, + "m1", + "i am the m1", + shortPic, + 10*100, + 1.25*100*100, + false, + ) + } +} + +func _setUpMinerAndTestMetaForAtomicTransactionTests(t *testing.T) *TestMeta { + // Initialize balance model fork heights. + setBalanceModelBlockHeights(t) + + // Initialize pos fork heights. + setPoSBlockHeights(t, 11, 100) + + // Initialize test chain and miner. + chain, params, db := NewLowDifficultyBlockchain(t) + mempool, miner := NewTestMiner(t, chain, params, true) + + // Ensure DAO coins are enabled (helpful for atomic testing) + params.ForkHeights.DAOCoinBlockHeight = uint32(0) + + // Initialize atomics block height. + params.ForkHeights.ProofOfStake1StateSetupBlockHeight = uint32(11) + GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) + GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) + + // Mine a few blocks to give the senderPkString some money. + for ii := 0; ii < 10; ii++ { + _, err := miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(t, err) + } + + // We build the testMeta obj after mining blocks so that we save the correct block height. 
+ blockHeight := uint64(chain.blockTip().Height) + 1 + + return &TestMeta{ + t: t, + chain: chain, + params: params, + db: db, + mempool: mempool, + miner: miner, + savedHeight: uint32(blockHeight), + feeRateNanosPerKb: uint64(101), + } +} + +//---------------------------------------------------------- +// (Testing) Atomic Transaction Connection Helper Functions +//---------------------------------------------------------- + +func _atomicTransactionsWithTestMeta( + testMeta *TestMeta, + atomicTransactions []*MsgDeSoTxn, + connectTimestamp int64, +) { + // For atomic transaction sanity check reasons, save the ZeroPublicKey's balance. + testMeta.expectedSenderBalances = + append(testMeta.expectedSenderBalances, + _getBalance(testMeta.t, testMeta.chain, nil, + Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params))) + + // Connect the transactions. + currentOps, currentTxn, _, err := _atomicTransactionsWithConnectTimestamp( + testMeta.t, + testMeta.chain, + testMeta.db, + testMeta.params, + atomicTransactions, + connectTimestamp) + require.NoError(testMeta.t, err) + + // Append the transaction as well as the transaction ops. + testMeta.txnOps = append(testMeta.txnOps, currentOps) + testMeta.txns = append(testMeta.txns, currentTxn) +} + +func _atomicTransactionsWithConnectTimestamp( + t *testing.T, + chain *Blockchain, + db *badger.DB, + params *DeSoParams, + atomicTransactions []*MsgDeSoTxn, + connectTimestamp int64, +) ( + _utxoOps []*UtxoOperation, + _txn *MsgDeSoTxn, + _height uint32, + _err error, +) { + assert := assert.New(t) + require := require.New(t) + _ = assert + _ = require + + // Construct a new view to connect the transactions to. + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) + require.NoError(err) + + // Create the atomic transaction wrapper. + txn, totalFees, err := chain.CreateAtomicTxnsWrapper(atomicTransactions, nil) + if err != nil { + return nil, nil, 0, err + } + + // Connect the transaction. 
+ txHash := txn.Hash() + blockHeight := chain.BlockTip().Height + 1 + utxoOps, totalInput, _, fees, err := utxoView.ConnectTransaction( + txn, txHash, blockHeight, connectTimestamp, true, false) + if err != nil { + return nil, nil, 0, err + } + + // Check that the total input reflected from the transaction connect equals the total fees. + require.Equal(totalInput, totalFees) + require.Equal(totalInput, fees) + + // Check that the UtxoOps reflect those of an atomic transaction. + require.Equal(1, len(utxoOps)) + require.Equal(OperationTypeAtomicTxnsWrapper, utxoOps[0].Type) + + // Ensure the transaction can be flushed without issue. + require.NoError(utxoView.FlushToDb(uint64(blockHeight))) + return utxoOps, txn, blockHeight, nil +} From 31944cd0429a312058bc6ca1ac858f0e5c64fd21 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Mon, 11 Mar 2024 14:53:20 -0700 Subject: [PATCH 538/762] Verify wrapper and verify atomic chain tests. --- lib/block_view_atomic_txns_test.go | 206 ++++++++++++++++++++++++++++- lib/blockchain.go | 3 + lib/errors.go | 1 - 3 files changed, 207 insertions(+), 3 deletions(-) diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index 8489de21f..6a8b996ad 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -10,6 +10,165 @@ import ( "testing" ) +func TestVerifyAtomicTxnsWrapperRuleErrors(t *testing.T) { + // Initialize test chain, miner, and testMeta. + testMeta := _setUpMinerAndTestMetaForAtomicTransactionTests(t) + + // Initialize m0, m1, m2, m3, m4. + _setUpUsersForAtomicTransactionsTesting(testMeta) + + // Generate 100 dependent atomic transactions. + atomicTxns := _generateDependentAtomicTransactions(testMeta, 100) + + // Bundle the transactions together in a (valid) wrapper. + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + require.NoError(t, err) + + // Try to use a public key other than the zero public key in the wrapper. 
+ // (This should fail -- RuleErrorAtomicTxnsWrapperPublicKeyMustBeZero) + atomicTxnsWrapperDuplicate, err := atomicTxnsWrapper.Copy() + require.NoError(t, err) + atomicTxnsWrapperDuplicate.PublicKey = m0PkBytes + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapperDuplicate, 0) + require.Contains(t, err.Error(), RuleErrorAtomicTxnsWrapperPublicKeyMustBeZero) + + // Try to sign the wrapper. + // (This should fail -- RuleErrorAtomicTxnsWrapperSignatureMustBeNil) + atomicTxnsWrapperDuplicate, err = atomicTxnsWrapper.Copy() + require.NoError(t, err) + _signTxn(t, atomicTxnsWrapperDuplicate, m0Priv) + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapperDuplicate, 0) + require.Contains(t, err.Error(), RuleErrorAtomicTxnsWrapperSignatureMustBeNil) + + // Try to add inputs to the wrapper. + // (This should fail -- RuleErrorAtomicTxnsWrapperMustHaveZeroInputs) + atomicTxnsWrapperDuplicate, err = atomicTxnsWrapper.Copy() + require.NoError(t, err) + atomicTxnsWrapperDuplicate.TxInputs = append(atomicTxnsWrapperDuplicate.TxInputs, &DeSoInput{ + TxID: ZeroBlockHash, + Index: 0, + }) + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapperDuplicate, 0) + require.Contains(t, err.Error(), RuleErrorAtomicTxnsWrapperMustHaveZeroInputs) + + // Try to add outputs to the wrapper. 
+ // (This should fail -- RuleErrorAtomicTxnsWrapperMustHaveZeroOutputs) + atomicTxnsWrapperDuplicate, err = atomicTxnsWrapper.Copy() + require.NoError(t, err) + atomicTxnsWrapperDuplicate.TxOutputs = append(atomicTxnsWrapperDuplicate.TxOutputs, &DeSoOutput{ + PublicKey: m0PkBytes, + AmountNanos: 10000, + }) + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapperDuplicate, 0) + require.Contains(t, err.Error(), RuleErrorAtomicTxnsWrapperMustHaveZeroOutputs) + + // Try to trigger overflow when summing the inner transactions. + // (This should fail -- RuleErrorAtomicTxnsWrapperHasInternalFeeOverflow) + atomicTxnsWrapperDuplicate, err = atomicTxnsWrapper.Copy() + require.NoError(t, err) + atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[0].TxnFeeNanos = math.MaxUint64 + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapperDuplicate, 0) + require.Contains(t, err.Error(), RuleErrorAtomicTxnsWrapperHasInternalFeeOverflow) + + // Try to mismatch the fees in the wrapper and the total fees of the atomic transactions. + // (This should fail -- RuleErrorAtomicTxnsWrapperMustHaveEqualFeeToInternalTxns) + atomicTxnsWrapperDuplicate, err = atomicTxnsWrapper.Copy() + require.NoError(t, err) + atomicTxnsWrapperDuplicate.TxnFeeNanos = atomicTxnsWrapperDuplicate.TxnFeeNanos - 1 + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapperDuplicate, 0) + require.Contains(t, err.Error(), RuleErrorAtomicTxnsWrapperMustHaveEqualFeeToInternalTxns) + + // Try to use a non-zeroed nonce for the wrapper. 
+ // (This should fail -- RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce) + atomicTxnsWrapperDuplicate, err = atomicTxnsWrapper.Copy() + require.NoError(t, err) + atomicTxnsWrapperDuplicate.TxnNonce.ExpirationBlockHeight = 1 + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapperDuplicate, 0) + require.Contains(t, err.Error(), RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce) +} + +func TestVerifyAtomicTxnsChain(t *testing.T) { + // Initialize test chain, miner, and testMeta. + testMeta := _setUpMinerAndTestMetaForAtomicTransactionTests(t) + + // Initialize m0, m1, m2, m3, m4. + _setUpUsersForAtomicTransactionsTesting(testMeta) + + // Generate 100 dependent atomic transactions. + atomicTxns := _generateDependentAtomicTransactions(testMeta, 100) + + // Bundle the transactions together in a (valid) wrapper. + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + require.NoError(t, err) + + // Try to put an atomic transaction wrapper INSIDE an atomic transaction wrapper. + // (This should fail -- RuleErrorAtomicTxnsHasAtomicTxnsInnerTxn) + atomicTxnsWrapperDuplicate, err := atomicTxnsWrapper.Copy() + require.NoError(t, err) + innerAtomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns[:100], nil) + require.NoError(t, err) + atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns = + atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[100:] + atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns = + append([]*MsgDeSoTxn{innerAtomicTxnsWrapper}, + atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns...) 
+ _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapperDuplicate, 0) + require.Contains(t, err.Error(), RuleErrorAtomicTxnsHasAtomicTxnsInnerTxn) + + // Try to have a transaction not meant for inclusion in an atomic transaction wrapper + // in the atomic transaction wrapper. + // (This should fail -- RuleErrorAtomicTxnsHasNonAtomicInnerTxn) + atomicTxnsWrapperDuplicate, err = atomicTxnsWrapper.Copy() + require.NoError(t, err) + atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[1].ExtraData = make(map[string][]byte) + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapperDuplicate, 0) + require.Contains(t, err.Error(), RuleErrorAtomicTxnsHasNonAtomicInnerTxn) + + // Remove the chain length starter pointer for the atomic transactions. + // (This should fail -- RuleErrorAtomicTxnsMustStartWithChainLength) + atomicTxnsWrapperDuplicate, err = atomicTxnsWrapper.Copy() + require.NoError(t, err) + delete( + atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[0].ExtraData, + AtomicTxnsChainLength) + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapperDuplicate, 0) + require.Contains(t, err.Error(), RuleErrorAtomicTxnsMustStartWithChainLength) + + // Add a second start point for the atomic transactions. 
+ // (This should fail -- RuleErrorAtomicTxnsHasMoreThanOneStartPoint) + atomicTxnsWrapperDuplicate, err = atomicTxnsWrapper.Copy() + require.NoError(t, err) + atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[1].ExtraData[AtomicTxnsChainLength] = + atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[0].ExtraData[AtomicTxnsChainLength] + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapperDuplicate, 0) + require.Contains(t, err.Error(), RuleErrorAtomicTxnsHasMoreThanOneStartPoint) + + // Try to change the sequence of transactions, ultimately breaking the chain. + // (This should fail -- RuleErrorAtomicTxnsHasBrokenChain) + atomicTxnsWrapperDuplicate, err = atomicTxnsWrapper.Copy() + require.NoError(t, err) + txnCopy, err := atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[2].Copy() + require.NoError(t, err) + atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[2] = + atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[1] + atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[1] = txnCopy + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapperDuplicate, 0) + require.Contains(t, err.Error(), RuleErrorAtomicTxnsHasBrokenChain) + +} + func TestDependentAtomicTransactionGeneration(t *testing.T) { // Initialize test chain, miner, and testMeta. testMeta := _setUpMinerAndTestMetaForAtomicTransactionTests(t) @@ -17,7 +176,7 @@ func TestDependentAtomicTransactionGeneration(t *testing.T) { // Initialize m0, m1, m2, m3, m4. _setUpUsersForAtomicTransactionsTesting(testMeta) - // Generate 1,000 dependent atomic transactions. + // Generate 100 dependent atomic transactions. atomicTxns := _generateDependentAtomicTransactions(testMeta, 100) // Construct a new view to connect the transactions to. 
@@ -26,18 +185,29 @@ func TestDependentAtomicTransactionGeneration(t *testing.T) { require.NoError(t, err) blockHeight := testMeta.chain.BlockTip().Height + 1 + // Get the initial balance for m0. + m0InitialBalanceNanos := _getBalance(t, testMeta.chain, testMeta.mempool, m0Pub) + // Connect the transactions to ensure they can actually be connected. + var totalFees uint64 for _, txn := range atomicTxns { // Connect the transaction. txHash := txn.Hash() - _, _, _, _, err := utxoView.ConnectTransaction( + _, _, _, fees, err := utxoView.ConnectTransaction( txn, txHash, blockHeight, 0, true, false) require.NoError(t, err) + totalFees += fees } // Flush the view to ensure everything is working properly. require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) + // Get the final balance for m0. + m0FinalBalanceNanos := _getBalance(t, testMeta.chain, testMeta.mempool, m0Pub) + + // Check that fees were paid. + require.Equal(t, m0InitialBalanceNanos-totalFees, m0FinalBalanceNanos) + // // Now we test that the transactions are truly dependent on each-other by reorganizing them. // @@ -386,3 +556,35 @@ func _atomicTransactionsWithConnectTimestamp( require.NoError(utxoView.FlushToDb(uint64(blockHeight))) return utxoOps, txn, blockHeight, nil } + +func _atomicTransactionsWrapperWithConnectTimestamp( + t *testing.T, + chain *Blockchain, + db *badger.DB, + params *DeSoParams, + atomicTransactionsWrapper *MsgDeSoTxn, + connectTimestamp int64, +) ( + _utxoOps []*UtxoOperation, + _err error, +) { + assert := assert.New(t) + require := require.New(t) + _ = assert + _ = require + + // Construct a new view to connect the transactions to. + utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) + require.NoError(err) + + // Connect the transaction. 
+ txHash := atomicTransactionsWrapper.Hash() + blockHeight := chain.BlockTip().Height + 1 + utxoOps, _, _, _, err := utxoView.ConnectTransaction( + atomicTransactionsWrapper, txHash, blockHeight, connectTimestamp, true, false) + if err != nil { + return nil, err + } + + return utxoOps, nil +} diff --git a/lib/blockchain.go b/lib/blockchain.go index 96bdd4a45..e380dd813 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5840,6 +5840,9 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( } // Set the transaction extra data and append to the chained list. + if len(txnDuplicate.ExtraData) == 0 { + txnDuplicate.ExtraData = make(map[string][]byte) + } txnDuplicate.ExtraData[NextAtomicTxnPreHash] = nextHash.ToBytes() txnDuplicate.ExtraData[PreviousAtomicTxnPreHash] = prevHash.ToBytes() chainedUnsignedTransactions = append(chainedUnsignedTransactions, txnDuplicate) diff --git a/lib/errors.go b/lib/errors.go index 75a6eb36f..277972d14 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -545,7 +545,6 @@ const ( RuleErrorAtomicTxnsWrapperMustHaveZeroOutputs RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroOutputs" RuleErrorAtomicTxnsWrapperHasInternalFeeOverflow RuleError = "RuleErrorAtomicTxnsWrapperHasInternalFeeOverflow" RuleErrorAtomicTxnsWrapperMustHaveEqualFeeToInternalTxns RuleError = "RuleErrorAtomicTxnsWrapperMustHaveEqualFeeToInternalTxns" - RuleErrorAtomicTxnsWrapperMustHaveZeroExtraData RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroExtraData" RuleErrorAtomicTxnsWrapperSignatureMustBeNil RuleError = "RuleErrorAtomicTxnsWrapperSignatureMustBeNil" RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce" RuleErrorAtomicTxnsMustStartWithChainLength RuleError = "RuleErrorAtomicTxnsMustStartWithChainLength" From 02d8120f66fee6fedb45f3b0da7508988e830fcb Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 13 Mar 2024 17:15:36 -0700 Subject: [PATCH 539/762] Add atomic transactions fee testing, simplify doubly 
linked list indexing. --- lib/block_view_atomic_txns.go | 60 ++++++++++++---------------- lib/block_view_atomic_txns_test.go | 63 +++++++++++++++++++++++++++++- lib/blockchain.go | 27 ++----------- lib/errors.go | 1 + 4 files changed, 91 insertions(+), 60 deletions(-) diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index fe0b7df79..8bbc1ef59 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -215,6 +215,21 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( fmt.Errorf("_connectAtomicTxnsWrapper: TxnMeta type: %v", txn.TxnMeta.GetTxnType().GetTxnString()) } + // Validate that the internal transactions cumulatively pay enough in fees to + // cover the atomic transactions AS WELL AS the wrapper. We validate this + // here to ensure we can test for these edge cases as they're also logically caught + // by _verifyAtomicTxnsWrapper. + if txnSizeBytes != 0 && bav.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB != 0 { + // Make sure there isn't overflow in the fee. + if txn.TxnFeeNanos != ((txn.TxnFeeNanos * 1000) / 1000) { + return nil, 0, 0, 0, RuleErrorOverflowDetectedInFeeRateCalculation + } + // If the fee is less than the minimum network fee per KB, return an error. + if (txn.TxnFeeNanos*1000)/uint64(txnSizeBytes) < bav.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB { + return nil, 0, 0, 0, RuleErrorTxnFeeBelowNetworkMinimum + } + } + // Verify the wrapper of the transaction. This does not verify the txn.TxnMeta contents, just that // the wrapper is well formatted. if err := _verifyAtomicTxnsWrapper(txn); err != nil { @@ -265,19 +280,6 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( } } - // Validate that the internal transactions cumulatively pay enough in fees to - // cover the atomic transactions AS WELL AS the wrapper. - if txnSizeBytes != 0 && bav.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB != 0 { - // Make sure there isn't overflow in the fee. 
- if txn.TxnFeeNanos != ((txn.TxnFeeNanos * 1000) / 1000) { - return nil, 0, 0, 0, RuleErrorOverflowDetectedInFeeRateCalculation - } - // If the fee is less than the minimum network fee per KB, return an error. - if (txn.TxnFeeNanos*1000)/uint64(txnSizeBytes) < bav.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB { - return nil, 0, 0, 0, RuleErrorTxnFeeBelowNetworkMinimum - } - } - // Construct a UtxoOp for the atomic transactions wrapper. utxoOpsForTxn = append(utxoOpsForTxn, &UtxoOperation{ Type: OperationTypeAtomicTxnsWrapper, @@ -350,6 +352,11 @@ func _verifyAtomicTxnsWrapper(txn *MsgDeSoTxn) error { } func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsWrapperMetadata) error { + // Check that there's any transactions at all. + if len(txnMeta.Txns) == 0 { + return RuleErrorAtomicTxnsHasNoTransactions + } + // Validate: // (1) The inner transactions are not additional redundant atomic transactions wrappers. // (2) The inner transactions are meant to be included in an atomic transaction. @@ -386,40 +393,21 @@ func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsWrapperMetadata) error { atomicHashes = append(atomicHashes, innerTxnAtomicHash) } - // Construct special helper functions for circular doubly linked list indexing. - nextIndex := func(currentIndex int, chainLength int) int { - // Check for the special case of an atomic chain of length 1. - if chainLength == 1 { - return currentIndex - } - return (currentIndex + 1) % chainLength - } - prevIndex := func(currentIndex int, chainLength int) int { - // Check for the special case of an atomic chain of length 1. - if chainLength == 1 { - return currentIndex - } - - // Check for the wrap around case. - if currentIndex == 0 { - return chainLength - 1 - } - return currentIndex - 1 - } - // Validate the chain sequence specified. for ii, innerTxn := range txnMeta.Txns { // Check the next transaction. 
+ nextIndex := (ii + 1) % len(txnMeta.Txns) if !bytes.Equal( innerTxn.ExtraData[NextAtomicTxnPreHash], - atomicHashes[nextIndex(ii, len(txnMeta.Txns))].ToBytes()) { + atomicHashes[nextIndex].ToBytes()) { return RuleErrorAtomicTxnsHasBrokenChain } // Check the previous transaction + prevIndex := (ii - 1 + len(txnMeta.Txns)) % len(txnMeta.Txns) if !bytes.Equal( innerTxn.ExtraData[PreviousAtomicTxnPreHash], - atomicHashes[prevIndex(ii, len(txnMeta.Txns))].ToBytes()) { + atomicHashes[prevIndex].ToBytes()) { return RuleErrorAtomicTxnsHasBrokenChain } } diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index 6a8b996ad..01be8bac6 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -10,6 +10,56 @@ import ( "testing" ) +func TestConnectAtomicTxnsWrapperRuleErrors(t *testing.T) { + // Initialize test chain, miner, and testMeta. + testMeta := _setUpMinerAndTestMetaForAtomicTransactionTests(t) + + // Initialize m0, m1, m2, m3, m4. + _setUpUsersForAtomicTransactionsTesting(testMeta) + + // Try and generate an atomic transaction wrapper larger than half the block size. + // For simplicity, we estimate a basic transfer at greater than 100 bytes. + // (This should fail -- RuleErrorTxnTooBig) + numTxnsToGenerate := testMeta.params.MaxBlockSizeBytes / 200 + atomicTxns := _generateDependentAtomicTransactions(testMeta, int(numTxnsToGenerate)) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + require.NoError(t, err) + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapper, 0) + require.Contains(t, err.Error(), RuleErrorTxnTooBig) + + // Update the network fees to test for fee rule error testing. 
+ testMeta.params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true + _updateGlobalParamsEntryWithTestMeta( + testMeta, + 200, + paramUpdaterPub, + paramUpdaterPriv, + 100, + 100, + 100, + 100, + 100, + ) + + // Try and cause overflow in the atomic transactions wrapper fee verification. + // (This should fail -- RuleErrorOverflowDetectedInFeeRateCalculation) + atomicTxns = _generateDependentAtomicTransactions(testMeta, int(100)) + atomicTxnsWrapper, _, err = testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + require.NoError(t, err) + atomicTxnsWrapper.TxnFeeNanos = math.MaxUint64 + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapper, 0) + require.Contains(t, err.Error(), RuleErrorOverflowDetectedInFeeRateCalculation) + + // Have a fee below the network minimum. + // (This should fail -- RuleErrorTxnFeeBelowNetworkMinimum) + atomicTxnsWrapper.TxnFeeNanos = 0 + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapper, 0) + require.Contains(t, err.Error(), RuleErrorTxnFeeBelowNetworkMinimum) +} + func TestVerifyAtomicTxnsWrapperRuleErrors(t *testing.T) { // Initialize test chain, miner, and testMeta. testMeta := _setUpMinerAndTestMetaForAtomicTransactionTests(t) @@ -108,9 +158,19 @@ func TestVerifyAtomicTxnsChain(t *testing.T) { atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) require.NoError(t, err) + // Try to remove all the inner transactions. 
+ // (This should fail -- RuleErrorAtomicTxnsHasNoTransactions) + atomicTxnsWrapperDuplicate, err := atomicTxnsWrapper.Copy() + require.NoError(t, err) + atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns = []*MsgDeSoTxn{} + atomicTxnsWrapperDuplicate.TxnFeeNanos = 0 + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapperDuplicate, 0) + require.Contains(t, err.Error(), RuleErrorAtomicTxnsHasNoTransactions) + // Try to put an atomic transaction wrapper INSIDE an atomic transaction wrapper. // (This should fail -- RuleErrorAtomicTxnsHasAtomicTxnsInnerTxn) - atomicTxnsWrapperDuplicate, err := atomicTxnsWrapper.Copy() + atomicTxnsWrapperDuplicate, err = atomicTxnsWrapper.Copy() require.NoError(t, err) innerAtomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns[:100], nil) require.NoError(t, err) @@ -403,6 +463,7 @@ func _setUpUsersForAtomicTransactionsTesting(testMeta *TestMeta) { _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e6) _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e6) _registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 1e6) + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 10000) // Create profile for m0 and m1. { diff --git a/lib/blockchain.go b/lib/blockchain.go index e380dd813..a39922f82 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5798,27 +5798,6 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( // transaction extra data. We create a copy of the transactions to ensure we do not // modify the caller's data. - // Construct special helper functions for circular doubly linked list indexing. - nextIndex := func(currentIndex int, chainLength int) int { - // Check for the special case of an atomic chain of length 1. 
- if chainLength == 1 { - return currentIndex - } - return (currentIndex + 1) % chainLength - } - prevIndex := func(currentIndex int, chainLength int) int { - // Check for the special case of an atomic chain of length 1. - if chainLength == 1 { - return currentIndex - } - - // Check for the wrap around case. - if currentIndex == 0 { - return chainLength - 1 - } - return currentIndex - 1 - } - // Construct the chained transactions and keep track of the total fees paid. var chainedUnsignedTransactions []*MsgDeSoTxn var totalFees uint64 @@ -5830,11 +5809,13 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( } // Compute the atomic hashes. - nextHash, err := unsignedTransactions[nextIndex(ii, len(unsignedTransactions))].AtomicHash() + nextIndex := (ii + 1) % len(unsignedTransactions) + nextHash, err := unsignedTransactions[nextIndex].AtomicHash() if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to compute next hash") } - prevHash, err := unsignedTransactions[prevIndex(ii, len(unsignedTransactions))].AtomicHash() + prevIndex := (ii - 1 + len(unsignedTransactions)) % len(unsignedTransactions) + prevHash, err := unsignedTransactions[prevIndex].AtomicHash() if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to copy prev hash") } diff --git a/lib/errors.go b/lib/errors.go index 277972d14..bccb42488 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -547,6 +547,7 @@ const ( RuleErrorAtomicTxnsWrapperMustHaveEqualFeeToInternalTxns RuleError = "RuleErrorAtomicTxnsWrapperMustHaveEqualFeeToInternalTxns" RuleErrorAtomicTxnsWrapperSignatureMustBeNil RuleError = "RuleErrorAtomicTxnsWrapperSignatureMustBeNil" RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce RuleError = "RuleErrorAtomicTxnsWrapperMustHaveZeroedNonce" + RuleErrorAtomicTxnsHasNoTransactions RuleError = "RuleErrorAtomicTxnsHasNoTransactions" RuleErrorAtomicTxnsMustStartWithChainLength RuleError = "RuleErrorAtomicTxnsMustStartWithChainLength" 
RuleErrorAtomicTxnsHasMoreThanOneStartPoint RuleError = "RuleErrorAtomicTxnsHasMoreThanOneStartPoint" RuleErrorAtomicTxnsHasAtomicTxnsInnerTxn RuleError = "RuleErrorAtomicTxnsHasAtomicTxnsInnerTxn" From f05e34791ceca8f188ab6c9565cff51b01deb178 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 13 Mar 2024 17:46:50 -0700 Subject: [PATCH 540/762] Fix CreateAtomicTxnsWrapper and add signature failure for inner transactions test. --- lib/block_view_atomic_txns_test.go | 19 ++++++++++++++ lib/blockchain.go | 40 +++++++++++++++++------------- 2 files changed, 42 insertions(+), 17 deletions(-) diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index 01be8bac6..807728923 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -10,6 +10,25 @@ import ( "testing" ) +func TestAtomicTxnsSignatureFailure(t *testing.T) { + // Initialize test chain, miner, and testMeta. + testMeta := _setUpMinerAndTestMetaForAtomicTransactionTests(t) + + // Initialize m0, m1, m2, m3, m4. + _setUpUsersForAtomicTransactionsTesting(testMeta) + + // Create a series of valid (unsigned) dependent transactions. + atomicTxns := _generateDependentAtomicTransactions(testMeta, int(100)) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + require.NoError(t, err) + + // Try to connect them atomically. + // (This should fail -- RuleErrorInvalidTransactionSignature) + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapper, 0) + require.Contains(t, err.Error(), RuleErrorInvalidTransactionSignature) +} + func TestConnectAtomicTxnsWrapperRuleErrors(t *testing.T) { // Initialize test chain, miner, and testMeta. 
testMeta := _setUpMinerAndTestMetaForAtomicTransactionTests(t) diff --git a/lib/blockchain.go b/lib/blockchain.go index a39922f82..46230bb6b 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5798,46 +5798,52 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( // transaction extra data. We create a copy of the transactions to ensure we do not // modify the caller's data. - // Construct the chained transactions and keep track of the total fees paid. + // Create a copy of the transactions to prevent pointer reuse. var chainedUnsignedTransactions []*MsgDeSoTxn - var totalFees uint64 - for ii, txn := range unsignedTransactions { - // Copy the transaction to prevent pointer reuse. + for _, txn := range unsignedTransactions { txnDuplicate, err := txn.Copy() if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to copy transaction") } + chainedUnsignedTransactions = append(chainedUnsignedTransactions, txnDuplicate) + } + + // Set the starting point of the atomic transaction. + // We must do this first to ensure the atomic hash is properly computed for the first transaction. + if len(chainedUnsignedTransactions[0].ExtraData) == 0 { + chainedUnsignedTransactions[0].ExtraData = make(map[string][]byte) + } + chainedUnsignedTransactions[0].ExtraData[AtomicTxnsChainLength] = UintToBuf(uint64(len(unsignedTransactions))) + // Construct the chained transactions and keep track of the total fees paid. + var totalFees uint64 + for ii, txn := range chainedUnsignedTransactions { // Compute the atomic hashes. 
- nextIndex := (ii + 1) % len(unsignedTransactions) - nextHash, err := unsignedTransactions[nextIndex].AtomicHash() + nextIndex := (ii + 1) % len(chainedUnsignedTransactions) + nextHash, err := chainedUnsignedTransactions[nextIndex].AtomicHash() if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to compute next hash") } - prevIndex := (ii - 1 + len(unsignedTransactions)) % len(unsignedTransactions) - prevHash, err := unsignedTransactions[prevIndex].AtomicHash() + prevIndex := (ii - 1 + len(chainedUnsignedTransactions)) % len(chainedUnsignedTransactions) + prevHash, err := chainedUnsignedTransactions[prevIndex].AtomicHash() if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to copy prev hash") } // Set the transaction extra data and append to the chained list. - if len(txnDuplicate.ExtraData) == 0 { - txnDuplicate.ExtraData = make(map[string][]byte) + if len(txn.ExtraData) == 0 { + txn.ExtraData = make(map[string][]byte) } - txnDuplicate.ExtraData[NextAtomicTxnPreHash] = nextHash.ToBytes() - txnDuplicate.ExtraData[PreviousAtomicTxnPreHash] = prevHash.ToBytes() - chainedUnsignedTransactions = append(chainedUnsignedTransactions, txnDuplicate) + txn.ExtraData[NextAtomicTxnPreHash] = nextHash.ToBytes() + txn.ExtraData[PreviousAtomicTxnPreHash] = prevHash.ToBytes() // Track the total fees paid. - totalFees, err = SafeUint64().Add(totalFees, txnDuplicate.TxnFeeNanos) + totalFees, err = SafeUint64().Add(totalFees, txn.TxnFeeNanos) if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: total fee overflow") } } - // Set the starting point for the atomic transactions. - chainedUnsignedTransactions[0].ExtraData[AtomicTxnsChainLength] = UintToBuf(uint64(len(unsignedTransactions))) - // Create an atomic transactions wrapper taking special care to the rules specified in _verifyAtomicTxnsWrapper. 
// Because we do not call AddInputsAndChangeToTransaction on the wrapper, we must specify ALL fields exactly. txn := &MsgDeSoTxn{ From cbdfedd67cba322de089d456a1c7b3c6c35cdbca Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 13 Mar 2024 20:19:13 -0700 Subject: [PATCH 541/762] Atomicity test for atomic transactions. --- lib/block_view_atomic_txns_test.go | 229 ++++++++++++++++------------- 1 file changed, 130 insertions(+), 99 deletions(-) diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index 807728923..4e6383447 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -1,6 +1,7 @@ package lib import ( + "bytes" "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v3" "github.com/pkg/errors" @@ -10,6 +11,87 @@ import ( "testing" ) +func TestAtomicTxnsWrapperAtomicity(t *testing.T) { + // Initialize test chain, miner, and testMeta. + testMeta := _setUpMinerAndTestMetaForAtomicTransactionTests(t) + + // Initialize m0, m1, m2, m3, m4. + _setUpUsersForAtomicTransactionsTesting(testMeta) + + // Create a series of valid (unsigned) dependent transactions. + atomicTxns, signerPrivKeysBase58 := _generateUnsignedDependentAtomicTransactions(testMeta, int(100)) + + // Fetch all starting balances for affected public keys. + fetchTransactorBalances := func(transactions []*MsgDeSoTxn) []uint64 { + var balancesNanos []uint64 + for _, txn := range transactions { + balancesNanos = append(balancesNanos, + _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, + Base58CheckEncode(txn.PublicKey, false, testMeta.params))) + } + return balancesNanos + } + startingBalances := fetchTransactorBalances(atomicTxns) + + // Wrap the transactions in an atomic wrapper. + // NOTE: This must be done before signing to ensure the extra data is present. 
+ atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + require.NoError(t, err) + + // Sign all but the final transaction, sign the last one incorrectly. + for ii := range atomicTxns { + // Sign the transaction. + if ii != len(atomicTxns)-1 { + _signTxn(t, + atomicTxnsWrapper.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[ii], + signerPrivKeysBase58[ii], + ) + } else { + _signTxn(t, + atomicTxnsWrapper.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[ii], + signerPrivKeysBase58[(ii+1)%len(atomicTxns)], + ) + } + } + + // Try to connect the atomic transaction wrapper. + // This should fail on the final transaction as it's incorrectly signed. + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapper, 0) + require.Contains(t, err.Error(), RuleErrorInvalidTransactionSignature) + + // Check that the balances are not updated. + // This ensures that if a single transaction within the atomic transaction + // fails, the entire transaction as a whole fails. + failingBalances := fetchTransactorBalances(atomicTxns) + for ii, failingBalance := range failingBalances { + require.Equal(t, startingBalances[ii], failingBalance) + } + + // Sign the final transaction and make a valid atomic transaction. + _signTxn(t, + atomicTxnsWrapper.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[len(atomicTxns)-1], + signerPrivKeysBase58[len(atomicTxns)-1], + ) + + // Try to connect the atomic transaction, this should now succeed. + _, err = _atomicTransactionsWrapperWithConnectTimestamp( + t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapper, 0) + require.NoError(t, err) + + // Validate that only M0's balance has changed by the total fees paid. + // This is the expected output from the artificially generated dependent transactions + // and shows the transactions were connected properly. 
+ endingBalances := fetchTransactorBalances(atomicTxns) + for ii, endingBalance := range endingBalances { + if bytes.Equal(atomicTxns[ii].PublicKey, m0PkBytes) { + require.Equal(t, startingBalances[ii]-atomicTxnsWrapper.TxnFeeNanos, endingBalance) + } else { + require.Equal(t, uint64(0), endingBalance) + } + } +} + func TestAtomicTxnsSignatureFailure(t *testing.T) { // Initialize test chain, miner, and testMeta. testMeta := _setUpMinerAndTestMetaForAtomicTransactionTests(t) @@ -18,7 +100,13 @@ func TestAtomicTxnsSignatureFailure(t *testing.T) { _setUpUsersForAtomicTransactionsTesting(testMeta) // Create a series of valid (unsigned) dependent transactions. - atomicTxns := _generateDependentAtomicTransactions(testMeta, int(100)) + atomicTxns, signerPrivKeysBase58 := _generateUnsignedDependentAtomicTransactions(testMeta, int(100)) + + // Sign them incorrectly then make them atomic. + for ii, txn := range atomicTxns { + nextIndex := (ii + 1) % len(atomicTxns) + _signTxn(t, txn, signerPrivKeysBase58[nextIndex]) + } atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) require.NoError(t, err) @@ -40,7 +128,7 @@ func TestConnectAtomicTxnsWrapperRuleErrors(t *testing.T) { // For simplicity, we estimate a basic transfer at greater than 100 bytes. // (This should fail -- RuleErrorTxnTooBig) numTxnsToGenerate := testMeta.params.MaxBlockSizeBytes / 200 - atomicTxns := _generateDependentAtomicTransactions(testMeta, int(numTxnsToGenerate)) + atomicTxns, _ := _generateSignedDependentAtomicTransactions(testMeta, int(numTxnsToGenerate)) atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) require.NoError(t, err) _, err = _atomicTransactionsWrapperWithConnectTimestamp( @@ -63,7 +151,7 @@ func TestConnectAtomicTxnsWrapperRuleErrors(t *testing.T) { // Try and cause overflow in the atomic transactions wrapper fee verification. 
// (This should fail -- RuleErrorOverflowDetectedInFeeRateCalculation) - atomicTxns = _generateDependentAtomicTransactions(testMeta, int(100)) + atomicTxns, _ = _generateSignedDependentAtomicTransactions(testMeta, int(100)) atomicTxnsWrapper, _, err = testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) require.NoError(t, err) atomicTxnsWrapper.TxnFeeNanos = math.MaxUint64 @@ -87,7 +175,7 @@ func TestVerifyAtomicTxnsWrapperRuleErrors(t *testing.T) { _setUpUsersForAtomicTransactionsTesting(testMeta) // Generate 100 dependent atomic transactions. - atomicTxns := _generateDependentAtomicTransactions(testMeta, 100) + atomicTxns, _ := _generateSignedDependentAtomicTransactions(testMeta, 100) // Bundle the transactions together in a (valid) wrapper. atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) @@ -171,7 +259,7 @@ func TestVerifyAtomicTxnsChain(t *testing.T) { _setUpUsersForAtomicTransactionsTesting(testMeta) // Generate 100 dependent atomic transactions. - atomicTxns := _generateDependentAtomicTransactions(testMeta, 100) + atomicTxns, _ := _generateSignedDependentAtomicTransactions(testMeta, 100) // Bundle the transactions together in a (valid) wrapper. atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) @@ -256,7 +344,7 @@ func TestDependentAtomicTransactionGeneration(t *testing.T) { _setUpUsersForAtomicTransactionsTesting(testMeta) // Generate 100 dependent atomic transactions. - atomicTxns := _generateDependentAtomicTransactions(testMeta, 100) + atomicTxns, _ := _generateSignedDependentAtomicTransactions(testMeta, 100) // Construct a new view to connect the transactions to. 
utxoView, err := NewUtxoView( @@ -324,7 +412,25 @@ func TestDependentAtomicTransactionGeneration(t *testing.T) { // (Testing) Atomic Transactions Setup Helper Functions //---------------------------------------------------------- -// The goal of _generateDependentAtomicTransactions is to generate +func _generateSignedDependentAtomicTransactions( + testMeta *TestMeta, + numberOfTransactions int, +) ( + _atomicTransactions []*MsgDeSoTxn, + _signerPrivKeysBase58 []string, +) { + // Generate unsigned transactions. + atomicTransactions, signerPrivKeysBase58 := _generateUnsignedDependentAtomicTransactions( + testMeta, numberOfTransactions) + + // Sign transactions and return. + for ii, txn := range atomicTransactions { + _signTxn(testMeta.t, txn, signerPrivKeysBase58[ii]) + } + return atomicTransactions, signerPrivKeysBase58 +} + +// The goal of _generateUnsignedDependentAtomicTransactions is to generate // a sequence of transactions who CANNOT be reordered meaning they // must be executed in the sequence returned. This mean transaction // with position ii in atomicTransactions CANNOT be placed in an @@ -344,11 +450,12 @@ func TestDependentAtomicTransactionGeneration(t *testing.T) { // in any other order. Hence, these transactions are dependent on each other. // // The length of the returned list of transactions is specified by numberOfTransactions. -func _generateDependentAtomicTransactions( +func _generateUnsignedDependentAtomicTransactions( testMeta *TestMeta, numberOfTransactions int, ) ( _atomicTransactions []*MsgDeSoTxn, + _signerPrivKeysBase58 []string, ) { var atomicTransactions []*MsgDeSoTxn var receiverPublicKeysBase58 []string @@ -372,21 +479,18 @@ func _generateDependentAtomicTransactions( // Determine the sender. 
var senderPubKeyBase58 string - var senderPrivKeyBase58 string var senderBalanceNanos uint64 if ii == 0 { senderPubKeyBase58 = m0Pub - senderPrivKeyBase58 = m0Priv senderBalanceNanos = m0InitialBalanceNanos } else { senderPubKeyBase58 = receiverPublicKeysBase58[ii-1] - senderPrivKeyBase58 = receiverPrivateKeysBase58[ii-1] senderBalanceNanos = receiverBalancesNanos[ii-1] } // Generate a max atomic transfer. - maxTransferTxn, receiverBalanceNanos, err := _generateMaxBasicTransfer( - testMeta, senderPubKeyBase58, senderPrivKeyBase58, senderBalanceNanos, receiverPublicKeysBase58[ii]) + maxTransferTxn, receiverBalanceNanos, err := _generateUnsignedMaxBasicTransfer( + testMeta, senderPubKeyBase58, senderBalanceNanos, receiverPublicKeysBase58[ii]) require.NoError(testMeta.t, err) atomicTransactions = append(atomicTransactions, maxTransferTxn) @@ -395,22 +499,17 @@ func _generateDependentAtomicTransactions( } // Perform a max transfer back to m0. - maxTransferTxn, _, err := _generateMaxBasicTransfer( - testMeta, - receiverPublicKeysBase58[len(receiverPublicKeysBase58)-1], - receiverPrivateKeysBase58[len(receiverPrivateKeysBase58)-1], - receiverBalancesNanos[len(receiverBalancesNanos)-1], - m0Pub) + maxTransferTxn, _, err := _generateUnsignedMaxBasicTransfer( + testMeta, receiverPublicKeysBase58[len(receiverPublicKeysBase58)-1], receiverBalancesNanos[len(receiverBalancesNanos)-1], m0Pub) require.NoError(testMeta.t, err) atomicTransactions = append(atomicTransactions, maxTransferTxn) - return atomicTransactions + return atomicTransactions, append([]string{m0Priv}, receiverPrivateKeysBase58...) 
} -func _generateMaxBasicTransfer( +func _generateUnsignedMaxBasicTransfer( testMeta *TestMeta, senderPubKeyBase58 string, - senderPrivKeyBase58 string, senderBalanceNanos uint64, receiverPubKeyBase58 string, ) ( @@ -456,12 +555,11 @@ func _generateMaxBasicTransfer( txn.TxnFeeNanos = EstimateMaxTxnFeeV1(txn, testMeta.feeRateNanosPerKb) if txn.TxnFeeNanos > senderBalanceNanos { return nil, 0, - errors.New("_generateMaxBasicTransfer: transaction fees more than sender balance.") + errors.New("_generateUnsignedMaxBasicTransfer: transaction fees more than sender balance.") } txn.TxOutputs[0].AmountNanos = senderBalanceNanos - txn.TxnFeeNanos - // Sign and return the transaction. - _signTxn(testMeta.t, txn, senderPrivKeyBase58) + // Return the transaction. return txn, txn.TxOutputs[0].AmountNanos, nil } @@ -561,43 +659,15 @@ func _setUpMinerAndTestMetaForAtomicTransactionTests(t *testing.T) *TestMeta { // (Testing) Atomic Transaction Connection Helper Functions //---------------------------------------------------------- -func _atomicTransactionsWithTestMeta( - testMeta *TestMeta, - atomicTransactions []*MsgDeSoTxn, - connectTimestamp int64, -) { - // For atomic transaction sanity check reasons, save the ZeroPublicKey's balance. - testMeta.expectedSenderBalances = - append(testMeta.expectedSenderBalances, - _getBalance(testMeta.t, testMeta.chain, nil, - Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params))) - - // Connect the transactions. - currentOps, currentTxn, _, err := _atomicTransactionsWithConnectTimestamp( - testMeta.t, - testMeta.chain, - testMeta.db, - testMeta.params, - atomicTransactions, - connectTimestamp) - require.NoError(testMeta.t, err) - - // Append the transaction as well as the transaction ops. 
- testMeta.txnOps = append(testMeta.txnOps, currentOps) - testMeta.txns = append(testMeta.txns, currentTxn) -} - -func _atomicTransactionsWithConnectTimestamp( +func _atomicTransactionsWrapperWithConnectTimestamp( t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoParams, - atomicTransactions []*MsgDeSoTxn, + atomicTransactionsWrapper *MsgDeSoTxn, connectTimestamp int64, ) ( _utxoOps []*UtxoOperation, - _txn *MsgDeSoTxn, - _height uint32, _err error, ) { assert := assert.New(t) @@ -609,24 +679,17 @@ func _atomicTransactionsWithConnectTimestamp( utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) require.NoError(err) - // Create the atomic transaction wrapper. - txn, totalFees, err := chain.CreateAtomicTxnsWrapper(atomicTransactions, nil) - if err != nil { - return nil, nil, 0, err - } - // Connect the transaction. - txHash := txn.Hash() + txHash := atomicTransactionsWrapper.Hash() blockHeight := chain.BlockTip().Height + 1 - utxoOps, totalInput, _, fees, err := utxoView.ConnectTransaction( - txn, txHash, blockHeight, connectTimestamp, true, false) + utxoOps, totalInput, totalOutput, totalFees, err := utxoView.ConnectTransaction( + atomicTransactionsWrapper, txHash, blockHeight, connectTimestamp, true, false) if err != nil { - return nil, nil, 0, err + return nil, err } // Check that the total input reflected from the transaction connect equals the total fees. - require.Equal(totalInput, totalFees) - require.Equal(totalInput, fees) + require.Equal(totalInput, totalOutput+totalFees) // Check that the UtxoOps reflect those of an atomic transaction. require.Equal(1, len(utxoOps)) @@ -634,37 +697,5 @@ func _atomicTransactionsWithConnectTimestamp( // Ensure the transaction can be flushed without issue. 
require.NoError(utxoView.FlushToDb(uint64(blockHeight))) - return utxoOps, txn, blockHeight, nil -} - -func _atomicTransactionsWrapperWithConnectTimestamp( - t *testing.T, - chain *Blockchain, - db *badger.DB, - params *DeSoParams, - atomicTransactionsWrapper *MsgDeSoTxn, - connectTimestamp int64, -) ( - _utxoOps []*UtxoOperation, - _err error, -) { - assert := assert.New(t) - require := require.New(t) - _ = assert - _ = require - - // Construct a new view to connect the transactions to. - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(err) - - // Connect the transaction. - txHash := atomicTransactionsWrapper.Hash() - blockHeight := chain.BlockTip().Height + 1 - utxoOps, _, _, _, err := utxoView.ConnectTransaction( - atomicTransactionsWrapper, txHash, blockHeight, connectTimestamp, true, false) - if err != nil { - return nil, err - } - return utxoOps, nil } From 751d3a2524dbfb4cdf2366bb3e3eabf87ea1577d Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 18 Mar 2024 18:08:24 -0700 Subject: [PATCH 542/762] PoS Mempool Simplified AddTransaction Validation (#1090) * Remove Spend and Signature verification * review --- lib/legacy_mempool.go | 2 +- lib/pos_block_producer_test.go | 2 +- lib/pos_mempool.go | 43 +++------------- lib/pos_mempool_ledger.go | 93 ---------------------------------- lib/pos_mempool_ledger_test.go | 48 ------------------ lib/pos_mempool_test.go | 25 +-------- lib/server.go | 4 +- 7 files changed, 13 insertions(+), 204 deletions(-) delete mode 100644 lib/pos_mempool_ledger.go delete mode 100644 lib/pos_mempool_ledger_test.go diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 6a3756d37..08fff9fc3 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -241,7 +241,7 @@ func (mp *DeSoMempool) IsRunning() bool { return !mp.stopped } -func (mp *DeSoMempool) AddTransaction(txn *MempoolTransaction, verifySignature bool) error { 
+func (mp *DeSoMempool) AddTransaction(txn *MempoolTransaction) error { return errors.New("Not implemented") } diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index a5f90c519..161d069a3 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -272,7 +272,7 @@ func TestGetBlockTransactions(t *testing.T) { for ii, txn := range txns { // Use the Simulated Transaction Timestamp. mtxn := NewMempoolTransaction(txn, currentTime.Add(time.Duration(ii)*time.Microsecond)) - require.NoError(testMempool.AddTransaction(mtxn, false)) + require.NoError(testMempool.AddTransaction(mtxn)) } newTxns := testMempool.GetTransactions() require.Equal(len(txns), len(newTxns)) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 7aaceb81c..eb4841fd8 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -25,7 +25,7 @@ type Mempool interface { Start() error Stop() IsRunning() bool - AddTransaction(txn *MempoolTransaction, verifySignature bool) error + AddTransaction(txn *MempoolTransaction) error RemoveTransaction(txnHash *BlockHash) error GetTransaction(txnHash *BlockHash) *MempoolTransaction GetTransactions() []*MempoolTransaction @@ -115,9 +115,6 @@ type PosMempool struct { // The persister runs on its dedicated thread and events are used to notify the persister thread whenever // transactions are added/removed from the mempool. The persister thread then updates the database accordingly. persister *MempoolPersister - // ledger is a simple data structure that keeps track of cumulative transaction fees in the mempool. - // The ledger keeps track of how much each user would have spent in fees across all their transactions in the mempool. - ledger *BalanceLedger // nonceTracker is responsible for keeping track of a (public key, nonce) -> Txn index. The index is useful in // facilitating a "replace by higher fee" feature. 
This feature gives users the ability to replace their existing // mempool transaction with a new transaction having the same nonce but higher fee. @@ -188,7 +185,6 @@ func NewPosMempool() *PosMempool { status: PosMempoolStatusNotInitialized, txnRegister: NewTransactionRegister(), feeEstimator: NewPoSFeeEstimator(), - ledger: NewBalanceLedger(), nonceTracker: NewNonceTracker(), quit: make(chan interface{}), } @@ -256,7 +252,6 @@ func (mp *PosMempool) Start() error { // Create the transaction register, the ledger, and the nonce tracker, mp.txnRegister = NewTransactionRegister() mp.txnRegister.Init(mp.globalParams) - mp.ledger = NewBalanceLedger() mp.nonceTracker = NewNonceTracker() // Setup the database and create the persister @@ -370,7 +365,6 @@ func (mp *PosMempool) Stop() { // Reset the transaction register, the ledger, and the nonce tracker. mp.txnRegister.Reset() - mp.ledger.Reset() mp.nonceTracker.Reset() mp.feeEstimator = NewPoSFeeEstimator() close(mp.quit) @@ -472,7 +466,7 @@ func (mp *PosMempool) OnBlockDisconnected(block *MsgDeSoBlock) { // AddTransaction validates a MsgDeSoTxn transaction and adds it to the mempool if it is valid. // If the mempool overflows as a result of adding the transaction, the mempool is pruned. The // transaction signature verification can be skipped if verifySignature is passed as true. -func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction, verifySignature bool) error { +func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction) error { if mtxn == nil || mtxn.GetTxn() == nil { return fmt.Errorf("PosMempool.AddTransaction: Cannot add a nil transaction") } @@ -480,7 +474,7 @@ func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction, verifySignature b // First, validate that the transaction is properly formatted according to BalanceModel. We acquire a read lock on // the mempool. This allows multiple goroutines to safely perform transaction validation concurrently. 
In particular, // transaction signature verification can be parallelized. - if err := mp.validateTransaction(mtxn.GetTxn(), verifySignature); err != nil { + if err := mp.validateTransaction(mtxn.GetTxn()); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem verifying transaction") } @@ -511,7 +505,7 @@ func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction, verifySignature b return nil } -func (mp *PosMempool) validateTransaction(txn *MsgDeSoTxn, verifySignature bool) error { +func (mp *PosMempool) validateTransaction(txn *MsgDeSoTxn) error { mp.RLock() defer mp.RUnlock() @@ -527,32 +521,11 @@ func (mp *PosMempool) validateTransaction(txn *MsgDeSoTxn, verifySignature bool) return errors.Wrapf(err, "PosMempool.AddTransaction: Problem validating transaction nonce") } - if !verifySignature { - return nil - } - - // Check transaction signature. - if _, err := mp.readOnlyLatestBlockView.VerifySignature(txn, uint32(mp.latestBlockHeight)); err != nil { - return errors.Wrapf(err, "PosMempool.AddTransaction: Signature validation failed") - } - return nil } func (mp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) error { userPk := NewPublicKey(txn.Tx.PublicKey) - txnFee := txn.Tx.TxnFeeNanos - - // Validate that the user has enough balance to cover the transaction fees. - spendableBalanceNanos, err := mp.readOnlyLatestBlockView.GetSpendableDeSoBalanceNanosForPublicKey(userPk.ToBytes(), - uint32(mp.latestBlockHeight)) - if err != nil { - return errors.Wrapf(err, "PosMempool.addTransactionNoLock: Problem getting spendable balance") - } - if err := mp.ledger.CanIncreaseEntryWithLimit(*userPk, txnFee, spendableBalanceNanos); err != nil { - return errors.Wrapf(err, "PosMempool.addTransactionNoLock: Problem checking balance increase for transaction with"+ - "hash %v, fee %v", txn.Tx.Hash(), txnFee) - } // Check the nonceTracker to see if this transaction is meant to replace an existing one. 
existingTxn := mp.nonceTracker.GetTxnByPublicKeyNonce(*userPk, *txn.Tx.TxnNonce) @@ -577,8 +550,7 @@ func (mp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) err } } - // At this point the transaction is in the mempool. We can now update the ledger and nonce tracker. - mp.ledger.IncreaseEntry(*userPk, txnFee) + // At this point the transaction is in the mempool. We can now update the nonce tracker. mp.nonceTracker.AddTxnByPublicKeyNonce(txn, *userPk, *txn.Tx.TxnNonce) // Emit an event for the newly added transaction. @@ -641,8 +613,7 @@ func (mp *PosMempool) removeTransactionNoLock(txn *MempoolTx, persistToDb bool) return errors.Wrapf(err, "PosMempool.removeTransactionNoLock: Problem removing txn from register") } - // Remove the txn from the balance ledger and the nonce tracker. - mp.ledger.DecreaseEntry(*userPk, txn.Fee) + // Remove the txn from the nonce tracker. mp.nonceTracker.RemoveTxnByPublicKeyNonce(*userPk, *txn.Tx.TxnNonce) // Emit an event for the removed transaction. @@ -769,7 +740,7 @@ func (mp *PosMempool) refreshNoLock() error { txns := mp.getTransactionsNoLock() for _, txn := range txns { mtxn := NewMempoolTransaction(txn.Tx, txn.Added) - err := tempPool.AddTransaction(mtxn, false) + err := tempPool.AddTransaction(mtxn) if err == nil { continue } diff --git a/lib/pos_mempool_ledger.go b/lib/pos_mempool_ledger.go deleted file mode 100644 index e44096cd4..000000000 --- a/lib/pos_mempool_ledger.go +++ /dev/null @@ -1,93 +0,0 @@ -package lib - -import ( - "github.com/pkg/errors" - "math" - "sync" -) - -// BalanceLedger is a simple in-memory ledger of balances for user public keys. The balances in the ledger can be -// increased or decreased, as long as user's new balance doesn't exceed the user's total max balance. -type BalanceLedger struct { - sync.RWMutex - - // Map of public keys to balances. 
- balances map[PublicKey]uint64 -} - -func NewBalanceLedger() *BalanceLedger { - return &BalanceLedger{ - balances: make(map[PublicKey]uint64), - } -} - -// CanIncreaseEntryWithLimit checks if the user's ledger entry can be increased by delta. If the user's -// balance + delta is less or equal than the balanceLimit, the increase is allowed. Otherwise, an error is returned. -func (bl *BalanceLedger) CanIncreaseEntryWithLimit(publicKey PublicKey, delta uint64, balanceLimit uint64) error { - bl.RLock() - defer bl.RUnlock() - - balance, exists := bl.balances[publicKey] - - // Check for balance overflow. - if exists && delta > math.MaxUint64-balance { - return errors.Errorf("CanIncreaseEntryWithLimit: balance overflow") - } - - newBalance := balance + delta - if newBalance > balanceLimit { - return errors.Errorf("CanIncreaseEntryWithLimit: Balance + delta exceeds balance limit "+ - "(balance: %d, delta %v, balanceLimit: %d)", balance, delta, balanceLimit) - } - return nil -} - -// IncreaseEntry increases the user's ledger entry by delta. CanIncreaseEntryWithLimit should be called before -// calling this function to ensure the increase is allowed. -func (bl *BalanceLedger) IncreaseEntry(publicKey PublicKey, delta uint64) { - bl.Lock() - defer bl.Unlock() - - balance, _ := bl.balances[publicKey] - // Check for balance overflow. - if delta > math.MaxUint64-balance { - bl.balances[publicKey] = math.MaxUint64 - return - } - - bl.balances[publicKey] = balance + delta -} - -// DecreaseEntry decreases the user's ledger entry by delta. -func (bl *BalanceLedger) DecreaseEntry(publicKey PublicKey, delta uint64) { - bl.Lock() - defer bl.Unlock() - - balance, exists := bl.balances[publicKey] - if !exists { - return - } - // Check for balance underflow. - if delta > balance { - delete(bl.balances, publicKey) - return - } - - bl.balances[publicKey] = balance - delta -} - -// GetEntry returns the user's ledger entry. 
-func (bl *BalanceLedger) GetEntry(publicKey PublicKey) uint64 { - bl.RLock() - defer bl.RUnlock() - - balance, _ := bl.balances[publicKey] - return balance -} - -func (bl *BalanceLedger) Reset() { - bl.Lock() - defer bl.Unlock() - - bl.balances = make(map[PublicKey]uint64) -} diff --git a/lib/pos_mempool_ledger_test.go b/lib/pos_mempool_ledger_test.go deleted file mode 100644 index b909ce44b..000000000 --- a/lib/pos_mempool_ledger_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package lib - -import ( - "github.com/stretchr/testify/require" - "math" - "testing" -) - -func TestBalanceLedger(t *testing.T) { - require := require.New(t) - - pk0 := *NewPublicKey(m0PkBytes) - pk1 := *NewPublicKey(m1PkBytes) - pk2 := *NewPublicKey(m2PkBytes) - - // Sanity-check some balance increase and decreases for pk0 - balanceLedger := NewBalanceLedger() - require.NoError(balanceLedger.CanIncreaseEntryWithLimit(pk0, 100, 100)) - require.NoError(balanceLedger.CanIncreaseEntryWithLimit(pk0, 0, 100)) - balanceLedger.IncreaseEntry(pk0, 100) - require.Equal(uint64(100), balanceLedger.GetEntry(pk0)) - require.NoError(balanceLedger.CanIncreaseEntryWithLimit(pk0, 0, 100)) - require.Error(balanceLedger.CanIncreaseEntryWithLimit(pk0, 1, 100)) - require.Error(balanceLedger.CanIncreaseEntryWithLimit(pk0, 0, 99)) - require.Error(balanceLedger.CanIncreaseEntryWithLimit(pk0, math.MaxUint64, math.MaxUint64)) - balanceLedger.DecreaseEntry(pk0, 100) - require.Equal(uint64(0), balanceLedger.GetEntry(pk0)) - balanceLedger.IncreaseEntry(pk0, 10) - require.Equal(uint64(10), balanceLedger.GetEntry(pk0)) - balanceLedger.DecreaseEntry(pk0, 100) - require.Equal(uint64(0), balanceLedger.GetEntry(pk0)) - balanceLedger.IncreaseEntry(pk0, 100) - - // Increase balance for pk1 and pk2 a couple of times - balanceLedger.IncreaseEntry(pk1, 100) - balanceLedger.IncreaseEntry(pk2, 100) - balanceLedger.DecreaseEntry(pk1, 40) - balanceLedger.IncreaseEntry(pk2, 40) - require.Equal(uint64(100), balanceLedger.GetEntry(pk0)) - 
require.Equal(uint64(60), balanceLedger.GetEntry(pk1)) - require.Equal(uint64(140), balanceLedger.GetEntry(pk2)) - - // Test clearing balance ledger - balanceLedger.Reset() - require.Equal(uint64(0), balanceLedger.GetEntry(pk0)) - require.Equal(uint64(0), balanceLedger.GetEntry(pk1)) - require.Equal(uint64(0), balanceLedger.GetEntry(pk2)) -} diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 9afd41694..beeb0ff91 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -301,7 +301,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { _signTxn(t, txn2Low, m1Priv) added2Low := time.Now() mtxn2Low := NewMempoolTransaction(txn2Low, added2Low) - err = mempool.AddTransaction(mtxn2Low, true) + err = mempool.AddTransaction(mtxn2Low) require.Contains(err.Error(), MempoolFailedReplaceByHigherFee) // Now generate a proper new transaction for m1, with same nonce, and higher fee. @@ -393,7 +393,7 @@ func _generateTestTxn(t *testing.T, rand *rand.Rand, feeMin uint64, feeMax uint6 func _wrappedPosMempoolAddTransaction(t *testing.T, mp *PosMempool, txn *MsgDeSoTxn) { added := time.Now() mtxn := NewMempoolTransaction(txn, added) - require.NoError(t, mp.AddTransaction(mtxn, true)) + require.NoError(t, mp.AddTransaction(mtxn)) require.Equal(t, true, _checkPosMempoolIntegrity(t, mp)) } @@ -426,26 +426,5 @@ func _checkPosMempoolIntegrity(t *testing.T, mp *PosMempool) bool { } balances[*pk] += txn.TxnFeeNanos } - - if len(balances) > len(mp.ledger.balances) { - t.Errorf("PosMempool ledger is out of sync length balances (%v) > ledger (%v)", len(balances), len(mp.ledger.balances)) - return false - } - activeBalances := 0 - for pk, ledgerBalance := range mp.ledger.balances { - if ledgerBalance > 0 { - activeBalances++ - } else { - continue - } - if balance, exists := balances[pk]; !exists || ledgerBalance != balance { - t.Errorf("PosMempool ledger is out of sync pk %v", PkToStringTestnet(pk.ToBytes())) - return false - } - } - if len(balances) != 
activeBalances { - t.Errorf("PosMempool ledger is out of sync length") - return false - } return true } diff --git a/lib/server.go b/lib/server.go index ec9a35bee..902433f68 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1935,7 +1935,7 @@ func (srv *Server) _addNewTxn( // Always add the txn to the PoS mempool. This should always succeed if the txn // addition into the PoW mempool succeeded above. mempoolTxn := NewMempoolTransaction(txn, time.Now()) - if err := srv.posMempool.AddTransaction(mempoolTxn, true /*verifySignatures*/); err != nil { + if err := srv.posMempool.AddTransaction(mempoolTxn); err != nil { return nil, errors.Wrapf(err, "Server._addNewTxn: problem adding txn to pos mempool") } @@ -2330,7 +2330,7 @@ func (srv *Server) ProcessSingleTxnWithChainLock(pp *Peer, txn *MsgDeSoTxn) ([]* // Regardless of the consensus protocol we're running (PoW or PoS), we use the PoS mempool's to house all // mempool txns. If a txn can't make it into the PoS mempool, which uses a looser unspent balance check for // the the transactor, then it must be invalid. 
- if err := srv.posMempool.AddTransaction(NewMempoolTransaction(txn, time.Now()), true); err != nil { + if err := srv.posMempool.AddTransaction(NewMempoolTransaction(txn, time.Now())); err != nil { return nil, errors.Wrapf(err, "Server.ProcessSingleTxnWithChainLock: Problem adding transaction to PoS mempool: ") } From 1117ef46ba0784679ffeed11ceab2c07b20d0572 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Mon, 18 Mar 2024 18:09:44 -0700 Subject: [PATCH 543/762] PoS MempoolTx Validated status (#1098) * PoS MempoolTx Validated status * Review --- lib/legacy_mempool.go | 9 ++------ lib/pos_block_producer_test.go | 2 +- lib/pos_mempool.go | 41 +++++++++++++++++++++++++--------- lib/pos_mempool_test.go | 4 ++-- lib/pos_mempool_transaction.go | 11 +++++++++ lib/server.go | 6 ++--- 6 files changed, 49 insertions(+), 24 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 08fff9fc3..10eebc11b 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -254,13 +254,13 @@ func (mp *DeSoMempool) GetTransaction(txnHash *BlockHash) *MempoolTransaction { if !exists { return nil } - return NewMempoolTransaction(mempoolTx.Tx, mempoolTx.Added) + return NewMempoolTransaction(mempoolTx.Tx, mempoolTx.Added, true) } func (mp *DeSoMempool) GetTransactions() []*MempoolTransaction { return collections.Transform( mp.GetOrderedTransactions(), func(mempoolTx *MempoolTx) *MempoolTransaction { - return NewMempoolTransaction(mempoolTx.Tx, mempoolTx.Added) + return NewMempoolTransaction(mempoolTx.Tx, mempoolTx.Added, true) }, ) } @@ -270,11 +270,6 @@ func (mp *DeSoMempool) GetIterator() MempoolIterator { panic("implement me") } -func (mp *DeSoMempool) Refresh() error { - //TODO implement me - panic("implement me") -} - func (mp *DeSoMempool) UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) { //TODO implement me panic("implement me") diff --git a/lib/pos_block_producer_test.go 
b/lib/pos_block_producer_test.go index 161d069a3..15f15258a 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -271,7 +271,7 @@ func TestGetBlockTransactions(t *testing.T) { currentTime := time.Now() for ii, txn := range txns { // Use the Simulated Transaction Timestamp. - mtxn := NewMempoolTransaction(txn, currentTime.Add(time.Duration(ii)*time.Microsecond)) + mtxn := NewMempoolTransaction(txn, currentTime.Add(time.Duration(ii)*time.Microsecond), false) require.NoError(testMempool.AddTransaction(mtxn)) } newTxns := testMempool.GetTransactions() diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index eb4841fd8..325dbbe1f 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -30,7 +30,6 @@ type Mempool interface { GetTransaction(txnHash *BlockHash) *MempoolTransaction GetTransactions() []*MempoolTransaction GetIterator() MempoolIterator - Refresh() error UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) UpdateGlobalParams(globalParams *GlobalParamsEntry) @@ -71,12 +70,14 @@ type MempoolIterator interface { type MempoolTransaction struct { *MsgDeSoTxn TimestampUnixMicro time.Time + Validated bool } -func NewMempoolTransaction(txn *MsgDeSoTxn, timestamp time.Time) *MempoolTransaction { +func NewMempoolTransaction(txn *MsgDeSoTxn, timestamp time.Time, validated bool) *MempoolTransaction { return &MempoolTransaction{ MsgDeSoTxn: txn, TimestampUnixMicro: timestamp, + Validated: validated, } } @@ -88,6 +89,10 @@ func (mtxn *MempoolTransaction) GetTimestamp() time.Time { return mtxn.TimestampUnixMicro } +func (mtxn *MempoolTransaction) IsValidated() bool { + return mtxn.Validated +} + // PosMempool is used by the node to keep track of uncommitted transactions. The main responsibilities of the PosMempool // include addition/removal of transactions, back up of transaction to database, and retrieval of transactions ordered // by Fee-Time algorithm. 
More on the Fee-Time algorithm can be found in the documentation of TransactionRegister. @@ -169,7 +174,7 @@ func (it *PosMempoolIterator) Value() (*MempoolTransaction, bool) { if txn == nil || txn.Tx == nil { return nil, ok } - return NewMempoolTransaction(txn.Tx, txn.Added), ok + return NewMempoolTransaction(txn.Tx, txn.Added, txn.IsValidated()), ok } func (it *PosMempoolIterator) Initialized() bool { @@ -474,7 +479,7 @@ func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction) error { // First, validate that the transaction is properly formatted according to BalanceModel. We acquire a read lock on // the mempool. This allows multiple goroutines to safely perform transaction validation concurrently. In particular, // transaction signature verification can be parallelized. - if err := mp.validateTransaction(mtxn.GetTxn()); err != nil { + if err := mp.checkTransactionSanity(mtxn.GetTxn()); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem verifying transaction") } @@ -505,7 +510,7 @@ func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction) error { return nil } -func (mp *PosMempool) validateTransaction(txn *MsgDeSoTxn) error { +func (mp *PosMempool) checkTransactionSanity(txn *MsgDeSoTxn) error { mp.RLock() defer mp.RUnlock() @@ -524,6 +529,22 @@ func (mp *PosMempool) validateTransaction(txn *MsgDeSoTxn) error { return nil } +func (mp *PosMempool) updateTransactionValidatedStatus(txnHash *BlockHash, validated bool) { + mp.Lock() + defer mp.Unlock() + + if !mp.IsRunning() { + return + } + + txn := mp.txnRegister.GetTransaction(txnHash) + if txn == nil { + return + } + + txn.SetValidated(validated) +} + func (mp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) error { userPk := NewPublicKey(txn.Tx.PublicKey) @@ -534,9 +555,7 @@ func (mp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) err "by higher fee failed. 
New transaction has lower fee.") } - // If we get here, it means that the transaction's sender has enough balance to cover transaction fees. Moreover, if - // this transaction is meant to replace an existing one, at this point we know the new txn has a sufficient fee to - // do so. We can now add the transaction to mempool. + // We can now add the transaction to the mempool. if err := mp.txnRegister.AddTransaction(txn); err != nil { return errors.Wrapf(err, "PosMempool.addTransactionNoLock: Problem adding txn to register") } @@ -642,7 +661,7 @@ func (mp *PosMempool) GetTransaction(txnHash *BlockHash) *MempoolTransaction { return nil } - return NewMempoolTransaction(txn.Tx, txn.Added) + return NewMempoolTransaction(txn.Tx, txn.Added, txn.IsValidated()) } // GetTransactions returns all transactions in the mempool ordered by the Fee-Time algorithm. This function is thread-safe. @@ -661,7 +680,7 @@ func (mp *PosMempool) GetTransactions() []*MempoolTransaction { continue } - mtxn := NewMempoolTransaction(txn.Tx, txn.Added) + mtxn := NewMempoolTransaction(txn.Tx, txn.Added, txn.IsValidated()) mempoolTxns = append(mempoolTxns, mtxn) } return mempoolTxns @@ -739,7 +758,7 @@ func (mp *PosMempool) refreshNoLock() error { var txnsToRemove []*MempoolTx txns := mp.getTransactionsNoLock() for _, txn := range txns { - mtxn := NewMempoolTransaction(txn.Tx, txn.Added) + mtxn := NewMempoolTransaction(txn.Tx, txn.Added, txn.IsValidated()) err := tempPool.AddTransaction(mtxn) if err == nil { continue diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index beeb0ff91..102585c47 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -300,7 +300,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { *txn2Low.TxnNonce = *txn2.TxnNonce _signTxn(t, txn2Low, m1Priv) added2Low := time.Now() - mtxn2Low := NewMempoolTransaction(txn2Low, added2Low) + mtxn2Low := NewMempoolTransaction(txn2Low, added2Low, false) err = mempool.AddTransaction(mtxn2Low) 
require.Contains(err.Error(), MempoolFailedReplaceByHigherFee) @@ -392,7 +392,7 @@ func _generateTestTxn(t *testing.T, rand *rand.Rand, feeMin uint64, feeMax uint6 func _wrappedPosMempoolAddTransaction(t *testing.T, mp *PosMempool, txn *MsgDeSoTxn) { added := time.Now() - mtxn := NewMempoolTransaction(txn, added) + mtxn := NewMempoolTransaction(txn, added, false) require.NoError(t, mp.AddTransaction(mtxn)) require.Equal(t, true, _checkPosMempoolIntegrity(t, mp)) } diff --git a/lib/pos_mempool_transaction.go b/lib/pos_mempool_transaction.go index ceef43aaa..8281f1b4a 100644 --- a/lib/pos_mempool_transaction.go +++ b/lib/pos_mempool_transaction.go @@ -26,6 +26,9 @@ type MempoolTx struct { // The time when the txn was added to the pool Added time.Time + // Whether this transaction has been validated by the mempool + validated bool + // The block height when the txn was added to the pool. It's generally set // to tip+1. Height uint32 @@ -125,3 +128,11 @@ func (mempoolTx *MempoolTx) FromBytes(rr *bytes.Reader) error { *mempoolTx = *newTxn return nil } + +func (mempoolTx *MempoolTx) SetValidated(validated bool) { + mempoolTx.validated = validated +} + +func (mempoolTx *MempoolTx) IsValidated() bool { + return mempoolTx.validated +} diff --git a/lib/server.go b/lib/server.go index 902433f68..a8a4a7aa5 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1920,7 +1920,7 @@ func (srv *Server) _addNewTxn( // Only attempt to add the transaction to the PoW mempool if we're on the // PoW protocol. If we're on the PoW protocol, then we use the PoW mempool's, // txn validity checks to signal whether the txn has been added or not. The PoW - // mempool has stricter txn validity checks than the PoW mempool, so this works + // mempool has stricter txn validity checks than the PoS mempool, so this works // out conveniently, as it allows us to always add a txn to the PoS mempool. 
if srv.params.IsPoWBlockHeight(tipHeight) { _, err := srv.mempool.ProcessTransaction( @@ -1934,7 +1934,7 @@ func (srv *Server) _addNewTxn( // Always add the txn to the PoS mempool. This should always succeed if the txn // addition into the PoW mempool succeeded above. - mempoolTxn := NewMempoolTransaction(txn, time.Now()) + mempoolTxn := NewMempoolTransaction(txn, time.Now(), false) if err := srv.posMempool.AddTransaction(mempoolTxn); err != nil { return nil, errors.Wrapf(err, "Server._addNewTxn: problem adding txn to pos mempool") } @@ -2330,7 +2330,7 @@ func (srv *Server) ProcessSingleTxnWithChainLock(pp *Peer, txn *MsgDeSoTxn) ([]* // Regardless of the consensus protocol we're running (PoW or PoS), we use the PoS mempool's to house all // mempool txns. If a txn can't make it into the PoS mempool, which uses a looser unspent balance check for // the the transactor, then it must be invalid. - if err := srv.posMempool.AddTransaction(NewMempoolTransaction(txn, time.Now())); err != nil { + if err := srv.posMempool.AddTransaction(NewMempoolTransaction(txn, time.Now(), false)); err != nil { return nil, errors.Wrapf(err, "Server.ProcessSingleTxnWithChainLock: Problem adding transaction to PoS mempool: ") } From a970551218b11c9282687a61f11b5920b3bd7f7b Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Tue, 19 Mar 2024 08:25:53 -0700 Subject: [PATCH 544/762] Review comments and the introduction of AtomicTxnsWrapperTxindexMetadata. 
--- lib/block_view.go | 6 +- lib/block_view_atomic_txns.go | 141 ++++++++++++++++++++++++----- lib/block_view_atomic_txns_test.go | 58 +++--------- lib/block_view_types.go | 5 + lib/constants.go | 6 +- lib/db_utils.go | 3 + 6 files changed, 145 insertions(+), 74 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index bedda1dc8..905daa380 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3558,14 +3558,14 @@ func (bav *UtxoView) _connectTransaction( ) } - // Check that we're not trying to commit a transaction meant to be part of a series of atomic transactions + // Check that we're not trying to connect a transaction meant to be part of a series of atomic transactions // outside an atomic transactions wrapper. if txn.IsAtomicTxnsInnerTxn() { return nil, 0, 0, 0, RuleErrorAtomicTxnsRequiresWrapper } // By here, we should know the transaction to be non-atomic. - return bav._connectNonAtomicTransaction( + return bav._connectSingleTxn( txn, txHash, blockHeight, @@ -3575,7 +3575,7 @@ func (bav *UtxoView) _connectTransaction( ) } -func (bav *UtxoView) _connectNonAtomicTransaction( +func (bav *UtxoView) _connectSingleTxn( txn *MsgDeSoTxn, txHash *BlockHash, blockHeight uint32, diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index 8bbc1ef59..d029e0b06 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -138,9 +138,10 @@ func (txnData *AtomicTxnsWrapperMetadata) New() DeSoTxnMetadata { // HELPER FUNCTIONS: MsgDeSoTxn // +// IsAtomicTxnsInnerTxn is used to determine if a MsgDeSoTxn is an inner tnx +// of an atomic transaction. An atomic transaction is qualified by the existence +// of the NextAtomicTxnPreHash and PreviousAtomicTxnPreHash keys in the ExtraData map. func (msg *MsgDeSoTxn) IsAtomicTxnsInnerTxn() bool { - // An atomic transaction is qualified by the existence of the NextAtomicTxnPreHash - // and PreviousAtomicTxnPreHash keys in the ExtraData map. 
if _, keyExists := msg.ExtraData[NextAtomicTxnPreHash]; !keyExists { return false } @@ -150,6 +151,17 @@ func (msg *MsgDeSoTxn) IsAtomicTxnsInnerTxn() bool { return true } +// AtomicHash calculates the "atomic" hash of a MsgDeSoTxn by removing the +// NextAtomicTxnPreHash and PreviousAtomicTxnPreHash keys in the ExtraData +// map as well as the transaction signature and computing the SHA256 double +// hash of the resulting transaction. +// +// The atomic hash is only meant for use in validating the sequence of +// a series of atomic transactions. Transactions are sequenced by +// referencing each-other's atomic hashes in their ExtraData map under the +// NextAtomicTxnPreHash and PreviousAtomicTxnPreHash. For a complete explanation +// on how to create a series of atomic transactions, read the +// AtomicTxnsWrapperMetadata comment. func (msg *MsgDeSoTxn) AtomicHash() (*BlockHash, error) { // Create a duplicate of the transaction to ensure we don't edit the existing transaction. msgDuplicate, err := msg.Copy() @@ -162,8 +174,7 @@ func (msg *MsgDeSoTxn) AtomicHash() (*BlockHash, error) { delete(msgDuplicate.ExtraData, PreviousAtomicTxnPreHash) // Convert the transaction to bytes but do NOT encode the transaction signature. - preSignature := true - txBytes, err := msgDuplicate.ToBytes(preSignature) + txBytes, err := msgDuplicate.ToBytes(true) if err != nil { return nil, errors.Wrap(err, "MsgDeSoTxn.AtomicHash: cannot convert modified transaction to bytes") } @@ -192,17 +203,6 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( ) { var utxoOpsForTxn []*UtxoOperation - // Don't allow the atomic transactions and the wrapper to take up more than half of the block. 
- txnBytes, err := txn.ToBytes(false) - if err != nil { - return nil, 0, 0, 0, errors.Wrapf( - err, "_connectTransaction: Problem serializing transaction: ") - } - txnSizeBytes := uint64(len(txnBytes)) - if txnSizeBytes > bav.Params.MaxBlockSizeBytes/2 { - return nil, 0, 0, 0, RuleErrorTxnTooBig - } - // Validate the connecting block height. if blockHeight < bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight { return nil, 0, 0, 0, @@ -215,6 +215,17 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( fmt.Errorf("_connectAtomicTxnsWrapper: TxnMeta type: %v", txn.TxnMeta.GetTxnType().GetTxnString()) } + // Don't allow the atomic transactions and the wrapper to take up more than half of the block. + txnBytes, err := txn.ToBytes(false) + if err != nil { + return nil, 0, 0, 0, errors.Wrapf( + err, "_connectTransaction: Problem serializing transaction: ") + } + txnSizeBytes := uint64(len(txnBytes)) + if txnSizeBytes > bav.Params.MaxBlockSizeBytes/2 { + return nil, 0, 0, 0, RuleErrorTxnTooBig + } + // Validate that the internal transactions cumulatively pay enough in fees to // cover the atomic transactions AS WELL AS the wrapper. We validate this // here to ensure we can test for these edge cases as they're also logically caught @@ -250,9 +261,9 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( var innerUtxoOps [][]*UtxoOperation var totalInput, totalOutput, totalFees uint64 for _, innerTxn := range txMeta.Txns { - // NOTE: By recursively calling _connectNonAtomicTransaction, each inner transaction is checked that + // NOTE: By recursively calling _connectSingleTxn, each inner transaction is checked that // it is capable of paying for its own fees as well as having a valid signature. 
- innerTxnUtxoOps, txnInput, txnOutput, txnFees, err := bav._connectNonAtomicTransaction( + innerTxnUtxoOps, txnInput, txnOutput, txnFees, err := bav._connectSingleTxn( innerTxn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) if err != nil { return nil, 0, 0, 0, @@ -346,7 +357,9 @@ func _verifyAtomicTxnsWrapper(txn *MsgDeSoTxn) error { } // NOTE: We do not enforce rules on txn.ExtraData as it's both useful - // for app developers and is being paid for via txn.TxnFeeNanos. + // for app developers and the bytes being taken up by the optional ExtraData + // is being paid for via by the cumulative transaction fees of all + // included atomic transactions. return nil } @@ -362,7 +375,7 @@ func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsWrapperMetadata) error { // (2) The inner transactions are meant to be included in an atomic transaction. // (3) The start point is the first inner transaction and there's only one start point. // We also collect the atomic hash of each inner transaction here for convenience. - var atomicHashes []*BlockHash + var atomicHashes [][]byte for ii, innerTxn := range txnMeta.Txns { // Validate this transaction is not another redundant atomic transaction. if innerTxn.TxnMeta.GetTxnType() == TxnTypeAtomicTxnsWrapper { @@ -386,11 +399,13 @@ func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsWrapperMetadata) error { // The error check in AtomicHash() is almost redundant, but we must keep it in the event // that the byte buffer for the Sha256 hash fails to allocate. This should almost never // occur, and there's more serious issues if it does. + // In addition, by calling AtomicHash here and storing the result we ensure that + // we compute the AtomicHash for each inner transaction once versus twice. 
innerTxnAtomicHash, err := innerTxn.AtomicHash() if err != nil { return errors.Wrap(err, "_verifyAtomicTxnsChain") } - atomicHashes = append(atomicHashes, innerTxnAtomicHash) + atomicHashes = append(atomicHashes, innerTxnAtomicHash.ToBytes()) } // Validate the chain sequence specified. @@ -399,7 +414,7 @@ func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsWrapperMetadata) error { nextIndex := (ii + 1) % len(txnMeta.Txns) if !bytes.Equal( innerTxn.ExtraData[NextAtomicTxnPreHash], - atomicHashes[nextIndex].ToBytes()) { + atomicHashes[nextIndex]) { return RuleErrorAtomicTxnsHasBrokenChain } @@ -407,7 +422,7 @@ func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsWrapperMetadata) error { prevIndex := (ii - 1 + len(txnMeta.Txns)) % len(txnMeta.Txns) if !bytes.Equal( innerTxn.ExtraData[PreviousAtomicTxnPreHash], - atomicHashes[prevIndex].ToBytes()) { + atomicHashes[prevIndex]) { return RuleErrorAtomicTxnsHasBrokenChain } } @@ -458,3 +473,85 @@ func (bav *UtxoView) _disconnectAtomicTxnsWrapper( return nil } + +// +// TYPES: AtomicTxnsWrapperTxindexMetadata +// + +type AtomicTxnsWrapperTxindexMetadata struct { + InnerTxnsTransactionMetadata []*TransactionMetadata +} + +func (txindexMetadata *AtomicTxnsWrapperTxindexMetadata) RawEncodeWithoutMetadata( + blockHeight uint64, + skipMetadata ...bool, +) []byte { + var data []byte + data = append(data, UintToBuf(uint64(len(txindexMetadata.InnerTxnsTransactionMetadata)))...) + for _, innerMetadata := range txindexMetadata.InnerTxnsTransactionMetadata { + txnBytes := innerMetadata.RawEncodeWithoutMetadata(blockHeight, skipMetadata...) + data = append(data, UintToBuf(uint64(len(txnBytes)))...) + data = append(data, txnBytes...) + } + return data +} + +func (txindexMetadata *AtomicTxnsWrapperTxindexMetadata) RawDecodeWithoutMetadata( + blockHeight uint64, + rr *bytes.Reader, +) error { + // Read the number of inner transactions. 
+ numTxns, err := ReadUvarint(rr) + if err != nil { + return errors.Wrap(err, + "AtomicTxnsWrapperTxindexMetadata.RawDecodeWithoutMetadata: Problem reading numTxns") + } + txindexMetadata.InnerTxnsTransactionMetadata, err = SafeMakeSliceWithLength[*TransactionMetadata](numTxns) + if err != nil { + return errors.Wrap(err, + "AtomicTxnsWrapperTxindexMetadata.RawDecodeWithoutMetadata: "+ + "Problem allocating InnerTxnsTransactionMetadata") + } + + // Read the transactions. + for ii := uint64(0); ii < numTxns; ii++ { + txindexMetadata.InnerTxnsTransactionMetadata[ii] = &TransactionMetadata{} + + // Figure out how many bytes are associated with the ith transaction metadata. + numTxnMetadataBytes, err := ReadUvarint(rr) + if err != nil { + return errors.Wrap(err, + "AtomicTxnsWrapperTxindexMetadata.RawDecodeWithoutMetadata: "+ + "Problem reading number of bytes in transaction metadata") + } + + // Allocate memory for the transaction metadata bytes to be read into. + txnMetadataBytes, err := SafeMakeSliceWithLength[byte](numTxnMetadataBytes) + if err != nil { + return errors.Wrap(err, + "AtomicTxnsWrapperTxindexMetadata.RawDecodeWithoutMetadata: "+ + "Problem allocating bytes for transaction") + } + + // Read the transaction metadata into the txnBytes memory buffer. + if _, err = io.ReadFull(rr, txnMetadataBytes); err != nil { + return errors.Wrap(err, + "AtomicTxnsWrapperTxindexMetadata.RawDecodeWithoutMetadata: Problem reading bytes for transaction") + } + + // Convert the txnBytes buffer to a TransactionMetadata struct. 
+ if err = txindexMetadata.InnerTxnsTransactionMetadata[ii].RawDecodeWithoutMetadata(blockHeight, rr); err != nil { + return errors.Wrap(err, + "AtomicTxnsWrapperTxindexMetadata.RawDecodeWithoutMetadata: Problem parsing transaction bytes") + } + } + return nil +} + +func (txindexMetadata *AtomicTxnsWrapperTxindexMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (txindexMetadata *AtomicTxnsWrapperTxindexMetadata) GetEncoderType() EncoderType { + return EncoderTypeAtomicTxnsWrapperTxindexMetadata +} diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index 4e6383447..018eb6761 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -379,22 +379,19 @@ func TestDependentAtomicTransactionGeneration(t *testing.T) { // Now we test that the transactions are truly dependent on each-other by reorganizing them. // + // Generate 100 new dependent atomic transactions. + atomicTxns, _ = _generateSignedDependentAtomicTransactions(testMeta, 100) + // Reorganize the transactions. initialTxn := atomicTxns[0] atomicTxns[0] = atomicTxns[len(atomicTxns)-1] atomicTxns[0] = initialTxn - // Initialize test chain, miner, and testMeta for failing use. - testMetaFail := _setUpMinerAndTestMetaForAtomicTransactionTests(t) - - // Initialize m0, m1, m2, m3, m4. - _setUpUsersForAtomicTransactionsTesting(testMetaFail) - // Construct a new view to connect the transactions to. utxoView, err = NewUtxoView( - testMetaFail.db, testMetaFail.params, testMetaFail.chain.postgres, testMetaFail.chain.snapshot, nil) + testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) - blockHeight = testMetaFail.chain.BlockTip().Height + 1 + blockHeight = testMeta.chain.BlockTip().Height + 1 // Connect the transactions to ensure they can actually be connected. 
for _, txn := range atomicTxns { @@ -431,8 +428,8 @@ func _generateSignedDependentAtomicTransactions( } // The goal of _generateUnsignedDependentAtomicTransactions is to generate -// a sequence of transactions who CANNOT be reordered meaning they -// must be executed in the sequence returned. This mean transaction +// a sequence of transactions which CANNOT be reordered meaning they +// must be executed in the sequence returned. This means the transaction // with position ii in atomicTransactions CANNOT be placed in an // index jj of atomicTransactions such that jj < ii // @@ -445,7 +442,7 @@ func _generateSignedDependentAtomicTransactions( // (2b) Have pub_(ii-1) do a max DESO transfer to pub_ii. Use m0PkBytes as pub_(-1). // (3) Have pub_numberOfTransactions perform a max transfer back to m0PkBytes // -// Notice that because pub_ii only has DESO at following the transaction with +// Notice that because pub_ii only has DESO following the transaction at // the iith index in numberOfTransactions, it's impossible to reorder the transactions // in any other order. Hence, these transactions are dependent on each other. // @@ -568,11 +565,12 @@ func _generateUnsignedMaxBasicTransfer( // to be assigned the public key senderPkString. 
After running _setUpUsersForAtomicTransactionsTesting // we expect the following test state: // -// m0Pub - 1e9 nDESO, m0 profile -// m1Pub - 1e6 nDESO, m1 profile +// m0Pub - 1e9 nDESO +// m1Pub - 1e6 nDESO // m2Pub - 1e6 nDESO // m3Pub - 1e6 nDESO // m4Pub - 1e6 nDESO +// paramUpdaterPub - 1e6 nDESO func _setUpUsersForAtomicTransactionsTesting(testMeta *TestMeta) { // Create on-chain public keys with DESO sent from miner _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 1e9) @@ -580,39 +578,7 @@ func _setUpUsersForAtomicTransactionsTesting(testMeta *TestMeta) { _registerOrTransferWithTestMeta(testMeta, "m2", senderPkString, m2Pub, senderPrivString, 1e6) _registerOrTransferWithTestMeta(testMeta, "m3", senderPkString, m3Pub, senderPrivString, 1e6) _registerOrTransferWithTestMeta(testMeta, "m4", senderPkString, m4Pub, senderPrivString, 1e6) - _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 10000) - - // Create profile for m0 and m1. 
- { - _updateProfileWithTestMeta( - testMeta, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - []byte{}, - "m0", - "i am the m0", - shortPic, - 10*100, - 1.25*100*100, - false, - ) - } - { - _updateProfileWithTestMeta( - testMeta, - testMeta.feeRateNanosPerKb, - m1Pub, - m1Priv, - []byte{}, - "m1", - "i am the m1", - shortPic, - 10*100, - 1.25*100*100, - false, - ) - } + _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1e6) } func _setUpMinerAndTestMetaForAtomicTransactionTests(t *testing.T) *TestMeta { diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 81bc63068..3869d84b3 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -179,6 +179,7 @@ const ( EncoderTypeUpdateCoinLockupParamsTxindexMetadata EncoderType = 1000037 EncoderTypeCoinLockupTransferTxindexMetadata EncoderType = 1000038 EncoderTypeCoinUnlockTxindexMetadata EncoderType = 1000039 + EncoderTypeAtomicTxnsWrapperTxindexMetadata EncoderType = 1000040 // EncoderTypeEndTxIndex encoder type should be at the end and is used for automated tests. 
EncoderTypeEndTxIndex EncoderType = 1000036 @@ -376,6 +377,8 @@ func (encoderType EncoderType) New() DeSoEncoder { return &CoinLockupTransferTxindexMetadata{} case EncoderTypeCoinUnlockTxindexMetadata: return &CoinUnlockTxindexMetadata{} + case EncoderTypeAtomicTxnsWrapperTxindexMetadata: + return &AtomicTxnsWrapperTxindexMetadata{} default: return nil } @@ -790,6 +793,8 @@ func (op OperationType) String() string { return "OperationTypeSetValidatorLastActiveAtEpoch" case OperationTypeStakeDistributionPayToBalance: return "OperationTypeStakeDistributionPayToBalance" + case OperationTypeAtomicTxnsWrapper: + return "OperationTypeAtomicTxnsWrapper" } return "OperationTypeUNKNOWN" } diff --git a/lib/constants.go b/lib/constants.go index ebe528d69..2e8eebedf 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1561,9 +1561,9 @@ const ( DiamondPostHashKey = "DiamondPostHash" // Atomic Transaction Keys - AtomicTxnsChainLength = "AtomicTxnsChainLength" - NextAtomicTxnPreHash = "NextAtomicTxnPreHash" - PreviousAtomicTxnPreHash = "PreviousAtomicTxnPreHash" + AtomicTxnsChainLength = "AtmcChnLen" + NextAtomicTxnPreHash = "NxtAtmcHsh" + PreviousAtomicTxnPreHash = "PrvAtmcHsh" // Key in transaction's extra data map containing the derived key used in signing the txn. 
DerivedPublicKey = "DerivedPublicKey" diff --git a/lib/db_utils.go b/lib/db_utils.go index e0aa9f9de..395cc8e7e 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -7038,6 +7038,7 @@ type TransactionMetadata struct { UpdateCoinLockupParamsTxindexMetadata *UpdateCoinLockupParamsTxindexMetadata `json:",omitempty"` CoinLockupTransferTxindexMetadata *CoinLockupTransferTxindexMetadata `json:",omitempty"` CoinUnlockTxindexMetadata *CoinUnlockTxindexMetadata `json:",omitempty"` + AtomicTxnsWrapperTxindexMetadata *AtomicTxnsWrapperTxindexMetadata `json:",omitempty"` } func (txnMeta *TransactionMetadata) GetEncoderForTxType(txnType TxnType) DeSoEncoder { @@ -7116,6 +7117,8 @@ func (txnMeta *TransactionMetadata) GetEncoderForTxType(txnType TxnType) DeSoEnc return txnMeta.CoinLockupTransferTxindexMetadata case TxnTypeCoinUnlock: return txnMeta.CoinUnlockTxindexMetadata + case TxnTypeAtomicTxnsWrapper: + return txnMeta.AtomicTxnsWrapperTxindexMetadata default: return nil } From 3ceb15670017a7ed551f712d96026342868a560f Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Tue, 19 Mar 2024 13:56:53 -0700 Subject: [PATCH 545/762] Update ComputeTransactionMetadata to properly handle atomic transactions. 
--- lib/legacy_mempool.go | 46 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 41 insertions(+), 5 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index c7e3bb770..8614e8b46 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -1172,9 +1172,19 @@ func (mp *DeSoMempool) tryAcceptTransaction( return nil, mempoolTx, nil } -func ComputeTransactionMetadata(txn *MsgDeSoTxn, utxoView *UtxoView, blockHash *BlockHash, - totalNanosPurchasedBefore uint64, usdCentsPerBitcoinBefore uint64, totalInput uint64, totalOutput uint64, - fees uint64, txnIndexInBlock uint64, utxoOps []*UtxoOperation, blockHeight uint64) *TransactionMetadata { +func ComputeTransactionMetadata( + txn *MsgDeSoTxn, + utxoView *UtxoView, + blockHash *BlockHash, + totalNanosPurchasedBefore uint64, + usdCentsPerBitcoinBefore uint64, + totalInput uint64, + totalOutput uint64, + fees uint64, + txnIndexInBlock uint64, + utxoOps []*UtxoOperation, + blockHeight uint64, +) *TransactionMetadata { var err error txnMeta := &TransactionMetadata{ @@ -2007,9 +2017,34 @@ func ComputeTransactionMetadata(txn *MsgDeSoTxn, utxoView *UtxoView, blockHash * txindexMetadata, affectedPublicKeys := utxoView.CreateUnjailValidatorTxindexMetadata(utxoOps[len(utxoOps)-1], txn) txnMeta.UnjailValidatorTxindexMetadata = txindexMetadata txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, affectedPublicKeys...) + case TxnTypeAtomicTxnsWrapper: + realTxMeta := txn.TxnMeta.(*AtomicTxnsWrapperMetadata) + txnMeta.AtomicTxnsWrapperTxindexMetadata.InnerTxnsTransactionMetadata = []*TransactionMetadata{} + for _, innerTxn := range realTxMeta.Txns { + // Compute the transaction metadata for each inner transaction. 
+ innerTxnsTxnMetadata := ComputeTransactionMetadata( + innerTxn, + utxoView, + blockHash, + totalNanosPurchasedBefore, + usdCentsPerBitcoinBefore, + totalInput, + totalOutput, + fees, + txnIndexInBlock, + utxoOps, + blockHeight, + ) + txnMeta.AtomicTxnsWrapperTxindexMetadata.InnerTxnsTransactionMetadata = append( + txnMeta.AtomicTxnsWrapperTxindexMetadata.InnerTxnsTransactionMetadata, innerTxnsTxnMetadata) + + // Create a global list of all affected public keys from each inner transaction. + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, innerTxnsTxnMetadata.AffectedPublicKeys...) + } } // Check if the transactor is an affected public key. If not, add them. - if txnMeta.TransactorPublicKeyBase58Check != "" { + // We skip this for atomic transactions as their transactor is the ZeroPublicKey. + if txnMeta.TransactorPublicKeyBase58Check != "" && txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { transactorPublicKeyFound := false for _, affectedPublicKey := range txnMeta.AffectedPublicKeys { if affectedPublicKey.PublicKeyBase58Check == txnMeta.TransactorPublicKeyBase58Check { @@ -2025,7 +2060,8 @@ func ComputeTransactionMetadata(txn *MsgDeSoTxn, utxoView *UtxoView, blockHash * } } // Check if the transactor is an affected public key. If not, add them. - if txnMeta.TransactorPublicKeyBase58Check != "" { + // We skip this for atomic transactions as their transactor is the ZeroPublicKey. 
+ if txnMeta.TransactorPublicKeyBase58Check != "" && txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { transactorPublicKeyFound := false for _, affectedPublicKey := range txnMeta.AffectedPublicKeys { if affectedPublicKey.PublicKeyBase58Check == txnMeta.TransactorPublicKeyBase58Check { From 9fb0232a2791851c3d9c0624497fb0d54fee7b35 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 20 Mar 2024 06:24:25 -0400 Subject: [PATCH 546/762] feature/pos-networking-and-syncing (#899) * Add BLS Keystore * PoS Validator Constants and Network Changes (#876) * PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment * Constants and network changes * Test MsgDeSoVerack encoding * Fix snapshot hack * Revert "Remove constants/network" This reverts commit b467ddbcd034c2e8d2728a7e77f4b714b686a760. 
* Fix compilation errors * Address review comments * PoS Validator Connect/Disconnect Flow (#768) * PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment * Another review round * gofmt * Comment change * PoS RemoteNode and RemoteNodeId (#857) * RemoteNode and RemoteNodeId * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test * Review round * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test * Final pass * PoS Remote Node Indexer and Manager (#879) * Add RemoteNodeIndexer * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test Simplify indices Simplify remote node indexer; fix compilation Simplify RemoteNodeManager More RemoteNodeManager updates Nits * PoS HandshakeController (#860) * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. 
* Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test Simplify indices Simplify remote node indexer; fix compilation * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test Simplify indices Simplify remote node indexer; fix compilation Simplify RemoteNodeManager * Merge HandshakeStage with RemoteNodeStatus; small HandshakeController nits * Nit * HandshakeController updates * Nits * Quick nit * Nits * Comment nit * Add HandshakeController (#861) PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test Simplify indices Simplify remote node indexer; fix compilation Simplify RemoteNodeManager More RemoteNodeManager updates Nits * Add validator deduplication test (#942) * Revert "Code split" (#943) This reverts commit 831096ac1d3008233868ac8b8f0eca4cd2b9553e. 
* Revert "Code split" (#944) This reverts commit 0604b6d3fc155177a2bb295e6635ed21b20dd947. * Nits * PoS NetworkManager RemoteNode Cleanup (#945) * Revert "Code split" This reverts commit c0c32f3943ead0e06fdfb3343954a6b5273ea887. * Review * Sync trunk * Rename * PoS NetworkManager Address (#957) * Revert "Another split" This reverts commit eaeec5875a84621b4888fc2a6104e9904e7ef53d. * Revert routine stops * gofmt * Add addrMgr to Server * Review * PoS NetworkManager Rename and Nits (#959) * Renames * nits * More renames * Review * PoS NetworkManager Fix Integration Tests (#960) * Some fixes * Fixes * Fix another integration test * Fix integration tests * Fix RegtestMiner * Fix fmt (#973) * PoS Networking and Syncing Documentation (#974) * noop * NetworkManager documentation * gofmt * Rewording (#981) * Rename (#986) * PoS NetworkManager Fix Deadlock and Test AddIps (#996) * Fix Deadlock and Test AddIps * Glog fix * PoS NetworkManager Comment Nit (#997) * Add ProtocolVersion To Regtest Params (#1035) * Fix Constructor Name for NetworkManager * Broadcast Votes, Timeouts, and Block Proposals To Validators (#1036) * Broadcast Votes and Timeouts To Validators * Broadcast Block Proposals To Network * Fix failed unit tests * Address Nina's comments * Use RemoteNode To Fetch Missing PoS Blocks (#1045) * Update Active Validator Connections In PoS Consensus (#1054) * Remove Rollbacks To bestHeaderChain When bestChain is Mutated (#1055) * PoW Miner Event Loop Cleanup (#1056) * Log Peer Validator Vote Messages (#1057) * Log Peer Validator Timeout Messages (#1058) * Request Missing HighQC Block For Timeout Message (#1061) * Request Missing HighQC Block For Timeout Message * Address Nina's comments * Remove Peer Disconnects in Server._handleBlock (#1062) * Remove Peer Disconnects in Server._handleBlock * Warning for unrequested blocks * Catch case for spam prevention check * Address Nina's comment * Change log level for consensus events * Update regtest block producer domain * 
Prevent Commited Tip Reorgs in ProcessHeaderPoS (#1063) * Prevent Commited Tip Reorgs in ProcessHeaderPoS * Clean up error message * Add Estimate fee rate to mempool interface (#1066) * Add BLS Public Key PKID Pair Entry Snapshot to core state (#1068) * Fix txindex for failing txns (#1069) * Introduce failing txn type so state syncer understand failing txns (#1070) * Support uncommitted blocks in state syncer mempool flush (#1065) * Fix Connect failing txn test (#1071) * Add support for seed hex in block producer seed (#1072) * Make GetCommittedTip public (#1073) * Assorted small enhancements for node restart w/ pos (#1079) * Fix txindex dependent transaction within uncommitted blocks (#1082) * Add flag to control number of mempool txns to sync in state syncer routine (#1083) * PoS NetworkManager Consolidation (#1078) * NetworkManager consolidation * Nits * Connect Uncommitted Blocks To Best Header Chain on Startup (#1087) * Smarter Transition From Syncing To Steady State PoS Consensus (#1088) * Fix FastHotStuffConsensus Transition Timer (#1091) * Gossip New Block Hashes Via Inv Message to Non-Validators (#1092) * Fix Edge Cases With Header Syncing, Block Syncing, and PoS Steady State (#1093) * Safer Heuristic to Detemine When to Transition to FastHotStuffConsensus (#1094) * Fix Mempool and Fee Estimator Past Blocks Refresh on Block Connect (#1095) * Fix Mempool and Fee Estimator Past Blocks Refresh on Block Connect * Cleanup * Address Nina's comments * Fix Race Conditions With Mempool Txn Relay (#1096) * Add Peer nil Check in Block Gossiping (#1097) * Beef up logging for FastHotStuffConsensus (#1107) * Fix Validation For Zero Validator HighQC View Value In Timeout QCs (#1111) * Fix Validation For Zero Validator HighQC View Value In Timeout QCs * Fix typo * Better comments * Better validations * Print Current Leader Check Debugging Info (#1112) * Fix Block Hash Comparison In ProcessValidatorVote (#1117) * Pass uncommitted utxo view through validation funcs to 
prevent rebuilds (#1109) * Cache block views (#1110) * VerifySignature syncing enhancements (#1115) * Update flow crypto dependency (#1123) * Reject Forking PoW Blocks Once Chain Reaches Final Height of PoW Protocol (#1129) * PoS NetworkManager Review (#1119) * PoS NetworkManager Review * nit * PoS NetworkManager Review Nits * nits * more nits * PoS NetworkManager ConnectIps Fixes (#1120) * PoS NetworkManager ConnectIps Updates * nits * nit * PoS NetworManager ConnectIps Tests (#1121) --------- Co-authored-by: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Co-authored-by: Lazy Nina <81658138+lazynina@users.noreply.github.com> --- Dockerfile | 4 +- bls/signature.go | 22 +- bls/signature_no_relic.go | 157 -- bls/signature_no_relic_test.go | 15 - bls/signature_test.go | 2 - cmd/config.go | 4 +- cmd/node.go | 19 +- cmd/run.go | 2 + collections/concurrent_map.go | 80 + collections/concurrent_map_test.go | 61 + collections/slice.go | 17 +- consensus/event_loop.go | 42 +- consensus/event_loop_test.go | 59 +- consensus/integration_test.go | 2 - consensus/integration_test_types.go | 4 + consensus/mocking.go | 4 + consensus/types.go | 2 + consensus/types_internal.go | 4 + consensus/utils.go | 54 +- consensus/utils_test.go | 140 +- go.mod | 5 +- go.sum | 23 +- integration_testing/blocksync_test.go | 136 +- integration_testing/connection_bridge.go | 67 +- integration_testing/hypersync_test.go | 292 +--- integration_testing/migrations_test.go | 55 +- integration_testing/mining_test.go | 31 +- .../network_manager_routines_test.go | 632 ++++++++ integration_testing/network_manager_test.go | 464 ++++++ .../network_manager_utils_test.go | 311 ++++ integration_testing/rollback_test.go | 3 + integration_testing/tools.go | 72 +- integration_testing/txindex_test.go | 48 +- lib/block_producer.go | 28 +- lib/block_view.go | 5 +- lib/block_view_stake_test.go | 2 - lib/block_view_test.go | 6 +- lib/block_view_types.go | 3 +- lib/block_view_types_test.go | 2 - 
lib/block_view_validator_test.go | 2 - lib/blockchain.go | 46 +- lib/blockchain_test.go | 10 +- lib/bls_keystore_test.go | 2 - lib/connection_manager.go | 700 +++------ lib/constants.go | 86 +- lib/db_utils.go | 2 +- lib/errors.go | 2 + lib/legacy_mempool.go | 36 +- lib/miner.go | 21 +- lib/network.go | 232 ++- lib/network_connection.go | 220 +++ lib/network_connection_test.go | 167 ++ lib/network_manager.go | 1341 +++++++++++++++++ lib/network_test.go | 50 +- lib/peer.go | 334 +--- lib/pos_block_producer_test.go | 2 - lib/pos_blockchain.go | 332 ++-- lib/pos_blockchain_test.go | 14 +- lib/pos_consensus.go | 184 ++- lib/pos_consensus_test.go | 19 +- lib/pos_constants_test.go | 2 - lib/pos_epoch_complete_hook_test.go | 2 - lib/pos_fee_estimator.go | 161 +- lib/pos_fee_estimator_test.go | 2 +- lib/pos_leader_schedule_test.go | 2 - lib/pos_mempool.go | 25 +- lib/pos_network.go | 23 + lib/pos_network_message_interface.go | 4 + lib/pos_network_no_relic_test.go | 17 - lib/pos_network_test.go | 2 - lib/pos_random_seed_test.go | 2 - lib/pos_server_regtest.go | 16 +- lib/pos_transaction_register.go | 5 +- lib/remote_node.go | 730 +++++++++ lib/server.go | 556 ++++--- lib/snapshot.go | 8 +- lib/state_change_syncer.go | 112 +- lib/txindex.go | 61 +- scripts/install-relic.sh | 32 - scripts/pos/validator_registration_utils.go | 5 - test.Dockerfile | 4 +- 81 files changed, 6383 insertions(+), 2069 deletions(-) delete mode 100644 bls/signature_no_relic.go delete mode 100644 bls/signature_no_relic_test.go create mode 100644 collections/concurrent_map.go create mode 100644 collections/concurrent_map_test.go create mode 100644 integration_testing/network_manager_routines_test.go create mode 100644 integration_testing/network_manager_test.go create mode 100644 integration_testing/network_manager_utils_test.go create mode 100644 lib/network_connection.go create mode 100644 lib/network_connection_test.go create mode 100644 lib/network_manager.go delete mode 100644 
lib/pos_network_no_relic_test.go create mode 100644 lib/remote_node.go delete mode 100755 scripts/install-relic.sh diff --git a/Dockerfile b/Dockerfile index 56e466d8e..fc52e7d02 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,10 +24,8 @@ COPY scripts scripts COPY test_data test_data COPY main.go . -RUN ./scripts/install-relic.sh - # build backend -RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core -tags=relic main.go +RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core main.go # create tiny image FROM alpine:edge diff --git a/bls/signature.go b/bls/signature.go index 673f24307..9d12a2b38 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -1,5 +1,3 @@ -//go:build relic - package bls import ( @@ -11,8 +9,8 @@ import ( "fmt" "strings" - flowCrypto "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + flowCrypto "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" ) // The signingAlgorithm for BLS keys is BLSBLS12381 which is BLS on the BLS 12-381 curve. @@ -227,6 +225,14 @@ func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, erro return publicKey, err } +func (publicKey *PublicKey) ToAbbreviatedString() string { + str := publicKey.ToString() + if len(str) <= 8 { + return str + } + return str[:8] + "..." + str[len(str)-8:] +} + func (publicKey *PublicKey) MarshalJSON() ([]byte, error) { // This is called automatically by the JSON library when converting a // bls.PublicKey to JSON. This is useful when passing a bls.PublicKey @@ -324,6 +330,14 @@ func (signature *Signature) FromString(signatureString string) (*Signature, erro return signature, nil } +func (signature *Signature) ToAbbreviatedString() string { + str := signature.ToString() + if len(str) <= 8 { + return str + } + return str[:8] + "..." + str[len(str)-8:] +} + func (signature *Signature) MarshalJSON() ([]byte, error) { // This is called automatically by the JSON library when converting a // bls.Signature to JSON. 
This is useful when passing a bls.Signature diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go deleted file mode 100644 index b3d472739..000000000 --- a/bls/signature_no_relic.go +++ /dev/null @@ -1,157 +0,0 @@ -//go:build !relic - -package bls - -const BLSNoRelicError = "BLS keys can't be used without Relic installed" - -func AggregateSignatures(signatures []*Signature) (*Signature, error) { - panic(BLSNoRelicError) -} - -func VerifyAggregateSignatureSinglePayload(publicKeys []*PublicKey, signature *Signature, payloadBytes []byte) (bool, error) { - panic(BLSNoRelicError) -} - -func VerifyAggregateSignatureMultiplePayloads(publicKeys []*PublicKey, signature *Signature, payloadsBytes [][]byte) (bool, error) { - panic(BLSNoRelicError) -} - -// -// TYPES: PrivateKey -// - -type PrivateKey struct{} - -func NewPrivateKey() (*PrivateKey, error) { - panic(BLSNoRelicError) -} - -func (privateKey *PrivateKey) Sign(payloadBytes []byte) (*Signature, error) { - panic(BLSNoRelicError) -} - -func (privateKey *PrivateKey) PublicKey() *PublicKey { - panic(BLSNoRelicError) -} - -func (privateKey *PrivateKey) ToString() string { - panic(BLSNoRelicError) -} - -func (privateKey *PrivateKey) FromSeed(seed []byte) (*PrivateKey, error) { - panic(BLSNoRelicError) -} - -func (privateKey *PrivateKey) FromString(privateKeyString string) (*PrivateKey, error) { - panic(BLSNoRelicError) -} - -func (privateKey *PrivateKey) MarshalJSON() ([]byte, error) { - panic(BLSNoRelicError) -} - -func (privateKey *PrivateKey) UnmarshalJSON(data []byte) error { - panic(BLSNoRelicError) -} - -func (privateKey *PrivateKey) Eq(other *PrivateKey) bool { - panic(BLSNoRelicError) -} - -// -// TYPES: PublicKey -// - -type PublicKey struct{} - -func (publicKey *PublicKey) Verify(signature *Signature, input []byte) (bool, error) { - panic(BLSNoRelicError) -} - -func (publicKey *PublicKey) ToBytes() []byte { - panic(BLSNoRelicError) -} - -func (publicKey *PublicKey) FromBytes(publicKeyBytes []byte) 
(*PublicKey, error) { - panic(BLSNoRelicError) -} - -func (publicKey *PublicKey) ToString() string { - panic(BLSNoRelicError) -} - -func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, error) { - panic(BLSNoRelicError) -} - -func (publicKey *PublicKey) MarshalJSON() ([]byte, error) { - panic(BLSNoRelicError) -} - -func (publicKey *PublicKey) UnmarshalJSON(data []byte) error { - panic(BLSNoRelicError) -} - -func (publicKey *PublicKey) Eq(other *PublicKey) bool { - panic(BLSNoRelicError) -} - -func (publicKey *PublicKey) Copy() *PublicKey { - panic(BLSNoRelicError) -} - -func (publicKey *PublicKey) IsEmpty() bool { - panic(BLSNoRelicError) -} - -type SerializedPublicKey string - -func (publicKey *PublicKey) Serialize() SerializedPublicKey { - panic(BLSNoRelicError) -} - -func (serializedPublicKey SerializedPublicKey) Deserialize() (*PublicKey, error) { - panic(BLSNoRelicError) -} - -// -// TYPES: Signature -// - -type Signature struct{} - -func (signature *Signature) ToBytes() []byte { - panic(BLSNoRelicError) -} - -func (signature *Signature) FromBytes(signatureBytes []byte) (*Signature, error) { - panic(BLSNoRelicError) -} - -func (signature *Signature) ToString() string { - panic(BLSNoRelicError) -} - -func (signature *Signature) FromString(signatureString string) (*Signature, error) { - panic(BLSNoRelicError) -} - -func (signature *Signature) MarshalJSON() ([]byte, error) { - panic(BLSNoRelicError) -} - -func (signature *Signature) UnmarshalJSON(data []byte) error { - panic(BLSNoRelicError) -} - -func (signature *Signature) Eq(other *Signature) bool { - panic(BLSNoRelicError) -} - -func (signature *Signature) Copy() *Signature { - panic(BLSNoRelicError) -} - -func (signature *Signature) IsEmpty() bool { - panic(BLSNoRelicError) -} diff --git a/bls/signature_no_relic_test.go b/bls/signature_no_relic_test.go deleted file mode 100644 index 3b4456b14..000000000 --- a/bls/signature_no_relic_test.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build 
!relic - -package bls - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestVerifyingBLSSignatures(t *testing.T) { - assert.Panics(t, func() { NewPrivateKey() }) - assert.Panics(t, func() { (&PrivateKey{}).FromString("") }) - assert.Panics(t, func() { (&PublicKey{}).FromString("") }) - assert.Panics(t, func() { (&Signature{}).FromString("") }) -} diff --git a/bls/signature_test.go b/bls/signature_test.go index d41bd6646..5596e3958 100644 --- a/bls/signature_test.go +++ b/bls/signature_test.go @@ -1,5 +1,3 @@ -//go:build relic - package bls import ( diff --git a/cmd/config.go b/cmd/config.go index 3e3a47f97..4d47aad55 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -80,7 +80,8 @@ type Config struct { TimeEvents bool // State Syncer - StateChangeDir string + StateChangeDir string + StateSyncerMempoolTxnSyncLimit uint64 } func LoadConfig() *Config { @@ -176,6 +177,7 @@ func LoadConfig() *Config { // State Syncer config.StateChangeDir = viper.GetString("state-change-dir") + config.StateSyncerMempoolTxnSyncLimit = viper.GetUint64("state-syncer-mempool-txn-sync-limit") return &config } diff --git a/cmd/node.go b/cmd/node.go index 4e64870f4..18f6e3b5e 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -27,12 +27,13 @@ import ( ) type Node struct { - Server *lib.Server - ChainDB *badger.DB - TXIndex *lib.TXIndex - Params *lib.DeSoParams - Config *Config - Postgres *lib.Postgres + Server *lib.Server + ChainDB *badger.DB + TXIndex *lib.TXIndex + Params *lib.DeSoParams + Config *Config + Postgres *lib.Postgres + Listeners []net.Listener // IsRunning is false when a NewNode is created, set to true on Start(), set to false // after Stop() is called. Mainly used in testing. @@ -117,8 +118,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { // This just gets localhost listening addresses on the protocol port. // Such as [{127.0.0.1 18000 } {::1 18000 }], and associated listener structs. 
- listeningAddrs, listeners := GetAddrsToListenOn(node.Config.ProtocolPort) - _ = listeningAddrs + _, node.Listeners = GetAddrsToListenOn(node.Config.ProtocolPort) // If --connect-ips is not passed, we will connect the addresses from // --add-ips, DNSSeeds, and DNSSeedGenerators. @@ -238,7 +238,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { shouldRestart := false node.Server, err, shouldRestart = lib.NewServer( node.Params, - listeners, + node.Listeners, desoAddrMgr, node.Config.ConnectIPs, node.ChainDB, @@ -282,6 +282,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Config.AugmentedBlockViewRefreshIntervalMillis, node.Config.PosBlockProductionIntervalMilliseconds, node.Config.PosTimeoutBaseDurationMilliseconds, + node.Config.StateSyncerMempoolTxnSyncLimit, ) if err != nil { // shouldRestart can be true if, on the previous run, we did not finish flushing all ancestral diff --git a/cmd/run.go b/cmd/run.go index c5ebe87a8..2eaf1fa67 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -222,6 +222,8 @@ func SetupRunFlags(cmd *cobra.Command) { cmd.PersistentFlags().Bool("time-events", false, "Enable simple event timer, helpful in hands-on performance testing") cmd.PersistentFlags().String("state-change-dir", "", "The directory for state change logs. 
WARNING: Changing this "+ "from an empty string to a non-empty string (or from a non-empty string to the empty string) requires a resync.") + cmd.PersistentFlags().Uint("state-syncer-mempool-txn-sync-limit", 10000, "The maximum number of transactions to "+ + "process in the mempool tx state syncer at a time.") cmd.PersistentFlags().VisitAll(func(flag *pflag.Flag) { viper.BindPFlag(flag.Name, flag) }) diff --git a/collections/concurrent_map.go b/collections/concurrent_map.go new file mode 100644 index 000000000..e16d64dc7 --- /dev/null +++ b/collections/concurrent_map.go @@ -0,0 +1,80 @@ +package collections + +import "sync" + +type ConcurrentMap[Key comparable, Value any] struct { + mtx sync.RWMutex + m map[Key]Value +} + +func NewConcurrentMap[Key comparable, Value any]() *ConcurrentMap[Key, Value] { + return &ConcurrentMap[Key, Value]{ + m: make(map[Key]Value), + } +} + +func (cm *ConcurrentMap[Key, Value]) Set(key Key, val Value) { + cm.mtx.Lock() + defer cm.mtx.Unlock() + + cm.m[key] = val +} + +func (cm *ConcurrentMap[Key, Value]) Remove(key Key) { + cm.mtx.Lock() + defer cm.mtx.Unlock() + + _, ok := cm.m[key] + if !ok { + return + } + delete(cm.m, key) +} + +func (cm *ConcurrentMap[Key, Value]) Get(key Key) (Value, bool) { + cm.mtx.RLock() + defer cm.mtx.RUnlock() + + val, ok := cm.m[key] + return val, ok +} + +func (cm *ConcurrentMap[Key, Value]) Clone() *ConcurrentMap[Key, Value] { + cm.mtx.RLock() + defer cm.mtx.RUnlock() + + clone := NewConcurrentMap[Key, Value]() + for key, val := range cm.m { + clone.Set(key, val) + } + return clone +} + +func (cm *ConcurrentMap[Key, Value]) ToMap() map[Key]Value { + cm.mtx.RLock() + defer cm.mtx.RUnlock() + + index := make(map[Key]Value) + for key, node := range cm.m { + index[key] = node + } + return index +} + +func (cm *ConcurrentMap[Key, Value]) GetAll() []Value { + cm.mtx.RLock() + defer cm.mtx.RUnlock() + + var vals []Value + for _, val := range cm.m { + vals = append(vals, val) + } + return vals +} + +func (cm 
*ConcurrentMap[Key, Value]) Count() int { + cm.mtx.RLock() + defer cm.mtx.RUnlock() + + return len(cm.m) +} diff --git a/collections/concurrent_map_test.go b/collections/concurrent_map_test.go new file mode 100644 index 000000000..aac89b2fb --- /dev/null +++ b/collections/concurrent_map_test.go @@ -0,0 +1,61 @@ +package collections + +import ( + "fmt" + "testing" +) + +func TestConcurrentMap(t *testing.T) { + m := NewConcurrentMap[string, int]() + control := make(map[string]int) + + // test add + for ii := 0; ii < 100; ii++ { + key := fmt.Sprintf("%v", ii) + m.Set(key, ii) + control[key] = ii + } + + for key, val := range control { + if mVal, ok := m.Get(key); !ok || mVal != val { + t.Errorf("Expected %d, got %d", val, m.m[key]) + } + } + + // test remove + for ii := 0; ii < 50; ii++ { + key := fmt.Sprintf("%v", ii) + m.Remove(key) + delete(control, key) + } + + for key, val := range control { + if mVal, ok := m.Get(key); !ok || mVal != val { + t.Errorf("Expected %d, got %d", val, m.m[key]) + } + } + + // test copy + copy := m.ToMap() + for key, val := range control { + if mVal, ok := copy[key]; !ok || mVal != val { + t.Errorf("Expected %d, got %d", val, m.m[key]) + } + } + if len(copy) != len(control) { + t.Errorf("Expected %d, got %d", len(control), len(copy)) + } + + // test get all + vals := m.GetAll() + for _, val := range vals { + if _, ok := control[fmt.Sprintf("%v", val)]; !ok { + t.Errorf("Expected %d, got %d", val, m.m[fmt.Sprintf("%v", val)]) + } + } + + // test size + if m.Count() != len(control) { + t.Errorf("Expected %d, got %d", len(control), m.Count()) + } +} diff --git a/collections/slice.go b/collections/slice.go index d83f6eb3c..27aa4eff0 100644 --- a/collections/slice.go +++ b/collections/slice.go @@ -1,6 +1,11 @@ package collections -import "sort" +import ( + "fmt" + "math/rand" + "sort" + "time" +) func All[T any](slice []T, predicate func(T) bool) bool { negatedPredicate := func(val T) bool { @@ -32,6 +37,16 @@ func Transform[TInput any, 
TOutput any](slice []TInput, transformFn func(TInput) return result } +func RandomElement[T any](slice []T) (T, error) { + if len(slice) == 0 { + return *new(T), fmt.Errorf("RandomElement: input slice is empty") + } + + src := rand.NewSource(time.Now().UnixNano()) + index := src.Int63() % int64(len(slice)) + return slice[index], nil +} + // SortStable wraps the built-in sort.SliceStable function to return a sorted slice // given an input slice, without any side effects on the input. Params: // - input: the original slice whose contents will be sorted diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 27168c054..6a768042a 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -1,6 +1,7 @@ package consensus import ( + "fmt" "time" "github.com/golang/glog" @@ -344,7 +345,7 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { } // Check if the vote is for the chain tip. If not, then there's nothing more to do. - if vote.GetBlockHash() != fc.tip.block.GetBlockHash() { + if !IsEqualBlockHash(vote.GetBlockHash(), fc.tip.block.GetBlockHash()) { return nil } @@ -533,6 +534,45 @@ func (fc *fastHotStuffEventLoop) IsRunning() bool { return fc.status == eventLoopStatusRunning } +func (fc *fastHotStuffEventLoop) ToString() string { + fc.lock.RLock() + defer fc.lock.RUnlock() + + if fc.status != eventLoopStatusRunning { + return "FastHotStuffEventLoop is not running" + } + + // Get the Tip Block + tipBlock := fc.tip.block + + // Get the votes for the tip + tipBlockVotePayload := GetVoteSignaturePayload(tipBlock.GetView(), tipBlock.GetBlockHash()) + votesForTip := fc.votesSeenByBlockHash[tipBlockVotePayload] + + // Get the timeouts for the current and previous view + timeoutsForCurrentView := fc.timeoutsSeenByView[fc.currentView] + timeoutsForPreviousView := fc.timeoutsSeenByView[fc.currentView-1] + + return fmt.Sprintf( + "Printing FastHotStuffEventLoop state: "+ + "\n Status: %d, CurrentView: %d"+ + "\n Tip Height: 
%d, Tip Hash: %v, Tip View: %d, Num Safe Blocks: %d"+ + "\n Crank Duration: %v, Timeout Interval: %v"+ + "\n Votes For Tip: %d, Timeouts For Current View: %d, Timeouts For Prev View: %d", + fc.status, + fc.currentView, + tipBlock.GetHeight(), + tipBlock.GetBlockHash(), + tipBlock.GetView(), + len(fc.safeBlocks), + fc.crankTimerTask.GetDuration(), + fc.nextTimeoutTask.GetDuration(), + len(votesForTip), + len(timeoutsForCurrentView), + len(timeoutsForPreviousView), + ) +} + // resetScheduledTasks recomputes the nextBlockConstructionTimeStamp and nextTimeoutTimeStamp // values, and reschedules the crank timer and timeout tasks. func (fc *fastHotStuffEventLoop) resetScheduledTasks() { diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 7eba6a3b7..cdba7e439 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -1,5 +1,3 @@ -//go:build relic - package consensus import ( @@ -783,6 +781,63 @@ func TestVoteQCConstructionSignal(t *testing.T) { require.Equal(t, blockConstructionSignal.QC.GetAggregatedSignature().GetSignersList().ToBytes(), bitset.NewBitset().Set(0, true).ToBytes()) require.Equal(t, blockConstructionSignal.QC.GetAggregatedSignature().GetSignature().ToBytes(), validator1Vote.ToBytes()) } + + // Happy path, crank timer has elapsed the vote QC construction signal is triggered by an incoming vote + { + fe := NewFastHotStuffEventLoop() + + // Init the event loop + err := fe.Init( + time.Hour, + time.Hour, + block.GetQC(), // genesisQC + BlockWithValidatorList{block, validatorList}, // tip + []BlockWithValidatorList{{block, validatorList}}, // safeBlocks + ) + require.NoError(t, err) + + // Start the event loop + fe.Start() + + // Manually trigger the crank timer to elapse. The crank timer's execution is a no-op because + // there are no votes. 
+ fe.onCrankTimerTaskExecuted(fe.currentView) + + // Process a vote from validator 1, which has enough stake to construct a QC + { + validator1VoteMsg := voteMessage{ + view: block.view, + blockHash: copyBlockHash(block.blockHash), + publicKey: validatorPrivateKey1.PublicKey(), + signature: validator1Vote, + } + err = fe.ProcessValidatorVote(&validator1VoteMsg) + require.NoError(t, err) + } + + var blockConstructionSignal *FastHotStuffEvent + + // Wait up to 100 milliseconds for a block construction signal to be sent + select { + case blockConstructionSignal = <-fe.Events: + // Do nothing + case <-time.After(100 * time.Millisecond): + require.Fail(t, "Did not receive a block construction signal when there were enough votes to construct a vote QC") + } + + // Verify the block construction signal + require.Equal(t, blockConstructionSignal.EventType, FastHotStuffEventTypeConstructVoteQC) + require.Equal(t, blockConstructionSignal.View, block.GetView()+1) + require.Equal(t, blockConstructionSignal.TipBlockHash.GetValue(), block.GetBlockHash().GetValue()) + require.Equal(t, blockConstructionSignal.TipBlockHeight, block.GetHeight()) + require.Equal(t, blockConstructionSignal.QC.GetView(), block.GetView()) + require.Equal(t, blockConstructionSignal.QC.GetBlockHash().GetValue(), block.GetBlockHash().GetValue()) + require.Equal(t, blockConstructionSignal.QC.GetAggregatedSignature().GetSignersList().ToBytes(), bitset.NewBitset().Set(0, true).ToBytes()) + require.Equal(t, blockConstructionSignal.QC.GetAggregatedSignature().GetSignature().ToBytes(), validator1Vote.ToBytes()) + + // Stop the event loop + fe.Stop() + } } func TestTimeoutQCConstructionSignal(t *testing.T) { diff --git a/consensus/integration_test.go b/consensus/integration_test.go index 545fc9a5d..d7d4e941b 100644 --- a/consensus/integration_test.go +++ b/consensus/integration_test.go @@ -1,5 +1,3 @@ -//go:build relic - package consensus import ( diff --git a/consensus/integration_test_types.go 
b/consensus/integration_test_types.go index dfc2397db..962342500 100644 --- a/consensus/integration_test_types.go +++ b/consensus/integration_test_types.go @@ -104,6 +104,10 @@ func (node *validatorNode) GetStakeAmount() *uint256.Int { return node.stake } +func (node *validatorNode) GetDomains() [][]byte { + return [][]byte{} +} + func (node *validatorNode) ProcessBlock(incomingBlock *block) { node.lock.Lock() defer node.lock.Unlock() diff --git a/consensus/mocking.go b/consensus/mocking.go index 6c0b8be82..33044c895 100644 --- a/consensus/mocking.go +++ b/consensus/mocking.go @@ -63,3 +63,7 @@ func (fc *MockFastHotStuffEventLoop) IsInitialized() bool { func (fc *MockFastHotStuffEventLoop) IsRunning() bool { return fc.OnIsRunning() } + +func (fc *MockFastHotStuffEventLoop) ToString() string { + return "" +} diff --git a/consensus/types.go b/consensus/types.go index b724b062e..b81a6ffa2 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -78,6 +78,7 @@ type FastHotStuffEventLoop interface { Stop() IsInitialized() bool IsRunning() bool + ToString() string } // BlockHash is a 32-byte hash of a block used to uniquely identify a block. It's re-defined here @@ -92,6 +93,7 @@ type BlockHash interface { type Validator interface { GetPublicKey() *bls.PublicKey GetStakeAmount() *uint256.Int + GetDomains() [][]byte } type AggregateQuorumCertificate interface { diff --git a/consensus/types_internal.go b/consensus/types_internal.go index 7b98ce1a2..1eb40eeb7 100644 --- a/consensus/types_internal.go +++ b/consensus/types_internal.go @@ -35,6 +35,10 @@ func (v *validator) GetStakeAmount() *uint256.Int { return v.stakeAmount } +func (v *validator) GetDomains() [][]byte { + return [][]byte{} +} + //////////////////////////////////////////////////////////////////////// // AggregateQuorumCertificate interface implementation for internal use. 
// We use this type for unit tests, and to construct timeout QCs for diff --git a/consensus/utils.go b/consensus/utils.go index 32d8e7ca7..078743313 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -89,26 +89,48 @@ func IsValidSuperMajorityAggregateQuorumCertificate(aggQC AggregateQuorumCertifi return false } - // Compute the timeout payloads signed by each validator. - // Each validator should sign a payload with the pair (View, HighQCView). - // The ordering of the high QC views and validators in the aggregate signature - // will match the ordering of active validators in descending order of stake for - // the timed out view's epoch. + // Compute the timeout payloads signed by each validator. Each validator should sign a payload + // with the pair (View, HighQCView). The ordering of the high QC views and validators in the + // aggregate signature will match the ordering of active validators in descending order of stake + // for the timed out view's epoch. + // + // The highQC views slice may contain 0 values for validators that did not send a timeout message + // for the timed out view. The 0 values are kept in the slice to maintain the ordering of the signers + // in the highQC views identical to the ordering of the validators in the validator list and signers list. signedPayloads := [][]byte{} for _, highQCView := range aggQC.GetHighQCViews() { + // If we encounter a 0 value for the validator at the current index, then it means that the + // the validator did not send a timeout message for the timed out view. We skip this validator. + if highQCView == 0 { + continue + } + payload := GetTimeoutSignaturePayload(aggQC.GetView(), highQCView) signedPayloads = append(signedPayloads, payload[:]) } - // Validate the signers' aggregate signatures + // This is a safety check to ensure that the number of signed payloads matches the number of signers. + // All validators that did not send a timeout message for the timed out view have been filtered out. 
+ if len(signedPayloads) != len(signerPublicKeys) { + return false + } + + // Validate the signers' aggregate signatures. At this point, the signedPayloads slice contains + // payloads for all signers that signed a timeout message for the timed out view. The signedPayloads + // list is ordered in the same way as the signers list. All missing validators have been filtered out. + // + // Ex: If the signerPublicKeys list is [A, B, C, D, E] and the high QC views are [5, 4, 3, 4, 1], + // then it means that signer A has a highQC view of 5, signer B has a highQC view of 4,... isValidSignature, err := bls.VerifyAggregateSignatureMultiplePayloads( signerPublicKeys, aggQC.GetAggregatedSignature().GetSignature(), signedPayloads, ) + if err != nil || !isValidSignature { return false } + return true } @@ -293,17 +315,15 @@ func isProperlyFormedAggregateQC(aggQC AggregateQuorumCertificate) bool { } // Verify that AggregateSignature's HighQC view is the highest view in the HighQCViews. - // Also validate that all of the high QC views are non-zero highestView := uint64(0) for _, highQCView := range aggQC.GetHighQCViews() { - if highQCView == 0 { - return false - } if highQCView > highestView { highestView = highQCView } } - if highestView != aggQC.GetHighQC().GetView() { + + // The highest view in the high QC views must be non-zero and equal to the high QC's view. 
+ if highestView == 0 || highestView != aggQC.GetHighQC().GetView() { return false } @@ -588,6 +608,18 @@ func createDummyBlockHash() *blockHash { } } +func copyBlockHash(input BlockHash) *blockHash { + rawInputHash := input.GetValue() + + byteArray := [32]byte{} + copy(byteArray[:], rawInputHash[:]) + + return &blockHash{ + value: byteArray, + } + +} + func generateRandomBytes(numBytes int) []byte { randomBytes := make([]byte, numBytes) rand.Read(randomBytes) diff --git a/consensus/utils_test.go b/consensus/utils_test.go index afe419589..ab0e5e350 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -1,5 +1,3 @@ -//go:build relic - package consensus import ( @@ -117,7 +115,7 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { validator2 := validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt().SetUint64(2), + stakeAmount: uint256.NewInt().SetUint64(1), } validator3 := validator{ @@ -134,9 +132,6 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { // Compute the signature payload signaturePayload := GetVoteSignaturePayload(view, dummyBlockHash) - // Compute the aggregate signature payload - timeoutPayload := GetTimeoutSignaturePayload(view+2, view) - validator1Signature, err := validatorPrivateKey1.Sign(signaturePayload[:]) require.NoError(t, err) validator2Signature, err := validatorPrivateKey2.Sign(signaturePayload[:]) @@ -151,10 +146,17 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { }, } - // Test with no super-majority stake + // Sad Path: Test 3/5 stake which is not a super-majority { - validator1TimeoutSignature, err := validatorPrivateKey1.Sign(timeoutPayload[:]) + // Validator 1 signs a timeout payload where its high QC view is equal to the aggQC's high QC view. 
+ validator1TimeoutPayload := GetTimeoutSignaturePayload(view+2, view) + validator1TimeoutSignature, err := validatorPrivateKey1.Sign(validator1TimeoutPayload[:]) require.NoError(t, err) + + // Validator 2 does not time out. + + // Validator 3 does not time out. + qc := aggregateQuorumCertificate{ view: view + 2, highQC: &highQC, @@ -167,46 +169,54 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators, validators)) } - // Test with 5/6 super-majority stake + // Sad Path: Test 4/5 stake but one of the validators has a higher view than the highQC view. { - validator1TimeoutSignature, err := validatorPrivateKey1.Sign(timeoutPayload[:]) + // Validator 1 signs a timeout payload where its high QC view is equal to the aggQC's high QC view. + validator1TimeoutPayload := GetTimeoutSignaturePayload(view+2, view) + validator1TimeoutSignature, err := validatorPrivateKey1.Sign(validator1TimeoutPayload[:]) require.NoError(t, err) - // For fun, let's have validator 2 sign a timeout payload where its high QC is further behind. - validator2TimeoutPayload := GetTimeoutSignaturePayload(view+2, view-1) + + // Validator 2 signs a timeout payload where its high QC view is higher than the aggQC's high QC view. + validator2TimeoutPayload := GetTimeoutSignaturePayload(view+2, view+1) validator2TimeoutSignature, err := validatorPrivateKey2.Sign(validator2TimeoutPayload[:]) require.NoError(t, err) + // Validator 3 does not time out. 
+ timeoutAggSig, err := bls.AggregateSignatures([]*bls.Signature{validator1TimeoutSignature, validator2TimeoutSignature}) require.NoError(t, err) qc := aggregateQuorumCertificate{ view: view + 2, highQC: &highQC, - highQCViews: []uint64{view, view - 1}, + highQCViews: []uint64{view, view + 1}, aggregatedSignature: &aggregatedSignature{ signersList: bitset.NewBitset().FromBytes([]byte{0x3}), // 0b0011, which represents validators 1 and 2 signature: timeoutAggSig, }, } - require.True(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators, validators)) + require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators, validators)) } - // Test highQC with view lower than the highest view in the highQCViews. + // Sad Path: Test 4/5 stake, but the highQC views slice has too many views. { - // Compute the aggregate signature payload - validator1TimeoutPayload := GetTimeoutSignaturePayload(view+3, view) + // Validator 1 signs a timeout payload where its high QC view is equal to the aggQC's high QC view. + validator1TimeoutPayload := GetTimeoutSignaturePayload(view+2, view) validator1TimeoutSignature, err := validatorPrivateKey1.Sign(validator1TimeoutPayload[:]) require.NoError(t, err) - // Let's have validator 2 sign a timeout payload where high QC is for higher view. - validator2TimeoutPayload := GetTimeoutSignaturePayload(view+3, view+1) + + // Validator 2 signs a timeout payload where its high QC view is lower than the aggQC's high QC view. + validator2TimeoutPayload := GetTimeoutSignaturePayload(view+2, view-1) validator2TimeoutSignature, err := validatorPrivateKey2.Sign(validator2TimeoutPayload[:]) require.NoError(t, err) + // Validator 3 does not time out. 
+ timeoutAggSig, err := bls.AggregateSignatures([]*bls.Signature{validator1TimeoutSignature, validator2TimeoutSignature}) require.NoError(t, err) qc := aggregateQuorumCertificate{ - view: view + 3, + view: view + 2, highQC: &highQC, - highQCViews: []uint64{view, view + 1}, + highQCViews: []uint64{view, view - 1, view - 1}, aggregatedSignature: &aggregatedSignature{ signersList: bitset.NewBitset().FromBytes([]byte{0x3}), // 0b0011, which represents validators 1 and 2 signature: timeoutAggSig, @@ -214,6 +224,94 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { } require.False(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators, validators)) } + + // Happy Path: Test with 4/5 super-majority stake + { + // Validator 1 signs a timeout payload where its high QC view is equal to the aggQC's high QC view. + validator1TimeoutPayload := GetTimeoutSignaturePayload(view+2, view) + validator1TimeoutSignature, err := validatorPrivateKey1.Sign(validator1TimeoutPayload[:]) + require.NoError(t, err) + + // Validator 2 signs a timeout payload where its high QC view is lower than the aggQC's high QC view. + validator2TimeoutPayload := GetTimeoutSignaturePayload(view+2, view-1) + validator2TimeoutSignature, err := validatorPrivateKey2.Sign(validator2TimeoutPayload[:]) + require.NoError(t, err) + + // Validator 3 does not time out. 
+ + timeoutAggSig, err := bls.AggregateSignatures([]*bls.Signature{validator1TimeoutSignature, validator2TimeoutSignature}) + require.NoError(t, err) + qc := aggregateQuorumCertificate{ + view: view + 2, + highQC: &highQC, + highQCViews: []uint64{view, view - 1}, + aggregatedSignature: &aggregatedSignature{ + signersList: bitset.NewBitset().FromBytes([]byte{0x3}), // 0b0011, which represents validators 1 and 2 + signature: timeoutAggSig, + }, + } + require.True(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators, validators)) + } + + // Happy Path: Test with 4/5 super-majority stake, where the highQC views slice has a 0 due to validator 2 + // not timing out. + { + // Validator 1 signs a timeout payload where its high QC view is equal to the aggQC's high QC view. + validator1TimeoutPayload := GetTimeoutSignaturePayload(view+2, view) + validator1TimeoutSignature, err := validatorPrivateKey1.Sign(validator1TimeoutPayload[:]) + require.NoError(t, err) + + // Validator 2 does not time out. + + // Validator 3 signs a timeout payload where its high QC view is lower than the aggQC's high QC view. + validator3TimeoutPayload := GetTimeoutSignaturePayload(view+2, view-1) + validator3TimeoutSignature, err := validatorPrivateKey3.Sign(validator3TimeoutPayload[:]) + require.NoError(t, err) + + timeoutAggSig, err := bls.AggregateSignatures([]*bls.Signature{validator1TimeoutSignature, validator3TimeoutSignature}) + require.NoError(t, err) + qc := aggregateQuorumCertificate{ + view: view + 2, + highQC: &highQC, + highQCViews: []uint64{view, 0, view - 1}, // The 0 is due to validator 2 not timing out. + aggregatedSignature: &aggregatedSignature{ + signersList: bitset.NewBitset().FromBytes([]byte{0x5}), // 0b0101, which represents validators 1 and 3 + signature: timeoutAggSig, + }, + } + require.True(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators, validators)) + } + + // Happy Path: Test with 5/5 super-majority stake where all validators time out. 
+ { + // Validator 1 signs a timeout payload where its high QC view is equal to the aggQC's high QC view. + validator1TimeoutPayload := GetTimeoutSignaturePayload(view+2, view) + validator1TimeoutSignature, err := validatorPrivateKey1.Sign(validator1TimeoutPayload[:]) + require.NoError(t, err) + + // Validator 2 signs a timeout payload where its high QC view is lower than the aggQC's high QC view. + validator2TimeoutPayload := GetTimeoutSignaturePayload(view+2, view-2) + validator2TimeoutSignature, err := validatorPrivateKey2.Sign(validator2TimeoutPayload[:]) + require.NoError(t, err) + + // Validator 3 signs a timeout payload where its high QC view is lower than the aggQC's high QC view. + validator3TimeoutPayload := GetTimeoutSignaturePayload(view+2, view-1) + validator3TimeoutSignature, err := validatorPrivateKey3.Sign(validator3TimeoutPayload[:]) + require.NoError(t, err) + + timeoutAggSig, err := bls.AggregateSignatures([]*bls.Signature{validator1TimeoutSignature, validator2TimeoutSignature, validator3TimeoutSignature}) + require.NoError(t, err) + qc := aggregateQuorumCertificate{ + view: view + 2, + highQC: &highQC, + highQCViews: []uint64{view, view - 2, view - 1}, + aggregatedSignature: &aggregatedSignature{ + signersList: bitset.NewBitset().FromBytes([]byte{0x7}), // 0b0111, which represents validators 1, 2 and 3 + signature: timeoutAggSig, + }, + } + require.True(t, IsValidSuperMajorityAggregateQuorumCertificate(&qc, validators, validators)) + } } func TestIsProperlyFormedBlock(t *testing.T) { diff --git a/go.mod b/go.mod index d91530157..829ab59d8 100644 --- a/go.mod +++ b/go.mod @@ -21,10 +21,11 @@ require ( github.com/gernest/mention v2.0.0+incompatible github.com/go-pg/pg/v10 v10.10.0 github.com/golang/glog v1.0.0 + github.com/google/uuid v1.2.0 github.com/holiman/uint256 v1.1.1 github.com/mitchellh/go-homedir v1.1.0 github.com/oleiade/lane v1.0.1 - github.com/onflow/flow-go/crypto v0.24.7 + github.com/onflow/crypto v0.25.0 github.com/pkg/errors 
v0.9.1 github.com/pmezard/go-difflib v1.0.0 github.com/robinjoseph08/go-pg-migrations/v3 v3.0.0 @@ -65,7 +66,6 @@ require ( github.com/golang/snappy v0.0.3 // indirect github.com/google/flatbuffers v2.0.0+incompatible // indirect github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5 // indirect - github.com/google/uuid v1.2.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/imdario/mergo v0.3.8 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect @@ -105,6 +105,7 @@ require ( golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect golang.org/x/tools v0.1.5 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + gonum.org/v1/gonum v0.6.1 // indirect google.golang.org/protobuf v1.26.0 // indirect gopkg.in/AlecAivazis/survey.v1 v1.8.7 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect diff --git a/go.sum b/go.sum index 7a4c6513b..23375cc2d 100644 --- a/go.sum +++ b/go.sum @@ -39,6 +39,7 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= @@ -151,6 +152,7 @@ github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYF 
github.com/fergusstrange/embedded-postgres v1.19.0 h1:NqDufJHeA03U7biULlPHZ0pZ10/mDOMKPILEpT50Fyk= github.com/fergusstrange/embedded-postgres v1.19.0/go.mod h1:0B+3bPsMvcNgR9nN+bdM2x9YaNYDnf3ksUqYp1OAub0= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -176,6 +178,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= @@ -292,6 +295,7 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= 
github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= @@ -362,8 +366,8 @@ github.com/oleiade/lane v1.0.1 h1:hXofkn7GEOubzTwNpeL9MaNy8WxolCYb9cInAIeqShU= github.com/oleiade/lane v1.0.1/go.mod h1:IyTkraa4maLfjq/GmHR+Dxb4kCMtEGeb+qmhlrQ5Mk4= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onflow/flow-go/crypto v0.24.7 h1:RCLuB83At4z5wkAyUCF7MYEnPoIIOHghJaODuJyEoW0= -github.com/onflow/flow-go/crypto v0.24.7/go.mod h1:fqCzkIBBMRRkciVrvW21rECKq1oD7Q6u+bCI78lfNX0= +github.com/onflow/crypto v0.25.0 h1:BeWbLsh3ZD13Ej+Uky6kg1PL1ZIVBDVX+2MVBNwqddg= +github.com/onflow/crypto v0.25.0/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= @@ -471,7 +475,6 @@ github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PK github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/supranational/blst v0.3.10 h1:CMciDZ/h4pXDDXQASe8ZGTNKUiVNxVVA5hpci2Uuhuk= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/tinylib/msgp v1.1.2 
h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= @@ -542,12 +545,17 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint 
v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -665,8 +673,10 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -695,6 +705,12 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.1 h1:/LSrTrgZtpbXyAR6+0e152SROCkJJSh7goYWVmdPFGc= +gonum.org/v1/gonum v0.6.1/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib 
v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -779,3 +795,4 @@ mellium.im/sasl v0.2.1 h1:nspKSRg7/SyO0cRGY71OkfHab8tf9kCts6a6oTDut0w= mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ= pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/integration_testing/blocksync_test.go b/integration_testing/blocksync_test.go index 8be96d735..cf077f2c1 100644 --- a/integration_testing/blocksync_test.go +++ b/integration_testing/blocksync_test.go @@ -1,11 +1,6 @@ package integration_testing import ( - "fmt" - "github.com/deso-protocol/core/cmd" - "github.com/deso-protocol/core/lib" - "github.com/stretchr/testify/require" - "os" "testing" ) @@ -16,41 +11,22 @@ import ( // 4. node2 syncs MaxSyncBlockHeight blocks from node1. // 5. compare node1 db matches node2 db. 
func TestSimpleBlockSync(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeBlockSync - - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) // wait for node2 to sync blocks. waitForNodeToFullySync(node2) compareNodesByDB(t, node1, node2, 0) - fmt.Println("Databases match!") - node1.Stop() - node2.Stop() + t.Logf("Databases match!") } // TestSimpleSyncRestart tests if a node can successfully restart while syncing blocks. @@ -62,45 +38,26 @@ func TestSimpleBlockSync(t *testing.T) { // 6. node2 reconnects with node1 and syncs remaining blocks. // 7. compare node1 db matches node2 db. 
func TestSimpleSyncRestart(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeBlockSync - - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) - randomHeight := randomUint32Between(t, 10, config2.MaxSyncBlockHeight) - fmt.Println("Random height for a restart (re-use if test failed):", randomHeight) + randomHeight := randomUint32Between(t, 10, node2.Config.MaxSyncBlockHeight) + t.Logf("Random height for a restart (re-use if test failed): %v", randomHeight) // Reboot node2 at a specific height and reconnect it with node1 - node2, bridge = restartAtHeightAndReconnectNode(t, node2, node1, bridge, randomHeight) + node2 = restartAtHeight(t, node2, randomHeight) waitForNodeToFullySync(node2) compareNodesByDB(t, node1, node2, 0) - fmt.Println("Random restart successful! Random height was", randomHeight) - fmt.Println("Databases match!") - node1.Stop() - node2.Stop() + t.Logf("Random restart successful! 
Random height was: %v", randomHeight) + t.Logf("Databases match!") } // TestSimpleSyncDisconnectWithSwitchingToNewPeer tests if a node can successfully restart while syncing blocks, and @@ -114,60 +71,35 @@ func TestSimpleSyncRestart(t *testing.T) { // 7. compare node1 state matches node2 state. // 8. compare node3 state matches node2 state. func TestSimpleSyncDisconnectWithSwitchingToNewPeer(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - dbDir3 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - defer os.RemoveAll(dbDir3) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeBlockSync - config3 := generateConfig(t, 18002, dbDir3, 10) - config3.SyncType = lib.NodeSyncTypeBlockSync - - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - config3.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - node3 := cmd.NewNode(config3) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) - node3 = startNode(t, node3) // wait for node1 to sync blocks waitForNodeToFullySync(node1) + + node3 := spawnNodeProtocol1(t, 18002, "node3") + node3.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} + node3 = startNode(t, node3) + // wait for node3 to sync blocks waitForNodeToFullySync(node3) - // bridge the nodes together. 
- bridge12 := NewConnectionBridge(node1, node2) - require.NoError(bridge12.Start()) - - randomHeight := randomUint32Between(t, 10, config2.MaxSyncBlockHeight) - fmt.Println("Random height for a restart (re-use if test failed):", randomHeight) - disconnectAtBlockHeight(t, node2, bridge12, randomHeight) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) - // bridge the nodes together. - bridge23 := NewConnectionBridge(node2, node3) - require.NoError(bridge23.Start()) + randomHeight := randomUint32Between(t, 10, node2.Config.MaxSyncBlockHeight) + t.Logf("Random height for a restart (re-use if test failed): %v", randomHeight) - // Reboot node2 at a specific height and reconnect it with node1 - //node2, bridge12 = restartAtHeightAndReconnectNode(t, node2, node1, bridge12, randomHeight) + // Reboot node2 at a specific height and reconnect it with node3 + node2 = shutdownAtHeight(t, node2, randomHeight) + node2.Config.ConnectIPs = []string{"127.0.0.1:18002"} + node2 = startNode(t, node2) waitForNodeToFullySync(node2) compareNodesByDB(t, node1, node2, 0) compareNodesByDB(t, node3, node2, 0) - fmt.Println("Random restart successful! Random height was", randomHeight) - fmt.Println("Databases match!") - node1.Stop() - node2.Stop() - node3.Stop() + t.Logf("Random restart successful! Random height was %v", randomHeight) + t.Logf("Databases match!") } diff --git a/integration_testing/connection_bridge.go b/integration_testing/connection_bridge.go index 1d0228467..b93fabac5 100644 --- a/integration_testing/connection_bridge.go +++ b/integration_testing/connection_bridge.go @@ -13,6 +13,7 @@ import ( "time" ) +// TODO: DEPRECATE // ConnectionBridge is a bidirectional communication channel between two nodes. A bridge creates a pair of inbound and // outbound peers for each of the nodes to handle communication. In total, it creates four peers. 
// @@ -111,13 +112,13 @@ func (bridge *ConnectionBridge) createInboundConnection(node *cmd.Node) *lib.Pee } // This channel is redundant in our setting. - messagesFromPeer := make(chan *lib.ServerMessage) + messagesFromPeer := make(chan *lib.ServerMessage, 100) + donePeerChan := make(chan *lib.Peer, 100) // Because it is an inbound Peer of the node, it is simultaneously a "fake" outbound Peer of the bridge. // Hence, we will mark the _isOutbound parameter as "true" in NewPeer. - peer := lib.NewPeer(conn, true, netAddress, true, - 10000, 0, &lib.DeSoMainnetParams, - messagesFromPeer, nil, nil, lib.NodeSyncTypeAny) - peer.ID = uint64(lib.RandInt64(math.MaxInt64)) + peer := lib.NewPeer(uint64(lib.RandInt64(math.MaxInt64)), conn, true, + netAddress, true, 10000, 0, &lib.DeSoMainnetParams, + messagesFromPeer, nil, nil, lib.NodeSyncTypeAny, donePeerChan) return peer } @@ -139,27 +140,27 @@ func (bridge *ConnectionBridge) createOutboundConnection(node *cmd.Node, otherNo fmt.Println("createOutboundConnection: Got a connection from remote:", conn.RemoteAddr().String(), "on listener:", ll.Addr().String()) - na, err := lib.IPToNetAddr(conn.RemoteAddr().String(), otherNode.Server.GetConnectionManager().AddrMgr, - otherNode.Params) - messagesFromPeer := make(chan *lib.ServerMessage) - peer := lib.NewPeer(conn, false, na, false, - 10000, 0, bridge.nodeB.Params, - messagesFromPeer, nil, nil, lib.NodeSyncTypeAny) - peer.ID = uint64(lib.RandInt64(math.MaxInt64)) + addrMgr := addrmgr.New("", net.LookupIP) + na, err := lib.IPToNetAddr(conn.RemoteAddr().String(), addrMgr, otherNode.Params) + messagesFromPeer := make(chan *lib.ServerMessage, 100) + donePeerChan := make(chan *lib.Peer, 100) + peer := lib.NewPeer(uint64(lib.RandInt64(math.MaxInt64)), conn, + false, na, false, 10000, 0, bridge.nodeB.Params, + messagesFromPeer, nil, nil, lib.NodeSyncTypeAny, donePeerChan) bridge.newPeerChan <- peer //} }(ll) // Make the provided node to make an outbound connection to our listener. 
- netAddress, _ := lib.IPToNetAddr(ll.Addr().String(), addrmgr.New("", net.LookupIP), &lib.DeSoMainnetParams) - fmt.Println("createOutboundConnection: IP:", netAddress.IP, "Port:", netAddress.Port) - go node.Server.GetConnectionManager().ConnectPeer(nil, netAddress) + addrMgr := addrmgr.New("", net.LookupIP) + addr, _ := lib.IPToNetAddr(ll.Addr().String(), addrMgr, node.Params) + go node.Server.GetConnectionManager().DialOutboundConnection(addr, uint64(lib.RandInt64(math.MaxInt64))) } // getVersionMessage simulates a version message that the provided node would have sent. func (bridge *ConnectionBridge) getVersionMessage(node *cmd.Node) *lib.MsgDeSoVersion { ver := lib.NewMessage(lib.MsgTypeVersion).(*lib.MsgDeSoVersion) - ver.Version = node.Params.ProtocolVersion + ver.Version = node.Params.ProtocolVersion.ToUint64() ver.TstampSecs = time.Now().Unix() ver.Nonce = uint64(lib.RandInt64(math.MaxInt64)) ver.UserAgent = node.Params.UserAgent @@ -172,27 +173,43 @@ func (bridge *ConnectionBridge) getVersionMessage(node *cmd.Node) *lib.MsgDeSoVe } if node.Server != nil { - ver.StartBlockHeight = uint32(node.Server.GetBlockchain().BlockTip().Header.Height) + ver.LatestBlockHeight = node.Server.GetBlockchain().BlockTip().Header.Height } ver.MinFeeRateNanosPerKB = node.Config.MinFeerate return ver } +func ReadWithTimeout(readFunc func() error, readTimeout time.Duration) error { + errChan := make(chan error) + go func() { + errChan <- readFunc() + }() + select { + case err := <-errChan: + { + return err + } + case <-time.After(readTimeout): + { + return fmt.Errorf("ReadWithTimeout: Timed out reading message") + } + } +} + // startConnection starts the connection by performing version and verack exchange with // the provided connection, pretending to be the otherNode. func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode *cmd.Node) error { // Prepare the version message. 
versionMessage := bridge.getVersionMessage(otherNode) - connection.VersionNonceSent = versionMessage.Nonce // Send the version message. - fmt.Println("Sending version message:", versionMessage, versionMessage.StartBlockHeight) + fmt.Println("Sending version message:", versionMessage, versionMessage.LatestBlockHeight) if err := connection.WriteDeSoMessage(versionMessage); err != nil { return err } // Wait for a response to the version message. - if err := connection.ReadWithTimeout( + if err := ReadWithTimeout( func() error { msg, err := connection.ReadDeSoMessage() if err != nil { @@ -204,7 +221,6 @@ func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode return err } - connection.VersionNonceReceived = verMsg.Nonce connection.TimeConnected = time.Unix(verMsg.TstampSecs, 0) connection.TimeOffsetSecs = verMsg.TstampSecs - time.Now().Unix() return nil @@ -215,7 +231,6 @@ func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode // Now prepare the verack message. verackMsg := lib.NewMessage(lib.MsgTypeVerack) - verackMsg.(*lib.MsgDeSoVerack).Nonce = connection.VersionNonceReceived // And send it to the connection. if err := connection.WriteDeSoMessage(verackMsg); err != nil { @@ -223,7 +238,7 @@ func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode } // And finally wait for connection's response to the verack message. - if err := connection.ReadWithTimeout( + if err := ReadWithTimeout( func() error { msg, err := connection.ReadDeSoMessage() if err != nil { @@ -233,17 +248,11 @@ func (bridge *ConnectionBridge) startConnection(connection *lib.Peer, otherNode if msg.GetMsgType() != lib.MsgTypeVerack { return fmt.Errorf("message is not verack! 
Type: %v", msg.GetMsgType()) } - verackMsg := msg.(*lib.MsgDeSoVerack) - if verackMsg.Nonce != connection.VersionNonceSent { - return fmt.Errorf("verack message nonce doesn't match (received: %v, sent: %v)", - verackMsg.Nonce, connection.VersionNonceSent) - } return nil }, lib.DeSoMainnetParams.VersionNegotiationTimeout); err != nil { return err } - connection.VersionNegotiated = true return nil } diff --git a/integration_testing/hypersync_test.go b/integration_testing/hypersync_test.go index aad90ee0e..b76b1db48 100644 --- a/integration_testing/hypersync_test.go +++ b/integration_testing/hypersync_test.go @@ -1,11 +1,7 @@ package integration_testing import ( - "fmt" - "github.com/deso-protocol/core/cmd" "github.com/deso-protocol/core/lib" - "github.com/stretchr/testify/require" - "os" "testing" ) @@ -16,35 +12,19 @@ import ( // 4. node2 hypersyncs from node1 // 5. once done, compare node1 state, db, and checksum matches node2. func TestSimpleHyperSync(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeHyperSync - - config1.HyperSync = true - config2.HyperSync = true - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. 
- bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSync + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) // wait for node2 to sync blocks. waitForNodeToFullySync(node2) @@ -52,9 +32,7 @@ func TestSimpleHyperSync(t *testing.T) { compareNodesByState(t, node1, node2, 0) //compareNodesByDB(t, node1, node2, 0) compareNodesByChecksum(t, node1, node2) - fmt.Println("Databases match!") - node1.Stop() - node2.Stop() + t.Logf("Databases match!") } // TestHyperSyncFromHyperSyncedNode test if a node can successfully hypersync from another hypersynced node: @@ -65,49 +43,28 @@ func TestSimpleHyperSync(t *testing.T) { // 5. once done, bridge node3 and node2 so that node3 hypersyncs from node2. // 6. compare node1 state, db, and checksum matches node2, and node3. func TestHyperSyncFromHyperSyncedNode(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - dbDir3 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - defer os.RemoveAll(dbDir3) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeHyperSyncArchival - config3 := generateConfig(t, 18002, dbDir3, 10) - config3.SyncType = lib.NodeSyncTypeHyperSyncArchival - - config1.HyperSync = true - config2.HyperSync = true - config3.HyperSync = true - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - node3 := cmd.NewNode(config3) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) - node3 = startNode(t, node3) 
// wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge12 := NewConnectionBridge(node1, node2) - require.NoError(bridge12.Start()) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) // wait for node2 to sync blocks. waitForNodeToFullySync(node2) - // bridge node3 to node2 to kick off hyper sync from a hyper synced node - bridge23 := NewConnectionBridge(node2, node3) - require.NoError(bridge23.Start()) + node3 := spawnNodeProtocol1(t, 18002, "node3") + node3.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node3.Config.HyperSync = true + node3.Config.ConnectIPs = []string{"127.0.0.1:18001"} + node3 = startNode(t, node3) // wait for node2 to sync blocks. waitForNodeToFullySync(node3) @@ -121,10 +78,7 @@ func TestHyperSyncFromHyperSyncedNode(t *testing.T) { //compareNodesByDB(t, node2, node3, 0) compareNodesByChecksum(t, node2, node3) - fmt.Println("Databases match!") - node1.Stop() - node2.Stop() - node3.Stop() + t.Logf("Databases match!") } // TestSimpleHyperSyncRestart test if a node can successfully hyper sync from another node: @@ -135,51 +89,34 @@ func TestHyperSyncFromHyperSyncedNode(t *testing.T) { // 5. node2 reconnects to node1 and hypersyncs again. // 6. Once node2 finishes sync, compare node1 state, db, and checksum matches node2. 
func TestSimpleHyperSyncRestart(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config2 := generateConfig(t, 18001, dbDir2, 10) - - config1.HyperSync = true - config1.SyncType = lib.NodeSyncTypeBlockSync - config2.HyperSync = true - config2.SyncType = lib.NodeSyncTypeHyperSyncArchival - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) syncIndex := randomUint32Between(t, 0, uint32(len(lib.StatePrefixes.StatePrefixesList))) syncPrefix := lib.StatePrefixes.StatePrefixesList[syncIndex] - fmt.Println("Random sync prefix for a restart (re-use if test failed):", syncPrefix) + t.Logf("Random sync prefix for a restart (re-use if test failed): %v", syncPrefix) + // Reboot node2 at a specific sync prefix and reconnect it with node1 - node2, bridge = restartAtSyncPrefixAndReconnectNode(t, node2, node1, bridge, syncPrefix) + node2 = restartAtSyncPrefix(t, node2, syncPrefix) // wait for node2 to sync blocks. waitForNodeToFullySync(node2) compareNodesByState(t, node1, node2, 0) //compareNodesByDB(t, node1, node2, 0) compareNodesByChecksum(t, node1, node2) - fmt.Println("Random restart successful! 
Random sync prefix was", syncPrefix) - fmt.Println("Databases match!") - node1.Stop() - node2.Stop() + t.Logf("Random restart successful! Random sync prefix was: %v", syncPrefix) + t.Logf("Databases match!") } // TestSimpleHyperSyncDisconnectWithSwitchingToNewPeer tests if a node can successfully restart while hypersyncing. @@ -190,57 +127,34 @@ func TestSimpleHyperSyncRestart(t *testing.T) { // 5. after restart, bridge node2 with node3 and resume hypersync. // 6. once node2 finishes, compare node1, node2, node3 state, db, and checksums are identical. func TestSimpleHyperSyncDisconnectWithSwitchingToNewPeer(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - dbDir3 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - defer os.RemoveAll(dbDir3) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeHyperSyncArchival - config3 := generateConfig(t, 18002, dbDir3, 10) - config3.SyncType = lib.NodeSyncTypeBlockSync - - config1.HyperSync = true - config2.HyperSync = true - config3.HyperSync = true - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - config3.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - node3 := cmd.NewNode(config3) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) - node3 = startNode(t, node3) - // wait for node1 to sync blocks waitForNodeToFullySync(node1) + + node3 := spawnNodeProtocol1(t, 18002, "node3") + node3.Config.HyperSync = true + node3.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node3 = startNode(t, node3) // wait for node3 to sync blocks waitForNodeToFullySync(node3) - // bridge the nodes 
together. - bridge12 := NewConnectionBridge(node1, node2) - require.NoError(bridge12.Start()) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) + // Reboot node2 at a specific height and reconnect it with node1 syncIndex := randomUint32Between(t, 0, uint32(len(lib.StatePrefixes.StatePrefixesList))) syncPrefix := lib.StatePrefixes.StatePrefixesList[syncIndex] - fmt.Println("Random prefix for a restart (re-use if test failed):", syncPrefix) - disconnectAtSyncPrefix(t, node2, bridge12, syncPrefix) - - // bridge the nodes together. - bridge23 := NewConnectionBridge(node2, node3) - require.NoError(bridge23.Start()) + t.Logf("Random prefix for a restart (re-use if test failed): %v", syncPrefix) + node2 = shutdownAtSyncPrefix(t, node2, syncPrefix) + node2.Config.ConnectIPs = []string{"127.0.0.1:18002"} + node2 = startNode(t, node2) - // Reboot node2 at a specific height and reconnect it with node1 - //node2, bridge12 = restartAtHeightAndReconnectNode(t, node2, node1, bridge12, randomHeight) // wait for node2 to sync blocks. waitForNodeToFullySync(node2) @@ -253,11 +167,8 @@ func TestSimpleHyperSyncDisconnectWithSwitchingToNewPeer(t *testing.T) { compareNodesByState(t, node1, node2, 0) //compareNodesByDB(t, node1, node2, 0) compareNodesByChecksum(t, node1, node2) - fmt.Println("Random restart successful! Random sync prefix was", syncPrefix) - fmt.Println("Databases match!") - node1.Stop() - node2.Stop() - node3.Stop() + t.Logf("Random restart successful! Random sync prefix was: %v", syncPrefix) + t.Logf("Databases match!") } // TODO: disconnecting the provider peer during hypersync doesn't work. 
@@ -311,92 +222,49 @@ func TestSimpleHyperSyncDisconnectWithSwitchingToNewPeer(t *testing.T) { //} func TestArchivalMode(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config2 := generateConfig(t, 18001, dbDir2, 10) - - config1.HyperSync = true - config2.HyperSync = true - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - config1.SyncType = lib.NodeSyncTypeBlockSync - config2.SyncType = lib.NodeSyncTypeHyperSyncArchival - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) // wait for node2 to sync blocks. 
waitForNodeToFullySync(node2) compareNodesByDB(t, node1, node2, 0) - - //compareNodesByDB(t, node1, node2, 0) compareNodesByChecksum(t, node1, node2) - fmt.Println("Databases match!") - node1.Stop() - node2.Stop() + t.Logf("Databases match!") } func TestBlockSyncFromArchivalModeHyperSync(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - dbDir3 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - defer os.RemoveAll(dbDir3) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config2 := generateConfig(t, 18001, dbDir2, 10) - config3 := generateConfig(t, 18002, dbDir3, 10) - - config1.HyperSync = true - config1.SyncType = lib.NodeSyncTypeBlockSync - config2.HyperSync = true - config2.SyncType = lib.NodeSyncTypeHyperSyncArchival - config3.HyperSync = false - config3.SyncType = lib.NodeSyncTypeBlockSync - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - node3 := cmd.NewNode(config3) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) - node3 = startNode(t, node3) - // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge12 := NewConnectionBridge(node1, node2) - require.NoError(bridge12.Start()) - + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) // wait for node2 to sync blocks. 
waitForNodeToFullySync(node2) - bridge23 := NewConnectionBridge(node2, node3) - require.NoError(bridge23.Start()) - + node3 := spawnNodeProtocol1(t, 18002, "node3") + node3.Config.SyncType = lib.NodeSyncTypeBlockSync + node3.Config.HyperSync = true + node3.Config.ConnectIPs = []string{"127.0.0.1:18001"} + node3 = startNode(t, node3) // wait for node3 to sync blocks. waitForNodeToFullySync(node3) @@ -405,7 +273,5 @@ func TestBlockSyncFromArchivalModeHyperSync(t *testing.T) { //compareNodesByDB(t, node1, node2, 0) compareNodesByChecksum(t, node1, node2) - fmt.Println("Databases match!") - node1.Stop() - node2.Stop() + t.Logf("Databases match!") } diff --git a/integration_testing/migrations_test.go b/integration_testing/migrations_test.go index b0a692b52..067a2f3b6 100644 --- a/integration_testing/migrations_test.go +++ b/integration_testing/migrations_test.go @@ -1,64 +1,39 @@ package integration_testing import ( - "fmt" - "github.com/deso-protocol/core/cmd" - "github.com/deso-protocol/core/lib" "github.com/stretchr/testify/require" - "os" "testing" ) // TODO: Add an encoder migration height in constants.go then modify some // random struct like UtxoEntry. Until we have a migration, we can't fully test this. 
func TestEncoderMigrations(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeHyperSync - - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - config1.HyperSync = true - config2.HyperSync = true - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.HyperSync = true + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} node1 = startNode(t, node1) - node2 = startNode(t, node2) - // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) - + node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.HyperSync = true + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2 = startNode(t, node2) // wait for node2 to sync blocks. 
waitForNodeToFullySync(node2) - fmt.Println("Chain state and operation channel", node2.Server.GetBlockchain().ChainState(), + t.Logf("Chain state and operation channel (state: %v), (len: %v)", node2.Server.GetBlockchain().ChainState(), len(node2.Server.GetBlockchain().Snapshot().OperationChannel.OperationChannel)) compareNodesByState(t, node1, node2, 0) - fmt.Println("node1 checksum:", computeNodeStateChecksum(t, node1, 1500)) - fmt.Println("node2 checksum:", computeNodeStateChecksum(t, node2, 1500)) + t.Logf("node1 checksum: %v", computeNodeStateChecksum(t, node1, 1500)) + t.Logf("node2 checksum: %v", computeNodeStateChecksum(t, node2, 1500)) checksum1, err := node1.Server.GetBlockchain().Snapshot().Checksum.ToBytes() - require.NoError(err) + require.NoError(t, err) checksum2, err := node2.Server.GetBlockchain().Snapshot().Checksum.ToBytes() - require.NoError(err) - fmt.Println("node1 server checksum:", checksum1) - fmt.Println("node2 server checksum:", checksum2) + require.NoError(t, err) + t.Logf("node1 server checksum: %v", checksum1) + t.Logf("node2 server checksum: %v", checksum2) compareNodesByChecksum(t, node1, node2) - fmt.Println("Databases match!") - node1.Stop() - node2.Stop() + t.Logf("Databases match!") } diff --git a/integration_testing/mining_test.go b/integration_testing/mining_test.go index 49a23333c..facbce226 100644 --- a/integration_testing/mining_test.go +++ b/integration_testing/mining_test.go @@ -1,37 +1,22 @@ package integration_testing import ( - "github.com/deso-protocol/core/cmd" "github.com/deso-protocol/core/lib" - "github.com/stretchr/testify/require" - "os" "testing" ) // TestSimpleBlockSync test if a node can mine blocks on regtest func TestRegtestMiner(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - defer os.RemoveAll(dbDir1) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config1.Params = &lib.DeSoTestnetParams - 
config1.MaxSyncBlockHeight = 0 - config1.MinerPublicKeys = []string{"tBCKVERmG9nZpHTk2AVPqknWc1Mw9HHAnqrTpW1RnXpXMQ4PsQgnmV"} - - config1.Regtest = true - - node1 := cmd.NewNode(config1) + node1 := spawnNodeProtocol1(t, 18000, "node1") + params := lib.DeSoTestnetParams + node1.Config.Params = ¶ms + node1.Params = ¶ms + node1.Config.MaxSyncBlockHeight = 0 + node1.Config.MinerPublicKeys = []string{"tBCKVERmG9nZpHTk2AVPqknWc1Mw9HHAnqrTpW1RnXpXMQ4PsQgnmV"} + node1.Config.Regtest = true node1 = startNode(t, node1) // wait for node1 to sync blocks mineHeight := uint32(40) - listener := make(chan bool) - listenForBlockHeight(t, node1, mineHeight, listener) - <-listener - - node1.Stop() + <-listenForBlockHeight(node1, mineHeight) } diff --git a/integration_testing/network_manager_routines_test.go b/integration_testing/network_manager_routines_test.go new file mode 100644 index 000000000..b16b1a19f --- /dev/null +++ b/integration_testing/network_manager_routines_test.go @@ -0,0 +1,632 @@ +package integration_testing + +import ( + "fmt" + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/cmd" + "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/consensus" + "github.com/deso-protocol/core/lib" + "github.com/stretchr/testify/require" + "github.com/tyler-smith/go-bip39" + "testing" + "time" +) + +func TestConnectionControllerInitiatePersistentConnections(t *testing.T) { + // NonValidator Node1 will set its --connect-ips to two non-validators node2 and node3, + // and two validators node4 and node5. 
+ node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + blsSeedPhrase5, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsSeedPhrase5) + + node2 = startNode(t, node2) + node3 = startNode(t, node3) + node4 = startNode(t, node4) + node5 = startNode(t, node5) + + node1.Config.ConnectIPs = []string{ + node2.Listeners[0].Addr().String(), + node3.Listeners[0].Addr().String(), + node4.Listeners[0].Addr().String(), + node5.Listeners[0].Addr().String(), + } + node1 = startNode(t, node1) + // Let node1 connect to its connect-ips. + time.Sleep(1 * node1.Config.Params.NetworkManagerRefreshDuration) + // Now set the active validators to node4 and node5. + activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node4, node5) + setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5) + waitForNonValidatorOutboundConnection(t, node1, node2) + waitForNonValidatorOutboundConnection(t, node1, node3) + waitForValidatorConnection(t, node1, node4) + waitForValidatorConnection(t, node1, node5) + waitForValidatorConnection(t, node4, node5) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 4, 2, 2, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node2, 1, 0, 0, 1) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node3, 1, 0, 0, 1) + // The allCount for node4 and node5 should be 3, since they'll both open an outbound connection to each other. + // Then the validatorCount should be 1, since the GetAllValidators call used de-duplicates RemoteNodes with the same public keys. 
+ waitForCountRemoteNodeIndexerHandshakeCompleted(t, node4, 3, 1, 0, 1) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node5, 3, 1, 0, 1) + node1.Stop() + t.Logf("Test #1 passed | Successfully run non-validator node1 with --connect-ips set to node2, node3, node4, node5") + + // Now try again with a validator node6, with connect-ips set to node2, node3, node4, node5. + blsSeedPhrase6, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node6 := spawnValidatorNodeProtocol2(t, 18005, "node6", blsSeedPhrase6) + node6.Config.ConnectIPs = []string{ + node2.Listeners[0].Addr().String(), + node3.Listeners[0].Addr().String(), + node4.Listeners[0].Addr().String(), + node5.Listeners[0].Addr().String(), + } + node6 = startNode(t, node6) + // Let node6 connect to its connect-ips. + time.Sleep(1 * node1.Config.Params.NetworkManagerRefreshDuration) + activeValidatorsMap = getActiveValidatorsMapWithValidatorNodes(t, node4, node5, node6) + setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6) + waitForNonValidatorOutboundConnection(t, node6, node2) + waitForNonValidatorOutboundConnection(t, node6, node3) + waitForValidatorConnection(t, node6, node4) + waitForValidatorConnection(t, node6, node5) + waitForValidatorConnection(t, node4, node5) + // The allCount for node6 should be 6, since it will have 4 validator (inbound+outbound) connections between node4 and node5. + // The validatorCount should be 2, since the GetAllValidators call used de-duplicates RemoteNodes with the same public keys. + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node6, 6, 2, 2, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node2, 1, 1, 0, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node3, 1, 1, 0, 0) + // The allCount for node4 and node5 should be 4, since they'll both open an outbound connection to each other. 
+ // The validatorCount should be 2, since the GetAllValidators call used de-duplicates RemoteNodes with the same public keys. + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node4, 4, 2, 0, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node5, 4, 2, 0, 0) + t.Logf("Test #2 passed | Successfully run validator node6 with --connect-ips set to node2, node3, node4, node5") +} + +func TestConnectionControllerNonValidatorCircularConnectIps(t *testing.T) { + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + + node1.Config.ConnectIPs = []string{"127.0.0.1:18001"} + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + + node1 = startNode(t, node1) + node2 = startNode(t, node2) + + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 2, 0, 1, 1) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node2, 2, 0, 1, 1) +} + +func TestConnectionControllerValidatorCircularConnectIps(t *testing.T) { + blsSeedPhrase1, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsSeedPhrase1) + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + + node1.Config.ConnectIPs = []string{"127.0.0.1:18001"} + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node1 = startNode(t, node1) + node2 = startNode(t, node2) + + time.Sleep(1 * node1.Config.Params.NetworkManagerRefreshDuration) + activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node1, node2) + setActiveValidators(activeValidatorsMap, node1, node2) + // The allCount for node1 and node2 should be 2, as they should have an outbound and an inbound RemoteNode + // for each other. And the validatorCount should be 1, because the GetAllValidators call used de-duplicates + // RemoteNodes with the same public keys. 
+ waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 2, 1, 0, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node2, 2, 1, 0, 0) +} + +func TestNetworkManagerPersistentConnectorReconnect(t *testing.T) { + // Ensure that a node that is disconnected from a persistent connection will be reconnected to. + // Spawn three nodes: a non-validator node1, and node2, and a validator node3. Then set node1 connectIps + // to node2, node3, as well as a non-existing ip. Then we will stop node2, and wait for node1 to drop the + // connection. Then we will restart node2, and wait for node1 to reconnect to node2. We will repeat this + // process for node3. + + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + // Set TargetOutboundPeers to 0 to ensure the non-validator connector doesn't interfere. + node1.Config.TargetOutboundPeers = 0 + + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + blsSeedPhrase3, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase3) + + node2 = startNode(t, node2) + node3 = startNode(t, node3) + + node1.Config.ConnectIPs = []string{ + node2.Listeners[0].Addr().String(), + node3.Listeners[0].Addr().String(), + "127.0.0.1:18003", + } + node1 = startNode(t, node1) + // Let node1 connect to its connect-ips. + time.Sleep(1 * node1.Config.Params.NetworkManagerRefreshDuration) + activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node3) + setActiveValidators(activeValidatorsMap, node1, node2, node3) + + waitForNonValidatorOutboundConnection(t, node1, node2) + waitForValidatorConnection(t, node1, node3) + waitForCountRemoteNodeIndexer(t, node1, 3, 1, 2, 0) + + node2.Stop() + waitForCountRemoteNodeIndexer(t, node1, 2, 1, 1, 0) + // node1 should reopen the connection to node2, and it should be re-indexed as a non-validator (attempted). 
+ waitForCountRemoteNodeIndexer(t, node1, 3, 1, 2, 0) + node2 = startNode(t, node2) + setActiveValidators(activeValidatorsMap, node2) + waitForCountRemoteNodeIndexer(t, node1, 3, 1, 2, 0) + t.Logf("Test #1 passed | Successfully run reconnect test with non-validator node1 with --connect-ips for node2") + + // Now we will do the same for node3. + node3.Stop() + waitForCountRemoteNodeIndexer(t, node1, 2, 0, 2, 0) + // node1 should reopen the connection to node3, and it should be re-indexed as a non-validator (attempted). + waitForCountRemoteNodeIndexer(t, node1, 3, 0, 3, 0) + node3 = startNode(t, node3) + setActiveValidators(activeValidatorsMap, node3) + waitForValidatorConnection(t, node1, node3) + waitForCountRemoteNodeIndexer(t, node1, 3, 1, 2, 0) + t.Logf("Test #2 passed | Successfully run reconnect test with non-validator node1 with --connect-ips for node3") +} + +func TestConnectionControllerValidatorConnector(t *testing.T) { + // Spawn 5 validators node1, node2, node3, node4, node5 and two non-validators node6 and node7. + // All the validators are initially in the validator set. And later, node1 and node2 will be removed from the + // validator set. Then, make node3 inactive, and node2 active again. Then, make all the validators inactive. + // Make node6, and node7 connect-ips to all the validators. 
+ + blsSeedPhrase1, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsSeedPhrase1) + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + blsSeedPhrase3, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase3) + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + blsSeedPhrase5, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsSeedPhrase5) + + node6 := spawnNonValidatorNodeProtocol2(t, 18005, "node6") + node7 := spawnNonValidatorNodeProtocol2(t, 18006, "node7") + + node1 = startNode(t, node1) + node2 = startNode(t, node2) + node3 = startNode(t, node3) + node4 = startNode(t, node4) + node5 = startNode(t, node5) + + node6.Config.ConnectIPs = []string{ + node1.Listeners[0].Addr().String(), + node2.Listeners[0].Addr().String(), + node3.Listeners[0].Addr().String(), + node4.Listeners[0].Addr().String(), + node5.Listeners[0].Addr().String(), + } + node7.Config.ConnectIPs = node6.Config.ConnectIPs + node6 = startNode(t, node6) + node7 = startNode(t, node7) + // Let node6 and node7 connect to their connect-ips. + time.Sleep(1 * node1.Config.Params.NetworkManagerRefreshDuration) + activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node1, node2, node3, node4, node5) + setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6, node7) + + // Verify full graph between active validators. + waitForValidatorFullGraph(t, node1, node2, node3, node4, node5) + // Verify connections of non-validators. 
+ for _, nonValidator := range []*cmd.Node{node6, node7} { + waitForValidatorConnectionOneWay(t, nonValidator, node1, node2, node3, node4, node5) + } + // Verify connections of initial validators. + for _, validator := range []*cmd.Node{node1, node2, node3, node4, node5} { + waitForNonValidatorInboundConnection(t, validator, node6) + waitForNonValidatorInboundConnection(t, validator, node7) + } + // Verify connection counts of active validators. + for _, validator := range []*cmd.Node{node1, node2, node3, node4, node5} { + // The allCount for each validator should be 10, since we'll have 8 validator connections total and 2 non-validator connections. + // Note this means only 4 unique validators, which is the expected validatorCount. + waitForMinNonValidatorCountRemoteNodeIndexer(t, validator, 10, 4, 0, 2) + } + // NOOP Verify connection counts of inactive validators. + // Verify connection counts of non-validators. + waitForCountRemoteNodeIndexer(t, node6, 5, 5, 0, 0) + waitForCountRemoteNodeIndexer(t, node7, 5, 5, 0, 0) + t.Logf("Test #1 passed | Successfully run validators node1, node2, node3, node4, node5; non-validators node6, node7") + + // Remove node1 and node2 from the validator set. + activeValidatorsMap = getActiveValidatorsMapWithValidatorNodes(t, node3, node4, node5) + setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6, node7) + // Verify full graph between active validators. + waitForValidatorFullGraph(t, node3, node4, node5) + // Verify connections of non-validators. + for _, nonValidator := range []*cmd.Node{node1, node2, node6, node7} { + waitForValidatorConnectionOneWay(t, nonValidator, node3, node4, node5) + } + // Verify connections of initial validators. + for _, validator := range []*cmd.Node{node1, node2, node3, node4, node5} { + waitForNonValidatorInboundConnection(t, validator, node6) + waitForNonValidatorInboundConnection(t, validator, node7) + } + // Verify connections of active validators. 
+ for _, validator := range []*cmd.Node{node3, node4, node5} { + waitForInactiveValidatorConnection(t, validator, node1) + waitForInactiveValidatorConnection(t, validator, node2) + // The allCount for each validator should remain at 10, since no connection should be dropped. + // The validatorCount of unique validators should be 2. + waitForMinNonValidatorCountRemoteNodeIndexer(t, validator, 10, 2, 0, 2) + } + // Verify connection counts of inactive validators. + for _, inactiveValidator := range []*cmd.Node{node1, node2} { + // The validatorCount of unique validators should be 3, since inactiveValidator is connected to all validators. + waitForMinNonValidatorCountRemoteNodeIndexer(t, inactiveValidator, 10, 3, 0, 2) + } + // Verify connection counts of non-validators. + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node6, 5, 3, 2, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node7, 5, 3, 2, 0) + t.Logf("Test #2 passed | Successfully run validators node3, node4, node5; inactive-validators node1, node2; " + + "non-validators node6, node7") + + // Remove node3 from the validator set. Make node1 active again. + activeValidatorsMap = getActiveValidatorsMapWithValidatorNodes(t, node1, node4, node5) + setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6, node7) + // Verify full graph between active validators. + waitForValidatorFullGraph(t, node1, node4, node5) + // Verify connections of non-validators. + for _, nonValidator := range []*cmd.Node{node2, node3, node6, node7} { + waitForValidatorConnectionOneWay(t, nonValidator, node1, node4, node5) + } + // Verify connections of initial validators. + for _, validator := range []*cmd.Node{node1, node2, node3, node4, node5} { + waitForNonValidatorInboundConnection(t, validator, node6) + waitForNonValidatorInboundConnection(t, validator, node7) + } + // Verify connections of active validators. 
+	for _, validator := range []*cmd.Node{node1, node4, node5} {
+		waitForInactiveValidatorConnection(t, validator, node2)
+		waitForInactiveValidatorConnection(t, validator, node3)
+		// The allCount for each validator should remain at 10, since no connection should be dropped.
+		// The validatorCount of unique validators should be 2.
+		waitForMinNonValidatorCountRemoteNodeIndexer(t, validator, 10, 2, 0, 2)
+	}
+	// Verify connection counts of inactive validators.
+	for _, inactiveValidator := range []*cmd.Node{node2, node3} {
+		// The allCount for each inactiveValidator should remain at 10, since no connection should be dropped.
+		// The validatorCount of unique validators should be 3, since inactiveValidator is connected to all active validators.
+		waitForMinNonValidatorCountRemoteNodeIndexer(t, inactiveValidator, 10, 3, 0, 2)
+	}
+	// Verify connection counts of non-validators.
+	waitForCountRemoteNodeIndexerHandshakeCompleted(t, node6, 5, 3, 2, 0)
+	waitForCountRemoteNodeIndexerHandshakeCompleted(t, node7, 5, 3, 2, 0)
+	t.Logf("Test #3 passed | Successfully run validators node1, node4, node5; inactive validators node2, node3; " +
+		"non-validators node6, node7")
+
+	// Make all validators inactive.
+	activeValidatorsMap = getActiveValidatorsMapWithValidatorNodes(t)
+	setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6, node7)
+	// NOOP Verify full graph between active validators.
+	// NOOP Verify connections of non-validators.
+	// Verify connections of initial validators.
+	for _, validator := range []*cmd.Node{node1, node2, node3, node4, node5} {
+		waitForNonValidatorInboundConnection(t, validator, node6)
+		waitForNonValidatorInboundConnection(t, validator, node7)
+	}
+	// NOOP Verify connections of active validators.
+	// Verify connections and counts of inactive validators.
+ inactiveValidators := []*cmd.Node{node1, node2, node3, node4, node5} + for ii := 0; ii < len(inactiveValidators); ii++ { + for jj := ii + 1; jj < len(inactiveValidators); jj++ { + waitForInactiveValidatorConnection(t, inactiveValidators[ii], inactiveValidators[jj]) + } + } + inactiveValidatorsRev := []*cmd.Node{node5, node4, node3, node2, node1} + for ii := 0; ii < len(inactiveValidatorsRev); ii++ { + for jj := ii + 1; jj < len(inactiveValidatorsRev); jj++ { + waitForInactiveValidatorConnection(t, inactiveValidatorsRev[ii], inactiveValidatorsRev[jj]) + } + } + for _, validator := range inactiveValidators { + waitForMinNonValidatorCountRemoteNodeIndexer(t, validator, 10, 0, 0, 2) + } + // Verify connection counts of non-validators. + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node6, 5, 0, 5, 0) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node7, 5, 0, 5, 0) + t.Logf("Test #4 passed | Successfully run inactive validators node1, node2, node3, node4, node5; " + + "non-validators node6, node7") +} + +func TestConnectionControllerValidatorInboundDeduplication(t *testing.T) { + // Spawn a non-validator node1, and two validators node2, node3. The validator nodes will have the same public key. + // Node2 and node3 will not initially be in the validator set. First, node2 will start an outbound connection to + // node1. We wait until the node2 is re-indexed as non-validator by node1, and then we make node3 open an outbound + // connection to node1. We wait until node3 is re-indexed as non-validator by node1. Then, we make node2 and node3 + // join the validator set (i.e. add one entry with the duplicated public key). Now, node1 should disconnect from + // either node2 or node3 because of duplicate public key. 
+ + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase2) + + node1 = startNode(t, node1) + node2 = startNode(t, node2) + node3 = startNode(t, node3) + + nm2 := node2.Server.GetNetworkManager() + require.NoError(t, nm2.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + // First wait for node2 to be indexed as a validator by node1. + waitForValidatorConnection(t, node1, node2) + // Now wait for node2 to be re-indexed as a non-validator. + waitForNonValidatorInboundConnectionDynamic(t, node1, node2, true) + waitForNonValidatorOutboundConnection(t, node2, node1) + + // Now connect node3 to node1. + nm3 := node3.Server.GetNetworkManager() + require.NoError(t, nm3.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + // First wait for node3 to be indexed as a validator by node1. + waitForValidatorConnection(t, node1, node3) + // Now wait for node3 to be re-indexed as a non-validator. + waitForNonValidatorInboundConnectionDynamic(t, node1, node3, true) + waitForNonValidatorOutboundConnection(t, node3, node1) + + // Now add node2 and node3 to the validator set. + activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node2) + setActiveValidators(activeValidatorsMap, node1, node2, node3) + // Now wait for node1 to disconnect from either node2 or node3. + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 1, 1, 0, 0) + t.Logf("Test #1 passed | Successfully run non-validator node1; validators node2, node3 with duplicate public key") +} + +func TestConnectionControllerNonValidatorConnectorOutbound(t *testing.T) { + // Spawn 6 non-validators node1, node2, node3, node4, node5, node6. Set node1's targetOutboundPeers to 3. 
Then make + // node1 create persistent outbound connections to node2, node3, and node4, as well as non-validator connections to + // node5 and node6. + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Config.TargetOutboundPeers = 0 + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") + node4 := spawnNonValidatorNodeProtocol2(t, 18003, "node4") + node5 := spawnNonValidatorNodeProtocol2(t, 18004, "node5") + node6 := spawnNonValidatorNodeProtocol2(t, 18005, "node6") + + node2 = startNode(t, node2) + node3 = startNode(t, node3) + node4 = startNode(t, node4) + node5 = startNode(t, node5) + node6 = startNode(t, node6) + + node1.Config.ConnectIPs = []string{ + node2.Listeners[0].Addr().String(), + node3.Listeners[0].Addr().String(), + node4.Listeners[0].Addr().String(), + } + node1 = startNode(t, node1) + + nm := node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node5.Listeners[0].Addr().String())) + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node6.Listeners[0].Addr().String())) + + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 3, 0, 3, 0) + waitForNonValidatorOutboundConnection(t, node1, node2) + waitForNonValidatorOutboundConnection(t, node1, node3) + waitForNonValidatorOutboundConnection(t, node1, node4) +} + +func TestConnectionControllerNonValidatorConnectorInbound(t *testing.T) { + // Spawn validators node1, node2, node3, node4, node5, node6. Also spawn non-validators node7, node8, node9, node10. + // Set node1's targetOutboundPeers to 0 and targetInboundPeers to 1. Then make node1 create outbound connections to + // node2, node3, and make node4, node5, node6 create inbound connections to node1. Then make node1 create outbound + // connections to node7, node8, and make node9, node10 create inbound connections to node1. 
+ blsSeedPhrase1, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsSeedPhrase1) + node1.Config.TargetOutboundPeers = 0 + node1.Config.MaxInboundPeers = 1 + node1.Params.DialTimeout = 1 * time.Second + node1.Params.VerackNegotiationTimeout = 1 * time.Second + node1.Params.VersionNegotiationTimeout = 1 * time.Second + + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + node2.Config.GlogV = 0 + blsSeedPhrase3, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase3) + node3.Config.GlogV = 0 + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Config.GlogV = 0 + blsSeedPhrase5, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsSeedPhrase5) + node5.Config.GlogV = 0 + blsSeedPhrase6, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node6 := spawnValidatorNodeProtocol2(t, 18005, "node6", blsSeedPhrase6) + node6.Config.GlogV = 0 + + node7 := spawnNonValidatorNodeProtocol2(t, 18006, "node7") + node8 := spawnNonValidatorNodeProtocol2(t, 18007, "node8") + node9 := spawnNonValidatorNodeProtocol2(t, 18008, "node9") + node10 := spawnNonValidatorNodeProtocol2(t, 18009, "node10") + + node1 = startNode(t, node1) + node2 = startNode(t, node2) + node3 = startNode(t, node3) + node4 = startNode(t, node4) + node5 = startNode(t, node5) + node6 = startNode(t, node6) + node7 = startNode(t, node7) + node8 = startNode(t, node8) + node9 = startNode(t, node9) + node10 = startNode(t, node10) + + // Connect node1 to node2, node3, node7, and node8. 
+ nm1 := node1.Server.GetNetworkManager() + require.NoError(t, nm1.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + require.NoError(t, nm1.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + require.NoError(t, nm1.CreateNonValidatorOutboundConnection(node7.Listeners[0].Addr().String())) + require.NoError(t, nm1.CreateNonValidatorOutboundConnection(node8.Listeners[0].Addr().String())) + // Connect node4, node5, node6 to node1. + nm4 := node4.Server.GetNetworkManager() + require.NoError(t, nm4.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm5 := node5.Server.GetNetworkManager() + require.NoError(t, nm5.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm6 := node6.Server.GetNetworkManager() + require.NoError(t, nm6.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + + // Connect node9, node10 to node1. + nm9 := node9.Server.GetNetworkManager() + require.NoError(t, nm9.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + nm10 := node10.Server.GetNetworkManager() + require.NoError(t, nm10.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + + activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node1, node2, node3, node4, node5, node6) + setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6, node7, node8, node9, node10) + + waitForValidatorConnection(t, node1, node2) + waitForValidatorConnection(t, node1, node3) + waitForValidatorConnection(t, node1, node4) + waitForValidatorConnection(t, node1, node5) + waitForValidatorConnection(t, node1, node6) + // The allCount for node1 should be 11, since it will have 10 validator (inbound+outbound) connections. + // This means we expect validatorCount of unique validators to be 5. Also, expect 1 non-validator inbound connection. 
+ waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 11, 5, 0, 1) +} + +func TestConnectionControllerNonValidatorConnectorAddressMgr(t *testing.T) { + // Spawn a non-validator node1. Set node1's targetOutboundPeers to 1 and targetInboundPeers to 0. Then + // add one ip address to AddrMgr. Make sure that node1 creates outbound connections to this node. + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.TargetOutboundPeers = 1 + node1.Config.MaxInboundPeers = 0 + node1.Config.MaxSyncBlockHeight = 1 + + node1 = startNode(t, node1) + nm := node1.Server.GetNetworkManager() + na1, err := nm.ConvertIPStringToNetAddress("deso-seed-2.io:17000") + require.NoError(t, err) + nm.AddrMgr.AddAddress(na1, na1) + waitForCountRemoteNodeIndexerHandshakeCompleted(t, node1, 1, 0, 1, 0) +} + +func TestConnectionControllerNonValidatorConnectorAddIps(t *testing.T) { + // Spawn a non-validator node1. Set node1's targetOutboundPeers to 2 and targetInboundPeers to 0. Then + // add two ip addresses to AddIps. Make sure that node1 creates outbound connections to these nodes. 
+ node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.TargetOutboundPeers = 2 + node1.Config.MaxInboundPeers = 0 + node1.Config.MaxSyncBlockHeight = 1 + node1.Config.AddIPs = []string{"deso-seed-2.io", "deso-seed-3.io"} + + node1 = startNode(t, node1) + waitForCountRemoteNodeIndexer(t, node1, 2, 0, 2, 0) +} + +func getActiveValidatorsMapWithValidatorNodes(t *testing.T, validators ...*cmd.Node) *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator] { + mapping := collections.NewConcurrentMap[bls.SerializedPublicKey, consensus.Validator]() + for _, validator := range validators { + seed := validator.Config.PosValidatorSeed + if seed == "" { + t.Fatalf("Validator node %s does not have a PosValidatorSeed set", validator.Params.UserAgent) + } + keystore, err := lib.NewBLSKeystore(seed) + require.NoError(t, err) + mapping.Set(keystore.GetSigner().GetPublicKey().Serialize(), createSimpleValidatorEntry(validator)) + } + return mapping +} + +func setActiveValidators(validatorMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator], nodes ...*cmd.Node) { + for _, node := range nodes { + node.Server.GetNetworkManager().SetActiveValidatorsMap(validatorMap) + } +} + +func createSimpleValidatorEntry(node *cmd.Node) *lib.ValidatorEntry { + return &lib.ValidatorEntry{ + Domains: [][]byte{[]byte(node.Listeners[0].Addr().String())}, + } +} + +func waitForValidatorFullGraph(t *testing.T, validators ...*cmd.Node) { + for ii := 0; ii < len(validators); ii++ { + waitForValidatorConnectionOneWay(t, validators[ii], validators[ii+1:]...) 
+ } +} + +func waitForValidatorConnectionOneWay(t *testing.T, n *cmd.Node, validators ...*cmd.Node) { + if len(validators) == 0 { + return + } + for _, validator := range validators { + waitForValidatorConnection(t, n, validator) + } +} + +func waitForInactiveValidatorConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) { + userAgentN1 := node1.Params.UserAgent + userAgentN2 := node2.Params.UserAgent + andCondition := func() bool { + return checkInactiveValidatorConnection(t, node1, node2) + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to inbound AND outbound non-validator Node (%s)", + userAgentN1, userAgentN2), andCondition) +} + +func waitForMinNonValidatorCountRemoteNodeIndexer(t *testing.T, node *cmd.Node, allCount int, validatorCount int, + minNonValidatorOutboundCount int, minNonValidatorInboundCount int) { + + userAgent := node.Params.UserAgent + nm := node.Server.GetNetworkManager() + condition := func() bool { + return checkRemoteNodeIndexerMinNonValidatorCount(nm, allCount, validatorCount, + minNonValidatorOutboundCount, minNonValidatorInboundCount) + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to have at least %d non-validator outbound nodes and %d non-validator inbound nodes", + userAgent, minNonValidatorOutboundCount, minNonValidatorInboundCount), condition) +} + +func checkRemoteNodeIndexerMinNonValidatorCount(manager *lib.NetworkManager, allCount int, validatorCount int, + minNonValidatorOutboundCount int, minNonValidatorInboundCount int) bool { + + if allCount != manager.GetAllRemoteNodes().Count() { + return false + } + if validatorCount != manager.GetAllValidators().Count() { + return false + } + if minNonValidatorOutboundCount > manager.GetNonValidatorOutboundIndex().Count() { + return false + } + if minNonValidatorInboundCount > manager.GetNonValidatorInboundIndex().Count() { + return false + } + if allCount != manager.GetValidatorOutboundIndex().Count()+ + 
manager.GetValidatorInboundIndex().Count()+ + manager.GetNonValidatorOutboundIndex().Count()+ + manager.GetNonValidatorInboundIndex().Count() { + return false + } + return true +} diff --git a/integration_testing/network_manager_test.go b/integration_testing/network_manager_test.go new file mode 100644 index 000000000..9a447b134 --- /dev/null +++ b/integration_testing/network_manager_test.go @@ -0,0 +1,464 @@ +package integration_testing + +import ( + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/lib" + "github.com/stretchr/testify/require" + "github.com/tyler-smith/go-bip39" + "testing" +) + +func TestConnectionControllerNonValidator(t *testing.T) { + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Params.DisableNetworkManagerRoutines = true + node1 = startNode(t, node1) + + // Make sure NonValidator Node1 can create an outbound connection to NonValidator Node2 + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + node2.Params.DisableNetworkManagerRoutines = true + node2 = startNode(t, node2) + + nm := node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + waitForNonValidatorOutboundConnection(t, node1, node2) + waitForNonValidatorInboundConnection(t, node2, node1) + + node2.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #1 passed | Successfully created outbound connection from NonValidator Node1 to NonValidator Node2") + + // Make sure NonValidator Node1 can create an outbound connection to validator Node3 + blsSeedPhrase3, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase3) + node3.Params.DisableNetworkManagerRoutines = true + node3 = startNode(t, node3) + + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + waitForValidatorConnection(t, 
node1, node3) + waitForNonValidatorInboundConnection(t, node3, node1) + + node3.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #2 passed | Successfully created outbound connection from NonValidator Node1 to Validator Node3") + + // Make sure NonValidator Node1 can create a non-validator connection to validator Node4 + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Params.DisableNetworkManagerRoutines = true + node4 = startNode(t, node4) + + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) + waitForValidatorConnection(t, node1, node4) + waitForNonValidatorInboundConnection(t, node4, node1) + t.Logf("Test #3 passed | Successfully created outbound connection from NonValidator Node1 to Validator Node4") +} + +func TestConnectionControllerValidator(t *testing.T) { + blsSeedPhrase1, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsSeedPhrase1) + node1.Params.DisableNetworkManagerRoutines = true + node1 = startNode(t, node1) + + // Make sure Validator Node1 can create an outbound connection to Validator Node2 + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + blsKeyStore2, err := lib.NewBLSKeystore(blsSeedPhrase2) + require.NoError(t, err) + blsPub2 := blsKeyStore2.GetSigner().GetPublicKey() + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + node2.Params.DisableNetworkManagerRoutines = true + node2 = startNode(t, node2) + + nm := node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateValidatorConnection(node2.Listeners[0].Addr().String(), blsPub2)) + waitForValidatorConnection(t, node1, node2) + waitForValidatorConnection(t, node2, node1) + + node2.Stop() + waitForEmptyRemoteNodeIndexer(t, 
node1) + t.Logf("Test #1 passed | Successfully created outbound connection from Validator Node1 to Validator Node2") + + // Make sure Validator Node1 can create an outbound connection to NonValidator Node3 + node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") + node3.Params.DisableNetworkManagerRoutines = true + node3 = startNode(t, node3) + + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + waitForNonValidatorOutboundConnection(t, node1, node3) + waitForValidatorConnection(t, node3, node1) + + node3.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #2 passed | Successfully created outbound connection from Validator Node1 to NonValidator Node3") + + // Make sure Validator Node1 can create an outbound non-validator connection to Validator Node4 + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Params.DisableNetworkManagerRoutines = true + node4 = startNode(t, node4) + + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) + waitForValidatorConnection(t, node1, node4) + waitForValidatorConnection(t, node4, node1) + t.Logf("Test #3 passed | Successfully created non-validator outbound connection from Validator Node1 to Validator Node4") +} + +func TestConnectionControllerHandshakeDataErrors(t *testing.T) { + blsSeedPhrase1, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node1 := spawnValidatorNodeProtocol2(t, 18000, "node1", blsSeedPhrase1) + node1.Params.DisableNetworkManagerRoutines = true + + // This node should have ProtocolVersion2, but it has ProtocolVersion1 as we want it to disconnect. 
+ blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + node2.Params.DisableNetworkManagerRoutines = true + node2.Params.ProtocolVersion = lib.ProtocolVersion1 + + node1 = startNode(t, node1) + node2 = startNode(t, node2) + + nm := node2.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node2) + t.Logf("Test #1 passed | Successfully disconnected node with SFValidator flag and ProtocolVersion1 mismatch") + + // This node shouldn't have ProtocolVersion3, which is beyond latest ProtocolVersion2, meaning nodes should disconnect. + blsSeedPhrase3, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase3) + node3.Params.DisableNetworkManagerRoutines = true + node3.Params.ProtocolVersion = lib.ProtocolVersionType(3) + node3 = startNode(t, node3) + + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node3.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node3) + t.Logf("Test #2 passed | Successfully disconnected node with ProtocolVersion3") + + // This node shouldn't have ProtocolVersion0, which is outdated. 
+ node4 := spawnNonValidatorNodeProtocol2(t, 18003, "node4") + node4.Params.DisableNetworkManagerRoutines = true + node4.Params.ProtocolVersion = lib.ProtocolVersion0 + node4 = startNode(t, node4) + + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node4.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node4) + t.Logf("Test #3 passed | Successfuly disconnected node with ProtocolVersion0") + + // This node will have a different public key than the one it's supposed to have. + blsSeedPhrase5, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + blsSeedPhrase5Wrong, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + blsKeyStore5Wrong, err := lib.NewBLSKeystore(blsSeedPhrase5Wrong) + require.NoError(t, err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsSeedPhrase5) + node5.Params.DisableNetworkManagerRoutines = true + node5 = startNode(t, node5) + + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateValidatorConnection(node5.Listeners[0].Addr().String(), blsKeyStore5Wrong.GetSigner().GetPublicKey())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node5) + t.Logf("Test #4 passed | Successfuly disconnected node with public key mismatch") + + // This node will be missing SFPosValidator flag while being connected as a validator. 
+ blsPriv6, err := bls.NewPrivateKey() + require.NoError(t, err) + node6 := spawnNonValidatorNodeProtocol2(t, 18005, "node6") + node6.Params.DisableNetworkManagerRoutines = true + node6 = startNode(t, node6) + + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateValidatorConnection(node6.Listeners[0].Addr().String(), blsPriv6.PublicKey())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node6) + t.Logf("Test #5 passed | Successfully disconnected supposed validator node with missing SFPosValidator flag") + + // This node will have ProtocolVersion1 and be connected as an outbound non-validator node. + node7 := spawnNonValidatorNodeProtocol2(t, 18006, "node7") + node7.Params.DisableNetworkManagerRoutines = true + node7.Params.ProtocolVersion = lib.ProtocolVersion1 + node7 = startNode(t, node7) + + nm = node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node7.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node7) + t.Logf("Test #6 passed | Successfully disconnected outbound non-validator node with ProtocolVersion1") +} + +func TestConnectionControllerHandshakeTimeouts(t *testing.T) { + // Set version negotiation timeout to 0 to make sure that the node will be disconnected + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Params.DisableNetworkManagerRoutines = true + node1.Params.VersionNegotiationTimeout = 0 + node1 = startNode(t, node1) + + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + node2.Params.DisableNetworkManagerRoutines = true + node2 = startNode(t, node2) + + nm := node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node2) + t.Logf("Test #1 passed | Successfully disconnected node after version negotiation timeout") + + // Now 
let's try timing out verack exchange + node1.Params.VersionNegotiationTimeout = lib.DeSoTestnetParams.VersionNegotiationTimeout + node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") + node3.Params.DisableNetworkManagerRoutines = true + node3.Params.VerackNegotiationTimeout = 0 + node3 = startNode(t, node3) + + nm = node3.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node1) + waitForEmptyRemoteNodeIndexer(t, node3) + t.Logf("Test #2 passed | Successfuly disconnected node after verack exchange timeout") + + // Now let's try timing out handshake between two validators node4 and node5 + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Params.DisableNetworkManagerRoutines = true + node4.Params.HandshakeTimeoutMicroSeconds = 0 + node4 = startNode(t, node4) + + blsSeedPhrase5, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + blsKeyStore5, err := lib.NewBLSKeystore(blsSeedPhrase5) + require.NoError(t, err) + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsSeedPhrase5) + node5.Params.DisableNetworkManagerRoutines = true + node5 = startNode(t, node5) + + nm = node4.Server.GetNetworkManager() + require.NoError(t, nm.CreateValidatorConnection(node5.Listeners[0].Addr().String(), blsKeyStore5.GetSigner().GetPublicKey())) + waitForEmptyRemoteNodeIndexer(t, node4) + waitForEmptyRemoteNodeIndexer(t, node5) + t.Logf("Test #3 passed | Successfuly disconnected validator node after handshake timeout") +} + +func TestConnectionControllerValidatorDuplication(t *testing.T) { + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Params.DisableNetworkManagerRoutines = true + node1 = startNode(t, node1) + + // Create a validator Node2 + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + 
require.NoError(t, err) + blsKeyStore2, err := lib.NewBLSKeystore(blsSeedPhrase2) + require.NoError(t, err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + node2.Params.DisableNetworkManagerRoutines = true + node2 = startNode(t, node2) + + // Create a duplicate validator Node3 + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase2) + node3.Params.DisableNetworkManagerRoutines = true + node3 = startNode(t, node3) + + // Create validator connection from Node1 to Node2 and from Node1 to Node3 + nm := node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateValidatorConnection(node2.Listeners[0].Addr().String(), blsKeyStore2.GetSigner().GetPublicKey())) + // This should fail out right because Node3 has a duplicate public key. + require.Error(t, nm.CreateValidatorConnection(node3.Listeners[0].Addr().String(), blsKeyStore2.GetSigner().GetPublicKey())) + waitForValidatorConnection(t, node1, node2) + waitForNonValidatorInboundConnection(t, node2, node1) + + // Now create an outbound connection from Node1 to Node3, which should pass handshake. 
+ nm3 := node3.Server.GetNetworkManager() + require.NoError(t, nm3.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForCountRemoteNodeIndexer(t, node1, 2, 1, 0, 0) + t.Logf("Test #1 passed | Successfuly connected to inbound/outbound validators") + + node3.Stop() + node2.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + + // Create two more validators Node4, Node5 with duplicate public keys + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Params.DisableNetworkManagerRoutines = true + node4 = startNode(t, node4) + + node5 := spawnValidatorNodeProtocol2(t, 18004, "node5", blsSeedPhrase4) + node5.Params.DisableNetworkManagerRoutines = true + node5 = startNode(t, node5) + + // Create validator connections from Node4 to Node1 and from Node5 to Node1 + nm4 := node4.Server.GetNetworkManager() + require.NoError(t, nm4.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForValidatorConnection(t, node1, node4) + waitForNonValidatorOutboundConnection(t, node4, node1) + nm5 := node5.Server.GetNetworkManager() + require.NoError(t, nm5.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node5) + waitForCountRemoteNodeIndexer(t, node1, 1, 1, 0, 0) + t.Logf("Test #2 passed | Successfuly rejected duplicate validator connection with multiple outbound validators") +} + +func TestConnectionControllerProtocolDifference(t *testing.T) { + // Create a ProtocolVersion1 Node1 + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Params.DisableNetworkManagerRoutines = true + node1.Params.ProtocolVersion = lib.ProtocolVersion1 + node1 = startNode(t, node1) + + // Create a ProtocolVersion2 NonValidator Node2 + node2 := spawnNonValidatorNodeProtocol2(t, 18001, "node2") + node2.Params.DisableNetworkManagerRoutines = true + node2 = startNode(t, 
node2) + + // Create non-validator connection from Node1 to Node2 + nm := node1.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node2.Listeners[0].Addr().String())) + waitForNonValidatorOutboundConnection(t, node1, node2) + waitForNonValidatorInboundConnection(t, node2, node1) + t.Logf("Test #1 passed | Successfuly connected to a ProtocolVersion1 node with a ProtocolVersion2 non-validator") + + // Create a ProtocolVersion2 Validator Node3 + blsSeedPhrase3, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + blsKeyStore3, err := lib.NewBLSKeystore(blsSeedPhrase3) + require.NoError(t, err) + node3 := spawnValidatorNodeProtocol2(t, 18002, "node3", blsSeedPhrase3) + node3.Params.DisableNetworkManagerRoutines = true + node3 = startNode(t, node3) + + // Create validator connection from Node1 to Node3 + require.NoError(t, nm.CreateValidatorConnection(node3.Listeners[0].Addr().String(), blsKeyStore3.GetSigner().GetPublicKey())) + waitForValidatorConnection(t, node1, node3) + waitForNonValidatorInboundConnection(t, node3, node1) + t.Logf("Test #2 passed | Successfuly connected to a ProtocolVersion1 node with a ProtocolVersion2 validator") + + node2.Stop() + node3.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + + // Create a ProtocolVersion2 validator Node4 + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + blsKeyStore4, err := lib.NewBLSKeystore(blsSeedPhrase4) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Params.DisableNetworkManagerRoutines = true + node4 = startNode(t, node4) + + // Attempt to create non-validator connection from Node4 to Node1 + nm = node4.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node4) + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #3 passed | Successfuly 
rejected outbound connection from ProtocolVersion2 node to ProtocolVersion1 node") + + // Attempt to create validator connection from Node4 to Node1 + require.NoError(t, nm.CreateValidatorConnection(node1.Listeners[0].Addr().String(), blsKeyStore4.GetSigner().GetPublicKey())) + waitForEmptyRemoteNodeIndexer(t, node4) + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #4 passed | Successfully rejected validator connection from ProtocolVersion2 node to ProtocolVersion1 node") + + // Create a ProtocolVersion2 non-validator Node5 + node5 := spawnNonValidatorNodeProtocol2(t, 18004, "node5") + node5.Params.DisableNetworkManagerRoutines = true + node5 = startNode(t, node5) + + // Attempt to create non-validator connection from Node5 to Node1 + nm = node5.Server.GetNetworkManager() + require.NoError(t, nm.CreateNonValidatorOutboundConnection(node1.Listeners[0].Addr().String())) + waitForEmptyRemoteNodeIndexer(t, node5) + waitForEmptyRemoteNodeIndexer(t, node1) + t.Logf("Test #5 passed | Successfully rejected outbound connection from ProtocolVersion2 node to ProtocolVersion1 node") +} + +func TestConnectionControllerPersistentConnection(t *testing.T) { + // Create a NonValidator Node1 + node1 := spawnNonValidatorNodeProtocol2(t, 18000, "node1") + node1.Params.DisableNetworkManagerRoutines = true + node1 = startNode(t, node1) + + // Create a Validator Node2 + blsSeedPhrase2, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node2 := spawnValidatorNodeProtocol2(t, 18001, "node2", blsSeedPhrase2) + node2.Params.DisableNetworkManagerRoutines = true + node2 = startNode(t, node2) + + // Create a persistent connection from Node1 to Node2 + nm := node1.Server.GetNetworkManager() + _, err = nm.CreateNonValidatorPersistentOutboundConnection(node2.Listeners[0].Addr().String()) + require.NoError(t, err) + waitForValidatorConnection(t, node1, node2) + waitForNonValidatorInboundConnection(t, node2, node1) + node2.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + 
t.Logf("Test #1 passed | Successfuly created persistent connection from non-validator Node1 to validator Node2") + + // Create a Non-validator Node3 + node3 := spawnNonValidatorNodeProtocol2(t, 18002, "node3") + node3.Params.DisableNetworkManagerRoutines = true + node3 = startNode(t, node3) + + // Create a persistent connection from Node1 to Node3 + _, err = nm.CreateNonValidatorPersistentOutboundConnection(node3.Listeners[0].Addr().String()) + require.NoError(t, err) + waitForNonValidatorOutboundConnection(t, node1, node3) + waitForNonValidatorInboundConnection(t, node3, node1) + node3.Stop() + waitForEmptyRemoteNodeIndexer(t, node1) + node1.Stop() + t.Logf("Test #2 passed | Successfuly created persistent connection from non-validator Node1 to non-validator Node3") + + // Create a Validator Node4 + blsSeedPhrase4, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node4 := spawnValidatorNodeProtocol2(t, 18003, "node4", blsSeedPhrase4) + node4.Params.DisableNetworkManagerRoutines = true + node4 = startNode(t, node4) + + // Create a non-validator Node5 + node5 := spawnNonValidatorNodeProtocol2(t, 18004, "node5") + node5.Params.DisableNetworkManagerRoutines = true + node5 = startNode(t, node5) + + // Create a persistent connection from Node4 to Node5 + nm = node4.Server.GetNetworkManager() + _, err = nm.CreateNonValidatorPersistentOutboundConnection(node5.Listeners[0].Addr().String()) + require.NoError(t, err) + waitForNonValidatorOutboundConnection(t, node4, node5) + waitForValidatorConnection(t, node5, node4) + node5.Stop() + waitForEmptyRemoteNodeIndexer(t, node4) + t.Logf("Test #3 passed | Successfuly created persistent connection from validator Node4 to non-validator Node5") + + // Create a Validator Node6 + blsSeedPhrase6, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node6 := spawnValidatorNodeProtocol2(t, 18005, "node6", blsSeedPhrase6) + node6.Params.DisableNetworkManagerRoutines = true + node6 = 
startNode(t, node6) + + // Create a persistent connection from Node4 to Node6 + _, err = nm.CreateNonValidatorPersistentOutboundConnection(node6.Listeners[0].Addr().String()) + require.NoError(t, err) + waitForValidatorConnection(t, node4, node6) + waitForValidatorConnection(t, node6, node4) + t.Logf("Test #4 passed | Successfuly created persistent connection from validator Node4 to validator Node6") +} diff --git a/integration_testing/network_manager_utils_test.go b/integration_testing/network_manager_utils_test.go new file mode 100644 index 000000000..d8344c67c --- /dev/null +++ b/integration_testing/network_manager_utils_test.go @@ -0,0 +1,311 @@ +package integration_testing + +import ( + "fmt" + "github.com/deso-protocol/core/cmd" + "github.com/deso-protocol/core/lib" + "os" + "testing" +) + +func waitForValidatorConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) { + userAgentN1 := node1.Params.UserAgent + userAgentN2 := node2.Params.UserAgent + nmN1 := node1.Server.GetNetworkManager() + n1ValidatedN2 := func() bool { + if true != checkRemoteNodeIndexerUserAgent(nmN1, userAgentN2, true, false, false) { + return false + } + rnFromN2 := getRemoteNodeWithUserAgent(node1, userAgentN2) + if rnFromN2 == nil { + return false + } + if !rnFromN2.IsHandshakeCompleted() { + return false + } + return true + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to validator Node (%s)", userAgentN1, userAgentN2), n1ValidatedN2) +} + +func waitForNonValidatorOutboundConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) { + userAgentN1 := node1.Params.UserAgent + userAgentN2 := node2.Params.UserAgent + condition := conditionNonValidatorOutboundConnection(t, node1, node2) + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to outbound non-validator Node (%s)", userAgentN1, userAgentN2), condition) +} + +func conditionNonValidatorOutboundConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) func() bool { + return 
conditionNonValidatorOutboundConnectionDynamic(t, node1, node2, false) +} + +func conditionNonValidatorOutboundConnectionDynamic(t *testing.T, node1 *cmd.Node, node2 *cmd.Node, inactiveValidator bool) func() bool { + userAgentN2 := node2.Params.UserAgent + nmN1 := node1.Server.GetNetworkManager() + return func() bool { + if true != checkRemoteNodeIndexerUserAgent(nmN1, userAgentN2, false, true, false) { + return false + } + rnFromN2 := getRemoteNodeWithUserAgent(node1, userAgentN2) + if rnFromN2 == nil { + return false + } + if !rnFromN2.IsHandshakeCompleted() { + return false + } + // inactiveValidator should have the public key. + if inactiveValidator { + return rnFromN2.GetValidatorPublicKey() != nil + } + return rnFromN2.GetValidatorPublicKey() == nil + } +} + +func waitForNonValidatorInboundConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) { + userAgentN1 := node1.Params.UserAgent + userAgentN2 := node2.Params.UserAgent + condition := conditionNonValidatorInboundConnection(t, node1, node2) + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to inbound non-validator Node (%s)", userAgentN1, userAgentN2), condition) +} + +func waitForNonValidatorInboundConnectionDynamic(t *testing.T, node1 *cmd.Node, node2 *cmd.Node, inactiveValidator bool) { + userAgentN1 := node1.Params.UserAgent + userAgentN2 := node2.Params.UserAgent + condition := conditionNonValidatorInboundConnectionDynamic(t, node1, node2, inactiveValidator) + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to connect to inbound non-validator Node (%s), "+ + "inactiveValidator (%v)", userAgentN1, userAgentN2, inactiveValidator), condition) +} + +func conditionNonValidatorInboundConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) func() bool { + return conditionNonValidatorInboundConnectionDynamic(t, node1, node2, false) +} + +func conditionNonValidatorInboundConnectionDynamic(t *testing.T, node1 *cmd.Node, node2 *cmd.Node, inactiveValidator bool) func() bool { + 
userAgentN2 := node2.Params.UserAgent + nmN1 := node1.Server.GetNetworkManager() + return func() bool { + if true != checkRemoteNodeIndexerUserAgent(nmN1, userAgentN2, false, false, true) { + return false + } + rnFromN2 := getRemoteNodeWithUserAgent(node1, userAgentN2) + if rnFromN2 == nil { + return false + } + if !rnFromN2.IsHandshakeCompleted() { + return false + } + // inactiveValidator should have the public key. + if inactiveValidator { + return rnFromN2.GetValidatorPublicKey() != nil + } + return rnFromN2.GetValidatorPublicKey() == nil + } +} + +func checkInactiveValidatorConnection(t *testing.T, node1 *cmd.Node, node2 *cmd.Node) bool { + userAgentN2 := node2.Params.UserAgent + nmN1 := node1.Server.GetNetworkManager() + if true != checkRemoteNodeIndexerUserAgent(nmN1, userAgentN2, false, true, true) { + return false + } + rnFromN2 := getRemoteNodeWithUserAgent(node1, userAgentN2) + if rnFromN2 == nil { + return false + } + if !rnFromN2.IsHandshakeCompleted() { + return false + } + return rnFromN2.GetValidatorPublicKey() != nil +} + +func waitForEmptyRemoteNodeIndexer(t *testing.T, node1 *cmd.Node) { + userAgentN1 := node1.Params.UserAgent + nmN1 := node1.Server.GetNetworkManager() + n1ValidatedN2 := func() bool { + if true != checkRemoteNodeIndexerEmpty(nmN1) { + return false + } + return true + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to disconnect from all RemoteNodes", userAgentN1), n1ValidatedN2) +} + +func waitForCountRemoteNodeIndexer(t *testing.T, node1 *cmd.Node, allCount int, validatorCount int, + nonValidatorOutboundCount int, nonValidatorInboundCount int) { + + userAgent := node1.Params.UserAgent + nm := node1.Server.GetNetworkManager() + condition := func() bool { + if true != checkRemoteNodeIndexerCount(nm, allCount, validatorCount, nonValidatorOutboundCount, nonValidatorInboundCount) { + return false + } + return true + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to have appropriate RemoteNodes counts", 
userAgent), condition) +} + +func waitForCountRemoteNodeIndexerHandshakeCompleted(t *testing.T, node1 *cmd.Node, allCount, validatorCount int, + nonValidatorOutboundCount int, nonValidatorInboundCount int) { + + userAgent := node1.Params.UserAgent + nm := node1.Server.GetNetworkManager() + condition := func() bool { + return checkRemoteNodeIndexerCountHandshakeCompleted(nm, allCount, validatorCount, + nonValidatorOutboundCount, nonValidatorInboundCount) + } + waitForCondition(t, fmt.Sprintf("Waiting for Node (%s) to have appropriate RemoteNodes counts", userAgent), condition) +} + +func checkRemoteNodeIndexerUserAgent(manager *lib.NetworkManager, userAgent string, validator bool, + nonValidatorOutbound bool, nonValidatorInbound bool) bool { + + if true != checkUserAgentInRemoteNodeList(userAgent, manager.GetAllRemoteNodes().GetAll()) { + return false + } + if validator != checkUserAgentInRemoteNodeList(userAgent, manager.GetAllValidators().GetAll()) { + return false + } + if nonValidatorOutbound != checkUserAgentInRemoteNodeList(userAgent, manager.GetNonValidatorOutboundIndex().GetAll()) { + return false + } + if nonValidatorInbound != checkUserAgentInRemoteNodeList(userAgent, manager.GetNonValidatorInboundIndex().GetAll()) { + return false + } + + return true +} + +func checkRemoteNodeIndexerCount(manager *lib.NetworkManager, allCount int, validatorCount int, + nonValidatorOutboundCount int, nonValidatorInboundCount int) bool { + + if allCount != manager.GetAllRemoteNodes().Count() { + return false + } + if validatorCount != manager.GetAllValidators().Count() { + return false + } + if nonValidatorOutboundCount != manager.GetNonValidatorOutboundIndex().Count() { + return false + } + if nonValidatorInboundCount != manager.GetNonValidatorInboundIndex().Count() { + return false + } + + return true +} + +func checkRemoteNodeIndexerCountHandshakeCompleted(manager *lib.NetworkManager, allCount int, validatorCount int, + nonValidatorOutboundCount int, 
nonValidatorInboundCount int) bool { + + if allCount != manager.GetAllRemoteNodes().Count() { + return false + } + allValidators := manager.GetAllValidators() + if validatorCount != allValidators.Count() { + return false + } + for _, rn := range allValidators.GetAll() { + if !rn.IsHandshakeCompleted() { + return false + } + } + + if nonValidatorOutboundCount != manager.GetNonValidatorOutboundIndex().Count() { + return false + } + for _, rn := range manager.GetNonValidatorOutboundIndex().GetAll() { + if !rn.IsHandshakeCompleted() { + return false + } + } + + if nonValidatorInboundCount != manager.GetNonValidatorInboundIndex().Count() { + return false + } + for _, rn := range manager.GetNonValidatorInboundIndex().GetAll() { + if !rn.IsHandshakeCompleted() { + return false + } + } + + return true +} + +func checkRemoteNodeIndexerEmpty(manager *lib.NetworkManager) bool { + if manager.GetAllRemoteNodes().Count() != 0 { + return false + } + if manager.GetAllValidators().Count() != 0 { + return false + } + if manager.GetNonValidatorOutboundIndex().Count() != 0 { + return false + } + if manager.GetNonValidatorInboundIndex().Count() != 0 { + return false + } + return true +} + +func checkUserAgentInRemoteNodeList(userAgent string, rnList []*lib.RemoteNode) bool { + for _, rn := range rnList { + if rn == nil { + continue + } + if rn.GetUserAgent() == userAgent { + return true + } + } + return false +} + +func getRemoteNodeWithUserAgent(node *cmd.Node, userAgent string) *lib.RemoteNode { + nm := node.Server.GetNetworkManager() + rnList := nm.GetAllRemoteNodes().GetAll() + for _, rn := range rnList { + if rn.GetUserAgent() == userAgent { + return rn + } + } + return nil +} + +func spawnNodeProtocol1(t *testing.T, port uint32, id string) *cmd.Node { + dbDir := getDirectory(t) + t.Cleanup(func() { + os.RemoveAll(dbDir) + }) + config := generateConfig(t, port, dbDir, 10) + config.SyncType = lib.NodeSyncTypeBlockSync + node := cmd.NewNode(config) + node.Params.UserAgent = id + 
node.Params.ProtocolVersion = lib.ProtocolVersion1 + return node +} + +func spawnNonValidatorNodeProtocol2(t *testing.T, port uint32, id string) *cmd.Node { + dbDir := getDirectory(t) + t.Cleanup(func() { + os.RemoveAll(dbDir) + }) + config := generateConfig(t, port, dbDir, 10) + config.SyncType = lib.NodeSyncTypeBlockSync + node := cmd.NewNode(config) + node.Params.UserAgent = id + node.Params.ProtocolVersion = lib.ProtocolVersion2 + return node +} + +func spawnValidatorNodeProtocol2(t *testing.T, port uint32, id string, blsSeedPhrase string) *cmd.Node { + dbDir := getDirectory(t) + t.Cleanup(func() { + os.RemoveAll(dbDir) + }) + config := generateConfig(t, port, dbDir, 10) + config.SyncType = lib.NodeSyncTypeBlockSync + config.PosValidatorSeed = blsSeedPhrase + node := cmd.NewNode(config) + node.Params.UserAgent = id + node.Params.ProtocolVersion = lib.ProtocolVersion2 + return node +} diff --git a/integration_testing/rollback_test.go b/integration_testing/rollback_test.go index 8028866ac..c7b440b2b 100644 --- a/integration_testing/rollback_test.go +++ b/integration_testing/rollback_test.go @@ -10,7 +10,10 @@ import ( ) // Start blocks to height 5000 and then disconnect +// TODO: This test won't work now. 
func TestStateRollback(t *testing.T) { + t.Skipf("DisconnectBlocksToHeight doesn't work in PoS") + require := require.New(t) _ = require diff --git a/integration_testing/tools.go b/integration_testing/tools.go index c73b82873..388a4e4bf 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -82,6 +82,14 @@ func generateConfig(t *testing.T, port uint32, dataDir string, maxPeers uint32) config.SnapshotBlockHeightPeriod = HyperSyncSnapshotPeriod config.MaxSyncBlockHeight = MaxSyncBlockHeight config.SyncType = lib.NodeSyncTypeBlockSync + config.MempoolBackupIntervalMillis = 30000 + config.MaxMempoolPosSizeBytes = 3000000000 + config.MempoolFeeEstimatorNumMempoolBlocks = 1 + config.MempoolFeeEstimatorNumPastBlocks = 50 + config.AugmentedBlockViewRefreshIntervalMillis = 10 + config.PosBlockProductionIntervalMilliseconds = 1500 + config.PosTimeoutBaseDurationMilliseconds = 30000 + //config.ArchivalMode = true return config @@ -150,7 +158,8 @@ func compareNodesByChecksum(t *testing.T, nodeA *cmd.Node, nodeB *cmd.Node) { // compareNodesByState will look through all state records in nodeA and nodeB databases and will compare them. // The nodes pass this comparison iff they have identical states. func compareNodesByState(t *testing.T, nodeA *cmd.Node, nodeB *cmd.Node, verbose int) { - compareNodesByStateWithPrefixList(t, nodeA.ChainDB, nodeB.ChainDB, lib.StatePrefixes.StatePrefixesList, verbose) + compareNodesByStateWithPrefixList(t, nodeA.Server.GetBlockchain().DB(), nodeB.Server.GetBlockchain().DB(), + lib.StatePrefixes.StatePrefixesList, verbose) } // compareNodesByDB will look through all records in nodeA and nodeB databases and will compare them. 
@@ -164,7 +173,8 @@ func compareNodesByDB(t *testing.T, nodeA *cmd.Node, nodeB *cmd.Node, verbose in } prefixList = append(prefixList, []byte{prefix}) } - compareNodesByStateWithPrefixList(t, nodeA.ChainDB, nodeB.ChainDB, prefixList, verbose) + compareNodesByStateWithPrefixList(t, nodeA.Server.GetBlockchain().DB(), nodeB.Server.GetBlockchain().DB(), + prefixList, verbose) } // compareNodesByDB will look through all records in nodeA and nodeB txindex databases and will compare them. @@ -386,25 +396,25 @@ func restartNode(t *testing.T, node *cmd.Node) *cmd.Node { } // listenForBlockHeight busy-waits until the node's block tip reaches provided height. -func listenForBlockHeight(t *testing.T, node *cmd.Node, height uint32, signal chan<- bool) { +func listenForBlockHeight(node *cmd.Node, height uint32) (_listener chan bool) { + listener := make(chan bool) ticker := time.NewTicker(1 * time.Millisecond) go func() { for { <-ticker.C if node.Server.GetBlockchain().BlockTip().Height >= height { - signal <- true + listener <- true break } } }() + return listener } // disconnectAtBlockHeight busy-waits until the node's block tip reaches provided height, and then disconnects // from the provided bridge. -func disconnectAtBlockHeight(t *testing.T, syncingNode *cmd.Node, bridge *ConnectionBridge, height uint32) { - listener := make(chan bool) - listenForBlockHeight(t, syncingNode, height, listener) - <-listener +func disconnectAtBlockHeight(syncingNode *cmd.Node, bridge *ConnectionBridge, height uint32) { + <-listenForBlockHeight(syncingNode, height) bridge.Disconnect() } @@ -414,7 +424,7 @@ func restartAtHeightAndReconnectNode(t *testing.T, node *cmd.Node, source *cmd.N height uint32) (_node *cmd.Node, _bridge *ConnectionBridge) { require := require.New(t) - disconnectAtBlockHeight(t, node, currentBridge, height) + disconnectAtBlockHeight(node, currentBridge, height) newNode := restartNode(t, node) // Wait after the restart. 
time.Sleep(1 * time.Second) @@ -425,6 +435,16 @@ func restartAtHeightAndReconnectNode(t *testing.T, node *cmd.Node, source *cmd.N return newNode, bridge } +func restartAtHeight(t *testing.T, node *cmd.Node, height uint32) *cmd.Node { + <-listenForBlockHeight(node, height) + return restartNode(t, node) +} + +func shutdownAtHeight(t *testing.T, node *cmd.Node, height uint32) *cmd.Node { + <-listenForBlockHeight(node, height) + return shutdownNode(t, node) +} + // listenForSyncPrefix will wait until the node starts downloading the provided syncPrefix in hypersync, and then sends // a message to the provided signal channel. func listenForSyncPrefix(t *testing.T, node *cmd.Node, syncPrefix []byte, signal chan<- bool) { @@ -468,6 +488,20 @@ func restartAtSyncPrefixAndReconnectNode(t *testing.T, node *cmd.Node, source *c return newNode, bridge } +func restartAtSyncPrefix(t *testing.T, node *cmd.Node, syncPrefix []byte) *cmd.Node { + listener := make(chan bool) + listenForSyncPrefix(t, node, syncPrefix, listener) + <-listener + return restartNode(t, node) +} + +func shutdownAtSyncPrefix(t *testing.T, node *cmd.Node, syncPrefix []byte) *cmd.Node { + listener := make(chan bool) + listenForSyncPrefix(t, node, syncPrefix, listener) + <-listener + return shutdownNode(t, node) +} + func randomUint32Between(t *testing.T, min, max uint32) uint32 { require := require.New(t) randomNumber, err := wire.RandomUint64() @@ -475,3 +509,23 @@ func randomUint32Between(t *testing.T, min, max uint32) uint32 { randomHeight := uint32(randomNumber) % (max - min) return randomHeight + min } + +func waitForCondition(t *testing.T, id string, condition func() bool) { + signalChan := make(chan struct{}) + go func() { + for { + if condition() { + signalChan <- struct{}{} + return + } + time.Sleep(100 * time.Millisecond) + } + }() + + select { + case <-signalChan: + return + case <-time.After(5 * time.Second): + t.Fatalf("Condition timed out | %s", id) + } +} diff --git 
a/integration_testing/txindex_test.go b/integration_testing/txindex_test.go index aa13fd265..702e63c10 100644 --- a/integration_testing/txindex_test.go +++ b/integration_testing/txindex_test.go @@ -1,11 +1,7 @@ package integration_testing import ( - "fmt" - "github.com/deso-protocol/core/cmd" "github.com/deso-protocol/core/lib" - "github.com/stretchr/testify/require" - "os" "testing" ) @@ -16,39 +12,21 @@ import ( // 4. node2 syncs MaxSyncBlockHeight blocks from node1, and builds txindex afterwards. // 5. compare node1 db and txindex matches node2. func TestSimpleTxIndex(t *testing.T) { - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.HyperSync = true - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.HyperSync = true - config2.SyncType = lib.NodeSyncTypeHyperSyncArchival - - config1.TXIndex = true - config2.TXIndex = true - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - + node1 := spawnNodeProtocol1(t, 18000, "node1") + node1.Config.ConnectIPs = []string{"deso-seed-2.io:17000"} + node1.Config.HyperSync = true + node1.Config.TXIndex = true node1 = startNode(t, node1) - node2 = startNode(t, node2) - // wait for node1 to sync blocks waitForNodeToFullySync(node1) - // bridge the nodes together. - bridge := NewConnectionBridge(node1, node2) - require.NoError(bridge.Start()) - - // wait for node2 to sync blocks. 
+ node2 := spawnNodeProtocol1(t, 18001, "node2") + node2.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2.Config.HyperSync = true + node2.Config.TXIndex = true + node2 = startNode(t, node2) + // wait for node1 to sync blocks waitForNodeToFullySync(node2) waitForNodeToFullySyncTxIndex(node1) @@ -56,7 +34,5 @@ func TestSimpleTxIndex(t *testing.T) { compareNodesByDB(t, node1, node2, 0) compareNodesByTxIndex(t, node1, node2, 0) - fmt.Println("Databases match!") - node1.Stop() - node2.Stop() + t.Logf("Databases match!") } diff --git a/lib/block_producer.go b/lib/block_producer.go index a4763abf6..b5ad66cb0 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -4,6 +4,7 @@ import ( "encoding/hex" "fmt" "math" + "strings" "sync" "sync/atomic" "time" @@ -80,15 +81,26 @@ func NewDeSoBlockProducer( var privKey *btcec.PrivateKey if blockProducerSeed != "" { - seedBytes, err := bip39.NewSeedWithErrorChecking(blockProducerSeed, "") - if err != nil { - return nil, fmt.Errorf("NewDeSoBlockProducer: Error converting mnemonic: %+v", err) - } + // If a blockProducerSeed is provided then we use it to generate a private key. + // If the block producer seed beings with 0x, we treat it as a hex seed. Otherwise, + // we treat it as a seed phrase. 
+ if strings.HasPrefix(blockProducerSeed, "0x") { + privKeyBytes, err := hex.DecodeString(blockProducerSeed[2:]) + if err != nil { + return nil, fmt.Errorf("NewDeSoBlockProducer: Error decoding hex seed: %+v", err) + } + privKey, _ = btcec.PrivKeyFromBytes(btcec.S256(), privKeyBytes) + } else { + seedBytes, err := bip39.NewSeedWithErrorChecking(blockProducerSeed, "") + if err != nil { + return nil, fmt.Errorf("NewDeSoBlockProducer: Error converting mnemonic: %+v", err) + } - _, privKey, _, err = ComputeKeysFromSeed(seedBytes, 0, params) - if err != nil { - return nil, fmt.Errorf( - "NewDeSoBlockProducer: Error computing keys from seed: %+v", err) + _, privKey, _, err = ComputeKeysFromSeed(seedBytes, 0, params) + if err != nil { + return nil, fmt.Errorf( + "NewDeSoBlockProducer: Error computing keys from seed: %+v", err) + } } } diff --git a/lib/block_view.go b/lib/block_view.go index 168f2a997..a242aa9dc 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -652,6 +652,8 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.SnapshotStakesToReward[mapKey] = snapshotStakeToReward.Copy() } + newView.TipHash = bav.TipHash.NewBlockHash() + return newView, nil } @@ -4037,6 +4039,7 @@ func (bav *UtxoView) _connectFailingTransaction(txn *MsgDeSoTxn, blockHeight uin "spending balance") } utxoOps = append(utxoOps, feeUtxoOp) + utxoOps = append(utxoOps, &UtxoOperation{Type: OperationTypeFailingTxn}) // If verifySignatures is passed, we check transaction signature. if verifySignatures { @@ -4107,7 +4110,7 @@ func (bav *UtxoView) ConnectBlock( // Check that the block being connected references the current tip. ConnectBlock // can only add a block to the current tip. We do this to keep the API simple. 
- if *desoBlock.Header.PrevBlockHash != *bav.TipHash { + if !desoBlock.Header.PrevBlockHash.IsEqual(bav.TipHash) { return nil, fmt.Errorf("ConnectBlock: Parent hash of block being connected does not match tip") } diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 4253c1cea..954a76d16 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -1,5 +1,3 @@ -//go:build relic - package lib import ( diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 0056fbbed..6cfbe72b4 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -2267,7 +2267,7 @@ func TestConnectFailingTransaction(t *testing.T) { txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn, blockHeight, true) require.NoError(err) - require.Equal(1, len(utxoOps)) + require.Equal(2, len(utxoOps)) expectedBurnFee, expectedUtilityFee := _getBMFForTxn(txn, globalParams) require.Equal(expectedBurnFee, burnFee) require.Equal(expectedUtilityFee, utilityFee) @@ -2303,7 +2303,7 @@ func TestConnectFailingTransaction(t *testing.T) { utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn, blockHeight, true) require.NoError(err) - require.Equal(1, len(utxoOps)) + require.Equal(2, len(utxoOps)) // The final balance is m0's starting balance minus the failing txn fee paid. finalBalance, err := blockView.GetDeSoBalanceNanosForPublicKey(m0PubBytes) @@ -2351,7 +2351,7 @@ func TestConnectFailingTransaction(t *testing.T) { txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn, blockHeight, true) require.NoError(err) - require.Equal(1, len(utxoOps)) + require.Equal(2, len(utxoOps)) // The final balance is m0's starting balance minus the failing txn fee paid. 
finalBalance, err := blockView.GetDeSoBalanceNanosForPublicKey(m0PubBytes) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 04d52e1ae..b487c91ec 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -682,7 +682,8 @@ const ( OperationTypeStakeDistributionRestake OperationType = 49 OperationTypeStakeDistributionPayToBalance OperationType = 50 OperationTypeSetValidatorLastActiveAtEpoch OperationType = 51 - // NEXT_TAG = 52 + OperationTypeFailingTxn OperationType = 52 + // NEXT_TAG = 53 ) func (op OperationType) String() string { diff --git a/lib/block_view_types_test.go b/lib/block_view_types_test.go index 43bddeaae..fc990d288 100644 --- a/lib/block_view_types_test.go +++ b/lib/block_view_types_test.go @@ -1,5 +1,3 @@ -//go:build relic - package lib import ( diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 5ab35076b..2bb00fdee 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -1,5 +1,3 @@ -//go:build relic - package lib import ( diff --git a/lib/blockchain.go b/lib/blockchain.go index 3a8b9484c..8f2d0ba35 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5,6 +5,7 @@ import ( "container/list" "encoding/hex" "fmt" + "github.com/decred/dcrd/lru" "math" "math/big" "reflect" @@ -501,6 +502,9 @@ type Blockchain struct { // We connect many blocks in the same view and flush every X number of blocks blockView *UtxoView + // cache block view for each block + blockViewCache lru.KVCache + // State checksum is used to verify integrity of state data and when // syncing from snapshot in the hyper sync protocol. // @@ -727,6 +731,8 @@ func (bc *Blockchain) _applyUncommittedBlocksToBestChain() error { } } + ////////////////////////// Update the bestChain in-memory data structures ////////////////////////// + // Fetch the lineage of blocks from the committed tip through the uncommitted tip. 
lineageFromCommittedTip, err := bc.getLineageFromCommittedTip(uncommittedTipBlockNode.Header) if err != nil { @@ -738,6 +744,16 @@ func (bc *Blockchain) _applyUncommittedBlocksToBestChain() error { return errors.Wrapf(err, "_applyUncommittedBlocksToBestChain: ") } + ////////////////////////// Update the bestHeaderChain in-memory data structures ////////////////////////// + currentHeaderTip := bc.headerTip() + _, blocksToDetach, blocksToAttach := GetReorgBlocks(currentHeaderTip, uncommittedTipBlockNode) + bc.bestHeaderChain, bc.bestHeaderChainMap = updateBestChainInMemory( + bc.bestHeaderChain, + bc.bestHeaderChainMap, + blocksToDetach, + blocksToAttach, + ) + return nil } @@ -789,6 +805,8 @@ func NewBlockchain( bestHeaderChainMap: make(map[BlockHash]*BlockNode), + blockViewCache: lru.NewKVCache(1000), // TODO: parameterize + orphanList: list.New(), timer: timer, } @@ -1146,6 +1164,14 @@ func (bc *Blockchain) HasBlock(blockHash *BlockHash) bool { return true } +func (bc *Blockchain) HasBlockInBlockIndex(blockHash *BlockHash) bool { + bc.ChainLock.RLock() + defer bc.ChainLock.RUnlock() + + _, exists := bc.blockIndexByHash[*blockHash] + return exists +} + // This needs to hold a lock on the blockchain because it read from an in-memory map that is // not thread-safe. func (bc *Blockchain) GetBlockHeaderFromIndex(blockHash *BlockHash) *MsgDeSoHeader { @@ -1774,6 +1800,13 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B return false, false, HeaderErrorBlockHeightAfterProofOfStakeCutover } + // Only accept headers if the header chain is still in PoW. Once the header chain reaches the final height + // of the PoW protocol, it will transition to the PoS. We should not accept any more PoW headers as they can + // result in a fork. 
+ if bc.headerTip().Header.Height >= bc.params.GetFinalPoWBlockHeight() { + return false, false, HeaderErrorBestChainIsAtProofOfStakeCutover + } + // Start by checking if the header already exists in our node // index. If it does, then return an error. We should generally // expect that processHeaderPoW will only be called on headers we @@ -1946,7 +1979,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B } // ProcessHeader is a wrapper around processHeaderPoW and processHeaderPoS, which do the leg-work. -func (bc *Blockchain) ProcessHeader(blockHeader *MsgDeSoHeader, headerHash *BlockHash) (_isMainChain bool, _isOrphan bool, _err error) { +func (bc *Blockchain) ProcessHeader(blockHeader *MsgDeSoHeader, headerHash *BlockHash, verifySignatures bool) (_isMainChain bool, _isOrphan bool, _err error) { bc.ChainLock.Lock() defer bc.ChainLock.Unlock() @@ -1958,7 +1991,7 @@ func (bc *Blockchain) ProcessHeader(blockHeader *MsgDeSoHeader, headerHash *Bloc // If the header's height is after the PoS cut-over fork height, then we use the PoS header processing logic. // Otherwise, fall back to the PoW logic. if bc.params.IsPoSBlockHeight(blockHeader.Height) { - return bc.processHeaderPoS(blockHeader) + return bc.processHeaderPoS(blockHeader, verifySignatures) } return bc.processHeaderPoW(blockHeader, headerHash) @@ -1989,6 +2022,13 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures return false, false, RuleErrorBlockHeightAfterProofOfStakeCutover } + // Only accept blocks if the blockchain is still running PoW. Once the chain connects the final block of the + // PoW protocol, it will transition to the PoS. We should not accept any PoW blocks as they can result in a + // fork. 
+ if bc.blockTip().Header.Height >= bc.params.GetFinalPoWBlockHeight() { + return false, false, RuleErrorBestChainIsAtProofOfStakeCutover + } + blockHeight := uint64(bc.BlockTip().Height + 1) bc.timer.Start("Blockchain.ProcessBlock: Initial") @@ -2411,7 +2451,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures lastIndex := len(bc.bestChain) - 1 bestChainHash := bc.bestChain[lastIndex].Hash - if *bestChainHash != *nodeToValidate.Header.PrevBlockHash { + if !bestChainHash.IsEqual(nodeToValidate.Header.PrevBlockHash) { return false, false, fmt.Errorf("ProcessBlock: Last block in bestChain "+ "data structure (%v) is not equal to parent hash of block being "+ "added to tip (%v)", bestChainHash, nodeToValidate.Header.PrevBlockHash) diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index 90d7adbe4..65b5120d4 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -708,7 +708,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockA1.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockA1.Header, headerHash) + isMainChain, isOrphan, err := chain.ProcessHeader(blockA1.Header, headerHash, false) require.NoError(err) require.True(isMainChain) require.False(isOrphan) @@ -724,7 +724,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockA2.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockA2.Header, headerHash) + isMainChain, isOrphan, err := chain.ProcessHeader(blockA2.Header, headerHash, false) require.NoError(err) require.True(isMainChain) require.False(isOrphan) @@ -740,7 +740,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockB1.Header.Hash() require.NoError(err) - 
isMainChain, isOrphan, err := chain.ProcessHeader(blockB1.Header, headerHash) + isMainChain, isOrphan, err := chain.ProcessHeader(blockB1.Header, headerHash, false) require.NoError(err) // Should not be main chain yet require.False(isMainChain) @@ -757,7 +757,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockB2.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockB2.Header, headerHash) + isMainChain, isOrphan, err := chain.ProcessHeader(blockB2.Header, headerHash, false) require.NoError(err) // Should not be main chain yet require.False(isMainChain) @@ -774,7 +774,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockB3.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockB3.Header, headerHash) + isMainChain, isOrphan, err := chain.ProcessHeader(blockB3.Header, headerHash, false) require.NoError(err) // Should not be main chain yet require.True(isMainChain) diff --git a/lib/bls_keystore_test.go b/lib/bls_keystore_test.go index 0f424e710..f44359352 100644 --- a/lib/bls_keystore_test.go +++ b/lib/bls_keystore_test.go @@ -1,5 +1,3 @@ -//go:build relic - package lib import ( diff --git a/lib/connection_manager.go b/lib/connection_manager.go index a14742c8b..b07bc6e97 100644 --- a/lib/connection_manager.go +++ b/lib/connection_manager.go @@ -4,17 +4,14 @@ import ( "fmt" "math" "net" - "strconv" + "sync" "sync/atomic" "time" "github.com/btcsuite/btcd/addrmgr" - chainlib "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/wire" "github.com/decred/dcrd/lru" - "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" - "github.com/pkg/errors" ) // connection_manager.go contains most of the logic for creating and managing @@ -36,24 +33,10 @@ type ConnectionManager struct { // doesn't need a 
reference to the Server object. But for now we keep things lazy. srv *Server - // When --connectips is set, we don't connect to anything from the addrmgr. - connectIps []string - - // The address manager keeps track of peer addresses we're aware of. When - // we need to connect to a new outbound peer, it chooses one of the addresses - // it's aware of at random and provides it to us. - AddrMgr *addrmgr.AddrManager // The interfaces we listen on for new incoming connections. listeners []net.Listener // The parameters we are initialized with. params *DeSoParams - // The target number of outbound peers we want to have. - targetOutboundPeers uint32 - // The maximum number of inbound peers we allow. - maxInboundPeers uint32 - // When true, only one connection per IP is allowed. Prevents eclipse attacks - // among other things. - limitOneInboundConnectionPerIP bool // When --hypersync is set to true we will attempt fast block synchronization HyperSync bool @@ -80,17 +63,26 @@ type ConnectionManager struct { // concurrently by many goroutines to figure out if outbound connections // should be made to particular addresses. - mtxOutboundConnIPGroups deadlock.Mutex + mtxOutboundConnIPGroups sync.Mutex outboundConnIPGroups map[string]int // The peer maps map peer ID to peers for various types of peer connections. // // A persistent peer is typically one we got through a commandline argument. // The reason it's called persistent is because we maintain a connection to // it, and retry the connection if it fails. - mtxPeerMaps deadlock.RWMutex + mtxPeerMaps sync.RWMutex persistentPeers map[uint64]*Peer outboundPeers map[uint64]*Peer inboundPeers map[uint64]*Peer + connectedPeers map[uint64]*Peer + + mtxConnectionAttempts sync.Mutex + // outboundConnectionAttempts keeps track of the outbound connections, mapping attemptId [uint64] -> connection attempt. 
+ outboundConnectionAttempts map[uint64]*OutboundConnectionAttempt + // outboundConnectionChan is used to signal successful outbound connections to the connection manager. + outboundConnectionChan chan *outboundConnection + // inboundConnectionChan is used to signal successful inbound connections to the connection manager. + inboundConnectionChan chan *inboundConnection // Track the number of outbound peers we have so that this value can // be accessed concurrently when deciding whether or not to add more // outbound peers. @@ -102,21 +94,14 @@ type ConnectionManager struct { // avoid choosing them in the address manager. We need a mutex on this // guy because many goroutines will be querying the address manager // at once. - mtxConnectedOutboundAddrs deadlock.RWMutex - connectedOutboundAddrs map[string]bool - - // Used to set peer ids. Must be incremented atomically. - peerIndex uint64 + mtxAddrsMaps sync.RWMutex + connectedOutboundAddrs map[string]bool + attemptedOutboundAddrs map[string]bool serverMessageQueue chan *ServerMessage - // Keeps track of the network time, which is the median of all of our - // peers' time. - timeSource chainlib.MedianTimeSource - - // Events that can happen to a peer. - newPeerChan chan *Peer - donePeerChan chan *Peer + // peerDisconnectedChan is notified whenever a peer exits. + peerDisconnectedChan chan *Peer // stallTimeoutSeconds is how long we wait to receive responses from Peers // for certain types of messages. 
@@ -129,10 +114,7 @@ type ConnectionManager struct { } func NewConnectionManager( - _params *DeSoParams, _addrMgr *addrmgr.AddrManager, _listeners []net.Listener, - _connectIps []string, _timeSource chainlib.MedianTimeSource, - _targetOutboundPeers uint32, _maxInboundPeers uint32, - _limitOneInboundConnectionPerIP bool, + _params *DeSoParams, _listeners []net.Listener, _hyperSync bool, _syncType NodeSyncType, _stallTimeoutSeconds uint64, @@ -143,55 +125,51 @@ func NewConnectionManager( ValidateHyperSyncFlags(_hyperSync, _syncType) return &ConnectionManager{ - srv: _srv, - params: _params, - AddrMgr: _addrMgr, - listeners: _listeners, - connectIps: _connectIps, + srv: _srv, + params: _params, + listeners: _listeners, // We keep track of the last N nonces we've sent in order to detect // self connections. sentNonces: lru.NewCache(1000), - timeSource: _timeSource, - //newestBlock: _newestBlock, // Initialize the peer data structures. - outboundConnIPGroups: make(map[string]int), - persistentPeers: make(map[uint64]*Peer), - outboundPeers: make(map[uint64]*Peer), - inboundPeers: make(map[uint64]*Peer), - connectedOutboundAddrs: make(map[string]bool), + outboundConnIPGroups: make(map[string]int), + persistentPeers: make(map[uint64]*Peer), + outboundPeers: make(map[uint64]*Peer), + inboundPeers: make(map[uint64]*Peer), + connectedPeers: make(map[uint64]*Peer), + outboundConnectionAttempts: make(map[uint64]*OutboundConnectionAttempt), + connectedOutboundAddrs: make(map[string]bool), + attemptedOutboundAddrs: make(map[string]bool), // Initialize the channels. 
- newPeerChan: make(chan *Peer), - donePeerChan: make(chan *Peer), - - targetOutboundPeers: _targetOutboundPeers, - maxInboundPeers: _maxInboundPeers, - limitOneInboundConnectionPerIP: _limitOneInboundConnectionPerIP, - HyperSync: _hyperSync, - SyncType: _syncType, - serverMessageQueue: _serverMessageQueue, - stallTimeoutSeconds: _stallTimeoutSeconds, - minFeeRateNanosPerKB: _minFeeRateNanosPerKB, - } -} + peerDisconnectedChan: make(chan *Peer, 100), + outboundConnectionChan: make(chan *outboundConnection, 100), + inboundConnectionChan: make(chan *inboundConnection, 100), -func (cmgr *ConnectionManager) GetAddrManager() *addrmgr.AddrManager { - return cmgr.AddrMgr + HyperSync: _hyperSync, + SyncType: _syncType, + serverMessageQueue: _serverMessageQueue, + stallTimeoutSeconds: _stallTimeoutSeconds, + minFeeRateNanosPerKB: _minFeeRateNanosPerKB, + } } -// Check if the address passed shares a group with any addresses already in our -// data structures. -func (cmgr *ConnectionManager) isRedundantGroupKey(na *wire.NetAddress) bool { +// Check if the address passed shares a group with any addresses already in our data structures. +func (cmgr *ConnectionManager) IsFromRedundantOutboundIPAddress(na *wire.NetAddress) bool { groupKey := addrmgr.GroupKey(na) + // For the sake of running multiple nodes on the same machine, we allow localhost connections. + if groupKey == "local" { + return false + } cmgr.mtxOutboundConnIPGroups.Lock() numGroupsForKey := cmgr.outboundConnIPGroups[groupKey] cmgr.mtxOutboundConnIPGroups.Unlock() if numGroupsForKey != 0 && numGroupsForKey != 1 { - glog.V(2).Infof("isRedundantGroupKey: Found numGroupsForKey != (0 or 1). Is (%d) "+ + glog.V(2).Infof("IsFromRedundantOutboundIPAddress: Found numGroupsForKey != (0 or 1). Is (%d) "+ "instead for addr (%s) and group key (%s). 
This "+ "should never happen.", numGroupsForKey, na.IP.String(), groupKey) } @@ -202,7 +180,7 @@ func (cmgr *ConnectionManager) isRedundantGroupKey(na *wire.NetAddress) bool { return true } -func (cmgr *ConnectionManager) addToGroupKey(na *wire.NetAddress) { +func (cmgr *ConnectionManager) AddToGroupKey(na *wire.NetAddress) { groupKey := addrmgr.GroupKey(na) cmgr.mtxOutboundConnIPGroups.Lock() @@ -218,48 +196,13 @@ func (cmgr *ConnectionManager) subFromGroupKey(na *wire.NetAddress) { cmgr.mtxOutboundConnIPGroups.Unlock() } -func (cmgr *ConnectionManager) getRandomAddr() *wire.NetAddress { - for tries := 0; tries < 100; tries++ { - // Lock the address map since multiple threads will be trying to read - // and modify it at the same time. - cmgr.mtxConnectedOutboundAddrs.RLock() - addr := cmgr.AddrMgr.GetAddress() - cmgr.mtxConnectedOutboundAddrs.RUnlock() - - if addr == nil { - glog.V(2).Infof("ConnectionManager.getRandomAddr: addr from GetAddressWithExclusions was nil") - break - } - - if cmgr.connectedOutboundAddrs[addrmgr.NetAddressKey(addr.NetAddress())] { - glog.V(2).Infof("ConnectionManager.getRandomAddr: Not choosing already connected address %v:%v", addr.NetAddress().IP, addr.NetAddress().Port) - continue - } - - // We can only have one outbound address per /16. This is similar to - // Bitcoin and we do it to prevent Sybil attacks. 
- if cmgr.isRedundantGroupKey(addr.NetAddress()) { - glog.V(2).Infof("ConnectionManager.getRandomAddr: Not choosing address due to redundant group key %v:%v", addr.NetAddress().IP, addr.NetAddress().Port) - continue - } - - glog.V(2).Infof("ConnectionManager.getRandomAddr: Returning %v:%v at %d iterations", - addr.NetAddress().IP, addr.NetAddress().Port, tries) - return addr.NetAddress() - } - - glog.V(2).Infof("ConnectionManager.getRandomAddr: Returning nil") - return nil -} - -func _delayRetry(retryCount int, persistentAddrForLogging *wire.NetAddress) { +func _delayRetry(retryCount uint64, persistentAddrForLogging *wire.NetAddress, unit time.Duration) (_retryDuration time.Duration) { // No delay if we haven't tried yet or if the number of retries isn't positive. if retryCount <= 0 { - time.Sleep(time.Second) - return + return 0 } numSecs := int(math.Pow(2.0, float64(retryCount))) - retryDelay := time.Duration(numSecs) * time.Second + retryDelay := time.Duration(numSecs) * unit if persistentAddrForLogging != nil { glog.V(1).Infof("Retrying connection to outbound persistent peer: "+ @@ -268,122 +211,70 @@ func _delayRetry(retryCount int, persistentAddrForLogging *wire.NetAddress) { } else { glog.V(2).Infof("Retrying connection to outbound non-persistent peer in (%d) seconds.", numSecs) } - time.Sleep(retryDelay) + return retryDelay } -func (cmgr *ConnectionManager) enoughOutboundPeers() bool { - val := atomic.LoadUint32(&cmgr.numOutboundPeers) - if val > cmgr.targetOutboundPeers { - glog.Errorf("enoughOutboundPeers: Connected to too many outbound "+ - "peers: (%d). 
Should be "+ - "no more than (%d).", val, cmgr.targetOutboundPeers) - return true - } - - if val == cmgr.targetOutboundPeers { - return true - } - return false +func (cmgr *ConnectionManager) IsConnectedOutboundIpAddress(netAddr *wire.NetAddress) bool { + cmgr.mtxAddrsMaps.RLock() + defer cmgr.mtxAddrsMaps.RUnlock() + return cmgr.connectedOutboundAddrs[addrmgr.NetAddressKey(netAddr)] } -// Chooses a random address and tries to connect to it. Repeats this process until -// it finds a peer that can pass version negotiation. -func (cmgr *ConnectionManager) _getOutboundConn(persistentAddr *wire.NetAddress) net.Conn { - // If a persistentAddr was provided then the connection is a persistent - // one. - isPersistent := (persistentAddr != nil) - retryCount := 0 - for { - if atomic.LoadInt32(&cmgr.shutdown) != 0 { - glog.Info("_getOutboundConn: Ignoring connection due to shutdown") - return nil - } - // We want to start backing off exponentially once we've gone through enough - // unsuccessful retries. However, we want to give more slack to non-persistent - // peers before we start backing off, which is why it's not as cut and dry as - // just delaying based on the raw number of retries. - adjustedRetryCount := retryCount - if !isPersistent { - // If the address is not persistent, only start backing off once there - // has been a large number of failed attempts in a row as this likely indicates - // that there's a connection issue we need to wait out. - adjustedRetryCount = retryCount - 5 - } - _delayRetry(adjustedRetryCount, persistentAddr) - retryCount++ - - // If the connection manager is saturated with non-persistent - // outbound peers, no need to keep trying non-persistent outbound - // connections. - if !isPersistent && cmgr.enoughOutboundPeers() { - glog.V(1).Infof("Dropping connection request to non-persistent outbound " + - "peer because we have enough of them.") - return nil - } - - // If we don't have a persistentAddr, pick one from our addrmgr. 
- ipNetAddr := persistentAddr - if ipNetAddr == nil { - ipNetAddr = cmgr.getRandomAddr() - } - if ipNetAddr == nil { - // This should never happen but if it does, sleep a bit and try again. - glog.V(1).Infof("_getOutboundConn: No valid addresses to connect to.") - time.Sleep(time.Second) - continue - } +func (cmgr *ConnectionManager) IsAttemptedOutboundIpAddress(netAddr *wire.NetAddress) bool { + cmgr.mtxAddrsMaps.RLock() + defer cmgr.mtxAddrsMaps.RUnlock() + return cmgr.attemptedOutboundAddrs[addrmgr.NetAddressKey(netAddr)] +} - netAddr := net.TCPAddr{ - IP: ipNetAddr.IP, - Port: int(ipNetAddr.Port), - } +func (cmgr *ConnectionManager) AddAttemptedOutboundAddrs(netAddr *wire.NetAddress) { + cmgr.mtxAddrsMaps.Lock() + defer cmgr.mtxAddrsMaps.Unlock() + cmgr.attemptedOutboundAddrs[addrmgr.NetAddressKey(netAddr)] = true +} - // If the peer is not persistent, update the addrmgr. - glog.V(1).Infof("Attempting to connect to addr: %v", netAddr) - if !isPersistent { - cmgr.AddrMgr.Attempt(ipNetAddr) - } - var err error - conn, err := net.DialTimeout(netAddr.Network(), netAddr.String(), cmgr.params.DialTimeout) - if err != nil { - // If we failed to connect to this peer, get a new address and try again. - glog.V(1).Infof("Connection to addr (%v) failed: %v", netAddr, err) - continue - } +func (cmgr *ConnectionManager) RemoveAttemptedOutboundAddrs(netAddr *wire.NetAddress) { + cmgr.mtxAddrsMaps.Lock() + defer cmgr.mtxAddrsMaps.Unlock() + delete(cmgr.attemptedOutboundAddrs, addrmgr.NetAddressKey(netAddr)) +} - // We were able to dial successfully so we'll break out now. - glog.V(1).Infof("Connected to addr: %v", netAddr) +// DialPersistentOutboundConnection attempts to connect to a persistent peer. 
+func (cmgr *ConnectionManager) DialPersistentOutboundConnection(persistentAddr *wire.NetAddress, attemptId uint64) (_attemptId uint64) { + glog.V(2).Infof("ConnectionManager.DialPersistentOutboundConnection: Connecting to peer (IP=%v, Port=%v)", + persistentAddr.IP.String(), persistentAddr.Port) + return cmgr._dialOutboundConnection(persistentAddr, attemptId, true) +} - // If this was a non-persistent outbound connection, mark the address as - // connected in the addrmgr. - if !isPersistent { - cmgr.AddrMgr.Connected(ipNetAddr) - } +// DialOutboundConnection attempts to connect to a non-persistent peer. +func (cmgr *ConnectionManager) DialOutboundConnection(addr *wire.NetAddress, attemptId uint64) { + glog.V(2).Infof("ConnectionManager.ConnectOutboundConnection: Connecting to peer (IP=%v, Port=%v)", + addr.IP.String(), addr.Port) + cmgr._dialOutboundConnection(addr, attemptId, false) +} - // We made a successful outbound connection so return. - return conn +// CloseAttemptedConnection closes an ongoing connection attempt. +func (cmgr *ConnectionManager) CloseAttemptedConnection(attemptId uint64) { + glog.V(2).Infof("ConnectionManager.CloseAttemptedConnection: Closing connection attempt %d", attemptId) + cmgr.mtxConnectionAttempts.Lock() + defer cmgr.mtxConnectionAttempts.Unlock() + if attempt, exists := cmgr.outboundConnectionAttempts[attemptId]; exists { + attempt.Stop() + delete(cmgr.outboundConnectionAttempts, attemptId) } } -func IPToNetAddr(ipStr string, addrMgr *addrmgr.AddrManager, params *DeSoParams) (*wire.NetAddress, error) { - port := params.DefaultSocketPort - host, portstr, err := net.SplitHostPort(ipStr) - if err != nil { - // No port specified so leave port=default and set - // host to the ipStr. 
- host = ipStr - } else { - pp, err := strconv.ParseUint(portstr, 10, 16) - if err != nil { - return nil, errors.Wrapf(err, "IPToNetAddr: Can not parse port from %s for ip", ipStr) - } - port = uint16(pp) - } - netAddr, err := addrMgr.HostToNetAddress(host, port, 0) - if err != nil { - return nil, errors.Wrapf(err, "IPToNetAddr: Can not parse port from %s for ip", ipStr) - } - return netAddr, nil +// _dialOutboundConnection is the internal method that spawns and initiates an OutboundConnectionAttempt, which handles the +// connection attempt logic. It returns the attemptId of the attempt that was created. +func (cmgr *ConnectionManager) _dialOutboundConnection(addr *wire.NetAddress, attemptId uint64, isPersistent bool) (_attemptId uint64) { + connectionAttempt := NewOutboundConnectionAttempt(attemptId, addr, isPersistent, + cmgr.params.DialTimeout, cmgr.outboundConnectionChan) + cmgr.mtxConnectionAttempts.Lock() + cmgr.outboundConnectionAttempts[connectionAttempt.attemptId] = connectionAttempt + cmgr.mtxConnectionAttempts.Unlock() + cmgr.AddAttemptedOutboundAddrs(addr) + + connectionAttempt.Start() + return attemptId } // ConnectPeer connects either an INBOUND or OUTBOUND peer. If Conn == nil, @@ -392,158 +283,40 @@ func IPToNetAddr(ipStr string, addrMgr *addrmgr.AddrManager, params *DeSoParams) // is set, then we will connect only to that addr. Otherwise, we will use // the addrmgr to randomly select addrs and create OUTBOUND connections // with them until we find a worthy peer. -func (cmgr *ConnectionManager) ConnectPeer(conn net.Conn, persistentAddr *wire.NetAddress) { - // If we don't have a connection object then we will try and make an - // outbound connection to a peer to get one. - isOutbound := false - if conn == nil { - isOutbound = true - } - isPersistent := (persistentAddr != nil) - retryCount := 0 - for { - // If the peer is persistent use exponential back off delay before retrying. 
- if isPersistent { - _delayRetry(retryCount, persistentAddr) - } - retryCount++ - - // If this is an outbound peer, create an outbound connection. - if isOutbound { - conn = cmgr._getOutboundConn(persistentAddr) - } - - if conn == nil { - // Conn should only be nil if this is a non-persistent outbound peer. - if isPersistent { - glog.Errorf("ConnectPeer: Got a nil connection for a persistent peer. This should never happen: (%s)", persistentAddr.IP.String()) - } - - // If we end up without a connection object, it implies we had enough - // outbound peers so just return. - return - } +func (cmgr *ConnectionManager) ConnectPeer(id uint64, conn net.Conn, na *wire.NetAddress, isOutbound bool, + isPersistent bool) *Peer { - // At this point Conn is set so create a peer object to do - // a version negotiation. - na, err := IPToNetAddr(conn.RemoteAddr().String(), cmgr.AddrMgr, cmgr.params) - if err != nil { - glog.Errorf("ConnectPeer: Problem calling ipToNetAddr for addr: (%s) err: (%v)", conn.RemoteAddr().String(), err) + // At this point Conn is set so create a peer object to do a version negotiation. + peer := NewPeer(id, conn, isOutbound, na, isPersistent, + cmgr.stallTimeoutSeconds, + cmgr.minFeeRateNanosPerKB, + cmgr.params, + cmgr.srv.incomingMessages, cmgr, cmgr.srv, cmgr.SyncType, + cmgr.peerDisconnectedChan) - // If we get an error in the conversion and this is an - // outbound connection, keep trying it. Otherwise, just return. - if isOutbound { - continue - } - return - } - peer := NewPeer(conn, isOutbound, na, isPersistent, - cmgr.stallTimeoutSeconds, - cmgr.minFeeRateNanosPerKB, - cmgr.params, - cmgr.srv.incomingMessages, cmgr, cmgr.srv, cmgr.SyncType) - - if err := peer.NegotiateVersion(cmgr.params.VersionNegotiationTimeout); err != nil { - glog.Errorf("ConnectPeer: Problem negotiating version with peer with addr: (%s) err: (%v)", conn.RemoteAddr().String(), err) - - // If we have an error in the version negotiation we disconnect - // from this peer. 
- peer.Conn.Close() - - // If the connection is outbound, then - // we try a new connection until we get one that works. Otherwise - // we break. - if isOutbound { - continue - } - return - } - peer._logVersionSuccess() + // Now we can add the peer to our data structures. + peer._logAddPeer() + cmgr.addPeer(peer) - // If the version negotiation worked and we have an outbound non-persistent - // connection, mark the address as good in the addrmgr. - if isOutbound && !isPersistent { - cmgr.AddrMgr.Good(na) - } + // Start the peer's message loop. + peer.Start() - // We connected to the peer and it passed its version negotiation. - // Handle the next steps in the main loop. - cmgr.newPeerChan <- peer - - // Once we've successfully connected to a valid peer we're done. The connection - // manager will handle starting the peer and, if this is an outbound peer and - // the peer later disconnects, - // it will potentially try and reconnect the peer or replace the peer with - // a new one so that we always maintain a fixed number of outbound peers. - return - } + return peer } -func (cmgr *ConnectionManager) _initiateOutboundConnections() { - // This is a hack to make outbound connections go away. - if cmgr.targetOutboundPeers == 0 { - return - } - if len(cmgr.connectIps) > 0 { - // Connect to addresses passed via the --connect-ips flag. These addresses - // are persistent in the sense that if we disconnect from one, we will - // try to reconnect to the same one. - for _, connectIp := range cmgr.connectIps { - ipNetAddr, err := IPToNetAddr(connectIp, cmgr.AddrMgr, cmgr.params) - if err != nil { - glog.Error(errors.Errorf("Couldn't connect to IP %v: %v", connectIp, err)) - continue - } - - go func(na *wire.NetAddress) { - cmgr.ConnectPeer(nil, na) - }(ipNetAddr) - } - return - } - // Only connect to addresses from the addrmgr if we don't specify --connect-ips. - // These addresses are *not* persistent, meaning if we disconnect from one we'll - // try a different one. 
- // - // TODO: We should try more addresses than we need initially to increase the - // speed at which we saturate our outbound connections. The ConnectionManager - // will handle the disconnection from peers once we have enough outbound - // connections. I had this as the logic before but removed it because it caused - // contention of the AddrMgr's lock. - for ii := 0; ii < int(cmgr.targetOutboundPeers); ii++ { - go cmgr.ConnectPeer(nil, nil) - } -} - -func (cmgr *ConnectionManager) _isFromRedundantInboundIPAddress(addrToCheck net.Addr) bool { +func (cmgr *ConnectionManager) IsDuplicateInboundIPAddress(netAddr *wire.NetAddress) bool { cmgr.mtxPeerMaps.RLock() defer cmgr.mtxPeerMaps.RUnlock() // Loop through all the peers to see if any have the same IP // address. This map is normally pretty small so doing this // every time a Peer connects should be fine. - netAddr, err := IPToNetAddr(addrToCheck.String(), cmgr.AddrMgr, cmgr.params) - if err != nil { - // Return true in case we have an error. We do this because it - // will result in the peer connection not being accepted, which - // is desired in this case. - glog.Warningf(errors.Wrapf(err, - "ConnectionManager._isFromRedundantInboundIPAddress: Problem parsing "+ - "net.Addr to wire.NetAddress so marking as redundant and not "+ - "making connection").Error()) - return true - } - if netAddr == nil { - glog.Warningf("ConnectionManager._isFromRedundantInboundIPAddress: " + - "address was nil after parsing so marking as redundant and not " + - "making connection") - return true - } + // If the IP is a localhost IP let it slide. This is useful for testing fake // nodes on a local machine. // TODO: Should this be a flag? 
if net.IP([]byte{127, 0, 0, 1}).Equal(netAddr.IP) { - glog.V(1).Infof("ConnectionManager._isFromRedundantInboundIPAddress: Allowing " + + glog.V(1).Infof("ConnectionManager.IsDuplicateInboundIPAddress: Allowing " + "localhost IP address to connect") return false } @@ -578,38 +351,9 @@ func (cmgr *ConnectionManager) _handleInboundConnections() { continue } - // As a quick check, reject the peer if we have too many already. Note that - // this check isn't perfect but we have a later check at the end after doing - // a version negotiation that will properly reject the peer if this check - // messes up e.g. due to a concurrency issue. - // - // TODO: We should instead have eviction logic here to prevent - // someone from monopolizing a node's inbound connections. - numInboundPeers := atomic.LoadUint32(&cmgr.numInboundPeers) - if numInboundPeers > cmgr.maxInboundPeers { - - glog.Infof("Rejecting INBOUND peer (%s) due to max inbound peers (%d) hit.", - conn.RemoteAddr().String(), cmgr.maxInboundPeers) - conn.Close() - - continue - } - - // If we want to limit inbound connections to one per IP address, check to - // make sure this address isn't already connected. 
- if cmgr.limitOneInboundConnectionPerIP && - cmgr._isFromRedundantInboundIPAddress(conn.RemoteAddr()) { - - glog.Infof("Rejecting INBOUND peer (%s) due to already having an "+ - "inbound connection from the same IP with "+ - "limit_one_inbound_connection_per_ip set.", - conn.RemoteAddr().String()) - conn.Close() - - continue + cmgr.inboundConnectionChan <- &inboundConnection{ + connection: conn, } - - go cmgr.ConnectPeer(conn, nil) } }(outerListener) } @@ -622,13 +366,7 @@ func (cmgr *ConnectionManager) GetAllPeers() []*Peer { defer cmgr.mtxPeerMaps.RUnlock() allPeers := []*Peer{} - for _, pp := range cmgr.persistentPeers { - allPeers = append(allPeers, pp) - } - for _, pp := range cmgr.outboundPeers { - allPeers = append(allPeers, pp) - } - for _, pp := range cmgr.inboundPeers { + for _, pp := range cmgr.connectedPeers { allPeers = append(allPeers, pp) } @@ -686,12 +424,11 @@ func (cmgr *ConnectionManager) addPeer(pp *Peer) { // number of outbound peers. Also add the peer's address to // our map. if _, ok := peerList[pp.ID]; !ok { - cmgr.addToGroupKey(pp.netAddr) atomic.AddUint32(&cmgr.numOutboundPeers, 1) - cmgr.mtxConnectedOutboundAddrs.Lock() + cmgr.mtxAddrsMaps.Lock() cmgr.connectedOutboundAddrs[addrmgr.NetAddressKey(pp.netAddr)] = true - cmgr.mtxConnectedOutboundAddrs.Unlock() + cmgr.mtxAddrsMaps.Unlock() } } else { // This is an inbound peer. 
@@ -700,10 +437,45 @@ func (cmgr *ConnectionManager) addPeer(pp *Peer) { } peerList[pp.ID] = pp + cmgr.connectedPeers[pp.ID] = pp +} + +func (cmgr *ConnectionManager) getPeer(id uint64) *Peer { + cmgr.mtxPeerMaps.RLock() + defer cmgr.mtxPeerMaps.RUnlock() + + if peer, ok := cmgr.connectedPeers[id]; ok { + return peer + } + return nil +} + +func (cmgr *ConnectionManager) SendMessage(msg DeSoMessage, peerId uint64) error { + peer := cmgr.getPeer(peerId) + if peer == nil { + return fmt.Errorf("SendMessage: Peer with ID %d not found", peerId) + } + glog.V(1).Infof("SendMessage: Sending message %v to peer %d", msg.GetMsgType().String(), peerId) + peer.AddDeSoMessage(msg, false) + return nil +} + +func (cmgr *ConnectionManager) CloseConnection(peerId uint64) { + glog.V(2).Infof("ConnectionManager.CloseConnection: Closing connection to peer (id= %v)", peerId) + + var peer *Peer + var ok bool + cmgr.mtxPeerMaps.Lock() + peer, ok = cmgr.connectedPeers[peerId] + cmgr.mtxPeerMaps.Unlock() + if !ok { + return + } + peer.Disconnect() } // Update our data structures to remove this peer. -func (cmgr *ConnectionManager) RemovePeer(pp *Peer) { +func (cmgr *ConnectionManager) removePeer(pp *Peer) { // Acquire the mtxPeerMaps lock for writing. cmgr.mtxPeerMaps.Lock() defer cmgr.mtxPeerMaps.Unlock() @@ -724,9 +496,9 @@ func (cmgr *ConnectionManager) RemovePeer(pp *Peer) { cmgr.subFromGroupKey(pp.netAddr) atomic.AddUint32(&cmgr.numOutboundPeers, Uint32Dec) - cmgr.mtxConnectedOutboundAddrs.Lock() + cmgr.mtxAddrsMaps.Lock() delete(cmgr.connectedOutboundAddrs, addrmgr.NetAddressKey(pp.netAddr)) - cmgr.mtxConnectedOutboundAddrs.Unlock() + cmgr.mtxAddrsMaps.Unlock() } } else { // This is an inbound peer. @@ -737,25 +509,12 @@ func (cmgr *ConnectionManager) RemovePeer(pp *Peer) { // Update the last seen time before we finish removing the peer. // TODO: Really, we call 'Connected()' on removing a peer? // I can't find a Disconnected() but seems odd. 
- cmgr.AddrMgr.Connected(pp.netAddr) + // FIXME: Move this to Done Peer + //cmgr.AddrMgr.Connected(pp.netAddr) // Remove the peer from our data structure. delete(peerList, pp.ID) -} - -func (cmgr *ConnectionManager) _maybeReplacePeer(pp *Peer) { - // If the peer was outbound, replace her with a - // new peer to maintain a fixed number of outbound connections. - if pp.isOutbound { - // If the peer is not persistent then we don't want to pass an - // address to connectPeer. The lack of an address will cause it - // to choose random addresses from the addrmgr until one works. - na := pp.netAddr - if !pp.isPersistent { - na = nil - } - go cmgr.ConnectPeer(nil, na) - } + delete(cmgr.connectedPeers, pp.ID) } func (cmgr *ConnectionManager) _logOutboundPeerData() { @@ -763,24 +522,28 @@ func (cmgr *ConnectionManager) _logOutboundPeerData() { numInboundPeers := int(atomic.LoadUint32(&cmgr.numInboundPeers)) numPersistentPeers := int(atomic.LoadUint32(&cmgr.numPersistentPeers)) glog.V(1).Infof("Num peers: OUTBOUND(%d) INBOUND(%d) PERSISTENT(%d)", numOutboundPeers, numInboundPeers, numPersistentPeers) +} - cmgr.mtxOutboundConnIPGroups.Lock() - for _, vv := range cmgr.outboundConnIPGroups { - if vv != 0 && vv != 1 { - glog.V(1).Infof("_logOutboundPeerData: Peer group count != (0 or 1). "+ - "Is (%d) instead. 
This "+ - "should never happen.", vv) - } - } - cmgr.mtxOutboundConnIPGroups.Unlock() +func (cmgr *ConnectionManager) GetNumInboundPeers() uint32 { + return atomic.LoadUint32(&cmgr.numInboundPeers) +} + +func (cmgr *ConnectionManager) GetNumOutboundPeers() uint32 { + return atomic.LoadUint32(&cmgr.numOutboundPeers) } func (cmgr *ConnectionManager) Stop() { + cmgr.mtxPeerMaps.Lock() + defer cmgr.mtxPeerMaps.Unlock() + if atomic.AddInt32(&cmgr.shutdown, 1) != 1 { glog.Warningf("ConnectionManager.Stop is already in the process of " + "shutting down") return } + for id := range cmgr.outboundConnectionAttempts { + cmgr.CloseAttemptedConnection(id) + } glog.Infof("ConnectionManager: Stopping, number of inbound peers (%v), number of outbound "+ "peers (%v), number of persistent peers (%v).", len(cmgr.inboundPeers), len(cmgr.outboundPeers), len(cmgr.persistentPeers)) @@ -823,10 +586,6 @@ func (cmgr *ConnectionManager) Start() { // - Have the peer enter a switch statement listening for all kinds of messages. // - Send addr and getaddr messages as appropriate. - // Initiate outbound connections with peers either using the --connect-ips passed - // in or using the addrmgr. - cmgr._initiateOutboundConnections() - // Accept inbound connections from peers on our listeners. cmgr._handleInboundConnections() @@ -837,90 +596,51 @@ func (cmgr *ConnectionManager) Start() { cmgr._logOutboundPeerData() select { - case pp := <-cmgr.newPeerChan: - { - // We have successfully connected to a peer and it passed its version - // negotiation. - - // if this is a non-persistent outbound peer and we already have enough - // outbound peers, then don't bother adding this one. 
- if !pp.isPersistent && pp.isOutbound && cmgr.enoughOutboundPeers() { - // TODO: Make this less verbose - glog.V(1).Infof("Dropping peer because we already have enough outbound peer connections.") - pp.Conn.Close() - continue - } - - // If this is a non-persistent outbound peer and the group key - // overlaps with another peer we're already connected to then - // abort mission. We only connect to one peer per IP group in - // order to prevent Sybil attacks. - if pp.isOutbound && - !pp.isPersistent && - cmgr.isRedundantGroupKey(pp.netAddr) { - - // TODO: Make this less verbose - glog.Infof("Rejecting OUTBOUND NON-PERSISTENT peer (%v) with "+ - "redundant group key (%s).", - pp, addrmgr.GroupKey(pp.netAddr)) - - pp.Conn.Close() - cmgr._maybeReplacePeer(pp) - continue - } - - // Check that we have not exceeded the maximum number of inbound - // peers allowed. - // - // TODO: We should instead have eviction logic to prevent - // someone from monopolizing a node's inbound connections. - numInboundPeers := atomic.LoadUint32(&cmgr.numInboundPeers) - if !pp.isOutbound && numInboundPeers > cmgr.maxInboundPeers { - - // TODO: Make this less verbose - glog.Infof("Rejecting INBOUND peer (%v) due to max inbound peers (%d) hit.", - pp, cmgr.maxInboundPeers) - - pp.Conn.Close() - continue - } - - // Now we can add the peer to our data structures. - pp._logAddPeer() - cmgr.addPeer(pp) - - // Start the peer's message loop. - pp.Start() - - // Signal the server about the new Peer in case it wants to do something with it. 
- cmgr.serverMessageQueue <- &ServerMessage{ - Peer: pp, - Msg: &MsgDeSoNewPeer{}, - } - + case oc := <-cmgr.outboundConnectionChan: + if oc.failed { + glog.V(2).Infof("ConnectionManager.Start: Failed to establish an outbound connection with "+ + "(id= %v)", oc.attemptId) + } else { + glog.V(2).Infof("ConnectionManager.Start: Successfully established an outbound connection with "+ + "(addr= %v) (id= %v)", oc.connection.RemoteAddr(), oc.attemptId) + } + cmgr.mtxConnectionAttempts.Lock() + delete(cmgr.outboundConnectionAttempts, oc.attemptId) + cmgr.mtxConnectionAttempts.Unlock() + cmgr.serverMessageQueue <- &ServerMessage{ + Peer: nil, + Msg: &MsgDeSoNewConnection{ + Connection: oc, + }, } - case pp := <-cmgr.donePeerChan: + case ic := <-cmgr.inboundConnectionChan: + glog.V(2).Infof("ConnectionManager.Start: Successfully received an inbound connection from "+ + "(addr= %v)", ic.connection.RemoteAddr()) + cmgr.serverMessageQueue <- &ServerMessage{ + Peer: nil, + Msg: &MsgDeSoNewConnection{ + Connection: ic, + }, + } + case pp := <-cmgr.peerDisconnectedChan: { // By the time we get here, it can be assumed that the Peer's Disconnect function // has already been called, since that is what's responsible for adding the peer // to this queue in the first place. - glog.V(1).Infof("Done with peer (%v).", pp) + glog.V(1).Infof("Done with peer (id=%v).", pp.ID) - if !pp.PeerManuallyRemovedFromConnectionManager { - // Remove the peer from our data structures. - cmgr.RemovePeer(pp) + // Remove the peer from our data structures. + cmgr.removePeer(pp) - // Potentially replace the peer. For example, if the Peer was an outbound Peer - // then we want to find a new peer in order to maintain our TargetOutboundPeers. - cmgr._maybeReplacePeer(pp) - } + // Potentially replace the peer. For example, if the Peer was an outbound Peer + // then we want to find a new peer in order to maintain our TargetOutboundPeers. 
// Signal the server about the Peer being done in case it wants to do something // with it. cmgr.serverMessageQueue <- &ServerMessage{ Peer: pp, - Msg: &MsgDeSoDonePeer{}, + Msg: &MsgDeSoDisconnectedPeer{}, } } } diff --git a/lib/constants.go b/lib/constants.go index 9c92a47a3..9d5832016 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -496,6 +496,34 @@ func GetEncoderMigrationHeightsList(forkHeights *ForkHeights) ( return migrationHeightsList } +type ProtocolVersionType uint64 + +const ( + // ProtocolVersion0 is the first version of the DeSo protocol, running Proof of Work. + ProtocolVersion0 ProtocolVersionType = 0 + // ProtocolVersion1 nodes run Proof of Work, and new node services such as rosetta, hypersync. + // The version indicates that the node supports P2P features related to these new services. + ProtocolVersion1 ProtocolVersionType = 1 + // ProtocolVersion2 is the latest version of the DeSo protocol, running Proof of Stake. + ProtocolVersion2 ProtocolVersionType = 2 +) + +func NewProtocolVersionType(version uint64) ProtocolVersionType { + return ProtocolVersionType(version) +} + +func (pvt ProtocolVersionType) ToUint64() uint64 { + return uint64(pvt) +} + +func (pvt ProtocolVersionType) Before(version ProtocolVersionType) bool { + return pvt.ToUint64() < version.ToUint64() +} + +func (pvt ProtocolVersionType) After(version ProtocolVersionType) bool { + return pvt.ToUint64() > version.ToUint64() +} + // DeSoParams defines the full list of possible parameters for the // DeSo network. type DeSoParams struct { @@ -504,7 +532,7 @@ type DeSoParams struct { // Set to true when we're running in regtest mode. This is useful for testing. ExtraRegtestParamUpdaterKeys map[PkMapKey]bool // The current protocol version we're running. - ProtocolVersion uint64 + ProtocolVersion ProtocolVersionType // The minimum protocol version we'll allow a peer we connect to // to have. 
MinProtocolVersion uint64 @@ -549,6 +577,9 @@ type DeSoParams struct { // network before checking for double-spends. BitcoinDoubleSpendWaitSeconds float64 + // ServerMessageChannelSize sets the minimum size of the server's incomingMessage channel, which handles peer messages. + ServerMessageChannelSize uint32 + // This field allows us to set the amount purchased at genesis to a non-zero // value. DeSoNanosPurchasedAtGenesis uint64 @@ -562,6 +593,14 @@ type DeSoParams struct { DialTimeout time.Duration // The amount of time we wait to receive a version message from a peer. VersionNegotiationTimeout time.Duration + // The amount of time we wait to receive a verack message from a peer. + VerackNegotiationTimeout time.Duration + + // The amount of time it takes NetworkManager to refresh its routines. + NetworkManagerRefreshDuration time.Duration + + // The maximum number of addresses to broadcast to peers. + MaxAddressesToBroadcast uint32 // The genesis block to use as the base of our chain. GenesisBlock *MsgDeSoBlock @@ -755,6 +794,12 @@ type DeSoParams struct { // for a description of its usage. DefaultMempoolFeeEstimatorNumPastBlocks uint64 + // HandshakeTimeoutMicroSeconds is the timeout for the peer handshake certificate. The default value is 15 minutes. + HandshakeTimeoutMicroSeconds uint64 + + // DisableNetworkManagerRoutines is a testing flag that disables the network manager routines. + DisableNetworkManagerRoutines bool + ForkHeights ForkHeights EncoderMigrationHeights *EncoderMigrationHeights @@ -819,12 +864,15 @@ func (params *DeSoParams) EnableRegtest() { // Clear the seeds params.DNSSeeds = []string{} + // Set the protocol version + params.ProtocolVersion = ProtocolVersion2 + // Mine blocks incredibly quickly params.TimeBetweenBlocks = 2 * time.Second params.TimeBetweenDifficultyRetargets = 6 * time.Second // Make sure we don't care about blockchain tip age. 
params.MaxTipAgePoW = 1000000 * time.Hour - params.MaxTipAgePoS = 1000000 * time.Hour + params.MaxTipAgePoS = 4 * time.Hour // Allow block rewards to be spent instantly params.BlockRewardMaturity = 0 @@ -992,7 +1040,7 @@ var MainnetForkHeights = ForkHeights{ // DeSoMainnetParams defines the DeSo parameters for the mainnet. var DeSoMainnetParams = DeSoParams{ NetworkType: NetworkType_MAINNET, - ProtocolVersion: 1, + ProtocolVersion: ProtocolVersion1, MinProtocolVersion: 1, UserAgent: "Architect", DNSSeeds: []string{ @@ -1069,12 +1117,17 @@ var DeSoMainnetParams = DeSoParams{ BitcoinExchangeFeeBasisPoints: 10, BitcoinDoubleSpendWaitSeconds: 5.0, + ServerMessageChannelSize: uint32(100), DeSoNanosPurchasedAtGenesis: uint64(6000000000000000), DefaultSocketPort: uint16(17000), DefaultJSONPort: uint16(17001), - DialTimeout: 30 * time.Second, - VersionNegotiationTimeout: 30 * time.Second, + DialTimeout: 30 * time.Second, + VersionNegotiationTimeout: 30 * time.Second, + VerackNegotiationTimeout: 30 * time.Second, + NetworkManagerRefreshDuration: 1 * time.Second, + + MaxAddressesToBroadcast: 10, BlockRewardMaturity: time.Hour * 3, @@ -1214,6 +1267,12 @@ var DeSoMainnetParams = DeSoParams{ // The number of past blocks to consider when estimating the mempool fee. DefaultMempoolFeeEstimatorNumPastBlocks: 50, + // The peer handshake certificate timeout. + HandshakeTimeoutMicroSeconds: uint64(900000000), + + // DisableNetworkManagerRoutines is a testing flag that disables the network manager routines. + DisableNetworkManagerRoutines: false, + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1313,7 +1372,7 @@ var TestnetForkHeights = ForkHeights{ // DeSoTestnetParams defines the DeSo parameters for the testnet. 
var DeSoTestnetParams = DeSoParams{ NetworkType: NetworkType_TESTNET, - ProtocolVersion: 0, + ProtocolVersion: ProtocolVersion0, MinProtocolVersion: 0, UserAgent: "Architect", DNSSeeds: []string{ @@ -1328,6 +1387,7 @@ var DeSoTestnetParams = DeSoParams{ BitcoinBurnAddress: "mhziDsPWSMwUqvZkVdKY92CjesziGP3wHL", BitcoinExchangeFeeBasisPoints: 10, BitcoinDoubleSpendWaitSeconds: 5.0, + ServerMessageChannelSize: uint32(100), DeSoNanosPurchasedAtGenesis: uint64(6000000000000000), // See comment in mainnet config. @@ -1354,8 +1414,12 @@ var DeSoTestnetParams = DeSoParams{ DefaultSocketPort: uint16(18000), DefaultJSONPort: uint16(18001), - DialTimeout: 30 * time.Second, - VersionNegotiationTimeout: 30 * time.Second, + DialTimeout: 30 * time.Second, + VersionNegotiationTimeout: 30 * time.Second, + VerackNegotiationTimeout: 30 * time.Second, + NetworkManagerRefreshDuration: 1 * time.Second, + + MaxAddressesToBroadcast: 10, GenesisBlock: &GenesisBlock, GenesisBlockHashHex: GenesisBlockHashHex, @@ -1497,6 +1561,12 @@ var DeSoTestnetParams = DeSoParams{ // The number of past blocks to consider when estimating the mempool fee. DefaultMempoolFeeEstimatorNumPastBlocks: 50, + // The peer handshake certificate timeout. + HandshakeTimeoutMicroSeconds: uint64(900000000), + + // DisableNetworkManagerRoutines is a testing flag that disables the network manager routines. + DisableNetworkManagerRoutines: false, + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), diff --git a/lib/db_utils.go b/lib/db_utils.go index e0aa9f9de..5ee4e649a 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -595,7 +595,7 @@ type DBPrefixes struct { // PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry: Retrieve a snapshotted BLSPublicKeyPKIDPairEntry // by BLS Public Key and SnapshotAtEpochNumber. 
// Prefix, , -> *BLSPublicKeyPKIDPairEntry - PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry []byte `prefix_id:"[96]" is_state:"true"` + PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry []byte `prefix_id:"[96]" is_state:"true" core_state:"true"` // NEXT_TAG: 97 } diff --git a/lib/errors.go b/lib/errors.go index 0bae19fbd..cef07c820 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -62,6 +62,7 @@ const ( RuleErrorInvalidTransactionSignature RuleError = "RuleErrorInvalidTransactionSignature" RuleErrorBlockRewardTxnMustHaveOneOutput RuleError = "RuleErrorBlockRewardTxnMustHaveOneOutput" RuleErrorBlockHeightAfterProofOfStakeCutover RuleError = "RuleErrorBlockHeightAfterProofOfStakeCutover" + RuleErrorBestChainIsAtProofOfStakeCutover RuleError = "RuleErrorBestChainIsAtProofOfStakeCutover" RuleErrorMissingBlockProducerSignature RuleError = "RuleErrorMissingBlockProducerSignature" RuleErrorInvalidBlockProducerPublicKey RuleError = "RuleErrorInvalidBlockProducerPublicKey" @@ -546,6 +547,7 @@ const ( HeaderErrorHeightInvalid RuleError = "HeaderErrorHeightInvalid" HeaderErrorDifficultyBitsNotConsistentWithTargetDifficultyComputedFromParent RuleError = "HeaderErrorDifficultyBitsNotConsistentWithTargetDifficultyComputedFromParent" HeaderErrorBlockHeightAfterProofOfStakeCutover RuleError = "HeaderErrorBlockHeightAfterProofOfStakeCutover" + HeaderErrorBestChainIsAtProofOfStakeCutover RuleError = "HeaderErrorBestChainIsAtProofOfStakeCutover" TxErrorTooLarge RuleError = "TxErrorTooLarge" TxErrorDuplicate RuleError = "TxErrorDuplicate" diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index c7e3bb770..6a3756d37 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -286,6 +286,8 @@ func (mp *DeSoMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { } func (mp *DeSoMempool) GetOrderedTransactions() []*MempoolTx { + mp.mtx.RLock() + defer mp.mtx.RUnlock() orderedTxns, _, _ := mp.GetTransactionsOrderedByTimeAdded() return orderedTxns } @@ -2129,24 +2131,6 
@@ func _computeBitcoinExchangeFields(params *DeSoParams, }, PkToString(publicKey.SerializeCompressed(), params), nil } -func ConnectTxnAndComputeTransactionMetadata( - txn *MsgDeSoTxn, utxoView *UtxoView, blockHash *BlockHash, - blockHeight uint32, blockTimestampNanoSecs int64, txnIndexInBlock uint64) (*TransactionMetadata, error) { - - totalNanosPurchasedBefore := utxoView.NanosPurchased - usdCentsPerBitcoinBefore := utxoView.GetCurrentUSDCentsPerBitcoin() - utxoOps, totalInput, totalOutput, fees, err := utxoView._connectTransaction( - txn, txn.Hash(), blockHeight, blockTimestampNanoSecs, false, false, - ) - if err != nil { - return nil, fmt.Errorf( - "UpdateTxindex: Error connecting txn to UtxoView: %v", err) - } - - return ComputeTransactionMetadata(txn, utxoView, blockHash, totalNanosPurchasedBefore, - usdCentsPerBitcoinBefore, totalInput, totalOutput, fees, txnIndexInBlock, utxoOps, uint64(blockHeight)), nil -} - // This is the main function used for adding a new txn to the pool. It will // run all needed validation on the txn before adding it, and it will only // accept the txn if these validations pass. 
@@ -2446,7 +2430,21 @@ func EstimateMaxTxnFeeV1(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) uint64 { func (mp *DeSoMempool) EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64, _ uint64, _ uint64, _ uint64, _ uint64, _ uint64) (uint64, error) { - return EstimateMaxTxnFeeV1(txn, minFeeRateNanosPerKB), nil + feeRate, _ := mp.EstimateFeeRate(minFeeRateNanosPerKB, 0, 0, 0, 0, 0) + return EstimateMaxTxnFeeV1(txn, feeRate), nil +} + +func (mp *DeSoMempool) EstimateFeeRate( + minFeeRateNanosPerKB uint64, + _ uint64, + _ uint64, + _ uint64, + _ uint64, + _ uint64) (uint64, error) { + if minFeeRateNanosPerKB < mp.readOnlyUtxoView.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB { + return mp.readOnlyUtxoView.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB, nil + } + return minFeeRateNanosPerKB, nil } func convertMempoolTxsToSummaryStats(mempoolTxs []*MempoolTx) map[string]*SummaryStats { diff --git a/lib/miner.go b/lib/miner.go index 06272da42..5bc1e45d1 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -198,10 +198,6 @@ func (desoMiner *DeSoMiner) MineAndProcessSingleBlock(threadIndex uint32, mempoo return nil, fmt.Errorf("DeSoMiner._startThread: _mineSingleBlock returned nil; should only happen if we're stopping") } - if desoMiner.params.IsPoSBlockHeight(blockToMine.Header.Height) { - return nil, fmt.Errorf("DeSoMiner._startThread: _mineSingleBlock returned a block that is past the Proof of Stake Cutover") - } - // Log information on the block we just mined. bestHash, _ := blockToMine.Hash() glog.Infof("================== YOU MINED A NEW BLOCK! ================== Height: %d, Hash: %s", blockToMine.Header.Height, hex.EncodeToString(bestHash[:])) @@ -293,6 +289,12 @@ func (desoMiner *DeSoMiner) _startThread(threadIndex uint32) { continue } + // Exit if blockchain has connected a block at the final PoW block height. 
+ currentTip := desoMiner.BlockProducer.chain.blockTip() + if currentTip.Header.Height >= desoMiner.params.GetFinalPoWBlockHeight() { + return + } + newBlock, err := desoMiner.MineAndProcessSingleBlock(threadIndex, nil /*mempoolToUpdate*/) if err != nil { glog.Errorf(err.Error()) @@ -317,8 +319,12 @@ func (desoMiner *DeSoMiner) Start() { "start the miner") return } - glog.Infof("DeSoMiner.Start: Starting miner with difficulty target %s", desoMiner.params.MinDifficultyTargetHex) blockTip := desoMiner.BlockProducer.chain.blockTip() + if desoMiner.params.IsPoSBlockHeight(blockTip.Header.Height) { + glog.Infof("DeSoMiner.Start: NOT starting miner because we are at a PoS block height %d", blockTip.Header.Height) + return + } + glog.Infof("DeSoMiner.Start: Starting miner with difficulty target %s", desoMiner.params.MinDifficultyTargetHex) glog.Infof("DeSoMiner.Start: Block tip height %d, cum work %v, and difficulty %v", blockTip.Header.Height, BigintToHash(blockTip.CumWork), blockTip.DifficultyTarget) // Start a bunch of threads to mine for blocks. @@ -400,6 +406,10 @@ func HashToBigint(hash *BlockHash) *big.Int { } func BigintToHash(bigint *big.Int) *BlockHash { + if bigint == nil { + glog.Errorf("BigintToHash: Bigint is nil") + return nil + } hexStr := bigint.Text(16) if len(hexStr)%2 != 0 { // If we have an odd number of bytes add one to the beginning (remember @@ -410,6 +420,7 @@ func BigintToHash(bigint *big.Int) *BlockHash { if err != nil { glog.Errorf("Failed in converting bigint (%#v) with hex "+ "string (%s) to hash.", bigint, hexStr) + return nil } if len(hexBytes) > HashSizeBytes { glog.Errorf("BigintToHash: Bigint %v overflows the hash size %d", bigint, HashSizeBytes) diff --git a/lib/network.go b/lib/network.go index efcc1772b..d8c5af284 100644 --- a/lib/network.go +++ b/lib/network.go @@ -109,11 +109,12 @@ const ( // TODO: Should probably split these out into a separate channel in the server to // make things more parallelized. 
- MsgTypeQuit MsgType = ControlMessagesStart - MsgTypeNewPeer MsgType = ControlMessagesStart + 1 - MsgTypeDonePeer MsgType = ControlMessagesStart + 2 - MsgTypeBlockAccepted MsgType = ControlMessagesStart + 3 - MsgTypeBitcoinManagerUpdate MsgType = ControlMessagesStart + 4 // Deprecated + MsgTypeQuit MsgType = ControlMessagesStart + MsgTypeDisconnectedPeer MsgType = ControlMessagesStart + 1 + MsgTypeBlockAccepted MsgType = ControlMessagesStart + 2 + MsgTypeBitcoinManagerUpdate MsgType = ControlMessagesStart + 3 // Deprecated + MsgTypePeerHandshakeComplete MsgType = ControlMessagesStart + 4 + MsgTypeNewConnection MsgType = ControlMessagesStart + 5 // NEXT_TAG = 7 ) @@ -171,14 +172,16 @@ func (msgType MsgType) String() string { return "GET_ADDR" case MsgTypeQuit: return "QUIT" - case MsgTypeNewPeer: - return "NEW_PEER" - case MsgTypeDonePeer: + case MsgTypeDisconnectedPeer: return "DONE_PEER" case MsgTypeBlockAccepted: return "BLOCK_ACCEPTED" case MsgTypeBitcoinManagerUpdate: return "BITCOIN_MANAGER_UPDATE" + case MsgTypePeerHandshakeComplete: + return "PEER_HANDSHAKE_COMPLETE" + case MsgTypeNewConnection: + return "NEW_CONNECTION" case MsgTypeGetSnapshot: return "GET_SNAPSHOT" case MsgTypeSnapshotData: @@ -835,34 +838,47 @@ func (msg *MsgDeSoQuit) FromBytes(data []byte) error { return fmt.Errorf("MsgDeSoQuit.FromBytes not implemented") } -type MsgDeSoNewPeer struct { +type MsgDeSoDisconnectedPeer struct { } -func (msg *MsgDeSoNewPeer) GetMsgType() MsgType { - return MsgTypeNewPeer +func (msg *MsgDeSoDisconnectedPeer) GetMsgType() MsgType { + return MsgTypeDisconnectedPeer } -func (msg *MsgDeSoNewPeer) ToBytes(preSignature bool) ([]byte, error) { - return nil, fmt.Errorf("MsgDeSoNewPeer.ToBytes: Not implemented") +func (msg *MsgDeSoDisconnectedPeer) ToBytes(preSignature bool) ([]byte, error) { + return nil, fmt.Errorf("MsgDeSoDisconnectedPeer.ToBytes: Not implemented") } -func (msg *MsgDeSoNewPeer) FromBytes(data []byte) error { - return 
fmt.Errorf("MsgDeSoNewPeer.FromBytes not implemented") +func (msg *MsgDeSoDisconnectedPeer) FromBytes(data []byte) error { + return fmt.Errorf("MsgDeSoDisconnectedPeer.FromBytes not implemented") } -type MsgDeSoDonePeer struct { +type ConnectionType uint8 + +const ( + ConnectionTypeOutbound ConnectionType = iota + ConnectionTypeInbound +) + +type Connection interface { + GetConnectionType() ConnectionType + Close() } -func (msg *MsgDeSoDonePeer) GetMsgType() MsgType { - return MsgTypeDonePeer +type MsgDeSoNewConnection struct { + Connection Connection } -func (msg *MsgDeSoDonePeer) ToBytes(preSignature bool) ([]byte, error) { - return nil, fmt.Errorf("MsgDeSoDonePeer.ToBytes: Not implemented") +func (msg *MsgDeSoNewConnection) GetMsgType() MsgType { + return MsgTypeNewConnection } -func (msg *MsgDeSoDonePeer) FromBytes(data []byte) error { - return fmt.Errorf("MsgDeSoDonePeer.FromBytes not implemented") +func (msg *MsgDeSoNewConnection) ToBytes(preSignature bool) ([]byte, error) { + return nil, fmt.Errorf("MsgDeSoNewConnection.ToBytes: Not implemented") +} + +func (msg *MsgDeSoNewConnection) FromBytes(data []byte) error { + return fmt.Errorf("MsgDeSoNewConnection.FromBytes not implemented") } // ================================================================== @@ -1509,16 +1525,21 @@ func (msg *MsgDeSoPong) FromBytes(data []byte) error { type ServiceFlag uint64 const ( - // SFFullNodeDeprecated is deprecated, and set on all nodes by default - // now. We basically split it into SFHyperSync and SFArchivalMode. - SFFullNodeDeprecated ServiceFlag = 1 << iota + // SFFullNodeDeprecated is deprecated, and set on all nodes by default now. + SFFullNodeDeprecated ServiceFlag = 1 << 0 // SFHyperSync is a flag used to indicate that the peer supports hyper sync. - SFHyperSync + SFHyperSync ServiceFlag = 1 << 1 // SFArchivalNode is a flag complementary to SFHyperSync. 
If node is a hypersync node then // it might not be able to support block sync anymore, unless it has archival mode turned on. - SFArchivalNode + SFArchivalNode ServiceFlag = 1 << 2 + // SFPosValidator is a flag used to indicate that the peer is running a PoS validator. + SFPosValidator ServiceFlag = 1 << 3 ) +func (sf ServiceFlag) HasService(serviceFlag ServiceFlag) bool { + return sf&serviceFlag == serviceFlag +} + type MsgDeSoVersion struct { // What is the current version we're on? Version uint64 @@ -1542,8 +1563,7 @@ type MsgDeSoVersion struct { // The height of the last block on the main chain for // this node. // - // TODO: We need to update this to uint64 - StartBlockHeight uint32 + LatestBlockHeight uint64 // MinFeeRateNanosPerKB is the minimum feerate that a peer will // accept from other peers when validating transactions. @@ -1575,11 +1595,11 @@ func (msg *MsgDeSoVersion) ToBytes(preSignature bool) ([]byte, error) { retBytes = append(retBytes, UintToBuf(uint64(len(msg.UserAgent)))...) retBytes = append(retBytes, msg.UserAgent...) - // StartBlockHeight - retBytes = append(retBytes, UintToBuf(uint64(msg.StartBlockHeight))...) + // LatestBlockHeight + retBytes = append(retBytes, UintToBuf(msg.LatestBlockHeight)...) // MinFeeRateNanosPerKB - retBytes = append(retBytes, UintToBuf(uint64(msg.MinFeeRateNanosPerKB))...) + retBytes = append(retBytes, UintToBuf(msg.MinFeeRateNanosPerKB)...) // JSONAPIPort - deprecated retBytes = append(retBytes, UintToBuf(uint64(0))...) 
@@ -1653,13 +1673,13 @@ func (msg *MsgDeSoVersion) FromBytes(data []byte) error { retVer.UserAgent = string(userAgent) } - // StartBlockHeight + // LatestBlockHeight { - lastBlockHeight, err := ReadUvarint(rr) - if err != nil || lastBlockHeight > math.MaxUint32 { + latestBlockHeight, err := ReadUvarint(rr) + if err != nil || latestBlockHeight > math.MaxUint32 { return errors.Wrapf(err, "MsgDeSoVersion.FromBytes: Problem converting msg.LatestBlockHeight") } - retVer.StartBlockHeight = uint32(lastBlockHeight) + retVer.LatestBlockHeight = latestBlockHeight } // MinFeeRateNanosPerKB @@ -1862,34 +1882,144 @@ func (msg *MsgDeSoGetAddr) GetMsgType() MsgType { // VERACK Message // ================================================================== -// VERACK messages have no payload. +type VerackVersion uint64 + +func NewVerackVersion(version uint64) VerackVersion { + return VerackVersion(version) +} + +const ( + VerackVersion0 VerackVersion = 0 + VerackVersion1 VerackVersion = 1 +) + +func (vv VerackVersion) ToUint64() uint64 { + return uint64(vv) +} + type MsgDeSoVerack struct { - // A verack message must contain the nonce the peer received in the - // initial version message. This ensures the peer that is communicating - // with us actually controls the address she says she does similar to - // "SYN Cookie" DDOS protection. - Nonce uint64 + // The VerackVersion0 message contains only the NonceReceived field, which is the nonce the sender received in the + // initial version message from the peer. This ensures the sender controls the network address, similarly to the + // "SYN Cookie" DDOS protection. The Version field in the VerackVersion0 message is implied, based on the msg length. + // + // The VerackVersion1 message contains the tuple of which correspond to the + // received and sent nonces in the version message from the sender's perspective, as well as a recent timestamp. 
+ // The VerackVersion1 message is used in context of Proof of Stake, where validators register their BLS public keys + // as part of their validator entry. The sender of this message must be a registered validator, and he must attach + // their public key to the message, along with a BLS signature of the tuple. + Version VerackVersion + + NonceReceived uint64 + NonceSent uint64 + TstampMicro uint64 + + PublicKey *bls.PublicKey + Signature *bls.Signature } func (msg *MsgDeSoVerack) ToBytes(preSignature bool) ([]byte, error) { + switch msg.Version { + case VerackVersion0: + return msg.EncodeVerackV0() + case VerackVersion1: + return msg.EncodeVerackV1() + default: + return nil, fmt.Errorf("MsgDeSoVerack.ToBytes: Unrecognized version: %v", msg.Version) + } +} + +func (msg *MsgDeSoVerack) EncodeVerackV0() ([]byte, error) { retBytes := []byte{} // Nonce - retBytes = append(retBytes, UintToBuf(msg.Nonce)...) + retBytes = append(retBytes, UintToBuf(msg.NonceReceived)...) + return retBytes, nil +} + +func (msg *MsgDeSoVerack) EncodeVerackV1() ([]byte, error) { + retBytes := []byte{} + + // Version + retBytes = append(retBytes, UintToBuf(msg.Version.ToUint64())...) + // Nonce Received + retBytes = append(retBytes, UintToBuf(msg.NonceReceived)...) + // Nonce Sent + retBytes = append(retBytes, UintToBuf(msg.NonceSent)...) + // Tstamp Micro + retBytes = append(retBytes, UintToBuf(msg.TstampMicro)...) + // PublicKey + retBytes = append(retBytes, EncodeBLSPublicKey(msg.PublicKey)...) + // Signature + retBytes = append(retBytes, EncodeBLSSignature(msg.Signature)...) + return retBytes, nil } func (msg *MsgDeSoVerack) FromBytes(data []byte) error { rr := bytes.NewReader(data) - retMsg := NewMessage(MsgTypeVerack).(*MsgDeSoVerack) - { - nonce, err := ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Nonce") - } - retMsg.Nonce = nonce + // The V0 verack message is determined from the message length. 
The V0 message will only contain the NonceReceived field. + if len(data) <= MaxVarintLen64 { + return msg.FromBytesV0(data) + } + + version, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Version") + } + msg.Version = NewVerackVersion(version) + switch msg.Version { + case VerackVersion0: + return fmt.Errorf("MsgDeSoVerack.FromBytes: Outdated Version=0 used for new encoding") + case VerackVersion1: + return msg.FromBytesV1(data) + default: + return fmt.Errorf("MsgDeSoVerack.FromBytes: Unrecognized version: %v", msg.Version) + } +} + +func (msg *MsgDeSoVerack) FromBytesV0(data []byte) error { + var err error + rr := bytes.NewReader(data) + msg.NonceReceived, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Nonce") + } + return nil +} + +func (msg *MsgDeSoVerack) FromBytesV1(data []byte) error { + var err error + rr := bytes.NewReader(data) + version, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Version") + } + msg.Version = NewVerackVersion(version) + + msg.NonceReceived, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Nonce Received") + } + + msg.NonceSent, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Nonce Sent") + } + + msg.TstampMicro, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Tstamp Micro") + } + + msg.PublicKey, err = DecodeBLSPublicKey(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading PublicKey") + } + + msg.Signature, err = DecodeBLSSignature(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoVerack.FromBytes: Problem reading Signature") } - *msg = *retMsg return nil } diff --git a/lib/network_connection.go b/lib/network_connection.go new 
file mode 100644 index 000000000..4d50d22a8 --- /dev/null +++ b/lib/network_connection.go @@ -0,0 +1,220 @@ +package lib + +import ( + "github.com/btcsuite/btcd/wire" + "github.com/golang/glog" + "net" + "sync" + "time" +) + +// outboundConnection is used to store an established connection with a peer. It can also be used to signal that the +// connection was unsuccessful, in which case the failed flag is set to true. outboundConnection is created after an +// OutboundConnectionAttempt concludes. outboundConnection implements the Connection interface. +type outboundConnection struct { + mtx sync.Mutex + terminated bool + + attemptId uint64 + address *wire.NetAddress + connection net.Conn + isPersistent bool + failed bool +} + +func (oc *outboundConnection) GetConnectionType() ConnectionType { + return ConnectionTypeOutbound +} + +func (oc *outboundConnection) Close() { + oc.mtx.Lock() + defer oc.mtx.Unlock() + + if oc.terminated { + return + } + if oc.connection != nil { + oc.connection.Close() + } + oc.terminated = true +} + +// inboundConnection is used to store an established connection with a peer. inboundConnection is created after +// an external peer connects to the node. inboundConnection implements the Connection interface. +type inboundConnection struct { + mtx sync.Mutex + terminated bool + + connection net.Conn +} + +func (ic *inboundConnection) GetConnectionType() ConnectionType { + return ConnectionTypeInbound +} + +func (ic *inboundConnection) Close() { + ic.mtx.Lock() + defer ic.mtx.Unlock() + + if ic.terminated { + return + } + + if ic.connection != nil { + ic.connection.Close() + } + ic.terminated = true +} + +// OutboundConnectionAttempt is used to store the state of an outbound connection attempt. It is used to initiate +// an outbound connection to a peer, and manage the lifecycle of the connection attempt. +type OutboundConnectionAttempt struct { + mtx sync.Mutex + + // attemptId is used to identify the connection attempt. 
It will later be the id of the peer, + // if the connection is successful. + attemptId uint64 + + // netAddr is the address of the peer we are attempting to connect to. + netAddr *wire.NetAddress + // isPersistent is used to indicate whether we should retry connecting to the peer if the connection attempt fails. + // If isPersistent is true, we will retry connecting to the peer until we are successful. Each time such connection + // fails, we will sleep according to exponential backoff. Otherwise, we will only attempt to connect to the peer once. + isPersistent bool + // dialTimeout is the amount of time we will wait before timing out an individual connection attempt. + dialTimeout time.Duration + // timeoutUnit is the unit of time we will use to calculate the exponential backoff delay. The initial timeout is + // calculated as timeoutUnit * 2^0, the second timeout is calculated as timeoutUnit * 2^1, and so on. + timeoutUnit time.Duration + // retryCount is the number of times we have attempted to connect to the peer. + retryCount uint64 + // connectionChan is used to send the result of the connection attempt to the caller thread. 
+ connectionChan chan *outboundConnection + + startGroup sync.WaitGroup + exitChan chan bool + status outboundConnectionAttemptStatus +} + +type outboundConnectionAttemptStatus int + +const ( + outboundConnectionAttemptInitialized outboundConnectionAttemptStatus = 0 + outboundConnectionAttemptRunning outboundConnectionAttemptStatus = 1 + outboundConnectionAttemptTerminated outboundConnectionAttemptStatus = 2 +) + +func NewOutboundConnectionAttempt(attemptId uint64, netAddr *wire.NetAddress, isPersistent bool, + dialTimeout time.Duration, connectionChan chan *outboundConnection) *OutboundConnectionAttempt { + + return &OutboundConnectionAttempt{ + attemptId: attemptId, + netAddr: netAddr, + isPersistent: isPersistent, + dialTimeout: dialTimeout, + timeoutUnit: time.Second, + exitChan: make(chan bool), + connectionChan: connectionChan, + status: outboundConnectionAttemptInitialized, + } +} + +func (oca *OutboundConnectionAttempt) Start() { + oca.mtx.Lock() + defer oca.mtx.Unlock() + + if oca.status != outboundConnectionAttemptInitialized { + return + } + + oca.startGroup.Add(1) + go oca.start() + oca.startGroup.Wait() + oca.status = outboundConnectionAttemptRunning +} + +func (oca *OutboundConnectionAttempt) start() { + oca.startGroup.Done() + oca.retryCount = 0 + +out: + for { + sleepDuration := 0 * time.Second + // for persistent peers, calculate the exponential backoff delay. + if oca.isPersistent { + sleepDuration = _delayRetry(oca.retryCount, oca.netAddr, oca.timeoutUnit) + } + + select { + case <-oca.exitChan: + break out + case <-time.After(sleepDuration): + // If the peer is persistent use exponential back off delay before retrying. + // We want to start backing off exponentially once we've gone through enough + // unsuccessful retries. 
+ if oca.isPersistent { + oca.retryCount++ + } + + conn := oca.attemptOutboundConnection() + if conn == nil && oca.isPersistent { + break + } + if conn == nil { + break out + } + + oca.connectionChan <- &outboundConnection{ + attemptId: oca.attemptId, + address: oca.netAddr, + connection: conn, + isPersistent: oca.isPersistent, + failed: false, + } + return + } + } + oca.connectionChan <- &outboundConnection{ + attemptId: oca.attemptId, + address: oca.netAddr, + connection: nil, + isPersistent: oca.isPersistent, + failed: true, + } +} + +func (oca *OutboundConnectionAttempt) Stop() { + oca.mtx.Lock() + defer oca.mtx.Unlock() + + if oca.status == outboundConnectionAttemptTerminated { + return + } + close(oca.exitChan) + oca.status = outboundConnectionAttemptTerminated +} + +func (oca *OutboundConnectionAttempt) SetTimeoutUnit(timeoutUnit time.Duration) { + oca.timeoutUnit = timeoutUnit +} + +// attemptOutboundConnection dials the peer. If the connection attempt is successful, it will return the connection. +// Otherwise, it will return nil. +func (oca *OutboundConnectionAttempt) attemptOutboundConnection() net.Conn { + // If the peer is not persistent, update the addrmgr. + glog.V(1).Infof("Attempting to connect to addr: %v:%v", oca.netAddr.IP.String(), oca.netAddr.Port) + + var err error + tcpAddr := net.TCPAddr{ + IP: oca.netAddr.IP, + Port: int(oca.netAddr.Port), + } + conn, err := net.DialTimeout(tcpAddr.Network(), tcpAddr.String(), oca.dialTimeout) + if err != nil { + // If we failed to connect to this peer, get a new address and try again. 
+ glog.V(2).Infof("Connection to addr (%v) failed: %v", tcpAddr, err) + return nil + } + + return conn +} diff --git a/lib/network_connection_test.go b/lib/network_connection_test.go new file mode 100644 index 000000000..5d3008f72 --- /dev/null +++ b/lib/network_connection_test.go @@ -0,0 +1,167 @@ +package lib + +import ( + "fmt" + "github.com/btcsuite/btcd/addrmgr" + "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/require" + "net" + "sync" + "testing" + "time" +) + +type simpleListener struct { + t *testing.T + ll net.Listener + addr *wire.NetAddress + closed bool + + connectionChan chan Connection + + exitChan chan struct{} + startGroup sync.WaitGroup + stopGroup sync.WaitGroup +} + +func newSimpleListener(t *testing.T) *simpleListener { + require := require.New(t) + ll, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(err) + params := &DeSoTestnetParams + addr := ll.Addr() + addrMgr := addrmgr.New("", net.LookupIP) + na, err := IPToNetAddr(addr.String(), addrMgr, params) + + return &simpleListener{ + t: t, + ll: ll, + addr: na, + closed: false, + connectionChan: make(chan Connection, 100), + exitChan: make(chan struct{}), + } +} + +func (sl *simpleListener) start() { + require := require.New(sl.t) + if sl.closed { + ll, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%v", sl.addr.Port)) + require.NoError(err) + sl.ll = ll + sl.connectionChan = make(chan Connection, 100) + sl.exitChan = make(chan struct{}) + sl.closed = false + } + sl.startGroup.Add(1) + sl.stopGroup.Add(1) + + go func() { + sl.startGroup.Done() + defer sl.stopGroup.Done() + for { + select { + case <-sl.exitChan: + return + default: + conn, err := sl.ll.Accept() + if err != nil { + fmt.Println("simpleListener.start: ll.Accept:", err) + return + } + sl.connectionChan <- &inboundConnection{ + connection: conn, + } + } + } + }() + sl.startGroup.Wait() +} + +func (sl *simpleListener) stop() { + sl.ll.Close() + sl.closed = true + close(sl.exitChan) + close(sl.connectionChan) + 
sl.stopGroup.Wait() + fmt.Println("simpleListener.stop: stopped") +} + +func (sl *simpleListener) getTCPAddr() *net.TCPAddr { + return sl.ll.Addr().(*net.TCPAddr) +} + +func verifyOutboundConnection(t *testing.T, conn *outboundConnection, sl *simpleListener, attemptId uint64, isPersistent bool, failed bool) { + require := require.New(t) + require.Equal(attemptId, conn.attemptId) + require.Equal(isPersistent, conn.isPersistent) + require.Equal(failed, conn.failed) + if failed { + require.Nil(conn.connection) + return + } + + require.Equal(conn.address.IP.String(), sl.getTCPAddr().IP.String()) + require.Equal(conn.address.Port, uint16(sl.getTCPAddr().Port)) + require.Equal(conn.address.IP.String(), sl.getTCPAddr().IP.String()) + require.Equal(conn.address.Port, uint16(sl.getTCPAddr().Port)) +} + +func verifyOutboundConnectionSelect(t *testing.T, connectionChan chan *outboundConnection, timeoutDuration time.Duration, + sl *simpleListener, attemptId uint64, isPersistent bool, failed bool) { + + select { + case conn := <-connectionChan: + verifyOutboundConnection(t, conn, sl, attemptId, isPersistent, failed) + case <-time.After(2 * timeoutDuration): + panic("Timed out waiting for outbound connection.") + } +} + +func TestOutboundConnectionAttempt(t *testing.T) { + require := require.New(t) + _ = require + timeoutDuration := 100 * time.Millisecond + + sl := newSimpleListener(t) + sl.start() + + connectionChan := make(chan *outboundConnection, 100) + attempt := NewOutboundConnectionAttempt(0, sl.addr, false, timeoutDuration, connectionChan) + attempt.Start() + verifyOutboundConnectionSelect(t, connectionChan, 2*timeoutDuration, sl, 0, false, false) + t.Log("TestOutboundConnectionAttempt #1 | Happy path, non-persistent | PASS") + + sl.stop() + attemptFailed := NewOutboundConnectionAttempt(1, sl.addr, false, timeoutDuration, connectionChan) + attemptFailed.Start() + verifyOutboundConnectionSelect(t, connectionChan, 2*timeoutDuration, sl, 1, false, true) + 
t.Log("TestOutboundConnectionAttempt #2 | Failed connection, non-persistent | PASS") + + sl2 := newSimpleListener(t) + sl2.start() + + attemptPersistent := NewOutboundConnectionAttempt(2, sl2.addr, true, timeoutDuration, connectionChan) + attemptPersistent.Start() + verifyOutboundConnectionSelect(t, connectionChan, 2*timeoutDuration, sl2, 2, true, false) + t.Log("TestOutboundConnectionAttempt #3 | Happy path, persistent | PASS") + + sl2.stop() + attemptPersistentDelay := NewOutboundConnectionAttempt(3, sl2.addr, true, timeoutDuration, connectionChan) + attemptPersistentDelay.SetTimeoutUnit(timeoutDuration) + attemptPersistentDelay.Start() + time.Sleep(timeoutDuration) + sl2.start() + verifyOutboundConnectionSelect(t, connectionChan, 2*timeoutDuration, sl2, 3, true, false) + require.Greater(attemptPersistentDelay.retryCount, uint64(0)) + t.Log("TestOutboundConnectionAttempt #4 | Failed connection, persistent, delayed | PASS") + + sl2.stop() + attemptPersistentCancel := NewOutboundConnectionAttempt(4, sl2.addr, true, timeoutDuration, connectionChan) + attemptPersistentCancel.Start() + time.Sleep(timeoutDuration) + attemptPersistentCancel.Stop() + verifyOutboundConnectionSelect(t, connectionChan, 2*timeoutDuration, sl2, 4, true, true) + require.Greater(attemptPersistentCancel.retryCount, uint64(0)) + t.Log("TestOutboundConnectionAttempt #5 | Failed connection, persistent, delayed, canceled | PASS") +} diff --git a/lib/network_manager.go b/lib/network_manager.go new file mode 100644 index 000000000..0e83ca3b1 --- /dev/null +++ b/lib/network_manager.go @@ -0,0 +1,1341 @@ +package lib + +import ( + "fmt" + "github.com/btcsuite/btcd/addrmgr" + "github.com/btcsuite/btcd/wire" + "github.com/decred/dcrd/lru" + "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/consensus" + "github.com/golang/glog" + "github.com/pkg/errors" + "math" + "net" + "strconv" + "sync" + "sync/atomic" + "time" +) + +// NetworkManager is 
a structure that oversees all connections to RemoteNodes. NetworkManager has the following +// responsibilities in regard to the lifecycle of RemoteNodes: +// - Maintain a list of all RemoteNodes that the node is connected to through the RemoteNodeManager. +// - Initialize RemoteNodes from established outbound and inbound peer connections. +// - Initiate and handle the communication of the handshake process with RemoteNodes. +// +// The NetworkManager is also responsible for opening and closing connections. It does this by running a set of +// goroutines that periodically check the state of different categories of RemoteNodes, and disconnects or connects +// RemoteNodes as needed. These categories of RemoteNodes include: +// - Persistent RemoteNodes: These are RemoteNodes that we want to maintain a persistent (constant) connection to. +// These are specified by the --connect-ips flag. +// - Validators: These are RemoteNodes that are in the active validators set. We want to maintain a connection to +// all active validators. We also want to disconnect from any validators that are no longer active. +// - Non-Validators: These are RemoteNodes that are not in the active validators set. We want to maintain a connection +// to at most a target number of outbound and inbound non-validators. If we have more than the target number of +// outbound or inbound non-validators, we will disconnect the excess RemoteNodes. +// +// The NetworkManager also runs an auxiliary goroutine that periodically cleans up RemoteNodes that may have timed out +// the handshake process, or became invalid for some other reason. +type NetworkManager struct { + mtx sync.Mutex + mtxHandshakeComplete sync.Mutex + + // The parameters we are initialized with. + params *DeSoParams + + srv *Server + bc *Blockchain + cmgr *ConnectionManager + keystore *BLSKeystore + + // configs + minTxFeeRateNanosPerKB uint64 + nodeServices ServiceFlag + + // Used to set remote node ids. Must be incremented atomically. 
+ remoteNodeNextId uint64 + // AllRemoteNodes is a map storing all remote nodes by their IDs. + AllRemoteNodes *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] + + // Indices for various types of remote nodes. + ValidatorOutboundIndex *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] + ValidatorInboundIndex *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] + NonValidatorOutboundIndex *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] + NonValidatorInboundIndex *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] + + // Cache of nonces used during handshake. + usedNonces lru.Cache + + // The address manager keeps track of peer addresses we're aware of. When + // we need to connect to a new outbound peer, it chooses one of the addresses + // it's aware of at random and provides it to us. + AddrMgr *addrmgr.AddrManager + + // When --connect-ips is set, we don't connect to anything from the addrmgr. + connectIps []string + // persistentIpToRemoteNodeIdsMap maps persistent IP addresses, like the --connect-ips, to the RemoteNodeIds of the + // corresponding RemoteNodes. This is used to ensure that we don't connect to the same persistent IP address twice. + // And that we can reconnect to the same persistent IP address if we disconnect from it. + persistentIpToRemoteNodeIdsMap *collections.ConcurrentMap[string, RemoteNodeId] + + activeValidatorsMapLock sync.RWMutex + // activeValidatorsMap is a map of all currently active validators registered in consensus. It will be updated + // periodically by the owner of the NetworkManager. + activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator] + + // The target number of non-validator outbound remote nodes we want to have. We will disconnect remote nodes once + // we've exceeded this number of outbound connections. + targetNonValidatorOutboundRemoteNodes uint32 + // The target number of non-validator inbound remote nodes we want to have. 
We will disconnect remote nodes once + // we've exceeded this number of inbound connections. + targetNonValidatorInboundRemoteNodes uint32 + // When true, only one connection per IP is allowed. Prevents eclipse attacks + // among other things. + limitOneInboundRemoteNodePerIP bool + + startGroup sync.WaitGroup + exitChan chan struct{} + exitGroup sync.WaitGroup +} + +func NewNetworkManager(params *DeSoParams, srv *Server, bc *Blockchain, cmgr *ConnectionManager, + blsKeystore *BLSKeystore, addrMgr *addrmgr.AddrManager, connectIps []string, + targetNonValidatorOutboundRemoteNodes uint32, targetNonValidatorInboundRemoteNodes uint32, + limitOneInboundConnectionPerIP bool, minTxFeeRateNanosPerKB uint64, nodeServices ServiceFlag) *NetworkManager { + + return &NetworkManager{ + params: params, + srv: srv, + bc: bc, + cmgr: cmgr, + keystore: blsKeystore, + AddrMgr: addrMgr, + minTxFeeRateNanosPerKB: minTxFeeRateNanosPerKB, + nodeServices: nodeServices, + AllRemoteNodes: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + ValidatorInboundIndex: collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode](), + ValidatorOutboundIndex: collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode](), + NonValidatorOutboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + NonValidatorInboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + usedNonces: lru.NewCache(1000), + connectIps: connectIps, + persistentIpToRemoteNodeIdsMap: collections.NewConcurrentMap[string, RemoteNodeId](), + activeValidatorsMap: collections.NewConcurrentMap[bls.SerializedPublicKey, consensus.Validator](), + targetNonValidatorOutboundRemoteNodes: targetNonValidatorOutboundRemoteNodes, + targetNonValidatorInboundRemoteNodes: targetNonValidatorInboundRemoteNodes, + limitOneInboundRemoteNodePerIP: limitOneInboundConnectionPerIP, + exitChan: make(chan struct{}), + } +} + +func (nm *NetworkManager) Start() { + // If the NetworkManager routines are 
disabled, we do nothing. + if nm.params.DisableNetworkManagerRoutines { + return + } + + // Start the NetworkManager goroutines. The startGroup is used to ensure that all goroutines have started before + // exiting the context of this function. + nm.startGroup.Add(4) + go nm.startPersistentConnector() + go nm.startValidatorConnector() + go nm.startNonValidatorConnector() + go nm.startRemoteNodeCleanup() + + nm.startGroup.Wait() +} + +func (nm *NetworkManager) Stop() { + if !nm.params.DisableNetworkManagerRoutines { + nm.exitGroup.Add(4) + close(nm.exitChan) + nm.exitGroup.Wait() + } + nm.DisconnectAll() +} + +func (nm *NetworkManager) SetTargetOutboundPeers(numPeers uint32) { + nm.targetNonValidatorOutboundRemoteNodes = numPeers +} + +// ########################### +// ## NetworkManager Routines +// ########################### + +// startPersistentConnector is responsible for ensuring that the node is connected to all persistent IP addresses. It +// does this by periodically checking the persistentIpToRemoteNodeIdsMap, and connecting to any persistent IP addresses +// that are not already connected. +func (nm *NetworkManager) startPersistentConnector() { + nm.startGroup.Done() + for { + select { + case <-nm.exitChan: + nm.exitGroup.Done() + return + case <-time.After(nm.params.NetworkManagerRefreshDuration): + nm.refreshConnectIps() + } + } +} + +// startValidatorConnector is responsible for ensuring that the node is connected to all active validators. It does +// this in two steps. First, it looks through the already established connections and checks if any of these connections +// are validators. If they are, it adds them to the validator index. It also checks if any of the existing validators +// are no longer active and removes them from the validator index. Second, it checks if any of the active validators +// are missing from the validator index. If they are, it attempts to connect to them. 
+func (nm *NetworkManager) startValidatorConnector() { + nm.startGroup.Done() + for { + select { + case <-nm.exitChan: + nm.exitGroup.Done() + return + case <-time.After(nm.params.NetworkManagerRefreshDuration): + activeValidatorsMap := nm.getActiveValidatorsMap() + nm.refreshValidatorIndices(activeValidatorsMap) + nm.connectValidators(activeValidatorsMap) + } + } +} + +// startNonValidatorConnector is responsible for ensuring that the node is connected to the target number of outbound +// and inbound remote nodes. To do this, it periodically checks the number of outbound and inbound remote nodes, and +// if the number is above the target number, it disconnects the excess remote nodes. If the number is below the target +// number, it attempts to connect to new remote nodes. +func (nm *NetworkManager) startNonValidatorConnector() { + nm.startGroup.Done() + + for { + select { + case <-nm.exitChan: + nm.exitGroup.Done() + return + case <-time.After(nm.params.NetworkManagerRefreshDuration): + nm.refreshNonValidatorOutboundIndex() + nm.refreshNonValidatorInboundIndex() + nm.connectNonValidators() + } + } +} + +// startRemoteNodeCleanup is responsible for cleaning up RemoteNodes that may have timed out the handshake process, +// or became invalid for some other reason. +func (nm *NetworkManager) startRemoteNodeCleanup() { + nm.startGroup.Done() + + for { + select { + case <-nm.exitChan: + nm.exitGroup.Done() + return + case <-time.After(nm.params.NetworkManagerRefreshDuration): + nm.Cleanup() + } + } + +} + +// ########################### +// ## Handlers (Peer, DeSoMessage) +// ########################### + +// _handleVersionMessage is called when a new version message is received. +func (nm *NetworkManager) _handleVersionMessage(origin *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeVersion { + return + } + + rn := nm.GetRemoteNodeFromPeer(origin) + if rn == nil { + // This should never happen. 
+ return + } + + var verMsg *MsgDeSoVersion + var ok bool + if verMsg, ok = desoMsg.(*MsgDeSoVersion); !ok { + glog.Errorf("NetworkManager.handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ + "error casting version message", origin.ID) + nm.Disconnect(rn) + return + } + + // If we've seen this nonce before then return an error since this is a connection from ourselves. + msgNonce := verMsg.Nonce + if nm.usedNonces.Contains(msgNonce) { + nm.usedNonces.Delete(msgNonce) + glog.Errorf("NetworkManager.handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ + "nonce collision, nonce (%v)", origin.ID, msgNonce) + nm.Disconnect(rn) + return + } + + // Call HandleVersionMessage on the RemoteNode. + responseNonce := uint64(RandInt64(math.MaxInt64)) + if err := rn.HandleVersionMessage(verMsg, responseNonce); err != nil { + glog.Errorf("NetworkManager.handleVersionMessage: Requesting PeerDisconnect for id: (%v) "+ + "error handling version message: %v", origin.ID, err) + nm.Disconnect(rn) + return + + } + nm.usedNonces.Add(responseNonce) +} + +// _handleVerackMessage is called when a new verack message is received. +func (nm *NetworkManager) _handleVerackMessage(origin *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeVerack { + return + } + + rn := nm.GetRemoteNodeFromPeer(origin) + if rn == nil { + // This should never happen. + return + } + + var vrkMsg *MsgDeSoVerack + var ok bool + if vrkMsg, ok = desoMsg.(*MsgDeSoVerack); !ok { + glog.Errorf("NetworkManager.handleVerackMessage: Disconnecting RemoteNode with id: (%v) "+ + "error casting verack message", origin.ID) + nm.Disconnect(rn) + return + } + + // Call HandleVerackMessage on the RemoteNode. 
+ if err := rn.HandleVerackMessage(vrkMsg); err != nil { + glog.Errorf("NetworkManager.handleVerackMessage: Requesting PeerDisconnect for id: (%v) "+ + "error handling verack message: %v", origin.ID, err) + nm.Disconnect(rn) + return + } + + nm.handleHandshakeComplete(rn) +} + +// _handleDisconnectedPeerMessage is called when a peer is disconnected. It is responsible for cleaning up the +// RemoteNode associated with the peer. +func (nm *NetworkManager) _handleDisconnectedPeerMessage(origin *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeDisconnectedPeer { + return + } + + glog.V(2).Infof("NetworkManager._handleDisconnectedPeerMessage: Handling disconnected peer message for "+ + "id=%v", origin.ID) + nm.DisconnectById(NewRemoteNodeId(origin.ID)) + // Update the persistentIpToRemoteNodeIdsMap, in case the disconnected peer was a persistent peer. + ipRemoteNodeIdMap := nm.persistentIpToRemoteNodeIdsMap.ToMap() + for ip, id := range ipRemoteNodeIdMap { + if id.ToUint64() == origin.ID { + nm.persistentIpToRemoteNodeIdsMap.Remove(ip) + } + } +} + +// _handleNewConnectionMessage is called when a new outbound or inbound connection is established. It is responsible +// for creating a RemoteNode from the connection and initiating the handshake. The incoming DeSoMessage is a control message. +func (nm *NetworkManager) _handleNewConnectionMessage(origin *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeNewConnection { + return + } + + msg, ok := desoMsg.(*MsgDeSoNewConnection) + if !ok { + return + } + + var remoteNode *RemoteNode + var err error + // We create the RemoteNode differently depending on whether the connection is inbound or outbound. 
+ switch msg.Connection.GetConnectionType() { + case ConnectionTypeInbound: + remoteNode, err = nm.processInboundConnection(msg.Connection) + if err != nil { + glog.Errorf("NetworkManager.handleNewConnectionMessage: Problem handling inbound connection: %v", err) + nm.cleanupFailedInboundConnection(remoteNode, msg.Connection) + return + } + case ConnectionTypeOutbound: + remoteNode, err = nm.processOutboundConnection(msg.Connection) + if err != nil { + glog.Errorf("NetworkManager.handleNewConnectionMessage: Problem handling outbound connection: %v", err) + nm.cleanupFailedOutboundConnection(msg.Connection) + return + } + } + + // If we made it here, we have a valid remote node. We will now initiate the handshake. + nm.InitiateHandshake(remoteNode) +} + +// processInboundConnection is called when a new inbound connection is established. At this point, the connection is not validated, +// nor is it assigned to a RemoteNode. This function is responsible for validating the connection and creating a RemoteNode from it. +// Once the RemoteNode is created, we will initiate handshake. +func (nm *NetworkManager) processInboundConnection(conn Connection) (*RemoteNode, error) { + var ic *inboundConnection + var ok bool + if ic, ok = conn.(*inboundConnection); !ok { + return nil, fmt.Errorf("NetworkManager.handleInboundConnection: Connection is not an inboundConnection") + } + + // If we want to limit inbound connections to one per IP address, check to make sure this address isn't already connected. 
+ if nm.limitOneInboundRemoteNodePerIP && + nm.isDuplicateInboundIPAddress(ic.connection.RemoteAddr()) { + + return nil, fmt.Errorf("NetworkManager.handleInboundConnection: Rejecting INBOUND peer (%s) due to "+ + "already having an inbound connection from the same IP with limit_one_inbound_connection_per_ip set", + ic.connection.RemoteAddr().String()) + } + + na, err := nm.ConvertIPStringToNetAddress(ic.connection.RemoteAddr().String()) + if err != nil { + return nil, errors.Wrapf(err, "NetworkManager.handleInboundConnection: Problem calling "+ + "ConvertIPStringToNetAddress for addr: (%s)", ic.connection.RemoteAddr().String()) + } + + remoteNode, err := nm.AttachInboundConnection(ic.connection, na) + if remoteNode == nil || err != nil { + return nil, errors.Wrapf(err, "NetworkManager.handleInboundConnection: Problem calling "+ + "AttachInboundConnection for addr: (%s)", ic.connection.RemoteAddr().String()) + } + + return remoteNode, nil +} + +// processOutboundConnection is called when a new outbound connection is established. At this point, the connection is not validated, +// nor is it assigned to a RemoteNode. This function is responsible for validating the connection and creating a RemoteNode from it. +// Once the RemoteNode is created, we will initiate handshake. +func (nm *NetworkManager) processOutboundConnection(conn Connection) (*RemoteNode, error) { + var oc *outboundConnection + var ok bool + if oc, ok = conn.(*outboundConnection); !ok { + return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Connection is not an outboundConnection") + } + + if oc.failed { + return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Failed to connect to peer (%s:%v)", + oc.address.IP.String(), oc.address.Port) + } + + if !oc.isPersistent { + nm.AddrMgr.Connected(oc.address) + nm.AddrMgr.Good(oc.address) + } + + // If this is a non-persistent outbound peer and the group key overlaps with another peer we're already connected to then + // abort mission. 
We only connect to one peer per IP group in order to prevent Sybil attacks. + if !oc.isPersistent && nm.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { + return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Rejecting OUTBOUND NON-PERSISTENT "+ + "connection with redundant group key (%s).", addrmgr.GroupKey(oc.address)) + } + + na, err := nm.ConvertIPStringToNetAddress(oc.connection.RemoteAddr().String()) + if err != nil { + return nil, errors.Wrapf(err, "NetworkManager.handleOutboundConnection: Problem calling ipToNetAddr "+ + "for addr: (%s)", oc.connection.RemoteAddr().String()) + } + + // Attach the connection before additional validation steps because it is already established. + remoteNode, err := nm.AttachOutboundConnection(oc.connection, na, oc.attemptId, oc.isPersistent) + if remoteNode == nil || err != nil { + return nil, errors.Wrapf(err, "NetworkManager.handleOutboundConnection: Problem calling AttachOutboundConnection "+ + "for addr: (%s)", oc.connection.RemoteAddr().String()) + } + + // If this is a persistent remote node or a validator, we don't need to do any extra connection validation. + if remoteNode.IsPersistent() || remoteNode.IsExpectedValidator() { + return remoteNode, nil + } + + // If we get here, it means we're dealing with a non-persistent or non-validator remote node. We perform additional + // connection validation. + + // If the group key overlaps with another peer we're already connected to then abort mission. We only connect to + // one peer per IP group in order to prevent Sybil attacks. + if nm.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { + return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Rejecting OUTBOUND NON-PERSISTENT "+ + "connection with redundant group key (%s).", addrmgr.GroupKey(oc.address)) + } + nm.cmgr.AddToGroupKey(na) + + return remoteNode, nil +} + +// cleanupFailedInboundConnection is called when an inbound connection fails to be processed. 
It is responsible for +// cleaning up the RemoteNode and the connection. Most of the time, the RemoteNode will be nil, but if the RemoteNode +// was successfully created, we will disconnect it. +func (nm *NetworkManager) cleanupFailedInboundConnection(remoteNode *RemoteNode, connection Connection) { + glog.V(2).Infof("NetworkManager.cleanupFailedInboundConnection: Cleaning up failed inbound connection") + if remoteNode != nil { + nm.Disconnect(remoteNode) + } + connection.Close() +} + +// cleanupFailedOutboundConnection is called when an outbound connection fails to be processed. It is responsible for +// cleaning up the RemoteNode and the connection. +func (nm *NetworkManager) cleanupFailedOutboundConnection(connection Connection) { + oc, ok := connection.(*outboundConnection) + if !ok { + return + } + glog.V(2).Infof("NetworkManager.cleanupFailedOutboundConnection: Cleaning up failed outbound connection") + + // Find the RemoteNode associated with the connection. It should almost always exist, since we create the RemoteNode + // as we're attempting to connect to the address. + id := NewRemoteNodeId(oc.attemptId) + rn := nm.GetRemoteNodeById(id) + if rn != nil { + nm.Disconnect(rn) + } + oc.Close() + nm.cmgr.RemoveAttemptedOutboundAddrs(oc.address) +} + +// ########################### +// ## Persistent Connections +// ########################### + +// refreshConnectIps is called periodically by the persistent connector. It is responsible for connecting to all +// persistent IP addresses that we are not already connected to. +func (nm *NetworkManager) refreshConnectIps() { + // Connect to addresses passed via the --connect-ips flag. These addresses are persistent in the sense that if we + // disconnect from one, we will try to reconnect to the same one. 
+	for _, connectIp := range nm.connectIps {
+		if _, ok := nm.persistentIpToRemoteNodeIdsMap.Get(connectIp); ok {
+			continue
+		}
+
+		glog.Infof("NetworkManager.initiatePersistentConnections: Connecting to connectIp: %v", connectIp)
+		id, err := nm.CreateNonValidatorPersistentOutboundConnection(connectIp)
+		if err != nil {
+			glog.Errorf("NetworkManager.initiatePersistentConnections: Problem connecting "+
+				"to connectIp %v: %v", connectIp, err)
+			continue
+		}
+
+		nm.persistentIpToRemoteNodeIdsMap.Set(connectIp, id)
+	}
+}
+
+// ###########################
+// ## Validator Connections
+// ###########################
+
+// SetActiveValidatorsMap is called by the owner of the NetworkManager to update the activeValidatorsMap. This should
+// generally be done whenever the active validators set changes.
+func (nm *NetworkManager) SetActiveValidatorsMap(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) {
+	nm.activeValidatorsMapLock.Lock()
+	defer nm.activeValidatorsMapLock.Unlock()
+	nm.activeValidatorsMap = activeValidatorsMap.Clone()
+
+}
+
+func (nm *NetworkManager) getActiveValidatorsMap() *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator] {
+	nm.activeValidatorsMapLock.RLock()
+	defer nm.activeValidatorsMapLock.RUnlock()
+	return nm.activeValidatorsMap.Clone()
+}
+
+// refreshValidatorIndices re-indexes validators based on the activeValidatorsMap. It is called periodically by the
+// validator connector.
+func (nm *NetworkManager) refreshValidatorIndices(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) {
+	// De-index inactive validators. We skip any checks regarding the RemoteNodes' connection status, and we do not verify whether
+	// de-indexing the validator would result in an excess number of outbound/inbound connections. Any excess connections
+	// will be cleaned up by the NonValidator connector.
+ // Note that the validator indices can change concurrently to the call below. This is fine, as the ValidatorOutboundIndex + // and ValidatorInboundIndex are concurrent maps, and here we make a copy of the map in a thread safe manner. If + // changes are made to these indices as this function is running, they will be used in the next iteration of + // refreshValidatorIndices. We first refresh the validator outbound index, and then the inbound index. + validatorOutboundMap := nm.GetValidatorOutboundIndex().ToMap() + for pk, rn := range validatorOutboundMap { + // If the validator is no longer active, de-index it. + if _, ok := activeValidatorsMap.Get(pk); !ok { + nm.SetNonValidator(rn) + nm.UnsetValidator(rn) + } + } + validatorInboundMap := nm.GetValidatorInboundIndex().ToMap() + for pk, rn := range validatorInboundMap { + // If the validator is no longer active, de-index it. + if _, ok := activeValidatorsMap.Get(pk); !ok { + nm.SetNonValidator(rn) + nm.UnsetValidator(rn) + } + } + + // Look for validators in our existing outbound / inbound connections. + allNonValidators := nm.GetAllNonValidators() + for _, rn := range allNonValidators { + // It is possible for a RemoteNode to be in the non-validator indices, and still have a public key. This can happen + // if the RemoteNode advertised support for the SFValidator service flag during handshake, and provided us + // with a public key, and a corresponding proof of possession signature. + pk := rn.GetValidatorPublicKey() + if pk == nil { + continue + } + // It is possible that through unlikely concurrence, and malevolence, two non-validators happen to have the same + // public key, which goes undetected during handshake. To prevent this from affecting the indexing of the validator + // set, we check that the non-validator's public key is not already present in the validator indices. 
+ if rn.IsOutbound() { + if _, ok := nm.GetValidatorOutboundIndex().Get(pk.Serialize()); ok { + glog.V(2).Infof("NetworkManager.refreshValidatorIndices: Disconnecting Validator RemoteNode "+ + "(%v) has validator public key (%v) that is already present in validator index", rn, pk) + nm.Disconnect(rn) + continue + } + } else { + if _, ok := nm.GetValidatorInboundIndex().Get(pk.Serialize()); ok { + glog.V(2).Infof("NetworkManager.refreshValidatorIndices: Disconnecting Validator RemoteNode "+ + "(%v) has validator public key (%v) that is already present in validator index", rn, pk) + nm.Disconnect(rn) + continue + } + } + + // If the RemoteNode turns out to be in the validator set, index it. + if _, ok := activeValidatorsMap.Get(pk.Serialize()); ok { + nm.SetValidator(rn) + nm.UnsetNonValidator(rn) + } + } +} + +// connectValidators attempts to connect to all active validators that are not already connected. It is called +// periodically by the validator connector. +func (nm *NetworkManager) connectValidators(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { + // Look through the active validators and connect to any that we're not already connected to. + if nm.keystore == nil { + return + } + + validators := activeValidatorsMap.ToMap() + for pk, validator := range validators { + // Check if we've already dialed an outbound connection to this validator. + // It's worth noting that we look up the outbound index, instead of looking up a union of the outbound and + // inbound indices. This is because we want to allow nodes to create circular outbound/inbound validator + // connections with one another. Therefore, we only check the outbound index to see if we've already dialed + // this validator before ourselves. + _, exists := nm.GetValidatorOutboundIndex().Get(pk) + // If we're already connected to the validator, continue. + if exists { + continue + } + // If the validator is our node, continue. 
+		if nm.keystore.GetSigner().GetPublicKey().Serialize() == pk {
+			continue
+		}
+
+		publicKey, err := pk.Deserialize()
+		if err != nil {
+			continue
+		}
+
+		// Skip validators that have not registered any domains; there is no address we could dial.
+		if len(validator.GetDomains()) == 0 {
+			continue
+		}
+
+		// Choose a random domain from the validator's domain list.
+		randDomain, err := collections.RandomElement(validator.GetDomains())
+		if err != nil {
+			glog.V(2).Infof("NetworkManager.connectValidators: Problem getting random domain for "+
+				"validator (pk= %v): (error= %v)", validator.GetPublicKey().Serialize(), err)
+			continue
+		}
+		if err := nm.CreateValidatorConnection(string(randDomain), publicKey); err != nil {
+			glog.V(2).Infof("NetworkManager.connectValidators: Problem connecting to validator %v: %v",
+				string(randDomain), err)
+			continue
+		}
+	}
+}
+
+// ###########################
+// ## NonValidator Connections
+// ###########################
+
+// refreshNonValidatorOutboundIndex is called periodically by the NonValidator connector. It is responsible for
+// disconnecting excess outbound remote nodes.
+func (nm *NetworkManager) refreshNonValidatorOutboundIndex() {
+	// There are three categories of outbound remote nodes: attempted, connected, and persistent. All of these
+	// remote nodes are stored in the same non-validator outbound index. We want to disconnect excess remote nodes that
+	// are not persistent, starting with the attempted nodes first.
+
+	// First let's run a quick check to see if the number of our non-validator remote nodes exceeds our target. Note that
+	// this number will include the persistent nodes.
+	numOutboundRemoteNodes := uint32(nm.GetNonValidatorOutboundIndex().Count())
+	if numOutboundRemoteNodes <= nm.targetNonValidatorOutboundRemoteNodes {
+		return
+	}
+
+	// If we get here, it means that we should potentially disconnect some remote nodes. Let's first separate the
+	// attempted and connected remote nodes, ignoring the persistent ones.
+ allOutboundRemoteNodes := nm.GetNonValidatorOutboundIndex().GetAll() + var attemptedOutboundRemoteNodes, connectedOutboundRemoteNodes []*RemoteNode + for _, rn := range allOutboundRemoteNodes { + if rn.IsPersistent() || rn.IsExpectedValidator() { + // We do nothing for persistent remote nodes or expected validators. + continue + } else if rn.IsHandshakeCompleted() { + connectedOutboundRemoteNodes = append(connectedOutboundRemoteNodes, rn) + } else { + attemptedOutboundRemoteNodes = append(attemptedOutboundRemoteNodes, rn) + } + } + + // Having separated the attempted and connected remote nodes, we can now find the actual number of attempted and + // connected remote nodes. We can then find out how many remote nodes we need to disconnect. + numOutboundRemoteNodes = uint32(len(attemptedOutboundRemoteNodes) + len(connectedOutboundRemoteNodes)) + excessiveOutboundRemoteNodes := uint32(0) + if numOutboundRemoteNodes > nm.targetNonValidatorOutboundRemoteNodes { + excessiveOutboundRemoteNodes = numOutboundRemoteNodes - nm.targetNonValidatorOutboundRemoteNodes + } + + // First disconnect the attempted remote nodes. + for _, rn := range attemptedOutboundRemoteNodes { + if excessiveOutboundRemoteNodes == 0 { + break + } + glog.V(2).Infof("NetworkManager.refreshNonValidatorOutboundIndex: Disconnecting attempted remote "+ + "node (id=%v) due to excess outbound RemoteNodes", rn.GetId()) + nm.Disconnect(rn) + excessiveOutboundRemoteNodes-- + } + // Now disconnect the connected remote nodes, if we still have too many remote nodes. + for _, rn := range connectedOutboundRemoteNodes { + if excessiveOutboundRemoteNodes == 0 { + break + } + glog.V(2).Infof("NetworkManager.refreshNonValidatorOutboundIndex: Disconnecting connected remote "+ + "node (id=%v) due to excess outbound RemoteNodes", rn.GetId()) + nm.Disconnect(rn) + excessiveOutboundRemoteNodes-- + } +} + +// refreshNonValidatorInboundIndex is called periodically by the non-validator connector. 
It is responsible for +// disconnecting excess inbound remote nodes. +func (nm *NetworkManager) refreshNonValidatorInboundIndex() { + // First let's check if we have an excess number of inbound remote nodes. If we do, we'll disconnect some of them. + numConnectedInboundRemoteNodes := uint32(nm.GetNonValidatorInboundIndex().Count()) + if numConnectedInboundRemoteNodes <= nm.targetNonValidatorInboundRemoteNodes { + return + } + + // Disconnect random inbound non-validators if we have too many of them. + inboundRemoteNodes := nm.GetNonValidatorInboundIndex().GetAll() + var connectedInboundRemoteNodes []*RemoteNode + for _, rn := range inboundRemoteNodes { + // We only want to disconnect remote nodes that have completed handshake. RemoteNodes that don't have the + // handshake completed status could be validators, in which case we don't want to disconnect them. It is also + // possible that the RemoteNodes without completed handshake will end up never finishing it, in which case + // they will be removed by the cleanup goroutine, once the handshake timeout is reached. + if rn.IsHandshakeCompleted() { + connectedInboundRemoteNodes = append(connectedInboundRemoteNodes, rn) + } + } + + // Having separated the connected remote nodes, we can now find the actual number of connected inbound remote nodes + // that have completed handshake. We can then find out how many remote nodes we need to disconnect. 
+ numConnectedInboundRemoteNodes = uint32(len(connectedInboundRemoteNodes)) + excessiveInboundRemoteNodes := uint32(0) + if numConnectedInboundRemoteNodes > nm.targetNonValidatorInboundRemoteNodes { + excessiveInboundRemoteNodes = numConnectedInboundRemoteNodes - nm.targetNonValidatorInboundRemoteNodes + } + for _, rn := range connectedInboundRemoteNodes { + if excessiveInboundRemoteNodes == 0 { + break + } + glog.V(2).Infof("NetworkManager.refreshNonValidatorInboundIndex: Disconnecting inbound remote "+ + "node (id=%v) due to excess inbound RemoteNodes", rn.GetId()) + nm.Disconnect(rn) + excessiveInboundRemoteNodes-- + } +} + +// connectNonValidators attempts to connect to new outbound nonValidator remote nodes. It is called periodically by the +// nonValidator connector. +func (nm *NetworkManager) connectNonValidators() { + // If the NetworkManager is configured with a list of connectIps, then we don't need to connect to any + // non-validators using the address manager. We will only connect to the connectIps, and potentially validators. + if len(nm.connectIps) != 0 { + return + } + + // First, find all nonValidator outbound remote nodes that are not persistent. + allOutboundRemoteNodes := nm.GetNonValidatorOutboundIndex().GetAll() + var nonValidatorOutboundRemoteNodes []*RemoteNode + for _, rn := range allOutboundRemoteNodes { + if rn.IsPersistent() || rn.IsExpectedValidator() { + // We do nothing for persistent remote nodes or expected validators. + continue + } else { + nonValidatorOutboundRemoteNodes = append(nonValidatorOutboundRemoteNodes, rn) + } + } + // Now find the number of nonValidator, non-persistent outbound remote nodes. + numOutboundRemoteNodes := uint32(len(nonValidatorOutboundRemoteNodes)) + remainingOutboundRemoteNodes := uint32(0) + // Check if we need to connect to more nonValidator outbound remote nodes. 
+ if numOutboundRemoteNodes < nm.targetNonValidatorOutboundRemoteNodes { + remainingOutboundRemoteNodes = nm.targetNonValidatorOutboundRemoteNodes - numOutboundRemoteNodes + } + for ii := uint32(0); ii < remainingOutboundRemoteNodes; ii++ { + // Get a random unconnected address from the address manager. If we can't find one, we break out of the loop. + addr := nm.getRandomUnconnectedAddress() + if addr == nil { + break + } + // Attempt to connect to the address. + nm.AddrMgr.Attempt(addr) + if err := nm.createNonValidatorOutboundConnection(addr); err != nil { + glog.V(2).Infof("NetworkManager.connectNonValidators: Problem creating non-validator outbound "+ + "connection to addr: %v; err: %v", addr, err) + } + } +} + +// getRandomUnconnectedAddress returns a random address from the address manager that we are not already connected to. +func (nm *NetworkManager) getRandomUnconnectedAddress() *wire.NetAddress { + for tries := 0; tries < 100; tries++ { + addr := nm.AddrMgr.GetAddress() + if addr == nil { + break + } + + if nm.cmgr.IsConnectedOutboundIpAddress(addr.NetAddress()) { + continue + } + + if nm.cmgr.IsAttemptedOutboundIpAddress(addr.NetAddress()) { + continue + } + + // We can only have one outbound address per /16. This is similar to + // Bitcoin and we do it to prevent Sybil attacks. + if nm.cmgr.IsFromRedundantOutboundIPAddress(addr.NetAddress()) { + continue + } + + return addr.NetAddress() + } + + return nil +} + +// ########################### +// ## Create RemoteNode Functions +// ########################### + +func (nm *NetworkManager) CreateValidatorConnection(ipStr string, publicKey *bls.PublicKey) error { + netAddr, err := nm.ConvertIPStringToNetAddress(ipStr) + if err != nil { + return err + } + if netAddr == nil || publicKey == nil { + return fmt.Errorf("NetworkManager.CreateValidatorConnection: netAddr or public key is nil") + } + + // Check if we've already dialed an outbound connection to this validator. 
+ if _, ok := nm.GetValidatorOutboundIndex().Get(publicKey.Serialize()); ok { + return fmt.Errorf("NetworkManager.CreateValidatorConnection: RemoteNode already exists for public key: %v", publicKey) + } + + remoteNode := nm.newRemoteNode(publicKey, false) + if err := remoteNode.DialOutboundConnection(netAddr); err != nil { + return errors.Wrapf(err, "NetworkManager.CreateValidatorConnection: Problem calling DialPersistentOutboundConnection "+ + "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) + } + nm.setRemoteNode(remoteNode) + // Since we're initiating this connection, add the RemoteNode to the outbound validator index. + nm.GetValidatorOutboundIndex().Set(publicKey.Serialize(), remoteNode) + return nil +} + +func (nm *NetworkManager) CreateNonValidatorPersistentOutboundConnection(ipStr string) (RemoteNodeId, error) { + netAddr, err := nm.ConvertIPStringToNetAddress(ipStr) + if err != nil { + return 0, err + } + if netAddr == nil { + return 0, fmt.Errorf("NetworkManager.CreateNonValidatorPersistentOutboundConnection: netAddr is nil") + } + + remoteNode := nm.newRemoteNode(nil, true) + if err := remoteNode.DialPersistentOutboundConnection(netAddr); err != nil { + return 0, errors.Wrapf(err, "NetworkManager.CreateNonValidatorPersistentOutboundConnection: Problem calling DialPersistentOutboundConnection "+ + "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) + } + nm.setRemoteNode(remoteNode) + nm.GetNonValidatorOutboundIndex().Set(remoteNode.GetId(), remoteNode) + return remoteNode.GetId(), nil +} + +func (nm *NetworkManager) CreateNonValidatorOutboundConnection(ipStr string) error { + netAddr, err := nm.ConvertIPStringToNetAddress(ipStr) + if err != nil { + return err + } + return nm.createNonValidatorOutboundConnection(netAddr) +} + +func (nm *NetworkManager) createNonValidatorOutboundConnection(netAddr *wire.NetAddress) error { + if netAddr == nil { + return fmt.Errorf("NetworkManager.CreateNonValidatorOutboundConnection: netAddr is nil") + } + + 
remoteNode := nm.newRemoteNode(nil, false) + if err := remoteNode.DialOutboundConnection(netAddr); err != nil { + return errors.Wrapf(err, "NetworkManager.CreateNonValidatorOutboundConnection: Problem calling DialOutboundConnection "+ + "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) + } + nm.setRemoteNode(remoteNode) + nm.GetNonValidatorOutboundIndex().Set(remoteNode.GetId(), remoteNode) + return nil +} + +func (nm *NetworkManager) AttachInboundConnection(conn net.Conn, + na *wire.NetAddress) (*RemoteNode, error) { + + remoteNode := nm.newRemoteNode(nil, false) + if err := remoteNode.AttachInboundConnection(conn, na); err != nil { + return remoteNode, errors.Wrapf(err, "NetworkManager.AttachInboundConnection: Problem calling AttachInboundConnection "+ + "for addr: (%s)", conn.RemoteAddr().String()) + } + + nm.setRemoteNode(remoteNode) + nm.GetNonValidatorInboundIndex().Set(remoteNode.GetId(), remoteNode) + return remoteNode, nil +} + +func (nm *NetworkManager) AttachOutboundConnection(conn net.Conn, na *wire.NetAddress, + remoteNodeId uint64, isPersistent bool) (*RemoteNode, error) { + + id := NewRemoteNodeId(remoteNodeId) + remoteNode := nm.GetRemoteNodeById(id) + if remoteNode == nil { + return nil, fmt.Errorf("NetworkManager.AttachOutboundConnection: Problem getting remote node by id (%d)", + id.ToUint64()) + } + + if err := remoteNode.AttachOutboundConnection(conn, na, isPersistent); err != nil { + nm.Disconnect(remoteNode) + return nil, errors.Wrapf(err, "NetworkManager.AttachOutboundConnection: Problem calling AttachOutboundConnection "+ + "for addr: (%s). 
Disconnecting remote node (id=%v)", conn.RemoteAddr().String(), remoteNode.GetId()) + } + + return remoteNode, nil +} + +// ########################### +// ## RemoteNode Management +// ########################### + +func (nm *NetworkManager) DisconnectAll() { + allRemoteNodes := nm.GetAllRemoteNodes().GetAll() + for _, rn := range allRemoteNodes { + glog.V(2).Infof("NetworkManager.DisconnectAll: Disconnecting from remote node (id=%v)", rn.GetId()) + nm.Disconnect(rn) + } +} + +func (nm *NetworkManager) newRemoteNode(validatorPublicKey *bls.PublicKey, isPersistent bool) *RemoteNode { + id := atomic.AddUint64(&nm.remoteNodeNextId, 1) + remoteNodeId := NewRemoteNodeId(id) + latestBlockHeight := uint64(nm.bc.BlockTip().Height) + return NewRemoteNode(remoteNodeId, validatorPublicKey, isPersistent, nm.srv, nm.cmgr, nm.keystore, + nm.params, nm.minTxFeeRateNanosPerKB, latestBlockHeight, nm.nodeServices) +} + +func (nm *NetworkManager) ProcessCompletedHandshake(remoteNode *RemoteNode) { + if remoteNode == nil { + return + } + + if remoteNode.IsValidator() { + nm.SetValidator(remoteNode) + nm.UnsetNonValidator(remoteNode) + } else { + nm.UnsetValidator(remoteNode) + nm.SetNonValidator(remoteNode) + } + nm.srv.HandleAcceptedPeer(remoteNode) + nm.srv.maybeRequestAddresses(remoteNode) +} + +func (nm *NetworkManager) Disconnect(rn *RemoteNode) { + if rn == nil { + return + } + glog.V(2).Infof("NetworkManager.Disconnect: Disconnecting from remote node id=%v", rn.GetId()) + rn.Disconnect() + nm.removeRemoteNodeFromIndexer(rn) +} + +func (nm *NetworkManager) DisconnectById(id RemoteNodeId) { + rn := nm.GetRemoteNodeById(id) + if rn == nil { + return + } + + nm.Disconnect(rn) +} + +func (nm *NetworkManager) SendMessage(rn *RemoteNode, desoMessage DeSoMessage) error { + if rn == nil { + return fmt.Errorf("NetworkManager.SendMessage: RemoteNode is nil") + } + + return rn.SendMessage(desoMessage) +} + +func (nm *NetworkManager) removeRemoteNodeFromIndexer(rn *RemoteNode) { + 
nm.mtx.Lock() + defer nm.mtx.Unlock() + + if rn == nil { + return + } + + nm.GetAllRemoteNodes().Remove(rn.GetId()) + nm.GetNonValidatorOutboundIndex().Remove(rn.GetId()) + nm.GetNonValidatorInboundIndex().Remove(rn.GetId()) + + // Try to evict the remote node from the validator index. If the remote node is not a validator, then there is nothing to do. + if rn.GetValidatorPublicKey() == nil { + return + } + // Only remove from the validator index if the fetched remote node is the same as the one we are trying to remove. + // Otherwise, we could have a fun edge-case where a duplicated validator connection ends up removing an + // existing validator connection from the index. + // First handle the outbound RemoteNode case. + if rn.IsOutbound() { + fetchedRn, ok := nm.GetValidatorOutboundIndex().Get(rn.GetValidatorPublicKey().Serialize()) + if ok && fetchedRn.GetId() == rn.GetId() { + nm.GetValidatorOutboundIndex().Remove(rn.GetValidatorPublicKey().Serialize()) + } + return + } + + // If the node is inbound, perform a similar check. 
+ fetchedRn, ok := nm.GetValidatorInboundIndex().Get(rn.GetValidatorPublicKey().Serialize()) + if ok && fetchedRn.GetId() == rn.GetId() { + nm.GetValidatorInboundIndex().Remove(rn.GetValidatorPublicKey().Serialize()) + } +} + +func (nm *NetworkManager) Cleanup() { + allRemoteNodes := nm.GetAllRemoteNodes().GetAll() + for _, rn := range allRemoteNodes { + if rn.IsTimedOut() { + glog.V(2).Infof("NetworkManager.Cleanup: Disconnecting from remote node (id=%v)", rn.GetId()) + nm.Disconnect(rn) + } + } +} + +// ########################### +// ## RemoteNode Setters +// ########################### + +func (nm *NetworkManager) setRemoteNode(rn *RemoteNode) { + nm.mtx.Lock() + defer nm.mtx.Unlock() + + if rn == nil || rn.IsTerminated() { + return + } + + nm.GetAllRemoteNodes().Set(rn.GetId(), rn) +} + +func (nm *NetworkManager) SetNonValidator(rn *RemoteNode) { + nm.mtx.Lock() + defer nm.mtx.Unlock() + + if rn == nil || rn.IsTerminated() { + return + } + + if rn.IsOutbound() { + nm.GetNonValidatorOutboundIndex().Set(rn.GetId(), rn) + } else { + nm.GetNonValidatorInboundIndex().Set(rn.GetId(), rn) + } +} + +func (nm *NetworkManager) SetValidator(remoteNode *RemoteNode) { + nm.mtx.Lock() + defer nm.mtx.Unlock() + + if remoteNode == nil || remoteNode.IsTerminated() { + return + } + + pk := remoteNode.GetValidatorPublicKey() + if pk == nil { + return + } + + if remoteNode.IsOutbound() { + nm.GetValidatorOutboundIndex().Set(pk.Serialize(), remoteNode) + } else { + nm.GetValidatorInboundIndex().Set(pk.Serialize(), remoteNode) + } +} + +func (nm *NetworkManager) UnsetValidator(remoteNode *RemoteNode) { + nm.mtx.Lock() + defer nm.mtx.Unlock() + + if remoteNode == nil || remoteNode.IsTerminated() { + return + } + + pk := remoteNode.GetValidatorPublicKey() + if pk == nil { + return + } + + if remoteNode.IsOutbound() { + nm.GetValidatorOutboundIndex().Remove(pk.Serialize()) + } else { + nm.GetValidatorInboundIndex().Remove(pk.Serialize()) + } +} + +func (nm *NetworkManager) 
UnsetNonValidator(rn *RemoteNode) { + nm.mtx.Lock() + defer nm.mtx.Unlock() + + if rn == nil || rn.IsTerminated() { + return + } + + if rn.IsOutbound() { + nm.GetNonValidatorOutboundIndex().Remove(rn.GetId()) + } else { + nm.GetNonValidatorInboundIndex().Remove(rn.GetId()) + } +} + +// ########################### +// ## RemoteNode Getters +// ########################### + +func (nm *NetworkManager) GetAllRemoteNodes() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { + return nm.AllRemoteNodes +} + +func (nm *NetworkManager) GetValidatorOutboundIndex() *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] { + return nm.ValidatorOutboundIndex +} + +func (nm *NetworkManager) GetValidatorInboundIndex() *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] { + return nm.ValidatorInboundIndex +} + +func (nm *NetworkManager) GetNonValidatorOutboundIndex() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { + return nm.NonValidatorOutboundIndex +} + +func (nm *NetworkManager) GetNonValidatorInboundIndex() *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] { + return nm.NonValidatorInboundIndex +} + +func (nm *NetworkManager) GetRemoteNodeFromPeer(peer *Peer) *RemoteNode { + if peer == nil { + return nil + } + id := NewRemoteNodeId(peer.GetId()) + rn, _ := nm.GetAllRemoteNodes().Get(id) + return rn +} + +func (nm *NetworkManager) GetRemoteNodeById(id RemoteNodeId) *RemoteNode { + rn, ok := nm.GetAllRemoteNodes().Get(id) + if !ok { + return nil + } + return rn +} + +// GetAllValidators returns a map of all currently connected validators with unique public keys. If there is an +// inbound and an outbound RemoteNode with the same public key, only the outbound RemoteNode is returned in the output. +// This is because the outbound RemoteNode is the one that we initiated, so it's considered more reliable. +// The returned RemoteNodes can be in any state, not necessarily having completed the handshake. 
+func (nm *NetworkManager) GetAllValidators() *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] { + allValidators := collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode]() + outboundValidatorsMap := nm.GetValidatorOutboundIndex().ToMap() + inboundValidatorsMap := nm.GetValidatorInboundIndex().ToMap() + + for pk, rn := range outboundValidatorsMap { + allValidators.Set(pk, rn) + } + for pk, rn := range inboundValidatorsMap { + // If the validator is not in the outbound index, we add it to the list of all validators. + // This de-duplicates circular validator connections, which are allowed by the protocol. + if _, ok := outboundValidatorsMap[pk]; !ok { + allValidators.Set(pk, rn) + } + } + return allValidators +} + +// GetConnectedValidators returns a list of all connected validators that have passed handshake. It is a sub-view of +// the GetAllValidators map, with only the validators that have HandshakeCompleted status. +func (nm *NetworkManager) GetConnectedValidators() []*RemoteNode { + var connectedValidators []*RemoteNode + allValidators := nm.GetAllValidators().ToMap() + for _, rn := range allValidators { + // Filter out validators that have not completed the handshake. + if rn.IsHandshakeCompleted() { + connectedValidators = append(connectedValidators, rn) + } + } + return connectedValidators +} + +func (nm *NetworkManager) GetAllNonValidators() []*RemoteNode { + outboundRemoteNodes := nm.GetNonValidatorOutboundIndex().GetAll() + inboundRemoteNodes := nm.GetNonValidatorInboundIndex().GetAll() + return append(outboundRemoteNodes, inboundRemoteNodes...) +} + +// ########################### +// ## RemoteNode Handshake +// ########################### + +// InitiateHandshake kicks off handshake with a remote node. 
+func (nm *NetworkManager) InitiateHandshake(rn *RemoteNode) { + nonce := uint64(RandInt64(math.MaxInt64)) + if err := rn.InitiateHandshake(nonce); err != nil { + glog.Errorf("NetworkManager.InitiateHandshake: Error initiating handshake: %v", err) + nm.Disconnect(rn) + } + nm.usedNonces.Add(nonce) +} + +// handleHandshakeComplete is called on a completed handshake with a RemoteNodes. +func (nm *NetworkManager) handleHandshakeComplete(remoteNode *RemoteNode) { + // Prevent race conditions while handling handshake complete messages. + nm.mtxHandshakeComplete.Lock() + defer nm.mtxHandshakeComplete.Unlock() + + // Get the handshake information of this peer. + if remoteNode == nil { + return + } + + if remoteNode.GetNegotiatedProtocolVersion().Before(ProtocolVersion2) { + nm.ProcessCompletedHandshake(remoteNode) + return + } + + if err := nm.handleHandshakeCompletePoSMessage(remoteNode); err != nil { + glog.Errorf("NetworkManager.handleHandshakeComplete: Error handling PoS handshake peer message: %v, "+ + "remoteNodePk (%s)", err, remoteNode.GetValidatorPublicKey().Serialize()) + nm.Disconnect(remoteNode) + return + } + nm.ProcessCompletedHandshake(remoteNode) +} + +func (nm *NetworkManager) handleHandshakeCompletePoSMessage(remoteNode *RemoteNode) error { + + validatorPk := remoteNode.GetValidatorPublicKey() + // If the remote node is not a potential validator, we don't need to do anything. + if validatorPk == nil { + return nil + } + + // For inbound RemoteNodes, we should ensure that there isn't an existing validator connected with the same public key. + // Inbound nodes are not initiated by us, so we shouldn't have added the RemoteNode to the ValidatorInboundIndex yet. 
+ if remoteNode.IsInbound() { + _, ok := nm.GetValidatorInboundIndex().Get(validatorPk.Serialize()) + if ok { + return fmt.Errorf("NetworkManager.handleHandshakeCompletePoSMessage: Inbound RemoteNode with duplicate validator public key") + } + return nil + } + // For outbound RemoteNodes, we have two possible scenarios. Either the RemoteNode has been initiated as a validator, + // in which case it should already be in the ValidatorOutboundIndex. Or the RemoteNode has been initiated as a regular node, + // in which case it should not be in the ValidatorOutboundIndex, but in the NonValidatorOutboundIndex. So to ensure there is + // no duplicate connection with the same public key, we only check whether there is a validator in the ValidatorOutboundIndex + // with the RemoteNode's public key. If there is one, we want to ensure that these two RemoteNodes have identical ids. + // Lookup the validator in the ValidatorOutboundIndex with the same public key. + existingValidator, ok := nm.GetValidatorOutboundIndex().Get(validatorPk.Serialize()) + if ok && remoteNode.GetId() != existingValidator.GetId() { + if remoteNode.IsPersistent() && !existingValidator.IsPersistent() { + nm.Disconnect(existingValidator) + return nil + } + return fmt.Errorf("NetworkManager.handleHandshakeCompletePoSMessage: Outbound RemoteNode with duplicate validator public key. 
"+ + "Existing validator id: %v, new validator id: %v", existingValidator.GetId().ToUint64(), remoteNode.GetId().ToUint64()) + } + return nil +} + +// ########################### +// ## Helper Functions +// ########################### + +func (nm *NetworkManager) ConvertIPStringToNetAddress(ipStr string) (*wire.NetAddress, error) { + netAddr, err := IPToNetAddr(ipStr, nm.AddrMgr, nm.params) + if err != nil { + return nil, errors.Wrapf(err, + "NetworkManager.ConvertIPStringToNetAddress: Problem parsing "+ + "ipString to wire.NetAddress") + } + if netAddr == nil { + return nil, fmt.Errorf("NetworkManager.ConvertIPStringToNetAddress: " + + "address was nil after parsing") + } + return netAddr, nil +} + +func IPToNetAddr(ipStr string, addrMgr *addrmgr.AddrManager, params *DeSoParams) (*wire.NetAddress, error) { + port := params.DefaultSocketPort + host, portstr, err := net.SplitHostPort(ipStr) + if err != nil { + // No port specified so leave port=default and set + // host to the ipStr. + host = ipStr + } else { + pp, err := strconv.ParseUint(portstr, 10, 16) + if err != nil { + return nil, errors.Wrapf(err, "IPToNetAddr: Can not parse port from %s for ip", ipStr) + } + port = uint16(pp) + } + netAddr, err := addrMgr.HostToNetAddress(host, port, 0) + if err != nil { + return nil, errors.Wrapf(err, "IPToNetAddr: Can not parse port from %s for ip", ipStr) + } + return netAddr, nil +} + +func (nm *NetworkManager) isDuplicateInboundIPAddress(addr net.Addr) bool { + netAddr, err := IPToNetAddr(addr.String(), nm.AddrMgr, nm.params) + if err != nil { + // Return true in case we have an error. We do this because it + // will result in the peer connection not being accepted, which + // is desired in this case. 
+ glog.Warningf(errors.Wrapf(err, + "NetworkManager.isDuplicateInboundIPAddress: Problem parsing "+ + "net.Addr to wire.NetAddress so marking as redundant and not "+ + "making connection").Error()) + return true + } + if netAddr == nil { + glog.Warningf("NetworkManager.isDuplicateInboundIPAddress: " + + "address was nil after parsing so marking as redundant and not " + + "making connection") + return true + } + + return nm.cmgr.IsDuplicateInboundIPAddress(netAddr) +} diff --git a/lib/network_test.go b/lib/network_test.go index 85dc3d85e..c627f4074 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -1,10 +1,10 @@ -//go:build relic - package lib import ( "bytes" "encoding/hex" + "github.com/deso-protocol/core/bls" + "golang.org/x/crypto/sha3" "math/big" "math/rand" "reflect" @@ -41,7 +41,7 @@ var expectedVer = &MsgDeSoVersion{ TstampSecs: 2, Nonce: uint64(0xffffffffffffffff), UserAgent: "abcdef", - StartBlockHeight: 4, + LatestBlockHeight: 4, MinFeeRateNanosPerKB: 10, } @@ -68,7 +68,7 @@ func TestVersionConversion(t *testing.T) { "works, add the new field to the test case, and fix this error.") } -func TestVerack(t *testing.T) { +func TestVerackV0(t *testing.T) { assert := assert.New(t) require := require.New(t) _ = assert @@ -78,13 +78,51 @@ func TestVerack(t *testing.T) { var buf bytes.Buffer nonce := uint64(12345678910) - _, err := WriteMessage(&buf, &MsgDeSoVerack{Nonce: nonce}, networkType) + _, err := WriteMessage(&buf, &MsgDeSoVerack{Version: VerackVersion0, NonceReceived: nonce}, networkType) require.NoError(err) verBytes := buf.Bytes() testMsg, _, err := ReadMessage(bytes.NewReader(verBytes), networkType) require.NoError(err) - require.Equal(&MsgDeSoVerack{Nonce: nonce}, testMsg) + require.Equal(&MsgDeSoVerack{Version: VerackVersion0, NonceReceived: nonce}, testMsg) +} + +func TestVerackV1(t *testing.T) { + require := require.New(t) + + networkType := NetworkType_MAINNET + var buf1, buf2 bytes.Buffer + + nonceReceived := uint64(12345678910) + 
nonceSent := nonceReceived + 1 + tstamp := uint64(2345678910) + // First, test that nil public key and signature are allowed. + msg := &MsgDeSoVerack{ + Version: VerackVersion1, + NonceReceived: nonceReceived, + NonceSent: nonceSent, + TstampMicro: tstamp, + PublicKey: nil, + Signature: nil, + } + _, err := WriteMessage(&buf1, msg, networkType) + require.NoError(err) + payload := append(UintToBuf(nonceReceived), UintToBuf(nonceSent)...) + payload = append(payload, UintToBuf(tstamp)...) + hash := sha3.Sum256(payload) + + priv, err := bls.NewPrivateKey() + require.NoError(err) + msg.PublicKey = priv.PublicKey() + msg.Signature, err = priv.Sign(hash[:]) + require.NoError(err) + _, err = WriteMessage(&buf2, msg, networkType) + require.NoError(err) + + verBytes := buf2.Bytes() + testMsg, _, err := ReadMessage(bytes.NewReader(verBytes), networkType) + require.NoError(err) + require.Equal(msg, testMsg) } // Creates fully formatted a PoS block header with random signatures diff --git a/lib/peer.go b/lib/peer.go index 780f72f62..232fcb3f0 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -2,14 +2,13 @@ package lib import ( "fmt" - "math" + "github.com/decred/dcrd/lru" "net" "sort" + "sync" "sync/atomic" "time" - "github.com/decred/dcrd/lru" - "github.com/btcsuite/btcd/wire" "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" @@ -49,7 +48,6 @@ type Peer struct { StatsMtx deadlock.RWMutex TimeOffsetSecs int64 TimeConnected time.Time - startingHeight uint32 ID uint64 // Ping-related fields. LastPingNonce uint64 @@ -64,36 +62,17 @@ type Peer struct { stallTimeoutSeconds uint64 Params *DeSoParams MessageChan chan *ServerMessage - // A hack to make it so that we can allow an API endpoint to manually - // delete a peer. 
- PeerManuallyRemovedFromConnectionManager bool - - // In order to complete a version negotiation successfully, the peer must - // reply to the initial version message we send them with a verack message - // containing the nonce from that initial version message. This ensures that - // the peer's IP isn't being spoofed since the only way to actually produce - // a verack with the appropriate response is to actually own the IP that - // the peer claims it has. As such, we maintain the version nonce we sent - // the peer and the version nonce they sent us here. - // - // TODO: The way we synchronize the version nonce is currently a bit - // messy; ideally we could do it without keeping global state. - VersionNonceSent uint64 - VersionNonceReceived uint64 // A pointer to the Server srv *Server // Basic state. - PeerInfoMtx deadlock.Mutex - serviceFlags ServiceFlag - addrStr string - netAddr *wire.NetAddress - userAgent string - advertisedProtocolVersion uint64 - negotiatedProtocolVersion uint64 - VersionNegotiated bool - minTxFeeRateNanosPerKB uint64 + PeerInfoMtx deadlock.Mutex + serviceFlags ServiceFlag + latestHeight uint64 + addrStr string + netAddr *wire.NetAddress + minTxFeeRateNanosPerKB uint64 // Messages for which we are expecting a reply within a fixed // amount of time. This list is always sorted by ExpectedTime, // with the item having the earliest time at the front. @@ -104,7 +83,8 @@ type Peer struct { knownAddressesMap map[string]bool // Output queue for messages that need to be sent to the peer. - outputQueueChan chan DeSoMessage + outputQueueChan chan DeSoMessage + peerDisconnectedChan chan *Peer // Set to zero until Disconnect has been called on the Peer. Used to make it // so that the logic in Disconnect will only be executed once. @@ -143,6 +123,13 @@ type Peer struct { // SyncType indicates whether blocksync should not be requested for this peer. If set to true // then we'll only hypersync from this peer. 
syncType NodeSyncType + + // startGroup ensures that all the Peer's go routines are started when we call Start(). + startGroup sync.WaitGroup +} + +func (pp *Peer) GetId() uint64 { + return pp.ID } func (pp *Peer) AddDeSoMessage(desoMessage DeSoMessage, inbound bool) { @@ -551,6 +538,7 @@ func (pp *Peer) cleanupMessageProcessor() { } func (pp *Peer) StartDeSoMessageProcessor() { + pp.startGroup.Done() glog.Infof("StartDeSoMessageProcessor: Starting for peer %v", pp) for { if pp.disconnected != 0 { @@ -614,15 +602,17 @@ func (pp *Peer) StartDeSoMessageProcessor() { } // NewPeer creates a new Peer object. -func NewPeer(_conn net.Conn, _isOutbound bool, _netAddr *wire.NetAddress, +func NewPeer(_id uint64, _conn net.Conn, _isOutbound bool, _netAddr *wire.NetAddress, _isPersistent bool, _stallTimeoutSeconds uint64, _minFeeRateNanosPerKB uint64, params *DeSoParams, messageChan chan *ServerMessage, _cmgr *ConnectionManager, _srv *Server, - _syncType NodeSyncType) *Peer { + _syncType NodeSyncType, + peerDisconnectedChan chan *Peer) *Peer { pp := Peer{ + ID: _id, cmgr: _cmgr, srv: _srv, Conn: _conn, @@ -631,6 +621,7 @@ func NewPeer(_conn net.Conn, _isOutbound bool, _netAddr *wire.NetAddress, isOutbound: _isOutbound, isPersistent: _isPersistent, outputQueueChan: make(chan DeSoMessage), + peerDisconnectedChan: peerDisconnectedChan, quit: make(chan interface{}), knownInventory: lru.NewCache(maxKnownInventory), blocksToSend: make(map[BlockHash]bool), @@ -642,9 +633,6 @@ func NewPeer(_conn net.Conn, _isOutbound bool, _netAddr *wire.NetAddress, requestedBlocks: make(map[BlockHash]bool), syncType: _syncType, } - if _cmgr != nil { - pp.ID = atomic.AddUint64(&_cmgr.peerIndex, 1) - } // TODO: Before, we would give each Peer its own Logger object. Now we // have a much better way of debugging which is that we include a nonce @@ -679,10 +667,10 @@ func (pp *Peer) MinFeeRateNanosPerKB() uint64 { } // StartingBlockHeight is the height of the peer's blockchain tip. 
-func (pp *Peer) StartingBlockHeight() uint32 { +func (pp *Peer) StartingBlockHeight() uint64 { pp.StatsMtx.RLock() defer pp.StatsMtx.RUnlock() - return pp.startingHeight + return pp.latestHeight } // NumBlocksToSend is the number of blocks the Peer has requested from @@ -738,6 +726,7 @@ func (pp *Peer) HandlePongMsg(msg *MsgDeSoPong) { } func (pp *Peer) PingHandler() { + pp.startGroup.Done() glog.V(1).Infof("Peer.PingHandler: Starting ping handler for Peer %v", pp) pingTicker := time.NewTicker(pingInterval) defer pingTicker.Stop() @@ -787,6 +776,10 @@ func (pp *Peer) Address() string { return pp.addrStr } +func (pp *Peer) NetAddress() *wire.NetAddress { + return pp.netAddr +} + func (pp *Peer) IP() string { return pp.netAddr.IP.String() } @@ -799,6 +792,10 @@ func (pp *Peer) IsOutbound() bool { return pp.isOutbound } +func (pp *Peer) IsPersistent() bool { + return pp.isPersistent +} + func (pp *Peer) QueueMessage(desoMessage DeSoMessage) { // If the peer is disconnected, don't queue anything. if !pp.Connected() { @@ -898,7 +895,22 @@ func (pp *Peer) _setKnownAddressesMap(key string, val bool) { pp.knownAddressesMap[key] = val } +func (pp *Peer) SetLatestBlockHeight(height uint64) { + pp.StatsMtx.Lock() + defer pp.StatsMtx.Unlock() + + pp.latestHeight = height +} + +func (pp *Peer) SetServiceFlag(sf ServiceFlag) { + pp.PeerInfoMtx.Lock() + defer pp.PeerInfoMtx.Unlock() + + pp.serviceFlags = sf +} + func (pp *Peer) outHandler() { + pp.startGroup.Done() glog.V(1).Infof("Peer.outHandler: Starting outHandler for Peer %v", pp) stallTicker := time.NewTicker(time.Second) out: @@ -1078,6 +1090,7 @@ func (pp *Peer) _handleInExpectedResponse(rmsg DeSoMessage) error { // inHandler handles all incoming messages for the peer. It must be run as a // goroutine. 
func (pp *Peer) inHandler() { + pp.startGroup.Done() glog.V(1).Infof("Peer.inHandler: Starting inHandler for Peer %v", pp) // The timer is stopped when a new message is received and reset after it @@ -1134,20 +1147,6 @@ out: // This switch actually processes the message. For most messages, we just // pass them onto the Server. switch msg := rmsg.(type) { - case *MsgDeSoVersion: - // We always receive the VERSION from the Peer before starting this select - // statement, so getting one here is an error. - - glog.Errorf("Peer.inHandler: Already received 'version' from peer %v -- disconnecting", pp) - break out - - case *MsgDeSoVerack: - // We always receive the VERACK from the Peer before starting this select - // statement, so getting one here is an error. - - glog.Errorf("Peer.inHandler: Already received 'verack' from peer %v -- disconnecting", pp) - break out - case *MsgDeSoPing: // Respond to a ping with a pong. pp.HandlePingMsg(msg) @@ -1156,7 +1155,7 @@ out: // Measure the ping time when we receive a pong. pp.HandlePongMsg(msg) - case *MsgDeSoNewPeer, *MsgDeSoDonePeer, *MsgDeSoQuit: + case *MsgDeSoDisconnectedPeer, *MsgDeSoQuit: // We should never receive control messages from a Peer. Disconnect if we do. glog.Errorf("Peer.inHandler: Received control message of type %v from "+ @@ -1189,20 +1188,12 @@ func (pp *Peer) Start() { glog.Infof("Peer.Start: Starting peer %v", pp) // The protocol has been negotiated successfully so start processing input // and output messages. + pp.startGroup.Add(4) go pp.PingHandler() go pp.outHandler() go pp.inHandler() go pp.StartDeSoMessageProcessor() - - // If the address manager needs more addresses, then send a GetAddr message - // to the peer. This is best-effort. - if pp.cmgr != nil { - if pp.cmgr.AddrMgr.NeedMoreAddresses() { - go func() { - pp.QueueMessage(&MsgDeSoGetAddr{}) - }() - } - } + pp.startGroup.Wait() // Send our verack message now that the IO processing machinery has started. 
} @@ -1284,226 +1275,17 @@ func (pp *Peer) ReadDeSoMessage() (DeSoMessage, error) { return msg, nil } -func (pp *Peer) NewVersionMessage(params *DeSoParams) *MsgDeSoVersion { - ver := NewMessage(MsgTypeVersion).(*MsgDeSoVersion) - - ver.Version = params.ProtocolVersion - ver.TstampSecs = time.Now().Unix() - // We use an int64 instead of a uint64 for convenience but - // this should be fine since we're just looking to generate a - // unique value. - ver.Nonce = uint64(RandInt64(math.MaxInt64)) - ver.UserAgent = params.UserAgent - // TODO: Right now all peers are full nodes. Later on we'll want to change this, - // at which point we'll need to do a little refactoring. - ver.Services = SFFullNodeDeprecated - if pp.cmgr != nil && pp.cmgr.HyperSync { - ver.Services |= SFHyperSync - } - if pp.srv.blockchain.archivalMode { - ver.Services |= SFArchivalNode - } - - // When a node asks you for what height you have, you should reply with - // the height of the latest actual block you have. This makes it so that - // peers who have up-to-date headers but missing blocks won't be considered - // for initial block download. - // - // TODO: This is ugly. It would be nice if the Peer required zero knowledge of the - // Server and the Blockchain. - if pp.srv != nil { - ver.StartBlockHeight = uint32(pp.srv.blockchain.blockTip().Header.Height) - } else { - ver.StartBlockHeight = uint32(0) - } - - // Set the minimum fee rate the peer will accept. - ver.MinFeeRateNanosPerKB = pp.minTxFeeRateNanosPerKB - - return ver -} - -func (pp *Peer) sendVerack() error { - verackMsg := NewMessage(MsgTypeVerack) - // Include the nonce we received in the peer's version message so - // we can validate that we actually control our IP address. 
- verackMsg.(*MsgDeSoVerack).Nonce = pp.VersionNonceReceived - if err := pp.WriteDeSoMessage(verackMsg); err != nil { - return errors.Wrap(err, "sendVerack: ") - } - - return nil -} - -func (pp *Peer) readVerack() error { - msg, err := pp.ReadDeSoMessage() - if err != nil { - return errors.Wrap(err, "readVerack: ") - } - if msg.GetMsgType() != MsgTypeVerack { - return fmt.Errorf( - "readVerack: Received message with type %s but expected type VERACK. ", - msg.GetMsgType().String()) - } - verackMsg := msg.(*MsgDeSoVerack) - if verackMsg.Nonce != pp.VersionNonceSent { - return fmt.Errorf( - "readVerack: Received VERACK message with nonce %d but expected nonce %d", - verackMsg.Nonce, pp.VersionNonceSent) - } - - return nil -} - -func (pp *Peer) sendVersion() error { - // For an outbound peer, we send a version message and then wait to - // hear back for one. - verMsg := pp.NewVersionMessage(pp.Params) - - // Record the nonce of this version message before we send it so we can - // detect self connections and so we can validate that the peer actually - // controls the IP she's supposedly communicating to us from. - pp.VersionNonceSent = verMsg.Nonce - if pp.cmgr != nil { - pp.cmgr.sentNonces.Add(pp.VersionNonceSent) - } - - if err := pp.WriteDeSoMessage(verMsg); err != nil { - return errors.Wrap(err, "sendVersion: ") - } - - return nil -} - -func (pp *Peer) readVersion() error { - msg, err := pp.ReadDeSoMessage() - if err != nil { - return errors.Wrap(err, "readVersion: ") - } - - verMsg, ok := msg.(*MsgDeSoVersion) - if !ok { - return fmt.Errorf( - "readVersion: Received message with type %s but expected type VERSION. 
"+ - "The VERSION message must preceed all others", msg.GetMsgType().String()) - } - if verMsg.Version < pp.Params.MinProtocolVersion { - return fmt.Errorf("readVersion: Peer's protocol version too low: %d (min: %v)", - verMsg.Version, pp.Params.MinProtocolVersion) - } - - // If we've sent this nonce before then return an error since this is - // a connection from ourselves. - msgNonce := verMsg.Nonce - if pp.cmgr != nil { - if pp.cmgr.sentNonces.Contains(msgNonce) { - pp.cmgr.sentNonces.Delete(msgNonce) - return fmt.Errorf("readVersion: Rejecting connection to self") - } - } - // Save the version nonce so we can include it in our verack message. - pp.VersionNonceReceived = msgNonce - - // Set the peer info-related fields. - pp.PeerInfoMtx.Lock() - pp.userAgent = verMsg.UserAgent - pp.serviceFlags = verMsg.Services - pp.advertisedProtocolVersion = verMsg.Version - negotiatedVersion := pp.Params.ProtocolVersion - if pp.advertisedProtocolVersion < pp.Params.ProtocolVersion { - negotiatedVersion = pp.advertisedProtocolVersion - } - pp.negotiatedProtocolVersion = negotiatedVersion - pp.PeerInfoMtx.Unlock() - - // Set the stats-related fields. - pp.StatsMtx.Lock() - pp.startingHeight = verMsg.StartBlockHeight - pp.minTxFeeRateNanosPerKB = verMsg.MinFeeRateNanosPerKB - pp.TimeConnected = time.Unix(verMsg.TstampSecs, 0) - pp.TimeOffsetSecs = verMsg.TstampSecs - time.Now().Unix() - pp.StatsMtx.Unlock() - - // Update the timeSource now that we've gotten a version message from the - // peer. 
- if pp.cmgr != nil { - pp.cmgr.timeSource.AddTimeSample(pp.addrStr, pp.TimeConnected) - } - - return nil -} - -func (pp *Peer) ReadWithTimeout(readFunc func() error, readTimeout time.Duration) error { - errChan := make(chan error) - go func() { - errChan <- readFunc() - }() - select { - case err := <-errChan: - { - return err - } - case <-time.After(readTimeout): - { - return fmt.Errorf("ReadWithTimeout: Timed out reading message from peer: (%v)", pp) - } - } -} - -func (pp *Peer) NegotiateVersion(versionNegotiationTimeout time.Duration) error { - if pp.isOutbound { - // Write a version message. - if err := pp.sendVersion(); err != nil { - return errors.Wrapf(err, "negotiateVersion: Problem sending version to Peer %v", pp) - } - // Read the peer's version. - if err := pp.ReadWithTimeout( - pp.readVersion, - versionNegotiationTimeout); err != nil { - - return errors.Wrapf(err, "negotiateVersion: Problem reading OUTBOUND peer version for Peer %v", pp) - } - } else { - // Read the version first since this is an inbound peer. - if err := pp.ReadWithTimeout( - pp.readVersion, - versionNegotiationTimeout); err != nil { - - return errors.Wrapf(err, "negotiateVersion: Problem reading INBOUND peer version for Peer %v", pp) - } - if err := pp.sendVersion(); err != nil { - return errors.Wrapf(err, "negotiateVersion: Problem sending version to Peer %v", pp) - } - } - - // After sending and receiving a compatible version, complete the - // negotiation by sending and receiving a verack message. - if err := pp.sendVerack(); err != nil { - return errors.Wrapf(err, "negotiateVersion: Problem sending verack to Peer %v", pp) - } - if err := pp.ReadWithTimeout( - pp.readVerack, - versionNegotiationTimeout); err != nil { - - return errors.Wrapf(err, "negotiateVersion: Problem reading VERACK message from Peer %v", pp) - } - pp.VersionNegotiated = true - - // At this point we have sent a version and validated our peer's - // version. So the negotiation should be complete. 
- return nil -} - // Disconnect closes a peer's network connection. func (pp *Peer) Disconnect() { // Only run the logic the first time Disconnect is called. glog.V(1).Infof(CLog(Yellow, "Peer.Disconnect: Starting")) - if atomic.AddInt32(&pp.disconnected, 1) != 1 { + if atomic.LoadInt32(&pp.disconnected) != 0 { glog.V(1).Infof("Peer.Disconnect: Disconnect call ignored since it was already called before for Peer %v", pp) return } + atomic.AddInt32(&pp.disconnected, 1) - glog.V(1).Infof("Peer.Disconnect: Running Disconnect for the first time for Peer %v", pp) + glog.V(2).Infof("Peer.Disconnect: Running Disconnect for the first time for Peer %v", pp) // Close the connection object. pp.Conn.Close() @@ -1513,9 +1295,7 @@ func (pp *Peer) Disconnect() { // Add the Peer to donePeers so that the ConnectionManager and Server can do any // cleanup they need to do. - if pp.cmgr != nil && atomic.LoadInt32(&pp.cmgr.shutdown) == 0 && pp.cmgr.donePeerChan != nil { - pp.cmgr.donePeerChan <- pp - } + pp.peerDisconnectedChan <- pp } func (pp *Peer) _logVersionSuccess() { diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index a5f90c519..f28fb071a 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -1,5 +1,3 @@ -//go:build relic - package lib import ( diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 65ade5b80..10324e2a0 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -38,7 +38,7 @@ import ( // StatusHeaderValidated or StatusHeaderValidateFailed. // 5. Exit early if the's view is less than the current header chain's tip. // 6. Reorg the best header chain if the header's view is higher than the current tip. 
-func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader) ( +func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, verifySignatures bool) ( _isMainChain bool, _isOrphan bool, _err error, ) { if !bc.params.IsPoSBlockHeight(header.Height) { @@ -53,8 +53,21 @@ func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader) ( return false, false, errors.Wrapf(err, "processHeaderPoS: Problem hashing header") } + // If the incoming header is already part of the best header chain, then we can exit early. + // The header is not part of a fork, and is already an ancestor of the current header chain tip. + if _, isInBestHeaderChain := bc.bestHeaderChainMap[*headerHash]; isInBestHeaderChain { + return true, false, nil + } + + // If the incoming header is part of a reorg that uncommits the committed tip from the best chain, + // then we exit early. Such headers are invalid and should not be synced. + committedBlockchainTip, _ := bc.GetCommittedTip() + if committedBlockchainTip != nil && committedBlockchainTip.Header.Height >= header.Height { + return false, false, errors.New("processHeaderPoS: Header conflicts with committed tip") + } + // Validate the header and index it in the block index. - blockNode, isOrphan, err := bc.validateAndIndexHeaderPoS(header, headerHash) + blockNode, isOrphan, err := bc.validateAndIndexHeaderPoS(header, headerHash, verifySignatures) if err != nil { return false, false, errors.Wrapf(err, "processHeaderPoS: Problem validating and indexing header: ") } @@ -85,7 +98,7 @@ func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader) ( return true, false, nil } -func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHash *BlockHash) ( +func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHash *BlockHash, verifySignatures bool) ( _headerBlockNode *BlockNode, _isOrphan bool, _err error, ) { // Look up the header in the block index to check if it has already been validated and indexed. 
@@ -129,7 +142,8 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas // // This is safe and efficient as long as validateAndIndexHeaderPoS is only called on non-orphan // headers. This guarantees that the recursive case for each header can only be hit once. - parentBlockNode, isParentAnOrphan, err := bc.validateAndIndexHeaderPoS(parentBlockNode.Header, header.PrevBlockHash) + parentBlockNode, isParentAnOrphan, err := bc.validateAndIndexHeaderPoS( + parentBlockNode.Header, header.PrevBlockHash, verifySignatures) if err != nil { return nil, false, err } @@ -153,15 +167,17 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas ) } - // Validate the header's random seed signature. - isValidRandomSeedSignature, err := bc.hasValidProposerRandomSeedSignaturePoS(header) - if err != nil { - return nil, false, errors.Wrap(err, "validateAndIndexHeaderPoS: Problem validating random seed signature") - } - if !isValidRandomSeedSignature { - return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( - header, errors.New("validateAndIndexHeaderPoS: Header has invalid random seed signature"), - ) + if verifySignatures { + // Validate the header's random seed signature. + isValidRandomSeedSignature, err := bc.hasValidProposerRandomSeedSignaturePoS(header) + if err != nil { + return nil, false, errors.Wrap(err, "validateAndIndexHeaderPoS: Problem validating random seed signature") + } + if !isValidRandomSeedSignature { + return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( + header, errors.New("validateAndIndexHeaderPoS: Header has invalid random seed signature"), + ) + } } // Store it as HeaderValidated now that it has passed all validations. 
@@ -220,21 +236,22 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v if _, err := block.Hash(); err != nil { return false, false, nil, errors.Wrapf(err, "processBlockPoS: Problem hashing block") } + // Get all the blocks between the current block and the committed tip. If the block // is an orphan, then we store it after performing basic validations. // If the block extends from any committed block other than the committed tip, // then we throw it away. lineageFromCommittedTip, err := bc.getLineageFromCommittedTip(block.Header) - if err == RuleErrorDoesNotExtendCommittedTip || - err == RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock || - err == RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight || - err == RuleErrorAncestorBlockValidationFailed { + if errors.Is(err, RuleErrorDoesNotExtendCommittedTip) || + errors.Is(err, RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock) || + errors.Is(err, RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight) || + errors.Is(err, RuleErrorAncestorBlockValidationFailed) { // In this case, the block extends a committed block that is NOT the tip // block. We will never accept this block. To prevent spam, we do not // store this block as validate failed. We just throw it away. return false, false, nil, errors.Wrap(err, "processBlockPoS: ") } - if err == RuleErrorMissingAncestorBlock { + if errors.Is(err, RuleErrorMissingAncestorBlock) { // In this case, the block is an orphan that does not extend from any blocks // on our best chain. Try to process the orphan by running basic validations. // If it passes basic integrity checks, we'll store it with the hope that we @@ -248,20 +265,29 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v "processBlockPoS: Unexpected problem getting lineage from committed tip: ") } + // We expect the utxoView for the parent block to be valid because we check that all ancestor blocks have + // been validated. 
+ parentUtxoView, err := bc.getUtxoViewAtBlockHash(*block.Header.PrevBlockHash) + if err != nil { + // This should never happen. If the parent is validated and extends from the tip, then we should + // be able to build a UtxoView for it. This failure can only happen due to transient or badger issues. + // We return that validation didn't fail and the error. + return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem getting UtxoView") + } // First, we perform a validation of the leader and the QC to prevent spam. // If the block fails this check, we throw it away. - passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block) + passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, parentUtxoView, verifySignatures) if err != nil { // If we hit an error, we can't store it since we're not sure if it passed the spam prevention check. return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem validating leader and QC") } if !passedSpamPreventionCheck { // If the block fails the spam prevention check, we throw it away. - return false, false, nil, errors.New("processBlockPoS: Block failed spam prevention check") + return false, false, nil, errors.Wrapf(RuleErrorFailedSpamPreventionsCheck, "processBlockPoS: Block failed spam prevention check: ") } // Validate the block and store it in the block index. The block is guaranteed to not be an orphan. - blockNode, err := bc.validateAndIndexBlockPoS(block) + blockNode, err := bc.validateAndIndexBlockPoS(block, parentUtxoView, verifySignatures) if err != nil { return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem validating block: ") @@ -271,13 +297,20 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v "processBlockPoS: Block not validated after performing all validations.") } + // At this point, we know that the block has passed all validations.
The block may or may + // not be connected to the chain, but it has been accepted because it is known to be valid. + // We trigger a block accepted event to notify listeners. + if bc.eventManager != nil { + bc.eventManager.blockAccepted(&BlockEvent{Block: block}) + } + // 4. Process the block's header and update the header chain. We call processHeaderPoS // here after verifying that the block is not an orphan and has passed all validations, // but directly before applying the block as the new tip. Any failure when validating the // header and applying it to the header chain will result in the two chains being out of // sync. The header chain is less critical and mutations to it are reversible. So we attempt // to mutate it first before attempting to mutate the block chain. - if _, _, err = bc.processHeaderPoS(block.Header); err != nil { + if _, _, err = bc.processHeaderPoS(block.Header, verifySignatures); err != nil { return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem processing header") } @@ -294,7 +327,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // 6. Commit grandparent if possible. Only need to do this if we applied a new tip. if appliedNewTip { - if err = bc.runCommitRuleOnBestChain(); err != nil { + if err = bc.runCommitRuleOnBestChain(verifySignatures); err != nil { return false, false, nil, errors.Wrap(err, "processBlockPoS: error running commit rule: ") } @@ -537,17 +570,12 @@ func (bc *Blockchain) storeValidateFailedBlockWithWrappedError(block *MsgDeSoBlo return blockNode, nil } -func (bc *Blockchain) validateLeaderAndQC(block *MsgDeSoBlock) (_passedSpamPreventionCheck bool, _err error) { - // We expect the utxoView for the parent block to be valid because we check that all ancestor blocks have - // been validated. - utxoView, err := bc.getUtxoViewAtBlockHash(*block.Header.PrevBlockHash) - if err != nil { - // This should never happen. 
If the parent is validated and extends from the tip, then we should - // be able to build a UtxoView for it. This failure can only happen due to transient or badger issues. - // We return that validation didn't fail and the error. - return false, errors.Wrap(err, "validateLeaderAndQC: Problem getting UtxoView") - } - currentEpochEntry, err := utxoView.GetCurrentEpochEntry() +func (bc *Blockchain) validateLeaderAndQC( + block *MsgDeSoBlock, + parentUtxoView *UtxoView, + verifySignatures bool, +) (_passedSpamPreventionCheck bool, _err error) { + currentEpochEntry, err := parentUtxoView.GetCurrentEpochEntry() if err != nil { return false, errors.Wrap(err, "validateLeaderAndQC: Problem getting current epoch entry") @@ -557,36 +585,38 @@ func (bc *Blockchain) validateLeaderAndQC(block *MsgDeSoBlock) (_passedSpamPreve if !currentEpochEntry.ContainsBlockHeight(block.Header.Height) { return false, nil } - snapshotAtEpochNumber, err := utxoView.ComputeSnapshotEpochNumberForEpoch(currentEpochEntry.EpochNumber) - if err != nil { - return false, errors.Wrapf(err, - "validateLeaderAndQC: Problem getting snapshot epoch number for epoch #%d", - currentEpochEntry.EpochNumber) - } - // TODO: We may want to only perform this check if verifySignatures is true. - isValidPartialSig, err := utxoView.hasValidProposerPartialSignaturePoS(block, snapshotAtEpochNumber) - if err != nil { - return false, errors.Wrap(err, - "validateLeaderAndQC: Problem validating proposer partial sig") - } - if !isValidPartialSig { - return false, nil - } - // 2. Validate QC - validatorsByStake, err := utxoView.GetAllSnapshotValidatorSetEntriesByStake() - if err != nil { - // This should never happen. If the parent is validated and extends from the tip, then we should - // be able to fetch the validator set at its block height for it. This failure can only happen due - // to transient badger issues. We return false for failed spam prevention check and the error. 
- return false, errors.Wrap(err, "validateLeaderAndQC: Problem getting validator set") - } - // Validate the block's QC. If it's invalid, we return true for failed spam prevention check. - if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake); err != nil { - return false, nil + if verifySignatures { + snapshotAtEpochNumber, err := parentUtxoView.ComputeSnapshotEpochNumberForEpoch(currentEpochEntry.EpochNumber) + if err != nil { + return false, errors.Wrapf(err, + "validateLeaderAndQC: Problem getting snapshot epoch number for epoch #%d", + currentEpochEntry.EpochNumber) + } + isValidPartialSig, err := parentUtxoView.hasValidProposerPartialSignaturePoS(block, snapshotAtEpochNumber) + if err != nil { + return false, errors.Wrap(err, + "validateLeaderAndQC: Problem validating proposer partial sig") + } + if !isValidPartialSig { + return false, nil + } + // 2. Validate QC + validatorsByStake, err := parentUtxoView.GetAllSnapshotValidatorSetEntriesByStake() + if err != nil { + // This should never happen. If the parent is validated and extends from the tip, then we should + // be able to fetch the validator set at its block height for it. This failure can only happen due + // to transient badger issues. We return false for failed spam prevention check and the error. + return false, errors.Wrap(err, "validateLeaderAndQC: Problem getting validator set") + } + + // Validate the block's QC. If it's invalid, we return true for failed spam prevention check. + if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake); err != nil { + return false, nil + } } - isBlockProposerValid, err := utxoView.hasValidBlockProposerPoS(block) + isBlockProposerValid, err := parentUtxoView.hasValidBlockProposerPoS(block) if err != nil { return false, errors.Wrapf(err, "validateAndIndexBlockPoS: Problem validating block proposer") @@ -621,7 +651,8 @@ func (bc *Blockchain) validateLeaderAndQC(block *MsgDeSoBlock) (_passedSpamPreve // return the new BlockNode. 
// - Error case: Something goes wrong that doesn't result in the block being marked VALIDATE or VALIDATE_FAILED. In // this case, we will add the block to the block index with status STORED and return the BlockNode. -func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock) (*BlockNode, error) { +func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoView *UtxoView, verifySignatures bool) ( + *BlockNode, error) { blockHash, err := block.Header.Hash() if err != nil { return nil, errors.Wrapf(err, "validateAndIndexBlockPoS: Problem hashing block %v", block) @@ -649,7 +680,7 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock) (*BlockNode, // Run the validation for the parent and update the block index with the parent's status. We first // check if the parent has a cached status. If so, we use the cached status. Otherwise, we run // the full validation algorithm on it, then index it and use the result. - parentBlockNode, err := bc.validatePreviouslyIndexedBlockPoS(block.Header.PrevBlockHash) + parentBlockNode, err := bc.validatePreviouslyIndexedBlockPoS(block.Header.PrevBlockHash, verifySignatures) if err != nil { return blockNode, errors.Wrapf(err, "validateAndIndexBlockPoS: Problem validating previously indexed block: ") } @@ -668,32 +699,19 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock) (*BlockNode, } // Validate the block's random seed signature - isValidRandomSeedSignature, err := bc.hasValidProposerRandomSeedSignaturePoS(block.Header) - if err != nil { - var innerErr error - blockNode, innerErr = bc.storeBlockInBlockIndex(block) - if innerErr != nil { - return nil, errors.Wrapf(innerErr, "validateAndIndexBlockPoS: Problem adding block to block index: %v", err) + if verifySignatures { + isValidRandomSeedSignature, err := bc.hasValidProposerRandomSeedSignaturePoS(block.Header) + if err != nil { + var innerErr error + blockNode, innerErr = bc.storeBlockInBlockIndex(block) + if innerErr != 
nil { + return nil, errors.Wrapf(innerErr, "validateAndIndexBlockPoS: Problem adding block to block index: %v", err) + } + return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem validating random seed signature") } - return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem validating random seed signature") - } - if !isValidRandomSeedSignature { - return bc.storeValidateFailedBlockWithWrappedError(block, errors.New("invalid random seed signature")) - } - - // We expect the utxoView for the parent block to be valid because we check that all ancestor blocks have - // been validated. - utxoView, err := bc.getUtxoViewAtBlockHash(*block.Header.PrevBlockHash) - if err != nil { - // This should never happen. If the parent is validated and extends from the tip, then we should - // be able to build a UtxoView for it. This failure can only happen due to transient or badger issues. - // We store the block and return an error. - var innerErr error - blockNode, innerErr = bc.storeBlockInBlockIndex(block) - if innerErr != nil { - return nil, errors.Wrapf(innerErr, "validateAndIndexBlockPoS: Problem adding block to block index: %v", err) + if !isValidRandomSeedSignature { + return bc.storeValidateFailedBlockWithWrappedError(block, errors.New("invalid random seed signature")) } - return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem getting UtxoView") } // Check if the block is properly formed and passes all basic validations. @@ -707,7 +725,7 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock) (*BlockNode, }) // If we fail to connect the block, then it means the block is invalid. We should store it as ValidateFailed. - if _, err = utxoView.ConnectBlock(block, txHashes, true, nil, block.Header.Height); err != nil { + if _, err = parentUtxoView.ConnectBlock(block, txHashes, verifySignatures, nil, block.Header.Height); err != nil { // If it doesn't connect, we want to mark it as ValidateFailed. 
return bc.storeValidateFailedBlockWithWrappedError(block, err) } @@ -731,7 +749,10 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock) (*BlockNode, // validatePreviouslyIndexedBlockPoS is a helper function that takes in a block hash for a previously // cached block, and runs the validateAndIndexBlockPoS algorithm on it. It returns the resulting BlockNode. -func (bc *Blockchain) validatePreviouslyIndexedBlockPoS(blockHash *BlockHash) (*BlockNode, error) { +func (bc *Blockchain) validatePreviouslyIndexedBlockPoS( + blockHash *BlockHash, + verifySignatures bool, +) (*BlockNode, error) { // Check if the block is already in the block index. If so, we check its current status first. blockNode, exists := bc.blockIndexByHash[*blockHash] if !exists { @@ -754,9 +775,16 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS(blockHash *BlockHash) (* // provided the block was cached in the block index and stored in the DB first. return nil, errors.Wrapf(err, "validatePreviouslyIndexedBlockPoS: Problem fetching block from DB") } + // Build utxoView for the block's parent. + parentUtxoView, err := bc.getUtxoViewAtBlockHash(*block.Header.PrevBlockHash) + if err != nil { + // This should never happen. If the parent is validated and extends from the tip, then we should + // be able to build a UtxoView for it. This failure can only happen due to transient or badger issues. + return nil, errors.Wrap(err, "validatePreviouslyIndexedBlockPoS: Problem getting UtxoView") + } // If the block isn't validated or validate failed, we need to run the anti-spam checks on it. - passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block) + passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, parentUtxoView, verifySignatures) if err != nil { // If we hit an error, that means there was an intermittent issue when trying to // validate the QC or the leader. 
@@ -774,7 +802,7 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS(blockHash *BlockHash) (* } // We run the full validation algorithm on the block. - return bc.validateAndIndexBlockPoS(block) + return bc.validateAndIndexBlockPoS(block, parentUtxoView, verifySignatures) } // isValidBlockPoS performs all basic block integrity checks. Any error @@ -1171,6 +1199,29 @@ func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBloc if leaderEntryFromVotingPublicKey == nil { return false, nil } + + // Dump some debug info on the current block's proposer and the current view's leader. + glog.V(2).Infof( + "hasValidBlockProposerPoS: Printing block proposer debug info: "+ + "\n Epoch Num: %d, Block View: %d, Block Height: %d, Epoch Initial View: %d, Epoch Initial Block Height: %d"+ + "\n Leader Idx: %d, Num Leaders: %d"+ + "\n Expected Leader PKID: %v, Expected Leader Voting PK: %v"+ + "\n Expected Leader PKID from BLS Key Lookup: %v, Expected Leader Voting PK from BLS Key Lookup: %v"+ + "\n Block Proposer Voting PK: %v", + currentEpochEntry.EpochNumber, + block.Header.ProposedInView, + block.Header.Height, + currentEpochEntry.InitialView, + currentEpochEntry.InitialBlockHeight, + leaderIdx, + len(leaders), + PkToString(leaderEntry.ValidatorPKID.ToBytes(), bav.Params), + leaderEntry.VotingPublicKey.ToAbbreviatedString(), + PkToString(leaderEntryFromVotingPublicKey.ValidatorPKID.ToBytes(), bav.Params), + leaderEntryFromVotingPublicKey.VotingPublicKey.ToAbbreviatedString(), + block.Header.ProposerVotingPublicKey.ToAbbreviatedString(), + ) + if !leaderEntry.VotingPublicKey.Eq(block.Header.ProposerVotingPublicKey) || !leaderEntry.ValidatorPKID.Eq(leaderEntryFromVotingPublicKey.ValidatorPKID) { return false, nil @@ -1227,7 +1278,7 @@ func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validator // including the committed tip. The first block in the returned slice is the first uncommitted // ancestor. 
func (bc *Blockchain) getLineageFromCommittedTip(header *MsgDeSoHeader) ([]*BlockNode, error) { - highestCommittedBlock, idx := bc.getCommittedTip() + highestCommittedBlock, idx := bc.GetCommittedTip() if idx == -1 || highestCommittedBlock == nil { return nil, errors.New("getLineageFromCommittedTip: No committed blocks found") } @@ -1549,8 +1600,6 @@ func (bc *Blockchain) shouldReorg(blockNode *BlockNode, currentView uint64) bool func (bc *Blockchain) addTipBlockToBestChain(blockNode *BlockNode) { bc.bestChain = append(bc.bestChain, blockNode) bc.bestChainMap[*blockNode.Hash] = blockNode - bc.bestHeaderChain = append(bc.bestHeaderChain, blockNode) - bc.bestHeaderChainMap[*blockNode.Hash] = blockNode } // removeTipBlockFromBestChain removes the current tip from the best chain. It @@ -1562,15 +1611,13 @@ func (bc *Blockchain) removeTipBlockFromBestChain() *BlockNode { lastBlock := bc.bestChain[len(bc.bestChain)-1] delete(bc.bestChainMap, *lastBlock.Hash) bc.bestChain = bc.bestChain[:len(bc.bestChain)-1] - bc.bestHeaderChain = bc.bestHeaderChain[:len(bc.bestChain)] - delete(bc.bestHeaderChainMap, *lastBlock.Hash) return lastBlock } // runCommitRuleOnBestChain commits the grandparent of the block if possible. // Specifically, this updates the CommittedBlockStatus of its grandparent // and flushes the view after connecting the grandparent block to the DB. -func (bc *Blockchain) runCommitRuleOnBestChain() error { +func (bc *Blockchain) runCommitRuleOnBestChain(verifySignatures bool) error { currentBlock := bc.BlockTip() // If we can commit the grandparent, commit it. // Otherwise, we can't commit it and return nil. @@ -1579,7 +1626,7 @@ func (bc *Blockchain) runCommitRuleOnBestChain() error { return nil } // Find all uncommitted ancestors of block to commit - _, idx := bc.getCommittedTip() + _, idx := bc.GetCommittedTip() if idx == -1 { // This is an edge case we'll never hit in practice since all the PoW blocks // are committed. 
@@ -1593,7 +1640,7 @@ func (bc *Blockchain) runCommitRuleOnBestChain() error { } } for ii := 0; ii < len(uncommittedAncestors); ii++ { - if err := bc.commitBlockPoS(uncommittedAncestors[ii].Hash); err != nil { + if err := bc.commitBlockPoS(uncommittedAncestors[ii].Hash, verifySignatures); err != nil { return errors.Wrapf(err, "runCommitRuleOnBestChain: Problem committing block %v", uncommittedAncestors[ii].Hash.String()) } @@ -1625,7 +1672,7 @@ func (bc *Blockchain) canCommitGrandparent(currentBlock *BlockNode) (_grandparen // commitBlockPoS commits the block with the given hash. Specifically, this updates the // BlockStatus to include StatusBlockCommitted and flushes the view after connecting the block // to the DB and updates relevant badger indexes with info about the block. -func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash) error { +func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool) error { // block must be in the best chain. we grab the block node from there. blockNode, exists := bc.bestChainMap[*blockHash] if !exists { @@ -1650,7 +1697,7 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash) error { }) // Connect the block to the view! utxoOpsForBlock, err := utxoView.ConnectBlock( - block, txHashes, true /*verifySignatures*/, bc.eventManager, block.Header.Height) + block, txHashes, verifySignatures, bc.eventManager, block.Header.Height) if err != nil { // TODO: rule error handling? mark blocks invalid? return errors.Wrapf(err, "commitBlockPoS: Problem connecting block to view: ") @@ -1722,6 +1769,50 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash) error { return nil } +// GetUncommittedFullBlocks is a helper that the state syncer uses to fetch all uncommitted +// blocks, so it can flush them just like we would with mempool transactions. It returns +// all uncommitted blocks from the specified tip to the last uncommitted block. +// Note: it would be more efficient if we cached these results. 
+func (bc *Blockchain) GetUncommittedFullBlocks(tipHash *BlockHash) ([]*MsgDeSoBlock, error) { + if tipHash == nil { + tipHash = bc.BlockTip().Hash + } + bc.ChainLock.RLock() + defer bc.ChainLock.RUnlock() + tipBlock, exists := bc.bestChainMap[*tipHash] + if !exists { + return nil, errors.Errorf("GetUncommittedFullBlocks: Block %v not found in best chain map", tipHash.String()) + } + // If the tip block is committed, we can't get uncommitted blocks from it so we return an empty slice. + if tipBlock.IsCommitted() { + return []*MsgDeSoBlock{}, nil + } + var uncommittedBlocks []*MsgDeSoBlock + currentBlock := tipBlock + for !currentBlock.IsCommitted() { + fullBlock, err := GetBlock(currentBlock.Hash, bc.db, bc.snapshot) + if err != nil { + return nil, errors.Wrapf(err, "GetUncommittedFullBlocks: Problem fetching block %v", + currentBlock.Hash.String()) + } + uncommittedBlocks = append(uncommittedBlocks, fullBlock) + currentParentHash := currentBlock.Header.PrevBlockHash + if currentParentHash == nil { + return nil, errors.Errorf("GetUncommittedFullBlocks: Block %v has nil PrevBlockHash", currentBlock.Hash) + } + currentBlock = bc.blockIndexByHash[*currentParentHash] + if currentBlock == nil { + return nil, errors.Errorf("GetUncommittedFullBlocks: Block %v not found in block index", currentBlock.Hash) + } + } + return collections.Reverse(uncommittedBlocks), nil +} + +// GetCommittedTipView builds a UtxoView to the committed tip. +func (bc *Blockchain) GetCommittedTipView() (*UtxoView, error) { + return NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, nil) +} + // GetUncommittedTipView builds a UtxoView to the uncommitted tip. func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks @@ -1731,18 +1822,20 @@ func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { // getUtxoViewAtBlockHash builds a UtxoView to the block provided. 
It does this by // identifying all uncommitted ancestors of this block and then connecting those blocks. func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, error) { + // Always fetch the lineage from the committed tip to the block provided first to + // ensure that a valid UtxoView is returned. uncommittedAncestors := []*BlockNode{} currentBlock := bc.blockIndexByHash[blockHash] if currentBlock == nil { return nil, errors.Errorf("getUtxoViewAtBlockHash: Block %v not found in block index", blockHash) } + highestCommittedBlock, _ := bc.GetCommittedTip() + if highestCommittedBlock == nil { + return nil, errors.Errorf("getUtxoViewAtBlockHash: No committed blocks found") + } // If the provided block is committed, we need to make sure it's the committed tip. // Otherwise, we return an error. if currentBlock.IsCommitted() { - highestCommittedBlock, _ := bc.getCommittedTip() - if highestCommittedBlock == nil { - return nil, errors.Errorf("getUtxoViewAtBlockHash: No committed blocks found") - } if !highestCommittedBlock.Hash.IsEqual(&blockHash) { return nil, errors.Errorf( "getUtxoViewAtBlockHash: Block %v is committed but not the committed tip", blockHash) @@ -1756,14 +1849,28 @@ func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, er } currentBlock = bc.blockIndexByHash[*currentParentHash] if currentBlock == nil { - return nil, errors.Errorf("getUtxoViewAtBlockHash: Block %v not found in block index", blockHash) + return nil, errors.Errorf("getUtxoViewAtBlockHash: Block %v not found in block index", currentParentHash) + } + if currentBlock.IsCommitted() && !currentBlock.Hash.IsEqual(highestCommittedBlock.Hash) { + return nil, errors.Errorf( + "getUtxoViewAtBlockHash: extends from a committed block that isn't the committed tip") + } + } + if viewAtHash, exists := bc.blockViewCache.Lookup(blockHash); exists { + copiedView, err := viewAtHash.(*UtxoView).CopyUtxoView() + if err != nil { + return nil, errors.Wrapf(err, 
"getUtxoViewAtBlockHash: Problem copying UtxoView from cache") } + return copiedView, nil } // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if err != nil { return nil, errors.Wrapf(err, "getUtxoViewAtBlockHash: Problem initializing UtxoView") } + // TODO: there's another performance enhancement we can make here. If we have a view in the + // cache for one of the ancestors, we can skip fetching the block and connecting it by taking + // a copy of it and replacing the existing view. for ii := len(uncommittedAncestors) - 1; ii >= 0; ii-- { // We need to get these blocks from badger fullBlock, err := GetBlock(uncommittedAncestors[ii].Hash, bc.db, bc.snapshot) @@ -1783,11 +1890,17 @@ func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, er } // Update the TipHash saved on the UtxoView to the blockHash provided. utxoView.TipHash = &blockHash + // Save a copy of the UtxoView to the cache. + copiedView, err := utxoView.CopyUtxoView() + if err != nil { + return nil, errors.Wrapf(err, "getUtxoViewAtBlockHash: Problem copying UtxoView to store in cache") + } + bc.blockViewCache.Add(blockHash, copiedView) return utxoView, nil } -// getCommittedTip returns the highest committed block and its index in the best chain. -func (bc *Blockchain) getCommittedTip() (*BlockNode, int) { +// GetCommittedTip returns the highest committed block and its index in the best chain. +func (bc *Blockchain) GetCommittedTip() (*BlockNode, int) { for ii := len(bc.bestChain) - 1; ii >= 0; ii-- { if bc.bestChain[ii].IsCommitted() { return bc.bestChain[ii], ii @@ -1818,7 +1931,7 @@ func (bc *Blockchain) GetSafeBlocks() ([]*MsgDeSoHeader, error) { func (bc *Blockchain) getSafeBlockNodes() ([]*BlockNode, error) { // First get committed tip. 
- committedTip, idx := bc.getCommittedTip() + committedTip, idx := bc.GetCommittedTip() if idx == -1 || committedTip == nil { return nil, errors.New("getSafeBlockNodes: No committed blocks found") } @@ -1922,6 +2035,7 @@ const ( RuleErrorAncestorBlockValidationFailed RuleError = "RuleErrorAncestorBlockValidationFailed" RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock RuleError = "RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock" RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight RuleError = "RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight" + RuleErrorFailedSpamPreventionsCheck RuleError = "RuleErrorFailedSpamPreventionsCheck" RuleErrorNilMerkleRoot RuleError = "RuleErrorNilMerkleRoot" RuleErrorInvalidMerkleRoot RuleError = "RuleErrorInvalidMerkleRoot" diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 8f40597e8..0f11f1a23 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -1,5 +1,3 @@ -//go:build relic - package lib import ( @@ -1536,7 +1534,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { blockTemplate1 := _generateBlockAndAddToBestChain(testMeta, 12, 12, 887) // Okay now try to run the commit rule. Nothing will happen. // We expect the block to be uncommitted. - err := testMeta.chain.runCommitRuleOnBestChain() + err := testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) blockHash1, err := blockTemplate1.Hash() @@ -1549,7 +1547,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { // Run commit rule again. Nothing should happen. // We expect both block 1 and block 2 to be uncommitted. - err = testMeta.chain.runCommitRuleOnBestChain() + err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) blockHash2, err := blockTemplate2.Hash() @@ -1561,7 +1559,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { blockTemplate3 := _generateBlockAndAddToBestChain(testMeta, 14, 14, 513) // Run the commit rule again. 
This time we expect block 1 to be committed. - err = testMeta.chain.runCommitRuleOnBestChain() + err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) blockHash3, err := blockTemplate3.Hash() @@ -1573,7 +1571,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { // Add one more block to the best chain, but have the view be further in the future. // this should trigger a commit on block 2. blockTemplate4 := _generateBlockAndAddToBestChain(testMeta, 14, 20, 429) - err = testMeta.chain.runCommitRuleOnBestChain() + err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) blockHash4, err := blockTemplate4.Hash() @@ -1585,7 +1583,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { // Okay so add block 5 to the best chain. This should NOT trigger a commit on block 3 // as block 4 is not a direct child of block 3 based on its view. blockTemplate5 := _generateBlockAndAddToBestChain(testMeta, 15, 21, 654) - err = testMeta.chain.runCommitRuleOnBestChain() + err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) blockHash5, err := blockTemplate5.Hash() @@ -1599,7 +1597,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { // we have a descendent of block 5. 
blockTemplate6 := _generateBlockAndAddToBestChain(testMeta, 16, 22, 912) require.NoError(t, err) - err = testMeta.chain.runCommitRuleOnBestChain() + err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) blockHash6, err := blockTemplate6.Hash() diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 99c582d71..ee8d50205 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -14,6 +14,7 @@ import ( type FastHotStuffConsensus struct { lock sync.RWMutex + networkManager *NetworkManager blockchain *Blockchain fastHotStuffEventLoop consensus.FastHotStuffEventLoop mempool Mempool @@ -25,6 +26,7 @@ type FastHotStuffConsensus struct { func NewFastHotStuffConsensus( params *DeSoParams, + networkManager *NetworkManager, blockchain *Blockchain, mempool Mempool, signer *BLSSigner, @@ -32,6 +34,7 @@ func NewFastHotStuffConsensus( timeoutBaseDurationMilliseconds uint64, ) *FastHotStuffConsensus { return &FastHotStuffConsensus{ + networkManager: networkManager, blockchain: blockchain, fastHotStuffEventLoop: consensus.NewFastHotStuffEventLoop(), mempool: mempool, @@ -99,10 +102,22 @@ func (fc *FastHotStuffConsensus) Start() error { blockProductionInterval := time.Millisecond * time.Duration(fc.blockProductionIntervalMilliseconds) timeoutBaseDuration := time.Millisecond * time.Duration(fc.timeoutBaseDurationMilliseconds) - // Initialize and start the event loop - fc.fastHotStuffEventLoop.Init(blockProductionInterval, timeoutBaseDuration, genesisQC, tipBlockWithValidators[0], safeBlocksWithValidators) + // Initialize the event loop. This should never fail. If it does, we return the error to the caller. + // The caller handle the error and decide when to retry. 
+ err = fc.fastHotStuffEventLoop.Init(blockProductionInterval, timeoutBaseDuration, genesisQC, tipBlockWithValidators[0], safeBlocksWithValidators) + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error initializing FastHotStuffEventLoop: %v", err) + } + + // Start the event loop fc.fastHotStuffEventLoop.Start() + // Update the validator connections in the NetworkManager. This is a best effort operation. If it fails, + // we log the error and continue. + if err = fc.updateActiveValidatorConnections(); err != nil { + glog.Errorf("FastHotStuffConsensus.tryProcessBlockAsNewTip: Error updating validator connections: %v", err) + } + return nil } @@ -122,6 +137,9 @@ func (fc *FastHotStuffConsensus) Stop() { // construct a block at a certain block height. This function validates the block proposal signal, // constructs, processes locally, and then broadcasts the block. func (fc *FastHotStuffConsensus) HandleLocalBlockProposalEvent(event *consensus.FastHotStuffEvent) error { + glog.V(2).Infof("FastHotStuffConsensus.HandleLocalBlockProposalEvent: %s", event.ToString()) + glog.V(2).Infof("FastHotStuffConsensus.HandleLocalBlockProposalEvent: %s", fc.fastHotStuffEventLoop.ToString()) + // Hold a read and write lock on the consensus. This is because we need to check // the current view of the consensus event loop, and to update the blockchain. fc.lock.Lock() @@ -139,6 +157,7 @@ func (fc *FastHotStuffConsensus) HandleLocalBlockProposalEvent(event *consensus. // Handle the event as a block proposal event for a regular block if err := fc.handleBlockProposalEvent(event, consensus.FastHotStuffEventTypeConstructVoteQC); err != nil { + glog.Errorf("FastHotStuffConsensus.HandleLocalBlockProposalEvent: Error proposing block: %v", err) return errors.Wrapf(err, "FastHotStuffConsensus.HandleLocalBlockProposalEvent: ") } @@ -150,6 +169,9 @@ func (fc *FastHotStuffConsensus) HandleLocalBlockProposalEvent(event *consensus. 
// construct a timeout block at a certain block height. This function validates the timeout block proposal // signal, constructs, processes locally, and then broadcasts the block. func (fc *FastHotStuffConsensus) HandleLocalTimeoutBlockProposalEvent(event *consensus.FastHotStuffEvent) error { + glog.V(2).Infof("FastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent: %s", event.ToString()) + glog.V(2).Infof("FastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent: %s", fc.fastHotStuffEventLoop.ToString()) + // Hold a read and write lock on the consensus. This is because we need to check // the current view of the consensus event loop, and to update the blockchain. fc.lock.Lock() @@ -167,6 +189,7 @@ func (fc *FastHotStuffConsensus) HandleLocalTimeoutBlockProposalEvent(event *con // Handle the event as a block proposal event for a timeout block if err := fc.handleBlockProposalEvent(event, consensus.FastHotStuffEventTypeConstructTimeoutQC); err != nil { + glog.Errorf("FastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent: Error proposing block: %v", err) return errors.Wrapf(err, "FastHotStuffConsensus.HandleLocalTimeoutBlockProposalEvent: ") } @@ -281,7 +304,11 @@ func (fc *FastHotStuffConsensus) handleBlockProposalEvent( ) } - // TODO: Broadcast the block proposal to the network + // Broadcast the block to the validator network + validators := fc.networkManager.GetConnectedValidators() + for _, validator := range validators { + sendMessageToRemoteNodeAsync(validator, blockProposal) + } fc.logBlockProposal(blockProposal, blockHash) return nil @@ -297,6 +324,9 @@ func (fc *FastHotStuffConsensus) handleBlockProposalEvent( // 3. Process the vote in the consensus module // 4. 
Broadcast the vote msg to the network func (fc *FastHotStuffConsensus) HandleLocalVoteEvent(event *consensus.FastHotStuffEvent) error { + glog.V(2).Infof("FastHotStuffConsensus.HandleLocalVoteEvent: %s", event.ToString()) + glog.V(2).Infof("FastHotStuffConsensus.HandleLocalVoteEvent: %s", fc.fastHotStuffEventLoop.ToString()) + // Hold a read lock on the consensus. This is because we need to check the // current view and block height of the consensus module. fc.lock.Lock() @@ -346,8 +376,11 @@ func (fc *FastHotStuffConsensus) HandleLocalVoteEvent(event *consensus.FastHotSt return errors.Errorf("FastHotStuffConsensus.HandleLocalVoteEvent: Error processing vote locally: %v", err) } - // Broadcast the vote message to the network - // TODO: Broadcast the vote message to the network or alternatively to just the block proposer + // Broadcast the block to the validator network + validators := fc.networkManager.GetConnectedValidators() + for _, validator := range validators { + sendMessageToRemoteNodeAsync(validator, voteMsg) + } return nil } @@ -355,6 +388,9 @@ func (fc *FastHotStuffConsensus) HandleLocalVoteEvent(event *consensus.FastHotSt // HandleValidatorVote is called when we receive a validator vote message from a peer. This function processes // the vote locally in the FastHotStuffEventLoop. 
func (fc *FastHotStuffConsensus) HandleValidatorVote(pp *Peer, msg *MsgDeSoValidatorVote) error { + glog.V(2).Infof("FastHotStuffConsensus.HandleValidatorVote: %s", msg.ToString()) + glog.V(2).Infof("FastHotStuffConsensus.HandleValidatorVote: %s", fc.fastHotStuffEventLoop.ToString()) + // No need to hold a lock on the consensus because this function is a pass-through // for the FastHotStuffEventLoop which guarantees thread-safety for its callers @@ -362,7 +398,8 @@ func (fc *FastHotStuffConsensus) HandleValidatorVote(pp *Peer, msg *MsgDeSoValid if err := fc.fastHotStuffEventLoop.ProcessValidatorVote(msg); err != nil { // If we can't process the vote locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. - return errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorVote: Error processing vote: ") + glog.Errorf("FastHotStuffConsensus.HandleValidatorVote: Error processing vote msg: %v", err) + return errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorVote: Error processing vote msg: ") } // Happy path @@ -379,6 +416,9 @@ func (fc *FastHotStuffConsensus) HandleValidatorVote(pp *Peer, msg *MsgDeSoValid // 3. Process the timeout in the consensus module // 4. Broadcast the timeout msg to the network func (fc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHotStuffEvent) error { + glog.V(2).Infof("FastHotStuffConsensus.HandleLocalTimeoutEvent: %s", event.ToString()) + glog.V(2).Infof("FastHotStuffConsensus.HandleLocalTimeoutEvent: %s", fc.fastHotStuffEventLoop.ToString()) + // Hold a read lock on the consensus. This is because we need to check the // current view and block height of the consensus module. 
fc.lock.Lock() @@ -461,8 +501,11 @@ func (fc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error processing timeout locally: %v", err) } - // Broadcast the timeout message to the network - // TODO: Broadcast the timeout message to the network or alternatively to just the block proposer + // Broadcast the block to the validator network + validators := fc.networkManager.GetConnectedValidators() + for _, validator := range validators { + sendMessageToRemoteNodeAsync(validator, timeoutMsg) + } return nil } @@ -470,28 +513,50 @@ func (fc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo // HandleValidatorTimeout is called when we receive a validator timeout message from a peer. This function // processes the timeout locally in the FastHotStuffEventLoop. func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeout) error { - // No need to hold a lock on the consensus because this function is a pass-through - // for the FastHotStuffEventLoop which guarantees thread-safety for its callers. + glog.V(2).Infof("FastHotStuffConsensus.HandleValidatorTimeout: %s", msg.ToString()) + glog.V(2).Infof("FastHotStuffConsensus.HandleValidatorTimeout: %s", fc.fastHotStuffEventLoop.ToString()) + + // Hold a write lock on the consensus, since we need to update the timeout message in the + // FastHotStuffEventLoop. + fc.lock.Lock() + defer fc.lock.Unlock() + + if !fc.fastHotStuffEventLoop.IsRunning() { + return errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: FastHotStuffEventLoop is not running") + } + + // If we don't have the highQC's block on hand, then we need to request it from the peer. We do + // that first before storing the timeout message locally in the FastHotStuffEventLoop. This + // prevents spamming of timeout messages by peers. 
+ if !fc.blockchain.HasBlockInBlockIndex(msg.HighQC.BlockHash) { + fc.trySendMessageToPeer(pp, &MsgDeSoGetBlocks{HashList: []*BlockHash{msg.HighQC.BlockHash}}) + glog.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Requesting missing highQC's block: %v", msg.HighQC.BlockHash) + return errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Missing highQC's block: %v", msg.HighQC.BlockHash) + } // Process the timeout message locally in the FastHotStuffEventLoop if err := fc.fastHotStuffEventLoop.ProcessValidatorTimeout(msg); err != nil { // If we can't process the timeout locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. - return errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout: ") + glog.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout msg: %v", err) + return errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout msg: ") } // Happy path return nil } -func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error { +func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) (_isOprhan bool, _err error) { + glog.V(2).Infof("FastHotStuffConsensus.HandleBlock: Received block: \n%s", msg.String()) + glog.V(2).Infof("FastHotStuffConsensus.HandleBlock: %s", fc.fastHotStuffEventLoop.ToString()) + // Hold a lock on the consensus, because we will need to mutate the Blockchain // and the FastHotStuffEventLoop data structures. fc.lock.Lock() defer fc.lock.Unlock() if !fc.fastHotStuffEventLoop.IsRunning() { - return errors.Errorf("FastHotStuffConsensus.HandleBlock: FastHotStuffEventLoop is not running") + return false, errors.Errorf("FastHotStuffConsensus.HandleBlock: FastHotStuffEventLoop is not running") } // Hold the blockchain's write lock so that the chain cannot be mutated underneath us. 
@@ -507,7 +572,7 @@ func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error if err != nil { // If we get an error here, it means something went wrong with the block processing algorithm. // Nothing we can do to recover here. - return errors.Errorf("FastHotStuffConsensus.HandleBlock: Error processing block as new tip: %v", err) + return false, errors.Errorf("FastHotStuffConsensus.HandleBlock: Error processing block as new tip: %v", err) } // If there are missing block hashes, then we need to fetch the missing blocks from the network @@ -519,12 +584,17 @@ func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) error // // See https://github.com/deso-protocol/core/pull/875#discussion_r1460183510 for more details. if len(missingBlockHashes) > 0 { - pp.QueueMessage(&MsgDeSoGetBlocks{ - HashList: missingBlockHashes, - }) + remoteNode := fc.networkManager.GetRemoteNodeFromPeer(pp) + if remoteNode == nil { + glog.Errorf("FastHotStuffConsensus.HandleBlock: RemoteNode not found for peer: %v", pp) + } else { + sendMessageToRemoteNodeAsync(remoteNode, &MsgDeSoGetBlocks{HashList: missingBlockHashes}) + } + return true, nil } - return nil + // Happy path. The block was processed successfully and applied as the new tip. Nothing left to do. + return false, nil } // tryProcessBlockAsNewTip tries to apply a new tip block to both the Blockchain and FastHotStuffEventLoop data @@ -599,6 +669,12 @@ func (fc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ return nil, errors.Errorf("Error processing tip block locally: %v", err) } + // Update the validator connections in the NetworkManager. This is a best effort operation. If it fails, + // we log the error and continue. + if err = fc.updateActiveValidatorConnections(); err != nil { + glog.Errorf("FastHotStuffConsensus.tryProcessBlockAsNewTip: Error updating validator connections: %v", err) + } + // Happy path. The block was processed successfully and applied as the new tip. 
Nothing left to do. return nil, nil } @@ -775,6 +851,70 @@ func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView, previousBloc return blockProducer, nil } +func (fc *FastHotStuffConsensus) updateActiveValidatorConnections() error { + // Fetch the committed tip view. This ends up being as good as using the uncommitted tip view + // but without the overhead of connecting at least two blocks' worth of txns to the view. + utxoView, err := fc.blockchain.GetCommittedTipView() + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching uncommitted tip view: %v", err) + } + + // Get the current snapshot epoch number from the committed tip. This will be behind the uncommitted tip + // by up to two blocks, but this is fine since we fetch both the current epoch's and next epoch's validator + // sets. + snapshotEpochNumber, err := utxoView.GetCurrentSnapshotEpochNumber() + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching snapshot epoch number: %v", err) + } + + // Fetch the current snapshot epoch's validator set. + currentValidatorList, err := utxoView.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber) + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching validator list: %v", err) + } + + // Fetch the next snapshot epoch's validator set. This is useful when we're close to epoch transitions and + // allows us to pre-connect to the next epoch's validator set. In the event that there is a timeout at + // the epoch transition, reverting us to the previous epoch, this allows us to maintain connections to the + // next epoch's validators. + // + // TODO: There is an optimization we can add here to only fetch the next epoch's validator list once we're + // within 300 blocks of the next epoch. This way, we don't prematurely attempt connections to the next + // epoch's validators. 
In production, this will reduce the lead time with which we connect to the next epoch's + // validator set from 1 hour to 5 minutes. + nextValidatorList, err := utxoView.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotEpochNumber + 1) + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching validator list: %v", err) + } + + // Merge the current and next validator lists. Place the current epoch's validators last so that they override + // the next epoch's validators in the event of a conflict. + mergedValidatorList := append(nextValidatorList, currentValidatorList...) + validatorsMap := collections.NewConcurrentMap[bls.SerializedPublicKey, consensus.Validator]() + for _, validator := range mergedValidatorList { + if validator.VotingPublicKey.Eq(fc.signer.GetPublicKey()) { + continue + } + validatorsMap.Set(validator.VotingPublicKey.Serialize(), validator) + } + + // Update the active validators map in the network manager + fc.networkManager.SetActiveValidatorsMap(validatorsMap) + + return nil +} + +func (fc *FastHotStuffConsensus) trySendMessageToPeer(pp *Peer, msg DeSoMessage) { + remoteNode := fc.networkManager.GetRemoteNodeFromPeer(pp) + if remoteNode == nil { + glog.Errorf("FastHotStuffConsensus.trySendMessageToPeer: RemoteNode not found for peer: %v", pp) + return + } + + // Send the message to the peer + remoteNode.SendMessage(msg) +} + // Finds the epoch entry for the block and returns the epoch number. 
func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { for _, epochEntry := range epochEntries { @@ -818,6 +958,10 @@ func isProperlyFormedBlockProposalEvent(event *consensus.FastHotStuffEvent) bool return false } +func sendMessageToRemoteNodeAsync(remoteNode *RemoteNode, msg DeSoMessage) { + go func(rn *RemoteNode, m DeSoMessage) { rn.SendMessage(m) }(remoteNode, msg) +} + ////////////////////////////////////////// Logging Helper Functions /////////////////////////////////////////////// func (fc *FastHotStuffConsensus) logBlockProposal(block *MsgDeSoBlock, blockHash *BlockHash) { @@ -836,13 +980,15 @@ func (fc *FastHotStuffConsensus) logBlockProposal(block *MsgDeSoBlock, blockHash "\n Timestamp: %d, View: %d, Height: %d, BlockHash: %v"+ "\n Proposer Voting PKey: %s"+ "\n Proposer Signature: %s"+ + "\n Proposer Random Seed Signature: %s"+ "\n High QC View: %d, High QC Num Validators: %d, High QC BlockHash: %s"+ "\n Timeout Agg QC View: %d, Timeout Agg QC Num Validators: %d, Timeout High QC Views: %s"+ "\n Num Block Transactions: %d, Num Transactions Remaining In Mempool: %d"+ - "\n=================================================================================================================", + "\n=================================================================================================================\n", block.Header.GetTstampSecs(), block.Header.GetView(), block.Header.Height, blockHash.String(), block.Header.ProposerVotingPublicKey.ToString(), block.Header.ProposerVotePartialSignature.ToString(), + block.Header.ProposerRandomSeedSignature.ToString(), block.Header.GetQC().GetView(), block.Header.GetQC().GetAggregatedSignature().GetSignersList().Size(), block.Header.PrevBlockHash.String(), aggQCView, aggQCNumValidators, aggQCHighQCViews, len(block.Txns), len(fc.mempool.GetTransactions()), diff --git a/lib/pos_consensus_test.go b/lib/pos_consensus_test.go index 9bdb136a0..f0be9f432 100644 --- 
a/lib/pos_consensus_test.go +++ b/lib/pos_consensus_test.go @@ -1,5 +1,3 @@ -//go:build relic - package lib import ( @@ -7,6 +5,7 @@ import ( "testing" "github.com/deso-protocol/core/bls" + "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" "github.com/deso-protocol/go-deadlock" "github.com/pkg/errors" @@ -26,7 +25,8 @@ func TestFastHotStuffConsensusHandleLocalVoteEvent(t *testing.T) { // Create a mock consensus fastHotStuffConsensus := FastHotStuffConsensus{ - lock: sync.RWMutex{}, + lock: sync.RWMutex{}, + networkManager: _createMockNetworkManagerForConsensus(), blockchain: &Blockchain{ params: &DeSoTestnetParams, }, @@ -104,7 +104,8 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { // Create a mock consensus fastHotStuffConsensus := FastHotStuffConsensus{ - lock: sync.RWMutex{}, + lock: sync.RWMutex{}, + networkManager: _createMockNetworkManagerForConsensus(), signer: &BLSSigner{ privateKey: blsPrivateKey, }, @@ -200,3 +201,13 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { func alwaysReturnTrue() bool { return true } + +func _createMockNetworkManagerForConsensus() *NetworkManager { + return &NetworkManager{ + AllRemoteNodes: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + ValidatorOutboundIndex: collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode](), + ValidatorInboundIndex: collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode](), + NonValidatorOutboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + NonValidatorInboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), + } +} diff --git a/lib/pos_constants_test.go b/lib/pos_constants_test.go index b2075e0b1..c56c13194 100644 --- a/lib/pos_constants_test.go +++ b/lib/pos_constants_test.go @@ -1,5 +1,3 @@ -//go:build relic - package lib import ( diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index c574eef64..0d2dc92f6 
100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -1,5 +1,3 @@ -//go:build relic - package lib import ( diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index aae60d662..24bfa4a04 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -10,6 +10,9 @@ import ( ) type PoSFeeEstimator struct { + // globalParams is the latest GlobalParams used by the PoSFeeEstimator. The fee estimation uses + // this whenever the cached blocks are refreshed + globalParams *GlobalParamsEntry // mempoolTransactionRegister is a pointer to the mempool's transaction register. The fee estimator // uses this to estimate fees based on congestion in the mempool. mempoolTransactionRegister *TransactionRegister @@ -52,9 +55,12 @@ func (posFeeEstimator *PoSFeeEstimator) Init( numPastBlocks uint64, globalParams *GlobalParamsEntry, ) error { + posFeeEstimator.globalParams = globalParams.Copy() posFeeEstimator.rwLock = &sync.RWMutex{} + posFeeEstimator.rwLock.Lock() defer posFeeEstimator.rwLock.Unlock() + if mempoolTransactionRegister == nil { return errors.New("PoSFeeEstimator.Init: mempoolTransactionRegister cannot be nil") } @@ -64,26 +70,30 @@ func (posFeeEstimator *PoSFeeEstimator) Init( if numPastBlocks == 0 { return errors.New("PoSFeeEstimator.Init: numPastBlocks cannot be zero") } - if numPastBlocks < uint64(len(pastBlocks)) { - return errors.New("PoSFeeEstimator.Init: numPastBlocks must greater than or equal the number of pastBlocks") - } - // Sort the past blocks by height just to be safe. - sortedPastBlocks := collections.SortStable(pastBlocks, func(ii, jj *MsgDeSoBlock) bool { - return ii.Header.Height < jj.Header.Height - }) - posFeeEstimator.mempoolTransactionRegister = mempoolTransactionRegister + + // Store the num blocks params used to estimate fees. posFeeEstimator.numMempoolBlocks = numMempoolBlocks posFeeEstimator.numPastBlocks = numPastBlocks + + // Store the mempool's transaction register. 
+ posFeeEstimator.mempoolTransactionRegister = mempoolTransactionRegister + // Create a transaction register we can use to estimate fees for past blocks. posFeeEstimator.pastBlocksTransactionRegister = NewTransactionRegister() posFeeEstimator.pastBlocksTransactionRegister.Init(globalParams.Copy()) + // Sort the past blocks by height just to be safe. + sortedPastBlocks := posFeeEstimator.cleanUpPastBlocks(pastBlocks) + // Add all the txns from the past blocks to the new pastBlocksTransactionRegister. for _, block := range sortedPastBlocks { if err := posFeeEstimator.addBlockNoLock(block); err != nil { return errors.Wrap(err, "PosFeeEstimator.Init: error adding block to pastBlocksMempool") } } + + // Cache the past blocks. + posFeeEstimator.cachedBlocks = sortedPastBlocks return nil } @@ -94,27 +104,35 @@ func (posFeeEstimator *PoSFeeEstimator) Init( func (posFeeEstimator *PoSFeeEstimator) AddBlock(block *MsgDeSoBlock) error { posFeeEstimator.rwLock.Lock() defer posFeeEstimator.rwLock.Unlock() + if err := posFeeEstimator.addBlockNoLock(block); err != nil { return errors.Wrap(err, "PoSFeeEstimator.AddBlock: error adding block to PoSFeeEstimator") } + return nil } // addBlockNoLock is the same as AddBlock but assumes the caller has already acquired the rwLock. func (posFeeEstimator *PoSFeeEstimator) addBlockNoLock(block *MsgDeSoBlock) error { + // Create a new slice to house the new past blocks and add the new block to it. + newPastBlocks := append(posFeeEstimator.cachedBlocks, block) + newPastBlocks = posFeeEstimator.cleanUpPastBlocks(newPastBlocks) + + // Create a clean transaction register to add the blocks' transactions. + newTransactionRegister := NewTransactionRegister() + newTransactionRegister.Init(posFeeEstimator.globalParams.Copy()) + // Add all transactions from the block to the pastBlocksTransactionRegister. 
- if err := addBlockToTransactionRegister(posFeeEstimator.pastBlocksTransactionRegister, block); err != nil { - return errors.Wrap(err, "PoSFeeEstimator.addBlockNoLock: error adding block to pastBlocksTransactionRegister") - } - posFeeEstimator.cachedBlocks = append(posFeeEstimator.cachedBlocks, block) - // Sort the cached blocks by height & tstamp just to be safe. - posFeeEstimator.sortCachedBlocks() - if uint64(len(posFeeEstimator.cachedBlocks)) > posFeeEstimator.numPastBlocks { - // Remove the oldest block. - if err := posFeeEstimator.removeBlockNoLock(posFeeEstimator.cachedBlocks[0]); err != nil { - return errors.Wrap(err, "PoSFeeEstimator.addBlockNoLock: error removing block from PoSFeeEstimator") + for _, pastBlock := range newPastBlocks { + if err := addBlockToTransactionRegister(newTransactionRegister, pastBlock); err != nil { + return errors.Wrap(err, "PoSFeeEstimator.addBlockNoLock: error adding block to pastBlocksTransactionRegister") } } + + // Update the cached blocks and pastBlocksTransactionRegister. + posFeeEstimator.cachedBlocks = newPastBlocks + posFeeEstimator.pastBlocksTransactionRegister = newTransactionRegister + return nil } @@ -135,6 +153,7 @@ func addBlockToTransactionRegister(txnRegister *TransactionRegister, block *MsgD "PoSFeeEstimator.addBlockToTransactionRegister: error adding txn to pastBlocksTransactionRegister") } } + return nil } @@ -147,44 +166,41 @@ func (posFeeEstimator *PoSFeeEstimator) RemoveBlock(block *MsgDeSoBlock) error { if err := posFeeEstimator.removeBlockNoLock(block); err != nil { return errors.Wrap(err, "PoSFeeEstimator.RemoveBlock: error removing block from PoSFeeEstimator") } + return nil } // removeBlockNoLock is the same as RemoveBlock but assumes the caller has already acquired the rwLock. func (posFeeEstimator *PoSFeeEstimator) removeBlockNoLock(block *MsgDeSoBlock) error { - // Remove all transaction from the block from the pastBlocksTransactionRegister. 
- for _, txn := range block.Txns { - // We explicitly exclude block reward transactions as they do not have fees. - // They were never added in the first place. - if txn.TxnMeta.GetTxnType() == TxnTypeBlockReward { - continue - } - mtxn, err := NewMempoolTx(txn, NanoSecondsToTime(block.Header.TstampNanoSecs), block.Header.Height) - if err != nil { - return errors.Wrap(err, "PoSFeeEstimator.RemoveBlock: error creating MempoolTx") - } - if err = posFeeEstimator.pastBlocksTransactionRegister.RemoveTransaction(mtxn); err != nil { - return errors.Wrap(err, - "PoSFeeEstimator.removeBlockNoLock: error removing txn from pastBlocksTransactionRegister") - } - } blockHash, err := block.Hash() if err != nil { return errors.Wrap(err, "PoSFeeEstimator.removeBlockNoLock: error computing blockHash") } - // Remove the block from the cached blocks. - newCachedBlocks := []*MsgDeSoBlock{} - for _, cachedBlock := range posFeeEstimator.cachedBlocks { + + // Remove the block from the cached blocks, maintaining the relative ordering of all other blocks. + newPastBlocks := collections.Filter(posFeeEstimator.cachedBlocks, func(cachedBlock *MsgDeSoBlock) bool { cachedBlockHash, err := cachedBlock.Hash() if err != nil { - return errors.Wrap(err, "PoSFeeEstimator.removeBlockNoLock: error computing cachedBlockHash") + return false } - if blockHash.IsEqual(cachedBlockHash) { - continue + return !blockHash.IsEqual(cachedBlockHash) + }) + + // Create a clean transaction register to add the blocks' transactions. + newTransactionRegister := NewTransactionRegister() + newTransactionRegister.Init(posFeeEstimator.globalParams.Copy()) + + // Add all transactions from the past blocks to the transaction register. 
+ for _, pastBlock := range newPastBlocks { + if err := addBlockToTransactionRegister(newTransactionRegister, pastBlock); err != nil { + return errors.Wrap(err, "PoSFeeEstimator.removeBlockNoLock: error adding block to transaction register") } - newCachedBlocks = append(newCachedBlocks, cachedBlock) } - posFeeEstimator.cachedBlocks = newCachedBlocks + + // Update the cached blocks and pastBlocksTransactionRegister. + posFeeEstimator.cachedBlocks = newPastBlocks + posFeeEstimator.pastBlocksTransactionRegister = newTransactionRegister + return nil } @@ -194,25 +210,50 @@ func (posFeeEstimator *PoSFeeEstimator) removeBlockNoLock(block *MsgDeSoBlock) e func (posFeeEstimator *PoSFeeEstimator) UpdateGlobalParams(globalParams *GlobalParamsEntry) error { posFeeEstimator.rwLock.Lock() defer posFeeEstimator.rwLock.Unlock() + + // Create a temporary transaction register to test the new global params. tempTransactionRegister := NewTransactionRegister() tempTransactionRegister.Init(globalParams.Copy()) + for _, block := range posFeeEstimator.cachedBlocks { if err := addBlockToTransactionRegister(tempTransactionRegister, block); err != nil { return errors.Wrap(err, "PosFeeEstimator.UpdateGlobalParams: error adding block to tempTransactionRegister") } } + + posFeeEstimator.globalParams = globalParams return nil } -// sortCachedBlocks sorts the cached blocks by height & tstamp just to be safe. -func (posFeeEstimator *PoSFeeEstimator) sortCachedBlocks() { - posFeeEstimator.cachedBlocks = collections.SortStable(posFeeEstimator.cachedBlocks, +// cleanUpPastBlocks cleans up the input blocks slice, deduping, sorting, and pruning the blocks by height. 
+func (posFeeEstimator *PoSFeeEstimator) cleanUpPastBlocks(blocks []*MsgDeSoBlock) []*MsgDeSoBlock { + dedupedBlocks := posFeeEstimator.dedupeBlocksByBlockHeight(blocks) + sortedBlocks := posFeeEstimator.sortBlocksByBlockHeight(dedupedBlocks) + return posFeeEstimator.pruneBlocksToMaxNumPastBlocks(sortedBlocks) +} + +// dedupeBlocksByBlockHeight deduplicates the blocks by block height. If multiple blocks have the same +// height, it keeps the one with the highest view. +func (posFeeEstimator *PoSFeeEstimator) dedupeBlocksByBlockHeight(blocks []*MsgDeSoBlock) []*MsgDeSoBlock { + blocksByBlockHeight := make(map[uint64]*MsgDeSoBlock) + for _, block := range blocks { + existingBlock, hasExistingBlock := blocksByBlockHeight[block.Header.Height] + if !hasExistingBlock || existingBlock.Header.GetView() < block.Header.GetView() { + blocksByBlockHeight[block.Header.Height] = block + } + } + return collections.MapValues(blocksByBlockHeight) +} + +// sortBlocksByBlockHeightAndTstamp sorts the blocks by height. 
+func (posFeeEstimator *PoSFeeEstimator) sortBlocksByBlockHeight(blocks []*MsgDeSoBlock) []*MsgDeSoBlock { + return collections.SortStable(blocks, func(ii, jj *MsgDeSoBlock) bool { if ii.Header.Height != jj.Header.Height { return ii.Header.Height < jj.Header.Height } - if ii.Header.TstampNanoSecs != jj.Header.TstampNanoSecs { - return ii.Header.TstampNanoSecs < jj.Header.TstampNanoSecs + if ii.Header.GetView() != jj.Header.GetView() { + return ii.Header.GetView() < jj.Header.GetView() } iiHash, err := ii.Hash() if iiHash == nil || err != nil { @@ -226,20 +267,34 @@ func (posFeeEstimator *PoSFeeEstimator) sortCachedBlocks() { }) } +// pruneBlocksToMaxNumPastBlocks reduces the number of blocks to the numPastBlocks param +func (posFeeEstimator *PoSFeeEstimator) pruneBlocksToMaxNumPastBlocks(blocks []*MsgDeSoBlock) []*MsgDeSoBlock { + numCachedBlocks := uint64(len(blocks)) + if numCachedBlocks <= posFeeEstimator.numPastBlocks { + return blocks + } + + // Prune the blocks with the lowest block heights. We do this by removing the + // first len(blocks) - numPastBlocks blocks from the blocks slice. + return blocks[numCachedBlocks-posFeeEstimator.numPastBlocks:] +} + // EstimateFeeRateNanosPerKB estimates the fee rate in nanos per KB for the current mempool // and past blocks using the congestionFactorBasisPoints, priorityPercentileBasisPoints, and // maxBlockSize params. 
func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( - congestionFactorBasisPoints uint64, - priorityPercentileBasisPoints uint64, + mempoolCongestionFactorBasisPoints uint64, + mempoolPriorityPercentileBasisPoints uint64, + pastBlocksCongestionFactorBasisPoints uint64, + pastBlocksPriorityPercentileBasisPoints uint64, maxBlockSize uint64, ) (uint64, error) { posFeeEstimator.rwLock.RLock() defer posFeeEstimator.rwLock.RUnlock() pastBlockFeeRate, err := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.pastBlocksTransactionRegister, - congestionFactorBasisPoints, - priorityPercentileBasisPoints, + pastBlocksCongestionFactorBasisPoints, + pastBlocksPriorityPercentileBasisPoints, posFeeEstimator.numPastBlocks, maxBlockSize, ) @@ -248,8 +303,8 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( } mempoolFeeRate, err := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.mempoolTransactionRegister, - congestionFactorBasisPoints, - priorityPercentileBasisPoints, + mempoolCongestionFactorBasisPoints, + mempoolPriorityPercentileBasisPoints, posFeeEstimator.numMempoolBlocks, maxBlockSize, ) diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index 08e1a18ee..ef2790830 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -109,7 +109,7 @@ func TestFeeEstimator(t *testing.T) { } dummyBlock := &MsgDeSoBlock{ Header: &MsgDeSoHeader{ - Height: 10, + Height: 11, }, Txns: pastBlocksTxns, } diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index c10037747..21a77d342 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -1,5 +1,3 @@ -//go:build relic - package lib import ( diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 864b78ed2..e0bd5bf8b 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -51,6 +51,14 @@ type Mempool interface { 
pastBlocksPriorityPercentileBasisPoints uint64, maxBlockSize uint64, ) (uint64, error) + EstimateFeeRate( + minFeeRateNanosPerKB uint64, + mempoolCongestionFactorBasisPoints uint64, + mempoolPriorityPercentileBasisPoints uint64, + pastBlocksCongestionFactorBasisPoints uint64, + pastBlocksPriorityPercentileBasisPoints uint64, + maxBlockSize uint64, + ) (uint64, error) } type MempoolIterator interface { @@ -406,6 +414,8 @@ func (mp *PosMempool) OnBlockConnected(block *MsgDeSoBlock) { mp.removeTransactionNoLock(existingTxn, true) } + mp.refreshNoLock() + // Add the block to the fee estimator. This is a best effort operation. If we fail to add the block // to the fee estimator, we log an error and continue. if err := mp.feeEstimator.AddBlock(block); err != nil { @@ -450,6 +460,8 @@ func (mp *PosMempool) OnBlockDisconnected(block *MsgDeSoBlock) { } } + mp.refreshNoLock() + // This is a best effort operation. If we fail to prune the mempool, we log an error and continue. if err := mp.pruneNoLock(); err != nil { glog.Errorf("PosMempool.AddTransaction: Problem pruning mempool: %v", err) @@ -911,8 +923,19 @@ func (mp *PosMempool) EstimateFee(txn *MsgDeSoTxn, pastBlocksCongestionFactorBasisPoints uint64, pastBlocksPriorityPercentileBasisPoints uint64, maxBlockSize uint64) (uint64, error) { - // TODO: replace MaxBasisPoints with variables configured by flags. 
return mp.feeEstimator.EstimateFee( txn, mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, pastBlocksCongestionFactorBasisPoints, pastBlocksPriorityPercentileBasisPoints, maxBlockSize) } + +func (mp *PosMempool) EstimateFeeRate( + _ uint64, + mempoolCongestionFactorBasisPoints uint64, + mempoolPriorityPercentileBasisPoints uint64, + pastBlocksCongestionFactorBasisPoints uint64, + pastBlocksPriorityPercentileBasisPoints uint64, + maxBlockSize uint64) (uint64, error) { + return mp.feeEstimator.EstimateFeeRateNanosPerKB( + mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, + pastBlocksCongestionFactorBasisPoints, pastBlocksPriorityPercentileBasisPoints, maxBlockSize) +} diff --git a/lib/pos_network.go b/lib/pos_network.go index 891315937..08aae1bbc 100644 --- a/lib/pos_network.go +++ b/lib/pos_network.go @@ -123,6 +123,17 @@ func (msg *MsgDeSoValidatorVote) FromBytes(data []byte) error { return nil } +func (msg *MsgDeSoValidatorVote) ToString() string { + return fmt.Sprintf( + "{MsgVersion: %d, VotingPublicKey: %s, BlockHash: %v, ProposedInView: %d, VotePartialSignature: %v}", + msg.MsgVersion, + msg.VotingPublicKey.ToAbbreviatedString(), + msg.BlockHash, + msg.ProposedInView, + msg.VotePartialSignature.ToAbbreviatedString(), + ) +} + // ================================================================== // Proof of Stake Timeout Message // ================================================================== @@ -240,6 +251,18 @@ func (msg *MsgDeSoValidatorTimeout) FromBytes(data []byte) error { return nil } +func (msg *MsgDeSoValidatorTimeout) ToString() string { + return fmt.Sprintf( + "{MsgVersion: %d, VotingPublicKey: %s, TimedOutView: %d, HighQCView: %v, HighQCBlockHash: %v, TimeoutPartialSignature: %s}", + msg.MsgVersion, + msg.VotingPublicKey.ToAbbreviatedString(), + msg.TimedOutView, + msg.HighQC.ProposedInView, + msg.HighQC.BlockHash, + msg.TimeoutPartialSignature.ToAbbreviatedString(), + ) +} + // A 
QuorumCertificate contains an aggregated signature from 2/3rds of the validators // on the network, weighted by stake. The signatures are associated with a block hash // and a view, both of which are identified in the certificate. diff --git a/lib/pos_network_message_interface.go b/lib/pos_network_message_interface.go index 96d19a9cb..0593f52e2 100644 --- a/lib/pos_network_message_interface.go +++ b/lib/pos_network_message_interface.go @@ -155,6 +155,10 @@ func (validator *ValidatorEntry) GetStakeAmount() *uint256.Int { return validator.TotalStakeAmountNanos } +func (validator *ValidatorEntry) GetDomains() [][]byte { + return validator.Domains +} + func ValidatorEntriesToConsensusInterface(validatorEntries []*ValidatorEntry) []consensus.Validator { validatorInterfaces := make([]consensus.Validator, len(validatorEntries)) for idx, validatorEntry := range validatorEntries { diff --git a/lib/pos_network_no_relic_test.go b/lib/pos_network_no_relic_test.go deleted file mode 100644 index 45eaeacf1..000000000 --- a/lib/pos_network_no_relic_test.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build !relic - -package lib - -import ( - "testing" - - "github.com/deso-protocol/core/bls" -) - -// This function is a placeholder needed to make the lib tests compile when the relic build tag -// isn't defined. Without the relic build tag, we are not able to generate any BLS keys. This -// function immediately fails the parent test that called it. 
-func _generateValidatorVotingPublicKeyAndSignature(t *testing.T) (*bls.PublicKey, *bls.Signature) { - t.FailNow() - return nil, nil -} diff --git a/lib/pos_network_test.go b/lib/pos_network_test.go index 8e3f262e3..4b02029e1 100644 --- a/lib/pos_network_test.go +++ b/lib/pos_network_test.go @@ -1,5 +1,3 @@ -//go:build relic - package lib import ( diff --git a/lib/pos_random_seed_test.go b/lib/pos_random_seed_test.go index a07e034b1..799a10a97 100644 --- a/lib/pos_random_seed_test.go +++ b/lib/pos_random_seed_test.go @@ -1,5 +1,3 @@ -//go:build relic - package lib import ( diff --git a/lib/pos_server_regtest.go b/lib/pos_server_regtest.go index e4a339416..42c607718 100644 --- a/lib/pos_server_regtest.go +++ b/lib/pos_server_regtest.go @@ -25,7 +25,7 @@ func (srv *Server) submitRegtestValidatorRegistrationTxns(block *MsgDeSoBlock) { } txnMeta := RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://deso.com")}, + Domains: [][]byte{[]byte("http://localhost:18000")}, DisableDelegatedStake: false, DelegatedStakeCommissionBasisPoints: 100, VotingPublicKey: blsSigner.GetPublicKey(), @@ -90,17 +90,3 @@ func (srv *Server) submitRegtestValidatorRegistrationTxns(block *MsgDeSoBlock) { } } } - -func (srv *Server) startRegtestFastHotStuffConsensus(block *MsgDeSoBlock) { - if block.Header.Height != uint64(srv.blockchain.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight-1) { - return - } - - if srv.fastHotStuffConsensus == nil || srv.fastHotStuffConsensus.IsRunning() { - return - } - - if err := srv.fastHotStuffConsensus.Start(); err != nil { - glog.Errorf(CLog(Yellow, "DeSoMiner._startThread: Error starting fast hotstuff consensus: %v"), err) - } -} diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index 99cf30134..eaa313d58 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -99,8 +99,11 @@ func (tr *TransactionRegister) addTransactionNoLock(txn *MempoolTx) error { return 
fmt.Errorf("TransactionRegister.AddTransaction: Transaction or transaction hash is nil") } + // If the transaction already exists in the register, then we can't add it. Rather than + // treating this as a no-op, we explicitly return an error to indicate that the add + // operation failed. if _, ok := tr.txnMembership[*txn.Hash]; ok { - return nil + return errors.Errorf("TransactionRegister.AddTransaction: Transaction already exists: %v", txn.Hash) } // If the transaction is too large, reject it. diff --git a/lib/remote_node.go b/lib/remote_node.go new file mode 100644 index 000000000..794f4d167 --- /dev/null +++ b/lib/remote_node.go @@ -0,0 +1,730 @@ +package lib + +import ( + "encoding/binary" + "fmt" + "github.com/btcsuite/btcd/wire" + "github.com/deso-protocol/core/bls" + "github.com/golang/glog" + "github.com/pkg/errors" + "golang.org/x/crypto/sha3" + "net" + "sync" + "time" +) + +type RemoteNodeStatus int + +const ( + RemoteNodeStatus_NotConnected RemoteNodeStatus = 0 + RemoteNodeStatus_Connected RemoteNodeStatus = 1 + RemoteNodeStatus_VersionSent RemoteNodeStatus = 2 + RemoteNodeStatus_VerackSent RemoteNodeStatus = 3 + RemoteNodeStatus_HandshakeCompleted RemoteNodeStatus = 4 + RemoteNodeStatus_Attempted RemoteNodeStatus = 5 + RemoteNodeStatus_Terminated RemoteNodeStatus = 6 +) + +type RemoteNodeId uint64 + +func NewRemoteNodeId(id uint64) RemoteNodeId { + return RemoteNodeId(id) +} + +func (id RemoteNodeId) ToUint64() uint64 { + return uint64(id) +} + +// RemoteNode is a chain-aware wrapper around the network Peer object. It is used to manage the lifecycle of a peer +// and to store blockchain-related metadata about the peer. The RemoteNode can wrap around either an inbound or outbound +// peer connection. For outbound peers, the RemoteNode is created prior to the connection being established. In this case, +// the RemoteNode will be first used to initiate an OutboundConnectionAttempt, and then store the resulting connected peer. 
+// For inbound peers, the RemoteNode is created after the connection is established in ConnectionManager. +// +// Once the RemoteNode's peer is set, the RemoteNode is used to manage the handshake with the peer. The handshake involves +// rounds of Version and Verack messages being sent between our node and the peer. The handshake is complete when both +// nodes have sent and received a Version and Verack message. Once the handshake is successful, the RemoteNode will +// emit a MsgDeSoPeerHandshakeComplete control message via the Server. +// +// In steady state, i.e. after the handshake is complete, the RemoteNode can be used to send a message to the peer, +// retrieve the peer's handshake metadata, and close the connection with the peer. The RemoteNode has a single-use +// lifecycle. Once the RemoteNode is terminated, it will be disposed of, and a new RemoteNode must be created if we +// wish to reconnect to the peer in the future. +type RemoteNode struct { + mtx sync.RWMutex + + peer *Peer + // The id is the unique identifier of this RemoteNode. For outbound connections, the id will be the same as the + // attemptId of the OutboundConnectionAttempt, and the subsequent id of the outbound peer. For inbound connections, + // the id will be the same as the inbound peer's id. + id RemoteNodeId + // validatorPublicKey is the BLS public key of the validator node. This is only set for validator nodes. For + // non-validator nodes, this will be nil. For outbound validators nodes, the validatorPublicKey will be set when + // the RemoteNode is instantiated. And for inbound validator nodes, the validatorPublicKey will be set when the + // handshake is completed. + validatorPublicKey *bls.PublicKey + // isPersistent identifies whether the RemoteNode is persistent or not. Persistent RemoteNodes is a sub-category of + // outbound RemoteNodes. They are different from non-persistent RemoteNodes from the very moment they are created. 
+ // Initially, an outbound RemoteNode is in an "attempted" state, meaning we dial the connection to the peer. The + // non-persistent RemoteNode is terminated after the first failed dial, while a persistent RemoteNode will keep + // trying to dial the peer indefinitely until the connection is established, or the node stops. + isPersistent bool + + connectionStatus RemoteNodeStatus + + params *DeSoParams + srv *Server + cmgr *ConnectionManager + + // minTxFeeRateNanosPerKB is the minimum transaction fee rate in nanos per KB that our node will accept. + minTxFeeRateNanosPerKB uint64 + // latestBlockHeight is the block height of our node's block tip. + latestBlockHeight uint64 + // nodeServices is a bitfield that indicates the services supported by our node. + nodeServices ServiceFlag + + // handshakeMetadata is used to store the information received from the peer during the handshake. + handshakeMetadata *HandshakeMetadata + // keystore is a reference to the node's BLS private key storage. In the context of a RemoteNode, the keystore is + // used in the Verack message for validator nodes to prove ownership of the validator BLS public key. + keystore *BLSKeystore + + // versionTimeExpected is the latest time by which we expect to receive a Version message from the peer. + // If the Version message is not received by this time, the connection will be terminated. + versionTimeExpected *time.Time + // verackTimeExpected is the latest time by which we expect to receive a Verack message from the peer. + // If the Verack message is not received by this time, the connection will be terminated. + verackTimeExpected *time.Time +} + +// HandshakeMetadata stores the information received from the peer during the Version and Verack exchange. +type HandshakeMetadata struct { + // ### The following fields are populated during the MsgDeSoVersion exchange. + // versionNonceSent is the nonce sent in the Version message to the peer. 
+ versionNonceSent uint64 + // versionNonceReceived is the nonce received in the Version message from the peer. + versionNonceReceived uint64 + // userAgent is a meta level label that can be used to analyze the network. + userAgent string + // serviceFlag is a bitfield that indicates the services supported by the peer. + serviceFlag ServiceFlag + // latestBlockHeight is the block height of the peer's block tip during the Version exchange. + latestBlockHeight uint64 + // minTxFeeRateNanosPerKB is the minimum transaction fee rate in nanos per KB that the peer will accept. + minTxFeeRateNanosPerKB uint64 + // advertisedProtocolVersion is the protocol version advertised by the peer. + advertisedProtocolVersion ProtocolVersionType + // negotiatedProtocolVersion is the protocol version negotiated between the peer and our node. This is the minimum + // of the advertised protocol version and our node's protocol version. + negotiatedProtocolVersion ProtocolVersionType + // versionNegotiated is true if the peer passed the version negotiation step. + versionNegotiated bool + + // ### The following fields are populated during the MsgDeSoVerack exchange. + // validatorPublicKey is the BLS public key of the peer, if the peer is a validator node. 
+ validatorPublicKey *bls.PublicKey +} + +func NewHandshakeMetadata() *HandshakeMetadata { + return &HandshakeMetadata{} +} + +func NewRemoteNode(id RemoteNodeId, validatorPublicKey *bls.PublicKey, isPersistent bool, srv *Server, + cmgr *ConnectionManager, keystore *BLSKeystore, params *DeSoParams, minTxFeeRateNanosPerKB uint64, + latestBlockHeight uint64, nodeServices ServiceFlag) *RemoteNode { + return &RemoteNode{ + id: id, + validatorPublicKey: validatorPublicKey, + isPersistent: isPersistent, + connectionStatus: RemoteNodeStatus_NotConnected, + handshakeMetadata: NewHandshakeMetadata(), + srv: srv, + cmgr: cmgr, + keystore: keystore, + params: params, + minTxFeeRateNanosPerKB: minTxFeeRateNanosPerKB, + latestBlockHeight: latestBlockHeight, + nodeServices: nodeServices, + } +} + +// setStatusHandshakeCompleted sets the connection status of the remote node to HandshakeCompleted. +func (rn *RemoteNode) setStatusHandshakeCompleted() { + rn.connectionStatus = RemoteNodeStatus_HandshakeCompleted +} + +// setStatusConnected sets the connection status of the remote node to connected. +func (rn *RemoteNode) setStatusConnected() { + rn.connectionStatus = RemoteNodeStatus_Connected +} + +// setStatusVersionSent sets the connection status of the remote node to version sent. +func (rn *RemoteNode) setStatusVersionSent() { + rn.connectionStatus = RemoteNodeStatus_VersionSent +} + +// setStatusVerackSent sets the connection status of the remote node to verack sent. +func (rn *RemoteNode) setStatusVerackSent() { + rn.connectionStatus = RemoteNodeStatus_VerackSent +} + +// setStatusTerminated sets the connection status of the remote node to terminated. +func (rn *RemoteNode) setStatusTerminated() { + rn.connectionStatus = RemoteNodeStatus_Terminated +} + +// setStatusAttempted sets the connection status of the remote node to attempted. 
+func (rn *RemoteNode) setStatusAttempted() { + rn.connectionStatus = RemoteNodeStatus_Attempted +} + +func (rn *RemoteNode) GetId() RemoteNodeId { + return rn.id +} + +func (rn *RemoteNode) GetPeer() *Peer { + return rn.peer +} + +func (rn *RemoteNode) GetNegotiatedProtocolVersion() ProtocolVersionType { + return rn.handshakeMetadata.negotiatedProtocolVersion +} + +func (rn *RemoteNode) GetValidatorPublicKey() *bls.PublicKey { + return rn.validatorPublicKey +} + +func (rn *RemoteNode) GetServiceFlag() ServiceFlag { + return rn.handshakeMetadata.serviceFlag +} + +func (rn *RemoteNode) GetLatestBlockHeight() uint64 { + return rn.handshakeMetadata.latestBlockHeight +} + +func (rn *RemoteNode) GetUserAgent() string { + return rn.handshakeMetadata.userAgent +} + +func (rn *RemoteNode) GetNetAddress() *wire.NetAddress { + if !rn.IsHandshakeCompleted() || rn.GetPeer() == nil { + return nil + } + return rn.GetPeer().NetAddress() +} + +func (rn *RemoteNode) IsInbound() bool { + return rn.peer != nil && !rn.peer.IsOutbound() +} + +func (rn *RemoteNode) IsOutbound() bool { + return rn.peer != nil && rn.peer.IsOutbound() +} + +func (rn *RemoteNode) IsPersistent() bool { + return rn.isPersistent +} + +func (rn *RemoteNode) IsNotConnected() bool { + return rn.connectionStatus == RemoteNodeStatus_NotConnected +} + +func (rn *RemoteNode) IsConnected() bool { + return rn.connectionStatus == RemoteNodeStatus_Connected +} + +func (rn *RemoteNode) IsVersionSent() bool { + return rn.connectionStatus == RemoteNodeStatus_VersionSent +} + +func (rn *RemoteNode) IsVerackSent() bool { + return rn.connectionStatus == RemoteNodeStatus_VerackSent +} + +func (rn *RemoteNode) IsHandshakeCompleted() bool { + return rn.connectionStatus == RemoteNodeStatus_HandshakeCompleted +} + +func (rn *RemoteNode) IsTerminated() bool { + return rn.connectionStatus == RemoteNodeStatus_Terminated +} + +func (rn *RemoteNode) IsValidator() bool { + if !rn.IsHandshakeCompleted() { + return false + } + return 
rn.hasValidatorServiceFlag() +} + +func (rn *RemoteNode) IsExpectedValidator() bool { + return rn.GetValidatorPublicKey() != nil +} + +func (rn *RemoteNode) hasValidatorServiceFlag() bool { + return rn.GetServiceFlag().HasService(SFPosValidator) +} + +// DialOutboundConnection dials an outbound connection to the provided netAddr. +func (rn *RemoteNode) DialOutboundConnection(netAddr *wire.NetAddress) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + + if !rn.IsNotConnected() { + return fmt.Errorf("RemoteNode.DialOutboundConnection: RemoteNode is not in the NotConnected state") + } + + rn.cmgr.DialOutboundConnection(netAddr, rn.GetId().ToUint64()) + rn.setStatusAttempted() + return nil +} + +// DialPersistentOutboundConnection dials a persistent outbound connection to the provided netAddr. +func (rn *RemoteNode) DialPersistentOutboundConnection(netAddr *wire.NetAddress) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + + if !rn.IsNotConnected() { + return fmt.Errorf("RemoteNode.DialPersistentOutboundConnection: RemoteNode is not in the NotConnected state") + } + + rn.cmgr.DialPersistentOutboundConnection(netAddr, rn.GetId().ToUint64()) + rn.setStatusAttempted() + return nil +} + +// AttachInboundConnection creates an inbound peer once a successful inbound connection has been established. +func (rn *RemoteNode) AttachInboundConnection(conn net.Conn, na *wire.NetAddress) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + + // At this point, the RemoteNode must be in the NotConnected state. If the RemoteNode already progressed to + // another state, we return an error. 
+ if !rn.IsNotConnected() { + return fmt.Errorf("RemoteNode.AttachInboundConnection: RemoteNode is not in the NotConnected state") + } + + id := rn.GetId().ToUint64() + rn.peer = rn.cmgr.ConnectPeer(id, conn, na, false, false) + versionTimeExpected := time.Now().Add(rn.params.VersionNegotiationTimeout) + rn.versionTimeExpected = &versionTimeExpected + rn.setStatusConnected() + return nil +} + +// AttachOutboundConnection creates an outbound peer once a successful outbound connection has been established. +func (rn *RemoteNode) AttachOutboundConnection(conn net.Conn, na *wire.NetAddress, isPersistent bool) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + + if rn.connectionStatus != RemoteNodeStatus_Attempted { + return fmt.Errorf("RemoteNode.AttachOutboundConnection: RemoteNode is not in the Attempted state") + } + + id := rn.GetId().ToUint64() + rn.peer = rn.cmgr.ConnectPeer(id, conn, na, true, isPersistent) + versionTimeExpected := time.Now().Add(rn.params.VersionNegotiationTimeout) + rn.versionTimeExpected = &versionTimeExpected + rn.setStatusConnected() + return nil +} + +// Disconnect disconnects the remote node, closing the attempted connection or the established connection. 
+func (rn *RemoteNode) Disconnect() { + rn.mtx.Lock() + defer rn.mtx.Unlock() + + if rn.connectionStatus == RemoteNodeStatus_Terminated { + return + } + glog.V(2).Infof("RemoteNode.Disconnect: Disconnecting from peer (id= %d, status= %v)", + rn.id, rn.connectionStatus) + + id := rn.GetId().ToUint64() + switch rn.connectionStatus { + case RemoteNodeStatus_Attempted: + rn.cmgr.CloseAttemptedConnection(id) + case RemoteNodeStatus_Connected, RemoteNodeStatus_VersionSent, RemoteNodeStatus_VerackSent, + RemoteNodeStatus_HandshakeCompleted: + rn.cmgr.CloseConnection(id) + } + rn.setStatusTerminated() +} + +func (rn *RemoteNode) SendMessage(desoMsg DeSoMessage) error { + rn.mtx.RLock() + rn.mtx.RUnlock() + + if rn.connectionStatus != RemoteNodeStatus_HandshakeCompleted { + return fmt.Errorf("SendMessage: Remote node is not connected") + } + + return rn.sendMessage(desoMsg) +} + +func (rn *RemoteNode) sendMessage(desoMsg DeSoMessage) error { + if err := rn.cmgr.SendMessage(desoMsg, rn.GetId().ToUint64()); err != nil { + return fmt.Errorf("SendMessage: Problem sending message to peer (id= %d): %v", rn.id, err) + } + return nil +} + +// InitiateHandshake is a starting point for a peer handshake. If the peer is outbound, a version message is sent +// to the peer. If the peer is inbound, the peer is expected to send a version message to us first. +func (rn *RemoteNode) InitiateHandshake(nonce uint64) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + + if rn.connectionStatus != RemoteNodeStatus_Connected { + return fmt.Errorf("InitiateHandshake: Remote node is not connected") + } + + if rn.GetPeer().IsOutbound() { + if err := rn.sendVersionMessage(nonce); err != nil { + return fmt.Errorf("InitiateHandshake: Problem sending version message to peer (id= %d): %v", rn.id, err) + } + rn.setStatusVersionSent() + } + return nil +} + +// sendVersionMessage generates and sends a version message to a RemoteNode peer. The message will contain the nonce +// that is passed in as an argument. 
+func (rn *RemoteNode) sendVersionMessage(nonce uint64) error { + verMsg := rn.newVersionMessage(nonce) + + // Record the nonce of this version message before we send it so we can + // detect self connections and so we can validate that the peer actually + // controls the IP she's supposedly communicating to us from. + rn.handshakeMetadata.versionNonceSent = nonce + + if err := rn.sendMessage(verMsg); err != nil { + return fmt.Errorf("sendVersionMessage: Problem sending version message to peer (id= %d): %v", rn.id, err) + } + return nil +} + +// newVersionMessage returns a new version message that can be sent to a RemoteNode. The message will contain the +// nonce that is passed in as an argument. +func (rn *RemoteNode) newVersionMessage(nonce uint64) *MsgDeSoVersion { + ver := NewMessage(MsgTypeVersion).(*MsgDeSoVersion) + + ver.Version = rn.params.ProtocolVersion.ToUint64() + // Set the services bitfield to indicate what services this node supports. + ver.Services = rn.nodeServices + + // We use an int64 instead of a uint64 for convenience. + ver.TstampSecs = time.Now().Unix() + + ver.Nonce = nonce + ver.UserAgent = rn.params.UserAgent + + // When a node asks you for what height you have, you should reply with the height of the latest actual block you + // have. This makes it so that peers who have up-to-date headers but missing blocks won't be considered for initial + // block download. + ver.LatestBlockHeight = rn.latestBlockHeight + + // Set the minimum fee rate the peer will accept. + ver.MinFeeRateNanosPerKB = rn.minTxFeeRateNanosPerKB + + return ver +} + +func (rn *RemoteNode) IsTimedOut() bool { + if rn.IsTerminated() { + return true + } + if rn.IsConnected() || rn.IsVersionSent() { + return rn.versionTimeExpected.Before(time.Now()) + } + if rn.IsVerackSent() { + return rn.verackTimeExpected.Before(time.Now()) + } + return false +} + +// HandleVersionMessage is called upon receiving a version message from the RemoteNode's peer. 
The peer may be the one +// initiating the handshake, in which case, we should respond with our own version message. To do this, we pass the +// responseNonce to this function, which we will use in our response version message. +func (rn *RemoteNode) HandleVersionMessage(verMsg *MsgDeSoVersion, responseNonce uint64) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + + if !rn.IsConnected() && !rn.IsVersionSent() { + return fmt.Errorf("HandleVersionMessage: RemoteNode is not connected or version exchange has already "+ + "been completed, connectionStatus: %v", rn.connectionStatus) + } + + // Verify that the peer's version matches our minimal supported version. + if verMsg.Version < rn.params.MinProtocolVersion { + return fmt.Errorf("RemoteNode.HandleVersionMessage: Requesting disconnect for id: (%v) "+ + "protocol version too low. Peer version: %v, min version: %v", rn.id, verMsg.Version, rn.params.MinProtocolVersion) + } + + // Verify that the peer's version message is sent within the version negotiation timeout. + if rn.versionTimeExpected.Before(time.Now()) { + return fmt.Errorf("RemoteNode.HandleVersionMessage: Requesting disconnect for id: (%v) "+ + "version timeout. Time expected: %v, now: %v", rn.id, rn.versionTimeExpected.UnixMicro(), time.Now().UnixMicro()) + } + + vMeta := rn.handshakeMetadata + // Record the version the peer is using. + vMeta.advertisedProtocolVersion = NewProtocolVersionType(verMsg.Version) + // Make sure the latest supported protocol version is ProtocolVersion2. + if vMeta.advertisedProtocolVersion.After(ProtocolVersion2) { + return fmt.Errorf("RemoteNode.HandleVersionMessage: Requesting disconnect for id: (%v) "+ + "protocol version too high. Peer version: %v, max version: %v", rn.id, verMsg.Version, ProtocolVersion2) + } + + // Decide on the protocol version to use for this connection. 
+ negotiatedVersion := rn.params.ProtocolVersion + if verMsg.Version < rn.params.ProtocolVersion.ToUint64() { + // In order to smoothly transition to the PoS fork, we prevent establishing new outbound connections with + // outdated nodes that run on ProtocolVersion1. This is because ProtocolVersion1 nodes will not be able to + // validate the PoS blocks and will be stuck on the PoW chain, unless they upgrade to ProtocolVersion2. + if rn.params.ProtocolVersion == ProtocolVersion2 && rn.IsOutbound() { + return fmt.Errorf("RemoteNode.HandleVersionMessage: Requesting disconnect for id: (%v). Version too low. "+ + "Outbound RemoteNodes must use at least ProtocolVersion2, instead received version: %v", rn.id, verMsg.Version) + } + + negotiatedVersion = NewProtocolVersionType(verMsg.Version) + } + + vMeta.negotiatedProtocolVersion = negotiatedVersion + + // Record the services the peer is advertising. + vMeta.serviceFlag = verMsg.Services + // If the RemoteNode was connected with an expectation of being a validator, make sure that its advertised ServiceFlag + // indicates that it is a validator. + if !rn.hasValidatorServiceFlag() && rn.validatorPublicKey != nil { + return fmt.Errorf("RemoteNode.HandleVersionMessage: Requesting disconnect for id: (%v). "+ + "Expected validator, but received invalid ServiceFlag: %v", rn.id, verMsg.Services) + } + // If the RemoteNode is on ProtocolVersion1, then it must not have the validator service flag set. + if rn.hasValidatorServiceFlag() && vMeta.advertisedProtocolVersion.Before(ProtocolVersion2) { + return fmt.Errorf("RemoteNode.HandleVersionMessage: Requesting disconnect for id: (%v). "+ + "RemoteNode has SFValidator service flag, but doesn't have ProtocolVersion2 or later", rn.id) + } + + // Save the received version nonce so we can include it in our verack message. + vMeta.versionNonceReceived = verMsg.Nonce + + // Set the peer info-related fields. 
+ vMeta.userAgent = verMsg.UserAgent + vMeta.latestBlockHeight = verMsg.LatestBlockHeight + vMeta.minTxFeeRateNanosPerKB = verMsg.MinFeeRateNanosPerKB + + // Respond to the version message if this is an inbound peer. + if rn.IsInbound() { + if err := rn.sendVersionMessage(responseNonce); err != nil { + return errors.Wrapf(err, "RemoteNode.HandleVersionMessage: Problem sending version message to peer (id= %d)", rn.id) + } + } + + // After sending and receiving a compatible version, send the verack message. Notice that we don't wait for the + // peer's verack message even if it is an inbound peer. Instead, we just send the verack message right away. + + // Set the latest time by which we should receive a verack message from the peer. + verackTimeExpected := time.Now().Add(rn.params.VerackNegotiationTimeout) + rn.verackTimeExpected = &verackTimeExpected + if err := rn.sendVerack(); err != nil { + return errors.Wrapf(err, "RemoteNode.HandleVersionMessage: Problem sending verack message to peer (id= %d)", rn.id) + } + + // Update the timeSource now that we've gotten a version message from the peer. + rn.setStatusVerackSent() + return nil +} + +// sendVerack constructs and sends a verack message to the peer. +func (rn *RemoteNode) sendVerack() error { + verackMsg, err := rn.newVerackMessage() + if err != nil { + return err + } + + if err := rn.sendMessage(verackMsg); err != nil { + return errors.Wrapf(err, "RemoteNode.SendVerack: Problem sending verack message to peer (id= %d): %v", rn.id, err) + } + return nil +} + +// newVerackMessage constructs a verack message to be sent to the peer. +func (rn *RemoteNode) newVerackMessage() (*MsgDeSoVerack, error) { + verack := NewMessage(MsgTypeVerack).(*MsgDeSoVerack) + vMeta := rn.handshakeMetadata + + switch vMeta.negotiatedProtocolVersion { + case ProtocolVersion0, ProtocolVersion1: + // For protocol versions 0 and 1, we just send back the nonce we received from the peer in the version message. 
+ verack.Version = VerackVersion0 + verack.NonceReceived = vMeta.versionNonceReceived + case ProtocolVersion2: + // For protocol version 2, we need to send the nonce we received from the peer in their version message. + // We also need to send our own nonce, which we generate for our version message. In addition, we need to + // send a current timestamp (in microseconds). We then sign the tuple of (nonceReceived, nonceSent, tstampMicro) + // using our validator BLS key, and send the signature along with our public key. + var err error + verack.Version = VerackVersion1 + verack.NonceReceived = vMeta.versionNonceReceived + verack.NonceSent = vMeta.versionNonceSent + tstampMicro := uint64(time.Now().UnixMicro()) + verack.TstampMicro = tstampMicro + // If the RemoteNode is not a validator, then we don't need to sign the verack message. + if !rn.nodeServices.HasService(SFPosValidator) { + break + } + verack.PublicKey = rn.keystore.GetSigner().GetPublicKey() + verack.Signature, err = rn.keystore.GetSigner().SignPoSValidatorHandshake(verack.NonceSent, verack.NonceReceived, tstampMicro) + if err != nil { + return nil, fmt.Errorf("RemoteNode.newVerackMessage: Problem signing verack message: %v", err) + } + } + return verack, nil +} + +// HandleVerackMessage handles a verack message received from the peer. +func (rn *RemoteNode) HandleVerackMessage(vrkMsg *MsgDeSoVerack) error { + rn.mtx.Lock() + defer rn.mtx.Unlock() + + if rn.connectionStatus != RemoteNodeStatus_VerackSent { + return fmt.Errorf("RemoteNode.HandleVerackMessage: Requesting disconnect for id: (%v) "+ + "verack received while in state: %v", rn.id, rn.connectionStatus) + } + + if rn.verackTimeExpected != nil && rn.verackTimeExpected.Before(time.Now()) { + return fmt.Errorf("RemoteNode.HandleVerackMessage: Requesting disconnect for id: (%v) "+ + "verack timeout. 
Time expected: %v, now: %v", rn.id, rn.verackTimeExpected.UnixMicro(), time.Now().UnixMicro()) + } + + var err error + vMeta := rn.handshakeMetadata + switch vMeta.negotiatedProtocolVersion { + case ProtocolVersion0, ProtocolVersion1: + err = rn.validateVerackPoW(vrkMsg) + case ProtocolVersion2: + err = rn.validateVerackPoS(vrkMsg) + } + + if err != nil { + return errors.Wrapf(err, "RemoteNode.HandleVerackMessage: Problem validating verack message from peer (id= %d)", rn.id) + } + + // If we get here then the peer has successfully completed the handshake. + vMeta.versionNegotiated = true + rn._logVersionSuccess() + rn.setStatusHandshakeCompleted() + + return nil +} + +func (rn *RemoteNode) validateVerackPoW(vrkMsg *MsgDeSoVerack) error { + vMeta := rn.handshakeMetadata + + // Verify that the verack message is formatted correctly according to the PoW standard. + if vrkMsg.Version != VerackVersion0 { + return fmt.Errorf("RemoteNode.validateVerackPoW: Requesting disconnect for id: (%v) "+ + "verack version mismatch; message: %v; expected: %v", rn.id, vrkMsg.Version, VerackVersion0) + } + + // If the verack message has a nonce that wasn't previously sent to us in the version message, return an error. + if vrkMsg.NonceReceived != vMeta.versionNonceSent { + return fmt.Errorf("RemoteNode.validateVerackPoW: Requesting disconnect for id: (%v) nonce mismatch; "+ + "message: %v; nonceSent: %v", rn.id, vrkMsg.NonceReceived, vMeta.versionNonceSent) + } + + return nil +} + +func (rn *RemoteNode) validateVerackPoS(vrkMsg *MsgDeSoVerack) error { + vMeta := rn.handshakeMetadata + + // Verify that the verack message is formatted correctly according to the PoS standard. 
+ if vrkMsg.Version != VerackVersion1 { + return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+ + "verack version mismatch; message: %v; expected: %v", rn.id, vrkMsg.Version, VerackVersion1) + } + + // Verify that the counterparty's verack message's NonceReceived matches the NonceSent we sent. + if vrkMsg.NonceReceived != vMeta.versionNonceSent { + return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) nonce mismatch; "+ + "message: %v; nonceSent: %v", rn.id, vrkMsg.NonceReceived, vMeta.versionNonceSent) + } + + // Verify that the counterparty's verack message's NonceSent matches the NonceReceived we sent. + if vrkMsg.NonceSent != vMeta.versionNonceReceived { + return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+ + "verack nonce mismatch; message: %v; expected: %v", rn.id, vrkMsg.NonceSent, vMeta.versionNonceReceived) + } + + // Get the current time in microseconds and make sure the verack message's timestamp is within 15 minutes of it. + timeNowMicro := uint64(time.Now().UnixMicro()) + if vrkMsg.TstampMicro < timeNowMicro-rn.params.HandshakeTimeoutMicroSeconds { + return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+ + "verack timestamp too far in the past. Time now: %v, verack timestamp: %v", rn.id, timeNowMicro, vrkMsg.TstampMicro) + } + + // If the RemoteNode is not a validator, then we don't need to verify the verack message's signature. + if !rn.hasValidatorServiceFlag() { + return nil + } + + // Make sure the verack message's public key and signature are not nil. + if vrkMsg.PublicKey == nil || vrkMsg.Signature == nil { + return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+ + "verack public key or signature is nil", rn.id) + } + + // Verify the verack message's signature. 
+ ok, err := BLSVerifyPoSValidatorHandshake(vrkMsg.NonceSent, vrkMsg.NonceReceived, vrkMsg.TstampMicro,
+ vrkMsg.Signature, vrkMsg.PublicKey)
+ if err != nil {
+ return errors.Wrapf(err, "RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+
+ "verack signature verification failed with error", rn.id)
+ }
+ if !ok {
+ return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+
+ "verack signature verification failed", rn.id)
+ }
+
+ if rn.validatorPublicKey != nil && rn.validatorPublicKey.Serialize() != vrkMsg.PublicKey.Serialize() {
+ return fmt.Errorf("RemoteNode.validateVerackPoS: Requesting disconnect for id: (%v) "+
+ "verack public key mismatch; message: %v; expected: %v", rn.id, vrkMsg.PublicKey, rn.validatorPublicKey)
+ }
+
+ // If we get here then the verack message is valid. Set the validator public key on the peer.
+ vMeta.validatorPublicKey = vrkMsg.PublicKey
+ rn.validatorPublicKey = vrkMsg.PublicKey
+ return nil
+}
+
+func (rn *RemoteNode) _logVersionSuccess() {
+ inboundStr := "INBOUND"
+ if rn.IsOutbound() {
+ inboundStr = "OUTBOUND"
+ }
+ persistentStr := "PERSISTENT"
+ if !rn.IsPersistent() {
+ persistentStr = "NON-PERSISTENT"
+ }
+ logStr := fmt.Sprintf("SUCCESS version negotiation for (%s) (%s) id=(%v).", inboundStr, persistentStr, rn.id.ToUint64())
+ glog.V(1).Info(logStr)
+}
+
+func GetVerackHandshakePayload(nonceReceived uint64, nonceSent uint64, tstampMicro uint64) [32]byte {
+ // The payload for the verack message is the two nonces and the timestamp concatenated together.
+ // We do this so that we can sign the nonces and verify the signature on the other side.
+ nonceReceivedBytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(nonceReceivedBytes, nonceReceived)
+
+ nonceSentBytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(nonceSentBytes, nonceSent)
+
+ tstampBytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(tstampBytes, tstampMicro)
+
+ payload := append(nonceReceivedBytes, nonceSentBytes...) 
+ payload = append(payload, tstampBytes...) + + return sha3.Sum256(payload) +} diff --git a/lib/server.go b/lib/server.go index d0940cc03..8824b4c10 100644 --- a/lib/server.go +++ b/lib/server.go @@ -12,15 +12,17 @@ import ( "sync/atomic" "time" + "github.com/btcsuite/btcd/wire" + "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/consensus" + "github.com/decred/dcrd/lru" "github.com/DataDog/datadog-go/statsd" "github.com/btcsuite/btcd/addrmgr" chainlib "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" - "github.com/deso-protocol/core/consensus" "github.com/deso-protocol/go-deadlock" "github.com/dgraph-io/badger/v3" "github.com/golang/glog" @@ -64,7 +66,10 @@ type Server struct { TxIndex *TXIndex params *DeSoParams - fastHotStuffConsensus *FastHotStuffConsensus + networkManager *NetworkManager + + fastHotStuffConsensus *FastHotStuffConsensus + fastHotStuffConsensusTransitionCheckTime time.Time // All messages received from peers get sent from the ConnectionManager to the // Server through this channel. @@ -102,6 +107,9 @@ type Server struct { // point we can make the optimization. SyncPeer *Peer + // When --connect-ips is set, we don't connect to anything from the addrmgr. + connectIps []string + // If we're syncing state using hypersync, we'll keep track of the progress using HyperSyncProgress. // It stores information about all the prefixes that we're fetching. The way that HyperSyncProgress // is organized allows for multi-peer state synchronization. In such case, we would assign prefixes @@ -128,7 +136,9 @@ type Server struct { // It is organized in this way so that we can limit the number of addresses we // are distributing for a single peer to avoid a DOS attack. 
addrsToBroadcastLock deadlock.RWMutex - addrsToBroadcastt map[string][]*SingleAddr + addrsToBroadcast map[string][]*SingleAddr + + AddrMgr *addrmgr.AddrManager // When set to true, we disable the ConnectionManager DisableNetworking bool @@ -176,6 +186,10 @@ func (srv *Server) ResetRequestQueues() { srv.requestedTransactionsMap = make(map[BlockHash]*GetDataRequestInfo) } +func (srv *Server) GetNetworkManager() *NetworkManager { + return srv.networkManager +} + // dataLock must be acquired for writing before calling this function. func (srv *Server) _removeRequest(hash *BlockHash) { // Just be lazy and remove the hash from everything indiscriminately to @@ -400,6 +414,7 @@ func NewServer( _augmentedBlockViewRefreshIntervalMillis uint64, _posBlockProductionIntervalMilliseconds uint64, _posTimeoutBaseDurationMilliseconds uint64, + _stateSyncerMempoolTxnSyncLimit uint64, ) ( _srv *Server, _err error, @@ -413,7 +428,7 @@ func NewServer( if _stateChangeDir != "" { // Create the state change syncer to handle syncing state changes to disk, and assign some of its methods // to the event manager. - stateChangeSyncer = NewStateChangeSyncer(_stateChangeDir, _syncType) + stateChangeSyncer = NewStateChangeSyncer(_stateChangeDir, _syncType, _stateSyncerMempoolTxnSyncLimit) eventManager.OnStateSyncerOperation(stateChangeSyncer._handleStateSyncerOperation) eventManager.OnStateSyncerFlushed(stateChangeSyncer._handleStateSyncerFlush) } @@ -444,7 +459,9 @@ func NewServer( snapshot: _snapshot, nodeMessageChannel: _nodeMessageChan, forceChecksum: _forceChecksum, + AddrMgr: _desoAddrMgr, params: _params, + connectIps: _connectIps, } if stateChangeSyncer != nil { @@ -457,12 +474,10 @@ func NewServer( timesource := chainlib.NewMedianTime() // Create a new connection manager but note that it won't be initialized until Start(). 
- _incomingMessages := make(chan *ServerMessage, (_targetOutboundPeers+_maxInboundPeers)*3) + _incomingMessages := make(chan *ServerMessage, _params.ServerMessageChannelSize+(_targetOutboundPeers+_maxInboundPeers)*3) _cmgr := NewConnectionManager( - _params, _desoAddrMgr, _listeners, _connectIps, timesource, - _targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP, - _hyperSync, _syncType, _stallTimeoutSeconds, _minFeeRateNanosPerKB, - _incomingMessages, srv) + _params, _listeners, _hyperSync, _syncType, _stallTimeoutSeconds, + _minFeeRateNanosPerKB, _incomingMessages, srv) // Set up the blockchain data structure. This is responsible for accepting new // blocks, keeping track of the best chain, and keeping all of that state up @@ -487,13 +502,37 @@ func NewServer( return nil, errors.Wrapf(err, "NewServer: Problem initializing blockchain"), true } + headerCumWorkStr := "" + headerCumWork := BigintToHash(_chain.headerTip().CumWork) + if headerCumWork != nil { + headerCumWorkStr = hex.EncodeToString(headerCumWork[:]) + } + blockCumWorkStr := "" + blockCumWork := BigintToHash(_chain.blockTip().CumWork) + if blockCumWork != nil { + blockCumWorkStr = hex.EncodeToString(blockCumWork[:]) + } glog.V(1).Infof("Initialized chain: Best Header Height: %d, Header Hash: %s, Header CumWork: %s, Best Block Height: %d, Block Hash: %s, Block CumWork: %s", _chain.headerTip().Height, hex.EncodeToString(_chain.headerTip().Hash[:]), - hex.EncodeToString(BigintToHash(_chain.headerTip().CumWork)[:]), + headerCumWorkStr, _chain.blockTip().Height, hex.EncodeToString(_chain.blockTip().Hash[:]), - hex.EncodeToString(BigintToHash(_chain.blockTip().CumWork)[:])) + blockCumWorkStr) + + nodeServices := SFFullNodeDeprecated + if _hyperSync { + nodeServices |= SFHyperSync + } + if archivalMode { + nodeServices |= SFArchivalNode + } + if _blsKeystore != nil { + nodeServices |= SFPosValidator + } + srv.networkManager = NewNetworkManager(_params, srv, _chain, _cmgr, _blsKeystore, 
_desoAddrMgr, + _connectIps, _targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP, + _minFeeRateNanosPerKB, nodeServices) if srv.stateChangeSyncer != nil { srv.stateChangeSyncer.BlockHeight = uint64(_chain.headerTip().Height) @@ -594,6 +633,7 @@ func NewServer( if _blsKeystore != nil { srv.fastHotStuffConsensus = NewFastHotStuffConsensus( _params, + srv.networkManager, _chain, _posMempool, _blsKeystore.GetSigner(), @@ -605,7 +645,6 @@ func NewServer( // off the PoS consensus once the miner is done. if _params.NetworkType == NetworkType_TESTNET && _miner != nil && _blockProducer != nil { _miner.AddBlockMinedListener(srv.submitRegtestValidatorRegistrationTxns) - _miner.AddBlockMinedListener(srv.startRegtestFastHotStuffConsensus) } } @@ -633,7 +672,7 @@ func NewServer( } // Initialize the addrs to broadcast map. - srv.addrsToBroadcastt = make(map[string][]*SingleAddr) + srv.addrsToBroadcast = make(map[string][]*SingleAddr) // This will initialize the request queues. srv.ResetRequestQueues() @@ -643,6 +682,10 @@ func NewServer( timer.Initialize() srv.timer = timer + if srv.stateChangeSyncer != nil { + srv.stateChangeSyncer.StartMempoolSyncRoutine(srv) + } + // If shouldRestart is true, it means that the state checksum is likely corrupted, and we need to enter a recovery mode. // This can happen if the node was terminated mid-operation last time it was running. The recovery process rolls back // blocks to the beginning of the current snapshot epoch and resets to the state checksum to the epoch checksum. @@ -768,16 +811,17 @@ func (srv *Server) GetSnapshot(pp *Peer) { return } } - // If operationQueueSemaphore is full, we are already storing too many chunks in memory. Block the thread while - // we wait for the queue to clear up. - srv.snapshot.operationQueueSemaphore <- struct{}{} - // Now send a message to the peer to fetch the snapshot chunk. 
- pp.AddDeSoMessage(&MsgDeSoGetSnapshot{ - SnapshotStartKey: lastReceivedKey, - }, false) - - glog.V(2).Infof("Server.GetSnapshot: Sending a GetSnapshot message to peer (%v) "+ - "with Prefix (%v) and SnapshotStartEntry (%v)", pp, prefix, lastReceivedKey) + // As a pace-setting mechanism, we enqueue to the operationQueueSemaphore in a go routine. The request will be blocked + // if there are too many requests in memory. + go func() { + srv.snapshot.operationQueueSemaphore <- struct{}{} + // Now send a message to the peer to fetch the snapshot chunk. + glog.V(2).Infof("Server.GetSnapshot: Sending a GetSnapshot message to peer (%v) "+ + "with Prefix (%v) and SnapshotStartEntry (%v)", pp, prefix, lastReceivedKey) + pp.AddDeSoMessage(&MsgDeSoGetSnapshot{ + SnapshotStartKey: lastReceivedKey, + }, false) + }() } // GetBlocksToStore is part of the archival mode, which makes the node download all historical blocks after completing @@ -876,19 +920,13 @@ func (srv *Server) GetBlocks(pp *Peer, maxHeight int) { func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { printHeight := pp.StartingBlockHeight() - if srv.blockchain.headerTip().Height > printHeight { - printHeight = srv.blockchain.headerTip().Height + if uint64(srv.blockchain.headerTip().Height) > printHeight { + printHeight = uint64(srv.blockchain.headerTip().Height) } glog.Infof(CLog(Yellow, fmt.Sprintf("Received header bundle with %v headers "+ - "in state %s from peer %v. Downloaded ( %v / %v ) total headers", + "in state %s from peer %v. Downloaded ( %v / %v ) total headers. Current Chain State: %v", len(msg.Headers), srv.blockchain.chainState(), pp, - srv.blockchain.headerTip().Header.Height, printHeight))) - - // If the node is running a Fast-HotStuff validator and the consensus is running, - // in the steady-state, then it means that we don't sync the header chain separately. 
- if srv.fastHotStuffConsensus != nil && srv.fastHotStuffConsensus.IsRunning() { - return - } + srv.blockchain.headerTip().Header.Height, printHeight, srv.blockchain.ChainState()))) // If we get here, it means that the node is not currently running a Fast-HotStuff // validator or that the node is syncing. In either case, we sync headers according @@ -938,8 +976,9 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // TODO: Delete? This is redundant. numNewHeaders++ - // Process the header, as we haven't seen it before. - _, isOrphan, err := srv.blockchain.ProcessHeader(headerReceived, headerHash) + // Process the header, as we haven't seen it before, set verifySignatures to false + // if we're in the process of syncing. + _, isOrphan, err := srv.blockchain.ProcessHeader(headerReceived, headerHash, !srv.blockchain.isSyncing()) // If this header is an orphan or we encountered an error for any reason, // disconnect from the peer. Because every header is sent in response to @@ -1192,16 +1231,6 @@ func (srv *Server) _handleGetSnapshot(pp *Peer, msg *MsgDeSoGetSnapshot) { func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { srv.timer.End("Get Snapshot") - // If the node is running a Fast-HotStuff validator and the consensus is running, - // in the steady-state, then it means that we don't download and handle snapshots. - if srv.fastHotStuffConsensus != nil && srv.fastHotStuffConsensus.IsRunning() { - return - } - - // If we get here, it means that the node is not currently running a Fast-HotStuff - // validator or that the node is syncing. In either case, we handle snapshots according - // to the Hypersync rules. - srv.timer.Start("Server._handleSnapshot Main") // If there are no db entries in the msg, we should also disconnect the peer. There should always be // at least one entry sent, which is either the empty entry or the last key we've requested. 
@@ -1232,6 +1261,8 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { "<%v>, Last entry: <%v>), (number of entries: %v), metadata (%v), and isEmpty (%v), from Peer %v", msg.SnapshotChunk[0].Key, msg.SnapshotChunk[len(msg.SnapshotChunk)-1].Key, len(msg.SnapshotChunk), msg.SnapshotMetadata, msg.SnapshotChunk[0].IsEmpty(), pp))) + // Free up a slot in the operationQueueSemaphore, now that a chunk has been processed. + srv.snapshot.FreeOperationQueueSemaphore() // There is a possibility that during hypersync the network entered a new snapshot epoch. We handle this case by // restarting the node and starting hypersync from scratch. @@ -1615,6 +1646,13 @@ func (srv *Server) _startSync() { // Find a peer with StartingHeight bigger than our best header tip. var bestPeer *Peer for _, peer := range srv.cmgr.GetAllPeers() { + // If connectIps is set, only sync from persistent peers. + if len(srv.connectIps) > 0 && !peer.IsPersistent() { + glog.Infof("Server._startSync: Connect-ips is set, so non-persistent peer is not a "+ + "sync candidate %v", peer) + continue + } + if !peer.IsSyncCandidate() { glog.Infof("Peer is not sync candidate: %v (isOutbound: %v)", peer, peer.isOutbound) continue @@ -1622,7 +1660,7 @@ func (srv *Server) _startSync() { // Choose the peer with the best height out of everyone who's a // valid sync candidate. - if peer.StartingBlockHeight() < bestHeight { + if peer.StartingBlockHeight() < uint64(bestHeight) { continue } @@ -1664,19 +1702,21 @@ func (srv *Server) _startSync() { "header tip height %v from peer %v", bestHeight, bestPeer) srv.SyncPeer = bestPeer +} - // Initialize state syncer mempool job, if needed. 
- if srv.stateChangeSyncer != nil { - srv.stateChangeSyncer.StartMempoolSyncRoutine(srv) +func (srv *Server) HandleAcceptedPeer(rn *RemoteNode) { + if rn == nil || rn.GetPeer() == nil { + return } + pp := rn.GetPeer() + pp.SetServiceFlag(rn.GetServiceFlag()) + pp.SetLatestBlockHeight(rn.GetLatestBlockHeight()) -} - -func (srv *Server) _handleNewPeer(pp *Peer) { isSyncCandidate := pp.IsSyncCandidate() isSyncing := srv.blockchain.isSyncing() chainState := srv.blockchain.chainState() - glog.V(1).Infof("Server._handleNewPeer: Processing NewPeer: (%v); IsSyncCandidate(%v), syncPeerIsNil=(%v), IsSyncing=(%v), ChainState=(%v)", + glog.V(1).Infof("Server.HandleAcceptedPeer: Processing NewPeer: (%v); IsSyncCandidate(%v), "+ + "syncPeerIsNil=(%v), IsSyncing=(%v), ChainState=(%v)", pp, isSyncCandidate, (srv.SyncPeer == nil), isSyncing, chainState) // Request a sync if we're ready @@ -1691,6 +1731,22 @@ func (srv *Server) _handleNewPeer(pp *Peer) { } } +func (srv *Server) maybeRequestAddresses(remoteNode *RemoteNode) { + if remoteNode == nil { + return + } + // If the address manager needs more addresses, then send a GetAddr message + // to the peer. This is best-effort. 
+ if !srv.AddrMgr.NeedMoreAddresses() { + return + } + + if err := remoteNode.SendMessage(&MsgDeSoGetAddr{}); err != nil { + glog.Errorf("Server.maybeRequestAddresses: Problem sending GetAddr message to "+ + "remoteNode (id= %v); err: %v", remoteNode, err) + } +} + func (srv *Server) _cleanupDonePeerState(pp *Peer) { // Grab the dataLock since we'll be modifying requestedBlocks srv.dataLock.Lock() @@ -1755,8 +1811,8 @@ func (srv *Server) _cleanupDonePeerState(pp *Peer) { }, false) } -func (srv *Server) _handleDonePeer(pp *Peer) { - glog.V(1).Infof("Server._handleDonePeer: Processing DonePeer: %v", pp) +func (srv *Server) _handleDisconnectedPeerMessage(pp *Peer) { + glog.V(1).Infof("Server._handleDisconnectedPeerMessage: Processing DonePeer: %v", pp) srv._cleanupDonePeerState(pp) @@ -1774,20 +1830,17 @@ func (srv *Server) _relayTransactions() { // send them an inv. allPeers := srv.cmgr.GetAllPeers() - srv.blockchain.ChainLock.RLock() - tipHeight := uint64(srv.blockchain.BlockTip().Height) - srv.blockchain.ChainLock.RUnlock() + // Get the current mempool. This can be the PoW or PoS mempool depending on the + // current block height. + mempool := srv.GetMempool() - // If we're on the PoW protocol, we need to wait for the mempool readOnlyView to regenerate. - if srv.params.IsPoWBlockHeight(tipHeight) { - glog.V(1).Infof("Server._relayTransactions: Waiting for mempool readOnlyView to regenerate") - srv.mempool.BlockUntilReadOnlyViewRegenerated() - glog.V(1).Infof("Server._relayTransactions: Mempool view has regenerated") - } + glog.V(1).Infof("Server._relayTransactions: Waiting for mempool readOnlyView to regenerate") + mempool.BlockUntilReadOnlyViewRegenerated() + glog.V(1).Infof("Server._relayTransactions: Mempool view has regenerated") // We pull the transactions from either the PoW mempool or the PoS mempool depending // on the current block height. 
- txnList := srv.GetMempool().GetTransactions()
+ txnList := mempool.GetTransactions()
 
 for _, pp := range allPeers {
 if !pp.canReceiveInvMessagess {
@@ -1809,6 +1862,10 @@
 continue
 }
 
+ // Add the transaction to the peer's known inventory. We do
+ // it here when we enqueue the message to the peer's outgoing
+ // message queue so that we don't have to remember to do it later.
+ pp.knownInventory.Add(*invVect)
 invMsg.InvList = append(invMsg.InvList, invVect)
 }
 if len(invMsg.InvList) > 0 {
@@ -1981,13 +2038,13 @@ func (srv *Server) _handleBlockAccepted(event *BlockEvent) {
 Hash: *blockHash,
 }
 
- // Iterate through all the peers and relay the InvVect to them. This will only
- // actually be relayed if it's not already in the peer's knownInventory.
- allPeers := srv.cmgr.GetAllPeers()
- for _, pp := range allPeers {
- pp.AddDeSoMessage(&MsgDeSoInv{
+ // Iterate through all non-validator peers and relay the InvVect to them.
+ // This will only actually be relayed if it's not already in the peer's knownInventory.
+ allNonValidators := srv.networkManager.GetAllNonValidators()
+ for _, remoteNode := range allNonValidators {
+ remoteNode.sendMessage(&MsgDeSoInv{
 InvList: []*InvVect{invVect},
- }, false)
+ })
 }
 }
 
@@ -2008,20 +2065,8 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) {
 glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlock: Received block ( %v / %v ) from Peer %v",
 blk.Header.Height, srv.blockchain.headerTip().Height, pp)))
 
- // If the node is running a Fast-HotStuff validator and the consensus is running,
- // in the steady-state, then we handle the block according to the consensus rules. 
- if srv.fastHotStuffConsensus != nil && srv.fastHotStuffConsensus.IsRunning() { - if err := srv.fastHotStuffConsensus.HandleBlock(pp, blk); err != nil { - glog.Errorf("Server._handleBlock: Problem handling block with FastHotStuffConsensus: %v", err) - } - return - } - - // If we get here, it means that the node is not currently running a Fast-HotStuff - // validator or that the node is syncing. In either case, we handle the block - // according to the blocksync rules. - srv.timer.Start("Server._handleBlock: General") + // Pull out the header for easy access. blockHeader := blk.Header if blockHeader == nil { @@ -2032,33 +2077,31 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { // If we've set a maximum sync height and we've reached that height, then we will // stop accepting new blocks. - if srv.blockchain.isTipMaxed(srv.blockchain.blockTip()) && - blockHeader.Height > uint64(srv.blockchain.blockTip().Height) { - + blockTip := srv.blockchain.blockTip() + if srv.blockchain.isTipMaxed(blockTip) && blockHeader.Height > uint64(blockTip.Height) { glog.Infof("Server._handleBlock: Exiting because block tip is maxed out") return } - // Compute the hash of the block. + // Compute the hash of the block. If the hash computation fails, then we log an error and + // disconnect from the peer. The block is obviously bad. blockHash, err := blk.Header.Hash() if err != nil { - // This should never happen if we got this far but log the error, clear the - // requestedBlocks, disconnect from the peer and return just in case. 
- srv._logAndDisconnectPeer( - pp, blk, "Problem computing block hash") + srv._logAndDisconnectPeer(pp, blk, "Problem computing block hash") return } - if pp != nil { - if _, exists := pp.requestedBlocks[*blockHash]; !exists { - glog.Errorf("_handleBlock: Getting a block that we haven't requested before, "+ - "block hash (%v)", *blockHash) - } - delete(pp.requestedBlocks, *blockHash) - } else { - glog.Errorf("_handleBlock: Called with nil peer, this should never happen.") + // Unless we're running a PoS validator, we should not expect to see a block that we did not request. If + // we see such a block, then we log an error and disconnect from the peer. + _, isRequestedBlock := pp.requestedBlocks[*blockHash] + if srv.fastHotStuffConsensus == nil && !isRequestedBlock { + srv._logAndDisconnectPeer(pp, blk, "Getting a block that we haven't requested before") + return } + // Delete the block from the requested blocks map. We do this whether the block was requested or not. + delete(pp.requestedBlocks, *blockHash) + // Check that the mempool has not received a transaction that would forbid this block's signature pubkey. // This is a minimal check, a more thorough check is made in the ProcessBlock function. This check is // necessary because the ProcessBlock function only has access to mined transactions. Therefore, if an @@ -2079,12 +2122,16 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { // Only verify signatures for recent blocks. var isOrphan bool - if srv.blockchain.isSyncing() { + if srv.fastHotStuffConsensus != nil && srv.fastHotStuffConsensus.IsRunning() { + // If the FastHotStuffConsensus has been initialized, then we pass the block to the new consensus + // which will validate the block, try to apply it, and handle the orphan case by requesting missing + // parents. 
+ isOrphan, err = srv.fastHotStuffConsensus.HandleBlock(pp, blk) + } else if srv.blockchain.isSyncing() { glog.V(1).Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlock: Processing block %v WITHOUT "+ "signature checking because SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp))) _, isOrphan, _, err = srv.blockchain.ProcessBlock(blk, false) - } else { // TODO: Signature checking slows things down because it acquires the ChainLock. // The optimal solution is to check signatures in a way that doesn't acquire the @@ -2104,20 +2151,27 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { // headers comment above but in the future we should probably try and figure // out a way to be more strict about things. glog.Warningf("Got duplicate block %v from peer %v", blk, pp) + } else if strings.Contains(err.Error(), RuleErrorFailedSpamPreventionsCheck.Error()) { + // If the block fails the spam prevention check, then it must be signed by the + // bad block proposer signature or it has a bad QC. In either case, we should + // disconnect the peer. + srv._logAndDisconnectPeer(pp, blk, errors.Wrapf(err, "Error while processing block at height %v: ", blk.Header.Height).Error()) + return } else { - srv._logAndDisconnectPeer( - pp, blk, - errors.Wrapf(err, "Error while processing block: ").Error()) + // For any other error, we log the error and continue. + glog.Errorf("Server._handleBlock: Error while processing block at height %v: %v", blk.Header.Height, err) return } } + if isOrphan { - // We should generally never receive orphan blocks. It indicates something - // went wrong in our headers syncing. - glog.Errorf("ERROR: Received orphan block with hash %v height %v. "+ - "This should never happen", blockHash, blk.Header.Height) + // It's possible to receive an orphan block if we're connected directly to the + // block producer, and they are broadcasting blocks in the steady state. We log + // a warning in this case and move on. 
+ glog.Warningf("ERROR: Received orphan block with hash %v height %v.", blockHash, blk.Header.Height) return } + srv.timer.End("Server._handleBlock: Process Block") srv.timer.Print("Server._handleBlock: General") @@ -2125,9 +2179,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { // We shouldn't be receiving blocks while syncing headers. if srv.blockchain.chainState() == SyncStateSyncingHeaders { - srv._logAndDisconnectPeer( - pp, blk, - "We should never get blocks when we're syncing headers") + glog.Warningf("Server._handleBlock: Received block while syncing headers: %v", blk) return } @@ -2176,27 +2228,13 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { // In this case we shoot a MEMPOOL message over to the peer to bootstrap the mempool. srv._maybeRequestSync(pp) - ///////////////////// PoS Validator Consensus Initialization ///////////////////// - // Exit early if the chain isn't SyncStateFullyCurrent. if srv.blockchain.chainState() != SyncStateFullyCurrent { return } - // Exit early if the current tip height is below the final PoW block's height. We are ready to - // enable the FastHotStuffConsensus once we reach the final block of the PoW protocol. - // - // Enable the FastHotStuffConsensus once the tipHeight >= ProofOfStake2ConsensusCutoverBlockHeight-1 - tipHeight := uint64(srv.blockchain.blockTip().Height) - if tipHeight < srv.params.GetFinalPoWBlockHeight() { - return - } - - // If the PoS validator FastHotStuffConsensus is initialized but not yet running, then - // we can start the validator consensus, and transition to it in the steady-state. - if srv.fastHotStuffConsensus != nil && !srv.fastHotStuffConsensus.IsRunning() { - srv.fastHotStuffConsensus.Start() - } + // If the chain is current, then try to transition to the FastHotStuff consensus. 
+ srv.tryTransitionToFastHotStuffConsensus() } func (srv *Server) _handleInv(peer *Peer, msg *MsgDeSoInv) { @@ -2360,20 +2398,33 @@ func (srv *Server) StartStatsdReporter() { }() } -func (srv *Server) _handleAddrMessage(pp *Peer, msg *MsgDeSoAddr) { +func (srv *Server) _handleAddrMessage(pp *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeAddr { + return + } + + id := NewRemoteNodeId(pp.ID) + var msg *MsgDeSoAddr + var ok bool + if msg, ok = desoMsg.(*MsgDeSoAddr); !ok { + glog.Errorf("Server._handleAddrMessage: Problem decoding MsgDeSoAddr: %v", spew.Sdump(desoMsg)) + srv.networkManager.DisconnectById(id) + return + } + srv.addrsToBroadcastLock.Lock() defer srv.addrsToBroadcastLock.Unlock() - glog.V(1).Infof("Server._handleAddrMessage: Received Addr from peer %v with addrs %v", pp, spew.Sdump(msg.AddrList)) + glog.V(1).Infof("Server._handleAddrMessage: Received Addr from peer id=%v with addrs %v", pp.ID, spew.Sdump(msg.AddrList)) // If this addr message contains more than the maximum allowed number of addresses // then disconnect this peer. 
if len(msg.AddrList) > MaxAddrsPerAddrMsg { glog.Errorf(fmt.Sprintf("Server._handleAddrMessage: Disconnecting "+ - "Peer %v for sending us an addr message with %d transactions, which exceeds "+ + "Peer id=%v for sending us an addr message with %d transactions, which exceeds "+ "the max allowed %d", - pp, len(msg.AddrList), MaxAddrsPerAddrMsg)) - pp.Disconnect() + pp.ID, len(msg.AddrList), MaxAddrsPerAddrMsg)) + srv.networkManager.DisconnectById(id) return } @@ -2382,17 +2433,16 @@ func (srv *Server) _handleAddrMessage(pp *Peer, msg *MsgDeSoAddr) { for _, addr := range msg.AddrList { addrAsNetAddr := wire.NewNetAddressIPPort(addr.IP, addr.Port, (wire.ServiceFlag)(addr.Services)) if !addrmgr.IsRoutable(addrAsNetAddr) { - glog.V(1).Infof("Dropping address %v from peer %v because it is not routable", addr, pp) + glog.V(1).Infof("Server._handleAddrMessage: Dropping address %v from peer %v because it is not routable", addr, pp) continue } netAddrsReceived = append( netAddrsReceived, addrAsNetAddr) } - srv.cmgr.AddrMgr.AddAddresses(netAddrsReceived, pp.netAddr) + srv.AddrMgr.AddAddresses(netAddrsReceived, pp.netAddr) - // If the message had <= 10 addrs in it, then queue all the addresses for relaying - // on the next cycle. + // If the message had <= 10 addrs in it, then queue all the addresses for relaying on the next cycle. 
if len(msg.AddrList) <= 10 { glog.V(1).Infof("Server._handleAddrMessage: Queueing %d addrs for forwarding from "+ "peer %v", len(msg.AddrList), pp) @@ -2402,7 +2452,7 @@ func (srv *Server) _handleAddrMessage(pp *Peer, msg *MsgDeSoAddr) { Port: pp.netAddr.Port, Services: pp.serviceFlags, } - listToAddTo, hasSeenSource := srv.addrsToBroadcastt[sourceAddr.StringWithPort(false /*includePort*/)] + listToAddTo, hasSeenSource := srv.addrsToBroadcast[sourceAddr.StringWithPort(false /*includePort*/)] if !hasSeenSource { listToAddTo = []*SingleAddr{} } @@ -2412,15 +2462,30 @@ func (srv *Server) _handleAddrMessage(pp *Peer, msg *MsgDeSoAddr) { listToAddTo = listToAddTo[:MaxAddrsPerAddrMsg/2] } listToAddTo = append(listToAddTo, msg.AddrList...) - srv.addrsToBroadcastt[sourceAddr.StringWithPort(false /*includePort*/)] = listToAddTo + srv.addrsToBroadcast[sourceAddr.StringWithPort(false /*includePort*/)] = listToAddTo } } -func (srv *Server) _handleGetAddrMessage(pp *Peer, msg *MsgDeSoGetAddr) { +func (srv *Server) _handleGetAddrMessage(pp *Peer, desoMsg DeSoMessage) { + if desoMsg.GetMsgType() != MsgTypeGetAddr { + return + } + + id := NewRemoteNodeId(pp.ID) + if _, ok := desoMsg.(*MsgDeSoGetAddr); !ok { + glog.Errorf("Server._handleAddrMessage: Problem decoding "+ + "MsgDeSoAddr: %v", spew.Sdump(desoMsg)) + srv.networkManager.DisconnectById(id) + return + } + glog.V(1).Infof("Server._handleGetAddrMessage: Received GetAddr from peer %v", pp) // When we get a GetAddr message, choose MaxAddrsPerMsg from the AddrMgr // and send them back to the peer. 
- netAddrsFound := srv.cmgr.AddrMgr.AddressCache() + netAddrsFound := srv.AddrMgr.AddressCache() + if len(netAddrsFound) == 0 { + return + } if len(netAddrsFound) > MaxAddrsPerAddrMsg { netAddrsFound = netAddrsFound[:MaxAddrsPerAddrMsg] } @@ -2436,16 +2501,22 @@ func (srv *Server) _handleGetAddrMessage(pp *Peer, msg *MsgDeSoGetAddr) { } res.AddrList = append(res.AddrList, singleAddr) } - pp.AddDeSoMessage(res, false) + rn := srv.networkManager.GetRemoteNodeById(id) + if err := srv.networkManager.SendMessage(rn, res); err != nil { + glog.Errorf("Server._handleGetAddrMessage: Problem sending addr message to peer %v: %v", pp, err) + srv.networkManager.DisconnectById(id) + return + } } func (srv *Server) _handleControlMessages(serverMessage *ServerMessage) (_shouldQuit bool) { switch serverMessage.Msg.(type) { // Control messages used internally to signal to the server. - case *MsgDeSoNewPeer: - srv._handleNewPeer(serverMessage.Peer) - case *MsgDeSoDonePeer: - srv._handleDonePeer(serverMessage.Peer) + case *MsgDeSoDisconnectedPeer: + srv._handleDisconnectedPeerMessage(serverMessage.Peer) + srv.networkManager._handleDisconnectedPeerMessage(serverMessage.Peer, serverMessage.Msg) + case *MsgDeSoNewConnection: + srv.networkManager._handleNewConnectionMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoQuit: return true } @@ -2457,6 +2528,10 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { // Handle all non-control message types from our Peers. switch msg := serverMessage.Msg.(type) { // Messages sent among peers. 
+ case *MsgDeSoAddr: + srv._handleAddrMessage(serverMessage.Peer, serverMessage.Msg) + case *MsgDeSoGetAddr: + srv._handleGetAddrMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoGetHeaders: srv._handleGetHeaders(serverMessage.Peer, msg) case *MsgDeSoHeaderBundle: @@ -2479,6 +2554,10 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { srv._handleMempool(serverMessage.Peer, msg) case *MsgDeSoInv: srv._handleInv(serverMessage.Peer, msg) + case *MsgDeSoVersion: + srv.networkManager._handleVersionMessage(serverMessage.Peer, serverMessage.Msg) + case *MsgDeSoVerack: + srv.networkManager._handleVerackMessage(serverMessage.Peer, serverMessage.Msg) case *MsgDeSoValidatorVote: srv._handleValidatorVote(serverMessage.Peer, msg) case *MsgDeSoValidatorTimeout: @@ -2549,6 +2628,9 @@ func (srv *Server) _handleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeou // - It listens to consensus events from the Fast HostStuff consensus engine. The consensus signals when // it's ready to vote, timeout, propose a block, or propose an empty block with a timeout QC. func (srv *Server) _startConsensus() { + // Initialize the FastHotStuffConsensus transition check time. + srv.resetFastHotStuffConsensusTransitionCheckTime() + for { // This is used instead of the shouldQuit control message exist mechanism below. shouldQuit will be true only // when all incoming messages have been processed, on the other hand this shutdown will quit immediately. 
@@ -2557,9 +2639,15 @@ func (srv *Server) _startConsensus() { } select { - case consensusEvent := <-srv._getFastHotStuffConsensusEventChannel(): + case <-srv.getFastHotStuffTransitionCheckTime(): { - glog.Infof("Server._startConsensus: Received consensus event: %s", consensusEvent.ToString()) + glog.V(2).Info("Server._startConsensus: Checking if FastHotStuffConsensus is ready to start") + srv.tryTransitionToFastHotStuffConsensus() + } + + case consensusEvent := <-srv.getFastHotStuffConsensusEventChannel(): + { + glog.V(2).Infof("Server._startConsensus: Received consensus event: %s", consensusEvent.ToString()) srv._handleFastHostStuffConsensusEvent(consensusEvent) } @@ -2569,20 +2657,6 @@ func (srv *Server) _startConsensus() { glog.V(2).Infof("Server._startConsensus: Handling message of type %v from Peer %v", serverMessage.Msg.GetMsgType(), serverMessage.Peer) - - // If the message is an addr message we handle it independent of whether or - // not the BitcoinManager is synced. - if serverMessage.Msg.GetMsgType() == MsgTypeAddr { - srv._handleAddrMessage(serverMessage.Peer, serverMessage.Msg.(*MsgDeSoAddr)) - continue - } - // If the message is a GetAddr message we handle it independent of whether or - // not the BitcoinManager is synced. - if serverMessage.Msg.GetMsgType() == MsgTypeGetAddr { - srv._handleGetAddrMessage(serverMessage.Peer, serverMessage.Msg.(*MsgDeSoGetAddr)) - continue - } - srv._handlePeerMessages(serverMessage) // Always check for and handle control messages regardless of whether the @@ -2603,35 +2677,36 @@ func (srv *Server) _startConsensus() { glog.V(2).Info("Server.Start: Server done") } -func (srv *Server) _getAddrsToBroadcast() []*SingleAddr { +func (srv *Server) getAddrsToBroadcast() []*SingleAddr { srv.addrsToBroadcastLock.Lock() defer srv.addrsToBroadcastLock.Unlock() // If there's nothing in the map, return. 
- if len(srv.addrsToBroadcastt) == 0 { + if len(srv.addrsToBroadcast) == 0 { return []*SingleAddr{} } // If we get here then we have some addresses to broadcast. addrsToBroadcast := []*SingleAddr{} - for len(addrsToBroadcast) < 10 && len(srv.addrsToBroadcastt) > 0 { + for uint32(len(addrsToBroadcast)) < srv.params.MaxAddressesToBroadcast && + len(srv.addrsToBroadcast) > 0 { // Choose a key at random. This works because map iteration is random in golang. bucket := "" - for kk := range srv.addrsToBroadcastt { + for kk := range srv.addrsToBroadcast { bucket = kk break } // Remove the last element from the slice for the given bucket. - currentAddrList := srv.addrsToBroadcastt[bucket] + currentAddrList := srv.addrsToBroadcast[bucket] if len(currentAddrList) > 0 { lastIndex := len(currentAddrList) - 1 currentAddr := currentAddrList[lastIndex] currentAddrList = currentAddrList[:lastIndex] if len(currentAddrList) == 0 { - delete(srv.addrsToBroadcastt, bucket) + delete(srv.addrsToBroadcast, bucket) } else { - srv.addrsToBroadcastt[bucket] = currentAddrList + srv.addrsToBroadcast[bucket] = currentAddrList } addrsToBroadcast = append(addrsToBroadcast, currentAddr) @@ -2648,16 +2723,24 @@ func (srv *Server) _startAddressRelayer() { if atomic.LoadInt32(&srv.shutdown) >= 1 { break } - // For the first ten minutes after the server starts, relay our address to all + // For the first ten minutes after the connection controller starts, relay our address to all // peers. After the first ten minutes, do it once every 24 hours. 
- glog.V(1).Infof("Server.Start._startAddressRelayer: Relaying our own addr to peers") + glog.V(1).Infof("Server.startAddressRelayer: Relaying our own addr to peers") + remoteNodes := srv.networkManager.GetAllRemoteNodes().GetAll() if numMinutesPassed < 10 || numMinutesPassed%(RebroadcastNodeAddrIntervalMinutes) == 0 { - for _, pp := range srv.cmgr.GetAllPeers() { - bestAddress := srv.cmgr.AddrMgr.GetBestLocalAddress(pp.netAddr) + for _, rn := range remoteNodes { + if !rn.IsHandshakeCompleted() { + continue + } + netAddr := rn.GetNetAddress() + if netAddr == nil { + continue + } + bestAddress := srv.AddrMgr.GetBestLocalAddress(netAddr) if bestAddress != nil { - glog.V(2).Infof("Server.Start._startAddressRelayer: Relaying address %v to "+ - "peer %v", bestAddress.IP.String(), pp) - pp.AddDeSoMessage(&MsgDeSoAddr{ + glog.V(2).Infof("Server.startAddressRelayer: Relaying address %v to "+ + "RemoteNode (id= %v)", bestAddress.IP.String(), rn.GetId()) + addrMsg := &MsgDeSoAddr{ AddrList: []*SingleAddr{ { Timestamp: time.Now(), @@ -2666,40 +2749,122 @@ func (srv *Server) _startAddressRelayer() { Services: (ServiceFlag)(bestAddress.Services), }, }, - }, false) + } + if err := rn.SendMessage(addrMsg); err != nil { + glog.Errorf("Server.startAddressRelayer: Problem sending "+ + "MsgDeSoAddr to RemoteNode (id= %v): %v", rn.GetId(), err) + } } } } - glog.V(2).Infof("Server.Start._startAddressRelayer: Seeing if there are addrs to relay...") + glog.V(2).Infof("Server.startAddressRelayer: Seeing if there are addrs to relay...") // Broadcast the addrs we have to all of our peers. 
- addrsToBroadcast := srv._getAddrsToBroadcast() + addrsToBroadcast := srv.getAddrsToBroadcast() if len(addrsToBroadcast) == 0 { - glog.V(2).Infof("Server.Start._startAddressRelayer: No addrs to relay.") + glog.V(2).Infof("Server.startAddressRelayer: No addrs to relay.") time.Sleep(AddrRelayIntervalSeconds * time.Second) continue } - glog.V(2).Infof("Server.Start._startAddressRelayer: Found %d addrs to "+ + glog.V(2).Infof("Server.startAddressRelayer: Found %d addrs to "+ "relay: %v", len(addrsToBroadcast), spew.Sdump(addrsToBroadcast)) // Iterate over all our peers and broadcast the addrs to all of them. - for _, pp := range srv.cmgr.GetAllPeers() { - pp.AddDeSoMessage(&MsgDeSoAddr{ + for _, rn := range remoteNodes { + if !rn.IsHandshakeCompleted() { + continue + } + addrMsg := &MsgDeSoAddr{ AddrList: addrsToBroadcast, - }, false) + } + if err := rn.SendMessage(addrMsg); err != nil { + glog.Errorf("Server.startAddressRelayer: Problem sending "+ + "MsgDeSoAddr to RemoteNode (id= %v): %v", rn.GetId(), err) + } } time.Sleep(AddrRelayIntervalSeconds * time.Second) continue } } -func (srv *Server) _getFastHotStuffConsensusEventChannel() chan *consensus.FastHotStuffEvent { +func (srv *Server) getFastHotStuffConsensusEventChannel() chan *consensus.FastHotStuffEvent { if srv.fastHotStuffConsensus == nil { return nil } return srv.fastHotStuffConsensus.fastHotStuffEventLoop.GetEvents() } +func (srv *Server) resetFastHotStuffConsensusTransitionCheckTime() { + // Check once every 30 seconds if the FastHotStuffConsensus is ready to start. + srv.fastHotStuffConsensusTransitionCheckTime = time.Now().Add(30 * time.Second) +} + +func (srv *Server) getFastHotStuffTransitionCheckTime() <-chan time.Time { + // If the FastHotStuffConsensus does not exist, or is already running, then + // we don't need this timer. We can exit early. 
+ if srv.fastHotStuffConsensus == nil || srv.fastHotStuffConsensus.IsRunning() { + return nil + } + return time.After(time.Until(srv.fastHotStuffConsensusTransitionCheckTime)) +} + +func (srv *Server) tryTransitionToFastHotStuffConsensus() { + // Reset the transition check timer when this function exits. + defer srv.resetFastHotStuffConsensusTransitionCheckTime() + + // If the FastHotStuffConsensus does not exist, or is already running, then + // there is nothing left to do. We can exit early. + if srv.fastHotStuffConsensus == nil || srv.fastHotStuffConsensus.IsRunning() { + return + } + + // Get the tip height, header tip height, and sync state of the blockchain. We'll use them + // in a heuristic here to determine if we are ready to transition to the FastHotStuffConsensus, + // or should continue to try to sync. + srv.blockchain.ChainLock.RLock() + tipHeight := uint64(srv.blockchain.blockTip().Height) + headerTipHeight := uint64(srv.blockchain.headerTip().Height) + syncState := srv.blockchain.chainState() + srv.blockchain.ChainLock.RUnlock() + + // Exit early if the current tip height is below the final PoW block's height. We are ready to + // enable the FastHotStuffConsensus once we reach the final block of the PoW protocol. The + // FastHotStuffConsensus can only be enabled once it's at or past the final block height of + // the PoW protocol. + if tipHeight < srv.params.GetFinalPoWBlockHeight() { + return + } + + // If the header's tip is not at the same height as the block tip, then we are still syncing + // and we should not transition to the FastHotStuffConsensus. + if headerTipHeight != tipHeight { + return + } + + // If we are still syncing, then we should not transition to the FastHotStuffConsensus. + // We intentionally exclude the SyncStateSyncingHeaders to account for the case where we + // do not have a sync peer and are stuck in the SyncStateSyncingHeaders state. 
+ skippedSyncStates := []SyncState{ + SyncStateSyncingSnapshot, SyncStateSyncingBlocks, SyncStateNeedBlocksss, SyncStateSyncingHistoricalBlocks, + } + if collections.Contains(skippedSyncStates, syncState) { + return + } + + // If we have a sync peer and have not reached the sync peer's starting block height, then + // we should sync all remaining blocks from the sync peer before transitioning to the + // FastHotStuffConsensus. + if srv.SyncPeer != nil && srv.SyncPeer.StartingBlockHeight() > tipHeight { + return + } + + // At this point, we know that we have synced to the sync peer's tip or we don't have a sync + // peer. The header tip and the chain tip are also at the same height. We are ready to transition + // to the FastHotStuffConsensus. + + srv.fastHotStuffConsensus.Start() +} + func (srv *Server) _startTransactionRelayer() { // If we've set a maximum sync height, we will not relay transactions. if srv.blockchain.MaxSyncBlockHeight > 0 { @@ -2723,6 +2888,9 @@ func (srv *Server) Stop() { srv.cmgr.Stop() glog.Infof(CLog(Yellow, "Server.Stop: Closed the ConnectionManger")) + srv.networkManager.Stop() + glog.Infof(CLog(Yellow, "Server.Stop: Closed the NetworkManager")) + // Stop the miner if we have one running. if srv.miner != nil { srv.miner.Stop() @@ -2816,17 +2984,13 @@ func (srv *Server) Start() { go srv.miner.Start() } - // On testnet, if the node is configured to be a PoW block producer, and it is configured - // to be also a PoS validator, then we attach block mined listeners to the miner to kick - // off the PoS consensus once the miner is done. 
- if srv.params.NetworkType == NetworkType_TESTNET && srv.fastHotStuffConsensus != nil { - tipHeight := uint64(srv.blockchain.blockTip().Height) - if srv.params.IsFinalPoWBlockHeight(tipHeight) || srv.params.IsPoSBlockHeight(tipHeight) { - if err := srv.fastHotStuffConsensus.Start(); err != nil { - glog.Errorf("NewServer: Error starting fast hotstuff consensus %v", err) - } - } + // Initialize state syncer mempool job, if needed. + if srv.stateChangeSyncer != nil { + srv.stateChangeSyncer.StartMempoolSyncRoutine(srv) } + + // Start the network manager's internal event loop to open and close connections to peers. + srv.networkManager.Start() } // SyncPrefixProgress keeps track of sync progress on an individual prefix. It is used in diff --git a/lib/snapshot.go b/lib/snapshot.go index f0db0a511..6d0c8df9e 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -468,8 +468,6 @@ func (snap *Snapshot) Run() { operation.blockHeight); err != nil { glog.Errorf("Snapshot.Run: Problem adding snapshot chunk to the db") } - // Free up a slot in the operationQueueSemaphore, now that a chunk has been processed. - <-snap.operationQueueSemaphore case SnapshotOperationChecksumAdd: if err := snap.Checksum.AddOrRemoveBytesWithMigrations(operation.checksumKey, operation.checksumValue, @@ -1345,6 +1343,12 @@ func (snap *Snapshot) SetSnapshotChunk(mainDb *badger.DB, mainDbMutex *deadlock. 
return nil } +func (snap *Snapshot) FreeOperationQueueSemaphore() { + if len(snap.operationQueueSemaphore) > 0 { + <-snap.operationQueueSemaphore + } +} + // ------------------------------------------------------------------------------------- // StateChecksum // ------------------------------------------------------------------------------------- diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index a5a9f1777..02ed421f7 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" "fmt" + "github.com/deso-protocol/core/collections" "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" "github.com/google/uuid" @@ -258,6 +259,8 @@ type StateChangeSyncer struct { // of each entry, the consumer only has to sync the most recent version of each entry. // BlocksyncCompleteEntriesFlushed is used to track whether this one time flush has been completed. BlocksyncCompleteEntriesFlushed bool + + MempoolTxnSyncLimit uint64 } // Open a file, create if it doesn't exist. @@ -275,7 +278,8 @@ func openOrCreateLogFile(filePath string) (*os.File, error) { } // NewStateChangeSyncer initializes necessary log files and returns a StateChangeSyncer. 
-func NewStateChangeSyncer(stateChangeDir string, nodeSyncType NodeSyncType) *StateChangeSyncer { +func NewStateChangeSyncer(stateChangeDir string, nodeSyncType NodeSyncType, mempoolTxnSyncLimit uint64, +) *StateChangeSyncer { stateChangeFilePath := filepath.Join(stateChangeDir, StateChangeFileName) stateChangeIndexFilePath := filepath.Join(stateChangeDir, StateChangeIndexFileName) stateChangeMempoolFilePath := filepath.Join(stateChangeDir, StateChangeMempoolFileName) @@ -322,6 +326,7 @@ func NewStateChangeSyncer(stateChangeDir string, nodeSyncType NodeSyncType) *Sta StateSyncerMutex: &sync.Mutex{}, SyncType: nodeSyncType, BlocksyncCompleteEntriesFlushed: blocksyncCompleteEntriesFlushed, + MempoolTxnSyncLimit: mempoolTxnSyncLimit, } } @@ -624,10 +629,9 @@ func createMempoolTxKey(operationType StateSyncerOperationType, keyBytes []byte) // in the mempool state change file. It also loops through all unconnected transactions and their associated // utxo ops and adds them to the mempool state change file. func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Server) (bool, error) { - originalCommittedFlushId := stateChangeSyncer.BlockSyncFlushId - if server.mempool.stopped { + if !server.GetMempool().IsRunning() { return true, nil } @@ -655,7 +659,9 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser // Kill the snapshot so that it doesn't affect the original snapshot. mempoolUtxoView.Snapshot = nil + server.blockchain.ChainLock.RLock() mempoolUtxoView.TipHash = server.blockchain.bestChain[len(server.blockchain.bestChain)-1].Hash + server.blockchain.ChainLock.RUnlock() // A new transaction is created so that we can simulate writes to the db without actually writing to the db. 
// Using the transaction here rather than a stubbed badger db allows the process to query the db for any entries @@ -672,10 +678,10 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser } // Loop through all the transactions in the mempool and connect them and their utxo ops to the mempool view. - server.mempool.mtx.RLock() - mempoolTxns, _, err := server.mempool._getTransactionsOrderedByTimeAdded() - server.mempool.mtx.RUnlock() + mempoolTxns := server.GetMempool().GetOrderedTransactions() + // Get the uncommitted blocks from the chain. + uncommittedBlocks, err := server.blockchain.GetUncommittedFullBlocks(mempoolUtxoView.TipHash) if err != nil { mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ FlushId: uuid.Nil, @@ -685,13 +691,91 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer: ") } - currentTimestamp := time.Now().UnixNano() - for _, mempoolTx := range mempoolTxns { - utxoOpsForTxn, _, _, _, err := mempoolTxUtxoView.ConnectTransaction( - mempoolTx.Tx, mempoolTx.Hash, uint32(blockHeight+1), - currentTimestamp, false, false /*ignoreUtxos*/) + // First connect the uncommitted blocks to the mempool view. + for _, uncommittedBlock := range uncommittedBlocks { + var utxoOpsForBlock [][]*UtxoOperation + txHashes := collections.Transform(uncommittedBlock.Txns, func(txn *MsgDeSoTxn) *BlockHash { + return txn.Hash() + }) + // TODO: there is a slight performance enhancement we could make here + // by rewriting the ConnectBlock logic to avoid unnecessary UtxoView copying + // for failing transactions. However, we'd also need to rewrite the end-of-epoch + // logic here which would make this function a bit long. + // Connect this block to the mempoolTxUtxoView so we can get the utxo ops. 
+ utxoOpsForBlock, err = mempoolTxUtxoView.ConnectBlock( + uncommittedBlock, txHashes, false, nil, uncommittedBlock.Header.Height) if err != nil { - return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer ConnectTransaction: ") + mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ + FlushId: uuid.Nil, + Succeeded: false, + IsMempoolFlush: true, + }) + return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer ConnectBlock uncommitted block: ") + } + blockHash, _ := uncommittedBlock.Hash() + // Emit the UtxoOps event. + mempoolUtxoView.EventManager.stateSyncerOperation(&StateSyncerOperationEvent{ + StateChangeEntry: &StateChangeEntry{ + OperationType: DbOperationTypeUpsert, + KeyBytes: _DbKeyForUtxoOps(blockHash), + EncoderBytes: EncodeToBytes(blockHeight, &UtxoOperationBundle{ + UtxoOpBundle: utxoOpsForBlock, + }, false), + Block: uncommittedBlock, + }, + FlushId: uuid.Nil, + IsMempoolTxn: true, + }) + } + + currentTimestamp := time.Now().UnixNano() + for ii, mempoolTx := range mempoolTxns { + if server.params.IsPoSBlockHeight(blockHeight) && uint64(ii) > stateChangeSyncer.MempoolTxnSyncLimit { + break + } + var utxoOpsForTxn []*UtxoOperation + if server.params.IsPoSBlockHeight(blockHeight + 1) { + // We need to create a copy of the view in the event that the transaction fails to + // connect. If it fails to connect, we need to reset the view to its original state. + // and try to connect it as a failing transaction. If that fails as well, we just continue + // and the mempoolTxUtxoView is unmodified. 
+ var copiedView *UtxoView + copiedView, err = mempoolTxUtxoView.CopyUtxoView() + if err != nil { + return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer CopyUtxoView: ") + } + utxoOpsForTxn, _, _, _, err = copiedView.ConnectTransaction( + mempoolTx.Tx, mempoolTx.Hash, uint32(blockHeight+1), + currentTimestamp, false, false /*ignoreUtxos*/) + // If the transaction successfully connected, we update mempoolTxUtxoView to the copied view. + if err == nil { + mempoolTxUtxoView = copiedView + } else { + // If the transaction fails to connect, we need to reset the view to its original state + // and connect it as a failing transaction. + copiedView, err = mempoolTxUtxoView.CopyUtxoView() + if err != nil { + return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer CopyUtxoView: ") + } + utxoOpsForTxn, _, _, err = copiedView._connectFailingTransaction( + mempoolTx.Tx, uint32(blockHeight+1), false) + // If we fail to connect the transaction as a failing transaction, we just continue and the + // mempoolTxUtxoView is unmodified. + if err != nil { + glog.V(2).Infof("StateChangeSyncer.SyncMempoolToStateSyncer "+ + "ConnectFailingTransaction for mempool tx: %v", err) + continue + } + mempoolTxUtxoView = copiedView + } + } else { + // For PoW block heights, we can just connect the transaction to the mempool view. + utxoOpsForTxn, _, _, _, err = mempoolTxUtxoView.ConnectTransaction( + mempoolTx.Tx, mempoolTx.Hash, uint32(blockHeight+1), + currentTimestamp, false, false /*ignoreUtxos*/) + if err != nil { + return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer ConnectTransaction: ") + } } // Emit transaction state change. @@ -747,7 +831,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser func (stateChangeSyncer *StateChangeSyncer) StartMempoolSyncRoutine(server *Server) { go func() { // Wait for mempool to be initialized. 
- for server.mempool == nil || server.blockchain.chainState() != SyncStateFullyCurrent { + for server.GetMempool() == nil || server.blockchain.chainState() != SyncStateFullyCurrent { time.Sleep(15000 * time.Millisecond) } if !stateChangeSyncer.BlocksyncCompleteEntriesFlushed && stateChangeSyncer.SyncType == NodeSyncTypeBlockSync { @@ -757,7 +841,7 @@ func (stateChangeSyncer *StateChangeSyncer) StartMempoolSyncRoutine(server *Serv fmt.Printf("StateChangeSyncer.StartMempoolSyncRoutine: Error flushing all entries to file: %v", err) } } - mempoolClosed := server.mempool.stopped + mempoolClosed := !server.GetMempool().IsRunning() for !mempoolClosed { // Sleep for a short while to avoid a tight loop. time.Sleep(100 * time.Millisecond) diff --git a/lib/txindex.go b/lib/txindex.go index 61d0a7966..7dad5db4a 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -149,7 +149,11 @@ func NewTXIndex(coreChain *Blockchain, params *DeSoParams, dataDirectory string) } func (txi *TXIndex) FinishedSyncing() bool { - return txi.TXIndexChain.BlockTip().Height == txi.CoreChain.BlockTip().Height + committedTip, idx := txi.CoreChain.GetCommittedTip() + if idx == -1 { + return false + } + return txi.TXIndexChain.BlockTip().Height == committedTip.Height } func (txi *TXIndex) Start() { @@ -224,7 +228,7 @@ func (txi *TXIndex) GetTxindexUpdateBlockNodes() ( txindexTipNode := blockIndexByHashCopy[*txindexTipHash.Hash] // Get the committed tip. 
- committedTip, _ := txi.CoreChain.getCommittedTip() + committedTip, _ := txi.CoreChain.GetCommittedTip() if txindexTipNode == nil { glog.Info("GetTxindexUpdateBlockNodes: Txindex tip was not found; building txindex starting at genesis block") @@ -408,10 +412,16 @@ func (txi *TXIndex) Update() error { return fmt.Errorf( "Update: Error initializing UtxoView: %v", err) } + if blockToAttach.Header.PrevBlockHash != nil { + utxoView, err = txi.TXIndexChain.getUtxoViewAtBlockHash(*blockToAttach.Header.PrevBlockHash) + if err != nil { + return fmt.Errorf("Update: Problem getting UtxoView at block hash %v: %v", + blockToAttach.Header.PrevBlockHash, err) + } + } // Do each block update in a single transaction so we're safe in case the node // restarts. - blockHeight := uint64(txi.CoreChain.BlockTip().Height) err = txi.TXIndexChain.DB().Update(func(dbTxn *badger.Txn) error { // Iterate through each transaction in the block and do the following: @@ -419,15 +429,27 @@ func (txi *TXIndex) Update() error { // - Compute its mapping values, which may include custom metadata fields // - add all its mappings to the db. for txnIndexInBlock, txn := range blockMsg.Txns { + hasPoWBlockHeight := txi.Params.IsPoWBlockHeight(blockMsg.Header.Height) + // Also, the first transaction in the block, the block reward transaction, should always be a connecting transaction. + isBlockRewardTxn := (txnIndexInBlock == 0) && (txn.TxnMeta.GetTxnType() == TxnTypeBlockReward) + // Finally, if the transaction is not the first in the block, we check the TxnConnectStatusByIndex to see if + // it's marked by the block producer as a connecting transaction. PoS blocks should reflect this in TxnConnectStatusByIndex. + hasConnectingPoSTxnStatus := false + if txi.Params.IsPoSBlockHeight(blockMsg.Header.Height) && (txnIndexInBlock > 0) && (blockMsg.TxnConnectStatusByIndex != nil) { + // Note that TxnConnectStatusByIndex doesn't include the first block reward transaction. 
+ hasConnectingPoSTxnStatus = blockMsg.TxnConnectStatusByIndex.Get(txnIndexInBlock - 1) + } + connects := hasPoWBlockHeight || isBlockRewardTxn || hasConnectingPoSTxnStatus + txnMeta, err := ConnectTxnAndComputeTransactionMetadata( txn, utxoView, blockToAttach.Hash, blockToAttach.Height, - int64(blockToAttach.Header.TstampNanoSecs), uint64(txnIndexInBlock)) + blockToAttach.Header.TstampNanoSecs, uint64(txnIndexInBlock), connects) if err != nil { return fmt.Errorf("Update: Problem connecting txn %v to txindex: %v", txn, err) } - err = DbPutTxindexTransactionMappingsWithTxn(dbTxn, nil, blockHeight, + err = DbPutTxindexTransactionMappingsWithTxn(dbTxn, nil, blockMsg.Header.Height, txn, txi.Params, txnMeta, txi.CoreChain.eventManager) if err != nil { return fmt.Errorf("Update: Problem adding txn %v to txindex: %v", @@ -454,3 +476,32 @@ func (txi *TXIndex) Update() error { return nil } + +func ConnectTxnAndComputeTransactionMetadata( + txn *MsgDeSoTxn, utxoView *UtxoView, blockHash *BlockHash, + blockHeight uint32, blockTimestampNanoSecs int64, txnIndexInBlock uint64, connects bool) (*TransactionMetadata, error) { + + totalNanosPurchasedBefore := utxoView.NanosPurchased + usdCentsPerBitcoinBefore := utxoView.GetCurrentUSDCentsPerBitcoin() + + var utxoOps []*UtxoOperation + var totalInput, totalOutput, fees, burnFee, utilityFee uint64 + var err error + if connects { + utxoOps, totalInput, totalOutput, fees, err = utxoView._connectTransaction( + txn, txn.Hash(), blockHeight, blockTimestampNanoSecs, false, false, + ) + } else { + utxoOps, burnFee, utilityFee, err = utxoView._connectFailingTransaction( + txn, blockHeight, false) + fees = burnFee + utilityFee + } + + if err != nil { + return nil, fmt.Errorf( + "UpdateTxindex: Error connecting txn to UtxoView: %v", err) + } + + return ComputeTransactionMetadata(txn, utxoView, blockHash, totalNanosPurchasedBefore, + usdCentsPerBitcoinBefore, totalInput, totalOutput, fees, txnIndexInBlock, utxoOps, uint64(blockHeight)), nil +} 
diff --git a/scripts/install-relic.sh b/scripts/install-relic.sh deleted file mode 100755 index cd2dcb9eb..000000000 --- a/scripts/install-relic.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# This script installs Relic, a C library dependency for the BLS signature implementation -# provided by Flow. This file was copy-pasted from Flow's installation README found here: -# https://github.com/onflow/flow-go/tree/master/crypto. - -# crypto package -PKG_NAME="github.com/onflow/flow-go/crypto" - -# go.mod -MOD_FILE="./go.mod" - -# the version of onflow/flow-go/crypto used in the project is read from the go.mod file -if [ -f "${MOD_FILE}" ] -then - # extract the version from the go.mod file - VERSION="$(grep ${PKG_NAME} < ${MOD_FILE} | cut -d' ' -f 2)" - # using the right version, get the package directory path - PKG_DIR="$(go env GOPATH)/pkg/mod/${PKG_NAME}@${VERSION}" -else - { echo "couldn't find go.mod file - make sure the script is in the project root directory"; exit 1; } -fi - -# grant permissions if not existent -if [[ ! -r ${PKG_DIR} || ! -w ${PKG_DIR} || ! -x ${PKG_DIR} ]]; then - sudo chmod -R 755 "${PKG_DIR}" -fi - -# get into the package directory and set up the external dependencies -( - cd "${PKG_DIR}" || { echo "cd into the GOPATH package folder failed"; exit 1; } - go generate -) diff --git a/scripts/pos/validator_registration_utils.go b/scripts/pos/validator_registration_utils.go index c5a811970..2f5ff86e9 100644 --- a/scripts/pos/validator_registration_utils.go +++ b/scripts/pos/validator_registration_utils.go @@ -1,5 +1,3 @@ -//go:build relic - package main import ( @@ -19,9 +17,6 @@ func getBLSVotingAuthorizationAndPublicKey(blsKeyStore *lib.BLSKeystore, transac return blsKeyStore.GetSigner().GetPublicKey(), votingAuthorization } -// You must have relic installed to run this code. -// To install relic, use the install-relic.sh script in the scripts directory. 
-// go run -tags relic validator_registration_utils.go func main() { // Replace with your own BIP39 Validator seed phrase keystore, err := lib.NewBLSKeystore("...") diff --git a/test.Dockerfile b/test.Dockerfile index 92dd66a55..6203a79bd 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -25,9 +25,7 @@ COPY scripts scripts COPY test_data test_data COPY main.go . -RUN ./scripts/install-relic.sh - # build backend RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core main.go -ENTRYPOINT ["go", "test", "-tags", "relic", "-v", "-failfast", "-p", "1", "github.com/deso-protocol/core/bls", "github.com/deso-protocol/core/collections", "github.com/deso-protocol/core/consensus", "github.com/deso-protocol/core/lib"] +ENTRYPOINT ["go", "test", "-v", "-failfast", "-p", "1", "github.com/deso-protocol/core/bls", "github.com/deso-protocol/core/collections", "github.com/deso-protocol/core/consensus", "github.com/deso-protocol/core/lib"] From ab1a7cc9fa32f2f37984b3a023ddf49d7fb9baa8 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 20 Mar 2024 11:20:02 -0700 Subject: [PATCH 547/762] Typos fixed, TransactionMetadata encoder/decoder patch. --- lib/block_view_atomic_txns.go | 37 ++++---------------- lib/block_view_atomic_txns_test.go | 54 ++++++++++++++++++++++++++++++ lib/block_view_types.go | 2 +- lib/db_utils.go | 20 +++++++---- lib/legacy_mempool.go | 1 + 5 files changed, 75 insertions(+), 39 deletions(-) diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index d029e0b06..265af0f53 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -138,7 +138,7 @@ func (txnData *AtomicTxnsWrapperMetadata) New() DeSoTxnMetadata { // HELPER FUNCTIONS: MsgDeSoTxn // -// IsAtomicTxnsInnerTxn is used to determine if a MsgDeSoTxn is an inner tnx +// IsAtomicTxnsInnerTxn is used to determine if a MsgDeSoTxn is an inner txn // of an atomic transaction. 
An atomic transaction is qualified by the existence // of the NextAtomicTxnPreHash and PreviousAtomicTxnPreHash keys in the ExtraData map. func (msg *MsgDeSoTxn) IsAtomicTxnsInnerTxn() bool { @@ -489,9 +489,7 @@ func (txindexMetadata *AtomicTxnsWrapperTxindexMetadata) RawEncodeWithoutMetadat var data []byte data = append(data, UintToBuf(uint64(len(txindexMetadata.InnerTxnsTransactionMetadata)))...) for _, innerMetadata := range txindexMetadata.InnerTxnsTransactionMetadata { - txnBytes := innerMetadata.RawEncodeWithoutMetadata(blockHeight, skipMetadata...) - data = append(data, UintToBuf(uint64(len(txnBytes)))...) - data = append(data, txnBytes...) + data = append(data, EncodeToBytes(blockHeight, innerMetadata, skipMetadata...)...) } return data } @@ -515,34 +513,11 @@ func (txindexMetadata *AtomicTxnsWrapperTxindexMetadata) RawDecodeWithoutMetadat // Read the transactions. for ii := uint64(0); ii < numTxns; ii++ { - txindexMetadata.InnerTxnsTransactionMetadata[ii] = &TransactionMetadata{} - - // Figure out how many bytes are associated with the ith transaction metadata. - numTxnMetadataBytes, err := ReadUvarint(rr) - if err != nil { - return errors.Wrap(err, - "AtomicTxnsWrapperTxindexMetadata.RawDecodeWithoutMetadata: "+ - "Problem reading number of bytes in transaction metadata") - } - - // Allocate memory for the transaction metadata bytes to be read into. - txnMetadataBytes, err := SafeMakeSliceWithLength[byte](numTxnMetadataBytes) - if err != nil { - return errors.Wrap(err, - "AtomicTxnsWrapperTxindexMetadata.RawDecodeWithoutMetadata: "+ - "Problem allocating bytes for transaction") - } + if txindexMetadata.InnerTxnsTransactionMetadata[ii], err = DecodeDeSoEncoder(&TransactionMetadata{}, rr); err != nil { + return errors.Wrap( + err, "AtomicTxnsWrapperTxindexMetadata.RawDecodeWithoutMetadata: "+ + "Problem decoding inner transaction bytes") - // Read the transaction metadata into the txnBytes memory buffer. 
- if _, err = io.ReadFull(rr, txnMetadataBytes); err != nil { - return errors.Wrap(err, - "AtomicTxnsWrapperTxindexMetadata.RawDecodeWithoutMetadata: Problem reading bytes for transaction") - } - - // Convert the txnBytes buffer to a TransactionMetadata struct. - if err = txindexMetadata.InnerTxnsTransactionMetadata[ii].RawDecodeWithoutMetadata(blockHeight, rr); err != nil { - return errors.Wrap(err, - "AtomicTxnsWrapperTxindexMetadata.RawDecodeWithoutMetadata: Problem parsing transaction bytes") } } return nil diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index 018eb6761..4379ec1fc 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -8,9 +8,63 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "math" + "reflect" "testing" ) +func TestAtomicTxnsWrapperTxIndexMetadataEncoder(t *testing.T) { + // + // (1) Construct some transactions from which we can construct transaction metadata. + // + + // Initialize test chain, miner, and testMeta. + testMeta := _setUpMinerAndTestMetaForAtomicTransactionTests(t) + + // Initialize m0, m1, m2, m3, m4. + _setUpUsersForAtomicTransactionsTesting(testMeta) + + // Create a series of valid (unsigned) dependent transactions. + atomicTxns, signerPrivKeysBase58 := _generateUnsignedDependentAtomicTransactions(testMeta, int(100)) + + // Construct an atomic transaction. + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + require.NoError(t, err) + + // Sign the internal atomic transactions, making the atomic transaction valid. + for ii := range atomicTxns { + _signTxn(t, + atomicTxnsWrapper.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[ii], + signerPrivKeysBase58[ii], + ) + } + + // Compute the txindex metadata for the transaction and ensure its computed correctly. 
+ utxoView, err := NewUtxoView( + testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + blockHeight := testMeta.chain.BlockTip().Height + 1 + atomicTxnMetadata := ComputeTransactionMetadata( + atomicTxnsWrapper, + utxoView, nil, 0, 0, 0, 0, + 0, 0, nil, uint64(blockHeight), + ) + + // + // (2) Attempt to encode and decode the computed transaction metadata without error. + // + + encodedTxnMetadata := EncodeToBytes(uint64(blockHeight), atomicTxnMetadata) + reader := bytes.NewReader(encodedTxnMetadata) + decodedAtomicTxnMetadata, err := DecodeDeSoEncoder(&TransactionMetadata{}, reader) + require.NoError(t, err) + + // + // (3) Ensure the original->encoded->decoded pathway results in the same struct. + // + + require.True(t, reflect.DeepEqual(atomicTxnMetadata, decodedAtomicTxnMetadata)) +} + func TestAtomicTxnsWrapperAtomicity(t *testing.T) { // Initialize test chain, miner, and testMeta. testMeta := _setUpMinerAndTestMetaForAtomicTransactionTests(t) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 3869d84b3..98cbfec68 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -1057,7 +1057,7 @@ type UtxoOperation struct { // NOTE: While it may seem erroneous to have a field within the UtxoOperation struct of // type UtxoOperation, this is valid because the size of the pointer is always known at // compile time. Hence, there's no circular dependency as is the case if we were to use - // [][]UtoOperation for this field instead. This could equivalently be a 2D array of + // [][]UtxoOperation for this field instead. This could equivalently be a 2D array of // void pointers from the compiler's perspective. 
In addition, it may seem as though // there's a recursive issue in RawEncodeWithoutMetadata resulting from cyclic dependencies, // this is not the case as we only call RawEncodeWithoutMetadata if the length of the diff --git a/lib/db_utils.go b/lib/db_utils.go index 395cc8e7e..9ac45db53 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -7216,6 +7216,8 @@ func (txnMeta *TransactionMetadata) RawEncodeWithoutMetadata(blockHeight uint64, data = append(data, EncodeToBytes(blockHeight, txnMeta.UnlockStakeTxindexMetadata, skipMetadata...)...) // encoding UnjailValidatorTxindexMetadata data = append(data, EncodeToBytes(blockHeight, txnMeta.UnjailValidatorTxindexMetadata, skipMetadata...)...) + // encoding AtomicTxnsWrapperTxindexMetadata + data = append(data, EncodeToBytes(blockHeight, txnMeta.AtomicTxnsWrapperTxindexMetadata, skipMetadata...)...) } return data @@ -7417,28 +7419,28 @@ func (txnMeta *TransactionMetadata) RawDecodeWithoutMetadata(blockHeight uint64, CopyCreateUserAssociationTxindexMetadata := &CreateUserAssociationTxindexMetadata{} if exist, err := DecodeFromBytes(CopyCreateUserAssociationTxindexMetadata, rr); exist && err == nil { txnMeta.CreateUserAssociationTxindexMetadata = CopyCreateUserAssociationTxindexMetadata - } else { + } else if err != nil { return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading CreateUserAssociationTxindexMetadata") } // decoding DeleteUserAssociationTxindexMetadata CopyDeleteUserAssociationTxindexMetadata := &DeleteUserAssociationTxindexMetadata{} if exist, err := DecodeFromBytes(CopyDeleteUserAssociationTxindexMetadata, rr); exist && err == nil { txnMeta.DeleteUserAssociationTxindexMetadata = CopyDeleteUserAssociationTxindexMetadata - } else { + } else if err != nil { return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading DeleteUserAssociationTxindexMetadata") } // decoding CreatePostAssociationTxindexMetadata CopyCreatePostAssociationTxindexMetadata := &CreatePostAssociationTxindexMetadata{} 
if exist, err := DecodeFromBytes(CopyCreatePostAssociationTxindexMetadata, rr); exist && err == nil { txnMeta.CreatePostAssociationTxindexMetadata = CopyCreatePostAssociationTxindexMetadata - } else { + } else if err != nil { return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading CreatePostAssociationTxindexMetadata") } // decoding DeletePostAssociationTxindexMetadata CopyDeletePostAssociationTxindexMetadata := &DeletePostAssociationTxindexMetadata{} if exist, err := DecodeFromBytes(CopyDeletePostAssociationTxindexMetadata, rr); exist && err == nil { txnMeta.DeletePostAssociationTxindexMetadata = CopyDeletePostAssociationTxindexMetadata - } else { + } else if err != nil { return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading DeletePostAssociationTxindexMetadata") } } @@ -7448,21 +7450,21 @@ func (txnMeta *TransactionMetadata) RawDecodeWithoutMetadata(blockHeight uint64, CopyAccessGroupTxindexMetadata := &AccessGroupTxindexMetadata{} if exist, err := DecodeFromBytes(CopyAccessGroupTxindexMetadata, rr); exist && err == nil { txnMeta.AccessGroupTxindexMetadata = CopyAccessGroupTxindexMetadata - } else { + } else if err != nil { return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading AccessGroupTxindexMetadata") } // decoding AccessGroupMembersTxindexMetadata CopyAccessGroupMembersTxindexMetadata := &AccessGroupMembersTxindexMetadata{} if exist, err := DecodeFromBytes(CopyAccessGroupMembersTxindexMetadata, rr); exist && err == nil { txnMeta.AccessGroupMembersTxindexMetadata = CopyAccessGroupMembersTxindexMetadata - } else { + } else if err != nil { return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading AccessGroupMembersTxindexMetadata") } // decoding NewMessageTxindexMetadata CopyNewMessageTxindexMetadata := &NewMessageTxindexMetadata{} if exist, err := DecodeFromBytes(CopyNewMessageTxindexMetadata, rr); exist && err == nil { txnMeta.NewMessageTxindexMetadata = CopyNewMessageTxindexMetadata - } else { + } 
else if err != nil { return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading NewMessageTxindexMetadata") } } @@ -7492,6 +7494,10 @@ func (txnMeta *TransactionMetadata) RawDecodeWithoutMetadata(blockHeight uint64, if txnMeta.UnjailValidatorTxindexMetadata, err = DecodeDeSoEncoder(&UnjailValidatorTxindexMetadata{}, rr); err != nil { return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading UnjailValidatorTxindexMetadata: ") } + // decoding AtomicTxnsWrapperTxindexMetadata + if txnMeta.AtomicTxnsWrapperTxindexMetadata, err = DecodeDeSoEncoder(&AtomicTxnsWrapperTxindexMetadata{}, rr); err != nil { + return errors.Wrapf(err, "TransactionMetadata.Decode: Problem reading AtomicTxnsWrapperTxindexMetadata: ") + } } return nil diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 8614e8b46..8e577ef8c 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2019,6 +2019,7 @@ func ComputeTransactionMetadata( txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, affectedPublicKeys...) case TxnTypeAtomicTxnsWrapper: realTxMeta := txn.TxnMeta.(*AtomicTxnsWrapperMetadata) + txnMeta.AtomicTxnsWrapperTxindexMetadata = &AtomicTxnsWrapperTxindexMetadata{} txnMeta.AtomicTxnsWrapperTxindexMetadata.InnerTxnsTransactionMetadata = []*TransactionMetadata{} for _, innerTxn := range realTxMeta.Txns { // Compute the transaction metadata for each inner transaction. 
From cf269149719da749345ea57ee7fae32712b97e97 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 20 Mar 2024 14:32:33 -0400 Subject: [PATCH 548/762] Remove Redundant StartMempoolSyncRoutine Starts Before Mempool Is Initialized (#1133) --- lib/server.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/lib/server.go b/lib/server.go index 58e158f84..7ccd0f7c2 100644 --- a/lib/server.go +++ b/lib/server.go @@ -544,11 +544,6 @@ func NewServer( _minFeeRateNanosPerKB, _blockCypherAPIKey, _runReadOnlyUtxoViewUpdater, _dataDir, _mempoolDumpDir, false) - // Initialize state syncer mempool job, if needed. - if srv.stateChangeSyncer != nil { - srv.stateChangeSyncer.StartMempoolSyncRoutine(srv) - } - // Initialize the PoS mempool. We need to initialize a best-effort UtxoView based on the current // known state of the chain. This will all be overwritten as we process blocks later on. currentUtxoView, err := _chain.GetUncommittedTipView() @@ -687,10 +682,6 @@ func NewServer( timer.Initialize() srv.timer = timer - if srv.stateChangeSyncer != nil { - srv.stateChangeSyncer.StartMempoolSyncRoutine(srv) - } - // If shouldRestart is true, it means that the state checksum is likely corrupted, and we need to enter a recovery mode. // This can happen if the node was terminated mid-operation last time it was running. The recovery process rolls back // blocks to the beginning of the current snapshot epoch and resets to the state checksum to the epoch checksum. From fbd347ac6e021ece705f92890abb69c05d72221e Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Wed, 20 Mar 2024 12:52:38 -0700 Subject: [PATCH 549/762] Support _executeAllTestRollbackAndFlush for atomic transactions. 
--- lib/block_view_atomic_txns_test.go | 43 +++++++++++++++++++----------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index 4379ec1fc..ff5113cc4 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -75,12 +75,18 @@ func TestAtomicTxnsWrapperAtomicity(t *testing.T) { // Create a series of valid (unsigned) dependent transactions. atomicTxns, signerPrivKeysBase58 := _generateUnsignedDependentAtomicTransactions(testMeta, int(100)) + // Construct a UtxoView and block height for getting balances. + utxoView, err := NewUtxoView( + testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + blockHeight := testMeta.chain.BlockTip().Height + 1 + // Fetch all starting balances for affected public keys. fetchTransactorBalances := func(transactions []*MsgDeSoTxn) []uint64 { var balancesNanos []uint64 for _, txn := range transactions { balancesNanos = append(balancesNanos, - _getBalance(testMeta.t, testMeta.chain, testMeta.mempool, + _getBalanceWithView(testMeta.t, testMeta.chain, utxoView, Base58CheckEncode(txn.PublicKey, false, testMeta.params))) } return balancesNanos @@ -110,10 +116,17 @@ func TestAtomicTxnsWrapperAtomicity(t *testing.T) { // Try to connect the atomic transaction wrapper. // This should fail on the final transaction as it's incorrectly signed. - _, err = _atomicTransactionsWrapperWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapper, 0) + _, _, _, _, err = utxoView.ConnectTransaction( + atomicTxnsWrapper, atomicTxnsWrapper.Hash(), blockHeight, + 0, true, false) require.Contains(t, err.Error(), RuleErrorInvalidTransactionSignature) + // Because the transaction fails, we must construct a new UtxoView as there's + // invalid data in the previous view. 
+ utxoView, err = NewUtxoView( + testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) + require.NoError(t, err) + // Check that the balances are not updated. // This ensures that if a single transaction within the atomic transaction // fails, the entire transaction as a whole fails. @@ -129,8 +142,9 @@ func TestAtomicTxnsWrapperAtomicity(t *testing.T) { ) // Try to connect the atomic transaction, this should now succeed. - _, err = _atomicTransactionsWrapperWithConnectTimestamp( - t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapper, 0) + _, _, _, _, err = utxoView.ConnectTransaction( + atomicTxnsWrapper, atomicTxnsWrapper.Hash(), blockHeight, + 0, false, false) require.NoError(t, err) // Validate that only M0's balance has changed by the total fees paid. @@ -144,6 +158,9 @@ func TestAtomicTxnsWrapperAtomicity(t *testing.T) { require.Equal(t, uint64(0), endingBalance) } } + + // Test disconnects. + _executeAllTestRollbackAndFlush(testMeta) } func TestAtomicTxnsSignatureFailure(t *testing.T) { @@ -407,7 +424,7 @@ func TestDependentAtomicTransactionGeneration(t *testing.T) { blockHeight := testMeta.chain.BlockTip().Height + 1 // Get the initial balance for m0. - m0InitialBalanceNanos := _getBalance(t, testMeta.chain, testMeta.mempool, m0Pub) + m0InitialBalanceNanos := _getBalanceWithView(t, testMeta.chain, utxoView, m0Pub) // Connect the transactions to ensure they can actually be connected. var totalFees uint64 @@ -420,11 +437,8 @@ func TestDependentAtomicTransactionGeneration(t *testing.T) { totalFees += fees } - // Flush the view to ensure everything is working properly. - require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) - // Get the final balance for m0. - m0FinalBalanceNanos := _getBalance(t, testMeta.chain, testMeta.mempool, m0Pub) + m0FinalBalanceNanos := _getBalanceWithView(t, testMeta.chain, utxoView, m0Pub) // Check that fees were paid. 
require.Equal(t, m0InitialBalanceNanos-totalFees, m0FinalBalanceNanos) @@ -441,12 +455,6 @@ func TestDependentAtomicTransactionGeneration(t *testing.T) { atomicTxns[0] = atomicTxns[len(atomicTxns)-1] atomicTxns[0] = initialTxn - // Construct a new view to connect the transactions to. - utxoView, err = NewUtxoView( - testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) - blockHeight = testMeta.chain.BlockTip().Height + 1 - // Connect the transactions to ensure they can actually be connected. for _, txn := range atomicTxns { // Connect the transaction. @@ -457,6 +465,9 @@ func TestDependentAtomicTransactionGeneration(t *testing.T) { require.Contains(t, err.Error(), RuleErrorInsufficientBalance) } } + + // Test disconnects. + _executeAllTestRollbackAndFlush(testMeta) } //---------------------------------------------------------- From 250a7bb6c0817fce65d0790b233c40cdc77be964 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 20 Mar 2024 18:21:15 -0400 Subject: [PATCH 550/762] Use Safer Handshake Filter For Connected Validators (#1137) --- lib/network_manager.go | 41 +++++++++++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/lib/network_manager.go b/lib/network_manager.go index 0e83ca3b1..2208a0445 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -2,6 +2,13 @@ package lib import ( "fmt" + "math" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + "github.com/btcsuite/btcd/addrmgr" "github.com/btcsuite/btcd/wire" "github.com/decred/dcrd/lru" @@ -10,12 +17,6 @@ import ( "github.com/deso-protocol/core/consensus" "github.com/golang/glog" "github.com/pkg/errors" - "math" - "net" - "strconv" - "sync" - "sync/atomic" - "time" ) // NetworkManager is a structure that oversees all connections to RemoteNodes. 
NetworkManager has the following @@ -1185,18 +1186,30 @@ func (nm *NetworkManager) GetAllValidators() *collections.ConcurrentMap[bls.Seri return allValidators } -// GetConnectedValidators returns a list of all connected validators that have passed handshake. It is a sub-view of -// the GetAllValidators map, with only the validators that have HandshakeCompleted status. +// GetConnectedValidators returns a list of all connected validators that have passed handshake. It filters +// for validators that have completed the handshake. If both an inbound and outbound connection exist to the +// same validator, it prioritizes the outbound connection because that is the one we initiated. func (nm *NetworkManager) GetConnectedValidators() []*RemoteNode { - var connectedValidators []*RemoteNode - allValidators := nm.GetAllValidators().ToMap() - for _, rn := range allValidators { - // Filter out validators that have not completed the handshake. + connectedValidators := map[bls.SerializedPublicKey]*RemoteNode{} + + inboundValidatorsMap := nm.GetValidatorInboundIndex().ToMap() + outboundValidatorsMap := nm.GetValidatorOutboundIndex().ToMap() + + // Add all of the connected inbound validators first + for pk, rn := range inboundValidatorsMap { if rn.IsHandshakeCompleted() { - connectedValidators = append(connectedValidators, rn) + connectedValidators[pk] = rn } } - return connectedValidators + + // Add all of the connected outbound validators next, overriding any inbound validator connections + for pk, rn := range outboundValidatorsMap { + if rn.IsHandshakeCompleted() { + connectedValidators[pk] = rn + } + } + + return collections.MapValues(connectedValidators) } func (nm *NetworkManager) GetAllNonValidators() []*RemoteNode { From 9b0d55314c191f197ad47a1bc1d481943daff998 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Wed, 20 Mar 2024 19:31:36 -0700 Subject: [PATCH 551/762] PoS UtxoView ConnectTransactions (#1099) * PoS UtxoView 
ConnectTransactions * Comments --- lib/block_view.go | 97 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/lib/block_view.go b/lib/block_view.go index 5f56c6c22..77562a66d 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3920,6 +3920,103 @@ func (bav *UtxoView) _connectTransaction( return utxoOpsForTxn, totalInput, totalOutput, fees, nil } +// ConnectTransactionsFailSafe connects a list of transactions to the view and returns the combined UtxoOperations, +// total inputs, total outputs, fees, and success flags. If the ignoreFailing flag is set, the function will not return +// an error when transactions fail to connect. Instead, a failed transaction will be skipped over, and the success flag +// at the corresponding index will be set to false. If ignoreFailing is set to false, the function will return an error +// if any of the transactions fail to connect. The other parameters have the same behavior as in ConnectTransaction. +func (bav *UtxoView) ConnectTransactionsFailSafe( + txns []*MsgDeSoTxn, txHashes []*BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, + verifySignatures bool, ignoreUtxos bool, ignoreFailing bool) ( + _combinedUtxoOps [][]*UtxoOperation, _totalInputs []uint64, _totalOutputs []uint64, + _fees []uint64, _successFlags []bool, _err error) { + + return bav._connectTransactionsFailSafe(txns, txHashes, blockHeight, blockTimestampNanoSecs, verifySignatures, + ignoreUtxos, ignoreFailing, 0) +} + +// ConnectTransactionsFailSafeWithLimit works similarly to ConnectTransactionsFailSafe, but it limits the number of +// transactions that can be connected to the view. If the transactionConnectLimit is set to 0, all transactions will be +// connected. If the transactionConnectLimit is set to a positive number, the function will return after successfully +// connecting the specified number of transactions. Failing transactions do not count towards the transactionConnectLimit. 
+func (bav *UtxoView) ConnectTransactionsFailSafeWithLimit( + txns []*MsgDeSoTxn, txHashes []*BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, + verifySignatures bool, ignoreUtxos bool, ignoreFailing bool, transactionConnectLimit uint32) ( + _combinedUtxoOps [][]*UtxoOperation, _totalInputs []uint64, _totalOutputs []uint64, + _fees []uint64, _successFlags []bool, _err error) { + + return bav._connectTransactionsFailSafe(txns, txHashes, blockHeight, blockTimestampNanoSecs, verifySignatures, + ignoreUtxos, ignoreFailing, transactionConnectLimit) +} + +func (bav *UtxoView) _connectTransactionsFailSafe( + txns []*MsgDeSoTxn, txHashes []*BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, + verifySignatures bool, ignoreUtxos bool, ignoreFailing bool, transactionConnectLimit uint32) ( + _combinedUtxoOps [][]*UtxoOperation, _totalInputs []uint64, _totalOutputs []uint64, + _fees []uint64, _successFlags []bool, _err error) { + + var combinedUtxoOps [][]*UtxoOperation + var totalInputs []uint64 + var totalOutputs []uint64 + var fees []uint64 + var successFlags []bool + var totalConnectedTxns uint32 + + updateValues := func(utxoOps []*UtxoOperation, totalInput uint64, totalOutput uint64, fee uint64, success bool) { + combinedUtxoOps = append(combinedUtxoOps, utxoOps) + totalInputs = append(totalInputs, totalInput) + totalOutputs = append(totalOutputs, totalOutput) + fees = append(fees, fee) + successFlags = append(successFlags, success) + } + + // Connect the transactions in the order they are given. + for ii, txn := range txns { + // Create a copy of the view to connect the transactions to in the event we have a failing txn. + copiedView, err := bav.CopyUtxoView() + if err != nil { + return nil, nil, nil, nil, nil, + errors.Wrapf(err, "_connectTransactionsFailSafe: Problem copying UtxoView") + } + + // Connect the transaction to the copied view. 
+ utxoOpsForTxn, totalInput, totalOutput, fee, err := copiedView.ConnectTransaction( + txn, txHashes[ii], blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) + if err != nil && ignoreFailing { + // If ignoreFailing was set, we mark the transaction as failing and continue. + glog.V(2).Infof("_connectTransactionsFailSafe: Ignoring failing txn %d: %v", ii, err) + updateValues(nil, 0, 0, 0, false) + continue + } else if err != nil { + return nil, nil, nil, nil, nil, + errors.Wrapf(err, "_connectTransactionsFailSafe: Problem connecting txn %d on copy view", ii) + } + + // At this point, we know the transaction will connect successfully, so we connect it to the main view. + utxoOpsForTxn, totalInput, totalOutput, fee, err = bav.ConnectTransaction( + txn, txHashes[ii], blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) + if err != nil { + return nil, nil, nil, nil, nil, + errors.Wrapf(err, "_connectTransactionsFailSafe: Problem connecting txn %d", ii) + } + updateValues(utxoOpsForTxn, totalInput, totalOutput, fee, true) + + // If the transactionConnectLimit was set to 0, we will try to connect all the provided transactions. + if transactionConnectLimit == 0 { + continue + } + + // Otherwise, if transactionConnectLimit is non-zero, we'll keep track of how many transactions we've + // successfully connected so far. 
+ totalConnectedTxns++ + if totalConnectedTxns >= transactionConnectLimit { + break + } + } + + return combinedUtxoOps, totalInputs, totalOutputs, fees, successFlags, nil +} + func (bav *UtxoView) ValidateTransactionNonce(txn *MsgDeSoTxn, blockHeight uint64) error { if txn == nil || txn.TxnNonce == nil { return fmt.Errorf("ValidateTransactionNonce: Nonce or txn is nil for public key %v", From d9afc28b3a269d7da6040483c9de32b6813ed53d Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Thu, 21 Mar 2024 00:41:09 -0700 Subject: [PATCH 552/762] PoS Mempool Updated Refresh (#1100) * PoS Mempool Updated Refresh * Comments and fixes --- cmd/config.go | 2 + cmd/node.go | 1 + cmd/run.go | 2 + integration_testing/tools.go | 1 + lib/block_view.go | 6 +-- lib/pos_block_producer_test.go | 8 +-- lib/pos_blockchain_test.go | 2 +- lib/pos_fee_estimator_test.go | 2 +- lib/pos_mempool.go | 99 ++++++++++++++++++---------------- lib/pos_mempool_test.go | 16 +++--- lib/server.go | 2 + 11 files changed, 78 insertions(+), 63 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 4d47aad55..d64954475 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -53,6 +53,7 @@ type Config struct { MaxMempoolPosSizeBytes uint64 MempoolFeeEstimatorNumMempoolBlocks uint64 MempoolFeeEstimatorNumPastBlocks uint64 + MempoolMaxValidationViewConnects uint64 AugmentedBlockViewRefreshIntervalMillis uint64 // Mining @@ -131,6 +132,7 @@ func LoadConfig() *Config { config.MaxMempoolPosSizeBytes = viper.GetUint64("max-mempool-pos-size-bytes") config.MempoolFeeEstimatorNumMempoolBlocks = viper.GetUint64("mempool-fee-estimator-num-mempool-blocks") config.MempoolFeeEstimatorNumPastBlocks = viper.GetUint64("mempool-fee-estimator-num-past-blocks") + config.MempoolMaxValidationViewConnects = viper.GetUint64("mempool-max-validation-view-connects") config.AugmentedBlockViewRefreshIntervalMillis = viper.GetUint64("augmented-block-view-refresh-interval-millis") // Peers 
diff --git a/cmd/node.go b/cmd/node.go index 18f6e3b5e..0c8d161c1 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -279,6 +279,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Config.MempoolBackupIntervalMillis, node.Config.MempoolFeeEstimatorNumMempoolBlocks, node.Config.MempoolFeeEstimatorNumPastBlocks, + node.Config.MempoolMaxValidationViewConnects, node.Config.AugmentedBlockViewRefreshIntervalMillis, node.Config.PosBlockProductionIntervalMilliseconds, node.Config.PosTimeoutBaseDurationMilliseconds, diff --git a/cmd/run.go b/cmd/run.go index 2eaf1fa67..5fa164fd4 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -102,6 +102,8 @@ func SetupRunFlags(cmd *cobra.Command) { "The number of future blocks to break the PoS mempool into when estimating txn fee for the next block.") cmd.PersistentFlags().Uint64("mempool-fee-estimator-num-past-blocks", 50, "The number of past blocks to use when estimating txn fee for the next block from the PoS mempool.") + cmd.PersistentFlags().Uint64("mempool-max-validation-view-connects", 10000, + "The maximum number of connects that the PoS mempool transaction validation routine will perform.") cmd.PersistentFlags().Uint64("augmented-block-view-refresh-interval-millis", 10, "The frequency in milliseconds with which the augmented block view will be refreshed. 
"+ "The default value is 100 milliseconds.") diff --git a/integration_testing/tools.go b/integration_testing/tools.go index 388a4e4bf..8731490f1 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -86,6 +86,7 @@ func generateConfig(t *testing.T, port uint32, dataDir string, maxPeers uint32) config.MaxMempoolPosSizeBytes = 3000000000 config.MempoolFeeEstimatorNumMempoolBlocks = 1 config.MempoolFeeEstimatorNumPastBlocks = 50 + config.MempoolMaxValidationViewConnects = 10000 config.AugmentedBlockViewRefreshIntervalMillis = 10 config.PosBlockProductionIntervalMilliseconds = 1500 config.PosTimeoutBaseDurationMilliseconds = 30000 diff --git a/lib/block_view.go b/lib/block_view.go index 77562a66d..e67997c8e 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3941,7 +3941,7 @@ func (bav *UtxoView) ConnectTransactionsFailSafe( // connecting the specified number of transactions. Failing transactions do not count towards the transactionConnectLimit. func (bav *UtxoView) ConnectTransactionsFailSafeWithLimit( txns []*MsgDeSoTxn, txHashes []*BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, - verifySignatures bool, ignoreUtxos bool, ignoreFailing bool, transactionConnectLimit uint32) ( + verifySignatures bool, ignoreUtxos bool, ignoreFailing bool, transactionConnectLimit uint64) ( _combinedUtxoOps [][]*UtxoOperation, _totalInputs []uint64, _totalOutputs []uint64, _fees []uint64, _successFlags []bool, _err error) { @@ -3951,7 +3951,7 @@ func (bav *UtxoView) ConnectTransactionsFailSafeWithLimit( func (bav *UtxoView) _connectTransactionsFailSafe( txns []*MsgDeSoTxn, txHashes []*BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, - verifySignatures bool, ignoreUtxos bool, ignoreFailing bool, transactionConnectLimit uint32) ( + verifySignatures bool, ignoreUtxos bool, ignoreFailing bool, transactionConnectLimit uint64) ( _combinedUtxoOps [][]*UtxoOperation, _totalInputs []uint64, _totalOutputs []uint64, _fees []uint64, 
_successFlags []bool, _err error) { @@ -3960,7 +3960,7 @@ func (bav *UtxoView) _connectTransactionsFailSafe( var totalOutputs []uint64 var fees []uint64 var successFlags []bool - var totalConnectedTxns uint32 + var totalConnectedTxns uint64 updateValues := func(utxoOps []*UtxoOperation, totalInput uint64, totalOutput uint64, fee uint64, success bool) { combinedUtxoOps = append(combinedUtxoOps, utxoOps) diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 15f15258a..619e17fd1 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -36,7 +36,7 @@ func TestCreateBlockTemplate(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 100, + nil, 1, 10000, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -102,7 +102,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 100, + nil, 1, 10000, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -179,7 +179,7 @@ func TestGetBlockTransactions(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 100, + nil, 1, 10000, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -264,7 +264,7 @@ func TestGetBlockTransactions(t *testing.T) { testMempool := NewPosMempool() testMempool.Init( params, globalParams, latestBlockView, 2, "", true, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 100, + nil, 1, 10000, 100, ) require.NoError(testMempool.Start()) defer testMempool.Stop() diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 8f40597e8..4531d81f2 100644 
--- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2790,7 +2790,7 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { mempool := NewPosMempool() require.NoError(t, mempool.Init( params, _testGetDefaultGlobalParams(), latestBlockView, 11, _dbDirSetup(t), false, maxMempoolPosSizeBytes, - mempoolBackupIntervalMillis, 1, nil, 1, 100, + mempoolBackupIntervalMillis, 1, nil, 1, 10000, 100, )) require.NoError(t, mempool.Start()) require.True(t, mempool.IsRunning()) diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index 08e1a18ee..02f16b4d8 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -26,7 +26,7 @@ func TestFeeEstimator(t *testing.T) { mempool := NewPosMempool() err = mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 100, + nil, 1, 10000, 100, ) require.NoError(t, err) require.NoError(t, mempool.Start()) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 325dbbe1f..b6081d26c 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -151,6 +151,11 @@ type PosMempool struct { // based off the current state of the mempool and the most n recent blocks. feeEstimator *PoSFeeEstimator + // maxValidationViewConnects is the maximum number of transactions that the mempool will connect to the validation view + // during the Refresh operation. This limit applies to the number of transactions that successfully connect to the + // validation view. Transactions that will fail the validation view connection are not counted towards this limit. + maxValidationViewConnects uint64 + // augmentedBlockViewRefreshIntervalMillis is the frequency with which the augmentedLatestBlockView is updated. 
augmentedBlockViewRefreshIntervalMillis uint64 @@ -207,6 +212,7 @@ func (mp *PosMempool) Init( feeEstimatorNumMempoolBlocks uint64, feeEstimatorPastBlocks []*MsgDeSoBlock, feeEstimatorNumPastBlocks uint64, + maxValidationViewConnects uint64, augmentedBlockViewRefreshIntervalMillis uint64, ) error { if mp.status != PosMempoolStatusNotInitialized { @@ -229,6 +235,7 @@ func (mp *PosMempool) Init( mp.inMemoryOnly = inMemoryOnly mp.maxMempoolPosSizeBytes = maxMempoolPosSizeBytes mp.mempoolBackupIntervalMillis = mempoolBackupIntervalMillis + mp.maxValidationViewConnects = maxValidationViewConnects mp.augmentedBlockViewRefreshIntervalMillis = augmentedBlockViewRefreshIntervalMillis // TODO: parameterize num blocks. Also, how to pass in blocks. @@ -529,11 +536,12 @@ func (mp *PosMempool) checkTransactionSanity(txn *MsgDeSoTxn) error { return nil } +// updateTransactionValidatedStatus updates the validated status of a mempool transaction with the provided txnHash. func (mp *PosMempool) updateTransactionValidatedStatus(txnHash *BlockHash, validated bool) { mp.Lock() defer mp.Unlock() - if !mp.IsRunning() { + if !mp.IsRunning() || txnHash == nil { return } @@ -711,61 +719,63 @@ func (mp *PosMempool) GetIterator() MempoolIterator { return NewPosMempoolIterator(mp.txnRegister.GetFeeTimeIterator()) } -// Refresh can be used to evict stale transactions from the mempool. However, it is a bit expensive and should be used -// sparingly. Upon being called, Refresh will create an in-memory temp PosMempool and populate it with transactions from -// the main mempool. The temp mempool will have the most up-to-date readOnlyLatestBlockView, Height, and globalParams. Any -// transaction that fails to add to the temp mempool will be removed from the main mempool. +// Refresh updates the validated status of transactions in the mempool. The function connects the Fee-Time ordered +// mempool transactions to the readOnlyLatestBlockView, creating a cumulative validationView. 
Transactions that fail to +// connect to the validationView are removed from the mempool, as they would have also failed to connect during +// block production. This function is thread-safe. func (mp *PosMempool) Refresh() error { - mp.Lock() - defer mp.Unlock() - + // We hold a read-lock on the mempool to get the transactions and the latest block view. + mp.RLock() if !mp.IsRunning() { return nil } - if err := mp.refreshNoLock(); err != nil { - return errors.Wrapf(err, "PosMempool.Refresh: Problem refreshing mempool") + // We copy the reference to the readOnlyLatestBlockView. Since the utxoView is immutable, we don't need to copy the + // entire view while we hold the lock. + validationView := mp.readOnlyLatestBlockView + mempoolTxns := mp.getTransactionsNoLock() + mp.RUnlock() + + // If the validation view is nil, there's nothing to do so we return early. + if validationView == nil { + return nil } - return nil -} -func (mp *PosMempool) refreshNoLock() error { - // Create the temporary in-memory mempool with the most up-to-date readOnlyLatestBlockView, Height, and globalParams. - tempPool := NewPosMempool() - err := tempPool.Init( - mp.params, - mp.globalParams, - mp.readOnlyLatestBlockView, - mp.latestBlockHeight, - "", - true, - mp.maxMempoolPosSizeBytes, - mp.mempoolBackupIntervalMillis, - mp.feeEstimator.numMempoolBlocks, - mp.feeEstimator.cachedBlocks, - mp.feeEstimator.numPastBlocks, - mp.augmentedBlockViewRefreshIntervalMillis, - ) + // Convert the mempool transactions to the MsgDeSoTxn format, which we can use for connecting to the validation view. + var txns []*MsgDeSoTxn + var txHashes []*BlockHash + for _, txn := range mempoolTxns { + txns = append(txns, txn.Tx) + txHashes = append(txHashes, txn.Hash) + } + // Copy the validation view to avoid modifying the readOnlyLatestBlockView. 
+ copyValidationView, err := validationView.CopyUtxoView() if err != nil { - return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem initializing temp pool") + return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem copying utxo view") } - if err := tempPool.Start(); err != nil { - return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem starting temp pool") + // Connect the transactions to the validation view. We use the latest block height + 1 as the block height to connect + // the transactions. This is because the mempool contains transactions that we use for producing the next block. + _, _, _, _, successFlags, err := copyValidationView.ConnectTransactionsFailSafeWithLimit(txns, txHashes, uint32(mp.latestBlockHeight)+1, + time.Now().UnixNano(), false, false, true, mp.maxValidationViewConnects) + if err != nil { + return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem connecting transactions") } - defer tempPool.Stop() - // Add all transactions from the main mempool to the temp mempool. Skip signature verification. + // We iterate through the successFlags and update the validated status of the transactions in the mempool. var txnsToRemove []*MempoolTx - txns := mp.getTransactionsNoLock() - for _, txn := range txns { - mtxn := NewMempoolTransaction(txn.Tx, txn.Added, txn.IsValidated()) - err := tempPool.AddTransaction(mtxn) - if err == nil { - continue + for ii, successFlag := range successFlags { + if ii >= len(mempoolTxns) { + break + } + // If the transaction successfully connected to the validation view, we update the validated status of the + // transaction in the mempool. If the transaction failed to connect to the validation view, we add it to the + // txnsToRemove list. Note that we don't need to hold a lock while updating the validated status of the + // transactions in the mempool, since the updateTransactionValidatedStatus already holds the lock. 
+ if successFlag { + mp.updateTransactionValidatedStatus(mempoolTxns[ii].Hash, true) + } else { + txnsToRemove = append(txnsToRemove, mempoolTxns[ii]) } - - // If we've encountered an error while adding the transaction to the temp mempool, we add it to our txnsToRemove list. - txnsToRemove = append(txnsToRemove, txn) } // Now remove all transactions from the txnsToRemove list from the main mempool. @@ -834,9 +844,6 @@ func (mp *PosMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { } mp.globalParams = globalParams - if err := mp.refreshNoLock(); err != nil { - glog.Errorf("PosMempool.UpdateGlobalParams: Problem refreshing mempool: %v", err) - } } // Implementation of the Mempool interface diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 102585c47..2f0715de6 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -23,7 +23,7 @@ func TestPosMempoolStart(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - ¶ms, globalParams, nil, 0, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, 100, + ¶ms, globalParams, nil, 0, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, 10000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -53,7 +53,7 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 100, + nil, 1, 10000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -72,7 +72,7 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { newPool := NewPosMempool() require.NoError(newPool.Init(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, - mempoolBackupIntervalMillis, 1, nil, 1, 100)) + mempoolBackupIntervalMillis, 1, nil, 1, 10000, 100)) require.NoError(newPool.Start()) 
require.True(newPool.IsRunning()) newPoolTxns := newPool.GetTransactions() @@ -108,7 +108,7 @@ func TestPosMempoolPrune(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 100, + nil, 1, 10000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -145,7 +145,7 @@ func TestPosMempoolPrune(t *testing.T) { newPool := NewPosMempool() require.NoError(newPool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 100, + nil, 1, 10000, 100, )) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) @@ -206,7 +206,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 100, + nil, 1, 10000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -235,7 +235,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { newPool := NewPosMempool() require.NoError(newPool.Init( params, newGlobalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 100, + nil, 1, 10000, 100, )) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) @@ -268,7 +268,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 100, + nil, 1, 10000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) diff --git a/lib/server.go b/lib/server.go index a8a4a7aa5..78e716ee2 100644 --- a/lib/server.go +++ b/lib/server.go @@ -406,6 +406,7 @@ func NewServer( _mempoolBackupIntervalMillis uint64, 
_mempoolFeeEstimatorNumMempoolBlocks uint64, _mempoolFeeEstimatorNumPastBlocks uint64, + _mempoolMaxValidationViewConnects uint64, _augmentedBlockViewRefreshIntervalMillis uint64, _posBlockProductionIntervalMilliseconds uint64, _posTimeoutBaseDurationMilliseconds uint64, @@ -564,6 +565,7 @@ func NewServer( _mempoolFeeEstimatorNumMempoolBlocks, []*MsgDeSoBlock{latestBlock}, _mempoolFeeEstimatorNumPastBlocks, + _mempoolMaxValidationViewConnects, _augmentedBlockViewRefreshIntervalMillis, ) if err != nil { From 021aaadb65aac6130ad16a4d07a254f77855be76 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Thu, 21 Mar 2024 00:50:43 -0700 Subject: [PATCH 553/762] PoS Mempool Transaction Validation Routine (#1101) * PoS Mempool Transaction Validation Routine * Comments --- cmd/config.go | 14 +++++----- cmd/node.go | 1 + cmd/run.go | 5 +++- integration_testing/tools.go | 1 + lib/pos_block_producer_test.go | 8 +++--- lib/pos_blockchain_test.go | 2 +- lib/pos_fee_estimator_test.go | 2 +- lib/pos_mempool.go | 48 ++++++++++++++++++++++++++++++---- lib/pos_mempool_test.go | 29 ++++++++++---------- lib/server.go | 2 ++ 10 files changed, 80 insertions(+), 32 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index d64954475..302ba2eb7 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -49,12 +49,13 @@ type Config struct { PosTimeoutBaseDurationMilliseconds uint64 // Mempool - MempoolBackupIntervalMillis uint64 - MaxMempoolPosSizeBytes uint64 - MempoolFeeEstimatorNumMempoolBlocks uint64 - MempoolFeeEstimatorNumPastBlocks uint64 - MempoolMaxValidationViewConnects uint64 - AugmentedBlockViewRefreshIntervalMillis uint64 + MempoolBackupIntervalMillis uint64 + MaxMempoolPosSizeBytes uint64 + MempoolFeeEstimatorNumMempoolBlocks uint64 + MempoolFeeEstimatorNumPastBlocks uint64 + MempoolMaxValidationViewConnects uint64 + TransactionValidationRefreshIntervalMillis uint64 + AugmentedBlockViewRefreshIntervalMillis uint64 // Mining 
MinerPublicKeys []string @@ -133,6 +134,7 @@ func LoadConfig() *Config { config.MempoolFeeEstimatorNumMempoolBlocks = viper.GetUint64("mempool-fee-estimator-num-mempool-blocks") config.MempoolFeeEstimatorNumPastBlocks = viper.GetUint64("mempool-fee-estimator-num-past-blocks") config.MempoolMaxValidationViewConnects = viper.GetUint64("mempool-max-validation-view-connects") + config.TransactionValidationRefreshIntervalMillis = viper.GetUint64("transaction-validation-refresh-interval-millis") config.AugmentedBlockViewRefreshIntervalMillis = viper.GetUint64("augmented-block-view-refresh-interval-millis") // Peers diff --git a/cmd/node.go b/cmd/node.go index 0c8d161c1..f14c09d87 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -280,6 +280,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Config.MempoolFeeEstimatorNumMempoolBlocks, node.Config.MempoolFeeEstimatorNumPastBlocks, node.Config.MempoolMaxValidationViewConnects, + node.Config.TransactionValidationRefreshIntervalMillis, node.Config.AugmentedBlockViewRefreshIntervalMillis, node.Config.PosBlockProductionIntervalMilliseconds, node.Config.PosTimeoutBaseDurationMilliseconds, diff --git a/cmd/run.go b/cmd/run.go index 5fa164fd4..ebbdb30e2 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -104,9 +104,12 @@ func SetupRunFlags(cmd *cobra.Command) { "The number of past blocks to use when estimating txn fee for the next block from the PoS mempool.") cmd.PersistentFlags().Uint64("mempool-max-validation-view-connects", 10000, "The maximum number of connects that the PoS mempool transaction validation routine will perform.") + cmd.PersistentFlags().Uint64("transaction-validation-refresh-interval-millis", 10, + "The frequency in milliseconds with which the transaction validation routine is run in mempool. "+ + "The default value is 10 milliseconds.") cmd.PersistentFlags().Uint64("augmented-block-view-refresh-interval-millis", 10, "The frequency in milliseconds with which the augmented block view will be refreshed. 
"+ - "The default value is 100 milliseconds.") + "The default value is 10 milliseconds.") // Peers cmd.PersistentFlags().StringSlice("connect-ips", []string{}, diff --git a/integration_testing/tools.go b/integration_testing/tools.go index 8731490f1..43733cbb9 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -87,6 +87,7 @@ func generateConfig(t *testing.T, port uint32, dataDir string, maxPeers uint32) config.MempoolFeeEstimatorNumMempoolBlocks = 1 config.MempoolFeeEstimatorNumPastBlocks = 50 config.MempoolMaxValidationViewConnects = 10000 + config.TransactionValidationRefreshIntervalMillis = 10 config.AugmentedBlockViewRefreshIntervalMillis = 10 config.PosBlockProductionIntervalMilliseconds = 1500 config.PosTimeoutBaseDurationMilliseconds = 30000 diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 619e17fd1..175d952f8 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -36,7 +36,7 @@ func TestCreateBlockTemplate(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + nil, 1, 10000, 100, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -102,7 +102,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + nil, 1, 10000, 100, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -179,7 +179,7 @@ func TestGetBlockTransactions(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + nil, 1, 10000, 100, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -264,7 
+264,7 @@ func TestGetBlockTransactions(t *testing.T) { testMempool := NewPosMempool() testMempool.Init( params, globalParams, latestBlockView, 2, "", true, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + nil, 1, 10000, 100, 100, ) require.NoError(testMempool.Start()) defer testMempool.Stop() diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 4531d81f2..36af5a5df 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2790,7 +2790,7 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { mempool := NewPosMempool() require.NoError(t, mempool.Init( params, _testGetDefaultGlobalParams(), latestBlockView, 11, _dbDirSetup(t), false, maxMempoolPosSizeBytes, - mempoolBackupIntervalMillis, 1, nil, 1, 10000, 100, + mempoolBackupIntervalMillis, 1, nil, 1, 10000, 100, 100, )) require.NoError(t, mempool.Start()) require.True(t, mempool.IsRunning()) diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index 02f16b4d8..e672e2460 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -26,7 +26,7 @@ func TestFeeEstimator(t *testing.T) { mempool := NewPosMempool() err = mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + nil, 1, 10000, 100, 100, ) require.NoError(t, err) require.NoError(t, mempool.Start()) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index b6081d26c..7ec370a7e 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -98,6 +98,13 @@ func (mtxn *MempoolTransaction) IsValidated() bool { // by Fee-Time algorithm. More on the Fee-Time algorithm can be found in the documentation of TransactionRegister. type PosMempool struct { sync.RWMutex + // startGroup and exitGroup are concurrency control mechanisms used to ensure that all the PosMempool routines + // are started and stopped properly. 
The startGroup is used to wait for all the PosMempool routines to start before + // returning from the Start method. The exitGroup is used to wait for all the PosMempool routines to stop before + // returning from the Stop method. + startGroup sync.WaitGroup + exitGroup sync.WaitGroup + status PosMempoolStatus // params of the blockchain params *DeSoParams @@ -152,10 +159,13 @@ type PosMempool struct { feeEstimator *PoSFeeEstimator // maxValidationViewConnects is the maximum number of transactions that the mempool will connect to the validation view - // during the Refresh operation. This limit applies to the number of transactions that successfully connect to the - // validation view. Transactions that will fail the validation view connection are not counted towards this limit. + // during the validateTransactions operation. This limit applies to the number of transactions that successfully connect + // to the validation view. Transactions that will fail the validation view connection are not counted towards this limit. maxValidationViewConnects uint64 + // transactionValidationRoutineRefreshIntervalMillis is the frequency with which the transactionValidationRoutine is run. + transactionValidationRefreshIntervalMillis uint64 + // augmentedBlockViewRefreshIntervalMillis is the frequency with which the augmentedLatestBlockView is updated. 
augmentedBlockViewRefreshIntervalMillis uint64 @@ -213,6 +223,7 @@ func (mp *PosMempool) Init( feeEstimatorPastBlocks []*MsgDeSoBlock, feeEstimatorNumPastBlocks uint64, maxValidationViewConnects uint64, + transactionValidationRefreshIntervalMillis uint64, augmentedBlockViewRefreshIntervalMillis uint64, ) error { if mp.status != PosMempoolStatusNotInitialized { @@ -236,6 +247,7 @@ func (mp *PosMempool) Init( mp.maxMempoolPosSizeBytes = maxMempoolPosSizeBytes mp.mempoolBackupIntervalMillis = mempoolBackupIntervalMillis mp.maxValidationViewConnects = maxValidationViewConnects + mp.transactionValidationRefreshIntervalMillis = transactionValidationRefreshIntervalMillis mp.augmentedBlockViewRefreshIntervalMillis = augmentedBlockViewRefreshIntervalMillis // TODO: parameterize num blocks. Also, how to pass in blocks. @@ -284,14 +296,38 @@ func (mp *PosMempool) Start() error { return errors.Wrapf(err, "PosMempool.Start: Problem loading persisted transactions") } } + mp.startGroup.Add(2) + mp.exitGroup.Add(2) + mp.startTransactionValidationRoutine() mp.startAugmentedViewRefreshRoutine() - + mp.startGroup.Wait() mp.status = PosMempoolStatusRunning return nil } +// startTransactionValidationRoutine is responsible for validating transactions in the mempool. The routine runs every +// transactionValidationRefreshIntervalMillis milliseconds. It uses the validateTransactions method to validate the +// top Fee-Time ordered transactions in the mempool. 
+func (mp *PosMempool) startTransactionValidationRoutine() { + go func() { + mp.startGroup.Done() + for { + select { + case <-time.After(time.Duration(mp.transactionValidationRefreshIntervalMillis) * time.Millisecond): + if err := mp.validateTransactions(); err != nil { + glog.Errorf("PosMempool.startTransactionValidationRoutine: Problem validating transactions: %v", err) + } + case <-mp.quit: + mp.exitGroup.Done() + return + } + } + }() +} + func (mp *PosMempool) startAugmentedViewRefreshRoutine() { go func() { + mp.startGroup.Done() for { select { case <-time.After(time.Duration(mp.augmentedBlockViewRefreshIntervalMillis) * time.Millisecond): @@ -351,6 +387,7 @@ func (mp *PosMempool) startAugmentedViewRefreshRoutine() { // Increment the augmentedLatestBlockViewSequenceNumber. atomic.AddInt64(&mp.augmentedLatestBlockViewSequenceNumber, 1) case <-mp.quit: + mp.exitGroup.Done() return } } @@ -380,6 +417,7 @@ func (mp *PosMempool) Stop() { mp.nonceTracker.Reset() mp.feeEstimator = NewPoSFeeEstimator() close(mp.quit) + mp.exitGroup.Wait() mp.status = PosMempoolStatusNotInitialized } @@ -719,11 +757,11 @@ func (mp *PosMempool) GetIterator() MempoolIterator { return NewPosMempoolIterator(mp.txnRegister.GetFeeTimeIterator()) } -// Refresh updates the validated status of transactions in the mempool. The function connects the Fee-Time ordered +// validateTransactions updates the validated status of transactions in the mempool. The function connects the Fee-Time ordered // mempool transactions to the readOnlyLatestBlockView, creating a cumulative validationView. Transactions that fail to // connect to the validationView are removed from the mempool, as they would have also failed to connect during // block production. This function is thread-safe. -func (mp *PosMempool) Refresh() error { +func (mp *PosMempool) validateTransactions() error { // We hold a read-lock on the mempool to get the transactions and the latest block view. 
mp.RLock() if !mp.IsRunning() { diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 2f0715de6..c9fa1abbc 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -23,11 +23,12 @@ func TestPosMempoolStart(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - ¶ms, globalParams, nil, 0, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, 10000, 100, + ¶ms, globalParams, nil, 0, dir, false, maxMempoolPosSizeBytes, + mempoolBackupIntervalMillis, 1, nil, 1, 1000, 100, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) - require.NoError(mempool.Refresh()) + require.NoError(mempool.validateTransactions()) mempool.Stop() require.False(mempool.IsRunning()) } @@ -53,7 +54,7 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + nil, 1, 1000, 100, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -65,20 +66,20 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { poolTxns := mempool.GetTransactions() require.Equal(2, len(poolTxns)) - require.NoError(mempool.Refresh()) + require.NoError(mempool.validateTransactions()) require.Equal(2, len(mempool.GetTransactions())) mempool.Stop() require.False(mempool.IsRunning()) newPool := NewPosMempool() require.NoError(newPool.Init(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, - mempoolBackupIntervalMillis, 1, nil, 1, 10000, 100)) + mempoolBackupIntervalMillis, 1, nil, 1, 1000, 100, 100)) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) newPoolTxns := newPool.GetTransactions() require.Equal(2, len(newPoolTxns)) require.Equal(len(newPool.GetTransactions()), len(newPool.nonceTracker.nonceMap)) - require.NoError(newPool.Refresh()) + 
require.NoError(newPool.validateTransactions()) require.Equal(2, len(newPool.GetTransactions())) _wrappedPosMempoolRemoveTransaction(t, newPool, txn1.Hash()) _wrappedPosMempoolRemoveTransaction(t, newPool, txn2.Hash()) @@ -108,7 +109,7 @@ func TestPosMempoolPrune(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + nil, 1, 1000, 100, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -137,7 +138,7 @@ func TestPosMempoolPrune(t *testing.T) { // Remove one transaction. _wrappedPosMempoolRemoveTransaction(t, mempool, fetchedTxns[0].Hash()) - require.NoError(mempool.Refresh()) + require.NoError(mempool.validateTransactions()) require.Equal(2, len(mempool.GetTransactions())) mempool.Stop() require.False(mempool.IsRunning()) @@ -145,7 +146,7 @@ func TestPosMempoolPrune(t *testing.T) { newPool := NewPosMempool() require.NoError(newPool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + nil, 1, 1000, 100, 100, )) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) @@ -174,7 +175,7 @@ func TestPosMempoolPrune(t *testing.T) { index++ } require.Equal(len(newPool.GetTransactions()), len(newPool.nonceTracker.nonceMap)) - require.NoError(newPool.Refresh()) + require.NoError(newPool.validateTransactions()) newTxns := newPool.GetTransactions() require.Equal(3, len(newTxns)) for _, txn := range newTxns { @@ -206,7 +207,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + nil, 1, 1000, 100, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -235,7 +236,7 @@ func 
TestPosMempoolUpdateGlobalParams(t *testing.T) { newPool := NewPosMempool() require.NoError(newPool.Init( params, newGlobalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + nil, 1, 1000, 100, 100, )) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) @@ -268,7 +269,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + nil, 1, 1000, 100, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -321,7 +322,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { require.Equal(txn1New, mempool.GetTransactions()[1].GetTxn()) require.Equal(len(mempool.GetTransactions()), len(mempool.nonceTracker.nonceMap)) - require.NoError(mempool.Refresh()) + require.NoError(mempool.validateTransactions()) require.Equal(2, len(mempool.GetTransactions())) mempool.Stop() require.False(mempool.IsRunning()) diff --git a/lib/server.go b/lib/server.go index 78e716ee2..1d0db426c 100644 --- a/lib/server.go +++ b/lib/server.go @@ -407,6 +407,7 @@ func NewServer( _mempoolFeeEstimatorNumMempoolBlocks uint64, _mempoolFeeEstimatorNumPastBlocks uint64, _mempoolMaxValidationViewConnects uint64, + _transactionValidationRefreshIntervalMillis uint64, _augmentedBlockViewRefreshIntervalMillis uint64, _posBlockProductionIntervalMilliseconds uint64, _posTimeoutBaseDurationMilliseconds uint64, @@ -566,6 +567,7 @@ func NewServer( []*MsgDeSoBlock{latestBlock}, _mempoolFeeEstimatorNumPastBlocks, _mempoolMaxValidationViewConnects, + _transactionValidationRefreshIntervalMillis, _augmentedBlockViewRefreshIntervalMillis, ) if err != nil { From e5cb4ac840a20ebf3575ad235fa519ca492004ec Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Thu, 21 Mar 2024 01:02:19 
-0700 Subject: [PATCH 554/762] PoS Block Producer updates (#1102) * PoS Block Producer updates * Fixes --- lib/block_view.go | 166 +++------------------------------ lib/block_view_test.go | 165 -------------------------------- lib/network.go | 54 +---------- lib/network_test.go | 11 --- lib/pos_block_producer.go | 58 ++---------- lib/pos_block_producer_test.go | 26 ++---- lib/pos_blockchain.go | 18 ---- lib/pos_blockchain_test.go | 37 +------- lib/pos_mempool.go | 20 ---- lib/state_change_syncer.go | 18 +--- lib/txindex.go | 30 ++---- 11 files changed, 42 insertions(+), 561 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index e67997c8e..3d38b18e0 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4049,104 +4049,6 @@ func (bav *UtxoView) ValidateTransactionNonce(txn *MsgDeSoTxn, blockHeight uint6 return nil } -// _connectFailingTransaction is used to process the fee and burn associated with the user submitting a failing transaction. -// A failing transaction is a txn that passes formatting validation, yet fails connecting to the UtxoView. This can happen for a -// number of reasons, such as insufficient DESO balance, wrong public key, etc. With Revolution's Fee-Time block ordering, these -// failing transactions are included in the blocks and their fees are burned. In addition, a major part of the effective -// fees of this transaction is burned with BMF. This makes spam attacks economically disadvantageous. Attacker's funds -// are burned, to the benefit of everyone else on the network. BMF algorithm also computes a utility fee, which is -// distributed to the block producer. -func (bav *UtxoView) _connectFailingTransaction(txn *MsgDeSoTxn, blockHeight uint32, verifySignatures bool) ( - _utxoOps []*UtxoOperation, _burnFee uint64, _utilityFee uint64, _err error) { - - // Failing transactions are only allowed after ProofOfStake2ConsensusCutoverBlockHeight. 
- if blockHeight < bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight { - return nil, 0, 0, fmt.Errorf("_connectFailingTransaction: Failing transactions " + - "not allowed before ProofOfStake2ConsensusCutoverBlockHeight") - } - - // Sanity check the transaction to make sure it is properly formatted. - if err := CheckTransactionSanity(txn, blockHeight, bav.Params); err != nil { - return nil, 0, 0, errors.Wrapf(err, "_connectFailingTransaction: "+ - "Problem checking txn sanity") - } - - if err := ValidateDeSoTxnSanityBalanceModel(txn, uint64(blockHeight), bav.Params, bav.GlobalParamsEntry); err != nil { - return nil, 0, 0, errors.Wrapf(err, "_connectFailingTransaction: "+ - "Problem checking txn sanity under balance model") - } - - if err := bav.ValidateTransactionNonce(txn, uint64(blockHeight)); err != nil { - return nil, 0, 0, errors.Wrapf(err, "_connectFailingTransaction: "+ - "Problem validating transaction nonce") - } - - // Get the FailingTransactionBMFMultiplierBasisPoints from the global params entry. We then compute the effective fee - // as: effectiveFee = txn.TxnFeeNanos * FailingTransactionBMFMultiplierBasisPoints / 10000 - gp := bav.GetCurrentGlobalParamsEntry() - - failingTransactionRate := uint256.NewInt().SetUint64(gp.FailingTransactionBMFMultiplierBasisPoints) - failingTransactionFee := uint256.NewInt().SetUint64(txn.TxnFeeNanos) - basisPointsAsUint256 := uint256.NewInt().SetUint64(MaxBasisPoints) - - effectiveFeeU256 := uint256.NewInt() - if effectiveFeeU256.MulOverflow(failingTransactionRate, failingTransactionFee) { - return nil, 0, 0, fmt.Errorf("_connectFailingTransaction: Problem computing effective fee") - } - effectiveFeeU256.Div(effectiveFeeU256, basisPointsAsUint256) - - // We should never overflow on the effective fee, since FailingTransactionBMFMultiplierBasisPoints is <= 10000. - // But if for some magical reason we do, we set the effective fee to the max uint64. 
We don't error, and - // instead let _spendBalance handle the overflow. - if !effectiveFeeU256.IsUint64() { - effectiveFeeU256.SetUint64(math.MaxUint64) - } - effectiveFee := effectiveFeeU256.Uint64() - - // Serialize the transaction to bytes so we can compute its size. - txnBytes, err := txn.ToBytes(false) - if err != nil { - return nil, 0, 0, errors.Wrapf(err, "_connectFailingTransaction: Problem serializing transaction: ") - } - txnSizeBytes := uint64(len(txnBytes)) - - // If the effective fee rate per KB is less than the minimum network fee rate per KB, we set it to the minimum - // network fee rate per KB. We multiply by 1000 and divide by the txn bytes to convert the txn's total effective - // fee to a fee rate per KB. - // - // The effectiveFee * 1000 computation is guaranteed to not overflow because an overflow check is already - // performed in ValidateDeSoTxnSanityBalanceModel above. - effectiveFeeRateNanosPerKB := (effectiveFee * 1000) / txnSizeBytes - if effectiveFeeRateNanosPerKB < gp.MinimumNetworkFeeNanosPerKB { - // The minimum effective fee for the txn is the txn size * the minimum network fee rate per KB. - effectiveFee = (gp.MinimumNetworkFeeNanosPerKB * txnSizeBytes) / 1000 - } - - burnFee, utilityFee := computeBMF(effectiveFee) - - var utxoOps []*UtxoOperation - // When spending balances, we need to check for immature block rewards. Since we don't have - // the block rewards yet for the current block, we subtract one from the current block height - // when spending balances. - feeUtxoOp, err := bav._spendBalance(effectiveFee, txn.PublicKey, blockHeight-1) - if err != nil { - return nil, 0, 0, errors.Wrapf(err, "_connectFailingTransaction: Problem "+ - "spending balance") - } - utxoOps = append(utxoOps, feeUtxoOp) - utxoOps = append(utxoOps, &UtxoOperation{Type: OperationTypeFailingTxn}) - - // If verifySignatures is passed, we check transaction signature. 
- if verifySignatures { - if err := bav._verifyTxnSignature(txn, blockHeight); err != nil { - return nil, 0, 0, errors.Wrapf(err, "_connectFailingTransaction: Problem "+ - "verifying signature") - } - } - - return utxoOps, burnFee, utilityFee, nil -} - // computeBMF computes the burn fee and the utility fee for a given fee. The acronym stands for Burn Maximizing Fee, which // entails that the burn function is designed to maximize the amount of DESO burned, while providing the minimal viable // utility fee to the block producer. This is so that block producers have no advantage over other network participants @@ -4254,66 +4156,24 @@ func (bav *UtxoView) ConnectBlock( for txIndex, txn := range desoBlock.Txns { txHash := txHashes[txIndex] - // PoS introduced a concept of a failing transaction, or transactions that fail UtxoView's ConnectTransaction. - // In PoS, these failing transactions are included in the block and their fees are burned. - - // To determine if we're dealing with a connecting or failing transaction, we first check if we're on a PoS block - // height. Otherwise, the transaction is expected to connect. - hasPoWBlockHeight := bav.Params.IsPoWBlockHeight(blockHeight) - // Also, the first transaction in the block, the block reward transaction, should always be a connecting transaction. - isBlockRewardTxn := (txIndex == 0) && (txn.TxnMeta.GetTxnType() == TxnTypeBlockReward) - // Finally, if the transaction is not the first in the block, we check the TxnConnectStatusByIndex to see if - // it's marked by the block producer as a connecting transaction. PoS blocks should reflect this in TxnConnectStatusByIndex. - hasConnectingPoSTxnStatus := false - if bav.Params.IsPoSBlockHeight(blockHeight) && (txIndex > 0) && (desoBlock.TxnConnectStatusByIndex != nil) { - // Note that TxnConnectStatusByIndex doesn't include the first block reward transaction. 
- hasConnectingPoSTxnStatus = desoBlock.TxnConnectStatusByIndex.Get(txIndex - 1) - } - // Now, we can determine if the transaction is expected to connect. - txnConnects := hasPoWBlockHeight || isBlockRewardTxn || hasConnectingPoSTxnStatus - var utilityFee uint64 var utxoOpsForTxn []*UtxoOperation var err error var currentFees uint64 - if txnConnects { - // ConnectTransaction validates all of the transactions in the block and - // is responsible for verifying signatures. - // - // TODO: We currently don't check that the min transaction fee is satisfied when - // connecting blocks. We skip this check because computing the transaction's size - // would slow down block processing significantly. We should figure out a way to - // enforce this check in the future, but for now the only attack vector is one in - // which a miner is trying to spam the network, which should generally never happen. - utxoOpsForTxn, _, _, currentFees, err = bav.ConnectTransaction( - txn, txHash, uint32(blockHeader.Height), blockHeader.TstampNanoSecs, verifySignatures, false) - if err != nil { - return nil, errors.Wrapf(err, "ConnectBlock: error connecting txn #%d", txIndex) - } - _, utilityFee = computeBMF(currentFees) - } else { - // If the transaction is not supposed to connect, we need to verify that it won't connect. - // We need to construct a copy of the view to verify that the transaction won't connect - // without side effects. - var utxoViewCopy *UtxoView - utxoViewCopy, err = bav.CopyUtxoView() - if err != nil { - return nil, errors.Wrapf(err, "ConnectBlock: error copying UtxoView") - } - _, _, _, _, err = utxoViewCopy.ConnectTransaction( - txn, txHash, uint32(blockHeader.Height), blockHeader.TstampNanoSecs, verifySignatures, false) - if err == nil { - return nil, errors.Errorf("ConnectBlock: txn #%d should not connect but err is nil", txIndex) - } - var burnFee uint64 - // Connect the failing transaction to get the fees and utility fee. 
- utxoOpsForTxn, burnFee, utilityFee, err = bav._connectFailingTransaction( - txn, uint32(blockHeader.Height), verifySignatures) - if err != nil { - return nil, errors.Wrapf(err, "ConnectBlock: error connecting failing txn #%d", txIndex) - } - currentFees = burnFee + utilityFee + // ConnectTransaction validates all of the transactions in the block and + // is responsible for verifying signatures. + // + // TODO: We currently don't check that the min transaction fee is satisfied when + // connecting blocks. We skip this check because computing the transaction's size + // would slow down block processing significantly. We should figure out a way to + // enforce this check in the future, but for now the only attack vector is one in + // which a miner is trying to spam the network, which should generally never happen. + utxoOpsForTxn, _, _, currentFees, err = bav.ConnectTransaction( + txn, txHash, uint32(blockHeader.Height), blockHeader.TstampNanoSecs, verifySignatures, false) + if err != nil { + return nil, errors.Wrapf(err, "ConnectBlock: error connecting txn #%d", txIndex) } + _, utilityFee = computeBMF(currentFees) // After the block reward patch block height, we only include fees from transactions // where the transactor is not the block reward output public key. 
This prevents diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 6cfbe72b4..ee7f173bf 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -12,8 +12,6 @@ import ( "github.com/deso-protocol/core/bls" - "math/rand" - "github.com/btcsuite/btcd/btcec" "github.com/decred/dcrd/lru" "github.com/dgraph-io/badger/v3" @@ -2217,166 +2215,3 @@ func TestBlockRewardPatch(t *testing.T) { require.NoError(t, err) } } - -func TestConnectFailingTransaction(t *testing.T) { - setBalanceModelBlockHeights(t) - setPoSBlockHeights(t, 3, 3) - require := require.New(t) - seed := int64(1011) - rand := rand.New(rand.NewSource(seed)) - - globalParams := _testGetDefaultGlobalParams() - feeMin := globalParams.MinimumNetworkFeeNanosPerKB - feeMax := uint64(10000) - - chain, params, db := NewLowDifficultyBlockchain(t) - mempool, miner := NewTestMiner(t, chain, params, true) - // Mine a few blocks to give the senderPkString some money. - _, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, mempool) - require.NoError(err) - _, err = miner.MineAndProcessSingleBlock(0 /*threadIndex*/, mempool) - require.NoError(err) - - m0PubBytes, _, _ := Base58CheckDecode(m0Pub) - m0PublicKeyBase58Check := Base58CheckEncode(m0PubBytes, false, params) - - _, _, _ = _doBasicTransferWithViewFlush( - t, chain, db, params, senderPkString, m0PublicKeyBase58Check, - senderPrivString, 200000, 11) - - blockHeight := chain.BlockTip().Height + 1 - - // Set up the test meta. - testMeta := &TestMeta{ - t: t, - chain: chain, - params: params, - db: db, - mempool: mempool, - miner: miner, - savedHeight: blockHeight, - feeRateNanosPerKb: uint64(201), - } - // Allow m0 to update global params. 
- params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(m0PubBytes)] = true - - // Test failing txn with default global params - { - blockView, err := NewUtxoView(db, params, nil, nil, chain.eventManager) - require.NoError(err) - txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) - utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn, blockHeight, true) - require.NoError(err) - require.Equal(2, len(utxoOps)) - expectedBurnFee, expectedUtilityFee := _getBMFForTxn(txn, globalParams) - require.Equal(expectedBurnFee, burnFee) - require.Equal(expectedUtilityFee, utilityFee) - - err = blockView.FlushToDb(uint64(blockHeight)) - require.NoError(err) - } - - // Test case where the failing txn fee rate is applied as expected. - { - - { - // Set FailingTransactionBMFMultiplierBasisPoints=7000 or 70%. - _updateGlobalParamsEntryWithExtraData( - testMeta, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - map[string][]byte{FailingTransactionBMFMultiplierBasisPointsKey: UintToBuf(7000)}, - ) - } - blockView, err := NewUtxoView(db, params, nil, nil, chain.eventManager) - require.NoError(err) - - newParams := blockView.GetCurrentGlobalParamsEntry() - require.Equal(uint64(7000), newParams.FailingTransactionBMFMultiplierBasisPoints) - - startingBalance, err := blockView.GetDeSoBalanceNanosForPublicKey(m0PubBytes) - require.NoError(err) - - // Try connecting another failing transaction, and make sure the burn and utility fees are computed accurately. - txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) - - utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn, blockHeight, true) - require.NoError(err) - require.Equal(2, len(utxoOps)) - - // The final balance is m0's starting balance minus the failing txn fee paid. 
- finalBalance, err := blockView.GetDeSoBalanceNanosForPublicKey(m0PubBytes) - require.NoError(err) - - // Recompute the failing txn fee, which is expected to use the minimum network fee rate because - // the failing txn fee rate is too low on its own. - expectedFailingTxnFee := txn.TxnFeeNanos * newParams.FailingTransactionBMFMultiplierBasisPoints / MaxBasisPoints - require.Equal(startingBalance, finalBalance+expectedFailingTxnFee) - - expectedBurnFee, expectedUtilityFee := _getBMFForTxn(txn, newParams) - require.Equal(expectedBurnFee, burnFee) - require.Equal(expectedUtilityFee, utilityFee) - - err = blockView.FlushToDb(uint64(blockHeight)) - require.NoError(err) - } - - // Test case where the failing txn fee rate is too low and replaced by the minimum network fee. - { - { - // Set FailingTransactionBMFMultiplierBasisPoints=1 or 0.01%. - _updateGlobalParamsEntryWithExtraData( - testMeta, - testMeta.feeRateNanosPerKb, - m0Pub, - m0Priv, - map[string][]byte{FailingTransactionBMFMultiplierBasisPointsKey: UintToBuf(1)}, - ) - } - - // Set the txn fee to ~1000 nanos, which guarantees that the effective failing txn fee rate is too low. - feeMin := uint64(1000) - feeMax := uint64(1001) - - blockView, err := NewUtxoView(db, params, nil, nil, chain.eventManager) - require.NoError(err) - - newParams := blockView.GetCurrentGlobalParamsEntry() - require.Equal(uint64(1), newParams.FailingTransactionBMFMultiplierBasisPoints) - - startingBalance, err := blockView.GetDeSoBalanceNanosForPublicKey(m0PubBytes) - require.NoError(err) - - txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 0) - utxoOps, burnFee, utilityFee, err := blockView._connectFailingTransaction(txn, blockHeight, true) - require.NoError(err) - require.Equal(2, len(utxoOps)) - - // The final balance is m0's starting balance minus the failing txn fee paid. 
- finalBalance, err := blockView.GetDeSoBalanceNanosForPublicKey(m0PubBytes) - require.NoError(err) - - txnBytes, err := txn.ToBytes(false) - require.NoError(err) - - // Recompute the failing txn fee, which is expected to use the minimum network fee rate because - // the failing txn fee rate is too low on its own. - expectedFailingTxnFee := uint64(len(txnBytes)) * newParams.MinimumNetworkFeeNanosPerKB / 1000 - require.Equal(startingBalance, finalBalance+expectedFailingTxnFee) - - expectedBurnFee, expectedUtilityFee := computeBMF(expectedFailingTxnFee) - require.Equal(expectedBurnFee, burnFee) - require.Equal(expectedUtilityFee, utilityFee) - - err = blockView.FlushToDb(uint64(blockHeight)) - require.NoError(err) - } -} - -func _getBMFForTxn(txn *MsgDeSoTxn, gp *GlobalParamsEntry) (_burnFee uint64, _utilityFee uint64) { - failingTransactionRate := NewFloat().SetUint64(gp.FailingTransactionBMFMultiplierBasisPoints) - failingTransactionRate.Quo(failingTransactionRate, NewFloat().SetUint64(10000)) - failingTransactionFee, _ := NewFloat().Mul(failingTransactionRate, NewFloat().SetUint64(txn.TxnFeeNanos)).Uint64() - return computeBMF(failingTransactionFee) -} diff --git a/lib/network.go b/lib/network.go index d8c5af284..a6877a2ad 100644 --- a/lib/network.go +++ b/lib/network.go @@ -17,7 +17,6 @@ import ( "strings" "time" - "github.com/deso-protocol/core/collections/bitset" "github.com/golang/glog" "github.com/decred/dcrd/dcrec/secp256k1/v4" @@ -2081,14 +2080,6 @@ type MsgDeSoHeader struct { // event that ASICs become powerful enough to have birthday problems in the future. ExtraNonce uint64 - // TransactionsConnectStatus is only used for Proof of Stake blocks, starting with - // MsgDeSoHeader version 2. For all earlier versions, this field will default to nil. - // - // The hash of the TxnConnectStatusByIndex field in MsgDeSoBlock. It is stored to ensure - // that the TxnConnectStatusByIndex is part of the header hash, which is signed by the - // proposer. 
The full index is stored in the block to offload space complexity. - TxnConnectStatusByIndexHash *BlockHash - // ProposerVotingPublicKey is only used for Proof of Stake blocks, starting with // MsgDeSoHeader version 2. For all earlier versions, this field will default to nil. // @@ -2305,12 +2296,6 @@ func (msg *MsgDeSoHeader) EncodeHeaderVersion2(preSignature bool) ([]byte, error // The Nonce and ExtraNonce fields are unused in version 2. We skip them // during both encoding and decoding. - // TxnConnectStatusByIndexHash - if msg.TxnConnectStatusByIndexHash == nil { - return nil, fmt.Errorf("EncodeHeaderVersion2: TxnConnectStatusByIndexHash must be non-nil") - } - retBytes = append(retBytes, msg.TxnConnectStatusByIndexHash[:]...) - // ProposerVotingPublicKey if msg.ProposerVotingPublicKey == nil { return nil, fmt.Errorf("EncodeHeaderVersion2: ProposerVotingPublicKey must be non-nil") @@ -2520,13 +2505,6 @@ func DecodeHeaderVersion2(rr io.Reader) (*MsgDeSoHeader, error) { retHeader.Nonce = 0 retHeader.ExtraNonce = 0 - // TxnConnectStatusByIndexHash - retHeader.TxnConnectStatusByIndexHash = &BlockHash{} - _, err = io.ReadFull(rr, retHeader.TxnConnectStatusByIndexHash[:]) - if err != nil { - return nil, errors.Wrapf(err, "MsgDeSoHeader.FromBytes: Problem decoding TxnConnectStatusByIndexHash") - } - // ProposerVotingPublicKey retHeader.ProposerVotingPublicKey, err = DecodeBLSPublicKey(rr) if err != nil { @@ -2740,11 +2718,6 @@ type MsgDeSoBlock struct { // entity, which can be useful for nodes that want to restrict who they accept blocks // from. BlockProducerInfo *BlockProducerInfo - - // This bitset field stores information whether each transaction in the block passes - // or fails to connect. The bit at i-th position is set to 1 if the i-th transaction - // in the block passes connect, and 0 otherwise. 
- TxnConnectStatusByIndex *bitset.Bitset } func (msg *MsgDeSoBlock) EncodeBlockCommmon(preSignature bool) ([]byte, error) { @@ -2797,29 +2770,12 @@ func (msg *MsgDeSoBlock) EncodeBlockVersion1(preSignature bool) ([]byte, error) return data, nil } -func (msg *MsgDeSoBlock) EncodeBlockVersion2(preSignature bool) ([]byte, error) { - data, err := msg.EncodeBlockCommmon(preSignature) - if err != nil { - return nil, err - } - - // TxnConnectStatusByIndex - if msg.TxnConnectStatusByIndex == nil { - return nil, fmt.Errorf("MsgDeSoBlock.EncodeBlockVersion2: TxnConnectStatusByIndex should not be nil") - } - data = append(data, EncodeBitset(msg.TxnConnectStatusByIndex)...) - - return data, nil -} - func (msg *MsgDeSoBlock) ToBytes(preSignature bool) ([]byte, error) { switch msg.Header.Version { case HeaderVersion0: return msg.EncodeBlockVersion0(preSignature) - case HeaderVersion1: + case HeaderVersion1, HeaderVersion2: return msg.EncodeBlockVersion1(preSignature) - case HeaderVersion2: - return msg.EncodeBlockVersion2(preSignature) default: return nil, fmt.Errorf("MsgDeSoBlock.ToBytes: Error encoding version: %v", msg.Header.Version) } @@ -2912,14 +2868,6 @@ func (msg *MsgDeSoBlock) FromBytes(data []byte) error { } } - // Version 2 blocks have a TxnStatusConnectedIndex attached to them. 
- if ret.Header.Version == HeaderVersion2 { - ret.TxnConnectStatusByIndex, err = DecodeBitset(rr) - if err != nil { - return errors.Wrapf(err, "MsgDeSoBlock.FromBytes: Error decoding TxnConnectStatusByIndex") - } - } - *msg = *ret return nil } diff --git a/lib/network_test.go b/lib/network_test.go index d6c3e753f..fef075d20 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -142,12 +142,6 @@ func createTestBlockHeaderVersion2(t *testing.T, includeTimeoutQC bool) *MsgDeSo 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, } - testTxnConnectStatusByIndex := BlockHash{ - 0x00, 0x03, 0x04, 0x21, 0x06, 0x07, 0x08, 0x09, 0x10, 0x19, - 0x12, 0x13, 0x14, 0x15, 0x44, 0x17, 0x18, 0x19, 0x20, 0x21, - 0x02, 0x23, 0x24, 0x25, 0x26, 0x27, 0x33, 0x29, 0x30, 0x31, - 0x32, 0x33, - } testBitset := bitset.NewBitset().Set(0, true).Set(3, true) testBLSPublicKey, testBLSSignature := _generateValidatorVotingPublicKeyAndSignature(t) @@ -187,7 +181,6 @@ func createTestBlockHeaderVersion2(t *testing.T, includeTimeoutQC bool) *MsgDeSo // Nonce and ExtraNonce are unused and set to 0 starting in version 2. Nonce: uint64(0), ExtraNonce: uint64(0), - TxnConnectStatusByIndexHash: &testTxnConnectStatusByIndex, ProposerVotingPublicKey: testBLSPublicKey, ProposerRandomSeedSignature: testBLSSignature, ProposedInView: uint64(1432101234), @@ -453,10 +446,6 @@ func createTestBlockVersion2(t *testing.T, includeTimeoutQC bool) *MsgDeSoBlock // Set V2 header. block.Header = createTestBlockHeaderVersion2(t, includeTimeoutQC) - // Set the block's TxnConnectStatusByIndex and update its hash in the header. 
- block.TxnConnectStatusByIndex = bitset.NewBitset().Set(0, true).Set(3, true) - block.Header.TxnConnectStatusByIndexHash = HashBitset(block.TxnConnectStatusByIndex) - return &block } diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index d216a1153..372dc94a1 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -7,7 +7,6 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/deso-protocol/core/bls" - "github.com/deso-protocol/core/collections/bitset" "github.com/pkg/errors" ) @@ -109,8 +108,6 @@ func (pbp *PosBlockProducer) createBlockTemplate(latestBlockView *UtxoView, newB block.Header.ProposerVotingPublicKey = pbp.proposerVotingPublicKey block.Header.ProposerRandomSeedSignature = proposerRandomSeedSignature - // Hash the TxnConnectStatusByIndex - block.Header.TxnConnectStatusByIndexHash = HashBitset(block.TxnConnectStatusByIndex) return block, nil } @@ -139,7 +136,7 @@ func (pbp *PosBlockProducer) createBlockWithoutHeader( } // Get block transactions from the mempool. - feeTimeTxns, txnConnectStatusByIndex, maxUtilityFee, err := pbp.getBlockTransactions( + feeTimeTxns, maxUtilityFee, err := pbp.getBlockTransactions( pbp.proposerPublicKey, latestBlockView, newBlockHeight, @@ -155,7 +152,6 @@ func (pbp *PosBlockProducer) createBlockWithoutHeader( block.Txns = append([]*MsgDeSoTxn{blockRewardTxn}, feeTimeTxns...) // Set the RevolutionMetadata - block.TxnConnectStatusByIndex = txnConnectStatusByIndex return block, nil } @@ -168,7 +164,6 @@ func (pbp *PosBlockProducer) getBlockTransactions( maxBlockSizeBytes uint64, ) ( _txns []*MsgDeSoTxn, - _txnConnectStatusByIndex *bitset.Bitset, _maxUtilityFee uint64, _err error, ) { @@ -177,17 +172,16 @@ func (pbp *PosBlockProducer) getBlockTransactions( // Try to connect transactions one by one. 
blocksTxns := []*MsgDeSoTxn{} - txnConnectStatusByIndex := bitset.NewBitset() maxUtilityFee := uint64(0) currentBlockSize := uint64(0) blockUtxoView, err := latestBlockView.CopyUtxoView() if err != nil { - return nil, nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") + return nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") } for _, txn := range feeTimeTxns { txnBytes, err := txn.ToBytes(false) if err != nil { - return nil, nil, 0, errors.Wrapf(err, "Error getting transaction size: ") + return nil, 0, errors.Wrapf(err, "Error getting transaction size: ") } // Skip over transactions that are too big. @@ -197,68 +191,34 @@ func (pbp *PosBlockProducer) getBlockTransactions( blockUtxoViewCopy, err := blockUtxoView.CopyUtxoView() if err != nil { - return nil, nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") + return nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") } _, _, _, fees, err := blockUtxoViewCopy._connectTransaction( txn.GetTxn(), txn.Hash(), uint32(newBlockHeight), newBlockTimestampNanoSecs, true, false) // Check if the transaction connected. - if err == nil { - blockUtxoView = blockUtxoViewCopy - txnConnectStatusByIndex.Set(len(blocksTxns), true) - blocksTxns = append(blocksTxns, txn.GetTxn()) - currentBlockSize += uint64(len(txnBytes)) - - // If the transactor is the block producer, then they won't receive the utility - // fee. - if blockProducerPublicKey.Equal(*NewPublicKey(txn.PublicKey)) { - continue - } - - // Compute BMF for the transaction. - _, utilityFee := computeBMF(fees) - maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee) - if err != nil { - return nil, nil, 0, errors.Wrapf(err, "Error computing max utility fee: ") - } - continue - } - - // If the transaction didn't connect, we will try to add it as a failing transaction. 
- blockUtxoViewCopy, err = blockUtxoView.CopyUtxoView() if err != nil { - return nil, nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") - } - - _, _, utilityFee, err := blockUtxoViewCopy._connectFailingTransaction(txn.GetTxn(), uint32(newBlockHeight), true) - if err != nil { - // If the transaction still doesn't connect, this means we encountered an invalid transaction. We will skip - // it and let some other process figure out what to do with it. Removing invalid transactions is a fast - // process, so we don't need to worry about it here. continue } - - // If we get to this point, it means the transaction didn't connect but it was a valid transaction. We will - // add it to the block as a failing transaction. blockUtxoView = blockUtxoViewCopy - txnConnectStatusByIndex.Set(len(blocksTxns), false) blocksTxns = append(blocksTxns, txn.GetTxn()) currentBlockSize += uint64(len(txnBytes)) - // If the transactor is the block producer, then they won't receive the utility - // fee. + // If the transactor is the block producer, then they won't receive the utility fee. if blockProducerPublicKey.Equal(*NewPublicKey(txn.PublicKey)) { continue } + // Compute BMF for the transaction. 
+ _, utilityFee := computeBMF(fees) maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee) if err != nil { - return nil, nil, 0, errors.Wrapf(err, "Error computing max utility fee: ") + return nil, 0, errors.Wrapf(err, "Error computing max utility fee: ") } } - return blocksTxns, txnConnectStatusByIndex, maxUtilityFee, nil + return blocksTxns, maxUtilityFee, nil } func _maxInt64(a, b int64) int64 { diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 175d952f8..2fb62beea 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -9,7 +9,6 @@ import ( "time" "github.com/deso-protocol/core/bls" - "github.com/deso-protocol/core/collections/bitset" "github.com/stretchr/testify/require" ) @@ -76,7 +75,6 @@ func TestCreateBlockTemplate(t *testing.T) { require.Equal(blockTemplate.Header.ProposedInView, uint64(10)) require.Equal(blockTemplate.Header.ProposerVotingPublicKey, pub) require.True(blockTemplate.Header.ProposerRandomSeedSignature.Eq(seedSignature)) - require.Equal(blockTemplate.Header.TxnConnectStatusByIndexHash, HashBitset(blockTemplate.TxnConnectStatusByIndex)) } func TestCreateBlockWithoutHeader(t *testing.T) { @@ -122,14 +120,13 @@ func TestCreateBlockWithoutHeader(t *testing.T) { // Test cases where the block producer is the transactor for the mempool txns { pbp := NewPosBlockProducer(mempool, params, NewPublicKey(m0PubBytes), blsPubKey, time.Now().UnixNano()) - txns, txnConnectStatus, _, err := pbp.getBlockTransactions( + txns, _, err := pbp.getBlockTransactions( NewPublicKey(m0PubBytes), latestBlockView, 3, 0, 50000) require.NoError(err) blockTemplate, err := pbp.createBlockWithoutHeader(latestBlockView, 3, 0) require.NoError(err) require.Equal(txns, blockTemplate.Txns[1:]) - require.Equal(txnConnectStatus, blockTemplate.TxnConnectStatusByIndex) require.Equal(uint64(0), blockTemplate.Txns[0].TxOutputs[0].AmountNanos) require.Equal(NewMessage(MsgTypeHeader).(*MsgDeSoHeader), 
blockTemplate.Header) require.Nil(blockTemplate.BlockProducerInfo) @@ -138,14 +135,13 @@ func TestCreateBlockWithoutHeader(t *testing.T) { // Test cases where the block producer is not the transactor for the mempool txns { pbp := NewPosBlockProducer(mempool, params, NewPublicKey(m1PubBytes), blsPubKey, time.Now().UnixNano()) - txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions( + txns, maxUtilityFee, err := pbp.getBlockTransactions( NewPublicKey(m1PubBytes), latestBlockView, 3, 0, 50000) require.NoError(err) blockTemplate, err := pbp.createBlockWithoutHeader(latestBlockView, 3, 0) require.NoError(err) require.Equal(txns, blockTemplate.Txns[1:]) - require.Equal(txnConnectStatus, blockTemplate.TxnConnectStatusByIndex) require.Equal(maxUtilityFee, blockTemplate.Txns[0].TxOutputs[0].AmountNanos) require.Equal(NewMessage(MsgTypeHeader).(*MsgDeSoHeader), blockTemplate.Header) require.Nil(blockTemplate.BlockProducerInfo) @@ -246,11 +242,10 @@ func TestGetBlockTransactions(t *testing.T) { latestBlockViewCopy, err := latestBlockView.CopyUtxoView() require.NoError(err) - txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions(NewPublicKey(m1PubBytes), latestBlockView, 3, 0, 1000) + txns, maxUtilityFee, err := pbp.getBlockTransactions(NewPublicKey(m1PubBytes), latestBlockView, 3, 0, 1000) require.NoError(err) require.Equal(latestBlockViewCopy, latestBlockView) require.Equal(true, len(passingTxns) > len(txns)) - require.Equal(true, len(passingTxns) > txnConnectStatus.Size()) totalUtilityFee = 0 for _, txn := range txns { _, utilityFee := computeBMF(txn.TxnFeeNanos) @@ -282,7 +277,7 @@ func TestGetBlockTransactions(t *testing.T) { } func _testProduceBlockNoSizeLimit(t *testing.T, mp *PosMempool, pbp *PosBlockProducer, latestBlockView *UtxoView, blockHeight uint64, - numPassing int, numFailing int, numInvalid int) (_txns []*MsgDeSoTxn, _txnConnectStatusByIndex *bitset.Bitset, _maxUtilityFee uint64) { + numPassing int, numFailing int, numInvalid 
int) (_txns []*MsgDeSoTxn, _maxUtilityFee uint64) { require := require.New(t) totalAcceptedTxns := numPassing + numFailing @@ -291,17 +286,10 @@ func _testProduceBlockNoSizeLimit(t *testing.T, mp *PosMempool, pbp *PosBlockPro latestBlockViewCopy, err := latestBlockView.CopyUtxoView() require.NoError(err) - txns, txnConnectStatus, maxUtilityFee, err := pbp.getBlockTransactions(pbp.proposerPublicKey, latestBlockView, blockHeight, 0, math.MaxUint64) + txns, maxUtilityFee, err := pbp.getBlockTransactions(pbp.proposerPublicKey, latestBlockView, blockHeight, 0, math.MaxUint64) require.NoError(err) require.Equal(latestBlockViewCopy, latestBlockView) require.Equal(totalAcceptedTxns, len(txns)) - require.True(totalAcceptedTxns >= txnConnectStatus.Size()) - numConnected := 0 - for ii := range txns { - if txnConnectStatus.Get(ii) { - numConnected++ - } - } - require.Equal(numPassing, numConnected) - return txns, txnConnectStatus, maxUtilityFee + + return txns, maxUtilityFee } diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 56bb473fc..72044588b 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -912,16 +912,6 @@ func (bc *Blockchain) isProperlyFormedBlockPoS(block *MsgDeSoBlock) error { return RuleErrorBlockWithNoTxns } - // Make sure TxnConnectStatusByIndex is non-nil - if block.TxnConnectStatusByIndex == nil { - return RuleErrorNilTxnConnectStatusByIndex - } - - // Make sure the TxnConnectStatusByIndex matches the TxnConnectStatusByIndexHash - if !(HashBitset(block.TxnConnectStatusByIndex).IsEqual(block.Header.TxnConnectStatusByIndexHash)) { - return RuleErrorTxnConnectStatusByIndexHashMismatch - } - // Make sure that the first txn in each block is a block reward txn. 
if block.Txns[0].TxnMeta.GetTxnType() != TxnTypeBlockReward { return RuleErrorBlockDoesNotStartWithRewardTxn @@ -962,11 +952,6 @@ func (bc *Blockchain) isProperlyFormedBlockHeaderPoS(header *MsgDeSoHeader) erro return RuleErrorInvalidPoSBlockHeaderVersion } - // Must have TxnConnectStatusByIndexHash - if header.TxnConnectStatusByIndexHash == nil { - return RuleErrorNilTxnConnectStatusByIndexHash - } - // Require header to have either vote or timeout QC isTimeoutQCEmpty := header.ValidatorsTimeoutAggregateQC.isEmpty() isVoteQCEmpty := header.ValidatorsVoteQC.isEmpty() @@ -1962,9 +1947,6 @@ const ( RuleErrorPoSBlockTstampNanoSecsTooOld RuleError = "RuleErrorPoSBlockTstampNanoSecsTooOld" RuleErrorPoSBlockTstampNanoSecsInFuture RuleError = "RuleErrorPoSBlockTstampNanoSecsInFuture" RuleErrorInvalidPoSBlockHeaderVersion RuleError = "RuleErrorInvalidPoSBlockHeaderVersion" - RuleErrorNilTxnConnectStatusByIndex RuleError = "RuleErrorNilTxnConnectStatusByIndex" - RuleErrorNilTxnConnectStatusByIndexHash RuleError = "RuleErrorNilTxnConnectStatusByIndexHash" - RuleErrorTxnConnectStatusByIndexHashMismatch RuleError = "RuleErrorTxnConnectStatusByIndexHashMismatch" RuleErrorNoTimeoutOrVoteQC RuleError = "RuleErrorNoTimeoutOrVoteQC" RuleErrorBothTimeoutAndVoteQC RuleError = "RuleErrorBothTimeoutAndVoteQC" RuleErrorBlockWithNoTxns RuleError = "RuleErrorBlockWithNoTxns" diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 36af5a5df..b7fbc25dd 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -77,10 +77,8 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * ProposerRandomSeedSignature: signature, ProposerVotingPublicKey: randomBLSPrivateKey.PublicKey(), TransactionMerkleRoot: merkleRoot, - TxnConnectStatusByIndexHash: HashBitset(bitset.NewBitset().Set(0, true)), }, - Txns: txns, - TxnConnectStatusByIndex: bitset.NewBitset().Set(0, true), + Txns: txns, } // Validate the block with a valid timeout QC and 
header. @@ -183,23 +181,6 @@ func TestIsProperlyFormedBlockPoSAndIsBlockTimestampValidRelativeToParentPoS(t * }, } - // TxnConnectStatusByIndex tests - // TxnConnectStatusByIndex must be non-nil - block.TxnConnectStatusByIndex = nil - err = bc.isProperlyFormedBlockPoS(block) - require.Equal(t, err, RuleErrorNilTxnConnectStatusByIndex) - // TxnConnectStatusByIndexHash must be non-nil - block.TxnConnectStatusByIndex = bitset.NewBitset().Set(0, true) - block.Header.TxnConnectStatusByIndexHash = nil - err = bc.isProperlyFormedBlockPoS(block) - require.Equal(t, err, RuleErrorNilTxnConnectStatusByIndexHash) - // The hashed version of TxnConnectStatusByIndex must match the actual TxnConnectStatusByIndexHash - block.Header.TxnConnectStatusByIndexHash = HashBitset(bitset.NewBitset().Set(0, false)) - err = bc.isProperlyFormedBlockPoS(block) - require.Equal(t, err, RuleErrorTxnConnectStatusByIndexHashMismatch) - // Reset TxnConnectStatusByIndexHash - block.Header.TxnConnectStatusByIndexHash = HashBitset(block.TxnConnectStatusByIndex) - // Block must have valid proposer voting public key block.Header.ProposerVotingPublicKey = nil err = bc.isProperlyFormedBlockPoS(block) @@ -386,14 +367,12 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { SignersList: bitset.NewBitset(), }, }, - TxnConnectStatusByIndexHash: NewBlockHash(bitset.NewBitset().ToBytes()), }, Txns: []*MsgDeSoTxn{ { TxnMeta: &BlockRewardMetadataa{}, }, }, - TxnConnectStatusByIndex: bitset.NewBitset(), } blockNode, err := bc.storeBlockInBlockIndex(block) require.NoError(t, err) @@ -1891,8 +1870,6 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { var malformedOrphanBlock *MsgDeSoBlock malformedOrphanBlock = _generateRealBlock(testMeta, 18, 18, 9273, testMeta.chain.BlockTip().Hash, false) malformedOrphanBlock.Header.PrevBlockHash = randomHash - // Modify anything to make the block malformed, but make sure a hash can still be generated. 
- malformedOrphanBlock.Header.TxnConnectStatusByIndexHash = randomHash // Resign the block. updateProposerVotePartialSignatureForBlock(testMeta, malformedOrphanBlock) malformedOrphanBlockHash, err := malformedOrphanBlock.Hash() @@ -1909,7 +1886,6 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.True(t, malformedOrphanBlockInIndex.IsStored()) // If a block can't be hashed, we expect to get an error. - malformedOrphanBlock.Header.TxnConnectStatusByIndexHash = nil success, isOrphan, missingBlockHashes, err = testMeta.chain.ProcessBlockPoS(malformedOrphanBlock, 18, true) require.False(t, success) require.False(t, isOrphan) @@ -1920,7 +1896,6 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { { var blockWithFailingTxn *MsgDeSoBlock blockWithFailingTxn = _generateRealBlockWithFailingTxn(testMeta, 18, 18, 123722, orphanBlockHash, false, 1, 0) - require.Equal(t, blockWithFailingTxn.TxnConnectStatusByIndex.Get(len(blockWithFailingTxn.Txns)-1), false) success, _, _, err := testMeta.chain.ProcessBlockPoS(blockWithFailingTxn, 18, true) require.True(t, success) blockWithFailingTxnHash, err = blockWithFailingTxn.Hash() @@ -2595,9 +2570,8 @@ func _getVoteQC(testMeta *TestMeta, blockHeight uint64, qcBlockHash *BlockHash, return voteQC } -// _getFullRealBlockTemplate is a helper function that generates a block template with a valid TxnConnectStatusByIndexHash -// and a valid TxnConnectStatusByIndex, a valid vote or timeout QC, does all the required signing by validators, -// and generates the proper ProposerVotePartialSignature. +// _getFullRealBlockTemplate is a helper function that generates a block template with a valid vote or timeout QC, +// does all the required signing by validators, and generates the proper ProposerVotePartialSignature. 
func _getFullRealBlockTemplate( testMeta *TestMeta, blockHeight uint64, @@ -2610,7 +2584,6 @@ func _getFullRealBlockTemplate( testMeta.posMempool.readOnlyLatestBlockView, blockHeight, view, seedSignature) require.NoError(testMeta.t, err) require.NotNil(testMeta.t, blockTemplate) - blockTemplate.Header.TxnConnectStatusByIndexHash = HashBitset(blockTemplate.TxnConnectStatusByIndex) // Figure out who the leader is supposed to be. leaderPublicKey, leaderPublicKeyBytes := getLeaderForBlockHeightAndView(testMeta, blockHeight, view) @@ -2686,13 +2659,11 @@ func _getFullRealBlockTemplate( return blockTemplate } -// _getFullDummyBlockTemplate is a helper function that generates a block template with a dummy TxnConnectStatusByIndexHash -// and a dummy ValidatorsVoteQC. +// _getFullDummyBlockTemplate is a helper function that generates a block template with a dummy ValidatorsVoteQC. func _getFullDummyBlockTemplate(testMeta *TestMeta, latestBlockView *UtxoView, blockHeight uint64, view uint64, seedSignature *bls.Signature) BlockTemplate { blockTemplate, err := testMeta.posBlockProducer.createBlockTemplate(latestBlockView, blockHeight, view, seedSignature) require.NoError(testMeta.t, err) require.NotNil(testMeta.t, blockTemplate) - blockTemplate.Header.TxnConnectStatusByIndexHash = HashBitset(blockTemplate.TxnConnectStatusByIndex) // Add a dummy vote QC proposerVotingPublicKey := _generateRandomBLSPrivateKey(testMeta.t) dummySig, err := proposerVotingPublicKey.Sign(RandomBytes(32)) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 7ec370a7e..80117a433 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -358,26 +358,6 @@ func (mp *PosMempool) startAugmentedViewRefreshRoutine() { // and proceed to the next transaction. if err == nil { newView = copiedView - continue - } - // If the transaction failed to connect, we connect the transaction as a failed txn - // directly on newView. 
- if mp.params.IsPoSBlockHeight(mp.latestBlockHeight + 1) { - // Copy the view again in case we hit an error. - copiedView, err = newView.CopyUtxoView() - if err != nil { - glog.Errorf("PosMempool.startAugmentedViewRefreshRoutine: Problem copying utxo view inner: %v", err) - continue - } - // Try to connect as failing txn directly to newView - _, _, _, err = copiedView._connectFailingTransaction( - txn.GetTxn(), uint32(mp.latestBlockHeight+1), false) - if err != nil { - glog.Errorf( - "PosMempool.startAugmentedViewRefreshRoutine: Problem connecting transaction: %v", err) - continue - } - newView = copiedView } } // Grab the augmentedLatestBlockViewMutex write lock and update the augmentedLatestBlockView. diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 02ed421f7..84a78c078 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -751,22 +751,8 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser if err == nil { mempoolTxUtxoView = copiedView } else { - // If the transaction fails to connect, we need to reset the view to its original state - // and connect it as a failing transaction. - copiedView, err = mempoolTxUtxoView.CopyUtxoView() - if err != nil { - return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer CopyUtxoView: ") - } - utxoOpsForTxn, _, _, err = copiedView._connectFailingTransaction( - mempoolTx.Tx, uint32(blockHeight+1), false) - // If we fail to connect the transaction as a failing transaction, we just continue and the - // mempoolTxUtxoView is unmodified. 
- if err != nil { - glog.V(2).Infof("StateChangeSyncer.SyncMempoolToStateSyncer "+ - "ConnectFailingTransaction for mempool tx: %v", err) - continue - } - mempoolTxUtxoView = copiedView + glog.V(2).Infof("StateChangeSyncer.SyncMempoolToStateSyncer "+ + "failed connecting mempool tx with (hash= %v): (err=%v)", mempoolTx.Hash, err) } } else { // For PoW block heights, we can just connect the transaction to the mempool view. diff --git a/lib/txindex.go b/lib/txindex.go index 7dad5db4a..f1f805235 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -429,21 +429,9 @@ func (txi *TXIndex) Update() error { // - Compute its mapping values, which may include custom metadata fields // - add all its mappings to the db. for txnIndexInBlock, txn := range blockMsg.Txns { - hasPoWBlockHeight := txi.Params.IsPoWBlockHeight(blockMsg.Header.Height) - // Also, the first transaction in the block, the block reward transaction, should always be a connecting transaction. - isBlockRewardTxn := (txnIndexInBlock == 0) && (txn.TxnMeta.GetTxnType() == TxnTypeBlockReward) - // Finally, if the transaction is not the first in the block, we check the TxnConnectStatusByIndex to see if - // it's marked by the block producer as a connecting transaction. PoS blocks should reflect this in TxnConnectStatusByIndex. - hasConnectingPoSTxnStatus := false - if txi.Params.IsPoSBlockHeight(blockMsg.Header.Height) && (txnIndexInBlock > 0) && (blockMsg.TxnConnectStatusByIndex != nil) { - // Note that TxnConnectStatusByIndex doesn't include the first block reward transaction. 
- hasConnectingPoSTxnStatus = blockMsg.TxnConnectStatusByIndex.Get(txnIndexInBlock - 1) - } - connects := hasPoWBlockHeight || isBlockRewardTxn || hasConnectingPoSTxnStatus - txnMeta, err := ConnectTxnAndComputeTransactionMetadata( txn, utxoView, blockToAttach.Hash, blockToAttach.Height, - blockToAttach.Header.TstampNanoSecs, uint64(txnIndexInBlock), connects) + blockToAttach.Header.TstampNanoSecs, uint64(txnIndexInBlock)) if err != nil { return fmt.Errorf("Update: Problem connecting txn %v to txindex: %v", txn, err) @@ -479,23 +467,17 @@ func (txi *TXIndex) Update() error { func ConnectTxnAndComputeTransactionMetadata( txn *MsgDeSoTxn, utxoView *UtxoView, blockHash *BlockHash, - blockHeight uint32, blockTimestampNanoSecs int64, txnIndexInBlock uint64, connects bool) (*TransactionMetadata, error) { + blockHeight uint32, blockTimestampNanoSecs int64, txnIndexInBlock uint64) (*TransactionMetadata, error) { totalNanosPurchasedBefore := utxoView.NanosPurchased usdCentsPerBitcoinBefore := utxoView.GetCurrentUSDCentsPerBitcoin() var utxoOps []*UtxoOperation - var totalInput, totalOutput, fees, burnFee, utilityFee uint64 + var totalInput, totalOutput, fees uint64 var err error - if connects { - utxoOps, totalInput, totalOutput, fees, err = utxoView._connectTransaction( - txn, txn.Hash(), blockHeight, blockTimestampNanoSecs, false, false, - ) - } else { - utxoOps, burnFee, utilityFee, err = utxoView._connectFailingTransaction( - txn, blockHeight, false) - fees = burnFee + utilityFee - } + utxoOps, totalInput, totalOutput, fees, err = utxoView._connectTransaction( + txn, txn.Hash(), blockHeight, blockTimestampNanoSecs, false, false, + ) if err != nil { return nil, fmt.Errorf( From 7eafe8c69339ad7d692c8f3083d88604aafd8926 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Thu, 21 Mar 2024 01:09:02 -0700 Subject: [PATCH 555/762] PoS Mempool Exchange Updates (#1103) * PoS Mempool Exchange Updates * Nits * Comment update --- 
lib/peer.go | 9 ++++----- lib/pos_mempool.go | 8 ++++++++ lib/server.go | 4 ++++ 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/lib/peer.go b/lib/peer.go index 232fcb3f0..b68a9481d 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -177,10 +177,9 @@ func (pp *Peer) HandleGetTransactionsMsg(getTxnMsg *MsgDeSoGetTransactions) { // whichever one is used for the consensus protocol at the current block height. for _, txHash := range getTxnMsg.HashList { mempoolTx := pp.srv.GetMempool().GetTransaction(txHash) - // If the transaction isn't in the pool, just continue without adding - // it. It is generally OK to respond with only a subset of the transactions - // that were requested. - if mempoolTx == nil { + // If the transaction isn't in the pool, or hasn't been validated, just continue without adding + // it. It is generally OK to respond with only a subset of the transactions that were requested. + if mempoolTx == nil || !mempoolTx.IsValidated() { continue } @@ -312,7 +311,7 @@ func (pp *Peer) HelpHandleInv(msg *MsgDeSoInv) { // For transactions, check that the transaction isn't in the // mempool and that it isn't currently being requested. 
_, requestIsInFlight := pp.srv.requestedTransactionsMap[currentHash] - if requestIsInFlight || pp.srv.mempool.IsTransactionInPool(¤tHash) { + if requestIsInFlight || pp.srv.GetMempool().IsTransactionInPool(¤tHash) { continue } diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 80117a433..ced346837 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -913,6 +913,10 @@ func (mp *PosMempool) GetOrderedTransactions() []*MempoolTx { func (mp *PosMempool) IsTransactionInPool(txHash *BlockHash) bool { mp.RLock() defer mp.RUnlock() + if !mp.IsRunning() || txHash == nil { + return false + } + _, exists := mp.txnRegister.txnMembership[*txHash] return exists } @@ -920,6 +924,10 @@ func (mp *PosMempool) IsTransactionInPool(txHash *BlockHash) bool { func (mp *PosMempool) GetMempoolTx(txHash *BlockHash) *MempoolTx { mp.RLock() defer mp.RUnlock() + if !mp.IsRunning() || txHash == nil { + return nil + } + return mp.txnRegister.txnMembership[*txHash] } diff --git a/lib/server.go b/lib/server.go index 1d0db426c..7de67303c 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1871,6 +1871,10 @@ func (srv *Server) _relayTransactions() { // for which the minimum fee is below what the Peer will allow. 
invMsg := &MsgDeSoInv{} for _, newTxn := range txnList { + if !newTxn.IsValidated() { + continue + } + invVect := &InvVect{ Type: InvTypeTx, Hash: *newTxn.Hash(), From 1ad4e35f60cdd3a411bcbb05b5083b4695a3be37 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Thu, 21 Mar 2024 02:27:29 -0700 Subject: [PATCH 556/762] nits --- bls/signature_no_relic.go | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 bls/signature_no_relic.go diff --git a/bls/signature_no_relic.go b/bls/signature_no_relic.go deleted file mode 100644 index e69de29bb..000000000 From 09d9dbedecfbb1625a3424a8a5ce7858e0cde751 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Thu, 21 Mar 2024 06:58:33 -0700 Subject: [PATCH 557/762] PoS Hypersync Updates (#1085) * Add BLS Keystore * PoS Validator Constants and Network Changes (#876) * PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment * Constants and network changes * Test MsgDeSoVerack encoding * Fix snapshot hack * Revert "Remove constants/network" This reverts commit b467ddbcd034c2e8d2728a7e77f4b714b686a760. * Fix compilation errors * Address review comments * PoS Validator Connect/Disconnect Flow (#768) * PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment * Another review round * gofmt * Comment change * PoS RemoteNode and RemoteNodeId (#857) * RemoteNode and RemoteNodeId * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. 
* Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test * Review round * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test * Final pass * PoS Remote Node Indexer and Manager (#879) * Add RemoteNodeIndexer * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test Simplify indices Simplify remote node indexer; fix compilation Simplify RemoteNodeManager More RemoteNodeManager updates Nits * PoS HandshakeController (#860) * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. 
* Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test Simplify indices Simplify remote node indexer; fix compilation * Add HandshakeController PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test Simplify indices Simplify remote node indexer; fix compilation Simplify RemoteNodeManager * Merge HandshakeStage with RemoteNodeStatus; small HandshakeController nits * Nit * HandshakeController updates * Nits * Quick nit * Nits * Comment nit * Add HandshakeController (#861) PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. 
* TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment Integration testing updates PoS Block Producer: TxnConnectStatusByIndex (#672) * TransactionConnectStatus and ConnectFailingTransaction * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to 960001ce00bbc1c99afb6ca6f697748bd6d944f6. * Revert "Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions"" This reverts commit 10a147654c5147c28ec674d0650bb54c8d9cebce. * Revert "Merge branch 'p/bmf-status-connected' into p/failing-transactions" This reverts commit d3e543c4c3e6f03cc74087b05c268d4449ba1689, reversing changes made to a9f782751b11e8aa3a0977fd2b2535b1ef4d7242. * TransactionConnectStatus and ConnectFailingTransaction * Initial _connectFailingTransaction * ConnectFailingTransaction and GlobalParamsEntry updates * Fix merge conflicts * gofmt * Fix merge conflicts * Fix blockheight * Fix merge conflicts * gofmt * Revert connect failing transaction * Add TxnStatusConnectedIndex to block and header * Fix naming * Fix tests; remove asserts * Update comment RemoteNode and RemoteNodeId Initial remote node manager tests remote node tests Better connection testing framework Add validator integration test Fix validator-validator connection test; Add nonValidator-validator test Simplify indices Simplify remote node indexer; fix compilation Simplify RemoteNodeManager More RemoteNodeManager updates Nits * Add validator deduplication test (#942) * Revert "Code split" (#943) This reverts commit 831096ac1d3008233868ac8b8f0eca4cd2b9553e. 
* Revert "Code split" (#944) This reverts commit 0604b6d3fc155177a2bb295e6635ed21b20dd947. * Nits * PoS NetworkManager RemoteNode Cleanup (#945) * Revert "Code split" This reverts commit c0c32f3943ead0e06fdfb3343954a6b5273ea887. * Review * Sync trunk * Rename * PoS NetworkManager Address (#957) * Revert "Another split" This reverts commit eaeec5875a84621b4888fc2a6104e9904e7ef53d. * Revert routine stops * gofmt * Add addrMgr to Server * Review * PoS NetworkManager Rename and Nits (#959) * Renames * nits * More renames * Review * PoS NetworkManager Fix Integration Tests (#960) * Some fixes * Fixes * Fix another integration test * Fix integration tests * Fix RegtestMiner * Fix fmt (#973) * PoS Networking and Syncing Documentation (#974) * noop * NetworkManager documentation * gofmt * Rewording (#981) * Rename (#986) * PoS NetworkManager Fix Deadlock and Test AddIps (#996) * Fix Deadlock and Test AddIps * Glog fix * PoS NetworkManager Comment Nit (#997) * Add ProtocolVersion To Regtest Params (#1035) * Fix Constructor Name for NetworkManager * Broadcast Votes, Timeouts, and Block Proposals To Validators (#1036) * Broadcast Votes and Timeouts To Validators * Broadcast Block Proposals To Network * Fix failed unit tests * Address Nina's comments * Use RemoteNode To Fetch Missing PoS Blocks (#1045) * Update Active Validator Connections In PoS Consensus (#1054) * Remove Rollbacks To bestHeaderChain When bestChain is Mutated (#1055) * PoW Miner Event Loop Cleanup (#1056) * Log Peer Validator Vote Messages (#1057) * Log Peer Validator Timeout Messages (#1058) * Request Missing HighQC Block For Timeout Message (#1061) * Request Missing HighQC Block For Timeout Message * Address Nina's comments * Remove Peer Disconnects in Server._handleBlock (#1062) * Remove Peer Disconnects in Server._handleBlock * Warning for unrequested blocks * Catch case for spam prevention check * Address Nina's comment * Change log level for consensus events * Update regtest block producer domain * 
Prevent Commited Tip Reorgs in ProcessHeaderPoS (#1063) * Prevent Commited Tip Reorgs in ProcessHeaderPoS * Clean up error message * Add Estimate fee rate to mempool interface (#1066) * Add BLS Public Key PKID Pair Entry Snapshot to core state (#1068) * Fix txindex for failing txns (#1069) * Introduce failing txn type so state syncer understand failing txns (#1070) * Support uncommitted blocks in state syncer mempool flush (#1065) * Fix Connect failing txn test (#1071) * noop * Add support for seed hex in block producer seed (#1072) * Make GetCommittedTip public (#1073) * Assorted small enhancements for node restart w/ pos (#1079) * pos_blockchain updates (#1074) * Fix txindex dependent transaction within uncommitted blocks (#1082) * Add flag to control number of mempool txns to sync in state syncer routine (#1083) * PoS update snapshot epoch duration (#1075) * PoW -> PoS Snapshot epoch transition * Simplify SnapshotBlockHeightPeriod logic * PoS deprecate DisconnectBlocksToHeight (#1076) * Deprecate DisconnectBlocksToHeight * cleanup * PoS batch BlockNode update in snapshot (#1077) * Batch BlockNode update * nit * Review * PoS NetworkManager Consolidation (#1078) * NetworkManager consolidation * Nits * Connect Uncommitted Blocks To Best Header Chain on Startup (#1087) * Smarter Transition From Syncing To Steady State PoS Consensus (#1088) * Fix FastHotStuffConsensus Transition Timer (#1091) * Gossip New Block Hashes Via Inv Message to Non-Validators (#1092) * Fix Edge Cases With Header Syncing, Block Syncing, and PoS Steady State (#1093) * Safer Heuristic to Detemine When to Transition to FastHotStuffConsensus (#1094) * Fix Mempool and Fee Estimator Past Blocks Refresh on Block Connect (#1095) * Fix Mempool and Fee Estimator Past Blocks Refresh on Block Connect * Cleanup * Address Nina's comments * Fix Race Conditions With Mempool Txn Relay (#1096) * Add Peer nil Check in Block Gossiping (#1097) * Beef up logging for FastHotStuffConsensus (#1107) * Fix Validation 
For Zero Validator HighQC View Value In Timeout QCs (#1111) * Fix Validation For Zero Validator HighQC View Value In Timeout QCs * Fix typo * Better comments * Better validations * Print Current Leader Check Debugging Info (#1112) * Fix Block Hash Comparison In ProcessValidatorVote (#1117) * nit (#1124) * PoS Hypersync Archival Mode (#1089) * Fix * Revert "nit" This reverts commit bd106c96ff1ac6d0e231ebb50f657d7e97ff160c. * Pass uncommitted utxo view through validation funcs to prevent rebuilds (#1109) * Cache block views (#1110) * VerifySignature syncing enhancements (#1115) * Update flow crypto dependency (#1123) * Reject Forking PoW Blocks Once Chain Reaches Final Height of PoW Protocol (#1129) * PoS Hypersync Fixes (#1122) Hypersync Archival Regtest * PoS Hypersync Testing (#1130) * PoS Hypersync Testing Hypersync Archival Regtest * Fixes * comments * Nit * gofmt * Nits * Test nits * Fix Regtest Block Proposer Domain (#1138) --------- Co-authored-by: iamsofonias Co-authored-by: tholonious <99746187+tholonious@users.noreply.github.com> Co-authored-by: Lazy Nina <81658138+lazynina@users.noreply.github.com> --- cmd/node.go | 1 + cmd/run.go | 2 +- integration_testing/hypersync_test.go | 39 +++++ .../network_manager_utils_test.go | 41 ----- integration_testing/rollback_test.go | 122 -------------- integration_testing/tools.go | 144 +++++++++++++++- lib/blockchain.go | 155 +----------------- lib/constants.go | 17 ++ lib/db_utils.go | 14 ++ lib/network_connection_test.go | 1 + lib/pos_blockchain.go | 41 ++++- lib/pos_server_regtest.go | 10 +- lib/server.go | 64 +++++--- lib/snapshot.go | 113 ++----------- 14 files changed, 312 insertions(+), 452 deletions(-) delete mode 100644 integration_testing/rollback_test.go diff --git a/cmd/node.go b/cmd/node.go index 6bc40df06..a53b86e3d 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -396,6 +396,7 @@ func (node *Node) Stop() { // Databases glog.Infof(lib.CLog(lib.Yellow, "Node.Stop: Closing all databases...")) 
node.closeDb(node.ChainDB, "chain") + node.closeDb(node.Server.GetBlockchain().DB(), "blockchain DB") node.stopWaitGroup.Wait() glog.Infof(lib.CLog(lib.Yellow, "Node.Stop: Databases successfully closed.")) diff --git a/cmd/run.go b/cmd/run.go index 2eaf1fa67..8fdbfb974 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -65,7 +65,7 @@ func SetupRunFlags(cmd *cobra.Command) { cmd.PersistentFlags().Bool("force-checksum", true, "When true, the node will panic if the "+ "local state checksum differs from the network checksum reported by its peers.") // Snapshot - cmd.PersistentFlags().Uint64("snapshot-block-height-period", 1000, "Set the snapshot epoch period. Snapshots are taken at block heights divisible by the period.") + cmd.PersistentFlags().Uint64("snapshot-block-height-period", 600000, "Set the snapshot epoch period. Snapshots are taken at block heights divisible by the period.") // Archival mode cmd.PersistentFlags().Bool("archival-mode", true, "Download all historical blocks after finishing hypersync.") // Disable encoder migrations diff --git a/integration_testing/hypersync_test.go b/integration_testing/hypersync_test.go index b76b1db48..473b2ddfc 100644 --- a/integration_testing/hypersync_test.go +++ b/integration_testing/hypersync_test.go @@ -35,6 +35,45 @@ func TestSimpleHyperSync(t *testing.T) { t.Logf("Databases match!") } +// TestPoSRegtestHypersyncArchival tests whether a node can successfully archival hypersync from another node after +// the PoS transition. In the test, node1 will produce blocks until it creates a snapshot after the 2nd PoS fork height. +// Then, we spawn node2, which will connect to node1 and hypersync from it. We then compare that the state dbs match. +// This test takes a couple of minutes to run, and can potentially fail the db comparison if node1 produces a block +// at the very end of the test, right before it stops, and node2 doesn't receive it in time. If this happens, rerun +// the test, it should generally pass. 
+func TestPoSRegtestHypersyncArchival(t *testing.T) { + node1 := simplePosNode(t, 18000, "node1", true) + + node1.Config.Params = node1.Params + node1.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node1.Params.DefaultPoWSnapshotBlockHeightPeriod = 30 + node1.Config.SnapshotBlockHeightPeriod = 60 + node1.Config.HyperSync = true + node1 = startNode(t, node1) + condition := func() bool { + return node1.Server.GetBlockchain().BlockTip().Height >= 140 + } + waitForConditionNoTimeout(t, "node1", condition) + + node2 := simplePosNode(t, 18001, "node2", true) + node2.Config.MinerPublicKeys = []string{} + node2.Params.DefaultPoWSnapshotBlockHeightPeriod = 30 + node2.Config.SnapshotBlockHeightPeriod = 60 + node2.Config.ConnectIPs = []string{"127.0.0.1:18000"} + node2.Config.HyperSync = true + node2.Config.SyncType = lib.NodeSyncTypeHyperSyncArchival + node2 = startNode(t, node2) + condition2 := func() bool { + return node2.Server.GetBlockchain().BlockTip().Height >= 140 && + node2.Server.GetBlockchain().ChainState() == lib.SyncStateFullyCurrent + } + waitForConditionNoTimeout(t, "node2", condition2) + + node1.Stop() + node2.Stop() + compareNodesByStateOffline(t, node1, node2, 0) +} + // TestHyperSyncFromHyperSyncedNode test if a node can successfully hypersync from another hypersynced node: // 1. Spawn three nodes node1, node2, node3 with max block height of MaxSyncBlockHeight blocks, and snapshot period of HyperSyncSnapshotPeriod // 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator and builds ancestral records. 
diff --git a/integration_testing/network_manager_utils_test.go b/integration_testing/network_manager_utils_test.go index d8344c67c..fa3e62b28 100644 --- a/integration_testing/network_manager_utils_test.go +++ b/integration_testing/network_manager_utils_test.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/deso-protocol/core/cmd" "github.com/deso-protocol/core/lib" - "os" "testing" ) @@ -269,43 +268,3 @@ func getRemoteNodeWithUserAgent(node *cmd.Node, userAgent string) *lib.RemoteNod } return nil } - -func spawnNodeProtocol1(t *testing.T, port uint32, id string) *cmd.Node { - dbDir := getDirectory(t) - t.Cleanup(func() { - os.RemoveAll(dbDir) - }) - config := generateConfig(t, port, dbDir, 10) - config.SyncType = lib.NodeSyncTypeBlockSync - node := cmd.NewNode(config) - node.Params.UserAgent = id - node.Params.ProtocolVersion = lib.ProtocolVersion1 - return node -} - -func spawnNonValidatorNodeProtocol2(t *testing.T, port uint32, id string) *cmd.Node { - dbDir := getDirectory(t) - t.Cleanup(func() { - os.RemoveAll(dbDir) - }) - config := generateConfig(t, port, dbDir, 10) - config.SyncType = lib.NodeSyncTypeBlockSync - node := cmd.NewNode(config) - node.Params.UserAgent = id - node.Params.ProtocolVersion = lib.ProtocolVersion2 - return node -} - -func spawnValidatorNodeProtocol2(t *testing.T, port uint32, id string, blsSeedPhrase string) *cmd.Node { - dbDir := getDirectory(t) - t.Cleanup(func() { - os.RemoveAll(dbDir) - }) - config := generateConfig(t, port, dbDir, 10) - config.SyncType = lib.NodeSyncTypeBlockSync - config.PosValidatorSeed = blsSeedPhrase - node := cmd.NewNode(config) - node.Params.UserAgent = id - node.Params.ProtocolVersion = lib.ProtocolVersion2 - return node -} diff --git a/integration_testing/rollback_test.go b/integration_testing/rollback_test.go deleted file mode 100644 index c7b440b2b..000000000 --- a/integration_testing/rollback_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package integration_testing - -import ( - 
"github.com/deso-protocol/core/cmd" - "github.com/deso-protocol/core/lib" - "github.com/stretchr/testify/require" - "os" - "reflect" - "testing" -) - -// Start blocks to height 5000 and then disconnect -// TODO: This test won't work now. -func TestStateRollback(t *testing.T) { - t.Skipf("DisconnectBlocksToHeight doesn't work in PoS") - - require := require.New(t) - _ = require - - dbDir1 := getDirectory(t) - dbDir2 := getDirectory(t) - defer os.RemoveAll(dbDir1) - defer os.RemoveAll(dbDir2) - - config1 := generateConfig(t, 18000, dbDir1, 10) - config1.SyncType = lib.NodeSyncTypeBlockSync - config2 := generateConfig(t, 18001, dbDir2, 10) - config2.SyncType = lib.NodeSyncTypeBlockSync - - config1.MaxSyncBlockHeight = 5000 - config2.MaxSyncBlockHeight = 5689 - config1.HyperSync = true - config2.HyperSync = true - config1.ConnectIPs = []string{"deso-seed-2.io:17000"} - config2.ConnectIPs = []string{"deso-seed-2.io:17000"} - - node1 := cmd.NewNode(config1) - node2 := cmd.NewNode(config2) - - node1 = startNode(t, node1) - node2 = startNode(t, node2) - - // wait for node1, node2 to sync blocks - waitForNodeToFullySync(node1) - waitForNodeToFullySync(node2) - - /* This code is no longer needed, but it was really useful in testing disconnect. Basically it goes transaction by - transaction and compares that connecting/disconnecting the transaction gives the same state at the end. The check - on the state is pretty hardcore. We checksum the entire database before the first connect and then compare it - to the checksum of the db after applying the connect/disconnect. 
*/ - - //bestChain := node2.Server.GetBlockchain().BestChain() - //lastNode := bestChain[len(bestChain)-1] - //lastBlock, err := lib.GetBlock(lastNode.Hash, node2.Server.GetBlockchain().DB(), nil) - //require.NoError(err) - //height := lastBlock.Header.Height - //_, txHashes, err := lib.ComputeMerkleRoot(lastBlock.Txns) - //require.NoError(err) - // - //utxoOps := [][]*lib.UtxoOperation{} - ////howMany := 3 - //initialChecksum := computeNodeStateChecksum(t, node1, height) - //checksumsBeforeTransactions := []*lib.StateChecksum{initialChecksum} - ////I0404 20:00:03.139818 76191 run_test.go:1280] checksumAfterTransactionBytes: ([8 89 214 239 199 116 26 139 218 1 24 67 190 194 178 16 137 186 76 7 124 98 185 77 198 214 201 50 248 152 75 4]), current txIndex (0), current txn (< TxHash: 4be39648eba47f54baa62e77e2423d57d12ed779d5e4b0044064a99ed5ba18b0, TxnType: BLOCK_REWARD, PubKey: 8mkU8yaVLs >) - ////I0404 20:00:06.246344 76191 run_test.go:1280] checksumAfterTransactionBytes: ([26 238 98 178 174 72 123 173 5 191 100 244 94 58 94 75 10 76 3 19 146 252 225 150 107 231 82 224 49 46 132 117]), current txIndex (1), current txn (< TxHash: 07a5ac6b44f8f5f91caf502465bfbd60324ee319140a76a2a3a01fe0609d258f, TxnType: BASIC_TRANSFER, PubKey: BC1YLhSkfH28QrMAVkbejMUZELwkAEMwr2FFwhEtofHvzHRtP6rd7s6 >) - ////I0404 20:00:17.912611 76191 run_test.go:1280] checksumAfterTransactionBytes: ([244 163 221 45 233 134 83 142 148 232 191 244 88 253 9 15 66 56 21 36 88 57 108 211 78 195 7 81 143 143 112 96]), current txIndex (2), current txn (< TxHash: 12e9af008054e4107c903e980149245149bc565b33d76b4a3c19cd68ee7ad485, TxnType: UPDATE_PROFILE, PubKey: BC1YLiMxepKu2kLBZssC2hQBahsjcg9Aat4ttsBZYy2WCnUE2WyrNzZ >) - //// run_test.go:1291: - //// 76390 db_utils.go:619] Getting into a set: key ([40]) value (11) - // - //for txIndex, txn := range lastBlock.Txns { - // initialChecksumBytes, err := checksumsBeforeTransactions[txIndex].ToBytes() - // require.NoError(err) - // blockView, err := 
lib.NewUtxoView(node1.Server.GetBlockchain().DB(), node1.Params, nil, nil) - // require.NoError(err) - // - // txHash := txHashes[txIndex] - // utxoOpsForTxn, _, _, _, err := blockView.ConnectTransaction(txn, txHash, - // 0, uint32(height), true, false) - // require.NoError(err) - // utxoOps = append(utxoOps, utxoOpsForTxn) - // glog.Infof(lib.CLog(lib.Red, "RIGHT BEFORE FLUSH TO DB")) - // require.NoError(blockView.FlushToDb(height)) - // checksumAfterTransaction := computeNodeStateChecksum(t, node1, height) - // checksumsBeforeTransactions = append(checksumsBeforeTransactions, checksumAfterTransaction) - // checksumAfterTransactionBytes, err := checksumAfterTransaction.ToBytes() - // require.NoError(err) - // glog.Infof("checksumAfterTransactionBytes: (%v), current txIndex (%v), current txn (%v)", - // checksumAfterTransactionBytes, txIndex, txn) - // - // blockView, err = lib.NewUtxoView(node1.Server.GetBlockchain().DB(), node1.Params, nil, nil) - // require.NoError(err) - // err = blockView.DisconnectTransaction(txn, txHash, utxoOpsForTxn, uint32(height)) - // require.NoError(err) - // glog.Infof(lib.CLog(lib.Red, "RIGHT BEFORE DISCONNECT TO DB")) - // require.NoError(blockView.FlushToDb(height)) - // afterDisconnectChecksum := computeNodeStateChecksum(t, node1, height) - // afterDisconnectBytes, err := afterDisconnectChecksum.ToBytes() - // require.NoError(err) - // require.Equal(true, reflect.DeepEqual(initialChecksumBytes, afterDisconnectBytes)) - // - // blockView, err = lib.NewUtxoView(node1.Server.GetBlockchain().DB(), node1.Params, nil, nil) - // require.NoError(err) - // utxoOpsForTxn, _, _, _, err = blockView.ConnectTransaction(txn, txHash, - // 0, uint32(height), true, false) - // require.NoError(err) - // require.NoError(blockView.FlushToDb(height)) - // checksumFinal := computeNodeStateChecksum(t, node1, height) - // checksumFinalFinalBytes, err := checksumFinal.ToBytes() - // require.NoError(err) - // require.Equal(true, 
reflect.DeepEqual(checksumAfterTransactionBytes, checksumFinalFinalBytes)) - //} - - require.NoError(node2.Server.GetBlockchain().DisconnectBlocksToHeight(5000, nil)) - //compareNodesByState(t, node1, node2, 0) - - node1Bytes := computeNodeStateChecksum(t, node1, 5000) - node2Bytes := computeNodeStateChecksum(t, node2, 5000) - require.Equal(true, reflect.DeepEqual(node1Bytes, node2Bytes)) - - node1.Stop() - node2.Stop() -} diff --git a/integration_testing/tools.go b/integration_testing/tools.go index 388a4e4bf..6dde06b93 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -10,6 +10,7 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" "github.com/stretchr/testify/require" + "github.com/tyler-smith/go-bip39" "io/ioutil" "os" "reflect" @@ -55,11 +56,20 @@ func getDirectory(t *testing.T) string { func generateConfig(t *testing.T, port uint32, dataDir string, maxPeers uint32) *cmd.Config { config := &cmd.Config{} params := lib.DeSoMainnetParams + config.Params = ¶ms + return _generateConfig(t, config, port, dataDir, maxPeers) +} - params.DNSSeeds = []string{} +func generateConfigTestnet(t *testing.T, port uint32, dataDir string, maxPeers uint32) *cmd.Config { + config := &cmd.Config{} + params := lib.DeSoTestnetParams config.Params = ¶ms + return _generateConfig(t, config, port, dataDir, maxPeers) +} + +func _generateConfig(t *testing.T, config *cmd.Config, port uint32, dataDir string, maxPeers uint32) *cmd.Config { + config.Params.DNSSeeds = []string{} config.ProtocolPort = uint16(port) - // "/Users/piotr/data_dirs/n98_1" config.DataDirectory = dataDir if err := os.MkdirAll(config.DataDirectory, os.ModePerm); err != nil { t.Fatalf("Could not create data directories (%s): %v", config.DataDirectory, err) @@ -95,6 +105,60 @@ func generateConfig(t *testing.T, port uint32, dataDir string, maxPeers uint32) return config } +func spawnNodeProtocol1(t *testing.T, port uint32, id string) *cmd.Node { + dbDir := getDirectory(t) + 
t.Cleanup(func() { + os.RemoveAll(dbDir) + }) + config := generateConfig(t, port, dbDir, 10) + config.SyncType = lib.NodeSyncTypeBlockSync + node := cmd.NewNode(config) + node.Params.UserAgent = id + node.Params.ProtocolVersion = lib.ProtocolVersion1 + return node +} + +func spawnNonValidatorNodeProtocol2(t *testing.T, port uint32, id string) *cmd.Node { + dbDir := getDirectory(t) + t.Cleanup(func() { + os.RemoveAll(dbDir) + }) + config := generateConfig(t, port, dbDir, 10) + config.SyncType = lib.NodeSyncTypeBlockSync + node := cmd.NewNode(config) + node.Params.UserAgent = id + node.Params.ProtocolVersion = lib.ProtocolVersion2 + return node +} + +func spawnValidatorNodeProtocol2(t *testing.T, port uint32, id string, blsSeedPhrase string) *cmd.Node { + dbDir := getDirectory(t) + t.Cleanup(func() { + os.RemoveAll(dbDir) + }) + config := generateConfig(t, port, dbDir, 10) + return _spawnValidatorNodeProtocol2(t, config, port, id, blsSeedPhrase) +} + +func _spawnValidatorNodeProtocol2(t *testing.T, config *cmd.Config, port uint32, id string, blsSeedPhrase string) *cmd.Node { + config.SyncType = lib.NodeSyncTypeBlockSync + config.PosValidatorSeed = blsSeedPhrase + config.BlockProducerSeed = blsSeedPhrase + node := cmd.NewNode(config) + node.Params.UserAgent = id + node.Params.ProtocolVersion = lib.ProtocolVersion2 + return node +} + +func spawnValidatorNodeProtocol2Testnet(t *testing.T, port uint32, id string, blsSeedPhrase string) *cmd.Node { + dbDir := getDirectory(t) + t.Cleanup(func() { + os.RemoveAll(dbDir) + }) + config := generateConfigTestnet(t, port, dbDir, 10) + return _spawnValidatorNodeProtocol2(t, config, port, id, blsSeedPhrase) +} + // waitForNodeToFullySync will busy-wait until provided node is fully current. 
func waitForNodeToFullySync(node *cmd.Node) { ticker := time.NewTicker(5 * time.Millisecond) @@ -177,6 +241,41 @@ func compareNodesByDB(t *testing.T, nodeA *cmd.Node, nodeB *cmd.Node, verbose in prefixList, verbose) } +func compareNodesByStateOffline(t *testing.T, nodeA *cmd.Node, nodeB *cmd.Node, verbose int) { + prefixList := lib.StatePrefixes.StatePrefixesList + dbA := GetChainDBFromNode(t, nodeA) + dbB := GetChainDBFromNode(t, nodeB) + + compareNodesByStateWithPrefixList(t, dbA, dbB, prefixList, verbose) +} + +func GetChainDBFromNode(t *testing.T, node *cmd.Node) *badger.DB { + // Setup chain database + dbDir := lib.GetBadgerDbPath(node.Config.DataDirectory) + var opts badger.Options + performanceOptions, err := lib.DbInitializedWithPerformanceOptions(node.Config.DataDirectory) + + // If the db options haven't yet been saved, we should base the options on the sync type. + if os.IsNotExist(err) { + performanceOptions = !node.Config.HyperSync + // Save the db options for future runs. + lib.SaveBoolToFile(lib.GetDbPerformanceOptionsFilePath(node.Config.DataDirectory), performanceOptions) + } else if err != nil { + // If we get an error other than "file does not exist", we should panic. + t.Fatalf("err: %v", err) + } + + if performanceOptions { + opts = lib.PerformanceBadgerOptions(dbDir) + } else { + opts = lib.DefaultBadgerOptions(dbDir) + } + opts.ValueDir = dbDir + db, err := badger.Open(opts) + require.NoError(t, err) + return db +} + // compareNodesByDB will look through all records in nodeA and nodeB txindex databases and will compare them. // The nodes pass this comparison iff they have identical states. 
func compareNodesByTxIndex(t *testing.T, nodeA *cmd.Node, nodeB *cmd.Node, verbose int) { @@ -510,6 +609,32 @@ func randomUint32Between(t *testing.T, min, max uint32) uint32 { return randomHeight + min } +func seedPhraseToPublicKeyBase58Check(t *testing.T, seedPhrase string, params *lib.DeSoParams) string { + seedBytes, err := bip39.NewSeedWithErrorChecking(seedPhrase, "") + if err != nil { + panic(err) + } + _, privKey, _, err := lib.ComputeKeysFromSeed(seedBytes, 0, params) + if err != nil { + panic(err) + } + return lib.Base58CheckEncode(privKey.PubKey().SerializeCompressed(), false, params) +} + +func simplePosNode(t *testing.T, port uint32, id string, regtest bool) *cmd.Node { + blsSeedPhrase, err := bip39.NewMnemonic(lib.RandomBytes(32)) + require.NoError(t, err) + node := spawnValidatorNodeProtocol2Testnet(t, port, id, blsSeedPhrase) + node.Config.MaxSyncBlockHeight = 0 + node.Config.HyperSync = true + if regtest { + node.Config.MinerPublicKeys = []string{seedPhraseToPublicKeyBase58Check(t, blsSeedPhrase, node.Params)} + node.Config.Regtest = true + node.Params.EnableRegtest() + } + return node +} + func waitForCondition(t *testing.T, id string, condition func() bool) { signalChan := make(chan struct{}) go func() { @@ -529,3 +654,18 @@ func waitForCondition(t *testing.T, id string, condition func() bool) { t.Fatalf("Condition timed out | %s", id) } } + +func waitForConditionNoTimeout(t *testing.T, id string, condition func() bool) { + signalChan := make(chan struct{}) + go func() { + for { + if condition() { + signalChan <- struct{}{} + return + } + time.Sleep(100 * time.Millisecond) + } + }() + + <-signalChan +} diff --git a/lib/blockchain.go b/lib/blockchain.go index 8f2d0ba35..9dc9cc2ec 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1407,7 +1407,8 @@ func (bc *Blockchain) isHyperSyncCondition() bool { blockTip := bc.blockTip() headerTip := bc.headerTip() - if uint64(headerTip.Height-blockTip.Height) >= SnapshotBlockHeightPeriod { + 
snapshotBlockHeightPeriod := bc.params.GetSnapshotBlockHeightPeriod(uint64(headerTip.Height), bc.Snapshot().GetSnapshotBlockHeightPeriod()) + if uint64(headerTip.Height-blockTip.Height) >= snapshotBlockHeightPeriod { return true } return false @@ -2022,15 +2023,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures return false, false, RuleErrorBlockHeightAfterProofOfStakeCutover } - // Only accept blocks if the blockchain is still running PoW. Once the chain connects the final block of the - // PoW protocol, it will transition to the PoS. We should not accept any PoW blocks as they can result in a - // fork. - if bc.blockTip().Header.Height >= bc.params.GetFinalPoWBlockHeight() { - return false, false, RuleErrorBestChainIsAtProofOfStakeCutover - } - blockHeight := uint64(bc.BlockTip().Height + 1) - bc.timer.Start("Blockchain.ProcessBlock: Initial") // Start by getting and validating the block's header. @@ -2801,150 +2794,6 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures return isMainChain, false, nil } -// DisconnectBlocksToHeight will rollback blocks from the db and blockchain structs until block tip reaches the provided -// blockHeight parameter. -func (bc *Blockchain) DisconnectBlocksToHeight(blockHeight uint64, snap *Snapshot) error { - // Roll back the block and make sure we don't hit any errors. - bc.ChainLock.Lock() - defer bc.ChainLock.Unlock() - - if blockHeight < 0 { - blockHeight = 0 - } - - // NOTE: This function doesn't maintain the snapshot. The checksum should be recalculated after this. - - // There is this edge-case where a partial blockProcess can skew the state. This is because of block reward entries, - // which are stored along with the block. In particular, if we've stored the block at blockTipHeight + 1, and the node - // crashed in the middle of ProcessBlock, then the reward entry will be stored in the state, even thought the block tip - // is at blockTipHeight. 
So we delete the block reward at the blockTipHeight + 1 to make sure the state is correct. - // TODO: decouple block reward from PutBlockWithTxn. - blockTipHeight := bc.bestChain[len(bc.bestChain)-1].Height - for hashIter, node := range bc.blockIndexByHash { - hash := hashIter.NewBlockHash() - if node.Height > blockTipHeight { - glog.V(1).Info(CLog(Yellow, fmt.Sprintf("DisconnectBlocksToHeight: Found node in blockIndexByHash with "+ - "larger height than the current block tip. Deleting the corresponding block reward. Node: (%v)", node))) - blockToDetach, err := GetBlock(hash, bc.db, snap) - if err != nil && err != badger.ErrKeyNotFound { - return errors.Wrapf(err, "DisconnectBlocksToHeight: Problem getting block with hash: (%v) and "+ - "at height: (%v)", hash, node.Height) - } - if blockToDetach != nil { - if err = DeleteBlockReward(bc.db, snap, blockToDetach, bc.eventManager, true); err != nil { - return errors.Wrapf(err, "DisconnectBlocksToHeight: Problem deleting block reward with hash: "+ - "(%v) and at height: (%v)", hash, node.Height) - } - } - } - } - - for ii := len(bc.bestChain) - 1; ii > 0 && uint64(bc.bestChain[ii].Height) > blockHeight; ii-- { - node := bc.bestChain[ii] - prevHash := *bc.bestChain[ii-1].Hash - hash := *bc.bestChain[ii].Hash - height := uint64(bc.bestChain[ii].Height) - err := bc.db.Update(func(txn *badger.Txn) error { - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, snap, bc.eventManager) - if err != nil { - return err - } - - if *utxoView.TipHash != hash { - return fmt.Errorf("DisconnectBlocksToHeight: UtxovView tip hash doesn't match the bestChain hash") - } - // Fetch the utxo operations for the block we're detaching. We need these - // in order to be able to detach the block. - utxoOps, err := GetUtxoOperationsForBlock(bc.db, snap, &hash) - if err != nil { - return err - } - - // Compute the hashes for all the transactions. 
- blockToDetach, err := GetBlock(&hash, bc.db, snap) - if err != nil { - return err - } - txHashes, err := ComputeTransactionHashes(blockToDetach.Txns) - if err != nil { - return err - } - err = utxoView.DisconnectBlock(blockToDetach, txHashes, utxoOps, height) - if err != nil { - return err - } - - // Flushing the view after applying and rolling back should work. - err = utxoView.FlushToDb(height) - if err != nil { - return err - } - - // Set the best node hash to the new tip. - if bc.postgres != nil { - if err := bc.postgres.UpsertChain(MAIN_CHAIN, &prevHash); err != nil { - return err - } - } else { - if err := PutBestHashWithTxn(txn, snap, &prevHash, ChainTypeDeSoBlock, bc.eventManager); err != nil { - return err - } - } - - // Delete the utxo operations for the blocks we're detaching since we don't need - // them anymore. - if err := DeleteUtxoOperationsForBlockWithTxn(txn, snap, &hash, bc.eventManager, true); err != nil { - return errors.Wrapf(err, "DisconnectBlocksToHeight: Problem deleting utxo operations for block") - } - - if err := DeleteBlockRewardWithTxn(txn, snap, blockToDetach, bc.eventManager, true); err != nil { - return errors.Wrapf(err, "DisconnectBlocksToHeight: Problem deleting block reward") - } - - // Revert the detached block's status to StatusHeaderValidated and save the blockNode to the db. 
- node.Status = StatusHeaderValidated - if bc.postgres != nil { - if err := bc.postgres.DeleteTransactionsForBlock(blockToDetach, node); err != nil { - return err - } - if err := bc.postgres.UpsertBlock(node); err != nil { - return err - } - } else { - if err := PutHeightHashToNodeInfoWithTxn(txn, snap, node, false, bc.eventManager); err != nil { - return errors.Wrapf(err, "DisconnectBlocksToHeight: Problem deleting height hash to node info") - } - } - - // If we have a Server object then call its function - if bc.eventManager != nil { - // We need to add the UtxoOps here to handle reorgs properly in Rosetta - // For now it's fine because reorgs are virtually impossible. - bc.eventManager.blockDisconnected(&BlockEvent{Block: blockToDetach}) - } - - return nil - }) - - if err != nil { - return errors.Wrapf(err, "DisconnectBlocksToHeight: Problem disconnecting block "+ - "with hash: (%v) at blockHeight: (%v)", hash, height) - } - - bc.bestChain = bc.bestChain[:len(bc.bestChain)-1] - delete(bc.bestChainMap, hash) - } - - // Remove blocks we've disconnected from the bestHeaderChain. - for ii := len(bc.bestHeaderChain) - 1; ii > 0 && uint64(bc.bestHeaderChain[ii].Height) > blockHeight; ii-- { - hash := *bc.bestHeaderChain[ii].Hash - bc.bestHeaderChain = bc.bestHeaderChain[:len(bc.bestHeaderChain)-1] - delete(bc.bestHeaderChainMap, hash) - } - - return nil -} - // ValidateTransaction creates a UtxoView and sees if the transaction can be connected // to it. If a mempool is provided, this function tries to find dependencies of the // passed-in transaction in the pool and connect them before trying to connect the diff --git a/lib/constants.go b/lib/constants.go index 9d5832016..5b9ad9c7a 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -668,6 +668,13 @@ type DeSoParams struct { MiningIterationsPerCycle uint64 + // Snapshot + // For PoW, we use a snapshot block height period of 1000 blocks. 
We record this value in the constants + // as it'll be used during the PoW -> PoS transition. Notably, this value is used to allow PoS nodes + // to hypersync from PoW nodes. In hypersync, knowing the snapshot block height period of the sync peer + // is necessary to determine the block height of the snapshot we're going to receive. + DefaultPoWSnapshotBlockHeightPeriod uint64 + // deso MaxUsernameLengthBytes uint64 MaxUserDescriptionLengthBytes uint64 @@ -910,6 +917,13 @@ func (params *DeSoParams) GetFirstPoSBlockHeight() uint64 { return uint64(params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) } +func (params *DeSoParams) GetSnapshotBlockHeightPeriod(blockHeight uint64, currentSnapshotBlockHeightPeriod uint64) uint64 { + if blockHeight < uint64(params.ForkHeights.ProofOfStake1StateSetupBlockHeight) { + return params.DefaultPoWSnapshotBlockHeightPeriod + } + return currentSnapshotBlockHeightPeriod +} + // GenesisBlock defines the genesis block used for the DeSo mainnet and testnet var ( ArchitectPubKeyBase58Check = "BC1YLg3oh6Boj8e2boCo1vQCYHLk1rjsHF6jthBdvSw79bixQvKK6Qa" @@ -1173,6 +1187,8 @@ var DeSoMainnetParams = DeSoParams{ // a 10 minute block time. 
MiningIterationsPerCycle: 95000, + DefaultPoWSnapshotBlockHeightPeriod: 1000, + MaxUsernameLengthBytes: MaxUsernameLengthBytes, MaxUserDescriptionLengthBytes: 20000, @@ -1465,6 +1481,7 @@ var DeSoTestnetParams = DeSoParams{ MiningIterationsPerCycle: 9500, + DefaultPoWSnapshotBlockHeightPeriod: 1000, // deso MaxUsernameLengthBytes: MaxUsernameLengthBytes, diff --git a/lib/db_utils.go b/lib/db_utils.go index 5ee4e649a..08127bfbb 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -5220,6 +5220,20 @@ func PutHeightHashToNodeInfoWithTxn(txn *badger.Txn, snap *Snapshot, return nil } +func PutHeightHashToNodeInfoBatch(handle *badger.DB, snap *Snapshot, + nodes []*BlockNode, bitcoinNodes bool, eventManager *EventManager) error { + + err := handle.Update(func(txn *badger.Txn) error { + for _, node := range nodes { + if err := PutHeightHashToNodeInfoWithTxn(txn, snap, node, bitcoinNodes, eventManager); err != nil { + return err + } + } + return nil + }) + return err +} + func PutHeightHashToNodeInfo(handle *badger.DB, snap *Snapshot, node *BlockNode, bitcoinNodes bool, eventManager *EventManager) error { err := handle.Update(func(txn *badger.Txn) error { return PutHeightHashToNodeInfoWithTxn(txn, snap, node, bitcoinNodes, eventManager) diff --git a/lib/network_connection_test.go b/lib/network_connection_test.go index 5d3008f72..a9aab0f6d 100644 --- a/lib/network_connection_test.go +++ b/lib/network_connection_test.go @@ -69,6 +69,7 @@ func (sl *simpleListener) start() { fmt.Println("simpleListener.start: ll.Accept:", err) return } + // We use this only to limit maximum number of connections, when channel limit is reached. 
sl.connectionChan <- &inboundConnection{ connection: conn, } diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 10324e2a0..7ad07dd67 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -237,6 +237,15 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v return false, false, nil, errors.Wrapf(err, "processBlockPoS: Problem hashing block") } + // In hypersync archival mode, we may receive blocks that have already been processed and committed during state + // synchronization. However, we may want to store these blocks in the db for archival purposes. We check if the + // block we're dealing with is an archival block. If it is, we store it and return early. + if success, err := bc.checkAndStoreArchivalBlock(block); err != nil { + return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem checking and storing archival block") + } else if success { + return true, false, nil, nil + } + // Get all the blocks between the current block and the committed tip. If the block // is an orphan, then we store it after performing basic validations. // If the block extends from any committed block other than the committed tip, @@ -557,6 +566,30 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { return errors.Wrap(err, "processBlockPoS: Problem adding block to block index: ") } +// checkAndStoreArchivalBlock is a helper function that takes in a block and checks if it's an archival block. +// If it is, it stores the block in the db and returns true. If it's not, it returns false, or false and an error. +func (bc *Blockchain) checkAndStoreArchivalBlock(block *MsgDeSoBlock) (_success bool, _err error) { + // First, get the block hash and lookup the block index. 
+ blockHash, err := block.Hash() + if err != nil { + return false, errors.Wrap(err, "checkAndStoreArchivalBlock: Problem hashing block") + } + blockNode, exists := bc.blockIndexByHash[*blockHash] + // If the blockNode doesn't exist, or the block is not committed, or it's already stored, then we're not dealing + // with an archival block. Archival blocks must have an existing blockNode, be committed, and not be stored. + if !exists || !blockNode.IsCommitted() || blockNode.IsStored() { + return false, nil + } + + // If we get to this point, we're dealing with an archival block, so we'll attempt to store it. + // This means, this block node is already marked as COMMITTED and VALIDATED, and we just need to store it. + _, err = bc.storeBlockInBlockIndex(block) + if err != nil { + return false, errors.Wrap(err, "checkAndStoreArchivalBlock: Problem storing block in block index") + } + return true, nil +} + // storeValidateFailedBlockWithWrappedError is a helper function that takes in a block and an error and // stores the block in the block index with status VALIDATE_FAILED. It returns the resulting BlockNode. 
func (bc *Blockchain) storeValidateFailedBlockWithWrappedError(block *MsgDeSoBlock, outerErr error) ( @@ -1475,11 +1508,6 @@ func (bc *Blockchain) upsertBlockAndBlockNodeToDB(block *MsgDeSoBlock, blockNode ) error { // Store the block in badger err := bc.db.Update(func(txn *badger.Txn) error { - if bc.snapshot != nil { - bc.snapshot.PrepareAncestralRecordsFlush() - defer bc.snapshot.StartAncestralRecordsFlush(true) - glog.V(2).Infof("upsertBlockAndBlockNodeToDB: Preparing snapshot flush") - } if storeFullBlock { if innerErr := PutBlockHashToBlockWithTxn(txn, bc.snapshot, block, bc.eventManager); innerErr != nil { return errors.Wrapf(innerErr, "upsertBlockAndBlockNodeToDB: Problem calling PutBlockHashToBlockWithTxn") @@ -1765,6 +1793,9 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool }) } } + if bc.snapshot != nil { + bc.snapshot.FinishProcessBlock(blockNode) + } // TODO: What else do we need to do in here? return nil } diff --git a/lib/pos_server_regtest.go b/lib/pos_server_regtest.go index 42c607718..2e561d426 100644 --- a/lib/pos_server_regtest.go +++ b/lib/pos_server_regtest.go @@ -24,8 +24,14 @@ func (srv *Server) submitRegtestValidatorRegistrationTxns(block *MsgDeSoBlock) { panic(err) } + var domain string + if len(srv.GetConnectionManager().listeners) == 0 { + domain = "localhost:18000" + } + domain = srv.GetConnectionManager().listeners[0].Addr().String() + txnMeta := RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("http://localhost:18000")}, + Domains: [][]byte{[]byte(domain)}, DisableDelegatedStake: false, DelegatedStakeCommissionBasisPoints: 100, VotingPublicKey: blsSigner.GetPublicKey(), @@ -62,7 +68,7 @@ func (srv *Server) submitRegtestValidatorRegistrationTxns(block *MsgDeSoBlock) { stakeTxnMeta := StakeMetadata{ ValidatorPublicKey: NewPublicKey(transactorPubKey), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt().SetUint64(10 * 1e9), + StakeAmountNanos: 
uint256.NewInt().SetUint64(10 * 1e6), } stakeTxn, _, _, _, err := srv.blockProducer.chain.CreateStakeTxn( diff --git a/lib/server.go b/lib/server.go index 7ccd0f7c2..17a5fec7b 100644 --- a/lib/server.go +++ b/lib/server.go @@ -691,9 +691,7 @@ func NewServer( } glog.Errorf(CLog(Red, "NewServer: Forcing a rollback to the last snapshot epoch because node was not closed "+ "properly last time")) - if err := _snapshot.ForceResetToLastSnapshot(_chain); err != nil { - return nil, errors.Wrapf(err, "NewServer: Problem in ForceResetToLastSnapshot"), true - } + return nil, errors.Wrapf(err, "NewServer: Restart required"), true } return srv, nil, shouldRestart @@ -1038,7 +1036,10 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // If node is a hyper sync node and we haven't finished syncing state yet, we will kick off state sync. if srv.cmgr.HyperSync { bestHeaderHeight := uint64(srv.blockchain.headerTip().Height) - expectedSnapshotHeight := bestHeaderHeight - (bestHeaderHeight % srv.snapshot.SnapshotBlockHeightPeriod) + // The peer's snapshot block height period before the first PoS fork height is expected to be the + // PoW default value. After the fork height, it's expected to be the value defined in the params. + snapshotBlockHeightPeriod := srv.params.GetSnapshotBlockHeightPeriod(bestHeaderHeight, srv.snapshot.SnapshotBlockHeightPeriod) + expectedSnapshotHeight := bestHeaderHeight - (bestHeaderHeight % snapshotBlockHeightPeriod) srv.blockchain.snapshot.Migrations.CleanupMigrations(expectedSnapshotHeight) if len(srv.HyperSyncProgress.PrefixProgress) != 0 { @@ -1502,27 +1503,40 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { // already synced all the state corresponding to the sub-blockchain ending at the snapshot // height, we will now mark all these blocks as processed. To do so, we will iterate through // the blockNodes in the header chain and set them in the blockchain data structures. 
- err = srv.blockchain.db.Update(func(txn *badger.Txn) error { - for ii := uint64(1); ii <= srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight; ii++ { - currentNode := srv.blockchain.bestHeaderChain[ii] - // Do not set the StatusBlockStored flag, because we still need to download the past blocks. - currentNode.Status |= StatusBlockProcessed - currentNode.Status |= StatusBlockValidated - srv.blockchain.addNewBlockNodeToBlockIndex(currentNode) - srv.blockchain.bestChainMap[*currentNode.Hash] = currentNode - srv.blockchain.bestChain = append(srv.blockchain.bestChain, currentNode) - err = PutHeightHashToNodeInfoWithTxn(txn, srv.snapshot, currentNode, false /*bitcoinNodes*/, srv.eventManager) - if err != nil { - return err - } + // + // We split the db update into batches of 10,000 block nodes to avoid a single transaction + // being too large and possibly causing an error in badger. + var blockNodeBatch []*BlockNode + for ii := uint64(1); ii <= srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight; ii++ { + currentNode := srv.blockchain.bestHeaderChain[ii] + // Do not set the StatusBlockStored flag, because we still need to download the past blocks. + currentNode.Status |= StatusBlockProcessed + currentNode.Status |= StatusBlockValidated + currentNode.Status |= StatusBlockCommitted + srv.blockchain.addNewBlockNodeToBlockIndex(currentNode) + srv.blockchain.bestChainMap[*currentNode.Hash] = currentNode + srv.blockchain.bestChain = append(srv.blockchain.bestChain, currentNode) + blockNodeBatch = append(blockNodeBatch, currentNode) + if len(blockNodeBatch) < 10000 { + continue } - // We will also set the hash of the block at snapshot height as the best chain hash. 
- err = PutBestHashWithTxn(txn, srv.snapshot, msg.SnapshotMetadata.CurrentEpochBlockHash, ChainTypeDeSoBlock, srv.eventManager) - return err - }) + err = PutHeightHashToNodeInfoBatch(srv.blockchain.db, srv.snapshot, blockNodeBatch, false /*bitcoinNodes*/, srv.eventManager) + if err != nil { + glog.Errorf("Server._handleSnapshot: Problem updating snapshot block nodes, error: (%v)", err) + break + } + blockNodeBatch = []*BlockNode{} + } + if len(blockNodeBatch) > 0 { + err = PutHeightHashToNodeInfoBatch(srv.blockchain.db, srv.snapshot, blockNodeBatch, false /*bitcoinNodes*/, srv.eventManager) + if err != nil { + glog.Errorf("Server._handleSnapshot: Problem updating snapshot block nodes, error: (%v)", err) + } + } + err = PutBestHash(srv.blockchain.db, srv.snapshot, msg.SnapshotMetadata.CurrentEpochBlockHash, ChainTypeDeSoBlock, srv.eventManager) if err != nil { - glog.Errorf("Server._handleSnapshot: Problem updating snapshot blocknodes, error: (%v)", err) + glog.Errorf("Server._handleSnapshot: Problem updating best hash, error: (%v)", err) } // We also reset the in-memory snapshot cache, because it is populated with stale records after // we've initialized the chain with seed transactions. @@ -1830,9 +1844,9 @@ func (srv *Server) _relayTransactions() { // current block height. mempool := srv.GetMempool() - glog.V(1).Infof("Server._relayTransactions: Waiting for mempool readOnlyView to regenerate") + glog.V(3).Infof("Server._relayTransactions: Waiting for mempool readOnlyView to regenerate") mempool.BlockUntilReadOnlyViewRegenerated() - glog.V(1).Infof("Server._relayTransactions: Mempool view has regenerated") + glog.V(3).Infof("Server._relayTransactions: Mempool view has regenerated") // We pull the transactions from either the PoW mempool or the PoS mempool depending // on the current block height. 
@@ -1869,7 +1883,7 @@ func (srv *Server) _relayTransactions() { } } - glog.V(1).Infof("Server._relayTransactions: Relay to all peers is complete!") + glog.V(3).Infof("Server._relayTransactions: Relay to all peers is complete!") } func (srv *Server) _addNewTxn( diff --git a/lib/snapshot.go b/lib/snapshot.go index 6d0c8df9e..95c15685a 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -337,7 +337,7 @@ func NewSnapshot(mainDb *badger.DB, mainDbDirectory string, snapshotBlockHeightP glog.Infof("Snapshot BadgerDB Dir: %v", snapshotOpts.Dir) glog.Infof("Snapshot BadgerDB ValueDir: %v", snapshotOpts.ValueDir) if snapshotBlockHeightPeriod == 0 { - snapshotBlockHeightPeriod = SnapshotBlockHeightPeriod + snapshotBlockHeightPeriod = params.DefaultPoWSnapshotBlockHeightPeriod } var snapshotDbMutex sync.Mutex @@ -519,105 +519,6 @@ func (snap *Snapshot) Stop() { // It's important!!! } -// ForceResetToLastSnapshot is a doomsday scenario recovery mode. It will be triggered if the node was shutdown midway, -// resulting in a corrupted ancestral records or checksum. To recover from this situation, we will revert to the beginning -// of the current snapshot epoch. We do this by disconnecting blocks from the tip to the epoch's start and resetting the checksum. -func (snap *Snapshot) ForceResetToLastSnapshot(chain *Blockchain) error { - snap.stopped = true - - // First we'll stop and reset the snapshot operation channel. - snap.OperationChannel.EnqueueOperation(&SnapshotOperation{ - operationType: SnapshotOperationExit, - }) - snap.WaitForAllOperationsToFinish() - snap.updateWaitGroup.Wait() - snap.OperationChannel.StateSemaphore = 0 - - // Now, we'll reset the snapshot db status semaphores. - snap.Status.MainDBSemaphore = 0 - snap.Status.AncestralDBSemaphore = 0 - - // Now, disconnect the blocks to the beginning of the snapshot epoch, or equivalently, end of the last snapshot epoch. 
- lastEpochHeight := snap.CurrentEpochSnapshotMetadata.SnapshotBlockHeight - // We use nil for the snapshot, because snapshot hasn't been created yet. - err := chain.DisconnectBlocksToHeight(lastEpochHeight, nil) - if err != nil { - return errors.Wrapf(err, "ForceResetToLastSnapshot: Problem disconnecting blocks") - } - - // Reset the state checksum to the one we got at the beginning of this epoch. - if len(snap.CurrentEpochSnapshotMetadata.CurrentEpochChecksumBytes) == 0 { - snap.Checksum.ResetChecksum() - } else { - err = snap.Checksum.FromBytes(snap.CurrentEpochSnapshotMetadata.CurrentEpochChecksumBytes) - if err != nil { - return errors.Wrapf(err, "ForceResetToLastSnapshot: problem resetting checksum bytes") - } - } - err = snap.Checksum.SaveChecksum() - if err != nil { - return errors.Wrapf(err, "ForceResetToLastSnapshot: Problem saving checksum") - } - - // Similarly, we'll reset the encoder migration checksums. - snap.Migrations.ResetChecksums() - if err = snap.Migrations.SaveMigrations(); err != nil { - return errors.Wrapf(err, "ForceResetToLastSnapshot: Problem saving migrations") - } - - // Save the operation channel status and the db status. - if err = snap.OperationChannel.SaveOperationChannel(); err != nil { - return errors.Errorf("ForceResetToLastSnapshot: Problem saving operation channel in database. Error: (%v)", err) - } - snap.Status.SaveStatus() - - // Delete all ancestral records for the current snapshot epoch. - if err = snap.DeleteAncestralRecords(lastEpochHeight); err != nil { - return errors.Wrapf(err, "ForceResetToLastSnapshot: Problem deleting ancestral records at height (%v)", lastEpochHeight) - } - - // Now we'll verify that the final state checksum matches the last snapshot checksum. We do it in a slightly hacky way - // where we create and start an empty migration. The StartMigration function will basically scan the entire node's - // db and compute the checksum. 
It's easier than having a separate function for this; although, we might have a - // dedicated function for this in the future for better code clarity. - glog.Infof(CLog(Yellow, "ForceResetToLastSnapshot: Finished node reset, will now proceed to verify state checksum.")) - verificationMigration := EncoderMigration{} - verificationMigration.InitializeSingleHeight(chain.db, snap.SnapshotDb, snap.SnapshotDbMutex, lastEpochHeight, snap.params) - if err := verificationMigration.StartMigrations(); err != nil { - return errors.Wrapf(err, "ForceResetToLastSnapshot: Problem starting verification migration.") - } - if len(verificationMigration.migrationChecksums) != 1 { - return errors.Errorf("ForceResetToLastSnapshot: Number of migration checksums is invalid.") - } - verificationChecksum, err := verificationMigration.migrationChecksums[0].Checksum.ToBytes() - if err != nil { - return errors.Wrapf(err, "ForceResetToLastSnapshot: Problem getting verification migration.") - } - if len(snap.CurrentEpochSnapshotMetadata.CurrentEpochChecksumBytes) == 0 { - identitySum := &StateChecksum{} - identitySum.Initialize(nil, nil) - snap.CurrentEpochSnapshotMetadata.CurrentEpochChecksumBytes, err = identitySum.ToBytes() - if err != nil { - return errors.Wrapf(err, "ForceResetToLastSnapshot: Current epoch checksum was empty but failed to reset") - } - } - // Make sure the snapshot epoch checksum is equal to the checksum that we've computed during the StartMigrations, - // i.e. the checksum we got by scanning the entire db and manually recomputing the checksum from scratch. - // This check is very important, if it fails then it means that there is no way for us to recover and we should resync. - if !reflect.DeepEqual(snap.CurrentEpochSnapshotMetadata.CurrentEpochChecksumBytes, verificationChecksum) { - - return errors.Errorf("ForceRestartToLastSnapshot: Snapshot epoch checksum: (%v), and verification "+ - "checksum: (%v), are not equal. This means recovery failed. 
Unfortunatelly, we have to resync your node.", - snap.CurrentEpochSnapshotMetadata.CurrentEpochChecksumBytes, verificationChecksum) - } - - if err := snap.SnapshotDb.Close(); err != nil { - return errors.Wrapf(err, "ForceResetToLastSnapshot: Problem closing snapshot db.") - } - glog.Infof(CLog(Yellow, "ForceResetToLastSnapshot: Finished rolling back blocks and recovering snapshot. Node should now be restarted.")) - return nil -} - // StartAncestralRecordsFlush updates the ancestral records after a UtxoView flush. This function should be called // after all UtxoView flushes. shouldIncrement is usually set to true and indicates that we are supposed to update the // db semaphores. The semaphore are used to manage concurrency between the main and ancestral dbs. @@ -666,7 +567,13 @@ func (snap *Snapshot) FinishProcessBlock(blockNode *BlockNode) { snap.CurrentEpochSnapshotMetadata.updateMutex.Lock() defer snap.CurrentEpochSnapshotMetadata.updateMutex.Unlock() - if uint64(blockNode.Height)%snap.SnapshotBlockHeightPeriod == 0 && + + // If the block height is divisible by the snapshot block height period, we update the snapshot metadata. + // For PoW blocks, until the first PoS fork height, we use the default snapshot block height period of 1000. + // For blocks after the first PoS fork height, we use the snapshot block height period defined in the params. 
+ snapshotBlockHeightPeriod := snap.params.GetSnapshotBlockHeightPeriod(uint64(blockNode.Height), snap.SnapshotBlockHeightPeriod) + + if uint64(blockNode.Height)%snapshotBlockHeightPeriod == 0 && uint64(blockNode.Height) > snap.CurrentEpochSnapshotMetadata.SnapshotBlockHeight { snap.CurrentEpochSnapshotMetadata.SnapshotBlockHeight = uint64(blockNode.Height) @@ -996,6 +903,10 @@ func (snap *Snapshot) GetAncestralRecordsKeyWithTxn(txn *badger.Txn, key []byte, return txn.Get(recordsKey) } +func (snap *Snapshot) GetSnapshotBlockHeightPeriod() uint64 { + return snap.SnapshotBlockHeightPeriod +} + // DBSetAncestralRecordWithTxn sets a record corresponding to our ExistingRecordsMap. // We append a []byte{1} to the end to indicate that this is an existing record, and // we append a []byte{0} to the end to indicate that this is a NON-existent record. We From 9f8ba448140cfb3cb75cf23dc200389dce423b30 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 21 Mar 2024 15:45:31 -0400 Subject: [PATCH 558/762] Fix syncing headers state while syncing blocks (#1132) --- lib/server.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/lib/server.go b/lib/server.go index 17a5fec7b..b0873cc94 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2187,9 +2187,21 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { srv.timer.Print("Server._handleBlock: General") srv.timer.Print("Server._handleBlock: Process Block") - // We shouldn't be receiving blocks while syncing headers. + // We shouldn't be receiving blocks while syncing headers, but we can end up here + // if it took longer than MaxTipAge to sync blocks to this point. We'll revert to + // syncing headers and then resume syncing blocks once we're current again. 
if srv.blockchain.chainState() == SyncStateSyncingHeaders { glog.Warningf("Server._handleBlock: Received block while syncing headers: %v", blk) + glog.Infof("Requesting headers: %v", pp) + + locator := srv.blockchain.LatestHeaderLocator() + pp.AddDeSoMessage(&MsgDeSoGetHeaders{ + StopHash: &BlockHash{}, + BlockLocator: locator, + }, false) + glog.V(1).Infof("Server._handleHeaderBundle: *Syncing* headers for blocks starting at "+ + "header tip %v from peer %v", + srv.blockchain.HeaderTip(), pp) return } From b185bdecb5b16bb0d95a748620d3b846992af3b5 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 21 Mar 2024 17:05:54 -0400 Subject: [PATCH 559/762] SnapshotCache struct (#1116) --- lib/block_view.go | 57 +++++++++++ lib/blockchain.go | 6 +- lib/connection_manager.go | 3 +- lib/pos_blockchain.go | 34 ++++++- lib/pos_consensus.go | 4 +- lib/pos_snapshot_cache.go | 191 ++++++++++++++++++++++++++++++++++++ lib/pos_snapshot_entries.go | 78 +++++++++------ lib/server.go | 7 +- 8 files changed, 335 insertions(+), 45 deletions(-) create mode 100644 lib/pos_snapshot_cache.go diff --git a/lib/block_view.go b/lib/block_view.go index a242aa9dc..045025344 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -155,6 +155,9 @@ type UtxoView struct { // It contains the snapshot value of every ValidatorEntry that makes up the validator set at // the given SnapshotAtEpochNumber. SnapshotValidatorSet map[SnapshotValidatorSetMapKey]*ValidatorEntry + // HasFullSnapshotValidatorSetByEpoch is a map of SnapshotAtEpochNumber to a boolean. If all + // validator entries for a given epoch have been loaded from the DB, the value is true. + HasFullSnapshotValidatorSetByEpoch map[uint64]bool // SnapshotValidatorBLSPublicKeyPKIDPairEntries is a map of // to a BLSPublicKeyPKIDPairEntry. 
It contains the snapshot value of the BLSPublicKeyPKIDPairEntry @@ -170,6 +173,9 @@ type UtxoView struct { // It contains the PKID of the validator at the given index in the leader schedule // generated at the given SnapshotAtEpochNumber. SnapshotLeaderSchedule map[SnapshotLeaderScheduleMapKey]*PKID + // HasFullSnapshotLeaderScheduleByEpoch is a map of SnapshotAtEpochNumber to a boolean. If the leader schedule + // for a given epoch has been loaded from the DB, the value is true. + HasFullSnapshotLeaderScheduleByEpoch map[uint64]bool // SnapshotStakesToReward is a map of // to a snapshotted StakeEntry for the ValidatorPKID and StakerPKID pair at a given SnapshotAtEpochNumber. @@ -302,6 +308,7 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // SnapshotValidatorSet bav.SnapshotValidatorSet = make(map[SnapshotValidatorSetMapKey]*ValidatorEntry) + bav.HasFullSnapshotValidatorSetByEpoch = make(map[uint64]bool) // SnapshotValidatorBLSPublicKeyPKIDPairEntries bav.SnapshotValidatorBLSPublicKeyPKIDPairEntries = make(map[SnapshotValidatorBLSPublicKeyMapKey]*BLSPublicKeyPKIDPairEntry) @@ -311,6 +318,7 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // SnapshotLeaderSchedule bav.SnapshotLeaderSchedule = make(map[SnapshotLeaderScheduleMapKey]*PKID) + bav.HasFullSnapshotLeaderScheduleByEpoch = make(map[uint64]bool) // SnapshotStakesToReward bav.SnapshotStakesToReward = make(map[SnapshotStakeMapKey]*StakeEntry) @@ -633,6 +641,10 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.SnapshotValidatorSet[mapKey] = validatorEntry.Copy() } + for mapKey, hasFullSnapshotValidatorSet := range bav.HasFullSnapshotValidatorSetByEpoch { + newView.HasFullSnapshotValidatorSetByEpoch[mapKey] = hasFullSnapshotValidatorSet + } + for mapKey, blsPublicKeyPKIDPairEntry := range bav.SnapshotValidatorBLSPublicKeyPKIDPairEntries { newView.SnapshotValidatorBLSPublicKeyPKIDPairEntries[mapKey] = blsPublicKeyPKIDPairEntry.Copy() } @@ -646,6 +658,9 @@ func (bav *UtxoView) 
CopyUtxoView() (*UtxoView, error) { for mapKey, validatorPKID := range bav.SnapshotLeaderSchedule { newView.SnapshotLeaderSchedule[mapKey] = validatorPKID.NewPKID() } + for mapKey, hasFullSnapshotLeaderSchedule := range bav.HasFullSnapshotLeaderScheduleByEpoch { + newView.HasFullSnapshotLeaderScheduleByEpoch[mapKey] = hasFullSnapshotLeaderSchedule + } // Copy the SnapshotStakesToReward for mapKey, snapshotStakeToReward := range bav.SnapshotStakesToReward { @@ -657,6 +672,48 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { return newView, nil } +func NewUtxoViewWithSnapshotCache( + _handle *badger.DB, + _params *DeSoParams, + _postgres *Postgres, + _snapshot *Snapshot, + _eventManager *EventManager, + _snapshotCache *SnapshotCache, +) (*UtxoView, error) { + utxoView, err := NewUtxoView(_handle, _params, _postgres, _snapshot, _eventManager) + if err != nil { + return nil, err + } + if _snapshotCache != nil { + allValidatorSetEntries := _snapshotCache.GetAllCachedSnapshotValidatorSetEntries() + for snapshotAtEpochNumber, validatorSetEntries := range allValidatorSetEntries { + for _, validatorEntry := range validatorSetEntries { + utxoView.SnapshotValidatorSet[SnapshotValidatorSetMapKey{ + SnapshotAtEpochNumber: snapshotAtEpochNumber, + ValidatorPKID: *validatorEntry.ValidatorPKID, + }] = validatorEntry.Copy() + } + utxoView.HasFullSnapshotValidatorSetByEpoch[snapshotAtEpochNumber] = true + } + + allLeaderScheduleEntries := _snapshotCache.GetAllCachedLeaderSchedules() + for snapshotAtEpochNumber, leaderSchedule := range allLeaderScheduleEntries { + for leaderIndex, validatorPKID := range leaderSchedule { + utxoView.SnapshotLeaderSchedule[SnapshotLeaderScheduleMapKey{ + SnapshotAtEpochNumber: snapshotAtEpochNumber, + LeaderIndex: uint16(leaderIndex), + }] = validatorPKID.NewPKID() + } + utxoView.HasFullSnapshotLeaderScheduleByEpoch[snapshotAtEpochNumber] = true + } + allGlobalParamsEntries := _snapshotCache.GetAllCachedSnapshotGlobalParams() + for 
snapshotAtEpochNumber, globalParamsEntry := range allGlobalParamsEntries { + utxoView.SnapshotGlobalParamEntries[snapshotAtEpochNumber] = globalParamsEntry.Copy() + } + } + return utxoView, nil +} + func NewUtxoView( _handle *badger.DB, _params *DeSoParams, diff --git a/lib/blockchain.go b/lib/blockchain.go index 9dc9cc2ec..4267c0242 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -505,6 +505,9 @@ type Blockchain struct { // cache block view for each block blockViewCache lru.KVCache + // snapshot cache + snapshotCache *SnapshotCache + // State checksum is used to verify integrity of state data and when // syncing from snapshot in the hyper sync protocol. // @@ -805,7 +808,8 @@ func NewBlockchain( bestHeaderChainMap: make(map[BlockHash]*BlockNode), - blockViewCache: lru.NewKVCache(1000), // TODO: parameterize + blockViewCache: lru.NewKVCache(100), // TODO: parameterize + snapshotCache: NewSnapshotCache(), orphanList: list.New(), timer: timer, diff --git a/lib/connection_manager.go b/lib/connection_manager.go index b07bc6e97..1a8448521 100644 --- a/lib/connection_manager.go +++ b/lib/connection_manager.go @@ -114,7 +114,8 @@ type ConnectionManager struct { } func NewConnectionManager( - _params *DeSoParams, _listeners []net.Listener, + _params *DeSoParams, + _listeners []net.Listener, _hyperSync bool, _syncType NodeSyncType, _stallTimeoutSeconds uint64, diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 7ad07dd67..8db0bd83a 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -401,7 +401,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // as validate failed. func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // Construct a UtxoView, so we can perform the QC and leader checks. - utxoView, err := NewUtxoView(bc.db, bc.params, nil, bc.snapshot, nil) + utxoView, err := bc.GetCommittedTipView() if err != nil { // We can't validate the QC without a UtxoView. Return an error. 
return errors.Wrap(err, "processOrphanBlockPoS: Problem initializing UtxoView") @@ -910,12 +910,12 @@ func (bc *Blockchain) isBlockTimestampTooFarInFuturePoS(header *MsgDeSoHeader) ( return false, nil } - // We use NewUtxoView here, which generates a UtxoView at the current committed tip. We can use the view + // We use GetCommittedTipView here, which generates a UtxoView at the current committed tip. We can use the view // to fetch the snapshot global params for the previous epoch, current epoch, and next epoch. As long as // the block's height is within 3600 blocks of the committed tip, this will always work. In practice, // the incoming block never be more than 3600 blocks behind or ahead of the tip, while also failing the // above header.TstampNanoSecs <= currentTstampNanoSecs check. - utxoView, err := NewUtxoView(bc.db, bc.params, nil, bc.snapshot, nil) + utxoView, err := bc.GetCommittedTipView() if err != nil { return false, errors.Wrap(err, "isBlockTimestampTooFarInFuturePoS: Problem initializing UtxoView") } @@ -1384,6 +1384,8 @@ func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader) (* ) } blockNode.Status |= StatusHeaderValidated + + // TODO: this seems to be slowing down the sync process. // If the DB update fails, then we should return an error. 
if err = bc.upsertBlockNodeToDB(blockNode); err != nil { return nil, errors.Wrapf(err, "storeValidatedHeaderInBlockIndex: Problem upserting block node to DB") @@ -1796,6 +1798,16 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool if bc.snapshot != nil { bc.snapshot.FinishProcessBlock(blockNode) } + currentEpochNumber, err := utxoView.GetCurrentEpochNumber() + if err != nil { + return errors.Wrapf(err, "commitBlockPoS: Problem getting current epoch number") + } + snapshotEpochNumber, err := utxoView.GetCurrentSnapshotEpochNumber() + if err != nil { + return errors.Wrapf(err, "commitBlockPoS: Problem getting current snapshot epoch number") + } + bc.snapshotCache.LoadCacheAtSnapshotAtEpochNumber( + snapshotEpochNumber, currentEpochNumber, bc.db, bc.snapshot, bc.params) // TODO: What else do we need to do in here? return nil } @@ -1841,7 +1853,7 @@ func (bc *Blockchain) GetUncommittedFullBlocks(tipHash *BlockHash) ([]*MsgDeSoBl // GetCommittedTipView builds a UtxoView to the committed tip. func (bc *Blockchain) GetCommittedTipView() (*UtxoView, error) { - return NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, nil) + return NewUtxoViewWithSnapshotCache(bc.db, bc.params, bc.postgres, bc.snapshot, nil, bc.snapshotCache) } // GetUncommittedTipView builds a UtxoView to the uncommitted tip. 
@@ -1886,6 +1898,17 @@ func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, er return nil, errors.Errorf( "getUtxoViewAtBlockHash: extends from a committed block that isn't the committed tip") } + if currentBlock.IsCommitted() && !currentBlock.Hash.IsEqual(highestCommittedBlock.Hash) { + return nil, errors.Errorf( + "getUtxoViewAtBlockHash: extends from a committed block that isn't the committed tip") + } + } + if viewAtHash, exists := bc.blockViewCache.Lookup(blockHash); exists { + copiedView, err := viewAtHash.(*UtxoView).CopyUtxoView() + if err != nil { + return nil, errors.Wrapf(err, "getUtxoViewAtBlockHash: Problem copying UtxoView from cache") + } + return copiedView, nil } if viewAtHash, exists := bc.blockViewCache.Lookup(blockHash); exists { copiedView, err := viewAtHash.(*UtxoView).CopyUtxoView() @@ -1895,7 +1918,8 @@ func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, er return copiedView, nil } // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) + utxoView, err := NewUtxoViewWithSnapshotCache(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager, + bc.snapshotCache) if err != nil { return nil, errors.Wrapf(err, "getUtxoViewAtBlockHash: Problem initializing UtxoView") } diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index ee8d50205..5ef3e9594 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -765,8 +765,8 @@ func (fc *FastHotStuffConsensus) fetchValidatorListsForSafeBlocks(blocks []*MsgD validatorSetEntriesBySnapshotEpochNumber := make(map[uint64][]*ValidatorEntry) // Create a UtxoView for the committed tip block. We will use this to fetch the validator set for - // all of the safe blocks. - utxoView, err := NewUtxoView(fc.blockchain.db, fc.params, fc.blockchain.postgres, fc.blockchain.snapshot, nil) + // all the safe blocks. 
+ utxoView, err := fc.blockchain.GetCommittedTipView() if err != nil { return nil, errors.Errorf("Error creating UtxoView: %v", err) } diff --git a/lib/pos_snapshot_cache.go b/lib/pos_snapshot_cache.go new file mode 100644 index 000000000..783f37379 --- /dev/null +++ b/lib/pos_snapshot_cache.go @@ -0,0 +1,191 @@ +package lib + +import ( + "github.com/dgraph-io/badger/v3" + "github.com/pkg/errors" + "sync" +) + +// SnapshotCache is a struct that holds cached versions of the end-of-epoch +// snapshot data. This data is used to speed up the process of fetching the +// snapshot data from the database when processing blocks. We only +// ever update the cache with data from the database and never the view. +type SnapshotCache struct { + sync.RWMutex + + // snapshot validator cache + cachedValidatorsBySnapshotAtEpoch map[uint64][]*ValidatorEntry + + // snapshot leader schedule cache + cachedLeaderScheduleBySnapshotAtEpoch map[uint64][]*PKID + + // snapshot global params cache + cachedGlobalParamsBySnapshotAtEpoch map[uint64]*GlobalParamsEntry + + snapshotsCached map[uint64]bool +} + +func NewSnapshotCache() *SnapshotCache { + return &SnapshotCache{ + cachedValidatorsBySnapshotAtEpoch: make(map[uint64][]*ValidatorEntry), + cachedLeaderScheduleBySnapshotAtEpoch: make(map[uint64][]*PKID), + cachedGlobalParamsBySnapshotAtEpoch: make(map[uint64]*GlobalParamsEntry), + snapshotsCached: make(map[uint64]bool), + } +} + +// LoadCacheAtSnapshotAtEpochNumber loads the cache with the snapshot data at the +// given snapshot epoch number and the previous two snapshot at epochs. It should +// only ever be called when committing a block. 
+func (sc *SnapshotCache) LoadCacheAtSnapshotAtEpochNumber( + snapshotAtEpochNumber uint64, + currentEpochNumber uint64, + handle *badger.DB, + snapshot *Snapshot, + params *DeSoParams, +) { + var snapshotsToLoad []uint64 + for ii := snapshotAtEpochNumber; ii < currentEpochNumber; ii++ { + snapshotsToLoad = append(snapshotsToLoad, ii) + } + for _, epochNumber := range snapshotsToLoad { + _, err1 := sc.GetSnapshotValidatorSetEntriesByStakeAtEpochNumber(snapshotAtEpochNumber, handle, snapshot, params) + _, err2 := sc.GetSnapshotLeaderSchedule(snapshotAtEpochNumber, handle, snapshot, params) + _, err3 := sc.GetSnapshotGlobalParams(snapshotAtEpochNumber, handle, snapshot, params) + if err1 == nil && err2 == nil && err3 == nil { + sc.snapshotsCached[epochNumber] = true + } + } + for epochNumber := range sc.snapshotsCached { + if epochNumber < snapshotAtEpochNumber { + delete(sc.cachedValidatorsBySnapshotAtEpoch, epochNumber) + delete(sc.cachedLeaderScheduleBySnapshotAtEpoch, epochNumber) + delete(sc.cachedGlobalParamsBySnapshotAtEpoch, epochNumber) + delete(sc.snapshotsCached, epochNumber) + } + } +} + +// GetAllCachedSnapshotValidatorSetEntries returns all cached validator entries. +func (sc *SnapshotCache) GetAllCachedSnapshotValidatorSetEntries() map[uint64][]*ValidatorEntry { + sc.RLock() + defer sc.RUnlock() + return sc.cachedValidatorsBySnapshotAtEpoch +} + +// GetSnapshotValidatorSetEntriesByStakeAtEpochNumber returns the top N validators by stake at the given snapshot +// epoch number. +func (sc *SnapshotCache) GetSnapshotValidatorSetEntriesByStakeAtEpochNumber( + snapshotAtEpochNumber uint64, + handle *badger.DB, + snapshot *Snapshot, + params *DeSoParams, +) ([]*ValidatorEntry, error) { + // First check the cache to see if we have the validator entries for the snapshot epoch number. 
+ sc.RLock() + validatorEntries, exists := sc.cachedValidatorsBySnapshotAtEpoch[snapshotAtEpochNumber] + sc.RUnlock() + if exists { + return validatorEntries, nil + } + // If not found in the cache, create a new view and use it to fetch the validator entries. + tempView, err := NewUtxoView(handle, params, nil, snapshot, nil) + if err != nil { + return nil, errors.Wrap(err, "sc.GetSnapshotLeaderSchedule: Error creating new UtxoView: ") + } + // Get the snapshot global params so we know how many validators to fetch. + snapshotGlobalParams, err := sc.GetSnapshotGlobalParams(snapshotAtEpochNumber, handle, snapshot, params) + if err != nil { + return nil, errors.Wrap( + err, "sc.GetSnapshotValidatorSetEntriesByStakeAtEpochNumber: Error fetching global params: ") + } + // Fetch the validator entries for the snapshot epoch number. + finalValidatorEntries, err := tempView.GetSnapshotValidatorSetByStakeAmountAtEpochNumber( + snapshotAtEpochNumber, snapshotGlobalParams.ValidatorSetMaxNumValidators) + if err != nil { + return nil, errors.Wrap( + err, "sc.GetSnapshotValidatorSetEntriesByStakeAtEpochNumber: Error fetching validator set: ") + } + // Cache the ValidatorEntries. + sc.Lock() + sc.cachedValidatorsBySnapshotAtEpoch[snapshotAtEpochNumber] = finalValidatorEntries + sc.Unlock() + return finalValidatorEntries, nil +} + +// GetAllCachedSnapshotGlobalParams returns all cached global params. +func (sc *SnapshotCache) GetAllCachedSnapshotGlobalParams() map[uint64]*GlobalParamsEntry { + sc.RLock() + defer sc.RUnlock() + return sc.cachedGlobalParamsBySnapshotAtEpoch +} + +// GetSnapshotGlobalParams returns the global params for the snapshot at the given epoch number. +// If it is not in the cache, it fetches it from the database and caches it. 
+func (sc *SnapshotCache) GetSnapshotGlobalParams( + snapshotAtEpoch uint64, + handle *badger.DB, + snapshot *Snapshot, + params *DeSoParams, +) (*GlobalParamsEntry, error) { + // First check the cache to see if we have the global params for the snapshot epoch number. + sc.RLock() + globalParams, exists := sc.cachedGlobalParamsBySnapshotAtEpoch[snapshotAtEpoch] + sc.RUnlock() + if exists { + return globalParams, nil + } + // If not found in the cache, create a new view and use it to fetch the global params. + tempView, err := NewUtxoView(handle, params, nil, snapshot, nil) + if err != nil { + return nil, errors.Wrap(err, "sc.GetSnapshotLeaderSchedule: Error creating new UtxoView: ") + } + globalParams, err = tempView.GetSnapshotGlobalParamsEntryByEpochNumber(snapshotAtEpoch) + if err != nil { + return nil, errors.Wrap(err, "sc.GetSnapshotGlobalParams: Error fetching global params: ") + } + // Cache the global params for the snapshot epoch number + sc.Lock() + sc.cachedGlobalParamsBySnapshotAtEpoch[snapshotAtEpoch] = globalParams + sc.Unlock() + return globalParams, nil +} + +// GetAllCachedLeaderSchedules returns all cached leader schedules. +func (sc *SnapshotCache) GetAllCachedLeaderSchedules() map[uint64][]*PKID { + sc.RLock() + defer sc.RUnlock() + return sc.cachedLeaderScheduleBySnapshotAtEpoch +} + +// GetSnapshotLeaderSchedule returns the leader schedule for the snapshot at the given epoch number. +// If it is not in the cache, it fetches it from the database and caches it. +func (sc *SnapshotCache) GetSnapshotLeaderSchedule( + snapshotAtEpoch uint64, + handle *badger.DB, + snapshot *Snapshot, + params *DeSoParams, +) ([]*PKID, error) { + // First check the cache to see if we have the leader schedule for the snapshot epoch number. 
+ sc.RLock() + leaderSchedule, exists := sc.cachedLeaderScheduleBySnapshotAtEpoch[snapshotAtEpoch] + sc.RUnlock() + if exists { + return leaderSchedule, nil + } + + // Fetch the leader schedule for the snapshot epoch number + tempView, err := NewUtxoView(handle, params, nil, snapshot, nil) + if err != nil { + return nil, errors.Wrap(err, "sc.GetSnapshotLeaderSchedule: Error creating new UtxoView: ") + } + leaderPKIDs, err := tempView.GetSnapshotLeaderScheduleAtEpochNumber(snapshotAtEpoch) + if err != nil { + return nil, errors.Wrap(err, "sc.GetSnapshotLeaderSchedule: Error fetching leader schedule: ") + } + // Cache the leader schedule for the snapshot epoch number + sc.Lock() + sc.cachedLeaderScheduleBySnapshotAtEpoch[snapshotAtEpoch] = leaderPKIDs + sc.Unlock() + return leaderPKIDs, nil +} diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 69b34c20e..b0fcda326 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -8,7 +8,6 @@ import ( "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" - "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" @@ -303,26 +302,33 @@ func (bav *UtxoView) GetSnapshotValidatorSetByStakeAmountAtEpochNumber(snapshotA utxoViewValidatorEntries = append(utxoViewValidatorEntries, validatorEntry) } } - // Pull top N ValidatorEntries from the database (not present in the UtxoView). - // Note that we will skip validators that are present in the view because we pass - // utxoViewValidatorEntries to the function. - dbValidatorEntries, err := DBGetSnapshotValidatorSetByStakeAmount( - bav.Handle, bav.Snapshot, limit, snapshotAtEpochNumber, utxoViewValidatorEntries, - ) - if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotValidatorSetByStakeAmountAtEpochNumber: error retrieving entries from db: ") - } - // Cache top N active ValidatorEntries from the db in the UtxoView. 
- for _, validatorEntry := range dbValidatorEntries { - // We only pull ValidatorEntries from the db that are not present in the - // UtxoView. As a sanity check, we double-check that the ValidatorEntry - // is not already in the UtxoView here. - mapKey := SnapshotValidatorSetMapKey{ - SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID, + // If the view hasn't loaded the full set of validators for this snapshot, pull them from the db. + if !bav.HasFullSnapshotValidatorSetByEpoch[snapshotAtEpochNumber] { + + // Pull top N ValidatorEntries from the database (not present in the UtxoView). + // Note that we will skip validators that are present in the view because we pass + // utxoViewValidatorEntries to the function. + dbValidatorEntries, err := DBGetSnapshotValidatorSetByStakeAmount( + bav.Handle, bav.Snapshot, limit, snapshotAtEpochNumber, utxoViewValidatorEntries, + ) + if err != nil { + return nil, errors.Wrapf( + err, "GetSnapshotValidatorSetByStakeAmountAtEpochNumber: error retrieving entries from db: ") } - if _, exists := bav.SnapshotValidatorSet[mapKey]; !exists { - bav._setSnapshotValidatorSetEntry(validatorEntry, snapshotAtEpochNumber) + // Cache top N active ValidatorEntries from the db in the UtxoView. + for _, validatorEntry := range dbValidatorEntries { + // We only pull ValidatorEntries from the db that are not present in the + // UtxoView. As a sanity check, we double-check that the ValidatorEntry + // is not already in the UtxoView here. + mapKey := SnapshotValidatorSetMapKey{ + SnapshotAtEpochNumber: snapshotAtEpochNumber, ValidatorPKID: *validatorEntry.ValidatorPKID, + } + if _, exists := bav.SnapshotValidatorSet[mapKey]; !exists { + bav._setSnapshotValidatorSetEntry(validatorEntry, snapshotAtEpochNumber) + } } + // Mark that we have the full set of validators for this snapshot in the view. 
+ bav.HasFullSnapshotValidatorSetByEpoch[snapshotAtEpochNumber] = true } // Pull !isDeleted, active ValidatorEntries from the UtxoView with stake > 0. var validatorEntries []*ValidatorEntry @@ -1239,23 +1245,31 @@ func (bav *UtxoView) GetCurrentSnapshotLeaderSchedule() ([]*PKID, error) { if err != nil { return nil, errors.Wrapf(err, "GetCurrentSnapshotLeaderSchedule: problem calculating SnapshotEpochNumber: ") } - return bav.GetSnapshotLeaderScheduleAtEpochNumber(snapshotAtEpochNumber) -} -func (bav *UtxoView) GetSnapshotLeaderScheduleAtEpochNumber(snapshotAtEpochNumber uint64) ([]*PKID, error) { - // Seek over DB prefix and merge into view. - leaderIdxToValidatorPKIDMap, err := DBSeekSnapshotLeaderSchedule(bav.Handle, snapshotAtEpochNumber) + snapshotLeaderSchedule, err := bav.GetSnapshotLeaderScheduleAtEpochNumber(snapshotAtEpochNumber) if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotLeaderScheduleAtEpochNumber: error retrieving ValidatorPKIDs: ") + return nil, errors.Wrapf(err, "GetCurrentSnapshotLeaderSchedule: problem retrieving LeaderSchedule: ") } - // Merge the DB entries into the UtxoView. - for leaderIdx, validatorPKID := range leaderIdxToValidatorPKIDMap { - snapshotLeaderScheduleMapKey := SnapshotLeaderScheduleMapKey{ - SnapshotAtEpochNumber: snapshotAtEpochNumber, - LeaderIndex: leaderIdx, + return snapshotLeaderSchedule, nil +} +func (bav *UtxoView) GetSnapshotLeaderScheduleAtEpochNumber(snapshotAtEpochNumber uint64) ([]*PKID, error) { + if !bav.HasFullSnapshotLeaderScheduleByEpoch[snapshotAtEpochNumber] { + // Seek over DB prefix and merge into view. 
+ leaderIdxToValidatorPKIDMap, err := DBSeekSnapshotLeaderSchedule(bav.Handle, snapshotAtEpochNumber) + if err != nil { + return nil, errors.Wrapf(err, "GetSnapshotLeaderScheduleAtEpochNumber: error retrieving ValidatorPKIDs: ") } - if _, exists := bav.SnapshotLeaderSchedule[snapshotLeaderScheduleMapKey]; !exists { - bav._setSnapshotLeaderScheduleValidator(validatorPKID, leaderIdx, snapshotAtEpochNumber) + // Merge the DB entries into the UtxoView. + for leaderIdx, validatorPKID := range leaderIdxToValidatorPKIDMap { + snapshotLeaderScheduleMapKey := SnapshotLeaderScheduleMapKey{ + SnapshotAtEpochNumber: snapshotAtEpochNumber, + LeaderIndex: leaderIdx, + } + if _, exists := bav.SnapshotLeaderSchedule[snapshotLeaderScheduleMapKey]; !exists { + bav._setSnapshotLeaderScheduleValidator(validatorPKID, leaderIdx, snapshotAtEpochNumber) + } } + // Mark that we have the full snapshot leader schedule for this epoch. + bav.HasFullSnapshotLeaderScheduleByEpoch[snapshotAtEpochNumber] = true } // First, check the UtxoView. diff --git a/lib/server.go b/lib/server.go index b0873cc94..a0ed2c656 100644 --- a/lib/server.go +++ b/lib/server.go @@ -249,7 +249,6 @@ func (srv *Server) GetBlockProducer() *DeSoBlockProducer { return srv.blockProducer } -// TODO: The hallmark of a messy non-law-of-demeter-following interface... func (srv *Server) GetConnectionManager() *ConnectionManager { return srv.cmgr } @@ -918,15 +917,15 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { printHeight = uint64(srv.blockchain.headerTip().Height) } glog.Infof(CLog(Yellow, fmt.Sprintf("Received header bundle with %v headers "+ - "in state %s from peer %v. Downloaded ( %v / %v ) total headers. Current Chain State: %v", + "in state %s from peer %v. 
Downloaded ( %v / %v ) total headers.", len(msg.Headers), srv.blockchain.chainState(), pp, - srv.blockchain.headerTip().Header.Height, printHeight, srv.blockchain.ChainState()))) + srv.blockchain.headerTip().Header.Height, printHeight))) // If we get here, it means that the node is not currently running a Fast-HotStuff // validator or that the node is syncing. In either case, we sync headers according // to the blocksync rules. - // Start by processing all of the headers given to us. They should start + // Start by processing all the headers given to us. They should start // right after the tip of our header chain ideally. While going through them // tally up the number that we actually process. numNewHeaders := 0 From 1a3b3abd7c712824194b610e9b7a01f52dfe40b3 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 21 Mar 2024 17:08:01 -0400 Subject: [PATCH 560/762] Look up validator set only once in end of epoch (#1127) --- lib/block_view.go | 5 ++++- lib/block_view_types.go | 5 +++-- lib/pos_epoch_complete_hook.go | 29 +++++++++++++---------------- lib/pos_leader_schedule.go | 11 ++++++----- lib/pos_leader_schedule_test.go | 26 ++++++++++++++++++++------ 5 files changed, 46 insertions(+), 30 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 045025344..3b8d479b2 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3302,7 +3302,10 @@ func (bav *UtxoView) _connectUpdateGlobalParams( // Cross-validate the new LeaderScheduleMaxNumValidators and ValidatorSetMaxNumValidators values. The size of the // leader schedule must be less than or equal to the size of the validator set. - if newGlobalParamsEntry.ValidatorSetMaxNumValidators < newGlobalParamsEntry.LeaderScheduleMaxNumValidators { + // We must merge the defaults in the event that ValidatorSetMaxNumValidators is not set. 
+ mergedGlobalParamsEntry := MergeGlobalParamEntryDefaults(&newGlobalParamsEntry, bav.Params) + if mergedGlobalParamsEntry.ValidatorSetMaxNumValidators < + mergedGlobalParamsEntry.LeaderScheduleMaxNumValidators { return 0, 0, nil, RuleErrorLeaderScheduleExceedsValidatorSetMaxNumValidators } diff --git a/lib/block_view_types.go b/lib/block_view_types.go index b487c91ec..c36216ce8 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -4094,12 +4094,13 @@ type GlobalParamsEntry struct { ValidatorJailEpochDuration uint64 // LeaderScheduleMaxNumValidators is the maximum number of validators that - // are included when generating a new Proof-of-Stake leader schedule. + // are included when generating a new Proof-of-Stake leader schedule. This + // must be less than or equal to ValidatorSetMaxNumValidators. LeaderScheduleMaxNumValidators uint64 // ValidatorSetMaxNumValidators is the maximum number of validators that // are included in the active validator set every epoch in the Proof-of-Stake - // consensus. + // consensus. This must be greater than or equal to LeaderScheduleMaxNumValidators. ValidatorSetMaxNumValidators uint64 // StakingRewardsMaxNumStakes is the maximum number of stake entries that are diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index c299015ba..2bbf6432d 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -121,19 +121,23 @@ func (bav *UtxoView) runEpochCompleteSnapshotGeneration(epochNumber uint64) erro // Snapshot the current GlobalParamsEntry. bav._setSnapshotGlobalParamsEntry(bav.GetCurrentGlobalParamsEntry(), epochNumber) - // Snapshot the current top m validators as the validator set. 
- validatorSet, err := bav.generateAndSnapshotValidatorSet(epochNumber) + currentGlobalParamsEntry := bav.GetCurrentGlobalParamsEntry() + validatorEntries, err := bav.GetTopActiveValidatorsByStakeAmount( + currentGlobalParamsEntry.ValidatorSetMaxNumValidators) if err != nil { - return errors.Wrapf(err, "runEpochCompleteSnapshotGeneration: problem snapshotting validator set: ") + return errors.Wrapf(err, "runEpochCompleteSnapshotGeneration: problem retrieving top ValidatorEntries: ") } + // Snapshot the current top m validators as the validator set. + bav.generateAndSnapshotValidatorSet(epochNumber, validatorEntries) + // Snapshot a randomly generated leader schedule. - if err = bav.generateAndSnapshotLeaderSchedule(epochNumber); err != nil { + if err = bav.generateAndSnapshotLeaderSchedule(epochNumber, validatorEntries); err != nil { return errors.Wrapf(err, "runEpochCompleteSnapshotGeneration: problem snapshotting leader schedule: ") } // Snapshot the current top n stake entries as the stakes to reward. - if err = bav.generateAndSnapshotStakesToReward(epochNumber, validatorSet); err != nil { + if err = bav.generateAndSnapshotStakesToReward(epochNumber, validatorEntries); err != nil { return errors.Wrapf(err, "runEpochCompleteSnapshotGeneration: problem snapshotting stakes to reward: ") } @@ -266,14 +270,9 @@ func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeig return nil } -func (bav *UtxoView) generateAndSnapshotValidatorSet(epochNumber uint64) ([]*ValidatorEntry, error) { +func (bav *UtxoView) generateAndSnapshotValidatorSet(epochNumber uint64, validatorSet []*ValidatorEntry, +) { // Snapshot the current top n active validators as the validator set. 
- validatorSet, err := bav.GetTopActiveValidatorsByStakeAmount( - bav.GetCurrentGlobalParamsEntry().ValidatorSetMaxNumValidators, - ) - if err != nil { - return nil, errors.Wrapf(err, "generateAndSnapshotValidatorSet: error retrieving top ValidatorEntries: ") - } for _, validatorEntry := range validatorSet { bav._setSnapshotValidatorSetEntry(validatorEntry, epochNumber) } @@ -282,13 +281,11 @@ func (bav *UtxoView) generateAndSnapshotValidatorSet(epochNumber uint64) ([]*Val // active validators for the epoch. The total stake is the sum of all of the active validators' stakes. validatorSetTotalStakeAmountNanos := SumValidatorEntriesTotalStakeAmountNanos(validatorSet) bav._setSnapshotValidatorSetTotalStakeAmountNanos(validatorSetTotalStakeAmountNanos, epochNumber) - - return validatorSet, nil } -func (bav *UtxoView) generateAndSnapshotLeaderSchedule(epochNumber uint64) error { +func (bav *UtxoView) generateAndSnapshotLeaderSchedule(epochNumber uint64, validatorSet []*ValidatorEntry) error { // Generate a random leader schedule and snapshot it. - leaderSchedule, err := bav.GenerateLeaderSchedule() + leaderSchedule, err := bav.GenerateLeaderSchedule(validatorSet) if err != nil { return errors.Wrapf(err, "generateAndSnapshotLeaderSchedule: problem generating leader schedule: ") } diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 30617d627..618e54bf5 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -7,7 +7,7 @@ import ( "github.com/pkg/errors" ) -func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) { +func (bav *UtxoView) GenerateLeaderSchedule(validatorSet []*ValidatorEntry) ([]*PKID, error) { // Retrieve CurrentRandomSeedHash. currentRandomSeedHash, err := bav.GetCurrentRandomSeedHash() if err != nil { @@ -19,10 +19,11 @@ func (bav *UtxoView) GenerateLeaderSchedule() ([]*PKID, error) { // to snapshot the leader schedule. 
currentGlobalParamsEntry := bav.GetCurrentGlobalParamsEntry() - // Retrieve top, active validators ordered by stake. - validatorEntries, err := bav.GetTopActiveValidatorsByStakeAmount(currentGlobalParamsEntry.LeaderScheduleMaxNumValidators) - if err != nil { - return nil, errors.Wrapf(err, "UtxoView.GenerateLeaderSchedule: error retrieving top ValidatorEntries: ") + validatorEntries := validatorSet + // If the number of validators is greater than the max number of validators, we need to select the top + // maxLeaderScheduleNumValidators validators by stake. + if uint64(len(validatorSet)) > currentGlobalParamsEntry.LeaderScheduleMaxNumValidators { + validatorEntries = validatorSet[:currentGlobalParamsEntry.LeaderScheduleMaxNumValidators] } if len(validatorEntries) == 0 { return []*PKID{}, nil diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 21a77d342..10c938444 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -101,12 +101,26 @@ func TestGenerateLeaderSchedule(t *testing.T) { require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) } + getValidatorSet := func() []*ValidatorEntry { + utxoView := newUtxoView() + leaderScheduleMaxNumValidators := utxoView.GetCurrentGlobalParamsEntry().LeaderScheduleMaxNumValidators + maxValidatorSetSize := utxoView.GetCurrentGlobalParamsEntry().ValidatorSetMaxNumValidators + maxValidatorsToFetch := leaderScheduleMaxNumValidators + if maxValidatorsToFetch < maxValidatorSetSize { + maxValidatorsToFetch = maxValidatorSetSize + } + maxValidatorSet, err := utxoView.GetTopActiveValidatorsByStakeAmount(maxValidatorsToFetch) + require.NoError(t, err) + return maxValidatorSet + } + testGenerateLeaderSchedule := func(expectedOrder []*PKID) { // We test that GenerateLeaderSchedule() is idempotent by running it 10 times. // Given the same CurrentRandomSeedHash and the same stake-weighted validators, // we verify that we generate the same leader schedule each time. 
for ii := 0; ii < 10; ii++ { - leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + maxValidatorSet := getValidatorSet() + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule(maxValidatorSet) require.NoError(t, err) require.Len(t, leaderSchedule, len(expectedOrder)) @@ -134,7 +148,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { } { // Test GenerateLeaderSchedule() edge case: no registered validators. - leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule(nil) require.NoError(t, err) require.Empty(t, leaderSchedule) } @@ -144,7 +158,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { } { // Test GenerateLeaderSchedule() edge case: one registered validator with zero stake. - leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule([]*ValidatorEntry{}) require.NoError(t, err) require.Empty(t, leaderSchedule) } @@ -154,7 +168,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { } { // Test GenerateLeaderSchedule() edge case: one registered validator with non-zero stake. - leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule(getValidatorSet()) require.NoError(t, err) require.Len(t, leaderSchedule, 1) require.Equal(t, leaderSchedule[0], m0PKID) @@ -165,7 +179,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { } { // Test GenerateLeaderSchedule() edge case: two registered validators with non-zero stake. - leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule(getValidatorSet()) require.NoError(t, err) require.Len(t, leaderSchedule, 2) require.Equal(t, leaderSchedule[0], m1PKID) @@ -242,7 +256,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { { // Test changing LeaderScheduleMaxNumValidators. 
params.DefaultLeaderScheduleMaxNumValidators = 5 - leaderSchedule, err := newUtxoView().GenerateLeaderSchedule() + leaderSchedule, err := newUtxoView().GenerateLeaderSchedule(getValidatorSet()) require.NoError(t, err) require.Len(t, leaderSchedule, 5) } From df6d14e630c2b1133315f2207a863f81a51d7c3d Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 21 Mar 2024 17:09:33 -0400 Subject: [PATCH 561/762] Read bytes directly into PKID struct instead of creating reader (#1134) --- lib/block_view_stake.go | 14 ++++++-------- lib/block_view_validator.go | 12 ++++++++---- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 1acf463dc..ff3d90d26 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -581,20 +581,18 @@ func DBKeyForStakeByStakeAmount(stakeEntry *StakeEntry) []byte { func GetValidatorPKIDFromDBKeyForStakeByStakeAmount(key []byte) (*PKID, error) { validatorPKIDBytes := key[len(key)-(PublicKeyLenCompressed*2) : len(key)-PublicKeyLenCompressed] - validatorPKID := PKID{} - if err := validatorPKID.FromBytes(bytes.NewReader(validatorPKIDBytes)); err != nil { - return nil, errors.Wrapf(err, "GetValidatorPKIDFromDBKeyForStakeByStakeAmount: ") + if len(validatorPKIDBytes) != PublicKeyLenCompressed { + return nil, fmt.Errorf("GetValidatorPKIDFromDBKeyForStakeByStakeAmount: invalid key length") } - return &validatorPKID, nil + return NewPKID(validatorPKIDBytes), nil } func GetStakerPKIDFromDBKeyForStakeByStakeAmount(key []byte) (*PKID, error) { stakerPKIDBytes := key[len(key)-(PublicKeyLenCompressed):] - stakerPKID := PKID{} - if err := stakerPKID.FromBytes(bytes.NewReader(stakerPKIDBytes)); err != nil { - return nil, errors.Wrapf(err, "GetStakerPKIDFromDBKeyForStakeByStakeAmount: ") + if len(stakerPKIDBytes) != PublicKeyLenCompressed { + return nil, fmt.Errorf("GetStakerPKIDFromDBKeyForStakeByStakeAmount: invalid key length") } - return 
&stakerPKID, nil + return NewPKID(stakerPKIDBytes), nil } func DBKeyForLockedStakeByValidatorAndStakerAndLockedAt(lockedStakeEntry *LockedStakeEntry) []byte { diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 124541ae9..a068896ae 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -674,11 +674,15 @@ func DBKeyForValidatorByStatusAndStakeAmount(validatorEntry *ValidatorEntry) []b func GetValidatorPKIDFromDBKeyForValidatorByStatusAndStakeAmount(key []byte) (*PKID, error) { validatorPKIDBytes := key[len(key)-PublicKeyLenCompressed:] - validatorPKID := PKID{} - if err := validatorPKID.FromBytes(bytes.NewReader(validatorPKIDBytes)); err != nil { - return nil, errors.Wrapf(err, "GetValidatorPKIDFromDBKeyForValidatorByStatusAndStakeAmount: problem reading ValidatorPKID: ") + if len(validatorPKIDBytes) != PublicKeyLenCompressed { + return nil, fmt.Errorf( + "GetValidatorPKIDFromDBKeyForValidatorByStatusAndStakeAmount: Problem reading ValidatorPKID: "+ + "Length of ValidatorPKIDBytes is %d but expected %d", + len(validatorPKIDBytes), PublicKeyLenCompressed, + ) + } - return &validatorPKID, nil + return NewPKID(validatorPKIDBytes), nil } func DBGetValidatorByPKID(handle *badger.DB, snap *Snapshot, pkid *PKID) (*ValidatorEntry, error) { From a37ce44f7cc8c0b0f30b3f45391270524605e80b Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 21 Mar 2024 17:11:32 -0400 Subject: [PATCH 562/762] Add keys only seek for badger (#1135) --- lib/block_view_stake.go | 2 +- lib/block_view_validator.go | 2 +- lib/db_utils.go | 94 ++++++++++++++++++++++++++++++------- 3 files changed, 80 insertions(+), 18 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index ff3d90d26..097521934 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -719,7 +719,7 @@ func DBGetTopStakesForValidatorsByStakeAmount( // Retrieve top N StakeEntry keys by stake amount. 
key := append([]byte{}, Prefixes.PrefixStakeByStakeAmount...) - keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrderAndSkipFunc( + keysFound, err := EnumerateKeysOnlyForPrefixWithLimitOffsetOrderAndSkipFunc( handle, key, int(limit), nil, true, canSkipValidatorPKIDAndStakerPKIDInBadgerSeek, ) if err != nil { diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index a068896ae..1df4aa8b6 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -748,7 +748,7 @@ func DBGetTopActiveValidatorsByStakeAmount( // Retrieve top N active ValidatorEntry keys by stake. key := append([]byte{}, Prefixes.PrefixValidatorByStatusAndStakeAmount...) key = append(key, EncodeUint8(uint8(ValidatorStatusActive))...) - keysFound, _, err := EnumerateKeysForPrefixWithLimitOffsetOrderAndSkipFunc( + keysFound, err := EnumerateKeysOnlyForPrefixWithLimitOffsetOrderAndSkipFunc( handle, key, int(limit), nil, true, canSkipValidatorInBadgerSeek, ) if err != nil { diff --git a/lib/db_utils.go b/lib/db_utils.go index 08127bfbb..a3bcaacb2 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -11834,8 +11834,8 @@ func EnumerateKeysForPrefixWithLimitOffsetOrder( return keysFound, valsFound, nil } -func EnumerateKeysForPrefixWithLimitOffsetOrderAndSkipFunc( - db *badger.DB, +func _enumerateKeysForPrefixWithLimitOffsetOrderWithTxn( + txn *badger.Txn, prefix []byte, limit int, lastSeenKey []byte, @@ -11845,33 +11845,99 @@ func EnumerateKeysForPrefixWithLimitOffsetOrderAndSkipFunc( keysFound := [][]byte{} valsFound := [][]byte{} + // If provided, start at the last seen key. + startingKey := prefix + haveSeenLastSeenKey := true + if lastSeenKey != nil { + startingKey = lastSeenKey + haveSeenLastSeenKey = false + if limit > 0 { + // Need to increment limit by one (if non-zero) since + // we include the lastSeenKey/lastSeenValue. + limit += 1 + } + } + + opts := badger.DefaultIteratorOptions + // Search keys in reverse order if sort DESC. 
+ if sortDescending { + opts.Reverse = true + startingKey = append(startingKey, 0xff) + } + opts.Prefix = prefix + nodeIterator := txn.NewIterator(opts) + defer nodeIterator.Close() + + for nodeIterator.Seek(startingKey); nodeIterator.ValidForPrefix(prefix); nodeIterator.Next() { + // Break if at or beyond limit. + if limit > 0 && len(keysFound) >= limit { + break + } + key := nodeIterator.Item().Key() + // Skip if key is before the last seen key. The caller + // needs to filter out the lastSeenKey in the view as + // we return any key >= the lastSeenKey. + if !haveSeenLastSeenKey { + if !bytes.Equal(key, lastSeenKey) { + continue + } + haveSeenLastSeenKey = true + } + // Skip if key can be skipped. + if canSkipKey(key) { + continue + } + // Copy key. + keyCopy := make([]byte, len(key)) + copy(keyCopy[:], key[:]) + // Copy value. + valCopy, err := nodeIterator.Item().ValueCopy(nil) + if err != nil { + return nil, nil, err + } + // Append found entry to return slices. + keysFound = append(keysFound, keyCopy) + valsFound = append(valsFound, valCopy) + } + return keysFound, valsFound, nil +} + +func EnumerateKeysOnlyForPrefixWithLimitOffsetOrderAndSkipFunc( + db *badger.DB, + prefix []byte, + limit int, + lastSeenKey []byte, + sortDescending bool, + canSkipKey func([]byte) bool, +) ([][]byte, error) { + keysFound := [][]byte{} + dbErr := db.View(func(txn *badger.Txn) error { var err error - keysFound, valsFound, err = _enumerateKeysForPrefixWithLimitOffsetOrderWithTxn( + keysFound, err = _enumerateKeysOnlyForPrefixWithLimitOffsetOrderWithTxn( txn, prefix, limit, lastSeenKey, sortDescending, canSkipKey, ) return err }) if dbErr != nil { - return nil, nil, errors.Wrapf( + return nil, errors.Wrapf( dbErr, - "EnumerateKeysForPrefixWithLimitOffsetOrderAndSkipFunc: problem fetching keys and values from db: ", + "EnumerateKeysOnlyForPrefixWithLimitOffsetOrderAndSkipFunc: problem fetching keys from db: ", ) } - return keysFound, valsFound, nil + return keysFound, nil } -func 
_enumerateKeysForPrefixWithLimitOffsetOrderWithTxn( +func _enumerateKeysOnlyForPrefixWithLimitOffsetOrderWithTxn( txn *badger.Txn, prefix []byte, limit int, lastSeenKey []byte, sortDescending bool, canSkipKey func([]byte) bool, -) ([][]byte, [][]byte, error) { +) ([][]byte, error) { keysFound := [][]byte{} - valsFound := [][]byte{} // If provided, start at the last seen key. startingKey := prefix @@ -11892,6 +11958,8 @@ func _enumerateKeysForPrefixWithLimitOffsetOrderWithTxn( opts.Reverse = true startingKey = append(startingKey, 0xff) } + opts.PrefetchValues = false + opts.Prefix = prefix nodeIterator := txn.NewIterator(opts) defer nodeIterator.Close() @@ -11917,16 +11985,10 @@ func _enumerateKeysForPrefixWithLimitOffsetOrderWithTxn( // Copy key. keyCopy := make([]byte, len(key)) copy(keyCopy[:], key[:]) - // Copy value. - valCopy, err := nodeIterator.Item().ValueCopy(nil) - if err != nil { - return nil, nil, err - } // Append found entry to return slices. keysFound = append(keysFound, keyCopy) - valsFound = append(valsFound, valCopy) } - return keysFound, valsFound, nil + return keysFound, nil } // Check to see if the badger db has already been initialized with the performance options. 
From 602fafc559fee13f0654af9c84a15e7a02ffdc9d Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 21 Mar 2024 17:13:31 -0400 Subject: [PATCH 563/762] Set Prefix when seeking and set prefetch values to false where appropriate (#1136) --- integration_testing/tools.go | 1 + lib/block_view_like.go | 2 +- lib/block_view_post.go | 6 +-- lib/block_view_stake.go | 4 +- lib/db_utils.go | 92 +++++++++++++++++++++--------------- lib/pos_snapshot_entries.go | 2 +- lib/snapshot.go | 1 + 7 files changed, 64 insertions(+), 44 deletions(-) diff --git a/integration_testing/tools.go b/integration_testing/tools.go index 6dde06b93..f0df91574 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -437,6 +437,7 @@ func computeNodeStateChecksum(t *testing.T, node *cmd.Node, blockHeight uint64) err := node.Server.GetBlockchain().DB().View(func(txn *badger.Txn) error { opts := badger.DefaultIteratorOptions for _, prefix := range prefixes { + opts.Prefix = prefix it := txn.NewIterator(opts) for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { item := it.Item() diff --git a/lib/block_view_like.go b/lib/block_view_like.go index 33cf3200f..c3bb9355c 100644 --- a/lib/block_view_like.go +++ b/lib/block_view_like.go @@ -77,7 +77,7 @@ func (bav *UtxoView) GetLikesForPostHash(postHash *BlockHash) (_likerPubKeys [][ handle := bav.Handle dbPrefix := append([]byte{}, Prefixes.PrefixLikedPostHashToLikerPubKey...) dbPrefix = append(dbPrefix, postHash[:]...) - keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix) + keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix, true) // Iterate over all the db keys & values and load them into the view. 
expectedKeyLength := 1 + HashSizeBytes + btcec.PubKeyBytesLenCompressed diff --git a/lib/block_view_post.go b/lib/block_view_post.go index 199e65f8a..728c091df 100644 --- a/lib/block_view_post.go +++ b/lib/block_view_post.go @@ -591,7 +591,7 @@ func (bav *UtxoView) GetDiamondSendersForPostHash(postHash *BlockHash) (_pkidToD // FIXME: Db operation like this shouldn't happen in utxoview. dbPrefix := append([]byte{}, Prefixes.PrefixDiamondedPostHashDiamonderPKIDDiamondLevel...) dbPrefix = append(dbPrefix, postHash[:]...) - keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix) + keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix, true) diamondPostEntry := bav.GetPostEntryForPostHash(postHash) receiverPKIDEntry := bav.GetPKIDForPublicKey(diamondPostEntry.PosterPublicKey) @@ -632,7 +632,7 @@ func (bav *UtxoView) GetRepostsForPostHash(postHash *BlockHash) (_reposterPubKey // FIXME: Db operation like this shouldn't happen in utxoview. dbPrefix := append([]byte{}, Prefixes.PrefixRepostedPostHashReposterPubKey...) dbPrefix = append(dbPrefix, postHash[:]...) - keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix) + keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix, true) // Iterate over all the db keys & values and load them into the view. expectedKeyLength := 1 + HashSizeBytes + btcec.PubKeyBytesLenCompressed @@ -669,7 +669,7 @@ func (bav *UtxoView) GetQuoteRepostsForPostHash(postHash *BlockHash, // FIXME: Db operation like this shouldn't happen in utxoview. dbPrefix := append([]byte{}, Prefixes.PrefixRepostedPostHashReposterPubKeyRepostPostHash...) dbPrefix = append(dbPrefix, postHash[:]...) - keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix) + keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix, true) // Iterate over all the db keys & values and load them into the view. 
expectedKeyLength := 1 + HashSizeBytes + btcec.PubKeyBytesLenCompressed + HashSizeBytes diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 097521934..fd8520eb2 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -877,7 +877,9 @@ func DBGetLockedStakeEntriesInRangeWithTxn( }) // Create an iterator. - iterator := txn.NewIterator(badger.DefaultIteratorOptions) + opts := badger.DefaultIteratorOptions + opts.Prefix = prefixKey + iterator := txn.NewIterator(opts) defer iterator.Close() // Store matching LockedStakeEntries to return. diff --git a/lib/db_utils.go b/lib/db_utils.go index a3bcaacb2..e1b0a0a32 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -1335,6 +1335,7 @@ func DBDeleteAllStateRecords(db *badger.DB) (_shouldErase bool, _error error) { opts := badger.DefaultIteratorOptions opts.AllVersions = false opts.PrefetchValues = false + opts.Prefix = prefix // Iterate over the prefix as long as there are valid keys in the DB. it := txn.NewIterator(opts) defer it.Close() @@ -1507,18 +1508,18 @@ func DBDeletePKIDMappingsWithTxn(txn *badger.Txn, snap *Snapshot, publicKey []by return nil } -func EnumerateKeysForPrefix(db *badger.DB, dbPrefix []byte) (_keysFound [][]byte, _valsFound [][]byte) { - return _enumerateKeysForPrefix(db, dbPrefix) +func EnumerateKeysForPrefix(db *badger.DB, dbPrefix []byte, keysOnly bool) (_keysFound [][]byte, _valsFound [][]byte) { + return _enumerateKeysForPrefix(db, dbPrefix, keysOnly) } // A helper function to enumerate all of the values for a particular prefix. 
-func _enumerateKeysForPrefix(db *badger.DB, dbPrefix []byte) (_keysFound [][]byte, _valsFound [][]byte) { +func _enumerateKeysForPrefix(db *badger.DB, dbPrefix []byte, keysOnly bool) (_keysFound [][]byte, _valsFound [][]byte) { keysFound := [][]byte{} valsFound := [][]byte{} dbErr := db.View(func(txn *badger.Txn) error { var err error - keysFound, valsFound, err = _enumerateKeysForPrefixWithTxn(txn, dbPrefix) + keysFound, valsFound, err = _enumerateKeysForPrefixWithTxn(txn, dbPrefix, keysOnly) if err != nil { return err } @@ -1532,11 +1533,15 @@ func _enumerateKeysForPrefix(db *badger.DB, dbPrefix []byte) (_keysFound [][]byt return keysFound, valsFound } -func _enumerateKeysForPrefixWithTxn(txn *badger.Txn, dbPrefix []byte) (_keysFound [][]byte, _valsFound [][]byte, _err error) { +func _enumerateKeysForPrefixWithTxn(txn *badger.Txn, dbPrefix []byte, keysOnly bool) (_keysFound [][]byte, _valsFound [][]byte, _err error) { keysFound := [][]byte{} valsFound := [][]byte{} opts := badger.DefaultIteratorOptions + if keysOnly { + opts.PrefetchValues = false + } + opts.Prefix = dbPrefix nodeIterator := txn.NewIterator(opts) defer nodeIterator.Close() prefix := dbPrefix @@ -1545,12 +1550,14 @@ func _enumerateKeysForPrefixWithTxn(txn *badger.Txn, dbPrefix []byte) (_keysFoun keyCopy := make([]byte, len(key)) copy(keyCopy[:], key[:]) - valCopy, err := nodeIterator.Item().ValueCopy(nil) - if err != nil { - return nil, nil, err - } keysFound = append(keysFound, keyCopy) - valsFound = append(valsFound, valCopy) + if !keysOnly { + valCopy, err := nodeIterator.Item().ValueCopy(nil) + if err != nil { + return nil, nil, err + } + valsFound = append(valsFound, valCopy) + } } return keysFound, valsFound, nil } @@ -1570,6 +1577,7 @@ func _enumeratePaginatedLimitedKeysForPrefixWithTxn(txn *badger.Txn, dbPrefix [] opts := badger.DefaultIteratorOptions opts.PrefetchValues = false + opts.Prefix = dbPrefix nodeIterator := txn.NewIterator(opts) defer nodeIterator.Close() @@ -1622,6 +1630,7 @@ 
func _enumerateLimitedKeysReversedForPrefixAndStartingKeyWithTxn(txn *badger.Txn // Go in reverse order opts.Reverse = true + opts.Prefix = dbPrefix nodeIterator := txn.NewIterator(opts) defer nodeIterator.Close() @@ -1876,7 +1885,7 @@ func DBGetMessageEntriesForPublicKey(handle *badger.DB, publicKey []byte) ( // Goes backwards to get messages in time sorted order. // Limit the number of keys to speed up load times. - _, valuesFound := _enumerateKeysForPrefix(handle, prefix) + _, valuesFound := _enumerateKeysForPrefix(handle, prefix, false) privateMessages := []*MessageEntry{} for _, valBytes := range valuesFound { @@ -2097,7 +2106,7 @@ func DBGetMessagingGroupEntriesForOwnerWithTxn(txn *badger.Txn, ownerPublicKey * // Setting the prefix to owner's public key will allow us to fetch all messaging keys // for the user. We enumerate this prefix. prefix := _dbSeekPrefixForMessagingGroupEntry(ownerPublicKey) - _, valuesFound, err := _enumerateKeysForPrefixWithTxn(txn, prefix) + _, valuesFound, err := _enumerateKeysForPrefixWithTxn(txn, prefix, false) if err != nil { return nil, errors.Wrapf(err, "DBGetMessagingGroupEntriesForOwnerWithTxn: "+ "problem enumerating messaging key entries for prefix (%v)", prefix) @@ -3320,7 +3329,7 @@ func DBGetAllMessagingGroupEntriesForMemberWithTxn(txn *badger.Txn, ownerPublicK // This function is used to fetch all messaging var messagingGroupEntries []*MessagingGroupEntry prefix := _dbSeekPrefixForMessagingGroupMember(ownerPublicKey) - _, valuesFound, err := _enumerateKeysForPrefixWithTxn(txn, prefix) + _, valuesFound, err := _enumerateKeysForPrefixWithTxn(txn, prefix, false) if err != nil { return nil, errors.Wrapf(err, "DBGetAllMessagingGroupEntriesForMemberWithTxn: "+ "problem enumerating messaging key entries for prefix (%v)", prefix) @@ -3586,7 +3595,7 @@ func DbGetPostHashesYouLike(handle *badger.DB, yourPublicKey []byte) ( _postHashes []*BlockHash, _err error) { prefix := _dbSeekPrefixForPostHashesYouLike(yourPublicKey) - 
keysFound, _ := _enumerateKeysForPrefix(handle, prefix) + keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true) postHashesYouLike := []*BlockHash{} for _, keyBytes := range keysFound { @@ -3603,7 +3612,7 @@ func DbGetLikerPubKeysLikingAPostHash(handle *badger.DB, likedPostHash BlockHash _pubKeys [][]byte, _err error) { prefix := _dbSeekPrefixForLikerPubKeysLikingAPostHash(likedPostHash) - keysFound, _ := _enumerateKeysForPrefix(handle, prefix) + keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true) userPubKeys := [][]byte{} for _, keyBytes := range keysFound { @@ -3716,7 +3725,7 @@ func DbGetReposterPubKeyRepostedPostHashToRepostEntryWithTxn(txn *badger.Txn, snap *Snapshot, userPubKey []byte, repostedPostHash BlockHash) *RepostEntry { key := _dbSeekKeyForReposterPubKeyRepostedPostHashToRepostPostHash(userPubKey, repostedPostHash) - keysFound, _, err := _enumerateKeysForPrefixWithTxn(txn, key) + keysFound, _, err := _enumerateKeysForPrefixWithTxn(txn, key, true) if err != nil { return nil } @@ -3764,7 +3773,7 @@ func DbDeleteRepostMappingsWithTxn(txn *badger.Txn, snap *Snapshot, repostEntry func DbDeleteAllRepostMappingsWithTxn(txn *badger.Txn, snap *Snapshot, userPubKey []byte, repostedPostHash BlockHash, eventManager *EventManager, entryIsDeleted bool) error { key := _dbSeekKeyForReposterPubKeyRepostedPostHashToRepostPostHash(userPubKey, repostedPostHash) - keysFound, _, err := _enumerateKeysForPrefixWithTxn(txn, key) + keysFound, _, err := _enumerateKeysForPrefixWithTxn(txn, key, true) if err != nil { return nil } @@ -3781,7 +3790,7 @@ func DbGetPostHashesYouRepost(handle *badger.DB, yourPublicKey []byte) ( _postHashes []*BlockHash, _err error) { prefix := _dbSeekPrefixForPostHashesYouRepost(yourPublicKey) - keysFound, _ := _enumerateKeysForPrefix(handle, prefix) + keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true) postHashesYouRepost := []*BlockHash{} for _, keyBytes := range keysFound { @@ -3944,7 +3953,7 @@ func 
DbGetPKIDsYouFollow(handle *badger.DB, yourPKID *PKID) ( _pkids []*PKID, _err error) { prefix := _dbSeekPrefixForPKIDsYouFollow(yourPKID) - keysFound, _ := _enumerateKeysForPrefix(handle, prefix) + keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true) pkidsYouFollow := []*PKID{} for _, keyBytes := range keysFound { @@ -3962,7 +3971,7 @@ func DbGetPKIDsFollowingYou(handle *badger.DB, yourPKID *PKID) ( _pkids []*PKID, _err error) { prefix := _dbSeekPrefixForPKIDsFollowingYou(yourPKID) - keysFound, _ := _enumerateKeysForPrefix(handle, prefix) + keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true) pkidsFollowingYou := []*PKID{} for _, keyBytes := range keysFound { @@ -4221,7 +4230,7 @@ func DbGetPKIDsThatDiamondedYouMap(handle *badger.DB, yourPKID *PKID, fetchYouDi diamondReceiverStartIdx = 1 + btcec.PubKeyBytesLenCompressed diamondReceiverEndIdx = 1 + 2*btcec.PubKeyBytesLenCompressed } - keysFound, valsFound := _enumerateKeysForPrefix(handle, prefix) + keysFound, valsFound := _enumerateKeysForPrefix(handle, prefix, false) pkidsToDiamondEntryMap := make(map[PKID][]*DiamondEntry) for ii, keyBytes := range keysFound { @@ -4295,7 +4304,7 @@ func DbGetDiamondEntriesForSenderToReceiver(handle *badger.DB, receiverPKID *PKI _diamondEntries []*DiamondEntry, _err error) { prefix := _dbSeekPrefixForReceiverPKIDAndSenderPKID(receiverPKID, senderPKID) - keysFound, valsFound := _enumerateKeysForPrefix(handle, prefix) + keysFound, valsFound := _enumerateKeysForPrefix(handle, prefix, false) var diamondEntries []*DiamondEntry for ii, keyBytes := range keysFound { // The DiamondEntry found must not be nil. 
@@ -4385,7 +4394,7 @@ func DbDeleteBitcoinBurnTxIDWithTxn(txn *badger.Txn, snap *Snapshot, bitcoinBurn } func DbGetAllBitcoinBurnTxIDs(handle *badger.DB) (_bitcoinBurnTxIDs []*BlockHash) { - keysFound, _ := _enumerateKeysForPrefix(handle, Prefixes.PrefixBitcoinBurnTxIDs) + keysFound, _ := _enumerateKeysForPrefix(handle, Prefixes.PrefixBitcoinBurnTxIDs, true) bitcoinBurnTxIDs := []*BlockHash{} for _, key := range keysFound { bbtxid := &BlockHash{} @@ -5627,7 +5636,7 @@ func DbTxindexPublicKeyIndexToTxnKey(publicKey []byte, index uint32) []byte { func DbGetTxindexTxnsForPublicKeyWithTxn(txn *badger.Txn, publicKey []byte) []*BlockHash { txIDs := []*BlockHash{} - _, valsFound, err := _enumerateKeysForPrefixWithTxn(txn, DbTxindexPublicKeyPrefix(publicKey)) + _, valsFound, err := _enumerateKeysForPrefixWithTxn(txn, DbTxindexPublicKeyPrefix(publicKey), false) if err != nil { return txIDs } @@ -5659,14 +5668,16 @@ func _DbGetTxindexNextIndexForPublicKeBySeekWithTxn(txn *badger.Txn, publicKey [ // Go in reverse order. opts.Reverse = true - it := txn.NewIterator(opts) - defer it.Close() // Since we iterate backwards, the prefix must be bigger than all possible // counts that could actually exist. We use four bytes since the index is // encoded as a 32-bit big-endian byte slice, which will be four bytes long. maxBigEndianUint32Bytes := []byte{0xFF, 0xFF, 0xFF, 0xFF} prefix := append([]byte{}, dbPrefixx...) prefix = append(prefix, maxBigEndianUint32Bytes...) + opts.Prefix = prefix + opts.PrefetchValues = false + it := txn.NewIterator(opts) + defer it.Close() for it.Seek(prefix); it.ValidForPrefix(dbPrefixx); it.Next() { countKey := it.Item().Key() @@ -8318,7 +8329,7 @@ func DBGetNFTEntriesForPostHash(handle *badger.DB, nftPostHash *BlockHash) (_nft nftEntries := []*NFTEntry{} prefix := append([]byte{}, Prefixes.PrefixPostHashSerialNumberToNFTEntry...) keyPrefix := append(prefix, nftPostHash[:]...) 
- _, entryByteStringsFound := _enumerateKeysForPrefix(handle, keyPrefix) + _, entryByteStringsFound := _enumerateKeysForPrefix(handle, keyPrefix, false) for _, byteString := range entryByteStringsFound { currentEntry := &NFTEntry{} rr := bytes.NewReader(byteString) @@ -8365,7 +8376,7 @@ func DBGetNFTEntriesForPKID(handle *badger.DB, ownerPKID *PKID) (_nftEntries []* var nftEntries []*NFTEntry prefix := append([]byte{}, Prefixes.PrefixPKIDIsForSaleBidAmountNanosPostHashSerialNumberToNFTEntry...) keyPrefix := append(prefix, ownerPKID[:]...) - _, entryByteStringsFound := _enumerateKeysForPrefix(handle, keyPrefix) + _, entryByteStringsFound := _enumerateKeysForPrefix(handle, keyPrefix, false) for _, byteString := range entryByteStringsFound { currentEntry := &NFTEntry{} rr := bytes.NewReader(byteString) @@ -8607,7 +8618,7 @@ func DBGetNFTBidEntriesForPKID(handle *badger.DB, bidderPKID *PKID) (_nftBidEntr { prefix := append([]byte{}, Prefixes.PrefixBidderPKIDPostHashSerialNumberToBidNanos...) keyPrefix := append(prefix, bidderPKID[:]...) - keysFound, valuesFound := _enumerateKeysForPrefix(handle, keyPrefix) + keysFound, valuesFound := _enumerateKeysForPrefix(handle, keyPrefix, false) bidderPKIDLength := len(bidderPKID[:]) for ii, keyFound := range keysFound { @@ -8644,7 +8655,7 @@ func DBGetNFTBidEntries(handle *badger.DB, nftPostHash *BlockHash, serialNumber prefix := append([]byte{}, Prefixes.PrefixPostHashSerialNumberBidNanosBidderPKID...) keyPrefix := append(prefix, nftPostHash[:]...) keyPrefix = append(keyPrefix, EncodeUint64(serialNumber)...) - keysFound, _ := _enumerateKeysForPrefix(handle, keyPrefix) + keysFound, _ := _enumerateKeysForPrefix(handle, keyPrefix, true) for _, keyFound := range keysFound { bidAmountStartIdx := 1 + HashSizeBytes + 8 // The length of prefix + the post hash + the serial #. bidAmountEndIdx := bidAmountStartIdx + 8 // Add the length of the bid amount (uint64). 
@@ -8812,7 +8823,7 @@ func DBGetAllOwnerToDerivedKeyMappings(handle *badger.DB, ownerPublicKey PublicK _entries []*DerivedKeyEntry, _err error) { prefix := _dbSeekPrefixForDerivedKeyMappings(ownerPublicKey) - _, valsFound := _enumerateKeysForPrefix(handle, prefix) + _, valsFound := _enumerateKeysForPrefix(handle, prefix, false) var derivedEntries []*DerivedKeyEntry for _, keyBytes := range valsFound { @@ -9280,7 +9291,7 @@ func DbGetBalanceEntriesYouHold(db *badger.DB, snap *Snapshot, pkid *PKID, filte { prefix := _dbGetPrefixForHODLerPKIDCreatorPKIDToBalanceEntry(isDAOCoin) keyPrefix := append(prefix, pkid[:]...) - _, entryByteStringsFound := _enumerateKeysForPrefix(db, keyPrefix) + _, entryByteStringsFound := _enumerateKeysForPrefix(db, keyPrefix, false) for _, byteString := range entryByteStringsFound { currentEntry := &BalanceEntry{} rr := bytes.NewReader(byteString) @@ -9302,7 +9313,7 @@ func DbGetBalanceEntriesHodlingYou(db *badger.DB, snap *Snapshot, pkid *PKID, fi { prefix := _dbGetPrefixForCreatorPKIDHODLerPKIDToBalanceEntry(isDAOCoin) keyPrefix := append(prefix, pkid[:]...) - _, entryByteStringsFound := _enumerateKeysForPrefix(db, keyPrefix) + _, entryByteStringsFound := _enumerateKeysForPrefix(db, keyPrefix, false) for _, byteString := range entryByteStringsFound { currentEntry := &BalanceEntry{} rr := bytes.NewReader(byteString) @@ -9803,7 +9814,7 @@ func DBGetAllDAOCoinLimitOrdersForThisTransactor(handle *badger.DB, transactorPK func _DBGetAllDAOCoinLimitOrdersByPrefix(handle *badger.DB, prefixKey []byte) ([]*DAOCoinLimitOrderEntry, error) { // Get all DAO coin limit orders containing this prefix. - _, valsFound := _enumerateKeysForPrefix(handle, prefixKey) + _, valsFound := _enumerateKeysForPrefix(handle, prefixKey, false) orders := []*DAOCoinLimitOrderEntry{} // Cast resulting values from bytes to order entries. 
@@ -9935,7 +9946,7 @@ func DbGetMempoolTxn(db *badger.DB, snap *Snapshot, mempoolTx *MempoolTx) *MsgDe } func DbGetAllMempoolTxnsSortedByTimeAdded(handle *badger.DB) (_mempoolTxns []*MsgDeSoTxn, _error error) { - _, valuesFound := _enumerateKeysForPrefix(handle, Prefixes.PrefixMempoolTxnHashToMsgDeSoTxn) + _, valuesFound := _enumerateKeysForPrefix(handle, Prefixes.PrefixMempoolTxnHashToMsgDeSoTxn, false) mempoolTxns := []*MsgDeSoTxn{} for _, mempoolTxnBytes := range valuesFound { @@ -9954,7 +9965,7 @@ func DbGetAllMempoolTxnsSortedByTimeAdded(handle *badger.DB) (_mempoolTxns []*Ms } func DbDeleteAllMempoolTxnsWithTxn(txn *badger.Txn, snap *Snapshot, eventManager *EventManager, entryIsDeleted bool) error { - txnKeysFound, _, err := _enumerateKeysForPrefixWithTxn(txn, Prefixes.PrefixMempoolTxnHashToMsgDeSoTxn) + txnKeysFound, _, err := _enumerateKeysForPrefixWithTxn(txn, Prefixes.PrefixMempoolTxnHashToMsgDeSoTxn, true) if err != nil { return errors.Wrapf(err, "DbDeleteAllMempoolTxnsWithTxn: ") } @@ -10037,7 +10048,7 @@ func DbDeleteMempoolTxnKeyWithTxn(txn *badger.Txn, snap *Snapshot, txnKey []byte func LogDBSummarySnapshot(db *badger.DB) { keyCountMap := make(map[byte]int) for prefixByte := byte(0); prefixByte < byte(40); prefixByte++ { - keysForPrefix, _ := EnumerateKeysForPrefix(db, []byte{prefixByte}) + keysForPrefix, _ := EnumerateKeysForPrefix(db, []byte{prefixByte}, true) keyCountMap[prefixByte] = len(keysForPrefix) } glog.Info(spew.Printf("LogDBSummarySnapshot: Current DB summary snapshot: %v", keyCountMap)) @@ -11421,6 +11432,7 @@ func DBGetLimitedVestedLockedBalanceEntriesWithTxn( // Create a backwards iterator. backwardOpts := badger.DefaultIteratorOptions backwardOpts.Reverse = true + backwardOpts.Prefix = prefixKey backwardIterator := txn.NewIterator(backwardOpts) defer backwardIterator.Close() @@ -11460,6 +11472,7 @@ func DBGetLimitedVestedLockedBalanceEntriesWithTxn( // Create a forward iterator. 
We will use t forwardOpts := badger.DefaultIteratorOptions + forwardOpts.Prefix = prefixKey forwardIterator := txn.NewIterator(forwardOpts) defer forwardIterator.Close() @@ -11611,6 +11624,7 @@ func DBGetAllYieldCurvePointsByProfilePKIDWithTxn(txn *badger.Txn, snap *Snapsho // Create an iterator. opts := badger.DefaultIteratorOptions + opts.Prefix = validKey iterator := txn.NewIterator(opts) defer iterator.Close() @@ -11765,6 +11779,8 @@ func DbGetTransactorNonceEntriesToExpireAtBlockHeightWithTxn(txn *badger.Txn, bl endPrefix := append([]byte{}, Prefixes.PrefixNoncePKIDIndex...) opts := badger.DefaultIteratorOptions opts.Reverse = true + opts.Prefix = endPrefix + opts.PrefetchValues = false nodeIterator := txn.NewIterator(opts) defer nodeIterator.Close() var transactorNonceEntries []*TransactorNonceEntry @@ -11776,7 +11792,7 @@ func DbGetTransactorNonceEntriesToExpireAtBlockHeightWithTxn(txn *badger.Txn, bl } func DbGetAllTransactorNonceEntries(handle *badger.DB) []*TransactorNonceEntry { - keys, _ := EnumerateKeysForPrefix(handle, Prefixes.PrefixNoncePKIDIndex) + keys, _ := EnumerateKeysForPrefix(handle, Prefixes.PrefixNoncePKIDIndex, true) nonceEntries := []*TransactorNonceEntry{} for _, key := range keys { // Convert key to nonce entry. 
diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index b0fcda326..fa75c7261 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -1382,7 +1382,7 @@ func DBSeekSnapshotLeaderSchedule( snapshotAtEpochNumber uint64, ) (map[uint16]*PKID, error) { seekKey := DBSSeekKeyForSnapshotLeaderSchedule(snapshotAtEpochNumber) - keysFound, valsFound := EnumerateKeysForPrefix(handle, seekKey) + keysFound, valsFound := EnumerateKeysForPrefix(handle, seekKey, false) leaderIdxToPKID := make(map[uint16]*PKID) for idx, keyFound := range keysFound { // TODO: Make sure this decode is correct diff --git a/lib/snapshot.go b/lib/snapshot.go index 95c15685a..1041826d6 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -2372,6 +2372,7 @@ func (migration *EncoderMigration) StartMigrations() error { opts := badger.DefaultIteratorOptions for _, prefix := range prefixes { startedPrefix = prefix + opts.Prefix = prefix it := txn.NewIterator(opts) for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { item := it.Item() From 23c6e78429e7b00c333dc4db8fd59c10023d7da6 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Thu, 21 Mar 2024 18:08:34 -0700 Subject: [PATCH 564/762] nits --- lib/pos_mempool.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 2aa7ff5a6..ced346837 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -438,8 +438,6 @@ func (mp *PosMempool) OnBlockConnected(block *MsgDeSoBlock) { mp.removeTransactionNoLock(existingTxn, true) } - mp.refreshNoLock() - // Add the block to the fee estimator. This is a best effort operation. If we fail to add the block // to the fee estimator, we log an error and continue. if err := mp.feeEstimator.AddBlock(block); err != nil { @@ -484,8 +482,6 @@ func (mp *PosMempool) OnBlockDisconnected(block *MsgDeSoBlock) { } } - mp.refreshNoLock() - // This is a best effort operation. 
If we fail to prune the mempool, we log an error and continue. if err := mp.pruneNoLock(); err != nil { glog.Errorf("PosMempool.AddTransaction: Problem pruning mempool: %v", err) From ec64c727749ad50df67a720258642d57c6c84cc6 Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Thu, 21 Mar 2024 18:52:45 -0700 Subject: [PATCH 565/762] Add refreshNoLock and fix tests --- lib/network_connection_test.go | 2 +- lib/network_test.go | 2 +- lib/pos_block_producer_test.go | 2 +- lib/pos_blockchain_test.go | 3 ++ lib/pos_mempool.go | 71 ++++++++++++++++++++++++++++++++++ 5 files changed, 77 insertions(+), 3 deletions(-) diff --git a/lib/network_connection_test.go b/lib/network_connection_test.go index a9aab0f6d..6b630eb02 100644 --- a/lib/network_connection_test.go +++ b/lib/network_connection_test.go @@ -83,8 +83,8 @@ func (sl *simpleListener) stop() { sl.ll.Close() sl.closed = true close(sl.exitChan) - close(sl.connectionChan) sl.stopGroup.Wait() + close(sl.connectionChan) fmt.Println("simpleListener.stop: stopped") } diff --git a/lib/network_test.go b/lib/network_test.go index 235cd9aa4..2bccb33e9 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -241,7 +241,7 @@ func TestHeaderConversionAndReadWriteMessage(t *testing.T) { require.NoError(err) require.Equal(hdrPayload, data) - require.Equalf(14, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), + require.Equalf(13, reflect.TypeOf(expectedBlockHeader).Elem().NumField(), "Number of fields in HEADER message is different from expected. "+ "Did you add a new field? 
If so, make sure the serialization code "+ "works, add the new field to the test case, and fix this error.") diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 1d0187be7..b7c522cfa 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -278,7 +278,7 @@ func _testProduceBlockNoSizeLimit(t *testing.T, mp *PosMempool, pbp *PosBlockPro numPassing int, numFailing int, numInvalid int) (_txns []*MsgDeSoTxn, _maxUtilityFee uint64) { require := require.New(t) - totalAcceptedTxns := numPassing + numFailing + totalAcceptedTxns := numPassing totalTxns := numPassing + numFailing + numInvalid require.Equal(totalTxns, len(mp.GetTransactions())) diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 79e09b4d6..2e8e76125 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -1868,6 +1868,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { var malformedOrphanBlock *MsgDeSoBlock malformedOrphanBlock = _generateRealBlock(testMeta, 18, 18, 9273, testMeta.chain.BlockTip().Hash, false) malformedOrphanBlock.Header.PrevBlockHash = randomHash + malformedOrphanBlock.Header.Version = 5 // Resign the block. updateProposerVotePartialSignatureForBlock(testMeta, malformedOrphanBlock) malformedOrphanBlockHash, err := malformedOrphanBlock.Hash() @@ -1884,6 +1885,8 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.True(t, malformedOrphanBlockInIndex.IsStored()) // If a block can't be hashed, we expect to get an error. 
+ malformedOrphanBlock.Header.Version = HeaderVersion2 + malformedOrphanBlock.Header.ProposerVotingPublicKey = nil success, isOrphan, missingBlockHashes, err = testMeta.chain.ProcessBlockPoS(malformedOrphanBlock, 18, true) require.False(t, success) require.False(t, isOrphan) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index ced346837..ed76e44a4 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -438,6 +438,8 @@ func (mp *PosMempool) OnBlockConnected(block *MsgDeSoBlock) { mp.removeTransactionNoLock(existingTxn, true) } + mp.refreshNoLock() + // Add the block to the fee estimator. This is a best effort operation. If we fail to add the block // to the fee estimator, we log an error and continue. if err := mp.feeEstimator.AddBlock(block); err != nil { @@ -482,6 +484,8 @@ func (mp *PosMempool) OnBlockDisconnected(block *MsgDeSoBlock) { } } + mp.refreshNoLock() + // This is a best effort operation. If we fail to prune the mempool, we log an error and continue. if err := mp.pruneNoLock(); err != nil { glog.Errorf("PosMempool.AddTransaction: Problem pruning mempool: %v", err) @@ -815,6 +819,70 @@ func (mp *PosMempool) validateTransactions() error { return nil } +// refreshNoLock can be used to evict stale transactions from the mempool. However, it is a bit expensive and should be used +// sparingly. Upon being called, refreshNoLock will create an in-memory temp PosMempool and populate it with transactions from +// the main mempool. The temp mempool will have the most up-to-date readOnlyLatestBlockView, Height, and globalParams. Any +// transaction that fails to add to the temp mempool will be removed from the main mempool. +func (mp *PosMempool) refreshNoLock() error { + // Create the temporary in-memory mempool with the most up-to-date readOnlyLatestBlockView, Height, and globalParams. 
+ tempPool := NewPosMempool() + err := tempPool.Init( + mp.params, + mp.globalParams, + mp.readOnlyLatestBlockView, + mp.latestBlockHeight, + "", + true, + mp.maxMempoolPosSizeBytes, + mp.mempoolBackupIntervalMillis, + mp.feeEstimator.numMempoolBlocks, + mp.feeEstimator.cachedBlocks, + mp.feeEstimator.numPastBlocks, + mp.maxValidationViewConnects, + mp.transactionValidationRefreshIntervalMillis, + mp.augmentedBlockViewRefreshIntervalMillis, + ) + if err != nil { + return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem initializing temp pool") + } + if err := tempPool.Start(); err != nil { + return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem starting temp pool") + } + defer tempPool.Stop() + + // Add all transactions from the main mempool to the temp mempool. Skip signature verification. + var txnsToRemove []*MempoolTx + txns := mp.getTransactionsNoLock() + for _, txn := range txns { + mtxn := NewMempoolTransaction(txn.Tx, txn.Added, txn.IsValidated()) + err := tempPool.AddTransaction(mtxn) + if err == nil { + continue + } + + // If we've encountered an error while adding the transaction to the temp mempool, we add it to our txnsToRemove list. + txnsToRemove = append(txnsToRemove, txn) + } + + // Now remove all transactions from the txnsToRemove list from the main mempool. + for _, txn := range txnsToRemove { + if err := mp.removeTransactionNoLock(txn, true); err != nil { + glog.Errorf("PosMempool.refreshNoLock: Problem removing transaction with hash (%v): %v", txn.Hash, err) + } + } + + // Log the hashes for transactions that were removed. 
+ if len(txnsToRemove) > 0 { + var removedTxnHashes []string + for _, txn := range txnsToRemove { + removedTxnHashes = append(removedTxnHashes, txn.Hash.String()) + } + glog.Infof("PosMempool.refreshNoLock: Transactions with the following hashes were removed: %v", + strings.Join(removedTxnHashes, ",")) + } + return nil +} + // pruneNoLock removes transactions from the mempool until the mempool size is below the maximum allowed size. The transactions // are removed in lowest to highest Fee-Time priority, i.e. opposite way that transactions are ordered in // GetTransactions(). @@ -862,6 +930,9 @@ func (mp *PosMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { } mp.globalParams = globalParams + if err := mp.refreshNoLock(); err != nil { + glog.Errorf("PosMempool.UpdateGlobalParams: Problem refreshing mempool: %v", err) + } } // Implementation of the Mempool interface From b54f475a4d3cd21647e114c97a335f48c46d0f0a Mon Sep 17 00:00:00 2001 From: Piotr Nojszewski <29924594+AeonSw4n@users.noreply.github.com> Date: Fri, 22 Mar 2024 01:40:46 -0700 Subject: [PATCH 566/762] Tests --- lib/pos_mempool.go | 28 ++++++------- lib/pos_mempool_test.go | 92 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+), 14 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index ed76e44a4..d43fc9808 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -375,13 +375,14 @@ func (mp *PosMempool) startAugmentedViewRefreshRoutine() { } func (mp *PosMempool) Stop() { - mp.Lock() - defer mp.Unlock() - if !mp.IsRunning() { return } + close(mp.quit) + mp.exitGroup.Wait() + mp.Lock() + defer mp.Unlock() // Close the persister and stop the database. 
if !mp.inMemoryOnly { if err := mp.persister.Stop(); err != nil { @@ -396,8 +397,6 @@ func (mp *PosMempool) Stop() { mp.txnRegister.Reset() mp.nonceTracker.Reset() mp.feeEstimator = NewPoSFeeEstimator() - close(mp.quit) - mp.exitGroup.Wait() mp.status = PosMempoolStatusNotInitialized } @@ -746,12 +745,11 @@ func (mp *PosMempool) GetIterator() MempoolIterator { // connect to the validationView are removed from the mempool, as they would have also failed to connect during // block production. This function is thread-safe. func (mp *PosMempool) validateTransactions() error { - // We hold a read-lock on the mempool to get the transactions and the latest block view. - mp.RLock() if !mp.IsRunning() { return nil } - + // We hold a read-lock on the mempool to get the transactions and the latest block view. + mp.RLock() // We copy the reference to the readOnlyLatestBlockView. Since the utxoView is immutable, we don't need to copy the // entire view while we hold the lock. validationView := mp.readOnlyLatestBlockView @@ -773,14 +771,14 @@ func (mp *PosMempool) validateTransactions() error { // Copy the validation view to avoid modifying the readOnlyLatestBlockView. copyValidationView, err := validationView.CopyUtxoView() if err != nil { - return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem copying utxo view") + return errors.Wrapf(err, "PosMempool.validateTransactions: Problem copying utxo view") } // Connect the transactions to the validation view. We use the latest block height + 1 as the block height to connect // the transactions. This is because the mempool contains transactions that we use for producing the next block. 
_, _, _, _, successFlags, err := copyValidationView.ConnectTransactionsFailSafeWithLimit(txns, txHashes, uint32(mp.latestBlockHeight)+1, - time.Now().UnixNano(), false, false, true, mp.maxValidationViewConnects) + time.Now().UnixNano(), true, false, true, mp.maxValidationViewConnects) if err != nil { - return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem connecting transactions") + return errors.Wrapf(err, "PosMempool.validateTransactions: Problem connecting transactions") } // We iterate through the successFlags and update the validated status of the transactions in the mempool. @@ -801,11 +799,13 @@ func (mp *PosMempool) validateTransactions() error { } // Now remove all transactions from the txnsToRemove list from the main mempool. + mp.Lock() for _, txn := range txnsToRemove { if err := mp.removeTransactionNoLock(txn, true); err != nil { - glog.Errorf("PosMempool.refreshNoLock: Problem removing transaction with hash (%v): %v", txn.Hash, err) + glog.Errorf("PosMempool.validateTransactions: Problem removing transaction with hash (%v): %v", txn.Hash, err) } } + mp.Unlock() // Log the hashes for transactions that were removed. 
if len(txnsToRemove) > 0 { @@ -813,7 +813,7 @@ func (mp *PosMempool) validateTransactions() error { for _, txn := range txnsToRemove { removedTxnHashes = append(removedTxnHashes, txn.Hash.String()) } - glog.Infof("PosMempool.refreshNoLock: Transactions with the following hashes were removed: %v", + glog.V(1).Infof("PosMempool.validateTransactions: Transactions with the following hashes were removed: %v", strings.Join(removedTxnHashes, ",")) } return nil @@ -877,7 +877,7 @@ func (mp *PosMempool) refreshNoLock() error { for _, txn := range txnsToRemove { removedTxnHashes = append(removedTxnHashes, txn.Hash.String()) } - glog.Infof("PosMempool.refreshNoLock: Transactions with the following hashes were removed: %v", + glog.V(1).Infof("PosMempool.refreshNoLock: Transactions with the following hashes were removed: %v", strings.Join(removedTxnHashes, ",")) } return nil diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index c9fa1abbc..3a34eacaf 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -328,6 +328,89 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { require.False(mempool.IsRunning()) } +func TestPosMempoolTransactionValidation(t *testing.T) { + seed := int64(1073) + rand := rand.New(rand.NewSource(seed)) + + globalParams := _testGetDefaultGlobalParams() + feeMin := globalParams.MinimumNetworkFeeNanosPerKB + feeMax := uint64(2000) + maxMempoolPosSizeBytes := uint64(3000000000) + mempoolBackupIntervalMillis := uint64(30000) + + params, db := _posTestBlockchainSetup(t) + m0PubBytes, _, _ := Base58CheckDecode(m0Pub) + m1PubBytes, _, _ := Base58CheckDecode(m1Pub) + latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) + require.NoError(t, err) + dir := _dbDirSetup(t) + + mempool := NewPosMempool() + require.NoError(t, mempool.Init( + params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, + nil, 1, 5, 10, 10, + )) + require.NoError(t, mempool.Start()) + 
require.True(t, mempool.IsRunning()) + + // First, we'll try adding two transactions, one passing, one failing, and verify that the validation routine + // properly validates the passing transaction, and removes the failing transaction. + output := []*DeSoOutput{{ + PublicKey: m1PubBytes, + AmountNanos: 1000, + }} + txn1 := _generateTestTxnWithOutputs(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 25, output) + // This should fail signature verification. + txn2 := _generateTestTxnWithOutputs(t, rand, feeMin, feeMax, m0PubBytes, m1Priv, 100, 25, output) + _wrappedPosMempoolAddTransaction(t, mempool, txn1) + _wrappedPosMempoolAddTransaction(t, mempool, txn2) + // Wait for the validation routine to finish. + time.Sleep(20 * time.Millisecond) + require.Equal(t, true, mempool.GetTransaction(txn1.Hash()).IsValidated()) + require.Nil(t, mempool.GetTransaction(txn2.Hash())) + require.NoError(t, mempool.RemoveTransaction(txn1.Hash())) + + // Now we'll try generating 10 passing transactions and 10 failing transactions, and verify that the validation + // routine properly validates up to 5, the maxValidationViewConnects, passing transactions, and possibly removes + // some of the failing transactions. + var passingTxns, failingTxns []*MsgDeSoTxn + for ii := 0; ii < 10; ii++ { + txn := _generateTestTxnWithOutputs(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 25, output) + if ii > 0 { + // Make sure we add the transactions with increasing fees, otherwise we may validate more than 5 transactions, + // if the validation routine executes while we're adding transactions. + txn.TxnFeeNanos = passingTxns[ii-1].TxnFeeNanos + 1 + _signTxn(t, txn, m0Priv) + } + passingTxns = append(passingTxns, txn) + _wrappedPosMempoolAddTransaction(t, mempool, txn) + } + for ii := 0; ii < 10; ii++ { + // Make sure the transaction fails the signature verification. 
+ txn := _generateTestTxnWithOutputs(t, rand, feeMin, feeMax, m0PubBytes, m1Priv, 100, 25, output) + failingTxns = append(failingTxns, txn) + _wrappedPosMempoolAddTransaction(t, mempool, txn) + } + // Wait for the validation routine to finish. + time.Sleep(20 * time.Millisecond) + totalValidatedTxns := 0 + for _, txn := range passingTxns { + if mempool.GetTransaction(txn.Hash()).IsValidated() { + totalValidatedTxns++ + } + } + // Make sure that the number of validated transactions is equal to the maxValidationViewConnects. + require.Equal(t, 5, totalValidatedTxns) + // Now make sure that failing transactions were either removed, or remained unvalidated. + for _, txn := range failingTxns { + fetchedTxn := mempool.GetTransaction(txn.Hash()) + if fetchedTxn != nil { + require.False(t, fetchedTxn.IsValidated()) + } + } + mempool.Stop() +} + func _posTestBlockchainSetup(t *testing.T) (_params *DeSoParams, _db *badger.DB) { return _posTestBlockchainSetupWithBalances(t, 200000, 200000) } @@ -391,6 +474,15 @@ func _generateTestTxn(t *testing.T, rand *rand.Rand, feeMin uint64, feeMax uint6 return txn } +func _generateTestTxnWithOutputs(t *testing.T, rand *rand.Rand, feeMin uint64, feeMax uint64, pk []byte, priv string, expirationHeight uint64, + extraDataBytes int32, outputs []*DeSoOutput) *MsgDeSoTxn { + + txn := _generateTestTxn(t, rand, feeMin, feeMax, pk, priv, expirationHeight, extraDataBytes) + txn.TxOutputs = outputs + _signTxn(t, txn, priv) + return txn +} + func _wrappedPosMempoolAddTransaction(t *testing.T, mp *PosMempool, txn *MsgDeSoTxn) { added := time.Now() mtxn := NewMempoolTransaction(txn, added, false) From 5fe25e9eae76398c384aa2d5c97b2ed51e6d3857 Mon Sep 17 00:00:00 2001 From: Jon Pollock Date: Thu, 28 Mar 2024 08:54:55 -0700 Subject: [PATCH 567/762] Add block height check to IsAtomicTxnsInnerTxn. 
--- lib/block_view.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/block_view.go b/lib/block_view.go index 6e03e0e50..577353c84 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3562,7 +3562,7 @@ func (bav *UtxoView) _connectTransaction( // Check that we're not trying to connect a transaction meant to be part of a series of atomic transactions // outside an atomic transactions wrapper. - if txn.IsAtomicTxnsInnerTxn() { + if blockHeight >= bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight && txn.IsAtomicTxnsInnerTxn() { return nil, 0, 0, 0, RuleErrorAtomicTxnsRequiresWrapper } From b876ea8e64e3d7fed77a84c805e66070301fbbd1 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 28 Mar 2024 12:43:21 -0400 Subject: [PATCH 568/762] Allow orphan blocks in block index (#1151) --- lib/blockchain.go | 2 +- lib/db_utils.go | 7 ++++++- lib/db_utils_test.go | 4 ++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 5d31f9ea9..fc8819c64 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -656,7 +656,7 @@ func (bc *Blockchain) _initChain() error { if bc.postgres != nil { bc.blockIndexByHash, err = bc.postgres.GetBlockIndex() } else { - bc.blockIndexByHash, err = GetBlockIndex(bc.db, false /*bitcoinNodes*/) + bc.blockIndexByHash, err = GetBlockIndex(bc.db, false /*bitcoinNodes*/, bc.params) } if err != nil { return errors.Wrapf(err, "_initChain: Problem reading block index from db") diff --git a/lib/db_utils.go b/lib/db_utils.go index e864cfc44..542bda24f 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -5452,7 +5452,7 @@ func GetBlockTipHeight(handle *badger.DB, bitcoinNodes bool) (uint64, error) { return blockHeight, err } -func GetBlockIndex(handle *badger.DB, bitcoinNodes bool) (map[BlockHash]*BlockNode, error) { +func GetBlockIndex(handle *badger.DB, bitcoinNodes bool, params *DeSoParams) (map[BlockHash]*BlockNode, error) { 
blockIndex := make(map[BlockHash]*BlockNode) prefix := _heightHashToNodeIndexPrefix(bitcoinNodes) @@ -5503,6 +5503,11 @@ func GetBlockIndex(handle *badger.DB, bitcoinNodes bool) (map[BlockHash]*BlockNo // We found the parent node so connect it. blockNode.Parent = parent } else { + // If we're syncing a DeSo node and we hit a PoS block, we expect there to + // be orphan blocks in the block index. In this case, we don't throw an error. + if bitcoinNodes == false && params.IsPoSBlockHeight(uint64(blockNode.Height)) { + continue + } // In this case we didn't find the parent so error. There shouldn't // be any unconnectedTxns in our block index. return fmt.Errorf("GetBlockIndex: Could not find parent for blockNode: %+v", blockNode) diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index f7f08644e..b373592fd 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -171,7 +171,7 @@ func TestBlockNodePutGet(t *testing.T) { err = PutHeightHashToNodeInfo(db, nil, b4, false /*bitcoinNodes*/, nil) require.NoError(err) - blockIndex, err := GetBlockIndex(db, false /*bitcoinNodes*/) + blockIndex, err := GetBlockIndex(db, false /*bitcoinNodes*/, &DeSoTestnetParams) require.NoError(err) require.Len(blockIndex, 4) @@ -224,7 +224,7 @@ func TestInitDbWithGenesisBlock(t *testing.T) { require.NoError(err) // Check the block index. 
- blockIndex, err := GetBlockIndex(db, false /*bitcoinNodes*/) + blockIndex, err := GetBlockIndex(db, false /*bitcoinNodes*/, &DeSoTestnetParams) require.NoError(err) require.Len(blockIndex, 1) genesisHash := *MustDecodeHexBlockHash(DeSoTestnetParams.GenesisBlockHashHex) From be5b7b63f8c9f9a3dd39f831d82f0e98a15a9904 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 29 Mar 2024 13:02:40 -0400 Subject: [PATCH 569/762] Delay FastHotStuffConsensus Transition If Sync Peer Is Not Connected (#1154) --- lib/server.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/server.go b/lib/server.go index a0ed2c656..b4c66e6b6 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2872,6 +2872,13 @@ func (srv *Server) tryTransitionToFastHotStuffConsensus() { return } + // If we have at least one sync peer configured but are not connected to any sync peers, then it + // means that we are still in the process of connecting to a sync peer. We can exit early and wait + // for the network manager to connect to a sync peer. + if len(srv.networkManager.connectIps) != 0 && srv.SyncPeer == nil { + return + } + // If we have a sync peer and have not reached the sync peer's starting block height, then // we should sync all remaining blocks from the sync peer before transitioning to the // FastHotStuffConsensus. 
From 8adba9d836aa0adfa9146c92f777bec273c2cd05 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 29 Mar 2024 13:20:55 -0400 Subject: [PATCH 570/762] Address Nina's Comments For connectFailing Removal in StateChangeSyncer (#1155) * Address Nina's Comments For connectFailing Removal in StateChangeSyncer * Better comments --- lib/state_change_syncer.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 84a78c078..93e50261f 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -4,15 +4,16 @@ import ( "bytes" "encoding/binary" "fmt" + "os" + "path/filepath" + "sync" + "time" + "github.com/deso-protocol/core/collections" "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" "github.com/google/uuid" "github.com/pkg/errors" - "os" - "path/filepath" - "sync" - "time" ) // StateSyncerOperationType is an enum that represents the type of operation that should be performed on the @@ -751,8 +752,13 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser if err == nil { mempoolTxUtxoView = copiedView } else { - glog.V(2).Infof("StateChangeSyncer.SyncMempoolToStateSyncer "+ - "failed connecting mempool tx with (hash= %v): (err=%v)", mempoolTx.Hash, err) + glog.V(2).Infof( + "StateChangeSyncer.SyncMempoolToStateSyncer failed connecting mempool tx with (hash= %v): (err=%v)", + mempoolTx.Hash, + err, + ) + // If the txn fails to connect, then we should not emit any state changes for it. + continue } } else { // For PoW block heights, we can just connect the transaction to the mempool view. 
From d03aaa20ee0fd5644c7d72efd34df044115584a3 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 29 Mar 2024 15:20:19 -0400 Subject: [PATCH 571/762] Make MaxBlockSizeBytesPoS part of GlobalParams (#1139) --- lib/block_view.go | 28 +++++++++- lib/block_view_atomic_txns.go | 6 ++- lib/block_view_atomic_txns_test.go | 2 +- lib/block_view_test.go | 83 +++++++++++++++++++++++++++++ lib/block_view_types.go | 9 ++++ lib/blockchain.go | 45 ++++++++++------ lib/constants.go | 23 ++++++-- lib/errors.go | 2 + lib/pos_block_producer.go | 5 +- lib/pos_block_producer_test.go | 1 + lib/pos_blockchain.go | 10 ++++ lib/pos_blockchain_test.go | 4 +- lib/pos_epoch_complete_hook_test.go | 6 +++ lib/pos_snapshot_entries.go | 3 ++ lib/validation_deso_txn.go | 17 ++++-- lib/validation_deso_txn_test.go | 8 +-- 16 files changed, 222 insertions(+), 30 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 7b27ca5bd..bb0b491f4 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3459,6 +3459,23 @@ func (bav *UtxoView) _connectUpdateGlobalParams( } newGlobalParamsEntry.MempoolFeeEstimatorNumPastBlocks = val } + if len(extraData[MaxBlockSizeBytesPoSKey]) > 0 { + val, bytesRead := Uvarint( + extraData[MaxBlockSizeBytesPoSKey], + ) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode MaxBlockSizeBytesPoS as uint64", + ) + } + if val < MinMaxBlockSizeBytes { + return 0, 0, nil, RuleErrorMaxBlockSizeBytesTooLow + } + if val > MaxMaxBlockSizeBytes { + return 0, 0, nil, RuleErrorMaxBlockSizeBytesTooHigh + } + newGlobalParamsEntry.MaxBlockSizeBytesPoS = val + } } var newForbiddenPubKeyEntry *ForbiddenPubKeyEntry @@ -3662,8 +3679,13 @@ func (bav *UtxoView) _connectSingleTxn( return nil, 0, 0, 0, errors.Wrapf( err, "_connectTransaction: Problem serializing transaction: ") } + maxBlockSizeBytes := bav.Params.MaxBlockSizeBytesPoW + if bav.Params.IsPoSBlockHeight(uint64(blockHeight)) { + 
maxBlockSizeBytes = bav.GetMaxBlockSizeBytesPoS() + } + txnSizeBytes := uint64(len(txnBytes)) - if txnSizeBytes > bav.Params.MaxBlockSizeBytes/2 { + if txnSizeBytes > maxBlockSizeBytes/2 { return nil, 0, 0, 0, RuleErrorTxnTooBig } @@ -5055,3 +5077,7 @@ func mergeExtraData(oldMap map[string][]byte, newMap map[string][]byte) map[stri return retMap } + +func (bav *UtxoView) GetMaxBlockSizeBytesPoS() uint64 { + return bav.GetCurrentGlobalParamsEntry().MaxBlockSizeBytesPoS +} diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index 265af0f53..2e27c551d 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -222,7 +222,11 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( err, "_connectTransaction: Problem serializing transaction: ") } txnSizeBytes := uint64(len(txnBytes)) - if txnSizeBytes > bav.Params.MaxBlockSizeBytes/2 { + maxBlockSizeBytes := bav.Params.MaxBlockSizeBytesPoW + if bav.Params.IsPoSBlockHeight(uint64(blockHeight)) { + maxBlockSizeBytes = bav.GetMaxBlockSizeBytesPoS() + } + if txnSizeBytes > maxBlockSizeBytes/2 { return nil, 0, 0, 0, RuleErrorTxnTooBig } diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index ff5113cc4..17c9c0092 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -198,7 +198,7 @@ func TestConnectAtomicTxnsWrapperRuleErrors(t *testing.T) { // Try and generate an atomic transaction wrapper larger than half the block size. // For simplicity, we estimate a basic transfer at greater than 100 bytes. 
// (This should fail -- RuleErrorTxnTooBig) - numTxnsToGenerate := testMeta.params.MaxBlockSizeBytes / 200 + numTxnsToGenerate := testMeta.params.MaxBlockSizeBytesPoW / 200 atomicTxns, _ := _generateSignedDependentAtomicTransactions(testMeta, int(numTxnsToGenerate)) atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) require.NoError(t, err) diff --git a/lib/block_view_test.go b/lib/block_view_test.go index ee7f173bf..1c8c23795 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -1271,6 +1271,7 @@ func TestBalanceModelUpdateGlobalParams(t *testing.T) { setBalanceModelBlockHeights(t) TestUpdateGlobalParams(t) + TestUpdateGlobalParamsPoS(t) } func TestUpdateGlobalParams(t *testing.T) { @@ -1482,6 +1483,88 @@ func TestUpdateGlobalParams(t *testing.T) { } } +func TestUpdateGlobalParamsPoS(t *testing.T) { + // Set pos block heights + setPoSBlockHeights(t, 2, 1000) + // Set up a blockchain + assert := assert.New(t) + require := require.New(t) + _, _ = assert, require + + chain, params, db := NewLowDifficultyBlockchain(t) + postgres := chain.postgres + mempool, miner := NewTestMiner(t, chain, params, true /*isSender*/) + _, _ = mempool, miner + + // Set the founder equal to the moneyPk + params.ExtraRegtestParamUpdaterKeys = make(map[PkMapKey]bool) + params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(MustBase58CheckDecode(moneyPkString))] = true + + // Mine a few blocks + _, err := miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(err) + _, err = miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(err) + // MaxBlockSizeBytesPoS tests. + { + params.ForkHeights = GlobalDeSoParams.ForkHeights + params.EncoderMigrationHeights = GlobalDeSoParams.EncoderMigrationHeights + params.EncoderMigrationHeightsList = GlobalDeSoParams.EncoderMigrationHeightsList + mempool.bc.params = params + // Make sure setting max block size too low fails. 
+ _, _, _, err := _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + MaxBlockSizeBytesPoSKey: UintToBuf(MinMaxBlockSizeBytes - 1), + }, + true, + mempool) + require.ErrorIs(err, RuleErrorMaxBlockSizeBytesTooLow) + // Make sure setting max block size too high fails. + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + MaxBlockSizeBytesPoSKey: UintToBuf(MaxMaxBlockSizeBytes + 1), + }, + true, + mempool) + require.ErrorIs(err, RuleErrorMaxBlockSizeBytesTooHigh) + // Make sure setting max block size to a valid value works and updates global params. + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + MaxBlockSizeBytesPoSKey: UintToBuf(5000), + }, + true, + mempool) + require.NoError(err) + utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) + require.NoError(err) + require.Equal(utxoView.GetCurrentGlobalParamsEntry().MaxBlockSizeBytesPoS, uint64(5000)) + } +} + func TestBalanceModelBasicTransfers(t *testing.T) { setBalanceModelBlockHeights(t) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 6dca8e2e7..ad7c04ca5 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -4241,6 +4241,9 @@ type GlobalParamsEntry struct { // MempoolFeeEstimatorNumPastBlocks is the number of past blocks to reference txn fees from when estimating // the fee for a new txn. MempoolFeeEstimatorNumPastBlocks uint64 + + // MaxBlockSizeBytesPoS is the maximum size of a block in bytes. 
+ MaxBlockSizeBytesPoS uint64 } func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { @@ -4266,6 +4269,7 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { MempoolMaxSizeBytes: gp.MempoolMaxSizeBytes, MempoolFeeEstimatorNumMempoolBlocks: gp.MempoolFeeEstimatorNumMempoolBlocks, MempoolFeeEstimatorNumPastBlocks: gp.MempoolFeeEstimatorNumPastBlocks, + MaxBlockSizeBytesPoS: gp.MaxBlockSizeBytesPoS, } } @@ -4296,6 +4300,7 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.MempoolMaxSizeBytes)...) data = append(data, UintToBuf(gp.MempoolFeeEstimatorNumMempoolBlocks)...) data = append(data, UintToBuf(gp.MempoolFeeEstimatorNumPastBlocks)...) + data = append(data, UintToBuf(gp.MaxBlockSizeBytesPoS)...) } return data } @@ -4392,6 +4397,10 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MempoolFeeEstimatorNumPastBlocks") } + gp.MaxBlockSizeBytesPoS, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MaxBlockSizeBytesPoW") + } } return nil } diff --git a/lib/blockchain.go b/lib/blockchain.go index fc8819c64..ae891fb53 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1491,15 +1491,14 @@ func (bc *Blockchain) SetBestChainMap(bestChain []*BlockNode, bestChainMap map[B bc.blockIndexByHeight = blockIndexByHeight } -// TODO: update to support validating orphan PoS Blocks -func (bc *Blockchain) _validateOrphanBlock(desoBlock *MsgDeSoBlock) error { +func (bc *Blockchain) _validateOrphanBlockPoW(desoBlock *MsgDeSoBlock) error { // Error if the block is missing a parent hash or header. 
if desoBlock.Header == nil { - return fmt.Errorf("_validateOrphanBlock: Block is missing header") + return fmt.Errorf("_validateOrphanBlockPoW: Block is missing header") } parentHash := desoBlock.Header.PrevBlockHash if parentHash == nil { - return fmt.Errorf("_validateOrphanBlock: Block is missing parent hash") + return fmt.Errorf("_validateOrphanBlockPoW: Block is missing parent hash") } // Check that the block size isn't bigger than the max allowed. This prevents @@ -1507,9 +1506,10 @@ func (bc *Blockchain) _validateOrphanBlock(desoBlock *MsgDeSoBlock) error { // an attempt to exhaust our memory. serializedBlock, err := desoBlock.ToBytes(false) if err != nil { - return fmt.Errorf("_validateOrphanBlock: Could not serialize block") + return fmt.Errorf("_validateOrphanBlockPoW: Could not serialize block") } - if uint64(len(serializedBlock)) > bc.params.MaxBlockSizeBytes { + // It's safe to leave this as a direct access to MaxBlockSizeBytesPoW since this is a PoW only function. + if uint64(len(serializedBlock)) > bc.params.MaxBlockSizeBytesPoW { return RuleErrorBlockTooBig } @@ -1539,7 +1539,7 @@ func (bc *Blockchain) _validateOrphanBlock(desoBlock *MsgDeSoBlock) error { // is wasteful of resources. Better would be to clean up orphan blocks once they're // too old or something like that. func (bc *Blockchain) ProcessOrphanBlock(desoBlock *MsgDeSoBlock, blockHash *BlockHash) error { - err := bc._validateOrphanBlock(desoBlock) + err := bc._validateOrphanBlockPoW(desoBlock) if err != nil { return errors.Wrapf(err, "ProcessOrphanBlock: Problem validating orphan block") } @@ -2192,7 +2192,9 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // potentially a network issue not an issue with the actual block. 
return false, false, fmt.Errorf("ProcessBlock: Problem serializing block") } - if uint64(len(serializedBlock)) > bc.params.MaxBlockSizeBytes { + // Since this is a PoW-only function, it's safe to leave direct access + // to MaxBlockSizeBytesPoW through the params instead of using the GlobalParamsEntry. + if uint64(len(serializedBlock)) > bc.params.MaxBlockSizeBytesPoW { bc.MarkBlockInvalid(nodeToValidate, RuleErrorBlockTooBig) return false, false, RuleErrorBlockTooBig } @@ -4875,10 +4877,13 @@ func (bc *Blockchain) CreateMaxSpend( for feeAmountNanos == 0 || feeAmountNanos != prevFeeAmountNanos { prevFeeAmountNanos = feeAmountNanos if !isInterfaceValueNil(mempool) { + maxBlockSizeBytes := bc.params.MaxBlockSizeBytesPoW + if bc.params.IsPoSBlockHeight(uint64(bc.BlockTip().Height)) { + maxBlockSizeBytes = utxoView.GetMaxBlockSizeBytesPoS() + } // TODO: replace MaxBasisPoints with variables configured by flags. feeAmountNanos, err = mempool.EstimateFee(txn, minFeeRateNanosPerKB, - MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, - bc.params.MaxBlockSizeBytes) + MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, maxBlockSizeBytes) if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, "CreateMaxSpend: Problem estimating fee: ") } @@ -4913,7 +4918,10 @@ func (bc *Blockchain) CreateMaxSpend( // than what AddInputsAndChangeToTransaction will allow because we want to leave // some breathing room to avoid this transaction getting rejected. currentTxnSize := _computeMaxTxSize(txn) - if currentTxnSize > bc.params.MaxBlockSizeBytes/3 { + // It is okay to directly use MaxBlockSizeBytesPoW here since the PoS fork + // comes after the balance model fork. The balance model fork ensures we do not hit + // this point. + if currentTxnSize > bc.params.MaxBlockSizeBytesPoW/3 { if len(txn.TxInputs) > 0 { // Cut off the last input if the transaction just became too large. 
txn.TxInputs = txn.TxInputs[:len(txn.TxInputs)-1] @@ -5009,9 +5017,13 @@ func (bc *Blockchain) AddInputsAndChangeToTransactionWithSubsidy( if txArg.TxnMeta.GetTxnType() != TxnTypeBlockReward { if !isInterfaceValueNil(mempool) { + maxBlockSizeBytes := bc.params.MaxBlockSizeBytesPoW + if bc.params.IsPoSBlockHeight(uint64(bc.BlockTip().Height)) { + maxBlockSizeBytes = utxoView.GetMaxBlockSizeBytesPoS() + } // TODO: replace MaxBasisPoints with variables configured by flags. txArg.TxnFeeNanos, err = mempool.EstimateFee(txArg, minFeeRateNanosPerKB, MaxBasisPoints, - MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, bc.params.MaxBlockSizeBytes) + MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, maxBlockSizeBytes) if err != nil { return 0, 0, 0, 0, errors.Wrapf(err, "AddInputsAndChangeToTransaction: Problem estimating fee: ") @@ -5149,10 +5161,13 @@ func (bc *Blockchain) AddInputsAndChangeToTransactionWithSubsidy( // If the final transaction is absolutely huge, return an error. finalTxnSize := _computeMaxTxSize(finalTxCopy) - if finalTxnSize > bc.params.MaxBlockSizeBytes/2 { + // It's fine to directly use MaxBlockSizeBytesPoW since the PoS fork + // will always be after the balance model fork. The balance model fork + // prevents the codebase from reaching this point. 
+ if finalTxnSize > bc.params.MaxBlockSizeBytesPoW/2 { return 0, 0, 0, 0, fmt.Errorf("AddInputsAndChangeToTransaction: "+ "Transaction size (%d bytes) exceeds the maximum sane amount "+ - "allowed (%d bytes)", finalTxnSize, bc.params.MaxBlockSizeBytes/2) + "allowed (%d bytes)", finalTxnSize, bc.params.MaxBlockSizeBytesPoW/2) } // At this point, the inputs cover the (spend amount plus transaction fee) @@ -5183,7 +5198,7 @@ func (bc *Blockchain) EstimateDefaultFeeRateNanosPerKB( return minFeeRateNanosPerKB } numBytes := len(blockBytes) - if float64(numBytes)/float64(bc.params.MaxBlockSizeBytes) < medianThreshold { + if float64(numBytes)/float64(bc.params.MaxBlockSizeBytesPoW) < medianThreshold { return minFeeRateNanosPerKB } diff --git a/lib/constants.go b/lib/constants.go index 3b91f5899..dbc37a17b 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -648,7 +648,7 @@ type DeSoParams struct { // The maximum number of bytes that can be allocated to transactions in // a block. - MaxBlockSizeBytes uint64 + MaxBlockSizeBytesPoW uint64 // It's useful to set the miner maximum block size to a little lower than the // maximum block size in certain cases. For example, on initial launch, setting @@ -801,6 +801,10 @@ type DeSoParams struct { // for a description of its usage. DefaultMempoolFeeEstimatorNumPastBlocks uint64 + // DefaultMaxBlockSizeBytesPoS is the default value for GlobalParamsEntry.MaxBlockSizeBytesPoS. + // This is the initial value for the maximum block size in bytes that we allow for PoS blocks. + DefaultMaxBlockSizeBytesPoS uint64 + // HandshakeTimeoutMicroSeconds is the timeout for the peer handshake certificate. The default value is 15 minutes. HandshakeTimeoutMicroSeconds uint64 @@ -888,6 +892,9 @@ func (params *DeSoParams) EnableRegtest() { params.DefaultEpochDurationNumBlocks = 10 // Set the PoS default jail inactive validator grace period epochs to 3. 
params.DefaultJailInactiveValidatorGracePeriodEpochs = 3 + // Set the DefaultMaxBlockSizeBytesPoS to be the same as the MaxBlockSizeBytesPoW + // so existing regtest nodes don't break. + params.DefaultMaxBlockSizeBytesPoS = params.MaxBlockSizeBytesPoW // In regtest, we start all the fork heights at zero. These can be adjusted // for testing purposes to ensure that a transition does not cause issues. @@ -1176,7 +1183,7 @@ var DeSoMainnetParams = DeSoParams{ // have to have a lot of space. This seems fine, however, // because space is cheap and it's easy to spin up a cloud machine with // tens of terabytes of space. - MaxBlockSizeBytes: 16000000, + MaxBlockSizeBytesPoW: 16000000, // We set this to be lower initially to avoid winding up with really big // spam blocks in the event someone tries to abuse the initially low min @@ -1283,6 +1290,9 @@ var DeSoMainnetParams = DeSoParams{ // The number of past blocks to consider when estimating the mempool fee. DefaultMempoolFeeEstimatorNumPastBlocks: 50, + // The maximum size of blocks for PoS. + DefaultMaxBlockSizeBytesPoS: 32000, // 32KB TODO: verify this is a sane value. + // The peer handshake certificate timeout. HandshakeTimeoutMicroSeconds: uint64(900000000), @@ -1469,7 +1479,7 @@ var DeSoTestnetParams = DeSoParams{ // We use a max block size of 1MB. This seems to work well for BTC and // most of our data doesn't need to be stored on the blockchain anyway. - MaxBlockSizeBytes: 1000000, + MaxBlockSizeBytesPoW: 1000000, // We set this to be lower initially to avoid winding up with really big // spam blocks in the event someone tries to abuse the initially low min @@ -1578,6 +1588,9 @@ var DeSoTestnetParams = DeSoParams{ // The number of past blocks to consider when estimating the mempool fee. DefaultMempoolFeeEstimatorNumPastBlocks: 50, + // The maximum size of blocks for PoS. + DefaultMaxBlockSizeBytesPoS: 32000, // 32KB TODO: verify this is a sane value. + // The peer handshake certificate timeout. 
HandshakeTimeoutMicroSeconds: uint64(900000000), @@ -1643,6 +1656,7 @@ const ( MempoolMaxSizeBytesKey = "MempoolMaxSizeBytes" MempoolFeeEstimatorNumMempoolBlocksKey = "MempoolFeeEstimatorNumMempoolBlocks" MempoolFeeEstimatorNumPastBlocksKey = "MempoolFeeEstimatorNumPastBlocks" + MaxBlockSizeBytesPoSKey = "MaxBlockSizeBytesPoS" DiamondLevelKey = "DiamondLevel" DiamondPostHashKey = "DiamondPostHash" @@ -1750,6 +1764,9 @@ const ( // Access group key constants MinAccessGroupKeyNameCharacters = 1 MaxAccessGroupKeyNameCharacters = 32 + // Min/MaxMaxBlockSizeBytes - Min/max value to which the max block size can be set. + MinMaxBlockSizeBytes = 1000 // 1kb TODO: Verify this is a sane value. + MaxMaxBlockSizeBytes = 16000000 // 16MB TODO: Verify this is a sane value. // DefaultMaxNonceExpirationBlockHeightOffset - default value to which the MaxNonceExpirationBlockHeightOffset // is set to before specified by ParamUpdater. diff --git a/lib/errors.go b/lib/errors.go index 9c4bdd62a..806262a9f 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -228,6 +228,8 @@ const ( RuleErrorUserNotAuthorizedToUpdateGlobalParams RuleError = "RuleErrorUserNotAuthorizedToUpdateGlobalParams" RuleErrorUserOutputMustBeNonzero RuleError = "RuleErrorUserOutputMustBeNonzero" RuleErrorLeaderScheduleExceedsValidatorSetMaxNumValidators RuleError = "RuleErrorLeaderScheduleExceedsValidatorSetMaxNumValidators" + RuleErrorMaxBlockSizeBytesTooLow RuleError = "RuleErrorMaxBlockSizeBytesTooLow" + RuleErrorMaxBlockSizeBytesTooHigh RuleError = "RuleErrorMaxBlockSizeBytesTooHigh" // DeSo Diamonds RuleErrorBasicTransferHasDiamondPostHashWithoutDiamondLevel RuleError = "RuleErrorBasicTransferHasDiamondPostHashWithoutDiamondLevel" diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 372dc94a1..29db036cc 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -135,13 +135,16 @@ func (pbp *PosBlockProducer) createBlockWithoutHeader( return nil, errors.Wrapf(err, "Error computing 
block reward txn size: ") } + // PoS Block Producer only uses the PoS MaxBlockSizeBytes. + maxBlockSizeBytes := latestBlockView.GetMaxBlockSizeBytesPoS() + // Get block transactions from the mempool. feeTimeTxns, maxUtilityFee, err := pbp.getBlockTransactions( pbp.proposerPublicKey, latestBlockView, newBlockHeight, newBlockTimestampNanoSecs, - pbp.params.MinerMaxBlockSizeBytes-uint64(len(blockRewardTxnSizeBytes)), + maxBlockSizeBytes-uint64(len(blockRewardTxnSizeBytes)), ) if err != nil { return nil, errors.Wrapf(err, "PosBlockProducer.createBlockWithoutHeader: Problem retrieving block transactions: ") diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index b7c522cfa..c096d3b3a 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -88,6 +88,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { blsPubKey, _ := _generateValidatorVotingPublicKeyAndSignature(t) params, db := _posTestBlockchainSetupWithBalances(t, 200000, 200000) params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 + params.DefaultMaxBlockSizeBytesPoS = params.MaxBlockSizeBytesPoW maxMempoolPosSizeBytes := uint64(3000000000) mempoolBackupIntervalMillis := uint64(30000) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 3ec971394..c645405cd 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -747,6 +747,16 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi } } + // Make sure the block isn't too big. + serializedBlock, err := block.ToBytes(false) + if err != nil { + return bc.storeValidateFailedBlockWithWrappedError( + block, errors.Wrap(err, "validateAndIndexBlockPoS: Problem serializing block")) + } + if uint64(len(serializedBlock)) > parentUtxoView.GetCurrentGlobalParamsEntry().MaxBlockSizeBytesPoS { + return bc.storeValidateFailedBlockWithWrappedError(block, RuleErrorBlockTooBig) + } + // Check if the block is properly formed and passes all basic validations. 
if err = bc.isValidBlockPoS(block); err != nil { return bc.storeValidateFailedBlockWithWrappedError(block, err) diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 2e8e76125..7c2ca3bcc 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2207,7 +2207,9 @@ func TestProcessOrphanBlockPoS(t *testing.T) { nextEpochEntry, err := utxoView.computeNextEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight, currentEpochEntry.FinalBlockHeight, 1) require.NoError(t, err) var twoEpochsInFutureBlock *MsgDeSoBlock - twoEpochsInFutureBlock = _generateRealBlock(testMeta, nextEpochEntry.FinalBlockHeight+1, nextEpochEntry.FinalBlockHeight+1, 17283, testMeta.chain.BlockTip().Hash, false) + twoEpochsInFutureBlock = _generateRealBlock(testMeta, nextEpochEntry.FinalBlockHeight, nextEpochEntry.FinalBlockHeight, 17283, testMeta.chain.BlockTip().Hash, false) + twoEpochsInFutureBlock.Header.Height += 1 + twoEpochsInFutureBlock.Header.ProposedInView += 1 // Give the block a random parent, so it is truly an orphan. twoEpochsInFutureBlock.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) updateProposerVotePartialSignatureForBlock(testMeta, twoEpochsInFutureBlock) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 0d2dc92f6..8008d8b90 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -83,6 +83,12 @@ func TestRunEpochCompleteHook(t *testing.T) { blockHeight := uint64(testMeta.chain.blockTip().Height) + 1 incrBlockHeight := func() uint64 { blockHeight += 1 + // Update the saved height in the testMeta so that the calls to construct transactions use the + // correct block height. + testMeta.savedHeight = uint32(blockHeight) + // Put a dummy block in the chain so that the tip is updated. This + // make the calls to update global params work since they use the chain's tip as the block height. 
+ testMeta.chain.addTipBlockToBestChain(&BlockNode{Height: uint32(blockHeight), Hash: NewBlockHash([]byte{byte(blockHeight)})}) return blockHeight } viewNumber := uint64(0) diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index fa75c7261..0d14f47b9 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -160,6 +160,9 @@ func MergeGlobalParamEntryDefaults(globalParamsEntry *GlobalParamsEntry, params if globalParamsEntryCopy.MempoolFeeEstimatorNumPastBlocks == 0 { globalParamsEntryCopy.MempoolFeeEstimatorNumPastBlocks = params.DefaultMempoolFeeEstimatorNumPastBlocks } + if globalParamsEntryCopy.MaxBlockSizeBytesPoS == 0 { + globalParamsEntryCopy.MaxBlockSizeBytesPoS = params.DefaultMaxBlockSizeBytesPoS + } // Return the merged result. return globalParamsEntryCopy diff --git a/lib/validation_deso_txn.go b/lib/validation_deso_txn.go index 1d230fd1e..15f2091b0 100644 --- a/lib/validation_deso_txn.go +++ b/lib/validation_deso_txn.go @@ -17,7 +17,7 @@ func ValidateDeSoTxnSanityBalanceModel(txn *MsgDeSoTxn, blockHeight uint64, } // Validate encoding - if err := ValidateDeSoTxnEncoding(txn, params); err != nil { + if err := ValidateDeSoTxnEncoding(txn, blockHeight, globalParams, params); err != nil { return errors.Wrapf(err, "ValidateDeSoTxnSanityBalanceModel: ") } // Validate transaction metadata @@ -44,7 +44,12 @@ func ValidateDeSoTxnSanityBalanceModel(txn *MsgDeSoTxn, blockHeight uint64, } // ValidateDeSoTxnEncoding validates that the transaction encoding works as expected. 
-func ValidateDeSoTxnEncoding(txn *MsgDeSoTxn, params *DeSoParams) error { +func ValidateDeSoTxnEncoding( + txn *MsgDeSoTxn, + blockHeight uint64, + globalParams *GlobalParamsEntry, + params *DeSoParams, +) error { if txn == nil || params == nil { return fmt.Errorf("ValidateDeSoTxnEncoding: Transaction and params cannot be nil") } @@ -68,10 +73,14 @@ func ValidateDeSoTxnEncoding(txn *MsgDeSoTxn, params *DeSoParams) error { } // TODO: Do we want a separate parameter for transaction size? Should it be a part of GlobalDeSoParams? + maxBlockSizeBytes := params.MaxBlockSizeBytesPoW + if params.IsPoSBlockHeight(blockHeight) { + maxBlockSizeBytes = MergeGlobalParamEntryDefaults(globalParams, params).MaxBlockSizeBytesPoS + } // Validate transaction size - if uint64(len(txnBytes)) > params.MaxBlockSizeBytes/2 { + if uint64(len(txnBytes)) > maxBlockSizeBytes/2 { return errors.Wrapf(RuleErrorTxnTooBig, "ValidateDeSoTxnEncoding: Transaction size %d is greater than "+ - "MaxBlockSizeBytes/2 %d", len(txnBytes), params.MaxBlockSizeBytes/2) + "MaxBlockSizeBytesPoW/2 %d", len(txnBytes), maxBlockSizeBytes/2) } return nil } diff --git a/lib/validation_deso_txn_test.go b/lib/validation_deso_txn_test.go index 3b4bce6cd..0c3a1807d 100644 --- a/lib/validation_deso_txn_test.go +++ b/lib/validation_deso_txn_test.go @@ -35,15 +35,17 @@ func TestValidateDeSoTxnEncoding(t *testing.T) { require := require.New(t) params := DeSoTestnetParams + mergedGlobalParams := MergeGlobalParamEntryDefaults(&GlobalParamsEntry{}, ¶ms) txns := decodeTestTxns(t) for _, txn := range txns { - require.NoError(ValidateDeSoTxnEncoding(txn, ¶ms)) + require.NoError( + ValidateDeSoTxnEncoding(txn, 1, mergedGlobalParams, ¶ms), ¶ms) } - params.MaxBlockSizeBytes = 0 + params.MaxBlockSizeBytesPoW = 0 for _, txn := range txns { - require.Contains(ValidateDeSoTxnEncoding(txn, ¶ms).Error(), RuleErrorTxnTooBig) + require.Contains(ValidateDeSoTxnEncoding(txn, 1, mergedGlobalParams, ¶ms).Error(), RuleErrorTxnTooBig) } } From 
44b650d6a56e1c23957d019392546cab6342bdf0 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 29 Mar 2024 15:23:01 -0400 Subject: [PATCH 572/762] Add soft max block size bytes (#1152) --- lib/block_view.go | 24 ++++++++++++ lib/block_view_atomic_txns.go | 2 +- lib/block_view_test.go | 72 ++++++++++++++++++++++++++++++++++ lib/block_view_types.go | 12 +++++- lib/blockchain.go | 4 +- lib/constants.go | 15 +++++++ lib/errors.go | 3 ++ lib/pos_block_producer.go | 21 +++++++--- lib/pos_block_producer_test.go | 8 ++-- lib/pos_snapshot_entries.go | 3 ++ 10 files changed, 150 insertions(+), 14 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index bb0b491f4..035f971d7 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3476,6 +3476,26 @@ func (bav *UtxoView) _connectUpdateGlobalParams( } newGlobalParamsEntry.MaxBlockSizeBytesPoS = val } + if len(extraData[SoftMaxBlockSizeBytesPoSKey]) > 0 { + val, bytesRead := Uvarint( + extraData[SoftMaxBlockSizeBytesPoSKey], + ) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode SoftMaxBlockSizeBytesPoS as uint64", + ) + } + if val < MinSoftMaxBlockSizeBytes { + return 0, 0, nil, RuleErrorSoftMaxBlockSizeBytesTooLow + } + if val > MaxSoftMaxBlockSizeBytes { + return 0, 0, nil, RuleErrorSoftMaxBlockSizeBytesTooHigh + } + if MergeGlobalParamEntryDefaults(&newGlobalParamsEntry, bav.Params).MaxBlockSizeBytesPoS < val { + return 0, 0, nil, RuleErrorSoftMaxBlockSizeBytesExceedsMaxBlockSizeBytes + } + newGlobalParamsEntry.SoftMaxBlockSizeBytesPoS = val + } } var newForbiddenPubKeyEntry *ForbiddenPubKeyEntry @@ -5081,3 +5101,7 @@ func mergeExtraData(oldMap map[string][]byte, newMap map[string][]byte) map[stri func (bav *UtxoView) GetMaxBlockSizeBytesPoS() uint64 { return bav.GetCurrentGlobalParamsEntry().MaxBlockSizeBytesPoS } + +func (bav *UtxoView) GetSoftMaxBlockSizeBytesPoS() uint64 { + return 
bav.GetCurrentGlobalParamsEntry().SoftMaxBlockSizeBytesPoS +} diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index 2e27c551d..27a53db02 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -221,11 +221,11 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( return nil, 0, 0, 0, errors.Wrapf( err, "_connectTransaction: Problem serializing transaction: ") } - txnSizeBytes := uint64(len(txnBytes)) maxBlockSizeBytes := bav.Params.MaxBlockSizeBytesPoW if bav.Params.IsPoSBlockHeight(uint64(blockHeight)) { maxBlockSizeBytes = bav.GetMaxBlockSizeBytesPoS() } + txnSizeBytes := uint64(len(txnBytes)) if txnSizeBytes > maxBlockSizeBytes/2 { return nil, 0, 0, 0, RuleErrorTxnTooBig } diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 1c8c23795..039be626a 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -1563,6 +1563,78 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { require.NoError(err) require.Equal(utxoView.GetCurrentGlobalParamsEntry().MaxBlockSizeBytesPoS, uint64(5000)) } + // SoftMaxBlockSizeBytesPoS tests. + { + // Make sure setting soft max block size too low fails. + _, _, _, err := _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + SoftMaxBlockSizeBytesPoSKey: UintToBuf(MinSoftMaxBlockSizeBytes - 1), + }, + true, + mempool) + require.ErrorIs(err, RuleErrorSoftMaxBlockSizeBytesTooLow) + // Make sure setting soft max block size too high fails. + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + SoftMaxBlockSizeBytesPoSKey: UintToBuf(MaxSoftMaxBlockSizeBytes + 1), + }, + true, + mempool) + require.ErrorIs(err, RuleErrorSoftMaxBlockSizeBytesTooHigh) + // Make sure setting soft max block size to a value greater than max block size fails. 
+ utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) + require.NoError(err) + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + SoftMaxBlockSizeBytesPoSKey: UintToBuf(utxoView.GetCurrentGlobalParamsEntry().MaxBlockSizeBytesPoS + 1), + }, + true, + mempool) + require.ErrorIs(err, RuleErrorSoftMaxBlockSizeBytesExceedsMaxBlockSizeBytes) + // Make sure setting max block size to a valid value works and updates global params. + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + SoftMaxBlockSizeBytesPoSKey: UintToBuf(4000), + }, + true, + mempool) + require.NoError(err) + utxoView, err = NewUtxoView(db, params, postgres, chain.snapshot, nil) + require.NoError(err) + require.Equal(utxoView.GetCurrentGlobalParamsEntry().SoftMaxBlockSizeBytesPoS, uint64(4000)) + } } func TestBalanceModelBasicTransfers(t *testing.T) { diff --git a/lib/block_view_types.go b/lib/block_view_types.go index ad7c04ca5..1b1c616ec 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -4244,6 +4244,10 @@ type GlobalParamsEntry struct { // MaxBlockSizeBytesPoS is the maximum size of a block in bytes. MaxBlockSizeBytesPoS uint64 + + // SoftMaxBlockSizeBytesPoS is the ideal steady state size of a block in bytes. + // This value will be used to control size of block production and congestion in fee estimation. 
+ SoftMaxBlockSizeBytesPoS uint64 } func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { @@ -4270,6 +4274,7 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { MempoolFeeEstimatorNumMempoolBlocks: gp.MempoolFeeEstimatorNumMempoolBlocks, MempoolFeeEstimatorNumPastBlocks: gp.MempoolFeeEstimatorNumPastBlocks, MaxBlockSizeBytesPoS: gp.MaxBlockSizeBytesPoS, + SoftMaxBlockSizeBytesPoS: gp.SoftMaxBlockSizeBytesPoS, } } @@ -4301,6 +4306,7 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.MempoolFeeEstimatorNumMempoolBlocks)...) data = append(data, UintToBuf(gp.MempoolFeeEstimatorNumPastBlocks)...) data = append(data, UintToBuf(gp.MaxBlockSizeBytesPoS)...) + data = append(data, UintToBuf(gp.SoftMaxBlockSizeBytesPoS)...) } return data } @@ -4399,7 +4405,11 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by } gp.MaxBlockSizeBytesPoS, err = ReadUvarint(rr) if err != nil { - return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MaxBlockSizeBytesPoW") + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MaxBlockSizeBytesPoS") + } + gp.SoftMaxBlockSizeBytesPoS, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading SoftMaxBlockSizeBytesPoS") } } return nil diff --git a/lib/blockchain.go b/lib/blockchain.go index ae891fb53..a1b7f0291 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -4879,7 +4879,7 @@ func (bc *Blockchain) CreateMaxSpend( if !isInterfaceValueNil(mempool) { maxBlockSizeBytes := bc.params.MaxBlockSizeBytesPoW if bc.params.IsPoSBlockHeight(uint64(bc.BlockTip().Height)) { - maxBlockSizeBytes = utxoView.GetMaxBlockSizeBytesPoS() + maxBlockSizeBytes = utxoView.GetSoftMaxBlockSizeBytesPoS() } // TODO: replace MaxBasisPoints with variables configured by flags. 
feeAmountNanos, err = mempool.EstimateFee(txn, minFeeRateNanosPerKB, @@ -5019,7 +5019,7 @@ func (bc *Blockchain) AddInputsAndChangeToTransactionWithSubsidy( if !isInterfaceValueNil(mempool) { maxBlockSizeBytes := bc.params.MaxBlockSizeBytesPoW if bc.params.IsPoSBlockHeight(uint64(bc.BlockTip().Height)) { - maxBlockSizeBytes = utxoView.GetMaxBlockSizeBytesPoS() + maxBlockSizeBytes = utxoView.GetSoftMaxBlockSizeBytesPoS() } // TODO: replace MaxBasisPoints with variables configured by flags. txArg.TxnFeeNanos, err = mempool.EstimateFee(txArg, minFeeRateNanosPerKB, MaxBasisPoints, diff --git a/lib/constants.go b/lib/constants.go index dbc37a17b..127d8fb1e 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -805,6 +805,11 @@ type DeSoParams struct { // This is the initial value for the maximum block size in bytes that we allow for PoS blocks. DefaultMaxBlockSizeBytesPoS uint64 + // DefaultSoftMaxBlockSizeBytesPoS is the default value for GlobalParamsEntry.SoftMaxBlockSizeBytesPoS + // This is the initial value for the ideal block size in bytes we aim for in block production and fee + // estimation. + DefaultSoftMaxBlockSizeBytesPoS uint64 + // HandshakeTimeoutMicroSeconds is the timeout for the peer handshake certificate. The default value is 15 minutes. HandshakeTimeoutMicroSeconds uint64 @@ -1293,6 +1298,9 @@ var DeSoMainnetParams = DeSoParams{ // The maximum size of blocks for PoS. DefaultMaxBlockSizeBytesPoS: 32000, // 32KB TODO: verify this is a sane value. + // The soft maximum size of blocks for PoS. + DefaultSoftMaxBlockSizeBytesPoS: 16000, // 16KB TODO: verify this is a sane value. + // The peer handshake certificate timeout. HandshakeTimeoutMicroSeconds: uint64(900000000), @@ -1591,6 +1599,9 @@ var DeSoTestnetParams = DeSoParams{ // The maximum size of blocks for PoS. DefaultMaxBlockSizeBytesPoS: 32000, // 32KB TODO: verify this is a sane value. + // The soft maximum size of blocks for PoS. 
+ DefaultSoftMaxBlockSizeBytesPoS: 16000, // 16KB TODO: verify this is a sane value. + // The peer handshake certificate timeout. HandshakeTimeoutMicroSeconds: uint64(900000000), @@ -1657,6 +1668,7 @@ const ( MempoolFeeEstimatorNumMempoolBlocksKey = "MempoolFeeEstimatorNumMempoolBlocks" MempoolFeeEstimatorNumPastBlocksKey = "MempoolFeeEstimatorNumPastBlocks" MaxBlockSizeBytesPoSKey = "MaxBlockSizeBytesPoS" + SoftMaxBlockSizeBytesPoSKey = "SoftMaxBlockSizeBytesPoS" DiamondLevelKey = "DiamondLevel" DiamondPostHashKey = "DiamondPostHash" @@ -1767,6 +1779,9 @@ const ( // Min/MaxMaxBlockSizeBytes - Min/max value to which the max block size can be set. MinMaxBlockSizeBytes = 1000 // 1kb TODO: Verify this is a sane value. MaxMaxBlockSizeBytes = 16000000 // 16MB TODO: Verify this is a sane value. + // Min/MaxSoftMaxBlockSizeBytes - Min/max value to which the soft max block size can be set. + MinSoftMaxBlockSizeBytes = 1000 // 1kb TODO: Verify this is a sane value. + MaxSoftMaxBlockSizeBytes = 16000000 // 16MB TODO: Verify this is a sane value. // DefaultMaxNonceExpirationBlockHeightOffset - default value to which the MaxNonceExpirationBlockHeightOffset // is set to before specified by ParamUpdater. 
diff --git a/lib/errors.go b/lib/errors.go index 806262a9f..7e4a8c445 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -230,6 +230,9 @@ const ( RuleErrorLeaderScheduleExceedsValidatorSetMaxNumValidators RuleError = "RuleErrorLeaderScheduleExceedsValidatorSetMaxNumValidators" RuleErrorMaxBlockSizeBytesTooLow RuleError = "RuleErrorMaxBlockSizeBytesTooLow" RuleErrorMaxBlockSizeBytesTooHigh RuleError = "RuleErrorMaxBlockSizeBytesTooHigh" + RuleErrorSoftMaxBlockSizeBytesTooLow RuleError = "RuleErrorSoftMaxBlockSizeBytesTooLow" + RuleErrorSoftMaxBlockSizeBytesTooHigh RuleError = "RuleErrorSoftMaxBlockSizeBytesTooHigh" + RuleErrorSoftMaxBlockSizeBytesExceedsMaxBlockSizeBytes RuleError = "RuleErrorSoftMaxBlockSizeBytesExceedsMaxBlockSizeBytes" // DeSo Diamonds RuleErrorBasicTransferHasDiamondPostHashWithoutDiamondLevel RuleError = "RuleErrorBasicTransferHasDiamondPostHashWithoutDiamondLevel" diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 29db036cc..a00238476 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -135,8 +135,9 @@ func (pbp *PosBlockProducer) createBlockWithoutHeader( return nil, errors.Wrapf(err, "Error computing block reward txn size: ") } - // PoS Block Producer only uses the PoS MaxBlockSizeBytes. - maxBlockSizeBytes := latestBlockView.GetMaxBlockSizeBytesPoS() + // PoS Block producer only uses PoS, so we just directly fetch the soft max and hard max block sizes. + softMaxBlockSizeBytes := latestBlockView.GetSoftMaxBlockSizeBytesPoS() + hardMaxBlockSizeBytes := latestBlockView.GetMaxBlockSizeBytesPoS() // Get block transactions from the mempool. 
feeTimeTxns, maxUtilityFee, err := pbp.getBlockTransactions( @@ -144,7 +145,8 @@ func (pbp *PosBlockProducer) createBlockWithoutHeader( latestBlockView, newBlockHeight, newBlockTimestampNanoSecs, - maxBlockSizeBytes-uint64(len(blockRewardTxnSizeBytes)), + softMaxBlockSizeBytes-uint64(len(blockRewardTxnSizeBytes)), + hardMaxBlockSizeBytes-uint64(len(blockRewardTxnSizeBytes)), ) if err != nil { return nil, errors.Wrapf(err, "PosBlockProducer.createBlockWithoutHeader: Problem retrieving block transactions: ") @@ -164,7 +166,8 @@ func (pbp *PosBlockProducer) getBlockTransactions( latestBlockView *UtxoView, newBlockHeight uint64, newBlockTimestampNanoSecs int64, - maxBlockSizeBytes uint64, + softMaxBlockSizeBytes uint64, + hardMaxBlockSizeBytes uint64, ) ( _txns []*MsgDeSoTxn, _maxUtilityFee uint64, @@ -182,13 +185,19 @@ func (pbp *PosBlockProducer) getBlockTransactions( return nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") } for _, txn := range feeTimeTxns { + // If we've exceeded the soft max block size, we exit. We want to allow at least one txn that moves the + // cumulative block size past the soft max, but don't want to add more txns beyond that. + if currentBlockSize > softMaxBlockSizeBytes { + break + } txnBytes, err := txn.ToBytes(false) if err != nil { return nil, 0, errors.Wrapf(err, "Error getting transaction size: ") } - // Skip over transactions that are too big. - if currentBlockSize+uint64(len(txnBytes)) > maxBlockSizeBytes { + // Skip over transactions that are too big. The block would be too large + // to be accepted by the network. 
+ if currentBlockSize+uint64(len(txnBytes)) > hardMaxBlockSizeBytes { continue } diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index c096d3b3a..a926fab1d 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -120,7 +120,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { { pbp := NewPosBlockProducer(mempool, params, NewPublicKey(m0PubBytes), blsPubKey, time.Now().UnixNano()) txns, _, err := pbp.getBlockTransactions( - NewPublicKey(m0PubBytes), latestBlockView, 3, 0, 50000) + NewPublicKey(m0PubBytes), latestBlockView, 3, 0, 50000, 50000) require.NoError(err) blockTemplate, err := pbp.createBlockWithoutHeader(latestBlockView, 3, 0) @@ -135,7 +135,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { { pbp := NewPosBlockProducer(mempool, params, NewPublicKey(m1PubBytes), blsPubKey, time.Now().UnixNano()) txns, maxUtilityFee, err := pbp.getBlockTransactions( - NewPublicKey(m1PubBytes), latestBlockView, 3, 0, 50000) + NewPublicKey(m1PubBytes), latestBlockView, 3, 0, 50000, 50000) require.NoError(err) blockTemplate, err := pbp.createBlockWithoutHeader(latestBlockView, 3, 0) @@ -241,7 +241,7 @@ func TestGetBlockTransactions(t *testing.T) { latestBlockViewCopy, err := latestBlockView.CopyUtxoView() require.NoError(err) - txns, maxUtilityFee, err := pbp.getBlockTransactions(NewPublicKey(m1PubBytes), latestBlockView, 3, 0, 1000) + txns, maxUtilityFee, err := pbp.getBlockTransactions(NewPublicKey(m1PubBytes), latestBlockView, 3, 0, 1000, 1000) require.NoError(err) require.Equal(latestBlockViewCopy, latestBlockView) require.Equal(true, len(passingTxns) > len(txns)) @@ -285,7 +285,7 @@ func _testProduceBlockNoSizeLimit(t *testing.T, mp *PosMempool, pbp *PosBlockPro latestBlockViewCopy, err := latestBlockView.CopyUtxoView() require.NoError(err) - txns, maxUtilityFee, err := pbp.getBlockTransactions(pbp.proposerPublicKey, latestBlockView, blockHeight, 0, math.MaxUint64) + txns, maxUtilityFee, err := 
pbp.getBlockTransactions(pbp.proposerPublicKey, latestBlockView, blockHeight, 0, math.MaxUint64, math.MaxUint64) require.NoError(err) require.Equal(latestBlockViewCopy, latestBlockView) require.Equal(totalAcceptedTxns, len(txns)) diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 0d14f47b9..b57d9197b 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -163,6 +163,9 @@ func MergeGlobalParamEntryDefaults(globalParamsEntry *GlobalParamsEntry, params if globalParamsEntryCopy.MaxBlockSizeBytesPoS == 0 { globalParamsEntryCopy.MaxBlockSizeBytesPoS = params.DefaultMaxBlockSizeBytesPoS } + if globalParamsEntryCopy.SoftMaxBlockSizeBytesPoS == 0 { + globalParamsEntryCopy.SoftMaxBlockSizeBytesPoS = params.DefaultSoftMaxBlockSizeBytesPoS + } // Return the merged result. return globalParamsEntryCopy From de097fd0d5353e6e205ae0fbabb9d7e4a8c55fb5 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 29 Mar 2024 15:24:31 -0400 Subject: [PATCH 573/762] MaxTxnSizeBytesPoS in global params (#1153) --- lib/block_view.go | 31 +++++++++++++-- lib/block_view_atomic_txns.go | 6 +-- lib/block_view_test.go | 72 +++++++++++++++++++++++++++++++++++ lib/block_view_types.go | 9 +++++ lib/constants.go | 14 +++++++ lib/errors.go | 3 ++ lib/pos_snapshot_entries.go | 3 ++ 7 files changed, 132 insertions(+), 6 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 035f971d7..2758f5014 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3496,6 +3496,26 @@ func (bav *UtxoView) _connectUpdateGlobalParams( } newGlobalParamsEntry.SoftMaxBlockSizeBytesPoS = val } + if len(extraData[MaxTxnSizeBytesPoSKey]) > 0 { + val, bytesRead := Uvarint( + extraData[MaxTxnSizeBytesPoSKey], + ) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode MaxTxnSizeBytesPoS as uint64", + ) + } + if val < MinMaxTxnSizeBytes { + return 0, 0, nil, 
RuleErrorMaxTxnSizeBytesTooLow + } + if val > MaxMaxTxnSizeBytes { + return 0, 0, nil, RuleErrorMaxTxnSizeBytesTooHigh + } + if MergeGlobalParamEntryDefaults(&newGlobalParamsEntry, bav.Params).MaxBlockSizeBytesPoS < val { + return 0, 0, nil, RuleErrorMaxTxnSizeBytesExceedsMaxBlockSizeBytes + } + newGlobalParamsEntry.MaxTxnSizeBytesPoS = val + } } var newForbiddenPubKeyEntry *ForbiddenPubKeyEntry @@ -3699,13 +3719,14 @@ func (bav *UtxoView) _connectSingleTxn( return nil, 0, 0, 0, errors.Wrapf( err, "_connectTransaction: Problem serializing transaction: ") } - maxBlockSizeBytes := bav.Params.MaxBlockSizeBytesPoW + + maxTxnSizeBytes := bav.Params.MaxBlockSizeBytesPoW / 2 if bav.Params.IsPoSBlockHeight(uint64(blockHeight)) { - maxBlockSizeBytes = bav.GetMaxBlockSizeBytesPoS() + maxTxnSizeBytes = bav.GetMaxTxnSizeBytesPoS() } txnSizeBytes := uint64(len(txnBytes)) - if txnSizeBytes > maxBlockSizeBytes/2 { + if txnSizeBytes > maxTxnSizeBytes { return nil, 0, 0, 0, RuleErrorTxnTooBig } @@ -5105,3 +5126,7 @@ func (bav *UtxoView) GetMaxBlockSizeBytesPoS() uint64 { func (bav *UtxoView) GetSoftMaxBlockSizeBytesPoS() uint64 { return bav.GetCurrentGlobalParamsEntry().SoftMaxBlockSizeBytesPoS } + +func (bav *UtxoView) GetMaxTxnSizeBytesPoS() uint64 { + return bav.GetCurrentGlobalParamsEntry().MaxTxnSizeBytesPoS +} diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index 27a53db02..792fbae61 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -221,12 +221,12 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( return nil, 0, 0, 0, errors.Wrapf( err, "_connectTransaction: Problem serializing transaction: ") } - maxBlockSizeBytes := bav.Params.MaxBlockSizeBytesPoW + maxTxnSizeBytes := bav.Params.MaxBlockSizeBytesPoW / 2 if bav.Params.IsPoSBlockHeight(uint64(blockHeight)) { - maxBlockSizeBytes = bav.GetMaxBlockSizeBytesPoS() + maxTxnSizeBytes = bav.GetMaxTxnSizeBytesPoS() } txnSizeBytes := uint64(len(txnBytes)) - if txnSizeBytes > 
maxBlockSizeBytes/2 { + if txnSizeBytes > maxTxnSizeBytes { return nil, 0, 0, 0, RuleErrorTxnTooBig } diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 039be626a..588e22cf0 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -1635,6 +1635,78 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { require.NoError(err) require.Equal(utxoView.GetCurrentGlobalParamsEntry().SoftMaxBlockSizeBytesPoS, uint64(4000)) } + // MaxTxnSizeBytesPoS tests. + { + // Make sure setting max txn size too low fails. + _, _, _, err := _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + MaxTxnSizeBytesPoSKey: UintToBuf(MinMaxTxnSizeBytes - 1), + }, + true, + mempool) + require.ErrorIs(err, RuleErrorMaxTxnSizeBytesTooLow) + // Make sure setting max txn size too high fails. + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + MaxTxnSizeBytesPoSKey: UintToBuf(MaxMaxTxnSizeBytes + 1), + }, + true, + mempool) + require.ErrorIs(err, RuleErrorMaxTxnSizeBytesTooHigh) + // Make sure setting max txn size to a value greater than max block size fails. + utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) + require.NoError(err) + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + MaxTxnSizeBytesPoSKey: UintToBuf(utxoView.GetCurrentGlobalParamsEntry().MaxBlockSizeBytesPoS + 1), + }, + true, + mempool) + require.ErrorIs(err, RuleErrorMaxTxnSizeBytesExceedsMaxBlockSizeBytes) + // Make sure setting max txn size to a valid value works and updates global params. 
+ _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + MaxTxnSizeBytesPoSKey: UintToBuf(4000), + }, + true, + mempool) + require.NoError(err) + utxoView, err = NewUtxoView(db, params, postgres, chain.snapshot, nil) + require.NoError(err) + require.Equal(utxoView.GetCurrentGlobalParamsEntry().MaxTxnSizeBytesPoS, uint64(4000)) + } } func TestBalanceModelBasicTransfers(t *testing.T) { diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 1b1c616ec..7cf9558b3 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -4248,6 +4248,9 @@ type GlobalParamsEntry struct { // SoftMaxBlockSizeBytesPoS is the ideal steady state size of a block in bytes. // This value will be used to control size of block production and congestion in fee estimation. SoftMaxBlockSizeBytesPoS uint64 + + // MaxTxnSizeBytesPoS is the maximum size of a transaction in bytes allowed. + MaxTxnSizeBytesPoS uint64 } func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { @@ -4275,6 +4278,7 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { MempoolFeeEstimatorNumPastBlocks: gp.MempoolFeeEstimatorNumPastBlocks, MaxBlockSizeBytesPoS: gp.MaxBlockSizeBytesPoS, SoftMaxBlockSizeBytesPoS: gp.SoftMaxBlockSizeBytesPoS, + MaxTxnSizeBytesPoS: gp.MaxTxnSizeBytesPoS, } } @@ -4307,6 +4311,7 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.MempoolFeeEstimatorNumPastBlocks)...) data = append(data, UintToBuf(gp.MaxBlockSizeBytesPoS)...) data = append(data, UintToBuf(gp.SoftMaxBlockSizeBytesPoS)...) + data = append(data, UintToBuf(gp.MaxTxnSizeBytesPoS)...) 
} return data } @@ -4411,6 +4416,10 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading SoftMaxBlockSizeBytesPoS") } + gp.MaxTxnSizeBytesPoS, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MaxTxnSizeBytesPoS") + } } return nil } diff --git a/lib/constants.go b/lib/constants.go index 127d8fb1e..18fa92d4e 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -810,6 +810,10 @@ type DeSoParams struct { // estimation. DefaultSoftMaxBlockSizeBytesPoS uint64 + // DefaultMaxTxnSizeBytesPoS is the default value for GlobalParamsEntry.MaxTxnSizeBytesPoS. + // This is the initial value for the maximum txn size in bytes that we allow for txns in PoS blocks. + DefaultMaxTxnSizeBytesPoS uint64 + // HandshakeTimeoutMicroSeconds is the timeout for the peer handshake certificate. The default value is 15 minutes. HandshakeTimeoutMicroSeconds uint64 @@ -1301,6 +1305,9 @@ var DeSoMainnetParams = DeSoParams{ // The soft maximum size of blocks for PoS. DefaultSoftMaxBlockSizeBytesPoS: 16000, // 16KB TODO: verify this is a sane value. + // The maximum size for a single txn in PoS. + DefaultMaxTxnSizeBytesPoS: 25000, // 25KB TODO: verify this is a sane value. + // The peer handshake certificate timeout. HandshakeTimeoutMicroSeconds: uint64(900000000), @@ -1602,6 +1609,9 @@ var DeSoTestnetParams = DeSoParams{ // The soft maximum size of blocks for PoS. DefaultSoftMaxBlockSizeBytesPoS: 16000, // 16KB TODO: verify this is a sane value. + // The maximum size for a single txn in PoS. + DefaultMaxTxnSizeBytesPoS: 25000, // 25KB TODO: verify this is a sane value. + // The peer handshake certificate timeout. 
HandshakeTimeoutMicroSeconds: uint64(900000000), @@ -1669,6 +1679,7 @@ const ( MempoolFeeEstimatorNumPastBlocksKey = "MempoolFeeEstimatorNumPastBlocks" MaxBlockSizeBytesPoSKey = "MaxBlockSizeBytesPoS" SoftMaxBlockSizeBytesPoSKey = "SoftMaxBlockSizeBytesPoS" + MaxTxnSizeBytesPoSKey = "MaxTxnSizeBytesPoS" DiamondLevelKey = "DiamondLevel" DiamondPostHashKey = "DiamondPostHash" @@ -1782,6 +1793,9 @@ const ( // Min/MaxSoftMaxBlockSizeBytes - Min/max value to which the soft max block size can be set. MinSoftMaxBlockSizeBytes = 1000 // 1kb TODO: Verify this is a sane value. MaxSoftMaxBlockSizeBytes = 16000000 // 16MB TODO: Verify this is a sane value. + // Min/MaxMaxTxnSizeBytes - Min/max value to which the max txn size can be set. + MinMaxTxnSizeBytes = 1000 // 1kb TODO: Verify this is a sane value. + MaxMaxTxnSizeBytes = 16000000 // 16MB TODO: Verify this is a sane value. // DefaultMaxNonceExpirationBlockHeightOffset - default value to which the MaxNonceExpirationBlockHeightOffset // is set to before specified by ParamUpdater. 
diff --git a/lib/errors.go b/lib/errors.go index 7e4a8c445..fdda6d57e 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -233,6 +233,9 @@ const ( RuleErrorSoftMaxBlockSizeBytesTooLow RuleError = "RuleErrorSoftMaxBlockSizeBytesTooLow" RuleErrorSoftMaxBlockSizeBytesTooHigh RuleError = "RuleErrorSoftMaxBlockSizeBytesTooHigh" RuleErrorSoftMaxBlockSizeBytesExceedsMaxBlockSizeBytes RuleError = "RuleErrorSoftMaxBlockSizeBytesExceedsMaxBlockSizeBytes" + RuleErrorMaxTxnSizeBytesTooLow RuleError = "RuleErrorMaxTxnSizeBytesTooLow" + RuleErrorMaxTxnSizeBytesTooHigh RuleError = "RuleErrorMaxTxnSizeBytesTooHigh" + RuleErrorMaxTxnSizeBytesExceedsMaxBlockSizeBytes RuleError = "RuleErrorMaxTxnSizeBytesExceedsMaxBlockSizeBytes" // DeSo Diamonds RuleErrorBasicTransferHasDiamondPostHashWithoutDiamondLevel RuleError = "RuleErrorBasicTransferHasDiamondPostHashWithoutDiamondLevel" diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index b57d9197b..0d6073213 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -166,6 +166,9 @@ func MergeGlobalParamEntryDefaults(globalParamsEntry *GlobalParamsEntry, params if globalParamsEntryCopy.SoftMaxBlockSizeBytesPoS == 0 { globalParamsEntryCopy.SoftMaxBlockSizeBytesPoS = params.DefaultSoftMaxBlockSizeBytesPoS } + if globalParamsEntryCopy.MaxTxnSizeBytesPoS == 0 { + globalParamsEntryCopy.MaxTxnSizeBytesPoS = params.DefaultMaxTxnSizeBytesPoS + } // Return the merged result. 
return globalParamsEntryCopy From 21ac6fa3f3e1f78b2d3ae47171cad6e0ce8b69f9 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 29 Mar 2024 15:26:31 -0400 Subject: [PATCH 574/762] Enforce a minimum fee bucket size when updating global params (#1140) --- lib/block_view.go | 8 ++++ lib/block_view_test.go | 84 +++++++++++++++++++++++++++++++++++------- lib/constants.go | 4 ++ lib/errors.go | 1 + 4 files changed, 83 insertions(+), 14 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 2758f5014..602b92736 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3377,6 +3377,14 @@ func (bav *UtxoView) _connectUpdateGlobalParams( ) } } + // Validate that the minimum fee bucket size is greater than the minimum allowed. + mergedGlobalParams := MergeGlobalParamEntryDefaults(&newGlobalParamsEntry, bav.Params) + minFeeRateNanosPerKB, feeBucketMultiplier := mergedGlobalParams. + ComputeFeeTimeBucketMinimumFeeAndMultiplier() + nextFeeBucketMin := computeFeeTimeBucketMinFromExponent(1, minFeeRateNanosPerKB, feeBucketMultiplier) + if nextFeeBucketMin < mergedGlobalParams.MinimumNetworkFeeNanosPerKB+MinFeeBucketSize { + return 0, 0, nil, RuleErrorFeeBucketSizeTooSmall + } if len(extraData[FailingTransactionBMFMultiplierBasisPointsKey]) > 0 { val, bytesRead := Uvarint( extraData[FailingTransactionBMFMultiplierBasisPointsKey], diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 588e22cf0..85514e05a 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -1501,9 +1501,24 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(MustBase58CheckDecode(moneyPkString))] = true // Mine a few blocks - _, err := miner.MineAndProcessSingleBlock(0, mempool) - require.NoError(err) - _, err = miner.MineAndProcessSingleBlock(0, mempool) + for ii := 0; ii < 10; ii++ { + _, err := miner.MineAndProcessSingleBlock(0, mempool) + require.NoError(err) + } + + // Update min 
fee rate on global params to be 1000. + _, _, _, err := _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, + moneyPkString, + moneyPrivString, + -1, + 1000, + -1, + -1, + -1, + -1, + map[string][]byte{}, + true, + mempool) require.NoError(err) // MaxBlockSizeBytesPoS tests. { @@ -1512,7 +1527,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { params.EncoderMigrationHeightsList = GlobalDeSoParams.EncoderMigrationHeightsList mempool.bc.params = params // Make sure setting max block size too low fails. - _, _, _, err := _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + _, _, _, err := _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, moneyPkString, moneyPrivString, -1, @@ -1528,7 +1543,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { mempool) require.ErrorIs(err, RuleErrorMaxBlockSizeBytesTooLow) // Make sure setting max block size too high fails. - _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, moneyPkString, moneyPrivString, -1, @@ -1544,7 +1559,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { mempool) require.ErrorIs(err, RuleErrorMaxBlockSizeBytesTooHigh) // Make sure setting max block size to a valid value works and updates global params. - _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, moneyPkString, moneyPrivString, -1, @@ -1566,7 +1581,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { // SoftMaxBlockSizeBytesPoS tests. { // Make sure setting soft max block size too low fails. 
- _, _, _, err := _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + _, _, _, err := _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, moneyPkString, moneyPrivString, -1, @@ -1582,7 +1597,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { mempool) require.ErrorIs(err, RuleErrorSoftMaxBlockSizeBytesTooLow) // Make sure setting soft max block size too high fails. - _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, moneyPkString, moneyPrivString, -1, @@ -1600,7 +1615,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { // Make sure setting soft max block size to a value greater than max block size fails. utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) require.NoError(err) - _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, moneyPkString, moneyPrivString, -1, @@ -1616,7 +1631,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { mempool) require.ErrorIs(err, RuleErrorSoftMaxBlockSizeBytesExceedsMaxBlockSizeBytes) // Make sure setting max block size to a valid value works and updates global params. - _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, moneyPkString, moneyPrivString, -1, @@ -1638,7 +1653,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { // MaxTxnSizeBytesPoS tests. { // Make sure setting max txn size too low fails. 
- _, _, _, err := _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + _, _, _, err := _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, moneyPkString, moneyPrivString, -1, @@ -1654,7 +1669,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { mempool) require.ErrorIs(err, RuleErrorMaxTxnSizeBytesTooLow) // Make sure setting max txn size too high fails. - _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, moneyPkString, moneyPrivString, -1, @@ -1672,7 +1687,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { // Make sure setting max txn size to a value greater than max block size fails. utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) require.NoError(err) - _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, moneyPkString, moneyPrivString, -1, @@ -1688,7 +1703,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { mempool) require.ErrorIs(err, RuleErrorMaxTxnSizeBytesExceedsMaxBlockSizeBytes) // Make sure setting max txn size to a valid value works and updates global params. - _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 200, + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, moneyPkString, moneyPrivString, -1, @@ -1707,6 +1722,47 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { require.NoError(err) require.Equal(utxoView.GetCurrentGlobalParamsEntry().MaxTxnSizeBytesPoS, uint64(4000)) } + // MinFeeBucket size tests. 
+ { + + utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) + require.NoError(err) + require.Equal(utxoView.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB, uint64(1000)) + require.Equal(utxoView.GetCurrentGlobalParamsEntry().FeeBucketGrowthRateBasisPoints, uint64(1000)) + // Make sure setting min fee bucket size too low fails. 1% of 1000 is 10, so anything less than that + // should fail. + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + FeeBucketGrowthRateBasisPointsKey: UintToBuf(90), + }, + true, + mempool) + require.ErrorIs(err, RuleErrorFeeBucketSizeTooSmall) + // Okay now set it to 1% + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + FeeBucketGrowthRateBasisPointsKey: UintToBuf(100), + }, + true, + mempool) + require.NoError(err) + } } func TestBalanceModelBasicTransfers(t *testing.T) { diff --git a/lib/constants.go b/lib/constants.go index 18fa92d4e..c1cab75f0 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1796,6 +1796,10 @@ const ( // Min/MaxMaxTxnSizeBytes - Min/max value to which the max txn size can be set. MinMaxTxnSizeBytes = 1000 // 1kb TODO: Verify this is a sane value. MaxMaxTxnSizeBytes = 16000000 // 16MB TODO: Verify this is a sane value. + // MinFeeBucketSize is the minimum size of a fee bucket that we'll allow global params to + // be configured to compute. This is a safety measure to prevent the fee bucket from being + // too small and causing issues with the mempool. + MinFeeBucketSize = 10 // DefaultMaxNonceExpirationBlockHeightOffset - default value to which the MaxNonceExpirationBlockHeightOffset // is set to before specified by ParamUpdater. 
diff --git a/lib/errors.go b/lib/errors.go index fdda6d57e..2c072b79f 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -236,6 +236,7 @@ const ( RuleErrorMaxTxnSizeBytesTooLow RuleError = "RuleErrorMaxTxnSizeBytesTooLow" RuleErrorMaxTxnSizeBytesTooHigh RuleError = "RuleErrorMaxTxnSizeBytesTooHigh" RuleErrorMaxTxnSizeBytesExceedsMaxBlockSizeBytes RuleError = "RuleErrorMaxTxnSizeBytesExceedsMaxBlockSizeBytes" + RuleErrorFeeBucketSizeTooSmall RuleError = "RuleErrorFeeBucketSizeTooSmall" // DeSo Diamonds RuleErrorBasicTransferHasDiamondPostHashWithoutDiamondLevel RuleError = "RuleErrorBasicTransferHasDiamondPostHashWithoutDiamondLevel" From 56d2f6300ec577d2f8291a983d70a260a834b884 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 29 Mar 2024 15:28:34 -0400 Subject: [PATCH 575/762] Hook up Block Production interval millis in global params (#1141) --- cmd/config.go | 6 +- cmd/node.go | 2 - consensus/event_loop.go | 19 ++++- consensus/integration_test_types.go | 3 + consensus/mocking.go | 11 ++- consensus/types.go | 2 +- integration_testing/tools.go | 2 - lib/block_view.go | 35 +++++++++ lib/block_view_test.go | 108 ++++++++++++++++++++++++++++ lib/block_view_types.go | 18 +++++ lib/constants.go | 28 ++++++++ lib/errors.go | 4 ++ lib/pos_consensus.go | 67 +++++++++++------ lib/pos_snapshot_entries.go | 6 ++ lib/server.go | 4 -- 15 files changed, 274 insertions(+), 41 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 302ba2eb7..9f63e83cd 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -44,9 +44,7 @@ type Config struct { HypersyncMaxQueueSize uint32 // PoS Validator - PosValidatorSeed string - PosBlockProductionIntervalMilliseconds uint64 - PosTimeoutBaseDurationMilliseconds uint64 + PosValidatorSeed string // Mempool MempoolBackupIntervalMillis uint64 @@ -125,8 +123,6 @@ func LoadConfig() *Config { // PoS Validator config.PosValidatorSeed = viper.GetString("pos-validator-seed") - 
config.PosBlockProductionIntervalMilliseconds = viper.GetUint64("pos-block-production-interval-milliseconds") - config.PosTimeoutBaseDurationMilliseconds = viper.GetUint64("pos-timeout-base-duration-milliseconds") // Mempool config.MempoolBackupIntervalMillis = viper.GetUint64("mempool-backup-time-millis") diff --git a/cmd/node.go b/cmd/node.go index 45e8384f6..282d91b4a 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -297,8 +297,6 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Config.MempoolMaxValidationViewConnects, node.Config.TransactionValidationRefreshIntervalMillis, node.Config.AugmentedBlockViewRefreshIntervalMillis, - node.Config.PosBlockProductionIntervalMilliseconds, - node.Config.PosTimeoutBaseDurationMilliseconds, node.Config.StateSyncerMempoolTxnSyncLimit, ) if err != nil { diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 6a768042a..6d350dd7e 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -145,7 +145,12 @@ func (fc *fastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { // all ancestors of the uncommitted tip that are safe to extend from, and all blocks from forks // that are safe to extend from. This function does not validate the collection of blocks. It // expects the server to know and decide what blocks are safe to extend from. 
-func (fc *fastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { +func (fc *fastHotStuffEventLoop) ProcessTipBlock( + tip BlockWithValidatorList, + safeBlocks []BlockWithValidatorList, + crankTimerDuration time.Duration, + timeoutTimerDuration time.Duration, +) error { // Grab the event loop's lock fc.lock.Lock() defer fc.lock.Unlock() @@ -160,12 +165,22 @@ func (fc *fastHotStuffEventLoop) ProcessTipBlock(tip BlockWithValidatorList, saf return errors.Wrap(err, "FastHotStuffEventLoop.ProcessTipBlock: ") } + // Validate the scheduled task durations + if crankTimerDuration <= 0 { + return errors.New("FastHotStuffEventLoop.ProcessTipBlock: Crank timer interval must be > 0") + } + if timeoutTimerDuration <= 0 { + return errors.New("FastHotStuffEventLoop.ProcessTipBlock: Timeout base duration must be > 0") + } + // We track the current view here so we know which view to time out on later on. fc.currentView = fc.tip.block.GetView() + 1 // Reset QC construction status for the current view fc.hasCrankTimerRunForCurrentView = false fc.hasConstructedQCInCurrentView = false + fc.crankTimerInterval = crankTimerDuration + fc.timeoutBaseDuration = timeoutTimerDuration // Evict all stale votes and timeouts fc.evictStaleVotesAndTimeouts() @@ -213,7 +228,7 @@ func (fc *fastHotStuffEventLoop) UpdateSafeBlocks(safeBlocks []BlockWithValidato // Validate the safe blocks and validator lists, and store them if err := fc.storeBlocks(tipBlock, safeBlocks); err != nil { - return errors.Wrap(err, "FastHotStuffEventLoop.ProcessTipBlock: ") + return errors.Wrap(err, "FastHotStuffEventLoop.UpdateSafeBlocks: ") } // Happy path. There's no need to reschedule the crank timer or timeout scheduled tasks here. 
diff --git a/consensus/integration_test_types.go b/consensus/integration_test_types.go index 962342500..c3cf8ca79 100644 --- a/consensus/integration_test_types.go +++ b/consensus/integration_test_types.go @@ -167,6 +167,9 @@ func (node *validatorNode) ProcessBlock(incomingBlock *block) { return BlockWithValidatorList{bb, node.getValidators()} }, ), + // TODO: replace with values from snapshot global params + node.eventLoop.crankTimerInterval, + node.eventLoop.timeoutBaseDuration, ) } diff --git a/consensus/mocking.go b/consensus/mocking.go index 33044c895..96b3e438e 100644 --- a/consensus/mocking.go +++ b/consensus/mocking.go @@ -7,7 +7,7 @@ type MockFastHotStuffEventLoop struct { OnInit func(time.Duration, time.Duration, QuorumCertificate, BlockWithValidatorList, []BlockWithValidatorList) error OnGetCurrentView func() uint64 OnAdvanceViewOnTimeout func() (uint64, error) - OnProcessTipBlock func(BlockWithValidatorList, []BlockWithValidatorList) error + OnProcessTipBlock func(BlockWithValidatorList, []BlockWithValidatorList, time.Duration, time.Duration) error OnUpdateSafeBlocks func([]BlockWithValidatorList) error OnProcessValidatorVote func(VoteMessage) error OnProcessValidatorTimeout func(TimeoutMessage) error @@ -32,8 +32,13 @@ func (fc *MockFastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { return fc.OnAdvanceViewOnTimeout() } -func (fc *MockFastHotStuffEventLoop) ProcessTipBlock(tipBlock BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { - return fc.OnProcessTipBlock(tipBlock, safeBlocks) +func (fc *MockFastHotStuffEventLoop) ProcessTipBlock( + tipBlock BlockWithValidatorList, + safeBlocks []BlockWithValidatorList, + crankTimerDuration time.Duration, + timeoutTimerDuration time.Duration, +) error { + return fc.OnProcessTipBlock(tipBlock, safeBlocks, crankTimerDuration, timeoutTimerDuration) } func (fc *MockFastHotStuffEventLoop) UpdateSafeBlocks(safeBlocks []BlockWithValidatorList) error { diff --git a/consensus/types.go 
b/consensus/types.go index b81a6ffa2..a84fba44b 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -70,7 +70,7 @@ type FastHotStuffEventLoop interface { Init(time.Duration, time.Duration, QuorumCertificate, BlockWithValidatorList, []BlockWithValidatorList) error GetCurrentView() uint64 AdvanceViewOnTimeout() (uint64, error) - ProcessTipBlock(BlockWithValidatorList, []BlockWithValidatorList) error + ProcessTipBlock(BlockWithValidatorList, []BlockWithValidatorList, time.Duration, time.Duration) error UpdateSafeBlocks([]BlockWithValidatorList) error ProcessValidatorVote(VoteMessage) error ProcessValidatorTimeout(TimeoutMessage) error diff --git a/integration_testing/tools.go b/integration_testing/tools.go index d1fa6791c..cdf55ebf5 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -99,8 +99,6 @@ func _generateConfig(t *testing.T, config *cmd.Config, port uint32, dataDir stri config.MempoolMaxValidationViewConnects = 10000 config.TransactionValidationRefreshIntervalMillis = 10 config.AugmentedBlockViewRefreshIntervalMillis = 10 - config.PosBlockProductionIntervalMilliseconds = 1500 - config.PosTimeoutBaseDurationMilliseconds = 30000 //config.ArchivalMode = true diff --git a/lib/block_view.go b/lib/block_view.go index 602b92736..c63e8437a 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3524,6 +3524,41 @@ func (bav *UtxoView) _connectUpdateGlobalParams( } newGlobalParamsEntry.MaxTxnSizeBytesPoS = val } + if len(extraData[BlockProductionIntervalPoSKey]) > 0 { + val, bytesRead := Uvarint( + extraData[BlockProductionIntervalPoSKey], + ) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode BlockProductionIntervalPoS as uint64", + ) + } + if val < MinBlockProductionIntervalMillisecondsPoS { + return 0, 0, nil, RuleErrorBlockProductionIntervalPoSTooLow + } + if val > MaxBlockProductionIntervalMillisecondsPoS { + return 0, 0, nil, RuleErrorBlockProductionIntervalPoSTooHigh + } + 
newGlobalParamsEntry.BlockProductionIntervalMillisecondsPoS = val + } + + if len(extraData[TimeoutIntervalPoSKey]) > 0 { + val, bytesRead := Uvarint( + extraData[TimeoutIntervalPoSKey], + ) + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode TimeoutIntervalPoS as uint64", + ) + } + if val < MinTimeoutIntervalMillisecondsPoS { + return 0, 0, nil, RuleErrorTimeoutIntervalPoSTooLow + } + if val > MaxTimeoutIntervalMillisecondsPoS { + return 0, 0, nil, RuleErrorTimeoutIntervalPoSTooHigh + } + newGlobalParamsEntry.TimeoutIntervalMillisecondsPoS = val + } } var newForbiddenPubKeyEntry *ForbiddenPubKeyEntry diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 85514e05a..bc49c19de 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -1763,6 +1763,114 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { mempool) require.NoError(err) } + { + // Block production global params test. + // Make sure setting block production interval too low fails. + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + BlockProductionIntervalPoSKey: UintToBuf(MinBlockProductionIntervalMillisecondsPoS - 1), + }, + true, + mempool) + require.ErrorIs(err, RuleErrorBlockProductionIntervalPoSTooLow) + // Make sure setting block production interval too high fails. Anything over the max allowed will fail. + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + BlockProductionIntervalPoSKey: UintToBuf(MaxBlockProductionIntervalMillisecondsPoS + 1), + }, + true, + mempool) + require.ErrorIs(err, RuleErrorBlockProductionIntervalPoSTooHigh) + // Make sure setting block timeout interval to a reasonable value works. 
Make it 5s + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + BlockProductionIntervalPoSKey: UintToBuf(5000), + }, + true, + mempool) + require.NoError(err) + utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) + require.NoError(err) + require.Equal(utxoView.GetCurrentGlobalParamsEntry().BlockProductionIntervalMillisecondsPoS, uint64(5000)) + } + { + // Timeout interval global params test + // Make sure setting timeout interval too low fails. Anything below min should fail + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + TimeoutIntervalPoSKey: UintToBuf(MinTimeoutIntervalMillisecondsPoS - 1), + }, + true, + mempool) + require.ErrorIs(err, RuleErrorTimeoutIntervalPoSTooLow) + // Make sure setting timeout interval too low fails. Anything above max should fail + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + TimeoutIntervalPoSKey: UintToBuf(MaxTimeoutIntervalMillisecondsPoS + 1), + }, + true, + mempool) + require.ErrorIs(err, RuleErrorTimeoutIntervalPoSTooHigh) + // Make sure setting timeout interval to a reasonable value works. 
Make it 5s + _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, + moneyPkString, + moneyPrivString, + -1, + -1, + -1, + -1, + -1, + -1, + map[string][]byte{ + TimeoutIntervalPoSKey: UintToBuf(5000), + }, + true, + mempool) + require.NoError(err) + utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) + require.NoError(err) + require.Equal(utxoView.GetCurrentGlobalParamsEntry().TimeoutIntervalMillisecondsPoS, uint64(5000)) + } } func TestBalanceModelBasicTransfers(t *testing.T) { diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 7cf9558b3..314f9eb27 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -4251,6 +4251,12 @@ type GlobalParamsEntry struct { // MaxTxnSizeBytesPoS is the maximum size of a transaction in bytes allowed. MaxTxnSizeBytesPoS uint64 + + // BlockProductionIntervalMillisecondsPoS is the time in milliseconds to produce blocks. + BlockProductionIntervalMillisecondsPoS uint64 + + // TimeoutIntervalMillisecondsPoS is the time in milliseconds to wait before timing out a view. + TimeoutIntervalMillisecondsPoS uint64 } func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { @@ -4279,6 +4285,8 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { MaxBlockSizeBytesPoS: gp.MaxBlockSizeBytesPoS, SoftMaxBlockSizeBytesPoS: gp.SoftMaxBlockSizeBytesPoS, MaxTxnSizeBytesPoS: gp.MaxTxnSizeBytesPoS, + BlockProductionIntervalMillisecondsPoS: gp.BlockProductionIntervalMillisecondsPoS, + TimeoutIntervalMillisecondsPoS: gp.TimeoutIntervalMillisecondsPoS, } } @@ -4312,6 +4320,8 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.MaxBlockSizeBytesPoS)...) data = append(data, UintToBuf(gp.SoftMaxBlockSizeBytesPoS)...) data = append(data, UintToBuf(gp.MaxTxnSizeBytesPoS)...) + data = append(data, UintToBuf(gp.BlockProductionIntervalMillisecondsPoS)...) + data = append(data, UintToBuf(gp.TimeoutIntervalMillisecondsPoS)...) 
} return data } @@ -4420,6 +4430,14 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MaxTxnSizeBytesPoS") } + gp.BlockProductionIntervalMillisecondsPoS, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading BlockProductionIntervalMillisecondsPoS") + } + gp.TimeoutIntervalMillisecondsPoS, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading TimeoutIntervalMillisecondsPoS") + } } return nil } diff --git a/lib/constants.go b/lib/constants.go index c1cab75f0..cf164103e 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -814,6 +814,14 @@ type DeSoParams struct { // This is the initial value for the maximum txn size in bytes that we allow for txns in PoS blocks. DefaultMaxTxnSizeBytesPoS uint64 + // DefaultBlockProductionIntervalMillisecondsPoS is the default value for GlobalParamsEntry.BlockProductionIntervalMillisecondsPoS. + // This is the initial value for the interval between producing blocks. + DefaultBlockProductionIntervalMillisecondsPoS uint64 + + // DefaultTimeoutIntervalMillisecondsPoS is the default value for GlobalParamsEntry.TimeoutIntervalMillisecondsPoS. + // This is the initial value for the interval between timing out a view. + DefaultTimeoutIntervalMillisecondsPoS uint64 + // HandshakeTimeoutMicroSeconds is the timeout for the peer handshake certificate. The default value is 15 minutes. HandshakeTimeoutMicroSeconds uint64 @@ -1308,6 +1316,12 @@ var DeSoMainnetParams = DeSoParams{ // The maximum size for a single txn in PoS. DefaultMaxTxnSizeBytesPoS: 25000, // 25KB TODO: verify this is a sane value. + // The interval between producing blocks. + DefaultBlockProductionIntervalMillisecondsPoS: 1500, // 1.5s TODO: verify this is a sane value. + + // The interval between timing out a view. 
+ DefaultTimeoutIntervalMillisecondsPoS: 30000, // 30s TODO: verify this is a sane value. + // The peer handshake certificate timeout. HandshakeTimeoutMicroSeconds: uint64(900000000), @@ -1612,6 +1626,12 @@ var DeSoTestnetParams = DeSoParams{ // The maximum size for a single txn in PoS. DefaultMaxTxnSizeBytesPoS: 25000, // 25KB TODO: verify this is a sane value. + // The interval between producing blocks. + DefaultBlockProductionIntervalMillisecondsPoS: 1500, // 1.5s TODO: verify this is a sane value. + + // The interval between timing out a view. + DefaultTimeoutIntervalMillisecondsPoS: 30000, // 30s TODO: verify this is a sane value. + // The peer handshake certificate timeout. HandshakeTimeoutMicroSeconds: uint64(900000000), @@ -1680,6 +1700,8 @@ const ( MaxBlockSizeBytesPoSKey = "MaxBlockSizeBytesPoS" SoftMaxBlockSizeBytesPoSKey = "SoftMaxBlockSizeBytesPoS" MaxTxnSizeBytesPoSKey = "MaxTxnSizeBytesPoS" + BlockProductionIntervalPoSKey = "BlockProductionIntervalPoS" + TimeoutIntervalPoSKey = "TimeoutIntervalPoS" DiamondLevelKey = "DiamondLevel" DiamondPostHashKey = "DiamondPostHash" @@ -1800,6 +1822,12 @@ const ( // be configured to compute. This is a safety measure to prevent the fee bucket from being // too small and causing issues with the mempool. MinFeeBucketSize = 10 + // Min/MaxBlockProductionIntervalMillisecondsPoS - Min/max value to which the block production interval can be set. + MinBlockProductionIntervalMillisecondsPoS = 1000 // 1s TODO: Verify this is a sane value. + MaxBlockProductionIntervalMillisecondsPoS = 10000 // 10s TODO: Verify this is a sane value. + // Min/MaxTimeoutIntervalMillisecondsPoS - Min/max value to which the timeout interval can be set. + MinTimeoutIntervalMillisecondsPoS = 1000 // 1s TODO: Verify this is a sane value. + MaxTimeoutIntervalMillisecondsPoS = 60000 // 60s TODO: Verify this is a sane value. 
// DefaultMaxNonceExpirationBlockHeightOffset - default value to which the MaxNonceExpirationBlockHeightOffset // is set to before specified by ParamUpdater. diff --git a/lib/errors.go b/lib/errors.go index 2c072b79f..655dcdecb 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -237,6 +237,10 @@ const ( RuleErrorMaxTxnSizeBytesTooHigh RuleError = "RuleErrorMaxTxnSizeBytesTooHigh" RuleErrorMaxTxnSizeBytesExceedsMaxBlockSizeBytes RuleError = "RuleErrorMaxTxnSizeBytesExceedsMaxBlockSizeBytes" RuleErrorFeeBucketSizeTooSmall RuleError = "RuleErrorFeeBucketSizeTooSmall" + RuleErrorBlockProductionIntervalPoSTooLow RuleError = "RuleErrorBlockProductionIntervalPoSTooLow" + RuleErrorBlockProductionIntervalPoSTooHigh RuleError = "RuleErrorBlockProductionIntervalPoSTooHigh" + RuleErrorTimeoutIntervalPoSTooLow RuleError = "RuleErrorTimeoutIntervalPoSTooLow" + RuleErrorTimeoutIntervalPoSTooHigh RuleError = "RuleErrorTimeoutIntervalPoSTooHigh" // DeSo Diamonds RuleErrorBasicTransferHasDiamondPostHashWithoutDiamondLevel RuleError = "RuleErrorBasicTransferHasDiamondPostHashWithoutDiamondLevel" diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 5ef3e9594..14acfcce4 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -13,15 +13,13 @@ import ( ) type FastHotStuffConsensus struct { - lock sync.RWMutex - networkManager *NetworkManager - blockchain *Blockchain - fastHotStuffEventLoop consensus.FastHotStuffEventLoop - mempool Mempool - params *DeSoParams - signer *BLSSigner - blockProductionIntervalMilliseconds uint64 - timeoutBaseDurationMilliseconds uint64 + lock sync.RWMutex + networkManager *NetworkManager + blockchain *Blockchain + fastHotStuffEventLoop consensus.FastHotStuffEventLoop + mempool Mempool + params *DeSoParams + signer *BLSSigner } func NewFastHotStuffConsensus( @@ -30,18 +28,14 @@ func NewFastHotStuffConsensus( blockchain *Blockchain, mempool Mempool, signer *BLSSigner, - blockProductionIntervalMilliseconds uint64, - 
timeoutBaseDurationMilliseconds uint64, ) *FastHotStuffConsensus { return &FastHotStuffConsensus{ - networkManager: networkManager, - blockchain: blockchain, - fastHotStuffEventLoop: consensus.NewFastHotStuffEventLoop(), - mempool: mempool, - params: params, - signer: signer, - blockProductionIntervalMilliseconds: blockProductionIntervalMilliseconds, - timeoutBaseDurationMilliseconds: timeoutBaseDurationMilliseconds, + networkManager: networkManager, + blockchain: blockchain, + fastHotStuffEventLoop: consensus.NewFastHotStuffEventLoop(), + mempool: mempool, + params: params, + signer: signer, } } @@ -98,9 +92,20 @@ func (fc *FastHotStuffConsensus) Start() error { return errors.Errorf("FastHotStuffConsensus.Start: Error fetching validator lists for safe blocks: %v", err) } + uncommittedTipView, err := fc.blockchain.GetUncommittedTipView() + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching uncommitted tip view: %v", err) + } + + currentSnapshotGlobalParams, err := uncommittedTipView.GetCurrentSnapshotGlobalParamsEntry() + if err != nil { + return errors.Errorf("FastHotStuffConsensus.Start: Error fetching current snapshot global params: %v", err) + } + // Compute the block production internal and timeout base duration as time.Duration - blockProductionInterval := time.Millisecond * time.Duration(fc.blockProductionIntervalMilliseconds) - timeoutBaseDuration := time.Millisecond * time.Duration(fc.timeoutBaseDurationMilliseconds) + blockProductionInterval := time.Millisecond * + time.Duration(currentSnapshotGlobalParams.BlockProductionIntervalMillisecondsPoS) + timeoutBaseDuration := time.Millisecond * time.Duration(currentSnapshotGlobalParams.TimeoutIntervalMillisecondsPoS) // Initialize the event loop. This should never fail. If it does, we return the error to the caller. // The caller handle the error and decide when to retry. 
@@ -664,8 +669,26 @@ func (fc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ return nil, errors.Errorf("Error fetching validator lists for tip block: %v", err) } + tipBlockHash, err := tipBlock.Hash() + if err != nil { + return nil, errors.Errorf("Error hashing tip block: %v", err) + } + + utxoView, err := fc.blockchain.getUtxoViewAtBlockHash(*tipBlockHash) + if err != nil { + return nil, errors.Errorf("Error fetching UtxoView for tip block: %v", err) + } + snapshotGlobalParams, err := utxoView.GetCurrentSnapshotGlobalParamsEntry() + if err != nil { + return nil, errors.Errorf("Error fetching snapshot global params: %v", err) + } // Pass the new tip and safe blocks to the FastHotStuffEventLoop - if err = fc.fastHotStuffEventLoop.ProcessTipBlock(tipBlockWithValidators[0], safeBlocksWithValidators); err != nil { + if err = fc.fastHotStuffEventLoop.ProcessTipBlock( + tipBlockWithValidators[0], + safeBlocksWithValidators, + time.Millisecond*time.Duration(snapshotGlobalParams.BlockProductionIntervalMillisecondsPoS), + time.Millisecond*time.Duration(snapshotGlobalParams.TimeoutIntervalMillisecondsPoS), + ); err != nil { return nil, errors.Errorf("Error processing tip block locally: %v", err) } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 0d6073213..b1cb70d9f 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -169,6 +169,12 @@ func MergeGlobalParamEntryDefaults(globalParamsEntry *GlobalParamsEntry, params if globalParamsEntryCopy.MaxTxnSizeBytesPoS == 0 { globalParamsEntryCopy.MaxTxnSizeBytesPoS = params.DefaultMaxTxnSizeBytesPoS } + if globalParamsEntryCopy.BlockProductionIntervalMillisecondsPoS == 0 { + globalParamsEntryCopy.BlockProductionIntervalMillisecondsPoS = params.DefaultBlockProductionIntervalMillisecondsPoS + } + if globalParamsEntryCopy.TimeoutIntervalMillisecondsPoS == 0 { + globalParamsEntryCopy.TimeoutIntervalMillisecondsPoS = params.DefaultTimeoutIntervalMillisecondsPoS + 
} // Return the merged result. return globalParamsEntryCopy diff --git a/lib/server.go b/lib/server.go index 74cdbfcf4..86bbeac53 100644 --- a/lib/server.go +++ b/lib/server.go @@ -413,8 +413,6 @@ func NewServer( _mempoolMaxValidationViewConnects uint64, _transactionValidationRefreshIntervalMillis uint64, _augmentedBlockViewRefreshIntervalMillis uint64, - _posBlockProductionIntervalMilliseconds uint64, - _posTimeoutBaseDurationMilliseconds uint64, _stateSyncerMempoolTxnSyncLimit uint64, ) ( _srv *Server, @@ -640,8 +638,6 @@ func NewServer( _chain, _posMempool, _blsKeystore.GetSigner(), - _posBlockProductionIntervalMilliseconds, - _posTimeoutBaseDurationMilliseconds, ) // On testnet, if the node is configured to be a PoW block producer, and it is configured // to be also a PoS validator, then we attach block mined listeners to the miner to kick From 81b5c7e3e42b38a4e44a939624fc15728f1fd086 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 29 Mar 2024 16:02:45 -0400 Subject: [PATCH 576/762] Fix event loop test (#1159) --- consensus/event_loop_test.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index cdba7e439..96e9465f5 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -156,7 +156,7 @@ func TestProcessTipBlock(t *testing.T) { // Test ProcessTipBlock() function when event loop is not running { tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} - err := fc.ProcessTipBlock(tipBlock, []BlockWithValidatorList{tipBlock}) + err := fc.ProcessTipBlock(tipBlock, []BlockWithValidatorList{tipBlock}, oneHourInNanoSecs, oneHourInNanoSecs) require.Error(t, err) } @@ -168,6 +168,8 @@ func TestProcessTipBlock(t *testing.T) { err := fc.ProcessTipBlock( BlockWithValidatorList{nil, createDummyValidatorList()}, // tip []BlockWithValidatorList{{createDummyBlock(2), 
createDummyValidatorList()}}, // safeBlocks + oneHourInNanoSecs, + oneHourInNanoSecs, ) require.Error(t, err) } @@ -177,6 +179,8 @@ func TestProcessTipBlock(t *testing.T) { err := fc.ProcessTipBlock( BlockWithValidatorList{createDummyBlock(2), nil}, // tip []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks + oneHourInNanoSecs, + oneHourInNanoSecs, ) require.Error(t, err) } @@ -186,6 +190,8 @@ func TestProcessTipBlock(t *testing.T) { err := fc.ProcessTipBlock( BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip []BlockWithValidatorList{{nil, createDummyValidatorList()}}, // safeBlocks + oneHourInNanoSecs, + oneHourInNanoSecs, ) require.Error(t, err) } @@ -195,6 +201,8 @@ func TestProcessTipBlock(t *testing.T) { err := fc.ProcessTipBlock( BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()}, // tip []BlockWithValidatorList{{createDummyBlock(2), nil}}, // safeBlocks + oneHourInNanoSecs, + oneHourInNanoSecs, ) require.Error(t, err) } @@ -252,7 +260,7 @@ func TestProcessTipBlock(t *testing.T) { tipBlock := BlockWithValidatorList{nextBlock, createDummyValidatorList()} - err := fc.ProcessTipBlock(tipBlock, []BlockWithValidatorList{tipBlock}) + err := fc.ProcessTipBlock(tipBlock, []BlockWithValidatorList{tipBlock}, oneHourInNanoSecs, oneHourInNanoSecs) require.NoError(t, err) require.Equal(t, nextBlock.GetBlockHash().GetValue(), fc.tip.block.GetBlockHash().GetValue()) From 82b5c618b8a494c908b0fe109d4eb59012b91435 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 29 Mar 2024 16:06:32 -0400 Subject: [PATCH 577/762] Remove FailingTxn operation type (#1157) --- lib/block_view_types.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 314f9eb27..159868dc4 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -686,8 +686,7 @@ const ( 
OperationTypeStakeDistributionPayToBalance OperationType = 50 OperationTypeSetValidatorLastActiveAtEpoch OperationType = 51 OperationTypeAtomicTxnsWrapper OperationType = 52 - OperationTypeFailingTxn OperationType = 53 - // NEXT_TAG = 54 + // NEXT_TAG = 53 ) func (op OperationType) String() string { From 70412503020597f3cf516183b9a437a1b5fe02ad Mon Sep 17 00:00:00 2001 From: diamondhands Date: Thu, 21 Mar 2024 21:05:16 -0700 Subject: [PATCH 578/762] Support bulk block download from peer In order to increase the number of blocks each node can process per second, we are upgrading our legacy block processing from fetching a single block at a time to fetching blocks in batches. --- lib/block_view_validator.go | 2 +- lib/network.go | 110 ++++++++++++++++++++++++++++++++++-- lib/network_test.go | 18 +++++- lib/peer.go | 77 +++++++++++++++++-------- lib/pos_server_regtest.go | 4 +- lib/server.go | 77 +++++++++++++++++++++++-- 6 files changed, 247 insertions(+), 41 deletions(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 1df4aa8b6..17d2c96ee 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1756,7 +1756,7 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata( for _, domain := range metadata.Domains { _, err := url.ParseRequestURI(string(domain)) if err != nil { - return fmt.Errorf("UtxoView.IsValidRegisterAsValidatorMetadata: %s: %v", RuleErrorValidatorInvalidDomain, domain) + return fmt.Errorf("UtxoView.IsValidRegisterAsValidatorMetadata: %s: %v", RuleErrorValidatorInvalidDomain, string(domain)) } domainStrings = append(domainStrings, string(domain)) } diff --git a/lib/network.go b/lib/network.go index 58d4680bf..b559ed7ca 100644 --- a/lib/network.go +++ b/lib/network.go @@ -76,6 +76,7 @@ const ( MsgTypeGetHeaders MsgType = 6 // MsgTypeHeaderBundle contains headers from a peer. 
MsgTypeHeaderBundle MsgType = 7 + MsgTypeBlockBundle MsgType = 22 MsgTypePing MsgType = 8 MsgTypePong MsgType = 9 MsgTypeInv MsgType = 10 @@ -100,7 +101,7 @@ const ( MsgTypeValidatorVote MsgType = 20 MsgTypeValidatorTimeout MsgType = 21 - // NEXT_TAG = 22 + // NEXT_TAG = 23 // Below are control messages used to signal to the Server from other parts of // the code but not actually sent among peers. @@ -145,6 +146,8 @@ func (msgType MsgType) String() string { return "GET_HEADERS" case MsgTypeHeaderBundle: return "HEADER_BUNDLE" + case MsgTypeBlockBundle: + return "BLOCK_BUNDLE" case MsgTypePing: return "PING" case MsgTypePong: @@ -813,6 +816,8 @@ func NewMessage(msgType MsgType) DeSoMessage { return &MsgDeSoGetHeaders{} case MsgTypeHeaderBundle: return &MsgDeSoHeaderBundle{} + case MsgTypeBlockBundle: + return &MsgDeSoBlockBundle{} case MsgTypeAddr: return &MsgDeSoAddr{} case MsgTypeGetAddr: @@ -1037,6 +1042,90 @@ func (msg *MsgDeSoHeaderBundle) String() string { return fmt.Sprintf("Num Headers: %v, Tip Height: %v, Tip Hash: %v, Headers: %v", len(msg.Headers), msg.TipHeight, msg.TipHash, msg.Headers) } +// ================================================================== +// BLOCK_BUNDLE message +// ================================================================== + +type MsgDeSoBlockBundle struct { + Blocks []*MsgDeSoBlock + TipHash *BlockHash + TipHeight uint64 +} + +func (msg *MsgDeSoBlockBundle) GetMsgType() MsgType { + return MsgTypeBlockBundle +} + +func (msg *MsgDeSoBlockBundle) ToBytes(preSignature bool) ([]byte, error) { + data := []byte{} + + // Encode the number of blocks in the bundle. + data = append(data, UintToBuf(uint64(len(msg.Blocks)))...) + + // Encode all the blocks. + for _, block := range msg.Blocks { + blockBytes, err := block.ToBytes(preSignature) + if err != nil { + return nil, errors.Wrapf(err, "MsgDeSoBlockBundle.ToBytes: Problem encoding block") + } + data = append(data, EncodeByteArray(blockBytes)...) + } + + // Encode the tip hash. 
+ data = append(data, msg.TipHash[:]...) + + // Encode the tip height. + data = append(data, UintToBuf(uint64(msg.TipHeight))...) + + return data, nil +} + +func (msg *MsgDeSoBlockBundle) FromBytes(data []byte) error { + rr := bytes.NewReader(data) + retBundle := NewMessage(MsgTypeBlockBundle).(*MsgDeSoBlockBundle) + + // Read in the number of block in the bundle. + numBlocks, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoBlockBundle.FromBytes: Problem decoding number of block") + } + + // Read in all of the blocks. + for ii := uint64(0); ii < numBlocks; ii++ { + blockBytes, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "MsgDeSoBlockBundle.FromBytes: Problem decoding block: ") + } + retBlock := &MsgDeSoBlock{} + if err := retBlock.FromBytes(blockBytes); err != nil { + return errors.Wrapf(err, "MsgDeSoBlock.FromBytes: ") + } + + retBundle.Blocks = append(retBundle.Blocks, retBlock) + } + + // Read in the tip hash. + retBundle.TipHash = &BlockHash{} + _, err = io.ReadFull(rr, retBundle.TipHash[:]) + if err != nil { + return errors.Wrapf(err, "MsgDeSoBlockBundle.FromBytes:: Error reading TipHash: ") + } + + // Read in the tip height. 
+ tipHeight, err := ReadUvarint(rr) + if err != nil || tipHeight > math.MaxUint32 { + return fmt.Errorf("MsgDeSoBlockBundle.FromBytes: %v", err) + } + retBundle.TipHeight = tipHeight + + *msg = *retBundle + return nil +} + +func (msg *MsgDeSoBlockBundle) String() string { + return fmt.Sprintf("Num Blocks: %v, Tip Height: %v, Tip Hash: %v, Blocks: %v", len(msg.Blocks), msg.TipHeight, msg.TipHash, msg.Blocks) +} + // ================================================================== // GetBlocks Messages // ================================================================== @@ -1052,9 +1141,11 @@ func (msg *MsgDeSoGetBlocks) GetMsgType() MsgType { func (msg *MsgDeSoGetBlocks) ToBytes(preSignature bool) ([]byte, error) { data := []byte{} - if len(msg.HashList) > MaxBlocksInFlight { + // We can safely increase this without breaking backwards-compatibility because old + // nodes will never send us more hashes than this. + if len(msg.HashList) > MaxBlocksInFlightPoS { return nil, fmt.Errorf("MsgDeSoGetBlocks.ToBytes: Blocks requested %d "+ - "exceeds MaxBlocksInFlight %d", len(msg.HashList), MaxBlocksInFlight) + "exceeds MaxBlocksInFlightPoS %d", len(msg.HashList), MaxBlocksInFlightPoS) } // Encode the number of hashes. @@ -1076,9 +1167,11 @@ func (msg *MsgDeSoGetBlocks) FromBytes(data []byte) error { return errors.Wrapf(err, "MsgDeSoGetBlocks.FromBytes: Problem "+ "reading number of block hashes requested") } - if numHashes > MaxBlocksInFlight { + // We can safely increase this without breaking backwards-compatibility because old + // nodes will never send us more hashes than this. + if numHashes > MaxBlocksInFlightPoS { return fmt.Errorf("MsgDeSoGetBlocks.FromBytes: HashList length (%d) "+ - "exceeds maximum allowed (%d)", numHashes, MaxBlocksInFlight) + "exceeds maximum allowed (%d)", numHashes, MaxBlocksInFlightPoS) } // Read in all the hashes. @@ -1344,6 +1437,13 @@ const ( // MaxBlocksInFlight is the maximum number of blocks that can be requested // from a peer. 
MaxBlocksInFlight = 250 + // After PoS, we have blocks every second rather than every five minutes, and blocks + // are smaller. As such, we can safely increase this limit. + // + // TODO: This is a pretty large value. Blocks were processing at ~80 blocks per second + // when I last ran it. If we can't get the blocks per second to a higher value, then + // we should probably decrease this value. + MaxBlocksInFlightPoS = 25000 ) // InvType represents the allowed types of inventory vectors. See InvVect. diff --git a/lib/network_test.go b/lib/network_test.go index 2bccb33e9..47dd12339 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -3,8 +3,6 @@ package lib import ( "bytes" "encoding/hex" - "github.com/deso-protocol/core/bls" - "golang.org/x/crypto/sha3" "math/big" "math/rand" "reflect" @@ -13,6 +11,9 @@ import ( "testing" "time" + "github.com/deso-protocol/core/bls" + "golang.org/x/crypto/sha3" + "github.com/holiman/uint256" "github.com/btcsuite/btcd/btcec" @@ -615,6 +616,19 @@ func TestBlockSerialize(t *testing.T) { require.Equal(*block, *testBlock) } + // Also test MsgDeSoBlockBundle + bundle := &MsgDeSoBlockBundle{ + Blocks: expectedBlocksToTest, + // Just fill any old data for these + TipHash: expectedBlocksToTest[0].Header.PrevBlockHash, + TipHeight: expectedBlocksToTest[0].Header.Height, + } + bb, err := bundle.ToBytes(false) + require.NoError(err) + testBundle := &MsgDeSoBlockBundle{} + err = testBundle.FromBytes(bb) + require.NoError(err) + require.Equal(bundle, testBundle) } func TestBlockSerializeNoBlockProducerInfo(t *testing.T) { diff --git a/lib/peer.go b/lib/peer.go index b68a9481d..727c252c9 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -2,13 +2,14 @@ package lib import ( "fmt" - "github.com/decred/dcrd/lru" "net" "sort" "sync" "sync/atomic" "time" + "github.com/decred/dcrd/lru" + "github.com/btcsuite/btcd/wire" "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" @@ -394,29 +395,52 @@ func (pp *Peer) HandleGetBlocks(msg 
*MsgDeSoGetBlocks) { return } - // For each block the Peer has requested, fetch it and queue it to - // be sent. It takes some time to fetch the blocks which is why we - // do it in a goroutine. This might also block if the Peer's send - // queue is full. - // - // Note that the requester should generally ask for the blocks in the - // order they'd like to receive them as we will typically honor this - // ordering. - // - // With HyperSync there is a potential that a node will request blocks that we haven't yet stored, although we're - // fully synced. This can happen to archival nodes that haven't yet downloaded all historical blocks. If a GetBlock - // is sent to a non-archival node for blocks that we don't have, then the peer is misbehaving and should be disconnected. - for _, hashToSend := range msg.HashList { - blockToSend := pp.srv.blockchain.GetBlock(hashToSend) - if blockToSend == nil { - // Don't ask us for blocks before verifying that we have them with a - // GetHeaders request. - glog.Errorf("Server._handleGetBlocks: Disconnecting peer %v because "+ - "she asked for a block with hash %v that we don't have", pp, msg.HashList[0]) - pp.Disconnect() - return + // Before Version2 we would send each block in a single message, which was quite + // slow. Now when we receive a GetBlocks message we will send the blocks in large + // batches, which is much faster. + if pp.Params.ProtocolVersion == ProtocolVersion2 { + allBlocks := MsgDeSoBlockBundle{} + for _, hashToSend := range msg.HashList { + blockToSend := pp.srv.blockchain.GetBlock(hashToSend) + if blockToSend == nil { + // Don't ask us for blocks before verifying that we have them with a + // GetHeaders request. 
+ glog.Errorf("Server._handleGetBlocks: Disconnecting peer %v because "+ + "she asked for a block with hash %v that we don't have", pp, msg.HashList[0]) + pp.Disconnect() + return + } + allBlocks.Blocks = append(allBlocks.Blocks, blockToSend) + } + allBlocks.TipHash = pp.srv.blockchain.blockTip().Hash + allBlocks.TipHeight = uint64(pp.srv.blockchain.blockTip().Height) + pp.AddDeSoMessage(&allBlocks, false) + + } else { + // For each block the Peer has requested, fetch it and queue it to + // be sent. It takes some time to fetch the blocks which is why we + // do it in a goroutine. This might also block if the Peer's send + // queue is full. + // + // Note that the requester should generally ask for the blocks in the + // order they'd like to receive them as we will typically honor this + // ordering. + // + // With HyperSync there is a potential that a node will request blocks that we haven't yet stored, although we're + // fully synced. This can happen to archival nodes that haven't yet downloaded all historical blocks. If a GetBlock + // is sent to a non-archival node for blocks that we don't have, then the peer is misbehaving and should be disconnected. + for _, hashToSend := range msg.HashList { + blockToSend := pp.srv.blockchain.GetBlock(hashToSend) + if blockToSend == nil { + // Don't ask us for blocks before verifying that we have them with a + // GetHeaders request. + glog.Errorf("Server._handleGetBlocks: Disconnecting peer %v because "+ + "she asked for a block with hash %v that we don't have", pp, msg.HashList[0]) + pp.Disconnect() + return + } + pp.AddDeSoMessage(blockToSend, false) } - pp.AddDeSoMessage(blockToSend, false) } } @@ -1007,11 +1031,14 @@ func (pp *Peer) _maybeAddBlocksToSend(msg DeSoMessage) error { // If the peer has exceeded the number of blocks she is allowed to request // then disconnect her. 
- if len(pp.blocksToSend) > MaxBlocksInFlight { + // + // We can safely increase this without breaking backwards-compatibility because old + // nodes will never send us more hashes than this. + if len(pp.blocksToSend) > MaxBlocksInFlightPoS { pp.Disconnect() return fmt.Errorf("_maybeAddBlocksToSend: Disconnecting peer %v because she requested %d "+ "blocks, which is more than the %d blocks allowed "+ - "in flight", pp, len(pp.blocksToSend), MaxBlocksInFlight) + "in flight", pp, len(pp.blocksToSend), MaxBlocksInFlightPoS) } return nil diff --git a/lib/pos_server_regtest.go b/lib/pos_server_regtest.go index 2e561d426..2aed5f984 100644 --- a/lib/pos_server_regtest.go +++ b/lib/pos_server_regtest.go @@ -26,9 +26,9 @@ func (srv *Server) submitRegtestValidatorRegistrationTxns(block *MsgDeSoBlock) { var domain string if len(srv.GetConnectionManager().listeners) == 0 { - domain = "localhost:18000" + domain = "http://localhost:18000" } - domain = srv.GetConnectionManager().listeners[0].Addr().String() + domain = "http://" + srv.GetConnectionManager().listeners[0].Addr().String() txnMeta := RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte(domain)}, diff --git a/lib/server.go b/lib/server.go index 86bbeac53..ea011abf0 100644 --- a/lib/server.go +++ b/lib/server.go @@ -884,7 +884,14 @@ func (srv *Server) GetBlocksToStore(pp *Peer) { // SyncStateSyncingHeaders. func (srv *Server) GetBlocks(pp *Peer, maxHeight int) { // Fetch as many blocks as we can from this peer. - numBlocksToFetch := MaxBlocksInFlight - len(pp.requestedBlocks) + // If our peer is on PoS then we can safely request a lot more blocks from them in + // each flight. 
+ maxBlocksInFlight := MaxBlocksInFlight + if pp.Params.ProtocolVersion >= ProtocolVersion2 { + maxBlocksInFlight = MaxBlocksInFlightPoS + } + numBlocksToFetch := maxBlocksInFlight - len(pp.requestedBlocks) + blockNodesToFetch := srv.blockchain.GetBlockNodesToFetch( numBlocksToFetch, maxHeight, pp.requestedBlocks) if len(blockNodesToFetch) == 0 { @@ -2074,10 +2081,14 @@ func (srv *Server) _logAndDisconnectPeer(pp *Peer, blockMsg *MsgDeSoBlock, suffi pp.Disconnect() } -func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { - glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlock: Received block ( %v / %v ) from Peer %v", - blk.Header.Height, srv.blockchain.headerTip().Height, pp))) - +// This function handles a single block that we receive from our peer. Originally, we would receive blocks +// one by one from our peer. However, now we receive a batch of blocks all at once via _handleBlockBundle, +// which then calls this function to process them one by one on our side. +// +// isLastBlock indicates that this is the last block in the list of blocks we received back +// via a MsgDeSoBlockBundle message. When we receive a single block, isLastBlock will automatically +// be true, which will give it its old single-block behavior. +func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { srv.timer.Start("Server._handleBlock: General") // Pull out the header for easy access. @@ -2190,6 +2201,13 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { srv.timer.Print("Server._handleBlock: General") srv.timer.Print("Server._handleBlock: Process Block") + // If we're not at the last block yet, then we're done. The rest of this code is only + // relevant after we've connected the last block, and it generally involves fetching + // more data from our peer. + if !isLastBlock { + return + } + // We shouldn't be receiving blocks while syncing headers, but we can end up here // if it took longer than MaxTipAge to sync blocks to this point. 
We'll revert to // syncing headers and then resume syncing blocks once we're current again. @@ -2262,6 +2280,50 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { srv.tryTransitionToFastHotStuffConsensus() } +func (srv *Server) _handleBlockBundle(pp *Peer, bundle *MsgDeSoBlockBundle) { + if len(bundle.Blocks) == 0 { + glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlockBundle: Received EMPTY block bundle "+ + "at header height ( %v ) from Peer %v. Disconnecting peer since this should never happen.", + srv.blockchain.headerTip().Height, pp))) + pp.Disconnect() + return + } + glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlockBundle: Received blocks ( %v->%v / %v ) from Peer %v", + bundle.Blocks[0].Header.Height, bundle.Blocks[len(bundle.Blocks)-1].Header.Height, + srv.blockchain.headerTip().Height, pp))) + + srv.timer.Start("Server._handleBlockBundle: General") + + // TODO: We should fetch the next batch of blocks while we process this batch. + // This requires us to modify GetBlocks to take a start hash and a count + // of the number of blocks we want. Or we could make the existing GetBlocks + // take a start hash and the other node can just return as many blcoks as it + // can. + + // Process each block in the bundle. Record our blocks per second. + blockProcessingStartTime := time.Now() + for ii, blk := range bundle.Blocks { + // TODO: We should make it so that we break out if one of the blocks errors. It's just that + // _handleBlock is a legacy function that doesn't support erroring out. It's not a big deal + // though as we'll just connect all the blocks after the failed one and those blocks will also + // gracefully fail. 
+ srv._handleBlock(pp, blk, ii == len(bundle.Blocks)-1 /*isLastBlock*/) + numLogBlocks := 1000 + if ii%numLogBlocks == 0 { + glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlockBundle: Processed block ( %v / %v ) = ( %v / %v ) from Peer %v", + bundle.Blocks[ii].Header.Height, + srv.blockchain.headerTip().Height, + ii+1, len(bundle.Blocks), + pp))) + + elapsed := time.Since(blockProcessingStartTime) + if ii != 0 { + fmt.Printf("We are processing %v blocks per second\n", float64(ii)/(float64(elapsed)/1e9)) + } + } + } +} + func (srv *Server) _handleInv(peer *Peer, msg *MsgDeSoInv) { if !peer.isOutbound && srv.IgnoreInboundPeerInvMessages { glog.Infof("_handleInv: Ignoring inv message from inbound peer because "+ @@ -2561,10 +2623,13 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { srv._handleGetHeaders(serverMessage.Peer, msg) case *MsgDeSoHeaderBundle: srv._handleHeaderBundle(serverMessage.Peer, msg) + case *MsgDeSoBlockBundle: + srv._handleBlockBundle(serverMessage.Peer, msg) case *MsgDeSoGetBlocks: srv._handleGetBlocks(serverMessage.Peer, msg) case *MsgDeSoBlock: - srv._handleBlock(serverMessage.Peer, msg) + // isLastBlock is always true when we get a legacy single-block message. 
+ srv._handleBlock(serverMessage.Peer, msg, true) case *MsgDeSoGetSnapshot: srv._handleGetSnapshot(serverMessage.Peer, msg) case *MsgDeSoSnapshotData: From d78a7946b33216652a7dec28310b37fdc644708c Mon Sep 17 00:00:00 2001 From: diamondhands Date: Thu, 21 Mar 2024 21:58:15 -0700 Subject: [PATCH 579/762] Increase the number of headers we fetch from peers after the PoS upgrade --- lib/blockchain.go | 9 ++++++--- lib/network.go | 6 ++++++ lib/server.go | 15 +++++++++++++-- 3 files changed, 25 insertions(+), 5 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index a1b7f0291..25923dc91 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5,7 +5,6 @@ import ( "container/list" "encoding/hex" "fmt" - "github.com/decred/dcrd/lru" "math" "math/big" "reflect" @@ -14,6 +13,8 @@ import ( "strings" "time" + "github.com/decred/dcrd/lru" + "github.com/deso-protocol/core/collections" "github.com/google/uuid" @@ -975,11 +976,13 @@ func locateHeaders(locator []*BlockHash, stopHash *BlockHash, maxHeaders uint32, // after the genesis block will be returned // // This function is safe for concurrent access. -func (bc *Blockchain) LocateBestBlockChainHeaders(locator []*BlockHash, stopHash *BlockHash) []*MsgDeSoHeader { +func (bc *Blockchain) LocateBestBlockChainHeaders( + locator []*BlockHash, stopHash *BlockHash, maxHeaders uint32) []*MsgDeSoHeader { + // TODO: Shouldn't we hold a ChainLock here? I think it's fine though because the place // where it's currently called is single-threaded via a channel in server.go. Going to // avoid messing with it for now. 
- headers := locateHeaders(locator, stopHash, MaxHeadersPerMsg, + headers := locateHeaders(locator, stopHash, maxHeaders, bc.blockIndexByHash, bc.bestChain, bc.bestChainMap) return headers diff --git a/lib/network.go b/lib/network.go index b559ed7ca..f1790d088 100644 --- a/lib/network.go +++ b/lib/network.go @@ -44,6 +44,12 @@ var MaxBlockRewardDataSizeBytes = 250 // MaxHeadersPerMsg is the maximum numbers allowed in a GetHeaders response. var MaxHeadersPerMsg = uint32(2000) +// With PoS we can afford to download more headers in each batch. +// +// TODO: I set this number really high because it's easier to lower it than it is +// to increase it (increasing requires everyone to upgrade). +var MaxHeadersPerMsgPos = uint32(200000) + // MaxBitcoinHeadersPerMsg is the maximum number of headers Bitcoin allows in // a getheaders response. It is used to determine whether a node has more headers // to give us. diff --git a/lib/server.go b/lib/server.go index ea011abf0..69ba55688 100644 --- a/lib/server.go +++ b/lib/server.go @@ -471,6 +471,9 @@ func NewServer( // manager. It just takes and keeps track of the median time among our peers so // we can keep a consistent clock. timesource := chainlib.NewMedianTime() + // We need to add an initial time sample or else it will return the zero time, which + // messes things up during initialization. + timesource.AddTimeSample("my-time", time.Now()) // Create a new connection manager but note that it won't be initialized until Start(). _incomingMessages := make(chan *ServerMessage, _params.ServerMessageChannelSize+(_targetOutboundPeers+_maxInboundPeers)*3) @@ -720,7 +723,11 @@ func (srv *Server) _handleGetHeaders(pp *Peer, msg *MsgDeSoGetHeaders) { // Use the block after the genesis block if no other blocks in the // provided locator are known. This does mean the client will start // over with the genesis block if unknown block locators are provided. 
- headers := srv.blockchain.LocateBestBlockChainHeaders(msg.BlockLocator, msg.StopHash) + maxHeadersPerMsg := MaxHeadersPerMsg + if pp.Params.ProtocolVersion >= ProtocolVersion2 { + maxHeadersPerMsg = MaxHeadersPerMsgPos + } + headers := srv.blockchain.LocateBestBlockChainHeaders(msg.BlockLocator, msg.StopHash, maxHeadersPerMsg) // Send found headers to the requesting peer. blockTip := srv.blockchain.blockTip() @@ -1012,7 +1019,11 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // On the other hand, if the request contains MaxHeadersPerMsg, it is highly // likely we have not hit the tip of our peer's chain, and so requesting more // headers from the peer would likely be useful. - if uint32(len(msg.Headers)) < MaxHeadersPerMsg || srv.blockchain.isTipMaxed(srv.blockchain.headerTip()) { + maxHeadersPerMsg := MaxHeadersPerMsg + if pp.Params.ProtocolVersion >= ProtocolVersion2 { + maxHeadersPerMsg = MaxHeadersPerMsgPos + } + if uint32(len(msg.Headers)) < maxHeadersPerMsg || srv.blockchain.isTipMaxed(srv.blockchain.headerTip()) { // If we have exhausted the peer's headers but our header chain still isn't // current it means the peer we chose isn't current either. So disconnect // from her and try to sync with someone else. 
From 8e2bec4e9c2e6f56fc8683523061676cbee0d7a1 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Fri, 29 Mar 2024 15:29:57 -0400 Subject: [PATCH 580/762] Add version field to bulk block download message type --- lib/network.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/lib/network.go b/lib/network.go index f1790d088..38cb0bb2d 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1053,6 +1053,7 @@ func (msg *MsgDeSoHeaderBundle) String() string { // ================================================================== type MsgDeSoBlockBundle struct { + Version uint8 Blocks []*MsgDeSoBlock TipHash *BlockHash TipHeight uint64 @@ -1065,6 +1066,9 @@ func (msg *MsgDeSoBlockBundle) GetMsgType() MsgType { func (msg *MsgDeSoBlockBundle) ToBytes(preSignature bool) ([]byte, error) { data := []byte{} + // Encode the version of the bundle. + data = append(data, msg.Version) + // Encode the number of blocks in the bundle. data = append(data, UintToBuf(uint64(len(msg.Blocks)))...) @@ -1087,9 +1091,22 @@ func (msg *MsgDeSoBlockBundle) ToBytes(preSignature bool) ([]byte, error) { } func (msg *MsgDeSoBlockBundle) FromBytes(data []byte) error { + var err error + rr := bytes.NewReader(data) retBundle := NewMessage(MsgTypeBlockBundle).(*MsgDeSoBlockBundle) + // Read the version of the bundle. + retBundle.Version, err = rr.ReadByte() + if err != nil { + return errors.Wrapf(err, "MsgDeSoBlockBundle.FromBytes: Problem decoding version") + } + + // For now, only version is supported for the block bundle message type. + if retBundle.Version != 0 { + return fmt.Errorf("MsgDeSoBlockBundle.FromBytes: Unsupported version %d", retBundle.Version) + } + // Read in the number of block in the bundle. 
numBlocks, err := ReadUvarint(rr) if err != nil { From 43dc82f98626d8e7e9712aa0e337b79b3351dc08 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 29 Mar 2024 16:12:33 -0400 Subject: [PATCH 581/762] FastHostStuff -> FastHotStuff (#1158) --- lib/server.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/server.go b/lib/server.go index 86bbeac53..b7b0316ab 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2590,11 +2590,11 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { } } -func (srv *Server) _handleFastHostStuffConsensusEvent(event *consensus.FastHotStuffEvent) { +func (srv *Server) _handleFastHotStuffConsensusEvent(event *consensus.FastHotStuffEvent) { // This should never happen. If the consensus message handler isn't defined, then something went // wrong during the node initialization. We log it and return early to avoid panicking. if srv.fastHotStuffConsensus == nil { - glog.Errorf("Server._handleFastHostStuffConsensusEvent: Consensus controller is nil") + glog.Errorf("Server._handleFastHotStuffConsensusEvent: Consensus controller is nil") return } @@ -2673,7 +2673,7 @@ func (srv *Server) _startConsensus() { case consensusEvent := <-srv.getFastHotStuffConsensusEventChannel(): { glog.V(2).Infof("Server._startConsensus: Received consensus event: %s", consensusEvent.ToString()) - srv._handleFastHostStuffConsensusEvent(consensusEvent) + srv._handleFastHotStuffConsensusEvent(consensusEvent) } case serverMessage := <-srv.incomingMessages: From 3825ee98432bec98ed9402d9bd3804a42ccc243e Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 29 Mar 2024 17:36:58 -0400 Subject: [PATCH 582/762] Increase Regtest PoS Fork Height to 100 (#1161) --- lib/constants.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/constants.go b/lib/constants.go index cf164103e..f12dce4c3 100644 --- a/lib/constants.go +++ 
b/lib/constants.go @@ -863,9 +863,9 @@ var RegtestForkHeights = ForkHeights{ BalanceModelBlockHeight: uint32(1), ProofOfStake1StateSetupBlockHeight: uint32(1), - // For convenience, we set the PoS cutover block height to 50 + // For convenience, we set the PoS cutover block height to 100 // so that enough DESO is minted to allow for testing. - ProofOfStake2ConsensusCutoverBlockHeight: uint32(50), + ProofOfStake2ConsensusCutoverBlockHeight: uint32(100), LockupsBlockHeight: uint32(1), From fb9bf2f095dcaaf1979c05215e7b3559de08dbcb Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 29 Mar 2024 18:08:07 -0400 Subject: [PATCH 583/762] Adjust Regtest Default Params (#1162) --- lib/constants.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/lib/constants.go b/lib/constants.go index f12dce4c3..06e3a5cbb 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -900,7 +900,6 @@ func (params *DeSoParams) EnableRegtest() { params.TimeBetweenDifficultyRetargets = 6 * time.Second // Make sure we don't care about blockchain tip age. params.MaxTipAgePoW = 1000000 * time.Hour - params.MaxTipAgePoS = 4 * time.Hour // Allow block rewards to be spent instantly params.BlockRewardMaturity = 0 @@ -909,16 +908,13 @@ func (params *DeSoParams) EnableRegtest() { params.DefaultEpochDurationNumBlocks = 10 // Set the PoS default jail inactive validator grace period epochs to 3. params.DefaultJailInactiveValidatorGracePeriodEpochs = 3 - // Set the DefaultMaxBlockSizeBytesPoS to be the same as the MaxBlockSizeBytesPoW - // so existing regtest nodes don't break. - params.DefaultMaxBlockSizeBytesPoS = params.MaxBlockSizeBytesPoW // In regtest, we start all the fork heights at zero. These can be adjusted // for testing purposes to ensure that a transition does not cause issues. 
params.ForkHeights = RegtestForkHeights params.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) params.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - params.DefaultStakingRewardsAPYBasisPoints = 10 * 100 // 10% for regtest + params.DefaultStakingRewardsAPYBasisPoints = 100000 * 100 // 100000% for regtest } func (params *DeSoParams) IsPoWBlockHeight(blockHeight uint64) bool { From f296edb0ae35d2d1c8fa70851194693aad5691ec Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 1 Apr 2024 19:11:13 -0400 Subject: [PATCH 584/762] cache utxo ops and blocks too to speed up syncing (#1144) --- lib/pos_blockchain.go | 144 +++++++++++++++++++++++-------------- lib/pos_blockchain_test.go | 3 +- lib/pos_consensus.go | 7 +- lib/txindex.go | 4 +- 4 files changed, 101 insertions(+), 57 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index c645405cd..1d89e8c49 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -276,13 +276,14 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // We expect the utxoView for the parent block to be valid because we check that all ancestor blocks have // been validated. - parentUtxoView, err := bc.getUtxoViewAtBlockHash(*block.Header.PrevBlockHash) + parentUtxoViewAndUtxoOps, err := bc.getUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash) if err != nil { // This should never happen. If the parent is validated and extends from the tip, then we should // be able to build a UtxoView for it. This failure can only happen due to transient or badger issues. // We return that validation didn't fail and the error. return false, false, nil, errors.Wrap(err, "validateLeaderAndQC: Problem getting UtxoView") } + parentUtxoView := parentUtxoViewAndUtxoOps.UtxoView // First, we perform a validation of the leader and the QC to prevent spam. // If the block fails this check, we throw it away. 
passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, parentUtxoView, verifySignatures) @@ -819,13 +820,14 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS( return nil, errors.Wrapf(err, "validatePreviouslyIndexedBlockPoS: Problem fetching block from DB") } // Build utxoView for the block's parent. - parentUtxoView, err := bc.getUtxoViewAtBlockHash(*block.Header.PrevBlockHash) + parentUtxoViewAndUtxoOps, err := bc.getUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash) if err != nil { // This should never happen. If the parent is validated and extends from the tip, then we should // be able to build a UtxoView for it. This failure can only happen due to transient or badger issues. return nil, errors.Wrap(err, "validatePreviouslyIndexedBlockPoS: Problem getting UtxoView") } + parentUtxoView := parentUtxoViewAndUtxoOps.UtxoView // If the block isn't validated or validate failed, we need to run the anti-spam checks on it. passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, parentUtxoView, verifySignatures) if err != nil { @@ -1708,25 +1710,14 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool // Can't commit a block that's already committed. return errors.Errorf("commitBlockPoS: Block %v is already committed", blockHash.String()) } - block, err := GetBlock(blockHash, bc.db, bc.snapshot) - if err != nil { - return errors.Wrapf(err, "commitBlockPoS: Problem getting block from db %v", blockHash.String()) - } - // Connect a view up to the parent of the block we are committing. - utxoView, err := bc.getUtxoViewAtBlockHash(*block.Header.PrevBlockHash) + // Connect a view up to block we are committing. 
+ utxoViewAndUtxoOps, err := bc.getUtxoViewAndUtxoOpsAtBlockHash(*blockHash) if err != nil { return errors.Wrapf(err, "commitBlockPoS: Problem initializing UtxoView: ") } - txHashes := collections.Transform(block.Txns, func(txn *MsgDeSoTxn) *BlockHash { - return txn.Hash() - }) - // Connect the block to the view! - utxoOpsForBlock, err := utxoView.ConnectBlock( - block, txHashes, verifySignatures, bc.eventManager, block.Header.Height) - if err != nil { - // TODO: rule error handling? mark blocks invalid? - return errors.Wrapf(err, "commitBlockPoS: Problem connecting block to view: ") - } + utxoView := utxoViewAndUtxoOps.UtxoView + utxoOps := utxoViewAndUtxoOps.UtxoOps + block := utxoViewAndUtxoOps.Block // Put the block in the db // Note: we're skipping postgres. blockNode.Status |= StatusBlockCommitted @@ -1736,12 +1727,6 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool defer bc.snapshot.StartAncestralRecordsFlush(true) glog.V(2).Infof("commitBlockPoS: Preparing snapshot flush") } - // Store the new block in the db under the - // -> - // index. - if innerErr := PutBlockHashToBlockWithTxn(txn, bc.snapshot, block, bc.eventManager); innerErr != nil { - return errors.Wrapf(innerErr, "commitBlockPoS: Problem calling PutBlockHashToBlockWithTxn") - } // Store the new block's node in our node index in the db under the // -> @@ -1760,7 +1745,7 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool // Write the utxo operations for this block to the db, so we can have the // ability to roll it back in the future. 
if innerErr := PutUtxoOperationsForBlockWithTxn( - txn, bc.snapshot, uint64(blockNode.Height), blockNode.Hash, utxoOpsForBlock, bc.eventManager, + txn, bc.snapshot, uint64(blockNode.Height), blockNode.Hash, utxoOps, bc.eventManager, ); innerErr != nil { return errors.Wrapf(innerErr, "commitBlockPoS: Problem writing utxo operations to db on simple add to tip") } @@ -1778,7 +1763,7 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool bc.eventManager.blockConnected(&BlockEvent{ Block: block, UtxoView: utxoView, - UtxoOps: utxoOpsForBlock, + UtxoOps: utxoOps, }) // TODO: check w/ Z if this is right.... // Signal the state syncer that we've flushed to the DB so state syncer @@ -1851,79 +1836,124 @@ func (bc *Blockchain) GetCommittedTipView() (*UtxoView, error) { return NewUtxoViewWithSnapshotCache(bc.db, bc.params, bc.postgres, bc.snapshot, nil, bc.snapshotCache) } +// BlockViewAndUtxoOps is a struct that contains a UtxoView and the UtxoOperations +// and a block that were used to build the UtxoView. This struct is only +// used for Blockchain's blockViewCache, which is used to speed up repeated access +// to a utxo view at an uncommitted block. Simply having a utxo view was insufficient +// for all performance enhancements as the utxo operations are needed when committing +// a block and the block is needed for the state syncer. +type BlockViewAndUtxoOps struct { + UtxoView *UtxoView + UtxoOps [][]*UtxoOperation + Block *MsgDeSoBlock +} + +func (viewAndUtxoOps *BlockViewAndUtxoOps) Copy() (*BlockViewAndUtxoOps, error) { + copiedView, err := viewAndUtxoOps.UtxoView.CopyUtxoView() + if err != nil { + return nil, errors.Wrapf(err, "BlockViewAndUtxoOps.Copy: Problem copying UtxoView") + } + return &BlockViewAndUtxoOps{ + UtxoView: copiedView, + UtxoOps: viewAndUtxoOps.UtxoOps, + Block: viewAndUtxoOps.Block, + }, nil +} + // GetUncommittedTipView builds a UtxoView to the uncommitted tip. 
func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks - return bc.getUtxoViewAtBlockHash(*bc.BlockTip().Hash) + blockViewAndUtxoOps, err := bc.getUtxoViewAndUtxoOpsAtBlockHash(*bc.BlockTip().Hash) + if err != nil { + return nil, errors.Wrapf(err, "GetUncommittedTipView: Problem getting UtxoView at block hash") + } + return blockViewAndUtxoOps.UtxoView, nil } -// getUtxoViewAtBlockHash builds a UtxoView to the block provided. It does this by -// identifying all uncommitted ancestors of this block and then connecting those blocks. -func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, error) { +func (bc *Blockchain) getCachedBlockViewAndUtxoOps(blockHash BlockHash) (*BlockViewAndUtxoOps, error, bool) { + if viewAndUtxoOpsAtHashItem, exists := bc.blockViewCache.Lookup(blockHash); exists { + viewAndUtxoOpsAtHash, ok := viewAndUtxoOpsAtHashItem.(*BlockViewAndUtxoOps) + if !ok { + glog.Errorf("getCachedBlockViewAndUtxoOps: Problem casting to BlockViewAndUtxoOps") + return nil, fmt.Errorf("getCachedBlockViewAndUtxoOps: Problem casting to BlockViewAndUtxoOps"), false + } + return viewAndUtxoOpsAtHash, nil, true + } + return nil, nil, false +} + +// getUtxoViewAndUtxoOpsAtBlockHash builds a UtxoView to the block provided and returns a BlockViewAndUtxoOps +// struct containing UtxoView, the UtxoOperations that resulted from connecting the block, and the full +// block (MsgDeSoBlock) for convenience that came from connecting the block. It does this by identifying +// all uncommitted ancestors of this block. Then it checks the block view cache to see if we have already +// computed this view. If not, connecting the uncommitted ancestor blocks and saving to the cache. The +// returned UtxoOps and FullBlock should NOT be modified. 
+func (bc *Blockchain) getUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( + *BlockViewAndUtxoOps, error) { // Always fetch the lineage from the committed tip to the block provided first to // ensure that a valid UtxoView is returned. uncommittedAncestors := []*BlockNode{} currentBlock := bc.blockIndexByHash[blockHash] if currentBlock == nil { - return nil, errors.Errorf("getUtxoViewAtBlockHash: Block %v not found in block index", blockHash) + return nil, errors.Errorf("getUtxoViewAndUtxoOpsAtBlockHash: Block %v not found in block index", blockHash) } + highestCommittedBlock, _ := bc.GetCommittedTip() if highestCommittedBlock == nil { - return nil, errors.Errorf("getUtxoViewAtBlockHash: No committed blocks found") + return nil, errors.Errorf("getUtxoViewAndUtxoOpsAtBlockHash: No committed blocks found") } // If the provided block is committed, we need to make sure it's the committed tip. // Otherwise, we return an error. if currentBlock.IsCommitted() { if !highestCommittedBlock.Hash.IsEqual(&blockHash) { return nil, errors.Errorf( - "getUtxoViewAtBlockHash: Block %v is committed but not the committed tip", blockHash) + "getUtxoViewAndUtxoOpsAtBlockHash: Block %v is committed but not the committed tip", blockHash) } } for !currentBlock.IsCommitted() { uncommittedAncestors = append(uncommittedAncestors, currentBlock) currentParentHash := currentBlock.Header.PrevBlockHash if currentParentHash == nil { - return nil, errors.Errorf("getUtxoViewAtBlockHash: Block %v has nil PrevBlockHash", currentBlock.Hash) + return nil, errors.Errorf("getUtxoViewAndUtxoOpsAtBlockHash: Block %v has nil PrevBlockHash", currentBlock.Hash) } currentBlock = bc.blockIndexByHash[*currentParentHash] if currentBlock == nil { - return nil, errors.Errorf("getUtxoViewAtBlockHash: Block %v not found in block index", currentParentHash) + return nil, errors.Errorf("getUtxoViewAndUtxoOpsAtBlockHash: Block %v not found in block index", currentParentHash) } if currentBlock.IsCommitted() && 
!currentBlock.Hash.IsEqual(highestCommittedBlock.Hash) { return nil, errors.Errorf( - "getUtxoViewAtBlockHash: extends from a committed block that isn't the committed tip") + "getUtxoViewAndUtxoOpsAtBlockHash: extends from a committed block that isn't the committed tip") } if currentBlock.IsCommitted() && !currentBlock.Hash.IsEqual(highestCommittedBlock.Hash) { return nil, errors.Errorf( - "getUtxoViewAtBlockHash: extends from a committed block that isn't the committed tip") + "getUtxoViewAndUtxoOpsAtBlockHash: extends from a committed block that isn't the committed tip") } } - if viewAtHash, exists := bc.blockViewCache.Lookup(blockHash); exists { - copiedView, err := viewAtHash.(*UtxoView).CopyUtxoView() - if err != nil { - return nil, errors.Wrapf(err, "getUtxoViewAtBlockHash: Problem copying UtxoView from cache") - } - return copiedView, nil + viewAndUtxoOpsAtHash, err, exists := bc.getCachedBlockViewAndUtxoOps(blockHash) + if err != nil { + return nil, errors.Wrapf(err, "getUtxoViewAndUtxoOpsAtBlockHash: Problem getting cached BlockViewAndUtxoOps") } - if viewAtHash, exists := bc.blockViewCache.Lookup(blockHash); exists { - copiedView, err := viewAtHash.(*UtxoView).CopyUtxoView() + if exists { + viewAndUtxoOpsCopy, err := viewAndUtxoOpsAtHash.Copy() if err != nil { - return nil, errors.Wrapf(err, "getUtxoViewAtBlockHash: Problem copying UtxoView from cache") + return nil, errors.Wrapf(err, "getUtxoViewAndUtxoOpsAtBlockHash: Problem copying BlockViewAndUtxoOps from cache") } - return copiedView, nil + return viewAndUtxoOpsCopy, nil } // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks utxoView, err := NewUtxoViewWithSnapshotCache(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager, bc.snapshotCache) if err != nil { - return nil, errors.Wrapf(err, "getUtxoViewAtBlockHash: Problem initializing UtxoView") + return nil, errors.Wrapf(err, "getUtxoViewAndUtxoOpsAtBlockHash: Problem initializing UtxoView") } // TODO: there's 
another performance enhancement we can make here. If we have a view in the // cache for one of the ancestors, we can skip fetching the block and connecting it by taking // a copy of it and replacing the existing view. + var utxoOps [][]*UtxoOperation + var fullBlock *MsgDeSoBlock for ii := len(uncommittedAncestors) - 1; ii >= 0; ii-- { // We need to get these blocks from badger - fullBlock, err := GetBlock(uncommittedAncestors[ii].Hash, bc.db, bc.snapshot) + fullBlock, err = GetBlock(uncommittedAncestors[ii].Hash, bc.db, bc.snapshot) if err != nil { return nil, errors.Wrapf(err, "GetUncommittedTipView: Error fetching Block %v not found in block index", @@ -1932,7 +1962,7 @@ func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, er txnHashes := collections.Transform(fullBlock.Txns, func(txn *MsgDeSoTxn) *BlockHash { return txn.Hash() }) - _, err = utxoView.ConnectBlock(fullBlock, txnHashes, false, nil, fullBlock.Header.Height) + utxoOps, err = utxoView.ConnectBlock(fullBlock, txnHashes, false, nil, fullBlock.Header.Height) if err != nil { hash, _ := fullBlock.Hash() return nil, errors.Wrapf(err, "GetUncommittedTipView: Problem connecting block hash %v", hash.String()) @@ -1943,10 +1973,18 @@ func (bc *Blockchain) getUtxoViewAtBlockHash(blockHash BlockHash) (*UtxoView, er // Save a copy of the UtxoView to the cache. 
copiedView, err := utxoView.CopyUtxoView() if err != nil { - return nil, errors.Wrapf(err, "getUtxoViewAtBlockHash: Problem copying UtxoView to store in cache") + return nil, errors.Wrapf(err, "getUtxoViewAndUtxoOpsAtBlockHash: Problem copying UtxoView to store in cache") } - bc.blockViewCache.Add(blockHash, copiedView) - return utxoView, nil + bc.blockViewCache.Add(blockHash, &BlockViewAndUtxoOps{ + UtxoView: copiedView, + UtxoOps: utxoOps, + Block: fullBlock, + }) + return &BlockViewAndUtxoOps{ + UtxoView: utxoView, + UtxoOps: utxoOps, + Block: fullBlock, + }, nil } // GetCommittedTip returns the highest committed block and its index in the best chain. diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 7c2ca3bcc..8ca808a83 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2393,8 +2393,9 @@ func _generateRealBlockWithFailingTxn(testMeta *TestMeta, blockHeight uint64, vi prevBlock, exists := testMeta.chain.blockIndexByHash[*prevBlockHash] require.True(testMeta.t, exists) // Always update the testMeta latestBlockView - latestBlockView, err := testMeta.chain.getUtxoViewAtBlockHash(*prevBlockHash) + latestBlockViewAndUtxoOps, err := testMeta.chain.getUtxoViewAndUtxoOpsAtBlockHash(*prevBlockHash) require.NoError(testMeta.t, err) + latestBlockView := latestBlockViewAndUtxoOps.UtxoView latestBlockHeight := testMeta.chain.blockIndexByHash[*prevBlockHash].Height testMeta.posMempool.UpdateLatestBlock(latestBlockView, uint64(latestBlockHeight)) seedSignature := getRandomSeedSignature(testMeta, blockHeight, view, prevBlock.Header.ProposerRandomSeedSignature) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 14acfcce4..c2ec3c50d 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -674,10 +674,11 @@ func (fc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ return nil, errors.Errorf("Error hashing tip block: %v", err) } - utxoView, err := 
fc.blockchain.getUtxoViewAtBlockHash(*tipBlockHash) + utxoViewAndUtxoOps, err := fc.blockchain.getUtxoViewAndUtxoOpsAtBlockHash(*tipBlockHash) if err != nil { return nil, errors.Errorf("Error fetching UtxoView for tip block: %v", err) } + utxoView := utxoViewAndUtxoOps.UtxoView snapshotGlobalParams, err := utxoView.GetCurrentSnapshotGlobalParamsEntry() if err != nil { return nil, errors.Errorf("Error fetching snapshot global params: %v", err) @@ -719,12 +720,14 @@ func (fc *FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( } // Build a UtxoView at the parent block - utxoViewAtParent, err := fc.blockchain.getUtxoViewAtBlockHash(*parentBlockHash) + parentUtxoViewAndUtxoOps, err := fc.blockchain.getUtxoViewAndUtxoOpsAtBlockHash(*parentBlockHash) if err != nil { // This should never happen as long as the parent block is a descendant of the committed tip. return nil, errors.Errorf("Error fetching UtxoView for parent block: %v", parentBlockHash) } + utxoViewAtParent := parentUtxoViewAndUtxoOps.UtxoView + // Dynamically create a new block producer at the current block height blockProducer, err := fc.createBlockProducer(utxoViewAtParent, parentBlock.Header.TstampNanoSecs) if err != nil { diff --git a/lib/txindex.go b/lib/txindex.go index f1f805235..cde5dc648 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -413,11 +413,13 @@ func (txi *TXIndex) Update() error { "Update: Error initializing UtxoView: %v", err) } if blockToAttach.Header.PrevBlockHash != nil { - utxoView, err = txi.TXIndexChain.getUtxoViewAtBlockHash(*blockToAttach.Header.PrevBlockHash) + var utxoViewAndUtxoOps *BlockViewAndUtxoOps + utxoViewAndUtxoOps, err = txi.TXIndexChain.getUtxoViewAndUtxoOpsAtBlockHash(*blockToAttach.Header.PrevBlockHash) if err != nil { return fmt.Errorf("Update: Problem getting UtxoView at block hash %v: %v", blockToAttach.Header.PrevBlockHash, err) } + utxoView = utxoViewAndUtxoOps.UtxoView } // Do each block update in a single transaction so we're safe in case the 
node From 041a63107a212c34e10f7a7dfc0c3972afcb0e95 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 1 Apr 2024 19:13:50 -0400 Subject: [PATCH 585/762] Use block view cache to speed up state syncer mempool flushes (#1146) --- lib/pos_blockchain.go | 28 +++++++++++----------------- lib/state_change_syncer.go | 27 ++++++++++----------------- 2 files changed, 21 insertions(+), 34 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 1d89e8c49..38d001d82 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1792,11 +1792,10 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool return nil } -// GetUncommittedFullBlocks is a helper that the state syncer uses to fetch all uncommitted -// blocks, so it can flush them just like we would with mempool transactions. It returns -// all uncommitted blocks from the specified tip to the last uncommitted block. -// Note: it would be more efficient if we cached these results. -func (bc *Blockchain) GetUncommittedFullBlocks(tipHash *BlockHash) ([]*MsgDeSoBlock, error) { +// GetUncommittedBlocks is a helper that the state syncer uses to fetch all uncommitted +// block nodes, so it can flush them just like we would with mempool transactions. It returns +// all uncommitted block nodes from the specified tip to the last uncommitted block. 
+func (bc *Blockchain) GetUncommittedBlocks(tipHash *BlockHash) ([]*BlockNode, error) { if tipHash == nil { tipHash = bc.BlockTip().Hash } @@ -1804,31 +1803,26 @@ func (bc *Blockchain) GetUncommittedFullBlocks(tipHash *BlockHash) ([]*MsgDeSoBl defer bc.ChainLock.RUnlock() tipBlock, exists := bc.bestChainMap[*tipHash] if !exists { - return nil, errors.Errorf("GetUncommittedFullBlocks: Block %v not found in best chain map", tipHash.String()) + return nil, errors.Errorf("GetUncommittedBlocks: Block %v not found in best chain map", tipHash.String()) } // If the tip block is committed, we can't get uncommitted blocks from it so we return an empty slice. if tipBlock.IsCommitted() { - return []*MsgDeSoBlock{}, nil + return []*BlockNode{}, nil } - var uncommittedBlocks []*MsgDeSoBlock + var uncommittedBlockNodes []*BlockNode currentBlock := tipBlock for !currentBlock.IsCommitted() { - fullBlock, err := GetBlock(currentBlock.Hash, bc.db, bc.snapshot) - if err != nil { - return nil, errors.Wrapf(err, "GetUncommittedFullBlocks: Problem fetching block %v", - currentBlock.Hash.String()) - } - uncommittedBlocks = append(uncommittedBlocks, fullBlock) + uncommittedBlockNodes = append(uncommittedBlockNodes, currentBlock) currentParentHash := currentBlock.Header.PrevBlockHash if currentParentHash == nil { - return nil, errors.Errorf("GetUncommittedFullBlocks: Block %v has nil PrevBlockHash", currentBlock.Hash) + return nil, errors.Errorf("GetUncommittedBlocks: Block %v has nil PrevBlockHash", currentBlock.Hash) } currentBlock = bc.blockIndexByHash[*currentParentHash] if currentBlock == nil { - return nil, errors.Errorf("GetUncommittedFullBlocks: Block %v not found in block index", currentBlock.Hash) + return nil, errors.Errorf("GetUncommittedBlocks: Block %v not found in block index", currentBlock.Hash) } } - return collections.Reverse(uncommittedBlocks), nil + return collections.Reverse(uncommittedBlockNodes), nil } // GetCommittedTipView builds a UtxoView to the committed tip. 
diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 93e50261f..0bf81bc29 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -9,7 +9,6 @@ import ( "sync" "time" - "github.com/deso-protocol/core/collections" "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" "github.com/google/uuid" @@ -682,7 +681,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser mempoolTxns := server.GetMempool().GetOrderedTransactions() // Get the uncommitted blocks from the chain. - uncommittedBlocks, err := server.blockchain.GetUncommittedFullBlocks(mempoolUtxoView.TipHash) + uncommittedBlocks, err := server.blockchain.GetUncommittedBlocks(mempoolUtxoView.TipHash) if err != nil { mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ FlushId: uuid.Nil, @@ -694,17 +693,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser // First connect the uncommitted blocks to the mempool view. for _, uncommittedBlock := range uncommittedBlocks { - var utxoOpsForBlock [][]*UtxoOperation - txHashes := collections.Transform(uncommittedBlock.Txns, func(txn *MsgDeSoTxn) *BlockHash { - return txn.Hash() - }) - // TODO: there is a slight performance enhancement we could make here - // by rewriting the ConnectBlock logic to avoid unnecessary UtxoView copying - // for failing transactions. However, we'd also need to rewrite the end-of-epoch - // logic here which would make this function a bit long. - // Connect this block to the mempoolTxUtxoView so we can get the utxo ops. 
- utxoOpsForBlock, err = mempoolTxUtxoView.ConnectBlock( - uncommittedBlock, txHashes, false, nil, uncommittedBlock.Header.Height) + utxoViewAndOpsAtBlockHash, err := server.blockchain.getUtxoViewAndUtxoOpsAtBlockHash(*uncommittedBlock.Hash) if err != nil { mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ FlushId: uuid.Nil, @@ -713,20 +702,24 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser }) return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer ConnectBlock uncommitted block: ") } - blockHash, _ := uncommittedBlock.Hash() // Emit the UtxoOps event. mempoolUtxoView.EventManager.stateSyncerOperation(&StateSyncerOperationEvent{ StateChangeEntry: &StateChangeEntry{ OperationType: DbOperationTypeUpsert, - KeyBytes: _DbKeyForUtxoOps(blockHash), + KeyBytes: _DbKeyForUtxoOps(uncommittedBlock.Hash), EncoderBytes: EncodeToBytes(blockHeight, &UtxoOperationBundle{ - UtxoOpBundle: utxoOpsForBlock, + UtxoOpBundle: utxoViewAndOpsAtBlockHash.UtxoOps, }, false), - Block: uncommittedBlock, + Block: utxoViewAndOpsAtBlockHash.Block, }, FlushId: uuid.Nil, IsMempoolTxn: true, }) + // getUtxoViewAtBlockHash returns a copy of the view, so we + // set the mempoolTxUtxoView to the view at the block hash + // and update its event manager to match the mempoolEventManager. 
+ mempoolTxUtxoView = utxoViewAndOpsAtBlockHash.UtxoView + mempoolTxUtxoView.EventManager = &mempoolEventManager } currentTimestamp := time.Now().UnixNano() From 167c6c603b6663299a11d36fa7587bee37146def Mon Sep 17 00:00:00 2001 From: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> Date: Mon, 1 Apr 2024 16:16:38 -0700 Subject: [PATCH 586/762] Bump badger version & improve header processing logging (#1149) --- cmd/node.go | 2 +- go.mod | 17 ++++--- go.sum | 51 +++++++-------------- integration_testing/tools.go | 15 +++--- lib/block_view.go | 2 +- lib/block_view_atomic_txns_test.go | 2 +- lib/block_view_bitcoin_test.go | 2 +- lib/block_view_creator_coin_test.go | 2 +- lib/block_view_dao_coin_limit_order_test.go | 2 +- lib/block_view_dao_coin_test.go | 7 +-- lib/block_view_derived_key_test.go | 11 +++-- lib/block_view_flush.go | 2 +- lib/block_view_follow_test.go | 5 +- lib/block_view_like_test.go | 5 +- lib/block_view_lockups.go | 7 +-- lib/block_view_lockups_test.go | 5 +- lib/block_view_message_test.go | 2 +- lib/block_view_nft_test.go | 2 +- lib/block_view_post.go | 13 +++--- lib/block_view_post_test.go | 2 +- lib/block_view_profile_test.go | 2 +- lib/block_view_stake.go | 2 +- lib/block_view_test.go | 2 +- lib/block_view_validator.go | 2 +- lib/blockchain.go | 2 +- lib/blockchain_test.go | 2 +- lib/db_adapter.go | 5 +- lib/db_utils.go | 2 +- lib/db_utils_test.go | 2 +- lib/legacy_mempool.go | 3 +- lib/notifier.go | 7 +-- lib/pos_blockchain.go | 2 +- lib/pos_epoch.go | 2 +- lib/pos_mempool.go | 2 +- lib/pos_mempool_persister.go | 7 +-- lib/pos_mempool_persister_test.go | 5 +- lib/pos_mempool_test.go | 2 +- lib/pos_random_seed.go | 5 +- lib/pos_snapshot_cache.go | 5 +- lib/pos_snapshot_entries.go | 2 +- lib/postgres.go | 2 +- lib/server.go | 12 ++++- lib/snapshot.go | 17 +++---- lib/txindex.go | 3 +- 44 files changed, 129 insertions(+), 124 deletions(-) diff --git a/cmd/node.go b/cmd/node.go index 282d91b4a..76b4a39b8 100644 --- a/cmd/node.go +++ 
b/cmd/node.go @@ -18,7 +18,7 @@ import ( "github.com/deso-protocol/core/lib" "github.com/deso-protocol/core/migrate" "github.com/deso-protocol/go-deadlock" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/go-pg/pg/v10" "github.com/golang/glog" migrations "github.com/robinjoseph08/go-pg-migrations/v3" diff --git a/go.mod b/go.mod index 829ab59d8..bcccc632b 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/decred/dcrd/lru v1.1.1 github.com/deso-protocol/go-deadlock v1.0.0 github.com/deso-protocol/go-merkle-tree v1.0.0 - github.com/dgraph-io/badger/v3 v3.2103.5 + github.com/dgraph-io/badger/v4 v4.2.0 github.com/emirpasic/gods v1.18.1 github.com/ethereum/go-ethereum v1.9.25 github.com/fatih/color v1.13.0 @@ -37,7 +37,7 @@ require ( github.com/tyler-smith/go-bip39 v1.0.2 github.com/unrolled/secure v1.0.8 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 gopkg.in/DataDog/dd-trace-go.v1 v1.29.0 ) @@ -52,8 +52,7 @@ require ( github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect github.com/bwesterb/go-ristretto v1.2.0 // indirect - github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dustin/go-humanize v1.0.0 // indirect @@ -99,14 +98,14 @@ require ( github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect go.opencensus.io v0.23.0 // indirect - golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect - golang.org/x/sys v0.0.0-20221010170243-090e33056c14 // indirect - golang.org/x/text v0.3.6 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sys v0.5.0 // indirect 
+ golang.org/x/text v0.7.0 // indirect golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect - golang.org/x/tools v0.1.5 // indirect + golang.org/x/tools v0.1.12 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect gonum.org/v1/gonum v0.6.1 // indirect - google.golang.org/protobuf v1.26.0 // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/AlecAivazis/survey.v1 v1.8.7 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/ini.v1 v1.51.0 // indirect diff --git a/go.sum b/go.sum index 23375cc2d..e4bbfaa35 100644 --- a/go.sum +++ b/go.sum @@ -34,7 +34,6 @@ github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= @@ -45,7 +44,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod 
h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -81,10 +79,10 @@ github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40 github.com/bxcodec/faker v2.0.1+incompatible/go.mod h1:BNzfpVdTwnFJ6GtfYTcQu6l6rHShT+veBxNCnjCx5XM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -94,14 +92,10 @@ github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtM github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coreos/bbolt v1.3.2/go.mod 
h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -122,8 +116,8 @@ github.com/deso-protocol/go-deadlock v1.0.0 h1:mw0pHy/19zgC+JFBStuQt1+1Ehv5OKA5N github.com/deso-protocol/go-deadlock v1.0.0/go.mod h1:K0Wd2OV2x7ck7SMYDraWerpKjFKUeBqaFcwz21tmkb8= github.com/deso-protocol/go-merkle-tree v1.0.0 h1:9zkI5dQsITYy77s4kbTGPQmZnhQ+LsH/kRdL5l/Yzvg= github.com/deso-protocol/go-merkle-tree v1.0.0/go.mod h1:V/vbg/maaNv6G7zf9VVs645nLFx/jsO2L/awFB/S/ZU= -github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= -github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= +github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= +github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= github.com/dgraph-io/ristretto v0.1.1 
h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -183,7 +177,6 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -211,7 +204,6 @@ github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -221,7 +213,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp 
v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -319,7 +310,6 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -419,7 +409,6 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -438,13 +427,10 @@ github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIK github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= @@ -452,7 +438,6 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1 
h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= @@ -486,7 +471,6 @@ github.com/tsuyoshiwada/go-gitcmd v0.0.0-20180205145712-5f1f5f9475df/go.mod h1:p github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/tyler-smith/go-bip39 v1.0.2 h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8= github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/unrolled/secure v1.0.8 h1:JaMvKbe4CRt8oyxVXn+xY+6jlqd7pyJNSVkmsBxxQsM= github.com/unrolled/secure v1.0.8/go.mod h1:fO+mEan+FLB0CdEnHf6Q4ZZVNqG+5fuLFnP8p0BXDPI= github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= @@ -509,14 +493,12 @@ github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+m github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.5/go.mod 
h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= @@ -529,7 +511,6 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -574,8 +555,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -606,8 +587,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -618,8 +599,9 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 
h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180606202747-9527bec2660b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -628,7 +610,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -659,15 +640,16 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20221010170243-090e33056c14 h1:k5II8e6QD8mITdi+okbbmR/cIyEbeXLBhy5Ha4nevyc= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 
h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE= @@ -698,8 +680,9 @@ golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -749,8 +732,9 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/AlecAivazis/survey.v1 v1.8.7 h1:oBJqtgsyBLg9K5FK9twNUbcPnbCPoh+R9a+7nag3qJM= gopkg.in/AlecAivazis/survey.v1 v1.8.7/go.mod h1:iBNOmqKz/NUbZx3bA+4hAGLRC7fSK7tgtVDT4tB22XA= gopkg.in/DataDog/dd-trace-go.v1 v1.29.0 h1:3C1EEjgFTPqrnS2SXuSqkBbZGacIOPJ7ScGJk4nrP9s= @@ -758,7 +742,6 @@ gopkg.in/DataDog/dd-trace-go.v1 v1.29.0/go.mod h1:FLwUDeuH0z5hkvgvd04/M3MHQN4AF5 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/integration_testing/tools.go b/integration_testing/tools.go index cdf55ebf5..369a3eb09 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -3,20 +3,21 @@ package integration_testing import ( "encoding/hex" "fmt" + "io/ioutil" + "os" + "reflect" + "sort" + "testing" + "time" + "github.com/btcsuite/btcd/wire" "github.com/deso-protocol/core/cmd" "github.com/deso-protocol/core/lib" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" "github.com/stretchr/testify/require" "github.com/tyler-smith/go-bip39" - "io/ioutil" - "os" - "reflect" - "sort" - "testing" - "time" ) // This testing suite is the first serious attempt at making a comprehensive functional testing framework for DeSo nodes. diff --git a/lib/block_view.go b/lib/block_view.go index c63e8437a..964c78e65 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -19,7 +19,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index 17c9c0092..517531f01 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -3,7 +3,7 @@ package lib import ( "bytes" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index b482aecd1..3325495cb 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -14,7 +14,7 @@ import ( 
"github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" merkletree "github.com/deso-protocol/go-merkle-tree" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_creator_coin_test.go b/lib/block_view_creator_coin_test.go index 8707b9a48..844e23921 100644 --- a/lib/block_view_creator_coin_test.go +++ b/lib/block_view_creator_coin_test.go @@ -5,7 +5,7 @@ import ( "strconv" "testing" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_dao_coin_limit_order_test.go b/lib/block_view_dao_coin_limit_order_test.go index 026b8eaad..26d2e0ece 100644 --- a/lib/block_view_dao_coin_limit_order_test.go +++ b/lib/block_view_dao_coin_limit_order_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/holiman/uint256" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_dao_coin_test.go b/lib/block_view_dao_coin_test.go index 1050a8b9b..2dfa6b76b 100644 --- a/lib/block_view_dao_coin_test.go +++ b/lib/block_view_dao_coin_test.go @@ -1,13 +1,14 @@ package lib import ( + "reflect" + "testing" + "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/holiman/uint256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "reflect" - "testing" ) func _daoCoinTxn(t *testing.T, chain *Blockchain, db *badger.DB, diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index 7b9c8fdde..e49622387 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -5,15 +5,16 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v3" - "github.com/holiman/uint256" - 
"github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "math" "math/rand" "testing" "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/dgraph-io/badger/v4" + "github.com/holiman/uint256" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const ( diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index bffe54a4c..bd289cb40 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -5,7 +5,7 @@ import ( "reflect" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/block_view_follow_test.go b/lib/block_view_follow_test.go index 8b73727b1..10a6bd6da 100644 --- a/lib/block_view_follow_test.go +++ b/lib/block_view_follow_test.go @@ -2,10 +2,11 @@ package lib import ( "fmt" - "github.com/dgraph-io/badger/v3" + "testing" + + "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" ) func _doFollowTxn(t *testing.T, chain *Blockchain, db *badger.DB, diff --git a/lib/block_view_like_test.go b/lib/block_view_like_test.go index 19d3ab260..3b376cf75 100644 --- a/lib/block_view_like_test.go +++ b/lib/block_view_like_test.go @@ -2,10 +2,11 @@ package lib import ( "fmt" - "github.com/dgraph-io/badger/v3" + "testing" + + "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" ) func _doLikeTxn(t *testing.T, chain *Blockchain, db *badger.DB, diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 9051421c2..85fab796f 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -3,13 +3,14 @@ package lib import ( "bytes" "fmt" + "reflect" + "sort" + "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" - 
"reflect" - "sort" ) // diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 7680c5dd6..1ab7fb017 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -3,13 +3,14 @@ package lib import ( "testing" + "time" + "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/holiman/uint256" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "time" ) func TestCoinLockupsForkHeight(t *testing.T) { diff --git a/lib/block_view_message_test.go b/lib/block_view_message_test.go index b79931981..dfa640926 100644 --- a/lib/block_view_message_test.go +++ b/lib/block_view_message_test.go @@ -10,7 +10,7 @@ import ( "time" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_nft_test.go b/lib/block_view_nft_test.go index bc3d6efcd..dbc0f4395 100644 --- a/lib/block_view_nft_test.go +++ b/lib/block_view_nft_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_post.go b/lib/block_view_post.go index 728c091df..2ea329a0c 100644 --- a/lib/block_view_post.go +++ b/lib/block_view_post.go @@ -5,17 +5,18 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/btcsuite/btcd/btcec" - "github.com/davecgh/go-spew/spew" - "github.com/dgraph-io/badger/v3" - "github.com/gernest/mention" - "github.com/golang/glog" - "github.com/pkg/errors" "math" "reflect" "regexp" "sort" "strings" + + "github.com/btcsuite/btcd/btcec" + "github.com/davecgh/go-spew/spew" + "github.com/dgraph-io/badger/v4" + "github.com/gernest/mention" + "github.com/golang/glog" + "github.com/pkg/errors" ) func (bav *UtxoView) 
_getRepostEntryForRepostKey(repostKey *RepostKey) *RepostEntry { diff --git a/lib/block_view_post_test.go b/lib/block_view_post_test.go index 1f8574d05..9ed958875 100644 --- a/lib/block_view_post_test.go +++ b/lib/block_view_post_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/lib/block_view_profile_test.go b/lib/block_view_profile_test.go index 3f3c0d9d8..c7721f442 100644 --- a/lib/block_view_profile_test.go +++ b/lib/block_view_profile_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index fd8520eb2..aca08492f 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -5,7 +5,7 @@ import ( "fmt" "sort" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" diff --git a/lib/block_view_test.go b/lib/block_view_test.go index bc49c19de..000130b17 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -14,7 +14,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/decred/dcrd/lru" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" embeddedpostgres "github.com/fergusstrange/embedded-postgres" "github.com/golang/glog" "github.com/stretchr/testify/assert" diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 17d2c96ee..a2f717399 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -12,7 +12,7 @@ import ( "github.com/deso-protocol/core/consensus" "github.com/deso-protocol/core/bls" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" 
"github.com/holiman/uint256" "github.com/pkg/errors" diff --git a/lib/blockchain.go b/lib/blockchain.go index 25923dc91..d5c4799f8 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -28,7 +28,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/deso-protocol/go-deadlock" merkletree "github.com/deso-protocol/go-merkle-tree" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index 65b5120d4..f81988e0b 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -18,7 +18,7 @@ import ( chainlib "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/lib/db_adapter.go b/lib/db_adapter.go index 052c74873..b92ff601d 100644 --- a/lib/db_adapter.go +++ b/lib/db_adapter.go @@ -2,10 +2,11 @@ package lib import ( "bytes" - "github.com/dgraph-io/badger/v3" + "sort" + + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" - "sort" ) type DbAdapter struct { diff --git a/lib/db_utils.go b/lib/db_utils.go index 542bda24f..1e1aed02f 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -23,7 +23,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index b373592fd..a06d0fedc 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -11,7 +11,7 @@ import ( "time" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 
54942a3c4..df30ec70f 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -17,10 +17,9 @@ import ( "time" "github.com/btcsuite/btcutil" + "github.com/dgraph-io/badger/v4" "github.com/gernest/mention" - "github.com/dgraph-io/badger/v3" - "github.com/btcsuite/btcd/btcec" "github.com/deso-protocol/core/collections" "github.com/deso-protocol/go-deadlock" diff --git a/lib/notifier.go b/lib/notifier.go index de5959dfb..b6c88c37d 100644 --- a/lib/notifier.go +++ b/lib/notifier.go @@ -3,13 +3,14 @@ package lib import ( "encoding/json" "fmt" - "github.com/dgraph-io/badger/v3" - "github.com/gernest/mention" - "github.com/go-pg/pg/v10" "reflect" "strings" "time" + "github.com/dgraph-io/badger/v4" + "github.com/gernest/mention" + "github.com/go-pg/pg/v10" + "github.com/golang/glog" ) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 38d001d82..66c39fdaf 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -9,7 +9,7 @@ import ( "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index 438377777..12fa1be6d 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -3,7 +3,7 @@ package lib import ( "bytes" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index d43fc9808..6812f96fa 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -8,7 +8,7 @@ import ( "sync/atomic" "time" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/pos_mempool_persister.go b/lib/pos_mempool_persister.go index 25e0a3fc8..ceeface31 100644 --- a/lib/pos_mempool_persister.go +++ b/lib/pos_mempool_persister.go @@ -2,11 +2,12 @@ package lib import ( "bytes" 
- "github.com/dgraph-io/badger/v3" - "github.com/golang/glog" - "github.com/pkg/errors" "sync" "time" + + "github.com/dgraph-io/badger/v4" + "github.com/golang/glog" + "github.com/pkg/errors" ) const ( diff --git a/lib/pos_mempool_persister_test.go b/lib/pos_mempool_persister_test.go index 6050456c4..73887a01f 100644 --- a/lib/pos_mempool_persister_test.go +++ b/lib/pos_mempool_persister_test.go @@ -1,12 +1,13 @@ package lib import ( - "github.com/dgraph-io/badger/v3" - "github.com/stretchr/testify/require" "math/rand" "os" "testing" "time" + + "github.com/dgraph-io/badger/v4" + "github.com/stretchr/testify/require" ) func TestMempoolPersister(t *testing.T) { diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 3a34eacaf..75673cbf0 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index 2d69340a2..d5de7494e 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -2,11 +2,12 @@ package lib import ( "bytes" - "golang.org/x/crypto/sha3" "io" + "golang.org/x/crypto/sha3" + "github.com/deso-protocol/core/bls" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" diff --git a/lib/pos_snapshot_cache.go b/lib/pos_snapshot_cache.go index 783f37379..d07470eec 100644 --- a/lib/pos_snapshot_cache.go +++ b/lib/pos_snapshot_cache.go @@ -1,9 +1,10 @@ package lib import ( - "github.com/dgraph-io/badger/v3" - "github.com/pkg/errors" "sync" + + "github.com/dgraph-io/badger/v4" + "github.com/pkg/errors" ) // SnapshotCache is a struct that holds cached versions of the end-of-epoch diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index b1cb70d9f..195a238e6 100644 --- 
a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -8,7 +8,7 @@ import ( "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" diff --git a/lib/postgres.go b/lib/postgres.go index fae62bc16..f3cae7833 100644 --- a/lib/postgres.go +++ b/lib/postgres.go @@ -10,7 +10,7 @@ import ( "regexp" "strings" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/go-pg/pg/v10" "github.com/go-pg/pg/v10/orm" "github.com/golang/glog" diff --git a/lib/server.go b/lib/server.go index f57e3937a..2ecba1841 100644 --- a/lib/server.go +++ b/lib/server.go @@ -24,7 +24,7 @@ import ( chainlib "github.com/btcsuite/btcd/blockchain" "github.com/davecgh/go-spew/spew" "github.com/deso-protocol/go-deadlock" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" ) @@ -943,7 +943,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // right after the tip of our header chain ideally. While going through them // tally up the number that we actually process. numNewHeaders := 0 - for _, headerReceived := range msg.Headers { + for ii, headerReceived := range msg.Headers { // If we've set a maximum height for node sync and we've reached it, // then we will not process any more headers. if srv.blockchain.isTipMaxed(srv.blockchain.headerTip()) { @@ -987,6 +987,14 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // if we're in the process of syncing. 
_, isOrphan, err := srv.blockchain.ProcessHeader(headerReceived, headerHash, !srv.blockchain.isSyncing()) + numLogHeaders := 2000 + if ii%numLogHeaders == 0 { + glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleHeaderBundle: Processed header ( %v / %v ) from Peer %v", + headerReceived.Height, + msg.Headers[len(msg.Headers)-1].Height, + pp))) + } + // If this header is an orphan or we encountered an error for any reason, // disconnect from the peer. Because every header is sent in response to // a GetHeaders request, the peer should know enough to never send us diff --git a/lib/snapshot.go b/lib/snapshot.go index 1041826d6..787a8f347 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -5,23 +5,24 @@ import ( "context" "encoding/hex" "fmt" + "math" + "path/filepath" + "reflect" + "runtime" + "sort" + "sync" + "time" + "github.com/cloudflare/circl/group" "github.com/decred/dcrd/lru" "github.com/deso-protocol/go-deadlock" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/fatih/color" "github.com/golang/glog" "github.com/google/uuid" "github.com/oleiade/lane" "github.com/pkg/errors" "golang.org/x/sync/semaphore" - "math" - "path/filepath" - "reflect" - "runtime" - "sort" - "sync" - "time" ) var ( diff --git a/lib/txindex.go b/lib/txindex.go index cde5dc648..276869d13 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -3,12 +3,13 @@ package lib import ( "encoding/hex" "fmt" - "github.com/dgraph-io/badger/v3" "path/filepath" "reflect" "sync" "time" + "github.com/dgraph-io/badger/v4" + chainlib "github.com/btcsuite/btcd/blockchain" "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" From 84f3224c4f823e549adecc84271512c17b501fa8 Mon Sep 17 00:00:00 2001 From: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> Date: Mon, 1 Apr 2024 16:18:34 -0700 Subject: [PATCH 587/762] Speed up blocksync by only writing to db when an entry has changed (#1150) This change fully resolves a block syncing slowness issue encountered in 
the last phase of PoS testing. In order to optimize flushes for certain prefixes, we want to only write entries to the db that have changed. On top of that, we add a further optimization to only update related indexes if the stake amount or status has changed in the validator. Not doing this results in a lot of writes to badger every epoch that eventually slow block processing to a crawl. This is essentially a bug in badger when you repeatedly write to the same key, and we're papering over it here in response to encountering the issue. In an ideal world, badger would work as intended and this extra optimization wouldn't be necessary. Prefixes affected: * PrefixStakeByValidatorAndStaker * PrefixStakeByStakeAmount * PrefixValidatorByPKID * PrefixValidatorByStatusAndStakeAmount * PrefixSnapshotValidatorSetByPKID * PrefixSnapshotValidatorSetByStakeAmount --- lib/block_view_stake.go | 75 ++++++++++++++++-------- lib/block_view_validator.go | 94 +++++++++++++++++++++--------- lib/pos_snapshot_entries.go | 110 ++++++++++++++++++++++-------------- lib/server.go | 4 +- 4 files changed, 190 insertions(+), 93 deletions(-) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index aca08492f..15dc7b4f9 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -912,7 +912,14 @@ func DBGetLockedStakeEntriesInRangeWithTxn( return lockedStakeEntries, nil } -func DBPutStakeEntryWithTxn( +// In order to optimize the flush, we want to only write entries to the db that have changed. +// On top of that, we add a further optimization to only update the +// PrefixStakeByStakeAmount index if the stake amount has changed. Not doing this results +// in a lot of writes to badger every epoch that eventually slow block processing to a crawl. +// This is essentially a bug in badger when you repeatedly write to the same key, and we're +// papering over it here in response to encountering the issue. 
In an ideal world, badger +// would work as intended and this extra optimization wouldn't be necessary. +func DBUpdateStakeEntryWithTxn( txn *badger.Txn, snap *Snapshot, stakeEntry *StakeEntry, @@ -923,20 +930,50 @@ func DBPutStakeEntryWithTxn( return nil } - // Set StakeEntry in PrefixStakeByValidatorByStaker. + // Fetch the existing entry from the db so we can potentially avoid an update + dbEntry, err := DBGetStakeEntryWithTxn( + txn, snap, stakeEntry.ValidatorPKID, stakeEntry.StakerPKID) + if err != nil { + return errors.Wrapf(err, "_flushStakeEntriesToDbWithTxn: ") + } + dbEntryBytes := EncodeToBytes(blockHeight, dbEntry) + // Serialize the entry to bytes + entryToWriteBytes := EncodeToBytes(blockHeight, stakeEntry) + // If the entry we're about to write is the exact same as what's already in the db then + // don't write it. + if bytes.Equal(dbEntryBytes, entryToWriteBytes) { + return nil + } + + // Set StakeEntry in PrefixStakeByValidatorByStaker. This should gracefully overwrite an existing entry + // if one exists so no need to delete before adding it. stakeByValidatorAndStakerKey := DBKeyForStakeByValidatorAndStaker(stakeEntry.ValidatorPKID, stakeEntry.StakerPKID) if err := DBSetWithTxn(txn, snap, stakeByValidatorAndStakerKey, EncodeToBytes(blockHeight, stakeEntry), eventManager); err != nil { return errors.Wrapf( - err, "DBPutStakeEntryWithTxn: problem storing StakeEntry in index PrefixStakeByValidatorByStaker: ", + err, "DBUpdateStakeEntryWithTxn: problem storing StakeEntry in index PrefixStakeByValidatorByStaker: ", ) } - // Set StakeEntry in PrefixStakeByStakeAmount. - stakeByStakeAmountKey := DBKeyForStakeByStakeAmount(stakeEntry) - if err := DBSetWithTxn(txn, snap, stakeByStakeAmountKey, nil, eventManager); err != nil { - return errors.Wrapf( - err, "DBPutStakeEntryWithTxn: problem storing StakeEntry in index PrefixStakeByStakeAmount: ", - ) + // Set StakeEntry in PrefixStakeByStakeAmount but only if the amount has changed. 
+ if dbEntry == nil || dbEntry.StakeAmountNanos.Cmp(stakeEntry.StakeAmountNanos) != 0 { + // Delete the existing entry in the db index if one exists + if dbEntry != nil { + dbStakeByStakeAmountKey := DBKeyForStakeByStakeAmount(dbEntry) + // Note we set isDeleted=false as a hint to the state syncer that we're about to + // update this value immediately after. + if err := DBDeleteWithTxn(txn, snap, dbStakeByStakeAmountKey, eventManager, false); err != nil { + return errors.Wrapf( + err, "DBDeleteStakeEntryWithTxn: problem deleting StakeEntry from index PrefixStakeByStakeAmount: ", + ) + } + } + + stakeByStakeAmountKey := DBKeyForStakeByStakeAmount(stakeEntry) + if err := DBSetWithTxn(txn, snap, stakeByStakeAmountKey, nil, eventManager); err != nil { + return errors.Wrapf( + err, "DBUpdateStakeEntryWithTxn: problem storing StakeEntry in index PrefixStakeByStakeAmount: ", + ) + } } return nil @@ -2719,7 +2756,8 @@ func (bav *UtxoView) _deleteLockedStakeEntryMappings(lockedStakeEntry *LockedSta } func (bav *UtxoView) _flushStakeEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { - // Delete all entries in the UtxoView map. + // Iterate through all the entries in the view. Delete the entries that have isDeleted=true + // and update the entries that don't. for mapKeyIter, entryIter := range bav.StakeMapKeyToStakeEntry { // Make a copy of the iterators since we make references to them below. mapKey := mapKeyIter @@ -2737,21 +2775,12 @@ func (bav *UtxoView) _flushStakeEntriesToDbWithTxn(txn *badger.Txn, blockHeight // Delete the existing mappings in the db for this MapKey. They will be // re-added if the corresponding entry in-memory has isDeleted=false. - if err := DBDeleteStakeEntryWithTxn(txn, bav.Snapshot, entry.ValidatorPKID, entry.StakerPKID, blockHeight, bav.EventManager, entry.isDeleted); err != nil { - return errors.Wrapf(err, "_flushStakeEntriesToDbWithTxn: ") - } - } - - // Set any !isDeleted entries in the UtxoView map. 
- for _, entryIter := range bav.StakeMapKeyToStakeEntry { - entry := *entryIter if entry.isDeleted { - // If isDeleted then there's nothing to do because - // we already deleted the entry above. + if err := DBDeleteStakeEntryWithTxn(txn, bav.Snapshot, entry.ValidatorPKID, entry.StakerPKID, blockHeight, bav.EventManager, entry.isDeleted); err != nil { + return errors.Wrapf(err, "_flushStakeEntriesToDbWithTxn: ") + } } else { - // If !isDeleted then we put the corresponding - // mappings for it into the db. - if err := DBPutStakeEntryWithTxn(txn, bav.Snapshot, &entry, blockHeight, bav.EventManager); err != nil { + if err := DBUpdateStakeEntryWithTxn(txn, bav.Snapshot, &entry, blockHeight, bav.EventManager); err != nil { return errors.Wrapf(err, "_flushStakeEntriesToDbWithTxn: ") } } diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index a2f717399..cd88f8656 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -4,13 +4,12 @@ import ( "bytes" "crypto/sha256" "fmt" + "github.com/deso-protocol/core/consensus" "io" "math" "net/url" "sort" - "github.com/deso-protocol/core/consensus" - "github.com/deso-protocol/core/bls" "github.com/dgraph-io/badger/v4" "github.com/golang/glog" @@ -773,7 +772,15 @@ func DBGetTopActiveValidatorsByStakeAmount( return validatorEntries, nil } -func DBPutValidatorWithTxn( +// In order to optimize the flush, we want to only write entries to the db that have changed. +// On top of that, we add a further optimization to only update the +// PrefixValidatorByStatusAndStakeAmount index if the stake amount or status has changed in the +// validator. Not doing this results in a lot of writes to badger +// every epoch that eventually slow block processing to a crawl. This is essentially a bug in +// badger when you repeatedly write to the same key, and we're papering over it here in response +// to encountering the issue. 
In an ideal world, badger would work as intended and this extra
+// optimization wouldn't be necessary.
+func DBUpdateValidatorWithTxn(
 	txn *badger.Txn,
 	snap *Snapshot,
 	validatorEntry *ValidatorEntry,
@@ -786,7 +793,28 @@ func DBPutValidatorWithTxn(
 		return nil
 	}
 
-	// Set ValidatorEntry in PrefixValidatorByPKID.
+	// Look up the existing ValidatorEntry from the db
+	dbEntry, err := DBGetValidatorByPKIDWithTxn(txn, snap, validatorEntry.ValidatorPKID)
+	if err != nil {
+		return errors.Wrapf(err, "DBUpdateValidatorWithTxn: ")
+	}
+	dbEntryBytes := EncodeToBytes(blockHeight, dbEntry)
+
+	entryToWriteBytes := EncodeToBytes(blockHeight, validatorEntry)
+	// If the entry we're about to write is the exact same as what's already in the db then
+	// don't write it.
+	//
+	// In 99%+ of cases, the entries will be identical so we save a lot from
+	// this optimization, and it significantly speeds up block processing to have it. When they
+	// differ, typically it's only because of LastActiveAtEpochNumber. For this reason, we have
+	// a secondary optimization to only update the PrefixValidatorByStatusAndStakeAmount index
+	// when absolutely necessary.
+	if bytes.Equal(dbEntryBytes, entryToWriteBytes) {
+		return nil
+	}
+
+	// Set ValidatorEntry in PrefixValidatorByPKID. This should gracefully overwrite an existing entry
+	// if one exists.
 	key := DBKeyForValidatorByPKID(validatorEntry)
 	if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry), eventManager); err != nil {
 		return errors.Wrapf(
@@ -794,13 +822,33 @@ func DBPutValidatorWithTxn(
 		)
 	}
 
-	// Set ValidatorEntry key in PrefixValidatorByStatusAndStakeAmount. The value should be nil.
-	// We parse the ValidatorPKID from the key for this index. 
- key = DBKeyForValidatorByStatusAndStakeAmount(validatorEntry) - if err := DBSetWithTxn(txn, snap, key, nil, eventManager); err != nil { - return errors.Wrapf( - err, "DBPutValidatorWithTxn: problem storing ValidatorEntry in index PrefixValidatorByStatusAndStakeAmount", - ) + // If the entry we're about to write has the exact same stake amount and the exact same status, + // then there is no need to update PrefixValidatorByStatusAndStakeAmount. This saves us a lot in terms + // of block processing time due to the aforementioned badger bug. + if dbEntry == nil || validatorEntry.TotalStakeAmountNanos.Cmp(dbEntry.TotalStakeAmountNanos) != 0 || + validatorEntry.Status() != dbEntry.Status() { + + // Here we need to delete the existing value in the index first + if dbEntry != nil { + key = DBKeyForValidatorByStatusAndStakeAmount(dbEntry) + // Note we set isDeleted=false as a hint to the state syncer that we're about to + // update this value immediately after. + if err := DBDeleteWithTxn(txn, snap, key, eventManager, false); err != nil { + return errors.Wrapf( + err, "DBUpdateValidatorWithTxn: problem deleting ValidatorEntry from index "+ + "PrefixValidatorByStatusAndStakeAmount", + ) + } + } + + // Set ValidatorEntry key in PrefixValidatorByStatusAndStakeAmount. The value should be nil. + // We parse the ValidatorPKID from the key for this index. + key = DBKeyForValidatorByStatusAndStakeAmount(validatorEntry) + if err := DBSetWithTxn(txn, snap, key, nil, eventManager); err != nil { + return errors.Wrapf( + err, "DBUpdateValidatorWithTxn: problem storing ValidatorEntry in index PrefixValidatorByStatusAndStakeAmount", + ) + } } return nil @@ -2231,7 +2279,8 @@ func (bav *UtxoView) _deleteValidatorEntryMappings(validatorEntry *ValidatorEntr } func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { - // Delete all entries in the ValidatorMapKeyToValidatorEntry UtxoView map. 
+ // Iterate through all the entries and either delete or update them depending on their + // isDeleted status. for validatorMapKeyIter, validatorEntryIter := range bav.ValidatorPKIDToValidatorEntry { // Make a copy of the iterators since we make references to them below. validatorMapKey := validatorMapKeyIter @@ -2247,28 +2296,17 @@ func (bav *UtxoView) _flushValidatorEntriesToDbWithTxn(txn *badger.Txn, blockHei ) } - // Delete the existing mappings in the db for this ValidatorMapKey. They - // will be re-added if the corresponding entry in memory has isDeleted=false. - if err := DBDeleteValidatorWithTxn(txn, bav.Snapshot, &validatorMapKey, bav.EventManager, validatorEntry.isDeleted); err != nil { - return errors.Wrapf(err, "_flushValidatorEntriesToDbWithTxn: ") - } - } - - // Set any !isDeleted ValidatorEntries in the ValidatorMapKeyToValidatorEntry UtxoView map. - for _, validatorEntryIter := range bav.ValidatorPKIDToValidatorEntry { - validatorEntry := *validatorEntryIter + // Delete entries if they have isDeleted=true if validatorEntry.isDeleted { - // If ValidatorEntry.isDeleted then there's nothing to - // do because we already deleted the entry above. + if err := DBDeleteValidatorWithTxn(txn, bav.Snapshot, &validatorMapKey, bav.EventManager, validatorEntry.isDeleted); err != nil { + return errors.Wrapf(err, "_flushValidatorEntriesToDbWithTxn: ") + } } else { - // If !ValidatorEntry.isDeleted then we put the - // corresponding mappings for it into the db. 
- if err := DBPutValidatorWithTxn(txn, bav.Snapshot, &validatorEntry, blockHeight, bav.EventManager); err != nil { + if err := DBUpdateValidatorWithTxn(txn, bav.Snapshot, &validatorEntry, blockHeight, bav.EventManager); err != nil { return errors.Wrapf(err, "_flushValidatorEntriesToDbWithTxn: ") } } } - return nil } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 195a238e6..f1e78854e 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -419,25 +419,6 @@ func (bav *UtxoView) _deleteSnapshotValidatorSetEntry(validatorEntry *ValidatorE func (bav *UtxoView) _flushSnapshotValidatorSetToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { // Delete all SnapshotValidatorSet entries from the db that are in the UtxoView. - for mapKey, validatorEntry := range bav.SnapshotValidatorSet { - if validatorEntry == nil { - return fmt.Errorf( - "_flushSnapshotValidatorSetToDbWithTxn: found nil entry for EpochNumber %d, this should never happen", - mapKey.SnapshotAtEpochNumber, - ) - } - if err := DBDeleteSnapshotValidatorSetEntryWithTxn( - txn, bav.Snapshot, &mapKey.ValidatorPKID, mapKey.SnapshotAtEpochNumber, bav.EventManager, validatorEntry.isDeleted, - ); err != nil { - return errors.Wrapf( - err, - "_flushSnapshotValidatorSetToDbWithTxn: problem deleting ValidatorEntry for EpochNumber %d: ", - mapKey.SnapshotAtEpochNumber, - ) - } - } - - // Put all !isDeleted SnapshotValidatorSet entry into the db from the UtxoView. for mapKey, validatorEntry := range bav.SnapshotValidatorSet { if validatorEntry == nil { return fmt.Errorf( @@ -446,17 +427,25 @@ func (bav *UtxoView) _flushSnapshotValidatorSetToDbWithTxn(txn *badger.Txn, bloc ) } if validatorEntry.isDeleted { - // Skip any deleted SnapshotValidatorSet. 
- continue - } - if err := DBPutSnapshotValidatorSetEntryWithTxn( - txn, bav.Snapshot, validatorEntry, mapKey.SnapshotAtEpochNumber, blockHeight, bav.EventManager, - ); err != nil { - return errors.Wrapf( - err, - "_flushSnapshotValidatorSetToDbWithTxn: problem setting ValidatorEntry for EpochNumber %d: ", - mapKey.SnapshotAtEpochNumber, - ) + if err := DBDeleteSnapshotValidatorSetEntryWithTxn( + txn, bav.Snapshot, &mapKey.ValidatorPKID, mapKey.SnapshotAtEpochNumber, bav.EventManager, validatorEntry.isDeleted, + ); err != nil { + return errors.Wrapf( + err, + "_flushSnapshotValidatorSetToDbWithTxn: problem deleting ValidatorEntry for EpochNumber %d: ", + mapKey.SnapshotAtEpochNumber, + ) + } + } else { + if err := DBUpdateSnapshotValidatorSetEntryWithTxn( + txn, bav.Snapshot, validatorEntry, mapKey.SnapshotAtEpochNumber, blockHeight, bav.EventManager, + ); err != nil { + return errors.Wrapf( + err, + "_flushSnapshotValidatorSetToDbWithTxn: problem setting ValidatorEntry for EpochNumber %d: ", + mapKey.SnapshotAtEpochNumber, + ) + } } } return nil @@ -559,7 +548,14 @@ func DBGetSnapshotValidatorSetByStakeAmount( return validatorEntries, nil } -func DBPutSnapshotValidatorSetEntryWithTxn( +// In order to optimize the flush, we want to only write entries to the db that have changed. +// On top of that, we add a further optimization to only update the +// PrefixSnapshotValidatorSetByStakeAmount index if the stake amount has changed. Not doing this results +// in a lot of writes to badger every epoch that eventually slow block processing to a crawl. +// This is essentially a bug in badger when you repeatedly write to the same key, and we're +// papering over it here in response to encountering the issue. In an ideal world, badger +// would work as intended and this extra optimization wouldn't be necessary. 
+func DBUpdateSnapshotValidatorSetEntryWithTxn( txn *badger.Txn, snap *Snapshot, validatorEntry *ValidatorEntry, @@ -569,28 +565,60 @@ func DBPutSnapshotValidatorSetEntryWithTxn( ) error { if validatorEntry == nil { // This should never happen but is a sanity check. - glog.Errorf("DBPutSnapshotValidatorSetEntryWithTxn: called with nil ValidatorEntry, this should never happen") + glog.Errorf("DBUpdateSnapshotValidatorSetEntryWithTxn: called with nil ValidatorEntry, this should never happen") return nil } - // Put the ValidatorEntry in the SnapshotSetByPKID index. - key := DBKeyForSnapshotValidatorSetByPKID(validatorEntry, snapshotAtEpochNumber) - if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry), eventManager); err != nil { + // Look up the existing entry + dbEntry, err := DBGetSnapshotValidatorSetEntryByPKIDWithTxn( + txn, snap, validatorEntry.ValidatorPKID, snapshotAtEpochNumber) + if err != nil { return errors.Wrapf( - err, - "DBPutSnapshotValidatorSetEntryWithTxn: problem putting ValidatorEntry in the SnapshotValidatorByPKID index: ", + err, "DBUpdateSnapshotValidatorSetEntryWithTxn: problem retrieving ValidatorEntry for PKID %v: ", + validatorEntry.ValidatorPKID, ) } + dbEntryBytes := EncodeToBytes(blockHeight, dbEntry) - // Put the ValidatorPKID in the SnapshotValidatorByStatusAndStakeAmount index. - key = DBKeyForSnapshotValidatorSetByStakeAmount(validatorEntry, snapshotAtEpochNumber) - if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID), eventManager); err != nil { + entryToWriteBytes := EncodeToBytes(blockHeight, validatorEntry) + // If the entry in the db is the same as the entry we want to write, then no need to do anything. + if bytes.Equal(dbEntryBytes, entryToWriteBytes) { + return nil + } + + // Put the ValidatorEntry in the SnapshotSetByPKID index. 
+ key := DBKeyForSnapshotValidatorSetByPKID(validatorEntry, snapshotAtEpochNumber) + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry), eventManager); err != nil { return errors.Wrapf( err, - "DBPutSnapshotValidatorSetEntryWithTxn: problem putting ValidatorPKID in the SnapshotValidatorByStake index: ", + "DBUpdateSnapshotValidatorSetEntryWithTxn: problem putting ValidatorEntry in the SnapshotValidatorByPKID index: ", ) } + // Only update the PrefixSnapshotValidatorSetByStakeAmount index if the stake amount has changed. + if dbEntry == nil || dbEntry.TotalStakeAmountNanos.Cmp(validatorEntry.TotalStakeAmountNanos) != 0 { + // Delete the existing index value if it exists. + if dbEntry != nil { + key = DBKeyForSnapshotValidatorSetByStakeAmount(dbEntry, snapshotAtEpochNumber) + // Note we set isDeleted=false as a hint to the state syncer that we're about to + // update this value immediately after. + if err := DBDeleteWithTxn(txn, snap, key, eventManager, false); err != nil { + return errors.Wrapf( + err, + "DBUpdateSnapshotValidatorSetEntryWithTxn: problem deleting ValidatorPKID from the SnapshotValidatorByStake index: ", + ) + } + } + + // Put the ValidatorPKID in the SnapshotValidatorByStatusAndStakeAmount index. 
+ key = DBKeyForSnapshotValidatorSetByStakeAmount(validatorEntry, snapshotAtEpochNumber) + if err := DBSetWithTxn(txn, snap, key, EncodeToBytes(blockHeight, validatorEntry.ValidatorPKID), eventManager); err != nil { + return errors.Wrapf( + err, + "DBUpdateSnapshotValidatorSetEntryWithTxn: problem putting ValidatorPKID in the SnapshotValidatorByStake index: ", + ) + } + } return nil } diff --git a/lib/server.go b/lib/server.go index 2ecba1841..9fb1ac734 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2336,8 +2336,10 @@ func (srv *Server) _handleBlockBundle(pp *Peer, bundle *MsgDeSoBlockBundle) { pp))) elapsed := time.Since(blockProcessingStartTime) + // Reset the blockProcessingStartTime so that each 1k blocks is timed individually + blockProcessingStartTime = time.Now() if ii != 0 { - fmt.Printf("We are processing %v blocks per second\n", float64(ii)/(float64(elapsed)/1e9)) + fmt.Printf("We are processing %v blocks per second\n", float64(1000)/(float64(elapsed)/1e9)) } } } From 414142dfc8f5bed129ce021ab442afc25cec6dc1 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 2 Apr 2024 14:18:06 -0400 Subject: [PATCH 588/762] Consolidate validateTransactions routine and refresh augmented view routine (#1166) --- cmd/config.go | 2 -- cmd/node.go | 1 - cmd/run.go | 3 -- integration_testing/tools.go | 1 - lib/pos_block_producer_test.go | 8 ++--- lib/pos_blockchain_test.go | 2 +- lib/pos_fee_estimator_test.go | 2 +- lib/pos_mempool.go | 65 +++++----------------------------- lib/pos_mempool_test.go | 18 +++++----- lib/server.go | 2 -- 10 files changed, 23 insertions(+), 81 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 9f63e83cd..74ad8ddd6 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -53,7 +53,6 @@ type Config struct { MempoolFeeEstimatorNumPastBlocks uint64 MempoolMaxValidationViewConnects uint64 TransactionValidationRefreshIntervalMillis uint64 - AugmentedBlockViewRefreshIntervalMillis uint64 // Mining 
MinerPublicKeys []string @@ -131,7 +130,6 @@ func LoadConfig() *Config { config.MempoolFeeEstimatorNumPastBlocks = viper.GetUint64("mempool-fee-estimator-num-past-blocks") config.MempoolMaxValidationViewConnects = viper.GetUint64("mempool-max-validation-view-connects") config.TransactionValidationRefreshIntervalMillis = viper.GetUint64("transaction-validation-refresh-interval-millis") - config.AugmentedBlockViewRefreshIntervalMillis = viper.GetUint64("augmented-block-view-refresh-interval-millis") // Peers config.ConnectIPs = viper.GetStringSlice("connect-ips") diff --git a/cmd/node.go b/cmd/node.go index 76b4a39b8..e9d7f0fd8 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -296,7 +296,6 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Config.MempoolFeeEstimatorNumPastBlocks, node.Config.MempoolMaxValidationViewConnects, node.Config.TransactionValidationRefreshIntervalMillis, - node.Config.AugmentedBlockViewRefreshIntervalMillis, node.Config.StateSyncerMempoolTxnSyncLimit, ) if err != nil { diff --git a/cmd/run.go b/cmd/run.go index b56b96030..7b739d42b 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -107,9 +107,6 @@ func SetupRunFlags(cmd *cobra.Command) { cmd.PersistentFlags().Uint64("transaction-validation-refresh-interval-millis", 10, "The frequency in milliseconds with which the transaction validation routine is run in mempool. "+ "The default value is 10 milliseconds.") - cmd.PersistentFlags().Uint64("augmented-block-view-refresh-interval-millis", 10, - "The frequency in milliseconds with which the augmented block view will be refreshed. 
"+ - "The default value is 10 milliseconds.") // Peers cmd.PersistentFlags().StringSlice("connect-ips", []string{}, diff --git a/integration_testing/tools.go b/integration_testing/tools.go index 369a3eb09..763d3dbe7 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -99,7 +99,6 @@ func _generateConfig(t *testing.T, config *cmd.Config, port uint32, dataDir stri config.MempoolFeeEstimatorNumPastBlocks = 50 config.MempoolMaxValidationViewConnects = 10000 config.TransactionValidationRefreshIntervalMillis = 10 - config.AugmentedBlockViewRefreshIntervalMillis = 10 //config.ArchivalMode = true diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index a926fab1d..a2bf48bd2 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -33,7 +33,7 @@ func TestCreateBlockTemplate(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, 100, + nil, 1, 10000, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -99,7 +99,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, 100, + nil, 1, 10000, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -174,7 +174,7 @@ func TestGetBlockTransactions(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, 100, + nil, 1, 10000, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -258,7 +258,7 @@ func TestGetBlockTransactions(t *testing.T) { testMempool := NewPosMempool() testMempool.Init( params, globalParams, latestBlockView, 2, "", true, 
maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, 100, + nil, 1, 10000, 100, ) require.NoError(testMempool.Start()) defer testMempool.Stop() diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 8ca808a83..fc799a47e 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2765,7 +2765,7 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { mempool := NewPosMempool() require.NoError(t, mempool.Init( params, _testGetDefaultGlobalParams(), latestBlockView, 11, _dbDirSetup(t), false, maxMempoolPosSizeBytes, - mempoolBackupIntervalMillis, 1, nil, 1, 10000, 100, 100, + mempoolBackupIntervalMillis, 1, nil, 1, 10000, 100, )) require.NoError(t, mempool.Start()) require.True(t, mempool.IsRunning()) diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index fdb3ffe4a..080843ddc 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -26,7 +26,7 @@ func TestFeeEstimator(t *testing.T) { mempool := NewPosMempool() err = mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, 100, + nil, 1, 10000, 100, ) require.NoError(t, err) require.NoError(t, mempool.Start()) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 6812f96fa..c4ee0ffae 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -166,9 +166,6 @@ type PosMempool struct { // transactionValidationRoutineRefreshIntervalMillis is the frequency with which the transactionValidationRoutine is run. transactionValidationRefreshIntervalMillis uint64 - // augmentedBlockViewRefreshIntervalMillis is the frequency with which the augmentedLatestBlockView is updated. - augmentedBlockViewRefreshIntervalMillis uint64 - // augmentedLatestBlockViewSequenceNumber is the sequence number of the readOnlyLatestBlockView. It is incremented // every time augmentedLatestBlockView is updated. 
It can be used by obtainers of the augmentedLatestBlockView to // wait until a particular transaction has been connected. @@ -224,7 +221,6 @@ func (mp *PosMempool) Init( feeEstimatorNumPastBlocks uint64, maxValidationViewConnects uint64, transactionValidationRefreshIntervalMillis uint64, - augmentedBlockViewRefreshIntervalMillis uint64, ) error { if mp.status != PosMempoolStatusNotInitialized { return errors.New("PosMempool.Init: PosMempool already initialized") @@ -248,7 +244,6 @@ func (mp *PosMempool) Init( mp.mempoolBackupIntervalMillis = mempoolBackupIntervalMillis mp.maxValidationViewConnects = maxValidationViewConnects mp.transactionValidationRefreshIntervalMillis = transactionValidationRefreshIntervalMillis - mp.augmentedBlockViewRefreshIntervalMillis = augmentedBlockViewRefreshIntervalMillis // TODO: parameterize num blocks. Also, how to pass in blocks. err = mp.feeEstimator.Init( @@ -299,7 +294,6 @@ func (mp *PosMempool) Start() error { mp.startGroup.Add(2) mp.exitGroup.Add(2) mp.startTransactionValidationRoutine() - mp.startAugmentedViewRefreshRoutine() mp.startGroup.Wait() mp.status = PosMempoolStatusRunning return nil @@ -325,55 +319,6 @@ func (mp *PosMempool) startTransactionValidationRoutine() { }() } -func (mp *PosMempool) startAugmentedViewRefreshRoutine() { - go func() { - mp.startGroup.Done() - for { - select { - case <-time.After(time.Duration(mp.augmentedBlockViewRefreshIntervalMillis) * time.Millisecond): - // If we're not within 10 blocks of the PoS cutover, we don't need to update the - // augmentedLatestBlockView. - if mp.latestBlockHeight+10 < uint64(mp.params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { - continue - } - // Update the augmentedLatestBlockView with the latest block view. 
- mp.RLock() - readOnlyViewPointer := mp.readOnlyLatestBlockView - mp.RUnlock() - newView, err := readOnlyViewPointer.CopyUtxoView() - if err != nil { - glog.Errorf("PosMempool.startAugmentedViewRefreshRoutine: Problem copying utxo view outer: %v", err) - continue - } - for _, txn := range mp.GetTransactions() { - copiedView, err := newView.CopyUtxoView() - if err != nil { - glog.Errorf("PosMempool.startAugmentedViewRefreshRoutine: Problem copying utxo view: %v", err) - continue - } - _, _, _, _, err = copiedView.ConnectTransaction( - txn.GetTxn(), txn.Hash(), uint32(mp.latestBlockHeight)+1, time.Now().UnixNano(), false, - false) - // If the transaction successfully connects, we set the newView to the copiedView - // and proceed to the next transaction. - if err == nil { - newView = copiedView - } - } - // Grab the augmentedLatestBlockViewMutex write lock and update the augmentedLatestBlockView. - mp.augmentedReadOnlyLatestBlockViewMutex.Lock() - mp.augmentedReadOnlyLatestBlockView = newView - mp.augmentedReadOnlyLatestBlockViewMutex.Unlock() - // Increment the augmentedLatestBlockViewSequenceNumber. - atomic.AddInt64(&mp.augmentedLatestBlockViewSequenceNumber, 1) - case <-mp.quit: - mp.exitGroup.Done() - return - } - } - }() -} - func (mp *PosMempool) Stop() { if !mp.IsRunning() { return @@ -806,6 +751,12 @@ func (mp *PosMempool) validateTransactions() error { } } mp.Unlock() + // We also update the augmentedLatestBlockView with the view after the transactions have been connected. + mp.augmentedReadOnlyLatestBlockViewMutex.Lock() + mp.augmentedReadOnlyLatestBlockView = copyValidationView + mp.augmentedReadOnlyLatestBlockViewMutex.Unlock() + // Increment the augmentedLatestBlockViewSequenceNumber. + atomic.AddInt64(&mp.augmentedLatestBlockViewSequenceNumber, 1) // Log the hashes for transactions that were removed. 
if len(txnsToRemove) > 0 { @@ -840,7 +791,6 @@ func (mp *PosMempool) refreshNoLock() error { mp.feeEstimator.numPastBlocks, mp.maxValidationViewConnects, mp.transactionValidationRefreshIntervalMillis, - mp.augmentedBlockViewRefreshIntervalMillis, ) if err != nil { return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem initializing temp pool") @@ -958,7 +908,7 @@ func (mp *PosMempool) BlockUntilReadOnlyViewRegenerated() { oldSeqNum := atomic.LoadInt64(&mp.augmentedLatestBlockViewSequenceNumber) newSeqNum := oldSeqNum // Check fairly often. Not too often. - checkIntervalMillis := mp.augmentedBlockViewRefreshIntervalMillis / 5 + checkIntervalMillis := mp.transactionValidationRefreshIntervalMillis / 5 if checkIntervalMillis == 0 { checkIntervalMillis = 1 } @@ -967,6 +917,7 @@ func (mp *PosMempool) BlockUntilReadOnlyViewRegenerated() { newSeqNum = atomic.LoadInt64(&mp.augmentedLatestBlockViewSequenceNumber) } } + func (mp *PosMempool) CheckSpend(op UtxoKey) *MsgDeSoTxn { panic("implement me") } diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 75673cbf0..6b0c9eb6d 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -24,7 +24,7 @@ func TestPosMempoolStart(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( ¶ms, globalParams, nil, 0, dir, false, maxMempoolPosSizeBytes, - mempoolBackupIntervalMillis, 1, nil, 1, 1000, 100, 100, + mempoolBackupIntervalMillis, 1, nil, 1, 1000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -54,7 +54,7 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 1000, 100, 100, + nil, 1, 1000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -73,7 +73,7 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { newPool := NewPosMempool() 
require.NoError(newPool.Init(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, - mempoolBackupIntervalMillis, 1, nil, 1, 1000, 100, 100)) + mempoolBackupIntervalMillis, 1, nil, 1, 1000, 100)) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) newPoolTxns := newPool.GetTransactions() @@ -109,7 +109,7 @@ func TestPosMempoolPrune(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 1000, 100, 100, + nil, 1, 1000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -146,7 +146,7 @@ func TestPosMempoolPrune(t *testing.T) { newPool := NewPosMempool() require.NoError(newPool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 1000, 100, 100, + nil, 1, 1000, 100, )) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) @@ -207,7 +207,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 1000, 100, 100, + nil, 1, 1000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -236,7 +236,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { newPool := NewPosMempool() require.NoError(newPool.Init( params, newGlobalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 1000, 100, 100, + nil, 1, 1000, 100, )) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) @@ -269,7 +269,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 1000, 
100, 100, + nil, 1, 1000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -348,7 +348,7 @@ func TestPosMempoolTransactionValidation(t *testing.T) { mempool := NewPosMempool() require.NoError(t, mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 5, 10, 10, + nil, 1, 5, 10, )) require.NoError(t, mempool.Start()) require.True(t, mempool.IsRunning()) diff --git a/lib/server.go b/lib/server.go index 9fb1ac734..4a731d4a0 100644 --- a/lib/server.go +++ b/lib/server.go @@ -412,7 +412,6 @@ func NewServer( _mempoolFeeEstimatorNumPastBlocks uint64, _mempoolMaxValidationViewConnects uint64, _transactionValidationRefreshIntervalMillis uint64, - _augmentedBlockViewRefreshIntervalMillis uint64, _stateSyncerMempoolTxnSyncLimit uint64, ) ( _srv *Server, @@ -573,7 +572,6 @@ func NewServer( _mempoolFeeEstimatorNumPastBlocks, _mempoolMaxValidationViewConnects, _transactionValidationRefreshIntervalMillis, - _augmentedBlockViewRefreshIntervalMillis, ) if err != nil { return nil, errors.Wrapf(err, "NewServer: Problem initializing PoS mempool"), true From 4e69dc3fbaeb55b45ca8eda414726ff07ec23197 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 2 Apr 2024 14:19:41 -0400 Subject: [PATCH 589/762] Add recent blocks txn cache (#1167) --- lib/legacy_mempool.go | 8 +++++++ lib/pos_mempool.go | 51 +++++++++++++++++++++++++++++++++++++++++-- lib/server.go | 9 +++++++- 3 files changed, 65 insertions(+), 3 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index df30ec70f..7a5505a5a 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2644,6 +2644,14 @@ func (mp *DeSoMempool) BlockUntilReadOnlyViewRegenerated() { } } +// WaitForTxnValidation is a blocking call that waits for a transaction to be validated. 
+// The legacy mempool doesn't validate transactions, so this function always returns true +// after BlockUntilReadOnlyViewRegenerated is called. +func (mp *DeSoMempool) WaitForTxnValidation(_ *BlockHash) bool { + mp.BlockUntilReadOnlyViewRegenerated() + return true +} + func (mp *DeSoMempool) StartMempoolDBDumper() { // If we were instructed to dump txns to the db, then do so periodically // Note this acquired a very minimal lock on the universalTransactionList diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index c4ee0ffae..ec997e219 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -2,6 +2,7 @@ package lib import ( "fmt" + "github.com/decred/dcrd/lru" "path/filepath" "strings" "sync" @@ -36,6 +37,7 @@ type Mempool interface { GetAugmentedUniversalView() (*UtxoView, error) GetAugmentedUtxoViewForPublicKey(pk []byte, optionalTx *MsgDeSoTxn) (*UtxoView, error) BlockUntilReadOnlyViewRegenerated() + WaitForTxnValidation(txHash *BlockHash) bool CheckSpend(op UtxoKey) *MsgDeSoTxn GetOrderedTransactions() []*MempoolTx IsTransactionInPool(txHash *BlockHash) bool @@ -166,10 +168,15 @@ type PosMempool struct { // transactionValidationRoutineRefreshIntervalMillis is the frequency with which the transactionValidationRoutine is run. transactionValidationRefreshIntervalMillis uint64 - // augmentedLatestBlockViewSequenceNumber is the sequence number of the readOnlyLatestBlockView. It is incremented + // augmentedLatestBlockViewSequenceNumber is the sequence number of the augmentedLatestBlockView. It is incremented // every time augmentedLatestBlockView is updated. It can be used by obtainers of the augmentedLatestBlockView to // wait until a particular transaction has been connected. augmentedLatestBlockViewSequenceNumber int64 + + // recentBlockTxnCache is an LRU KV cache used to track the transaction that have been included in blocks. 
+ // This cache is used to power logic that waits for a transaction to either be validated in the mempool + // or be included in a block. + recentBlockTxnCache lru.KVCache } // PosMempoolIterator is a wrapper around FeeTimeIterator, modified to return MsgDeSoTxn instead of MempoolTx. @@ -244,6 +251,7 @@ func (mp *PosMempool) Init( mp.mempoolBackupIntervalMillis = mempoolBackupIntervalMillis mp.maxValidationViewConnects = maxValidationViewConnects mp.transactionValidationRefreshIntervalMillis = transactionValidationRefreshIntervalMillis + mp.recentBlockTxnCache = lru.NewKVCache(100000) // cache 100K latest txns from blocks. // TODO: parameterize num blocks. Also, how to pass in blocks. err = mp.feeEstimator.Init( @@ -379,6 +387,10 @@ func (mp *PosMempool) OnBlockConnected(block *MsgDeSoBlock) { continue } + // Add the transaction to the recentBlockTxnCache. + mp.addTxnHashToRecentBlockCache(*txnHash) + + // Remove the transaction from the mempool. mp.removeTransactionNoLock(existingTxn, true) } @@ -405,7 +417,7 @@ func (mp *PosMempool) OnBlockDisconnected(block *MsgDeSoBlock) { return } - // Remove all transactions in the block from the mempool. + // Add all transactions from the block to the mempool. for _, txn := range block.Txns { txnHash := txn.Hash() @@ -422,6 +434,9 @@ func (mp *PosMempool) OnBlockDisconnected(block *MsgDeSoBlock) { continue } + // Remove the transaction from the recentBlockTxnCache. + mp.deleteTxnHashFromRecentBlockCache(*txnHash) + // Add the transaction to the mempool and then prune if needed. 
if err := mp.addTransactionNoLock(mempoolTx, true); err != nil { glog.Errorf("PosMempool.AddTransaction: Problem adding transaction to mempool: %v", err) @@ -483,6 +498,17 @@ func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction) error { return nil } +func (mp *PosMempool) addTxnHashToRecentBlockCache(txnHash BlockHash) { + mp.recentBlockTxnCache.Add(txnHash, nil) +} + +func (mp *PosMempool) deleteTxnHashFromRecentBlockCache(txnHash BlockHash) { + mp.recentBlockTxnCache.Delete(txnHash) +} +func (mp *PosMempool) isTxnHashInRecentBlockCache(txnHash BlockHash) bool { + return mp.recentBlockTxnCache.Contains(txnHash) +} + func (mp *PosMempool) checkTransactionSanity(txn *MsgDeSoTxn) error { mp.RLock() defer mp.RUnlock() @@ -918,6 +944,27 @@ func (mp *PosMempool) BlockUntilReadOnlyViewRegenerated() { } } +// WaitForTxnValidation blocks until the transaction with the given hash is either validated in the mempool, +// in a recent block, or no longer in the mempool. +func (mp *PosMempool) WaitForTxnValidation(txHash *BlockHash) bool { + // Check fairly often. Not too often. + checkIntervalMillis := mp.transactionValidationRefreshIntervalMillis / 5 + if checkIntervalMillis == 0 { + checkIntervalMillis = 1 + } + for { + mtxn := mp.GetTransaction(txHash) + if mtxn.IsValidated() { + return true + } + if mtxn == nil { + return mp.isTxnHashInRecentBlockCache(*txHash) + } + // Sleep for a bit and then check again. 
+ time.Sleep(time.Duration(checkIntervalMillis) * time.Millisecond) + } +} + func (mp *PosMempool) CheckSpend(op UtxoKey) *MsgDeSoTxn { panic("implement me") } diff --git a/lib/server.go b/lib/server.go index 4a731d4a0..fccebabcc 100644 --- a/lib/server.go +++ b/lib/server.go @@ -259,6 +259,10 @@ func (srv *Server) GetMiner() *DeSoMiner { } func (srv *Server) BroadcastTransaction(txn *MsgDeSoTxn) ([]*MsgDeSoTxn, error) { + txnHash := txn.Hash() + if txnHash == nil { + return nil, fmt.Errorf("BroadcastTransaction: Txn hash is nil") + } // Use the backendServer to add the transaction to the mempool and // relay it to peers. When a transaction is created by the user there // is no need to consider a rateLimit and also no need to verifySignatures @@ -270,7 +274,10 @@ func (srv *Server) BroadcastTransaction(txn *MsgDeSoTxn) ([]*MsgDeSoTxn, error) // At this point, we know the transaction has been run through the mempool. // Now wait for an update of the ReadOnlyUtxoView so we don't break anything. 
- srv.GetMempool().BlockUntilReadOnlyViewRegenerated() + isValidated := srv.GetMempool().WaitForTxnValidation(txnHash) + if !isValidated { + return nil, fmt.Errorf("BroadcastTransaction: Transaction %v was not validated", txnHash) + } return mempoolTxs, nil } From 1045d87e9e59fc414e4f0aa64fe2bc7d17f37607 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 2 Apr 2024 14:53:26 -0400 Subject: [PATCH 590/762] Fix wait start group for pos mempool start (#1168) --- lib/pos_mempool.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index ec997e219..5691fd567 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -299,8 +299,8 @@ func (mp *PosMempool) Start() error { return errors.Wrapf(err, "PosMempool.Start: Problem loading persisted transactions") } } - mp.startGroup.Add(2) - mp.exitGroup.Add(2) + mp.startGroup.Add(1) + mp.exitGroup.Add(1) mp.startTransactionValidationRoutine() mp.startGroup.Wait() mp.status = PosMempoolStatusRunning From 90a4991ac1791a8626a7e8ea404a78fcccc899da Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 2 Apr 2024 15:23:02 -0400 Subject: [PATCH 591/762] Move mp write lock to the top of AddTransaction (#1169) --- lib/pos_mempool.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 5691fd567..06192cb33 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -464,6 +464,10 @@ func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction) error { return fmt.Errorf("PosMempool.AddTransaction: Cannot add a nil transaction") } + // Acquire the mempool lock for all operations related to adding the transaction + mp.Lock() + defer mp.Unlock() + // First, validate that the transaction is properly formatted according to BalanceModel. We acquire a read lock on // the mempool. 
This allows multiple goroutines to safely perform transaction validation concurrently. In particular, // transaction signature verification can be parallelized. @@ -472,10 +476,6 @@ func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction) error { } // If we get this far, it means that the transaction is valid. We can now add it to the mempool. - // We lock the mempool to ensure that no other thread is modifying it while we add the transaction. - mp.Lock() - defer mp.Unlock() - if !mp.IsRunning() { return errors.Wrapf(MempoolErrorNotRunning, "PosMempool.AddTransaction: ") } @@ -510,9 +510,6 @@ func (mp *PosMempool) isTxnHashInRecentBlockCache(txnHash BlockHash) bool { } func (mp *PosMempool) checkTransactionSanity(txn *MsgDeSoTxn) error { - mp.RLock() - defer mp.RUnlock() - if err := CheckTransactionSanity(txn, uint32(mp.latestBlockHeight), mp.params); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem validating transaction sanity") } From 57699a1872e83caddba995a894faf0a109e49546 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 3 Apr 2024 17:21:58 -0400 Subject: [PATCH 592/762] Create Helper Function To Compute the Number of Timeouts Before Epoch Transition (#1171) --- lib/pos_blockchain.go | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 66c39fdaf..daa31290d 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1259,6 +1259,46 @@ func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBloc return true, nil } +func (bc *Blockchain) getNumTimeoutsBeforeEpochTransition(block *MsgDeSoBlock, epochEntry *EpochEntry) (uint64, error) { + if !epochEntry.ContainsBlockHeight(block.Header.Height) { + return 0, errors.New("getNumTimeoutsBeforeEpochTransition: Block height not in epoch") + } + + // Fetch the previous epoch's final block height. 
+ prevEpochFinalBlockHeight := epochEntry.InitialBlockHeight - 1 + + // Fetch the previous epoch's final block that is an ancestor of the given block. This operation is O(n) + // where n is the number of blocks between the given block and the previous epoch's final block. The worst + // case is O(3600) since we only need to go back 3600 blocks to find the previous epoch's final block. + prevEpochFinalBlockHeader := block.Header + for prevEpochFinalBlockHeader.Height > prevEpochFinalBlockHeight { + blockNode, exists := bc.blockIndexByHash[*prevEpochFinalBlockHeader.PrevBlockHash] + if !exists { + return 0, errors.New("getNumTimeoutsBeforeEpochTransition: Missing ancestor block") + } + prevEpochFinalBlockHeader = blockNode.Header + } + + // Fetch the previous epoch's 2nd to last block that is an ancestor of the given block. + prevEpochSecondToLastBlockNode, ok := bc.blockIndexByHash[*prevEpochFinalBlockHeader.PrevBlockHash] + if !ok { + return 0, errors.New("getNumTimeoutsBeforeEpochTransition: Missing ancestor block") + } + + // Ensure that the previous epoch's final two blocks have increasing views + if prevEpochFinalBlockHeader.GetView() <= prevEpochSecondToLastBlockNode.Header.GetView() { + return 0, errors.New("getNumTimeoutsBeforeEpochTransition: Final block view not greater than 2nd to last block view") + } + + // Ensure that the previous epoch's final two blocks have sequential heights + if prevEpochSecondToLastBlockNode.Header.Height != prevEpochFinalBlockHeader.Height-1 { + return 0, errors.New("getNumTimeoutsBeforeEpochTransition: Final block height not sequential with 2nd to last block height") + } + + // Compute the number of timeouts at the end of the previous epoch + return (prevEpochFinalBlockHeader.GetView() - prevEpochSecondToLastBlockNode.Header.GetView() - 1) / 2, nil +} + // isValidPoSQuorumCertificate validates that the QC of this block is valid, meaning a super majority // of the validator set has voted (or timed out). 
It special cases the first block after the PoS cutover // by overriding the validator set used to validate the high QC in the first block after the PoS cutover. From f3df5dab7f32153288c972b13878b0a21b9a2e31 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 3 Apr 2024 17:25:34 -0400 Subject: [PATCH 593/762] Apply Prev Epoch Timeouts To Leader Index Computation (#1172) Apply Prev Epoch Timeouts To Leader Index Computation Adjust processOrphanBlockPoS --- lib/pos_blockchain.go | 223 +++++++++++++++++-------------------- lib/pos_blockchain_test.go | 25 +++-- 2 files changed, 117 insertions(+), 131 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index daa31290d..1750efd6e 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -407,26 +407,26 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // We can't validate the QC without a UtxoView. Return an error. return errors.Wrap(err, "processOrphanBlockPoS: Problem initializing UtxoView") } - currentEpochEntry, err := utxoView.GetCurrentEpochEntry() + + epochEntry, err := utxoView.GetCurrentEpochEntry() if err != nil { // We can't validate the QC without getting the current epoch entry. return errors.Wrap(err, "processOrphanBlockPoS: Problem getting current epoch entry") } - var validatorsByStake []*ValidatorEntry + // If the block is in a previous or future epoch, we need to compute the // proper validator set for the block. We do this by computing the prev/next // epoch entry and then fetching the validator set at the snapshot of the // epoch number of the prev/next epoch entry. - if !currentEpochEntry.ContainsBlockHeight(block.Header.Height) { + if !epochEntry.ContainsBlockHeight(block.Header.Height) { // Get the epoch entry based on the block height. The logic is the same // regardless of whether the block is in a previous or future epoch. // Note that the InitialView cannot be properly computed. 
- var epochEntry *EpochEntry - usePrevEpoch := block.Header.Height < currentEpochEntry.InitialBlockHeight + usePrevEpoch := block.Header.Height < epochEntry.InitialBlockHeight // If it's in a previous epoch, we compute the prev epoch entry. if usePrevEpoch { - epochEntry, err = utxoView.simulatePrevEpochEntry(currentEpochEntry.EpochNumber, - currentEpochEntry.InitialBlockHeight) + epochEntry, err = utxoView.simulatePrevEpochEntry(epochEntry.EpochNumber, + epochEntry.InitialBlockHeight) if err != nil { return errors.Wrap(err, "processOrphanBlockPoS: Problem computing prev epoch entry") } @@ -435,8 +435,8 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // the next epoch entry and check if it is in that epoch. If it's in a future epoch, we just throw it away. // We supply 0 for the view and 0 for the block timestamp as we don't know what those values should be, and // we will ignore these values. - epochEntry, err = utxoView.computeNextEpochEntry(currentEpochEntry.EpochNumber, - currentEpochEntry.FinalBlockHeight, 0, 0) + epochEntry, err = utxoView.computeNextEpochEntry(epochEntry.EpochNumber, + epochEntry.FinalBlockHeight, 0, 0) if err != nil { return errors.Wrap(err, "processOrphanBlockPoS: Problem computing next epoch entry") } @@ -451,99 +451,64 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { return fmt.Errorf("processOrphanBlockPoS: Block height %d is too far in the %v", block.Header.Height, errSuffix) } - var epochEntrySnapshotAtEpochNumber uint64 - epochEntrySnapshotAtEpochNumber, err = utxoView.ComputeSnapshotEpochNumberForEpoch(epochEntry.EpochNumber) - if err != nil { - return errors.Wrapf(err, - "processOrphanBlockPoS: Problem getting snapshot at epoch number for poch entry at epoch #%d", - epochEntry.EpochNumber) - } - // Okay now that we've gotten the SnapshotAtEpochNumber for the prev/next epoch, we can make sure that the - // proposer of the block is within the set of potential block proposers for 
the prev/next epoch based on - // the VotingPublicKey. - // First, we get the snapshot validator entry based on the BLS public key in the header. - snapshotBLSPublicKeyPKIDEntry, err := utxoView.GetSnapshotValidatorBLSPublicKeyPKIDPairEntry( - block.Header.ProposerVotingPublicKey, epochEntrySnapshotAtEpochNumber) - if err != nil { - return errors.Wrapf(err, - "processOrphanBlockPoS: Problem getting snapshot validator entry for block proposer %v", - block.Header.ProposerVotingPublicKey) - } - // If no snapshot BLSPublicKeyPKIDEntry exists, we'll never accept this block as - // its block proposer is not in the validator set as we did not snapshot its BLS Public key. - // This is a spam prevention measure, so we just throw away the block. - if snapshotBLSPublicKeyPKIDEntry == nil { - return nil - } - // Fetch the snapshot leader PKIDs - snapshotLeaderPKIDs, err := utxoView.GetSnapshotLeaderScheduleAtEpochNumber(epochEntrySnapshotAtEpochNumber) - if err != nil { - return errors.Wrapf(err, - "processOrphanBlockPoS: Problem getting snapshot leader schedule at snapshot at epoch number %d", - epochEntrySnapshotAtEpochNumber) - } - // Get the PKID for the block proposer from the snapshot validator entry. - blockProposerPKID := snapshotBLSPublicKeyPKIDEntry.PKID - // TODO: Replace w/ collections.Any for simplicity. There is an issue with this version - // of Go's compiler that is preventing us from using collections.Any here. - // We can now check if the block proposer is in the set of snapshot leader PKIDs. - blockProposerSeen := false - for _, snapshotLeaderPKID := range snapshotLeaderPKIDs { - if snapshotLeaderPKID.Eq(blockProposerPKID) { - blockProposerSeen = true - break - } - } - if !blockProposerSeen { - // We'll never accept this block as its block proposer is not in the set of - // potential leaders. As a spam-prevention measure, we simply return nil and throw it away. 
- return nil - } - validatorsByStake, err = utxoView.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber( + } + + var epochEntrySnapshotAtEpochNumber uint64 + epochEntrySnapshotAtEpochNumber, err = utxoView.ComputeSnapshotEpochNumberForEpoch(epochEntry.EpochNumber) + if err != nil { + return errors.Wrapf(err, + "processOrphanBlockPoS: Problem getting snapshot at epoch number for poch entry at epoch #%d", + epochEntry.EpochNumber) + } + // Okay now that we've gotten the SnapshotAtEpochNumber for the prev/next epoch, we can make sure that the + // proposer of the block is within the set of potential block proposers for the prev/next epoch based on + // the VotingPublicKey. + // First, we get the snapshot validator entry based on the BLS public key in the header. + snapshotBLSPublicKeyPKIDEntry, err := utxoView.GetSnapshotValidatorBLSPublicKeyPKIDPairEntry( + block.Header.ProposerVotingPublicKey, epochEntrySnapshotAtEpochNumber) + if err != nil { + return errors.Wrapf(err, + "processOrphanBlockPoS: Problem getting snapshot validator entry for block proposer %v", + block.Header.ProposerVotingPublicKey) + } + // If no snapshot BLSPublicKeyPKIDEntry exists, we'll never accept this block as + // its block proposer is not in the validator set as we did not snapshot its BLS Public key. + // This is a spam prevention measure, so we just throw away the block. + if snapshotBLSPublicKeyPKIDEntry == nil { + return nil + } + // Fetch the snapshot leader PKIDs + snapshotLeaderPKIDs, err := utxoView.GetSnapshotLeaderScheduleAtEpochNumber(epochEntrySnapshotAtEpochNumber) + if err != nil { + return errors.Wrapf(err, + "processOrphanBlockPoS: Problem getting snapshot leader schedule at snapshot at epoch number %d", epochEntrySnapshotAtEpochNumber) - if err != nil { - return errors.Wrapf(err, - "processOrphanBlockPoS: Problem getting validator set at snapshot at epoch number %d", - epochEntrySnapshotAtEpochNumber) - } - } else { - // This block is in the current epoch! 
- // First we validate the proposer vote partial signature - snapshotAtEpochNumber, err := utxoView.GetCurrentSnapshotEpochNumber() - if err != nil { - return errors.Wrap(err, "processOrphanBlockPoS: Problem getting current snapshot epoch number") - } - // Validate the proposer partial sig. - isValidPartialSig, err := utxoView.hasValidProposerPartialSignaturePoS(block, snapshotAtEpochNumber) - if err != nil { - return errors.Wrap(err, "processOrphanBlockPoS: Problem validating proposer partial sig") - } - if !isValidPartialSig { - // We'll never accept this block since it has an invalid leader signature. - // As a spam-prevention measure, we just throw away this block - // and don't store it. - return nil - } - // Next we validate that the leader is correct. We can only do this if the block - // is in the current epoch since we need the current epoch entry's initial view - // to compute the proper leader. - var isBlockProposerValid bool - isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(block) - if err != nil { - return errors.Wrapf(err, "processOrphanBlockPoS: Problem validating block proposer") - } - if !isBlockProposerValid { - // If the block proposer isn't valid, we'll never accept this block. As a spam-prevention - // measure, we just throw away this block and don't store it. - return nil - } - // If we get here, we know we have the correct block proposer. We now fetch the validators ordered by - // stake, so we can validate the QC. - validatorsByStake, err = utxoView.GetAllSnapshotValidatorSetEntriesByStake() - if err != nil { - return errors.Wrap(err, "processOrphanBlockPoS: Problem getting validator set") + } + // Get the PKID for the block proposer from the snapshot validator entry. + blockProposerPKID := snapshotBLSPublicKeyPKIDEntry.PKID + // TODO: Replace w/ collections.Any for simplicity. There is an issue with this version + // of Go's compiler that is preventing us from using collections.Any here. 
+ // We can now check if the block proposer is in the set of snapshot leader PKIDs. + blockProposerSeen := false + for _, snapshotLeaderPKID := range snapshotLeaderPKIDs { + if snapshotLeaderPKID.Eq(blockProposerPKID) { + blockProposerSeen = true + break } } + if !blockProposerSeen { + // We'll never accept this block as its block proposer is not in the set of + // potential leaders. As a spam-prevention measure, we simply return nil and throw it away. + return nil + } + validatorsByStake, err := utxoView.GetAllSnapshotValidatorSetEntriesByStakeAtEpochNumber( + epochEntrySnapshotAtEpochNumber) + if err != nil { + return errors.Wrapf(err, + "processOrphanBlockPoS: Problem getting validator set at snapshot at epoch number %d", + epochEntrySnapshotAtEpochNumber) + } + // Okay now we have the validator set ordered by stake, we can validate the QC. if err = bc.isValidPoSQuorumCertificate(block, validatorsByStake); err != nil { // If we hit an error, we know that the QC is invalid, and we'll never accept this block, @@ -650,7 +615,7 @@ func (bc *Blockchain) validateLeaderAndQC( } } - isBlockProposerValid, err := parentUtxoView.hasValidBlockProposerPoS(block) + isBlockProposerValid, err := bc.hasValidBlockProposerPoS(block, parentUtxoView) if err != nil { return false, errors.Wrapf(err, "validateAndIndexBlockPoS: Problem validating block proposer") @@ -1169,12 +1134,12 @@ func (bav *UtxoView) hasValidProposerPartialSignaturePoS(block *MsgDeSoBlock, sn // block height + view number pair. It returns a bool indicating whether // we confirmed that the leader is valid. If we receive an error, we are unsure // if the leader is invalid or not, so we return false. 
-func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBlockProposer bool, _err error) { - currentEpochEntry, err := bav.GetCurrentEpochEntry() +func (bc *Blockchain) hasValidBlockProposerPoS(block *MsgDeSoBlock, utxoView *UtxoView) (_isValidBlockProposer bool, _err error) { + currentEpochEntry, err := utxoView.GetCurrentEpochEntry() if err != nil { return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting current epoch entry") } - leaders, err := bav.GetCurrentSnapshotLeaderSchedule() + leaders, err := utxoView.GetCurrentSnapshotLeaderSchedule() if err != nil { return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting leader schedule") } @@ -1192,34 +1157,52 @@ func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBloc if viewDiff < heightDiff { return false, nil } + + // Fetch the number timeouts that took place at the final block height of the previous epoch. We need to + // compute this number because a timeout at the start of the current epoch would regress the chain to + // the previous epoch, which would count the timeout as part of the previous epoch. + numTimeoutsBeforeEpochTransition, err := bc.getNumTimeoutsBeforeEpochTransition(block, currentEpochEntry) + if err != nil { + return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting num timeouts before epoch transition") + } + // We compute the current index in the leader schedule as follows: - // [(block.View - currentEpoch.InitialView) - (block.Height - currentEpoch.InitialHeight)] % len(leaders) - // The number of views that have elapsed since the start of the epoch is block.View - currentEpoch.InitialView. - // The number of blocks that have been added to the chain since the start of the epoch is - // block.Height - currentEpoch.InitialHeight. The difference between these two numbers is the number of timeouts - // that have occurred in this epoch. 
For each timeout, we need to go to the next leader in the schedule. - // If we have more timeouts than leaders in the schedule, we start from the top of the schedule again, - // which is why we take the modulo of the length of the leader schedule. - // A quick example: If we have 3 leaders in the schedule and the epoch started at height 10 and view 11, - // and the current block is at height 15 and view 17, then the number of timeouts that have occurred is - // (17 - 11) - (15 - 10) = 1. This means this block should be proposed by the 2nd leader in the schedule, - // which is at index 1. - leaderIdxUint64 := (viewDiff - heightDiff) % uint64(len(leaders)) + // - [(block.View - currentEpoch.InitialView) - (block.Height - currentEpoch.InitialHeight) + numTimeoutsBeforeEpochTransition] % len(leaders) + // - The number of views that have elapsed since the start of the epoch is block.View - currentEpoch.InitialView. + // - The number of blocks that have been added to the chain since the start of the epoch is + // block.Height - currentEpoch.InitialHeight. + // - The difference between the above two numbers is the number of timeouts that have occurred in this epoch. + // - The numTimeoutsBeforeEpochTransition is the number of epochs that have occurred during the epoch transition + // and are counted as part of the previous epoch. + // + // For each timeout, we skip one leader in the in the schedule. If we have more timeouts than leaders in + // the schedule, we start from the top of the schedule again, which is why we take the modulo of the length + // of the leader schedule. + // + // A quick example: + // - Say we have 3 leaders in the schedule + // - The epoch started at height 10 and view 11 + // - The current block is at height 15 and view 17 + // - There were 6 timeouts at the epoch transition + // - Then the number of timeouts that have occurred is (17 - 11) - (15 - 10) + 6 = 7. + // - The leader index is 7 % 3 = 1. 
+ // - This means this block should be proposed by the 2nd leader in the schedule, which is at index 1. + leaderIdxUint64 := (viewDiff + numTimeoutsBeforeEpochTransition - heightDiff) % uint64(len(leaders)) if leaderIdxUint64 > math.MaxUint16 { return false, nil } leaderIdx := uint16(leaderIdxUint64) - leaderEntry, err := bav.GetSnapshotLeaderScheduleValidator(leaderIdx) + leaderEntry, err := utxoView.GetSnapshotLeaderScheduleValidator(leaderIdx) if err != nil { return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting leader schedule validator") } - snapshotAtEpochNumber, err := bav.ComputeSnapshotEpochNumberForEpoch(currentEpochEntry.EpochNumber) + snapshotAtEpochNumber, err := utxoView.ComputeSnapshotEpochNumberForEpoch(currentEpochEntry.EpochNumber) if err != nil { return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting snapshot epoch number for epoch #%d", currentEpochEntry.EpochNumber) } - leaderEntryFromVotingPublicKey, err := bav.GetSnapshotValidatorEntryByBLSPublicKey( + leaderEntryFromVotingPublicKey, err := utxoView.GetSnapshotValidatorEntryByBLSPublicKey( block.Header.ProposerVotingPublicKey, snapshotAtEpochNumber) if err != nil { @@ -1245,9 +1228,9 @@ func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBloc currentEpochEntry.InitialBlockHeight, leaderIdx, len(leaders), - PkToString(leaderEntry.ValidatorPKID.ToBytes(), bav.Params), + PkToString(leaderEntry.ValidatorPKID.ToBytes(), bc.params), leaderEntry.VotingPublicKey.ToAbbreviatedString(), - PkToString(leaderEntryFromVotingPublicKey.ValidatorPKID.ToBytes(), bav.Params), + PkToString(leaderEntryFromVotingPublicKey.ValidatorPKID.ToBytes(), bc.params), leaderEntryFromVotingPublicKey.VotingPublicKey.ToAbbreviatedString(), block.Header.ProposerVotingPublicKey.ToAbbreviatedString(), ) diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index fc799a47e..71bf22a3e 100644 --- a/lib/pos_blockchain_test.go +++ 
b/lib/pos_blockchain_test.go @@ -585,6 +585,9 @@ func TestHasValidBlockProposerPoS(t *testing.T) { return viewNumber } + // Get a pointer to the blockchain instance + bc := testMeta.chain + // Seed a CurrentEpochEntry. tmpUtxoView := _newUtxoView(testMeta) tmpUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 0, FinalBlockHeight: blockHeight + 1}) @@ -677,14 +680,14 @@ func TestHasValidBlockProposerPoS(t *testing.T) { ProposerVotingPublicKey: leader0Entry.VotingPublicKey, }, } - isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) require.NoError(t, err) require.True(t, isBlockProposerValid) // If we have a different proposer public key, we will have an error leader1Entry := validatorPKIDToValidatorEntryMap[*leaderSchedule[1]] dummyBlock.Header.ProposerVotingPublicKey = leader1Entry.VotingPublicKey.Copy() - isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) require.NoError(t, err) require.False(t, isBlockProposerValid) @@ -692,7 +695,7 @@ func TestHasValidBlockProposerPoS(t *testing.T) { // we move to leader 1. 
dummyBlock.Header.ProposedInView = viewNumber + 2 dummyBlock.Header.ProposerVotingPublicKey = leader1Entry.VotingPublicKey - isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) require.NoError(t, err) require.True(t, isBlockProposerValid) @@ -701,27 +704,27 @@ func TestHasValidBlockProposerPoS(t *testing.T) { dummyBlock.Header.ProposedInView = viewNumber + 5 leader4Entry := validatorPKIDToValidatorEntryMap[*leaderSchedule[4]] dummyBlock.Header.ProposerVotingPublicKey = leader4Entry.VotingPublicKey - isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) require.NoError(t, err) require.True(t, isBlockProposerValid) // If we have 7 timeouts, we know everybody timed out, so we go back to leader 0. dummyBlock.Header.ProposedInView = viewNumber + 8 dummyBlock.Header.ProposerVotingPublicKey = leader0Entry.VotingPublicKey - isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) require.NoError(t, err) require.True(t, isBlockProposerValid) // If the block view is less than the epoch's initial view, this is an error. dummyBlock.Header.ProposedInView = viewNumber - isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) require.NoError(t, err) require.False(t, isBlockProposerValid) // If the block height is less than epoch's initial block height, this is an error. 
dummyBlock.Header.ProposedInView = viewNumber + 1 dummyBlock.Header.Height = blockHeight - isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) require.NoError(t, err) require.False(t, isBlockProposerValid) @@ -730,7 +733,7 @@ func TestHasValidBlockProposerPoS(t *testing.T) { // This would imply that we've had more blocks than views, which is not possible. dummyBlock.Header.ProposedInView = viewNumber + 1 dummyBlock.Header.Height = blockHeight + 2 - isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) require.NoError(t, err) require.False(t, isBlockProposerValid) } @@ -2029,8 +2032,8 @@ func TestProcessOrphanBlockPoS(t *testing.T) { require.True(t, blockNode.IsValidateFailed()) require.False(t, blockNode.IsValidated()) } - // Generate a real block in this epoch and change the block proposer. This should fail the spam prevention check - // and the block will not be in the block index. + // Generate a real block in this epoch and make the block proposer any public key that is not in the + // leader schedule. This should fail the spam prevention check and the block will not be in the block index. { var realBlock *MsgDeSoBlock realBlock = _generateRealBlock(testMeta, 12, 12, 1273, testMeta.chain.BlockTip().Hash, false) @@ -2041,7 +2044,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { currentEpochEntry, err := utxoView.GetCurrentEpochEntry() require.NoError(t, err) require.True(t, currentEpochEntry.ContainsBlockHeight(12)) - // Change the block proposer to some any other validator's public key. + // Change the block proposer to any other validator's public key. 
realBlock.Header.ProposerVotingPublicKey = _generateRandomBLSPrivateKey(t).PublicKey() updateProposerVotePartialSignatureForBlock(testMeta, realBlock) // There should be no error, but the block should be marked as ValidateFailed. From 9697733f63fc5a4092580252e237cf1c430a24d1 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 3 Apr 2024 17:50:49 -0400 Subject: [PATCH 594/762] Address Nina's Naming And Nit Comments (#1173) --- lib/pos_blockchain.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 1750efd6e..a4842ce3e 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1134,12 +1134,12 @@ func (bav *UtxoView) hasValidProposerPartialSignaturePoS(block *MsgDeSoBlock, sn // block height + view number pair. It returns a bool indicating whether // we confirmed that the leader is valid. If we receive an error, we are unsure // if the leader is invalid or not, so we return false. 
-func (bc *Blockchain) hasValidBlockProposerPoS(block *MsgDeSoBlock, utxoView *UtxoView) (_isValidBlockProposer bool, _err error) { - currentEpochEntry, err := utxoView.GetCurrentEpochEntry() +func (bc *Blockchain) hasValidBlockProposerPoS(block *MsgDeSoBlock, parentUtxoView *UtxoView) (_isValidBlockProposer bool, _err error) { + currentEpochEntry, err := parentUtxoView.GetCurrentEpochEntry() if err != nil { return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting current epoch entry") } - leaders, err := utxoView.GetCurrentSnapshotLeaderSchedule() + leaders, err := parentUtxoView.GetCurrentSnapshotLeaderSchedule() if err != nil { return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting leader schedule") } @@ -1172,7 +1172,7 @@ func (bc *Blockchain) hasValidBlockProposerPoS(block *MsgDeSoBlock, utxoView *Ut // - The number of blocks that have been added to the chain since the start of the epoch is // block.Height - currentEpoch.InitialHeight. // - The difference between the above two numbers is the number of timeouts that have occurred in this epoch. - // - The numTimeoutsBeforeEpochTransition is the number of epochs that have occurred during the epoch transition + // - The numTimeoutsBeforeEpochTransition is the number of timeouts that have occurred during the epoch transition // and are counted as part of the previous epoch. // // For each timeout, we skip one leader in the in the schedule. 
If we have more timeouts than leaders in @@ -1192,17 +1192,17 @@ func (bc *Blockchain) hasValidBlockProposerPoS(block *MsgDeSoBlock, utxoView *Ut return false, nil } leaderIdx := uint16(leaderIdxUint64) - leaderEntry, err := utxoView.GetSnapshotLeaderScheduleValidator(leaderIdx) + leaderEntry, err := parentUtxoView.GetSnapshotLeaderScheduleValidator(leaderIdx) if err != nil { return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting leader schedule validator") } - snapshotAtEpochNumber, err := utxoView.ComputeSnapshotEpochNumberForEpoch(currentEpochEntry.EpochNumber) + snapshotAtEpochNumber, err := parentUtxoView.ComputeSnapshotEpochNumberForEpoch(currentEpochEntry.EpochNumber) if err != nil { return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting snapshot epoch number for epoch #%d", currentEpochEntry.EpochNumber) } - leaderEntryFromVotingPublicKey, err := utxoView.GetSnapshotValidatorEntryByBLSPublicKey( + leaderEntryFromVotingPublicKey, err := parentUtxoView.GetSnapshotValidatorEntryByBLSPublicKey( block.Header.ProposerVotingPublicKey, snapshotAtEpochNumber) if err != nil { From 51a8afac114b7382a35a8d02cf6adcfcecf15ba0 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 4 Apr 2024 10:33:15 -0400 Subject: [PATCH 595/762] Move deleting of expired nonces to end of epoch hook (#1170) --- lib/block_view.go | 3 ++- lib/db_utils.go | 11 ++++++----- lib/db_utils_test.go | 2 +- lib/pos_epoch_complete_hook.go | 10 ++++++++++ 4 files changed, 19 insertions(+), 7 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 964c78e65..343eae89d 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4514,7 +4514,8 @@ func (bav *UtxoView) ConnectBlock( // after connecting all transactions in the block. These operations // are always the last utxo operation in a given block. 
var blockLevelUtxoOps []*UtxoOperation - if blockHeight >= uint64(bav.Params.ForkHeights.BalanceModelBlockHeight) { + if blockHeight >= uint64(bav.Params.ForkHeights.BalanceModelBlockHeight) && + !bav.Params.IsPoSBlockHeight(blockHeight) { prevNonces := bav.GetTransactorNonceEntriesToDeleteAtBlockHeight(blockHeight) blockLevelUtxoOps = append(blockLevelUtxoOps, &UtxoOperation{ Type: OperationTypeDeleteExpiredNonces, diff --git a/lib/db_utils.go b/lib/db_utils.go index 1e1aed02f..49a002a95 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -11789,18 +11789,19 @@ func DbGetTransactorNonceEntriesToExpireAtBlockHeight(handle *badger.DB, blockHe } func DbGetTransactorNonceEntriesToExpireAtBlockHeightWithTxn(txn *badger.Txn, blockHeight uint64) []*TransactorNonceEntry { - startPrefix := _dbKeyForTransactorNonceEntry(&DeSoNonce{ExpirationBlockHeight: blockHeight, PartialID: math.MaxUint64}, &MaxPKID) endPrefix := append([]byte{}, Prefixes.PrefixNoncePKIDIndex...) opts := badger.DefaultIteratorOptions - opts.Reverse = true opts.Prefix = endPrefix opts.PrefetchValues = false nodeIterator := txn.NewIterator(opts) defer nodeIterator.Close() var transactorNonceEntries []*TransactorNonceEntry - for nodeIterator.Seek(startPrefix); nodeIterator.ValidForPrefix(endPrefix); nodeIterator.Next() { - transactorNonceEntries = append(transactorNonceEntries, - TransactorNonceKeyToTransactorNonceEntry(nodeIterator.Item().Key())) + for nodeIterator.Seek(endPrefix); nodeIterator.ValidForPrefix(endPrefix); nodeIterator.Next() { + transactorNonceEntry := TransactorNonceKeyToTransactorNonceEntry(nodeIterator.Item().Key()) + if transactorNonceEntry.Nonce.ExpirationBlockHeight > blockHeight { + break + } + transactorNonceEntries = append(transactorNonceEntries, transactorNonceEntry) } return transactorNonceEntries } diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index a06d0fedc..7b5decf50 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -649,7 +649,7 @@ func 
TestDeleteExpiredTransactorNonceEntries(t *testing.T) { -1, 1, ) - // There should be once nonce in the db. + // There should be three nonces in the db. nonceEntries := DbGetAllTransactorNonceEntries(testMeta.db) require.Equal(3, len(nonceEntries)) globalParamsEntry := DbGetGlobalParamsEntry(db, chain.snapshot) diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 2bbf6432d..cbce462af 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -113,6 +113,16 @@ func (bav *UtxoView) runEpochCompleteStateTransition(blockHeight uint64, blockTi return nil, errors.Wrapf(err, "runEpochCompleteStateTransition: problem rewarding snapshot stakes: ") } + // Delete expired nonces + prevNonces := bav.GetTransactorNonceEntriesToDeleteAtBlockHeight(blockHeight) + utxoOperations = append(utxoOperations, &UtxoOperation{ + Type: OperationTypeDeleteExpiredNonces, + PrevNonceEntries: prevNonces, + }) + for _, prevNonceEntry := range prevNonces { + bav.DeleteTransactorNonceEntry(prevNonceEntry) + } + return utxoOperations, nil } From 6f8a5bd229bef030ca5818930aad167231a71e9f Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 4 Apr 2024 10:35:33 -0400 Subject: [PATCH 596/762] BLS public key enhancements (#1174) --- bls/signature.go | 64 ++++++++++++++++++++++++++--------------- bls/signature_test.go | 2 +- lib/network_test.go | 3 ++ lib/pos_network_test.go | 2 ++ 4 files changed, 47 insertions(+), 24 deletions(-) diff --git a/bls/signature.go b/bls/signature.go index 9d12a2b38..143eca3f7 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -45,7 +45,10 @@ func AggregateSignatures(signatures []*Signature) (*Signature, error) { // true if every bls.PublicKey in the slice signed the payload. The input bls.Signature is the aggregate // signature of each of their respective bls.Signatures for that payload. 
func VerifyAggregateSignatureSinglePayload(publicKeys []*PublicKey, signature *Signature, payloadBytes []byte) (bool, error) { - flowPublicKeys := extractFlowPublicKeys(publicKeys) + flowPublicKeys, err := extractFlowPublicKeys(publicKeys) + if err != nil { + return false, err + } return flowCrypto.VerifyBLSSignatureOneMessage(flowPublicKeys, signature.flowSignature, payloadBytes, hashingAlgorithm) } @@ -57,7 +60,10 @@ func VerifyAggregateSignatureMultiplePayloads(publicKeys []*PublicKey, signature return false, fmt.Errorf("number of public keys %d does not equal number of payloads %d", len(publicKeys), len(payloadsBytes)) } - flowPublicKeys := extractFlowPublicKeys(publicKeys) + flowPublicKeys, err := extractFlowPublicKeys(publicKeys) + if err != nil { + return false, err + } var hashingAlgorithms []hash.Hasher for ii := 0; ii < len(publicKeys); ii++ { @@ -103,7 +109,8 @@ func (privateKey *PrivateKey) PublicKey() *PublicKey { if privateKey == nil || privateKey.flowPrivateKey == nil { return nil } - return &PublicKey{flowPublicKey: privateKey.flowPrivateKey.PublicKey()} + publicKey := privateKey.flowPrivateKey.PublicKey() + return &PublicKey{flowPublicKey: publicKey, flowPublicKeyBytes: publicKey.Encode()} } func (privateKey *PrivateKey) ToString() string { @@ -175,38 +182,46 @@ func (privateKey *PrivateKey) Eq(other *PrivateKey) bool { // type PublicKey struct { - flowPublicKey flowCrypto.PublicKey + flowPublicKeyBytes []byte + flowPublicKey flowCrypto.PublicKey +} + +func (publicKey *PublicKey) loadFlowPublicKey() error { + if publicKey.flowPublicKey == nil && len(publicKey.flowPublicKeyBytes) > 0 { + var err error + publicKey.flowPublicKey, err = flowCrypto.DecodePublicKey(signingAlgorithm, publicKey.flowPublicKeyBytes) + return err + } + return nil } func (publicKey *PublicKey) Verify(signature *Signature, input []byte) (bool, error) { - if publicKey == nil || publicKey.flowPublicKey == nil { + if publicKey == nil || len(publicKey.flowPublicKeyBytes) == 0 { 
return false, errors.New("bls.PublicKey is nil") } + if publicKey.loadFlowPublicKey() != nil { + return false, errors.New("failed to load flowPublicKey") + } return publicKey.flowPublicKey.Verify(signature.flowSignature, input, hashingAlgorithm) } func (publicKey *PublicKey) ToBytes() []byte { - var publicKeyBytes []byte - if publicKey != nil && publicKey.flowPublicKey != nil { - publicKeyBytes = publicKey.flowPublicKey.Encode() - } - return publicKeyBytes + return publicKey.flowPublicKeyBytes } func (publicKey *PublicKey) FromBytes(publicKeyBytes []byte) (*PublicKey, error) { if publicKey == nil || len(publicKeyBytes) == 0 { return nil, nil } - var err error - publicKey.flowPublicKey, err = flowCrypto.DecodePublicKey(signingAlgorithm, publicKeyBytes) - return publicKey, err + publicKey.flowPublicKeyBytes = publicKeyBytes + return publicKey, nil } func (publicKey *PublicKey) ToString() string { - if publicKey == nil || publicKey.flowPublicKey == nil { + if publicKey == nil || len(publicKey.flowPublicKeyBytes) == 0 { return "" } - return publicKey.flowPublicKey.String() + return "0x" + hex.EncodeToString(publicKey.flowPublicKeyBytes) } func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, error) { @@ -220,8 +235,7 @@ func (publicKey *PublicKey) FromString(publicKeyString string) (*PublicKey, erro if err != nil { return nil, err } - // Convert from byte slice to bls.PublicKey. 
- publicKey.flowPublicKey, err = flowCrypto.DecodePublicKey(signingAlgorithm, publicKeyBytes) + publicKey.flowPublicKeyBytes = publicKeyBytes return publicKey, err } @@ -254,10 +268,10 @@ func (publicKey *PublicKey) UnmarshalJSON(data []byte) error { } func (publicKey *PublicKey) Eq(other *PublicKey) bool { - if publicKey == nil || publicKey.flowPublicKey == nil || other == nil { + if publicKey == nil || publicKey.flowPublicKeyBytes == nil || other == nil { return false } - return publicKey.flowPublicKey.Equals(other.flowPublicKey) + return bytes.Equal(publicKey.flowPublicKeyBytes, other.flowPublicKeyBytes) } func (publicKey *PublicKey) Copy() *PublicKey { @@ -265,12 +279,13 @@ func (publicKey *PublicKey) Copy() *PublicKey { return nil } return &PublicKey{ - flowPublicKey: publicKey.flowPublicKey, + flowPublicKeyBytes: publicKey.flowPublicKeyBytes, + flowPublicKey: publicKey.flowPublicKey, } } func (publicKey *PublicKey) IsEmpty() bool { - return publicKey == nil || publicKey.flowPublicKey == nil + return publicKey == nil || publicKey.flowPublicKeyBytes == nil } type SerializedPublicKey string @@ -381,10 +396,13 @@ func (signature *Signature) IsEmpty() bool { return signature == nil || signature.flowSignature == nil } -func extractFlowPublicKeys(publicKeys []*PublicKey) []flowCrypto.PublicKey { +func extractFlowPublicKeys(publicKeys []*PublicKey) ([]flowCrypto.PublicKey, error) { flowPublicKeys := make([]flowCrypto.PublicKey, len(publicKeys)) for i, publicKey := range publicKeys { + if err := publicKey.loadFlowPublicKey(); err != nil { + return nil, err + } flowPublicKeys[i] = publicKey.flowPublicKey } - return flowPublicKeys + return flowPublicKeys, nil } diff --git a/bls/signature_test.go b/bls/signature_test.go index 5596e3958..acfd012f5 100644 --- a/bls/signature_test.go +++ b/bls/signature_test.go @@ -161,7 +161,7 @@ func TestVerifyingBLSSignatures(t *testing.T) { // Test bls.PublicKey.Copy(). 
blsPublicKey1Copy := blsPublicKey1.Copy() require.True(t, blsPublicKey1.Eq(blsPublicKey1Copy)) - blsPublicKey1Copy.flowPublicKey = _generateRandomBLSPrivateKey(t).PublicKey().flowPublicKey + blsPublicKey1Copy.flowPublicKeyBytes = _generateRandomBLSPrivateKey(t).PublicKey().ToBytes() require.False(t, blsPublicKey1.Eq(blsPublicKey1Copy)) // Test bls.Signature.Copy(). diff --git a/lib/network_test.go b/lib/network_test.go index 47dd12339..2598ab0f1 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -117,6 +117,9 @@ func TestVerackV1(t *testing.T) { msg.PublicKey = priv.PublicKey() msg.Signature, err = priv.Sign(hash[:]) require.NoError(err) + // Reset the bls public key so that it only contains the bytes. + msg.PublicKey, err = (&bls.PublicKey{}).FromBytes(priv.PublicKey().ToBytes()) + require.NoError(err) _, err = WriteMessage(&buf2, msg, networkType) require.NoError(err) diff --git a/lib/pos_network_test.go b/lib/pos_network_test.go index 4b02029e1..185c64271 100644 --- a/lib/pos_network_test.go +++ b/lib/pos_network_test.go @@ -92,5 +92,7 @@ func _generateValidatorVotingPublicKeyAndSignature(t *testing.T) (*bls.PublicKey blsPublicKey := blsPrivateKey.PublicKey() blsSignature, err := blsPrivateKey.Sign([]byte{0x01, 0x02, 0x03}) require.NoError(t, err) + blsPublicKey, err = (&bls.PublicKey{}).FromBytes(blsPublicKey.ToBytes()) + require.NoError(t, err) return blsPublicKey, blsSignature } From aabcdc663018eaf405a95ab0b9a5b1af40e2bba2 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 4 Apr 2024 10:58:13 -0400 Subject: [PATCH 597/762] Allow txn relay in need blocks state for PoS (#1160) --- lib/server.go | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/lib/server.go b/lib/server.go index fccebabcc..998792a98 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1930,12 +1930,23 @@ func (srv *Server) _addNewTxn( return nil, err } - if srv.blockchain.chainState() != 
SyncStateFullyCurrent { + srv.blockchain.ChainLock.RLock() + tipHeight := uint64(srv.blockchain.BlockTip().Height) + chainState := srv.blockchain.chainState() + srv.blockchain.ChainLock.RUnlock() - err := fmt.Errorf("Server._addNewTxnAndRelay: Cannot process txn "+ - "from peer %v while syncing: %v %v", pp, srv.blockchain.chainState(), txn.Hash()) - glog.Error(err) - return nil, err + if chainState != SyncStateFullyCurrent { + // We allow txn relay if chain is in a need blocks state and is running PoS. + // We will error in two cases: + // - the chainState is not need blocks state + // - the chainState is need blocks state but the chain is not on PoS. + if chainState != SyncStateNeedBlocksss || + srv.blockchain.params.IsPoSBlockHeight(tipHeight) { + err := fmt.Errorf("Server._addNewTxnAndRelay: Cannot process txn "+ + "from peer %v while syncing: %v %v", pp, srv.blockchain.chainState(), txn.Hash()) + glog.Error(err) + return nil, err + } } glog.V(1).Infof("Server._addNewTxnAndRelay: txn: %v, peer: %v", txn, pp) @@ -1946,8 +1957,9 @@ func (srv *Server) _addNewTxn( peerID = pp.ID } + // Refresh TipHeight. srv.blockchain.ChainLock.RLock() - tipHeight := uint64(srv.blockchain.BlockTip().Height) + tipHeight = uint64(srv.blockchain.BlockTip().Height) srv.blockchain.ChainLock.RUnlock() // Only attempt to add the transaction to the PoW mempool if we're on the From 3c4f766aad7fa50692650724b30fc140e0cf78ed Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 4 Apr 2024 11:36:27 -0400 Subject: [PATCH 598/762] || !running pos in txn relay check (#1175) --- lib/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/server.go b/lib/server.go index 998792a98..d354abca6 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1941,7 +1941,7 @@ func (srv *Server) _addNewTxn( // - the chainState is not need blocks state // - the chainState is need blocks state but the chain is not on PoS. 
if chainState != SyncStateNeedBlocksss || - srv.blockchain.params.IsPoSBlockHeight(tipHeight) { + !srv.blockchain.params.IsPoSBlockHeight(tipHeight) { err := fmt.Errorf("Server._addNewTxnAndRelay: Cannot process txn "+ "from peer %v while syncing: %v %v", pp, srv.blockchain.chainState(), txn.Hash()) glog.Error(err) From 31e77443d9f1b70a30b0600b54ce557cb1dbd7d6 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 4 Apr 2024 16:12:13 -0400 Subject: [PATCH 599/762] Deprecate FailingTransactionBMFMultiplierBasisPointsKey (#1176) --- lib/block_view.go | 18 +----------------- lib/block_view_types.go | 11 ----------- lib/constants.go | 14 -------------- lib/pos_block_producer_test.go | 8 +------- lib/pos_snapshot_entries.go | 3 --- 5 files changed, 2 insertions(+), 52 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 343eae89d..c4331ae14 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3377,6 +3377,7 @@ func (bav *UtxoView) _connectUpdateGlobalParams( ) } } + // Validate that the minimum fee bucket size is greater than the minimum allowed. mergedGlobalParams := MergeGlobalParamEntryDefaults(&newGlobalParamsEntry, bav.Params) minFeeRateNanosPerKB, feeBucketMultiplier := mergedGlobalParams. 
@@ -3385,24 +3386,7 @@ func (bav *UtxoView) _connectUpdateGlobalParams( if nextFeeBucketMin < mergedGlobalParams.MinimumNetworkFeeNanosPerKB+MinFeeBucketSize { return 0, 0, nil, RuleErrorFeeBucketSizeTooSmall } - if len(extraData[FailingTransactionBMFMultiplierBasisPointsKey]) > 0 { - val, bytesRead := Uvarint( - extraData[FailingTransactionBMFMultiplierBasisPointsKey], - ) - if val > MaxBasisPoints { - return 0, 0, nil, fmt.Errorf( - "_connectUpdateGlobalParams: FailingTransactionBMFMultiplierBasisPoints must be <= %d", - MaxBasisPoints, - ) - } - newGlobalParamsEntry.FailingTransactionBMFMultiplierBasisPoints = val - if bytesRead <= 0 { - return 0, 0, nil, fmt.Errorf( - "_connectUpdateGlobalParams: unable to decode FailingTransactionBMFMultiplierBasisPoints as uint64", - ) - } - } if len(extraData[BlockTimestampDriftNanoSecsKey]) > 0 { val, bytesRead := Varint( extraData[BlockTimestampDriftNanoSecsKey], diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 159868dc4..62124a7db 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -4221,11 +4221,6 @@ type GlobalParamsEntry struct { // be [1210, 1330], etc. FeeBucketGrowthRateBasisPoints uint64 - // FailingTransactionBMFMultiplierBasisPoints is the factor of the transaction fee that is used for the computation - // BMF. The value is expressed in basis points. For example a value of 2500 means that 25% of the fee will be - // failing transaction fee will be used in the BMF algorithm. - FailingTransactionBMFMultiplierBasisPoints uint64 - // BlockTimestampDriftNanoSecs is the maximum number of nanoseconds from the current timestamp that // we will allow a PoS block to be submitted. 
BlockTimestampDriftNanoSecs int64 @@ -4276,7 +4271,6 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { JailInactiveValidatorGracePeriodEpochs: gp.JailInactiveValidatorGracePeriodEpochs, MaximumVestedIntersectionsPerLockupTransaction: gp.MaximumVestedIntersectionsPerLockupTransaction, FeeBucketGrowthRateBasisPoints: gp.FeeBucketGrowthRateBasisPoints, - FailingTransactionBMFMultiplierBasisPoints: gp.FailingTransactionBMFMultiplierBasisPoints, BlockTimestampDriftNanoSecs: gp.BlockTimestampDriftNanoSecs, MempoolMaxSizeBytes: gp.MempoolMaxSizeBytes, MempoolFeeEstimatorNumMempoolBlocks: gp.MempoolFeeEstimatorNumMempoolBlocks, @@ -4311,7 +4305,6 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.JailInactiveValidatorGracePeriodEpochs)...) data = append(data, IntToBuf(int64(gp.MaximumVestedIntersectionsPerLockupTransaction))...) data = append(data, UintToBuf(gp.FeeBucketGrowthRateBasisPoints)...) - data = append(data, UintToBuf(gp.FailingTransactionBMFMultiplierBasisPoints)...) data = append(data, IntToBuf(gp.BlockTimestampDriftNanoSecs)...) data = append(data, UintToBuf(gp.MempoolMaxSizeBytes)...) data = append(data, UintToBuf(gp.MempoolFeeEstimatorNumMempoolBlocks)...) 
@@ -4397,10 +4390,6 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading FeeBucketGrowthRateBasisPoints") } - gp.FailingTransactionBMFMultiplierBasisPoints, err = ReadUvarint(rr) - if err != nil { - return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading FailingTransactionBMFMultiplierBasisPoints") - } gp.BlockTimestampDriftNanoSecs, err = ReadVarint(rr) if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading BlockTimestampDriftNanoSecs") diff --git a/lib/constants.go b/lib/constants.go index 06e3a5cbb..5aba28778 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -777,11 +777,6 @@ type DeSoParams struct { // as basis points. For example a value of 1000 means that the fee bucket ranges will grow by 10% each time. DefaultFeeBucketGrowthRateBasisPoints uint64 - // DefaultFailingTransactionBMFMultiplierBasisPoints is the default rate for failing transaction fees, in basis points, - // used in BMF calculations. E.g. a value of 2500 means that 25% of the failing transaction's fee is used - // in BMF calculations. - DefaultFailingTransactionBMFMultiplierBasisPoints uint64 - // DefaultMaximumVestedIntersectionsPerLockupTransaction is the default value for // GlobalParamsEntry.MaximumVestedIntersectionsPerLockupTransaction. See the comment // in GlobalParamsEntry for a detailed description of its usage. @@ -1288,9 +1283,6 @@ var DeSoMainnetParams = DeSoParams{ // The rate of growth of the fee bucket ranges. DefaultFeeBucketGrowthRateBasisPoints: uint64(1000), - // The rate of the failing transaction's fee used in BMF calculations. - DefaultFailingTransactionBMFMultiplierBasisPoints: uint64(2500), - // The maximum number of vested lockup intersections in a lockup transaction. 
DefaultMaximumVestedIntersectionsPerLockupTransaction: 1000, @@ -1598,9 +1590,6 @@ var DeSoTestnetParams = DeSoParams{ // The rate of growth of the fee bucket ranges. DefaultFeeBucketGrowthRateBasisPoints: uint64(1000), - // The rate of the failing transaction's fee used in BMF calculations. - DefaultFailingTransactionBMFMultiplierBasisPoints: uint64(2500), - // The maximum number of vested lockup intersections in a lockup transaction. DefaultMaximumVestedIntersectionsPerLockupTransaction: 1000, @@ -1688,7 +1677,6 @@ const ( JailInactiveValidatorGracePeriodEpochsKey = "JailInactiveValidatorGracePeriodEpochs" MaximumVestedIntersectionsPerLockupTransactionKey = "MaximumVestedIntersectionsPerLockupTransaction" FeeBucketGrowthRateBasisPointsKey = "FeeBucketGrowthRateBasisPointsKey" - FailingTransactionBMFMultiplierBasisPointsKey = "FailingTransactionBMFMultiplierBasisPoints" BlockTimestampDriftNanoSecsKey = "BlockTimestampDriftNanoSecs" MempoolMaxSizeBytesKey = "MempoolMaxSizeBytes" MempoolFeeEstimatorNumMempoolBlocksKey = "MempoolFeeEstimatorNumMempoolBlocks" @@ -1778,8 +1766,6 @@ var ( MaxCopiesPerNFT: 0, // We initialize the FeeBucketGrowthRateBasisPoints to 1000, or equivalently, a multiplier of 1.1x. FeeBucketGrowthRateBasisPoints: 1000, - // We initialize the FailingTransactionBMFMultiplierBasisPoints to 2500, or equivalently, a rate of 0.25. - FailingTransactionBMFMultiplierBasisPoints: 2500, } ) diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index a2bf48bd2..3b0736216 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -182,12 +182,9 @@ func TestGetBlockTransactions(t *testing.T) { // First test happy path with a bunch of passing transactions. 
passingTxns := []*MsgDeSoTxn{} - totalUtilityFee := uint64(0) for ii := 0; ii < passingTransactions; ii++ { txn := _generateTestTxn(t, rand, feeMin, feeMax, m0PubBytes, m0Priv, 100, 20) passingTxns = append(passingTxns, txn) - _, utilityFee := computeBMF(txn.TxnFeeNanos) - totalUtilityFee += utilityFee _wrappedPosMempoolAddTransaction(t, mempool, txn) } @@ -205,9 +202,6 @@ func TestGetBlockTransactions(t *testing.T) { AmountNanos: 1e10, }) _signTxn(t, failingTxn, m0Priv) - effectiveFee := failingTxn.TxnFeeNanos * globalParams.FailingTransactionBMFMultiplierBasisPoints / 10000 - _, utilityFee := computeBMF(effectiveFee) - totalUtilityFee += utilityFee failingTxns = append(failingTxns, failingTxn) _wrappedPosMempoolAddTransaction(t, mempool, failingTxn) } @@ -245,7 +239,7 @@ func TestGetBlockTransactions(t *testing.T) { require.NoError(err) require.Equal(latestBlockViewCopy, latestBlockView) require.Equal(true, len(passingTxns) > len(txns)) - totalUtilityFee = 0 + totalUtilityFee := uint64(0) for _, txn := range txns { _, utilityFee := computeBMF(txn.TxnFeeNanos) totalUtilityFee += utilityFee diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index f1e78854e..2548c12d9 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -141,9 +141,6 @@ func MergeGlobalParamEntryDefaults(globalParamsEntry *GlobalParamsEntry, params if globalParamsEntryCopy.FeeBucketGrowthRateBasisPoints == 0 { globalParamsEntryCopy.FeeBucketGrowthRateBasisPoints = params.DefaultFeeBucketGrowthRateBasisPoints } - if globalParamsEntryCopy.FailingTransactionBMFMultiplierBasisPoints == 0 { - globalParamsEntryCopy.FailingTransactionBMFMultiplierBasisPoints = params.DefaultFailingTransactionBMFMultiplierBasisPoints - } if globalParamsEntryCopy.MaximumVestedIntersectionsPerLockupTransaction == 0 { globalParamsEntryCopy.MaximumVestedIntersectionsPerLockupTransaction = params.DefaultMaximumVestedIntersectionsPerLockupTransaction From 
8554a55225dcc73e65ef2e2a505fa20fe48aedc4 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 4 Apr 2024 17:36:49 -0400 Subject: [PATCH 600/762] Update PoW Epoch Durations to 144 Blocks (#1178) * Update PoW Epoch Durations to 144 Blocks * Fix typo --- lib/constants.go | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/lib/constants.go b/lib/constants.go index 5aba28778..7b0888f1d 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -858,9 +858,12 @@ var RegtestForkHeights = ForkHeights{ BalanceModelBlockHeight: uint32(1), ProofOfStake1StateSetupBlockHeight: uint32(1), - // For convenience, we set the PoS cutover block height to 100 - // so that enough DESO is minted to allow for testing. - ProofOfStake2ConsensusCutoverBlockHeight: uint32(100), + // For convenience, we set the PoS cutover block height to 300 so that + // enough DESO is minted to allow for testing. The 300 number is tuned + // to allow for 144 blocks/epoch * 2 epochs = 288 blocks to be mined + // before the chain transitions to PoS. Two epoch transitions must take + // place for the chain to set up the validator set to run PoS. + ProofOfStake2ConsensusCutoverBlockHeight: uint32(300), LockupsBlockHeight: uint32(1), @@ -899,8 +902,6 @@ func (params *DeSoParams) EnableRegtest() { // Allow block rewards to be spent instantly params.BlockRewardMaturity = 0 - // Set the PoS epoch duration to 10 blocks - params.DefaultEpochDurationNumBlocks = 10 // Set the PoS default jail inactive validator grace period epochs to 3. params.DefaultJailInactiveValidatorGracePeriodEpochs = 3 @@ -1271,8 +1272,10 @@ var DeSoMainnetParams = DeSoParams{ // Staking reward APY is defaulted to 0% to be safe. DefaultStakingRewardsAPYBasisPoints: uint64(0), - // The number of blocks in one epoch - DefaultEpochDurationNumBlocks: uint64(3600), + // The number of blocks in one epoch. 
This number is tuned to result in roughly 10 epochs + // per day given a 10-minute block time on mainnet when running PoW. The number is tuned + // for PoW because epoch transitions begin on PoW before the chain transitions to PoS. + DefaultEpochDurationNumBlocks: uint64(144), // The number of epochs before an inactive validator is jailed DefaultJailInactiveValidatorGracePeriodEpochs: uint64(48), @@ -1578,8 +1581,10 @@ var DeSoTestnetParams = DeSoParams{ // Staking reward APY is defaulted to 0% to be safe. DefaultStakingRewardsAPYBasisPoints: uint64(0), - // The number of blocks in one epoch - DefaultEpochDurationNumBlocks: uint64(3600), + // The number of blocks in one epoch. This number is tuned to result in roughly 10 epochs + // per day given a 10-minute block time on testnet when running PoW. The number is tuned + // for PoW because epoch transitions begin on PoW before the chain transitions to PoS. + DefaultEpochDurationNumBlocks: uint64(144), // The number of epochs before an inactive validator is jailed DefaultJailInactiveValidatorGracePeriodEpochs: uint64(48), From e78653fdf0c2492e471005211d4624a477ade6ed Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 5 Apr 2024 10:00:43 -0400 Subject: [PATCH 601/762] Use GetCurrentGlobalParams where applicable (#1179) --- lib/block_view.go | 8 ++++---- lib/block_view_atomic_txns.go | 4 ++-- lib/block_view_bitcoin.go | 4 ++-- lib/block_view_dao_coin_limit_order.go | 2 +- lib/block_view_derived_key_test.go | 2 +- lib/block_view_nft.go | 12 ++++++------ lib/block_view_nft_test.go | 2 +- lib/block_view_profile.go | 4 ++-- lib/block_view_profile_test.go | 2 +- lib/legacy_mempool.go | 8 ++++---- 10 files changed, 24 insertions(+), 24 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index c4331ae14..1ef84768d 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4026,13 +4026,13 @@ func (bav *UtxoView) _connectSingleTxn( // If the current minimum network fee 
per kb is set to 0, that indicates we should not assess a minimum fee. // Similarly, BlockReward transactions do not require a fee. isFeeExempt := txn.TxnMeta.GetTxnType() == TxnTypeBitcoinExchange || txn.TxnMeta.GetTxnType() == TxnTypeBlockReward - if !isFeeExempt && txnSizeBytes != 0 && bav.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB != 0 { + if !isFeeExempt && txnSizeBytes != 0 && bav.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB != 0 { // Make sure there isn't overflow in the fee. if fees != ((fees * 1000) / 1000) { return nil, 0, 0, 0, RuleErrorOverflowDetectedInFeeRateCalculation } // If the fee is less than the minimum network fee per KB, return an error. - if (fees*1000)/uint64(txnSizeBytes) < bav.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB { + if (fees*1000)/uint64(txnSizeBytes) < bav.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB { return nil, 0, 0, 0, RuleErrorTxnFeeBelowNetworkMinimum } } @@ -4933,8 +4933,8 @@ func (bav *UtxoView) ConstructNonceForPublicKey(publicKey []byte, blockHeight ui func (bav *UtxoView) ConstructNonceForPKID(pkid *PKID, blockHeight uint64) (*DeSoNonce, error) { // construct nonce expirationBuffer := uint64(DefaultMaxNonceExpirationBlockHeightOffset) - if bav.GlobalParamsEntry != nil && bav.GlobalParamsEntry.MaxNonceExpirationBlockHeightOffset != 0 { - expirationBuffer = bav.GlobalParamsEntry.MaxNonceExpirationBlockHeightOffset + if bav.GetCurrentGlobalParamsEntry() != nil && bav.GetCurrentGlobalParamsEntry().MaxNonceExpirationBlockHeightOffset != 0 { + expirationBuffer = bav.GetCurrentGlobalParamsEntry().MaxNonceExpirationBlockHeightOffset } // Some tests use a very low expiration buffer to test // that expired nonces get deleted. 
We don't want to diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index 792fbae61..072ef5295 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -234,13 +234,13 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( // cover the atomic transactions AS WELL AS the wrapper. We validate this // here to ensure we can test for these edge cases as they're also logically caught // by _verifyAtomicTxnsWrapper. - if txnSizeBytes != 0 && bav.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB != 0 { + if txnSizeBytes != 0 && bav.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB != 0 { // Make sure there isn't overflow in the fee. if txn.TxnFeeNanos != ((txn.TxnFeeNanos * 1000) / 1000) { return nil, 0, 0, 0, RuleErrorOverflowDetectedInFeeRateCalculation } // If the fee is less than the minimum network fee per KB, return an error. - if (txn.TxnFeeNanos*1000)/uint64(txnSizeBytes) < bav.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB { + if (txn.TxnFeeNanos*1000)/txnSizeBytes < bav.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB { return nil, 0, 0, 0, RuleErrorTxnFeeBelowNetworkMinimum } } diff --git a/lib/block_view_bitcoin.go b/lib/block_view_bitcoin.go index 2e9245de3..e4ea9667e 100644 --- a/lib/block_view_bitcoin.go +++ b/lib/block_view_bitcoin.go @@ -17,8 +17,8 @@ import ( // UPDATE_GLOBAL_PARAMS txn. 
func (bav *UtxoView) GetCurrentUSDCentsPerBitcoin() uint64 { usdCentsPerBitcoin := bav.USDCentsPerBitcoin - if bav.GlobalParamsEntry.USDCentsPerBitcoin != 0 { - usdCentsPerBitcoin = bav.GlobalParamsEntry.USDCentsPerBitcoin + if bav.GetCurrentGlobalParamsEntry().USDCentsPerBitcoin != 0 { + usdCentsPerBitcoin = bav.GetCurrentGlobalParamsEntry().USDCentsPerBitcoin } return usdCentsPerBitcoin } diff --git a/lib/block_view_dao_coin_limit_order.go b/lib/block_view_dao_coin_limit_order.go index 643521c30..6b8b5d851 100644 --- a/lib/block_view_dao_coin_limit_order.go +++ b/lib/block_view_dao_coin_limit_order.go @@ -272,7 +272,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( if (txMeta.FeeNanos * 1000) <= txMeta.FeeNanos { return 0, 0, nil, RuleErrorDAOCoinLimitOrderFeeNanosOverflow } - if (txMeta.FeeNanos*1000)/uint64(len(txnBytes)) < bav.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB || + if (txMeta.FeeNanos*1000)/uint64(len(txnBytes)) < bav.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB || txMeta.FeeNanos == 0 { return 0, 0, nil, RuleErrorDAOCoinLimitOrderFeeNanosBelowMinTxFee } diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index e49622387..bf5323fee 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -282,7 +282,7 @@ func _doTxnWithBlockHeight( realTxMeta.HasUnlockable, realTxMeta.IsForSale, realTxMeta.MinBidAmountNanos, - utxoView.GlobalParamsEntry.CreateNFTFeeNanos*uint64(realTxMeta.NumCopies), + utxoView.GetCurrentGlobalParamsEntry().CreateNFTFeeNanos*uint64(realTxMeta.NumCopies), realTxMeta.NFTRoyaltyToCreatorBasisPoints, realTxMeta.NFTRoyaltyToCoinBasisPoints, isBuyNow, diff --git a/lib/block_view_nft.go b/lib/block_view_nft.go index 3e55c7e61..fd6b1983d 100644 --- a/lib/block_view_nft.go +++ b/lib/block_view_nft.go @@ -541,7 +541,7 @@ func (bav *UtxoView) extractAdditionalRoyaltyMap( func (bav *UtxoView) _connectCreateNFT( txn *MsgDeSoTxn, txHash *BlockHash, blockHeight 
uint32, verifySignatures bool) ( _totalInput uint64, _totalOutput uint64, _utxoOps []*UtxoOperation, _err error) { - if bav.GlobalParamsEntry.MaxCopiesPerNFT == 0 { + if bav.GetCurrentGlobalParamsEntry().MaxCopiesPerNFT == 0 { return 0, 0, nil, fmt.Errorf("_connectCreateNFT: called with zero MaxCopiesPerNFT") } @@ -574,7 +574,7 @@ func (bav *UtxoView) _connectCreateNFT( } // Validate the txMeta. - if txMeta.NumCopies > bav.GlobalParamsEntry.MaxCopiesPerNFT { + if txMeta.NumCopies > bav.GetCurrentGlobalParamsEntry().MaxCopiesPerNFT { return 0, 0, nil, RuleErrorTooManyNFTCopies } if txMeta.NumCopies == 0 { @@ -658,7 +658,7 @@ func (bav *UtxoView) _connectCreateNFT( // Since issuing N copies of an NFT multiplies the downstream processing overhead by N, // we charge a fee for each additional copy minted. // We do not need to check for overflow as these values are managed by the ParamUpdater. - nftFee, err := SafeUint64().Mul(txMeta.NumCopies, bav.GlobalParamsEntry.CreateNFTFeeNanos) + nftFee, err := SafeUint64().Mul(txMeta.NumCopies, bav.GetCurrentGlobalParamsEntry().CreateNFTFeeNanos) if err != nil { return 0, 0, nil, errors.Wrapf( err, "_connectCreateNFT: error computing NFT fee") @@ -757,7 +757,7 @@ func (bav *UtxoView) _connectCreateNFT( func (bav *UtxoView) _connectUpdateNFT( txn *MsgDeSoTxn, txHash *BlockHash, blockHeight uint32, verifySignatures bool) ( _totalInput uint64, _totalOutput uint64, _utxoOps []*UtxoOperation, _err error) { - if bav.GlobalParamsEntry.MaxCopiesPerNFT == 0 { + if bav.GetCurrentGlobalParamsEntry().MaxCopiesPerNFT == 0 { return 0, 0, nil, fmt.Errorf("_connectUpdateNFT: called with zero MaxCopiesPerNFT") } @@ -923,7 +923,7 @@ func (bav *UtxoView) _connectUpdateNFT( func (bav *UtxoView) _connectAcceptNFTBid( txn *MsgDeSoTxn, txHash *BlockHash, blockHeight uint32, verifySignatures bool) ( _totalInput uint64, _totalOutput uint64, _utxoOps []*UtxoOperation, _err error) { - if bav.GlobalParamsEntry.MaxCopiesPerNFT == 0 { + if 
bav.GetCurrentGlobalParamsEntry().MaxCopiesPerNFT == 0 { return 0, 0, nil, fmt.Errorf("_connectAcceptNFTBid: called with zero MaxCopiesPerNFT") } @@ -1678,7 +1678,7 @@ func (bav *UtxoView) _helpConnectNFTSold(args HelpConnectNFTSoldStruct) ( func (bav *UtxoView) _connectNFTBid( txn *MsgDeSoTxn, txHash *BlockHash, blockHeight uint32, verifySignatures bool) ( _totalInput uint64, _totalOutput uint64, _utxoOps []*UtxoOperation, _err error) { - if bav.GlobalParamsEntry.MaxCopiesPerNFT == 0 { + if bav.GetCurrentGlobalParamsEntry().MaxCopiesPerNFT == 0 { return 0, 0, nil, fmt.Errorf("_connectNFTBid: called with zero MaxCopiesPerNFT") } diff --git a/lib/block_view_nft_test.go b/lib/block_view_nft_test.go index dbc0f4395..6fe5186f9 100644 --- a/lib/block_view_nft_test.go +++ b/lib/block_view_nft_test.go @@ -1282,7 +1282,7 @@ func TestNFTBasic(t *testing.T) { require.NoError(err) numCopies := uint64(10) - nftFee := utxoView.GlobalParamsEntry.CreateNFTFeeNanos * numCopies + nftFee := utxoView.GetCurrentGlobalParamsEntry().CreateNFTFeeNanos * numCopies m0BalBeforeNFT := _getBalance(testMeta.t, testMeta.chain, nil, m0Pub) require.Equal(getConditionalBalance(24, 24), m0BalBeforeNFT) diff --git a/lib/block_view_profile.go b/lib/block_view_profile.go index e198019e3..35400c350 100644 --- a/lib/block_view_profile.go +++ b/lib/block_view_profile.go @@ -613,7 +613,7 @@ func (bav *UtxoView) _connectUpdateProfile( existingProfileEntry := bav.GetProfileEntryForPublicKey(profilePublicKey) var extraSpend uint64 if existingProfileEntry == nil || existingProfileEntry.isDeleted { - extraSpend = bav.GlobalParamsEntry.CreateProfileFeeNanos + extraSpend = bav.GetCurrentGlobalParamsEntry().CreateProfileFeeNanos } // Connect basic txn to get the total input and the total output without @@ -641,7 +641,7 @@ func (bav *UtxoView) _connectUpdateProfile( // If we are creating a profile for the first time, assess the create profile fee. 
if existingProfileEntry == nil { - createProfileFeeNanos := bav.GlobalParamsEntry.CreateProfileFeeNanos + createProfileFeeNanos := bav.GetCurrentGlobalParamsEntry().CreateProfileFeeNanos totalOutput += createProfileFeeNanos if totalInput < totalOutput { return 0, 0, nil, RuleErrorCreateProfileTxnOutputExceedsInput diff --git a/lib/block_view_profile_test.go b/lib/block_view_profile_test.go index c7721f442..d3cd11cd2 100644 --- a/lib/block_view_profile_test.go +++ b/lib/block_view_profile_test.go @@ -172,7 +172,7 @@ func _updateProfileWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB, } else { require.GreaterOrEqual(totalInput, totalInputMake) if totalInput != totalInputMake { - require.Equal(totalInput, totalInputMake+utxoView.GlobalParamsEntry.CreateProfileFeeNanos) + require.Equal(totalInput, totalInputMake+utxoView.GetCurrentGlobalParamsEntry().CreateProfileFeeNanos) } require.Equal(OperationTypeSpendBalance, utxoOps[0].Type) } diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 7a5505a5a..621de9cb7 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -1035,8 +1035,8 @@ func (mp *DeSoMempool) tryAcceptTransaction( if tx.TxnNonce.ExpirationBlockHeight < blockHeight { return nil, nil, TxErrorNonceExpired } - if mp.universalUtxoView.GlobalParamsEntry.MaxNonceExpirationBlockHeightOffset != 0 && - tx.TxnNonce.ExpirationBlockHeight > blockHeight+mp.universalUtxoView.GlobalParamsEntry.MaxNonceExpirationBlockHeightOffset { + if mp.universalUtxoView.GetCurrentGlobalParamsEntry().MaxNonceExpirationBlockHeightOffset != 0 && + tx.TxnNonce.ExpirationBlockHeight > blockHeight+mp.universalUtxoView.GetCurrentGlobalParamsEntry().MaxNonceExpirationBlockHeightOffset { return nil, nil, TxErrorNonceExpirationBlockHeightOffsetExceeded } } @@ -2472,8 +2472,8 @@ func (mp *DeSoMempool) EstimateFeeRate( _ uint64, _ uint64, _ uint64) (uint64, error) { - if minFeeRateNanosPerKB < mp.readOnlyUtxoView.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB { - 
return mp.readOnlyUtxoView.GlobalParamsEntry.MinimumNetworkFeeNanosPerKB, nil + if minFeeRateNanosPerKB < mp.readOnlyUtxoView.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB { + return mp.readOnlyUtxoView.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB, nil } return minFeeRateNanosPerKB, nil } From 4c1ca2b71095f632598af1cb63d37be73ea8e59c Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 5 Apr 2024 11:29:54 -0400 Subject: [PATCH 602/762] Use specified MinimumFeeRateNanosPerKB when computing PoS fee estimator fees (#1180) --- lib/pos_fee_estimator.go | 9 +++++++++ lib/pos_fee_estimator_test.go | 10 +++++----- lib/pos_mempool.go | 14 ++++++++------ 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index 24bfa4a04..f66a56ff2 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -283,6 +283,7 @@ func (posFeeEstimator *PoSFeeEstimator) pruneBlocksToMaxNumPastBlocks(blocks []* // and past blocks using the congestionFactorBasisPoints, priorityPercentileBasisPoints, and // maxBlockSize params. func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( + minFeeRateNanosPerKB uint64, mempoolCongestionFactorBasisPoints uint64, mempoolPriorityPercentileBasisPoints uint64, pastBlocksCongestionFactorBasisPoints uint64, @@ -311,6 +312,9 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( if err != nil { return 0, errors.Wrap(err, "EstimateFeeRateNanosPerKB: Problem computing mempool fee rate") } + if minFeeRateNanosPerKB > pastBlockFeeRate && minFeeRateNanosPerKB > mempoolFeeRate { + return minFeeRateNanosPerKB, nil + } if pastBlockFeeRate < mempoolFeeRate { return mempoolFeeRate, nil } @@ -321,6 +325,7 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( // max of the mempoolFeeEstimate and pastBlocksFeeEstimate. 
func (posFeeEstimator *PoSFeeEstimator) EstimateFee( txn *MsgDeSoTxn, + minFeeRateNanosPerKB uint64, mempoolCongestionFactorBasisPoints uint64, mempoolPriorityPercentileBasisPoints uint64, pastBlocksCongestionFactorBasisPoints uint64, @@ -347,6 +352,10 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFee( if err != nil { return 0, errors.Wrap(err, "PoSFeeEstimator.EstimateFee: Problem computing past blocks fee estimate") } + minFeeRateEstimate, err := computeFeeGivenTxnAndFeeRate(txn, minFeeRateNanosPerKB) + if minFeeRateEstimate > mempoolFeeEstimate && minFeeRateEstimate > pastBlocksFeeEstimate { + return minFeeRateNanosPerKB, nil + } if mempoolFeeEstimate < pastBlocksFeeEstimate { return pastBlocksFeeEstimate, nil } diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index 080843ddc..b61e93580 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -64,7 +64,7 @@ func TestFeeEstimator(t *testing.T) { require.NoError(t, err) validateTxnFee(t, txn, computedFee, baseFeeRate) // Hybrid estimator will also return the base fee rate * number of bytes. - computedFee, err = posFeeEstimator.EstimateFee(txn, 10000, 10000, 1000, 10000, 1000) + computedFee, err = posFeeEstimator.EstimateFee(txn, 0, 10000, 10000, 1000, 10000, 1000) require.NoError(t, err) validateTxnFee(t, txn, computedFee, baseFeeRate) @@ -166,7 +166,7 @@ func TestFeeEstimator(t *testing.T) { // And the hybrid estimator is just the max, but for completeness, we check it. 
estimatedHybridFee, err = posFeeEstimator.EstimateFee( - txn, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, + txn, 0, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, maxBlockSizeHybrid) require.NoError(t, err) require.Equal(t, estimatedMempoolFee, estimatedHybridFee) @@ -213,7 +213,7 @@ func TestFeeEstimator(t *testing.T) { // And the hybrid estimator is just the max, but for completeness, we check it. estimatedHybridFee, err = posFeeEstimator.EstimateFee( - txn, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, + txn, 0, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, maxBlockSizeHybrid) require.NoError(t, err) require.Equal(t, estimatedMempoolFee, estimatedHybridFee) @@ -260,7 +260,7 @@ func TestFeeEstimator(t *testing.T) { // And the hybrid estimator is just the max, but for completeness, we check it. estimatedHybridFee, err = posFeeEstimator.EstimateFee( - txn, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, + txn, 0, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, maxBlockSizeHybrid) require.NoError(t, err) require.Equal(t, estimatedMempoolFee, estimatedHybridFee) @@ -307,7 +307,7 @@ func TestFeeEstimator(t *testing.T) { // And the hybrid estimator is just the max, but for completeness, we check it. 
estimatedHybridFee, err = posFeeEstimator.EstimateFee( - txn, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, + txn, 0, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, maxBlockSizeHybrid) require.NoError(t, err) require.Equal(t, estimatedMempoolFee, estimatedHybridFee) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 06192cb33..ccf21c789 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -1001,26 +1001,28 @@ func (mp *PosMempool) GetMempoolSummaryStats() map[string]*SummaryStats { return convertMempoolTxsToSummaryStats(mp.txnRegister.GetFeeTimeTransactions()) } -func (mp *PosMempool) EstimateFee(txn *MsgDeSoTxn, - _ uint64, +func (mp *PosMempool) EstimateFee( + txn *MsgDeSoTxn, + minFeeRateNanosPerKB uint64, mempoolCongestionFactorBasisPoints uint64, mempoolPriorityPercentileBasisPoints uint64, pastBlocksCongestionFactorBasisPoints uint64, pastBlocksPriorityPercentileBasisPoints uint64, - maxBlockSize uint64) (uint64, error) { + maxBlockSize uint64, +) (uint64, error) { return mp.feeEstimator.EstimateFee( - txn, mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, + txn, minFeeRateNanosPerKB, mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, pastBlocksCongestionFactorBasisPoints, pastBlocksPriorityPercentileBasisPoints, maxBlockSize) } func (mp *PosMempool) EstimateFeeRate( - _ uint64, + minFeeRateNanosPerKB uint64, mempoolCongestionFactorBasisPoints uint64, mempoolPriorityPercentileBasisPoints uint64, pastBlocksCongestionFactorBasisPoints uint64, pastBlocksPriorityPercentileBasisPoints uint64, maxBlockSize uint64) (uint64, error) { return mp.feeEstimator.EstimateFeeRateNanosPerKB( - mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, + minFeeRateNanosPerKB, mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, pastBlocksCongestionFactorBasisPoints, 
pastBlocksPriorityPercentileBasisPoints, maxBlockSize) } From 97b4c54c8d3fd8dc05d8dafb880ff397dce68e8b Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 5 Apr 2024 11:41:24 -0400 Subject: [PATCH 603/762] Account for size of header in block producer (#1177) --- lib/pos_block_producer.go | 143 ++++++++++++++++++++++++++++++--- lib/pos_block_producer_test.go | 41 ++++++++-- lib/pos_blockchain_test.go | 27 ++++++- 3 files changed, 193 insertions(+), 18 deletions(-) diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index a00238476..70fbb0b07 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -26,6 +26,7 @@ type PosBlockProducer struct { proposerPublicKey *PublicKey proposerVotingPublicKey *bls.PublicKey previousBlockTimestampNanoSecs int64 + mockBlockSignature *bls.Signature } func NewPosBlockProducer( @@ -50,8 +51,20 @@ func NewPosBlockProducer( func (pbp *PosBlockProducer) CreateUnsignedBlock(latestBlockView *UtxoView, newBlockHeight uint64, view uint64, proposerRandomSeedSignature *bls.Signature, validatorsVoteQC *QuorumCertificate) (BlockTemplate, error) { + headerSizeEstimate, err := pbp.estimateHeaderSize( + latestBlockView, + newBlockHeight, + view, + proposerRandomSeedSignature, + validatorsVoteQC, + nil, + ) + if err != nil { + return nil, errors.Wrapf(err, "PosBlockProducer.CreateUnsignedBlock: Problem creating mock header") + } // Create the block template. 
- block, err := pbp.createBlockTemplate(latestBlockView, newBlockHeight, view, proposerRandomSeedSignature) + block, err := pbp.createBlockTemplate( + latestBlockView, newBlockHeight, view, proposerRandomSeedSignature, headerSizeEstimate) if err != nil { return nil, errors.Wrapf(err, "PosBlockProducer.CreateUnsignedTimeoutBlock: Problem creating block template") } @@ -64,11 +77,27 @@ func (pbp *PosBlockProducer) CreateUnsignedBlock(latestBlockView *UtxoView, newB // CreateUnsignedTimeoutBlock constructs an unsigned, PoS block with Fee-Time ordered transactions. This function should be used // during a timeout in consensus when a validators timeout aggregate QC has been assembled. The block is unsigned, // and so is returned as a BlockTemplate. -func (pbp *PosBlockProducer) CreateUnsignedTimeoutBlock(latestBlockView *UtxoView, newBlockHeight uint64, view uint64, - proposerRandomSeedSignature *bls.Signature, validatorsTimeoutAggregateQC *TimeoutAggregateQuorumCertificate) (BlockTemplate, error) { - +func (pbp *PosBlockProducer) CreateUnsignedTimeoutBlock( + latestBlockView *UtxoView, + newBlockHeight uint64, + view uint64, + proposerRandomSeedSignature *bls.Signature, + validatorsTimeoutAggregateQC *TimeoutAggregateQuorumCertificate, +) (BlockTemplate, error) { + headerSizeEstimate, err := pbp.estimateHeaderSize( + latestBlockView, + newBlockHeight, + view, + proposerRandomSeedSignature, + nil, + validatorsTimeoutAggregateQC, + ) + if err != nil { + return nil, errors.Wrapf(err, "PosBlockProducer.CreateUnsignedTimeoutBlock: Problem creating mock header") + } // Create the block template. 
- block, err := pbp.createBlockTemplate(latestBlockView, newBlockHeight, view, proposerRandomSeedSignature) + block, err := pbp.createBlockTemplate( + latestBlockView, newBlockHeight, view, proposerRandomSeedSignature, headerSizeEstimate) if err != nil { return nil, errors.Wrapf(err, "PosBlockProducer.CreateUnsignedTimeoutBlock: Problem creating block template") } @@ -81,11 +110,16 @@ func (pbp *PosBlockProducer) CreateUnsignedTimeoutBlock(latestBlockView *UtxoVie // createBlockTemplate is a helper function used by CreateUnsignedBlock and CreateUnsignedTimeoutBlock. It constructs // a partially filled out block with Fee-Time ordered transactions. The returned block is complete except for // the qc / aggregateQc fields, and the signature. -func (pbp *PosBlockProducer) createBlockTemplate(latestBlockView *UtxoView, newBlockHeight uint64, view uint64, - proposerRandomSeedSignature *bls.Signature) (BlockTemplate, error) { +func (pbp *PosBlockProducer) createBlockTemplate( + latestBlockView *UtxoView, + newBlockHeight uint64, + view uint64, + proposerRandomSeedSignature *bls.Signature, + headerSizeEstimate uint64, +) (BlockTemplate, error) { // First get the block without the header. 
currentTimestamp := _maxInt64(time.Now().UnixNano(), pbp.previousBlockTimestampNanoSecs+1) - block, err := pbp.createBlockWithoutHeader(latestBlockView, newBlockHeight, currentTimestamp) + block, err := pbp.createBlockWithoutHeader(latestBlockView, newBlockHeight, currentTimestamp, headerSizeEstimate) if err != nil { return nil, errors.Wrapf(err, "PosBlockProducer.CreateBlockTemplate: Problem creating block without header") } @@ -111,10 +145,81 @@ func (pbp *PosBlockProducer) createBlockTemplate(latestBlockView *UtxoView, newB return block, nil } +func (pbp *PosBlockProducer) getMockBlockSignature() (*bls.Signature, error) { + if pbp.mockBlockSignature != nil { + return pbp.mockBlockSignature, nil + } + mockBLSPrivateKey, err := bls.NewPrivateKey() + if err != nil { + return nil, errors.Wrap(err, "Error creating mock BLS private key") + } + mockBLSSigner, err := NewBLSSigner(mockBLSPrivateKey) + if err != nil { + return nil, errors.Wrap(err, "Error creating mock BLSSigner") + } + mockBlockSignature, err := mockBLSSigner.SignBlockProposal(math.MaxUint64, NewBlockHash(RandomBytes(32))) + if err != nil { + return nil, errors.Wrap(err, "Error creating mock block signature") + } + pbp.mockBlockSignature = mockBlockSignature + return mockBlockSignature, nil +} + +func (pbp *PosBlockProducer) estimateHeaderSize( + latestBlockView *UtxoView, + newBlockHeight uint64, + view uint64, + proposerRandomSeedSignature *bls.Signature, + validatorsVoteQC *QuorumCertificate, + validatorTimeoutAggregateQC *TimeoutAggregateQuorumCertificate, +) (uint64, error) { + if validatorsVoteQC == nil && validatorTimeoutAggregateQC == nil { + return 0, errors.New( + "PosBlockProducer.mockHeader: both validatorsVoteQC and validatorTimeoutAggregateQC are nil") + } + if validatorsVoteQC != nil && validatorTimeoutAggregateQC != nil { + return 0, errors.New( + "PosBlockProducer.mockHeader: both validatorsVoteQC and validatorTimeoutAggregateQC are not nil") + } + if proposerRandomSeedSignature == nil { 
+ return 0, errors.New("PosBlockProducer.mockHeader: proposerRandomSeedSignature is nil") + } + mockHeader := &MsgDeSoHeader{} + mockHeader.Version = HeaderVersion2 + mockHeader.PrevBlockHash = latestBlockView.TipHash + randomBlockHash := NewBlockHash(RandomBytes(32)) + // Any random block hash is fine here. + mockHeader.TransactionMerkleRoot = randomBlockHash + mockHeader.TstampNanoSecs = _maxInt64(time.Now().UnixNano(), pbp.previousBlockTimestampNanoSecs+1) + mockHeader.Height = newBlockHeight + mockHeader.ProposedInView = view + mockHeader.ProposerVotingPublicKey = pbp.proposerVotingPublicKey + mockHeader.ProposerRandomSeedSignature = proposerRandomSeedSignature + if validatorsVoteQC != nil { + mockHeader.ValidatorsVoteQC = validatorsVoteQC + } else { + mockHeader.ValidatorsTimeoutAggregateQC = validatorTimeoutAggregateQC + } + var err error + mockHeader.ProposerVotePartialSignature, err = pbp.getMockBlockSignature() + if err != nil { + return 0, errors.Wrap(err, "PosBlockProducer.mockHeader: Problem getting mock block signature") + } + headerBytes, err := mockHeader.ToBytes(false) + if err != nil { + return 0, errors.Wrap(err, "PosBlockProducer.mockHeader: Problem getting header size") + } + return uint64(len(headerBytes)), nil +} + // createBlockWithoutHeader is a helper function used by createBlockTemplate. It constructs a partially filled out // block with Fee-Time ordered transactions. The returned block all its contents filled, except for the header. func (pbp *PosBlockProducer) createBlockWithoutHeader( - latestBlockView *UtxoView, newBlockHeight uint64, newBlockTimestampNanoSecs int64) (BlockTemplate, error) { + latestBlockView *UtxoView, + newBlockHeight uint64, + newBlockTimestampNanoSecs int64, + headerSizeEstimate uint64, +) (BlockTemplate, error) { block := NewMessage(MsgTypeBlock).(*MsgDeSoBlock) // Create the block reward transaction. 
@@ -134,19 +239,35 @@ func (pbp *PosBlockProducer) createBlockWithoutHeader( if err != nil { return nil, errors.Wrapf(err, "Error computing block reward txn size: ") } + blockRewardTxnSize := uint64(len(blockRewardTxnSizeBytes)) // PoS Block producer only uses PoS, so we just directly fetch the soft max and hard max block sizes. softMaxBlockSizeBytes := latestBlockView.GetSoftMaxBlockSizeBytesPoS() hardMaxBlockSizeBytes := latestBlockView.GetMaxBlockSizeBytesPoS() + numBytesForHeaderAndBlockRewardTxn, err := SafeUint64().Add(headerSizeEstimate, blockRewardTxnSize) + if err != nil { + return nil, errors.Wrapf(err, "Error computing block reward txn size + mock header size: ") + } + + softMaxTxnSizeBytes, err := SafeUint64().Sub(softMaxBlockSizeBytes, numBytesForHeaderAndBlockRewardTxn) + if err != nil { + return nil, errors.Wrapf(err, "Error computing soft max txn size: ") + } + + hardMaxTxnSizeBytes, err := SafeUint64().Sub(hardMaxBlockSizeBytes, numBytesForHeaderAndBlockRewardTxn) + if err != nil { + return nil, errors.Wrapf(err, "Error computing hard max txn size: ") + } + // Get block transactions from the mempool. 
feeTimeTxns, maxUtilityFee, err := pbp.getBlockTransactions( pbp.proposerPublicKey, latestBlockView, newBlockHeight, newBlockTimestampNanoSecs, - softMaxBlockSizeBytes-uint64(len(blockRewardTxnSizeBytes)), - hardMaxBlockSizeBytes-uint64(len(blockRewardTxnSizeBytes)), + softMaxTxnSizeBytes, + hardMaxTxnSizeBytes, ) if err != nil { return nil, errors.Wrapf(err, "PosBlockProducer.createBlockWithoutHeader: Problem retrieving block transactions: ") diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 3b0736216..95f5851f4 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -1,6 +1,7 @@ package lib import ( + "github.com/deso-protocol/core/collections/bitset" "math" "math/rand" "testing" @@ -58,8 +59,18 @@ func TestCreateBlockTemplate(t *testing.T) { require.NoError(err) m0Pk := NewPublicKey(m0PubBytes) pbp := NewPosBlockProducer(mempool, params, m0Pk, pub, time.Now().UnixNano()) - - blockTemplate, err := pbp.createBlockTemplate(latestBlockView, 3, 10, seedSignature) + mockQC := &QuorumCertificate{ + BlockHash: NewBlockHash(RandomBytes(32)), + ProposedInView: 1, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: &bls.Signature{}, + SignersList: bitset.NewBitset(), + }, + } + headerSizeEstimate, err := pbp.estimateHeaderSize( + latestBlockView, 3, 10, seedSignature, mockQC, nil) + require.NoError(err) + blockTemplate, err := pbp.createBlockTemplate(latestBlockView, 3, 10, seedSignature, headerSizeEstimate) require.NoError(err) require.NotNil(blockTemplate) require.NotNil(blockTemplate.Header) @@ -123,7 +134,18 @@ func TestCreateBlockWithoutHeader(t *testing.T) { NewPublicKey(m0PubBytes), latestBlockView, 3, 0, 50000, 50000) require.NoError(err) - blockTemplate, err := pbp.createBlockWithoutHeader(latestBlockView, 3, 0) + mockQC := &QuorumCertificate{ + BlockHash: NewBlockHash(RandomBytes(32)), + ProposedInView: 1, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: 
&bls.Signature{}, + SignersList: bitset.NewBitset(), + }, + } + headerSizeEstimate, err := pbp.estimateHeaderSize( + latestBlockView, 3, 10, &bls.Signature{}, mockQC, nil) + require.NoError(err) + blockTemplate, err := pbp.createBlockWithoutHeader(latestBlockView, 3, 0, headerSizeEstimate) require.NoError(err) require.Equal(txns, blockTemplate.Txns[1:]) require.Equal(uint64(0), blockTemplate.Txns[0].TxOutputs[0].AmountNanos) @@ -137,8 +159,17 @@ func TestCreateBlockWithoutHeader(t *testing.T) { txns, maxUtilityFee, err := pbp.getBlockTransactions( NewPublicKey(m1PubBytes), latestBlockView, 3, 0, 50000, 50000) require.NoError(err) - - blockTemplate, err := pbp.createBlockWithoutHeader(latestBlockView, 3, 0) + mockQC := &QuorumCertificate{ + BlockHash: NewBlockHash(RandomBytes(32)), + ProposedInView: 1, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: &bls.Signature{}, + SignersList: bitset.NewBitset(), + }, + } + headerSizeEstimate, err := pbp.estimateHeaderSize( + latestBlockView, 3, 10, &bls.Signature{}, mockQC, nil) + blockTemplate, err := pbp.createBlockWithoutHeader(latestBlockView, 3, 0, headerSizeEstimate) require.NoError(err) require.Equal(txns, blockTemplate.Txns[1:]) require.Equal(maxUtilityFee, blockTemplate.Txns[0].TxOutputs[0].AmountNanos) diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 71bf22a3e..1d0831635 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2587,8 +2587,19 @@ func _getFullRealBlockTemplate( isTimeout bool, blockTimestampOffset time.Duration, ) BlockTemplate { + mockQC := &QuorumCertificate{ + BlockHash: NewBlockHash(RandomBytes(32)), + ProposedInView: 1, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: &bls.Signature{}, + SignersList: bitset.NewBitset(), + }, + } + headerSizeEstimate, err := testMeta.posBlockProducer.estimateHeaderSize( + testMeta.posMempool.readOnlyLatestBlockView, 3, 10, &bls.Signature{}, mockQC, nil) + 
require.NoError(testMeta.t, err) blockTemplate, err := testMeta.posBlockProducer.createBlockTemplate( - testMeta.posMempool.readOnlyLatestBlockView, blockHeight, view, seedSignature) + testMeta.posMempool.readOnlyLatestBlockView, blockHeight, view, seedSignature, headerSizeEstimate) require.NoError(testMeta.t, err) require.NotNil(testMeta.t, blockTemplate) @@ -2668,7 +2679,19 @@ func _getFullRealBlockTemplate( // _getFullDummyBlockTemplate is a helper function that generates a block template with a dummy ValidatorsVoteQC. func _getFullDummyBlockTemplate(testMeta *TestMeta, latestBlockView *UtxoView, blockHeight uint64, view uint64, seedSignature *bls.Signature) BlockTemplate { - blockTemplate, err := testMeta.posBlockProducer.createBlockTemplate(latestBlockView, blockHeight, view, seedSignature) + mockQC := &QuorumCertificate{ + BlockHash: NewBlockHash(RandomBytes(32)), + ProposedInView: 1, + ValidatorsVoteAggregatedSignature: &AggregatedBLSSignature{ + Signature: &bls.Signature{}, + SignersList: bitset.NewBitset(), + }, + } + headerSizeEstimate, err := testMeta.posBlockProducer.estimateHeaderSize( + testMeta.posMempool.readOnlyLatestBlockView, 3, 10, &bls.Signature{}, mockQC, nil) + require.NoError(testMeta.t, err) + blockTemplate, err := testMeta.posBlockProducer.createBlockTemplate( + latestBlockView, blockHeight, view, seedSignature, headerSizeEstimate) require.NoError(testMeta.t, err) require.NotNil(testMeta.t, blockTemplate) // Add a dummy vote QC From 1a58674f474cdae3f03fed2556852ec04ae9c108 Mon Sep 17 00:00:00 2001 From: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> Date: Fri, 5 Apr 2024 10:57:28 -0700 Subject: [PATCH 604/762] Pass inner txn hash in atomic txn processing (#1184) * wip * Pass inner txn hash when processing atomic txn --- lib/block_view_atomic_txns.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index 072ef5295..3ba08036f 100644 --- 
a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -267,8 +267,9 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( for _, innerTxn := range txMeta.Txns { // NOTE: By recursively calling _connectSingleTxn, each inner transaction is checked that // it is capable of paying for its own fees as well as having a valid signature. + innerTxnHash := innerTxn.Hash() innerTxnUtxoOps, txnInput, txnOutput, txnFees, err := bav._connectSingleTxn( - innerTxn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) + innerTxn, innerTxnHash, blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) if err != nil { return nil, 0, 0, 0, errors.Wrap(err, "_connectAtomicTxnsWrapper: failed to connect non-atomic transaction") From 1d39ad7055130aede7d7411e79b0f9645dc566a3 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 5 Apr 2024 14:48:30 -0400 Subject: [PATCH 605/762] Index NonValidators With PublicKeys On Handshake Complete (#1182) * Index NonValidators With PublicKeys On Handshake Complete * Address Nina's comments --- lib/network_manager.go | 35 +++++++++++++++++++++-------------- lib/remote_node.go | 9 +++++---- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/lib/network_manager.go b/lib/network_manager.go index 2208a0445..08e74729b 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -193,9 +193,8 @@ func (nm *NetworkManager) startValidatorConnector() { nm.exitGroup.Done() return case <-time.After(nm.params.NetworkManagerRefreshDuration): - activeValidatorsMap := nm.getActiveValidatorsMap() - nm.refreshValidatorIndices(activeValidatorsMap) - nm.connectValidators(activeValidatorsMap) + nm.refreshValidatorIndices() + nm.connectValidators() } } } @@ -540,9 +539,14 @@ func (nm *NetworkManager) getActiveValidatorsMap() *collections.ConcurrentMap[bl return nm.activeValidatorsMap.Clone() } +func (nm *NetworkManager) isActiveValidator(pk 
bls.SerializedPublicKey) bool { + _, ok := nm.activeValidatorsMap.Get(pk) + return ok +} + // refreshValidatorIndices re-indexes validators based on the activeValidatorsMap. It is called periodically by the // validator connector. -func (nm *NetworkManager) refreshValidatorIndices(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { +func (nm *NetworkManager) refreshValidatorIndices() { // De-index inactive validators. We skip any checks regarding RemoteNodes connection status, nor do we verify whether // de-indexing the validator would result in an excess number of outbound/inbound connections. Any excess connections // will be cleaned up by the NonValidator connector. @@ -553,17 +557,17 @@ func (nm *NetworkManager) refreshValidatorIndices(activeValidatorsMap *collectio validatorOutboundMap := nm.GetValidatorOutboundIndex().ToMap() for pk, rn := range validatorOutboundMap { // If the validator is no longer active, de-index it. - if _, ok := activeValidatorsMap.Get(pk); !ok { - nm.SetNonValidator(rn) + if !nm.isActiveValidator(pk) { nm.UnsetValidator(rn) + nm.SetNonValidator(rn) } } validatorInboundMap := nm.GetValidatorInboundIndex().ToMap() for pk, rn := range validatorInboundMap { // If the validator is no longer active, de-index it. - if _, ok := activeValidatorsMap.Get(pk); !ok { - nm.SetNonValidator(rn) + if !nm.isActiveValidator(pk) { nm.UnsetValidator(rn) + nm.SetNonValidator(rn) } } @@ -597,22 +601,22 @@ func (nm *NetworkManager) refreshValidatorIndices(activeValidatorsMap *collectio } // If the RemoteNode turns out to be in the validator set, index it. - if _, ok := activeValidatorsMap.Get(pk.Serialize()); ok { - nm.SetValidator(rn) + if nm.isActiveValidator(pk.Serialize()) { nm.UnsetNonValidator(rn) + nm.SetValidator(rn) } } } // connectValidators attempts to connect to all active validators that are not already connected. It is called // periodically by the validator connector. 
-func (nm *NetworkManager) connectValidators(activeValidatorsMap *collections.ConcurrentMap[bls.SerializedPublicKey, consensus.Validator]) { +func (nm *NetworkManager) connectValidators() { // Look through the active validators and connect to any that we're not already connected to. if nm.keystore == nil { return } - validators := activeValidatorsMap.ToMap() + validators := nm.getActiveValidatorsMap().ToMap() for pk, validator := range validators { // Check if we've already dialed an outbound connection to this validator. // It's worth noting that we look up the outbound index, instead of looking up a union of the outbound and @@ -956,9 +960,12 @@ func (nm *NetworkManager) ProcessCompletedHandshake(remoteNode *RemoteNode) { return } - if remoteNode.IsValidator() { - nm.SetValidator(remoteNode) + // A remote node is a validator from our POV if three things are true: + // - It has the validator service flag set, and performed the handshake with a BLS public key + // - Its BLS public key is in the active validator set. 
+ if remoteNode.IsValidator() && nm.isActiveValidator(remoteNode.GetValidatorPublicKey().Serialize()) { nm.UnsetNonValidator(remoteNode) + nm.SetValidator(remoteNode) } else { nm.UnsetValidator(remoteNode) nm.SetNonValidator(remoteNode) diff --git a/lib/remote_node.go b/lib/remote_node.go index 794f4d167..bbd011495 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -3,14 +3,15 @@ package lib import ( "encoding/binary" "fmt" + "net" + "sync" + "time" + "github.com/btcsuite/btcd/wire" "github.com/deso-protocol/core/bls" "github.com/golang/glog" "github.com/pkg/errors" "golang.org/x/crypto/sha3" - "net" - "sync" - "time" ) type RemoteNodeStatus int @@ -253,7 +254,7 @@ func (rn *RemoteNode) IsValidator() bool { if !rn.IsHandshakeCompleted() { return false } - return rn.hasValidatorServiceFlag() + return rn.hasValidatorServiceFlag() && rn.validatorPublicKey != nil } func (rn *RemoteNode) IsExpectedValidator() bool { From 7ad9edfe700ef829bd9c8c1d38eb835ffb2f432f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 5 Apr 2024 16:06:25 -0400 Subject: [PATCH 606/762] Improve Logging in NetworkManager and PosConsensus (#1186) --- lib/network_manager.go | 8 ++++++++ lib/pos_consensus.go | 4 ++++ 2 files changed, 12 insertions(+) diff --git a/lib/network_manager.go b/lib/network_manager.go index 08e74729b..754ac5c8f 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -650,6 +650,14 @@ func (nm *NetworkManager) connectValidators() { "validator (pk= %v): (error= %v)", validator.GetPublicKey().Serialize(), err) continue } + + // Log the connection attempt + glog.V(2).Infof( + "NetworkManager.connectValidators: Connecting to validator (pk= %v) (domain=%v)", + validator.GetPublicKey().Serialize(), + string(randDomain), + ) + if err := nm.CreateValidatorConnection(string(randDomain), publicKey); err != nil { glog.V(2).Infof("NetworkManager.connectValidators: Problem connecting to validator %v: %v", 
string(randDomain), err) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index c2ec3c50d..cae354e8d 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -43,6 +43,8 @@ func NewFastHotStuffConsensus( // blockchain state. This should only be called once the blockchain has synced, the node is // ready to join the validator network, and the node is able to validate blocks in the steady state. func (fc *FastHotStuffConsensus) Start() error { + glog.V(2).Infof("FastHotStuffConsensus.Start: Started running FastHotStuffConsensus.") + // Hold the consensus' write lock for thread-safety. fc.lock.Lock() defer fc.lock.Unlock() @@ -123,6 +125,8 @@ func (fc *FastHotStuffConsensus) Start() error { glog.Errorf("FastHotStuffConsensus.tryProcessBlockAsNewTip: Error updating validator connections: %v", err) } + glog.V(2).Infof("FastHotStuffConsensus.Start: Successfully started running FastHotStuffConsensus.") + return nil } From 846f50df291860793431b2d96ba092dcee4d833c Mon Sep 17 00:00:00 2001 From: Jon Pollock <135658176+poolcoke@users.noreply.github.com> Date: Mon, 8 Apr 2024 11:16:31 -0700 Subject: [PATCH 607/762] Add GetAugmentedUniversalViewWithAdditionalTransactions for Backend APIs. (#1164) * Create GetAugmentedUniversalViewWithAdditionalTransactions. 
* Add GetMempoolTipBlockHeight to mempool interface --- lib/legacy_mempool.go | 4 +++ lib/pos_mempool.go | 63 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 621de9cb7..28e4b9777 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -605,6 +605,10 @@ func (mp *DeSoMempool) GetTransactionsOrderedByTimeAdded() (_poolTxns []*Mempool return poolTxns, nil, nil } +func (mp *DeSoMempool) GetMempoolTipBlockHeight() uint64 { + return uint64(mp.bc.blockTip().Height) +} + func (mp *DeSoMempool) GetMempoolTx(txId *BlockHash) *MempoolTx { return mp.readOnlyUniversalTransactionMap[*txId] } diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index ccf21c789..39b85e5d3 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -41,6 +41,7 @@ type Mempool interface { CheckSpend(op UtxoKey) *MsgDeSoTxn GetOrderedTransactions() []*MempoolTx IsTransactionInPool(txHash *BlockHash) bool + GetMempoolTipBlockHeight() uint64 GetMempoolTx(txHash *BlockHash) *MempoolTx GetMempoolSummaryStats() map[string]*SummaryStats EstimateFee( @@ -62,6 +63,59 @@ type Mempool interface { ) (uint64, error) } +// GetAugmentedUniversalViewWithAdditionalTransactions is meant as a helper function +// for backend APIs to better construct atomic transactions while maintaining various +// transactional sanity checks. It SHOULD NOT be used for consensus critical tasks +// as it does not validate signatures, does not validate fees, and would likely lead +// to a nonsensical state of the blockchain. +// +// In the case of atomic transactions it's likely that a user will have a series of +// dependent transactions (transactions that MUST be submitted together in a specific order) +// that they plan to submit as a single atomic transaction. 
However, functions like +// GetAugmentedUniversalView may not have access to this series of transactions meaning +// backend APIs using GetAugmentedUniversalView will generate errors unnecessarily in the case +// of certain atomic transaction workflows. To deal with this, we can use +// GetAugmentedUniversalViewWithAdditionalTransactions which will create a +// view that has connected a set of transactions (specified by optionalTxns). +// +// NOTE: GetAugmentedUniversalViewWithAdditionalTransactions DOES NOT validate fees +// as fees are computed in UtxoView.ConnectBlock and optionalTxns are not included +// in any block that can be connected yet. +func GetAugmentedUniversalViewWithAdditionalTransactions( + mempool Mempool, + optionalTxns []*MsgDeSoTxn, +) ( + *UtxoView, + error, +) { + // Generate an augmented view. + newView, err := mempool.GetAugmentedUniversalView() + if err != nil { + return nil, errors.Wrap(err, "GetAugmentedUniversalViewWithAdditionalTransactions") + } + + // Connect optional txns (if any). 
+ currentTimestampNanoSecs := time.Now().UnixNano() + if optionalTxns != nil && len(optionalTxns) > 0 { + for ii, txn := range optionalTxns { + _, _, _, _, err := newView.ConnectTransaction( + txn, + txn.Hash(), + uint32(mempool.GetMempoolTipBlockHeight()+1), + currentTimestampNanoSecs, + false, + true, + ) + if err != nil { + return nil, errors.Wrapf(err, + "GetAugmentedUniversalViewWithAdditionalTransactions failed connecting transaction %d of %d", + ii, len(optionalTxns)) + } + } + } + return newView, nil +} + type MempoolIterator interface { Next() bool Value() (*MempoolTransaction, bool) @@ -987,6 +1041,15 @@ func (mp *PosMempool) IsTransactionInPool(txHash *BlockHash) bool { return exists } +func (mp *PosMempool) GetMempoolTipBlockHeight() uint64 { + mp.RLock() + defer mp.RUnlock() + if !mp.IsRunning() { + return 0 + } + return mp.latestBlockHeight +} + func (mp *PosMempool) GetMempoolTx(txHash *BlockHash) *MempoolTx { mp.RLock() defer mp.RUnlock() From a90ddc4a79bf80f1807d0a2593b8dad30cc7d096 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 9 Apr 2024 14:42:52 -0400 Subject: [PATCH 608/762] Add support for atomic txns in the pos mempool (#1156) --- lib/block_view_atomic_txns.go | 65 ++++++++------ lib/blockchain.go | 5 ++ lib/errors.go | 1 + lib/legacy_mempool_test.go | 2 + lib/network.go | 5 +- lib/pos_fee_estimator.go | 2 +- lib/pos_mempool.go | 160 ++++++++++++++++++++++++++++++---- 7 files changed, 192 insertions(+), 48 deletions(-) diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index 3ba08036f..8f0055161 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -215,34 +215,9 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( fmt.Errorf("_connectAtomicTxnsWrapper: TxnMeta type: %v", txn.TxnMeta.GetTxnType().GetTxnString()) } - // Don't allow the atomic transactions and the wrapper to take up more than half of the block. 
- txnBytes, err := txn.ToBytes(false) - if err != nil { - return nil, 0, 0, 0, errors.Wrapf( - err, "_connectTransaction: Problem serializing transaction: ") - } - maxTxnSizeBytes := bav.Params.MaxBlockSizeBytesPoW / 2 - if bav.Params.IsPoSBlockHeight(uint64(blockHeight)) { - maxTxnSizeBytes = bav.GetMaxTxnSizeBytesPoS() - } - txnSizeBytes := uint64(len(txnBytes)) - if txnSizeBytes > maxTxnSizeBytes { - return nil, 0, 0, 0, RuleErrorTxnTooBig - } - - // Validate that the internal transactions cumulatively pay enough in fees to - // cover the atomic transactions AS WELL AS the wrapper. We validate this - // here to ensure we can test for these edge cases as they're also logically caught - // by _verifyAtomicTxnsWrapper. - if txnSizeBytes != 0 && bav.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB != 0 { - // Make sure there isn't overflow in the fee. - if txn.TxnFeeNanos != ((txn.TxnFeeNanos * 1000) / 1000) { - return nil, 0, 0, 0, RuleErrorOverflowDetectedInFeeRateCalculation - } - // If the fee is less than the minimum network fee per KB, return an error. - if (txn.TxnFeeNanos*1000)/txnSizeBytes < bav.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB { - return nil, 0, 0, 0, RuleErrorTxnFeeBelowNetworkMinimum - } + if err := bav._verifyAtomicTxnsSize(txn, uint64(blockHeight)); err != nil { + return nil, 0, 0, 0, + errors.Wrap(err, "_connectAtomicTxnsWrapper: failed to verify atomic transaction size") } // Verify the wrapper of the transaction. This does not verify the txn.TxnMeta contents, just that @@ -305,6 +280,40 @@ func (bav *UtxoView) _connectAtomicTxnsWrapper( return utxoOpsForTxn, totalInput, totalOutput, totalFees, nil } +func (bav *UtxoView) _verifyAtomicTxnsSize(txn *MsgDeSoTxn, blockHeight uint64) error { + // Don't allow the atomic transactions and the wrapper to take up more than half of the block. 
+ txnBytes, err := txn.ToBytes(false) + if err != nil { + return errors.Wrapf( + err, "_connectTransaction: Problem serializing transaction: ") + } + + maxTxnSizeBytes := bav.Params.MaxBlockSizeBytesPoW / 2 + if bav.Params.IsPoSBlockHeight(blockHeight) { + maxTxnSizeBytes = bav.GetMaxTxnSizeBytesPoS() + } + txnSizeBytes := uint64(len(txnBytes)) + if txnSizeBytes > maxTxnSizeBytes { + return RuleErrorTxnTooBig + } + + // Validate that the internal transactions cumulatively pay enough in fees to + // cover the atomic transactions AS WELL AS the wrapper. We validate this + // here to ensure we can test for these edge cases as they're also logically caught + // by _verifyAtomicTxnsWrapper. + if txnSizeBytes != 0 && bav.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB != 0 { + // Make sure there isn't overflow in the fee. + if txn.TxnFeeNanos != ((txn.TxnFeeNanos * 1000) / 1000) { + return RuleErrorOverflowDetectedInFeeRateCalculation + } + // If the fee is less than the minimum network fee per KB, return an error. + if (txn.TxnFeeNanos*1000)/txnSizeBytes < bav.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB { + return RuleErrorTxnFeeBelowNetworkMinimum + } + } + return nil +} + func _verifyAtomicTxnsWrapper(txn *MsgDeSoTxn) error { // Since the wrapper does not require a public key nor a corresponding signature, we force both // the transaction public key to be the ZeroPublicKey and the signature to be nil. diff --git a/lib/blockchain.go b/lib/blockchain.go index d5c4799f8..35fcd440b 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1732,6 +1732,11 @@ func CheckTransactionSanity(txn *MsgDeSoTxn, blockHeight uint32, params *DeSoPar existingInputs[*txin] = true } + // Make sure the transaction has a signature. 
+ if txn.TxnMeta.GetTxnType() != TxnTypeBitcoinExchange && txn.Signature.Sign == nil { + return RuleErrorTransactionHasNoSignature + } + return nil } diff --git a/lib/errors.go b/lib/errors.go index 655dcdecb..83692f6c8 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -63,6 +63,7 @@ const ( RuleErrorBlockRewardTxnMustHaveOneOutput RuleError = "RuleErrorBlockRewardTxnMustHaveOneOutput" RuleErrorBlockHeightAfterProofOfStakeCutover RuleError = "RuleErrorBlockHeightAfterProofOfStakeCutover" RuleErrorBestChainIsAtProofOfStakeCutover RuleError = "RuleErrorBestChainIsAtProofOfStakeCutover" + RuleErrorTransactionHasNoSignature RuleError = "RuleErrorTransactionHasNoSignature" RuleErrorMissingBlockProducerSignature RuleError = "RuleErrorMissingBlockProducerSignature" RuleErrorInvalidBlockProducerPublicKey RuleError = "RuleErrorInvalidBlockProducerPublicKey" diff --git a/lib/legacy_mempool_test.go b/lib/legacy_mempool_test.go index d367d567a..46f2a1150 100644 --- a/lib/legacy_mempool_test.go +++ b/lib/legacy_mempool_test.go @@ -83,6 +83,7 @@ func TestMempoolLongChainOfDependencies(t *testing.T) { }, TxnMeta: &BasicTransferMetadata{}, PublicKey: recipientPkBytes, + Signature: txn1.Signature, // Dummy signature. } //_signTxn(t, newTxn, false [>isSender is false since this is the recipient<]) @@ -170,6 +171,7 @@ func TestMempoolRateLimit(t *testing.T) { }, TxnMeta: &BasicTransferMetadata{}, PublicKey: recipientPkBytes, + Signature: txn1.Signature, // Dummy signature. } //_signTxn(t, newTxn, false [>isSender is false since this is the recipient<]) diff --git a/lib/network.go b/lib/network.go index 38cb0bb2d..4361163a0 100644 --- a/lib/network.go +++ b/lib/network.go @@ -4150,9 +4150,10 @@ func (msg *MsgDeSoTxn) UnmarshalJSON(data []byte) error { } // ComputeFeeRatePerKBNanos computes the fee rate per KB for a signed transaction. This function should not be used for -// unsigned transactions because the fee rate will not be accurate. 
+// unsigned transactions because the fee rate will not be accurate. However, we allow unsigned Atomic txn wrappers +// since there will never be a signature for the wrapper transactions. func (txn *MsgDeSoTxn) ComputeFeeRatePerKBNanos() (uint64, error) { - if txn.Signature.Sign == nil { + if txn.Signature.Sign == nil && txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { return 0, fmt.Errorf("ComputeFeeRatePerKBNanos: Cannot compute fee rate for unsigned txn") } diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index f66a56ff2..0d4ced366 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -354,7 +354,7 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFee( } minFeeRateEstimate, err := computeFeeGivenTxnAndFeeRate(txn, minFeeRateNanosPerKB) if minFeeRateEstimate > mempoolFeeEstimate && minFeeRateEstimate > pastBlocksFeeEstimate { - return minFeeRateNanosPerKB, nil + return minFeeRateEstimate, nil } if mempoolFeeEstimate < pastBlocksFeeEstimate { return pastBlocksFeeEstimate, nil diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 39b85e5d3..c4bff351b 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -471,7 +471,7 @@ func (mp *PosMempool) OnBlockDisconnected(block *MsgDeSoBlock) { return } - // Add all transactions from the block to the mempool. + // Add all transactions in the block to the mempool. for _, txn := range block.Txns { txnHash := txn.Hash() @@ -525,7 +525,7 @@ func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction) error { // First, validate that the transaction is properly formatted according to BalanceModel. We acquire a read lock on // the mempool. This allows multiple goroutines to safely perform transaction validation concurrently. In particular, // transaction signature verification can be parallelized. 
- if err := mp.checkTransactionSanity(mtxn.GetTxn()); err != nil { + if err := mp.checkTransactionSanity(mtxn.GetTxn(), false); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem verifying transaction") } @@ -563,7 +563,50 @@ func (mp *PosMempool) isTxnHashInRecentBlockCache(txnHash BlockHash) bool { return mp.recentBlockTxnCache.Contains(txnHash) } -func (mp *PosMempool) checkTransactionSanity(txn *MsgDeSoTxn) error { +func (mp *PosMempool) checkTransactionSanity(txn *MsgDeSoTxn, isInnerAtomicTxn bool) error { + // If the txn is an atomic, we need to check the transaction sanity for each txn as well as verify the wrapper. + if txn.TxnMeta.GetTxnType() == TxnTypeAtomicTxnsWrapper { + // First verify the wrapper. + atomicTxnsWrapper, ok := txn.TxnMeta.(*AtomicTxnsWrapperMetadata) + if !ok { + return fmt.Errorf( + "PosMempool.AddTransaction: Problem verifying atomic txn wrapper - casting metadata failed") + } + // Verify the size + if err := mp.readOnlyLatestBlockView._verifyAtomicTxnsSize(txn, mp.latestBlockHeight); err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Problem verifying atomic txn size") + } + // Verify the wrapper. + if err := _verifyAtomicTxnsWrapper(txn); err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Problem verifying atomic txn wrapper") + } + + // Verify the chain of transactions to make sure they are not tampered with. + if err := _verifyAtomicTxnsChain(atomicTxnsWrapper); err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Problem verifying atomic txn chain") + } + // Okay we've verified the wrapper and the chain of transactions. Now we need to verify each transaction. 
+ for _, innerTxn := range atomicTxnsWrapper.Txns { + if err := mp.checkTransactionSanity(innerTxn, true); err != nil { + return errors.Wrapf(err, "PosMempool.AddTransaction: Problem validating transaction sanity") + } + } + // Return early so we do not assess the rest of the validation checks on the wrapper. + return nil + } + + // If the txn is supposed to be an inner txn in an atomic wrapper, we need to make sure it is properly formed. + // If the txn is NOT supposed to an inner txn in an atomic wrapper, we need to make sure it does not have + // the extra data fields that are only allowed in atomic txns. + isAtomicTxn := txn.IsAtomicTxnsInnerTxn() + if isAtomicTxn != isInnerAtomicTxn { + return fmt.Errorf( + "PosMempool.AddTransaction: expected txn to be atomic: %v, got: %v", + isInnerAtomicTxn, + isAtomicTxn, + ) + } + if err := CheckTransactionSanity(txn, uint32(mp.latestBlockHeight), mp.params); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem validating transaction sanity") } @@ -596,24 +639,101 @@ func (mp *PosMempool) updateTransactionValidatedStatus(txnHash *BlockHash, valid txn.SetValidated(validated) } -func (mp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) error { - userPk := NewPublicKey(txn.Tx.PublicKey) +func (mp *PosMempool) checkNonceTracker(txn *MempoolTx, userPk *PublicKey) (*MempoolTx, error) { // Check the nonceTracker to see if this transaction is meant to replace an existing one. existingTxn := mp.nonceTracker.GetTxnByPublicKeyNonce(*userPk, *txn.Tx.TxnNonce) if existingTxn != nil && existingTxn.FeePerKB > txn.FeePerKB { - return errors.Wrapf(MempoolFailedReplaceByHigherFee, "PosMempool.AddTransaction: Problem replacing transaction "+ + return nil, errors.Wrapf(MempoolFailedReplaceByHigherFee, "PosMempool.AddTransaction: Problem replacing transaction "+ "by higher fee failed. New transaction has lower fee.") } + // TODO: is it okay to allow if the incoming tx is an inner atomic txn? 
+	if existingTxn != nil && existingTxn.Tx.IsAtomicTxnsInnerTxn() {
+		return nil, errors.Wrapf(MempoolFailedReplaceByHigherFee, "PosMempool.AddTransaction: Cannot replace txn that is "+
+			"an inner atomic txn.")
+	}
+	return existingTxn, nil
+}
+
+func (mp *PosMempool) removeNonces(txns []*MsgDeSoTxn) {
+	for _, txn := range txns {
+		userPk := NewPublicKey(txn.PublicKey)
+		mp.nonceTracker.RemoveTxnByPublicKeyNonce(*userPk, *txn.TxnNonce)
+	}
+}
+
+func (mp *PosMempool) persistMempoolAddEvent(txn *MempoolTx, persistToDb bool) {
+	// Emit an event for the newly added transaction.
+	if persistToDb && !mp.inMemoryOnly {
+		event := &MempoolEvent{
+			Txn:  txn,
+			Type: MempoolEventAdd,
+		}
+		mp.persister.EnqueueEvent(event)
+	}
+}
+
+func (mp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) error {
+	userPk := NewPublicKey(txn.Tx.PublicKey)
+
+	// Special handling for atomic txns. For atomic txns, the mempool will ignore the nonce for the wrapper txn
+	// and only track nonces for the inner txns. Additionally, only the wrapper txn will be added to the transaction
+	// register.
+	if txn.Tx.TxnMeta.GetTxnType() == TxnTypeAtomicTxnsWrapper {
+		// If the txn is an atomic txn, we need to add each txn individually.
+		atomicTxnsWrapper, ok := txn.Tx.TxnMeta.(*AtomicTxnsWrapperMetadata)
+		if !ok {
+			return fmt.Errorf(
+				"PosMempool.AddTransaction: Problem adding atomic txn - casting metadata failed")
+		}
+		var innerMempoolTxs []*MempoolTx
+		for _, innerTxn := range atomicTxnsWrapper.Txns {
+			newInnerMempoolTx, err := NewMempoolTx(innerTxn, txn.Added, uint64(txn.Height))
+			if err != nil {
+				return errors.Wrapf(err, "PosMempool.AddTransaction: Problem creating MempoolTx from inner atomic txn")
+			}
+			innerMempoolTxs = append(innerMempoolTxs, newInnerMempoolTx)
+		}
+		// We need to track the inner txns for which we've added nonces in the event that
+		// we need to remove them. We don't want to remove nonces for txns that were never added
+		// as it is possible the nonce tracker returned an error because the nonce is already used
+		// and removing it would effectively remove a different transaction from the nonce tracker.
+		var innerTxnsWithNoncesAdded []*MsgDeSoTxn
+		for _, innerMempoolTx := range innerMempoolTxs {
+			innerUserPk := NewPublicKey(innerMempoolTx.Tx.PublicKey)
+			if _, err := mp.checkNonceTracker(innerMempoolTx, innerUserPk); err != nil {
+				// if we hit an error, we need to remove all the nonces from the nonce tracker.
+				mp.removeNonces(innerTxnsWithNoncesAdded)
+				return errors.Wrapf(err, "PosMempool.AddTransaction: Problem checking nonce tracker")
+			}
+			// The nonce checks passed, so track the inner txn's nonce under the inner txn's own public key.
+			mp.nonceTracker.AddTxnByPublicKeyNonce(innerMempoolTx, *innerUserPk, *innerMempoolTx.Tx.TxnNonce)
+			innerTxnsWithNoncesAdded = append(innerTxnsWithNoncesAdded, innerMempoolTx.Tx)
+		}
+		// Only add the wrapper transaction to the transaction register.
+		if err := mp.txnRegister.AddTransaction(txn); err != nil {
+			return errors.Wrapf(err, "PosMempool.addTransactionNoLock: Problem adding txn to register")
+		}
+		// Emit a persist event only for the wrapper transaction.
+		mp.persistMempoolAddEvent(txn, persistToDb)
+		return nil
+	}
+
+	// Get the existing txn and check that the incoming txn can replace it (if applicable).
+	existingTxn, err := mp.checkNonceTracker(txn, userPk)
+	if err != nil {
+		return errors.Wrapf(err, "PosMempool.AddTransaction: Problem checking nonce tracker")
+	}
+
 	// We can now add the transaction to the mempool.
-	if err := mp.txnRegister.AddTransaction(txn); err != nil {
+	if err = mp.txnRegister.AddTransaction(txn); err != nil {
 		return errors.Wrapf(err, "PosMempool.addTransactionNoLock: Problem adding txn to register")
 	}
 
 	// If we've determined that this transaction is meant to replace an existing one, we remove the existing transaction now.
if existingTxn != nil { - if err := mp.removeTransactionNoLock(existingTxn, true); err != nil { + if err = mp.removeTransactionNoLock(existingTxn, true); err != nil { recoveryErr := mp.txnRegister.RemoveTransaction(txn) return errors.Wrapf(err, "PosMempool.AddTransaction: Problem removing old transaction from mempool during "+ "replacement with higher fee. Recovery error: %v", recoveryErr) @@ -624,13 +744,7 @@ func (mp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) err mp.nonceTracker.AddTxnByPublicKeyNonce(txn, *userPk, *txn.Tx.TxnNonce) // Emit an event for the newly added transaction. - if persistToDb && !mp.inMemoryOnly { - event := &MempoolEvent{ - Txn: txn, - Type: MempoolEventAdd, - } - mp.persister.EnqueueEvent(event) - } + mp.persistMempoolAddEvent(txn, persistToDb) return nil } @@ -683,8 +797,20 @@ func (mp *PosMempool) removeTransactionNoLock(txn *MempoolTx, persistToDb bool) return errors.Wrapf(err, "PosMempool.removeTransactionNoLock: Problem removing txn from register") } - // Remove the txn from the nonce tracker. - mp.nonceTracker.RemoveTxnByPublicKeyNonce(*userPk, *txn.Tx.TxnNonce) + if txn.Tx.TxnMeta.GetTxnType() == TxnTypeAtomicTxnsWrapper { + // For atomic transactions, we remove the nonces of the inner txns, but not the wrapper txn. + atomicTxnsWrapper, ok := txn.Tx.TxnMeta.(*AtomicTxnsWrapperMetadata) + if !ok { + return fmt.Errorf( + "PosMempool.RemoveTransaction: Problem removing atomic txn - casting metadata failed") + } + // Remove nonces for all inner txns. + mp.removeNonces(atomicTxnsWrapper.Txns) + } else { + // For non-atomic transactions, we just remove the nonce from the nonce tracker. + // Remove the transaction from the nonce tracker. + mp.nonceTracker.RemoveTxnByPublicKeyNonce(*userPk, *txn.Tx.TxnNonce) + } // Emit an event for the removed transaction. 
if persistToDb && !mp.inMemoryOnly { From 72a25233bf82ace90740c610432ef73349ff3e2f Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 9 Apr 2024 14:44:49 -0400 Subject: [PATCH 609/762] Recompute fees in CreateAtomicTxnWrapper (#1183) This PR has kinda increased in scope but I'm adding a description below that addresses all of the changes in this PR from feature/proof-of-stake. I'm also going to change the comparison branch to feature/proof-of-stake to make it a full review. * From @diamondhands: * Improve the error returned when we reject a txn from the mempool * This required adding a cache of the errors we produce when validating txns and using that to return the error upon request * Improve CreateAtomicTxnWrapper to add automatic fee computation * CreateAtomicTxnWrapper will now compute the exact fee required for each inner txn and update the fee on that txn if it's lower than what was computed * CreateAtomicTxnWrapper now computes the TOTAL fee required to submit the atomic txn, including the wrapper txn, and intelligently adds the extra required to the first inner txn in the list * Fix a bug in computeFeeRecursive to properly assess the nonce. Without this, we were over-estimating the fee significantly. * Fix a bug in computeFeeRecursive to properly assess signature sizes in atomic txns. Without this, we were significantly under-estimating txn size. * Introduce an UpdateTxnFee function that bundles in a special-case of updating the fee on a DAOCoinLimitOrder txn * Replaced all raw fee adjustments with a call to this function in core and backend * Mark DAOCoinLimitOrder.FeeNanos as deprecated. We don't need it after balance model, but if we don't set it to exactly the txn.TxnFeeNanos we get an error. 
* Add some error-checking in pos mempool that was missing * From @lazynina: * Proper sanity-checking of atomic txns * Add nonce handling for atomic txns * Although note there is still a TODO to add a synthetic nonce for atomic txns so that you can replace them by fee. This isn't time-sensitive so we're punting on it. Added a TODO in addTransactionNoLock for it. --- lib/block_view.go | 18 ++-- lib/block_view_atomic_txns_test.go | 16 +-- lib/blockchain.go | 167 ++++++++++++++++++++++------- lib/legacy_mempool.go | 6 +- lib/network.go | 18 ++-- lib/pos_fee_estimator.go | 42 ++++++-- lib/pos_mempool.go | 68 +++++++++--- lib/server.go | 7 +- 8 files changed, 249 insertions(+), 93 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 1ef84768d..be3d1a5e6 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4163,7 +4163,7 @@ func (bav *UtxoView) ConnectTransactionsFailSafe( txns []*MsgDeSoTxn, txHashes []*BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, verifySignatures bool, ignoreUtxos bool, ignoreFailing bool) ( _combinedUtxoOps [][]*UtxoOperation, _totalInputs []uint64, _totalOutputs []uint64, - _fees []uint64, _successFlags []bool, _err error) { + _fees []uint64, _errorsFound []error, _err error) { return bav._connectTransactionsFailSafe(txns, txHashes, blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos, ignoreFailing, 0) @@ -4177,7 +4177,7 @@ func (bav *UtxoView) ConnectTransactionsFailSafeWithLimit( txns []*MsgDeSoTxn, txHashes []*BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, verifySignatures bool, ignoreUtxos bool, ignoreFailing bool, transactionConnectLimit uint64) ( _combinedUtxoOps [][]*UtxoOperation, _totalInputs []uint64, _totalOutputs []uint64, - _fees []uint64, _successFlags []bool, _err error) { + _fees []uint64, _errorsFound []error, _err error) { return bav._connectTransactionsFailSafe(txns, txHashes, blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos, ignoreFailing, 
transactionConnectLimit) @@ -4187,21 +4187,21 @@ func (bav *UtxoView) _connectTransactionsFailSafe( txns []*MsgDeSoTxn, txHashes []*BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, verifySignatures bool, ignoreUtxos bool, ignoreFailing bool, transactionConnectLimit uint64) ( _combinedUtxoOps [][]*UtxoOperation, _totalInputs []uint64, _totalOutputs []uint64, - _fees []uint64, _successFlags []bool, _err error) { + _fees []uint64, _errorsFound []error, _err error) { var combinedUtxoOps [][]*UtxoOperation var totalInputs []uint64 var totalOutputs []uint64 var fees []uint64 - var successFlags []bool + var errorsFound []error var totalConnectedTxns uint64 - updateValues := func(utxoOps []*UtxoOperation, totalInput uint64, totalOutput uint64, fee uint64, success bool) { + updateValues := func(utxoOps []*UtxoOperation, totalInput uint64, totalOutput uint64, fee uint64, errFound error) { combinedUtxoOps = append(combinedUtxoOps, utxoOps) totalInputs = append(totalInputs, totalInput) totalOutputs = append(totalOutputs, totalOutput) fees = append(fees, fee) - successFlags = append(successFlags, success) + errorsFound = append(errorsFound, errFound) } // Connect the transactions in the order they are given. @@ -4219,7 +4219,7 @@ func (bav *UtxoView) _connectTransactionsFailSafe( if err != nil && ignoreFailing { // If ignoreFailing was set, we mark the transaction as failing and continue. 
glog.V(2).Infof("_connectTransactionsFailSafe: Ignoring failing txn %d: %v", ii, err) - updateValues(nil, 0, 0, 0, false) + updateValues(nil, 0, 0, 0, err) continue } else if err != nil { return nil, nil, nil, nil, nil, @@ -4233,7 +4233,7 @@ func (bav *UtxoView) _connectTransactionsFailSafe( return nil, nil, nil, nil, nil, errors.Wrapf(err, "_connectTransactionsFailSafe: Problem connecting txn %d", ii) } - updateValues(utxoOpsForTxn, totalInput, totalOutput, fee, true) + updateValues(utxoOpsForTxn, totalInput, totalOutput, fee, nil) // If the transactionConnectLimit was set to 0, we will try to connect all the provided transactions. if transactionConnectLimit == 0 { @@ -4248,7 +4248,7 @@ func (bav *UtxoView) _connectTransactionsFailSafe( } } - return combinedUtxoOps, totalInputs, totalOutputs, fees, successFlags, nil + return combinedUtxoOps, totalInputs, totalOutputs, fees, errorsFound, nil } func (bav *UtxoView) ValidateTransactionNonce(txn *MsgDeSoTxn, blockHeight uint64) error { diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index 517531f01..04fc8ddc5 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -27,7 +27,7 @@ func TestAtomicTxnsWrapperTxIndexMetadataEncoder(t *testing.T) { atomicTxns, signerPrivKeysBase58 := _generateUnsignedDependentAtomicTransactions(testMeta, int(100)) // Construct an atomic transaction. - atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) require.NoError(t, err) // Sign the internal atomic transactions, making the atomic transaction valid. @@ -95,7 +95,7 @@ func TestAtomicTxnsWrapperAtomicity(t *testing.T) { // Wrap the transactions in an atomic wrapper. // NOTE: This must be done before signing to ensure the extra data is present. 
- atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) require.NoError(t, err) // Sign all but the final transaction, sign the last one incorrectly. @@ -178,7 +178,7 @@ func TestAtomicTxnsSignatureFailure(t *testing.T) { nextIndex := (ii + 1) % len(atomicTxns) _signTxn(t, txn, signerPrivKeysBase58[nextIndex]) } - atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) require.NoError(t, err) // Try to connect them atomically. @@ -200,7 +200,7 @@ func TestConnectAtomicTxnsWrapperRuleErrors(t *testing.T) { // (This should fail -- RuleErrorTxnTooBig) numTxnsToGenerate := testMeta.params.MaxBlockSizeBytesPoW / 200 atomicTxns, _ := _generateSignedDependentAtomicTransactions(testMeta, int(numTxnsToGenerate)) - atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) require.NoError(t, err) _, err = _atomicTransactionsWrapperWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapper, 0) @@ -223,7 +223,7 @@ func TestConnectAtomicTxnsWrapperRuleErrors(t *testing.T) { // Try and cause overflow in the atomic transactions wrapper fee verification. 
// (This should fail -- RuleErrorOverflowDetectedInFeeRateCalculation) atomicTxns, _ = _generateSignedDependentAtomicTransactions(testMeta, int(100)) - atomicTxnsWrapper, _, err = testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + atomicTxnsWrapper, _, err = testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) require.NoError(t, err) atomicTxnsWrapper.TxnFeeNanos = math.MaxUint64 _, err = _atomicTransactionsWrapperWithConnectTimestamp( @@ -249,7 +249,7 @@ func TestVerifyAtomicTxnsWrapperRuleErrors(t *testing.T) { atomicTxns, _ := _generateSignedDependentAtomicTransactions(testMeta, 100) // Bundle the transactions together in a (valid) wrapper. - atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) require.NoError(t, err) // Try to use a public key other than the zero public key in the wrapper. @@ -333,7 +333,7 @@ func TestVerifyAtomicTxnsChain(t *testing.T) { atomicTxns, _ := _generateSignedDependentAtomicTransactions(testMeta, 100) // Bundle the transactions together in a (valid) wrapper. - atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) require.NoError(t, err) // Try to remove all the inner transactions. 
@@ -350,7 +350,7 @@ func TestVerifyAtomicTxnsChain(t *testing.T) { // (This should fail -- RuleErrorAtomicTxnsHasAtomicTxnsInnerTxn) atomicTxnsWrapperDuplicate, err = atomicTxnsWrapper.Copy() require.NoError(t, err) - innerAtomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns[:100], nil) + innerAtomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns[:100], nil, testMeta.mempool) require.NoError(t, err) atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns = atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[100:] diff --git a/lib/blockchain.go b/lib/blockchain.go index 35fcd440b..b11e286f0 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1733,7 +1733,9 @@ func CheckTransactionSanity(txn *MsgDeSoTxn, blockHeight uint32, params *DeSoPar } // Make sure the transaction has a signature. - if txn.TxnMeta.GetTxnType() != TxnTypeBitcoinExchange && txn.Signature.Sign == nil { + if txn.TxnMeta.GetTxnType() != TxnTypeBitcoinExchange && + txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper && + txn.Signature.Sign == nil { return RuleErrorTransactionHasNoSignature } @@ -3857,6 +3859,8 @@ func (bc *Blockchain) CreateDAOCoinLimitOrderTxn( "CreateDAOCoinLimitOrderTxn: Problem adding inputs: ") } // Set fee to its actual value now that we've added inputs and outputs. + // Note: This should not be necessary after the PoS fork because EstimateFee correctly + // sets this. But we set it here anyway just to be safe. 
txn.TxnMeta.(*DAOCoinLimitOrderMetadata).FeeNanos = fees // We want our transaction to have at least one input, even if it all @@ -4898,7 +4902,7 @@ func (bc *Blockchain) CreateMaxSpend( } else { feeAmountNanos = _computeMaxTxV1Fee(txn, minFeeRateNanosPerKB) } - txn.TxnFeeNanos = feeAmountNanos + UpdateTxnFee(txn, feeAmountNanos) txn.TxOutputs[len(txn.TxOutputs)-1].AmountNanos = spendableBalance - feeAmountNanos } @@ -5021,7 +5025,7 @@ func (bc *Blockchain) AddInputsAndChangeToTransactionWithSubsidy( } // Initialize to 0. - txArg.TxnFeeNanos = 0 + UpdateTxnFee(txArg, 0) if txArg.TxnMeta.GetTxnType() != TxnTypeBlockReward { if !isInterfaceValueNil(mempool) { @@ -5030,14 +5034,16 @@ func (bc *Blockchain) AddInputsAndChangeToTransactionWithSubsidy( maxBlockSizeBytes = utxoView.GetSoftMaxBlockSizeBytesPoS() } // TODO: replace MaxBasisPoints with variables configured by flags. - txArg.TxnFeeNanos, err = mempool.EstimateFee(txArg, minFeeRateNanosPerKB, MaxBasisPoints, + newTxFee, err := mempool.EstimateFee(txArg, minFeeRateNanosPerKB, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, maxBlockSizeBytes) + UpdateTxnFee(txArg, newTxFee) if err != nil { return 0, 0, 0, 0, errors.Wrapf(err, "AddInputsAndChangeToTransaction: Problem estimating fee: ") } } else { - txArg.TxnFeeNanos = EstimateMaxTxnFeeV1(txArg, minFeeRateNanosPerKB) + newTxFee := EstimateMaxTxnFeeV1(txArg, minFeeRateNanosPerKB) + UpdateTxnFee(txArg, newTxFee) } } @@ -5705,6 +5711,7 @@ func (bc *Blockchain) CreateCoinUnlockTxn( func (bc *Blockchain) CreateAtomicTxnsWrapper( unsignedTransactions []*MsgDeSoTxn, extraData map[string][]byte, + mempool Mempool, ) ( _txn *MsgDeSoTxn, _fees uint64, @@ -5731,48 +5738,128 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( } chainedUnsignedTransactions[0].ExtraData[AtomicTxnsChainLength] = UintToBuf(uint64(len(unsignedTransactions))) - // Construct the chained transactions and keep track of the total fees paid. 
- var totalFees uint64 - for ii, txn := range chainedUnsignedTransactions { - // Compute the atomic hashes. - nextIndex := (ii + 1) % len(chainedUnsignedTransactions) - nextHash, err := chainedUnsignedTransactions[nextIndex].AtomicHash() - if err != nil { - return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to compute next hash") + utxoView, err := bc.GetUncommittedTipView() + if err != nil { + return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to get uncommitted tip view") + } + maxBlockSizeBytes := bc.params.MaxBlockSizeBytesPoW + if bc.params.IsPoSBlockHeight(uint64(bc.BlockTip().Height)) { + maxBlockSizeBytes = utxoView.GetSoftMaxBlockSizeBytesPoS() + } + // First iterate over the transactions, giving them a dummy value for the atomic hash and update the fee nanos. + // If the newly computed fee nanos is less than the original fee nanos, we do not update the fees. + dummyAtomicHashBytes := RandomBytes(32) + for _, txn := range chainedUnsignedTransactions { + if len(txn.ExtraData) == 0 { + txn.ExtraData = make(map[string][]byte) } - prevIndex := (ii - 1 + len(chainedUnsignedTransactions)) % len(chainedUnsignedTransactions) - prevHash, err := chainedUnsignedTransactions[prevIndex].AtomicHash() + txn.ExtraData[NextAtomicTxnPreHash] = dummyAtomicHashBytes + txn.ExtraData[PreviousAtomicTxnPreHash] = dummyAtomicHashBytes + newFeeEstimate, err := mempool.EstimateFee( + txn, 0, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, maxBlockSizeBytes) if err != nil { - return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to copy prev hash") + return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to recompute fee estimate") } + if txn.TxnFeeNanos < newFeeEstimate { + UpdateTxnFee(txn, newFeeEstimate) + } + } - // Set the transaction extra data and append to the chained list. 
- if len(txn.ExtraData) == 0 { - txn.ExtraData = make(map[string][]byte) + // We break the assembly of the atomic txn into a function so we can call it in a + // loop afterward. + assembleAtomicTxn := func() (*MsgDeSoTxn, error) { + // Construct the chained transactions and keep track of the total fees paid. + var totalFees uint64 + for ii, txn := range chainedUnsignedTransactions { + // Compute the atomic hashes. + nextIndex := (ii + 1) % len(chainedUnsignedTransactions) + nextHash, err := chainedUnsignedTransactions[nextIndex].AtomicHash() + if err != nil { + return nil, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to compute next hash") + } + prevIndex := (ii - 1 + len(chainedUnsignedTransactions)) % len(chainedUnsignedTransactions) + prevHash, err := chainedUnsignedTransactions[prevIndex].AtomicHash() + if err != nil { + return nil, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to copy prev hash") + } + + // Set the transaction extra data and append to the chained list. + if len(txn.ExtraData) == 0 { + txn.ExtraData = make(map[string][]byte) + } + txn.ExtraData[NextAtomicTxnPreHash] = nextHash.ToBytes() + txn.ExtraData[PreviousAtomicTxnPreHash] = prevHash.ToBytes() + + // Track the total fees paid. + newTotalFees, err := SafeUint64().Add(totalFees, txn.TxnFeeNanos) + if err != nil { + return nil, errors.Wrapf(err, "CreateAtomicTxnsWrapper: total fee "+ + "overflow: %v + %v", totalFees, txn.TxnFeeNanos) + } + totalFees = newTotalFees } - txn.ExtraData[NextAtomicTxnPreHash] = nextHash.ToBytes() - txn.ExtraData[PreviousAtomicTxnPreHash] = prevHash.ToBytes() - // Track the total fees paid. - totalFees, err = SafeUint64().Add(totalFees, txn.TxnFeeNanos) - if err != nil { - return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: total fee overflow") + // Create an atomic transactions wrapper taking special care to the rules specified in _verifyAtomicTxnsWrapper. 
+ // Because we do not call AddInputsAndChangeToTransaction on the wrapper, we must specify ALL fields exactly. + txn := &MsgDeSoTxn{ + TxnVersion: 1, + TxInputs: nil, + TxOutputs: nil, + TxnNonce: &DeSoNonce{ExpirationBlockHeight: 0, PartialID: 0}, + TxnMeta: &AtomicTxnsWrapperMetadata{Txns: chainedUnsignedTransactions}, + PublicKey: ZeroPublicKey.ToBytes(), + ExtraData: extraData, + Signature: DeSoSignature{}, } + // Start by simply setting the fee on this txn to the sum of all the fees in our + // inner txns. + UpdateTxnFee(txn, totalFees) + + return txn, nil } - // Create an atomic transactions wrapper taking special care to the rules specified in _verifyAtomicTxnsWrapper. - // Because we do not call AddInputsAndChangeToTransaction on the wrapper, we must specify ALL fields exactly. - txn := &MsgDeSoTxn{ - TxnVersion: 1, - TxInputs: nil, - TxOutputs: nil, - TxnFeeNanos: totalFees, - TxnNonce: &DeSoNonce{ExpirationBlockHeight: 0, PartialID: 0}, - TxnMeta: &AtomicTxnsWrapperMetadata{Txns: chainedUnsignedTransactions}, - PublicKey: ZeroPublicKey.ToBytes(), - ExtraData: extraData, - Signature: DeSoSignature{}, - } - - return txn, totalFees, nil + // Below we assemble the atomic txn, update the fee, and repeat until the fee + // is consistent. There are multiple reasons why we have to do this: + // + // 1. In order to account for the wrapper txn, we need to call EstimateFee on the + // fully assembled txn and then add the fee delta to one of the inner txns so + // that it's covered. + // + // 2. When we update the fee on an inner txn, we also need to update the AtomicHash + // chain because changing the fee in a txn changes its AtomicHash. + // + // 3. Increasing the fee on the inner txn and the outer txn can result also in the txn + // becoming larger (never smaller) because we encode the fee as a varint. So we + // may need an extra iteration or two after we adjust the fee on the inner txn + // for the overall txn size to stabilize. 
+ for { + atomicTxn, err := assembleAtomicTxn() + if err != nil { + return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to assemble atomic txn: ") + } + previousFeeEstimate := atomicTxn.TxnFeeNanos + + // Use EstimateFee to set the fee INCLUDING the wrapper. Note that this fee should generally be a bit + // higher than the totalFee computed above because the atomic wrapper adds overhead. + newFeeEstimate, err := mempool.EstimateFee( + atomicTxn, 0, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, maxBlockSizeBytes) + if err != nil { + return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to compute "+ + "fee on full txn") + } + // We know we're done when the fee computed by EstimateFee is <= the totalFee + // that we computed by summing all the fees on the inner txns, which is computed + // by previousFeeEstimate. + if newFeeEstimate <= previousFeeEstimate { + return atomicTxn, newFeeEstimate, nil + } + // If the fees we currently have set in all of our txns come up short, then + // add the extra we need to the first txn. After we do this, we also need to + // adjust the AtomicHash chain because changing the fee in a txn also changes + // its AtomicHash. To do this, we just loop over again. 
+ feeDelta := newFeeEstimate - previousFeeEstimate + UpdateTxnFee( + chainedUnsignedTransactions[0], + chainedUnsignedTransactions[0].TxnFeeNanos+feeDelta) + } } diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 28e4b9777..df288f385 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2458,7 +2458,7 @@ func EstimateMaxTxnFeeV1(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) uint64 { for feeAmountNanos == 0 || feeAmountNanos != prevFeeAmountNanos { prevFeeAmountNanos = feeAmountNanos feeAmountNanos = _computeMaxTxV1Fee(txn, minFeeRateNanosPerKB) - txn.TxnFeeNanos = feeAmountNanos + UpdateTxnFee(txn, feeAmountNanos) } return feeAmountNanos } @@ -2651,9 +2651,9 @@ func (mp *DeSoMempool) BlockUntilReadOnlyViewRegenerated() { // WaitForTxnValidation is a blocking call that waits for a transaction to be validated. // The legacy mempool doesn't validate transactions, so this function always returns true // after BlockUntilReadOnlyViewRegenerated is called. -func (mp *DeSoMempool) WaitForTxnValidation(_ *BlockHash) bool { +func (mp *DeSoMempool) WaitForTxnValidation(_ *BlockHash) error { mp.BlockUntilReadOnlyViewRegenerated() - return true + return nil } func (mp *DeSoMempool) StartMempoolDBDumper() { diff --git a/lib/network.go b/lib/network.go index 4361163a0..f433ab0a0 100644 --- a/lib/network.go +++ b/lib/network.go @@ -4157,21 +4157,23 @@ func (txn *MsgDeSoTxn) ComputeFeeRatePerKBNanos() (uint64, error) { return 0, fmt.Errorf("ComputeFeeRatePerKBNanos: Cannot compute fee rate for unsigned txn") } + var err error txBytes, err := txn.ToBytes(false) if err != nil { return 0, errors.Wrapf(err, "ComputeFeeRatePerKBNanos: Problem converting txn to bytes") } + totalFees := txn.TxnFeeNanos + if totalFees != ((totalFees * 1000) / 1000) { + return 0, errors.Wrapf(RuleErrorOverflowDetectedInFeeRateCalculation, + "ComputeFeeRatePerKBNanos: Overflow detected in fee rate calculation") + } + serializedLen := uint64(len(txBytes)) if serializedLen == 0 { 
return 0, fmt.Errorf("ComputeFeeRatePerKBNanos: Txn has zero length") } - fees := txn.TxnFeeNanos - if fees != ((fees * 1000) / 1000) { - return 0, errors.Wrapf(RuleErrorOverflowDetectedInFeeRateCalculation, "ComputeFeeRatePerKBNanos: Overflow detected in fee rate calculation") - } - - return (fees * 1000) / serializedLen, nil + return (totalFees * 1000) / serializedLen, nil } // ================================================================== @@ -8222,6 +8224,10 @@ type DAOCoinLimitOrderMetadata struct { // utxo inputs that can be used to immediately execute this trade. BidderInputs []*DeSoInputsByTransactor + // DEPRECATED: This field was needed when we were on a UTXO model but + // it is redundant now that we have switched to a balance model because + // we embed the fee directly into the top level of the txn. + // // Since a DAO Coin Limit Order may spend DESO or yield DESO to the // transactor, we specify FeeNanos in the transaction metadata in // order to ensure the transactor pays the standard fee rate for the size diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index 0d4ced366..2462428e3 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -421,15 +421,19 @@ func computeFeeGivenTxnAndFeeRate(txn *MsgDeSoTxn, feeRateNanosPerKB uint64) (ui PartialID: math.MaxUint64, } } - if txnClone.TxnNonce.ExpirationBlockHeight == 0 { - txnClone.TxnNonce.ExpirationBlockHeight = math.MaxUint64 - } - if txnClone.TxnNonce.PartialID == 0 { - txnClone.TxnNonce.PartialID = math.MaxUint64 + // Account for the possible size of the nonce. If we're dealing with an atomic txn, + // the nonce values are intentionally set to zero so this is a special case. 
+ if txnClone.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { + if txnClone.TxnNonce.ExpirationBlockHeight == 0 { + txnClone.TxnNonce.ExpirationBlockHeight = math.MaxUint64 + } + if txnClone.TxnNonce.PartialID == 0 { + txnClone.TxnNonce.PartialID = math.MaxUint64 + } } // Set the TxnFeeNanos to the maximum value. - txnClone.TxnFeeNanos = math.MaxUint64 + UpdateTxnFee(txnClone, math.MaxUint64) txnFeeNanos, err := computeFeeRecursive(txnClone, feeRateNanosPerKB) if err != nil { return 0, errors.Wrap(err, "computeFeeGivenTxnAndFeeRate: Problem computing fee rate recursively") @@ -437,6 +441,16 @@ func computeFeeGivenTxnAndFeeRate(txn *MsgDeSoTxn, feeRateNanosPerKB uint64) (ui return txnFeeNanos, nil } +func UpdateTxnFee(txn *MsgDeSoTxn, newFee uint64) { + txn.TxnFeeNanos = newFee + // There is a special case if the txn is a DAOCoinLimitOrder whereby we need to set + // the fee in the TxnMeta as well. This field is deprecated and should be removed in + // the future but for now we set it to maximize the accuracy of the fee estimation. + if txn.TxnMeta.GetTxnType() == TxnTypeDAOCoinLimitOrder { + txn.TxnMeta.(*DAOCoinLimitOrderMetadata).FeeNanos = newFee + } +} + // computeFeeRecursive computes the fee in nanos for the provided transaction and fee rate // in nanos per KB. It does this by recursively computing the fee until the fee converges. // It should only be called from computeFeeGivenTxnAndFeeRate and assumes that the TxnFeeNanos @@ -459,8 +473,20 @@ func computeFeeRecursive(txn *MsgDeSoTxn, feeRateNanosPerKB uint64) (uint64, err if err != nil { return 0, errors.Wrap(err, "computeFeeRecursive: Problem serializing txn") } + // We need to add a buffer for the signature. The maximum DER signature length is 74 bytes. 
const MaxDERSigLen = 74 - txnBytesLen := uint64(len(txnBytesNoSignature)) + MaxDERSigLen + var txnBytesLen uint64 + if txn.TxnMeta.GetTxnType() == TxnTypeAtomicTxnsWrapper { + // If we're dealing with an atomic txn, then we need to add a buffer + // for a signature on *each* of the inner txns. Note we shouldn't ever see an overflow + // here because the size of the txn is capped. + atomicTxnMeta := txn.TxnMeta.(*AtomicTxnsWrapperMetadata) + txnBytesLen = uint64(len(txnBytesNoSignature)) + uint64(len(atomicTxnMeta.Txns))*MaxDERSigLen + } else { + // If we're here, we're dealing with a typical txn that only requires one signature's + // worth of buffer to be added. + txnBytesLen = uint64(len(txnBytesNoSignature)) + MaxDERSigLen + } // Compute the new txn fee. If the computed fee is a decimal, we round up to the // next integer value. We define the math as follows: @@ -469,7 +495,7 @@ func computeFeeRecursive(txn *MsgDeSoTxn, feeRateNanosPerKB uint64) (uint64, err // Ref: https://stackoverflow.com/questions/17944/how-to-round-up-the-result-of-integer-division txnFeeNanos := (txnBytesLen*feeRateNanosPerKB + BytesPerKB - 1) / BytesPerKB if txnFeeNanos < txn.TxnFeeNanos { - txn.TxnFeeNanos = txnFeeNanos + UpdateTxnFee(txn, txnFeeNanos) return computeFeeRecursive(txn, feeRateNanosPerKB) } return txnFeeNanos, nil diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index c4bff351b..4e00bf487 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -37,7 +37,7 @@ type Mempool interface { GetAugmentedUniversalView() (*UtxoView, error) GetAugmentedUtxoViewForPublicKey(pk []byte, optionalTx *MsgDeSoTxn) (*UtxoView, error) BlockUntilReadOnlyViewRegenerated() - WaitForTxnValidation(txHash *BlockHash) bool + WaitForTxnValidation(txHash *BlockHash) error CheckSpend(op UtxoKey) *MsgDeSoTxn GetOrderedTransactions() []*MempoolTx IsTransactionInPool(txHash *BlockHash) bool @@ -231,6 +231,10 @@ type PosMempool struct { // This cache is used to power logic that waits for a 
transaction to either be validated in the mempool // or be included in a block. recentBlockTxnCache lru.KVCache + + // recentRejectedTxnCache is a cache to store the txns that were recently rejected so that we can return better + // errors for them. + recentRejectedTxnCache lru.KVCache } // PosMempoolIterator is a wrapper around FeeTimeIterator, modified to return MsgDeSoTxn instead of MempoolTx. @@ -305,7 +309,8 @@ func (mp *PosMempool) Init( mp.mempoolBackupIntervalMillis = mempoolBackupIntervalMillis mp.maxValidationViewConnects = maxValidationViewConnects mp.transactionValidationRefreshIntervalMillis = transactionValidationRefreshIntervalMillis - mp.recentBlockTxnCache = lru.NewKVCache(100000) // cache 100K latest txns from blocks. + mp.recentBlockTxnCache = lru.NewKVCache(100000) // cache 100K latest txns from blocks. + mp.recentRejectedTxnCache = lru.NewKVCache(100000) // cache 100K rejected txns. // TODO: parameterize num blocks. Also, how to pass in blocks. err = mp.feeEstimator.Init( @@ -445,10 +450,14 @@ func (mp *PosMempool) OnBlockConnected(block *MsgDeSoBlock) { mp.addTxnHashToRecentBlockCache(*txnHash) // Remove the transaction from the mempool. - mp.removeTransactionNoLock(existingTxn, true) + if err := mp.removeTransactionNoLock(existingTxn, true); err != nil { + glog.Errorf("PosMempool.OnBlockConnected: Problem removing transaction from mempool: %v", err) + } } - mp.refreshNoLock() + if err := mp.refreshNoLock(); err != nil { + glog.Errorf("PosMempool.OnBlockConnected: Problem refreshing mempool: %v", err) + } // Add the block to the fee estimator. This is a best effort operation. If we fail to add the block // to the fee estimator, we log an error and continue. @@ -519,6 +528,12 @@ func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction) error { } // Acquire the mempool lock for all operations related to adding the transaction + // TODO: Do we need to wrap all of our validation logic in a write-lock? 
We should revisit + // this later and try to pull as much as we can out of the critical section here. The reason + // we added this lock is because checkTransactionSanity was calling ValidateTransactionNonce + // on the readOnly view, which was causing a modification of the view's PKID map at the same + // time as another thread was reading from it. This lock solves the issue but may not be the + // most optimal. mp.Lock() defer mp.Unlock() @@ -563,7 +578,7 @@ func (mp *PosMempool) isTxnHashInRecentBlockCache(txnHash BlockHash) bool { return mp.recentBlockTxnCache.Contains(txnHash) } -func (mp *PosMempool) checkTransactionSanity(txn *MsgDeSoTxn, isInnerAtomicTxn bool) error { +func (mp *PosMempool) checkTransactionSanity(txn *MsgDeSoTxn, expectInnerAtomicTxn bool) error { // If the txn is an atomic, we need to check the transaction sanity for each txn as well as verify the wrapper. if txn.TxnMeta.GetTxnType() == TxnTypeAtomicTxnsWrapper { // First verify the wrapper. @@ -598,12 +613,12 @@ func (mp *PosMempool) checkTransactionSanity(txn *MsgDeSoTxn, isInnerAtomicTxn b // If the txn is supposed to be an inner txn in an atomic wrapper, we need to make sure it is properly formed. // If the txn is NOT supposed to an inner txn in an atomic wrapper, we need to make sure it does not have // the extra data fields that are only allowed in atomic txns. - isAtomicTxn := txn.IsAtomicTxnsInnerTxn() - if isAtomicTxn != isInnerAtomicTxn { + isInnerAtomicTxn := txn.IsAtomicTxnsInnerTxn() + if isInnerAtomicTxn != expectInnerAtomicTxn { return fmt.Errorf( "PosMempool.AddTransaction: expected txn to be atomic: %v, got: %v", + expectInnerAtomicTxn, isInnerAtomicTxn, - isAtomicTxn, ) } @@ -680,6 +695,11 @@ func (mp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) err // Special handling for atomic txns. For atomic txns, the mempool will ignore the nonce for the wrapper txn // and only track nonces for the inner txns. 
Additionally, only the wrapper txn will be added to the transaction // register. + // + // TODO: We should allow replace-by-fee for atomic txns. To accomplish this, we can compute a "derived nonce" + // for the atomic txn that has {lowest block height, hash(inner txn partial ids)) as its nonce. This would + // allow one to replace an atomic txn with a new one paying a higher fee as long as they keep the nonces of + // the inner txns the same. if txn.Tx.TxnMeta.GetTxnType() == TxnTypeAtomicTxnsWrapper { // If the txn is an atomic txn, we need to add each txn individually. atomicTxnsWrapper, ok := txn.Tx.TxnMeta.(*AtomicTxnsWrapperMetadata) @@ -713,6 +733,9 @@ func (mp *PosMempool) addTransactionNoLock(txn *MempoolTx, persistToDb bool) err } // Only add the wrapper transaction to the transaction register. if err := mp.txnRegister.AddTransaction(txn); err != nil { + // If we failed to add the transaction to the txn register, we need to remove the inner txns' + // nonces from the nonce tracker. + mp.removeNonces(innerTxnsWithNoncesAdded) return errors.Wrapf(err, "PosMempool.addTransactionNoLock: Problem adding txn to register") } // Emit a persist event only for the wrapper transaction. @@ -900,6 +923,7 @@ func (mp *PosMempool) validateTransactions() error { mp.RLock() // We copy the reference to the readOnlyLatestBlockView. Since the utxoView is immutable, we don't need to copy the // entire view while we hold the lock. + // We hold a read-lock on the mempool to get the transactions and the latest block view. validationView := mp.readOnlyLatestBlockView mempoolTxns := mp.getTransactionsNoLock() mp.RUnlock() @@ -923,7 +947,8 @@ func (mp *PosMempool) validateTransactions() error { } // Connect the transactions to the validation view. We use the latest block height + 1 as the block height to connect // the transactions. This is because the mempool contains transactions that we use for producing the next block. 
- _, _, _, _, successFlags, err := copyValidationView.ConnectTransactionsFailSafeWithLimit(txns, txHashes, uint32(mp.latestBlockHeight)+1, + _, _, _, _, errorsFound, err := copyValidationView.ConnectTransactionsFailSafeWithLimit( + txns, txHashes, uint32(mp.latestBlockHeight)+1, time.Now().UnixNano(), true, false, true, mp.maxValidationViewConnects) if err != nil { return errors.Wrapf(err, "PosMempool.validateTransactions: Problem connecting transactions") @@ -931,7 +956,7 @@ func (mp *PosMempool) validateTransactions() error { // We iterate through the successFlags and update the validated status of the transactions in the mempool. var txnsToRemove []*MempoolTx - for ii, successFlag := range successFlags { + for ii, errFound := range errorsFound { if ii >= len(mempoolTxns) { break } @@ -939,10 +964,13 @@ func (mp *PosMempool) validateTransactions() error { // transaction in the mempool. If the transaction failed to connect to the validation view, we add it to the // txnsToRemove list. Note that we don't need to hold a lock while updating the validated status of the // transactions in the mempool, since the updateTransactionValidatedStatus already holds the lock. - if successFlag { + if errFound == nil { mp.updateTransactionValidatedStatus(mempoolTxns[ii].Hash, true) } else { txnsToRemove = append(txnsToRemove, mempoolTxns[ii]) + // Add an error for the txn to our cache so we can return it to the user if they + // ask for it later. + mp.recentRejectedTxnCache.Add(*mempoolTxns[ii].Hash, errFound) } } @@ -1123,19 +1151,27 @@ func (mp *PosMempool) BlockUntilReadOnlyViewRegenerated() { // WaitForTxnValidation blocks until the transaction with the given hash is either validated in the mempool, // in a recent block, or no longer in the mempool. -func (mp *PosMempool) WaitForTxnValidation(txHash *BlockHash) bool { +func (mp *PosMempool) WaitForTxnValidation(txHash *BlockHash) error { // Check fairly often. Not too often. 
checkIntervalMillis := mp.transactionValidationRefreshIntervalMillis / 5 if checkIntervalMillis == 0 { checkIntervalMillis = 1 } for { - mtxn := mp.GetTransaction(txHash) - if mtxn.IsValidated() { - return true + rejectionErr, wasRejected := mp.recentRejectedTxnCache.Lookup(*txHash) + if wasRejected { + return rejectionErr.(error) } + mtxn := mp.GetTransaction(txHash) if mtxn == nil { - return mp.isTxnHashInRecentBlockCache(*txHash) + if mp.isTxnHashInRecentBlockCache(*txHash) { + return nil + } else { + return fmt.Errorf("Txn was never received or it was " + + "rejected for an unknown reason") + } + } else if mtxn.IsValidated() { + return nil } // Sleep for a bit and then check again. time.Sleep(time.Duration(checkIntervalMillis) * time.Millisecond) diff --git a/lib/server.go b/lib/server.go index d354abca6..3cf0f4ce9 100644 --- a/lib/server.go +++ b/lib/server.go @@ -274,9 +274,10 @@ func (srv *Server) BroadcastTransaction(txn *MsgDeSoTxn) ([]*MsgDeSoTxn, error) // At this point, we know the transaction has been run through the mempool. // Now wait for an update of the ReadOnlyUtxoView so we don't break anything. 
- isValidated := srv.GetMempool().WaitForTxnValidation(txnHash) - if !isValidated { - return nil, fmt.Errorf("BroadcastTransaction: Transaction %v was not validated", txnHash) + validationErr := srv.GetMempool().WaitForTxnValidation(txnHash) + if validationErr != nil { + return nil, fmt.Errorf("BroadcastTransaction: Transaction %v "+ + "was not validated due to error: %v", txnHash, validationErr) } return mempoolTxs, nil From 0d1c21b7f3c197ee799591225d4a089bf014b374 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 9 Apr 2024 16:14:17 -0400 Subject: [PATCH 610/762] Make sure global params doesn't overflow on MaximumVestedIntersectionsPerLockupTransaction (#1187) --- lib/block_view.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/block_view.go b/lib/block_view.go index be3d1a5e6..a71a4153a 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3357,6 +3357,12 @@ func (bav *UtxoView) _connectUpdateGlobalParams( "_connectUpdateGlobalParams: " + "unable to decode MaximumVestedIntersectionsPerLockupTransaction as uint64") } + if maximumVestedIntersectionsPerLockupTransaction > math.MaxInt { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: MaximumVestedIntersectionsPerLockupTransaction must be <= %d", + math.MaxInt, + ) + } newGlobalParamsEntry.MaximumVestedIntersectionsPerLockupTransaction = int(maximumVestedIntersectionsPerLockupTransaction) } From 6b6bf518f414ddd15a2bfbcda5409fbc217f89f3 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 9 Apr 2024 16:28:36 -0400 Subject: [PATCH 611/762] Remove Duplicate Group Key Check For Validators (#1191) --- lib/network_manager.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lib/network_manager.go b/lib/network_manager.go index 754ac5c8f..29b34631c 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -424,13 +424,6 @@ func (nm *NetworkManager) processOutboundConnection(conn 
Connection) (*RemoteNod nm.AddrMgr.Good(oc.address) } - // If this is a non-persistent outbound peer and the group key overlaps with another peer we're already connected to then - // abort mission. We only connect to one peer per IP group in order to prevent Sybil attacks. - if !oc.isPersistent && nm.cmgr.IsFromRedundantOutboundIPAddress(oc.address) { - return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Rejecting OUTBOUND NON-PERSISTENT "+ - "connection with redundant group key (%s).", addrmgr.GroupKey(oc.address)) - } - na, err := nm.ConvertIPStringToNetAddress(oc.connection.RemoteAddr().String()) if err != nil { return nil, errors.Wrapf(err, "NetworkManager.handleOutboundConnection: Problem calling ipToNetAddr "+ From 30c9acf225387ef45be412a114486bdc41b3db52 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 9 Apr 2024 17:01:44 -0400 Subject: [PATCH 612/762] Checkpoint syncing providers system (#1185) * Checkpoint syncing providers system * Apply suggestions from code review Co-authored-by: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> --------- Co-authored-by: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> --- cmd/config.go | 27 +++++++++ cmd/node.go | 1 + cmd/run.go | 8 +++ lib/blockchain.go | 121 +++++++++++++++++++++++++++++++++++++++++ lib/blockchain_test.go | 4 +- lib/constants.go | 5 ++ lib/server.go | 111 +++++++++++++++++++++++++++++++++---- lib/txindex.go | 2 +- 8 files changed, 266 insertions(+), 13 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 74ad8ddd6..50caaead0 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -1,6 +1,7 @@ package cmd import ( + "net/url" "os" "path/filepath" @@ -81,6 +82,9 @@ type Config struct { // State Syncer StateChangeDir string StateSyncerMempoolTxnSyncLimit uint64 + + // PoS Checkpoint Syncing + CheckpointSyncingProviders []string } func LoadConfig() *Config { @@ -177,6 +181,29 @@ func LoadConfig() *Config { 
config.StateChangeDir = viper.GetString("state-change-dir") config.StateSyncerMempoolTxnSyncLimit = viper.GetUint64("state-syncer-mempool-txn-sync-limit") + // PoS Checkpoint Syncing + config.CheckpointSyncingProviders = viper.GetStringSlice("checkpoint-syncing-providers") + for _, provider := range config.CheckpointSyncingProviders { + if _, err := url.ParseRequestURI(provider); err != nil { + glog.Fatalf("Invalid checkpoint syncing provider URL: %v", provider) + } + // TODO: do we want to make a request to the checkpoint syncing provider to ensure it's valid? + } + // TODO: add default provider here based on network. However, if someone wants to sync w/o checkpoint + // syncing, they should be able to do so. How do we support this? another flag I guess. + if len(config.CheckpointSyncingProviders) == 0 && !config.Regtest { + if testnet { + config.CheckpointSyncingProviders = []string{lib.DefaultTestnetCheckpointProvider} + } else { + config.CheckpointSyncingProviders = []string{lib.DefaultMainnetCheckpointProvider} + } + } + + if len(config.CheckpointSyncingProviders) == 0 && config.Regtest { + glog.Warningln("No checkpoint syncing providers specified. Syncing will require verification of signatures" + + " on all blocks, which may be slow. 
Consider specifying a checkpoint syncing provider.") + } + return &config } diff --git a/cmd/node.go b/cmd/node.go index e9d7f0fd8..b7f251e27 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -297,6 +297,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Config.MempoolMaxValidationViewConnects, node.Config.TransactionValidationRefreshIntervalMillis, node.Config.StateSyncerMempoolTxnSyncLimit, + node.Config.CheckpointSyncingProviders, ) if err != nil { // shouldRestart can be true if, on the previous run, we did not finish flushing all ancestral diff --git a/cmd/run.go b/cmd/run.go index 7b739d42b..30eab52cf 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -1,6 +1,7 @@ package cmd import ( + "fmt" "github.com/deso-protocol/core/lib" "github.com/golang/glog" "github.com/spf13/cobra" @@ -226,6 +227,13 @@ func SetupRunFlags(cmd *cobra.Command) { "from an empty string to a non-empty string (or from a non-empty string to the empty string) requires a resync.") cmd.PersistentFlags().Uint("state-syncer-mempool-txn-sync-limit", 10000, "The maximum number of transactions to "+ "process in the mempool tx state syncer at a time.") + + // PoS Checkpoint Syncing + // TODO: a default value is a little tricky here since we don't know what network we're using. + cmd.PersistentFlags().StringSlice("checkpoint-syncing-providers", []string{}, fmt.Sprintf("A comma-separated list of URLs that "+ + "supports the committed tip block info endpoint to be used for checkpoint syncing. 
"+ + "If unset, the field will default to %v on mainnet and %v on testnet", + lib.DefaultMainnetCheckpointProvider, lib.DefaultTestnetCheckpointProvider)) cmd.PersistentFlags().VisitAll(func(flag *pflag.Flag) { viper.BindPFlag(flag.Name, flag) }) diff --git a/lib/blockchain.go b/lib/blockchain.go index b11e286f0..1be87d904 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -4,13 +4,16 @@ import ( "bytes" "container/list" "encoding/hex" + "encoding/json" "fmt" "math" "math/big" + "net/http" "reflect" "runtime/debug" "sort" "strings" + "sync" "time" "github.com/decred/dcrd/lru" @@ -454,6 +457,24 @@ type OrphanBlock struct { Hash *BlockHash } +type CheckpointBlockInfo struct { + Height uint64 + Hash *BlockHash + HashHex string +} + +func (checkpointBlockInfo *CheckpointBlockInfo) String() string { + if checkpointBlockInfo == nil { + return "" + } + return fmt.Sprintf("< Height: %d, Hash: %v >", checkpointBlockInfo.Height, checkpointBlockInfo.HashHex) +} + +type CheckpointBlockInfoAndError struct { + CheckpointBlockInfo *CheckpointBlockInfo + Error error +} + type Blockchain struct { db *badger.DB postgres *Postgres @@ -517,9 +538,103 @@ type Blockchain struct { syncingState bool downloadingHistoricalBlocks bool + // checkpointSyncingProviders is a list of providers from which we will request the committed tip block info + // when syncing. The committed tip block info is used to designate a checkpoint before which signature + // verification will be skipped. These checkpoint providers should be trusted and should be able to provide + // the committed tip block info for the chain we are syncing. + checkpointSyncingProviders []string + // checkpointBlockInfo is the latest checkpoint block info that we have received from the checkpoint syncing + // providers. 
+ checkpointBlockInfo *CheckpointBlockInfo + // + checkpointBlockInfoLock sync.RWMutex + timer *Timer } +func (bc *Blockchain) updateCheckpointBlockInfo() { + if len(bc.checkpointSyncingProviders) == 0 { + glog.V(2).Info("updateCheckpointBlockInfo: No checkpoint syncing providers set. Skipping update.") + return + } + ch := make(chan *CheckpointBlockInfoAndError, len(bc.checkpointSyncingProviders)) + for _, provider := range bc.checkpointSyncingProviders { + go getCheckpointBlockInfoFromProvider(provider, ch) + } + + // Collect the results from the channel + checkpointBlockInfos := make([]*CheckpointBlockInfoAndError, len(bc.checkpointSyncingProviders)) + for ii := range bc.checkpointSyncingProviders { + checkpointBlockInfos[ii] = <-ch + } + + // Find the checkpoint block info with the highest height. + var highestHeightCheckpointBlockInfo *CheckpointBlockInfo + for _, checkpointBlockInfo := range checkpointBlockInfos { + if checkpointBlockInfo.Error != nil { + glog.Errorf("updateCheckpointBlockInfo: Error getting checkpoint block info: %v", checkpointBlockInfo.Error) + continue + } + if highestHeightCheckpointBlockInfo == nil || + checkpointBlockInfo.CheckpointBlockInfo.Height > highestHeightCheckpointBlockInfo.Height { + highestHeightCheckpointBlockInfo = checkpointBlockInfo.CheckpointBlockInfo + } + } + if highestHeightCheckpointBlockInfo == nil { + glog.Errorf("updateCheckpointBlockInfo: No valid checkpoint block info found.") + return + } + glog.V(2).Infof("updateCheckpointBlockInfo: Setting checkpoint block info to: %v", highestHeightCheckpointBlockInfo) + bc.checkpointBlockInfoLock.Lock() + bc.checkpointBlockInfo = highestHeightCheckpointBlockInfo + bc.checkpointBlockInfoLock.Unlock() +} + +func (bc *Blockchain) GetCheckpointBlockInfo() *CheckpointBlockInfo { + bc.checkpointBlockInfoLock.RLock() + defer bc.checkpointBlockInfoLock.RUnlock() + return bc.checkpointBlockInfo +} + +func getCheckpointBlockInfoFromProvider(provider string, ch chan<- 
*CheckpointBlockInfoAndError) { + ch <- getCheckpointBlockInfoFromProviderHelper(provider) +} + +func getCheckpointBlockInfoFromProviderHelper(provider string) *CheckpointBlockInfoAndError { + url := fmt.Sprintf("%s%s", provider, RoutePathGetCommittedTipBlockInfo) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return &CheckpointBlockInfoAndError{ + Error: errors.Wrapf(err, "getCheckpointBlockInfoFromProvider: Problem creating HTTP request"), + } + } + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return &CheckpointBlockInfoAndError{ + Error: errors.Wrapf(err, "getCheckpointBlockInfoFromProvider: Problem sending HTTP request"), + } + } + if resp.StatusCode != 200 { + return &CheckpointBlockInfoAndError{ + Error: fmt.Errorf( + "getCheckpointBlockInfoFromProvider: Problem getting checkpoint block info from provider: %s", + provider, + ), + } + } + defer resp.Body.Close() + responseData := &CheckpointBlockInfo{} + if err = json.NewDecoder(resp.Body).Decode(responseData); err != nil { + return &CheckpointBlockInfoAndError{ + Error: errors.Wrapf(err, "getCheckpointBlockInfoFromProvider: Problem decoding response data"), + } + } + return &CheckpointBlockInfoAndError{ + CheckpointBlockInfo: responseData, + } +} + func (bc *Blockchain) addNewBlockNodeToBlockIndex(blockNode *BlockNode) { bc.blockIndexByHash[*blockNode.Hash] = blockNode if _, exists := bc.blockIndexByHeight[uint64(blockNode.Height)]; !exists { @@ -777,6 +892,7 @@ func NewBlockchain( eventManager *EventManager, snapshot *Snapshot, archivalMode bool, + checkpointSyncingProviders []string, ) (*Blockchain, error) { trustedBlockProducerPublicKeys := make(map[PkMapKey]bool) @@ -812,6 +928,8 @@ func NewBlockchain( blockViewCache: lru.NewKVCache(100), // TODO: parameterize snapshotCache: NewSnapshotCache(), + checkpointSyncingProviders: checkpointSyncingProviders, + orphanList: list.New(), timer: timer, } @@ -832,6 +950,9 @@ func NewBlockchain( return nil, 
errors.Wrapf(err, "NewBlockchain: ") } + // always update the checkpoint block info when creating a new blockchain + bc.updateCheckpointBlockInfo() + return bc, nil } diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index f81988e0b..aefb50117 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -152,7 +152,7 @@ func NewTestBlockchain(t *testing.T) (*Blockchain, *DeSoParams, *badger.DB) { paramsCopy := DeSoTestnetParams chain, err := NewBlockchain([]string{blockSignerPk}, 0, 0, ¶msCopy, - timesource, db, nil, nil, nil, false) + timesource, db, nil, nil, nil, false, nil) if err != nil { log.Fatal(err) } @@ -254,7 +254,7 @@ func NewLowDifficultyBlockchainWithParamsAndDb(t *testing.T, params *DeSoParams, } } chain, err := NewBlockchain([]string{blockSignerPk}, 0, 0, - &testParams, timesource, db, postgresDb, NewEventManager(), snap, false) + &testParams, timesource, db, postgresDb, NewEventManager(), snap, false, nil) if err != nil { log.Fatal(err) } diff --git a/lib/constants.go b/lib/constants.go index 7b0888f1d..d3f7d2667 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1842,3 +1842,8 @@ const MaxBasisPoints = uint64(10000) // 1e4 const NanoSecsPerYear = uint64(365) * 24 * 60 * 60 * 1e9 // 365 days * 24 hours * 60 minutes * 60 seconds * 1e9 nanoseconds const BytesPerKB = 1000 + +const DefaultMainnetCheckpointProvider = "https://node.deso.org" +const DefaultTestnetCheckpointProvider = "https://test.deso.org" + +const RoutePathGetCommittedTipBlockInfo = "/api/v0/get-committed-tip-block-info" diff --git a/lib/server.go b/lib/server.go index 3cf0f4ce9..8a823f007 100644 --- a/lib/server.go +++ b/lib/server.go @@ -421,6 +421,7 @@ func NewServer( _mempoolMaxValidationViewConnects uint64, _transactionValidationRefreshIntervalMillis uint64, _stateSyncerMempoolTxnSyncLimit uint64, + _checkpointSyncingProviders []string, ) ( _srv *Server, _err error, @@ -506,7 +507,7 @@ func NewServer( _chain, err := NewBlockchain( 
_trustedBlockProducerPublicKeys, _trustedBlockProducerStartHeight, _maxSyncBlockHeight, - _params, timesource, _db, postgres, eventManager, _snapshot, archivalMode) + _params, timesource, _db, postgres, eventManager, _snapshot, archivalMode, _checkpointSyncingProviders) if err != nil { return nil, errors.Wrapf(err, "NewServer: Problem initializing blockchain"), true } @@ -931,15 +932,85 @@ func (srv *Server) GetBlocks(pp *Peer, maxHeight int) { pp) } +// shouldVerifySignatures determines if we should verify signatures for headers or not. +// For PoW headers, this always returns true because there are no signatures to verify and there is +// no impact on syncing. +// For PoW blocks, we verify signatures if we're not syncing. +// For PoS headers and blocks, we check if we've seen the checkpoint block. +// If the checkpoint block info is nil, we return true so that we verify signatures. +// If we haven't seen the checkpoint block yet, we skip signature verification. +// If the header height does not match the checkpoint block height, we should disconnect the peer. +// Otherwise, return true. +func (srv *Server) shouldVerifySignatures(header *MsgDeSoHeader, isHeaderChain bool) (_verifySignatures bool, _shouldDisconnect bool) { + // For PoW headers, there is no signature to verify in the header, so we return true + // just to be safe, but it has no impact on the syncing. + // For PoW blocks, we verify signatures if we're not syncing. + if srv.params.IsPoWBlockHeight(header.Height) { + if !isHeaderChain { + return !srv.blockchain.isSyncing(), false + } + return true, false + } + // For PoS blocks, we check if we've seen the checkpoint block. + // If we don't have a check point block info, we return true so that we verify signatures. 
+ checkpointBlockInfo := srv.blockchain.GetCheckpointBlockInfo() + if checkpointBlockInfo == nil { + return true, false + } + var hasSeenCheckpointBlockHash bool + var checkpointBlockNode *BlockNode + if isHeaderChain { + checkpointBlockNode, hasSeenCheckpointBlockHash = srv.blockchain.bestHeaderChainMap[*checkpointBlockInfo.Hash] + } else { + checkpointBlockNode, hasSeenCheckpointBlockHash = srv.blockchain.bestChainMap[*checkpointBlockInfo.Hash] + } + // If we haven't seen the checkpoint block hash yet, we skip signature verification. + if !hasSeenCheckpointBlockHash { + // If we're past the checkpoint height and we haven't seen the checkpoint block, we should + // disconnect from the peer. + if header.Height > checkpointBlockInfo.Height { + return true, true + } + return false, false + } + // If the current header has a height below the checkpoint block height, we should skip signature verification + // even if we've seen the checkpoint block hash. + if header.Height < checkpointBlockInfo.Height { + return false, false + } + // Make sure that the header in the best chain map has the correct height, otherwise we need to disconnect this peer. 
+ if uint64(checkpointBlockNode.Height) != checkpointBlockInfo.Height { + return true, true + } + return true, false +} + +func (srv *Server) getCheckpointSyncingStatus(isHeaders bool) string { + checkpointBlockInfo := srv.blockchain.GetCheckpointBlockInfo() + if checkpointBlockInfo == nil { + return "" + } + hasSeenCheckPointBlockHash := false + if isHeaders { + _, hasSeenCheckPointBlockHash = srv.blockchain.bestHeaderChainMap[*checkpointBlockInfo.Hash] + } else { + _, hasSeenCheckPointBlockHash = srv.blockchain.bestChainMap[*checkpointBlockInfo.Hash] + } + if !hasSeenCheckPointBlockHash { + return fmt.Sprintf("", checkpointBlockInfo.String()) + } + return fmt.Sprintf("", checkpointBlockInfo.String()) +} + func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { printHeight := pp.StartingBlockHeight() if uint64(srv.blockchain.headerTip().Height) > printHeight { printHeight = uint64(srv.blockchain.headerTip().Height) } glog.Infof(CLog(Yellow, fmt.Sprintf("Received header bundle with %v headers "+ - "in state %s from peer %v. Downloaded ( %v / %v ) total headers.", + "in state %s from peer %v. Downloaded ( %v / %v ) total headers. Checkpoint syncing status: %v", len(msg.Headers), srv.blockchain.chainState(), pp, - srv.blockchain.headerTip().Header.Height, printHeight))) + srv.blockchain.headerTip().Header.Height, printHeight, srv.getCheckpointSyncingStatus(true)))) // If we get here, it means that the node is not currently running a Fast-HotStuff // validator or that the node is syncing. In either case, we sync headers according @@ -948,7 +1019,6 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // Start by processing all the headers given to us. They should start // right after the tip of our header chain ideally. While going through them // tally up the number that we actually process. 
- numNewHeaders := 0 for ii, headerReceived := range msg.Headers { // If we've set a maximum height for node sync and we've reached it, // then we will not process any more headers. @@ -986,12 +1056,20 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { } // If we get here then we have a header we haven't seen before. - // TODO: Delete? This is redundant. - numNewHeaders++ + // check if we need to verify signatures + verifySignatures, shouldDisconnect := srv.shouldVerifySignatures(headerReceived, true) + if shouldDisconnect { + glog.Errorf("Server._handleHeaderBundle: Disconnecting peer %v in state %s because a mismatch was "+ + "found between the received header height %v does not match the checkpoint block info %v", + pp, srv.blockchain.chainState(), headerReceived.Height, + srv.blockchain.GetCheckpointBlockInfo().String()) + pp.Disconnect() + return + } // Process the header, as we haven't seen it before, set verifySignatures to false // if we're in the process of syncing. - _, isOrphan, err := srv.blockchain.ProcessHeader(headerReceived, headerHash, !srv.blockchain.isSyncing()) + _, isOrphan, err := srv.blockchain.ProcessHeader(headerReceived, headerHash, verifySignatures) numLogHeaders := 2000 if ii%numLogHeaders == 0 { @@ -1163,6 +1241,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { "height %d out of %d from peer %v", blockTip.Header.Height+1, msg.TipHeight, pp) maxHeight := -1 + srv.blockchain.updateCheckpointBlockInfo() srv.GetBlocks(pp, maxHeight) return } @@ -2181,6 +2260,17 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { srv.timer.End("Server._handleBlock: General") srv.timer.Start("Server._handleBlock: Process Block") + // check if we should verify signatures or not. 
+ verifySignatures, shouldDisconnect := srv.shouldVerifySignatures(blk.Header, false) + if shouldDisconnect { + glog.Errorf("Server._handleHeaderBundle: Disconnecting peer %v in state %s because a mismatch was "+ + "found between the received header height %v does not match the checkpoint block info %v", + pp, srv.blockchain.chainState(), blk.Header.Height, + srv.blockchain.GetCheckpointBlockInfo().Hash.String()) + pp.Disconnect() + return + } + // Only verify signatures for recent blocks. var isOrphan bool if srv.fastHotStuffConsensus != nil && srv.fastHotStuffConsensus.IsRunning() { @@ -2188,7 +2278,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { // which will validate the block, try to apply it, and handle the orphan case by requesting missing // parents. isOrphan, err = srv.fastHotStuffConsensus.HandleBlock(pp, blk) - } else if srv.blockchain.isSyncing() { + } else if !verifySignatures { glog.V(1).Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlock: Processing block %v WITHOUT "+ "signature checking because SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp))) @@ -2325,9 +2415,10 @@ func (srv *Server) _handleBlockBundle(pp *Peer, bundle *MsgDeSoBlockBundle) { pp.Disconnect() return } - glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlockBundle: Received blocks ( %v->%v / %v ) from Peer %v", + glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlockBundle: Received blocks ( %v->%v / %v ) from Peer %v. 
"+ + "Checkpoint syncing status: %v", bundle.Blocks[0].Header.Height, bundle.Blocks[len(bundle.Blocks)-1].Header.Height, - srv.blockchain.headerTip().Height, pp))) + srv.blockchain.headerTip().Height, pp, srv.getCheckpointSyncingStatus(false)))) srv.timer.Start("Server._handleBlockBundle: General") diff --git a/lib/txindex.go b/lib/txindex.go index 276869d13..9009e67dd 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -130,7 +130,7 @@ func NewTXIndex(coreChain *Blockchain, params *DeSoParams, dataDirectory string) // Note that we *DONT* pass server here because it is already tied to the main blockchain. txIndexChain, err := NewBlockchain( []string{}, 0, coreChain.MaxSyncBlockHeight, params, chainlib.NewMedianTime(), - txIndexDb, nil, nil, nil, false) + txIndexDb, nil, nil, nil, false, nil) if err != nil { return nil, fmt.Errorf("NewTXIndex: Error initializing TxIndex: %v", err) } From e181432170d4599e4fad1a490c06a8355919c394 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 9 Apr 2024 17:53:36 -0400 Subject: [PATCH 613/762] Log Validator Indices and Active Validators (#1192) * Log Validator Indices and Active Validators * Add peer id --- lib/network_manager.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/lib/network_manager.go b/lib/network_manager.go index 29b34631c..1ce11bbd4 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -193,6 +193,7 @@ func (nm *NetworkManager) startValidatorConnector() { nm.exitGroup.Done() return case <-time.After(nm.params.NetworkManagerRefreshDuration): + nm.logValidatorIndices() nm.refreshValidatorIndices() nm.connectValidators() } @@ -537,6 +538,32 @@ func (nm *NetworkManager) isActiveValidator(pk bls.SerializedPublicKey) bool { return ok } +func (nm *NetworkManager) logValidatorIndices() { + // Log the outbound validators + validatorOutboundMap := nm.GetValidatorOutboundIndex().ToMap() + outboundValidatorString := "" + for pk, rn 
:= range validatorOutboundMap { + outboundValidatorString += fmt.Sprintf(" Id (%v) PublicKey (%v) Domain (%v) Status (%v)\n", rn.id, pk, rn.peer.addrStr, rn.connectionStatus) + } + glog.V(2).Infof("NetworkManager.logValidatorIndices: Outbound Validators:\n%v", outboundValidatorString) + + // Log the inbound validators + validatorInboundMap := nm.GetValidatorInboundIndex().ToMap() + inboundValidatorString := "" + for pk, rn := range validatorInboundMap { + inboundValidatorString += fmt.Sprintf(" Id (%v) PublicKey (%v) Domain (%v) Status (%v)\n", rn.id, pk, rn.peer.addrStr, rn.connectionStatus) + } + glog.V(2).Infof("NetworkManager.logValidatorIndices: Inbound Validators:\n%v", inboundValidatorString) + + // Log the active validators + activeValidatorsMap := nm.getActiveValidatorsMap().ToMap() + activeValidatorsString := "" + for pk, validator := range activeValidatorsMap { + activeValidatorsString += fmt.Sprintf(" PublicKey (%v) Domain(%v)\n", pk, string(validator.GetDomains()[0])) + } + glog.V(2).Infof("NetworkManager.logValidatorIndices: Active Validators:\n%v", activeValidatorsString) +} + // refreshValidatorIndices re-indexes validators based on the activeValidatorsMap. It is called periodically by the // validator connector. 
func (nm *NetworkManager) refreshValidatorIndices() { From a84e138a2fee1ccc77711711f13ed560c96ac924 Mon Sep 17 00:00:00 2001 From: iamsofonias Date: Tue, 9 Apr 2024 18:33:23 -0400 Subject: [PATCH 614/762] Add nil Check For NetworkManager Validator Address Logging --- lib/network_manager.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/lib/network_manager.go b/lib/network_manager.go index 1ce11bbd4..10f340868 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -543,7 +543,11 @@ func (nm *NetworkManager) logValidatorIndices() { validatorOutboundMap := nm.GetValidatorOutboundIndex().ToMap() outboundValidatorString := "" for pk, rn := range validatorOutboundMap { - outboundValidatorString += fmt.Sprintf(" Id (%v) PublicKey (%v) Domain (%v) Status (%v)\n", rn.id, pk, rn.peer.addrStr, rn.connectionStatus) + addr := "" + if rn.peer != nil { + addr = rn.peer.addrStr + } + outboundValidatorString += fmt.Sprintf(" Id (%v) PublicKey (%v) Domain (%v) Status (%v)\n", rn.id, pk, addr, rn.connectionStatus) } glog.V(2).Infof("NetworkManager.logValidatorIndices: Outbound Validators:\n%v", outboundValidatorString) @@ -551,7 +555,11 @@ func (nm *NetworkManager) logValidatorIndices() { validatorInboundMap := nm.GetValidatorInboundIndex().ToMap() inboundValidatorString := "" for pk, rn := range validatorInboundMap { - inboundValidatorString += fmt.Sprintf(" Id (%v) PublicKey (%v) Domain (%v) Status (%v)\n", rn.id, pk, rn.peer.addrStr, rn.connectionStatus) + addr := "" + if rn.peer != nil { + addr = rn.peer.addrStr + } + inboundValidatorString += fmt.Sprintf(" Id (%v) PublicKey (%v) Domain (%v) Status (%v)\n", rn.id, pk, addr, rn.connectionStatus) } glog.V(2).Infof("NetworkManager.logValidatorIndices: Inbound Validators:\n%v", inboundValidatorString) From 7198939ab385aa133b6dab85c358dd80be83edc2 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 11 Apr 2024 11:23:09 -0400 Subject: [PATCH 
615/762] Add InitialLeaderIndexOffset to EpochEntry (#1199) * Add InitialLeaderIndexOffset to EpochEntry * Fix typo --- lib/pos_epoch.go | 17 +++++++++++++++++ lib/pos_epoch_test.go | 4 ++++ 2 files changed, 21 insertions(+) diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index 12fa1be6d..ef4e3c78d 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -18,6 +18,14 @@ type EpochEntry struct { InitialView uint64 FinalBlockHeight uint64 + // This captures an offset used to index into the leader schedule at the start of the + // epoch. + // - This value is set to 0 if there were no timeouts at the epoch transition during + // the final two block heights of the previous epoch + // - This value is non-zero if there was at least one timeout at the epoch transition + // during the final two block heights of the previous epoch + InitialLeaderIndexOffset uint64 + // This captures the on-chain timestamp when this epoch entry was created. This does not // represent the timestamp for first block of the epoch, but rather when this epoch entry // was created during that epoch transition at the end of the previous epoch. @@ -30,6 +38,7 @@ func (epochEntry *EpochEntry) Copy() *EpochEntry { InitialBlockHeight: epochEntry.InitialBlockHeight, InitialView: epochEntry.InitialView, FinalBlockHeight: epochEntry.FinalBlockHeight, + InitialLeaderIndexOffset: epochEntry.InitialLeaderIndexOffset, CreatedAtBlockTimestampNanoSecs: epochEntry.CreatedAtBlockTimestampNanoSecs, } } @@ -40,6 +49,7 @@ func (epochEntry *EpochEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipM data = append(data, UintToBuf(epochEntry.InitialBlockHeight)...) data = append(data, UintToBuf(epochEntry.InitialView)...) data = append(data, UintToBuf(epochEntry.FinalBlockHeight)...) + data = append(data, UintToBuf(epochEntry.InitialLeaderIndexOffset)...) data = append(data, IntToBuf(epochEntry.CreatedAtBlockTimestampNanoSecs)...) 
return data } @@ -71,6 +81,12 @@ func (epochEntry *EpochEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *b return errors.Wrapf(err, "EpochEntry.Decode: Problem reading FinalBlockHeight: ") } + // InitialLeaderIndexOffset + epochEntry.InitialLeaderIndexOffset, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "EpochEntry.Decode: Problem reading InitialLeaderIndexOffset: ") + } + // CreatedAtBlockTimestampNanoSecs epochEntry.CreatedAtBlockTimestampNanoSecs, err = ReadVarint(rr) if err != nil { @@ -124,6 +140,7 @@ func (bav *UtxoView) GetCurrentEpochEntry() (*EpochEntry, error) { InitialBlockHeight: 0, InitialView: 0, FinalBlockHeight: uint64(bav.Params.ForkHeights.ProofOfStake1StateSetupBlockHeight), + InitialLeaderIndexOffset: 0, CreatedAtBlockTimestampNanoSecs: 0, } return genesisEpochEntry, nil diff --git a/lib/pos_epoch_test.go b/lib/pos_epoch_test.go index 5fb9cd3f9..a8e6afc8a 100644 --- a/lib/pos_epoch_test.go +++ b/lib/pos_epoch_test.go @@ -38,6 +38,7 @@ func TestCurrentEpoch(t *testing.T) { InitialBlockHeight: blockHeight + 1, InitialView: 1, FinalBlockHeight: blockHeight + 5, + InitialLeaderIndexOffset: 2, CreatedAtBlockTimestampNanoSecs: blockTimestampNanoSecs + 5*1e9, } utxoView._setCurrentEpochEntry(epochEntry) @@ -51,6 +52,7 @@ func TestCurrentEpoch(t *testing.T) { require.Equal(t, epochEntry.InitialBlockHeight, blockHeight+1) require.Equal(t, epochEntry.InitialView, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) + require.Equal(t, epochEntry.InitialLeaderIndexOffset, uint64(2)) require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) // Test that the CurrentEpoch is flushed from the UtxoView. 
@@ -64,6 +66,7 @@ func TestCurrentEpoch(t *testing.T) { require.Equal(t, epochEntry.InitialBlockHeight, blockHeight+1) require.Equal(t, epochEntry.InitialView, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) + require.Equal(t, epochEntry.InitialLeaderIndexOffset, uint64(2)) require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) // Test that the CurrentEpoch is set in the UtxoView. @@ -73,6 +76,7 @@ func TestCurrentEpoch(t *testing.T) { require.Equal(t, epochEntry.InitialBlockHeight, blockHeight+1) require.Equal(t, epochEntry.InitialView, uint64(1)) require.Equal(t, epochEntry.FinalBlockHeight, blockHeight+5) + require.Equal(t, epochEntry.InitialLeaderIndexOffset, uint64(2)) require.Equal(t, epochEntry.CreatedAtBlockTimestampNanoSecs, blockTimestampNanoSecs+5*1e9) // Delete CurrentEpoch from the UtxoView. From 1d8a8ce8397549c04bcbdacdf10c3e08653b6531 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 11 Apr 2024 12:04:34 -0400 Subject: [PATCH 616/762] Add Uint64 to Uint64 Pseudo-Random Function (#1201) * Add Uint64 to Uint64 Pseudo-Random Function * Use EncodeUint64 --- lib/utils.go | 25 ++++++++++++++++++++----- lib/utils_test.go | 17 ++++++++++++++++- 2 files changed, 36 insertions(+), 6 deletions(-) diff --git a/lib/utils.go b/lib/utils.go index 1ac4b88f1..aed694865 100644 --- a/lib/utils.go +++ b/lib/utils.go @@ -5,16 +5,18 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/btcsuite/btcd/btcec" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcutil/hdkeychain" - "github.com/pkg/errors" - "github.com/unrolled/secure" "math/big" "os" "reflect" "sort" "strings" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcutil/hdkeychain" + "github.com/pkg/errors" + "github.com/unrolled/secure" + "golang.org/x/crypto/sha3" ) const SECURE_MIDDLEWARE_RESTRICTIVE_CONTENT_SECURITY_POLICY = "default-src 
'self'" @@ -324,3 +326,16 @@ func ReadBoolFromFile(filename string) (bool, error) { return false, nil // Return false if there's no content to read } + +// hashUint64ToUint64 hashes a uint64 to a uint64 using SHA3-256. It's a useful pseudorandom +// function that can be used to deterministically map a uint64 to another uint64. +func hashUint64ToUint64(value uint64) uint64 { + // Convert the input value to binary using big-endian encoding. + binaryValue := EncodeUint64(value) + + // Hash the binary value using SHA3-256. + hash := sha3.Sum256(binaryValue) + + // Convert the lowest eight bytes of the hash to a uint64. + return DecodeUint64(hash[:]) +} diff --git a/lib/utils_test.go b/lib/utils_test.go index cceae7ae6..1a38509ba 100644 --- a/lib/utils_test.go +++ b/lib/utils_test.go @@ -1,9 +1,10 @@ package lib import ( - "github.com/stretchr/testify/require" "math" "testing" + + "github.com/stretchr/testify/require" ) func TestSafeMakeSliceWithLength(t *testing.T) { @@ -36,3 +37,17 @@ func TestSafeMakeMapWithCapacity(t *testing.T) { require.Nil(t, err) require.NotNil(t, goodMap) } + +func TestHashUint64ToUint64(t *testing.T) { + require.Equal(t, uint64(0x48dda5bbe9171a66), hashUint64ToUint64(0)) + require.Equal(t, uint64(0x6c70d57af53dbf4d), hashUint64ToUint64(1)) + require.Equal(t, uint64(0xf90387edb7755d08), hashUint64ToUint64(2)) + require.Equal(t, uint64(0x6168e462f883acea), hashUint64ToUint64(3)) + require.Equal(t, uint64(0xeb667cfa9fe822a), hashUint64ToUint64(4)) + require.Equal(t, uint64(0x14534c98fc4f74a5), hashUint64ToUint64(5)) + require.Equal(t, uint64(0x821888e817f3332d), hashUint64ToUint64(6)) + require.Equal(t, uint64(0x660352baa787b9a3), hashUint64ToUint64(7)) + require.Equal(t, uint64(0xe16e2c6a637a34d7), hashUint64ToUint64(8)) + require.Equal(t, uint64(0x883c56385fb82c1d), hashUint64ToUint64(9)) + require.Equal(t, uint64(0xdb0c9e58da328e78), hashUint64ToUint64(10)) +} From 6295d4c03941a337316c63bde7f1fc7ced0e3042 Mon Sep 17 00:00:00 2001 From: 
tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 11 Apr 2024 12:46:42 -0400 Subject: [PATCH 617/762] Pass Through PreviousView To EndOfEpochHook (#1200) * Pass Through PreviousView To EndOfEpochHook * Fix segfault --- lib/block_view.go | 12 +++++- lib/pos_blockchain_test.go | 3 +- lib/pos_epoch_complete_hook.go | 7 +++- lib/pos_epoch_complete_hook_test.go | 58 +++++++++++++++++++---------- 4 files changed, 57 insertions(+), 23 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index a71a4153a..3acbd79a1 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4574,8 +4574,18 @@ func (bav *UtxoView) ConnectBlock( return nil, errors.Wrapf(err, "ConnectBlock: error checking if block is last in epoch") } if isLastBlockInEpoch { + // By default, assume that the previous block has a consecutive view with the current + // block. This will always be true for PoW block. + previousBlockViewNumber := blockHeader.GetView() - 1 + + // If the current block is PoS block, then we can extract the previous block's view + // from the QC. 
+ if bav.Params.IsPoSBlockHeight(blockHeight) { + previousBlockViewNumber = blockHeader.GetQC().GetView() + } + var utxoOperations []*UtxoOperation - utxoOperations, err = bav.RunEpochCompleteHook(blockHeight, blockHeader.GetView(), blockHeader.TstampNanoSecs) + utxoOperations, err = bav.RunEpochCompleteHook(blockHeight, blockHeader.GetView(), previousBlockViewNumber, blockHeader.TstampNanoSecs) if err != nil { return nil, errors.Wrapf(err, "ConnectBlock: error running epoch complete hook") } diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 1d0831635..4c81e1933 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -620,7 +620,8 @@ func TestHasValidBlockProposerPoS(t *testing.T) { require.NoError(t, err) // Run the epoch complete hook - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) // Get leader schedule from DB leaderSchedule, err := DBSeekSnapshotLeaderSchedule(testMeta.db, currentEpochNumber) diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index cbce462af..61c661513 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -50,7 +50,12 @@ func (bav *UtxoView) IsLastBlockInCurrentEpoch(blockHeight uint64) (bool, error) // - Compute the start block height and view number for the next epoch. // - Compute the final block height for the next epoch. // - Update CurrentEpochEntry to the next epoch's. 
-func (bav *UtxoView) RunEpochCompleteHook(blockHeight uint64, view uint64, blockTimestampNanoSecs int64) ([]*UtxoOperation, error) { +func (bav *UtxoView) RunEpochCompleteHook( + blockHeight uint64, // the final block height of the epoch + view uint64, // the view from the final block of the epoch + previousView uint64, // the view from the penultimate block of the epoch + blockTimestampNanoSecs int64, // the timestamp of the final block of the epoch +) ([]*UtxoOperation, error) { // Sanity-check that the current block is the last block in the current epoch. // // Note that this will also return true if we're currently at the ProofOfStake1StateSetupBlockHeight diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 8008d8b90..0d235f532 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -152,7 +152,8 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook() with no validators or stakers. - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) } { // Test the state of the snapshots after running our first OnEpochCompleteHook @@ -201,7 +202,8 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) } { // Test CurrentEpochNumber. @@ -229,7 +231,8 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) } { // Test CurrentEpochNumber. 
@@ -313,7 +316,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, validatorEntry.TotalStakeAmountNanos.Uint64(), uint64(800)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) // Snapshot m5 still has 600 staked. validatorEntry, err = _newUtxoView(testMeta).GetCurrentSnapshotValidatorSetEntryByPKID(m5PKID) @@ -329,7 +333,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(600)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) // Snapshot m5 now has 800 staked. validatorEntry, err = _newUtxoView(testMeta).GetCurrentSnapshotValidatorSetEntryByPKID(m5PKID) @@ -363,7 +368,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, _newUtxoView(testMeta).GlobalParamsEntry.StakeLockupEpochDuration, uint64(2)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) // Snapshot StakeLockupEpochDuration is still 3. snapshotGlobalsParamsEntry, err = _newUtxoView(testMeta).GetCurrentSnapshotGlobalParamsEntry() @@ -371,7 +377,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, snapshotGlobalsParamsEntry.StakeLockupEpochDuration, uint64(3)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) // Snapshot StakeLockupEpochDuration is updated to 2. 
snapshotGlobalsParamsEntry, err = _newUtxoView(testMeta).GetCurrentSnapshotGlobalParamsEntry() @@ -390,7 +397,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) // m0 is still in the snapshot validator set. snapshotValidatorSet, err = _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(10) @@ -402,7 +410,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Len(t, snapshotStakeEntries, 7) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) // m0 is dropped from the snapshot validator set. snapshotValidatorSet, err = _newUtxoView(testMeta).GetSnapshotValidatorSetByStakeAmount(10) @@ -415,11 +424,13 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) } { // Run OnEpochCompleteHook() - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) } { // Test jailing inactive validators. @@ -478,7 +489,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getNumSnapshotStakes(), 6) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) // In epoch 12, all current registered validators have Status = Jailed. // In snapshot 10, all snapshot validators have Status = Active. 
@@ -492,7 +504,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, getCurrentValidator(m6PKID).JailedAtEpochNumber, uint64(11)) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) // In epoch 13, all current registered validators have Status = Jailed. // In snapshot 11, the validator set is empty because all validators have Status = Jailed. @@ -503,7 +516,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Empty(t, getNumSnapshotStakes()) // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) // In epoch 14, all current registered validators have Status = Jailed. // In snapshot 12, the validator set is empty because all validators have Status = Jailed. @@ -609,12 +623,14 @@ func TestStakingRewardDistribution(t *testing.T) { { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) } { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) } { @@ -662,7 +678,8 @@ func TestStakingRewardDistribution(t *testing.T) { { // Run OnEpochCompleteHook(). - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) } { @@ -785,7 +802,8 @@ func TestStakingRewardDistribution(t *testing.T) { { // Run OnEpochCompleteHook(). 
- _runOnEpochCompleteHook(testMeta, incrBlockHeight(), incrViewNumber()) + incrViewNumber() + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) } { @@ -901,12 +919,12 @@ func _newUtxoView(testMeta *TestMeta) *UtxoView { return newUtxoView } -func _runOnEpochCompleteHook(testMeta *TestMeta, blockHeight uint64, viewNumber uint64) { +func _runOnEpochCompleteHook(testMeta *TestMeta, blockHeight uint64, viewNumber uint64, previousViewNumber uint64) { tmpUtxoView := _newUtxoView(testMeta) // Set blockTimestampNanoSecs to 1 year * block height. Every time the block height increments, // the timestamp increases by 1 year blockTimestampNanoSecs := int64(blockHeight) * 365 * 24 * 3600 * 1e9 - _, err := tmpUtxoView.RunEpochCompleteHook(blockHeight, viewNumber, blockTimestampNanoSecs) + _, err := tmpUtxoView.RunEpochCompleteHook(blockHeight, viewNumber, previousViewNumber, blockTimestampNanoSecs) require.NoError(testMeta.t, err) require.NoError(testMeta.t, tmpUtxoView.FlushToDb(blockHeight)) } From 0f48f58896e750870608c587bbe650886c5216e1 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 11 Apr 2024 14:27:16 -0400 Subject: [PATCH 618/762] Compute InitialLeaderIndexOffset On Epoch Transition (#1202) --- lib/pos_blockchain.go | 3 +- lib/pos_blockchain_test.go | 8 ++++- lib/pos_epoch_complete_hook.go | 53 +++++++++++++++++++++++------ lib/pos_epoch_complete_hook_test.go | 18 ++++++++-- 4 files changed, 67 insertions(+), 15 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index a4842ce3e..17509b500 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -435,8 +435,7 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // the next epoch entry and check if it is in that epoch. If it's in a future epoch, we just throw it away. 
// We supply 0 for the view and 0 for the block timestamp as we don't know what those values should be, and // we will ignore these values. - epochEntry, err = utxoView.computeNextEpochEntry(epochEntry.EpochNumber, - epochEntry.FinalBlockHeight, 0, 0) + epochEntry, err = utxoView.simulateNextEpochEntry(epochEntry.EpochNumber, epochEntry.FinalBlockHeight) if err != nil { return errors.Wrap(err, "processOrphanBlockPoS: Problem computing next epoch entry") } diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 4c81e1933..836731b88 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2208,7 +2208,13 @@ func TestProcessOrphanBlockPoS(t *testing.T) { utxoView := _newUtxoView(testMeta) currentEpochEntry, err := utxoView.GetCurrentEpochEntry() require.NoError(t, err) - nextEpochEntry, err := utxoView.computeNextEpochEntry(currentEpochEntry.EpochNumber, currentEpochEntry.FinalBlockHeight, currentEpochEntry.FinalBlockHeight, 1) + nextEpochEntry, err := utxoView.computeNextEpochEntry( + currentEpochEntry.EpochNumber, + currentEpochEntry.FinalBlockHeight, + currentEpochEntry.FinalBlockHeight, + currentEpochEntry.FinalBlockHeight-1, + 1, + ) require.NoError(t, err) var twoEpochsInFutureBlock *MsgDeSoBlock twoEpochsInFutureBlock = _generateRealBlock(testMeta, nextEpochEntry.FinalBlockHeight, nextEpochEntry.FinalBlockHeight, 17283, testMeta.chain.BlockTip().Hash, false) diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 61c661513..8e60ddab3 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -91,7 +91,7 @@ func (bav *UtxoView) RunEpochCompleteHook( // TODO: Evict old snapshots when safe to do so. 
// Step 3: Roll Over to The Next Epoch - if err = bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight, view, blockTimestampNanoSecs); err != nil { + if err = bav.runEpochCompleteEpochRollover(currentEpochEntry.EpochNumber, blockHeight, view, previousView, blockTimestampNanoSecs); err != nil { return nil, errors.Wrapf(err, "RunEpochCompleteHook: ") } @@ -204,15 +204,16 @@ func (bav *UtxoView) SimulateAdjacentEpochEntryForBlockHeight(blockHeight uint64 // We use this function to simulate the next epoch's entry so we can predict the leader schedule and validator set // for the next epoch before the current epoch is over. This is useful for validating orphan blocks. func (bav *UtxoView) simulateNextEpochEntry(currentEpochNumber uint64, currentEpochFinalBlockHeight uint64) (*EpochEntry, error) { - return bav.computeNextEpochEntry( - currentEpochNumber, - currentEpochFinalBlockHeight, - 0, - 0, - ) + return bav.computeNextEpochEntry(currentEpochNumber, currentEpochFinalBlockHeight, 0, 0, 0) } -func (bav *UtxoView) computeNextEpochEntry(currentEpochNumber uint64, currentEpochFinalBlockHeight uint64, currentEpochFinalView uint64, nextEpochBlockTimestampNanoSecs int64) (*EpochEntry, error) { +func (bav *UtxoView) computeNextEpochEntry( + currentEpochNumber uint64, + currentEpochFinalBlockHeight uint64, + currentEpochFinalView uint64, + currentEpochPenultimateView uint64, + nextEpochBlockTimestampNanoSecs int64, +) (*EpochEntry, error) { // Retrieve the SnapshotGlobalParamsEntry to determine the next epoch's final block height. We use the // snapshot global params here because the next epoch begin immediately, and its length is used in the PoS // consensus. 
The validator set for the next epoch needs to be in agreement on the length of the epoch @@ -232,12 +233,37 @@ func (bav *UtxoView) computeNextEpochEntry(currentEpochNumber uint64, currentEpo return nil, errors.Wrapf(err, "computeNextEpochEntry: problem calculating NextEpochFinalBlockHeight: ") } + // Calculate InitialLeaderIndexOffset + initialLeaderIndexOffset := uint64(0) + if currentEpochPenultimateView+1 < currentEpochFinalView { + // The initialLeaderIndexOffset is set to non-zero only if there was a timeout between the final two + // blocks of the current epoch. This can be determined by checking if the final two views of the + // current epoch are not consecutive. + + // Fetch the next snapshot epoch number so we can fetch the snapshotted leader schedule for the next epoch + nextSnapshotEpochNumber, err := bav.ComputeSnapshotEpochNumberForEpoch(currentEpochNumber + 1) + if err != nil { + return nil, errors.Wrapf(err, "computeNextEpochEntry: problem computing snapshot epoch number: ") + } + + // Fetch the snapshotted leader schedule for the next epoch. + nextSnapshotLeaderSchedule, err := bav.GetSnapshotLeaderScheduleAtEpochNumber(nextSnapshotEpochNumber) + if err != nil { + return nil, errors.Wrapf(err, "computeNextEpochEntry: problem retrieving next snapshot leader schedule: ") + } + + // The initialLeaderIndexOffset is set to a pseudo-random value used to index into the leader schedule. It's + // simply computed as a hash of the current epoch's final view and the next epoch's leader schedule size. + initialLeaderIndexOffset = hashUint64ToUint64(currentEpochFinalView) % uint64(len(nextSnapshotLeaderSchedule)) + } + // Roll-over a new epoch by setting a new CurrentEpochEntry. 
nextEpochEntry := &EpochEntry{ EpochNumber: currentEpochNumber + 1, InitialBlockHeight: currentEpochFinalBlockHeight + 1, InitialView: currentEpochFinalView + 1, FinalBlockHeight: nextEpochFinalBlockHeight, + InitialLeaderIndexOffset: initialLeaderIndexOffset, CreatedAtBlockTimestampNanoSecs: nextEpochBlockTimestampNanoSecs, } return nextEpochEntry, nil @@ -269,14 +295,21 @@ func (bav *UtxoView) simulatePrevEpochEntry(currentEpochNumber uint64, currentEp InitialBlockHeight: prevEpochInitialBlockHeight, InitialView: 0, FinalBlockHeight: currentEpochInitialBlockHeight - 1, + InitialLeaderIndexOffset: 0, CreatedAtBlockTimestampNanoSecs: 0, } return prevEpochEntry, nil } // Updates the currentEpochEntry to the next epoch's. -func (bav *UtxoView) runEpochCompleteEpochRollover(epochNumber uint64, blockHeight uint64, view uint64, blockTimestampNanoSecs int64) error { - nextEpochEntry, err := bav.computeNextEpochEntry(epochNumber, blockHeight, view, blockTimestampNanoSecs) +func (bav *UtxoView) runEpochCompleteEpochRollover( + epochNumber uint64, + blockHeight uint64, + view uint64, + previousView uint64, + blockTimestampNanoSecs int64, +) error { + nextEpochEntry, err := bav.computeNextEpochEntry(epochNumber, blockHeight, view, previousView, blockTimestampNanoSecs) if err != nil { return errors.Wrap(err, "runEpochCompleteEpochRollover: ") } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 0d235f532..37a3dce5d 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -217,6 +217,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NotNil(t, currentEpoch) require.Equal(t, currentEpoch.InitialBlockHeight, uint64(14)) require.Equal(t, currentEpoch.InitialView, uint64(3)) + require.Equal(t, currentEpoch.FinalBlockHeight, uint64(14)) + require.Equal(t, currentEpoch.InitialLeaderIndexOffset, uint64(0)) // Test SnapshotGlobalParamsEntry is populated. 
snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetCurrentSnapshotGlobalParamsEntry() @@ -231,8 +233,12 @@ func TestRunEpochCompleteHook(t *testing.T) { } { // Test RunOnEpochCompleteHook(). + + // Increment the view number twice to simulate one timeout incrViewNumber() - _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-1) + incrViewNumber() + + _runOnEpochCompleteHook(testMeta, incrBlockHeight(), viewNumber, viewNumber-2) } { // Test CurrentEpochNumber. @@ -245,7 +251,15 @@ func TestRunEpochCompleteHook(t *testing.T) { require.NoError(t, err) require.NotNil(t, currentEpoch) require.Equal(t, currentEpoch.InitialBlockHeight, uint64(15)) - require.Equal(t, currentEpoch.InitialView, uint64(4)) + require.Equal(t, currentEpoch.InitialView, uint64(5)) + require.Equal(t, currentEpoch.FinalBlockHeight, uint64(15)) + // InitialLeaderIndexOffset is non-zero because the final two views of the previous epoch were not consecutive. + // + // InitialLeaderIndexOffset = hashUint64ToUint64(currentEpochFinalView) % uint64(len(nextSnapshotLeaderSchedule)) + // = hashUint64ToUint64(4) % 7 + // = 0xeb667cfa9fe822a % 7 + // = 5 + require.Equal(t, currentEpoch.InitialLeaderIndexOffset, uint64(5)) // Test SnapshotGlobalParamsEntry is populated. 
snapshotGlobalParamsEntry, err := _newUtxoView(testMeta).GetCurrentSnapshotGlobalParamsEntry() From 6a366a9e6be82001348c619a691057b87afc7424 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 11 Apr 2024 14:29:37 -0400 Subject: [PATCH 619/762] Deprecate Previous Epoch Timeouts Count from Leader Index Computation (#1203) --- lib/pos_blockchain.go | 75 ++++++-------------------------------- lib/pos_blockchain_test.go | 20 ++++------ 2 files changed, 20 insertions(+), 75 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 17509b500..bb110ec67 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -614,7 +614,7 @@ func (bc *Blockchain) validateLeaderAndQC( } } - isBlockProposerValid, err := bc.hasValidBlockProposerPoS(block, parentUtxoView) + isBlockProposerValid, err := parentUtxoView.hasValidBlockProposerPoS(block) if err != nil { return false, errors.Wrapf(err, "validateAndIndexBlockPoS: Problem validating block proposer") @@ -1133,12 +1133,12 @@ func (bav *UtxoView) hasValidProposerPartialSignaturePoS(block *MsgDeSoBlock, sn // block height + view number pair. It returns a bool indicating whether // we confirmed that the leader is valid. If we receive an error, we are unsure // if the leader is invalid or not, so we return false. 
-func (bc *Blockchain) hasValidBlockProposerPoS(block *MsgDeSoBlock, parentUtxoView *UtxoView) (_isValidBlockProposer bool, _err error) { - currentEpochEntry, err := parentUtxoView.GetCurrentEpochEntry() +func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBlockProposer bool, _err error) { + currentEpochEntry, err := bav.GetCurrentEpochEntry() if err != nil { return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting current epoch entry") } - leaders, err := parentUtxoView.GetCurrentSnapshotLeaderSchedule() + leaders, err := bav.GetCurrentSnapshotLeaderSchedule() if err != nil { return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting leader schedule") } @@ -1157,22 +1157,12 @@ func (bc *Blockchain) hasValidBlockProposerPoS(block *MsgDeSoBlock, parentUtxoVi return false, nil } - // Fetch the number timeouts that took place at the final block height of the previous epoch. We need to - // compute this number because a timeout at the start of the current epoch would regress the chain to - // the previous epoch, which would count the timeout as part of the previous epoch. - numTimeoutsBeforeEpochTransition, err := bc.getNumTimeoutsBeforeEpochTransition(block, currentEpochEntry) - if err != nil { - return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting num timeouts before epoch transition") - } - // We compute the current index in the leader schedule as follows: // - [(block.View - currentEpoch.InitialView) - (block.Height - currentEpoch.InitialHeight) + numTimeoutsBeforeEpochTransition] % len(leaders) // - The number of views that have elapsed since the start of the epoch is block.View - currentEpoch.InitialView. // - The number of blocks that have been added to the chain since the start of the epoch is // block.Height - currentEpoch.InitialHeight. // - The difference between the above two numbers is the number of timeouts that have occurred in this epoch. 
- // - The numTimeoutsBeforeEpochTransition is the number of timeouts that have occurred during the epoch transition - // and are counted as part of the previous epoch. // // For each timeout, we skip one leader in the in the schedule. If we have more timeouts than leaders in // the schedule, we start from the top of the schedule again, which is why we take the modulo of the length @@ -1182,26 +1172,25 @@ func (bc *Blockchain) hasValidBlockProposerPoS(block *MsgDeSoBlock, parentUtxoVi // - Say we have 3 leaders in the schedule // - The epoch started at height 10 and view 11 // - The current block is at height 15 and view 17 - // - There were 6 timeouts at the epoch transition - // - Then the number of timeouts that have occurred is (17 - 11) - (15 - 10) + 6 = 7. - // - The leader index is 7 % 3 = 1. + // - Then the number of timeouts that have occurred is (17 - 11) - (15 - 10) = 1. + // - The leader index is 1 % 3 = 1. // - This means this block should be proposed by the 2nd leader in the schedule, which is at index 1. 
- leaderIdxUint64 := (viewDiff + numTimeoutsBeforeEpochTransition - heightDiff) % uint64(len(leaders)) + leaderIdxUint64 := (viewDiff - heightDiff) % uint64(len(leaders)) if leaderIdxUint64 > math.MaxUint16 { return false, nil } leaderIdx := uint16(leaderIdxUint64) - leaderEntry, err := parentUtxoView.GetSnapshotLeaderScheduleValidator(leaderIdx) + leaderEntry, err := bav.GetSnapshotLeaderScheduleValidator(leaderIdx) if err != nil { return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting leader schedule validator") } - snapshotAtEpochNumber, err := parentUtxoView.ComputeSnapshotEpochNumberForEpoch(currentEpochEntry.EpochNumber) + snapshotAtEpochNumber, err := bav.ComputeSnapshotEpochNumberForEpoch(currentEpochEntry.EpochNumber) if err != nil { return false, errors.Wrapf(err, "hasValidBlockProposerPoS: Problem getting snapshot epoch number for epoch #%d", currentEpochEntry.EpochNumber) } - leaderEntryFromVotingPublicKey, err := parentUtxoView.GetSnapshotValidatorEntryByBLSPublicKey( + leaderEntryFromVotingPublicKey, err := bav.GetSnapshotValidatorEntryByBLSPublicKey( block.Header.ProposerVotingPublicKey, snapshotAtEpochNumber) if err != nil { @@ -1227,9 +1216,9 @@ func (bc *Blockchain) hasValidBlockProposerPoS(block *MsgDeSoBlock, parentUtxoVi currentEpochEntry.InitialBlockHeight, leaderIdx, len(leaders), - PkToString(leaderEntry.ValidatorPKID.ToBytes(), bc.params), + PkToString(leaderEntry.ValidatorPKID.ToBytes(), bav.Params), leaderEntry.VotingPublicKey.ToAbbreviatedString(), - PkToString(leaderEntryFromVotingPublicKey.ValidatorPKID.ToBytes(), bc.params), + PkToString(leaderEntryFromVotingPublicKey.ValidatorPKID.ToBytes(), bav.Params), leaderEntryFromVotingPublicKey.VotingPublicKey.ToAbbreviatedString(), block.Header.ProposerVotingPublicKey.ToAbbreviatedString(), ) @@ -1241,46 +1230,6 @@ func (bc *Blockchain) hasValidBlockProposerPoS(block *MsgDeSoBlock, parentUtxoVi return true, nil } -func (bc *Blockchain) 
getNumTimeoutsBeforeEpochTransition(block *MsgDeSoBlock, epochEntry *EpochEntry) (uint64, error) { - if !epochEntry.ContainsBlockHeight(block.Header.Height) { - return 0, errors.New("getNumTimeoutsBeforeEpochTransition: Block height not in epoch") - } - - // Fetch the previous epoch's final block height. - prevEpochFinalBlockHeight := epochEntry.InitialBlockHeight - 1 - - // Fetch the previous epoch's final block that is an ancestor of the given block. This operation is O(n) - // where n is the number of blocks between the given block and the previous epoch's final block. The worst - // case is O(3600) since we only need to go back 3600 blocks to find the previous epoch's final block. - prevEpochFinalBlockHeader := block.Header - for prevEpochFinalBlockHeader.Height > prevEpochFinalBlockHeight { - blockNode, exists := bc.blockIndexByHash[*prevEpochFinalBlockHeader.PrevBlockHash] - if !exists { - return 0, errors.New("getNumTimeoutsBeforeEpochTransition: Missing ancestor block") - } - prevEpochFinalBlockHeader = blockNode.Header - } - - // Fetch the previous epoch's 2nd to last block that is an ancestor of the given block. 
- prevEpochSecondToLastBlockNode, ok := bc.blockIndexByHash[*prevEpochFinalBlockHeader.PrevBlockHash] - if !ok { - return 0, errors.New("getNumTimeoutsBeforeEpochTransition: Missing ancestor block") - } - - // Ensure that the previous epoch's final two blocks have increasing views - if prevEpochFinalBlockHeader.GetView() <= prevEpochSecondToLastBlockNode.Header.GetView() { - return 0, errors.New("getNumTimeoutsBeforeEpochTransition: Final block view not greater than 2nd to last block view") - } - - // Ensure that the previous epoch's final two blocks have sequential heights - if prevEpochSecondToLastBlockNode.Header.Height != prevEpochFinalBlockHeader.Height-1 { - return 0, errors.New("getNumTimeoutsBeforeEpochTransition: Final block height not sequential with 2nd to last block height") - } - - // Compute the number of timeouts at the end of the previous epoch - return (prevEpochFinalBlockHeader.GetView() - prevEpochSecondToLastBlockNode.Header.GetView() - 1) / 2, nil -} - // isValidPoSQuorumCertificate validates that the QC of this block is valid, meaning a super majority // of the validator set has voted (or timed out). It special cases the first block after the PoS cutover // by overriding the validator set used to validate the high QC in the first block after the PoS cutover. diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 836731b88..95b9a7405 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -585,9 +585,6 @@ func TestHasValidBlockProposerPoS(t *testing.T) { return viewNumber } - // Get a pointer to the blockchain instance - bc := testMeta.chain - // Seed a CurrentEpochEntry. 
tmpUtxoView := _newUtxoView(testMeta) tmpUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 0, FinalBlockHeight: blockHeight + 1}) @@ -597,7 +594,6 @@ func TestHasValidBlockProposerPoS(t *testing.T) { testMeta.params.DefaultEpochDurationNumBlocks = uint64(1) { - // We need to reset the UniversalUtxoView since the RegisterAsValidator and Stake // txn test helper utils use and flush the UniversalUtxoView. Otherwise, the // updated GlobalParamsEntry will be overwritten by the default one cached in @@ -681,14 +677,14 @@ func TestHasValidBlockProposerPoS(t *testing.T) { ProposerVotingPublicKey: leader0Entry.VotingPublicKey, }, } - isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) require.NoError(t, err) require.True(t, isBlockProposerValid) // If we have a different proposer public key, we will have an error leader1Entry := validatorPKIDToValidatorEntryMap[*leaderSchedule[1]] dummyBlock.Header.ProposerVotingPublicKey = leader1Entry.VotingPublicKey.Copy() - isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) require.NoError(t, err) require.False(t, isBlockProposerValid) @@ -696,7 +692,7 @@ func TestHasValidBlockProposerPoS(t *testing.T) { // we move to leader 1. 
dummyBlock.Header.ProposedInView = viewNumber + 2 dummyBlock.Header.ProposerVotingPublicKey = leader1Entry.VotingPublicKey - isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) require.NoError(t, err) require.True(t, isBlockProposerValid) @@ -705,27 +701,27 @@ func TestHasValidBlockProposerPoS(t *testing.T) { dummyBlock.Header.ProposedInView = viewNumber + 5 leader4Entry := validatorPKIDToValidatorEntryMap[*leaderSchedule[4]] dummyBlock.Header.ProposerVotingPublicKey = leader4Entry.VotingPublicKey - isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) require.NoError(t, err) require.True(t, isBlockProposerValid) // If we have 7 timeouts, we know everybody timed out, so we go back to leader 0. dummyBlock.Header.ProposedInView = viewNumber + 8 dummyBlock.Header.ProposerVotingPublicKey = leader0Entry.VotingPublicKey - isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) require.NoError(t, err) require.True(t, isBlockProposerValid) // If the block view is less than the epoch's initial view, this is an error. dummyBlock.Header.ProposedInView = viewNumber - isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) require.NoError(t, err) require.False(t, isBlockProposerValid) // If the block height is less than epoch's initial block height, this is an error. 
dummyBlock.Header.ProposedInView = viewNumber + 1 dummyBlock.Header.Height = blockHeight - isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) require.NoError(t, err) require.False(t, isBlockProposerValid) @@ -734,7 +730,7 @@ func TestHasValidBlockProposerPoS(t *testing.T) { // This would imply that we've had more blocks than views, which is not possible. dummyBlock.Header.ProposedInView = viewNumber + 1 dummyBlock.Header.Height = blockHeight + 2 - isBlockProposerValid, err = bc.hasValidBlockProposerPoS(dummyBlock, utxoView) + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) require.NoError(t, err) require.False(t, isBlockProposerValid) } From 9e41c744192f847dfa9d2cd6a96ac488fe074faa Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 11 Apr 2024 14:31:48 -0400 Subject: [PATCH 620/762] Incorporate InitialLeaderIndexOffset in Leader Index Computation (#1205) --- lib/pos_blockchain.go | 16 +++++++----- lib/pos_blockchain_test.go | 53 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 7 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index bb110ec67..495b31076 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1158,10 +1158,10 @@ func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBloc } // We compute the current index in the leader schedule as follows: - // - [(block.View - currentEpoch.InitialView) - (block.Height - currentEpoch.InitialHeight) + numTimeoutsBeforeEpochTransition] % len(leaders) + // - [currentEpoch.InitialLeaderIndexOffset + (block.View - currentEpoch.InitialView) - (block.Height - currentEpoch.InitialHeight)] % len(leaders) + // - The pseudo-random offset for the leader schedule is currentEpoch.InitialLeaderIndexOffset. 
// - The number of views that have elapsed since the start of the epoch is block.View - currentEpoch.InitialView. - // - The number of blocks that have been added to the chain since the start of the epoch is - // block.Height - currentEpoch.InitialHeight. + // - The number of blocks that have been added to the chain since the start of the epoch is block.Height - currentEpoch.InitialHeight. // - The difference between the above two numbers is the number of timeouts that have occurred in this epoch. // // For each timeout, we skip one leader in the in the schedule. If we have more timeouts than leaders in @@ -1170,12 +1170,13 @@ func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBloc // // A quick example: // - Say we have 3 leaders in the schedule + // - The initial leader index offset is 3 // - The epoch started at height 10 and view 11 // - The current block is at height 15 and view 17 - // - Then the number of timeouts that have occurred is (17 - 11) - (15 - 10) = 1. - // - The leader index is 1 % 3 = 1. + // - Then the number of timeouts that have occurred is 3 + (17 - 11) - (15 - 10) = 4. + // - The leader index is 4 % 3 = 1. // - This means this block should be proposed by the 2nd leader in the schedule, which is at index 1. - leaderIdxUint64 := (viewDiff - heightDiff) % uint64(len(leaders)) + leaderIdxUint64 := (currentEpochEntry.InitialLeaderIndexOffset + viewDiff - heightDiff) % uint64(len(leaders)) if leaderIdxUint64 > math.MaxUint16 { return false, nil } @@ -1204,7 +1205,7 @@ func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBloc // Dump some debug info on the current block's proposer and the current view's leader. 
glog.V(2).Infof( "hasValidBlockProposerPoS: Printing block proposer debug info: "+ - "\n Epoch Num: %d, Block View: %d, Block Height: %d, Epoch Initial View: %d, Epoch Initial Block Height: %d"+ + "\n Epoch Num: %d, Block View: %d, Block Height: %d, Epoch Initial View: %d, Epoch Initial Block Height: %d, Epoch Initial Leader Index Offset: %d"+ "\n Leader Idx: %d, Num Leaders: %d"+ "\n Expected Leader PKID: %v, Expected Leader Voting PK: %v"+ "\n Expected Leader PKID from BLS Key Lookup: %v, Expected Leader Voting PK from BLS Key Lookup: %v"+ @@ -1214,6 +1215,7 @@ func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBloc block.Header.Height, currentEpochEntry.InitialView, currentEpochEntry.InitialBlockHeight, + currentEpochEntry.InitialLeaderIndexOffset, leaderIdx, len(leaders), PkToString(leaderEntry.ValidatorPKID.ToBytes(), bav.Params), diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 95b9a7405..5bee3bc43 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -735,6 +735,59 @@ func TestHasValidBlockProposerPoS(t *testing.T) { require.False(t, isBlockProposerValid) } + // Test varying InitialLeaderIndexOffset values. + { + utxoView := _newUtxoView(testMeta) + epochEntry, err := utxoView.GetCurrentEpochEntry() + require.NoError(t, err) + + // Set the InitialLeaderIndexOffset to 3. + epochEntry.InitialLeaderIndexOffset = 3 + utxoView._setCurrentEpochEntry(epochEntry) + + // Construct a dummy block with leader 0 as the leader. + leader0PKID := leaderSchedule[0] + leader0Entry := validatorPKIDToValidatorEntryMap[*leader0PKID] + dummyBlock := &MsgDeSoBlock{ + Header: &MsgDeSoHeader{ + PrevBlockHash: testMeta.chain.BlockTip().Hash, + ProposedInView: viewNumber + 1, + Height: blockHeight + 1, + ProposerVotingPublicKey: leader0Entry.VotingPublicKey, + }, + } + + // The block proposer is invalid because the InitialLeaderIndexOffset is 3. 
+ isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + require.NoError(t, err) + require.False(t, isBlockProposerValid) + + // Set the leader at index 3 as the block proposer + leader3Entry := validatorPKIDToValidatorEntryMap[*leaderSchedule[3]] + dummyBlock.Header.ProposerVotingPublicKey = leader3Entry.VotingPublicKey + + // The block proposer is valid now. + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + require.NoError(t, err) + require.True(t, isBlockProposerValid) + + // Increase the block's view number to simulate 2 timeouts. + dummyBlock.Header.ProposedInView = dummyBlock.Header.ProposedInView + 2 + + // The block proposer at index 3 is invalid now because there have been two timeouts. + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + require.NoError(t, err) + require.False(t, isBlockProposerValid) + + // Set the leader at index 5 as the block proposer + leader5Entry := validatorPKIDToValidatorEntryMap[*leaderSchedule[5]] + dummyBlock.Header.ProposerVotingPublicKey = leader5Entry.VotingPublicKey + + // The block proposer is valid now. + isBlockProposerValid, err = utxoView.hasValidBlockProposerPoS(dummyBlock) + require.NoError(t, err) + require.True(t, isBlockProposerValid) + } } // TestGetLineageFromCommittedTip tests that getLineageFromCommittedTip works as expected. 
From 13f5318edb5b8ca2fb93159434273405bba1dde9 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 11 Apr 2024 14:52:52 -0400 Subject: [PATCH 621/762] Expose network manager for admin panel (#1204) --- lib/remote_node.go | 25 +++++++++++++++++++++++++ lib/server.go | 4 ++++ 2 files changed, 29 insertions(+) diff --git a/lib/remote_node.go b/lib/remote_node.go index bbd011495..e0c5503ae 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -26,6 +26,27 @@ const ( RemoteNodeStatus_Terminated RemoteNodeStatus = 6 ) +func (remodeNodeStatus RemoteNodeStatus) String() string { + switch remodeNodeStatus { + case RemoteNodeStatus_NotConnected: + return "NotConnected" + case RemoteNodeStatus_Connected: + return "Connected" + case RemoteNodeStatus_VersionSent: + return "VersionSent" + case RemoteNodeStatus_VerackSent: + return "VerackSent" + case RemoteNodeStatus_HandshakeCompleted: + return "HandshakeCompleted" + case RemoteNodeStatus_Attempted: + return "Attempted" + case RemoteNodeStatus_Terminated: + return "Terminated" + default: + return "Unknown" + } +} + type RemoteNodeId uint64 func NewRemoteNodeId(id uint64) RemoteNodeId { @@ -250,6 +271,10 @@ func (rn *RemoteNode) IsTerminated() bool { return rn.connectionStatus == RemoteNodeStatus_Terminated } +func (rn *RemoteNode) GetStatus() RemoteNodeStatus { + return rn.connectionStatus +} + func (rn *RemoteNode) IsValidator() bool { if !rn.IsHandshakeCompleted() { return false diff --git a/lib/server.go b/lib/server.go index 8a823f007..e1fd07f41 100644 --- a/lib/server.go +++ b/lib/server.go @@ -3297,3 +3297,7 @@ func (progress *SyncProgress) PrintLoop() { } } } + +func (srv *Server) GetNetworkManagerConnections() []*RemoteNode { + return srv.networkManager.GetAllRemoteNodes().GetAll() +} From fc0aaea51ed446f3b5dff8c55f4c039e86a2986e Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 11 Apr 2024 15:27:49 -0400 Subject: [PATCH 
622/762] Upgrade go to 1.22 (#1190) --- .github/workflows/ci.yml | 4 ++-- Dockerfile | 2 +- go.mod | 2 +- test.Dockerfile | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index afb4dc21c..876eff8ec 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,7 +15,7 @@ jobs: - name: Install Go uses: actions/setup-go@v2 with: - go-version: "1.20" + go-version: "1.22" - name: Checkout branch uses: actions/checkout@v3 @@ -52,7 +52,7 @@ jobs: - name: Install Go uses: actions/setup-go@v2 with: - go-version: "1.20" + go-version: "1.22" - name: Checkout branch uses: actions/checkout@v3 diff --git a/Dockerfile b/Dockerfile index fc52e7d02..ebaeefd76 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ FROM alpine:latest AS core RUN apk update && apk upgrade RUN apk add --update bash cmake g++ gcc git make vips vips-dev -COPY --from=golang:1.20-alpine /usr/local/go/ /usr/local/go/ +COPY --from=golang:1.22-alpine /usr/local/go/ /usr/local/go/ ENV PATH="/usr/local/go/bin:${PATH}" WORKDIR /deso/src/core diff --git a/go.mod b/go.mod index bcccc632b..d53de57fc 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/deso-protocol/core -go 1.20 +go 1.22 require ( github.com/DataDog/datadog-go v4.5.0+incompatible diff --git a/test.Dockerfile b/test.Dockerfile index 6203a79bd..d34b2d179 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -4,7 +4,7 @@ RUN apk update RUN apk upgrade RUN apk add --update bash cmake g++ gcc git make vips vips-dev -COPY --from=golang:1.20-alpine /usr/local/go/ /usr/local/go/ +COPY --from=golang:1.22-alpine /usr/local/go/ /usr/local/go/ ENV PATH="/usr/local/go/bin:${PATH}" WORKDIR /deso/src/core From 528f5092a6557bbbccd6a23247cfd5ea79675158 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 11 Apr 2024 15:30:49 -0400 Subject: [PATCH 623/762] Upgrade all deps (#1163) --- cmd/node.go | 6 +- 
consensus/event_loop.go | 16 +- consensus/event_loop_test.go | 8 +- consensus/integration_test.go | 22 +- consensus/utils.go | 14 +- consensus/utils_test.go | 44 +- go.mod | 189 +++-- go.sum | 802 ++++++++------------ lib/base58.go | 2 +- lib/bitcoin_burner.go | 4 +- lib/block_producer.go | 17 +- lib/block_view.go | 16 +- lib/block_view_access_group_members_test.go | 28 +- lib/block_view_access_group_test.go | 10 +- lib/block_view_association_test.go | 4 +- lib/block_view_atomic_txns_test.go | 5 +- lib/block_view_balance_entry.go | 22 +- lib/block_view_bitcoin.go | 4 +- lib/block_view_bitcoin_test.go | 4 +- lib/block_view_creator_coin.go | 26 +- lib/block_view_dao_coin.go | 38 +- lib/block_view_dao_coin_limit_order.go | 28 +- lib/block_view_dao_coin_limit_order_test.go | 406 +++++----- lib/block_view_dao_coin_test.go | 115 ++- lib/block_view_derived_key.go | 8 +- lib/block_view_derived_key_test.go | 97 ++- lib/block_view_flush.go | 2 +- lib/block_view_follow.go | 4 +- lib/block_view_like.go | 2 +- lib/block_view_lockups.go | 54 +- lib/block_view_lockups_test.go | 431 ++++++----- lib/block_view_message.go | 4 +- lib/block_view_message_test.go | 24 +- lib/block_view_new_message_test.go | 22 +- lib/block_view_nft.go | 6 +- lib/block_view_post.go | 2 +- lib/block_view_profile.go | 27 +- lib/block_view_stake.go | 26 +- lib/block_view_stake_test.go | 190 ++--- lib/block_view_test.go | 30 +- lib/block_view_types.go | 10 +- lib/block_view_types_test.go | 4 +- lib/block_view_validator.go | 8 +- lib/block_view_validator_test.go | 108 +-- lib/blockchain.go | 6 +- lib/blockchain_test.go | 10 +- lib/connection_manager.go | 38 +- lib/constants.go | 3 + lib/db_utils.go | 8 +- lib/db_utils_test.go | 14 +- lib/deso_math.go | 14 +- lib/deso_math_test.go | 24 +- lib/errors.go | 4 +- lib/legacy_mempool.go | 7 +- lib/load_test.go | 4 +- lib/miner.go | 4 +- lib/network.go | 54 +- lib/network_connection.go | 10 +- lib/network_connection_test.go | 6 +- lib/network_manager.go | 20 +- 
lib/network_test.go | 26 +- lib/peer.go | 8 +- lib/pos_blockchain_test.go | 14 +- lib/pos_constants.go | 2 +- lib/pos_epoch_complete_hook_test.go | 68 +- lib/pos_leader_schedule.go | 6 +- lib/pos_leader_schedule_test.go | 2 +- lib/pos_random_seed.go | 2 +- lib/pos_random_seed_test.go | 4 +- lib/pos_server_regtest.go | 2 +- lib/pos_snapshot_entries.go | 2 +- lib/pos_staking_rewards.go | 4 +- lib/pos_transaction_register_test.go | 7 +- lib/postgres.go | 14 +- lib/remote_node.go | 10 +- lib/server.go | 13 +- lib/types.go | 4 +- lib/utils.go | 14 +- 78 files changed, 1603 insertions(+), 1714 deletions(-) diff --git a/cmd/node.go b/cmd/node.go index b7f251e27..4600e5f4a 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -570,20 +570,20 @@ func addIPsForHost(desoAddrMgr *addrmgr.AddrManager, host string, params *lib.De glog.V(1).Infof("_addSeedAddrs: Adding seed IPs from seed %s: %v\n", host, ipAddrs) // Convert addresses to NetAddress'es. - netAddrs, err := lib.SafeMakeSliceWithLength[*wire.NetAddress](uint64(len(ipAddrs))) + netAddrs, err := lib.SafeMakeSliceWithLength[*wire.NetAddressV2](uint64(len(ipAddrs))) if err != nil { glog.V(2).Infof("_addSeedAddrs: Problem creating netAddrs slice with length %d", len(ipAddrs)) return } for ii, ip := range ipAddrs { - netAddrs[ii] = wire.NewNetAddressTimestamp( + netAddrs[ii] = wire.NetAddressV2FromBytes( // We initialize addresses with a // randomly selected "last seen time" between 3 // and 7 days ago similar to what bitcoind does. 
time.Now().Add(-1*time.Second*time.Duration(lib.SecondsIn3Days+ lib.RandInt32(lib.SecondsIn4Days))), 0, - ip, + ip[:], params.DefaultSocketPort) } glog.V(1).Infof("_addSeedAddrs: Computed the following wire.NetAddress'es: %s", spew.Sdump(netAddrs)) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 6d350dd7e..50a1d927d 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -683,8 +683,8 @@ func (fc *fastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff votesByValidator := fc.votesSeenByBlockHash[voteSignaturePayload] // Compute the total stake and total stake with votes - totalStake := uint256.NewInt() - totalVotingStake := uint256.NewInt() + totalStake := uint256.NewInt(0) + totalVotingStake := uint256.NewInt(0) // Track the signatures and signers list for the chain tip signersList := bitset.NewBitset() @@ -693,7 +693,7 @@ func (fc *fastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff // Iterate through the entire validator list and check if each one has voted for the tip block. Track // all voters and their stakes. 
for ii, validator := range validatorList { - totalStake = uint256.NewInt().Add(totalStake, validator.GetStakeAmount()) + totalStake = uint256.NewInt(0).Add(totalStake, validator.GetStakeAmount()) // Skip the validator if it hasn't voted for the the block vote, hasVoted := votesByValidator[validator.GetPublicKey().ToString()] @@ -707,7 +707,7 @@ func (fc *fastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff } // Track the vote's signature, stake, and place in the validator list - totalVotingStake = uint256.NewInt().Add(totalVotingStake, validator.GetStakeAmount()) + totalVotingStake = uint256.NewInt(0).Add(totalVotingStake, validator.GetStakeAmount()) signersList.Set(ii, true) signatures = append(signatures, vote.GetSignature()) } @@ -797,8 +797,8 @@ func (fc *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt } // Compute the total stake and total stake with timeouts - totalStake := uint256.NewInt() - totalTimedOutStake := uint256.NewInt() + totalStake := uint256.NewInt(0) + totalTimedOutStake := uint256.NewInt(0) // Track the high QC view for each validator highQCViews := make([]uint64, len(validatorList)) @@ -813,7 +813,7 @@ func (fc *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt // for the signersList bitset. In practice, the validator list is expected to be <= 1000 in size, so // this loop will be fast. 
for ii, validator := range validatorList { - totalStake = uint256.NewInt().Add(totalStake, validator.GetStakeAmount()) + totalStake = uint256.NewInt(0).Add(totalStake, validator.GetStakeAmount()) // Skip the validator if it hasn't timed out for the previous view timeout, hasTimedOut := timeoutsByValidator[validator.GetPublicKey().ToString()] @@ -830,7 +830,7 @@ func (fc *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt } // Track the signatures, timed out stake, and high QC views for the validator - totalTimedOutStake = uint256.NewInt().Add(totalTimedOutStake, validator.GetStakeAmount()) + totalTimedOutStake = uint256.NewInt(0).Add(totalTimedOutStake, validator.GetStakeAmount()) signersList.Set(ii, true) signatures = append(signatures, timeout.GetSignature()) highQCViews[ii] = timeout.GetHighQC().GetView() diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 96e9465f5..df0057129 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -689,11 +689,11 @@ func TestVoteQCConstructionSignal(t *testing.T) { validatorList := []Validator{ &validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt().SetUint64(70), + stakeAmount: uint256.NewInt(0).SetUint64(70), }, &validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt().SetUint64(30), + stakeAmount: uint256.NewInt(0).SetUint64(30), }, } @@ -867,11 +867,11 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { validatorList := []Validator{ &validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt().SetUint64(70), + stakeAmount: uint256.NewInt(0).SetUint64(70), }, &validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt().SetUint64(30), + stakeAmount: uint256.NewInt(0).SetUint64(30), }, } diff --git a/consensus/integration_test.go b/consensus/integration_test.go index d7d4e941b..2f34a9069 100644 --- a/consensus/integration_test.go +++ 
b/consensus/integration_test.go @@ -16,10 +16,10 @@ import ( func TestNetworkWithOfflineValidators(t *testing.T) { // Create 4 nodes with equal stake. The network has super-majority of stake online // as long as 3 out of 4 nodes are online. - node1 := newValidatorNode(uint256.NewInt().SetUint64(50), true) // block proposer - node2 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator - node3 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator - node4 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator + node1 := newValidatorNode(uint256.NewInt(0).SetUint64(50), true) // block proposer + node2 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator + node3 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator + node4 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator allNodes := []*validatorNode{node1, node2, node3, node4} @@ -104,10 +104,10 @@ func TestNetworkWithOfflineValidators(t *testing.T) { func TestNetworkWithOfflineBlockProposer(t *testing.T) { // Create 4 nodes with equal stake. The network has super-majority of stake online // as long as 3 out of 4 nodes are online. 
- node1 := newValidatorNode(uint256.NewInt().SetUint64(50), true) // block proposer - node2 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator - node3 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator - node4 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator + node1 := newValidatorNode(uint256.NewInt(0).SetUint64(50), true) // block proposer + node2 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator + node3 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator + node4 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator allNodes := []*validatorNode{node1, node2, node3, node4} @@ -181,9 +181,9 @@ func TestNetworkWithOfflineBlockProposer(t *testing.T) { func TestNetworkRecoveryAfterCatastrophicFailure(t *testing.T) { // Create 3 nodes with equal stake. Node 3 has a super-majority of the the stake // and needs to stay online for the network to remain live. - node1 := newValidatorNode(uint256.NewInt().SetUint64(10), true) // block proposer - node2 := newValidatorNode(uint256.NewInt().SetUint64(10), false) // validator - node3 := newValidatorNode(uint256.NewInt().SetUint64(80), false) // validator + node1 := newValidatorNode(uint256.NewInt(0).SetUint64(10), true) // block proposer + node2 := newValidatorNode(uint256.NewInt(0).SetUint64(10), false) // validator + node3 := newValidatorNode(uint256.NewInt(0).SetUint64(80), false) // validator allNodes := []*validatorNode{node1, node2, node3} diff --git a/consensus/utils.go b/consensus/utils.go index 078743313..6f0903591 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -136,8 +136,8 @@ func IsValidSuperMajorityAggregateQuorumCertificate(aggQC AggregateQuorumCertifi func isSuperMajorityStakeSignersList(signersList *bitset.Bitset, validators []Validator) (bool, []*bls.PublicKey) { // Compute the total stake in the QC and the total stake in the network - stakeInQC := uint256.NewInt() - 
totalStake := uint256.NewInt() + stakeInQC := uint256.NewInt(0) + totalStake := uint256.NewInt(0) // Fetch the validators in the QC validatorPublicKeysInQC := []*bls.PublicKey{} @@ -396,11 +396,11 @@ func isSuperMajorityStake(stake *uint256.Int, totalStake *uint256.Int) bool { } // Compute 3Cq - honestStakeComponent := uint256.NewInt().Mul(stake, uint256.NewInt().SetUint64(3)) + honestStakeComponent := uint256.NewInt(0).Mul(stake, uint256.NewInt(0).SetUint64(3)) // Compute 2N + 1 - totalStakeComponent := uint256.NewInt().Mul(totalStake, uint256.NewInt().SetUint64(2)) - totalStakeComponent = uint256.NewInt().Add(totalStakeComponent, uint256.NewInt().SetUint64(1)) + totalStakeComponent := uint256.NewInt(0).Mul(totalStake, uint256.NewInt(0).SetUint64(2)) + totalStakeComponent = uint256.NewInt(0).Add(totalStakeComponent, uint256.NewInt(0).SetUint64(1)) // Check if 3Cq >= 2N + 1 return honestStakeComponent.Cmp(totalStakeComponent) >= 0 @@ -454,11 +454,11 @@ func createValidatorListForPrivateKeys(pk1 *bls.PrivateKey, pk2 *bls.PrivateKey) validators := []*validator{ { publicKey: pk1.PublicKey(), - stakeAmount: uint256.NewInt().SetUint64(100), + stakeAmount: uint256.NewInt(0).SetUint64(100), }, { publicKey: pk2.PublicKey(), - stakeAmount: uint256.NewInt().SetUint64(50), + stakeAmount: uint256.NewInt(0).SetUint64(50), }, } // Cast the slice of concrete structs []*validators to a slice of interfaces []Validator diff --git a/consensus/utils_test.go b/consensus/utils_test.go index ab0e5e350..25c2290f6 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -27,17 +27,17 @@ func TestIsValidSuperMajorityQuorumCertificate(t *testing.T) { validator1 := validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt().SetUint64(3), + stakeAmount: uint256.NewInt(0).SetUint64(3), } validator2 := validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt().SetUint64(2), + stakeAmount: uint256.NewInt(0).SetUint64(2), } 
validator3 := validator{ publicKey: validatorPrivateKey3.PublicKey(), - stakeAmount: uint256.NewInt().SetUint64(1), + stakeAmount: uint256.NewInt(0).SetUint64(1), } validators := []Validator{&validator1, &validator2, &validator3} @@ -110,17 +110,17 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { validator1 := validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt().SetUint64(3), + stakeAmount: uint256.NewInt(0).SetUint64(3), } validator2 := validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt().SetUint64(1), + stakeAmount: uint256.NewInt(0).SetUint64(1), } validator3 := validator{ publicKey: validatorPrivateKey3.PublicKey(), - stakeAmount: uint256.NewInt().SetUint64(1), + stakeAmount: uint256.NewInt(0).SetUint64(1), } validators := []Validator{&validator1, &validator2, &validator3} @@ -357,7 +357,7 @@ func TestIsProperlyFormedValidatorSet(t *testing.T) { // Test nil public key { - validator := validator{publicKey: nil, stakeAmount: uint256.NewInt().SetUint64(1)} + validator := validator{publicKey: nil, stakeAmount: uint256.NewInt(0).SetUint64(1)} require.False(t, isProperlyFormedValidatorSet([]Validator{&validator})) } @@ -369,13 +369,13 @@ func TestIsProperlyFormedValidatorSet(t *testing.T) { // Test zero stake amount { - validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt()} + validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt(0)} require.False(t, isProperlyFormedValidatorSet([]Validator{&validator})) } // Test valid validator { - validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt().SetUint64(1)} + validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt(0).SetUint64(1)} require.True(t, isProperlyFormedValidatorSet([]Validator{&validator})) } } @@ -476,52 +476,52 @@ func TestIsSuperMajorityStake(t *testing.T) { // Test zero values 
{ - require.False(t, isSuperMajorityStake(uint256.NewInt(), uint256.NewInt())) + require.False(t, isSuperMajorityStake(uint256.NewInt(0), uint256.NewInt(0))) } // Test stake amount greater than total stake { - require.False(t, isSuperMajorityStake(uint256.NewInt().SetUint64(2), uint256.NewInt().SetUint64(1))) + require.False(t, isSuperMajorityStake(uint256.NewInt(0).SetUint64(2), uint256.NewInt(0).SetUint64(1))) } // Test stake amount much less than super majority { - stake := uint256.NewInt().SetUint64(1) - totalStake := uint256.NewInt().SetUint64(1000) + stake := uint256.NewInt(0).SetUint64(1) + totalStake := uint256.NewInt(0).SetUint64(1000) require.False(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount less than super majority { - stake := uint256.NewInt().SetUint64(666) - totalStake := uint256.NewInt().SetUint64(1000) + stake := uint256.NewInt(0).SetUint64(666) + totalStake := uint256.NewInt(0).SetUint64(1000) require.False(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount equal to super majority { - stake := uint256.NewInt().SetUint64(667) - totalStake := uint256.NewInt().SetUint64(1000) + stake := uint256.NewInt(0).SetUint64(667) + totalStake := uint256.NewInt(0).SetUint64(1000) require.True(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount greater than super majority { - stake := uint256.NewInt().SetUint64(668) - totalStake := uint256.NewInt().SetUint64(1000) + stake := uint256.NewInt(0).SetUint64(668) + totalStake := uint256.NewInt(0).SetUint64(1000) require.True(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount much greater than super majority { - stake := uint256.NewInt().SetUint64(999) - totalStake := uint256.NewInt().SetUint64(1000) + stake := uint256.NewInt(0).SetUint64(999) + totalStake := uint256.NewInt(0).SetUint64(1000) require.True(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount equal to total stake { - totalStake := uint256.NewInt().SetUint64(1000) + 
totalStake := uint256.NewInt(0).SetUint64(1000) require.True(t, isSuperMajorityStake(totalStake, totalStake)) } } diff --git a/go.mod b/go.mod index d53de57fc..52f1bb91c 100644 --- a/go.mod +++ b/go.mod @@ -3,114 +3,151 @@ module github.com/deso-protocol/core go 1.22 require ( - github.com/DataDog/datadog-go v4.5.0+incompatible + github.com/DataDog/datadog-go v4.8.3+incompatible github.com/brianvoe/gofakeit v3.18.0+incompatible - github.com/btcsuite/btcd v0.21.0-beta - github.com/btcsuite/btcutil v1.0.2 + github.com/btcsuite/btcd v0.24.0 + github.com/btcsuite/btcd/btcec/v2 v2.3.3 + github.com/btcsuite/btcd/btcutil v1.1.5 + github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 github.com/bxcodec/faker v2.0.1+incompatible - github.com/cloudflare/circl v1.1.0 - github.com/davecgh/go-spew v1.1.1 - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 - github.com/decred/dcrd/lru v1.1.1 + github.com/cloudflare/circl v1.3.7 + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 + github.com/decred/dcrd/lru v1.1.2 github.com/deso-protocol/go-deadlock v1.0.0 github.com/deso-protocol/go-merkle-tree v1.0.0 github.com/dgraph-io/badger/v4 v4.2.0 github.com/emirpasic/gods v1.18.1 - github.com/ethereum/go-ethereum v1.9.25 - github.com/fatih/color v1.13.0 + github.com/ethereum/go-ethereum v1.13.14 + github.com/fatih/color v1.16.0 github.com/gernest/mention v2.0.0+incompatible - github.com/go-pg/pg/v10 v10.10.0 - github.com/golang/glog v1.0.0 - github.com/google/uuid v1.2.0 - github.com/holiman/uint256 v1.1.1 + github.com/go-pg/pg/v10 v10.12.0 + github.com/golang/glog v1.2.1 + github.com/google/uuid v1.6.0 + github.com/holiman/uint256 v1.2.4 github.com/mitchellh/go-homedir v1.1.0 github.com/oleiade/lane v1.0.1 - github.com/onflow/crypto v0.25.0 + github.com/onflow/crypto v0.25.1 github.com/pkg/errors v0.9.1 - github.com/pmezard/go-difflib v1.0.0 + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 
github.com/robinjoseph08/go-pg-migrations/v3 v3.0.0 github.com/shibukawa/configdir v0.0.0-20170330084843-e180dbdc8da0 - github.com/spf13/cobra v1.1.3 + github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.7.1 - github.com/stretchr/testify v1.8.0 - github.com/tyler-smith/go-bip39 v1.0.2 - github.com/unrolled/secure v1.0.8 - golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d - golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 - gopkg.in/DataDog/dd-trace-go.v1 v1.29.0 + github.com/spf13/viper v1.18.2 + github.com/stretchr/testify v1.9.0 + github.com/tyler-smith/go-bip39 v1.1.0 + github.com/unrolled/secure v1.14.0 + golang.org/x/crypto v0.22.0 + golang.org/x/sync v0.7.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.62.0 ) require ( - github.com/fergusstrange/embedded-postgres v1.19.0 - github.com/lib/pq v1.10.4 // indirect + github.com/fergusstrange/embedded-postgres v1.26.0 + github.com/lib/pq v1.10.9 // indirect github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect ) require ( - github.com/Microsoft/go-winio v0.4.16 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect + github.com/AlecAivazis/survey/v2 v2.3.7 // indirect + github.com/DataDog/appsec-internal-go v1.5.0 // indirect + github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 // indirect + github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1 // indirect + github.com/DataDog/datadog-go/v5 v5.5.0 // indirect + github.com/DataDog/go-libddwaf/v2 v2.4.2 // indirect + github.com/DataDog/go-sqllexer v0.0.11 // indirect + github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect + github.com/DataDog/gostackparse v0.7.0 // indirect + github.com/DataDog/sketches-go v1.4.4 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/andygrunwald/go-jira v1.16.0 // indirect 
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect - github.com/bwesterb/go-ristretto v1.2.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect + github.com/bwesterb/go-ristretto v1.2.3 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect - github.com/dustin/go-humanize v1.0.0 // indirect - github.com/fsnotify/fsnotify v1.4.9 // indirect - github.com/git-chglog/git-chglog v0.0.0-20200414013904-db796966b373 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/ebitengine/purego v0.7.1 // indirect + github.com/fatih/structs v1.1.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/git-chglog/git-chglog v0.15.4 // indirect github.com/go-pg/zerochecker v0.2.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/golang/snappy v0.0.3 // indirect - github.com/google/flatbuffers v2.0.0+incompatible // indirect - github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/imdario/mergo v0.3.8 // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/flatbuffers v24.3.25+incompatible // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // 
indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.12.3 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/magiconair/properties v1.8.1 // indirect - github.com/mattn/go-colorable v0.1.9 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/mattn/goveralls v0.0.6 // indirect - github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect - github.com/mitchellh/mapstructure v1.1.2 // indirect - github.com/onsi/ginkgo v1.15.0 // indirect - github.com/onsi/gomega v1.10.5 // indirect - github.com/pelletier/go-toml v1.7.0 // indirect - github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect - github.com/philhofer/fwd v1.1.1 // indirect - github.com/russross/blackfriday/v2 v2.0.1 // indirect - github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect - github.com/spf13/afero v1.1.2 // indirect - github.com/spf13/cast v1.3.0 // indirect - github.com/spf13/jwalterweatherman v1.0.0 // indirect - github.com/subosito/gotenv v1.2.0 // indirect - github.com/tinylib/msgp v1.1.2 // indirect + github.com/klauspost/compress v1.17.8 // indirect + github.com/kyokomi/emoji/v2 v2.2.12 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/goveralls v0.0.12 // indirect + github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/nxadm/tail v1.4.11 // indirect + github.com/onsi/ginkgo 
v1.16.5 // indirect + github.com/onsi/gomega v1.32.0 // indirect + github.com/outcaste-io/ristretto v0.2.3 // indirect + github.com/pelletier/go-toml/v2 v2.2.0 // indirect + github.com/petermattis/goid v0.0.0-20240327183114-c42a807a84ba // indirect + github.com/philhofer/fwd v1.1.2 // indirect + github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/tinylib/msgp v1.1.9 // indirect github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect + github.com/trivago/tgo v1.0.7 // indirect github.com/tsuyoshiwada/go-gitcmd v0.0.0-20180205145712-5f1f5f9475df // indirect - github.com/urfave/cli v1.22.1 // indirect + github.com/urfave/cli/v2 v2.27.1 // indirect github.com/vmihailenco/bufpool v0.1.11 // indirect - github.com/vmihailenco/msgpack/v5 v5.3.1 // indirect + github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - go.opencensus.io v0.23.0 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect - golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect - golang.org/x/tools v0.1.12 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - gonum.org/v1/gonum v0.6.1 // indirect - google.golang.org/protobuf v1.28.1 // indirect - gopkg.in/AlecAivazis/survey.v1 v1.8.7 // indirect - gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c // indirect - gopkg.in/ini.v1 v1.51.0 // indirect - gopkg.in/kyokomi/emoji.v1 v1.5.1 // indirect + github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect + go.opencensus.io v0.24.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.20.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + gonum.org/v1/gonum v0.15.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - mellium.im/sasl v0.2.1 // indirect + mellium.im/sasl v0.3.1 // indirect ) diff --git a/go.sum b/go.sum index e4bbfaa35..5686bbc48 100644 --- a/go.sum +++ b/go.sum @@ -1,117 +1,106 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= 
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= -github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= -github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= +github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v4.4.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/datadog-go v4.5.0+incompatible h1:MyyuIz5LVAI3Im+0F/tfo64ETyH4sNVynZ29yOiHm50= -github.com/DataDog/datadog-go v4.5.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= +github.com/DataDog/appsec-internal-go v1.5.0 h1:8kS5zSx5T49uZ8dZTdT19QVAvC/B8ByyZdhQKYQWHno= +github.com/DataDog/appsec-internal-go v1.5.0/go.mod h1:pEp8gjfNLtEOmz+iZqC8bXhu0h4k7NUsW/qiQb34k1U= +github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 h1:/oxF4p/4XUGNpNw2TE7vDu/pJV3elEAZ+jES0/MWtiI= +github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1/go.mod h1:AVPQWekk3h9AOC7+plBlNB68Sy6UIGFoMMVUDeSoNoI= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1 h1:mmkGuCHBFuDBpuwNMcqtY1x1I2fCaPH2Br4xPAAjbkM= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1/go.mod h1:JhAilx32dkIgoDkFXquCTfaWDsAOfe+vfBaxbiZoPI0= +github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= +github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= +github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= +github.com/DataDog/go-libddwaf/v2 v2.4.2 h1:ilquGKUmN9/Ty0sIxiEyznVRxP3hKfmH15Y1SMq5gjA= +github.com/DataDog/go-libddwaf/v2 v2.4.2/go.mod h1:gsCdoijYQfj8ce/T2bEDNPZFIYnmHluAgVDpuQOWMZE= 
+github.com/DataDog/go-sqllexer v0.0.11 h1:OfPBjmayreblOXreszbrOTICNZ3qWrA6Bg4sypvxpbw= +github.com/DataDog/go-sqllexer v0.0.11/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= +github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= +github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4= +github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM= +github.com/DataDog/sketches-go v1.4.4 h1:dF52vzXRFSPOj2IjXSWLvXq3jubL4CI69kwYjJ1w5Z8= +github.com/DataDog/sketches-go v1.4.4/go.mod h1:XR0ns2RtEEF09mDKXiKZiQg+nfZStrq1ZuL1eezeZe0= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= 
-github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= +github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= +github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/andygrunwald/go-jira v1.16.0 h1:PU7C7Fkk5L96JvPc6vDVIrd99vdPnYudHu4ju2c2ikQ= 
+github.com/andygrunwald/go-jira v1.16.0/go.mod h1:UQH4IBVxIYWbgagc0LF/k9FRs9xjIiQ8hIcC6HfLwFU= github.com/brianvoe/gofakeit v3.18.0+incompatible h1:wDOmHc9DLG4nRjUVVaxA+CEglKOW72Y5+4WNxUIkjM8= github.com/brianvoe/gofakeit v3.18.0+incompatible/go.mod h1:kfwdRA90vvNhPutZWfH7WPaDzUjz+CZFqG+rPkOjGOc= -github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= -github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= -github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E= -github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8= +github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= +github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= +github.com/btcsuite/btcd v0.24.0 h1:gL3uHE/IaFj6fcZSu03SvqPMSx7s/dPzfpG/atRwWdo= +github.com/btcsuite/btcd v0.24.0/go.mod h1:K4IDc1593s8jKXIF7yS7yCTSxrknB9z0STzc2j6XgE4= +github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= +github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.3.3 h1:6+iXlDKE8RMtKsvK0gshlXIuPbyWM/h84Ensb7o3sC0= +github.com/btcsuite/btcd/btcec/v2 v2.3.3/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= +github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= +github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8= +github.com/btcsuite/btcd/btcutil v1.1.5/go.mod 
h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts= -github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= -github.com/btcsuite/goleveldb v1.0.0 h1:Tvd0BfvqX9o823q1j2UZ/epQo09eJh6dTcRp79ilIN4= github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/snappy-go v1.0.0 h1:ZxaA6lo2EpxGddsA8JwWOcxlzRybb444sgmeJQMJGQE= github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bwesterb/go-ristretto v1.2.0 
h1:xxWOVbN5m8NNKiSDZXE1jtZvZnC6JSJ9cYFADiZcWtw= -github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/bwesterb/go-ristretto v1.2.3 h1:1w53tCkGhCQ5djbat3+MH0BAQ5Kfgbt56UZQ/JMzngw= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA= github.com/bxcodec/faker v2.0.1+incompatible/go.mod h1:BNzfpVdTwnFJ6GtfYTcQu6l6rHShT+veBxNCnjCx5XM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY= -github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= -github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= +github.com/cloudflare/circl 
v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= +github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/decred/dcrd/lru v1.1.1 h1:kWFDaW0OWx6AD6Ki342c+JPmHbiVdE6rK81pT3fuo/Y= -github.com/decred/dcrd/lru v1.1.1/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/decred/dcrd/lru v1.1.2 h1:KdCzlkxppuoIDGEvCGah1fZRicrDH36IipvlB1ROkFY= +github.com/decred/dcrd/lru v1.1.2/go.mod h1:gEdCVgXs1/YoBvFWt7Scgknbhwik3FgVSzlnCcXL2N8= github.com/deso-protocol/go-deadlock v1.0.0 h1:mw0pHy/19zgC+JFBStuQt1+1Ehv5OKA5NxXqecnL5ic= github.com/deso-protocol/go-deadlock v1.0.0/go.mod h1:K0Wd2OV2x7ck7SMYDraWerpKjFKUeBqaFcwz21tmkb8= github.com/deso-protocol/go-merkle-tree v1.0.0 
h1:9zkI5dQsITYy77s4kbTGPQmZnhQ+LsH/kRdL5l/Yzvg= @@ -120,69 +109,59 @@ github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8Bzu github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= -github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/ebitengine/purego v0.7.1 h1:6/55d26lG3o9VCZX8lping+bZcmShseiqlh2bnUDiPA= +github.com/ebitengine/purego v0.7.1/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ= github.com/emirpasic/gods v1.18.1 
h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.9.25 h1:mMiw/zOOtCLdGLWfcekua0qPrJTe7FVIiHJ4IKNTfR0= -github.com/ethereum/go-ethereum v1.9.25/go.mod h1:vMkFiYLHI4tgPw4k2j4MHKoovchFE8plZ0M9VMk4/oM= -github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/ethereum/go-ethereum v1.13.14 h1:EwiY3FZP94derMCIam1iW4HFVrSgIcpsu0HwTQtm6CQ= +github.com/ethereum/go-ethereum v1.13.14/go.mod h1:TN8ZiHrdJwSe8Cb6x+p0hs5CxhJZPbqB7hHkaUXcmIU= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fergusstrange/embedded-postgres v1.19.0 h1:NqDufJHeA03U7biULlPHZ0pZ10/mDOMKPILEpT50Fyk= -github.com/fergusstrange/embedded-postgres v1.19.0/go.mod h1:0B+3bPsMvcNgR9nN+bdM2x9YaNYDnf3ksUqYp1OAub0= -github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs 
v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fergusstrange/embedded-postgres v1.26.0 h1:mTgUBNST+6zro0TkIb9Fuo9Qg8mSU0ILus9jZKmFmJg= +github.com/fergusstrange/embedded-postgres v1.26.0/go.mod h1:t/MLs0h9ukYM6FSt99R7InCHs1nW0ordoVCcnzmpTYw= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gernest/mention v2.0.0+incompatible h1:pTXnujBC6tqlw5awDkLojq92TXbt0F+4+8FBlQC+di8= github.com/gernest/mention v2.0.0+incompatible/go.mod h1:/z3Hb+4gaPF+vL8og/lj6Au5j8hh5EfU7/EknmDUuO4= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/git-chglog/git-chglog v0.0.0-20200414013904-db796966b373 h1:MHrlpWOOFhCfY1L9iCIUy5cv5HgDtempICenzJt+7ws= github.com/git-chglog/git-chglog v0.0.0-20200414013904-db796966b373/go.mod h1:Dcsy1kii/xFyNad5JqY/d0GO5mu91sungp5xotbm3Yk= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod 
h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/git-chglog/git-chglog v0.15.4 h1:BwPDj7AghQTfpXO+UxG4mZM5MUTe9wfDuenF3jpyNf0= +github.com/git-chglog/git-chglog v0.15.4/go.mod h1:BmWdTpqBVzPjKNrBTZGcQCrQV9zq6gFKurhWNnJbYDA= github.com/go-pg/pg/v10 v10.5.0/go.mod h1:BfgPoQnD2wXNd986RYEHzikqv9iE875PrFaZ9vXvtNM= -github.com/go-pg/pg/v10 v10.10.0 h1:xc5zWYQ/55XI8pk5NkK+ixXqbJh1vnOun3VODPmbYfY= -github.com/go-pg/pg/v10 v10.10.0/go.mod h1:EmoJGYErc+stNN/1Jf+o4csXuprjxcRztBnn6cHe38E= +github.com/go-pg/pg/v10 v10.12.0 h1:rBmfDDHTN7FQW0OemYmcn5UuBy6wkYWgh/Oqt1OBEB8= +github.com/go-pg/pg/v10 v10.12.0/go.mod h1:USA08CdIasAn0F6wC1nBf5nQhMHewVQodWoH89RPXaI= github.com/go-pg/zerochecker v0.2.0 h1:pp7f72c3DobMWOb2ErtZsnrPaSvHd2W4o9//8HtF4mU= github.com/go-pg/zerochecker v0.2.0/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo= -github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= +github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -196,16 +175,13 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= -github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI= +github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -213,241 +189,179 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20210125172800-10e9aeb4a998/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5 h1:zIaiqGYDQwa4HVx5wGRTXbx38Pqxjemn4BP98wpzpXo= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd 
h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo= +github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.0/go.mod 
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 
h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= -github.com/holiman/uint256 v1.1.1 h1:4JywC80b+/hSfljFlEBLHrrh+CIONLDz9NuFl0af4Mw= -github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= +github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= +github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= +github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod 
h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= -github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/jstemmer/go-junit-report 
v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= -github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= 
+github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= -github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/kyokomi/emoji/v2 v2.2.12 h1:sSVA5nH9ebR3Zji1o31wu3yOwD1zKXQA2z0zUyeit60= +github.com/kyokomi/emoji/v2 v2.2.12/go.mod h1:JUcn42DTdsXJo1SWanHh4HKDEyPaR5CqkmoirZZP9qE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.0/go.mod 
h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/goveralls v0.0.6 h1:cr8Y0VMo/MnEZBjxNN/vh6G90SZ7IMb6lms1dzMoO+Y= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/goveralls v0.0.6/go.mod 
h1:h8b4ow6FxSPMQHF6o2ve3qsclnffZjYTNEKmLesRwqw= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= +github.com/mattn/goveralls v0.0.12 h1:PEEeF0k1SsTjOBQ8FOmrOAoCu4ytuMaWCnWe94zxbCg= +github.com/mattn/goveralls v0.0.12/go.mod h1:44ImGEUfmqH8bBtaMrYKsM65LXfNLWmwaxFGjZwgMSQ= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2 
h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oleiade/lane v1.0.1 h1:hXofkn7GEOubzTwNpeL9MaNy8WxolCYb9cInAIeqShU= github.com/oleiade/lane v1.0.1/go.mod h1:IyTkraa4maLfjq/GmHR+Dxb4kCMtEGeb+qmhlrQ5Mk4= -github.com/olekukonko/tablewriter v0.0.1/go.mod 
h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onflow/crypto v0.25.0 h1:BeWbLsh3ZD13Ej+Uky6kg1PL1ZIVBDVX+2MVBNwqddg= -github.com/onflow/crypto v0.25.0/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= +github.com/onflow/crypto v0.25.1 h1:0txy2PKPMM873JbpxQNbJmuOJtD56bfs48RQfm0ts5A= +github.com/onflow/crypto v0.25.1/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= -github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= 
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0= +github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= +github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo= +github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= -github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/petermattis/goid v0.0.0-20240327183114-c42a807a84ba h1:3jPgmsFGBID1wFfU2AbYocNcN4wqU68UaHSdMjiw/7U= +github.com/petermattis/goid v0.0.0-20240327183114-c42a807a84ba/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA= +github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk= github.com/robinjoseph08/go-pg-migrations/v3 v3.0.0 h1:0/H63lDsoNYVn5YmP6VLDEnnKkoVYiHx7udTWCK4BUI= github.com/robinjoseph08/go-pg-migrations/v3 v3.0.0/go.mod h1:nOkSFfwwDUBFnDDQqMRC2p4PDE7GZb/KSVqILVB3bmw= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= 
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= +github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/shibukawa/configdir v0.0.0-20170330084843-e180dbdc8da0 h1:Xuk8ma/ibJ1fOy4Ee11vHhUFHQNpHhrBneOCNHVXS5w= github.com/shibukawa/configdir v0.0.0-20170330084843-e180dbdc8da0/go.mod h1:7AwjWCpdPhkSmNAgUv5C7EJ4AbmjEB3r047r3DXWu3Y= -github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= 
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= 
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= -github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= -github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx 
v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -456,268 +370,210 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= -github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= -github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= 
+github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU= +github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k= github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo= github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs= +github.com/trivago/tgo v1.0.7 h1:uaWH/XIy9aWYWpjm2CU3RpcqZXmX2ysQ9/Go+d9gyrM= +github.com/trivago/tgo v1.0.7/go.mod h1:w4dpD+3tzNIIiIfkWWa85w5/B77tlvdZckQ+6PkFnhc= github.com/tsuyoshiwada/go-gitcmd v0.0.0-20180205145712-5f1f5f9475df h1:Y2l28Jr3vOEeYtxfVbMtVfOdAwuUqWaP9fvNKiBVeXY= github.com/tsuyoshiwada/go-gitcmd v0.0.0-20180205145712-5f1f5f9475df/go.mod h1:pnyouUty/nBr/zm3GYwTIt+qFTLWbdjeLjZmJdzJOu8= -github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= -github.com/tyler-smith/go-bip39 v1.0.2 h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8= -github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= -github.com/unrolled/secure v1.0.8 h1:JaMvKbe4CRt8oyxVXn+xY+6jlqd7pyJNSVkmsBxxQsM= -github.com/unrolled/secure v1.0.8/go.mod h1:fO+mEan+FLB0CdEnHf6Q4ZZVNqG+5fuLFnP8p0BXDPI= -github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= +github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= +github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= +github.com/unrolled/secure v1.14.0 h1:u9vJTU/pR4Bny0ntLUMxdfLtmIRGvQf2sEFuA0TG9AE= 
+github.com/unrolled/secure v1.14.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= -github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= +github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/vmihailenco/bufpool v0.1.11 h1:gOq2WmBrq0i2yW5QJ16ykccQ4wH9UyEsgLm6czKAd94= github.com/vmihailenco/bufpool v0.1.11/go.mod h1:AFf/MOy3l2CFTKbxwt0mp2MwnqjNEs5H/UxrkA5jxTQ= github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/msgpack/v5 v5.0.0-beta.1/go.mod h1:xlngVLeyQ/Qi05oQxhQ+oTuqa03RjMwMfk/7/TCs+QI= github.com/vmihailenco/msgpack/v5 v5.0.0-beta.8/go.mod h1:HVxBVPUK/+fZMonk4bi1islLa8V3cfnBug0+4dykPzo= -github.com/vmihailenco/msgpack/v5 v5.3.1 h1:0i85a4dsZh8mC//wmyyTEzidDLPQfQAxZIOLtafGbFY= -github.com/vmihailenco/msgpack/v5 v5.3.1/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= -github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= github.com/xi2/xz 
v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= +github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/atomic v1.9.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/image 
v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 h1:ESSUROHIBHg7USnszlcdmjBEwdMj9VUvU+OPk4yl2mc= +golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20200801112145-973feb4309de/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net 
v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 
h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180606202747-9527bec2660b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201026173827-119d4633e4d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools 
v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= +golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.6.1 h1:/LSrTrgZtpbXyAR6+0e152SROCkJJSh7goYWVmdPFGc= -gonum.org/v1/gonum v0.6.1/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= +gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -732,36 +588,27 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/AlecAivazis/survey.v1 v1.8.7 h1:oBJqtgsyBLg9K5FK9twNUbcPnbCPoh+R9a+7nag3qJM= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/AlecAivazis/survey.v1 v1.8.7/go.mod h1:iBNOmqKz/NUbZx3bA+4hAGLRC7fSK7tgtVDT4tB22XA= -gopkg.in/DataDog/dd-trace-go.v1 v1.29.0 h1:3C1EEjgFTPqrnS2SXuSqkBbZGacIOPJ7ScGJk4nrP9s= -gopkg.in/DataDog/dd-trace-go.v1 v1.29.0/go.mod h1:FLwUDeuH0z5hkvgvd04/M3MHQN4AF5pQDnedeWRWvok= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/DataDog/dd-trace-go.v1 v1.62.0 h1:jeZxE4ZlfAc+R0zO5TEmJBwOLet3NThsOfYJeSQg1x0= +gopkg.in/DataDog/dd-trace-go.v1 v1.62.0/go.mod h1:YTvYkk3PTsfw0OWrRFxV/IQ5Gy4nZ5TRvxTAP3JcIzs= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/kyokomi/emoji.v1 v1.5.1 h1:beetH5mWDMzFznJ+Qzd5KVHp79YKhVUMcdO8LpRLeGw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/kyokomi/emoji.v1 v1.5.1/go.mod h1:N9AZ6hi1jHOPn34PsbpufQZUcKftSD7WgS2pgpmH4Lg= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= -gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -769,13 +616,12 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/gotraceui v0.2.0 h1:dmNsfQ9Vl3GwbiVD7Z8d/osC6WtGGrasyrC2suc4ZIQ= +honnef.co/go/gotraceui v0.2.0/go.mod h1:qHo4/W75cA3bX0QQoSvDjbJa4R8mAyyFjbWAj63XElc= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -mellium.im/sasl v0.2.1 h1:nspKSRg7/SyO0cRGY71OkfHab8tf9kCts6a6oTDut0w= mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ= +mellium.im/sasl v0.3.1 h1:wE0LW6g7U83vhvxjC1IY8DnXM+EU095yeo8XClvCdfo= +mellium.im/sasl v0.3.1/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw= pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= diff --git a/lib/base58.go b/lib/base58.go index c02743312..2cfc583d2 100644 --- a/lib/base58.go +++ b/lib/base58.go @@ -3,7 +3,7 @@ package lib 
import ( "crypto/sha256" "fmt" - "github.com/btcsuite/btcutil/base58" + "github.com/btcsuite/btcd/btcutil/base58" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/bitcoin_burner.go b/lib/bitcoin_burner.go index 20865be74..e77428674 100644 --- a/lib/bitcoin_burner.go +++ b/lib/bitcoin_burner.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "github.com/btcsuite/btcd/btcutil" "github.com/davecgh/go-spew/spew" "io/ioutil" "math" @@ -15,11 +16,10 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" ) // bitcoin_burner.go finds the Bitcoin UTXOs associated with a Bitcoin diff --git a/lib/block_producer.go b/lib/block_producer.go index b5ad66cb0..19fe7836c 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -3,6 +3,7 @@ package lib import ( "encoding/hex" "fmt" + "github.com/btcsuite/btcd/btcec/v2/ecdsa" "math" "strings" "sync" @@ -14,7 +15,7 @@ import ( "github.com/deso-protocol/go-deadlock" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/golang/glog" "github.com/pkg/errors" ) @@ -89,7 +90,7 @@ func NewDeSoBlockProducer( if err != nil { return nil, fmt.Errorf("NewDeSoBlockProducer: Error decoding hex seed: %+v", err) } - privKey, _ = btcec.PrivKeyFromBytes(btcec.S256(), privKeyBytes) + privKey, _ = btcec.PrivKeyFromBytes(privKeyBytes) } else { seedBytes, err := bip39.NewSeedWithErrorChecking(blockProducerSeed, "") if err != nil { @@ -144,7 +145,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) lastNode := desoBlockProducer.chain.blockTip() // Compute the public key to contribute the reward to. 
- rewardPk, err := btcec.ParsePubKey(publicKey, btcec.S256()) + rewardPk, err := btcec.ParsePubKey(publicKey) if err != nil { return nil, nil, nil, errors.Wrapf(err, "DeSoBlockProducer._getBlockTemplate: ") } @@ -297,7 +298,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) } // Parse the public key that should be used for the block reward. - blockRewardOutputPublicKey, err := btcec.ParsePubKey(blockRewardOutput.PublicKey, btcec.S256()) + blockRewardOutputPublicKey, err := btcec.ParsePubKey(blockRewardOutput.PublicKey) if err != nil { return nil, nil, nil, errors.Wrapf(err, "DeSoBlockProducer._getBlockTemplate: problem parsing block reward output public key: ") } @@ -316,7 +317,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) includeFeesInBlockReward := true if blockRet.Header.Height >= uint64(desoBlockProducer.params.ForkHeights.BlockRewardPatchBlockHeight) { // Parse the transactor's public key to compare with the block reward output public key. 
- transactorPublicKey, err := btcec.ParsePubKey(txnInBlock.PublicKey, btcec.S256()) + transactorPublicKey, err := btcec.ParsePubKey(txnInBlock.PublicKey) if err != nil { return nil, nil, nil, errors.Wrapf(err, "DeSoBlockProducer._getBlockTemplate: problem parsing transactor public key: ") } @@ -452,7 +453,7 @@ func RecomputeBlockRewardWithBlockRewardOutputPublicKey( blockRewardOutputPublicKeyBytes []byte, params *DeSoParams, ) (*MsgDeSoBlock, error) { - blockRewardOutputPublicKey, err := btcec.ParsePubKey(blockRewardOutputPublicKeyBytes, btcec.S256()) + blockRewardOutputPublicKey, err := btcec.ParsePubKey(blockRewardOutputPublicKeyBytes) if err != nil { return nil, errors.Wrap( fmt.Errorf("RecomputeBlockRewardWithBlockRewardOutpubPublicKey: Problem parsing block reward output public key: %v", err), "") @@ -462,7 +463,7 @@ func RecomputeBlockRewardWithBlockRewardOutputPublicKey( // and sum fees to calculate the block reward totalFees := uint64(0) for _, txn := range block.Txns[1:] { - transactorPublicKey, err := btcec.ParsePubKey(txn.PublicKey, btcec.S256()) + transactorPublicKey, err := btcec.ParsePubKey(txn.PublicKey) if err != nil { glog.Errorf("DeSoMiner._startThread: Error parsing transactor public key: %v", err) continue @@ -585,7 +586,7 @@ func (desoBlockProducer *DeSoBlockProducer) SignBlock(blockFound *MsgDeSoBlock) fmt.Errorf("Error computing block hash from header submitted: %v", err), "") } - signature, err := desoBlockProducer.blockProducerPrivateKey.Sign(blockHash[:]) + signature := ecdsa.Sign(desoBlockProducer.blockProducerPrivateKey, blockHash[:]) if err != nil { return errors.Wrap( fmt.Errorf("Error signing block: %v", err), "") diff --git a/lib/block_view.go b/lib/block_view.go index 3acbd79a1..f62a581bb 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4,6 +4,8 @@ import ( "bytes" "encoding/hex" "fmt" + "github.com/btcsuite/btcd/btcec/v2" + "github.com/dgraph-io/badger/v4" "math" "math/big" "reflect" @@ -18,8 +20,6 @@ import ( 
"github.com/davecgh/go-spew/spew" - "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" ) @@ -1844,7 +1844,7 @@ func (bav *UtxoView) _verifySignature(txn *MsgDeSoTxn, blockHeight uint32) (_der } // If we got a derived key then try parsing it. if isDerived { - derivedPk, err = btcec.ParsePubKey(derivedPkBytes, btcec.S256()) + derivedPk, err = btcec.ParsePubKey(derivedPkBytes) if err != nil { return nil, fmt.Errorf("%v %v", RuleErrorDerivedKeyInvalidExtraData, RuleErrorDerivedKeyInvalidRecoveryId) } @@ -1852,7 +1852,7 @@ func (bav *UtxoView) _verifySignature(txn *MsgDeSoTxn, blockHeight uint32) (_der // Get the owner public key and attempt turning it into *btcec.PublicKey. ownerPkBytes := txn.PublicKey - ownerPk, err := btcec.ParsePubKey(ownerPkBytes, btcec.S256()) + ownerPk, err := btcec.ParsePubKey(ownerPkBytes) if err != nil { return nil, errors.Wrapf(err, "_verifySignature: Problem parsing owner public key: ") } @@ -4088,7 +4088,7 @@ func (bav *UtxoView) _connectSingleTxn( "ConnectTransaction: TxnTypeUnlockStake must correspond to OperationTypeUnlockStake", ) } - totalLockedAmountNanos := uint256.NewInt() + totalLockedAmountNanos := uint256.NewInt(0) for _, prevLockedStakeEntry := range utxoOp.PrevLockedStakeEntries { totalLockedAmountNanos, err = SafeUint256().Add( totalLockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos, @@ -4115,7 +4115,7 @@ func (bav *UtxoView) _connectSingleTxn( "ConnectTransaction: TxnTypeCoinUnlock must correspond to OperationTypeCoinUnlock", ) } - totalLockedDESOAmountNanos := uint256.NewInt() + totalLockedDESOAmountNanos := uint256.NewInt(0) for _, prevLockedBalanceEntry := range utxoOp.PrevLockedBalanceEntries { totalLockedDESOAmountNanos, err = SafeUint256().Add( totalLockedDESOAmountNanos, &prevLockedBalanceEntry.BalanceBaseUnits) @@ -4382,7 +4382,7 @@ func (bav *UtxoView) ConnectBlock( } var err error blockRewardOutputPublicKey, err = - 
btcec.ParsePubKey(desoBlock.Txns[0].TxOutputs[0].PublicKey, btcec.S256()) + btcec.ParsePubKey(desoBlock.Txns[0].TxOutputs[0].PublicKey) if err != nil { return nil, fmt.Errorf("ConnectBlock: Problem parsing block reward public key: %v", err) } @@ -4422,7 +4422,7 @@ func (bav *UtxoView) ConnectBlock( includeFeesInBlockReward := true if blockHeight >= uint64(bav.Params.ForkHeights.BlockRewardPatchBlockHeight) && txn.TxnMeta.GetTxnType() != TxnTypeBlockReward { - transactorPubKey, err := btcec.ParsePubKey(txn.PublicKey, btcec.S256()) + transactorPubKey, err := btcec.ParsePubKey(txn.PublicKey) if err != nil { return nil, fmt.Errorf("ConnectBlock: Problem parsing transactor public key: %v", err) } diff --git a/lib/block_view_access_group_members_test.go b/lib/block_view_access_group_members_test.go index d3becd56d..4c237effb 100644 --- a/lib/block_view_access_group_members_test.go +++ b/lib/block_view_access_group_members_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/golang/glog" "github.com/pkg/errors" "github.com/stretchr/testify/require" @@ -67,7 +67,7 @@ func TestAccessGroupMembersAdd(t *testing.T) { randomMemberPublicKeys := []*PublicKey{} randomMemberGroupKeys := []*GroupKeyName{} for ii := 0; ii < randomMemberCounter; ii++ { - privateKey, err := btcec.NewPrivateKey(btcec.S256()) + privateKey, err := btcec.NewPrivateKey() require.NoError(err) privateKeyBase58Check := Base58CheckEncode( privateKey.Serialize(), true, &DeSoTestnetParams) @@ -113,16 +113,16 @@ func TestAccessGroupMembersAdd(t *testing.T) { initChainCallback: initChainCallback, } - groupPriv1, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv1, err := btcec.NewPrivateKey() require.NoError(err) groupPk1 := groupPriv1.PubKey().SerializeCompressed() - groupPriv2, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv2, err := btcec.NewPrivateKey() require.NoError(err) groupPk2 := 
groupPriv2.PubKey().SerializeCompressed() _ = groupPk2 - groupPriv3, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv3, err := btcec.NewPrivateKey() require.NoError(err) groupPk3 := groupPriv3.PubKey().SerializeCompressed() _ = groupPk3 @@ -416,7 +416,7 @@ func TestAccessGroupMembersRemove(t *testing.T) { randomMemberPublicKeys1 := []*PublicKey{} randomMemberPublicKeys2 := []*PublicKey{} for ii := 0; ii < randomMemberCounter1; ii++ { - privateKey, err := btcec.NewPrivateKey(btcec.S256()) + privateKey, err := btcec.NewPrivateKey() require.NoError(err) privateKeyBase58Check := Base58CheckEncode( privateKey.Serialize(), true, &DeSoTestnetParams) @@ -426,7 +426,7 @@ func TestAccessGroupMembersRemove(t *testing.T) { randomMemberPublicKeys1 = append(randomMemberPublicKeys1, publicKey) } for ii := 0; ii < randomMemberCounter2; ii++ { - privateKey, err := btcec.NewPrivateKey(btcec.S256()) + privateKey, err := btcec.NewPrivateKey() require.NoError(err) privateKeyBase58Check := Base58CheckEncode( privateKey.Serialize(), true, &DeSoTestnetParams) @@ -465,17 +465,17 @@ func TestAccessGroupMembersRemove(t *testing.T) { initChainCallback: initChainCallback, } - groupPriv1, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv1, err := btcec.NewPrivateKey() require.NoError(err) groupPk1 := groupPriv1.PubKey().SerializeCompressed() _ = groupPk1 - groupPriv2, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv2, err := btcec.NewPrivateKey() require.NoError(err) groupPk2 := groupPriv2.PubKey().SerializeCompressed() _ = groupPk2 - groupPriv3, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv3, err := btcec.NewPrivateKey() require.NoError(err) groupPk3 := groupPriv3.PubKey().SerializeCompressed() _ = groupPk3 @@ -913,17 +913,17 @@ func TestAccessGroupMembersUpdate(t *testing.T) { initChainCallback: initChainCallback, } - groupPriv1, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv1, err := btcec.NewPrivateKey() require.NoError(err) groupPk1 := 
groupPriv1.PubKey().SerializeCompressed() _ = groupPk1 - groupPriv2, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv2, err := btcec.NewPrivateKey() require.NoError(err) groupPk2 := groupPriv2.PubKey().SerializeCompressed() _ = groupPk2 - groupPriv3, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv3, err := btcec.NewPrivateKey() require.NoError(err) groupPk3 := groupPriv3.PubKey().SerializeCompressed() _ = groupPk3 @@ -1654,7 +1654,7 @@ func TestAccessGroupMembersTxnWithDerivedKey(t *testing.T) { require.NoError(t, err) senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) require.NoError(t, err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) // Helper funcs _submitAuthorizeDerivedKeyTxn := func( diff --git a/lib/block_view_access_group_test.go b/lib/block_view_access_group_test.go index 4afd40857..24912adf0 100644 --- a/lib/block_view_access_group_test.go +++ b/lib/block_view_access_group_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/pkg/errors" "github.com/stretchr/testify/require" ) @@ -71,16 +71,16 @@ func TestAccessGroup(t *testing.T) { initChainCallback: initChainCallback, } - groupPriv1, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv1, err := btcec.NewPrivateKey() require.NoError(err) groupPk1 := groupPriv1.PubKey().SerializeCompressed() - groupPriv2, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv2, err := btcec.NewPrivateKey() require.NoError(err) groupPk2 := groupPriv2.PubKey().SerializeCompressed() _ = groupPk2 - groupPriv3, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv3, err := btcec.NewPrivateKey() require.NoError(err) groupPk3 := groupPriv3.PubKey().SerializeCompressed() _ = groupPk3 @@ -524,7 +524,7 @@ func TestAccessGroupTxnWithDerivedKey(t *testing.T) { require.NoError(t, err) senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) 
require.NoError(t, err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) // Helper funcs _submitAuthorizeDerivedKeyTxn := func(accessGroupLimitKey AccessGroupLimitKey, count int) string { diff --git a/lib/block_view_association_test.go b/lib/block_view_association_test.go index 81496d9dc..90f0198bc 100644 --- a/lib/block_view_association_test.go +++ b/lib/block_view_association_test.go @@ -7,7 +7,7 @@ import ( "sort" "testing" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/stretchr/testify/require" ) @@ -2255,7 +2255,7 @@ func _testAssociationsWithDerivedKey(t *testing.T) { require.NoError(t, err) senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) require.NoError(t, err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) // Helper funcs _submitAuthorizeDerivedKeyTxn := func(txnType TxnType, associationLimitKey AssociationLimitKey, count int) (string, error) { diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index 04fc8ddc5..59510f84a 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -2,7 +2,8 @@ package lib import ( "bytes" - "github.com/btcsuite/btcd/btcec" + + "github.com/btcsuite/btcd/btcec/v2" "github.com/dgraph-io/badger/v4" "github.com/pkg/errors" "github.com/stretchr/testify/assert" @@ -531,7 +532,7 @@ func _generateUnsignedDependentAtomicTransactions( // Generate the atomic transactions. for ii := 0; ii < numberOfTransactions-1; ii++ { // Generate a new public/private key pair. 
- privKey_ii, err := btcec.NewPrivateKey(btcec.S256()) + privKey_ii, err := btcec.NewPrivateKey() require.NoError(testMeta.t, err) pubKey_ii := privKey_ii.PubKey() receiverPrivateKeysBase58 = append(receiverPrivateKeysBase58, Base58CheckEncode( diff --git a/lib/block_view_balance_entry.go b/lib/block_view_balance_entry.go index 563463494..84f6391e4 100644 --- a/lib/block_view_balance_entry.go +++ b/lib/block_view_balance_entry.go @@ -2,7 +2,7 @@ package lib import ( "fmt" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" @@ -209,7 +209,7 @@ func (bav *UtxoView) GetBalanceEntry(holderPkid *PKID, creatorPkid *PKID, isDAOC return &BalanceEntry{ CreatorPKID: creatorPkid, HODLerPKID: holderPkid, - BalanceNanos: *uint256.NewInt(), + BalanceNanos: *uint256.NewInt(0), } } return balanceEntry @@ -275,7 +275,7 @@ func (bav *UtxoView) HelpConnectCoinTransfer( txMeta := txn.TxnMeta.(*CreatorCoinTransferMetadataa) receiverPublicKey = txMeta.ReceiverPublicKey profilePublicKey = txMeta.ProfilePublicKey - coinToTransferNanos = uint256.NewInt().SetUint64(txMeta.CreatorCoinToTransferNanos) + coinToTransferNanos = uint256.NewInt(txMeta.CreatorCoinToTransferNanos) } // Connect basic txn to get the total input and the total output without @@ -297,7 +297,7 @@ func (bav *UtxoView) HelpConnectCoinTransfer( if len(receiverPublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, RuleErrorCoinTransferInvalidReceiverPubKeySize } - if _, err = btcec.ParsePubKey(receiverPublicKey, btcec.S256()); err != nil { + if _, err = btcec.ParsePubKey(receiverPublicKey); err != nil { return 0, 0, nil, errors.Wrap( RuleErrorCoinTransferInvalidReceiverPubKey, err.Error()) } @@ -312,7 +312,7 @@ func (bav *UtxoView) HelpConnectCoinTransfer( if len(profilePublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, RuleErrorCoinTransferInvalidProfilePubKeySize } - if _, err = 
btcec.ParsePubKey(profilePublicKey, btcec.S256()); err != nil { + if _, err = btcec.ParsePubKey(profilePublicKey); err != nil { return 0, 0, nil, errors.Wrap( RuleErrorCoinTransferInvalidProfilePubKey, err.Error()) } @@ -393,7 +393,7 @@ func (bav *UtxoView) HelpConnectCoinTransfer( receiverBalanceEntry = &BalanceEntry{ HODLerPKID: receiverPKID.PKID, CreatorPKID: creatorPKID.PKID, - BalanceNanos: *uint256.NewInt(), + BalanceNanos: *uint256.NewInt(0), } } @@ -401,10 +401,10 @@ func (bav *UtxoView) HelpConnectCoinTransfer( prevSenderBalanceEntry := *senderBalanceEntry // Subtract the number of coins being given from the sender and add them to the receiver. - senderBalanceEntry.BalanceNanos = *uint256.NewInt().Sub( + senderBalanceEntry.BalanceNanos = *uint256.NewInt(0).Sub( &senderBalanceEntry.BalanceNanos, coinToTransferNanos) - receiverBalanceEntry.BalanceNanos = *uint256.NewInt().Add( + receiverBalanceEntry.BalanceNanos = *uint256.NewInt(0).Add( &receiverBalanceEntry.BalanceNanos, coinToTransferNanos) @@ -418,10 +418,10 @@ func (bav *UtxoView) HelpConnectCoinTransfer( // // CreatorCoins can't exceed a uint64 if senderBalanceEntry.BalanceNanos.Uint64() < bav.Params.CreatorCoinAutoSellThresholdNanos { - receiverBalanceEntry.BalanceNanos = *uint256.NewInt().Add( + receiverBalanceEntry.BalanceNanos = *uint256.NewInt(0).Add( &receiverBalanceEntry.BalanceNanos, &senderBalanceEntry.BalanceNanos) - senderBalanceEntry.BalanceNanos = *uint256.NewInt() + senderBalanceEntry.BalanceNanos = *uint256.NewInt(0) senderBalanceEntry.HasPurchased = false } } @@ -433,7 +433,7 @@ func (bav *UtxoView) HelpConnectCoinTransfer( bav._deleteBalanceEntryMappings(receiverBalanceEntry, receiverPublicKey, profilePublicKey, isDAOCoin) bav._setBalanceEntryMappings(receiverBalanceEntry, isDAOCoin) - if senderBalanceEntry.BalanceNanos.Gt(uint256.NewInt()) { + if senderBalanceEntry.BalanceNanos.Gt(uint256.NewInt(0)) { bav._setBalanceEntryMappings(senderBalanceEntry, isDAOCoin) } diff --git 
a/lib/block_view_bitcoin.go b/lib/block_view_bitcoin.go index e4ea9667e..96e68aeab 100644 --- a/lib/block_view_bitcoin.go +++ b/lib/block_view_bitcoin.go @@ -2,11 +2,11 @@ package lib import ( "fmt" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" "github.com/pkg/errors" "math" "math/big" diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index 3325495cb..9a4f64f0f 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -11,7 +11,7 @@ import ( "strings" "testing" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/wire" merkletree "github.com/deso-protocol/go-merkle-tree" "github.com/dgraph-io/badger/v4" @@ -163,7 +163,7 @@ func _privStringToKeys(t *testing.T, privString string) (*btcec.PrivateKey, *btc result, _, err := Base58CheckDecodePrefix(privString, 1) require.NoError(err) result = result[:len(result)-1] - return btcec.PrivKeyFromBytes(btcec.S256(), result) + return btcec.PrivKeyFromBytes(result) } func _updateUSDCentsPerBitcoinExchangeRate(t *testing.T, chain *Blockchain, db *badger.DB, diff --git a/lib/block_view_creator_coin.go b/lib/block_view_creator_coin.go index 43718e611..ed7e18cce 100644 --- a/lib/block_view_creator_coin.go +++ b/lib/block_view_creator_coin.go @@ -2,7 +2,7 @@ package lib import ( "fmt" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/holiman/uint256" "github.com/pkg/errors" "math" @@ -289,7 +289,7 @@ func (bav *UtxoView) _disconnectCreatorCoin( creatorBalanceEntry = &BalanceEntry{ HODLerPKID: creatorPKID, CreatorPKID: creatorPKID, - BalanceNanos: *uint256.NewInt(), + BalanceNanos: *uint256.NewInt(0), } } @@ -785,7 +785,7 @@ func (bav *UtxoView) HelpConnectCreatorCoinBuy( 
existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos, creatorCoinToMintNanos) } // Setting the value in this way is guaranteed to not mess up the prevCoinEntry - existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().SetUint64( + existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos = *uint256.NewInt( existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos.Uint64() + creatorCoinToMintNanos) // Calculate the *Creator Coin nanos* to give as a founder reward. @@ -873,7 +873,7 @@ func (bav *UtxoView) HelpConnectCreatorCoinBuy( HODLerPKID: hodlerPKID, // The creator is the owner of the profile that corresponds to the coin. CreatorPKID: creatorPKID, - BalanceNanos: *uint256.NewInt(), + BalanceNanos: *uint256.NewInt(0), } } @@ -904,7 +904,7 @@ func (bav *UtxoView) HelpConnectCreatorCoinBuy( creatorBalanceEntry = &BalanceEntry{ HODLerPKID: hodlerPKID, CreatorPKID: creatorPKID, - BalanceNanos: *uint256.NewInt(), + BalanceNanos: *uint256.NewInt(0), } } } @@ -950,8 +950,8 @@ func (bav *UtxoView) HelpConnectCreatorCoinBuy( } // Finally increment the buyerBalanceEntry.BalanceNanos to reflect // the purchased coinsBuyerGetsNanos. If coinsBuyerGetsNanos is greater than 0, we set HasPurchased to true. 
- buyerBalanceEntry.BalanceNanos = *uint256.NewInt().Add( - &buyerBalanceEntry.BalanceNanos, uint256.NewInt().SetUint64(coinsBuyerGetsNanos)) + buyerBalanceEntry.BalanceNanos = *uint256.NewInt(0).Add( + &buyerBalanceEntry.BalanceNanos, uint256.NewInt(coinsBuyerGetsNanos)) buyerBalanceEntry.HasPurchased = true // If the creator is buying their own coin, this will just be modifying @@ -986,9 +986,9 @@ func (bav *UtxoView) HelpConnectCreatorCoinBuy( bav._setProfileEntryMappings(existingProfileEntry) } // CreatorCoin balances can't exceed uint64 - creatorBalanceEntry.BalanceNanos = *uint256.NewInt().Add( + creatorBalanceEntry.BalanceNanos = *uint256.NewInt(0).Add( &creatorBalanceEntry.BalanceNanos, - uint256.NewInt().SetUint64(creatorCoinFounderRewardNanos)) + uint256.NewInt(creatorCoinFounderRewardNanos)) // At this point the balances for the buyer and the creator should be correct // so set the mappings in the view. @@ -1241,7 +1241,7 @@ func (bav *UtxoView) HelpConnectCreatorCoinSell( "is selling %v exceeds CreatorCoin nanos in circulation %v", creatorCoinToSellNanos, existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos) } - existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().SetUint64( + existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos = *uint256.NewInt( existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos.Uint64() - creatorCoinToSellNanos) // Check if this is a complete sell of the seller's remaining creator coins @@ -1256,7 +1256,7 @@ func (bav *UtxoView) HelpConnectCreatorCoinSell( // It's okay to modify these values because they are saved in the PrevCoinEntry. if existingProfileEntry.CreatorCoinEntry.NumberOfHolders == 0 { existingProfileEntry.CreatorCoinEntry.DeSoLockedNanos = 0 - existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos = *uint256.NewInt() + existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos = *uint256.NewInt(0) } // Save the seller's balance before we modify it. 
We don't need to save the @@ -1271,9 +1271,9 @@ func (bav *UtxoView) HelpConnectCreatorCoinSell( // below CreatorCoinAutoSellThresholdNanos. // // CreatorCoin balances can't exceed uint64 - sellerBalanceEntry.BalanceNanos = *uint256.NewInt().Sub( + sellerBalanceEntry.BalanceNanos = *uint256.NewInt(0).Sub( &sellerBalanceEntry.BalanceNanos, - uint256.NewInt().SetUint64(creatorCoinToSellNanos)) + uint256.NewInt(creatorCoinToSellNanos)) // If the seller's balance will be zero after this transaction, set HasPurchased to false // diff --git a/lib/block_view_dao_coin.go b/lib/block_view_dao_coin.go index 9e0e7930c..36ef321c5 100644 --- a/lib/block_view_dao_coin.go +++ b/lib/block_view_dao_coin.go @@ -2,7 +2,7 @@ package lib import ( "fmt" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/holiman/uint256" "github.com/pkg/errors" "reflect" @@ -68,7 +68,7 @@ func (bav *UtxoView) _disconnectDAOCoin( transactorBalanceEntry = &BalanceEntry{ CreatorPKID: creatorPKID, HODLerPKID: hodlerPKID, - BalanceNanos: *uint256.NewInt(), + BalanceNanos: *uint256.NewInt(0), } } @@ -84,7 +84,7 @@ func (bav *UtxoView) _disconnectDAOCoin( return fmt.Errorf("_disconnectDAOCoin: Must mint more than zero coins; this should never happen") } // Coins minted + prev coin entry's coins in circulation matches new coin entry's coins in circulation - CoinsInCirculationPlusCoinsToMintNanos := uint256.NewInt().Add( + CoinsInCirculationPlusCoinsToMintNanos := uint256.NewInt(0).Add( &operationData.PrevCoinEntry.CoinsInCirculationNanos, &txMeta.CoinsToMintNanos) if !existingProfileEntry.DAOCoinEntry.CoinsInCirculationNanos.Eq( @@ -97,7 +97,7 @@ func (bav *UtxoView) _disconnectDAOCoin( } // Check that creator's current balance is equal to previous balance plus coins to mint. 
Note: the creator is // the transactor in this case - PrevBalanceNanosPlusCoinsToMintNanos := uint256.NewInt().Add( + PrevBalanceNanosPlusCoinsToMintNanos := uint256.NewInt(0).Add( &operationData.PrevCreatorBalanceEntry.BalanceNanos, &txMeta.CoinsToMintNanos) if !transactorBalanceEntry.BalanceNanos.Eq(PrevBalanceNanosPlusCoinsToMintNanos) { @@ -118,7 +118,7 @@ func (bav *UtxoView) _disconnectDAOCoin( return fmt.Errorf("_disconnctDAOCoin: Must burn more than zero coins; this should never happen") } // prev coin entry's coins in circulation minus coins burned matches new coin entry's coins in circulation - PrevCoinsInCirculationMinusCoinsToBurnNanos := uint256.NewInt().Sub( + PrevCoinsInCirculationMinusCoinsToBurnNanos := uint256.NewInt(0).Sub( &operationData.PrevCoinEntry.CoinsInCirculationNanos, &txMeta.CoinsToBurnNanos) if !existingProfileEntry.DAOCoinEntry.CoinsInCirculationNanos.Eq( @@ -133,7 +133,7 @@ func (bav *UtxoView) _disconnectDAOCoin( // prev balance entry - coins burned matches the new balance entry's balance // Check that transactor's current balance is equal to previous balance minus coins to mint. Note: the creator is // the transactor in this case - PrevBalanceNanosMinusCoinsToBurnNanos := uint256.NewInt().Sub( + PrevBalanceNanosMinusCoinsToBurnNanos := uint256.NewInt(0).Sub( &operationData.PrevTransactorBalanceEntry.BalanceNanos, &txMeta.CoinsToBurnNanos) if !transactorBalanceEntry.BalanceNanos.Eq(PrevBalanceNanosMinusCoinsToBurnNanos) { @@ -227,7 +227,7 @@ func (bav *UtxoView) _disconnectDAOCoinTransfer( PkToStringBoth(currentTxn.PublicKey), PkToStringBoth(txMeta.ProfilePublicKey)) } senderPrevBalanceNanos := operationData.PrevSenderBalanceEntry.BalanceNanos - senderCurrBalanceNanos := *uint256.NewInt() + senderCurrBalanceNanos := *uint256.NewInt(0) // Since the sender may have given away their whole balance, their BalanceEntry can be nil. 
if senderBalanceEntry != nil && !senderBalanceEntry.isDeleted { // This assignment is OK because we never modify values in-place @@ -245,7 +245,7 @@ func (bav *UtxoView) _disconnectDAOCoinTransfer( PkToStringBoth(currentTxn.PublicKey), PkToStringBoth(txMeta.ProfilePublicKey)) } receiverCurrBalanceNanos := receiverBalanceEntry.BalanceNanos - receiverPrevBalanceNanos := *uint256.NewInt() + receiverPrevBalanceNanos := *uint256.NewInt(0) if operationData.PrevReceiverBalanceEntry != nil { // This assignment is OK because we never modify values in-place receiverPrevBalanceNanos = operationData.PrevReceiverBalanceEntry.BalanceNanos @@ -266,8 +266,8 @@ func (bav *UtxoView) _disconnectDAOCoinTransfer( } // Sanity check the sender's increase equals the receiver's decrease after disconnect. - senderBalanceIncrease := uint256.NewInt().Sub(&senderPrevBalanceNanos, &senderCurrBalanceNanos) - receiverBalanceDecrease := uint256.NewInt().Sub(&receiverCurrBalanceNanos, &receiverPrevBalanceNanos) + senderBalanceIncrease := uint256.NewInt(0).Sub(&senderPrevBalanceNanos, &senderCurrBalanceNanos) + receiverBalanceDecrease := uint256.NewInt(0).Sub(&receiverCurrBalanceNanos, &receiverPrevBalanceNanos) if !senderBalanceIncrease.Eq(receiverBalanceDecrease) { return fmt.Errorf("_disconnectDAOCoinTransfer: Sender's balance increase "+ "of %d will not equal the receiver's balance decrease of %v after disconnect.", @@ -331,7 +331,7 @@ func (bav *UtxoView) HelpConnectDAOCoinInitialization(txn *MsgDeSoTxn, txHash *B return 0, 0, nil, nil, RuleErrorDAOCoinInvalidPubKeySize } - if _, err = btcec.ParsePubKey(txMeta.ProfilePublicKey, btcec.S256()); err != nil { + if _, err = btcec.ParsePubKey(txMeta.ProfilePublicKey); err != nil { return 0, 0, nil, nil, errors.Wrap(RuleErrorDAOCoinInvalidPubKey, err.Error()) } @@ -381,14 +381,14 @@ func (bav *UtxoView) HelpConnectDAOCoinMint( // // if CoinsInCirculationNanos > MaxUint256 - CoinsToMintNanos if 
creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos.Gt( - uint256.NewInt().Sub(MaxUint256, &txMeta.CoinsToMintNanos)) { + uint256.NewInt(0).Sub(MaxUint256, &txMeta.CoinsToMintNanos)) { return 0, 0, nil, errors.Wrapf( RuleErrorOverflowWhileMintingDAOCoins, fmt.Sprintf( "_connectDAOCoin: Overflow while summing CoinsInCirculationNanos and CoinsToMinNanos: %v, %v", creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos, txMeta.CoinsToMintNanos)) } // CoinsInCirculationNanos = CoinsInCirculationNanos + CoinsToMintNanos - creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().Add( + creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt(0).Add( &creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos, &txMeta.CoinsToMintNanos) // Increase Balance entry for owner @@ -398,7 +398,7 @@ func (bav *UtxoView) HelpConnectDAOCoinMint( profileOwnerBalanceEntry = &BalanceEntry{ HODLerPKID: hodlerPKID, CreatorPKID: creatorPKID, - BalanceNanos: *uint256.NewInt(), + BalanceNanos: *uint256.NewInt(0), } } @@ -407,14 +407,14 @@ func (bav *UtxoView) HelpConnectDAOCoinMint( // Check for overflow of the uint256 // if profileOwnerBalanceEntry.BalanceNanos > MaxUint256-txMeta.CoinsToMintNanos - if profileOwnerBalanceEntry.BalanceNanos.Gt(uint256.NewInt().Sub( + if profileOwnerBalanceEntry.BalanceNanos.Gt(uint256.NewInt(0).Sub( MaxUint256, &txMeta.CoinsToMintNanos)) { return 0, 0, nil, fmt.Errorf( "_connectDAOCoin: Overflow while summing profileOwnerBalanceEntry.BalanceNanos and CoinsToMintNanos: %v, %v", profileOwnerBalanceEntry.BalanceNanos, txMeta.CoinsToMintNanos) } - profileOwnerBalanceEntry.BalanceNanos = *uint256.NewInt().Add( + profileOwnerBalanceEntry.BalanceNanos = *uint256.NewInt(0).Add( &profileOwnerBalanceEntry.BalanceNanos, &txMeta.CoinsToMintNanos) bav._setDAOCoinBalanceEntryMappings(profileOwnerBalanceEntry) @@ -496,13 +496,13 @@ func (bav *UtxoView) HelpConnectDAOCoinBurn( // Now we're safe to burn the coins // Reduce the total 
number of coins in circulation prevCoinEntry := creatorProfileEntry.DAOCoinEntry - creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().Sub( + creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt(0).Sub( &creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos, &daoCoinToBurn) // Burn them from the burner's balance entry prevTransactorBalanceEntry := *burnerBalanceEntry - burnerBalanceEntry.BalanceNanos = *uint256.NewInt().Sub( + burnerBalanceEntry.BalanceNanos = *uint256.NewInt(0).Sub( &burnerBalanceEntry.BalanceNanos, &daoCoinToBurn) @@ -517,7 +517,7 @@ func (bav *UtxoView) HelpConnectDAOCoinBurn( // Set the new BalanceEntry in our mappings for the burner and set the // ProfileEntry mappings as well since everything is up to date. - if burnerBalanceEntry.BalanceNanos.Gt(uint256.NewInt()) { + if burnerBalanceEntry.BalanceNanos.Gt(uint256.NewInt(0)) { bav._setDAOCoinBalanceEntryMappings(burnerBalanceEntry) } bav._setProfileEntryMappings(creatorProfileEntry) diff --git a/lib/block_view_dao_coin_limit_order.go b/lib/block_view_dao_coin_limit_order.go index 6b8b5d851..93f09696c 100644 --- a/lib/block_view_dao_coin_limit_order.go +++ b/lib/block_view_dao_coin_limit_order.go @@ -53,7 +53,7 @@ func (bav *UtxoView) getAdjustedDAOCoinBalanceForUserInBaseUnits( return nil, err } return adjustBalance( - uint256.NewInt().SetUint64(transactorDESOBalanceNanos), delta) + uint256.NewInt(transactorDESOBalanceNanos), delta) } // If we get here, we know we're dealing with a DAO coin now. 
@@ -62,7 +62,7 @@ func (bav *UtxoView) getAdjustedDAOCoinBalanceForUserInBaseUnits( // If the balance entry doesn't exist or is deleted then return zero if transactorBalanceEntry == nil || transactorBalanceEntry.isDeleted { - return adjustBalance(uint256.NewInt(), delta) + return adjustBalance(uint256.NewInt(0), delta) } // Make a copy and return just to be safe @@ -128,7 +128,7 @@ func (bav *UtxoView) balanceChange( oldBalanceEntry = &BalanceEntry{ HODLerPKID: userPKID, CreatorPKID: daoCoinPKID, - BalanceNanos: *uint256.NewInt(), + BalanceNanos: *uint256.NewInt(0), } } } @@ -483,7 +483,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( } if updatedTransactorOrderQuantityToFill.IsZero() { // Transactor's order was fully filled. - transactorOrder.QuantityToFillInBaseUnits = uint256.NewInt() + transactorOrder.QuantityToFillInBaseUnits = uint256.NewInt(0) orderFilled = true transactorOrderFilledOrder.IsFulfilled = true } else { @@ -916,7 +916,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( newBalanceEntry = &BalanceEntry{ HODLerPKID: &userPKID, CreatorPKID: &daoCoinPKID, - BalanceNanos: *uint256.NewInt(), + BalanceNanos: *uint256.NewInt(0), } } else { // Otherwise, we create a copy of the previous balance entry before updating. @@ -1252,7 +1252,7 @@ func _calculateDAOCoinsTransferredInLimitOrderMatch( transactorQuantityToFillInBaseUnits.Eq(matchingOrderQuantityToBuy) { // The matching order fully fills the transactor's order, so there won't be anything // left to fill after this order is matched. - updatedTransactorQuantityToFillInBaseUnits := uint256.NewInt() + updatedTransactorQuantityToFillInBaseUnits := uint256.NewInt(0) // The transactor quantity specifies the amount of coin they want to sell // and their order is fully filled. We use the matching order's exchange @@ -1296,7 +1296,7 @@ func _calculateDAOCoinsTransferredInLimitOrderMatch( // than the other way around. 
// There is nothing left in the matching order - updatedMatchingQuantityToFillInBaseUnits := uint256.NewInt() + updatedMatchingQuantityToFillInBaseUnits := uint256.NewInt(0) // We calculate what is left over for the transactor's order. Note that matchingOrderQuantityToBuy // can't overflow because we checked it earlier. @@ -1335,7 +1335,7 @@ func _calculateDAOCoinsTransferredInLimitOrderMatch( if transactorQuantityToFillInBaseUnits.Lt(matchingOrderQuantityToSell) || transactorQuantityToFillInBaseUnits.Eq(matchingOrderQuantityToSell) { // The matching order fulfills the transactor's order. - updatedTransactorQuantityToFillInBaseUnits := uint256.NewInt() + updatedTransactorQuantityToFillInBaseUnits := uint256.NewInt(0) // The transactor quantity specifies the amount of coin they want to buy // and their order is fully fulfilled. @@ -1370,7 +1370,7 @@ func _calculateDAOCoinsTransferredInLimitOrderMatch( // in terms of quantity. // The matching order has no quantity left after this match. - updatedMatchingQuantityToFillInBaseUnits := uint256.NewInt() + updatedMatchingQuantityToFillInBaseUnits := uint256.NewInt(0) // We calculate what is left over for the transactor's order. updatedTransactorQuantityToFillInBaseUnits, err := SafeUint256().Sub( @@ -1400,7 +1400,7 @@ func _calculateDAOCoinsTransferredInLimitOrderMatch( if transactorQuantityToFillInBaseUnits.Lt(matchingOrder.QuantityToFillInBaseUnits) || transactorQuantityToFillInBaseUnits.Eq(matchingOrder.QuantityToFillInBaseUnits) { // The matching order will fully fill the transactor's order. - updatedTransactorQuantityToFillInBaseUnits := uint256.NewInt() + updatedTransactorQuantityToFillInBaseUnits := uint256.NewInt(0) // We calculate what is left for the matching order. updatedMatchingQuantityToFillInBaseUnits, err := SafeUint256().Sub( @@ -1445,7 +1445,7 @@ func _calculateDAOCoinsTransferredInLimitOrderMatch( } // If we get here, the transactor's order fully covers the matching order. 
- updatedMatchingQuantityToFillInBaseUnits := uint256.NewInt() + updatedMatchingQuantityToFillInBaseUnits := uint256.NewInt(0) // We calculate what is left for the transactor. updatedTransactorQuantityToFillInBaseUnits, err := SafeUint256().Sub( @@ -1769,7 +1769,7 @@ func (bav *UtxoView) IsValidDAOCoinLimitOrder(order *DAOCoinLimitOrderEntry) err if err != nil { return err } - if baseUnitsToBuy.Eq(uint256.NewInt()) { + if baseUnitsToBuy.Eq(uint256.NewInt(0)) { return errors.Wrapf(RuleErrorDAOCoinLimitOrderTotalCostIsLessThanOneNano, "baseUnitsToBuy: ") } // If buying $DESO, validate that qty to buy is less than the max uint64. @@ -1782,7 +1782,7 @@ func (bav *UtxoView) IsValidDAOCoinLimitOrder(order *DAOCoinLimitOrderEntry) err if err != nil { return err } - if baseUnitsToSell.Eq(uint256.NewInt()) { + if baseUnitsToSell.Eq(uint256.NewInt(0)) { return errors.Wrapf(RuleErrorDAOCoinLimitOrderTotalCostIsLessThanOneNano, "baseUnitsToSell: ") } @@ -1997,7 +1997,7 @@ func (bav *UtxoView) GetDESONanosToFillOrder(transactorOrder *DAOCoinLimitOrderE // and add that as an additional fee when adding inputs and outputs. var lastSeenOrder *DAOCoinLimitOrderEntry - desoNanosToFulfillOrders := uint256.NewInt() + desoNanosToFulfillOrders := uint256.NewInt(0) transactorQuantityToFill := transactorOrder.QuantityToFillInBaseUnits.Clone() for transactorQuantityToFill.GtUint64(0) { diff --git a/lib/block_view_dao_coin_limit_order_test.go b/lib/block_view_dao_coin_limit_order_test.go index 26d2e0ece..1749aa525 100644 --- a/lib/block_view_dao_coin_limit_order_test.go +++ b/lib/block_view_dao_coin_limit_order_test.go @@ -152,9 +152,9 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { // Store how many $DESO and DAO coin units will be transferred. 
bb, _ := big.NewInt(0).SetString("100000000", 10) - daoCoinQuantityChange := uint256.NewInt() + daoCoinQuantityChange := uint256.NewInt(0) daoCoinQuantityChange.SetFromBig(bb) - desoQuantityChange := uint256.NewInt().SetUint64(1000) + desoQuantityChange := uint256.NewInt(1000) // Mint DAO coins for m0. { @@ -168,7 +168,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { //daoCoinTransferMetadata := DAOCoinTransferMetadata{ // ProfilePublicKey: m0PkBytes, - // DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(3000), + // DAOCoinToTransferNanos: *uint256.NewInt(3000), // ReceiverPublicKey: m1PkBytes, //} // @@ -270,7 +270,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Store how many $DESO and DAO coin units will be transferred. bb, _ = big.NewInt(0).SetString("100", 10) - daoCoinQuantity := uint256.NewInt() + daoCoinQuantity := uint256.NewInt(0) daoCoinQuantity.SetFromBig(bb) { // Construct an ask from m0. See above for description @@ -300,7 +300,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Construct a bid from m1. See above for description val, _ := uint256.FromBig(big.NewInt(1)) - newQuantity := uint256.NewInt().Sub(daoCoinQuantity, val) + newQuantity := uint256.NewInt(0).Sub(daoCoinQuantity, val) exchangeRate, err := CalculateScaledExchangeRateFromString("1.0") require.NoError(err) metadataM1 := DAOCoinLimitOrderMetadata{ @@ -328,7 +328,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Store how many $DESO and DAO coin units will be transferred. bb, _ = big.NewInt(0).SetString("100", 10) - daoCoinQuantity := uint256.NewInt() + daoCoinQuantity := uint256.NewInt(0) daoCoinQuantity.SetFromBig(bb) { // Construct an ask from m0. See above for description @@ -358,7 +358,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Construct a bid from m1. 
See above for description val, _ := uint256.FromBig(big.NewInt(1)) - newQuantity := uint256.NewInt().Sub(daoCoinQuantity, val) + newQuantity := uint256.NewInt(0).Sub(daoCoinQuantity, val) exchangeRate, err := CalculateScaledExchangeRateFromString("1.2") require.NoError(err) metadataM1 := DAOCoinLimitOrderMetadata{ @@ -384,7 +384,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Store how many $DESO and DAO coin units will be transferred. bb, _ = big.NewInt(0).SetString("100", 10) - daoCoinQuantity := uint256.NewInt() + daoCoinQuantity := uint256.NewInt(0) daoCoinQuantity.SetFromBig(bb) { // Construct an ask from m0. See above for description @@ -414,7 +414,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Construct a bid from m1. See above for description val, _ := uint256.FromBig(big.NewInt(1)) - newQuantity := uint256.NewInt().Sub(daoCoinQuantity, val) + newQuantity := uint256.NewInt(0).Sub(daoCoinQuantity, val) exchangeRate, err := CalculateScaledExchangeRateFromString("1.0") require.NoError(err) metadataM1 := DAOCoinLimitOrderMetadata{ @@ -441,7 +441,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Store how many $DESO and DAO coin units will be transferred. bb, _ = big.NewInt(0).SetString("100", 10) - daoCoinQuantity := uint256.NewInt() + daoCoinQuantity := uint256.NewInt(0) daoCoinQuantity.SetFromBig(bb) { // Construct an ask from m0. See above for description @@ -471,7 +471,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Construct a bid from m1. 
See above for description val, _ := uint256.FromBig(big.NewInt(1)) - newQuantity := uint256.NewInt().Sub(daoCoinQuantity, val) + newQuantity := uint256.NewInt(0).Sub(daoCoinQuantity, val) exchangeRate, err := CalculateScaledExchangeRateFromString("1.000001") require.NoError(err) metadataM1 := DAOCoinLimitOrderMetadata{ @@ -502,7 +502,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Store how many $DESO and DAO coin units will be transferred. bb, _ = big.NewInt(0).SetString("100", 10) - daoCoinQuantity := uint256.NewInt() + daoCoinQuantity := uint256.NewInt(0) daoCoinQuantity.SetFromBig(bb) { // Construct an ask from m0. See above for description @@ -532,7 +532,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Construct a bid from m1. See above for description val, _ := uint256.FromBig(big.NewInt(1)) - newQuantity := uint256.NewInt().Sub(daoCoinQuantity, val) + newQuantity := uint256.NewInt(0).Sub(daoCoinQuantity, val) exchangeRate, err := CalculateScaledExchangeRateFromString("0.9") require.NoError(err) metadataM1 := DAOCoinLimitOrderMetadata{ @@ -567,7 +567,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { ScaledExchangeRateCoinsToSellPerCoinToBuy: MaxUint256, // Buy one nano of DESO with the DAO coin. This should work as long as one DAO coin // base unit costs less than one full DESO (which is our current balance). - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1), + QuantityToFillInBaseUnits: uint256.NewInt(1), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeImmediateOrCancel, BlockHeight: math.MaxUint32, @@ -590,7 +590,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { ScaledExchangeRateCoinsToSellPerCoinToBuy: MaxUint256, // Buy one nano of DESO with the DAO coin. This should work as long as one DAO coin // base unit costs less than one full DESO (which is our current balance). 
- QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1), + QuantityToFillInBaseUnits: uint256.NewInt(1), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeImmediateOrCancel, BlockHeight: math.MaxUint32, @@ -733,8 +733,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { // ----------------------- // Store how many $DESO and DAO coin units will be transferred. - daoCoinQuantityChange := uint256.NewInt().SetUint64(100) - desoQuantityChange := uint256.NewInt().SetUint64(10) + daoCoinQuantityChange := uint256.NewInt(100) + desoQuantityChange := uint256.NewInt(10) // Construct metadata for a m0 limit order: // * Buying: DAO coin @@ -823,7 +823,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // RuleErrorDAOCoinLimitOrderInvalidExchangeRate: zero { originalValue := metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy - metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy = uint256.NewInt() + metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy = uint256.NewInt(0) _, _, _, err = _doDAOCoinLimitOrderTxn( t, chain, db, params, feeRateNanosPerKb, m0Pub, m0Priv, metadataM0) @@ -836,7 +836,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // RuleErrorDAOCoinLimitOrderInvalidQuantity: zero { originalValue := metadataM0.QuantityToFillInBaseUnits - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt() + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(0) _, _, _, err = _doDAOCoinLimitOrderTxn( t, chain, db, params, feeRateNanosPerKb, m0Pub, m0Priv, metadataM0) @@ -871,7 +871,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // 100 * .009 = .9, which should truncate to 0 coins to sell metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRateFromString(".009") require.NoError(err) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(100) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(100) // Perform txn. 
_, _, _, err = _doDAOCoinLimitOrderTxn( @@ -888,8 +888,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { { originalPrice := metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy originalQuantity := metadataM0.QuantityToFillInBaseUnits - metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy = uint256.NewInt().SetUint64(1) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(1) + metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy = uint256.NewInt(1) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(1) // Perform txn. _, _, _, err = _doDAOCoinLimitOrderTxn( @@ -908,7 +908,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { originalQuantity := metadataM0.QuantityToFillInBaseUnits metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(1.0) require.NoError(err) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(math.MaxUint64) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(math.MaxUint64) _, _, _, err = _doDAOCoinLimitOrderTxn( t, chain, db, params, feeRateNanosPerKb, m0Pub, m0Priv, metadataM0) @@ -1007,14 +1007,14 @@ func TestDAOCoinLimitOrder(t *testing.T) { daoCoinMintMetadata := DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(1e4), + CoinsToMintNanos: *uint256.NewInt(1e4), } _daoCoinTxnWithTestMeta(testMeta, feeRateNanosPerKb, m0Pub, m0Priv, daoCoinMintMetadata) daoCoinTransferMetadata := DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(3000), + DAOCoinToTransferNanos: *uint256.NewInt(3000), ReceiverPublicKey: m1PkBytes, } @@ -1070,7 +1070,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { updatedM0DESOBalance) require.Equal( - *uint256.NewInt().Add(&originalM0DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), + *uint256.NewInt(0).Add(&originalM0DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), updatedM0DAOCoinBalance.BalanceNanos) // m1's order is 
fulfilled buying $DESO so: @@ -1082,7 +1082,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { int64(updatedM1DESOBalance)) require.Equal( - *uint256.NewInt().Sub(&originalM1DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), + *uint256.NewInt(0).Sub(&originalM1DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), updatedM1DAOCoinBalance.BalanceNanos) } @@ -1105,13 +1105,13 @@ func TestDAOCoinLimitOrder(t *testing.T) { // m1 submits order buying 20 $DESO @ 11 DAO coin / $DESO. metadataM1.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(11.0) require.NoError(err) - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(20) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(20) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m1Pub, m1Priv, metadataM1) // m1 submits order buying 5 $DESO nanos @ 12 DAO coin / $DESO. metadataM1.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(12.0) require.NoError(err) - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(5) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(5) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m1Pub, m1Priv, metadataM1) // Confirm 2 existing limit orders. @@ -1139,7 +1139,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // Construct metadata for m0's order buying 240 DAO coin nanos @ 1/8 $DESO / DAO coin. metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(0.125) // 1.0 / 8.0 = 0.125 require.NoError(err) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(240) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(240) // Confirm matching limit orders exist. 
orderEntries, err = dbAdapter.GetMatchingDAOCoinLimitOrders( @@ -1160,7 +1160,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(len(orderEntries), 1) metadataM1.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(11.0) require.NoError(err) - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(10) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(10) require.True(orderEntries[0].Eq(metadataM1.ToEntry(m1PKID.PKID, savedHeight, toPKID))) // Calculate updated $DESO balances. @@ -1172,8 +1172,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { updatedM1DAOCoinBalance := dbAdapter.GetBalanceEntry(m1PKID.PKID, m0PKID.PKID, true) // Calculate changes in $DESO and DAO coins. - daoCoinQuantityChange = uint256.NewInt().SetUint64(240) - desoQuantityChange := uint256.NewInt().SetUint64(20) + daoCoinQuantityChange = uint256.NewInt(240) + desoQuantityChange := uint256.NewInt(20) // m0's order to buy DAO coins is fulfilled so: // * His $DESO balance decreases and @@ -1184,7 +1184,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { int64(updatedM0DESOBalance)) require.Equal( - *uint256.NewInt().Add(&originalM0DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), + *uint256.NewInt(0).Add(&originalM0DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), updatedM0DAOCoinBalance.BalanceNanos) // m1's orders to buy $DESO are fulfilled so: @@ -1194,7 +1194,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { originalM1DESOBalance+desoQuantityChange.Uint64(), updatedM1DESOBalance) require.Equal( - *uint256.NewInt().Sub(&originalM1DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), + *uint256.NewInt(0).Sub(&originalM1DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), updatedM1DAOCoinBalance.BalanceNanos) } @@ -1211,7 +1211,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // m1 tries to cancel non-existent order. 
cancelMetadataM1 := DAOCoinLimitOrderMetadata{ - CancelOrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), + CancelOrderID: NewBlockHash(uint256.NewInt(1).Bytes()), } _, _, _, err = _doDAOCoinLimitOrderTxn( @@ -1250,7 +1250,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // m0 submits order buying 100 DAO coin units @ 10 $DESO / DAO coin. metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(10.0) require.NoError(err) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(100) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(100) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m0Pub, m0Priv, metadataM0) // Confirm order is stored. @@ -1270,7 +1270,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // m1 submits order selling 50 DAO coin units @ 5 $DESO / DAO coin. metadataM1.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(0.2) require.NoError(err) - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(250) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(250) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m1Pub, m1Priv, metadataM1) // m0's order is partially fulfilled with 75 coins remaining. m1's order is fully @@ -1281,7 +1281,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrders() require.NoError(err) require.Equal(len(orderEntries), 1) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(75) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(75) require.True(orderEntries[0].Eq(metadataM0.ToEntry(m0PKID.PKID, savedHeight, toPKID))) // Calculate updated $DESO balances. @@ -1293,8 +1293,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { updatedM1DAOCoinBalance := dbAdapter.GetBalanceEntry(m1PKID.PKID, m0PKID.PKID, true) // Calculate changes in $DESO and DAO coins. 
- daoCoinQuantityChange = uint256.NewInt().SetUint64(25) - desoQuantityChange := uint256.NewInt().SetUint64(250) + daoCoinQuantityChange = uint256.NewInt(25) + desoQuantityChange := uint256.NewInt(250) // m0's order buying DAO coins is partially fulfilled so: // * His $DESO balance decreases and @@ -1304,7 +1304,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { updatedM0DESOBalance) require.Equal( - *uint256.NewInt().Add(&originalM0DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), + *uint256.NewInt(0).Add(&originalM0DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), updatedM0DAOCoinBalance.BalanceNanos) // m1's order selling DAO coins is fulfilled so: @@ -1316,7 +1316,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { int64(updatedM1DESOBalance)) require.Equal( - *uint256.NewInt().Sub(&originalM1DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), + *uint256.NewInt(0).Sub(&originalM1DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), updatedM1DAOCoinBalance.BalanceNanos) // m0 cancels the remainder of his order. @@ -1339,7 +1339,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // m0 submits order buying 100 DAO coins @ 0.1 $DESO / DAO coin. metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(0.1) require.NoError(err) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(100) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(100) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m0Pub, m0Priv, metadataM0) // Confirm 1 existing limit order from m0. 
@@ -1426,7 +1426,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, ReceiverPublicKey: m2PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(m0BalanceEntry.BalanceNanos.Uint64() - 1), + DAOCoinToTransferNanos: *uint256.NewInt(m0BalanceEntry.BalanceNanos.Uint64() - 1), }, ) @@ -1480,7 +1480,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(7339), + DAOCoinToTransferNanos: *uint256.NewInt(7339), }, ) metadataM0.CancelOrderID = nil @@ -1539,7 +1539,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { daoCoinMintMetadata := DAOCoinMetadata{ ProfilePublicKey: m1PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(1e5), + CoinsToMintNanos: *uint256.NewInt(1e5), } // 29 @@ -1548,7 +1548,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // Transfer 10K nanos to M2 daoCoinTransferMetadata := DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1e4), + DAOCoinToTransferNanos: *uint256.NewInt(1e4), ReceiverPublicKey: m2PkBytes, } @@ -1565,7 +1565,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(10), + QuantityToFillInBaseUnits: uint256.NewInt(10), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -1586,7 +1586,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(5), + QuantityToFillInBaseUnits: uint256.NewInt(5), 
OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -1620,7 +1620,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(300), + QuantityToFillInBaseUnits: uint256.NewInt(300), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -1641,7 +1641,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { orders, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisDAOCoinPair(m1PKID.PKID, &ZeroPKID) require.NoError(err) require.Len(orders, 1) - require.True(orders[0].QuantityToFillInBaseUnits.Eq(uint256.NewInt().SetUint64(240))) + require.True(orders[0].QuantityToFillInBaseUnits.Eq(uint256.NewInt(240))) // Get balance entries for all users. m0BalanceEntryAfter := dbAdapter.GetBalanceEntry(m0PKID.PKID, m1PKID.PKID, true) @@ -1650,13 +1650,13 @@ func TestDAOCoinLimitOrder(t *testing.T) { m2Decrease, err := SafeUint256().Sub(&m2BalanceEntryBefore.BalanceNanos, &m2BalanceEntryAfter.BalanceNanos) require.NoError(err) - require.True(m2Decrease.Eq(uint256.NewInt().SetUint64(10))) + require.True(m2Decrease.Eq(uint256.NewInt(10))) m1Decrease, err := SafeUint256().Sub(&m1BalanceEntryBefore.BalanceNanos, &m1BalanceEntryAfter.BalanceNanos) require.NoError(err) - require.True(m1Decrease.Eq(uint256.NewInt().SetUint64(50))) + require.True(m1Decrease.Eq(uint256.NewInt(50))) - require.True(m0BalanceEntryAfter.BalanceNanos.Eq(uint256.NewInt().SetUint64(60))) + require.True(m0BalanceEntryAfter.BalanceNanos.Eq(uint256.NewInt(60))) m0DESOBalanceAfter := _getBalance(t, chain, mempool, m0Pub) m1DESOBalanceAfter := _getBalance(t, chain, mempool, m1Pub) @@ -1727,12 +1727,12 @@ func TestDAOCoinLimitOrder(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.9) require.NoError(err) 
queryEntry := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m1PKID.PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m1PKID.PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), } orderEntries, err = utxoView.GetNextLimitOrdersToFill(queryEntry, nil, savedHeight) @@ -1747,7 +1747,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(1.0) require.NoError(err) require.Equal(orderEntries[0].ScaledExchangeRateCoinsToSellPerCoinToBuy, exchangeRate) - require.Equal(orderEntries[0].QuantityToFillInBaseUnits, uint256.NewInt().SetUint64(240)) + require.Equal(orderEntries[0].QuantityToFillInBaseUnits, uint256.NewInt(240)) // m0 submits another order slightly better than previous. // Transactor: m0 @@ -1759,7 +1759,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { metadataM0.SellingDAOCoinCreatorPublicKey = &ZeroPublicKey metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(1.05) require.NoError(err) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(110) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(110) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m0Pub, m0Priv, metadataM0) orderEntries, err = utxoView._getAllDAOCoinLimitOrders() require.NoError(err) @@ -1779,23 +1779,23 @@ func TestDAOCoinLimitOrder(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(1.05) require.NoError(err) require.Equal(orderEntries[0].ScaledExchangeRateCoinsToSellPerCoinToBuy, exchangeRate) - require.Equal(orderEntries[0].QuantityToFillInBaseUnits, uint256.NewInt().SetUint64(110)) + require.Equal(orderEntries[0].QuantityToFillInBaseUnits, uint256.NewInt(110)) // Test get matching DAO coin limit 
orders. // Query with identical order as before but higher quantity. // Should match both of m0's orders with better listed first. - queryEntry.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(150) + queryEntry.QuantityToFillInBaseUnits = uint256.NewInt(150) orderEntries, err = utxoView.GetNextLimitOrdersToFill(queryEntry, nil, savedHeight) require.NoError(err) require.Equal(len(orderEntries), 2) exchangeRate, err = CalculateScaledExchangeRate(1.05) require.NoError(err) require.Equal(orderEntries[0].ScaledExchangeRateCoinsToSellPerCoinToBuy, exchangeRate) - require.Equal(orderEntries[0].QuantityToFillInBaseUnits, uint256.NewInt().SetUint64(110)) + require.Equal(orderEntries[0].QuantityToFillInBaseUnits, uint256.NewInt(110)) exchangeRate, err = CalculateScaledExchangeRate(1.0) require.NoError(err) require.Equal(orderEntries[1].ScaledExchangeRateCoinsToSellPerCoinToBuy, exchangeRate) - require.Equal(orderEntries[1].QuantityToFillInBaseUnits, uint256.NewInt().SetUint64(240)) + require.Equal(orderEntries[1].QuantityToFillInBaseUnits, uint256.NewInt(240)) } { @@ -1816,7 +1816,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), OperationType: DAOCoinLimitOrderOperationTypeBID, CancelOrderID: orderEntries[1].OrderID, } @@ -1854,7 +1854,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(160), + QuantityToFillInBaseUnits: uint256.NewInt(160), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -1901,7 +1901,7 @@ func 
TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(250), + QuantityToFillInBaseUnits: uint256.NewInt(250), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -1924,7 +1924,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrders() require.NoError(err) require.Equal(len(orderEntries), 2) - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(60) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(60) require.True(orderEntries[1].Eq(metadataM1.ToEntry(m1PKID.PKID, savedHeight, toPKID))) // m0 has 1 remaining open orders. @@ -1953,7 +1953,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(200), + QuantityToFillInBaseUnits: uint256.NewInt(200), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -1987,7 +1987,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2008,8 +2008,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { m1DAOCoinBalanceM0After := dbAdapter.GetBalanceEntry(m1PKID.PKID, m0PKID.PKID, true).BalanceNanos m1DAOCoinBalanceM1After := dbAdapter.GetBalanceEntry(m1PKID.PKID, m1PKID.PKID, 
true).BalanceNanos - daoCoinM0UnitsTransferred := uint256.NewInt().SetUint64(100) - daoCoinM1UnitsTransferred := uint256.NewInt().SetUint64(200) + daoCoinM0UnitsTransferred := uint256.NewInt(100) + daoCoinM1UnitsTransferred := uint256.NewInt(200) m0DAOCoinM0Decrease, err := SafeUint256().Sub(&m0DAOCoinBalanceM0Before, &m0DAOCoinBalanceM0After) require.NoError(err) @@ -2052,7 +2052,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), + QuantityToFillInBaseUnits: uint256.NewInt(50), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2074,7 +2074,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(110), + QuantityToFillInBaseUnits: uint256.NewInt(110), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2107,21 +2107,21 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(m0DESONanosIncrease, uint64(45)) m0DAOCoinUnitsDecrease, err := SafeUint256().Sub(&m0DAOCoinBalanceUnitsBefore, &m0DAOCoinBalanceUnitsAfter) require.NoError(err) - require.Equal(m0DAOCoinUnitsDecrease, uint256.NewInt().SetUint64(50)) + require.Equal(m0DAOCoinUnitsDecrease, uint256.NewInt(50)) // m1's accounting m1DESONanosIncrease := m1DESOBalanceNanosAfter - m1DESOBalanceNanosBefore require.Equal(m1DESONanosIncrease, uint64(50)) m1DAOCoinUnitsDecrease, err := SafeUint256().Sub(&m1DAOCoinBalanceUnitsBefore, &m1DAOCoinBalanceUnitsAfter) require.NoError(err) - require.Equal(m1DAOCoinUnitsDecrease, uint256.NewInt().SetUint64(60)) + 
require.Equal(m1DAOCoinUnitsDecrease, uint256.NewInt(60)) // m2's accounting m2DESONanosDecrease := m2DESOBalanceNanosBefore - m2DESOBalanceNanosAfter require.Equal(m2DESONanosDecrease, uint64(95)+_feeNanos()) m2DAOCoinUnitsIncrease, err := SafeUint256().Sub(&m2DAOCoinBalanceUnitsAfter, &m2DAOCoinBalanceUnitsBefore) require.NoError(err) - require.Equal(m2DAOCoinUnitsIncrease, uint256.NewInt().SetUint64(110)) + require.Equal(m2DAOCoinUnitsIncrease, uint256.NewInt(110)) } { @@ -2141,7 +2141,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(300), + QuantityToFillInBaseUnits: uint256.NewInt(300), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2162,7 +2162,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(600), + QuantityToFillInBaseUnits: uint256.NewInt(600), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2190,7 +2190,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(900), + QuantityToFillInBaseUnits: uint256.NewInt(900), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2216,21 +2216,21 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(m0DESONanosDecrease, uint64(30)) m0DAOCoinUnitsIncrease, err := 
SafeUint256().Sub(&m0DAOCoinBalanceUnitsAfter, &m0DAOCoinBalanceUnitsBefore) require.NoError(err) - require.Equal(m0DAOCoinUnitsIncrease, uint256.NewInt().SetUint64(300)) + require.Equal(m0DAOCoinUnitsIncrease, uint256.NewInt(300)) // m1's accounting m1DESONanosDecrease := m1DESOBalanceNanosBefore - m1DESOBalanceNanosAfter require.Equal(m1DESONanosDecrease, uint64(120)) m1DAOCoinUnitsIncrease, err := SafeUint256().Sub(&m1DAOCoinBalanceUnitsAfter, &m1DAOCoinBalanceUnitsBefore) require.NoError(err) - require.Equal(m1DAOCoinUnitsIncrease, uint256.NewInt().SetUint64(600)) + require.Equal(m1DAOCoinUnitsIncrease, uint256.NewInt(600)) // m2's accounting m2DESONanosIncrease := m2DESOBalanceNanosAfter - m2DESOBalanceNanosBefore require.Equal(m2DESONanosIncrease, uint64(150)-_feeNanos()) m2DAOCoinUnitsDecrease, err := SafeUint256().Sub(&m2DAOCoinBalanceUnitsBefore, &m2DAOCoinBalanceUnitsAfter) require.NoError(err) - require.Equal(m2DAOCoinUnitsDecrease, uint256.NewInt().SetUint64(900)) + require.Equal(m2DAOCoinUnitsDecrease, uint256.NewInt(900)) } { @@ -2250,7 +2250,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(89), + QuantityToFillInBaseUnits: uint256.NewInt(89), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2319,7 +2319,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(10), + QuantityToFillInBaseUnits: uint256.NewInt(10), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2381,7 +2381,7 @@ func TestDAOCoinLimitOrder(t 
*testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), + QuantityToFillInBaseUnits: uint256.NewInt(50), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2399,7 +2399,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), + QuantityToFillInBaseUnits: uint256.NewInt(50), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2633,7 +2633,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), + QuantityToFillInBaseUnits: uint256.NewInt(50), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2651,7 +2651,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), + QuantityToFillInBaseUnits: uint256.NewInt(50), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2735,7 +2735,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: 
uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2756,8 +2756,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { metadataM1 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, - ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(), - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(200), + ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(0), + QuantityToFillInBaseUnits: uint256.NewInt(200), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: 99, } @@ -2790,7 +2790,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(originalM1BalanceM1Coins, updatedM1BalanceM1Coins) // m1 submits a FillOrKill order buying 100 m1 DAO coins that is filled. - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(100) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(100) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m1Pub, m1Priv, metadataM1) orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrders() require.NoError(err) @@ -2824,7 +2824,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2845,8 +2845,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { metadataM1 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), - ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(), - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(200), + 
ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(0), + QuantityToFillInBaseUnits: uint256.NewInt(200), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeFillOrKill, } @@ -2872,7 +2872,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(originalM1BalanceM1Coins, updatedM1BalanceM1Coins) // m1 submits a FillOrKill order selling 100 m1 DAO coins that is filled. - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(100) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(100) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m1Pub, m1Priv, metadataM1) orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrders() require.NoError(err) @@ -2906,7 +2906,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2928,8 +2928,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { metadataM1 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, - ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(), - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(200), + ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(0), + QuantityToFillInBaseUnits: uint256.NewInt(200), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeImmediateOrCancel, } @@ -2967,7 +2967,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: 
uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2989,8 +2989,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { metadataM1 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), - ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(), - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(200), + ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(0), + QuantityToFillInBaseUnits: uint256.NewInt(200), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeImmediateOrCancel, } @@ -3052,14 +3052,14 @@ func TestDAOCoinLimitOrder(t *testing.T) { // Confirm that m0 cannot afford to fulfill m1's order. m1RequestedDESONanos, err := m1OrderEntry.BaseUnitsToBuyUint256() require.NoError(err) - require.True(m1RequestedDESONanos.Gt(uint256.NewInt().SetUint64(originalM0DESOBalance))) + require.True(m1RequestedDESONanos.Gt(uint256.NewInt(originalM0DESOBalance))) // m0 submits a FillOrKill order trying to fulfill m1's order. // m0 does not have sufficient $DESO. 
metadataM0 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, - ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(), + ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(0), QuantityToFillInBaseUnits: metadataM0.QuantityToFillInBaseUnits, OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeFillOrKill, @@ -3154,7 +3154,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { metadataM0 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), - ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(), + ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(0), QuantityToFillInBaseUnits: metadataM0.QuantityToFillInBaseUnits, OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeFillOrKill, @@ -3212,7 +3212,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3238,7 +3238,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), + QuantityToFillInBaseUnits: uint256.NewInt(50), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeFillOrKill, } @@ -3321,7 +3321,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // m0 submits an order selling m1 DAO coin units for $DESO. Order is stored. 
exchangeRate, err := CalculateScaledExchangeRate(1.0) require.NoError(err) - quantityToFill := uint256.NewInt().SetUint64(60) + quantityToFill := uint256.NewInt(60) metadataM0 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, @@ -3382,7 +3382,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3437,7 +3437,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m3PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(350), + QuantityToFillInBaseUnits: uint256.NewInt(350), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3472,7 +3472,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m3PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3498,12 +3498,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, 
SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), + QuantityToFillInBaseUnits: uint256.NewInt(1000), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3512,12 +3512,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.1) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), + QuantityToFillInBaseUnits: uint256.NewInt(1000), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3529,10 +3529,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(1000)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(100)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(1000)) // m1 = transactor, m0 = matching 
order updatedTransactorQuantityToFillInBaseUnits, @@ -3541,10 +3541,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(1000)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(1000)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(100)) } // Scenario 2: one BID, one ASK, matching orders w/ mismatched prices @@ -3553,12 +3553,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), + QuantityToFillInBaseUnits: uint256.NewInt(1000), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3567,12 +3567,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.2) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: 
NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(500), + QuantityToFillInBaseUnits: uint256.NewInt(500), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3585,10 +3585,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(500)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(500)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(2500)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(500)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(500)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(2500)) // m1 = transactor, m0 = matching order // m1 sells 500 DAO coin base units @ 10 $DESO / DAO coin. 
@@ -3598,10 +3598,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(500)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(5000)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(500)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(500)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(5000)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(500)) } // Scenario 3: m0 and m1 both submit BIDs that should match @@ -3610,12 +3610,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3624,12 +3624,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.1) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), 
// Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), + QuantityToFillInBaseUnits: uint256.NewInt(1000), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3642,10 +3642,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(1000)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(100)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(1000)) // m1 = transactor, m0 = matching order // m1 buys 1000 $DESO @ 0.1 DAO coin / $DESO. 
@@ -3655,10 +3655,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(1000)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(1000)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(100)) } // Scenario 4: m0 and m1 both submit BIDs that match, m1 gets a better price than expected @@ -3667,12 +3667,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3681,12 +3681,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.2) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: 
NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(250), + QuantityToFillInBaseUnits: uint256.NewInt(250), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3699,10 +3699,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(50)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(50)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(250)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(50)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(50)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(250)) // m1 = transactor, m0 = matching order // m1 buys 250 $DESO @ 0.1 DAO coins / $DESO. 
@@ -3712,10 +3712,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(75)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(250)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(25)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(75)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(250)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(25)) } // Scenario 5: m0 and m1 both submit ASKs that should match @@ -3724,12 +3724,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), + QuantityToFillInBaseUnits: uint256.NewInt(1000), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3738,12 +3738,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.1) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // 
Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3756,10 +3756,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(1000)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(100)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(1000)) // m1 = transactor, m0 = matching order // m1 sells 100 DAO coin base units @ 0.1 DAO coin / $DESO. 
@@ -3769,10 +3769,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(1000)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(1000)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(100)) } // Scenario 6: m0 and m1 both submit ASKs that match, m1 gets a better price than expected @@ -3781,12 +3781,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), + QuantityToFillInBaseUnits: uint256.NewInt(1000), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3795,12 +3795,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.2) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: 
NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), + QuantityToFillInBaseUnits: uint256.NewInt(50), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3813,10 +3813,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(750)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(50)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(250)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(750)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(50)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(250)) // m1 = transactor, m0 = matching order // m1 sells 50 DAO coin units for 0.1 DAO coin / $DESO. 
@@ -3826,10 +3826,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(500)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(500)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(50)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(500)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(500)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(50)) } // Scenario 7: @@ -3840,12 +3840,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), + QuantityToFillInBaseUnits: uint256.NewInt(1000), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3854,12 +3854,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.2) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used + OrderID: 
NewBlockHash(uint256.NewInt(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(500), + QuantityToFillInBaseUnits: uint256.NewInt(500), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3872,10 +3872,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(500)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(500)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(500)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(100)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(500)) // m1 = transactor, m0 = matching order // m1 buys 500 DAO coin units @ 10 DAO coin / $DESO. 
@@ -3885,10 +3885,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(500)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(500)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(50)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(500)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(500)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(50)) } } @@ -3902,7 +3902,7 @@ func TestComputeBaseUnitsToBuyUint256(t *testing.T) { if !worked { panic(fmt.Sprintf("Failed to convert sell quantity %v into bigint", quantityToSellStr)) } - selLValUint256 := uint256.NewInt() + selLValUint256 := uint256.NewInt(0) overflow := selLValUint256.SetFromBig(sellValBig) if overflow { panic(fmt.Sprintf("Failed to convert sell quantity %v into uint256 because of overflow", quantityToSellStr)) @@ -3914,7 +3914,7 @@ func TestComputeBaseUnitsToBuyUint256(t *testing.T) { if !worked { panic(fmt.Sprintf("Failed to convert buy quantity %v into bigint", quantityToBuyStr)) } - buyValUint256 := uint256.NewInt() + buyValUint256 := uint256.NewInt(0) overflow = buyValUint256.SetFromBig(buyValBig) if overflow { panic(fmt.Sprintf("Failed to convert buy quantity %v into uint256 because of overflow", quantityToBuyStr)) @@ -3925,10 +3925,10 @@ func TestComputeBaseUnitsToBuyUint256(t *testing.T) { assertEqual := func(exchangeRateFloat float64, quantityToSellInt int, quantityToBuyInt int) { exchangeRate, err := 
CalculateScaledExchangeRate(exchangeRateFloat) require.NoError(err) - quantityToSell := uint256.NewInt().SetUint64(uint64(quantityToSellInt)) + quantityToSell := uint256.NewInt(uint64(quantityToSellInt)) quantityToBuy, err := ComputeBaseUnitsToBuyUint256(exchangeRate, quantityToSell) require.NoError(err) - require.Equal(quantityToBuy, uint256.NewInt().SetUint64(uint64(quantityToBuyInt))) + require.Equal(quantityToBuy, uint256.NewInt(uint64(quantityToBuyInt))) // We also call assertEqualStr when this function is used assertEqualStr( @@ -3999,7 +3999,7 @@ func TestComputeBaseUnitsToBuyUint256(t *testing.T) { if !worked { panic(fmt.Sprintf("Failed to convert sell quantity %v into bigint", quantityToSellStr)) } - selLValUint256 := uint256.NewInt() + selLValUint256 := uint256.NewInt(0) overflow := selLValUint256.SetFromBig(sellValBig) if overflow { panic(fmt.Sprintf("Failed to convert sell quantity %v into uint256 because of overflow", quantityToSellStr)) @@ -4212,7 +4212,7 @@ func (txnData *DAOCoinLimitOrderMetadata) ToEntry( // one here for testing purposes as OrderID can't be nil. // Note: the OrderID is skipped when we compare if two // order entries are equal in these tests for this reason. 
- OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), + OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), TransactorPKID: transactorPKID, BuyingDAOCoinCreatorPKID: toPKID(txnData.BuyingDAOCoinCreatorPublicKey), SellingDAOCoinCreatorPKID: toPKID(txnData.SellingDAOCoinCreatorPublicKey), @@ -4307,7 +4307,7 @@ func TestFlushingDAOCoinLimitOrders(t *testing.T) { BuyingDAOCoinCreatorPublicKey: buying.PublicKey, SellingDAOCoinCreatorPublicKey: selling.PublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(quantity), + QuantityToFillInBaseUnits: uint256.NewInt(quantity), OperationType: operationType, FillType: fillType, } @@ -4391,7 +4391,7 @@ func TestFlushingDAOCoinLimitOrders(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, feeRateNanosPerKb, m0.Pub, m0.Priv, DAOCoinMetadata{ ProfilePublicKey: m0.PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(1e12), + CoinsToMintNanos: *uint256.NewInt(1e12), }) } { diff --git a/lib/block_view_dao_coin_test.go b/lib/block_view_dao_coin_test.go index 2dfa6b76b..4dd6d5b30 100644 --- a/lib/block_view_dao_coin_test.go +++ b/lib/block_view_dao_coin_test.go @@ -1,14 +1,13 @@ package lib import ( - "reflect" - "testing" - - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/dgraph-io/badger/v4" "github.com/holiman/uint256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "reflect" + "testing" ) func _daoCoinTxn(t *testing.T, chain *Blockchain, db *badger.DB, @@ -233,7 +232,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(100), + CoinsToMintNanos: *uint256.NewInt(100), }) require.Error(err) @@ -266,7 +265,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = 
_daoCoinTxn(t, chain, db, params, 10, m1Pub, m1Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(100), + CoinsToMintNanos: *uint256.NewInt(100), }) require.Error(err) require.Contains(err.Error(), RuleErrorOnlyProfileOwnerCanMintDAOCoin) @@ -287,7 +286,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(0), + CoinsToMintNanos: *uint256.NewInt(0), }) require.Error(err) require.Contains(err.Error(), RuleErrorDAOCoinMustMintNonZeroDAOCoin) @@ -302,7 +301,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(oneMCoins), + CoinsToMintNanos: *uint256.NewInt(oneMCoins), }, ) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -324,7 +323,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt().SetUint64(hundredKCoins), + CoinsToBurnNanos: *uint256.NewInt(hundredKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -342,7 +341,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m1Pub, m1Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt().SetUint64(100), + CoinsToBurnNanos: *uint256.NewInt(100), }) require.Error(err) require.Contains(err.Error(), RuleErrorDAOCoinBurnInsufficientCoins) @@ -358,7 +357,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m0Pub, m0Priv, 
DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, ReceiverPublicKey: m1PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(tenKCoins), + DAOCoinToTransferNanos: *uint256.NewInt(tenKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -387,7 +386,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt().SetUint64(tenKCoins), + CoinsToBurnNanos: *uint256.NewInt(tenKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -402,7 +401,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(tenKCoins), + CoinsToMintNanos: *uint256.NewInt(tenKCoins), }) daoBalanceEntry = DBGetBalanceEntryForHODLerAndCreatorPKIDs( db, chain.snapshot, m0PKID.PKID, m0PKID.PKID, true) @@ -426,7 +425,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, 10, m1Pub, m1Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt().SetUint64(oneKCoins), + CoinsToBurnNanos: *uint256.NewInt(oneKCoins), }) m1DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -457,7 +456,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(100), + CoinsToMintNanos: *uint256.NewInt(100), }) require.Error(err) require.Contains(err.Error(), RuleErrorDAOCoinCannotMintIfMintingIsDisabled) @@ -477,7 +476,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m0Pub, m0Priv, 
DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneMCoins), + DAOCoinToTransferNanos: *uint256.NewInt(oneMCoins), ReceiverPublicKey: m2PkBytes, }) require.Error(err) @@ -488,7 +487,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneKCoins), + DAOCoinToTransferNanos: *uint256.NewInt(oneKCoins), ReceiverPublicKey: m0PkBytes, }) require.Error(err) @@ -499,7 +498,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m2Pub, m2Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1), + DAOCoinToTransferNanos: *uint256.NewInt(1), ReceiverPublicKey: m0PkBytes, }) require.Error(err) @@ -510,7 +509,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m2PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneMCoins), + DAOCoinToTransferNanos: *uint256.NewInt(oneMCoins), ReceiverPublicKey: m2PkBytes, }) require.Error(err) @@ -521,7 +520,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneMCoins), + DAOCoinToTransferNanos: *uint256.NewInt(oneMCoins), ReceiverPublicKey: m2PkBytes[:10], }) require.Error(err) @@ -532,7 +531,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes[:10], - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneMCoins), + DAOCoinToTransferNanos: *uint256.NewInt(oneMCoins), ReceiverPublicKey: 
m2PkBytes, }) require.Error(err) @@ -544,7 +543,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt().SetUint64(oneMCoins), + CoinsToBurnNanos: *uint256.NewInt(oneMCoins), }) require.Error(err) require.Contains(err.Error(), RuleErrorDAOCoinBurnInsufficientCoins) @@ -561,7 +560,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(tenKCoins - oneKCoins), + DAOCoinToTransferNanos: *uint256.NewInt(tenKCoins - oneKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -571,7 +570,7 @@ func TestDAOCoinBasic(t *testing.T) { m1DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( db, chain.snapshot, m1PKID.PKID, m0PKID.PKID, true) // M1's balance entry is deleted because they have nothing - require.Equal(true, m1DAOBalanceEntry.BalanceNanos.Eq(uint256.NewInt())) + require.Equal(true, m1DAOBalanceEntry.BalanceNanos.Eq(uint256.NewInt(0))) profileEntry := DBGetProfileEntryForPKID(db, chain.snapshot, m0PKID.PKID) require.Equal(profileEntry.DAOCoinEntry.CoinsInCirculationNanos.Uint64(), oneMCoins-hundredKCoins-oneKCoins) @@ -589,7 +588,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, ReceiverPublicKey: m2PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(tenKCoins), + DAOCoinToTransferNanos: *uint256.NewInt(tenKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -639,7 +638,7 @@ func TestDAOCoinBasic(t *testing.T) { // M0 shouldn't own any M3 DAO Coin m0DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs(db, chain.snapshot, 
m0PKID.PKID, m3PKID.PKID, true) - require.Equal(true, m0DAOBalanceEntry.BalanceNanos.Eq(uint256.NewInt())) + require.Equal(true, m0DAOBalanceEntry.BalanceNanos.Eq(uint256.NewInt(0))) // M3's DAO Balance entry should be what M0's was prior to the swap m3DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs(db, chain.snapshot, m3PKID.PKID, m3PKID.PKID, true) @@ -677,7 +676,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m2Pub, m2Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1), + DAOCoinToTransferNanos: *uint256.NewInt(1), ReceiverPublicKey: m1PkBytes, }) require.Error(err) @@ -695,7 +694,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m2Pub, m2Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m3PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneKCoins), + DAOCoinToTransferNanos: *uint256.NewInt(oneKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -724,7 +723,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m3Pub, m3Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m1PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneKCoins), + DAOCoinToTransferNanos: *uint256.NewInt(oneKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -758,7 +757,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m4PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(100), + DAOCoinToTransferNanos: *uint256.NewInt(100), }) require.Error(err) @@ -778,7 +777,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: 
m3PkBytes, ReceiverPublicKey: m2PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(hundredCoins), + DAOCoinToTransferNanos: *uint256.NewInt(hundredCoins), }) m1DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -810,7 +809,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m3Pub, m3Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m4PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(hundredCoins), + DAOCoinToTransferNanos: *uint256.NewInt(hundredCoins), }) m3DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -842,7 +841,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m4PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(hundredCoins), + DAOCoinToTransferNanos: *uint256.NewInt(hundredCoins), }) m1DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -864,7 +863,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(100), + DAOCoinToTransferNanos: *uint256.NewInt(100), }) require.Error(err) @@ -912,7 +911,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(hundredCoins), + DAOCoinToTransferNanos: *uint256.NewInt(hundredCoins), }) m1DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -945,12 +944,12 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m3Pub, m3Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m2PkBytes, - 
DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneMCoins - hundredKCoins - tenKCoins - oneKCoins - hundredCoins), + DAOCoinToTransferNanos: *uint256.NewInt(oneMCoins - hundredKCoins - tenKCoins - oneKCoins - hundredCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( db, chain.snapshot, m3PKID.PKID, m3PKID.PKID, true) - require.Equal(true, daoBalanceEntry.BalanceNanos.Eq(uint256.NewInt())) + require.Equal(true, daoBalanceEntry.BalanceNanos.Eq(uint256.NewInt(0))) m2DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( db, chain.snapshot, m2PKID.PKID, m3PKID.PKID, true) @@ -977,12 +976,12 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m3PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt().SetUint64(oneMCoins - hundredKCoins - oneKCoins - oneKCoins), + CoinsToBurnNanos: *uint256.NewInt(oneMCoins - hundredKCoins - oneKCoins - oneKCoins), }) m2DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( db, chain.snapshot, m2PKID.PKID, m3PKID.PKID, true) - require.Equal(true, m2DAOBalanceEntry.BalanceNanos.Eq(uint256.NewInt())) + require.Equal(true, m2DAOBalanceEntry.BalanceNanos.Eq(uint256.NewInt(0))) profileEntry := DBGetProfileEntryForPKID(db, chain.snapshot, m3PKID.PKID) require.Equal(profileEntry.DAOCoinEntry.CoinsInCirculationNanos.Uint64(), oneKCoins) @@ -1014,7 +1013,7 @@ func TestDAOCoinBasic(t *testing.T) { // - nobody has any // M2 DAO cap table after: // - M2: max-1k - maxMinus1k := uint256.NewInt().Sub(MaxUint256, uint256.NewInt().SetUint64(1000)) + maxMinus1k := uint256.NewInt(0).Sub(MaxUint256, uint256.NewInt(1000)) { _daoCoinTxnWithTestMeta(testMeta, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, @@ -1036,7 +1035,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: 
DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(1001), + CoinsToMintNanos: *uint256.NewInt(1001), }) require.Error(err) require.Contains(err.Error(), RuleErrorOverflowWhileMintingDAOCoins) @@ -1048,10 +1047,10 @@ func TestDAOCoinBasic(t *testing.T) { // M2 DAO cap table after: // - M1: (max-1k) / 2 // - M2: (max-1k) / 2 + 1 - maxMinus1kDiv2 := uint256.NewInt().Div(maxMinus1k, uint256.NewInt().SetUint64(2)) - maxMinus1kDiv2PlusOne := uint256.NewInt().Add( + maxMinus1kDiv2 := uint256.NewInt(0).Div(maxMinus1k, uint256.NewInt(2)) + maxMinus1kDiv2PlusOne := uint256.NewInt(0).Add( maxMinus1kDiv2, - uint256.NewInt().SetUint64(1)) + uint256.NewInt(1)) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m2Pub, m2Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m2PkBytes, @@ -1081,7 +1080,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(1001), + CoinsToMintNanos: *uint256.NewInt(1001), }) require.Error(err) require.Contains(err.Error(), RuleErrorOverflowWhileMintingDAOCoins) @@ -1094,13 +1093,13 @@ func TestDAOCoinBasic(t *testing.T) { // M2 DAO cap table after: // - M1: (max-1k)/2 // - M2: (max-1k)/2 + 1k + 1 - maxMinus1kDiv2Plus1kPlusOne := uint256.NewInt().Add( - maxMinus1kDiv2, uint256.NewInt().SetUint64(1001)) + maxMinus1kDiv2Plus1kPlusOne := uint256.NewInt(0).Add( + maxMinus1kDiv2, uint256.NewInt(1001)) { _daoCoinTxnWithTestMeta(testMeta, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(1000), + CoinsToMintNanos: *uint256.NewInt(1000), }) { @@ -1127,16 +1126,16 @@ func TestDAOCoinBasic(t *testing.T) { // M2 DAO cap table after: // - M1: (max-1k)/2 - 2k // - M2: (max-1k)/2 + 1k + 1 - maxMinus1kDiv2Minus2k := uint256.NewInt().Sub( - maxMinus1kDiv2, 
uint256.NewInt().SetUint64(2000)) - maxMinus1kDiv2Minus1kPlus1 := uint256.NewInt().Add( + maxMinus1kDiv2Minus2k := uint256.NewInt(0).Sub( + maxMinus1kDiv2, uint256.NewInt(2000)) + maxMinus1kDiv2Minus1kPlus1 := uint256.NewInt(0).Add( maxMinus1kDiv2Minus2k, maxMinus1kDiv2Plus1kPlusOne) { _daoCoinTxnWithTestMeta(testMeta, 10, m1Pub, m1Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt().SetUint64(2000), + CoinsToBurnNanos: *uint256.NewInt(2000), }) { @@ -1163,15 +1162,15 @@ func TestDAOCoinBasic(t *testing.T) { // M2 DAO cap table after: // - M1: (max-1k)/2 - 2k // - M2: (max-1k)/2 + 2k + 1 - maxMinus1kDiv2Plus2k := uint256.NewInt().Add( - maxMinus1kDiv2, uint256.NewInt().SetUint64(2000)) - maxMinus1kDiv2Plus2kPlus1 := uint256.NewInt().Add( - maxMinus1kDiv2Plus2k, uint256.NewInt().SetUint64(1)) + maxMinus1kDiv2Plus2k := uint256.NewInt(0).Add( + maxMinus1kDiv2, uint256.NewInt(2000)) + maxMinus1kDiv2Plus2kPlus1 := uint256.NewInt(0).Add( + maxMinus1kDiv2Plus2k, uint256.NewInt(1)) { _daoCoinTxnWithTestMeta(testMeta, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(1000), + CoinsToMintNanos: *uint256.NewInt(1000), }) { @@ -1196,7 +1195,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(1001), + CoinsToMintNanos: *uint256.NewInt(1001), }) require.Error(err) require.Contains(err.Error(), RuleErrorOverflowWhileMintingDAOCoins) diff --git a/lib/block_view_derived_key.go b/lib/block_view_derived_key.go index c3561e6b6..08b721a92 100644 --- a/lib/block_view_derived_key.go +++ b/lib/block_view_derived_key.go @@ -3,7 +3,7 @@ package lib import ( "bytes" "fmt" - "github.com/btcsuite/btcd/btcec" + 
"github.com/btcsuite/btcd/btcec/v2" "github.com/holiman/uint256" "github.com/pkg/errors" "reflect" @@ -129,7 +129,7 @@ func (bav *UtxoView) _connectAuthorizeDerivedKey( if len(ownerPublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, RuleErrorAuthorizeDerivedKeyInvalidOwnerPublicKey } - if _, err := btcec.ParsePubKey(ownerPublicKey, btcec.S256()); err != nil { + if _, err := btcec.ParsePubKey(ownerPublicKey); err != nil { return 0, 0, nil, errors.Wrap( RuleErrorAuthorizeDerivedKeyInvalidOwnerPublicKey, err.Error()) } @@ -489,7 +489,7 @@ func (bav *UtxoView) _disconnectAuthorizeDerivedKey( if len(currentTxn.PublicKey) != btcec.PubKeyBytesLenCompressed { return fmt.Errorf("_disconnectAuthorizeDerivedKey invalid public key: %v", currentTxn.PublicKey) } - _, err := btcec.ParsePubKey(currentTxn.PublicKey, btcec.S256()) + _, err := btcec.ParsePubKey(currentTxn.PublicKey) if err != nil { return fmt.Errorf("_disconnectAuthorizeDerivedKey invalid public key: %v", err) } @@ -500,7 +500,7 @@ func (bav *UtxoView) _disconnectAuthorizeDerivedKey( if len(txMeta.DerivedPublicKey) != btcec.PubKeyBytesLenCompressed { return fmt.Errorf("_disconnectAuthorizeDerivedKey invalid derived key: %v", txMeta.DerivedPublicKey) } - _, err = btcec.ParsePubKey(txMeta.DerivedPublicKey, btcec.S256()) + _, err = btcec.ParsePubKey(txMeta.DerivedPublicKey) if err != nil { return fmt.Errorf("_disconnectAuthorizeDerivedKey invalid derived key: %v", err) } diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index bf5323fee..670567a4a 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -10,7 +10,8 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/ecdsa" "github.com/dgraph-io/badger/v4" "github.com/holiman/uint256" "github.com/stretchr/testify/assert" @@ -545,7 +546,7 @@ func _doTxnWithBlockHeight( // ====== Access Group Fork ====== // 
We will only have utxoop expectation if spending limit contains CC, DAO, or NFT spending limits. transactorPrivBytes, _, err := Base58CheckDecode(TransactorPrivKeyBase58Check) - _, transactorPub := btcec.PrivKeyFromBytes(btcec.S256(), transactorPrivBytes) + _, transactorPub := btcec.PrivKeyFromBytes(transactorPrivBytes) transactorPubBytes := transactorPub.SerializeCompressed() require.NoError(err) if !utxoView.GetDerivedKeyMappingForOwner(txn.PublicKey, transactorPubBytes).TransactionSpendingLimitTracker.IsUnlimited { @@ -650,15 +651,14 @@ func _getAuthorizeDerivedKeyMetadata( require := require.New(t) // Generate a random derived key pair - derivedPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + derivedPrivateKey, err := btcec.NewPrivateKey() require.NoError(err, "_getAuthorizeDerivedKeyMetadata: Error generating a derived key pair") derivedPublicKey := derivedPrivateKey.PubKey().SerializeCompressed() // Create access signature expirationBlockByte := EncodeUint64(expirationBlock) accessBytes := append(derivedPublicKey, expirationBlockByte[:]...) 
- accessSignature, err := ownerPrivateKey.Sign(Sha256DoubleHash(accessBytes)[:]) - require.NoError(err, "_getAuthorizeDerivedKeyMetadata: Error creating access signature") + accessSignature := ecdsa.Sign(ownerPrivateKey, Sha256DoubleHash(accessBytes)[:]) // Determine operation type var operationType AuthorizeDerivedKeyOperationType @@ -686,7 +686,7 @@ func _getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit( require := require.New(t) // Generate a random derived key pair - derivedPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + derivedPrivateKey, err := btcec.NewPrivateKey() require.NoError(err, "_getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit: Error generating a derived key pair") derivedPublicKey := derivedPrivateKey.PubKey().SerializeCompressed() @@ -714,9 +714,8 @@ func _getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit( accessBytes = AssembleAccessBytesWithMetamaskStrings(derivedPublicKey, expirationBlock, transactionSpendingLimit, &DeSoTestnetParams) } - signature, err := ownerPrivateKey.Sign(Sha256DoubleHash(accessBytes)[:]) + signature := ecdsa.Sign(ownerPrivateKey, Sha256DoubleHash(accessBytes)[:]) accessSignature := signature.Serialize() - require.NoError(err, "_getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit: Error creating access signature") return &AuthorizeDerivedKeyMetadata{ derivedPublicKey, @@ -746,8 +745,7 @@ func _getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimitAndDerivedPrivat require.NoError(err, "_getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit: Error in transaction spending limit to bytes") accessBytes = append(accessBytes, transactionSpendingLimitBytes[:]...) 
- accessSignature, err := ownerPrivateKey.Sign(Sha256DoubleHash(accessBytes)[:]) - require.NoError(err, "_getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit: Error creating access signature") + accessSignature := ecdsa.Sign(ownerPrivateKey, Sha256DoubleHash(accessBytes)[:]) // Determine operation type var operationType AuthorizeDerivedKeyOperationType @@ -777,10 +775,7 @@ func _getAccessSignature( return nil, err } accessBytes = append(accessBytes, transactionSpendingLimitBytes...) - accessSignature, err := ownerPrivateKey.Sign(Sha256DoubleHash(accessBytes)[:]) - if err != nil { - return nil, err - } + accessSignature := ecdsa.Sign(ownerPrivateKey, Sha256DoubleHash(accessBytes)[:]) return accessSignature.Serialize(), nil } @@ -924,7 +919,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { require.NoError(err) // Get AuthorizeDerivedKey txn metadata with expiration at block 6 - senderPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + senderPriv, _ := btcec.PrivKeyFromBytes(senderPrivBytes) var transactionSpendingLimit *TransactionSpendingLimit authTxnMeta, derivedPriv := _getAuthorizeDerivedKeyMetadata(t, senderPriv, 6, false) derivedPrivBase58Check := Base58CheckEncode(derivedPriv.Serialize(), true, params) @@ -953,7 +948,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { { utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(err) - randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) _, _, _, err = _doAuthorizeTxn( @@ -980,11 +975,11 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { { utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(err) - randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivateKey, err := btcec.NewPrivateKey() 
require.NoError(err) expirationBlockByte := UintToBuf(authTxnMeta.ExpirationBlock) accessBytes := append(authTxnMeta.DerivedPublicKey, expirationBlockByte[:]...) - accessSignatureRandom, err := randomPrivateKey.Sign(Sha256DoubleHash(accessBytes)[:]) + accessSignatureRandom := ecdsa.Sign(randomPrivateKey, Sha256DoubleHash(accessBytes)[:]) require.NoError(err) _, _, _, err = _doAuthorizeTxn( testMeta, @@ -1090,7 +1085,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Should fail. Well... theoretically, it could pass in a distant future. { // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) @@ -1166,7 +1161,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Should fail. { // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) @@ -1220,7 +1215,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Should fail. { // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, @@ -1316,7 +1311,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Should fail. 
{ // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) @@ -1767,7 +1762,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { require.NoError(err) // Get AuthorizeDerivedKey txn metadata with expiration at block 6 - senderPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + senderPriv, _ := btcec.PrivKeyFromBytes(senderPrivBytes) transactionCountLimitMap := make(map[TxnType]uint64) transactionCountLimitMap[TxnTypeAuthorizeDerivedKey] = 1 transactionCountLimitMap[TxnTypeBasicTransfer] = 1 @@ -1805,7 +1800,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { { utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(err) - randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) _, _, _, err = _doAuthorizeTxn( @@ -1832,11 +1827,11 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { { utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(err) - randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) expirationBlockByte := UintToBuf(authTxnMeta.ExpirationBlock) accessBytes := append(authTxnMeta.DerivedPublicKey, expirationBlockByte[:]...) 
- accessSignatureRandom, err := randomPrivateKey.Sign(Sha256DoubleHash(accessBytes)[:]) + accessSignatureRandom := ecdsa.Sign(randomPrivateKey, Sha256DoubleHash(accessBytes)[:]) require.NoError(err) _, _, _, err = _doAuthorizeTxn( testMeta, @@ -1942,7 +1937,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Should fail. Well... theoretically, it could pass in a distant future. { // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) @@ -2018,7 +2013,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Should fail. { // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) @@ -2072,7 +2067,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Should fail. { // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, @@ -2200,7 +2195,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Should fail. 
{ // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) @@ -2735,7 +2730,7 @@ REPEAT: utxoView, err := mempool.GetAugmentedUniversalView() require.NoError(err) m1PrivKeyBytes, _, err := Base58CheckDecode(m1Priv) - m1PrivateKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), m1PrivKeyBytes) + m1PrivateKey, _ := btcec.PrivKeyFromBytes(m1PrivKeyBytes) m1PKID := utxoView.GetPKIDForPublicKey(m1PkBytes).PKID transactionSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: 100, @@ -2789,8 +2784,8 @@ REPEAT: &DAOCoinMetadata{ ProfilePublicKey: m1PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(100 * NanosPerUnit), - CoinsToBurnNanos: *uint256.NewInt(), + CoinsToMintNanos: *uint256.NewInt(100 * NanosPerUnit), + CoinsToBurnNanos: *uint256.NewInt(0), }, nil, blockHeight+1, @@ -2807,8 +2802,8 @@ REPEAT: &DAOCoinMetadata{ ProfilePublicKey: m1PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(100 * NanosPerUnit), - CoinsToBurnNanos: *uint256.NewInt(), + CoinsToMintNanos: *uint256.NewInt(100 * NanosPerUnit), + CoinsToBurnNanos: *uint256.NewInt(0), }, nil, blockHeight+1, @@ -2830,7 +2825,7 @@ REPEAT: &DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(10 * NanosPerUnit), + DAOCoinToTransferNanos: *uint256.NewInt(10 * NanosPerUnit), }, nil, blockHeight+1, @@ -2847,7 +2842,7 @@ REPEAT: &DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(10 * NanosPerUnit), + DAOCoinToTransferNanos: *uint256.NewInt(10 * NanosPerUnit), }, nil, 
blockHeight+1, @@ -2861,7 +2856,7 @@ REPEAT: utxoView, err := mempool.GetAugmentedUniversalView() require.NoError(err) derivedPrivBytes, _, err := Base58CheckDecode(derivedPrivBase58Check) - _, derivedPub := btcec.PrivKeyFromBytes(btcec.S256(), derivedPrivBytes) + _, derivedPub := btcec.PrivKeyFromBytes(derivedPrivBytes) derivedPubBytes := derivedPub.SerializeCompressed() require.NoError(err) @@ -2922,7 +2917,7 @@ REPEAT: &DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(10 * NanosPerUnit), + DAOCoinToTransferNanos: *uint256.NewInt(10 * NanosPerUnit), }, nil, blockHeight+1, @@ -2947,8 +2942,8 @@ REPEAT: &DAOCoinMetadata{ ProfilePublicKey: m1PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(100 * NanosPerUnit), - CoinsToBurnNanos: *uint256.NewInt(), + CoinsToMintNanos: *uint256.NewInt(100 * NanosPerUnit), + CoinsToBurnNanos: *uint256.NewInt(0), }, nil, blockHeight+1, @@ -3079,7 +3074,7 @@ REPEAT: &DAOCoinMetadata{ ProfilePublicKey: m1PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt().SetUint64(10 * NanosPerUnit), + CoinsToBurnNanos: *uint256.NewInt(10 * NanosPerUnit), }, nil, blockHeight+1, @@ -3095,7 +3090,7 @@ REPEAT: } m0PrivKeyBytes, _, err := Base58CheckDecode(m0Priv) - m0PrivateKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), m0PrivKeyBytes) + m0PrivateKey, _ := btcec.PrivKeyFromBytes(m0PrivKeyBytes) blockHeight, err = GetBlockTipHeight(db, false) require.NoError(err) m0AuthTxnMeta, derived0Priv := _getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit( @@ -3689,7 +3684,7 @@ REPEAT: BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), + QuantityToFillInBaseUnits: uint256.NewInt(100), OperationType: 
DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3965,11 +3960,11 @@ func TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t *testing // // For each spending limit, we will submit a bunch of txns to make sure the limit works properly. // We will also try updating a spending limit. - groupPriv1, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv1, err := btcec.NewPrivateKey() require.NoError(err) groupPk1 := groupPriv1.PubKey().SerializeCompressed() _ = groupPk1 - derivedPriv1, err := btcec.NewPrivateKey(btcec.S256()) + derivedPriv1, err := btcec.NewPrivateKey() require.NoError(err) groupKeyName1 := NewGroupKeyName([]byte("group 1")) groupKeyName2 := NewGroupKeyName([]byte("group 2")) @@ -3988,7 +3983,7 @@ func TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t *testing "test vector for m0PublicKey before the block height authorizing access group transaction", m0Priv, m0PubBytes, derivedPriv1, tv1SpendingLimit, []byte{}, false, AuthorizeDerivedKeyOperationValid, 100, nil, nil) - derivedPriv2, err := btcec.NewPrivateKey(btcec.S256()) + derivedPriv2, err := btcec.NewPrivateKey() tv2SpendingLimit := TransactionSpendingLimit{ GlobalDESOLimit: 10, AccessGroupMemberMap: map[AccessGroupMemberLimitKey]uint64{ @@ -4003,7 +3998,7 @@ func TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t *testing "test vector for m0PublicKey before the block height authorizing access group member transaction", m0Priv, m0PubBytes, derivedPriv2, tv2SpendingLimit, []byte{}, false, AuthorizeDerivedKeyOperationValid, 100, nil, nil) - derivedPriv3, err := btcec.NewPrivateKey(btcec.S256()) + derivedPriv3, err := btcec.NewPrivateKey() tv3SpendingLimit := TransactionSpendingLimit{ GlobalDESOLimit: 10, AccessGroupMap: tv1SpendingLimit.AccessGroupMap, @@ -4013,7 +4008,7 @@ func TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t *testing "test vector for m0PublicKey before the block height 
authorizing access group and access group member transactions", m0Priv, m0PubBytes, derivedPriv3, tv3SpendingLimit, []byte{}, false, AuthorizeDerivedKeyOperationValid, 100, nil, nil) - derivedPriv4, err := btcec.NewPrivateKey(btcec.S256()) + derivedPriv4, err := btcec.NewPrivateKey() tv4SpendingLimit := TransactionSpendingLimit{ GlobalDESOLimit: 10, TransactionCountLimitMap: map[TxnType]uint64{ @@ -4025,7 +4020,7 @@ func TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t *testing derivedPriv4, tv4SpendingLimit, []byte{}, false, AuthorizeDerivedKeyOperationValid, 100, nil, nil) - derivedPriv4p5, err := btcec.NewPrivateKey(btcec.S256()) + derivedPriv4p5, err := btcec.NewPrivateKey() tv4p5SpendingLimit := TransactionSpendingLimit{ IsUnlimited: true, } @@ -4159,7 +4154,7 @@ func _getDerivedKeyMetadata(t *testing.T, ownerPrivateKeyString string, derivedP ownerPriv, _, err := Base58CheckDecode(ownerPrivateKeyString) require.NoError(err) - ownerPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), ownerPriv) + ownerPrivKey, _ := btcec.PrivKeyFromBytes(ownerPriv) spendingLimit := limit accessSignature, err := _getAccessSignature( derivedPublicKey, expirationBlock, &spendingLimit, ownerPrivKey, blockHeight) diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index bd289cb40..99401d8da 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -4,7 +4,7 @@ import ( "fmt" "reflect" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" diff --git a/lib/block_view_follow.go b/lib/block_view_follow.go index 55a8b2b96..6c78bcadc 100644 --- a/lib/block_view_follow.go +++ b/lib/block_view_follow.go @@ -2,7 +2,7 @@ package lib import ( "fmt" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/golang/glog" "github.com/pkg/errors" "reflect" @@ -207,7 +207,7 @@ func (bav *UtxoView) _connectFollow( } // TODO: This check 
feels unnecessary and is expensive - //_, err := btcec.ParsePubKey(txMeta.FollowedPublicKey, btcec.S256()) + //_, err := btcec.ParsePubKey(txMeta.FollowedPublicKey) //if err != nil { // return 0, 0, nil, errors.Wrapf( // RuleErrorFollowParsePubKeyError, "_connectFollow: Parse error: %v", err) diff --git a/lib/block_view_like.go b/lib/block_view_like.go index c3bb9355c..3af7983aa 100644 --- a/lib/block_view_like.go +++ b/lib/block_view_like.go @@ -2,7 +2,7 @@ package lib import ( "fmt" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/golang/glog" "github.com/pkg/errors" "reflect" diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 85fab796f..c5d0000a9 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -6,7 +6,7 @@ import ( "reflect" "sort" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/holiman/uint256" @@ -187,7 +187,7 @@ func (lockedBalanceEntry *LockedBalanceEntry) RawDecodeWithoutMetadata(blockHeig if err != nil { return errors.Wrap(err, "LockedBalanceEntry.Decode: Problem reading BalanceBaseUnits") } - lockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt().SetBytes(balanceBaseUnitsBytes) + lockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt(0).SetBytes(balanceBaseUnitsBytes) return nil } @@ -1178,14 +1178,14 @@ func (bav *UtxoView) _connectCoinLockup( // Spend the transactor's DAO coin balance. transactorBalanceEntry.BalanceNanos = - *uint256.NewInt().Sub(&transactorBalanceEntry.BalanceNanos, txMeta.LockupAmountBaseUnits) + *uint256.NewInt(0).Sub(&transactorBalanceEntry.BalanceNanos, txMeta.LockupAmountBaseUnits) bav._setDAOCoinBalanceEntryMappings(transactorBalanceEntry) // Create a copy of the associated CoinEntry in the event we must roll back the transaction. 
prevCoinEntry := profileEntry.DAOCoinEntry.Copy() // Update CoinsInCirculation and NumberOfHolders associated with the DAO coin balance. - profileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().Sub( + profileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt(0).Sub( &profileEntry.DAOCoinEntry.CoinsInCirculationNanos, txMeta.LockupAmountBaseUnits) if transactorBalanceEntry.BalanceNanos.IsZero() && !prevTransactorBalanceEntry.BalanceNanos.IsZero() { @@ -1216,7 +1216,7 @@ func (bav *UtxoView) _connectCoinLockup( // If this is an unvested lockup, compute any accrued yield. // In the vested lockup case, the yield earned is always zero. - yieldFromTxn := uint256.NewInt() + yieldFromTxn := uint256.NewInt(0) if profileEnablesYield && txMeta.UnlockTimestampNanoSecs == txMeta.VestingEndTimestampNanoSecs { // Compute the lockup duration in nanoseconds. lockupDurationNanoSeconds := txMeta.UnlockTimestampNanoSecs - blockTimestampNanoSecs @@ -1251,8 +1251,8 @@ func (bav *UtxoView) _connectCoinLockup( } // Convert variables to a consistent uint256 representation. This is to use them in SafeUint256 math. - txnYieldBasisPoints256 := uint256.NewInt().SetUint64(txnYieldBasisPoints) - txnYieldEarningDurationNanoSecs256 := uint256.NewInt().SetUint64(uint64(txnYieldEarningDurationNanoSecs)) + txnYieldBasisPoints256 := uint256.NewInt(txnYieldBasisPoints) + txnYieldEarningDurationNanoSecs256 := uint256.NewInt(uint64(txnYieldEarningDurationNanoSecs)) // Compute the yield associated with this operation, checking to ensure there's no overflow. 
yieldFromTxn, err = @@ -1300,7 +1300,7 @@ func (bav *UtxoView) _connectCoinLockup( ProfilePKID: profilePKID, UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: txMeta.VestingEndTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + BalanceBaseUnits: *uint256.NewInt(0), } } @@ -1772,7 +1772,7 @@ func SplitVestedLockedBalanceEntry( remainingLockedBalanceEntry.BalanceBaseUnits = *remainingValue // Sanity check the split does not print money. - if uint256.NewInt().Add( + if uint256.NewInt(0).Add( &splitLockedBalanceEntry.BalanceBaseUnits, &remainingLockedBalanceEntry.BalanceBaseUnits). Gt(&lockedBalanceEntry.BalanceBaseUnits) { return nil, nil, @@ -1803,12 +1803,12 @@ func CalculateLockupValueOverElapsedDuration( } // Convert the elapsedDuration to an uint256 - numerator := uint256.NewInt().SetUint64(uint64(elapsedDuration)) + numerator := uint256.NewInt(uint64(elapsedDuration)) // Compute the time that passes over the duration of the locked balance entry denominator, err := SafeUint256().Sub( - uint256.NewInt().SetUint64(uint64(lockedBalanceEntry.VestingEndTimestampNanoSecs)), - uint256.NewInt().SetUint64(uint64(lockedBalanceEntry.UnlockTimestampNanoSecs))) + uint256.NewInt(uint64(lockedBalanceEntry.VestingEndTimestampNanoSecs)), + uint256.NewInt(uint64(lockedBalanceEntry.UnlockTimestampNanoSecs))) if err != nil { return nil, errors.Wrap(err, "CalculateLockupSplitValue: "+ "(lockedBalanceEntry.UnlockTimestamp - lockedBalanceEntry.VestingEndTimestamp) underflow") @@ -1853,13 +1853,13 @@ func CalculateLockupYield( // The SafeUint256 Library uses division to ensure there's no overflow. This leads to possible // unnecessary false overflows in the event the duration or the yield is 0. Hence, we do a separate check here. 
if apyYieldBasisPoints.IsZero() || durationNanoSecs.IsZero() { - return uint256.NewInt(), nil + return uint256.NewInt(0), nil } // Compute the denominators from the nanosecond to year conversion and the basis point computation. denominators, err := SafeUint256().Mul( - uint256.NewInt().SetUint64(NanoSecsPerYear), - uint256.NewInt().SetUint64(10000)) + uint256.NewInt(NanoSecsPerYear), + uint256.NewInt(10000)) if err != nil { return nil, errors.Wrap(RuleErrorCoinLockupCoinYieldOverflow, "CalculateLockupYield (nanoSecsPerYear * 10000)") @@ -1930,7 +1930,7 @@ func (bav *UtxoView) _disconnectCoinLockup( ProfilePKID: operationData.PrevLockedBalanceEntry.ProfilePKID, UnlockTimestampNanoSecs: operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: operationData.PrevLockedBalanceEntry.VestingEndTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + BalanceBaseUnits: *uint256.NewInt(0), } } if lockedBalanceEntry.BalanceBaseUnits.Lt(&operationData.PrevLockedBalanceEntry.BalanceBaseUnits) { @@ -2308,7 +2308,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( ProfilePKID: profilePKID, UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + BalanceBaseUnits: *uint256.NewInt(0), } } prevSenderLockedBalanceEntry := senderLockedBalanceEntry.Copy() @@ -2320,7 +2320,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( } // Debit the sender's balance entry. - senderLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt().Sub( + senderLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt(0).Sub( &senderLockedBalanceEntry.BalanceBaseUnits, txMeta.LockedCoinsToTransferBaseUnits) // Fetch the recipient's balance entry. 
@@ -2340,7 +2340,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( ProfilePKID: profilePKID, UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + BalanceBaseUnits: *uint256.NewInt(0), } } prevReceiverLockedBalanceEntry := receiverLockedBalanceEntry.Copy() @@ -2467,7 +2467,7 @@ func (bav *UtxoView) _disconnectCoinLockupTransfer( ProfilePKID: operationData.PrevSenderLockedBalanceEntry.ProfilePKID, UnlockTimestampNanoSecs: operationData.PrevSenderLockedBalanceEntry.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: operationData.PrevSenderLockedBalanceEntry.VestingEndTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + BalanceBaseUnits: *uint256.NewInt(0), } } receiverLockedBalanceEntry, err := @@ -2485,7 +2485,7 @@ func (bav *UtxoView) _disconnectCoinLockupTransfer( ProfilePKID: operationData.PrevReceiverLockedBalanceEntry.ProfilePKID, UnlockTimestampNanoSecs: operationData.PrevReceiverLockedBalanceEntry.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: operationData.PrevReceiverLockedBalanceEntry.VestingEndTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + BalanceBaseUnits: *uint256.NewInt(0), } } @@ -2596,7 +2596,7 @@ func (bav *UtxoView) _connectCoinUnlock( } // Create an unlockedBalance uint256 to track what will be given back to the user. - unlockedBalance := uint256.NewInt() + unlockedBalance := uint256.NewInt(0) // Unlock all unvested unlockable locked balance entries. var prevLockedBalanceEntries []*LockedBalanceEntry @@ -2612,7 +2612,7 @@ func (bav *UtxoView) _connectCoinUnlock( prevLockedBalanceEntries = append(prevLockedBalanceEntries, unlockableLockedBalanceEntry.Copy()) // Update the LockedBalanceEntry and delete the record. 
- unlockableLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt() + unlockableLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt(0) bav._deleteLockedBalanceEntry(unlockableLockedBalanceEntry) } @@ -2721,7 +2721,7 @@ func CalculateVestedEarnings( ) { // Check if this lockup should not be unlocked right now. if blockTimestampNanoSecs <= lockedBalanceEntry.UnlockTimestampNanoSecs { - return uint256.NewInt(), nil + return uint256.NewInt(0), nil } // Check if this lockup should be fully unlocked. @@ -2734,14 +2734,14 @@ func CalculateVestedEarnings( lockedBalanceEntry, blockTimestampNanoSecs-lockedBalanceEntry.UnlockTimestampNanoSecs) if err != nil { - return uint256.NewInt(), + return uint256.NewInt(0), errors.Wrap(err, "CalculateVestedEarnings failed to compute vestedEarnings") } // Sanity check that vestedEarnings < BalanceBaseUnits if vestedEarnings.Gt(&lockedBalanceEntry.BalanceBaseUnits) || vestedEarnings.Eq(&lockedBalanceEntry.BalanceBaseUnits) { - return uint256.NewInt(), + return uint256.NewInt(0), errors.New("ComputeVestedEarnings: " + "vested earnings >= outstanding balance; this shouldn't be possible") } @@ -2804,7 +2804,7 @@ func (bav *UtxoView) _disconnectCoinUnlock( ProfilePKID: prevLockedBalanceEntry.ProfilePKID, UnlockTimestampNanoSecs: prevLockedBalanceEntry.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: prevLockedBalanceEntry.VestingEndTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(), + BalanceBaseUnits: *uint256.NewInt(0), } } if prevLockedBalanceEntry.BalanceBaseUnits.Lt(&lockedBalanceEntry.BalanceBaseUnits) { diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 1ab7fb017..58cd4c5f9 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -2,10 +2,9 @@ package lib import ( "testing" - "time" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/dgraph-io/badger/v4" "github.com/holiman/uint256" "github.com/pkg/errors" @@ -72,7 +71,7 @@ func 
TestCoinLockupsForkHeight(t *testing.T) { m0Pub, 1000, 1000, - uint256.NewInt().SetUint64(100), + uint256.NewInt(100), 0) _, _, _, err2 := _updateCoinLockupParams( t, chain, db, params, @@ -93,7 +92,7 @@ func TestCoinLockupsForkHeight(t *testing.T) { NewPublicKey(m3PkBytes), NewPublicKey(m0PkBytes), 1000, - uint256.NewInt().SetUint64(1)) + uint256.NewInt(1)) _, _, _, err4 := _coinUnlockWithConnectTimestamp( t, chain, db, params, feeRateNanosPerKb, @@ -118,33 +117,33 @@ func TestCalculateLockupYield(t *testing.T) { // Ensure that a lockup with zero duration has zero yield. yield, err = CalculateLockupYield( MaxUint256, - uint256.NewInt(), - uint256.NewInt().SetUint64(1)) + uint256.NewInt(0), + uint256.NewInt(1)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt()) + require.Equal(t, *yield, *uint256.NewInt(0)) // Ensure that a lockup with zero apyYieldBasisPoints has zero yield. yield, err = CalculateLockupYield( MaxUint256, - uint256.NewInt().SetUint64(1), - uint256.NewInt()) + uint256.NewInt(1), + uint256.NewInt(0)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt()) + require.Equal(t, *yield, *uint256.NewInt(0)) // Ensure that when principal is MaxUint256 and the apy yield is 2bp, // the operation fails due to lack of precision. _, err = CalculateLockupYield( MaxUint256, - uint256.NewInt().SetUint64(2), - uint256.NewInt().SetUint64(1)) + uint256.NewInt(2), + uint256.NewInt(1)) require.Contains(t, err.Error(), RuleErrorCoinLockupCoinYieldOverflow) // Ensure that when principal is MaxUint256 and the duration is 2ns, // the operation fails due to lack of precision. 
_, err = CalculateLockupYield( MaxUint256, - uint256.NewInt().SetUint64(1), - uint256.NewInt().SetUint64(2)) + uint256.NewInt(1), + uint256.NewInt(2)) require.Contains(t, err.Error(), RuleErrorCoinLockupCoinYieldOverflow) // Ensure that the CalculateLockupYield operation acts as a floor of @@ -162,11 +161,11 @@ func TestCalculateLockupYield(t *testing.T) { // In theory, this should return a yield of 1 without any overflow in the operation. // We test this below: yield, err = CalculateLockupYield( - uint256.NewInt().SetUint64(365*24*10000), - uint256.NewInt().SetUint64(60*60), - uint256.NewInt().SetUint64(1e9)) + uint256.NewInt(365*24*10000), + uint256.NewInt(60*60), + uint256.NewInt(1e9)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt().SetUint64(1)) + require.Equal(t, *yield, *uint256.NewInt(1)) // Knowing this, we can now check to ensure the edges of the CalculateLockupYield // operation are behaving correctly and never minting more coins than expected. @@ -174,21 +173,21 @@ func TestCalculateLockupYield(t *testing.T) { // To test this, we set duration = 1e9 - 1. // (This decreases only the largest factor, leading to the smallest decrease possible in the numerator) yield, err = CalculateLockupYield( - uint256.NewInt().SetUint64(365*24*10000), - uint256.NewInt().SetUint64(60*60), - uint256.NewInt().SetUint64(1e9-1)) + uint256.NewInt(365*24*10000), + uint256.NewInt(60*60), + uint256.NewInt(1e9-1)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt().SetUint64(0)) + require.Equal(t, *yield, *uint256.NewInt(0)) // If we only slightly increase the numerator, we should expect to see the yield remain the same. 
// To test this, we set duration = 1e9 + 1 // (This increases only the largest factor, leading to the smallest increase possible in the numerator) yield, err = CalculateLockupYield( - uint256.NewInt().SetUint64(365*24*10000), - uint256.NewInt().SetUint64(60*60), - uint256.NewInt().SetUint64(1e9+1)) + uint256.NewInt(365*24*10000), + uint256.NewInt(60*60), + uint256.NewInt(1e9+1)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt().SetUint64(1)) + require.Equal(t, *yield, *uint256.NewInt(1)) // We should only see an increase to the output yield if the numerator is scaled by a constant. // To do this, we can iterate through various constants and see if the output yield matches. @@ -196,19 +195,19 @@ func TestCalculateLockupYield(t *testing.T) { // We also ensure that slight deviations do not alter the output. for ii := uint64(0); ii < 100000; ii++ { yield, err = CalculateLockupYield( - uint256.NewInt().SetUint64(ii*365*24*10000), - uint256.NewInt().SetUint64(60*60), - uint256.NewInt().SetUint64(1e9)) + uint256.NewInt(ii*365*24*10000), + uint256.NewInt(60*60), + uint256.NewInt(1e9)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt().SetUint64(ii)) + require.Equal(t, *yield, *uint256.NewInt(ii)) // Slight increase to the numerator. Ensure we don't create more yield than expected. yield, err = CalculateLockupYield( - uint256.NewInt().SetUint64(ii*365*24*10000), - uint256.NewInt().SetUint64(60*60), - uint256.NewInt().SetUint64(1e9+1)) + uint256.NewInt(ii*365*24*10000), + uint256.NewInt(60*60), + uint256.NewInt(1e9+1)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt().SetUint64(ii)) + require.Equal(t, *yield, *uint256.NewInt(ii)) // Slight decrease to the numerator. Ensure we create strictly less yield. 
expectedValue := ii - 1 @@ -216,11 +215,11 @@ func TestCalculateLockupYield(t *testing.T) { expectedValue = 0 } yield, err = CalculateLockupYield( - uint256.NewInt().SetUint64(ii*365*24*10000), - uint256.NewInt().SetUint64(60*60), - uint256.NewInt().SetUint64(1e9-1)) + uint256.NewInt(ii*365*24*10000), + uint256.NewInt(60*60), + uint256.NewInt(1e9-1)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt().SetUint64(expectedValue)) + require.Equal(t, *yield, *uint256.NewInt(expectedValue)) } } @@ -237,7 +236,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 0, 0, uint256.NewInt(), 0) + 0, 0, uint256.NewInt(0), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupOfAmountZero) } @@ -247,7 +246,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m2Pub, m0Pub, - 0, 0, uint256.NewInt().SetUint64(1), 0) + 0, 0, uint256.NewInt(1), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupOnNonExistentProfile) } @@ -257,7 +256,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m0Pub, - 0, 0, uint256.NewInt().SetUint64(1), 0) + 0, 0, uint256.NewInt(1), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupCannotLockupZeroKey) } @@ -267,7 +266,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 0, 0, uint256.NewInt().SetUint64(1), 0) + 0, 0, uint256.NewInt(1), 0) require.Contains(t, err.Error(), 
RuleErrorCoinLockupInvalidLockupDuration) } @@ -277,7 +276,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 0, 0, uint256.NewInt().SetUint64(1), 1) + 0, 0, uint256.NewInt(1), 1) require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidLockupDuration) } @@ -287,7 +286,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 900, uint256.NewInt().SetUint64(1), 950) + 1000, 900, uint256.NewInt(1), 950) require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidVestingEndTimestamp) } @@ -297,7 +296,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), - 1000, 1000, uint256.NewInt().SetUint64(1), 950) + 1000, 1000, uint256.NewInt(1), 950) require.Contains(t, err.Error(), RuleErrorCoinLockupZeroPublicKeyAsRecipient) } @@ -307,7 +306,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1, 1, uint256.NewInt().SetUint64(1e10), 0) + 1, 1, uint256.NewInt(1e10), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupInsufficientCoins) } @@ -326,12 +325,12 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { ) _daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), + DAOCoinToTransferNanos: *uint256.NewInt(1000), ReceiverPublicKey: m1PkBytes, }) 
_daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), + DAOCoinToTransferNanos: *uint256.NewInt(1000), ReceiverPublicKey: m2PkBytes, }) @@ -341,7 +340,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m0Pub, m0Pub, - 1000, 1000, uint256.NewInt().SetUint64(1000), 0) + 1000, 1000, uint256.NewInt(1000), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToProfileOwner) } @@ -364,7 +363,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m0Pub, m3Pub, - 1000, 1000, uint256.NewInt().SetUint64(1000), 0) + 1000, 1000, uint256.NewInt(1000), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToDAOMembers) } @@ -374,7 +373,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m0Pub, m2Pub, - 1000, 1000, uint256.NewInt().SetUint64(1000), 0) + 1000, 1000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -400,7 +399,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 1000, uint256.NewInt().SetUint64(1000), 0) + 1000, 1000, uint256.NewInt(1000), 0) require.NoError(t, err) } } @@ -548,7 +547,7 @@ func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { NewPublicKey(m3PkBytes), NewPublicKey(m0PkBytes), 0, - uint256.NewInt()) + uint256.NewInt(0)) require.Contains(t, err.Error(), 
RuleErrorCoinLockupTransferOfAmountZero) } @@ -632,7 +631,7 @@ func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { _coinLockupWithTestMetaAndConnectTimestamp( testMeta, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1, 1, uint256.NewInt().SetUint64(1e6), 0) + 1, 1, uint256.NewInt(1e6), 0) // Send 1000 locked M0 coins to M2. _coinLockupTransferWithTestMeta( @@ -643,7 +642,7 @@ func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { NewPublicKey(m2PkBytes), NewPublicKey(m0PkBytes), 1, - uint256.NewInt().SetUint64(1e6), + uint256.NewInt(1e6), ) // Attempt to have M2 send locked M0 coins to M3. @@ -656,7 +655,7 @@ func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { NewPublicKey(m3PkBytes), NewPublicKey(m0PkBytes), 1, - uint256.NewInt().SetUint64(1)) + uint256.NewInt(1)) require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToProfileOwner) } @@ -686,7 +685,7 @@ func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { NewPublicKey(m3PkBytes), NewPublicKey(m0PkBytes), 1, - uint256.NewInt().SetUint64(1)) + uint256.NewInt(1)) require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToDAOMembers) } @@ -796,7 +795,7 @@ func TestLockupBasedOverflowsOnProfiles(t *testing.T) { utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) profileEntry := utxoView.GetProfileEntryForPublicKey(m2PkBytes) - require.Equal(t, *uint256.NewInt(), profileEntry.DAOCoinEntry.CoinsInCirculationNanos) + require.Equal(t, *uint256.NewInt(0), profileEntry.DAOCoinEntry.CoinsInCirculationNanos) require.Equal(t, uint64(0), profileEntry.DAOCoinEntry.NumberOfHolders) } @@ -822,7 +821,7 @@ func TestLockupBasedOverflowsOnProfiles(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m2Pub, m2Priv, m2Pub, m2Pub, - 1000, 1000, uint256.NewInt().SetUint64(1), 0) + 1000, 1000, uint256.NewInt(1), 
0) require.Contains(t, err.Error(), RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry) } @@ -966,7 +965,7 @@ func TestLockupBasedOverflowsOnProfiles(t *testing.T) { DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(1), + CoinsToMintNanos: *uint256.NewInt(1), CoinsToBurnNanos: uint256.Int{}, TransferRestrictionStatus: 0, }) @@ -1053,7 +1052,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { m1Pub, m1Priv, m1Pub, m1Pub, 365*24*60*60*1e9+365*12*60*60*1e9, 365*24*60*60*1e9+365*12*60*60*1e9, - uint256.NewInt().SetUint64(10000), + uint256.NewInt(10000), 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 10000 base units. @@ -1067,15 +1066,15 @@ func TestLockupStandardProfileFlows(t *testing.T) { 365*24*60*60*1e9+365*12*60*60*1e9, 365*24*60*60*1e9+365*12*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(10000), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt(10000), lockedBalanceEntry.BalanceBaseUnits) // Check to ensure that the BalanceEntry has decreased by exactly 10000. newBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( m1PkBytes, m1PkBytes, true) require.True(t, originalBalanceEntry.BalanceNanos.Gt(&newBalanceEntry.BalanceNanos)) require.Equal(t, - *uint256.NewInt().Sub(&originalBalanceEntry.BalanceNanos, &newBalanceEntry.BalanceNanos), - *uint256.NewInt().SetUint64(10000)) + *uint256.NewInt(0).Sub(&originalBalanceEntry.BalanceNanos, &newBalanceEntry.BalanceNanos), + *uint256.NewInt(10000)) } // Have m1 lockup 10000 m1 DAO tokens for one year. @@ -1098,7 +1097,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { m1Pub, m1Priv, m1Pub, m1Pub, 2*365*24*60*60*1e9, 2*365*24*60*60*1e9, - uint256.NewInt().SetUint64(10000), + uint256.NewInt(10000), 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 10500 base units. 
@@ -1112,15 +1111,15 @@ func TestLockupStandardProfileFlows(t *testing.T) { 2*365*24*60*60*1e9, 2*365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(10500), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt(10500), lockedBalanceEntry.BalanceBaseUnits) // Check to ensure that the BalanceEntry has decreased by exactly 10000. newBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( m1PkBytes, m1PkBytes, true) require.True(t, originalBalanceEntry.BalanceNanos.Gt(&newBalanceEntry.BalanceNanos)) require.Equal(t, - *uint256.NewInt().Sub(&originalBalanceEntry.BalanceNanos, &newBalanceEntry.BalanceNanos), - *uint256.NewInt().SetUint64(10000)) + *uint256.NewInt(0).Sub(&originalBalanceEntry.BalanceNanos, &newBalanceEntry.BalanceNanos), + *uint256.NewInt(10000)) } // Have m1 lockup 10000 m1 DAO tokens for one and a half year. @@ -1133,7 +1132,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { m1Pub, m1Priv, m1Pub, m1Pub, 2*365*24*60*60*1e9+365*12*60*60*1e9, 2*365*24*60*60*1e9+365*12*60*60*1e9, - uint256.NewInt().SetUint64(10000), + uint256.NewInt(10000), 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 10500 base units. @@ -1149,7 +1148,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { 2*365*24*60*60*1e9+365*12*60*60*1e9, 2*365*24*60*60*1e9+365*12*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(10500), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt(10500), lockedBalanceEntry.BalanceBaseUnits) } // Have m1 lockup 10000 m1 DAO tokens for two years. @@ -1161,7 +1160,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { m1Pub, m1Priv, m1Pub, m1Pub, 3*365*24*60*60*1e9, 3*365*24*60*60*1e9, - uint256.NewInt().SetUint64(10000), + uint256.NewInt(10000), 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 12000 base units. 
@@ -1177,7 +1176,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { 3*365*24*60*60*1e9, 3*365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(12000), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt(12000), lockedBalanceEntry.BalanceBaseUnits) } // Have m1 distribute 1 year locked tokens. @@ -1190,7 +1189,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { NewPublicKey(m2PkBytes), NewPublicKey(m1PkBytes), 2*365*24*60*60*1e9, - uint256.NewInt().SetUint64(500), + uint256.NewInt(500), ) _coinLockupTransferWithTestMeta( testMeta, @@ -1200,7 +1199,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { NewPublicKey(m3PkBytes), NewPublicKey(m1PkBytes), 2*365*24*60*60*1e9, - uint256.NewInt().SetUint64(500), + uint256.NewInt(500), ) _coinLockupTransferWithTestMeta( testMeta, @@ -1210,7 +1209,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { NewPublicKey(m4PkBytes), NewPublicKey(m1PkBytes), 2*365*24*60*60*1e9, - uint256.NewInt().SetUint64(500), + uint256.NewInt(500), ) // Check to ensure the resulting locked balance entry for m1 has 9000 base units. @@ -1226,7 +1225,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { 2*365*24*60*60*1e9, 2*365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(9000), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt(9000), lockedBalanceEntry.BalanceBaseUnits) } // Check to make sure locked tokens are not liquid. 
@@ -1238,7 +1237,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { NewPublicKey(m3PkBytes), NewPublicKey(m1PkBytes), 2*365*24*60*60*1e9, - uint256.NewInt().SetUint64(500), + uint256.NewInt(500), ) require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToProfileOwner) } @@ -1271,7 +1270,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { newBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( m2PkBytes, m1PkBytes, true) require.True(t, newBalanceEntry.BalanceNanos.Gt(&originalBalanceEntry.BalanceNanos)) - require.Equal(t, *uint256.NewInt().SetUint64(500), *uint256.NewInt().Sub( + require.Equal(t, *uint256.NewInt(500), *uint256.NewInt(0).Sub( &newBalanceEntry.BalanceNanos, &originalBalanceEntry.BalanceNanos)) } } @@ -1296,7 +1295,7 @@ func TestLockupWithDerivedKey(t *testing.T) { senderPrivBytes, _, err := Base58CheckDecode(m0Priv) require.NoError(t, err) - m0PrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + m0PrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) // Setup helper functions for creating m0 derived keys newUtxoView := func() *UtxoView { @@ -1508,7 +1507,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + LockupAmountBaseUnits: uint256.NewInt(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1521,7 +1520,7 @@ func TestLockupWithDerivedKey(t *testing.T) { m1Pub, m1Priv, m1Pub, m1Pub, 365*24*60*60*1e9, 365*24*60*60*1e9, - uint256.NewInt().SetUint64(1000), + uint256.NewInt(1000), 0) _coinLockupTransferWithTestMeta( testMeta, @@ -1531,7 +1530,7 @@ func TestLockupWithDerivedKey(t *testing.T) { NewPublicKey(m0PkBytes), NewPublicKey(m1PkBytes), 365*24*60*60*1e9, - uint256.NewInt().SetUint64(1000), 
+ uint256.NewInt(1000), ) coinUnlockMetadata := &CoinUnlockMetadata{ProfilePublicKey: NewPublicKey(m1PkBytes)} _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( @@ -1543,7 +1542,7 @@ func TestLockupWithDerivedKey(t *testing.T) { // (Correct profile + correct operation) _daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), + DAOCoinToTransferNanos: *uint256.NewInt(1000), ReceiverPublicKey: m0PkBytes, }) coinLockupMetadata = &CoinLockupMetadata{ @@ -1551,7 +1550,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m1PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + LockupAmountBaseUnits: uint256.NewInt(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1561,7 +1560,7 @@ func TestLockupWithDerivedKey(t *testing.T) { // Ensure the operation cannot be performed again as the transaction limit was set to 1. 
_daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), + DAOCoinToTransferNanos: *uint256.NewInt(1000), ReceiverPublicKey: m0PkBytes, }) coinLockupMetadata = &CoinLockupMetadata{ @@ -1569,7 +1568,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m1PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + LockupAmountBaseUnits: uint256.NewInt(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1597,7 +1596,7 @@ func TestLockupWithDerivedKey(t *testing.T) { // Have m1 transfer 1000 unlocked m1 coins to m0 _daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), + DAOCoinToTransferNanos: *uint256.NewInt(1000), ReceiverPublicKey: m0PkBytes, }) @@ -1608,7 +1607,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m1PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + LockupAmountBaseUnits: uint256.NewInt(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1622,7 +1621,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + LockupAmountBaseUnits: uint256.NewInt(1000), } _, err = 
_submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1644,7 +1643,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + LockupAmountBaseUnits: uint256.NewInt(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 365*24*60*60*1e9+1, @@ -1679,7 +1678,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + LockupAmountBaseUnits: uint256.NewInt(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1692,7 +1691,7 @@ func TestLockupWithDerivedKey(t *testing.T) { m1Pub, m1Priv, m1Pub, m1Pub, 365*24*60*60*1e9, 365*24*60*60*1e9, - uint256.NewInt().SetUint64(1000), + uint256.NewInt(1000), 0) _coinLockupTransferWithTestMeta( testMeta, @@ -1702,7 +1701,7 @@ func TestLockupWithDerivedKey(t *testing.T) { NewPublicKey(m0PkBytes), NewPublicKey(m1PkBytes), 365*24*60*60*1e9, - uint256.NewInt().SetUint64(1000), + uint256.NewInt(1000), ) // Have m0 unlock the 1,000 locked m1 tokens. 
@@ -1719,7 +1718,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + LockupAmountBaseUnits: uint256.NewInt(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 365*24*60*60*1e9+1, @@ -1804,7 +1803,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + LockupAmountBaseUnits: uint256.NewInt(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1820,7 +1819,7 @@ func TestLockupWithDerivedKey(t *testing.T) { 365*24*60*60*1e9, 365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(1000), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt(1000), lockedBalanceEntry.BalanceBaseUnits) require.Equal(t, int64(365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) // Perform the second lockup operation of 1000 m0 coins at 2yrs @@ -1829,7 +1828,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 2 * 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 2 * 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + LockupAmountBaseUnits: uint256.NewInt(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1845,7 +1844,7 @@ func TestLockupWithDerivedKey(t *testing.T) { 2*365*24*60*60*1e9, 2*365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, 
*uint256.NewInt().SetUint64(1000), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt(1000), lockedBalanceEntry.BalanceBaseUnits) require.Equal(t, int64(2*365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) // Perform the first transfer operation to m1 of 500 locked m0 coins @ 1yr @@ -1853,7 +1852,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m1PkBytes), ProfilePublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockedCoinsToTransferBaseUnits: uint256.NewInt().SetUint64(500), + LockedCoinsToTransferBaseUnits: uint256.NewInt(500), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupTransferMetadata}, 0, @@ -1869,7 +1868,7 @@ func TestLockupWithDerivedKey(t *testing.T) { 365*24*60*60*1e9, 365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt(500), lockedBalanceEntry.BalanceBaseUnits) require.Equal(t, int64(365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) // Perform the second transfer operation to m1 of 500 locked m0 coins @ 2yrs @@ -1877,7 +1876,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m1PkBytes), ProfilePublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 2 * 365 * 24 * 60 * 60 * 1e9, - LockedCoinsToTransferBaseUnits: uint256.NewInt().SetUint64(500), + LockedCoinsToTransferBaseUnits: uint256.NewInt(500), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupTransferMetadata}, 0, @@ -1893,7 +1892,7 @@ func TestLockupWithDerivedKey(t *testing.T) { 2*365*24*60*60*1e9, 2*365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt(500), 
lockedBalanceEntry.BalanceBaseUnits) require.Equal(t, int64(2*365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) // Perform the first unlock operation of 500 m1 tokens @ 1yr @@ -1912,8 +1911,8 @@ func TestLockupWithDerivedKey(t *testing.T) { require.NoError(t, err) balanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) require.True(t, balanceEntry.BalanceNanos.Gt(&startingBalance)) - require.Equal(t, *uint256.NewInt().SetUint64(500), - *uint256.NewInt().Sub(&balanceEntry.BalanceNanos, &startingBalance)) + require.Equal(t, *uint256.NewInt(500), + *uint256.NewInt(0).Sub(&balanceEntry.BalanceNanos, &startingBalance)) lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -1936,8 +1935,8 @@ func TestLockupWithDerivedKey(t *testing.T) { require.NoError(t, err) balanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) require.True(t, balanceEntry.BalanceNanos.Gt(&startingBalance)) - require.Equal(t, *uint256.NewInt().SetUint64(1000), - *uint256.NewInt().Sub(&balanceEntry.BalanceNanos, &startingBalance)) + require.Equal(t, *uint256.NewInt(1000), + *uint256.NewInt(0).Sub(&balanceEntry.BalanceNanos, &startingBalance)) lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -1953,7 +1952,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 3 * 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 3 * 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), + LockupAmountBaseUnits: uint256.NewInt(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 2*365*24*60*60*1e9+2, @@ -1980,7 +1979,7 @@ func TestLockupDisconnects(t 
*testing.T) { m0Pub, m0Priv, m0Pub, m0Pub, 2*365*24*60*60*1e9, 2*365*24*60*60*1e9, - uint256.NewInt().SetUint64(1000), + uint256.NewInt(1000), 365*24*60*60*1e9) require.NoError(t, err) utxoOps2, txn2, _, err := _coinLockupWithConnectTimestamp( @@ -1988,7 +1987,7 @@ func TestLockupDisconnects(t *testing.T) { m0Pub, m0Priv, m0Pub, m0Pub, 2*365*24*60*60*1e9, 2*365*24*60*60*1e9, - uint256.NewInt().SetUint64(1000), + uint256.NewInt(1000), 365*24*60*60*1e9) require.NoError(t, err) txHash := txn2.Hash() @@ -2010,9 +2009,9 @@ func TestLockupDisconnects(t *testing.T) { 2*365*24*60*60*1e9, 2*365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(1000), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt(1000), lockedBalanceEntry.BalanceBaseUnits) balanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) - require.Equal(t, *uint256.NewInt().SetUint64(999000), balanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(999000), balanceEntry.BalanceNanos) err = utxoView.DisconnectTransaction(txn1, txn1.Hash(), utxoOps1, blockHeight) require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) require.NoError(t, err) @@ -2027,7 +2026,7 @@ func TestLockupDisconnects(t *testing.T) { 2*365*24*60*60*1e9) require.True(t, lockedBalanceEntry == nil) balanceEntry, _, _ = utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) - require.Equal(t, *uint256.NewInt().SetUint64(1000000), balanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(1000000), balanceEntry.BalanceNanos) // // Test Update Coin Lockup Params for Profiles @@ -2258,7 +2257,7 @@ func TestLockupDisconnects(t *testing.T) { m4be, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m4PkBytes, m4PkBytes) require.NoError(t, err) require.Equal(t, *MaxUint256, m4LockedBalanceEntry.BalanceBaseUnits) - require.Equal(t, *uint256.NewInt(), m4be.BalanceNanos) + require.Equal(t, 
*uint256.NewInt(0), m4be.BalanceNanos) utxoOps, txn, _, err = _coinUnlockWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, @@ -2303,7 +2302,7 @@ func TestLockupDisconnects(t *testing.T) { 1000) require.NoError(t, err) m4be, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m4PkBytes, m4PkBytes) - require.Equal(t, *uint256.NewInt(), m4be.BalanceNanos) + require.Equal(t, *uint256.NewInt(0), m4be.BalanceNanos) require.Equal(t, *MaxUint256, m4LockedBalanceEntry.BalanceBaseUnits) } @@ -2331,8 +2330,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.True(t, m0LeftYieldCurvePoint == nil) m0BalanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) m3BalanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) - require.Equal(t, *uint256.NewInt().SetUint64(1000000), m0BalanceEntry.BalanceNanos) - require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(1000000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(0), m3BalanceEntry.BalanceNanos) m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -2363,12 +2362,12 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { _signTxn(t, updateTxn, m0Priv) lockupTxn, _, _, _, err := testMeta.chain.CreateCoinLockupTxn( m0PkBytes, m0PkBytes, m0PkBytes, tipTimestamp+2e9, tipTimestamp+2e9, - uint256.NewInt().SetUint64(1000), nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + uint256.NewInt(1000), nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) _signTxn(t, lockupTxn, m0Priv) transferTxn, _, _, _, err := testMeta.chain.CreateCoinLockupTransferTxn( m0PkBytes, m3PkBytes, m0PkBytes, tipTimestamp+2e9, - uint256.NewInt().SetUint64(1000), nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + 
uint256.NewInt(1000), nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) _signTxn(t, transferTxn, m0Priv) @@ -2411,8 +2410,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.Equal(t, uint64(1000), m0LeftYieldCurvePoint.LockupYieldAPYBasisPoints) m0BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) - require.Equal(t, *uint256.NewInt().SetUint64(999000), m0BalanceEntry.BalanceNanos) - require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(999000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(0), m3BalanceEntry.BalanceNanos) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -2428,7 +2427,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { tipTimestamp+2e9, tipTimestamp+2e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(1000), m3LockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt(1000), m3LockedBalanceEntry.BalanceBaseUnits) // // Construct a subsequent second block and test unlock. 
@@ -2467,8 +2466,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.NoError(t, err) m0BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) - require.Equal(t, *uint256.NewInt().SetUint64(999000), m0BalanceEntry.BalanceNanos) - require.Equal(t, *uint256.NewInt().SetUint64(1000), m3BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(999000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(1000), m3BalanceEntry.BalanceNanos) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -2522,8 +2521,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.Equal(t, uint64(1000), m0LeftYieldCurvePoint.LockupYieldAPYBasisPoints) m0BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) - require.Equal(t, *uint256.NewInt().SetUint64(999000), m0BalanceEntry.BalanceNanos) - require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(999000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(0), m3BalanceEntry.BalanceNanos) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -2539,7 +2538,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { tipTimestamp+2e9, tipTimestamp+2e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt().SetUint64(1000), m3LockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt(1000), m3LockedBalanceEntry.BalanceBaseUnits) // // Disconnect the first block and ensure state is reverted. 
@@ -2575,8 +2574,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.True(t, m0LeftYieldCurvePoint == nil) m0BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) - require.Equal(t, *uint256.NewInt().SetUint64(1000000), m0BalanceEntry.BalanceNanos) - require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(1000000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(0), m3BalanceEntry.BalanceNanos) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -2607,7 +2606,7 @@ func TestCoinLockupIndirectRecipients(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m3Pub, - 1000, 1000, uint256.NewInt().SetUint64(1000), 0) + 1000, 1000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -2628,7 +2627,7 @@ func TestCoinLockupIndirectRecipients(t *testing.T) { }) require.NoError(t, err) require.True(t, m3LockedBalanceEntry != nil) - require.True(t, m3LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) + require.True(t, m3LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1000))) // Check the m0 LockedBalanceEntry as non-existent m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ @@ -2645,7 +2644,7 @@ func TestCoinLockupIndirectRecipients(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m3Pub, - 1050, 1100, uint256.NewInt().SetUint64(1000), 0) + 1050, 1100, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -2658,7 +2657,7 @@ func 
TestCoinLockupIndirectRecipients(t *testing.T) { }) require.NoError(t, err) require.True(t, m3LockedBalanceEntry != nil) - require.True(t, m3LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) + require.True(t, m3LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1000))) // Check the m0 LockedBalanceEntry as non-existent m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ @@ -2683,7 +2682,7 @@ func TestSimpleVestedLockup(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + 1000, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -2728,15 +2727,15 @@ func TestSimpleVestedLockup(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) // Get the updated m0 balance entry base units and ensure it's been credited 500 base units. 
utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) updatedBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) - require.True(t, uint256.NewInt().SetUint64(500).Eq( - uint256.NewInt().Sub( + require.True(t, uint256.NewInt(500).Eq( + uint256.NewInt(0).Sub( &updatedBalanceEntry.BalanceNanos, &originalBalanceEntry.BalanceNanos))) originalBalanceEntry = updatedBalanceEntry @@ -2774,15 +2773,15 @@ func TestSimpleVestedLockup(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(250))) // Get the updated m0 balance entry base units and ensure it's been credited 250 base units. utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) require.NoError(t, err) updatedBalanceEntry, _, _ = utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) - require.True(t, uint256.NewInt().SetUint64(250).Eq( - uint256.NewInt().Sub( + require.True(t, uint256.NewInt(250).Eq( + uint256.NewInt(0).Sub( &updatedBalanceEntry.BalanceNanos, &originalBalanceEntry.BalanceNanos))) originalBalanceEntry = updatedBalanceEntry @@ -2826,14 +2825,14 @@ func TestSimpleVestedLockup(t *testing.T) { require.NoError(t, err) updatedBalanceEntry, _, _ = utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) - require.True(t, uint256.NewInt().SetUint64(250).Eq( - uint256.NewInt().Sub( + require.True(t, uint256.NewInt(250).Eq( + uint256.NewInt(0).Sub( &updatedBalanceEntry.BalanceNanos, &originalBalanceEntry.BalanceNanos))) originalBalanceEntry = updatedBalanceEntry // Check that we're back to where we started (1e6 base units) - require.True(t, 
uint256.NewInt().SetUint64(1e6).Eq(&updatedBalanceEntry.BalanceNanos)) + require.True(t, uint256.NewInt(1e6).Eq(&updatedBalanceEntry.BalanceNanos)) } func TestNoOverlapVestedLockupConsolidation(t *testing.T) { @@ -2848,7 +2847,7 @@ func TestNoOverlapVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + 1000, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -2857,7 +2856,7 @@ func TestNoOverlapVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 3000, 4000, uint256.NewInt().SetUint64(1000), 0) + 3000, 4000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -2877,7 +2876,7 @@ func TestNoOverlapVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1000))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -2886,7 +2885,7 @@ func TestNoOverlapVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1000))) } func TestPerfectOverlapVestedLockupConsolidation(t *testing.T) { @@ -2901,7 +2900,7 @@ func TestPerfectOverlapVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, 
testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + 1000, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -2910,7 +2909,7 @@ func TestPerfectOverlapVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + 1000, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -2930,7 +2929,7 @@ func TestPerfectOverlapVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(2000))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(2000))) } func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { @@ -2952,7 +2951,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + 1000, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -2961,7 +2960,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1500, 2000, uint256.NewInt().SetUint64(1000), 0) + 1500, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -2981,7 +2980,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) m0LockedBalanceEntry, err 
= utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -2990,7 +2989,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1500))) // Now we test the opposite vested lockup consolidation type: // existing lockup: ------------------- @@ -3004,7 +3003,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1500, 2000, uint256.NewInt().SetUint64(1000), 0) + 1500, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3013,7 +3012,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + 1000, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3028,7 +3027,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, @@ -3037,7 +3036,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, 
m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1500))) } func TestRightOverhangVestedLockupConsolidation(t *testing.T) { @@ -3059,7 +3058,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + 1000, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3068,7 +3067,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 1499, uint256.NewInt().SetUint64(1000), 0) + 1000, 1499, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3088,7 +3087,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1499))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1499))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3097,7 +3096,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(501))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(501))) // Now we test the opposite vested lockup consolidation type: // existing lockup: ------------------- @@ -3111,7 +3110,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, 
testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1000, 1499, uint256.NewInt().SetUint64(1000), 0) + 1000, 1499, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3120,7 +3119,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + 1000, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3135,7 +3134,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1500))) m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, @@ -3144,7 +3143,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) } func TestExternalThreeWayLockupConsolidation(t *testing.T) { @@ -3165,7 +3164,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1250, 1750, uint256.NewInt().SetUint64(1000), 0) + 1250, 1750, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3174,7 +3173,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, 
testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 1500, uint256.NewInt().SetUint64(1000), 0) + 1000, 1500, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3194,7 +3193,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3203,7 +3202,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3212,7 +3211,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1000))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3233,7 +3232,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1000, 1500, uint256.NewInt().SetUint64(1000), 0) + 1000, 1500, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3242,7 +3241,7 @@ func 
TestExternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1250, 1750, uint256.NewInt().SetUint64(1000), 0) + 1250, 1750, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3257,7 +3256,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, @@ -3266,7 +3265,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1002))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1002))) m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, @@ -3275,7 +3274,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(498))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(498))) } func TestInternalThreeWayLockupConsolidation(t *testing.T) { @@ -3296,7 +3295,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + 1000, 2000, 
uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3305,7 +3304,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1250, 1750, uint256.NewInt().SetUint64(1000), 0) + 1250, 1750, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3325,7 +3324,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(250))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3334,7 +3333,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1500))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3343,7 +3342,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(250))) // Now we test the opposite vested lockup consolidation type: // existing lockup: ------------------ @@ -3356,7 +3355,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, 
testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1250, 1750, uint256.NewInt().SetUint64(1000), 0) + 1250, 1750, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3365,7 +3364,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + 1000, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3380,7 +3379,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(250))) m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, @@ -3389,7 +3388,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1501))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1501))) m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, @@ -3398,7 +3397,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(249))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(249))) } func TestSimpleJointExistingVestedLockups(t *testing.T) { @@ -3419,7 +3418,7 @@ func TestSimpleJointExistingVestedLockups(t *testing.T) { _, _, _, err := 
_coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 1500, uint256.NewInt().SetUint64(1000), 0) + 1000, 1500, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3428,7 +3427,7 @@ func TestSimpleJointExistingVestedLockups(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1501, 2000, uint256.NewInt().SetUint64(1000), 0) + 1501, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3437,7 +3436,7 @@ func TestSimpleJointExistingVestedLockups(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + 1000, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3457,7 +3456,7 @@ func TestSimpleJointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1501))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1501))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3466,7 +3465,7 @@ func TestSimpleJointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1499))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1499))) } func TestSimpleDisjointExistingVestedLockups(t *testing.T) { @@ -3487,7 +3486,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, 
testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt().SetUint64(1000), 0) + 1000, 2000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3496,7 +3495,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 3000, 4000, uint256.NewInt().SetUint64(1000), 0) + 3000, 4000, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3505,7 +3504,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1500, 3500, uint256.NewInt().SetUint64(1000), 0) + 1500, 3500, uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3525,7 +3524,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3534,7 +3533,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(750))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(750))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3543,7 +3542,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, 
m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(499))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(499))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3552,7 +3551,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(751))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(751))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3561,7 +3560,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) } func TestVestingIntersectionLimit(t *testing.T) { @@ -3583,7 +3582,7 @@ func TestVestingIntersectionLimit(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - int64(ii*1000)+1, int64(ii*1000)+1000, uint256.NewInt().SetUint64(1000), 0) + int64(ii*1000)+1, int64(ii*1000)+1000, uint256.NewInt(1000), 0) require.NoError(t, err) } } @@ -3599,7 +3598,7 @@ func TestVestingIntersectionLimit(t *testing.T) { DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(1e9), + CoinsToMintNanos: *uint256.NewInt(1e9), CoinsToBurnNanos: uint256.Int{}, TransferRestrictionStatus: 0, }) @@ -3612,7 +3611,7 @@ func TestVestingIntersectionLimit(t 
*testing.T) { t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, 1, int64((maxIntersections-1)*1000)+1000, - uint256.NewInt().SetUint64(uint64(maxIntersections)*1000), 0) + uint256.NewInt(uint64(maxIntersections)*1000), 0) require.NoError(t, err) } @@ -3631,7 +3630,7 @@ func TestVestingIntersectionLimit(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(2000))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(2000))) } // Now add another vested lockup, pushing us over the limit. @@ -3641,7 +3640,7 @@ func TestVestingIntersectionLimit(t *testing.T) { m0Pub, m0Priv, m0Pub, m0Pub, int64(maxIntersections*1000)+1, int64(maxIntersections*1000)+1000, - uint256.NewInt().SetUint64(1000), 0) + uint256.NewInt(1000), 0) require.NoError(t, err) } @@ -3652,7 +3651,7 @@ func TestVestingIntersectionLimit(t *testing.T) { t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, 1, int64((maxIntersections)*1000)+1000, - uint256.NewInt().SetUint64(uint64(maxIntersections)*1000), 0) + uint256.NewInt(uint64(maxIntersections)*1000), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupViolatesVestingIntersectionLimit) } @@ -3664,7 +3663,7 @@ func TestVestingIntersectionLimit(t *testing.T) { t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, 1000, int64((maxIntersections)*1000)+1, - uint256.NewInt().SetUint64(uint64(maxIntersections)*1000), 0) + uint256.NewInt(uint64(maxIntersections)*1000), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupViolatesVestingIntersectionLimit) } @@ -3688,8 +3687,8 @@ func TestVestingIntersectionLimit(t *testing.T) { finalBalanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) require.True(t, 
finalBalanceEntry != nil) require.True(t, - uint256.NewInt().Sub(&finalBalanceEntry.BalanceNanos, &startingBalanceEntry.BalanceNanos).Eq( - uint256.NewInt().SetUint64(uint64(maxIntersections)*2000+1000))) + uint256.NewInt(0).Sub(&finalBalanceEntry.BalanceNanos, &startingBalanceEntry.BalanceNanos).Eq( + uint256.NewInt(uint64(maxIntersections)*2000+1000))) // Now just to be extra sure, check to make sure there's no more unlockable locked balance entries. unvestedUnlockable, vestedUnlockable, err := @@ -3730,7 +3729,7 @@ func TestRealWorldLockupsUseCase(t *testing.T) { DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(1e7), + CoinsToMintNanos: *uint256.NewInt(1e7), CoinsToBurnNanos: uint256.Int{}, TransferRestrictionStatus: 0, }) @@ -3748,7 +3747,7 @@ func TestRealWorldLockupsUseCase(t *testing.T) { startTime := time.Date(2024, time.January, 1, 14, 0, 0, 0, time.UTC) // We iterate for 10 years. - totalLocked := uint256.NewInt() + totalLocked := uint256.NewInt(0) for ii := 0; ii < 365*5; ii++ { // Check if it's time for a deposit. if ii%7 != 0 { @@ -3777,14 +3776,14 @@ func TestRealWorldLockupsUseCase(t *testing.T) { m0Pub, m0Priv, m0Pub, m0Pub, nextLockupStartTime.UnixNano(), nextLockupEndTime.UnixNano()-1, - uint256.NewInt().SetUint64(1000), + uint256.NewInt(1000), blockConnectTime.UnixNano()) require.NoError(t, err) } // Add to total locked. - totalLocked = uint256.NewInt().Add( - totalLocked, uint256.NewInt().SetUint64(1000)) + totalLocked = uint256.NewInt(0).Add( + totalLocked, uint256.NewInt(1000)) } // Verify the locked balance entries in the db. @@ -3797,10 +3796,10 @@ func TestRealWorldLockupsUseCase(t *testing.T) { require.NoError(t, err) // Verify the lockedBalanceEntries locked the correct amount and that the entries are consecutive. 
- totalLockedFound := uint256.NewInt() + totalLockedFound := uint256.NewInt(0) for ii, lockedBalanceEntry := range lockedBalanceEntries { // Add to the balance found. - totalLockedFound = uint256.NewInt().Add( + totalLockedFound = uint256.NewInt(0).Add( totalLockedFound, &lockedBalanceEntry.BalanceBaseUnits) // Check if we're consecutive. @@ -3885,7 +3884,7 @@ func _setUpProfilesAndMintM0M1DAOCoins(testMeta *TestMeta) { DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(1e6), + CoinsToMintNanos: *uint256.NewInt(1e6), CoinsToBurnNanos: uint256.Int{}, TransferRestrictionStatus: 0, }) @@ -3901,7 +3900,7 @@ func _setUpProfilesAndMintM0M1DAOCoins(testMeta *TestMeta) { DAOCoinMetadata{ ProfilePublicKey: m1PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt().SetUint64(1e9), + CoinsToMintNanos: *uint256.NewInt(1e9), CoinsToBurnNanos: uint256.Int{}, TransferRestrictionStatus: 0, }) diff --git a/lib/block_view_message.go b/lib/block_view_message.go index 1b64f481e..3e55a3702 100644 --- a/lib/block_view_message.go +++ b/lib/block_view_message.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/golang/glog" "github.com/pkg/errors" "math" @@ -814,7 +814,7 @@ func (bav *UtxoView) _connectMessagingGroup( var messagingPublicKey *PublicKey if reflect.DeepEqual(txMeta.MessagingPublicKey, GetS256BasePointCompressed()) { messagingGroupKey = NewMessagingGroupKey(NewPublicKey(GetS256BasePointCompressed()), txMeta.MessagingGroupKeyName) - _, keyPublic := btcec.PrivKeyFromBytes(btcec.S256(), Sha256DoubleHash(txMeta.MessagingGroupKeyName)[:]) + _, keyPublic := btcec.PrivKeyFromBytes(Sha256DoubleHash(txMeta.MessagingGroupKeyName)[:]) messagingPublicKey = NewPublicKey(keyPublic.SerializeCompressed()) } else { messagingGroupKey = NewMessagingGroupKey(NewPublicKey(txn.PublicKey), 
txMeta.MessagingGroupKeyName) diff --git a/lib/block_view_message_test.go b/lib/block_view_message_test.go index dfa640926..a7e08fb46 100644 --- a/lib/block_view_message_test.go +++ b/lib/block_view_message_test.go @@ -9,7 +9,8 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/ecdsa" "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -19,20 +20,19 @@ func TestBasePointSignature(t *testing.T) { require := require.New(t) // Retrieve the base point bytes and parse them to a public key. basePointBytes := GetS256BasePointCompressed() - basePoint, err := btcec.ParsePubKey(basePointBytes, btcec.S256()) + basePoint, err := btcec.ParsePubKey(basePointBytes) require.NoError(err) // Verify that k = 1 is the correct private key for the secp256k1 base point priveKeyBytes := []byte{1} - priveKey, publicKey := btcec.PrivKeyFromBytes(btcec.S256(), priveKeyBytes) + priveKey, publicKey := btcec.PrivKeyFromBytes(priveKeyBytes) require.Equal(basePointBytes, publicKey.SerializeCompressed()) require.Equal(basePoint.SerializeCompressed(), publicKey.SerializeCompressed()) // Now test signing messages with the private key of the base point k = 1. message := []byte("Test message") messageHash := Sha256DoubleHash(message) - messageSignature, err := priveKey.Sign(messageHash[:]) - require.NoError(err) + messageSignature := ecdsa.Sign(priveKey, messageHash[:]) // Now make sure the base point passes signature verification. 
require.Equal(true, messageSignature.Verify(messageHash[:], basePoint)) @@ -333,7 +333,7 @@ func TestPrivateMessage(t *testing.T) { require.Equal(messageEntry.RecipientPublicKey[:], _strToPk(t, m1Pub)) require.Equal(messageEntry.TstampNanos, tstamp1) require.Equal(messageEntry.isDeleted, false) - priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), _strToPk(t, m1Priv)) + priv, _ := btcec.PrivKeyFromBytes(_strToPk(t, m1Priv)) decryptedBytes, err := DecryptBytesWithPrivateKey(messageEntry.EncryptedText, priv.ToECDSA()) require.NoError(err) require.Equal(message1, string(decryptedBytes)) @@ -623,13 +623,13 @@ func TestPrivateMessage(t *testing.T) { func _generateMessagingKey(senderPub []byte, senderPriv []byte, keyName []byte) ( priv *btcec.PrivateKey, sign []byte, messagingKeyEntry *MessagingGroupEntry) { - senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPriv) + senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPriv) - priv, _ = btcec.NewPrivateKey(btcec.S256()) + priv, _ = btcec.NewPrivateKey() pub := priv.PubKey().SerializeCompressed() payload := append(pub, keyName...) - signature, _ := senderPrivKey.Sign(Sha256DoubleHash(payload)[:]) + signature := ecdsa.Sign(senderPrivKey, Sha256DoubleHash(payload)[:]) return priv, signature.Serialize(), _initMessagingKey(senderPub, pub, keyName) } @@ -1469,7 +1469,7 @@ func TestMessagingKeys(t *testing.T) { nil) // The DB entry should have the messaging public key derived deterministically from the group key name. // Compute the public key and compare it with the DB entry. 
- _, groupPkBytes := btcec.PrivKeyFromBytes(btcec.S256(), Sha256DoubleHash(groupKeyName)[:]) + _, groupPkBytes := btcec.PrivKeyFromBytes(Sha256DoubleHash(groupKeyName)[:]) groupPk := NewPublicKey(groupPkBytes.SerializeCompressed()) expectedEntry := &MessagingGroupEntry{} rr := bytes.NewReader(EncodeToBytes(0, entry)) @@ -2276,7 +2276,7 @@ func TestGroupMessages(t *testing.T) { // Define helper functions for encryption/decryption so that we can do some real crypto. encrypt := func(plain, recipient []byte) []byte { - recipientPk, err := btcec.ParsePubKey(recipient, btcec.S256()) + recipientPk, err := btcec.ParsePubKey(recipient) if err != nil { return nil } @@ -2288,7 +2288,7 @@ func TestGroupMessages(t *testing.T) { return encryptedMessageBytes } decrypt := func(cipher, recipientPrivKey []byte) []byte { - recipientPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), recipientPrivKey) + recipientPriv, _ := btcec.PrivKeyFromBytes(recipientPrivKey) plain, err := DecryptBytesWithPrivateKey(cipher, recipientPriv.ToECDSA()) if err != nil { fmt.Println(err) diff --git a/lib/block_view_new_message_test.go b/lib/block_view_new_message_test.go index baf5124ad..9da3775c8 100644 --- a/lib/block_view_new_message_test.go +++ b/lib/block_view_new_message_test.go @@ -3,7 +3,7 @@ package lib import ( "bytes" "fmt" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/pkg/errors" "github.com/stretchr/testify/require" "math" @@ -91,17 +91,17 @@ func TestNewMessage(t *testing.T) { initChainCallback: initChainCallback, } - groupPriv1, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv1, err := btcec.NewPrivateKey() require.NoError(err) groupPk1 := groupPriv1.PubKey().SerializeCompressed() _ = groupPk1 - groupPriv2, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv2, err := btcec.NewPrivateKey() require.NoError(err) groupPk2 := groupPriv2.PubKey().SerializeCompressed() _ = groupPk2 - groupPriv3, err := btcec.NewPrivateKey(btcec.S256()) + groupPriv3, err := 
btcec.NewPrivateKey() require.NoError(err) groupPk3 := groupPriv3.PubKey().SerializeCompressed() _ = groupPk3 @@ -376,13 +376,13 @@ func TestNewMessage(t *testing.T) { // (m0, defaultKey) -> // (m1, defaultKey) -> // (m3, defaultKey) -> - m0DefaultKeyPriv, err := btcec.NewPrivateKey(btcec.S256()) + m0DefaultKeyPriv, err := btcec.NewPrivateKey() require.NoError(err) m0DefaultKeyPk := NewPublicKey(m0DefaultKeyPriv.PubKey().SerializeCompressed()) - m1DefaultKeyPriv, err := btcec.NewPrivateKey(btcec.S256()) + m1DefaultKeyPriv, err := btcec.NewPrivateKey() require.NoError(err) m1DefaultKeyPk := NewPublicKey(m1DefaultKeyPriv.PubKey().SerializeCompressed()) - m3DefaultKeyPriv, err := btcec.NewPrivateKey(btcec.S256()) + m3DefaultKeyPriv, err := btcec.NewPrivateKey() require.NoError(err) m3DefaultKeyPk := NewPublicKey(m3DefaultKeyPriv.PubKey().SerializeCompressed()) @@ -406,7 +406,7 @@ func TestNewMessage(t *testing.T) { // (m3, groupName3) -> // (m3, defaultKey) // (m0, defaultKey) - groupName3SharedPriv, err := btcec.NewPrivateKey(btcec.S256()) + groupName3SharedPriv, err := btcec.NewPrivateKey() require.NoError(err) groupName3SharedPk := NewPublicKey(groupName3SharedPriv.PubKey().SerializeCompressed()) groupName3SharedPk_EncryptedTo_m0DefaultPk := _encryptBytes(groupName3SharedPriv.Serialize(), *m0DefaultKeyPk) @@ -1157,7 +1157,7 @@ func _verifyGroupMessageEntriesDecryption(t *testing.T, utxoView *UtxoView, grou require.NoError(err) require.NotNil(memberAccessGroupEntry) require.Equal(false, memberAccessGroupEntry.isDeleted) - _, memberAccessGroupPublicKeyFromPriv := btcec.PrivKeyFromBytes(btcec.S256(), memberAccessGroupPrivateKey) + _, memberAccessGroupPublicKeyFromPriv := btcec.PrivKeyFromBytes(memberAccessGroupPrivateKey) require.Equal(true, bytes.Equal(memberAccessGroupEntry.AccessGroupPublicKey.ToBytes(), memberAccessGroupPublicKeyFromPriv.SerializeCompressed())) // Decrypt the EncryptedKey present in the memberEntry to get the message encryption/decryption key. 
decryptionKey := _decryptBytes(memberEntry.EncryptedKey, memberAccessGroupPrivateKey) @@ -1181,7 +1181,7 @@ func _verifyEqualMessageEntries(t *testing.T, messageEntryA *NewMessageEntry, me } func _encryptBytes(plainText []byte, publicKey PublicKey) []byte { - pk, err := btcec.ParsePubKey(publicKey.ToBytes(), btcec.S256()) + pk, err := btcec.ParsePubKey(publicKey.ToBytes()) if err != nil { return nil } @@ -1194,7 +1194,7 @@ func _encryptBytes(plainText []byte, publicKey PublicKey) []byte { } func _decryptBytes(cipherText []byte, privateKey []byte) []byte { - recipientPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), privateKey) + recipientPriv, _ := btcec.PrivKeyFromBytes(privateKey) plain, err := DecryptBytesWithPrivateKey(cipherText, recipientPriv.ToECDSA()) if err != nil { fmt.Println(err) diff --git a/lib/block_view_nft.go b/lib/block_view_nft.go index fd6b1983d..0892c2a26 100644 --- a/lib/block_view_nft.go +++ b/lib/block_view_nft.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/golang/glog" "github.com/pkg/errors" "math" @@ -506,7 +506,7 @@ func (bav *UtxoView) extractAdditionalRoyaltyMap( pkBytess := pkBytesIter // Validate the public key - if _, err = btcec.ParsePubKey(pkBytess[:], btcec.S256()); err != nil { + if _, err = btcec.ParsePubKey(pkBytess[:]); err != nil { return nil, 0, errors.Wrapf( RuleErrorAdditionalRoyaltyPubKeyMustBeValid, "Error parsing public key: %v, %v", PkToStringBoth(pkBytess[:]), err) @@ -1286,7 +1286,7 @@ func (bav *UtxoView) _helpConnectNFTSold(args HelpConnectNFTSoldStruct) ( return 0, nil, fmt.Errorf( "_helpConnectNFTSold: invalid public key found for pkid in additional DESO royalty map") } - if _, err = btcec.ParsePubKey(pkBytes, btcec.S256()); err != nil { + if _, err = btcec.ParsePubKey(pkBytes); err != nil { return 0, nil, errors.Wrapf(err, "Unable to parse public key") } diff --git a/lib/block_view_post.go b/lib/block_view_post.go index 
2ea329a0c..e199e459c 100644 --- a/lib/block_view_post.go +++ b/lib/block_view_post.go @@ -11,7 +11,7 @@ import ( "sort" "strings" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/davecgh/go-spew/spew" "github.com/dgraph-io/badger/v4" "github.com/gernest/mention" diff --git a/lib/block_view_profile.go b/lib/block_view_profile.go index 35400c350..36176ceff 100644 --- a/lib/block_view_profile.go +++ b/lib/block_view_profile.go @@ -3,8 +3,9 @@ package lib import ( "encoding/hex" "fmt" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/davecgh/go-spew/spew" + ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "github.com/ethereum/go-ethereum/crypto" "github.com/golang/glog" "github.com/holiman/uint256" @@ -451,10 +452,10 @@ func (bav *UtxoView) setProfileMappings(profile *PGProfile) (*ProfileEntry, *PKI var err error daoCoinsInCirculationNanos, err = uint256.FromHex(profile.DAOCoinCoinsInCirculationNanos) if err != nil { - daoCoinsInCirculationNanos = uint256.NewInt() + daoCoinsInCirculationNanos = uint256.NewInt(0) } } else { - daoCoinsInCirculationNanos = uint256.NewInt() + daoCoinsInCirculationNanos = uint256.NewInt(0) } profileEntry = &ProfileEntry{ PublicKey: profile.PublicKey.ToBytes(), @@ -465,7 +466,7 @@ func (bav *UtxoView) setProfileMappings(profile *PGProfile) (*ProfileEntry, *PKI CreatorBasisPoints: profile.CreatorBasisPoints, DeSoLockedNanos: profile.DeSoLockedNanos, NumberOfHolders: profile.NumberOfHolders, - CoinsInCirculationNanos: *uint256.NewInt().SetUint64(profile.CoinsInCirculationNanos), + CoinsInCirculationNanos: *uint256.NewInt(profile.CoinsInCirculationNanos), CoinWatermarkNanos: profile.CoinWatermarkNanos, MintingDisabled: profile.MintingDisabled, }, @@ -576,7 +577,7 @@ func (bav *UtxoView) _connectUpdateProfile( if len(txMeta.ProfilePublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, errors.Wrapf(RuleErrorProfilePublicKeySize, "_connectUpdateProfile: %#v", 
txMeta.ProfilePublicKey) } - _, err := btcec.ParsePubKey(txMeta.ProfilePublicKey, btcec.S256()) + _, err := btcec.ParsePubKey(txMeta.ProfilePublicKey) if err != nil { return 0, 0, nil, errors.Wrapf(RuleErrorProfileBadPublicKey, "_connectUpdateProfile: %v", err) } @@ -825,7 +826,7 @@ func (bav *UtxoView) _connectSwapIdentity( if len(fromPublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, RuleErrorFromPublicKeyIsRequired } - if _, err := btcec.ParsePubKey(fromPublicKey, btcec.S256()); err != nil { + if _, err := btcec.ParsePubKey(fromPublicKey); err != nil { return 0, 0, nil, errors.Wrap(RuleErrorInvalidFromPublicKey, err.Error()) } @@ -834,7 +835,7 @@ func (bav *UtxoView) _connectSwapIdentity( if len(toPublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, RuleErrorToPublicKeyIsRequired } - if _, err := btcec.ParsePubKey(toPublicKey, btcec.S256()); err != nil { + if _, err := btcec.ParsePubKey(toPublicKey); err != nil { return 0, 0, nil, errors.Wrap(RuleErrorInvalidToPublicKey, err.Error()) } @@ -968,13 +969,13 @@ func _verifyDeSoSignature(signer, data, signature []byte) error { bytes := Sha256DoubleHash(data) // Convert signature to *btcec.Signature. - sign, err := btcec.ParseDERSignature(signature, btcec.S256()) + sign, err := ecdsa2.ParseDERSignature(signature) if err != nil { return errors.Wrapf(err, "_verifyBytesSignature: Problem parsing access signature: ") } // Verify signature. - ownerPk, _ := btcec.ParsePubKey(signer, btcec.S256()) + ownerPk, _ := btcec.ParsePubKey(signer) if !sign.Verify(bytes[:], ownerPk) { return fmt.Errorf("_verifyBytesSignature: Invalid signature") } @@ -994,17 +995,17 @@ func TextAndHash(data []byte) ([]byte, string) { func VerifyEthPersonalSignature(signer, data, signature []byte) error { // Ethereum likes uncompressed public keys while we use compressed keys a lot. Make sure we have uncompressed pk bytes. 
var uncompressedSigner []byte - pubKey, err := btcec.ParsePubKey(signer, btcec.S256()) + pubKey, err := btcec.ParsePubKey(signer) if err != nil { return errors.Wrapf(err, "VerifyEthPersonalSignature: Problem parsing signer public key") } if len(signer) == btcec.PubKeyBytesLenCompressed { uncompressedSigner = pubKey.SerializeUncompressed() - } else if len(signer) == btcec.PubKeyBytesLenUncompressed { + } else if len(signer) == BtcecPubKeyBytesLenUncompressed { uncompressedSigner = signer } else { return fmt.Errorf("VerifyEthPersonalSignature: Public key has incorrect length. It should be either "+ - "(%v) for compressed key or (%v) for uncompressed key", btcec.PubKeyBytesLenCompressed, btcec.PubKeyBytesLenUncompressed) + "(%v) for compressed key or (%v) for uncompressed key", btcec.PubKeyBytesLenCompressed, BtcecPubKeyBytesLenUncompressed) } // Change the data bytes into Ethereum's personal_sign message standard. This will prepend the message prefix and hash @@ -1054,7 +1055,7 @@ func (bav *UtxoView) _disconnectUpdateProfile( if len(txMeta.ProfilePublicKey) != btcec.PubKeyBytesLenCompressed { return fmt.Errorf("_disconnectUpdateProfile: %#v", txMeta.ProfilePublicKey) } - _, err := btcec.ParsePubKey(txMeta.ProfilePublicKey, btcec.S256()) + _, err := btcec.ParsePubKey(txMeta.ProfilePublicKey) if err != nil { return fmt.Errorf("_disconnectUpdateProfile: %v", err) } diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 15dc7b4f9..793d4373d 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1629,7 +1629,7 @@ func (bav *UtxoView) _connectUnstake( } // 2. Create a currentStakeEntry, if updated StakeAmountNanos > 0. 
var currentStakeEntry *StakeEntry - if stakeAmountNanos.Cmp(uint256.NewInt()) > 0 { + if stakeAmountNanos.Cmp(uint256.NewInt(0)) > 0 { currentStakeEntry = prevStakeEntry.Copy() currentStakeEntry.StakeAmountNanos = stakeAmountNanos.Clone() } @@ -1906,7 +1906,7 @@ func (bav *UtxoView) _connectUnlockStake( } // Calculate the TotalUnlockedAmountNanos and delete the PrevLockedStakeEntries. - totalUnlockedAmountNanos := uint256.NewInt() + totalUnlockedAmountNanos := uint256.NewInt(0) for _, prevLockedStakeEntry := range prevLockedStakeEntries { totalUnlockedAmountNanos, err = SafeUint256().Add( totalUnlockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos, @@ -1994,7 +1994,7 @@ func (bav *UtxoView) _disconnectUnlockStake( } // Calculate the TotalUnlockedAmountNanos. - totalUnlockedAmountNanos := uint256.NewInt() + totalUnlockedAmountNanos := uint256.NewInt(0) var err error for _, prevLockedStakeEntry := range operationData.PrevLockedStakeEntries { totalUnlockedAmountNanos, err = SafeUint256().Add( @@ -2060,7 +2060,7 @@ func (bav *UtxoView) IsValidStakeMetadata(transactorPkBytes []byte, metadata *St if err != nil { return errors.Wrapf(err, "UtxoView.IsValidStakeMetadata: ") } - if uint256.NewInt().SetUint64(transactorDeSoBalanceNanos).Cmp(metadata.StakeAmountNanos) < 0 { + if uint256.NewInt(transactorDeSoBalanceNanos).Cmp(metadata.StakeAmountNanos) < 0 { return errors.Wrapf(RuleErrorInvalidStakeInsufficientBalance, "UtxoView.IsValidStakeMetadata: ") } @@ -2199,7 +2199,7 @@ func (bav *UtxoView) IsCorrectValidatorTotalStakeAmountNanos(validatorEntry *Val } } // Calculate the total stake amount for the validator. - totalStakeAmountNanos := uint256.NewInt() + totalStakeAmountNanos := uint256.NewInt(0) for _, stakeEntry := range stakeEntryMap { // If an entry is deleted, we don't count it towards the total. if stakeEntry.isDeleted { @@ -2243,7 +2243,7 @@ func (bav *UtxoView) SanityCheckStakeTxn( } // Validate StakeEntry.StakeAmountNanos increase. 
- prevStakeEntry := &StakeEntry{StakeAmountNanos: uint256.NewInt()} + prevStakeEntry := &StakeEntry{StakeAmountNanos: uint256.NewInt(0)} if len(utxoOp.PrevStakeEntries) == 1 { prevStakeEntry = utxoOp.PrevStakeEntries[0] } @@ -2279,7 +2279,7 @@ func (bav *UtxoView) SanityCheckStakeTxn( if err != nil { return errors.Wrapf(err, "SanityCheckStakeTxn: error including fees in TransactorBalance decrease: ") } - if !uint256.NewInt().SetUint64(transactorBalanceNanosDecrease).Eq(amountNanos) { + if !uint256.NewInt(transactorBalanceNanosDecrease).Eq(amountNanos) { return errors.New("SanityCheckStakeTxn: TransactorBalance decrease does not match") } @@ -2331,7 +2331,7 @@ func (bav *UtxoView) SanityCheckUnstakeTxn(transactorPKID *PKID, utxoOp *UtxoOpe return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving StakeEntry: ") } if currentStakeEntry == nil { - currentStakeEntry = &StakeEntry{StakeAmountNanos: uint256.NewInt()} + currentStakeEntry = &StakeEntry{StakeAmountNanos: uint256.NewInt(0)} } stakeEntryStakeAmountNanosDecrease, err := SafeUint256().Sub( prevStakeEntry.StakeAmountNanos, currentStakeEntry.StakeAmountNanos, @@ -2344,7 +2344,7 @@ func (bav *UtxoView) SanityCheckUnstakeTxn(transactorPKID *PKID, utxoOp *UtxoOpe } // Validate LockedStakeEntry.LockedAmountNanos increase. 
- prevLockedStakeEntry := &LockedStakeEntry{LockedAmountNanos: uint256.NewInt()} + prevLockedStakeEntry := &LockedStakeEntry{LockedAmountNanos: uint256.NewInt(0)} if len(utxoOp.PrevLockedStakeEntries) == 1 { prevLockedStakeEntry = utxoOp.PrevLockedStakeEntries[0] } @@ -2395,7 +2395,7 @@ func (bav *UtxoView) SanityCheckUnlockStakeTxn( if utxoOp.PrevLockedStakeEntries == nil || len(utxoOp.PrevLockedStakeEntries) == 0 { return errors.New("SanityCheckUnlockStakeTxn: PrevLockedStakeEntries is empty") } - totalUnlockedAmountNanos := uint256.NewInt() + totalUnlockedAmountNanos := uint256.NewInt(0) var err error for _, prevLockedStakeEntry := range utxoOp.PrevLockedStakeEntries { totalUnlockedAmountNanos, err = SafeUint256().Add(totalUnlockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos) @@ -2422,7 +2422,7 @@ func (bav *UtxoView) SanityCheckUnlockStakeTxn( if err != nil { return errors.Wrapf(err, "SanityCheckStakeTxn: error including fees in TransactorBalance decrease: ") } - if !uint256.NewInt().SetUint64(transactorBalanceNanosIncrease).Eq(amountNanos) { + if !uint256.NewInt(transactorBalanceNanosIncrease).Eq(amountNanos) { return errors.New("SanityCheckUnlockStakeTxn: TransactorBalance increase does not match") } @@ -2907,7 +2907,7 @@ func (bav *UtxoView) CreateUnlockStakeTxindexMetadata(utxoOp *UtxoOperation, txn validatorPublicKeyBase58Check := PkToString(metadata.ValidatorPublicKey.ToBytes(), bav.Params) // Calculate TotalUnlockedAmountNanos. 
- totalUnlockedAmountNanos := uint256.NewInt() + totalUnlockedAmountNanos := uint256.NewInt(0) var err error for _, prevLockedStakeEntry := range utxoOp.PrevLockedStakeEntries { totalUnlockedAmountNanos, err = SafeUint256().Add( @@ -2915,7 +2915,7 @@ func (bav *UtxoView) CreateUnlockStakeTxindexMetadata(utxoOp *UtxoOperation, txn ) if err != nil { glog.Errorf("CreateUnlockStakeTxindexMetadata: error calculating TotalUnlockedAmountNanos: %v", err) - totalUnlockedAmountNanos = uint256.NewInt() + totalUnlockedAmountNanos = uint256.NewInt(0) break } } diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 954a76d16..7bde74817 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -6,7 +6,7 @@ import ( "math" "testing" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/holiman/uint256" "github.com/stretchr/testify/require" ) @@ -128,7 +128,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt().SetUint64(100), + StakeAmountNanos: uint256.NewInt(100), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -145,7 +145,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m2PkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt(), + StakeAmountNanos: uint256.NewInt(0), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -158,7 +158,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: 99, - StakeAmountNanos: uint256.NewInt().SetUint64(1), + StakeAmountNanos: uint256.NewInt(1), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -184,7 
+184,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt(), + StakeAmountNanos: uint256.NewInt(0), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -210,7 +210,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt().SetUint64(math.MaxUint64), + StakeAmountNanos: uint256.NewInt(math.MaxUint64), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -224,7 +224,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt().SetUint64(100), + StakeAmountNanos: uint256.NewInt(100), } extraData := map[string][]byte{"TestKey": []byte("TestValue")} feeNanos, err := _submitStakeTxn( @@ -237,14 +237,14 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.NotNil(t, stakeEntry) require.Equal(t, stakeEntry.RewardMethod, StakingRewardMethodPayToBalance) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(100)) require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue")) // Verify ValidatorEntry.TotalStakeAmountNanos. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). 
m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) @@ -256,7 +256,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt().SetUint64(50), + StakeAmountNanos: uint256.NewInt(50), } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} feeNanos, err := _submitStakeTxn( @@ -269,14 +269,14 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.NotNil(t, stakeEntry) require.Equal(t, stakeEntry.RewardMethod, StakingRewardMethodPayToBalance) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(150)) require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue2")) // Verify ValidatorEntry.TotalStakeAmountNanos. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(150)) // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). 
m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) @@ -288,7 +288,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: StakingRewardMethodRestake, - StakeAmountNanos: uint256.NewInt(), + StakeAmountNanos: uint256.NewInt(0), } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} feeNanos, err := _submitStakeTxn( @@ -300,7 +300,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(150)) require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue2")) // Verify the StakeEntry.RewardMethod has changed to StakingRewardMethodRestake. @@ -310,7 +310,7 @@ func _testStaking(t *testing.T, flushToDB bool) { validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(150)) // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). 
m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) @@ -327,7 +327,7 @@ func _testStaking(t *testing.T, flushToDB bool) { unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(40), + UnstakeAmountNanos: uint256.NewInt(40), } _, err = _submitUnstakeTxn( testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, @@ -343,7 +343,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidValidatorPKID unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m2PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(40), + UnstakeAmountNanos: uint256.NewInt(40), } _, err = _submitUnstakeTxn( testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, @@ -355,7 +355,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidUnstakeNoStakeFound unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(40), + UnstakeAmountNanos: uint256.NewInt(40), } _, err = _submitUnstakeTxn( testMeta, m2Pub, m2Priv, unstakeMetadata, nil, flushToDB, @@ -379,7 +379,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidUnstakeAmountNanos unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(), + UnstakeAmountNanos: uint256.NewInt(0), } _, err = _submitUnstakeTxn( testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, @@ -404,7 +404,7 @@ func _testStaking(t *testing.T, flushToDB bool) { m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(40), + UnstakeAmountNanos: uint256.NewInt(40), } extraData := map[string][]byte{"TestKey": []byte("TestValue")} feeNanos, err := _submitUnstakeTxn( @@ -415,17 +415,17 @@ func _testStaking(t *testing.T, flushToDB bool) { // Verify 
StakeEntry.StakeAmountNanos. stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(110)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(110)) // Verify ValidatorEntry.TotalStakeAmountNanos. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(110)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(110)) // Verify LockedStakeEntry.UnstakeAmountNanos. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(40)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(40)) require.Equal(t, lockedStakeEntry.ExtraData["TestKey"], []byte("TestValue")) // Verify m1's balance stays the same (net of fees). @@ -437,7 +437,7 @@ func _testStaking(t *testing.T, flushToDB bool) { m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(30), + UnstakeAmountNanos: uint256.NewInt(30), } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} feeNanos, err := _submitUnstakeTxn( @@ -448,17 +448,17 @@ func _testStaking(t *testing.T, flushToDB bool) { // Verify StakeEntry.StakeAmountNanos. stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(80)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(80)) // Verify ValidatorEntry.TotalStakeAmountNanos. 
validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(80)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(80)) // Verify LockedStakeEntry.UnstakeAmountNanos. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(70)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(70)) require.Equal(t, lockedStakeEntry.ExtraData["TestKey"], []byte("TestValue2")) // Verify m1's balance stays the same (net of fees). @@ -470,7 +470,7 @@ func _testStaking(t *testing.T, flushToDB bool) { m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(80), + UnstakeAmountNanos: uint256.NewInt(80), } feeNanos, err := _submitUnstakeTxn( testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, @@ -485,12 +485,12 @@ func _testStaking(t *testing.T, flushToDB bool) { // Verify ValidatorEntry.TotalStakeAmountNanos. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt()) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(0)) // Verify LockedStakeEntry.UnstakeAmountNanos. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(150)) require.Equal(t, lockedStakeEntry.ExtraData["TestKey"], []byte("TestValue2")) // Verify m1's balance stays the same (net of fees). 
@@ -567,7 +567,7 @@ func _testStaking(t *testing.T, flushToDB bool) { ) require.NoError(t, err) require.Equal(t, len(lockedStakeEntries), 1) - require.Equal(t, lockedStakeEntries[0].LockedAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, lockedStakeEntries[0].LockedAmountNanos, uint256.NewInt(150)) m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) unlockStakeMetadata := &UnlockStakeMetadata{ @@ -588,7 +588,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // Verify ValidatorEntry.TotalStakeAmountNanos. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt()) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(0)) // Verify LockedStakeEntry.isDeleted. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) @@ -816,7 +816,7 @@ func TestStakingWithDerivedKey(t *testing.T) { require.NoError(t, err) senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) require.NoError(t, err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID newUtxoView := func() *UtxoView { @@ -974,7 +974,7 @@ func TestStakingWithDerivedKey(t *testing.T) { TransactionCountLimitMap: map[TxnType]uint64{ TxnTypeAuthorizeDerivedKey: 1, }, - StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(100)}, + StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt(100)}, } derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) require.Error(t, err) @@ -988,7 +988,7 @@ func TestStakingWithDerivedKey(t *testing.T) { TransactionCountLimitMap: map[TxnType]uint64{ TxnTypeAuthorizeDerivedKey: 1, }, - UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(100)}, 
+ UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt(100)}, } derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) require.Error(t, err) @@ -1017,7 +1017,7 @@ func TestStakingWithDerivedKey(t *testing.T) { TransactionCountLimitMap: map[TxnType]uint64{ TxnTypeAuthorizeDerivedKey: 1, }, - StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(100)}, + StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt(100)}, } derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) require.NoError(t, err) @@ -1025,7 +1025,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender tries to stake 100 $DESO nanos with m1 using the DerivedKey. Errors. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(100), + StakeAmountNanos: uint256.NewInt(100), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, @@ -1036,7 +1036,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender tries to stake 200 $DESO nanos with m0 using the DerivedKey. Errors. 
stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(200), + StakeAmountNanos: uint256.NewInt(200), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, @@ -1048,7 +1048,7 @@ func TestStakingWithDerivedKey(t *testing.T) { senderOldDESOBalanceNanos := getDESOBalanceNanos(senderPkBytes) stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(100), + StakeAmountNanos: uint256.NewInt(100), } feeNanos, err := _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, @@ -1059,7 +1059,7 @@ func TestStakingWithDerivedKey(t *testing.T) { stakeEntry, err := newUtxoView().GetStakeEntry(m0PKID, senderPKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(100)) // Verify sender's DESO balance is reduced by StakeAmountNanos (net of fees). senderNewDESOBalanceNanos := getDESOBalanceNanos(senderPkBytes) @@ -1075,7 +1075,7 @@ func TestStakingWithDerivedKey(t *testing.T) { TransactionCountLimitMap: map[TxnType]uint64{ TxnTypeAuthorizeDerivedKey: 1, }, - UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(50)}, + UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt(50)}, } derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) require.NoError(t, err) @@ -1083,7 +1083,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender tries to unstake 50 $DESO nanos from m1 using the DerivedKey. Errors. 
unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + UnstakeAmountNanos: uint256.NewInt(50), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1094,7 +1094,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes 50 $DESO nanos with m1. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(50), + StakeAmountNanos: uint256.NewInt(50), } _, err = _submitStakeTxn( testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, @@ -1104,7 +1104,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender tries to unstake 50 $DESO nanos from m1 using the DerivedKey. Errors. unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + UnstakeAmountNanos: uint256.NewInt(50), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1115,7 +1115,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender tries to unstake 200 $DESO nanos from m0 using the DerivedKey. Errors. unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(200), + UnstakeAmountNanos: uint256.NewInt(200), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1126,7 +1126,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender tries to unstake 100 $DESO nanos from m0 using the DerivedKey. Errors. 
unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(100), + UnstakeAmountNanos: uint256.NewInt(100), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1137,7 +1137,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes 50 $DESO nanos from m0 using the DerivedKey. Succeeds. unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + UnstakeAmountNanos: uint256.NewInt(50), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1148,13 +1148,13 @@ func TestStakingWithDerivedKey(t *testing.T) { stakeEntry, err := newUtxoView().GetStakeEntry(m0PKID, senderPKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(50)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(50)) // LockedStakeEntry was created. lockedStakeEntry, err := newUtxoView().GetLockedStakeEntry(m0PKID, senderPKID, currentEpochNumber) require.NoError(t, err) require.NotNil(t, lockedStakeEntry) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(50)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(50)) } { // sender unlocks stake using a DerivedKey. @@ -1186,7 +1186,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes 50 $DESO nanos from m1. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + UnstakeAmountNanos: uint256.NewInt(50), } _, err = _submitUnstakeTxn( testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, @@ -1229,7 +1229,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes + unstakes 50 $DESO nanos with m0. 
stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(50), + StakeAmountNanos: uint256.NewInt(50), } _, err = _submitStakeTxn( testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, @@ -1237,7 +1237,7 @@ func TestStakingWithDerivedKey(t *testing.T) { require.NoError(t, err) unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + UnstakeAmountNanos: uint256.NewInt(50), } _, err = _submitUnstakeTxn( testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, @@ -1266,8 +1266,8 @@ func TestStakingWithDerivedKey(t *testing.T) { TransactionCountLimitMap: map[TxnType]uint64{ TxnTypeAuthorizeDerivedKey: 1, }, - StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(50)}, - UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(50)}, + StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt(50)}, + UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt(50)}, UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey: 2}, } derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) @@ -1276,7 +1276,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes with m0 using the DerivedKey. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(25), + StakeAmountNanos: uint256.NewInt(25), } _, err = _submitStakeTxn( testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, @@ -1286,7 +1286,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes with m1 using the DerivedKey. 
stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(25), + StakeAmountNanos: uint256.NewInt(25), } _, err = _submitStakeTxn( testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, @@ -1296,7 +1296,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes from m0 using the DerivedKey. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(25), + UnstakeAmountNanos: uint256.NewInt(25), } _, err = _submitUnstakeTxn( testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, @@ -1306,7 +1306,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes from m1 using the DerivedKey. unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(25), + UnstakeAmountNanos: uint256.NewInt(25), } _, err = _submitUnstakeTxn( testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, @@ -1349,7 +1349,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes with m0 using the DerivedKey. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(25), + StakeAmountNanos: uint256.NewInt(25), } _, err = _submitStakeTxn( testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, @@ -1359,7 +1359,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes with m1 using the DerivedKey. stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(25), + StakeAmountNanos: uint256.NewInt(25), } _, err = _submitStakeTxn( testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, @@ -1369,7 +1369,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes from m0 using the DerivedKey. 
unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(25), + UnstakeAmountNanos: uint256.NewInt(25), } _, err = _submitUnstakeTxn( testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, @@ -1379,7 +1379,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes from m1 using the DerivedKey. unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(25), + UnstakeAmountNanos: uint256.NewInt(25), } _, err = _submitUnstakeTxn( testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, @@ -1422,12 +1422,12 @@ func TestStakingWithDerivedKey(t *testing.T) { TxnTypeAuthorizeDerivedKey: 1, }, StakeLimitMap: map[StakeLimitKey]*uint256.Int{ - scopedStakeLimitKey: uint256.NewInt().SetUint64(100), - globalStakeLimitKey: uint256.NewInt().SetUint64(200), + scopedStakeLimitKey: uint256.NewInt(100), + globalStakeLimitKey: uint256.NewInt(200), }, UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{ - scopedStakeLimitKey: uint256.NewInt().SetUint64(100), - globalStakeLimitKey: uint256.NewInt().SetUint64(200), + scopedStakeLimitKey: uint256.NewInt(100), + globalStakeLimitKey: uint256.NewInt(200), }, UnlockStakeLimitMap: map[StakeLimitKey]uint64{scopedStakeLimitKey: 1, globalStakeLimitKey: 1}, } @@ -1437,7 +1437,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes with m1 using the global TransactionSpendingLimit. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(200), + StakeAmountNanos: uint256.NewInt(200), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, @@ -1447,7 +1447,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes from m1 using the global TransactionSpendingLimit. 
unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(200), + UnstakeAmountNanos: uint256.NewInt(200), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1468,7 +1468,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes with m1 using the scoped TransactionSpendingLimit. stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(100), + StakeAmountNanos: uint256.NewInt(100), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, @@ -1478,7 +1478,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes from m1 using the scoped TransactionSpendingLimit. unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(100), + UnstakeAmountNanos: uint256.NewInt(100), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1506,11 +1506,11 @@ func TestStakingWithDerivedKey(t *testing.T) { TxnTypeAuthorizeDerivedKey: 1, }, StakeLimitMap: map[StakeLimitKey]*uint256.Int{ - stakeLimitKey1: uint256.NewInt().SetUint64(uint64(1.5 * float64(NanosPerUnit))), - stakeLimitKey2: uint256.NewInt().SetUint64(uint64(2.0 * float64(NanosPerUnit))), + stakeLimitKey1: uint256.NewInt(uint64(1.5 * float64(NanosPerUnit))), + stakeLimitKey2: uint256.NewInt(uint64(2.0 * float64(NanosPerUnit))), }, UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{ - stakeLimitKey1: uint256.NewInt().SetUint64(uint64(3.25 * float64(NanosPerUnit))), + stakeLimitKey1: uint256.NewInt(uint64(3.25 * float64(NanosPerUnit))), }, UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey1: 2, stakeLimitKey2: 3}, } @@ -1554,10 +1554,10 @@ func TestStakingWithDerivedKey(t *testing.T) { TxnTypeAuthorizeDerivedKey: 1, }, 
StakeLimitMap: map[StakeLimitKey]*uint256.Int{ - stakeLimitKey: uint256.NewInt().SetUint64(uint64(0.65 * float64(NanosPerUnit))), + stakeLimitKey: uint256.NewInt(uint64(0.65 * float64(NanosPerUnit))), }, UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{ - stakeLimitKey: uint256.NewInt().SetUint64(uint64(2.1 * float64(NanosPerUnit))), + stakeLimitKey: uint256.NewInt(uint64(2.1 * float64(NanosPerUnit))), }, UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey: 1}, } @@ -1666,7 +1666,7 @@ func _testGetTopStakesByStakeAmount(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(validatorPkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt().SetUint64(amountNanos), + StakeAmountNanos: uint256.NewInt(amountNanos), } _, err := _submitStakeTxn(testMeta, stakerPk, stakerPriv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -1955,7 +1955,7 @@ func TestStakeLockupEpochDuration(t *testing.T) { // m0 stakes with himself. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(100), + StakeAmountNanos: uint256.NewInt(100), } _, err = _submitStakeTxn(testMeta, m0Pub, m0Priv, stakeMetadata, nil, true) require.NoError(t, err) @@ -1963,13 +1963,13 @@ func TestStakeLockupEpochDuration(t *testing.T) { stakeEntry, err := newUtxoView().GetStakeEntry(m0PKID, m0PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(100)) } { // m0 unstakes from himself. 
unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(100), + UnstakeAmountNanos: uint256.NewInt(100), } _, err = _submitUnstakeTxn(testMeta, m0Pub, m0Priv, unstakeMetadata, nil, true) require.NoError(t, err) @@ -1981,7 +1981,7 @@ func TestStakeLockupEpochDuration(t *testing.T) { lockedStakeEntry, err := newUtxoView().GetLockedStakeEntry(m0PKID, m0PKID, currentEpochNumber) require.NoError(t, err) require.NotNil(t, lockedStakeEntry) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(100)) } { // RuleErrorInvalidUnlockStakeMustWaitLockupDuration @@ -2139,7 +2139,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m1 stakes with m0. m0 is active. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(150), + StakeAmountNanos: uint256.NewInt(150), } _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -2147,13 +2147,13 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos increases. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(150)) } { // m1 unstakes some from m0. m0 is active. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + UnstakeAmountNanos: uint256.NewInt(50), } _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -2161,7 +2161,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos decreases. 
validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) } { // Jail m0. Since this update takes place outside a transaction, @@ -2176,13 +2176,13 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) // m0 TotalStakeAmountNanos stays the same. - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) } { // m1 stakes more with m0. m0 is jailed. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(50), + StakeAmountNanos: uint256.NewInt(50), } _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -2190,13 +2190,13 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos increases. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(150)) } { // m1 unstakes some from m0. m0 is jailed. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + UnstakeAmountNanos: uint256.NewInt(50), } _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -2204,7 +2204,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos decreases. 
validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) } { // m0 unjails himself. @@ -2214,13 +2214,13 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos stays the same. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) } { // m1 stakes more with m0. m0 is active. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(50), + StakeAmountNanos: uint256.NewInt(50), } _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -2228,13 +2228,13 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos increases. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(150)) } { // m1 unstakes some from m0. m0 is active. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + UnstakeAmountNanos: uint256.NewInt(50), } _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -2242,7 +2242,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos decreases. 
validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) } { // Jail m0 again. Since this update takes place outside a transaction, @@ -2253,7 +2253,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos stays the same. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) } { // m0 unregisters as a validator. diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 000130b17..372447b19 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -12,7 +12,7 @@ import ( "github.com/deso-protocol/core/bls" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/decred/dcrd/lru" "github.com/dgraph-io/badger/v4" embeddedpostgres "github.com/fergusstrange/embedded-postgres" @@ -2183,7 +2183,7 @@ func TestBasicTransferSignatures(t *testing.T) { require.NoError(err) senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) require.NoError(err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) recipientPkBytes, _, err := Base58CheckDecode(recipientPkString) require.NoError(err) @@ -2236,8 +2236,12 @@ func TestBasicTransferSignatures(t *testing.T) { // Now fetch all transactions from the db and verify their signatures have been properly persisted. 
if postgres != nil { pgTxn := postgres.GetTransactionByHash(transactionHash) - require.Equal(true, reflect.DeepEqual(txn.Signature.Sign.R.Bytes(), HashToBigint(pgTxn.R).Bytes())) - require.Equal(true, reflect.DeepEqual(txn.Signature.Sign.S.Bytes(), HashToBigint(pgTxn.S).Bytes())) + r := txn.Signature.Sign.R() + rBytes := (&r).Bytes() + s := txn.Signature.Sign.S() + sBytes := (&s).Bytes() + require.Equal(true, reflect.DeepEqual(rBytes[:], HashToBigint(pgTxn.R).Bytes())) + require.Equal(true, reflect.DeepEqual(sBytes[:], HashToBigint(pgTxn.S).Bytes())) require.Equal(txn.Signature.RecoveryId, byte(pgTxn.RecoveryId)) require.Equal(txn.Signature.IsRecoverable, pgTxn.IsRecoverable) } else { @@ -2245,8 +2249,16 @@ func TestBasicTransferSignatures(t *testing.T) { require.NoError(err) for _, blockTxn := range dbBlock.Txns { if reflect.DeepEqual(transactionHash.ToBytes(), blockTxn.Hash().ToBytes()) { - require.Equal(true, reflect.DeepEqual(txn.Signature.Sign.R.Bytes(), blockTxn.Signature.Sign.R.Bytes())) - require.Equal(true, reflect.DeepEqual(txn.Signature.Sign.S.Bytes(), blockTxn.Signature.Sign.S.Bytes())) + rTxn := txn.Signature.Sign.R() + rTxnBytes := (&rTxn).Bytes() + sTxn := txn.Signature.Sign.S() + sTxnBytes := (&sTxn).Bytes() + rBlockTxn := blockTxn.Signature.Sign.R() + rBlockTxnBytes := (&rBlockTxn).Bytes() + sBlockTxn := blockTxn.Signature.Sign.S() + sBlockTxnBytes := (&sBlockTxn).Bytes() + require.Equal(true, reflect.DeepEqual(rTxnBytes[:], rBlockTxnBytes[:])) + require.Equal(true, reflect.DeepEqual(sTxnBytes[:], sBlockTxnBytes[:])) require.Equal(txn.Signature.RecoveryId, blockTxn.Signature.RecoveryId) require.Equal(txn.Signature.IsRecoverable, blockTxn.Signature.IsRecoverable) } @@ -2375,7 +2387,7 @@ func TestBasicTransferSignatures(t *testing.T) { testRandomVector := [3]RuleError{ RuleErrorInvalidTransactionSignature, RuleErrorDerivedKeyNotAuthorized, RuleErrorDerivedKeyNotAuthorized, } - randomPrivKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivKey, 
err := btcec.NewPrivateKey() require.NoError(err) randomPrivKeyBase58Check := Base58CheckEncode(randomPrivKey.Serialize(), true, params) @@ -2430,7 +2442,7 @@ func TestBasicTransferSignatures(t *testing.T) { testRandomKeyVector := [3]RuleError{ RuleErrorInvalidTransactionSignature, RuleErrorDerivedKeyNotAuthorized, RuleErrorDerivedKeyNotAuthorized, } - randomPrivKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivKeyBase58Check := Base58CheckEncode(randomPrivKey.Serialize(), true, params) allTxns = append(allTxns, mempoolProcessAllSignatureCombinations( @@ -2493,7 +2505,7 @@ func TestBasicTransferSignatures(t *testing.T) { testMoneyRandomVector := [3]RuleError{ RuleErrorInvalidTransactionSignature, RuleErrorDerivedKeyNotAuthorized, RuleErrorDerivedKeyNotAuthorized, } - randomPrivKey, err := btcec.NewPrivateKey(btcec.S256()) + randomPrivKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivKeyBase58Check := Base58CheckEncode(randomPrivKey.Serialize(), true, params) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 62124a7db..d132a7bfe 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -12,7 +12,7 @@ import ( "sort" "strings" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" @@ -4963,7 +4963,7 @@ func (ce *CoinEntry) Copy() *CoinEntry { CreatorBasisPoints: ce.CreatorBasisPoints, DeSoLockedNanos: ce.DeSoLockedNanos, NumberOfHolders: ce.NumberOfHolders, - CoinsInCirculationNanos: *uint256.NewInt().Set(&ce.CoinsInCirculationNanos), + CoinsInCirculationNanos: *uint256.NewInt(0).Set(&ce.CoinsInCirculationNanos), CoinWatermarkNanos: ce.CoinWatermarkNanos, MintingDisabled: ce.MintingDisabled, TransferRestrictionStatus: ce.TransferRestrictionStatus, @@ -5626,7 +5626,7 @@ func VariableDecodeUint256(rr *bytes.Reader) (*uint256.Int, error) { if err != nil { 
return nil, errors.Wrapf(err, "DecodeUint256: Error reading uint256") } - return uint256.NewInt().SetBytes(numberBytes), nil + return uint256.NewInt(0).SetBytes(numberBytes), nil } else if err != nil { return nil, errors.Wrapf(err, "DecodeUint256: Error reading uint256") } else { @@ -5936,7 +5936,7 @@ func ComputeBaseUnitsToBuyUint256( // If the quantity to sell is zero then return zero (selling // zero means you should buy zero) if quantityToSellBaseUnits.IsZero() { - return uint256.NewInt(), nil + return uint256.NewInt(0), nil } // Perform calculation. @@ -6003,7 +6003,7 @@ func ComputeBaseUnitsToSellUint256( // If the quantity to buy is zero then return zero (buying // zero means you should sell zero) if quantityToBuyBaseUnits.IsZero() { - return uint256.NewInt(), nil + return uint256.NewInt(0), nil } // Perform calculation. diff --git a/lib/block_view_types_test.go b/lib/block_view_types_test.go index fc990d288..d55b43160 100644 --- a/lib/block_view_types_test.go +++ b/lib/block_view_types_test.go @@ -416,8 +416,8 @@ func TestUtxoEntryEncodeDecode(t *testing.T) { func TestEncodingUint256s(t *testing.T) { // Create three uint256.Ints. - num1 := uint256.NewInt() - num2 := uint256.NewInt().SetUint64(598128756) + num1 := uint256.NewInt(0) + num2 := uint256.NewInt(598128756) num3 := MaxUint256 // Encode them to bytes using VariableEncodeUint256. diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index cd88f8656..6ee17feaf 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1278,7 +1278,7 @@ func (bav *UtxoView) _connectRegisterAsValidator( } // Calculate TotalStakeAmountNanos. 
- totalStakeAmountNanos := uint256.NewInt() + totalStakeAmountNanos := uint256.NewInt(0) if prevValidatorEntry != nil { totalStakeAmountNanos = prevValidatorEntry.TotalStakeAmountNanos.Clone() } @@ -1459,7 +1459,7 @@ func (bav *UtxoView) _connectUnregisterAsValidator( // Delete each StakeEntry and create or update the corresponding LockedStakeEntry. // Track TotalUnstakedAmountNanos and PrevLockedStakeEntries. - totalUnstakedAmountNanos := uint256.NewInt() + totalUnstakedAmountNanos := uint256.NewInt(0) var prevLockedStakeEntries []*LockedStakeEntry for _, prevStakeEntry := range prevStakeEntries { @@ -1982,7 +1982,7 @@ func (bav *UtxoView) SanityCheckUnregisterAsValidatorTxn( } // Sanity check the deleted StakeEntries. - totalUnstakedAmountNanos := uint256.NewInt() + totalUnstakedAmountNanos := uint256.NewInt(0) for _, stakeEntry := range utxoOp.PrevStakeEntries { totalUnstakedAmountNanos, err = SafeUint256().Add(totalUnstakedAmountNanos, stakeEntry.StakeAmountNanos) if err != nil { @@ -2472,7 +2472,7 @@ func (bav *UtxoView) CreateUnjailValidatorTxindexMetadata( } func SumValidatorEntriesTotalStakeAmountNanos(validatorEntries []*ValidatorEntry) *uint256.Int { - totalStakeAmountNanos := uint256.NewInt() + totalStakeAmountNanos := uint256.NewInt(0) for _, validatorEntry := range validatorEntries { totalStakeAmountNanos.Add(totalStakeAmountNanos, validatorEntry.TotalStakeAmountNanos) } diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 2bb00fdee..517302916 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -6,7 +6,7 @@ import ( "math" "testing" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/deso-protocol/core/bls" "github.com/holiman/uint256" "github.com/stretchr/testify/require" @@ -485,7 +485,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { require.NoError(t, err) senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) 
require.NoError(t, err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID newUtxoView := func() *UtxoView { @@ -823,7 +823,7 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { // m3 stakes 100 DESO nanos with m0. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(100), + StakeAmountNanos: uint256.NewInt(100), } _, err = _submitStakeTxn(testMeta, m3Pub, m3Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -831,7 +831,7 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { // m3 stakes 200 DESO nanos with m1. stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(200), + StakeAmountNanos: uint256.NewInt(200), } _, err = _submitStakeTxn(testMeta, m3Pub, m3Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -839,7 +839,7 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { // m3 stakes 300 DESO nanos with m2. 
stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m2PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(300), + StakeAmountNanos: uint256.NewInt(300), } _, err = _submitStakeTxn(testMeta, m3Pub, m3Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -849,17 +849,17 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 3) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(300)) require.Equal(t, validatorEntries[1].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(200)) require.Equal(t, validatorEntries[2].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt(100)) } { // m3 unstakes from m1. 
unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(150), + UnstakeAmountNanos: uint256.NewInt(150), } _, err = _submitUnstakeTxn(testMeta, m3Pub, m3Priv, unstakeMetadata, nil, flushToDB) @@ -868,17 +868,17 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 3) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(300)) require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(100)) require.Equal(t, validatorEntries[2].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(50)) + require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt(50)) } { // m3 unstakes more from m1. 
unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(50), + UnstakeAmountNanos: uint256.NewInt(50), } _, err = _submitUnstakeTxn(testMeta, m3Pub, m3Priv, unstakeMetadata, nil, flushToDB) @@ -887,9 +887,9 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(300)) require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(100)) } { // m2 unregisters as validator. @@ -901,13 +901,13 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 1) require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(100)) } { // m4 stakes with m1. 
stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(150), + StakeAmountNanos: uint256.NewInt(150), } _, err = _submitStakeTxn(testMeta, m4Pub, m4Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -917,15 +917,15 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(150)) require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(100)) } { // m4 stakes more with m1. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(100), + StakeAmountNanos: uint256.NewInt(100), } _, err = _submitStakeTxn(testMeta, m4Pub, m4Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -935,9 +935,9 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(250)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(250)) require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(100)) } { // Verify top validators with LIMIT. 
@@ -945,7 +945,7 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 1) require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(250)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(250)) } // Flush mempool to the db and test rollbacks. @@ -980,7 +980,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) validatorEntry := &ValidatorEntry{ ValidatorPKID: m0PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(100), + TotalStakeAmountNanos: uint256.NewInt(100), VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -991,7 +991,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m0PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) // Verify m0 is not stored in the UtxoView. 
require.Empty(t, utxoView.ValidatorPKIDToValidatorEntry) @@ -1000,7 +1000,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m1PkBytes) validatorEntry = &ValidatorEntry{ ValidatorPKID: m1PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(400), + TotalStakeAmountNanos: uint256.NewInt(400), VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, JailedAtEpochNumber: 1, @@ -1012,14 +1012,14 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m1PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(400)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(400)) require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) // Store m2's ValidatorEntry in the db with TotalStake = 300 nanos. votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m2PkBytes) m2ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m2PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(300), + TotalStakeAmountNanos: uint256.NewInt(300), VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -1030,13 +1030,13 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m2PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(300)) // Store m3's ValidatorEntry in the db with TotalStake = 600 nanos. 
votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m3PkBytes) m3ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m3PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(600), + TotalStakeAmountNanos: uint256.NewInt(600), VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -1047,7 +1047,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m3PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(600)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(600)) // Fetch m2 so it is also cached in the UtxoView. validatorEntry, err = utxoView.GetValidatorByPKID(m2PKID) @@ -1060,7 +1060,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal( t, utxoView.ValidatorPKIDToValidatorEntry[*m2ValidatorEntry.ValidatorPKID].TotalStakeAmountNanos, - uint256.NewInt().SetUint64(300), + uint256.NewInt(300), ) // Store m3's ValidatorEntry in the UtxoView with isDeleted=true. @@ -1074,7 +1074,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m4PkBytes) m4ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m4PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), + TotalStakeAmountNanos: uint256.NewInt(50), VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -1091,14 +1091,14 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal( t, utxoView.ValidatorPKIDToValidatorEntry[*m4ValidatorEntry.ValidatorPKID].TotalStakeAmountNanos, - uint256.NewInt().SetUint64(50), + uint256.NewInt(50), ) // Store m5's jailed ValidatorEntry in the UtxoView with TotalStake = 500 nanos. 
votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m5PkBytes) m5ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m5PKID, - TotalStakeAmountNanos: uint256.NewInt().SetUint64(500), + TotalStakeAmountNanos: uint256.NewInt(500), VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, JailedAtEpochNumber: 1, @@ -1116,7 +1116,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal( t, utxoView.ValidatorPKIDToValidatorEntry[*m5ValidatorEntry.ValidatorPKID].TotalStakeAmountNanos, - uint256.NewInt().SetUint64(500), + uint256.NewInt(500), ) require.Equal( t, utxoView.ValidatorPKIDToValidatorEntry[*m5ValidatorEntry.ValidatorPKID].Status(), ValidatorStatusJailed, @@ -1127,11 +1127,11 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.NoError(t, err) require.Len(t, validatorEntries, 3) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(300)) require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(100)) require.Equal(t, validatorEntries[2].ValidatorPKID, m4PKID) - require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(50)) + require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt(50)) } func TestUpdatingValidatorDisableDelegatedStake(t *testing.T) { @@ -1244,7 +1244,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 stakes with himself. This is allowed even though DisableDelegatedStake = TRUE. 
stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(100), + StakeAmountNanos: uint256.NewInt(100), } _, err = _submitStakeTxn( testMeta, m0Pub, m0Priv, stakeMetadata, nil, flushToDB, @@ -1260,7 +1260,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m1 tries to stake with m0. Errors. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(100), + StakeAmountNanos: uint256.NewInt(100), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -1289,7 +1289,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m1 stakes with m0. Succeeds. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(100), + StakeAmountNanos: uint256.NewInt(100), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -1412,7 +1412,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { // m0 stakes with himself. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(600), + StakeAmountNanos: uint256.NewInt(600), } _, err = _submitStakeTxn( testMeta, m0Pub, m0Priv, stakeMetadata, nil, flushToDB, @@ -1422,13 +1422,13 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { stakeEntry, err = utxoView().GetStakeEntry(m0PKID, m0PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(600)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(600)) } { // m1 stakes with m0. 
stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(400), + StakeAmountNanos: uint256.NewInt(400), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -1438,13 +1438,13 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { stakeEntry, err = utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(400)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(400)) } { // m1 partially unstakes with m0. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(100), + UnstakeAmountNanos: uint256.NewInt(100), } _, err = _submitUnstakeTxn( testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, @@ -1454,13 +1454,13 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { stakeEntry, err = utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(300)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(300)) // m1 has a LockedStakeEntry created. lockedStakeEntry, err = utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) require.NotNil(t, lockedStakeEntry) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(100)) } { // m0 unregisters as a validator. 
@@ -1481,7 +1481,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { lockedStakeEntry, err = utxoView().GetLockedStakeEntry(m0PKID, m0PKID, currentEpochNumber) require.NoError(t, err) require.NotNil(t, lockedStakeEntry) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(600)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(600)) // m1 is unstaked. // m1's StakeEntry is deleted. @@ -1492,7 +1492,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { lockedStakeEntry, err = utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) require.NotNil(t, lockedStakeEntry) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(400)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(400)) } // Flush mempool to the db and test rollbacks. @@ -1625,7 +1625,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // m1 stakes with m0. Succeeds. You can stake to a jailed validator. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(100), + StakeAmountNanos: uint256.NewInt(100), } _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -1638,7 +1638,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // m1 unstakes from m0. Succeeds. You can unstake from a jailed validator. 
unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt().SetUint64(100), + UnstakeAmountNanos: uint256.NewInt(100), } _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -1763,7 +1763,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { require.NoError(t, err) senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) require.NoError(t, err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID newUtxoView := func() *UtxoView { diff --git a/lib/blockchain.go b/lib/blockchain.go index 1be87d904..262624f9a 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -26,7 +26,7 @@ import ( btcdchain "github.com/btcsuite/btcd/blockchain" chainlib "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" "github.com/deso-protocol/go-deadlock" @@ -2215,7 +2215,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // trusted. signature := desoBlock.BlockProducerInfo.Signature - pkObj, err := btcec.ParsePubKey(publicKey, btcec.S256()) + pkObj, err := btcec.ParsePubKey(publicKey) if err != nil { return false, false, errors.Wrapf(err, "ProcessBlock: Error parsing block producer public key: %v.", @@ -3213,7 +3213,7 @@ func (bc *Blockchain) CreatePrivateMessageTxn( // Encrypt the passed-in message text with the recipient's public key. // // Parse the recipient public key. 
- recipientPk, err := btcec.ParsePubKey(recipientPublicKey, btcec.S256()) + recipientPk, err := btcec.ParsePubKey(recipientPublicKey) if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, "CreatePrivateMessageTxn: Problem parsing "+ "recipient public key: ") diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index aefb50117..c55e40d1b 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -4,6 +4,7 @@ import ( "encoding/hex" "flag" "fmt" + ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "log" "math" "math/big" @@ -17,7 +18,8 @@ import ( "github.com/go-pg/pg/v10" chainlib "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/btcec" + + "github.com/btcsuite/btcd/btcec/v2" "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/stretchr/testify/assert" @@ -907,7 +909,7 @@ func _signTxn(t *testing.T, txn *MsgDeSoTxn, privKeyStrArg string) { privKeyBytes, _, err := Base58CheckDecode(privKeyStrArg) require.NoError(err) - privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), privKeyBytes) + privKey, _ := btcec.PrivKeyFromBytes(privKeyBytes) txnSignature, err := txn.Sign(privKey) require.NoError(err) txn.Signature.SetSignature(txnSignature) @@ -925,7 +927,7 @@ func _signTxnWithDerivedKeyAndType(t *testing.T, txn *MsgDeSoTxn, privKeyStrBase privKeyBytes, _, err := Base58CheckDecode(privKeyStrBase58Check) require.NoError(err) - privateKey, publicKey := btcec.PrivKeyFromBytes(btcec.S256(), privKeyBytes) + privateKey, publicKey := btcec.PrivKeyFromBytes(privKeyBytes) // We will randomly sign with the standard DER encoding + ExtraData, or with the DeSo-DER encoding. if signatureType == 0 { @@ -1668,7 +1670,7 @@ func TestBadBlockSignature(t *testing.T) { // Since MineAndProcesssSingleBlock returns a valid block above, we can play with its // signature and re-process the block to see what happens. 
- blockProducerInfoCopy := &BlockProducerInfo{Signature: &btcec.Signature{}} + blockProducerInfoCopy := &BlockProducerInfo{Signature: &ecdsa2.Signature{}} blockProducerInfoCopy.PublicKey = append([]byte{}, finalBlock1.BlockProducerInfo.PublicKey...) *blockProducerInfoCopy.Signature = *finalBlock1.BlockProducerInfo.Signature diff --git a/lib/connection_manager.go b/lib/connection_manager.go index 1a8448521..90f951d84 100644 --- a/lib/connection_manager.go +++ b/lib/connection_manager.go @@ -158,7 +158,7 @@ func NewConnectionManager( } // Check if the address passed shares a group with any addresses already in our data structures. -func (cmgr *ConnectionManager) IsFromRedundantOutboundIPAddress(na *wire.NetAddress) bool { +func (cmgr *ConnectionManager) IsFromRedundantOutboundIPAddress(na *wire.NetAddressV2) bool { groupKey := addrmgr.GroupKey(na) // For the sake of running multiple nodes on the same machine, we allow localhost connections. if groupKey == "local" { @@ -172,7 +172,7 @@ func (cmgr *ConnectionManager) IsFromRedundantOutboundIPAddress(na *wire.NetAddr if numGroupsForKey != 0 && numGroupsForKey != 1 { glog.V(2).Infof("IsFromRedundantOutboundIPAddress: Found numGroupsForKey != (0 or 1). Is (%d) "+ "instead for addr (%s) and group key (%s). 
This "+ - "should never happen.", numGroupsForKey, na.IP.String(), groupKey) + "should never happen.", numGroupsForKey, na.Addr.String(), groupKey) } if numGroupsForKey == 0 { @@ -181,7 +181,7 @@ func (cmgr *ConnectionManager) IsFromRedundantOutboundIPAddress(na *wire.NetAddr return true } -func (cmgr *ConnectionManager) AddToGroupKey(na *wire.NetAddress) { +func (cmgr *ConnectionManager) AddToGroupKey(na *wire.NetAddressV2) { groupKey := addrmgr.GroupKey(na) cmgr.mtxOutboundConnIPGroups.Lock() @@ -189,7 +189,7 @@ func (cmgr *ConnectionManager) AddToGroupKey(na *wire.NetAddress) { cmgr.mtxOutboundConnIPGroups.Unlock() } -func (cmgr *ConnectionManager) subFromGroupKey(na *wire.NetAddress) { +func (cmgr *ConnectionManager) subFromGroupKey(na *wire.NetAddressV2) { groupKey := addrmgr.GroupKey(na) cmgr.mtxOutboundConnIPGroups.Lock() @@ -197,7 +197,7 @@ func (cmgr *ConnectionManager) subFromGroupKey(na *wire.NetAddress) { cmgr.mtxOutboundConnIPGroups.Unlock() } -func _delayRetry(retryCount uint64, persistentAddrForLogging *wire.NetAddress, unit time.Duration) (_retryDuration time.Duration) { +func _delayRetry(retryCount uint64, persistentAddrForLogging *wire.NetAddressV2, unit time.Duration) (_retryDuration time.Duration) { // No delay if we haven't tried yet or if the number of retries isn't positive. 
if retryCount <= 0 { return 0 @@ -207,7 +207,7 @@ func _delayRetry(retryCount uint64, persistentAddrForLogging *wire.NetAddress, u if persistentAddrForLogging != nil { glog.V(1).Infof("Retrying connection to outbound persistent peer: "+ - "(%s:%d) in (%d) seconds.", persistentAddrForLogging.IP.String(), + "(%s:%d) in (%d) seconds.", persistentAddrForLogging.Addr.String(), persistentAddrForLogging.Port, numSecs) } else { glog.V(2).Infof("Retrying connection to outbound non-persistent peer in (%d) seconds.", numSecs) @@ -215,41 +215,41 @@ func _delayRetry(retryCount uint64, persistentAddrForLogging *wire.NetAddress, u return retryDelay } -func (cmgr *ConnectionManager) IsConnectedOutboundIpAddress(netAddr *wire.NetAddress) bool { +func (cmgr *ConnectionManager) IsConnectedOutboundIpAddress(netAddr *wire.NetAddressV2) bool { cmgr.mtxAddrsMaps.RLock() defer cmgr.mtxAddrsMaps.RUnlock() return cmgr.connectedOutboundAddrs[addrmgr.NetAddressKey(netAddr)] } -func (cmgr *ConnectionManager) IsAttemptedOutboundIpAddress(netAddr *wire.NetAddress) bool { +func (cmgr *ConnectionManager) IsAttemptedOutboundIpAddress(netAddr *wire.NetAddressV2) bool { cmgr.mtxAddrsMaps.RLock() defer cmgr.mtxAddrsMaps.RUnlock() return cmgr.attemptedOutboundAddrs[addrmgr.NetAddressKey(netAddr)] } -func (cmgr *ConnectionManager) AddAttemptedOutboundAddrs(netAddr *wire.NetAddress) { +func (cmgr *ConnectionManager) AddAttemptedOutboundAddrs(netAddr *wire.NetAddressV2) { cmgr.mtxAddrsMaps.Lock() defer cmgr.mtxAddrsMaps.Unlock() cmgr.attemptedOutboundAddrs[addrmgr.NetAddressKey(netAddr)] = true } -func (cmgr *ConnectionManager) RemoveAttemptedOutboundAddrs(netAddr *wire.NetAddress) { +func (cmgr *ConnectionManager) RemoveAttemptedOutboundAddrs(netAddr *wire.NetAddressV2) { cmgr.mtxAddrsMaps.Lock() defer cmgr.mtxAddrsMaps.Unlock() delete(cmgr.attemptedOutboundAddrs, addrmgr.NetAddressKey(netAddr)) } // DialPersistentOutboundConnection attempts to connect to a persistent peer. 
-func (cmgr *ConnectionManager) DialPersistentOutboundConnection(persistentAddr *wire.NetAddress, attemptId uint64) (_attemptId uint64) { +func (cmgr *ConnectionManager) DialPersistentOutboundConnection(persistentAddr *wire.NetAddressV2, attemptId uint64) (_attemptId uint64) { glog.V(2).Infof("ConnectionManager.DialPersistentOutboundConnection: Connecting to peer (IP=%v, Port=%v)", - persistentAddr.IP.String(), persistentAddr.Port) + persistentAddr.Addr.String(), persistentAddr.Port) return cmgr._dialOutboundConnection(persistentAddr, attemptId, true) } // DialOutboundConnection attempts to connect to a non-persistent peer. -func (cmgr *ConnectionManager) DialOutboundConnection(addr *wire.NetAddress, attemptId uint64) { +func (cmgr *ConnectionManager) DialOutboundConnection(addr *wire.NetAddressV2, attemptId uint64) { glog.V(2).Infof("ConnectionManager.ConnectOutboundConnection: Connecting to peer (IP=%v, Port=%v)", - addr.IP.String(), addr.Port) + addr.Addr.String(), addr.Port) cmgr._dialOutboundConnection(addr, attemptId, false) } @@ -266,7 +266,7 @@ func (cmgr *ConnectionManager) CloseAttemptedConnection(attemptId uint64) { // _dialOutboundConnection is the internal method that spawns and initiates an OutboundConnectionAttempt, which handles the // connection attempt logic. It returns the attemptId of the attempt that was created. -func (cmgr *ConnectionManager) _dialOutboundConnection(addr *wire.NetAddress, attemptId uint64, isPersistent bool) (_attemptId uint64) { +func (cmgr *ConnectionManager) _dialOutboundConnection(addr *wire.NetAddressV2, attemptId uint64, isPersistent bool) (_attemptId uint64) { connectionAttempt := NewOutboundConnectionAttempt(attemptId, addr, isPersistent, cmgr.params.DialTimeout, cmgr.outboundConnectionChan) cmgr.mtxConnectionAttempts.Lock() @@ -284,7 +284,7 @@ func (cmgr *ConnectionManager) _dialOutboundConnection(addr *wire.NetAddress, at // is set, then we will connect only to that addr. 
Otherwise, we will use // the addrmgr to randomly select addrs and create OUTBOUND connections // with them until we find a worthy peer. -func (cmgr *ConnectionManager) ConnectPeer(id uint64, conn net.Conn, na *wire.NetAddress, isOutbound bool, +func (cmgr *ConnectionManager) ConnectPeer(id uint64, conn net.Conn, na *wire.NetAddressV2, isOutbound bool, isPersistent bool) *Peer { // At this point Conn is set so create a peer object to do a version negotiation. @@ -305,7 +305,7 @@ func (cmgr *ConnectionManager) ConnectPeer(id uint64, conn net.Conn, na *wire.Ne return peer } -func (cmgr *ConnectionManager) IsDuplicateInboundIPAddress(netAddr *wire.NetAddress) bool { +func (cmgr *ConnectionManager) IsDuplicateInboundIPAddress(netAddr *wire.NetAddressV2) bool { cmgr.mtxPeerMaps.RLock() defer cmgr.mtxPeerMaps.RUnlock() @@ -316,7 +316,7 @@ func (cmgr *ConnectionManager) IsDuplicateInboundIPAddress(netAddr *wire.NetAddr // If the IP is a localhost IP let it slide. This is useful for testing fake // nodes on a local machine. // TODO: Should this be a flag? 
- if net.IP([]byte{127, 0, 0, 1}).Equal(netAddr.IP) { + if net.IP([]byte{127, 0, 0, 1}).Equal(netAddr.ToLegacy().IP) { glog.V(1).Infof("ConnectionManager.IsDuplicateInboundIPAddress: Allowing " + "localhost IP address to connect") return false @@ -324,7 +324,7 @@ func (cmgr *ConnectionManager) IsDuplicateInboundIPAddress(netAddr *wire.NetAddr for _, peer := range cmgr.inboundPeers { // If the peer's IP is equal to the passed IP then we have found a duplicate // inbound connection - if peer.netAddr.IP.Equal(netAddr.IP) { + if peer.netAddr.ToLegacy().IP.Equal(netAddr.ToLegacy().IP) { return true } } diff --git a/lib/constants.go b/lib/constants.go index d3f7d2667..0da7ef03f 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1847,3 +1847,6 @@ const DefaultMainnetCheckpointProvider = "https://node.deso.org" const DefaultTestnetCheckpointProvider = "https://test.deso.org" const RoutePathGetCommittedTipBlockInfo = "/api/v0/get-committed-tip-block-info" + +// Constants that was removed from newer version of Btcec +const BtcecPubKeyBytesLenUncompressed = 65 diff --git a/lib/db_utils.go b/lib/db_utils.go index 49a002a95..e85563681 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -21,7 +21,7 @@ import ( "github.com/holiman/uint256" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/davecgh/go-spew/spew" "github.com/dgraph-io/badger/v4" "github.com/golang/glog" @@ -9130,7 +9130,7 @@ func DBGetBalanceEntryForHODLerAndCreatorPKIDsWithTxn(txn *badger.Txn, snap *Sna return &BalanceEntry{ HODLerPKID: hodlerPKID.NewPKID(), CreatorPKID: creatorPKID.NewPKID(), - BalanceNanos: *uint256.NewInt(), + BalanceNanos: *uint256.NewInt(0), } } balanceEntryObj := &BalanceEntry{} @@ -9206,7 +9206,7 @@ func DBPutBalanceEntryMappingsWithTxn(txn *badger.Txn, snap *Snapshot, blockHeig // If the balance is zero, then there is no point in storing this entry. // We already placeholder a "zero" balance entry in connect logic. 
- if balanceEntry.BalanceNanos.Eq(uint256.NewInt()) && !balanceEntry.HasPurchased { + if balanceEntry.BalanceNanos.Eq(uint256.NewInt(0)) && !balanceEntry.HasPurchased { return nil } @@ -9288,7 +9288,7 @@ func DbGetHolderPKIDCreatorPKIDToBalanceEntryWithTxn(txn *badger.Txn, snap *Snap return &BalanceEntry{ HODLerPKID: holder.NewPKID(), CreatorPKID: creator.NewPKID(), - BalanceNanos: *uint256.NewInt(), + BalanceNanos: *uint256.NewInt(0), } } diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index 7b5decf50..cc9713de2 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -250,15 +250,15 @@ func TestPrivateMessages(t *testing.T) { db, _ := GetTestBadgerDb() defer CleanUpBadger(db) - priv1, err := btcec.NewPrivateKey(btcec.S256()) + priv1, err := btcec.NewPrivateKey() require.NoError(err) pk1 := priv1.PubKey().SerializeCompressed() - priv2, err := btcec.NewPrivateKey(btcec.S256()) + priv2, err := btcec.NewPrivateKey() require.NoError(err) pk2 := priv2.PubKey().SerializeCompressed() - priv3, err := btcec.NewPrivateKey(btcec.S256()) + priv3, err := btcec.NewPrivateKey() require.NoError(err) pk3 := priv3.PubKey().SerializeCompressed() @@ -514,15 +514,15 @@ func TestFollows(t *testing.T) { db, _ := GetTestBadgerDb() defer CleanUpBadger(db) - priv1, err := btcec.NewPrivateKey(btcec.S256()) + priv1, err := btcec.NewPrivateKey() require.NoError(err) pk1 := priv1.PubKey().SerializeCompressed() - priv2, err := btcec.NewPrivateKey(btcec.S256()) + priv2, err := btcec.NewPrivateKey() require.NoError(err) pk2 := priv2.PubKey().SerializeCompressed() - priv3, err := btcec.NewPrivateKey(btcec.S256()) + priv3, err := btcec.NewPrivateKey() require.NoError(err) pk3 := priv3.PubKey().SerializeCompressed() diff --git a/lib/deso_math.go 
b/lib/deso_math.go index b9fe6bcd6..55655dd3f 100644 --- a/lib/deso_math.go +++ b/lib/deso_math.go @@ -5,7 +5,7 @@ import ( "math" "math/big" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/holiman/uint256" ) @@ -236,11 +236,11 @@ func SafeUint256() *_SafeUint256 { } func (safeUint256 *_SafeUint256) Add(x *uint256.Int, y *uint256.Int) (*uint256.Int, error) { - if uint256.NewInt().Sub(MaxUint256, y).Lt(x) { + if uint256.NewInt(0).Sub(MaxUint256, y).Lt(x) { return nil, fmt.Errorf("addition overflows uint256") } - return uint256.NewInt().Add(x, y), nil + return uint256.NewInt(0).Add(x, y), nil } func (safeUint256 *_SafeUint256) Sub(x *uint256.Int, y *uint256.Int) (*uint256.Int, error) { @@ -248,15 +248,15 @@ func (safeUint256 *_SafeUint256) Sub(x *uint256.Int, y *uint256.Int) (*uint256.I return nil, fmt.Errorf("subtraction underflows uint256") } - return uint256.NewInt().Sub(x, y), nil + return uint256.NewInt(0).Sub(x, y), nil } func (safeUint256 *_SafeUint256) Mul(x *uint256.Int, y *uint256.Int) (*uint256.Int, error) { - if uint256.NewInt().Div(MaxUint256, y).Lt(x) { + if uint256.NewInt(0).Div(MaxUint256, y).Lt(x) { return nil, fmt.Errorf("multiplication overflows uint256") } - return uint256.NewInt().Mul(x, y), nil + return uint256.NewInt(0).Mul(x, y), nil } func (safeUint256 *_SafeUint256) Div(x *uint256.Int, y *uint256.Int) (*uint256.Int, error) { @@ -264,7 +264,7 @@ func (safeUint256 *_SafeUint256) Div(x *uint256.Int, y *uint256.Int) (*uint256.I return nil, fmt.Errorf("division by zero") } - return uint256.NewInt().Div(x, y), nil + return uint256.NewInt(0).Div(x, y), nil } // SafeUint64 allows for arithmetic operations that error diff --git a/lib/deso_math_test.go b/lib/deso_math_test.go index 8ccc278f0..58766dda7 100644 --- a/lib/deso_math_test.go +++ b/lib/deso_math_test.go @@ -13,47 +13,47 @@ func TestSafeUint256(t *testing.T) { var err error // Test Add(): sad path - result, err = 
SafeUint256().Add(uint256.NewInt().SetUint64(1), MaxUint256) + result, err = SafeUint256().Add(uint256.NewInt(1), MaxUint256) require.Nil(result) require.Error(err) require.Contains(err.Error(), "addition overflows uint256") // Test Add(): happy path - result, err = SafeUint256().Add(uint256.NewInt().SetUint64(2), uint256.NewInt().SetUint64(2)) - require.Equal(result, uint256.NewInt().SetUint64(4)) + result, err = SafeUint256().Add(uint256.NewInt(2), uint256.NewInt(2)) + require.Equal(result, uint256.NewInt(4)) require.NoError(err) // Test Sub(): sad path - result, err = SafeUint256().Sub(uint256.NewInt().SetUint64(1), uint256.NewInt().SetUint64(2)) + result, err = SafeUint256().Sub(uint256.NewInt(1), uint256.NewInt(2)) require.Nil(result) require.Error(err) require.Contains(err.Error(), "subtraction underflows uint256") // Test Sub(): happy path - result, err = SafeUint256().Sub(uint256.NewInt().SetUint64(3), uint256.NewInt().SetUint64(2)) - require.Equal(result, uint256.NewInt().SetUint64(1)) + result, err = SafeUint256().Sub(uint256.NewInt(3), uint256.NewInt(2)) + require.Equal(result, uint256.NewInt(1)) require.NoError(err) // Test Mul(): sad path - result, err = SafeUint256().Mul(MaxUint256, uint256.NewInt().SetUint64(2)) + result, err = SafeUint256().Mul(MaxUint256, uint256.NewInt(2)) require.Nil(result) require.Error(err) require.Contains(err.Error(), "multiplication overflows uint256") // Test Mul(): happy path - result, err = SafeUint256().Mul(uint256.NewInt().SetUint64(3), uint256.NewInt().SetUint64(4)) - require.Equal(result, uint256.NewInt().SetUint64(12)) + result, err = SafeUint256().Mul(uint256.NewInt(3), uint256.NewInt(4)) + require.Equal(result, uint256.NewInt(12)) require.NoError(err) // Test Div(): sad path - result, err = SafeUint256().Div(uint256.NewInt().SetUint64(3), uint256.NewInt()) + result, err = SafeUint256().Div(uint256.NewInt(3), uint256.NewInt(0)) require.Nil(result) require.Error(err) require.Contains(err.Error(), "division by zero") 
// Test Div(): happy path - result, err = SafeUint256().Div(uint256.NewInt().SetUint64(9), uint256.NewInt().SetUint64(3)) - require.Equal(result, uint256.NewInt().SetUint64(3)) + result, err = SafeUint256().Div(uint256.NewInt(9), uint256.NewInt(3)) + require.Equal(result, uint256.NewInt(3)) require.NoError(err) } diff --git a/lib/errors.go b/lib/errors.go index 83692f6c8..51aaf7641 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -5,7 +5,7 @@ import ( "reflect" "strings" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" ) // RuleError is an error type that specifies an error occurred during @@ -620,7 +620,7 @@ func IsByteArrayValidPublicKey(bytes []byte) error { if len(bytes) != btcec.PubKeyBytesLenCompressed { return RuleErrorPubKeyLen } - _, err := btcec.ParsePubKey(bytes, btcec.S256()) + _, err := btcec.ParsePubKey(bytes) if err != nil { return RuleErrorParsePublicKey } diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index df288f385..e240048aa 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -16,11 +16,12 @@ import ( "sync/atomic" "time" - "github.com/btcsuite/btcutil" - "github.com/dgraph-io/badger/v4" + "github.com/btcsuite/btcd/btcutil" "github.com/gernest/mention" - "github.com/btcsuite/btcd/btcec" + "github.com/dgraph-io/badger/v4" + + "github.com/btcsuite/btcd/btcec/v2" "github.com/deso-protocol/core/collections" "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" diff --git a/lib/load_test.go b/lib/load_test.go index 88c54543c..8c8f95f8d 100644 --- a/lib/load_test.go +++ b/lib/load_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -35,7 +35,7 @@ func TestComputeMaxTPS(t *testing.T) { for ii := 0; ii < numProfiles; ii++ { fmt.Println("Processing top txn: ", len(txns)) // Compute a private/public key pair - privKey, err := 
btcec.NewPrivateKey(btcec.S256()) + privKey, err := btcec.NewPrivateKey() require.NoError(err) privKeys = append(privKeys, privKey) pubKeys = append(pubKeys, privKey.PubKey()) diff --git a/lib/miner.go b/lib/miner.go index 5bc1e45d1..67ed8976a 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -17,7 +17,7 @@ import ( "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/desohash" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/davecgh/go-spew/spew" merkletree "github.com/deso-protocol/go-merkle-tree" "github.com/golang/glog" @@ -47,7 +47,7 @@ func NewDeSoMiner(_minerPublicKeys []string, _numThreads uint32, if err != nil { return nil, errors.Wrapf(err, "NewDeSoMiner: ") } - pkObj, err := btcec.ParsePubKey(pkBytes, btcec.S256()) + pkObj, err := btcec.ParsePubKey(pkBytes) if err != nil { return nil, errors.Wrapf(err, "NewDeSoMiner: ") } diff --git a/lib/network.go b/lib/network.go index f433ab0a0..4ddcb9eb2 100644 --- a/lib/network.go +++ b/lib/network.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "io" "math" "math/big" @@ -19,9 +20,7 @@ import ( "github.com/golang/glog" - "github.com/decred/dcrd/dcrec/secp256k1/v4" - - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" decredEC "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" @@ -2762,7 +2761,7 @@ func (msg *MsgDeSoHeader) String() string { type BlockProducerInfo struct { PublicKey []byte - Signature *btcec.Signature + Signature *ecdsa2.Signature } func (bpi *BlockProducerInfo) Serialize() []byte { @@ -2823,7 +2822,7 @@ func (bpi *BlockProducerInfo) Deserialize(data []byte) error { } ret.Signature = nil if sigLen > 0 { - sig, err := btcec.ParseDERSignature(sigBytes, btcec.S256()) + sig, err := ecdsa2.ParseDERSignature(sigBytes) if err != nil { return errors.Wrapf(err, "BlockProducerInfo.Deserialize: 
Error parsing signature bytes: ") } @@ -3326,7 +3325,7 @@ const ( // DeSoSignature is a wrapper around ECDSA signatures used primarily in the MsgDeSoTxn transaction type. type DeSoSignature struct { // Sign stores the main ECDSA signature. We use the btcec crypto package for most of the heavy-lifting. - Sign *btcec.Signature + Sign *ecdsa2.Signature // RecoveryId is the public key recovery id. The RecoveryId is taken from the DeSo-DER signature header magic byte and // must be in the [0, 3] range. @@ -3335,7 +3334,7 @@ type DeSoSignature struct { IsRecoverable bool } -func (desoSign *DeSoSignature) SetSignature(sign *btcec.Signature) { +func (desoSign *DeSoSignature) SetSignature(sign *ecdsa2.Signature) { desoSign.Sign = sign } @@ -3354,7 +3353,9 @@ func (desoSign *DeSoSignature) HasHighS() bool { } // We reject high-S signatures as they lead to inconsistent public key recovery // https://github.com/indutny/elliptic/blob/master/lib/elliptic/ec/index.js#L147 - return desoSign.Sign.S.Cmp(big.NewInt(0).Rsh(secp256k1.Params().N, 1)) != -1 + // TODO: this should be removed once we have a proper fix for this. + //return desoSign.Sign.S.Cmp(big.NewInt(0).Rsh(secp256k1.Params().N, 1)) != -1 + return false } // ToBytes encodes the signature in accordance to the DeSo-DER ECDSA format. @@ -3399,7 +3400,7 @@ func (desoSign *DeSoSignature) FromBytes(signatureBytes []byte) error { signatureBytesCopy[0] = derSigMagicOffset } // Parse the signature assuming it's encoded in the standard DER format. - desoSign.Sign, err = btcec.ParseDERSignature(signatureBytesCopy, btcec.S256()) + desoSign.Sign, err = ecdsa2.ParseDERSignature(signatureBytesCopy) if err != nil { return errors.Wrapf(err, "Problem parsing signatureBytes") } @@ -3442,16 +3443,12 @@ func (desoSign *DeSoSignature) _btcecSerializeCompact() ([]byte, error) { // We will change from the btcec signature type to the dcrec signature type. To achieve this, we will create the // ecdsa (R, S) pair using the decred's package. 
// Reference: https://github.com/decred/dcrd/blob/1eff7/dcrec/secp256k1/modnscalar_test.go#L26 - rBytes := desoSign.Sign.R.Bytes() - r := &secp256k1.ModNScalar{} - r.SetByteSlice(rBytes) + r := desoSign.Sign.R() - sBytes := desoSign.Sign.S.Bytes() - s := &secp256k1.ModNScalar{} - s.SetByteSlice(sBytes) + s := desoSign.Sign.S() // To make sure the signature has been correctly parsed, we verify DER encoding of both signatures matches. - verifySignature := decredEC.NewSignature(r, s) + verifySignature := decredEC.NewSignature(&r, &s) if !bytes.Equal(verifySignature.Serialize(), desoSign.Sign.Serialize()) { return nil, fmt.Errorf("_btcecSerializeCompact: Problem sanity-checking signature") } @@ -3477,7 +3474,7 @@ func (desoSign *DeSoSignature) RecoverPublicKey(messageHash []byte) (*btcec.Publ } // Now recover the public key from the compact encoding. - recoveredPublicKey, _, err := btcec.RecoverCompact(btcec.S256(), signatureBytes, messageHash) + recoveredPublicKey, _, err := ecdsa2.RecoverCompact(signatureBytes, messageHash) if err != nil { return nil, errors.Wrapf(err, "RecoverPublicKey: Problem recovering public key from the signature bytes") } @@ -3492,18 +3489,12 @@ func (desoSign *DeSoSignature) RecoverPublicKey(messageHash []byte) (*btcec.Publ // the first byte. This makes it so that the first byte will be between [0x31, 0x34] inclusive, // instead of being 0x30, which is the standard DER signature magic number. func SignRecoverable(bb []byte, privateKey *btcec.PrivateKey) (*DeSoSignature, error) { - signature, err := privateKey.Sign(bb) - if err != nil { - return nil, err - } + signature := ecdsa2.Sign(privateKey, bb) // We use SignCompact from the btcec library to get the recoverID. This results in a non-standard // encoding that we need to manipulate in order to get the recoveryID back out. See comment on // _btcecSerializeCompact for more information. 
- signatureCompact, err := btcec.SignCompact(btcec.S256(), privateKey, bb, true) - if err != nil { - return nil, err - } + signatureCompact := ecdsa2.SignCompact(privateKey, bb, true) recoveryId := (signatureCompact[0] - btcecCompactSigMagicOffset) & ^byte(btcecCompactSigCompPubKey) return &DeSoSignature{ @@ -4006,7 +3997,7 @@ func (msg *MsgDeSoTxn) Copy() (*MsgDeSoTxn, error) { return newTxn, nil } -func (msg *MsgDeSoTxn) Sign(privKey *btcec.PrivateKey) (*btcec.Signature, error) { +func (msg *MsgDeSoTxn) Sign(privKey *btcec.PrivateKey) (*ecdsa2.Signature, error) { // Serialize the transaction without the signature portion. txnBytes, err := msg.ToBytes(true /*preSignature*/) if err != nil { @@ -4015,10 +4006,7 @@ func (msg *MsgDeSoTxn) Sign(privKey *btcec.PrivateKey) (*btcec.Signature, error) // Compute a hash of the transaction bytes without the signature // portion and sign it with the passed private key. txnSignatureHash := Sha256DoubleHash(txnBytes) - txnSignature, err := privKey.Sign(txnSignatureHash[:]) - if err != nil { - return nil, err - } + txnSignature := ecdsa2.Sign(privKey, txnSignatureHash[:]) return txnSignature, nil } @@ -8069,7 +8057,7 @@ func (txnData *DAOCoinMetadata) FromBytes(data []byte) error { if err != nil { return fmt.Errorf("DAOCoinMetadata.FromBytes: Error reading coinsToMintBytes: %v", err) } - ret.CoinsToMintNanos = *uint256.NewInt().SetBytes(coinsToMintBytes) + ret.CoinsToMintNanos = *uint256.NewInt(0).SetBytes(coinsToMintBytes) } { @@ -8090,7 +8078,7 @@ func (txnData *DAOCoinMetadata) FromBytes(data []byte) error { if err != nil { return fmt.Errorf("DAOCoinMetadata.FromBytes: Error reading coinsToBurnBytes: %v", err) } - ret.CoinsToBurnNanos = *uint256.NewInt().SetBytes(coinsToBurnBytes) + ret.CoinsToBurnNanos = *uint256.NewInt(0).SetBytes(coinsToBurnBytes) } transferRestrictionStatus, err := rr.ReadByte() @@ -8179,7 +8167,7 @@ func (txnData *DAOCoinTransferMetadata) FromBytes(data []byte) error { if err != nil { return 
fmt.Errorf("DAOCoinTransferMetadata.FromBytes: Error reading coinsToTransferBytes: %v", err) } - ret.DAOCoinToTransferNanos = *uint256.NewInt().SetBytes(coinsToTransferBytes) + ret.DAOCoinToTransferNanos = *uint256.NewInt(0).SetBytes(coinsToTransferBytes) } // ReceiverPublicKey diff --git a/lib/network_connection.go b/lib/network_connection.go index 4d50d22a8..0c7b1193f 100644 --- a/lib/network_connection.go +++ b/lib/network_connection.go @@ -16,7 +16,7 @@ type outboundConnection struct { terminated bool attemptId uint64 - address *wire.NetAddress + address *wire.NetAddressV2 connection net.Conn isPersistent bool failed bool @@ -76,7 +76,7 @@ type OutboundConnectionAttempt struct { attemptId uint64 // netAddr is the address of the peer we are attempting to connect to. - netAddr *wire.NetAddress + netAddr *wire.NetAddressV2 // isPersistent is used to indicate whether we should retry connecting to the peer if the connection attempt fails. // If isPersistent is true, we will retry connecting to the peer until we are successful. Each time such connection // fails, we will sleep according to exponential backoff. Otherwise, we will only attempt to connect to the peer once. @@ -104,7 +104,7 @@ const ( outboundConnectionAttemptTerminated outboundConnectionAttemptStatus = 2 ) -func NewOutboundConnectionAttempt(attemptId uint64, netAddr *wire.NetAddress, isPersistent bool, +func NewOutboundConnectionAttempt(attemptId uint64, netAddr *wire.NetAddressV2, isPersistent bool, dialTimeout time.Duration, connectionChan chan *outboundConnection) *OutboundConnectionAttempt { return &OutboundConnectionAttempt{ @@ -202,11 +202,11 @@ func (oca *OutboundConnectionAttempt) SetTimeoutUnit(timeoutUnit time.Duration) // Otherwise, it will return nil. func (oca *OutboundConnectionAttempt) attemptOutboundConnection() net.Conn { // If the peer is not persistent, update the addrmgr. 
- glog.V(1).Infof("Attempting to connect to addr: %v:%v", oca.netAddr.IP.String(), oca.netAddr.Port) + glog.V(1).Infof("Attempting to connect to addr: %v:%v", oca.netAddr.Addr.String(), oca.netAddr.Port) var err error tcpAddr := net.TCPAddr{ - IP: oca.netAddr.IP, + IP: oca.netAddr.ToLegacy().IP, Port: int(oca.netAddr.Port), } conn, err := net.DialTimeout(tcpAddr.Network(), tcpAddr.String(), oca.dialTimeout) diff --git a/lib/network_connection_test.go b/lib/network_connection_test.go index 6b630eb02..224dd5de6 100644 --- a/lib/network_connection_test.go +++ b/lib/network_connection_test.go @@ -14,7 +14,7 @@ import ( type simpleListener struct { t *testing.T ll net.Listener - addr *wire.NetAddress + addr *wire.NetAddressV2 closed bool connectionChan chan Connection @@ -102,9 +102,9 @@ func verifyOutboundConnection(t *testing.T, conn *outboundConnection, sl *simple return } - require.Equal(conn.address.IP.String(), sl.getTCPAddr().IP.String()) + require.Equal(conn.address.ToLegacy().IP.String(), sl.getTCPAddr().IP.String()) require.Equal(conn.address.Port, uint16(sl.getTCPAddr().Port)) - require.Equal(conn.address.IP.String(), sl.getTCPAddr().IP.String()) + require.Equal(conn.address.ToLegacy().IP.String(), sl.getTCPAddr().IP.String()) require.Equal(conn.address.Port, uint16(sl.getTCPAddr().Port)) } diff --git a/lib/network_manager.go b/lib/network_manager.go index 10f340868..5f7b34942 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -417,7 +417,7 @@ func (nm *NetworkManager) processOutboundConnection(conn Connection) (*RemoteNod if oc.failed { return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Failed to connect to peer (%s:%v)", - oc.address.IP.String(), oc.address.Port) + oc.address.Addr.String(), oc.address.Port) } if !oc.isPersistent { @@ -840,7 +840,7 @@ func (nm *NetworkManager) connectNonValidators() { } // getRandomUnconnectedAddress returns a random address from the address manager that we are not already connected to. 
-func (nm *NetworkManager) getRandomUnconnectedAddress() *wire.NetAddress { +func (nm *NetworkManager) getRandomUnconnectedAddress() *wire.NetAddressV2 { for tries := 0; tries < 100; tries++ { addr := nm.AddrMgr.GetAddress() if addr == nil { @@ -888,7 +888,7 @@ func (nm *NetworkManager) CreateValidatorConnection(ipStr string, publicKey *bls remoteNode := nm.newRemoteNode(publicKey, false) if err := remoteNode.DialOutboundConnection(netAddr); err != nil { return errors.Wrapf(err, "NetworkManager.CreateValidatorConnection: Problem calling DialPersistentOutboundConnection "+ - "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) + "for addr: (%s:%v)", netAddr.Addr.String(), netAddr.Port) } nm.setRemoteNode(remoteNode) // Since we're initiating this connection, add the RemoteNode to the outbound validator index. @@ -908,7 +908,7 @@ func (nm *NetworkManager) CreateNonValidatorPersistentOutboundConnection(ipStr s remoteNode := nm.newRemoteNode(nil, true) if err := remoteNode.DialPersistentOutboundConnection(netAddr); err != nil { return 0, errors.Wrapf(err, "NetworkManager.CreateNonValidatorPersistentOutboundConnection: Problem calling DialPersistentOutboundConnection "+ - "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) + "for addr: (%s:%v)", netAddr.Addr.String(), netAddr.Port) } nm.setRemoteNode(remoteNode) nm.GetNonValidatorOutboundIndex().Set(remoteNode.GetId(), remoteNode) @@ -923,7 +923,7 @@ func (nm *NetworkManager) CreateNonValidatorOutboundConnection(ipStr string) err return nm.createNonValidatorOutboundConnection(netAddr) } -func (nm *NetworkManager) createNonValidatorOutboundConnection(netAddr *wire.NetAddress) error { +func (nm *NetworkManager) createNonValidatorOutboundConnection(netAddr *wire.NetAddressV2) error { if netAddr == nil { return fmt.Errorf("NetworkManager.CreateNonValidatorOutboundConnection: netAddr is nil") } @@ -931,7 +931,7 @@ func (nm *NetworkManager) createNonValidatorOutboundConnection(netAddr *wire.Net remoteNode := 
nm.newRemoteNode(nil, false) if err := remoteNode.DialOutboundConnection(netAddr); err != nil { return errors.Wrapf(err, "NetworkManager.CreateNonValidatorOutboundConnection: Problem calling DialOutboundConnection "+ - "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) + "for addr: (%s:%v)", netAddr.Addr.String(), netAddr.Port) } nm.setRemoteNode(remoteNode) nm.GetNonValidatorOutboundIndex().Set(remoteNode.GetId(), remoteNode) @@ -939,7 +939,7 @@ func (nm *NetworkManager) createNonValidatorOutboundConnection(netAddr *wire.Net } func (nm *NetworkManager) AttachInboundConnection(conn net.Conn, - na *wire.NetAddress) (*RemoteNode, error) { + na *wire.NetAddressV2) (*RemoteNode, error) { remoteNode := nm.newRemoteNode(nil, false) if err := remoteNode.AttachInboundConnection(conn, na); err != nil { @@ -952,7 +952,7 @@ func (nm *NetworkManager) AttachInboundConnection(conn net.Conn, return remoteNode, nil } -func (nm *NetworkManager) AttachOutboundConnection(conn net.Conn, na *wire.NetAddress, +func (nm *NetworkManager) AttachOutboundConnection(conn net.Conn, na *wire.NetAddressV2, remoteNodeId uint64, isPersistent bool) (*RemoteNode, error) { id := NewRemoteNodeId(remoteNodeId) @@ -1339,7 +1339,7 @@ func (nm *NetworkManager) handleHandshakeCompletePoSMessage(remoteNode *RemoteNo // ## Helper Functions // ########################### -func (nm *NetworkManager) ConvertIPStringToNetAddress(ipStr string) (*wire.NetAddress, error) { +func (nm *NetworkManager) ConvertIPStringToNetAddress(ipStr string) (*wire.NetAddressV2, error) { netAddr, err := IPToNetAddr(ipStr, nm.AddrMgr, nm.params) if err != nil { return nil, errors.Wrapf(err, @@ -1353,7 +1353,7 @@ func (nm *NetworkManager) ConvertIPStringToNetAddress(ipStr string) (*wire.NetAd return netAddr, nil } -func IPToNetAddr(ipStr string, addrMgr *addrmgr.AddrManager, params *DeSoParams) (*wire.NetAddress, error) { +func IPToNetAddr(ipStr string, addrMgr *addrmgr.AddrManager, params *DeSoParams) (*wire.NetAddressV2, error) 
{ port := params.DefaultSocketPort host, portstr, err := net.SplitHostPort(ipStr) if err != nil { diff --git a/lib/network_test.go b/lib/network_test.go index 2598ab0f1..878bf0c6d 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -3,6 +3,7 @@ package lib import ( "bytes" "encoding/hex" + ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "math/big" "math/rand" "reflect" @@ -16,7 +17,7 @@ import ( "github.com/holiman/uint256" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/wire" "github.com/bxcodec/faker" @@ -434,10 +435,9 @@ func createTestBlockVersion1(t *testing.T) *MsgDeSoBlock { newBlockV1 := *expectedBlock // Add a signature to the block V1 - priv, err := btcec.NewPrivateKey(btcec.S256()) - require.NoError(err) - newBlockV1.BlockProducerInfo.Signature, err = priv.Sign([]byte{0x01, 0x02, 0x03}) + priv, err := btcec.NewPrivateKey() require.NoError(err) + newBlockV1.BlockProducerInfo.Signature = ecdsa2.Sign(priv, []byte{0x01, 0x02, 0x03}) return &newBlockV1 } @@ -1316,7 +1316,7 @@ func TestDAOCoin(t *testing.T) { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x00, 0x01, 0x02} txMeta.OperationType = DAOCoinOperationTypeMint - txMeta.CoinsToMintNanos = *uint256.NewInt().SetUint64(100) + txMeta.CoinsToMintNanos = *uint256.NewInt(100) data, err := txMeta.ToBytes(false) require.NoError(err) @@ -1336,7 +1336,7 @@ func TestDAOCoin(t *testing.T) { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x00, 0x01, 0x02} txMeta.OperationType = DAOCoinOperationTypeBurn - txMeta.CoinsToBurnNanos = *uint256.NewInt().SetUint64(100) + txMeta.CoinsToBurnNanos = *uint256.NewInt(100) data, err := txMeta.ToBytes(false) require.NoError(err) @@ -1405,7 +1405,7 @@ func TestDAOCoinTransfer(t *testing.T) { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x00, 0x01, 0x02} - txMeta.DAOCoinToTransferNanos = 
*uint256.NewInt().SetUint64(100) + txMeta.DAOCoinToTransferNanos = *uint256.NewInt(100) data, err := txMeta.ToBytes(false) require.NoError(err) @@ -1423,10 +1423,9 @@ func TestMessagingKey(t *testing.T) { m0PrivBytes, _, err := Base58CheckDecode(m0Priv) require.NoError(err) - privKey, pubKey := btcec.PrivKeyFromBytes(btcec.S256(), m0PrivBytes) + privKey, pubKey := btcec.PrivKeyFromBytes(m0PrivBytes) hash := Sha256DoubleHash([]byte{0x00, 0x01}) - signature, err := privKey.Sign(hash[:]) - require.NoError(err) + signature := ecdsa2.Sign(privKey, hash[:]) encrypted, err := EncryptBytesWithPublicKey(hash[:], pubKey.ToECDSA()) require.NoError(err) @@ -1771,7 +1770,7 @@ func TestDeSoSignature_SerializeCompact(t *testing.T) { for ; numTestCases > 0; numTestCases-- { // Generate a random (private, public) keypair. - privateKey, err := btcec.NewPrivateKey(btcec.S256()) + privateKey, err := btcec.NewPrivateKey() require.NoError(err) publicKeyBytes := privateKey.PubKey().SerializeCompressed() @@ -1783,8 +1782,7 @@ func TestDeSoSignature_SerializeCompact(t *testing.T) { require.NoError(err) // Verify that the compact signature is equal to what we serialized. - signatureCompact, err := btcec.SignCompact(btcec.S256(), privateKey, messageHash, true) - require.NoError(err) + signatureCompact := ecdsa2.SignCompact(privateKey, messageHash, true) // Use the DeSoSignature.SerializeCompact encoding. signatureCompactCustom, err := desoSignature._btcecSerializeCompact() @@ -1793,7 +1791,7 @@ func TestDeSoSignature_SerializeCompact(t *testing.T) { require.Equal(true, reflect.DeepEqual(signatureCompact, signatureCompactCustom)) // Recover the public key from our custom encoding. - recoveredPublicKey, _, err := btcec.RecoverCompact(btcec.S256(), signatureCompactCustom, messageHash) + recoveredPublicKey, _, err := ecdsa2.RecoverCompact(signatureCompactCustom, messageHash) require.NoError(err) // Verify that the recovered public key matches the original public key. 
diff --git a/lib/peer.go b/lib/peer.go index 727c252c9..e28d69e8c 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -72,7 +72,7 @@ type Peer struct { serviceFlags ServiceFlag latestHeight uint64 addrStr string - netAddr *wire.NetAddress + netAddr *wire.NetAddressV2 minTxFeeRateNanosPerKB uint64 // Messages for which we are expecting a reply within a fixed // amount of time. This list is always sorted by ExpectedTime, @@ -625,7 +625,7 @@ func (pp *Peer) StartDeSoMessageProcessor() { } // NewPeer creates a new Peer object. -func NewPeer(_id uint64, _conn net.Conn, _isOutbound bool, _netAddr *wire.NetAddress, +func NewPeer(_id uint64, _conn net.Conn, _isOutbound bool, _netAddr *wire.NetAddressV2, _isPersistent bool, _stallTimeoutSeconds uint64, _minFeeRateNanosPerKB uint64, params *DeSoParams, @@ -799,12 +799,12 @@ func (pp *Peer) Address() string { return pp.addrStr } -func (pp *Peer) NetAddress() *wire.NetAddress { +func (pp *Peer) NetAddress() *wire.NetAddressV2 { return pp.netAddr } func (pp *Peer) IP() string { - return pp.netAddr.IP.String() + return pp.netAddr.ToLegacy().IP.String() } func (pp *Peer) Port() uint16 { diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 5bee3bc43..b018dbf5c 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -902,21 +902,21 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { validator1Entry := &ValidatorEntry{ ValidatorPKID: m1PKID, VotingPublicKey: m1VotingPrivateKey.PublicKey(), - TotalStakeAmountNanos: uint256.NewInt().SetUint64(3), + TotalStakeAmountNanos: uint256.NewInt(3), } m2PKID := DBGetPKIDEntryForPublicKey(bc.db, nil, m2PkBytes).PKID m2VotingPrivateKey := _generateRandomBLSPrivateKey(t) validator2Entry := &ValidatorEntry{ ValidatorPKID: m2PKID, VotingPublicKey: m2VotingPrivateKey.PublicKey(), - TotalStakeAmountNanos: uint256.NewInt().SetUint64(2), + TotalStakeAmountNanos: uint256.NewInt(2), } m3PKID := DBGetPKIDEntryForPublicKey(bc.db, nil, m3PkBytes).PKID 
m3VotingPrivateKey := _generateRandomBLSPrivateKey(t) validator3Entry := &ValidatorEntry{ ValidatorPKID: m3PKID, VotingPublicKey: m3VotingPrivateKey.PublicKey(), - TotalStakeAmountNanos: uint256.NewInt().SetUint64(1), + TotalStakeAmountNanos: uint256.NewInt(1), } validatorSet := []*ValidatorEntry{validator1Entry, validator2Entry, validator3Entry} @@ -961,7 +961,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { // Malformed validators should fail { // Zero stake amount - validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(0) + validatorSet[0].TotalStakeAmountNanos = uint256.NewInt(0) err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) @@ -973,7 +973,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { require.Equal(t, err, RuleErrorInvalidVoteQC) // Reset stake amount - validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(3) + validatorSet[0].TotalStakeAmountNanos = uint256.NewInt(3) // Nil voting public key validatorSet[0].VotingPublicKey = nil err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) @@ -1166,7 +1166,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { { // Invalid validator set tests // Zero stake amount - validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(0) + validatorSet[0].TotalStakeAmountNanos = uint256.NewInt(0) err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) @@ -1178,7 +1178,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { require.Equal(t, err, RuleErrorInvalidTimeoutQC) // Reset stake amount - validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(3) + validatorSet[0].TotalStakeAmountNanos = uint256.NewInt(3) // Nil voting public key validatorSet[0].VotingPublicKey = nil err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) diff --git a/lib/pos_constants.go b/lib/pos_constants.go 
index 3a19e8caf..d17631062 100644 --- a/lib/pos_constants.go +++ b/lib/pos_constants.go @@ -31,7 +31,7 @@ func BuildProofOfStakeCutoverValidator() (consensus.Validator, error) { } validatorEntry := &ValidatorEntry{ VotingPublicKey: blsPrivateKey.PublicKey(), - TotalStakeAmountNanos: uint256.NewInt().SetUint64(1e9), + TotalStakeAmountNanos: uint256.NewInt(1e9), } return validatorEntry, nil } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 37a3dce5d..4150b4244 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -286,13 +286,13 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Len(t, validatorEntries, 7) require.Equal(t, validatorEntries[0].ValidatorPKID, m6PKID) require.Equal(t, validatorEntries[6].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(700)) - require.Equal(t, validatorEntries[6].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(700)) + require.Equal(t, validatorEntries[6].TotalStakeAmountNanos, uint256.NewInt(100)) // Test SnapshotValidatorSetTotalStakeAmountNanos is populated. snapshotValidatorSetTotalStakeAmountNanos, err := _newUtxoView(testMeta).GetSnapshotValidatorSetTotalStakeAmountNanos() require.NoError(t, err) - require.Equal(t, snapshotValidatorSetTotalStakeAmountNanos, uint256.NewInt().SetUint64(2800)) + require.Equal(t, snapshotValidatorSetTotalStakeAmountNanos, uint256.NewInt(2800)) // Test SnapshotLeaderSchedule is populated. 
for index := range validatorPKIDs { @@ -308,8 +308,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Len(t, stakeEntries, 7) require.Equal(t, stakeEntries[0].StakerPKID, m6PKID) require.Equal(t, stakeEntries[6].StakerPKID, m0PKID) - require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(700)) - require.Equal(t, stakeEntries[6].StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt(700)) + require.Equal(t, stakeEntries[6].StakeAmountNanos, uint256.NewInt(100)) } { // Test snapshotting changing stake. @@ -344,7 +344,7 @@ func TestRunEpochCompleteHook(t *testing.T) { _sortStakeEntriesByStakeAmount(stakeEntries) require.Len(t, stakeEntries, 7) require.Equal(t, stakeEntries[1].StakerPKID, m5PKID) - require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(600)) + require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt(600)) // Run OnEpochCompleteHook(). incrViewNumber() @@ -361,7 +361,7 @@ func TestRunEpochCompleteHook(t *testing.T) { _sortStakeEntriesByStakeAmount(stakeEntries) require.Len(t, stakeEntries, 7) require.Equal(t, stakeEntries[0].StakerPKID, m5PKID) - require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(800)) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt(800)) } { // Test snapshotting changing GlobalParams. @@ -612,11 +612,11 @@ func TestStakingRewardDistribution(t *testing.T) { // Validator m0 has 500 nanos staked in total: 400 staked by itself and 100 delegated by m2. require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(500)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(500)) // Validator m1 has 250 nanos staked in total: 200 staked by itself and 50 delegated by m3. 
require.Equal(t, validatorEntries[1].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(250)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(250)) } { @@ -626,13 +626,13 @@ func TestStakingRewardDistribution(t *testing.T) { require.Len(t, stakeEntries, 4) require.Equal(t, stakeEntries[0].StakerPKID, m0PKID) - require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(400)) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt(400)) require.Equal(t, stakeEntries[1].StakerPKID, m1PKID) - require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt(200)) require.Equal(t, stakeEntries[2].StakerPKID, m2PKID) - require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt(100)) require.Equal(t, stakeEntries[3].StakerPKID, m3PKID) - require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) + require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt(50)) } { @@ -653,13 +653,13 @@ func TestStakingRewardDistribution(t *testing.T) { require.NoError(t, err) require.Len(t, stakeEntries, 4) require.Equal(t, stakeEntries[0].StakerPKID, m0PKID) - require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(400)) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt(400)) require.Equal(t, stakeEntries[1].StakerPKID, m1PKID) - require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt(200)) require.Equal(t, stakeEntries[2].StakerPKID, m2PKID) - require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt(100)) require.Equal(t, 
stakeEntries[3].StakerPKID, m3PKID) - require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) + require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt(50)) // Test that DESO wallet balances are unchanged. m0Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m0PkBytes) @@ -681,13 +681,13 @@ func TestStakingRewardDistribution(t *testing.T) { _sortStakeEntriesByStakeAmount(snapshotStakeEntries) require.Len(t, snapshotStakeEntries, 4) require.Equal(t, snapshotStakeEntries[0].StakerPKID, m0PKID) - require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(400)) + require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt(400)) require.Equal(t, snapshotStakeEntries[1].StakerPKID, m1PKID) - require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt(200)) require.Equal(t, snapshotStakeEntries[2].StakerPKID, m2PKID) - require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt(100)) require.Equal(t, snapshotStakeEntries[3].StakerPKID, m3PKID) - require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) + require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt(50)) } { @@ -719,7 +719,7 @@ func TestStakingRewardDistribution(t *testing.T) { // Final stake amount: // - m0's final stake is: 400 + 42 + 2 = 444 nanos require.Equal(t, stakeEntries[0].StakerPKID, m0PKID) - require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(444)) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt(444)) // Test that m0's DESO wallet balance is unchanged. 
m0Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m0PkBytes) @@ -745,7 +745,7 @@ func TestStakingRewardDistribution(t *testing.T) { // Test that m1's stake is unchanged. require.Equal(t, stakeEntries[1].StakerPKID, m1PKID) - require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt(200)) // Test reward computation and restaking for m2: // - m2's original stake was 100 nanos @@ -759,7 +759,7 @@ func TestStakingRewardDistribution(t *testing.T) { // Final stake amount: // - m2's final stake is: 100 + 10 - 2 = 108 nanos require.Equal(t, stakeEntries[2].StakerPKID, m2PKID) - require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(108)) + require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt(108)) // Test that m2's DESO wallet balance is unchanged. m2Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m2PkBytes) @@ -784,7 +784,7 @@ func TestStakingRewardDistribution(t *testing.T) { // Test that m3's stake is unchanged. require.Equal(t, stakeEntries[3].StakerPKID, m3PKID) - require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) + require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt(50)) // Make sure the validator's total stake amount is correct. 
// M0's total stake amount nanos should be the sum of M0 and M2's stakes (444 + 108) @@ -805,13 +805,13 @@ func TestStakingRewardDistribution(t *testing.T) { _sortStakeEntriesByStakeAmount(snapshotStakeEntries) require.Len(t, snapshotStakeEntries, 4) require.Equal(t, snapshotStakeEntries[0].StakerPKID, m0PKID) - require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(400)) + require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt(400)) require.Equal(t, snapshotStakeEntries[1].StakerPKID, m1PKID) - require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt(200)) require.Equal(t, snapshotStakeEntries[2].StakerPKID, m2PKID) - require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(100)) + require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt(100)) require.Equal(t, snapshotStakeEntries[3].StakerPKID, m3PKID) - require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) + require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt(50)) } { @@ -829,13 +829,13 @@ func TestStakingRewardDistribution(t *testing.T) { _sortStakeEntriesByStakeAmount(snapshotStakeEntries) require.Len(t, snapshotStakeEntries, 4) require.Equal(t, snapshotStakeEntries[0].StakerPKID, m0PKID) - require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(444)) + require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt(444)) require.Equal(t, snapshotStakeEntries[1].StakerPKID, m1PKID) - require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) + require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt(200)) require.Equal(t, snapshotStakeEntries[2].StakerPKID, m2PKID) - require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(108)) + 
require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt(108)) require.Equal(t, snapshotStakeEntries[3].StakerPKID, m3PKID) - require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) + require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt(50)) } } @@ -921,7 +921,7 @@ func _stakeToValidator( stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(validatorPkBytes), RewardMethod: rewardMethod, - StakeAmountNanos: uint256.NewInt().SetUint64(stakeAmountNanos), + StakeAmountNanos: uint256.NewInt(stakeAmountNanos), } _, err = _submitStakeTxn(testMeta, stakerPubKey, stakerPrivKey, stakeMetadata, nil, true) require.NoError(testMeta.t, err) diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 618e54bf5..246b206be 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -30,7 +30,7 @@ func (bav *UtxoView) GenerateLeaderSchedule(validatorSet []*ValidatorEntry) ([]* } // Sum TotalStakeAmountNanos. - totalStakeAmountNanos := uint256.NewInt() + totalStakeAmountNanos := uint256.NewInt(0) for _, validatorEntry := range validatorEntries { totalStakeAmountNanos, err = SafeUint256().Add(totalStakeAmountNanos, validatorEntry.TotalStakeAmountNanos) if err != nil { @@ -66,10 +66,10 @@ func (bav *UtxoView) GenerateLeaderSchedule(validatorSet []*ValidatorEntry) ([]* } // Take RandomUint256 % TotalStakeAmountNanos. - randomUint256 := uint256.NewInt().Mod(currentRandomSeedHash.ToUint256(), totalStakeAmountNanos) + randomUint256 := uint256.NewInt(0).Mod(currentRandomSeedHash.ToUint256(), totalStakeAmountNanos) // Keep track of the stake seen so far in this loop. - sumStakeAmountNanos := uint256.NewInt() + sumStakeAmountNanos := uint256.NewInt(0) for _, validatorEntry := range validatorEntries { // Skip if ValidatorEntry has already been added to the leader schedule. 
diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 10c938444..52ea92d07 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -86,7 +86,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { } stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(pkBytes), - StakeAmountNanos: uint256.NewInt().SetUint64(stakeAmountNanos), + StakeAmountNanos: uint256.NewInt(stakeAmountNanos), } _, err = _submitStakeTxn(testMeta, publicKey, privateKey, stakeMetadata, nil, true) require.NoError(t, err) diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index d5de7494e..acc97f5b2 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -20,7 +20,7 @@ import ( type RandomSeedHash [32]byte func (randomSeedHash *RandomSeedHash) ToUint256() *uint256.Int { - return uint256.NewInt().SetBytes(randomSeedHash.ToBytes()) + return uint256.NewInt(0).SetBytes(randomSeedHash.ToBytes()) } func (randomSeedHash *RandomSeedHash) Copy() *RandomSeedHash { diff --git a/lib/pos_random_seed_test.go b/lib/pos_random_seed_test.go index 799a10a97..61cb7b25a 100644 --- a/lib/pos_random_seed_test.go +++ b/lib/pos_random_seed_test.go @@ -83,10 +83,10 @@ func TestCurrentRandomSeedHash(t *testing.T) { // Test RandomSeedHash.ToUint256(). Generates a valid uint256. // Idempotent: generates the same uint256 each time. 
- require.True(t, randomSeedHash1.ToUint256().Cmp(uint256.NewInt()) > 0) + require.True(t, randomSeedHash1.ToUint256().Cmp(uint256.NewInt(0)) > 0) require.True(t, randomSeedHash1.ToUint256().Cmp(MaxUint256) < 0) require.True(t, randomSeedHash1.ToUint256().Eq(randomSeedHash1.ToUint256())) - require.True(t, randomSeedHash2.ToUint256().Cmp(uint256.NewInt()) > 0) + require.True(t, randomSeedHash2.ToUint256().Cmp(uint256.NewInt(0)) > 0) require.True(t, randomSeedHash2.ToUint256().Cmp(MaxUint256) < 0) require.True(t, randomSeedHash2.ToUint256().Eq(randomSeedHash2.ToUint256())) require.False(t, randomSeedHash1.ToUint256().Eq(randomSeedHash2.ToUint256())) diff --git a/lib/pos_server_regtest.go b/lib/pos_server_regtest.go index 2aed5f984..a7e441ab8 100644 --- a/lib/pos_server_regtest.go +++ b/lib/pos_server_regtest.go @@ -68,7 +68,7 @@ func (srv *Server) submitRegtestValidatorRegistrationTxns(block *MsgDeSoBlock) { stakeTxnMeta := StakeMetadata{ ValidatorPublicKey: NewPublicKey(transactorPubKey), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt().SetUint64(10 * 1e6), + StakeAmountNanos: uint256.NewInt(10 * 1e6), } stakeTxn, _, _, _, err := srv.blockProducer.chain.CreateStakeTxn( diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 2548c12d9..e6052f736 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -908,7 +908,7 @@ func (bav *UtxoView) GetSnapshotValidatorSetTotalStakeAmountNanos() (*uint256.In ) } if globalActiveStakeAmountNanos == nil { - globalActiveStakeAmountNanos = uint256.NewInt() + globalActiveStakeAmountNanos = uint256.NewInt(0) } // Cache the result in the UtxoView. 
bav._setSnapshotValidatorSetTotalStakeAmountNanos(globalActiveStakeAmountNanos, snapshotAtEpochNumber) diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index 40f2083ce..5ff9e2599 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -222,9 +222,9 @@ func (bav *UtxoView) distributeStakingReward( IsValidatorCommission: isValidatorCommission, }, } - stakeEntry.StakeAmountNanos = uint256.NewInt().Add(stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(rewardNanos)) + stakeEntry.StakeAmountNanos = uint256.NewInt(0).Add(stakeEntry.StakeAmountNanos, uint256.NewInt(rewardNanos)) bav._setStakeEntryMappings(stakeEntry) - validatorEntry.TotalStakeAmountNanos = uint256.NewInt().Add(validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(rewardNanos)) + validatorEntry.TotalStakeAmountNanos = uint256.NewInt(0).Add(validatorEntry.TotalStakeAmountNanos, uint256.NewInt(rewardNanos)) bav._setValidatorEntryMappings(validatorEntry) return utxoOperation, nil } diff --git a/lib/pos_transaction_register_test.go b/lib/pos_transaction_register_test.go index 4b5fb41a0..33a3651ae 100644 --- a/lib/pos_transaction_register_test.go +++ b/lib/pos_transaction_register_test.go @@ -2,13 +2,14 @@ package lib import ( "bytes" + ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "math" "math/rand" "sort" "testing" "time" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/stretchr/testify/require" ) @@ -376,9 +377,9 @@ func _testGetDefaultGlobalParams() *GlobalParamsEntry { func _testGetRandomMempoolTxns(rand *rand.Rand, feeMin uint64, feeMax uint64, sizeMax uint64, timestampRange uint64, numTxns int) []*MempoolTx { txnPool := []*MempoolTx{} for ii := 0; ii < numTxns; ii++ { - randPriv, _ := btcec.NewPrivateKey(btcec.S256()) + randPriv, _ := btcec.NewPrivateKey() randMsg := RandomBytes(32) - randSig, _ := randPriv.Sign(randMsg) + randSig := ecdsa2.Sign(randPriv, randMsg) fee := rand.Uint64()%(feeMax-feeMin) + 
feeMin txnPool = append(txnPool, &MempoolTx{ diff --git a/lib/postgres.go b/lib/postgres.go index f3cae7833..cc606705d 100644 --- a/lib/postgres.go +++ b/lib/postgres.go @@ -720,7 +720,7 @@ func (balance *PGCreatorCoinBalance) NewBalanceEntry() *BalanceEntry { HODLerPKID: balance.HolderPKID, CreatorPKID: balance.CreatorPKID, // FIXME: This will break if the value exceeds uint256 - BalanceNanos: *uint256.NewInt().SetUint64(balance.BalanceNanos), + BalanceNanos: *uint256.NewInt(balance.BalanceNanos), HasPurchased: balance.HasPurchased, } } @@ -1093,14 +1093,14 @@ func (messageEntry *PGNewMessageGroupChatThreadEntry) ToAccessGroupId() AccessGr } func HexToUint256(input string) *uint256.Int { - output := uint256.NewInt() + output := uint256.NewInt(0) if input != "" { var err error output, err = uint256.FromHex(input) if err != nil { - output = uint256.NewInt() + output = uint256.NewInt(0) } } @@ -1439,8 +1439,12 @@ func (postgres *Postgres) InsertTransactionsTx(tx *pg.Tx, desoTxns []*MsgDeSoTxn } if txn.Signature.Sign != nil { - transaction.R = BigintToHash(txn.Signature.Sign.R) - transaction.S = BigintToHash(txn.Signature.Sign.S) + r := txn.Signature.Sign.R() + s := txn.Signature.Sign.S() + rBytes := (&r).Bytes() + sBytes := (&s).Bytes() + transaction.R = NewBlockHash(rBytes[:]) + transaction.S = NewBlockHash(sBytes[:]) transaction.RecoveryId = uint32(txn.Signature.RecoveryId) transaction.IsRecoverable = txn.Signature.IsRecoverable } diff --git a/lib/remote_node.go b/lib/remote_node.go index e0c5503ae..086abb41f 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -228,7 +228,7 @@ func (rn *RemoteNode) GetUserAgent() string { return rn.handshakeMetadata.userAgent } -func (rn *RemoteNode) GetNetAddress() *wire.NetAddress { +func (rn *RemoteNode) GetNetAddress() *wire.NetAddressV2 { if !rn.IsHandshakeCompleted() || rn.GetPeer() == nil { return nil } @@ -291,7 +291,7 @@ func (rn *RemoteNode) hasValidatorServiceFlag() bool { } // DialOutboundConnection dials an 
outbound connection to the provided netAddr. -func (rn *RemoteNode) DialOutboundConnection(netAddr *wire.NetAddress) error { +func (rn *RemoteNode) DialOutboundConnection(netAddr *wire.NetAddressV2) error { rn.mtx.Lock() defer rn.mtx.Unlock() @@ -305,7 +305,7 @@ func (rn *RemoteNode) DialOutboundConnection(netAddr *wire.NetAddress) error { } // DialPersistentOutboundConnection dials a persistent outbound connection to the provided netAddr. -func (rn *RemoteNode) DialPersistentOutboundConnection(netAddr *wire.NetAddress) error { +func (rn *RemoteNode) DialPersistentOutboundConnection(netAddr *wire.NetAddressV2) error { rn.mtx.Lock() defer rn.mtx.Unlock() @@ -319,7 +319,7 @@ func (rn *RemoteNode) DialPersistentOutboundConnection(netAddr *wire.NetAddress) } // AttachInboundConnection creates an inbound peer once a successful inbound connection has been established. -func (rn *RemoteNode) AttachInboundConnection(conn net.Conn, na *wire.NetAddress) error { +func (rn *RemoteNode) AttachInboundConnection(conn net.Conn, na *wire.NetAddressV2) error { rn.mtx.Lock() defer rn.mtx.Unlock() @@ -338,7 +338,7 @@ func (rn *RemoteNode) AttachInboundConnection(conn net.Conn, na *wire.NetAddress } // AttachOutboundConnection creates an outbound peer once a successful outbound connection has been established. -func (rn *RemoteNode) AttachOutboundConnection(conn net.Conn, na *wire.NetAddress, isPersistent bool) error { +func (rn *RemoteNode) AttachOutboundConnection(conn net.Conn, na *wire.NetAddressV2, isPersistent bool) error { rn.mtx.Lock() defer rn.mtx.Unlock() diff --git a/lib/server.go b/lib/server.go index e1fd07f41..1458c0a34 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2646,9 +2646,10 @@ func (srv *Server) _handleAddrMessage(pp *Peer, desoMsg DeSoMessage) { } // Add all the addresses we received to the addrmgr. 
- netAddrsReceived := []*wire.NetAddress{} + netAddrsReceived := []*wire.NetAddressV2{} for _, addr := range msg.AddrList { - addrAsNetAddr := wire.NewNetAddressIPPort(addr.IP, addr.Port, (wire.ServiceFlag)(addr.Services)) + addrAsNetAddr := wire.NetAddressV2FromBytes( + addr.Timestamp, (wire.ServiceFlag)(addr.Services), addr.IP[:], addr.Port) if !addrmgr.IsRoutable(addrAsNetAddr) { glog.V(1).Infof("Server._handleAddrMessage: Dropping address %v from peer %v because it is not routable", addr, pp) continue @@ -2665,7 +2666,7 @@ func (srv *Server) _handleAddrMessage(pp *Peer, desoMsg DeSoMessage) { "peer %v", len(msg.AddrList), pp) sourceAddr := &SingleAddr{ Timestamp: time.Now(), - IP: pp.netAddr.IP, + IP: pp.netAddr.ToLegacy().IP, Port: pp.netAddr.Port, Services: pp.serviceFlags, } @@ -2712,7 +2713,7 @@ func (srv *Server) _handleGetAddrMessage(pp *Peer, desoMsg DeSoMessage) { for _, netAddr := range netAddrsFound { singleAddr := &SingleAddr{ Timestamp: time.Now(), - IP: netAddr.IP, + IP: netAddr.ToLegacy().IP, Port: netAddr.Port, Services: (ServiceFlag)(netAddr.Services), } @@ -2959,12 +2960,12 @@ func (srv *Server) _startAddressRelayer() { bestAddress := srv.AddrMgr.GetBestLocalAddress(netAddr) if bestAddress != nil { glog.V(2).Infof("Server.startAddressRelayer: Relaying address %v to "+ - "RemoteNode (id= %v)", bestAddress.IP.String(), rn.GetId()) + "RemoteNode (id= %v)", bestAddress.Addr.String(), rn.GetId()) addrMsg := &MsgDeSoAddr{ AddrList: []*SingleAddr{ { Timestamp: time.Now(), - IP: bestAddress.IP, + IP: bestAddress.ToLegacy().IP, Port: bestAddress.Port, Services: (ServiceFlag)(bestAddress.Services), }, diff --git a/lib/types.go b/lib/types.go index 5395d360b..1c4649c1e 100644 --- a/lib/types.go +++ b/lib/types.go @@ -298,9 +298,9 @@ func FixedWidthDecodeUint256(rr *bytes.Reader) (*uint256.Int, error) { valBytes := make([]byte, 32, 32) _, err := io.ReadFull(rr, valBytes) if err != nil { - return uint256.NewInt(), fmt.Errorf("ReadUint256: Error reading 
value bytes: %v", err) + return uint256.NewInt(0), fmt.Errorf("ReadUint256: Error reading value bytes: %v", err) } - return uint256.NewInt().SetBytes(valBytes), nil + return uint256.NewInt(0).SetBytes(valBytes), nil } return nil, nil } diff --git a/lib/utils.go b/lib/utils.go index aed694865..f938d9105 100644 --- a/lib/utils.go +++ b/lib/utils.go @@ -5,15 +5,15 @@ import ( "bytes" "encoding/hex" "fmt" + "github.com/btcsuite/btcd/btcutil/hdkeychain" "math/big" "os" "reflect" "sort" "strings" - "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcutil/hdkeychain" "github.com/pkg/errors" "github.com/unrolled/secure" "golang.org/x/crypto/sha3" @@ -96,23 +96,23 @@ func ComputeKeysFromSeedWithNet(seedBytes []byte, index uint32, isTestnet bool) // m/44'/0'/0'/0/0 also maps to the first // address you'd get if you put the user's seed into most standard // Bitcoin wallets (Mycelium, Electrum, Ledger, iancoleman, etc...). 
- purpose, err := masterKey.Child(hdkeychain.HardenedKeyStart + 44) + purpose, err := masterKey.Derive(hdkeychain.HardenedKeyStart + 44) if err != nil { return nil, nil, "", fmt.Errorf("ComputeKeyFromSeed: Error encountered generating 'purpose' from seed (%v)", err) } - coinTypeKey, err := purpose.Child(hdkeychain.HardenedKeyStart + 0) + coinTypeKey, err := purpose.Derive(hdkeychain.HardenedKeyStart + 0) if err != nil { return nil, nil, "", fmt.Errorf("ComputeKeyFromSeed: Error encountered generating 'coinType' from seed (%v)", err) } - accountKey, err := coinTypeKey.Child(hdkeychain.HardenedKeyStart + 0) + accountKey, err := coinTypeKey.Derive(hdkeychain.HardenedKeyStart + 0) if err != nil { return nil, nil, "", fmt.Errorf("ComputeKeyFromSeed: Error encountered generating 'accountKey' from seed (%v)", err) } - changeKey, err := accountKey.Child(0) + changeKey, err := accountKey.Derive(0) if err != nil { return nil, nil, "", fmt.Errorf("ComputeKeyFromSeed: Error encountered generating 'changeKey' from seed (%v)", err) } - addressKey, err := changeKey.Child(index) + addressKey, err := changeKey.Derive(index) if err != nil { return nil, nil, "", fmt.Errorf("ComputeKeyFromSeed: Error encountered generating 'addressKey' from seed (%v)", err) } From a89ba61a8fa59b4a7dea0b561263674d348e0dfc Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 11 Apr 2024 17:11:16 -0400 Subject: [PATCH 624/762] Move NetworkManager GoRoutine Refresh Interval to Command Line Config (#1206) --- cmd/config.go | 6 ++++ cmd/node.go | 1 + cmd/run.go | 6 ++++ integration_testing/connection_bridge.go | 17 +++++++--- .../network_manager_routines_test.go | 15 ++++----- lib/constants.go | 17 ++++------ lib/network_manager.go | 31 ++++++++++++++----- lib/server.go | 3 +- 8 files changed, 64 insertions(+), 32 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index 50caaead0..dadcefb67 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -35,6 +35,9 @@ 
type Config struct { MaxInboundPeers uint32 OneInboundPerIp bool + // NetworkingManager config + PeerConnectionRefreshIntervalMillis uint64 + // Snapshot HyperSync bool ForceChecksum bool @@ -150,6 +153,9 @@ func LoadConfig() *Config { config.MaxInboundPeers = viper.GetUint32("max-inbound-peers") config.OneInboundPerIp = viper.GetBool("one-inbound-per-ip") + // NetworkManager config + config.PeerConnectionRefreshIntervalMillis = viper.GetUint64("peer-connection-refresh-interval-millis") + // Mining + Admin config.MinerPublicKeys = viper.GetStringSlice("miner-public-keys") config.NumMiningThreads = viper.GetUint64("num-mining-threads") diff --git a/cmd/node.go b/cmd/node.go index 4600e5f4a..af8675756 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -263,6 +263,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Config.MinerPublicKeys, node.Config.NumMiningThreads, node.Config.OneInboundPerIp, + node.Config.PeerConnectionRefreshIntervalMillis, node.Config.HyperSync, node.Config.SyncType, node.Config.MaxSyncBlockHeight, diff --git a/cmd/run.go b/cmd/run.go index 30eab52cf..659901824 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -2,6 +2,7 @@ package cmd import ( "fmt" + "github.com/deso-protocol/core/lib" "github.com/golang/glog" "github.com/spf13/cobra" @@ -144,6 +145,11 @@ func SetupRunFlags(cmd *cobra.Command) { "disable this flag when testing locally to allow multiple inbound connections "+ "from test servers") + cmd.PersistentFlags().Uint64("peer-connection-refresh-interval-millis", 10000, + "The frequency in milliseconds with which the node will refresh its peer connections. 
This applies to"+ + "both outbound validators and outbound persistent non-validators", + ) + // Listeners cmd.PersistentFlags().Uint64("protocol-port", 0, "When set, determines the port on which this node will listen for protocol-related "+ diff --git a/integration_testing/connection_bridge.go b/integration_testing/connection_bridge.go index b93fabac5..9bfd98678 100644 --- a/integration_testing/connection_bridge.go +++ b/integration_testing/connection_bridge.go @@ -2,15 +2,16 @@ package integration_testing import ( "fmt" - "github.com/btcsuite/btcd/addrmgr" - "github.com/deso-protocol/core/cmd" - "github.com/deso-protocol/core/lib" - "github.com/golang/glog" "math" "net" "strconv" "sync" "time" + + "github.com/btcsuite/btcd/addrmgr" + "github.com/deso-protocol/core/cmd" + "github.com/deso-protocol/core/lib" + "github.com/golang/glog" ) // TODO: DEPRECATE @@ -101,8 +102,14 @@ func (bridge *ConnectionBridge) createInboundConnection(node *cmd.Node) *lib.Pee if err != nil { panic(err) } + + ip, _, err := net.ParseCIDR(netAddress.Addr.String()) + if err != nil { + panic(err) + } + netAddress2 := net.TCPAddr{ - IP: netAddress.IP, + IP: ip, Port: int(netAddress.Port), } // Dial/connect to the node. 
diff --git a/integration_testing/network_manager_routines_test.go b/integration_testing/network_manager_routines_test.go index b16b1a19f..8bde4fcdf 100644 --- a/integration_testing/network_manager_routines_test.go +++ b/integration_testing/network_manager_routines_test.go @@ -2,6 +2,9 @@ package integration_testing import ( "fmt" + "testing" + "time" + "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/cmd" "github.com/deso-protocol/core/collections" @@ -9,8 +12,6 @@ import ( "github.com/deso-protocol/core/lib" "github.com/stretchr/testify/require" "github.com/tyler-smith/go-bip39" - "testing" - "time" ) func TestConnectionControllerInitiatePersistentConnections(t *testing.T) { @@ -39,7 +40,7 @@ func TestConnectionControllerInitiatePersistentConnections(t *testing.T) { } node1 = startNode(t, node1) // Let node1 connect to its connect-ips. - time.Sleep(1 * node1.Config.Params.NetworkManagerRefreshDuration) + time.Sleep(10 * time.Second) // Now set the active validators to node4 and node5. activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node4, node5) setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5) @@ -70,7 +71,7 @@ func TestConnectionControllerInitiatePersistentConnections(t *testing.T) { } node6 = startNode(t, node6) // Let node6 connect to its connect-ips. 
- time.Sleep(1 * node1.Config.Params.NetworkManagerRefreshDuration) + time.Sleep(10 * time.Second) activeValidatorsMap = getActiveValidatorsMapWithValidatorNodes(t, node4, node5, node6) setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6) waitForNonValidatorOutboundConnection(t, node6, node2) @@ -117,7 +118,7 @@ func TestConnectionControllerValidatorCircularConnectIps(t *testing.T) { node1 = startNode(t, node1) node2 = startNode(t, node2) - time.Sleep(1 * node1.Config.Params.NetworkManagerRefreshDuration) + time.Sleep(10 * time.Second) activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node1, node2) setActiveValidators(activeValidatorsMap, node1, node2) // The allCount for node1 and node2 should be 2, as they should have an outbound and an inbound RemoteNode @@ -153,7 +154,7 @@ func TestNetworkManagerPersistentConnectorReconnect(t *testing.T) { } node1 = startNode(t, node1) // Let node1 connect to its connect-ips. - time.Sleep(1 * node1.Config.Params.NetworkManagerRefreshDuration) + time.Sleep(10 * time.Second) activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node3) setActiveValidators(activeValidatorsMap, node1, node2, node3) @@ -224,7 +225,7 @@ func TestConnectionControllerValidatorConnector(t *testing.T) { node6 = startNode(t, node6) node7 = startNode(t, node7) // Let node6 and node7 connect to their connect-ips. - time.Sleep(1 * node1.Config.Params.NetworkManagerRefreshDuration) + time.Sleep(10 * time.Second) activeValidatorsMap := getActiveValidatorsMapWithValidatorNodes(t, node1, node2, node3, node4, node5) setActiveValidators(activeValidatorsMap, node1, node2, node3, node4, node5, node6, node7) diff --git a/lib/constants.go b/lib/constants.go index 0da7ef03f..b44819355 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -596,9 +596,6 @@ type DeSoParams struct { // The amount of time we wait to receive a verack message from a peer. 
VerackNegotiationTimeout time.Duration - // The amount of time it takes NetworkManager to refresh its routines. - NetworkManagerRefreshDuration time.Duration - // The maximum number of addresses to broadcast to peers. MaxAddressesToBroadcast uint32 @@ -1152,10 +1149,9 @@ var DeSoMainnetParams = DeSoParams{ DefaultSocketPort: uint16(17000), DefaultJSONPort: uint16(17001), - DialTimeout: 30 * time.Second, - VersionNegotiationTimeout: 30 * time.Second, - VerackNegotiationTimeout: 30 * time.Second, - NetworkManagerRefreshDuration: 1 * time.Second, + DialTimeout: 30 * time.Second, + VersionNegotiationTimeout: 30 * time.Second, + VerackNegotiationTimeout: 30 * time.Second, MaxAddressesToBroadcast: 10, @@ -1460,10 +1456,9 @@ var DeSoTestnetParams = DeSoParams{ DefaultSocketPort: uint16(18000), DefaultJSONPort: uint16(18001), - DialTimeout: 30 * time.Second, - VersionNegotiationTimeout: 30 * time.Second, - VerackNegotiationTimeout: 30 * time.Second, - NetworkManagerRefreshDuration: 1 * time.Second, + DialTimeout: 30 * time.Second, + VersionNegotiationTimeout: 30 * time.Second, + VerackNegotiationTimeout: 30 * time.Second, MaxAddressesToBroadcast: 10, diff --git a/lib/network_manager.go b/lib/network_manager.go index 5f7b34942..27017b2c1 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -95,15 +95,29 @@ type NetworkManager struct { // among other things. limitOneInboundRemoteNodePerIP bool + // The frequency at which the NetworkManager goroutines should run. 
+ peerConnectionRefreshIntervalMillis time.Duration + startGroup sync.WaitGroup exitChan chan struct{} exitGroup sync.WaitGroup } -func NewNetworkManager(params *DeSoParams, srv *Server, bc *Blockchain, cmgr *ConnectionManager, - blsKeystore *BLSKeystore, addrMgr *addrmgr.AddrManager, connectIps []string, - targetNonValidatorOutboundRemoteNodes uint32, targetNonValidatorInboundRemoteNodes uint32, - limitOneInboundConnectionPerIP bool, minTxFeeRateNanosPerKB uint64, nodeServices ServiceFlag) *NetworkManager { +func NewNetworkManager( + params *DeSoParams, + srv *Server, + bc *Blockchain, + cmgr *ConnectionManager, + blsKeystore *BLSKeystore, + addrMgr *addrmgr.AddrManager, + connectIps []string, + targetNonValidatorOutboundRemoteNodes uint32, + targetNonValidatorInboundRemoteNodes uint32, + limitOneInboundConnectionPerIP bool, + peerConnectionRefreshIntervalMillis uint64, + minTxFeeRateNanosPerKB uint64, + nodeServices ServiceFlag, +) *NetworkManager { return &NetworkManager{ params: params, @@ -126,6 +140,7 @@ func NewNetworkManager(params *DeSoParams, srv *Server, bc *Blockchain, cmgr *Co targetNonValidatorOutboundRemoteNodes: targetNonValidatorOutboundRemoteNodes, targetNonValidatorInboundRemoteNodes: targetNonValidatorInboundRemoteNodes, limitOneInboundRemoteNodePerIP: limitOneInboundConnectionPerIP, + peerConnectionRefreshIntervalMillis: time.Duration(peerConnectionRefreshIntervalMillis) * time.Millisecond, exitChan: make(chan struct{}), } } @@ -174,7 +189,7 @@ func (nm *NetworkManager) startPersistentConnector() { case <-nm.exitChan: nm.exitGroup.Done() return - case <-time.After(nm.params.NetworkManagerRefreshDuration): + case <-time.After(nm.peerConnectionRefreshIntervalMillis): nm.refreshConnectIps() } } @@ -192,7 +207,7 @@ func (nm *NetworkManager) startValidatorConnector() { case <-nm.exitChan: nm.exitGroup.Done() return - case <-time.After(nm.params.NetworkManagerRefreshDuration): + case <-time.After(nm.peerConnectionRefreshIntervalMillis): 
nm.logValidatorIndices() nm.refreshValidatorIndices() nm.connectValidators() @@ -212,7 +227,7 @@ func (nm *NetworkManager) startNonValidatorConnector() { case <-nm.exitChan: nm.exitGroup.Done() return - case <-time.After(nm.params.NetworkManagerRefreshDuration): + case <-time.After(nm.peerConnectionRefreshIntervalMillis): nm.refreshNonValidatorOutboundIndex() nm.refreshNonValidatorInboundIndex() nm.connectNonValidators() @@ -230,7 +245,7 @@ func (nm *NetworkManager) startRemoteNodeCleanup() { case <-nm.exitChan: nm.exitGroup.Done() return - case <-time.After(nm.params.NetworkManagerRefreshDuration): + case <-time.After(nm.peerConnectionRefreshIntervalMillis): nm.Cleanup() } } diff --git a/lib/server.go b/lib/server.go index 1458c0a34..77b4852f8 100644 --- a/lib/server.go +++ b/lib/server.go @@ -387,6 +387,7 @@ func NewServer( _minerPublicKeys []string, _numMiningThreads uint64, _limitOneInboundConnectionPerIP bool, + _peerConnectionRefreshIntervalMillis uint64, _hyperSync bool, _syncType NodeSyncType, _maxSyncBlockHeight uint32, @@ -542,7 +543,7 @@ func NewServer( } srv.networkManager = NewNetworkManager(_params, srv, _chain, _cmgr, _blsKeystore, _desoAddrMgr, _connectIps, _targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP, - _minFeeRateNanosPerKB, nodeServices) + _peerConnectionRefreshIntervalMillis, _minFeeRateNanosPerKB, nodeServices) if srv.stateChangeSyncer != nil { srv.stateChangeSyncer.BlockHeight = uint64(_chain.headerTip().Height) From 1aa98a336027b61492568236f1194fda86c3b5f8 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 11 Apr 2024 20:04:23 -0400 Subject: [PATCH 625/762] Remove Validator Index Logging in NetworkManager (#1208) --- lib/network_manager.go | 35 ----------------------------------- 1 file changed, 35 deletions(-) diff --git a/lib/network_manager.go b/lib/network_manager.go index 27017b2c1..b78be864c 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ 
-208,7 +208,6 @@ func (nm *NetworkManager) startValidatorConnector() { nm.exitGroup.Done() return case <-time.After(nm.peerConnectionRefreshIntervalMillis): - nm.logValidatorIndices() nm.refreshValidatorIndices() nm.connectValidators() } @@ -553,40 +552,6 @@ func (nm *NetworkManager) isActiveValidator(pk bls.SerializedPublicKey) bool { return ok } -func (nm *NetworkManager) logValidatorIndices() { - // Log the outbound validators - validatorOutboundMap := nm.GetValidatorOutboundIndex().ToMap() - outboundValidatorString := "" - for pk, rn := range validatorOutboundMap { - addr := "" - if rn.peer != nil { - addr = rn.peer.addrStr - } - outboundValidatorString += fmt.Sprintf(" Id (%v) PublicKey (%v) Domain (%v) Status (%v)\n", rn.id, pk, addr, rn.connectionStatus) - } - glog.V(2).Infof("NetworkManager.logValidatorIndices: Outbound Validators:\n%v", outboundValidatorString) - - // Log the inbound validators - validatorInboundMap := nm.GetValidatorInboundIndex().ToMap() - inboundValidatorString := "" - for pk, rn := range validatorInboundMap { - addr := "" - if rn.peer != nil { - addr = rn.peer.addrStr - } - inboundValidatorString += fmt.Sprintf(" Id (%v) PublicKey (%v) Domain (%v) Status (%v)\n", rn.id, pk, addr, rn.connectionStatus) - } - glog.V(2).Infof("NetworkManager.logValidatorIndices: Inbound Validators:\n%v", inboundValidatorString) - - // Log the active validators - activeValidatorsMap := nm.getActiveValidatorsMap().ToMap() - activeValidatorsString := "" - for pk, validator := range activeValidatorsMap { - activeValidatorsString += fmt.Sprintf(" PublicKey (%v) Domain(%v)\n", pk, string(validator.GetDomains()[0])) - } - glog.V(2).Infof("NetworkManager.logValidatorIndices: Active Validators:\n%v", activeValidatorsString) -} - // refreshValidatorIndices re-indexes validators based on the activeValidatorsMap. It is called periodically by the // validator connector. 
func (nm *NetworkManager) refreshValidatorIndices() { From 2eacb566d417148472fefb0b3b59aa36ecc750fd Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 11 Apr 2024 20:08:22 -0400 Subject: [PATCH 626/762] Add validateTransactionsReadOnlyLatestBlockView (#1196) --- lib/pos_mempool.go | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 4e00bf487..ae431b91e 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -194,6 +194,10 @@ type PosMempool struct { // PosMempool only needs read-access to the block view. It isn't necessary to copy the block view before passing it // to the mempool. readOnlyLatestBlockView *UtxoView + // validateTransactionsReadOnlyLatestBlockView is the same as the readOnlyLatestBlockView but is exclusively for use + // in the validateTransactions routine. The validateTransactions routine is a routine that validates the top Fee-Time + // ordered transactions in the mempool. + validateTransactionsReadOnlyLatestBlockView *UtxoView // augmentedReadOnlyLatestBlockView is a copy of the latest block view with all the transactions in the mempool applied to // it. This allows the backend to display the current state of the blockchain including the mempool. // The augmentedReadOnlyLatestBlockView is updated every 10 milliseconds to reflect the latest state of the mempool. @@ -294,12 +298,20 @@ func (mp *PosMempool) Init( // Initialize the parametrized fields. 
mp.params = params mp.globalParams = globalParams - mp.readOnlyLatestBlockView = readOnlyLatestBlockView var err error if readOnlyLatestBlockView != nil { + mp.readOnlyLatestBlockView, err = readOnlyLatestBlockView.CopyUtxoView() + if err != nil { + return errors.Wrapf(err, "PosMempool.Init: Problem copying utxo view to readOnlyLatestBlockView") + } mp.augmentedReadOnlyLatestBlockView, err = readOnlyLatestBlockView.CopyUtxoView() if err != nil { - return errors.Wrapf(err, "PosMempool.Init: Problem copying utxo view") + return errors.Wrapf(err, "PosMempool.Init: Problem copying utxo view to augmentedReadOnlyLatestBlockView") + } + mp.validateTransactionsReadOnlyLatestBlockView, err = readOnlyLatestBlockView.CopyUtxoView() + if err != nil { + return errors.Wrapf(err, + "PosMempool.Init: Problem copying utxo view to validateTransactionsReadOnlyLatestBlockView") } } mp.latestBlockHeight = latestBlockHeight @@ -924,7 +936,7 @@ func (mp *PosMempool) validateTransactions() error { // We copy the reference to the readOnlyLatestBlockView. Since the utxoView is immutable, we don't need to copy the // entire view while we hold the lock. // We hold a read-lock on the mempool to get the transactions and the latest block view. 
- validationView := mp.readOnlyLatestBlockView + validationView := mp.validateTransactionsReadOnlyLatestBlockView mempoolTxns := mp.getTransactionsNoLock() mp.RUnlock() @@ -1094,7 +1106,17 @@ func (mp *PosMempool) UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) return } - mp.readOnlyLatestBlockView = blockView + if blockView != nil { + var err error + mp.readOnlyLatestBlockView, err = blockView.CopyUtxoView() + if err != nil { + glog.Errorf("PosMempool.UpdateLatestBlock: Problem copying utxo view: %v", err) + } + mp.validateTransactionsReadOnlyLatestBlockView, err = blockView.CopyUtxoView() + if err != nil { + glog.Errorf("PosMempool.UpdateLatestBlock: Problem copying utxo view: %v", err) + } + } mp.latestBlockHeight = blockHeight } From 81abe1ccb9f57add03d31cb5801cff593e40f704 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 11 Apr 2024 20:10:43 -0400 Subject: [PATCH 627/762] NewUtxoView never returns an error (#1197) --- lib/block_producer.go | 12 +- lib/block_view.go | 18 +- lib/block_view_access_group_members_test.go | 45 ++--- lib/block_view_access_group_test.go | 12 +- lib/block_view_association_test.go | 9 +- lib/block_view_atomic_txns_test.go | 18 +- lib/block_view_bitcoin_test.go | 53 ++--- lib/block_view_creator_coin_test.go | 52 ++--- lib/block_view_dao_coin_limit_order_test.go | 19 +- lib/block_view_dao_coin_test.go | 6 +- lib/block_view_derived_key_test.go | 189 ++++++------------ lib/block_view_follow_test.go | 15 +- lib/block_view_like_test.go | 15 +- lib/block_view_lockups_test.go | 208 +++++++------------- lib/block_view_message_test.go | 57 ++---- lib/block_view_new_message_test.go | 6 +- lib/block_view_nft_test.go | 24 +-- lib/block_view_post_test.go | 33 ++-- lib/block_view_profile_test.go | 30 +-- lib/block_view_stake.go | 30 +-- lib/block_view_stake_test.go | 16 +- lib/block_view_test.go | 91 ++++----- lib/block_view_types_test.go | 4 +- lib/block_view_validator.go | 30 +-- 
lib/block_view_validator_test.go | 31 ++- lib/blockchain.go | 78 +++----- lib/blockchain_test.go | 5 +- lib/db_utils.go | 10 +- lib/legacy_mempool.go | 12 +- lib/load_test.go | 3 +- lib/pos_block_producer_test.go | 9 +- lib/pos_blockchain.go | 7 +- lib/pos_blockchain_test.go | 3 +- lib/pos_epoch_complete_hook_test.go | 7 +- lib/pos_epoch_test.go | 3 +- lib/pos_fee_estimator_test.go | 5 +- lib/pos_leader_schedule_test.go | 3 +- lib/pos_mempool_test.go | 17 +- lib/pos_random_seed_test.go | 3 +- lib/pos_snapshot_cache.go | 16 +- lib/state_change_syncer.go | 2 +- lib/txindex.go | 4 +- 42 files changed, 405 insertions(+), 805 deletions(-) diff --git a/lib/block_producer.go b/lib/block_producer.go index 19fe7836c..b1bad6b74 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -215,12 +215,8 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) currentBlockSize := uint64(len(blockBytes) + MaxVarintLen64) // Create a new view object. - utxoView, err := NewUtxoView(desoBlockProducer.chain.db, desoBlockProducer.params, + utxoView := NewUtxoView(desoBlockProducer.chain.db, desoBlockProducer.params, desoBlockProducer.postgres, desoBlockProducer.chain.snapshot, nil) - if err != nil { - return nil, nil, nil, errors.Wrapf(err, - "DeSoBlockProducer._getBlockTemplate: Error generating checker UtxoView: ") - } txnsAddedToBlock := make(map[BlockHash]bool) for ii, mempoolTx := range txnsOrderedByTimeAdded { @@ -290,12 +286,8 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) // Compute the total fee the BlockProducer should get. 
totalFeeNanos := uint64(0) - feesUtxoView, err := NewUtxoView(desoBlockProducer.chain.db, desoBlockProducer.params, + feesUtxoView := NewUtxoView(desoBlockProducer.chain.db, desoBlockProducer.params, desoBlockProducer.postgres, desoBlockProducer.chain.snapshot, nil) - if err != nil { - return nil, nil, nil, fmt.Errorf( - "DeSoBlockProducer._getBlockTemplate: Error generating UtxoView to compute txn fees: %v", err) - } // Parse the public key that should be used for the block reward. blockRewardOutputPublicKey, err := btcec.ParsePubKey(blockRewardOutput.PublicKey) diff --git a/lib/block_view.go b/lib/block_view.go index f62a581bb..fe55b0373 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -325,10 +325,7 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { } func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { - newView, err := NewUtxoView(bav.Handle, bav.Params, bav.Postgres, bav.Snapshot, bav.EventManager) - if err != nil { - return nil, err - } + newView := NewUtxoView(bav.Handle, bav.Params, bav.Postgres, bav.Snapshot, bav.EventManager) // Copy the UtxoEntry data // Note that using _setUtxoMappings is dangerous because the Pos within @@ -679,11 +676,8 @@ func NewUtxoViewWithSnapshotCache( _snapshot *Snapshot, _eventManager *EventManager, _snapshotCache *SnapshotCache, -) (*UtxoView, error) { - utxoView, err := NewUtxoView(_handle, _params, _postgres, _snapshot, _eventManager) - if err != nil { - return nil, err - } +) *UtxoView { + utxoView := NewUtxoView(_handle, _params, _postgres, _snapshot, _eventManager) if _snapshotCache != nil { allValidatorSetEntries := _snapshotCache.GetAllCachedSnapshotValidatorSetEntries() for snapshotAtEpochNumber, validatorSetEntries := range allValidatorSetEntries { @@ -711,7 +705,7 @@ func NewUtxoViewWithSnapshotCache( utxoView.SnapshotGlobalParamEntries[snapshotAtEpochNumber] = globalParamsEntry.Copy() } } - return utxoView, nil + return utxoView } func NewUtxoView( @@ -720,7 +714,7 @@ func NewUtxoView( _postgres 
*Postgres, _snapshot *Snapshot, _eventManager *EventManager, -) (*UtxoView, error) { +) *UtxoView { view := UtxoView{ Handle: _handle, @@ -757,7 +751,7 @@ func NewUtxoView( // but we can use it here to initialize the mappings. view._ResetViewMappingsAfterFlush() - return &view, nil + return &view } func (bav *UtxoView) _deleteUtxoMappings(utxoEntry *UtxoEntry) error { diff --git a/lib/block_view_access_group_members_test.go b/lib/block_view_access_group_members_test.go index 4c237effb..ec8799230 100644 --- a/lib/block_view_access_group_members_test.go +++ b/lib/block_view_access_group_members_test.go @@ -173,13 +173,11 @@ func TestAccessGroupMembersAdd(t *testing.T) { // Place the above transactions into a block. tvv1 := []*transactionTestVector{tv1, tv2, tv3, tv4, tv4p5} blockConnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) _verifyMembersList(tm, utxoView, m0PublicKey, NewGroupKeyName(groupName1), []*PublicKey{m0PublicKey, m4PublicKey}) } blockDisconnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) _verifyMembersList(tm, utxoView, m0PublicKey, NewGroupKeyName(groupName1), []*PublicKey{}) } tvb1 := NewTransactionTestVectorBlock(tvv1, blockConnectCallback, blockDisconnectCallback) @@ -330,16 +328,14 @@ func TestAccessGroupMembersAdd(t *testing.T) { // Mine all the above transactions into a new block. 
tvv2 := []*transactionTestVector{tv5, tv6, tv7, tv8, tv9, tv10, tv11, tv12, tv13, tv14, tv15, tv16, tv17, tv18, tv19} block2ConnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) _verifyMembersList(tm, utxoView, m0PublicKey, NewGroupKeyName(groupName1), []*PublicKey{m0PublicKey, m1PublicKey, m4PublicKey}) _verifyMembersList(tm, utxoView, m0PublicKey, NewGroupKeyName(groupName2), []*PublicKey{m0PublicKey}) _verifyMembersList(tm, utxoView, m1PublicKey, NewGroupKeyName(groupName3), []*PublicKey{}) _verifyMembersList(tm, utxoView, m2PublicKey, NewGroupKeyName(groupName4), []*PublicKey{m0PublicKey, m1PublicKey, m2PublicKey}) } block2DisconnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) _verifyMembersList(tm, utxoView, m0PublicKey, NewGroupKeyName(groupName1), []*PublicKey{m0PublicKey, m4PublicKey}) _verifyMembersList(tm, utxoView, m0PublicKey, NewGroupKeyName(groupName2), []*PublicKey{}) _verifyMembersList(tm, utxoView, m1PublicKey, NewGroupKeyName(groupName3), []*PublicKey{}) @@ -382,16 +378,14 @@ func TestAccessGroupMembersAdd(t *testing.T) { // Mine all the above transactions into a new block. 
block3ConnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) _verifyMembersList(tm, utxoView, m0PublicKey, NewGroupKeyName(groupName1), []*PublicKey{m0PublicKey, m1PublicKey, m4PublicKey}) _verifyMembersList(tm, utxoView, m0PublicKey, NewGroupKeyName(groupName2), []*PublicKey{m0PublicKey}) _verifyMembersList(tm, utxoView, m1PublicKey, NewGroupKeyName(groupName3), []*PublicKey{}) _verifyMembersList(tm, utxoView, m2PublicKey, NewGroupKeyName(groupName4), totalMembers) } block3DisconnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) _verifyMembersList(tm, utxoView, m0PublicKey, NewGroupKeyName(groupName1), []*PublicKey{m0PublicKey, m1PublicKey, m4PublicKey}) _verifyMembersList(tm, utxoView, m0PublicKey, NewGroupKeyName(groupName2), []*PublicKey{m0PublicKey}) _verifyMembersList(tm, utxoView, m1PublicKey, NewGroupKeyName(groupName3), []*PublicKey{}) @@ -655,13 +649,11 @@ func TestAccessGroupMembersRemove(t *testing.T) { // Mine all above transactions into a block. 
tvv1 := []*transactionTestVector{tv1, tv2, tv3, tv3p5, tv4, tv5, tv6, tv7, tv8, tv9, tv10, tv11} tvb1ConnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) tv11.connectCallback(tv11, tm, utxoView) } tvb1DisconnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) _verifyMembersList(tm, utxoView, m0PublicKey, NewGroupKeyName(groupName1), []*PublicKey{}) _verifyMembersList(tm, utxoView, m1PublicKey, NewGroupKeyName(groupName2), []*PublicKey{}) } @@ -808,8 +800,7 @@ func TestAccessGroupMembersRemove(t *testing.T) { // Mine all above transactions into a block. tvv2 := []*transactionTestVector{tv12, tv13, tv14, tv15} tvb2ConnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) tv15.connectCallback(tv15, tm, utxoView) } tvb2DisconnectCallback := tvb1ConnectCallback @@ -868,8 +859,7 @@ func TestAccessGroupMembersRemove(t *testing.T) { // Mine the above transactions into a block. 
tvv3 := []*transactionTestVector{tv16, tv17} tvb3ConnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) tv17.connectCallback(tv17, tm, utxoView) } tvb3DisconnectCallback := tvb2ConnectCallback @@ -1044,13 +1034,11 @@ func TestAccessGroupMembersUpdate(t *testing.T) { // Mine the above transactions into a block. tvv1 := []*transactionTestVector{tv1, tv2, tv3, tv4, tv5, tv6, tv7, tv8} tvb1ConnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) tv8.connectCallback(tv8, tm, utxoView) } tvb1DisconnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) _verifyMembersList(tm, utxoView, m2PublicKey, NewGroupKeyName(groupName1), []*PublicKey{}) } tvb1 := NewTransactionTestVectorBlock(tvv1, tvb1ConnectCallback, tvb1DisconnectCallback) @@ -1264,8 +1252,7 @@ func TestAccessGroupMembersUpdate(t *testing.T) { tv17.disconnectCallback = tv16.connectCallback tvv2 := []*transactionTestVector{tv9, tv10, tv11, tv12, tv13, tv14, tv15, tv16, tv17} tvb2ConnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) tv17.connectCallback(tv17, tm, utxoView) _verifyMembersList(tm, utxoView, m0PublicKey, NewGroupKeyName(groupName4), []*PublicKey{}) _verifyMembersList(tm, utxoView, m1PublicKey, 
NewGroupKeyName(groupName3), []*PublicKey{}) @@ -1662,8 +1649,7 @@ func TestAccessGroupMembersTxnWithDerivedKey(t *testing.T) { accessGroupMemberLimitKey *AccessGroupMemberLimitKey, count int, ) string { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) txnSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit @@ -1759,8 +1745,7 @@ func TestAccessGroupMembersTxnWithDerivedKey(t *testing.T) { derivedKeyPrivBase58Check string, ) error { // Get UTXO view. - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) // Sign txn. _signTxnWithDerivedKey(t, txn, derivedKeyPrivBase58Check) // Connect txn. diff --git a/lib/block_view_access_group_test.go b/lib/block_view_access_group_test.go index 24912adf0..f6a1ffaa4 100644 --- a/lib/block_view_access_group_test.go +++ b/lib/block_view_access_group_test.go @@ -241,15 +241,13 @@ func TestAccessGroup(t *testing.T) { tv13, tv14, tv15, tv16, tv17, tv18, tv19, tv20} tvbConnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) tv15.connectCallback(tv15, tm, utxoView) } tvbDisconnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { // Reset the ForkHeight for access groups tm.params.ForkHeights.AssociationsAndAccessGroupsBlockHeight = uint32(1000) - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) _verifyGroupIdsForUser(t, m0PubBytes, utxoView, []*AccessGroupId{groupM0B}, 
[]*AccessGroupId{}) _verifyGroupIdsForUser(t, m1PubBytes, utxoView, []*AccessGroupId{groupM1B}, []*AccessGroupId{}) _verifyGroupIdsForUser(t, m2PubBytes, utxoView, []*AccessGroupId{groupM2B}, []*AccessGroupId{}) @@ -528,8 +526,7 @@ func TestAccessGroupTxnWithDerivedKey(t *testing.T) { // Helper funcs _submitAuthorizeDerivedKeyTxn := func(accessGroupLimitKey AccessGroupLimitKey, count int) string { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) fillerAccessGroupLimitKey := AccessGroupLimitKey{ AccessGroupOwnerPublicKey: accessGroupLimitKey.AccessGroupOwnerPublicKey, @@ -592,8 +589,7 @@ func TestAccessGroupTxnWithDerivedKey(t *testing.T) { operationType AccessGroupOperationType, derivedKeyPrivBase58Check string, ) error { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) // Construct txn. 
var txn *MsgDeSoTxn diff --git a/lib/block_view_association_test.go b/lib/block_view_association_test.go index 90f0198bc..a1e278edb 100644 --- a/lib/block_view_association_test.go +++ b/lib/block_view_association_test.go @@ -2259,8 +2259,7 @@ func _testAssociationsWithDerivedKey(t *testing.T) { // Helper funcs _submitAuthorizeDerivedKeyTxn := func(txnType TxnType, associationLimitKey AssociationLimitKey, count int) (string, error) { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) txnSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit @@ -2307,8 +2306,7 @@ func _testAssociationsWithDerivedKey(t *testing.T) { _submitAssociationTxnWithDerivedKey := func( transactorPkBytes []byte, derivedKeyPrivBase58Check string, inputTxn MsgDeSoTxn, ) error { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) var txn *MsgDeSoTxn switch inputTxn.TxnMeta.GetTxnType() { @@ -2505,8 +2503,7 @@ func _testAssociationsWithDerivedKey(t *testing.T) { AssociationType: []byte("ENDORSEMENT"), AssociationValue: []byte("Python"), } - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) userAssociationEntries, err := utxoView.GetUserAssociationsByAttributes(userAssociationQuery) require.NoError(t, err) require.Len(t, userAssociationEntries, 1) diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index 59510f84a..f4daee10b 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -40,9 +40,8 @@ func TestAtomicTxnsWrapperTxIndexMetadataEncoder(t *testing.T) { } // Compute the txindex metadata for the 
transaction and ensure its computed correctly. - utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) blockHeight := testMeta.chain.BlockTip().Height + 1 atomicTxnMetadata := ComputeTransactionMetadata( atomicTxnsWrapper, @@ -77,9 +76,8 @@ func TestAtomicTxnsWrapperAtomicity(t *testing.T) { atomicTxns, signerPrivKeysBase58 := _generateUnsignedDependentAtomicTransactions(testMeta, int(100)) // Construct a UtxoView and block height for getting balances. - utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) blockHeight := testMeta.chain.BlockTip().Height + 1 // Fetch all starting balances for affected public keys. @@ -124,9 +122,8 @@ func TestAtomicTxnsWrapperAtomicity(t *testing.T) { // Because the transaction fails, we must construct a new UtxoView as there's // invalid data in the previous view. - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) // Check that the balances are not updated. // This ensures that if a single transaction within the atomic transaction @@ -419,9 +416,8 @@ func TestDependentAtomicTransactionGeneration(t *testing.T) { atomicTxns, _ := _generateSignedDependentAtomicTransactions(testMeta, 100) // Construct a new view to connect the transactions to. - utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) blockHeight := testMeta.chain.BlockTip().Height + 1 // Get the initial balance for m0. @@ -591,9 +587,8 @@ func _generateUnsignedMaxBasicTransfer( } // Construct a UtxoView for fetching state. 
- utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(testMeta.t, err) // Construct a transfer template. // NOTE: Because of variable encoding of txn.TxOutputs[0].AmountNanos, we use MaxUint64 to ensure @@ -708,8 +703,7 @@ func _atomicTransactionsWrapperWithConnectTimestamp( _ = require // Construct a new view to connect the transactions to. - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) // Connect the transaction. txHash := atomicTransactionsWrapper.Hash() diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index 9a4f64f0f..99352cf84 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -194,8 +194,7 @@ func _updateUSDCentsPerBitcoinExchangeRate(t *testing.T, chain *Blockchain, db * // Sign the transaction now that its inputs are set up. _signTxn(t, txn, updaterPrivBase58Check) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) txHash := txn.Hash() // Always use height+1 for validation since it's assumed the transaction will @@ -456,8 +455,7 @@ func TestBitcoinExchange(t *testing.T) { // in the middle. utxoOpsList := [][]*UtxoOperation{} { - utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) // Add a placeholder where the rate update is going to be fff := append([]*MsgDeSoTxn{}, bitcoinExchangeTxns[:rateUpdateIndex]...) @@ -602,8 +600,7 @@ func TestBitcoinExchange(t *testing.T) { { // Rolling back all the transactions should work. 
- utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) for ii := range bitcoinExchangeTxns { index := len(bitcoinExchangeTxns) - 1 - ii burnTxn := bitcoinExchangeTxns[index] @@ -638,8 +635,7 @@ func TestBitcoinExchange(t *testing.T) { // flushing should be fine. utxoOpsList = [][]*UtxoOperation{} { - utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) for ii, burnTxn := range bitcoinExchangeTxns { blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := @@ -836,8 +832,7 @@ func TestBitcoinExchange(t *testing.T) { // Roll back the blocks and make sure we don't hit any errors. { - utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) { // Fetch the utxo operations for the block we're detaching. We need these @@ -1159,20 +1154,20 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { // Verify that adding the transaction to the UtxoView fails because there is // not enough work on the burn block yet. 
{ - utxoView, _ := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 utxoView.ConnectTransaction(burnTxn1, txHash1, blockHeight, 0, true, false) } { - utxoView, _ := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 utxoView.ConnectTransaction(burnTxn1, txHash1, blockHeight, 0, true, false) } // The transaction should pass now { - utxoView, _ := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := utxoView.ConnectTransaction(burnTxn1, txHash1, blockHeight, 0, true, false) @@ -1188,8 +1183,7 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { // in the middle. utxoOpsList := [][]*UtxoOperation{} { - utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) // Add a placeholder where the rate update is going to be fff := append([]*MsgDeSoTxn{}, bitcoinExchangeTxns[:rateUpdateIndex]...) @@ -1332,8 +1326,7 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { { // Rolling back all the transactions should work. - utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) for ii := range bitcoinExchangeTxns { index := len(bitcoinExchangeTxns) - 1 - ii burnTxn := bitcoinExchangeTxns[index] @@ -1368,8 +1361,7 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { // flushing should be fine. 
utxoOpsList = [][]*UtxoOperation{} { - utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) for ii, burnTxn := range bitcoinExchangeTxns { blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := @@ -1565,8 +1557,7 @@ func TestBitcoinExchangeGlobalParams(t *testing.T) { // Roll back the blocks and make sure we don't hit any errors. { - utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) { // Fetch the utxo operations for the block we're detaching. We need these @@ -2050,7 +2041,7 @@ func TestSpendOffOfUnminedTxnsBitcoinExchange(t *testing.T) { // The transaction should pass now { - utxoView, _ := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 _, _, _, _, err := @@ -2141,8 +2132,7 @@ func TestSpendOffOfUnminedTxnsBitcoinExchange(t *testing.T) { // the mempool's disconnect function to make sure we get the txns // back during a reorg. { - utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) { // Fetch the utxo operations for the block we're detaching. We need these @@ -2485,8 +2475,7 @@ func TestBitcoinExchangeWithAmountNanosNonZeroAtGenesis(t *testing.T) { // in the middle. 
utxoOpsList := [][]*UtxoOperation{} { - utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) // Add a placeholder where the rate update is going to be fff := append([]*MsgDeSoTxn{}, bitcoinExchangeTxns[:rateUpdateIndex]...) @@ -2629,8 +2618,7 @@ func TestBitcoinExchangeWithAmountNanosNonZeroAtGenesis(t *testing.T) { { // Rolling back all the transactions should work. - utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) for ii := range bitcoinExchangeTxns { index := len(bitcoinExchangeTxns) - 1 - ii burnTxn := bitcoinExchangeTxns[index] @@ -2665,8 +2653,7 @@ func TestBitcoinExchangeWithAmountNanosNonZeroAtGenesis(t *testing.T) { // flushing should be fine. utxoOpsList = [][]*UtxoOperation{} { - utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) for ii, burnTxn := range bitcoinExchangeTxns { blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := @@ -2842,8 +2829,7 @@ func TestBitcoinExchangeWithAmountNanosNonZeroAtGenesis(t *testing.T) { // Roll back the blocks and make sure we don't hit any errors. { - utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, chain.eventManager) { // Fetch the utxo operations for the block we're detaching. 
We need these @@ -2971,8 +2957,7 @@ func TestUpdateExchangeRate(t *testing.T) { newUSDCentsPerBitcoin) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := utxoView.ConnectTransaction(updateExchangeRateTxn, updateExchangeRateTxn.Hash(), blockHeight, 0, true, false) diff --git a/lib/block_view_creator_coin_test.go b/lib/block_view_creator_coin_test.go index 844e23921..4a32188f3 100644 --- a/lib/block_view_creator_coin_test.go +++ b/lib/block_view_creator_coin_test.go @@ -235,9 +235,7 @@ func _helpTestCreatorCoinBuySell( // If no UtxoView is passed, use a new one to run our checks. if utxoView == nil { - var err error - utxoView, err = NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView = NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) } // Profile fields @@ -573,8 +571,7 @@ func _helpTestCreatorCoinBuySell( _checkTestData(testData, fmt.Sprintf("SimpleDisconnect: Index: %v", testIndex), nil, nil) // Disconnect the transaction - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 fmt.Printf("Disconnecting test index: %v\n", testIndex) require.NoError(utxoView.DisconnectTransaction( @@ -602,8 +599,7 @@ func _helpTestCreatorCoinBuySell( // Connect all the txns to a single UtxoView without flushing { // Create a new UtxoView to check on the state of things - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) for testIndex, testData := range creatorCoinTests { 
fmt.Printf("Applying test index: %v\n", testIndex) txn := testTxns[testIndex] @@ -626,8 +622,7 @@ func _helpTestCreatorCoinBuySell( // Disconnect all the txns on a single view and flush at the end { // Create a new UtxoView to check on the state of things - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 for iterIndex := range creatorCoinTests { testIndex := len(creatorCoinTests) - 1 - iterIndex @@ -763,8 +758,7 @@ func _helpTestCreatorCoinBuySell( require.NoError(utxoView.DisconnectBlock(blockToDisconnect, txHashes, utxoOps, 0)) } { - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) // This fails because public key is not defined in one of the creator coin sell transactions disconnectSingleBlock(finalBlock2, utxoView) @@ -1118,8 +1112,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { receiverPkBytes, _, err := Base58CheckDecode(m1Pub) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) txn, _, _, _, err := chain.CreateCreatorCoinTransferTxnWithDiamonds( senderPkBytes, @@ -1152,8 +1145,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { receiverPkBytes, _, err := Base58CheckDecode(m1Pub) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) txn, _, _, _, err := chain.CreateCreatorCoinTransferTxnWithDiamonds( senderPkBytes, @@ -1185,8 +1177,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { receiverPkBytes, _, 
err := Base58CheckDecode(m1Pub) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) txn, _, _, _, err := chain.CreateCreatorCoinTransferTxnWithDiamonds( senderPkBytes, @@ -1218,8 +1209,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { receiverPkBytes, _, err := Base58CheckDecode(m1Pub) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) txn, _, _, _, err := chain.CreateCreatorCoinTransferTxnWithDiamonds( senderPkBytes, @@ -1251,8 +1241,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { receiverPkBytes, _, err := Base58CheckDecode(m1Pub) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) txn, _, _, _, err := chain.CreateCreatorCoinTransferTxnWithDiamonds( senderPkBytes, @@ -1284,8 +1273,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { receiverPkBytes, _, err := Base58CheckDecode(m1Pub) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) txn, _, _, _, err := chain.CreateCreatorCoinTransferTxnWithDiamonds( senderPkBytes, @@ -1318,8 +1306,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { receiverPkBytes, _, err := Base58CheckDecode(m1Pub) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) txn, _, _, _, err := 
chain.CreateCreatorCoinTransferTxnWithDiamonds( receiverPkBytes, @@ -1345,8 +1332,7 @@ func TestCreatorCoinWithDiamondsFailureCases(t *testing.T) { } // You can't apply the same number of Diamonds to a post twice { - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) // Let's have a successful transaction { @@ -1515,8 +1501,7 @@ func TestCreatorCoinDiamondAfterDeSoDiamondsBlockHeight(t *testing.T) { receiverPkBytes, _, err := Base58CheckDecode(m1Pub) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) // Attempt to give two diamonds. txn, _, _, _, err := chain.CreateCreatorCoinTransferTxnWithDiamonds( @@ -4316,8 +4301,7 @@ func _creatorCoinTxn(t *testing.T, chain *Blockchain, db *badger.DB, profilePkBytes, _, err := Base58CheckDecode(ProfilePublicKeyBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreateCreatorCoinTxn( updaterPkBytes, @@ -4433,8 +4417,7 @@ func _doCreatorCoinTransferTxnWithDiamonds(t *testing.T, chain *Blockchain, db * receiverPkBytes, _, err := Base58CheckDecode(ReceiverPublicKeyBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) txn, totalInputMake, _, _, err := chain.CreateCreatorCoinTransferTxnWithDiamonds( senderPkBytes, @@ -4504,8 +4487,7 @@ func _doCreatorCoinTransferTxn(t *testing.T, chain *Blockchain, db *badger.DB, receiverPkBytes, _, err := 
Base58CheckDecode(ReceiverPublicKeyBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, chain.eventManager) txn, totalInputMake, _, _, err := chain.CreateCreatorCoinTransferTxn( updaterPkBytes, diff --git a/lib/block_view_dao_coin_limit_order_test.go b/lib/block_view_dao_coin_limit_order_test.go index 1749aa525..1f74a5c1d 100644 --- a/lib/block_view_dao_coin_limit_order_test.go +++ b/lib/block_view_dao_coin_limit_order_test.go @@ -38,12 +38,11 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { params.ForkHeights.OrderBookDBFetchOptimizationBlockHeight = uint32(0) params.BlockRewardMaturity = time.Second - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) dbAdapter := utxoView.GetDbAdapter() // Mine a few blocks to give the senderPkString some money. - _, err = miner.MineAndProcessSingleBlock(0, mempool) + _, err := miner.MineAndProcessSingleBlock(0, mempool) require.NoError(err) _, err = miner.MineAndProcessSingleBlock(0, mempool) require.NoError(err) @@ -629,12 +628,11 @@ func TestDAOCoinLimitOrder(t *testing.T) { params.ForkHeights.OrderBookDBFetchOptimizationBlockHeight = uint32(0) params.BlockRewardMaturity = time.Second - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) dbAdapter := utxoView.GetDbAdapter() // Mine a few blocks to give the senderPkString some money. 
- _, err = miner.MineAndProcessSingleBlock(0, mempool) + _, err := miner.MineAndProcessSingleBlock(0, mempool) require.NoError(err) _, err = miner.MineAndProcessSingleBlock(0, mempool) require.NoError(err) @@ -2462,8 +2460,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.NotEmpty(utxoEntriesM0) // Unspent UTXOs exist for m0. // Spend m0's existing UTXO. - tempUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + tempUtxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOp, err := tempUtxoView._spendUtxo(utxoEntriesM0[0].UtxoKey) require.NoError(err) err = tempUtxoView.FlushToDb(0) @@ -4088,8 +4085,7 @@ func _connectDAOCoinLimitOrderTxn( require := require.New(testMeta.t) testMeta.expectedSenderBalances = append( testMeta.expectedSenderBalances, _getBalance(testMeta.t, testMeta.chain, nil, publicKey)) - currentUtxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, testMeta.chain.eventManager) - require.NoError(err) + currentUtxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, testMeta.chain.eventManager) // Sign the transaction now that its inputs are set up. 
_signTxn(testMeta.t, txn, privateKey) // Always use savedHeight (blockHeight+1) for validation since it's @@ -4142,8 +4138,7 @@ func _doDAOCoinLimitOrderTxn(t *testing.T, chain *Blockchain, db *badger.DB, updaterPkBytes, _, err := Base58CheckDecode(TransactorPublicKeyBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreateDAOCoinLimitOrderTxn( updaterPkBytes, diff --git a/lib/block_view_dao_coin_test.go b/lib/block_view_dao_coin_test.go index 4dd6d5b30..caa148f4e 100644 --- a/lib/block_view_dao_coin_test.go +++ b/lib/block_view_dao_coin_test.go @@ -27,8 +27,7 @@ func _daoCoinTxn(t *testing.T, chain *Blockchain, db *badger.DB, profilePkBytes := metadata.ProfilePublicKey assert.Len(profilePkBytes, btcec.PubKeyBytesLenCompressed) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreateDAOCoinTxn( updaterPkBytes, @@ -108,8 +107,7 @@ func _daoCoinTransferTxn(t *testing.T, chain *Blockchain, db *badger.DB, updaterPkBytes, _, err := Base58CheckDecode(TransactorPublicKeyBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreateDAOCoinTransferTxn( updaterPkBytes, diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index 670567a4a..17821e7f3 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go 
@@ -87,8 +87,7 @@ func _derivedKeyBasicTransfer(t *testing.T, db *badger.DB, chain *Blockchain, pa require.NoError(err) } if utxoView == nil { - utxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView = NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) } txHash := txn.Hash() @@ -174,8 +173,7 @@ func _doTxnWithBlockHeight( transactorPublicKey, _, err := Base58CheckDecode(TransactorPublicKeyBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, testMeta.chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, testMeta.chain.eventManager) chain := testMeta.chain var txn *MsgDeSoTxn @@ -933,8 +931,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Just for the sake of consistency, we run the _derivedKeyBasicTransfer on unauthorized // derived key. It should fail since blockchain hasn't seen this key yet. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -946,8 +943,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Attempt sending an AuthorizeDerivedKey txn signed with an invalid private key. // This must fail because the txn has to be signed either by owner or derived key. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) @@ -973,8 +969,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Attempt sending an AuthorizeDerivedKey txn where access signature is signed with // an invalid private key. This must fail. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) expirationBlockByte := UintToBuf(authTxnMeta.ExpirationBlock) @@ -1003,8 +998,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Check basic transfer signed with still unauthorized derived key. // Should fail. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -1016,8 +1010,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Now attempt to send the same transaction but signed with the correct derived key. // This must pass. The new derived key will be flushed to the db here. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) extraData := map[string][]byte{ "test": []byte("result"), @@ -1052,8 +1045,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Check basic transfer signed by the owner key. // Should succeed. Flush to db. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, err := _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, senderPrivString, utxoView, nil, true) require.NoError(err) @@ -1068,8 +1060,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Check basic transfer signed with now authorized derived key. // Should succeed. Flush to db. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, err := _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivBase58Check, utxoView, nil, false) require.NoError(err) @@ -1088,8 +1079,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, randomPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -1108,8 +1098,7 @@ 
func TestAuthorizeDerivedKeyBasic(t *testing.T) { fmt.Println("currentTxn.String()", currentTxn.String()) // Disconnect the transaction - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 fmt.Printf("Disconnecting test index: %v\n", testIndex) require.NoError(utxoView.DisconnectTransaction( @@ -1126,8 +1115,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // After disconnecting, check basic transfer signed with unauthorized derived key. // Should fail. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -1139,8 +1127,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Connect all txns to a single UtxoView flushing only at the end. 
{ // Create a new UtxoView - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) for testIndex, txn := range testTxns { fmt.Printf("Applying test index: %v\n", testIndex) blockHeight := chain.blockTip().Height + 1 @@ -1164,8 +1151,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, randomPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -1177,8 +1163,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Disconnect all txns on a single UtxoView flushing only at the end { // Create a new UtxoView - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) for iterIndex := range testTxns { testIndex := len(testTxns) - 1 - iterIndex blockHeight := chain.blockTip().Height + 1 @@ -1278,8 +1263,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Check basic transfer signed by the owner key. // Should succeed. Flush to db. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, err := _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, senderPrivString, utxoView, nil, true) require.NoError(err) @@ -1294,8 +1278,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Check basic transfer signed with authorized derived key. Now the auth txn is persisted in the db. // Should succeed. Flush to db. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, err := _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivBase58Check, utxoView, nil, false) require.NoError(err) @@ -1314,8 +1297,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, randomPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -1333,8 +1315,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { fmt.Println("currentTxn.String()", currentTxn.String()) // Disconnect the transaction - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 
fmt.Printf("Disconnecting test index: %v\n", testIndex) require.NoError(utxoView.DisconnectTransaction( @@ -1360,8 +1341,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Check basic transfer signed by the owner key. // Should succeed. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, senderPrivString, utxoView, nil, true) require.NoError(err) @@ -1374,8 +1354,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Check basic transfer signed with expired authorized derived key. // Should fail. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -1397,8 +1376,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Send an authorize transaction signed with the correct derived key. // This must pass. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, _, err := _doAuthorizeTxn( testMeta, utxoView, @@ -1449,8 +1427,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Check basic transfer signed with new authorized derived key. // Sanity check. Should pass. We're not flushing to the db yet. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, err := _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivDeAuthBase58Check, utxoView, nil, false) require.NoError(err) @@ -1467,8 +1444,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Doesn't matter if it's signed by the owner or not, once a isDeleted // txn appears, the key should be forever expired. This must pass. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, _, err := _doAuthorizeTxn( testMeta, utxoView, @@ -1494,8 +1470,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Check basic transfer signed with new authorized derived key. // Now that key has been de-authorized this must fail. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivDeAuthBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -1508,8 +1483,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Sanity check basic transfer signed by the owner key. // Should succeed. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, err := _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, senderPrivString, utxoView, nil, true) require.NoError(err) @@ -1525,8 +1499,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Send an authorize transaction signed with a derived key. // Since we've already deleted this derived key, this must fail. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, _, err = _doAuthorizeTxn( testMeta, utxoView, @@ -1555,8 +1528,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { fmt.Println("currentTxn.String()", currentTxn.String()) // Disconnect the transaction - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 fmt.Printf("Disconnecting test index: %v\n", testIndex) require.NoError(utxoView.DisconnectTransaction( @@ -1631,8 +1603,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Check adding basic transfer signed with new authorized derived key. // Now that key has been de-authorized this must fail. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivDeAuthBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -1645,8 +1616,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Attempt re-authorizing a previously de-authorized derived key. // Since we've already deleted this derived key, this must fail. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, _, err = _doAuthorizeTxn( testMeta, utxoView, @@ -1669,8 +1639,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Sanity check basic transfer signed by the owner key. // Should succeed. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, senderPrivString, utxoView, nil, true) require.NoError(err) @@ -1695,8 +1664,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { require.NoError(utxoView.DisconnectBlock(blockToDisconnect, txHashes, utxoOps, 0)) } { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) for iterIndex := range testBlocks { testIndex := len(testBlocks) - 1 - iterIndex @@ -1785,8 +1753,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Just for the sake of consistency, we run the _derivedKeyBasicTransfer on unauthorized // derived key. It should fail since blockchain hasn't seen this key yet. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -1798,8 +1765,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Attempt sending an AuthorizeDerivedKey txn signed with an invalid private key. // This must fail because the txn has to be signed either by owner or derived key. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) @@ -1825,8 +1791,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Attempt sending an AuthorizeDerivedKey txn where access signature is signed with // an invalid private key. This must fail. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) expirationBlockByte := UintToBuf(authTxnMeta.ExpirationBlock) @@ -1855,8 +1820,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Check basic transfer signed with still unauthorized derived key. // Should fail. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -1868,8 +1832,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Now attempt to send the same transaction but signed with the correct derived key. // This must pass. The new derived key will be flushed to the db here. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, _, err := _doAuthorizeTxn( testMeta, utxoView, @@ -1897,8 +1860,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Check basic transfer signed by the owner key. // Should succeed. Flush to db. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, err := _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, senderPrivString, utxoView, nil, true) require.NoError(err) @@ -1913,8 +1875,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Check basic transfer signed with now authorized derived key. // Should succeed. Flush to db. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, err := _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivBase58Check, utxoView, nil, false) require.NoError(err) @@ -1923,8 +1884,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { require.NoError(utxoView.FlushToDb(0)) // Attempting the basic transfer again should error because the spending limit authorized only 1 transfer. 
- utxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView = NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyTxnTypeNotAuthorized) @@ -1940,8 +1900,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, randomPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -1960,8 +1919,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { fmt.Println("currentTxn.String()", currentTxn.String()) // Disconnect the transaction - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 fmt.Printf("Disconnecting test index: %v\n", testIndex) require.NoError(utxoView.DisconnectTransaction( @@ -1978,8 +1936,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // After disconnecting, check basic transfer signed with unauthorized derived key. // Should fail. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -1991,8 +1948,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Connect all txns to a single UtxoView flushing only at the end. { // Create a new UtxoView - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) for testIndex, txn := range testTxns { fmt.Printf("Applying test index: %v\n", testIndex) blockHeight := chain.blockTip().Height + 1 @@ -2016,8 +1972,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, randomPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -2029,8 +1984,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Disconnect all txns on a single UtxoView flushing only at the end { // Create a new UtxoView - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) for 
iterIndex := range testTxns { testIndex := len(testTxns) - 1 - iterIndex blockHeight := chain.blockTip().Height + 1 @@ -2130,8 +2084,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Check basic transfer signed by the owner key. // Should succeed. Flush to db. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, err := _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, senderPrivString, utxoView, nil, true) require.NoError(err) @@ -2148,8 +2101,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { { // We authorize an additional basic transfer before the derived key can do this. - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) addlBasicTransferMap := make(map[TxnType]uint64) addlBasicTransferMap[TxnTypeBasicTransfer] = 1 addlBasicTransferMap[TxnTypeAuthorizeDerivedKey] = 1 @@ -2198,8 +2150,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { randomPrivateKey, err := btcec.NewPrivateKey() require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, randomPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -2217,8 +2168,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { fmt.Println("currentTxn.String()", currentTxn.String()) 
// Disconnect the transaction - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 fmt.Printf("Disconnecting test index: %v\n", testIndex) require.NoError(utxoView.DisconnectTransaction( @@ -2244,8 +2194,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Check basic transfer signed by the owner key. // Should succeed. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, senderPrivString, utxoView, nil, true) require.NoError(err) @@ -2258,8 +2207,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Check basic transfer signed with expired authorized derived key. // Should fail. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -2284,8 +2232,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Send an authorize transaction signed with the correct derived key. // This must pass. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, _, err := _doAuthorizeTxn( testMeta, utxoView, @@ -2336,8 +2283,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Check basic transfer signed with new authorized derived key. // Sanity check. Should pass. We're not flushing to the db yet. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, err := _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivDeAuthBase58Check, utxoView, nil, false) require.NoError(err) @@ -2354,8 +2300,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Doesn't matter if it's signed by the owner or not, once a isDeleted // txn appears, the key should be forever expired. This must pass. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, _, err := _doAuthorizeTxn( testMeta, utxoView, @@ -2381,8 +2326,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Check basic transfer signed with new authorized derived key. // Now that key has been de-authorized this must fail. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivDeAuthBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -2395,8 +2339,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Sanity check basic transfer signed by the owner key. // Should succeed. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) utxoOps, txn, err := _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, senderPrivString, utxoView, nil, true) require.NoError(err) @@ -2412,8 +2355,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Send an authorize transaction signed with a derived key. // Since we've already deleted this derived key, this must fail. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, _, err = _doAuthorizeTxn( testMeta, utxoView, @@ -2442,8 +2384,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { fmt.Println("currentTxn.String()", currentTxn.String()) // Disconnect the transaction - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 fmt.Printf("Disconnecting test index: %v\n", testIndex) require.NoError(utxoView.DisconnectTransaction( @@ -2518,8 +2459,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Check adding basic transfer signed with new authorized derived key. // Now that key has been de-authorized this must fail. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, derivedPrivDeAuthBase58Check, utxoView, nil, false) require.Contains(err.Error(), RuleErrorDerivedKeyNotAuthorized) @@ -2532,8 +2472,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Attempt re-authorizing a previously de-authorized derived key. // Since we've already deleted this derived key, this must fail. 
{ - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, _, err = _doAuthorizeTxn( testMeta, utxoView, @@ -2556,8 +2495,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Sanity check basic transfer signed by the owner key. // Should succeed. { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, senderPrivString, utxoView, nil, true) require.NoError(err) @@ -2582,8 +2520,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { require.NoError(utxoView.DisconnectBlock(blockToDisconnect, txHashes, utxoOps, 0)) } { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) for iterIndex := range testBlocks { testIndex := len(testBlocks) - 1 - iterIndex diff --git a/lib/block_view_follow_test.go b/lib/block_view_follow_test.go index 10a6bd6da..ef3823f63 100644 --- a/lib/block_view_follow_test.go +++ b/lib/block_view_follow_test.go @@ -25,8 +25,7 @@ func _doFollowTxn(t *testing.T, chain *Blockchain, db *badger.DB, followedPkBytes, _, err := Base58CheckDecode(followedPkBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreateFollowTxn( senderPkBytes, followedPkBytes, isUnfollow, feeRateNanosPerKB, nil, []*DeSoOutput{}) @@ -566,8 +565,7 @@ func TestFollowTxns(t *testing.T) { 
currentTxn := txns[backwardIter] fmt.Printf("Disconnecting transaction with type %v index %d (going backwards)\n", currentTxn.TxnMeta.GetTxnType(), backwardIter) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) currentHash := currentTxn.Hash() err = utxoView.DisconnectTransaction(currentTxn, currentHash, currentOps, savedHeight) @@ -644,8 +642,7 @@ func TestFollowTxns(t *testing.T) { } // Apply all the transactions to a view and flush the view to the db. - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) for ii, txn := range txns { fmt.Printf("Adding txn %v of type %v to UtxoView\n", ii, txn.TxnMeta.GetTxnType()) @@ -663,8 +660,7 @@ func TestFollowTxns(t *testing.T) { // Disconnect the transactions from a single view in the same way as above // i.e. without flushing each time. - utxoView2, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView2 := NewUtxoView(db, params, nil, chain.snapshot, nil) for ii := 0; ii < len(txnOps); ii++ { backwardIter := len(txnOps) - 1 - ii fmt.Printf("Disconnecting transaction with index %d (going backwards)\n", backwardIter) @@ -708,8 +704,7 @@ func TestFollowTxns(t *testing.T) { // Roll back the block and make sure we don't hit any errors. { - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) // Fetch the utxo operations for the block we're detaching. We need these // in order to be able to detach the block. 
diff --git a/lib/block_view_like_test.go b/lib/block_view_like_test.go index 3b376cf75..a757c7f70 100644 --- a/lib/block_view_like_test.go +++ b/lib/block_view_like_test.go @@ -22,8 +22,7 @@ func _doLikeTxn(t *testing.T, chain *Blockchain, db *badger.DB, senderPkBytes, _, err := Base58CheckDecode(senderPkBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreateLikeTxn( senderPkBytes, likedPostHash, isUnfollow, feeRateNanosPerKB, nil, []*DeSoOutput{}) @@ -482,8 +481,7 @@ func TestLikeTxns(t *testing.T) { "Disconnecting transaction with type %v index %d (going backwards)\n", currentTxn.TxnMeta.GetTxnType(), backwardIter) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) currentHash := currentTxn.Hash() err = utxoView.DisconnectTransaction(currentTxn, currentHash, currentOps, savedHeight) @@ -567,8 +565,7 @@ func TestLikeTxns(t *testing.T) { } // Apply all the transactions to a view and flush the view to the db. - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) for ii, txn := range txns { fmt.Printf("Adding txn %v of type %v to UtxoView\n", ii, txn.TxnMeta.GetTxnType()) @@ -687,8 +684,7 @@ func TestLikeTxns(t *testing.T) { // Disconnect the transactions from a single view in the same way as above // i.e. without flushing each time. 
- utxoView2, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView2 := NewUtxoView(db, params, nil, chain.snapshot, nil) for ii := 0; ii < len(txnOps); ii++ { backwardIter := len(txnOps) - 1 - ii fmt.Printf("Disconnecting transaction with index %d (going backwards)\n", backwardIter) @@ -731,8 +727,7 @@ func TestLikeTxns(t *testing.T) { // Roll back the block and make sure we don't hit any errors. { - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) // Fetch the utxo operations for the block we're detaching. We need these // in order to be able to detach the block. diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 58cd4c5f9..33c52bc9f 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -792,8 +792,7 @@ func TestLockupBasedOverflowsOnProfiles(t *testing.T) { 1000, 1000, MaxUint256, 0) // Ensure CoinsInCirculationNanos and NumberOfHolders are now zero - utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) profileEntry := utxoView.GetProfileEntryForPublicKey(m2PkBytes) require.Equal(t, *uint256.NewInt(0), profileEntry.DAOCoinEntry.CoinsInCirculationNanos) require.Equal(t, uint64(0), profileEntry.DAOCoinEntry.NumberOfHolders) @@ -1037,9 +1036,8 @@ func TestLockupStandardProfileFlows(t *testing.T) { // We expect this to create a locked balance entry with 10000 base units locked inside. { // Get the PKID associated with m1. 
- utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID @@ -1056,9 +1054,8 @@ func TestLockupStandardProfileFlows(t *testing.T) { 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 10000 base units. - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m1PKID, @@ -1082,9 +1079,8 @@ func TestLockupStandardProfileFlows(t *testing.T) { // We expect this to create a locked balance entry with 10500 base units locked inside. { // Get the PKID associated with m1. - utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID @@ -1101,9 +1097,8 @@ func TestLockupStandardProfileFlows(t *testing.T) { 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 10500 base units. - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m1PKID, @@ -1136,9 +1131,8 @@ func TestLockupStandardProfileFlows(t *testing.T) { 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 10500 base units. 
- utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID lockedBalanceEntry, err := @@ -1164,9 +1158,8 @@ func TestLockupStandardProfileFlows(t *testing.T) { 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 12000 base units. - utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID lockedBalanceEntry, err := @@ -1213,9 +1206,8 @@ func TestLockupStandardProfileFlows(t *testing.T) { ) // Check to ensure the resulting locked balance entry for m1 has 9000 base units. - utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m1PKIDEntry := utxoView.GetPKIDForPublicKey(m1PkBytes) m1PKID := m1PKIDEntry.PKID lockedBalanceEntry, err := @@ -1247,9 +1239,8 @@ func TestLockupStandardProfileFlows(t *testing.T) { // 500 base units of m1 DAO coins was given by m2 during the distribution phase. { // Get the original BalanceEntry for the associated DAO coins. - utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) originalBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( m2PkBytes, m1PkBytes, true) @@ -1261,12 +1252,10 @@ func TestLockupStandardProfileFlows(t *testing.T) { m1Pub, 2*365*24*60*60*1e9+1, ) - require.NoError(t, err) // Get the updated BalanceEntry for the associated DAO coins. 
- utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) newBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( m2PkBytes, m1PkBytes, true) require.True(t, newBalanceEntry.BalanceNanos.Gt(&originalBalanceEntry.BalanceNanos)) @@ -1286,9 +1275,8 @@ func TestLockupWithDerivedKey(t *testing.T) { // Initialize m0, m1, m2, m3, m4, and paramUpdater _setUpProfilesAndMintM0M1DAOCoins(testMeta) - utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m0PKID := utxoView.GetPKIDForPublicKey(m0PkBytes).PKID m1PKID := utxoView.GetPKIDForPublicKey(m1PkBytes).PKID //m2PKID := utxoView.GetPKIDForPublicKey(m2PkBytes).PKID @@ -1299,9 +1287,8 @@ func TestLockupWithDerivedKey(t *testing.T) { // Setup helper functions for creating m0 derived keys newUtxoView := func() *UtxoView { - utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) return utxoView } _submitAuthorizeDerivedKeyTxn := func(txnSpendingLimit *TransactionSpendingLimit) (string, string, error) { @@ -1451,9 +1438,8 @@ func TestLockupWithDerivedKey(t *testing.T) { } derivedKeyPriv, derivedKeyPub, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) require.NoError(t, err) - utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) derivedPubKeyBytes, _, err := Base58CheckDecode(derivedKeyPub) require.NoError(t, err) derivedKeyEntry := utxoView.GetDerivedKeyMappingForOwner(m0PkBytes, derivedPubKeyBytes) @@ -1764,9 +1750,8 @@ func TestLockupWithDerivedKey(t *testing.T) { m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: updateCoinLockupParamsMetadata}, 0, ) 
require.NoError(t, err) - utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) profileEntry := utxoView.GetProfileEntryForPKID(m0PKID) require.Equal(t, TransferRestrictionStatusProfileOwnerOnly, profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus) leftYCP, rightYCP, err := utxoView.GetLocalYieldCurvePoints(m0PKID, 365*24*60*60*1e9) @@ -1787,9 +1772,8 @@ func TestLockupWithDerivedKey(t *testing.T) { m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: updateCoinLockupParamsMetadata}, 0, ) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) profileEntry = utxoView.GetProfileEntryForPKID(m0PKID) require.Equal(t, TransferRestrictionStatusProfileOwnerOnly, profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus) leftYCP, rightYCP, err = utxoView.GetLocalYieldCurvePoints(m0PKID, 365*24*60*60*1e9) @@ -1809,9 +1793,8 @@ func TestLockupWithDerivedKey(t *testing.T) { m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, ) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -1834,9 +1817,8 @@ func TestLockupWithDerivedKey(t *testing.T) { m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, ) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( 
m0PKID, @@ -1858,9 +1840,8 @@ func TestLockupWithDerivedKey(t *testing.T) { m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupTransferMetadata}, 0, ) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m1PKID, @@ -1882,9 +1863,8 @@ func TestLockupWithDerivedKey(t *testing.T) { m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupTransferMetadata}, 0, ) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m1PKID, @@ -1896,9 +1876,8 @@ func TestLockupWithDerivedKey(t *testing.T) { require.Equal(t, int64(2*365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) // Perform the first unlock operation of 500 m1 tokens @ 1yr - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) balanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) startingBalance := balanceEntry.BalanceNanos coinUnlockMetadata := &CoinUnlockMetadata{ProfilePublicKey: NewPublicKey(m0PkBytes)} @@ -1906,9 +1885,8 @@ func TestLockupWithDerivedKey(t *testing.T) { m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinUnlockMetadata}, 365*24*60*60*1e9+1, ) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) balanceEntry, _, _ = 
utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) require.True(t, balanceEntry.BalanceNanos.Gt(&startingBalance)) require.Equal(t, *uint256.NewInt(500), @@ -1923,16 +1901,14 @@ func TestLockupWithDerivedKey(t *testing.T) { require.True(t, lockedBalanceEntry == nil) // Perform the second unlock operation of 500 m1 tokens @ 2yrs - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinUnlockMetadata}, 2*365*24*60*60*1e9+1, ) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) balanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) require.True(t, balanceEntry.BalanceNanos.Gt(&startingBalance)) require.Equal(t, *uint256.NewInt(1000), @@ -1992,15 +1968,13 @@ func TestLockupDisconnects(t *testing.T) { require.NoError(t, err) txHash := txn2.Hash() blockHeight := testMeta.chain.BlockTip().Height + 1 - utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) err = utxoView.DisconnectTransaction(txn2, txHash, utxoOps2, blockHeight) require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m0PKID := utxoView.GetPKIDForPublicKey(m0PkBytes).PKID lockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( @@ -2015,9 +1989,8 @@ func TestLockupDisconnects(t *testing.T) { err = 
utxoView.DisconnectTransaction(txn1, txn1.Hash(), utxoOps1, blockHeight) require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -2046,9 +2019,8 @@ func TestLockupDisconnects(t *testing.T) { TransferRestrictionStatusProfileOwnerOnly, ) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) utxoOps, txn, _, err := _updateCoinLockupParams( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, @@ -2066,9 +2038,8 @@ func TestLockupDisconnects(t *testing.T) { err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m1PKID := utxoView.GetPKIDForPublicKey(m1PkBytes).PKID leftYieldCurvePoint, rightYieldCurvePoint, err := utxoView.GetLocalYieldCurvePoints(m1PKID, 365*24*60*60*1e9) @@ -2080,9 +2051,8 @@ func TestLockupDisconnects(t *testing.T) { require.Equal(t, profileEntry.DAOCoinEntry.LockupTransferRestrictionStatus, TransferRestrictionStatusProfileOwnerOnly) // Test Deleting a Yield Curve Point and Reverting Said Transaction - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) utxoOps, txn, _, err = _updateCoinLockupParams( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, @@ 
-2105,9 +2075,8 @@ func TestLockupDisconnects(t *testing.T) { err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) leftYieldCurvePoint, rightYieldCurvePoint, err = utxoView.GetLocalYieldCurvePoints(m1PKID, 365*24*60*60*1e9) require.NoError(t, err) @@ -2122,9 +2091,8 @@ func TestLockupDisconnects(t *testing.T) { // // Create an on-chain profile for m3 with MaxUint256 Locked Tokens - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) _updateProfileWithTestMeta( testMeta, testMeta.feeRateNanosPerKb, @@ -2163,9 +2131,8 @@ func TestLockupDisconnects(t *testing.T) { NewPublicKey(m3PkBytes), 1000, MaxUint256) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m3PKID := utxoView.GetPKIDForPublicKey(m3PkBytes).PKID m4PKID := utxoView.GetPKIDForPublicKey(m4PkBytes).PKID m3BalanceEntry, err := @@ -2187,9 +2154,8 @@ func TestLockupDisconnects(t *testing.T) { err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m3PKID = utxoView.GetPKIDForPublicKey(m3PkBytes).PKID m4PKID = utxoView.GetPKIDForPublicKey(m4PkBytes).PKID m3BalanceEntry, err = @@ -2212,9 +2178,8 @@ func TestLockupDisconnects(t *testing.T) { // // Create an on-chain profile for m4 with MaxUint256 Locked Tokens - utxoView, err = NewUtxoView( + utxoView = 
NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) _updateProfileWithTestMeta( testMeta, testMeta.feeRateNanosPerKb, @@ -2245,9 +2210,8 @@ func TestLockupDisconnects(t *testing.T) { m4Pub, m4Priv, m4Pub, m4Pub, 1000, 1000, MaxUint256, 0) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m4LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m4PKID, @@ -2268,9 +2232,8 @@ func TestLockupDisconnects(t *testing.T) { 1001) // Ensure unlock functioned properly - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m4LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m4PKID, @@ -2283,17 +2246,15 @@ func TestLockupDisconnects(t *testing.T) { require.Equal(t, *MaxUint256, m4be.BalanceNanos) // Execute the disconnect and ensure it functions correctly - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) txHash = txn.Hash() blockHeight = testMeta.chain.BlockTip().Height + 1 err = utxoView.DisconnectTransaction(txn, txHash, utxoOps, blockHeight) require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) require.NoError(t, err) - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m4LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m4PKID, @@ -2317,9 +2278,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) 
{ tipTimestamp := int64(testMeta.chain.blockTip().Header.TstampNanoSecs) // Validate the starting state - utxoView, err := NewUtxoView( + utxoView := NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m0PKID := utxoView.GetPKIDForPublicKey(m0PkBytes).PKID m3PKID := utxoView.GetPKIDForPublicKey(m3PkBytes).PKID m0Profile := utxoView.GetProfileEntryForPKID(m0PKID) @@ -2396,9 +2356,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.NoError(t, err) // Validate state update - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m0PKID = utxoView.GetPKIDForPublicKey(m0PkBytes).PKID m3PKID = utxoView.GetPKIDForPublicKey(m3PkBytes).PKID m0Profile = utxoView.GetProfileEntryForPKID(m0PKID) @@ -2461,9 +2420,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.NoError(t, err) // Validate state update - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m0BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) require.Equal(t, *uint256.NewInt(999000), m0BalanceEntry.BalanceNanos) @@ -2490,9 +2448,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { // // Disconnect the second block - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) blk2Hash, err := blk2.Hash() require.NoError(t, err) utxoOps, err := GetUtxoOperationsForBlock(testMeta.db, nil, blk2Hash) @@ -2507,9 +2464,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { testMeta.chain.bestChain = 
testMeta.chain.bestChain[:len(testMeta.chain.bestChain)-1] // Validate the state update - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m0PKID = utxoView.GetPKIDForPublicKey(m0PkBytes).PKID m3PKID = utxoView.GetPKIDForPublicKey(m3PkBytes).PKID m0Profile = utxoView.GetProfileEntryForPKID(m0PKID) @@ -2545,7 +2501,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { // // Disconnect the first block - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) blk1Hash, err := blk1.Hash() require.NoError(t, err) @@ -2563,9 +2519,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { testMeta.chain.bestChain = testMeta.chain.bestChain[:len(testMeta.chain.bestChain)-1] // Verify we return back to the initial state - utxoView, err = NewUtxoView( + utxoView = NewUtxoView( testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m0Profile = utxoView.GetProfileEntryForPKID(m0PKID) require.Equal(t, TransferRestrictionStatusUnrestricted, m0Profile.DAOCoinEntry.LockupTransferRestrictionStatus) m0LeftYieldCurvePoint, m0RightYieldCurvePoint, err = utxoView.GetLocalYieldCurvePoints(m0PKID, 365*24*60*60*1e9+1) @@ -2611,8 +2566,7 @@ func TestCoinLockupIndirectRecipients(t *testing.T) { } // Verify that m3 received the lockup (not m0) and that m0 was credited properly. 
- utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) m3PKIDEntry := utxoView.GetPKIDForPublicKey(m3PkBytes) m3PKID := m3PKIDEntry.PKID m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) @@ -2687,8 +2641,7 @@ func TestSimpleVestedLockup(t *testing.T) { } // Get the original m0 balance entry base units. - utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) m0PKID := m0PKIDEntry.PKID originalBalanceEntry, _, _ := @@ -2707,8 +2660,7 @@ func TestSimpleVestedLockup(t *testing.T) { } // Verify that the locked balance entry was credited. - utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) + utxoView = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) // Check m0 LockedBalanceEntry m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ @@ -2730,8 +2682,7 @@ func TestSimpleVestedLockup(t *testing.T) { require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) // Get the updated m0 balance entry base units and ensure it's been credited 500 base units. 
- utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) + utxoView = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) updatedBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) require.True(t, uint256.NewInt(500).Eq( @@ -2753,8 +2704,7 @@ func TestSimpleVestedLockup(t *testing.T) { } // Verify that the locked balance entry was credited. - utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) + utxoView = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) // Check m0 LockedBalanceEntry m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ @@ -2776,8 +2726,7 @@ func TestSimpleVestedLockup(t *testing.T) { require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(250))) // Get the updated m0 balance entry base units and ensure it's been credited 250 base units. - utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) + utxoView = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) updatedBalanceEntry, _, _ = utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) require.True(t, uint256.NewInt(250).Eq( @@ -2799,8 +2748,7 @@ func TestSimpleVestedLockup(t *testing.T) { } // Verify that the locked balance entry was credited. 
- utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) + utxoView = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) // Check m0 LockedBalanceEntry m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ @@ -2821,8 +2769,7 @@ func TestSimpleVestedLockup(t *testing.T) { require.True(t, m0LockedBalanceEntry == nil) // Get the updated m0 balance entry base units and ensure it's been credited 250 base units. - utxoView, err = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) + utxoView = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) updatedBalanceEntry, _, _ = utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) require.True(t, uint256.NewInt(250).Eq( @@ -2861,9 +2808,8 @@ func TestNoOverlapVestedLockupConsolidation(t *testing.T) { } // Verify that the left overhang was computed correctly. - utxoView, err := + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) @@ -2914,9 +2860,8 @@ func TestPerfectOverlapVestedLockupConsolidation(t *testing.T) { } // Verify that the left overhang was computed correctly. - utxoView, err := + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) @@ -2965,9 +2910,8 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { } // Verify that the left overhang was computed correctly. 
- utxoView, err := + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) @@ -3072,9 +3016,8 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { } // Verify that the left overhang was computed correctly. - utxoView, err := + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) @@ -3178,9 +3121,8 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { } // Validate that the split was performed correctly. - utxoView, err := + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) @@ -3309,9 +3251,8 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { } // Validate that the split was performed correctly. - utxoView, err := + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) @@ -3441,9 +3382,8 @@ func TestSimpleJointExistingVestedLockups(t *testing.T) { } // Validate that the split was performed correctly. - utxoView, err := + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) @@ -3509,9 +3449,8 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { } // Validate that the split was performed correctly. 
- utxoView, err := + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) // Check m0 LockedBalanceEntry m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) @@ -3571,9 +3510,8 @@ func TestVestingIntersectionLimit(t *testing.T) { _setUpProfilesAndMintM0M1DAOCoins(testMeta) // Validate the default value of the MaximumVestedIntersectionsPerLockupTransaction parameter. - utxoView, err := + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) require.Equal(t, utxoView.GetCurrentGlobalParamsEntry().MaximumVestedIntersectionsPerLockupTransaction, 1000) // Generate consecutive vested locked balance entries equal to this limit. @@ -3618,9 +3556,8 @@ func TestVestingIntersectionLimit(t *testing.T) { // Validate the consolidation. m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) m0PKID := m0PKIDEntry.PKID - utxoView, err = + utxoView = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) for ii := 0; ii < utxoView.GetCurrentGlobalParamsEntry().MaximumVestedIntersectionsPerLockupTransaction; ii++ { m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, @@ -3668,7 +3605,7 @@ func TestVestingIntersectionLimit(t *testing.T) { } // Now we try to unlock all previous entries just to ensure GetUnlockableLockedBalanceEntries is functioning. 
- utxoView, err = + utxoView = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) startingBalanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) require.True(t, startingBalanceEntry != nil) @@ -3682,7 +3619,7 @@ func TestVestingIntersectionLimit(t *testing.T) { ) require.NoError(t, err) } - utxoView, err = + utxoView = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) finalBalanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) require.True(t, finalBalanceEntry != nil) @@ -3787,9 +3724,8 @@ func TestRealWorldLockupsUseCase(t *testing.T) { } // Verify the locked balance entries in the db. - utxoView, err := + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(t, err) m0PKIDEntry := utxoView.GetPKIDForPublicKey(m0PkBytes) m0PKID := m0PKIDEntry.PKID lockedBalanceEntries, err := utxoView.GetAllLockedBalanceEntriesForHodlerPKID(m0PKID) @@ -4010,8 +3946,7 @@ func _coinLockupWithConnectTimestamp( recipientPkBytes, _, err := Base58CheckDecode(recipientPublicKeyBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) // Create the coin lockup transaction. txn, totalInputMake, _, feesMake, err := chain.CreateCoinLockupTxn( @@ -4099,8 +4034,7 @@ func _updateCoinLockupParams(t *testing.T, chain *Blockchain, db *badger.DB, transactorPkBytes, _, err := Base58CheckDecode(transactorPublicKeyBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) // Create the update coin lockup params transaction. 
txn, totalInputMake, _, feesMake, err := chain.CreateUpdateCoinLockupParamsTxn( @@ -4178,8 +4112,7 @@ func _coinLockupTransfer(t *testing.T, chain *Blockchain, db *badger.DB, transactorPkBytes, _, err := Base58CheckDecode(transactorPublicKeyBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) // Create the update coin lockup params transaction. txn, totalInputMake, _, feesMake, err := chain.CreateCoinLockupTransferTxn( @@ -4256,8 +4189,7 @@ func _coinUnlockWithConnectTimestamp(t *testing.T, chain *Blockchain, db *badger profilePkBytes, _, err := Base58CheckDecode(profilePublicKeyBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) // Create the coin unlock transaction. txn, totalInputMake, _, feesMake, err := chain.CreateCoinUnlockTxn( diff --git a/lib/block_view_message_test.go b/lib/block_view_message_test.go index a7e08fb46..63dee2e69 100644 --- a/lib/block_view_message_test.go +++ b/lib/block_view_message_test.go @@ -67,8 +67,7 @@ func _privateMessageWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB recipientPkBytes, _, err := Base58CheckDecode(recipientPkBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreatePrivateMessageTxn( senderPkBytes, recipientPkBytes, unencryptedMessageText, "", @@ -404,8 +403,7 @@ func TestPrivateMessage(t *testing.T) { currentTxn := txns[backwardIter] fmt.Printf("Disconnecting transaction with type %v index %d (going backwards)\n", currentTxn.TxnMeta.GetTxnType(), backwardIter) - utxoView, err := NewUtxoView(db, 
params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) currentHash := currentTxn.Hash() err = utxoView.DisconnectTransaction(currentTxn, currentHash, currentOps, savedHeight) @@ -466,8 +464,7 @@ func TestPrivateMessage(t *testing.T) { } // Apply all the transactions to a view and flush the view to the db. - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) for ii, txn := range txns { fmt.Printf("Adding txn %v of type %v to UtxoView\n", ii, txn.TxnMeta.GetTxnType()) @@ -484,8 +481,7 @@ func TestPrivateMessage(t *testing.T) { // Disonnect the transactions from a single view in the same way as above // i.e. without flushing each time. - utxoView2, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView2 := NewUtxoView(db, params, nil, chain.snapshot, nil) for ii := 0; ii < len(txnOps); ii++ { backwardIter := len(txnOps) - 1 - ii fmt.Printf("Disconnecting transaction with index %d (going backwards)\n", backwardIter) @@ -567,8 +563,7 @@ func TestPrivateMessage(t *testing.T) { // Roll back the block and make sure we don't hit any errors. { - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) // Fetch the utxo operations for the block we're detaching. We need these // in order to be able to detach the block. 
@@ -653,8 +648,7 @@ func _messagingKeyWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB, require.NoError(err) require.Equal(totalInputMake, changeAmountMake+feesMake) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) _signTxn(t, txn, signerPriv) txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 @@ -719,10 +713,8 @@ func _messagingKeyWithExtraDataWithTestMeta(testMeta *TestMeta, senderPk []byte, func _verifyMessagingKey(testMeta *TestMeta, publicKey *PublicKey, entry *MessagingGroupEntry) bool { var utxoMessagingEntry *MessagingGroupEntry - require := require.New(testMeta.t) messagingKey := NewMessagingGroupKey(publicKey, entry.MessagingGroupKeyName[:]) - utxoView, err := NewUtxoView(testMeta.db, testMeta.params, nil, testMeta.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(testMeta.db, testMeta.params, nil, testMeta.chain.snapshot, nil) utxoMessagingEntry = utxoView.GetMessagingGroupKeyToMessagingGroupEntryMapping(messagingKey) if utxoMessagingEntry == nil || utxoMessagingEntry.isDeleted { @@ -1685,7 +1677,7 @@ func _connectPrivateMessageWithPartyWithExtraData(testMeta *TestMeta, senderPkBy txHash := txn.Hash() // Always use height+1 for validation since it's assumed the transaction will // get mined into the next block. 
- utxoView, err := NewUtxoView(testMeta.db, testMeta.params, nil, testMeta.chain.snapshot, nil) + utxoView := NewUtxoView(testMeta.db, testMeta.params, nil, testMeta.chain.snapshot, nil) blockHeight := testMeta.chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) @@ -1731,11 +1723,8 @@ func _helpConnectPrivateMessageWithParty(testMeta *TestMeta, senderPrivBase58 st func _verifyMessageParty(testMeta *TestMeta, expectedMessageEntries map[PublicKey][]MessageEntry, expectedEntry MessageEntry, groupOwner bool) bool { - require := require.New(testMeta.t) - // First validate that the expected entry was properly added to the UtxoView. - utxoView, err := NewUtxoView(testMeta.db, testMeta.params, nil, testMeta.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(testMeta.db, testMeta.params, nil, testMeta.chain.snapshot, nil) messageKey := MakeMessageKey(expectedEntry.SenderMessagingPublicKey[:], expectedEntry.TstampNanos) messageEntrySender := utxoView._getMessageEntryForMessageKey(&messageKey) if messageEntrySender == nil || messageEntrySender.isDeleted { @@ -1787,8 +1776,7 @@ func _verifyMessages(testMeta *TestMeta, expectedMessageEntries map[PublicKey][] require := require.New(testMeta.t) assert := assert.New(testMeta.t) - utxoView, err := NewUtxoView(testMeta.db, testMeta.params, nil, testMeta.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(testMeta.db, testMeta.params, nil, testMeta.chain.snapshot, nil) for key, messageEntries := range expectedMessageEntries { dbMessageEntries, _, err := utxoView.GetLimitedMessagesForUser(key[:], 100) @@ -1953,8 +1941,7 @@ func TestGroupMessages(t *testing.T) { _verifyMessages(testMeta, expectedMessageEntries) // Just to sanity-check, verify that the number of messages is as intended. 
- utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) messages, _, err := utxoView.GetMessagesForUser(senderPkBytes) require.NoError(err) assert.Equal(1, len(messages)) @@ -2065,8 +2052,7 @@ func TestGroupMessages(t *testing.T) { // Verify that all the messages are correct. _verifyMessages(testMeta, expectedMessageEntries) // Just to sanity-check, verify that the number of messages is as intended. - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) messages, _, err := utxoView.GetMessagesForUser(senderPkBytes) require.NoError(err) assert.Equal(5, len(messages)) @@ -2121,9 +2107,9 @@ func TestGroupMessages(t *testing.T) { require.Equal(true, _verifyMessageParty(testMeta, expectedMessageEntries, messageEntry1, true)) _verifyMessages(testMeta, expectedMessageEntries) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) messages, _, err := utxoView.GetMessagesForUser(m1PubKey) + require.NoError(err) assert.Equal(1, len(messages)) } @@ -2218,8 +2204,7 @@ func TestGroupMessages(t *testing.T) { // To add some entropy, the message will be sent by recipient and through default key. tstampNanos3 := uint64(time.Now().UnixNano()) testMessage3 := []byte{1, 2, 5, 4, 5, 6, 7, 8, 15, 22, 27} - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) messagingKey := utxoView.GetMessagingGroupKeyToMessagingGroupEntryMapping(&MessagingGroupKey{ *recipientPublicKey, *DefaultGroupKeyName(), @@ -2254,8 +2239,7 @@ func TestGroupMessages(t *testing.T) { // Verify all messages. 
_verifyMessages(testMeta, expectedMessageEntries) // Just to sanity-check, verify that the number of messages is as intended. - utxoView, err = NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView = NewUtxoView(db, params, nil, chain.snapshot, nil) messages, _, err := utxoView.GetMessagesForUser(recipientPkBytes) require.NoError(err) assert.Equal(6, len(messages)) @@ -2330,8 +2314,7 @@ func TestGroupMessages(t *testing.T) { // Now let's have m0 send the first message to the group chat. // We will fetch the encrypted messaging key from m0, decrypt it, and use it to make the message. - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) messagingGroupEntries, err := utxoView.GetMessagingGroupEntriesForUser(m0PubKey) require.NoError(err) require.NotNil(messagingGroupEntries) @@ -2377,8 +2360,7 @@ func TestGroupMessages(t *testing.T) { // Verify the messages. _verifyMessages(testMeta, expectedMessageEntries) // Just to sanity-check, verify that the number of messages is as intended. - utxoView, err = NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView = NewUtxoView(db, params, nil, chain.snapshot, nil) messages, _, err := utxoView.GetMessagesForUser(senderPkBytes) require.NoError(err) assert.Equal(6, len(messages)) @@ -2439,8 +2421,7 @@ func TestGroupMessages(t *testing.T) { _connectBlockThenDisconnectBlockAndFlush(testMeta) // Sanity-check that all entries were reverted from the DB. 
- utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) messages, _, err := utxoView.GetMessagesForUser(senderPkBytes) require.NoError(err) assert.Equal(0, len(messages)) diff --git a/lib/block_view_new_message_test.go b/lib/block_view_new_message_test.go index 9da3775c8..4a0a45475 100644 --- a/lib/block_view_new_message_test.go +++ b/lib/block_view_new_message_test.go @@ -657,8 +657,7 @@ func TestNewMessage(t *testing.T) { tvv4 = append(tvv4, groupChatEnumerationTestVectors...) tvb4ConnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) _verifyDmThreadKeysWithUtxoView(t, utxoView, *m0PublicKey, dmThreads31M0) _verifyDmThreadKeysWithUtxoView(t, utxoView, *m1PublicKey, dmThreads31M1) _verifyDmMessageEntries(t, utxoView, dmThreadEnumeration, dmEnumerationMessages) @@ -718,8 +717,7 @@ func TestNewMessage(t *testing.T) { tvv5 = append(tvv5, groupChatEnumerationUpdateTestVectors...) 
tvb5ConnectCallback := func(tvb *transactionTestVectorBlock, tm *transactionTestMeta) { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) _verifyDmThreadKeysWithUtxoView(t, utxoView, *m0PublicKey, dmThreads31M0) _verifyDmThreadKeysWithUtxoView(t, utxoView, *m1PublicKey, dmThreads31M1) _verifyDmMessageEntries(t, utxoView, dmThreadEnumeration, dmEnumerationUpdateMessages) diff --git a/lib/block_view_nft_test.go b/lib/block_view_nft_test.go index 6fe5186f9..2f4cfd64c 100644 --- a/lib/block_view_nft_test.go +++ b/lib/block_view_nft_test.go @@ -51,8 +51,7 @@ func _createNFTWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB, par updaterPkBytes, _, err := Base58CheckDecode(updaterPkBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreateCreateNFTTxn( updaterPkBytes, @@ -286,8 +285,7 @@ func _createNFTBid(t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoP updaterPkBytes, _, err := Base58CheckDecode(updaterPkBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) txn, totalInputMake, _, _, err := chain.CreateNFTBidTxn( updaterPkBytes, @@ -373,8 +371,7 @@ func _acceptNFTBid(t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoP bidderPkBytes, _, err := Base58CheckDecode(bidderPkBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) bidderPKID := utxoView.GetPKIDForPublicKey(bidderPkBytes) require.NotNil(bidderPKID) @@ -489,8 +486,7 @@ func _updateNFT(t 
*testing.T, chain *Blockchain, db *badger.DB, params *DeSoPara updaterPkBytes, _, err := Base58CheckDecode(updaterPkBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreateUpdateNFTTxn( updaterPkBytes, @@ -587,8 +583,7 @@ func _transferNFT(t *testing.T, chain *Blockchain, db *badger.DB, params *DeSoPa receiverPkBytes, _, err := Base58CheckDecode(receiverPk) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreateNFTTransferTxn( senderPkBytes, @@ -676,8 +671,7 @@ func _acceptNFTTransfer(t *testing.T, chain *Blockchain, db *badger.DB, updaterPkBytes, _, err := Base58CheckDecode(updaterPkBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreateAcceptNFTTransferTxn( updaterPkBytes, @@ -759,8 +753,7 @@ func _burnNFT(t *testing.T, chain *Blockchain, db *badger.DB, updaterPkBytes, _, err := Base58CheckDecode(updaterPkBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreateBurnNFTTxn( updaterPkBytes, @@ -1278,8 +1271,7 @@ func TestNFTBasic(t *testing.T) { // This time set HasUnlockable to 'true'. 
// Add some extra data to the NFT entries { - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) numCopies := uint64(10) nftFee := utxoView.GetCurrentGlobalParamsEntry().CreateNFTFeeNanos * numCopies diff --git a/lib/block_view_post_test.go b/lib/block_view_post_test.go index 9ed958875..264df7d69 100644 --- a/lib/block_view_post_test.go +++ b/lib/block_view_post_test.go @@ -33,8 +33,7 @@ func _submitPost(t *testing.T, chain *Blockchain, db *badger.DB, updaterPkBytes, _, err := Base58CheckDecode(updaterPkBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) body, err := json.Marshal(bodyObj) require.NoError(err) @@ -140,8 +139,7 @@ func _giveDeSoDiamonds(t *testing.T, chain *Blockchain, db *badger.DB, params *D senderPkBytes, _, err := Base58CheckDecode(senderPkBase58Check) require.NoError(t, err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) txn, totalInputMake, spendAmount, changeAmountMake, feesMake, err := chain.CreateBasicTransferTxnWithDiamonds( senderPkBytes, @@ -238,8 +236,7 @@ func _doSubmitPostTxn(t *testing.T, chain *Blockchain, db *badger.DB, updaterPkBytes, _, err := Base58CheckDecode(UpdaterPublicKeyBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) txn, totalInputMake, _, _, err := chain.CreateSubmitPostTxn( updaterPkBytes, @@ -359,8 +356,7 @@ func TestSubmitPost(t *testing.T) { registerOrTransfer("", senderPkString, m3Pub, senderPrivString) checkPostsDeleted := func() { - utxoView, err := NewUtxoView(db, params, nil, 
chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) corePosts, commentsByPostHash, err := utxoView.GetAllPosts() require.NoError(err) require.Equal(4, len(corePosts)) @@ -1252,8 +1248,7 @@ func TestSubmitPost(t *testing.T) { } checkPostsExist := func() { - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) corePosts, commentsByPostHash, err := utxoView.GetAllPosts() require.NoError(err) // 4 posts from seed txns @@ -1498,8 +1493,7 @@ func TestSubmitPost(t *testing.T) { currentTxn := txns[backwardIter] fmt.Printf("Disconnecting transaction with type %v index %d (going backwards)\n", currentTxn.TxnMeta.GetTxnType(), backwardIter) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) currentHash := currentTxn.Hash() err = utxoView.DisconnectTransaction(currentTxn, currentHash, currentOps, savedHeight) @@ -1528,8 +1522,7 @@ func TestSubmitPost(t *testing.T) { } // Apply all the transactions to a view and flush the view to the db. - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) for ii, txn := range txns { fmt.Printf("Adding txn %v of type %v to UtxoView\n", ii, txn.TxnMeta.GetTxnType()) @@ -1577,8 +1570,7 @@ func TestSubmitPost(t *testing.T) { // Disonnect the transactions from a single view in the same way as above // i.e. without flushing each time. 
- utxoView2, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView2 := NewUtxoView(db, params, nil, chain.snapshot, nil) for ii := 0; ii < len(txnOps); ii++ { backwardIter := len(txnOps) - 1 - ii fmt.Printf("Disconnecting transaction with index %d (going backwards)\n", backwardIter) @@ -1632,8 +1624,7 @@ func TestSubmitPost(t *testing.T) { // Roll back the block and make sure we don't hit any errors. { - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) // Fetch the utxo operations for the block we're detaching. We need these // in order to be able to detach the block. @@ -1935,8 +1926,7 @@ func TestDeSoDiamondErrorCases(t *testing.T) { receiverPkBytes, _, err := Base58CheckDecode(receiverPkBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) // Build the basic transfer txn. txn := &MsgDeSoTxn{ @@ -2148,8 +2138,7 @@ func TestFreezingPosts(t *testing.T) { // The UTXO view in these tests differs from the UTXO view used to connect // transactions. To avoid retrieving stale data in the UTXO view used in // these tests, we re-create a new UTXO view each time we need one. 
- newUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(t, err) + newUtxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) return newUtxoView } diff --git a/lib/block_view_profile_test.go b/lib/block_view_profile_test.go index d3cd11cd2..aa4cf5d3e 100644 --- a/lib/block_view_profile_test.go +++ b/lib/block_view_profile_test.go @@ -46,8 +46,7 @@ func _swapIdentity(t *testing.T, chain *Blockchain, db *badger.DB, updaterPkBytes, _, err := Base58CheckDecode(updaterPkBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreateSwapIdentityTxn( updaterPkBytes, @@ -122,8 +121,7 @@ func _updateProfileWithExtraData(t *testing.T, chain *Blockchain, db *badger.DB, updaterPkBytes, _, err := Base58CheckDecode(updaterPkBase58Check) require.NoError(err) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) txn, totalInputMake, changeAmountMake, feesMake, err := chain.CreateUpdateProfileTxn( updaterPkBytes, @@ -942,8 +940,7 @@ func TestUpdateProfile(t *testing.T) { require.NoError(err) txns = append(txns, currentTxn) txnOps = append(txnOps, currentOps) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) m4ProfileEntry := utxoView.GetProfileEntryForPublicKey(m4PkBytes) require.Equal(len(m4ProfileEntry.ExtraData), 2) require.Equal(m4ProfileEntry.ExtraData["m4extradata"], []byte("hello")) @@ -978,8 +975,7 @@ func TestUpdateProfile(t *testing.T) { require.NoError(err) txns = append(txns, currentTxn) txnOps = append(txnOps, 
currentOps) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) m4ProfileEntry := utxoView.GetProfileEntryForPublicKey(m4PkBytes) require.Equal(len(m4ProfileEntry.ExtraData), 3) require.Equal(m4ProfileEntry.ExtraData["m4extradata"], []byte("update!")) @@ -1007,8 +1003,7 @@ func TestUpdateProfile(t *testing.T) { // user5 // m5Pub, m5_paramUpdater, m5 created by paramUpdater, otherShortPic, 11*100, 1.5*100*100, false checkProfilesExist := func() { - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) profileEntriesByPublicKey, _, _, _, err := utxoView.GetAllProfiles(nil) require.NoError(err) // 3 profiles from seed txns @@ -1062,8 +1057,7 @@ func TestUpdateProfile(t *testing.T) { checkProfilesExist() checkProfilesDeleted := func() { - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) profileEntriesByPublicKey, _, _, _, err := utxoView.GetAllProfiles(nil) require.NoError(err) // 3 remain because of the seed txns @@ -1106,8 +1100,7 @@ func TestUpdateProfile(t *testing.T) { currentTxn := txns[backwardIter] fmt.Printf("Disconnecting transaction with type %v index %d (going backwards)\n", currentTxn.TxnMeta.GetTxnType(), backwardIter) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) currentHash := currentTxn.Hash() err = utxoView.DisconnectTransaction(currentTxn, currentHash, currentOps, savedHeight) @@ -1137,8 +1130,7 @@ func TestUpdateProfile(t *testing.T) { } // Apply all the transactions to a view and flush the view to the db. 
- utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) for ii, txn := range txns { fmt.Printf("Adding txn %v of type %v to UtxoView\n", ii, txn.TxnMeta.GetTxnType()) @@ -1158,8 +1150,7 @@ func TestUpdateProfile(t *testing.T) { // Disonnect the transactions from a single view in the same way as above // i.e. without flushing each time. - utxoView2, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView2 := NewUtxoView(db, params, nil, chain.snapshot, nil) for ii := 0; ii < len(txnOps); ii++ { backwardIter := len(txnOps) - 1 - ii fmt.Printf("Disconnecting transaction with index %d (going backwards)\n", backwardIter) @@ -1185,8 +1176,7 @@ func TestUpdateProfile(t *testing.T) { // Roll back the block and make sure we don't hit any errors. { - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) // Fetch the utxo operations for the block we're detaching. We need these // in order to be able to detach the block. diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 793d4373d..0055aa88d 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1100,13 +1100,9 @@ func (bc *Blockchain) CreateStakeTxn( // Create a new UtxoView. If we have access to a mempool object, use // it to get an augmented view that factors in pending transactions. 
- utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, 0, 0, 0, errors.Wrap( - err, "Blockchain.CreateStakeTxn: problem creating new utxo view: ", - ) - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if !isInterfaceValueNil(mempool) { + var err error utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf( @@ -1117,7 +1113,7 @@ func (bc *Blockchain) CreateStakeTxn( // Validate txn metadata. blockHeight := bc.blockTip().Height + 1 - if err = utxoView.IsValidStakeMetadata(transactorPublicKey, metadata, blockHeight); err != nil { + if err := utxoView.IsValidStakeMetadata(transactorPublicKey, metadata, blockHeight); err != nil { return nil, 0, 0, 0, errors.Wrapf( err, "Blockchain.CreateStakeTxn: invalid txn metadata: ", ) @@ -1177,13 +1173,9 @@ func (bc *Blockchain) CreateUnstakeTxn( // Create a new UtxoView. If we have access to a mempool object, use // it to get an augmented view that factors in pending transactions. - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, 0, 0, 0, errors.Wrap( - err, "Blockchain.CreateUnstakeTxn: problem creating new utxo view: ", - ) - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if !isInterfaceValueNil(mempool) { + var err error utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf( @@ -1193,7 +1185,7 @@ func (bc *Blockchain) CreateUnstakeTxn( } // Validate txn metadata. 
- if err = utxoView.IsValidUnstakeMetadata(transactorPublicKey, metadata); err != nil { + if err := utxoView.IsValidUnstakeMetadata(transactorPublicKey, metadata); err != nil { return nil, 0, 0, 0, errors.Wrapf( err, "Blockchain.CreateUnstakeTxn: invalid txn metadata: ", ) @@ -1253,13 +1245,9 @@ func (bc *Blockchain) CreateUnlockStakeTxn( // Create a new UtxoView. If we have access to a mempool object, use // it to get an augmented view that factors in pending transactions. - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, 0, 0, 0, errors.Wrap( - err, "Blockchain.CreateUnlockStakeTxn: problem creating new utxo view: ", - ) - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if !isInterfaceValueNil(mempool) { + var err error utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf( @@ -1269,7 +1257,7 @@ func (bc *Blockchain) CreateUnlockStakeTxn( } // Validate txn metadata. - if err = utxoView.IsValidUnlockStakeMetadata(transactorPublicKey, metadata); err != nil { + if err := utxoView.IsValidUnlockStakeMetadata(transactorPublicKey, metadata); err != nil { return nil, 0, 0, 0, errors.Wrapf( err, "Blockchain.CreateUnlockStakeTxn: invalid txn metadata: ", ) diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index 7bde74817..d36e0762a 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -76,8 +76,7 @@ func _testStaking(t *testing.T, flushToDB bool) { } // Seed a CurrentEpochEntry. 
- epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + epochUtxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) currentEpochNumber, err := utxoView().GetCurrentEpochNumber() @@ -820,8 +819,7 @@ func TestStakingWithDerivedKey(t *testing.T) { senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID newUtxoView := func() *UtxoView { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) return utxoView } @@ -1763,8 +1761,7 @@ func TestGetLockedStakeEntriesInRange(t *testing.T) { // Initialize test chain and UtxoView. chain, params, db := NewLowDifficultyBlockchain(t) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) blockHeight := uint64(chain.blockTip().Height + 1) m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID @@ -1780,6 +1777,7 @@ func TestGetLockedStakeEntriesInRange(t *testing.T) { require.NoError(t, utxoView.FlushToDb(blockHeight)) // Verify LockedStakeEntry is in the db. 
+ var err error lockedStakeEntry, err = DBGetLockedStakeEntry(db, chain.snapshot, m0PKID, m0PKID, 1) require.NoError(t, err) require.NotNil(t, lockedStakeEntry) @@ -1913,8 +1911,7 @@ func TestStakeLockupEpochDuration(t *testing.T) { m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID newUtxoView := func() *UtxoView { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) return utxoView } @@ -2097,8 +2094,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { require.NoError(t, err) // Jail the validator. - tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + tmpUtxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, tmpUtxoView.JailValidator(validatorEntry)) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 372447b19..2b16849b1 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -720,8 +720,7 @@ func (tes *transactionTestSuite) testDisconnectBlock(tm *transactionTestMeta, te // Disconnect the block on a dummy UtxoView using DisconnectBlock to run all sanity-checks on the block. { - utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, nil, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, nil, nil) txHashes, err := ComputeTransactionHashes(lastBlock.Txns) require.NoError(err) err = utxoView.DisconnectBlock(lastBlock, txHashes, utxoOps, blockHeight) @@ -729,8 +728,7 @@ func (tes *transactionTestSuite) testDisconnectBlock(tm *transactionTestMeta, te } // Disconnect the block transaction by transaction using DisconnectTransaction. 
- utxoView, err := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(tm.db, tm.params, tm.pg, tm.chain.snapshot, nil) for ii := len(lastBlock.Txns) - 1; ii >= 0; ii-- { currentTxn := lastBlock.Txns[ii] txnHash := currentTxn.Hash() @@ -883,8 +881,7 @@ func _doBasicTransferWithViewFlush(t *testing.T, chain *Blockchain, db *badger.D txn := _assembleBasicTransferTxnFullySigned( t, chain, amountNanos, feeRateNanosPerKB, pkSenderStr, pkReceiverStr, privStr, nil) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) // Always use height+1 for validation since it's assumed the transaction will // get mined into the next block. @@ -991,9 +988,7 @@ func _updateGlobalParamsEntryWithMempool(t *testing.T, chain *Blockchain, db *ba // Sign the transaction now that its inputs are set up. _signTxn(t, txn, updaterPrivBase58Check) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - - require.NoError(err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) txHash := txn.Hash() // Always use height+1 for validation since it's assumed the transaction will @@ -1162,11 +1157,10 @@ func _rollBackTestMetaTxnsAndFlush(testMeta *TestMeta) { fmt.Printf( "Disconnecting transaction with type %v index %d (going backwards)\n", currentTxn.TxnMeta.GetTxnType(), backwardIter) - utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) - require.NoError(testMeta.t, err) + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) currentHash := currentTxn.Hash() - err = utxoView.DisconnectTransaction(currentTxn, currentHash, currentOps, testMeta.savedHeight) + err := 
utxoView.DisconnectTransaction(currentTxn, currentHash, currentOps, testMeta.savedHeight) require.NoError(testMeta.t, err) blockHeight := uint64(testMeta.chain.BlockTip().Height) @@ -1200,8 +1194,7 @@ func _applyTestMetaTxnsToMempool(testMeta *TestMeta) { func _applyTestMetaTxnsToViewAndFlush(testMeta *TestMeta) { // Apply all the transactions to a view and flush the view to the db. - utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, testMeta.chain.eventManager) - require.NoError(testMeta.t, err) + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, testMeta.chain.eventManager) for ii, txn := range testMeta.txns { fmt.Printf("Adding txn %v of type %v to UtxoView\n", ii, txn.TxnMeta.GetTxnType()) @@ -1209,7 +1202,7 @@ func _applyTestMetaTxnsToViewAndFlush(testMeta *TestMeta) { // get mined into the next block. txHash := txn.Hash() blockHeight := testMeta.chain.blockTip().Height + 1 - _, _, _, _, err = + _, _, _, _, err := utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.NoError(testMeta.t, err) } @@ -1221,8 +1214,7 @@ func _applyTestMetaTxnsToViewAndFlush(testMeta *TestMeta) { func _disconnectTestMetaTxnsFromViewAndFlush(testMeta *TestMeta) { // Disonnect the transactions from a single view in the same way as above // i.e. without flushing each time. 
- utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, testMeta.chain.eventManager) - require.NoError(testMeta.t, err) + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, testMeta.chain.eventManager) for ii := 0; ii < len(testMeta.txnOps); ii++ { backwardIter := len(testMeta.txnOps) - 1 - ii fmt.Printf("Disconnecting transaction with index %d (going backwards)\n", backwardIter) @@ -1230,7 +1222,7 @@ func _disconnectTestMetaTxnsFromViewAndFlush(testMeta *TestMeta) { currentTxn := testMeta.txns[backwardIter] currentHash := currentTxn.Hash() - err = utxoView.DisconnectTransaction(currentTxn, currentHash, currentOps, testMeta.savedHeight) + err := utxoView.DisconnectTransaction(currentTxn, currentHash, currentOps, testMeta.savedHeight) require.NoError(testMeta.t, err) } blockHeight := uint64(testMeta.chain.BlockTip().Height) @@ -1246,8 +1238,7 @@ func _connectBlockThenDisconnectBlockAndFlush(testMeta *TestMeta) { // Roll back the block and make sure we don't hit any errors. { - utxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, testMeta.chain.eventManager) - require.NoError(testMeta.t, err) + utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, testMeta.chain.eventManager) // Fetch the utxo operations for the block we're detaching. We need these // in order to be able to detach the block. 
@@ -1341,8 +1332,7 @@ func TestUpdateGlobalParams(t *testing.T) { false) require.NoError(err) - utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 utxoOps, totalInput, totalOutput, fees, err := utxoView.ConnectTransaction(updateGlobalParamsTxn, updateGlobalParamsTxn.Hash(), blockHeight, 0, true, false) @@ -1401,8 +1391,7 @@ func TestUpdateGlobalParams(t *testing.T) { require.Equal(DbGetGlobalParamsEntry(db, chain.snapshot), expectedGlobalParams) // Now let's do a disconnect and make sure the values reflect the previous entry. - utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) - require.NoError(err) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) blockHeight := chain.blockTip().Height + 1 utxoView.DisconnectTransaction( updateGlobalParamsTxn, updateGlobalParamsTxn.Hash(), utxoOps, blockHeight) @@ -1467,8 +1456,7 @@ func TestUpdateGlobalParams(t *testing.T) { require.Contains(err.Error(), TxErrorNonceExpired) // Now let's do a disconnect and make sure the values reflect the previous entry. - utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, nil) blockHeight := chain.blockTip().Height + 1 utxoView.DisconnectTransaction( updateGlobalParamsTxn, updateGlobalParamsTxn.Hash(), utxoOps, blockHeight) @@ -1574,8 +1562,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { true, mempool) require.NoError(err) - utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, nil) require.Equal(utxoView.GetCurrentGlobalParamsEntry().MaxBlockSizeBytesPoS, uint64(5000)) } // SoftMaxBlockSizeBytesPoS tests. 
@@ -1613,8 +1600,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { mempool) require.ErrorIs(err, RuleErrorSoftMaxBlockSizeBytesTooHigh) // Make sure setting soft max block size to a value greater than max block size fails. - utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, nil) _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, moneyPkString, moneyPrivString, @@ -1646,8 +1632,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { true, mempool) require.NoError(err) - utxoView, err = NewUtxoView(db, params, postgres, chain.snapshot, nil) - require.NoError(err) + utxoView = NewUtxoView(db, params, postgres, chain.snapshot, nil) require.Equal(utxoView.GetCurrentGlobalParamsEntry().SoftMaxBlockSizeBytesPoS, uint64(4000)) } // MaxTxnSizeBytesPoS tests. @@ -1685,8 +1670,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { mempool) require.ErrorIs(err, RuleErrorMaxTxnSizeBytesTooHigh) // Make sure setting max txn size to a value greater than max block size fails. - utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, nil) _, _, _, err = _updateGlobalParamsEntryWithMempool(t, chain, db, params, 1000, moneyPkString, moneyPrivString, @@ -1718,15 +1702,13 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { true, mempool) require.NoError(err) - utxoView, err = NewUtxoView(db, params, postgres, chain.snapshot, nil) - require.NoError(err) + utxoView = NewUtxoView(db, params, postgres, chain.snapshot, nil) require.Equal(utxoView.GetCurrentGlobalParamsEntry().MaxTxnSizeBytesPoS, uint64(4000)) } // MinFeeBucket size tests. 
{ - utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, nil) require.Equal(utxoView.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB, uint64(1000)) require.Equal(utxoView.GetCurrentGlobalParamsEntry().FeeBucketGrowthRateBasisPoints, uint64(1000)) // Make sure setting min fee bucket size too low fails. 1% of 1000 is 10, so anything less than that @@ -1813,8 +1795,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { true, mempool) require.NoError(err) - utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, nil) require.Equal(utxoView.GetCurrentGlobalParamsEntry().BlockProductionIntervalMillisecondsPoS, uint64(5000)) } { @@ -1867,8 +1848,7 @@ func TestUpdateGlobalParamsPoS(t *testing.T) { true, mempool) require.NoError(err) - utxoView, err := NewUtxoView(db, params, postgres, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, nil) require.Equal(utxoView.GetCurrentGlobalParamsEntry().TimeoutIntervalMillisecondsPoS, uint64(5000)) } } @@ -1929,7 +1909,7 @@ func TestBasicTransfer(t *testing.T) { txn.PublicKey = recipientPkBytes _signTxn(t, txn, recipientPrivString) - utxoView, _ := NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = @@ -1958,7 +1938,7 @@ func TestBasicTransfer(t *testing.T) { } blockHeight := chain.blockTip().Height + 1 - utxoView, _ := NewUtxoView(db, params, postgres, chain.snapshot, nil) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, nil) if blockHeight < params.ForkHeights.BalanceModelBlockHeight { totalInput, spendAmount, changeAmount, fees, err := 
chain.AddInputsAndChangeToTransaction(txn, 10, nil) @@ -1997,7 +1977,7 @@ func TestBasicTransfer(t *testing.T) { }, } _signTxn(t, txn, senderPrivString) - utxoView, _ := NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = @@ -2031,7 +2011,7 @@ func TestBasicTransfer(t *testing.T) { require.Greater(totalInput, uint64(0)) _signTxn(t, txn, senderPrivString) - utxoView, _ := NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) txHash := txn.Hash() blockHeight := chain.blockTip().Height + 1 _, _, _, _, err = @@ -2060,7 +2040,7 @@ func TestBasicTransfer(t *testing.T) { txHashes, err := ComputeTransactionHashes(blockToMine.Txns) require.NoError(err) - utxoView, _ := NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) + utxoView := NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) _, err = utxoView.ConnectBlock(blockToMine, txHashes, true /*verifySignatures*/, nil, 0) require.Error(err) require.Contains(err.Error(), RuleErrorBlockRewardExceedsMaxAllowed) @@ -2069,7 +2049,7 @@ func TestBasicTransfer(t *testing.T) { // A block with less than the max block reward should be OK. 
{ - utxoView, _ := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) minerBalanceBefore, _ := utxoView.GetDeSoBalanceNanosForPublicKey(senderPkBytes) blockToMine.Txns[0].TxOutputs[0].AmountNanos = allowedBlockReward - 1 @@ -2080,7 +2060,7 @@ func TestBasicTransfer(t *testing.T) { txHashes, err := ComputeTransactionHashes(blockToMine.Txns) require.NoError(err) - utxoView, _ = NewUtxoView(db, params, postgres, chain.snapshot, nil) + utxoView = NewUtxoView(db, params, postgres, chain.snapshot, nil) _, err = utxoView.ConnectBlock(blockToMine, txHashes, true /*verifySignatures*/, nil, 0) require.NoError(err) @@ -2121,7 +2101,7 @@ func TestBasicTransfer(t *testing.T) { require.Greater(totalInput, uint64(0)) _signTxn(t, txn, senderPrivString) - utxoView, _ := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) txHash := txn.Hash() _, _, _, _, err = utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) @@ -2556,7 +2536,7 @@ func TestBlockRewardPatch(t *testing.T) { txHashes, err := ComputeTransactionHashes(blkToMine.Txns) require.NoError(t, err) blkToMine.Header.Nonce = bestNonce - utxoView, _ := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) _, err = utxoView.ConnectBlock(blkToMine, txHashes, true, nil, uint64(chain.blockTip().Height+1)) require.Error(t, err) require.Contains(t, err.Error(), RuleErrorBlockRewardTxnMustHaveOneOutput) @@ -2590,8 +2570,7 @@ func TestBlockRewardPatch(t *testing.T) { chain.AddInputsAndChangeToTransaction(txn, testMeta.feeRateNanosPerKb, nil) require.NoError(t, err) _signTxn(t, txn, senderPrivString) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, 
nil) _, _, _, fees, err := utxoView._connectTransaction(txn, txn.Hash(), chain.blockTip().Height+1, 0, true, false) require.NoError(t, err) blkToMine, _, _, err := miner._getBlockToMine(0) @@ -2605,13 +2584,11 @@ func TestBlockRewardPatch(t *testing.T) { txHashes, err := ComputeTransactionHashes(blkToMine.Txns) require.NoError(t, err) blkToMine.Header.Nonce = bestNonce - utxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(t, err) + utxoView = NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) _, err = utxoView.ConnectBlock(blkToMine, txHashes, true, nil, uint64(chain.blockTip().Height+1)) require.Contains(t, err.Error(), RuleErrorBlockRewardExceedsMaxAllowed) - utxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) - require.NoError(t, err) + utxoView = NewUtxoView(db, params, chain.postgres, chain.snapshot, nil) // Reduce fees and try again, should succeed. blkToMine.Txns[0].TxOutputs[0].AmountNanos -= fees _, err = utxoView.ConnectBlock(blkToMine, txHashes, true, nil, uint64(chain.blockTip().Height+1)) diff --git a/lib/block_view_types_test.go b/lib/block_view_types_test.go index d55b43160..4a27dc019 100644 --- a/lib/block_view_types_test.go +++ b/lib/block_view_types_test.go @@ -351,8 +351,7 @@ func TestUtxoEntryEncodeDecode(t *testing.T) { // in the middle. utxoOpsList := [][]*UtxoOperation{} { - utxoView, err := NewUtxoView(db, paramsCopy, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, paramsCopy, nil, chain.snapshot, nil) // Add a placeholder where the rate update is going to be fff := append([]*MsgDeSoTxn{}, bitcoinExchangeTxns[:rateUpdateIndex]...) 
@@ -396,6 +395,7 @@ func TestUtxoEntryEncodeDecode(t *testing.T) { utxoOpsList = append(utxoOpsList, utxoOps) } utxoEntries, err := chain.GetSpendableUtxosForPublicKey(pkBytes1, nil, utxoView) + require.NoError(err) for _, entry := range utxoEntries { entryBytes := EncodeToBytes(0, entry) newEntry := &UtxoEntry{} diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 6ee17feaf..238ecb255 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1004,13 +1004,9 @@ func (bc *Blockchain) CreateRegisterAsValidatorTxn( // Create a new UtxoView. If we have access to a mempool object, use // it to get an augmented view that factors in pending transactions. - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, 0, 0, 0, errors.Wrap( - err, "Blockchain.CreateRegisterAsValidatorTxn: problem creating new utxo view: ", - ) - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if !isInterfaceValueNil(mempool) { + var err error utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf( @@ -1021,7 +1017,7 @@ func (bc *Blockchain) CreateRegisterAsValidatorTxn( // Validate txn metadata. blockHeight := uint64(bc.blockTip().Height) + 1 - if err = utxoView.IsValidRegisterAsValidatorMetadata(transactorPublicKey, metadata, blockHeight); err != nil { + if err := utxoView.IsValidRegisterAsValidatorMetadata(transactorPublicKey, metadata, blockHeight); err != nil { return nil, 0, 0, 0, errors.Wrapf( err, "Blockchain.CreateRegisterAsValidatorTxn: invalid txn metadata: ", ) @@ -1081,13 +1077,9 @@ func (bc *Blockchain) CreateUnregisterAsValidatorTxn( // Create a new UtxoView. If we have access to a mempool object, use // it to get an augmented view that factors in pending transactions. 
- utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, 0, 0, 0, errors.Wrap( - err, "Blockchain.CreateUnregisterAsValidatorTxn: problem creating new utxo view: ", - ) - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if !isInterfaceValueNil(mempool) { + var err error utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf( @@ -1097,7 +1089,7 @@ func (bc *Blockchain) CreateUnregisterAsValidatorTxn( } // Validate txn metadata. - if err = utxoView.IsValidUnregisterAsValidatorMetadata(transactorPublicKey); err != nil { + if err := utxoView.IsValidUnregisterAsValidatorMetadata(transactorPublicKey); err != nil { return nil, 0, 0, 0, errors.Wrapf( err, "Blockchain.CreateUnregisterAsValidatorTxn: invalid txn metadata: ", ) @@ -1157,13 +1149,9 @@ func (bc *Blockchain) CreateUnjailValidatorTxn( // Create a new UtxoView. If we have access to a mempool object, use // it to get an augmented view that factors in pending transactions. - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, 0, 0, 0, errors.Wrap( - err, "Blockchain.CreateUnjailValidatorTxn: problem creating new utxo view: ", - ) - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if !isInterfaceValueNil(mempool) { + var err error utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { return nil, 0, 0, 0, errors.Wrapf( @@ -1173,7 +1161,7 @@ func (bc *Blockchain) CreateUnjailValidatorTxn( } // Validate txn metadata. 
- if err = utxoView.IsValidUnjailValidatorMetadata(transactorPublicKey); err != nil { + if err := utxoView.IsValidUnjailValidatorMetadata(transactorPublicKey); err != nil { return nil, 0, 0, 0, errors.Wrapf( err, "Blockchain.CreateUnjailValidatorTxn: invalid txn metadata: ", ) diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 517302916..979766d53 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -489,8 +489,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID newUtxoView := func() *UtxoView { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) return utxoView } @@ -626,8 +625,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { require.NoError(t, err) // Validate the ValidatorEntry exists. - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) validatorEntry, err := utxoView.GetValidatorByPKID(senderPKID) require.NoError(t, err) require.NotNil(t, validatorEntry) @@ -664,8 +662,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { require.NoError(t, err) // Validate the ValidatorEntry no longer exists. - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) validatorEntry, err := utxoView.GetValidatorByPKID(senderPKID) require.NoError(t, err) require.Nil(t, validatorEntry) @@ -959,8 +956,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { // Initialize test chain and UtxoView. 
chain, params, db := NewLowDifficultyBlockchain(t) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) blockHeight := uint64(chain.blockTip().Height + 1) // m0 will be stored in the db with Stake=100. @@ -988,6 +984,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.NoError(t, utxoView.FlushToDb(blockHeight)) // Verify m0 is stored in the db. + var err error validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m0PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) @@ -1559,8 +1556,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID // Seed a CurrentEpochEntry. - epochUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + epochUtxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) epochUtxoView._setCurrentEpochEntry(&EpochEntry{EpochNumber: 1, FinalBlockHeight: blockHeight + 10}) require.NoError(t, epochUtxoView.FlushToDb(blockHeight)) currentEpochNumber, err := utxoView().GetCurrentEpochNumber() @@ -1606,8 +1602,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // trying to unjail himself, but he was never jailed. // Jail m0. - tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + tmpUtxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, tmpUtxoView.JailValidator(validatorEntry)) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) @@ -1671,8 +1666,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { mempool.readOnlyUtxoView.CurrentEpochEntry = nil // Store a new CurrentEpochEntry in the db. 
- epochUtxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + epochUtxoView = NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) epochUtxoView._setCurrentEpochEntry( &EpochEntry{EpochNumber: currentEpochNumber + 3, FinalBlockHeight: blockHeight + 10}, ) @@ -1767,8 +1761,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID newUtxoView := func() *UtxoView { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) return utxoView } @@ -1892,8 +1885,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { // trying to unjail himself, but he was never jailed. // Jail the sender. - tmpUtxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + tmpUtxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) require.NoError(t, tmpUtxoView.JailValidator(validatorEntry)) require.NoError(t, tmpUtxoView.FlushToDb(blockHeight)) @@ -1926,8 +1918,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { mempool.readOnlyUtxoView.CurrentEpochEntry = nil // Store a new CurrentEpochEntry in the db. 
- epochUtxoView, err = NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + epochUtxoView = NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) epochUtxoView._setCurrentEpochEntry( &EpochEntry{EpochNumber: currentEpochNumber + 3, FinalBlockHeight: blockHeight + 10}, ) diff --git a/lib/blockchain.go b/lib/blockchain.go index 262624f9a..e1c80ce83 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -2463,12 +2463,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // almost certainly be more efficient than doing a separate db call for each input // and output. if bc.blockView == nil { - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return false, false, errors.Wrapf(err, "ProcessBlock: Problem initializing UtxoView in simple connect to tip") - } - - bc.blockView = utxoView + bc.blockView = NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) } // Preload the view with almost all of the data it will need to connect the block @@ -2664,7 +2659,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // the txns to account for txns that spend previous txns in the block, but it would // almost certainly be more efficient than doing a separate db call for each input // and output - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if err != nil { return false, false, errors.Wrapf(err, "processblock: Problem initializing UtxoView in reorg") } @@ -2940,11 +2935,10 @@ func (bc *Blockchain) ValidateTransaction( // Create a new UtxoView. If we have access to a mempool object, use it to // get an augmented view that factors in pending transactions. 
- utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return errors.Wrapf(err, "ValidateTransaction: Problem Problem creating new utxo view: ") - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) + if !isInterfaceValueNil(mempool) { + var err error utxoView, err = mempool.GetAugmentedUtxoViewForPublicKey(txnMsg.PublicKey, txnMsg) if err != nil { return errors.Wrapf(err, "ValidateTransaction: Problem getting augmented UtxoView from mempool: ") @@ -2954,7 +2948,7 @@ func (bc *Blockchain) ValidateTransaction( // Hash the transaction. txHash := txnMsg.Hash() // We don't care about the utxoOps or the fee it returns. - _, _, _, _, err = utxoView._connectTransaction( + _, _, _, _, err := utxoView._connectTransaction( txnMsg, txHash, blockHeight, time.Now().UnixNano(), verifySignatures, false, ) if err != nil { @@ -3041,16 +3035,14 @@ func ComputeMerkleRoot(txns []*MsgDeSoTxn) (_merkle *BlockHash, _txHashes []*Blo func (bc *Blockchain) GetSpendableUtxosForPublicKey(spendPublicKeyBytes []byte, mempool Mempool, referenceUtxoView *UtxoView) ([]*UtxoEntry, error) { // If we have access to a mempool, use it to account for utxos we might not // get otherwise. - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, errors.Wrapf(err, "Blockchain.GetSpendableUtxosForPublicKey: Problem initializing UtxoView: ") - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) // Use the reference UtxoView if provided. Otherwise try to get one from the mempool. // This improves efficiency when we have a UtxoView already handy. 
if referenceUtxoView != nil { utxoView = referenceUtxoView } else { if !isInterfaceValueNil(mempool) { + var err error utxoView, err = mempool.GetAugmentedUtxoViewForPublicKey(spendPublicKeyBytes, nil) if err != nil { return nil, errors.Wrapf(err, "Blockchain.GetSpendableUtxosForPublicKey: Problem getting augmented UtxoView from mempool: ") @@ -3823,11 +3815,8 @@ func (bc *Blockchain) CreateDAOCoinLimitOrderTxn( // Create a new UtxoView. If we have access to a mempool object, use it to // get an augmented view that factors in pending transactions. - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, 0, 0, 0, errors.Wrapf(err, - "Blockchain.CreateDAOCoinLimitOrderTxn: Problem creating new utxo view: ") - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) + var err error if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { @@ -4162,7 +4151,7 @@ func (bc *Blockchain) CreateNFTBidTxn( "CreateNFTBidTxn: Problem getting augmented universal view: ") } } else { - utxoView, err = NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) + utxoView = NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, "CreateNFTBidTxn: Problem creating new utxo view: ") @@ -4333,11 +4322,8 @@ func (bc *Blockchain) CreateAcceptNFTBidTxn( // Create a new UtxoView. If we have access to a mempool object, use it to // get an augmented view that factors in pending transactions. 
- utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, 0, 0, 0, errors.Wrapf(err, - "Blockchain.CreateAcceptNFTBidTxn: Problem creating new utxo view: ") - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) + var err error if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { @@ -4645,12 +4631,8 @@ func (bc *Blockchain) CreateCreatorCoinTransferTxnWithDiamonds( // Create a new UtxoView. If we have access to a mempool object, use it to // get an augmented view that factors in pending transactions. - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, 0, 0, 0, errors.Wrapf(err, - "Blockchain.CreateCreatorCoinTransferTxnWithDiamonds: "+ - "Problem creating new utxo view: ") - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) + var err error if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { @@ -4873,12 +4855,8 @@ func (bc *Blockchain) CreateBasicTransferTxnWithDiamonds( // Create a new UtxoView. If we have access to a mempool object, use it to // get an augmented view that factors in pending transactions. 
- utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, 0, 0, 0, 0, errors.Wrapf(err, - "Blockchain.CreateBasicTransferTxnWithDiamonds: "+ - "Problem creating new utxo view: ") - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) + var err error if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { @@ -4985,7 +4963,7 @@ func (bc *Blockchain) CreateMaxSpend( "Blockchain.CreateMaxSpend: Problem getting augmented UtxoView from mempool: ") } } else { - utxoView, err = NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) + utxoView = NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, "Blockchain.CreateMaxSpend: Problem getting UtxoView: ") @@ -5132,11 +5110,8 @@ func (bc *Blockchain) AddInputsAndChangeToTransactionWithSubsidy( txArg.TxnVersion = 1 - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return 0, 0, 0, 0, errors.Wrapf(err, - "AddInputsAndChangeToTransaction: Problem getting UtxoView: ") - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) + var err error txArg.TxnNonce, err = utxoView.ConstructNonceForPublicKey(txArg.PublicKey, uint64(blockHeight)) if err != nil { return 0, 0, 0, 0, errors.Wrapf(err, @@ -5339,10 +5314,7 @@ func (bc *Blockchain) EstimateDefaultFeeRateNanosPerKB( // If the block is more than X% full, use the maximum between the min // fee rate and the median fees of all the transactions in the block. 
- utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return minFeeRateNanosPerKB - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) utxoOps, err := GetUtxoOperationsForBlock(bc.db, bc.snapshot, tipNode.Hash) if err != nil { return minFeeRateNanosPerKB @@ -5525,12 +5497,8 @@ func (bc *Blockchain) _createAssociationTxn( ) { // Create a new UtxoView. If we have access to a mempool object, use // it to get an augmented view that factors in pending transactions. - utxoView, err := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, 0, 0, 0, fmt.Errorf( - "%s: problem creating new utxo view: %v", callingFuncName, err, - ) - } + utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) + var err error if !isInterfaceValueNil(mempool) { utxoView, err = mempool.GetAugmentedUniversalView() if err != nil { diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index c55e40d1b..67b63d9f8 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -379,8 +379,7 @@ func _getBalance(t *testing.T, chain *Blockchain, mempool *DeSoMempool, pkStr st utxoView, err = mempool.GetAugmentedUniversalView() require.NoError(t, err) } else { - utxoView, err = NewUtxoView(chain.db, chain.params, chain.postgres, chain.snapshot, nil) - require.NoError(t, err) + utxoView = NewUtxoView(chain.db, chain.params, chain.postgres, chain.snapshot, nil) } balanceNanos, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( @@ -414,7 +413,7 @@ func _getCreatorCoinInfo(t *testing.T, chain *Blockchain, params *DeSoParams, pk pkBytes, _, err := Base58CheckDecode(pkStr) require.NoError(t, err) - utxoView, _ := NewUtxoView(chain.db, params, nil, chain.snapshot, chain.eventManager) + utxoView := NewUtxoView(chain.db, params, nil, chain.snapshot, chain.eventManager) // Profile fields creatorProfile := 
utxoView.GetProfileEntryForPublicKey(pkBytes) diff --git a/lib/db_utils.go b/lib/db_utils.go index e85563681..698727011 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -5339,11 +5339,7 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, // think things are initialized because we set the best block hash at the // top. We should fix this at some point so that an error in this step // wipes out the best hash. - utxoView, err := NewUtxoView(handle, params, postgres, snap, eventManager) - if err != nil { - return fmt.Errorf( - "InitDbWithDeSoGenesisBlock: Error initializing UtxoView") - } + utxoView := NewUtxoView(handle, params, postgres, snap, eventManager) // Add the seed balances to the view. for index, txOutput := range params.SeedBalances { @@ -5361,7 +5357,7 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, UtxoKey: &outputKey, } - if _, err = utxoView._addDESO(txOutput.AmountNanos, txOutput.PublicKey, &utxoEntry, 0); err != nil { + if _, err := utxoView._addDESO(txOutput.AmountNanos, txOutput.PublicKey, &utxoEntry, 0); err != nil { return fmt.Errorf("InitDbWithDeSoGenesisBlock: Error adding "+ "seed balance at index %v ; output: %v: %v", index, txOutput, err) } @@ -5409,7 +5405,7 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, }) } // Flush all the data in the view. - err = utxoView.FlushToDb(0) + err := utxoView.FlushToDb(0) if err != nil { return fmt.Errorf( "InitDbWithDeSoGenesisBlock: Error flushing seed txns to DB: %v", err) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index e240048aa..1c304d550 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -979,11 +979,7 @@ func (mp *DeSoMempool) _quickCheckBitcoinExchangeTxn( // Note that it is safe to use this because we expect that the blockchain // lock is held for the duration of this function call so there shouldn't // be any shifting of the db happening beneath our fee. 
- utxoView, err := NewUtxoView(mp.bc.db, mp.bc.params, mp.bc.postgres, mp.bc.snapshot, mp.bc.eventManager) - if err != nil { - return 0, errors.Wrapf(err, - "_helpConnectDepsAndFinalTxn: Problem initializing UtxoView") - } + utxoView := NewUtxoView(mp.bc.db, mp.bc.params, mp.bc.postgres, mp.bc.snapshot, mp.bc.eventManager) // Connnect all of this transaction's dependencies to the UtxoView in order. Note // that we can do this because _findMempoolDependencies returns the transactions in @@ -2740,9 +2736,9 @@ func NewDeSoMempool(_bc *Blockchain, _rateLimitFeerateNanosPerKB uint64, _minFeerateNanosPerKB uint64, _blockCypherAPIKey string, _runReadOnlyViewUpdater bool, _dataDir string, _mempoolDumpDir string, useDefaultBadgerOptions bool) *DeSoMempool { - utxoView, _ := NewUtxoView(_bc.db, _bc.params, _bc.postgres, _bc.snapshot, _bc.eventManager) - backupUtxoView, _ := NewUtxoView(_bc.db, _bc.params, _bc.postgres, _bc.snapshot, _bc.eventManager) - readOnlyUtxoView, _ := NewUtxoView(_bc.db, _bc.params, _bc.postgres, _bc.snapshot, _bc.eventManager) + utxoView := NewUtxoView(_bc.db, _bc.params, _bc.postgres, _bc.snapshot, _bc.eventManager) + backupUtxoView := NewUtxoView(_bc.db, _bc.params, _bc.postgres, _bc.snapshot, _bc.eventManager) + readOnlyUtxoView := NewUtxoView(_bc.db, _bc.params, _bc.postgres, _bc.snapshot, _bc.eventManager) newPool := &DeSoMempool{ quit: make(chan struct{}), bc: _bc, diff --git a/lib/load_test.go b/lib/load_test.go index 8c8f95f8d..2092ad91c 100644 --- a/lib/load_test.go +++ b/lib/load_test.go @@ -129,8 +129,7 @@ func TestComputeMaxTPS(t *testing.T) { require.NoError(err) pprof.StartCPUProfile(ff) - utxoView, err := NewUtxoView(db, params, nil, chain.snapshot, nil) - require.NoError(err) + utxoView := NewUtxoView(db, params, nil, chain.snapshot, nil) timeStart := time.Now() for _, tx := range txns { diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 95f5851f4..34007227b 100644 --- a/lib/pos_block_producer_test.go 
+++ b/lib/pos_block_producer_test.go @@ -27,8 +27,7 @@ func TestCreateBlockTemplate(t *testing.T) { params, db := _posTestBlockchainSetupWithBalances(t, 200000, 200000) params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 - latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) - require.NoError(err) + latestBlockView := NewUtxoView(db, params, nil, nil, nil) dir := _dbDirSetup(t) mempool := NewPosMempool() @@ -103,8 +102,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { maxMempoolPosSizeBytes := uint64(3000000000) mempoolBackupIntervalMillis := uint64(30000) - latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) - require.NoError(err) + latestBlockView := NewUtxoView(db, params, nil, nil, nil) dir := _dbDirSetup(t) mempool := NewPosMempool() @@ -198,8 +196,7 @@ func TestGetBlockTransactions(t *testing.T) { m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) - latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) - require.NoError(err) + latestBlockView := NewUtxoView(db, params, nil, nil, nil) dir := _dbDirSetup(t) mempool := NewPosMempool() diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 495b31076..4fa791439 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1800,7 +1800,7 @@ func (bc *Blockchain) GetUncommittedBlocks(tipHash *BlockHash) ([]*BlockNode, er // GetCommittedTipView builds a UtxoView to the committed tip. 
func (bc *Blockchain) GetCommittedTipView() (*UtxoView, error) { - return NewUtxoViewWithSnapshotCache(bc.db, bc.params, bc.postgres, bc.snapshot, nil, bc.snapshotCache) + return NewUtxoViewWithSnapshotCache(bc.db, bc.params, bc.postgres, bc.snapshot, nil, bc.snapshotCache), nil } // BlockViewAndUtxoOps is a struct that contains a UtxoView and the UtxoOperations @@ -1908,11 +1908,8 @@ func (bc *Blockchain) getUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( return viewAndUtxoOpsCopy, nil } // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks - utxoView, err := NewUtxoViewWithSnapshotCache(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager, + utxoView := NewUtxoViewWithSnapshotCache(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager, bc.snapshotCache) - if err != nil { - return nil, errors.Wrapf(err, "getUtxoViewAndUtxoOpsAtBlockHash: Problem initializing UtxoView") - } // TODO: there's another performance enhancement we can make here. If we have a view in the // cache for one of the ancestors, we can skip fetching the block and connecting it by taking // a copy of it and replacing the existing view. 
diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index b018dbf5c..38b90ad73 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2839,8 +2839,7 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { require.NoError(t, err) oldPool.Stop() miner.Stop() - latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) - require.NoError(t, err) + latestBlockView := NewUtxoView(db, params, nil, nil, nil) maxMempoolPosSizeBytes := uint64(1024 * 1024 * 1000) mempoolBackupIntervalMillis := uint64(30000) diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 4150b4244..42b56012c 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -25,10 +25,10 @@ func TestIsLastBlockInCurrentEpoch(t *testing.T) { GlobalDeSoParams.EncoderMigrationHeights = GetEncoderMigrationHeights(¶ms.ForkHeights) GlobalDeSoParams.EncoderMigrationHeightsList = GetEncoderMigrationHeightsList(¶ms.ForkHeights) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) // The BlockHeight is before the PoS snapshotting fork height. 
+ var err error isLastBlockInCurrentEpoch, err = utxoView.IsLastBlockInCurrentEpoch(0) require.NoError(t, err) require.False(t, isLastBlockInCurrentEpoch) @@ -928,8 +928,7 @@ func _stakeToValidator( } func _newUtxoView(testMeta *TestMeta) *UtxoView { - newUtxoView, err := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, testMeta.chain.eventManager) - require.NoError(testMeta.t, err) + newUtxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, testMeta.chain.eventManager) return newUtxoView } diff --git a/lib/pos_epoch_test.go b/lib/pos_epoch_test.go index a8e6afc8a..817e82420 100644 --- a/lib/pos_epoch_test.go +++ b/lib/pos_epoch_test.go @@ -14,8 +14,7 @@ func TestCurrentEpoch(t *testing.T) { chain, params, db := NewLowDifficultyBlockchain(t) blockHeight := uint64(chain.blockTip().Height) + 1 blockTimestampNanoSecs := chain.blockTip().Header.TstampNanoSecs + 1e9 - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) // Test that the CurrentEpoch is nil in the db. 
epochEntry, err = DBGetCurrentEpochEntry(db, utxoView.Snapshot) diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index b61e93580..12c37799f 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -19,12 +19,11 @@ func TestFeeEstimator(t *testing.T) { m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) - latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) - require.NoError(t, err) + latestBlockView := NewUtxoView(db, params, nil, nil, nil) dir := _dbDirSetup(t) mempool := NewPosMempool() - err = mempool.Init( + err := mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, nil, 1, 10000, 100, ) diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 52ea92d07..f0dae661d 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -60,8 +60,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { // Helper utils newUtxoView := func() *UtxoView { - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) return utxoView } diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 6b0c9eb6d..1d7b38ed8 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -47,8 +47,7 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { params, db := _posTestBlockchainSetup(t) m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) - latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) - require.NoError(err) + latestBlockView := NewUtxoView(db, params, nil, nil, nil) dir := _dbDirSetup(t) mempool := NewPosMempool() @@ -102,8 +101,7 @@ func TestPosMempoolPrune(t *testing.T) { m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := 
Base58CheckDecode(m1Pub) - latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) - require.NoError(err) + latestBlockView := NewUtxoView(db, params, nil, nil, nil) dir := _dbDirSetup(t) mempool := NewPosMempool() @@ -200,8 +198,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) - latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) - require.NoError(err) + latestBlockView := NewUtxoView(db, params, nil, nil, nil) dir := _dbDirSetup(t) mempool := NewPosMempool() @@ -262,8 +259,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) - latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) - require.NoError(err) + latestBlockView := NewUtxoView(db, params, nil, nil, nil) dir := _dbDirSetup(t) mempool := NewPosMempool() @@ -302,7 +298,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { _signTxn(t, txn2Low, m1Priv) added2Low := time.Now() mtxn2Low := NewMempoolTransaction(txn2Low, added2Low, false) - err = mempool.AddTransaction(mtxn2Low) + err := mempool.AddTransaction(mtxn2Low) require.Contains(err.Error(), MempoolFailedReplaceByHigherFee) // Now generate a proper new transaction for m1, with same nonce, and higher fee. 
@@ -341,8 +337,7 @@ func TestPosMempoolTransactionValidation(t *testing.T) { params, db := _posTestBlockchainSetup(t) m0PubBytes, _, _ := Base58CheckDecode(m0Pub) m1PubBytes, _, _ := Base58CheckDecode(m1Pub) - latestBlockView, err := NewUtxoView(db, params, nil, nil, nil) - require.NoError(t, err) + latestBlockView := NewUtxoView(db, params, nil, nil, nil) dir := _dbDirSetup(t) mempool := NewPosMempool() diff --git a/lib/pos_random_seed_test.go b/lib/pos_random_seed_test.go index 61cb7b25a..3dcd8aa5e 100644 --- a/lib/pos_random_seed_test.go +++ b/lib/pos_random_seed_test.go @@ -10,8 +10,7 @@ import ( func TestCurrentRandomSeedHash(t *testing.T) { chain, params, db := NewLowDifficultyBlockchain(t) - utxoView, err := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - require.NoError(t, err) + utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) blockHeight := uint64(0) // Generate two BLS public + private key pairs. diff --git a/lib/pos_snapshot_cache.go b/lib/pos_snapshot_cache.go index d07470eec..f320e99b0 100644 --- a/lib/pos_snapshot_cache.go +++ b/lib/pos_snapshot_cache.go @@ -90,10 +90,7 @@ func (sc *SnapshotCache) GetSnapshotValidatorSetEntriesByStakeAtEpochNumber( return validatorEntries, nil } // If not found in the cache, create a new view and use it to fetch the validator entries. - tempView, err := NewUtxoView(handle, params, nil, snapshot, nil) - if err != nil { - return nil, errors.Wrap(err, "sc.GetSnapshotLeaderSchedule: Error creating new UtxoView: ") - } + tempView := NewUtxoView(handle, params, nil, snapshot, nil) // Get the snapshot global params so we know how many validators to fetch. snapshotGlobalParams, err := sc.GetSnapshotGlobalParams(snapshotAtEpochNumber, handle, snapshot, params) if err != nil { @@ -137,10 +134,8 @@ func (sc *SnapshotCache) GetSnapshotGlobalParams( return globalParams, nil } // If not found in the cache, create a new view and use it to fetch the global params. 
- tempView, err := NewUtxoView(handle, params, nil, snapshot, nil) - if err != nil { - return nil, errors.Wrap(err, "sc.GetSnapshotLeaderSchedule: Error creating new UtxoView: ") - } + tempView := NewUtxoView(handle, params, nil, snapshot, nil) + var err error globalParams, err = tempView.GetSnapshotGlobalParamsEntryByEpochNumber(snapshotAtEpoch) if err != nil { return nil, errors.Wrap(err, "sc.GetSnapshotGlobalParams: Error fetching global params: ") @@ -176,10 +171,7 @@ func (sc *SnapshotCache) GetSnapshotLeaderSchedule( } // Fetch the leader schedule for the snapshot epoch number - tempView, err := NewUtxoView(handle, params, nil, snapshot, nil) - if err != nil { - return nil, errors.Wrap(err, "sc.GetSnapshotLeaderSchedule: Error creating new UtxoView: ") - } + tempView := NewUtxoView(handle, params, nil, snapshot, nil) leaderPKIDs, err := tempView.GetSnapshotLeaderScheduleAtEpochNumber(snapshotAtEpoch) if err != nil { return nil, errors.Wrap(err, "sc.GetSnapshotLeaderSchedule: Error fetching leader schedule: ") diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 0bf81bc29..fa6ede201 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -672,7 +672,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser err = mempoolUtxoView.FlushToDbWithTxn(txn, uint64(server.blockchain.bestChain[len(server.blockchain.bestChain)-1].Height)) - mempoolTxUtxoView, err := NewUtxoView(server.blockchain.db, server.blockchain.params, server.blockchain.postgres, nil, &mempoolEventManager) + mempoolTxUtxoView := NewUtxoView(server.blockchain.db, server.blockchain.params, server.blockchain.postgres, nil, &mempoolEventManager) if err != nil { return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer: CreateMempoolTxUtxoView: ") } diff --git a/lib/txindex.go b/lib/txindex.go index 9009e67dd..7a2ef725e 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -324,7 +324,7 @@ func (txi *TXIndex) Update() 
error { // Now that all the transactions have been deleted from our txindex, // it's safe to disconnect the block from our txindex chain. - utxoView, err := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager) + utxoView := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager) if err != nil { return fmt.Errorf( "Update: Error initializing UtxoView: %v", err) @@ -408,7 +408,7 @@ func (txi *TXIndex) Update() error { // us to extract custom metadata fields that we can show in our block explorer. // // Only set a BitcoinManager if we have one. This makes some tests pass. - utxoView, err := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager) + utxoView := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager) if err != nil { return fmt.Errorf( "Update: Error initializing UtxoView: %v", err) From 4925bb6dc51d2af96bc14991a8c0a172366e032e Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 11 Apr 2024 20:12:34 -0400 Subject: [PATCH 628/762] CopyUtxoView doesn't return an error (#1198) --- lib/block_producer.go | 5 +---- lib/block_view.go | 10 +++------ lib/block_view_profile_test.go | 3 +-- lib/legacy_mempool.go | 17 +++------------- lib/pos_block_producer.go | 10 ++------- lib/pos_block_producer_test.go | 6 ++---- lib/pos_blockchain.go | 34 +++++++++---------------------- lib/pos_consensus.go | 10 ++------- lib/pos_mempool.go | 37 +++++++--------------------------- lib/state_change_syncer.go | 5 +---- 10 files changed, 31 insertions(+), 106 deletions(-) diff --git a/lib/block_producer.go b/lib/block_producer.go index b1bad6b74..62bbdda98 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -230,10 +230,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) // integrity of the view. // TODO: This is inefficient but we're doing it short-term to fix a bug. 
Also PoS is // coming soon anyway. - utxoViewCopy, err := utxoView.CopyUtxoView() - if err != nil { - return nil, nil, nil, errors.Wrapf(err, "Error copying UtxoView: ") - } + utxoViewCopy := utxoView.CopyUtxoView() _, _, _, _, err = utxoViewCopy._connectTransaction(mempoolTx.Tx, mempoolTx.Hash, uint32(blockRet.Header.Height), int64(blockRet.Header.TstampNanoSecs), true, false) if err != nil { diff --git a/lib/block_view.go b/lib/block_view.go index fe55b0373..23552e902 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -324,7 +324,7 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { bav.SnapshotStakesToReward = make(map[SnapshotStakeMapKey]*StakeEntry) } -func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { +func (bav *UtxoView) CopyUtxoView() *UtxoView { newView := NewUtxoView(bav.Handle, bav.Params, bav.Postgres, bav.Snapshot, bav.EventManager) // Copy the UtxoEntry data @@ -666,7 +666,7 @@ func (bav *UtxoView) CopyUtxoView() (*UtxoView, error) { newView.TipHash = bav.TipHash.NewBlockHash() - return newView, nil + return newView } func NewUtxoViewWithSnapshotCache( @@ -4207,11 +4207,7 @@ func (bav *UtxoView) _connectTransactionsFailSafe( // Connect the transactions in the order they are given. for ii, txn := range txns { // Create a copy of the view to connect the transactions to in the event we have a failing txn. - copiedView, err := bav.CopyUtxoView() - if err != nil { - return nil, nil, nil, nil, nil, - errors.Wrapf(err, "_connectTransactionsFailSafe: Problem copying UtxoView") - } + copiedView := bav.CopyUtxoView() // Connect the transaction to the copied view. 
utxoOpsForTxn, totalInput, totalOutput, fee, err := copiedView.ConnectTransaction( diff --git a/lib/block_view_profile_test.go b/lib/block_view_profile_test.go index aa4cf5d3e..c59e37cd5 100644 --- a/lib/block_view_profile_test.go +++ b/lib/block_view_profile_test.go @@ -3398,8 +3398,7 @@ func TestUpdateProfileChangeBack(t *testing.T) { require.NoError(err) // This ensure that the read-only version of the utxoView accurately reflects the current set of profile names taken. - utxoViewCopy, err := mempool.universalUtxoView.CopyUtxoView() - require.NoError(err) + utxoViewCopy := mempool.universalUtxoView.CopyUtxoView() _, _, _, _, err = utxoViewCopy.ConnectTransaction(txn, txn.Hash(), chain.blockTip().Height+1, 0, false, false) require.NoError(err) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 1c304d550..2b78f9f03 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -951,10 +951,7 @@ func (mp *DeSoMempool) GetAugmentedUniversalView() (*UtxoView, error) { if mp.stopped { return nil, fmt.Errorf("GetAugmentedUniversalView: Problem getting UtxoView, Mempool is closed") } - newView, err := mp.readOnlyUtxoView.CopyUtxoView() - if err != nil { - return nil, err - } + newView := mp.readOnlyUtxoView.CopyUtxoView() return newView, nil } @@ -1008,12 +1005,7 @@ func (mp *DeSoMempool) _quickCheckBitcoinExchangeTxn( func (mp *DeSoMempool) rebuildBackupView() { // We need to rebuild the backup view since the _connectTransaction broke it. - var copyErr error - mp.backupUniversalUtxoView, copyErr = mp.universalUtxoView.CopyUtxoView() - if copyErr != nil { - glog.Errorf("ERROR tryAcceptTransaction: Problem copying "+ - "view. This should NEVER happen: %v", copyErr) - } + mp.backupUniversalUtxoView = mp.universalUtxoView.CopyUtxoView() } // See TryAcceptTransaction. The write lock must be held when calling this function. 
@@ -2604,10 +2596,7 @@ func (mp *DeSoMempool) StartReadOnlyUtxoViewRegenerator() { } func (mp *DeSoMempool) regenerateReadOnlyView() error { - newView, err := mp.universalUtxoView.CopyUtxoView() - if err != nil { - return fmt.Errorf("Error generating readOnlyUtxoView: %v", err) - } + newView := mp.universalUtxoView.CopyUtxoView() // Update the view and bump the sequence number. This is how callers will // know that the view was updated. diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 70fbb0b07..a88247fd5 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -301,10 +301,7 @@ func (pbp *PosBlockProducer) getBlockTransactions( blocksTxns := []*MsgDeSoTxn{} maxUtilityFee := uint64(0) currentBlockSize := uint64(0) - blockUtxoView, err := latestBlockView.CopyUtxoView() - if err != nil { - return nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") - } + blockUtxoView := latestBlockView.CopyUtxoView() for _, txn := range feeTimeTxns { // If we've exceeded the soft max block size, we exit. We want to allow at least one txn that moves the // cumulative block size past the soft max, but don't want to add more txns beyond that. @@ -322,10 +319,7 @@ func (pbp *PosBlockProducer) getBlockTransactions( continue } - blockUtxoViewCopy, err := blockUtxoView.CopyUtxoView() - if err != nil { - return nil, 0, errors.Wrapf(err, "Error copying UtxoView: ") - } + blockUtxoViewCopy := blockUtxoView.CopyUtxoView() _, _, _, fees, err := blockUtxoViewCopy._connectTransaction( txn.GetTxn(), txn.Hash(), uint32(newBlockHeight), newBlockTimestampNanoSecs, true, false) diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 34007227b..08661fad2 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -261,8 +261,7 @@ func TestGetBlockTransactions(t *testing.T) { // In this case, some transactions should not make it into the block, despite being valid. 
The transactions // that are rejected should have the lowest Fee-Time priority. - latestBlockViewCopy, err := latestBlockView.CopyUtxoView() - require.NoError(err) + latestBlockViewCopy := latestBlockView.CopyUtxoView() txns, maxUtilityFee, err := pbp.getBlockTransactions(NewPublicKey(m1PubBytes), latestBlockView, 3, 0, 1000, 1000) require.NoError(err) require.Equal(latestBlockViewCopy, latestBlockView) @@ -305,8 +304,7 @@ func _testProduceBlockNoSizeLimit(t *testing.T, mp *PosMempool, pbp *PosBlockPro totalTxns := numPassing + numFailing + numInvalid require.Equal(totalTxns, len(mp.GetTransactions())) - latestBlockViewCopy, err := latestBlockView.CopyUtxoView() - require.NoError(err) + latestBlockViewCopy := latestBlockView.CopyUtxoView() txns, maxUtilityFee, err := pbp.getBlockTransactions(pbp.proposerPublicKey, latestBlockView, blockHeight, 0, math.MaxUint64, math.MaxUint64) require.NoError(err) require.Equal(latestBlockViewCopy, latestBlockView) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 4fa791439..5ae5683b3 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -402,11 +402,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // as validate failed. func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // Construct a UtxoView, so we can perform the QC and leader checks. - utxoView, err := bc.GetCommittedTipView() - if err != nil { - // We can't validate the QC without a UtxoView. Return an error. - return errors.Wrap(err, "processOrphanBlockPoS: Problem initializing UtxoView") - } + utxoView := bc.GetCommittedTipView() epochEntry, err := utxoView.GetCurrentEpochEntry() if err != nil { @@ -891,10 +887,7 @@ func (bc *Blockchain) isBlockTimestampTooFarInFuturePoS(header *MsgDeSoHeader) ( // the block's height is within 3600 blocks of the committed tip, this will always work. 
In practice, // the incoming block never be more than 3600 blocks behind or ahead of the tip, while also failing the // above header.TstampNanoSecs <= currentTstampNanoSecs check. - utxoView, err := bc.GetCommittedTipView() - if err != nil { - return false, errors.Wrap(err, "isBlockTimestampTooFarInFuturePoS: Problem initializing UtxoView") - } + utxoView := bc.GetCommittedTipView() simulatedEpochEntryForBlock, err := utxoView.SimulateAdjacentEpochEntryForBlockHeight(header.Height) if err != nil { @@ -1799,8 +1792,8 @@ func (bc *Blockchain) GetUncommittedBlocks(tipHash *BlockHash) ([]*BlockNode, er } // GetCommittedTipView builds a UtxoView to the committed tip. -func (bc *Blockchain) GetCommittedTipView() (*UtxoView, error) { - return NewUtxoViewWithSnapshotCache(bc.db, bc.params, bc.postgres, bc.snapshot, nil, bc.snapshotCache), nil +func (bc *Blockchain) GetCommittedTipView() *UtxoView { + return NewUtxoViewWithSnapshotCache(bc.db, bc.params, bc.postgres, bc.snapshot, nil, bc.snapshotCache) } // BlockViewAndUtxoOps is a struct that contains a UtxoView and the UtxoOperations @@ -1815,16 +1808,13 @@ type BlockViewAndUtxoOps struct { Block *MsgDeSoBlock } -func (viewAndUtxoOps *BlockViewAndUtxoOps) Copy() (*BlockViewAndUtxoOps, error) { - copiedView, err := viewAndUtxoOps.UtxoView.CopyUtxoView() - if err != nil { - return nil, errors.Wrapf(err, "BlockViewAndUtxoOps.Copy: Problem copying UtxoView") - } +func (viewAndUtxoOps *BlockViewAndUtxoOps) Copy() *BlockViewAndUtxoOps { + copiedView := viewAndUtxoOps.UtxoView.CopyUtxoView() return &BlockViewAndUtxoOps{ UtxoView: copiedView, UtxoOps: viewAndUtxoOps.UtxoOps, Block: viewAndUtxoOps.Block, - }, nil + } } // GetUncommittedTipView builds a UtxoView to the uncommitted tip. 
@@ -1901,10 +1891,7 @@ func (bc *Blockchain) getUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( return nil, errors.Wrapf(err, "getUtxoViewAndUtxoOpsAtBlockHash: Problem getting cached BlockViewAndUtxoOps") } if exists { - viewAndUtxoOpsCopy, err := viewAndUtxoOpsAtHash.Copy() - if err != nil { - return nil, errors.Wrapf(err, "getUtxoViewAndUtxoOpsAtBlockHash: Problem copying BlockViewAndUtxoOps from cache") - } + viewAndUtxoOpsCopy := viewAndUtxoOpsAtHash.Copy() return viewAndUtxoOpsCopy, nil } // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks @@ -1935,10 +1922,7 @@ func (bc *Blockchain) getUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( // Update the TipHash saved on the UtxoView to the blockHash provided. utxoView.TipHash = &blockHash // Save a copy of the UtxoView to the cache. - copiedView, err := utxoView.CopyUtxoView() - if err != nil { - return nil, errors.Wrapf(err, "getUtxoViewAndUtxoOpsAtBlockHash: Problem copying UtxoView to store in cache") - } + copiedView := utxoView.CopyUtxoView() bc.blockViewCache.Add(blockHash, &BlockViewAndUtxoOps{ UtxoView: copiedView, UtxoOps: utxoOps, diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index cae354e8d..4ee24b2b3 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -796,10 +796,7 @@ func (fc *FastHotStuffConsensus) fetchValidatorListsForSafeBlocks(blocks []*MsgD // Create a UtxoView for the committed tip block. We will use this to fetch the validator set for // all the safe blocks. 
- utxoView, err := fc.blockchain.GetCommittedTipView() - if err != nil { - return nil, errors.Errorf("Error creating UtxoView: %v", err) - } + utxoView := fc.blockchain.GetCommittedTipView() // Fetch the current epoch entry for the committed tip epochEntryAtCommittedTip, err := utxoView.GetCurrentEpochEntry() @@ -884,10 +881,7 @@ func (fc *FastHotStuffConsensus) createBlockProducer(bav *UtxoView, previousBloc func (fc *FastHotStuffConsensus) updateActiveValidatorConnections() error { // Fetch the committed tip view. This ends up being as good as using the uncommitted tip view // but without the overhead of connecting at least two blocks' worth of txns to the view. - utxoView, err := fc.blockchain.GetCommittedTipView() - if err != nil { - return errors.Errorf("FastHotStuffConsensus.Start: Error fetching uncommitted tip view: %v", err) - } + utxoView := fc.blockchain.GetCommittedTipView() // Get the current snapshot epoch number from the committed tip. This will be behind the uncommitted tip // by up to two blocks, but this is fine since we fetch both the current epoch's and next epoch's validator diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index ae431b91e..e2814887a 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -300,19 +300,9 @@ func (mp *PosMempool) Init( mp.globalParams = globalParams var err error if readOnlyLatestBlockView != nil { - mp.readOnlyLatestBlockView, err = readOnlyLatestBlockView.CopyUtxoView() - if err != nil { - return errors.Wrapf(err, "PosMempool.Init: Problem copying utxo view to readOnlyLatestBlockView") - } - mp.augmentedReadOnlyLatestBlockView, err = readOnlyLatestBlockView.CopyUtxoView() - if err != nil { - return errors.Wrapf(err, "PosMempool.Init: Problem copying utxo view to augmentedReadOnlyLatestBlockView") - } - mp.validateTransactionsReadOnlyLatestBlockView, err = readOnlyLatestBlockView.CopyUtxoView() - if err != nil { - return errors.Wrapf(err, - "PosMempool.Init: Problem copying utxo view to 
validateTransactionsReadOnlyLatestBlockView") - } + mp.readOnlyLatestBlockView = readOnlyLatestBlockView.CopyUtxoView() + mp.augmentedReadOnlyLatestBlockView = readOnlyLatestBlockView.CopyUtxoView() + mp.validateTransactionsReadOnlyLatestBlockView = readOnlyLatestBlockView.CopyUtxoView() } mp.latestBlockHeight = latestBlockHeight mp.dir = dir @@ -953,10 +943,7 @@ func (mp *PosMempool) validateTransactions() error { txHashes = append(txHashes, txn.Hash) } // Copy the validation view to avoid modifying the readOnlyLatestBlockView. - copyValidationView, err := validationView.CopyUtxoView() - if err != nil { - return errors.Wrapf(err, "PosMempool.validateTransactions: Problem copying utxo view") - } + copyValidationView := validationView.CopyUtxoView() // Connect the transactions to the validation view. We use the latest block height + 1 as the block height to connect // the transactions. This is because the mempool contains transactions that we use for producing the next block. _, _, _, _, errorsFound, err := copyValidationView.ConnectTransactionsFailSafeWithLimit( @@ -1107,15 +1094,8 @@ func (mp *PosMempool) UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) } if blockView != nil { - var err error - mp.readOnlyLatestBlockView, err = blockView.CopyUtxoView() - if err != nil { - glog.Errorf("PosMempool.UpdateLatestBlock: Problem copying utxo view: %v", err) - } - mp.validateTransactionsReadOnlyLatestBlockView, err = blockView.CopyUtxoView() - if err != nil { - glog.Errorf("PosMempool.UpdateLatestBlock: Problem copying utxo view: %v", err) - } + mp.readOnlyLatestBlockView = blockView.CopyUtxoView() + mp.validateTransactionsReadOnlyLatestBlockView = blockView.CopyUtxoView() } mp.latestBlockHeight = blockHeight } @@ -1148,10 +1128,7 @@ func (mp *PosMempool) GetAugmentedUniversalView() (*UtxoView, error) { mp.augmentedReadOnlyLatestBlockViewMutex.RLock() readOnlyViewPointer := mp.augmentedReadOnlyLatestBlockView mp.augmentedReadOnlyLatestBlockViewMutex.RUnlock() - 
newView, err := readOnlyViewPointer.CopyUtxoView() - if err != nil { - return nil, errors.Wrapf(err, "PosMempool.GetAugmentedUniversalView: Problem copying utxo view") - } + newView := readOnlyViewPointer.CopyUtxoView() return newView, nil } func (mp *PosMempool) GetAugmentedUtxoViewForPublicKey(pk []byte, optionalTx *MsgDeSoTxn) (*UtxoView, error) { diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index fa6ede201..00efd3137 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -734,10 +734,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser // and try to connect it as a failing transaction. If that fails as well, we just continue // and the mempoolTxUtxoView is unmodified. var copiedView *UtxoView - copiedView, err = mempoolTxUtxoView.CopyUtxoView() - if err != nil { - return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer CopyUtxoView: ") - } + copiedView = mempoolTxUtxoView.CopyUtxoView() utxoOpsForTxn, _, _, _, err = copiedView.ConnectTransaction( mempoolTx.Tx, mempoolTx.Hash, uint32(blockHeight+1), currentTimestamp, false, false /*ignoreUtxos*/) From 27ca5ac54ab1f4a2b29e0ffaba79340964fd41e2 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 12 Apr 2024 12:07:03 -0400 Subject: [PATCH 629/762] Adjust bundle size based on pos vs pow (#1209) --- lib/server.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/server.go b/lib/server.go index 77b4852f8..f279e48d0 100644 --- a/lib/server.go +++ b/lib/server.go @@ -847,7 +847,12 @@ func (srv *Server) GetBlocksToStore(pp *Peer) { for _, blockNode := range srv.blockchain.bestChain { // We find the first block that's not stored and get ready to download blocks starting from this block onwards. 
if blockNode.Status&StatusBlockStored == 0 { - numBlocksToFetch := MaxBlocksInFlight - len(pp.requestedBlocks) + maxBlocksInFlight := MaxBlocksInFlight + if pp.Params.ProtocolVersion >= ProtocolVersion2 && + srv.params.IsPoSBlockHeight(uint64(blockNode.Height)) { + maxBlocksInFlight = MaxBlocksInFlightPoS + } + numBlocksToFetch := maxBlocksInFlight - len(pp.requestedBlocks) currentHeight := int(blockNode.Height) blockNodesToFetch := []*BlockNode{} // In case there are blocks at tip that are already stored (which shouldn't really happen), we'll not download them. @@ -902,7 +907,8 @@ func (srv *Server) GetBlocks(pp *Peer, maxHeight int) { // If our peer is on PoS then we can safely request a lot more blocks from them in // each flight. maxBlocksInFlight := MaxBlocksInFlight - if pp.Params.ProtocolVersion >= ProtocolVersion2 { + if pp.Params.ProtocolVersion >= ProtocolVersion2 && + srv.params.IsPoSBlockHeight(uint64(srv.blockchain.blockTip().Height)) { maxBlocksInFlight = MaxBlocksInFlightPoS } numBlocksToFetch := maxBlocksInFlight - len(pp.requestedBlocks) From a94a8e0472d473bdb0c954c4de5d97b9c8e266ae Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 12 Apr 2024 13:18:15 -0400 Subject: [PATCH 630/762] Simplify NetworkManager Routines (#1210) --- integration_testing/tools.go | 1 + lib/network_manager.go | 62 ++++++++++++++++++------------------ 2 files changed, 32 insertions(+), 31 deletions(-) diff --git a/integration_testing/tools.go b/integration_testing/tools.go index 763d3dbe7..6657f9656 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -87,6 +87,7 @@ func _generateConfig(t *testing.T, config *cmd.Config, port uint32, dataDir stri config.StallTimeoutSeconds = 900 config.MinFeerate = 1000 config.OneInboundPerIp = false + config.PeerConnectionRefreshIntervalMillis = 1000 config.MaxBlockTemplatesCache = 100 config.MaxSyncBlockHeight = 100 config.MinBlockUpdateInterval = 10 diff --git 
a/lib/network_manager.go b/lib/network_manager.go index b78be864c..78bfb41a8 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -56,12 +56,15 @@ type NetworkManager struct { // Used to set remote node ids. Must be incremented atomically. remoteNodeNextId uint64 + // AllRemoteNodes is a map storing all remote nodes by their IDs. AllRemoteNodes *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] - // Indices for various types of remote nodes. - ValidatorOutboundIndex *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] - ValidatorInboundIndex *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] + // Indices for validator remote nodes + ValidatorOutboundIndex *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] + ValidatorInboundIndex *collections.ConcurrentMap[bls.SerializedPublicKey, *RemoteNode] + + // Indices for non-validator remote nodes NonValidatorOutboundIndex *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] NonValidatorInboundIndex *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] @@ -96,7 +99,7 @@ type NetworkManager struct { limitOneInboundRemoteNodePerIP bool // The frequency at which the NetworkManager goroutines should run. - peerConnectionRefreshIntervalMillis time.Duration + peerConnectionRefreshInterval time.Duration startGroup sync.WaitGroup exitChan chan struct{} @@ -140,7 +143,7 @@ func NewNetworkManager( targetNonValidatorOutboundRemoteNodes: targetNonValidatorOutboundRemoteNodes, targetNonValidatorInboundRemoteNodes: targetNonValidatorInboundRemoteNodes, limitOneInboundRemoteNodePerIP: limitOneInboundConnectionPerIP, - peerConnectionRefreshIntervalMillis: time.Duration(peerConnectionRefreshIntervalMillis) * time.Millisecond, + peerConnectionRefreshInterval: time.Duration(peerConnectionRefreshIntervalMillis) * time.Millisecond, exitChan: make(chan struct{}), } } @@ -153,8 +156,7 @@ func (nm *NetworkManager) Start() { // Start the NetworkManager goroutines. 
The startGroup is used to ensure that all goroutines have started before // exiting the context of this function. - nm.startGroup.Add(4) - go nm.startPersistentConnector() + nm.startGroup.Add(3) go nm.startValidatorConnector() go nm.startNonValidatorConnector() go nm.startRemoteNodeCleanup() @@ -164,37 +166,17 @@ func (nm *NetworkManager) Start() { func (nm *NetworkManager) Stop() { if !nm.params.DisableNetworkManagerRoutines { - nm.exitGroup.Add(4) + nm.exitGroup.Add(3) close(nm.exitChan) nm.exitGroup.Wait() } nm.DisconnectAll() } -func (nm *NetworkManager) SetTargetOutboundPeers(numPeers uint32) { - nm.targetNonValidatorOutboundRemoteNodes = numPeers -} - // ########################### // ## NetworkManager Routines // ########################### -// startPersistentConnector is responsible for ensuring that the node is connected to all persistent IP addresses. It -// does this by periodically checking the persistentIpToRemoteNodeIdsMap, and connecting to any persistent IP addresses -// that are not already connected. -func (nm *NetworkManager) startPersistentConnector() { - nm.startGroup.Done() - for { - select { - case <-nm.exitChan: - nm.exitGroup.Done() - return - case <-time.After(nm.peerConnectionRefreshIntervalMillis): - nm.refreshConnectIps() - } - } -} - // startValidatorConnector is responsible for ensuring that the node is connected to all active validators. It does // this in two steps. First, it looks through the already established connections and checks if any of these connections // are validators. If they are, it adds them to the validator index. 
It also checks if any of the existing validators @@ -207,7 +189,7 @@ func (nm *NetworkManager) startValidatorConnector() { case <-nm.exitChan: nm.exitGroup.Done() return - case <-time.After(nm.peerConnectionRefreshIntervalMillis): + case <-time.After(nm.peerConnectionRefreshInterval): nm.refreshValidatorIndices() nm.connectValidators() } @@ -226,7 +208,7 @@ func (nm *NetworkManager) startNonValidatorConnector() { case <-nm.exitChan: nm.exitGroup.Done() return - case <-time.After(nm.peerConnectionRefreshIntervalMillis): + case <-time.After(nm.peerConnectionRefreshInterval): nm.refreshNonValidatorOutboundIndex() nm.refreshNonValidatorInboundIndex() nm.connectNonValidators() @@ -244,7 +226,7 @@ func (nm *NetworkManager) startRemoteNodeCleanup() { case <-nm.exitChan: nm.exitGroup.Done() return - case <-time.After(nm.peerConnectionRefreshIntervalMillis): + case <-time.After(nm.peerConnectionRefreshInterval): nm.Cleanup() } } @@ -780,6 +762,24 @@ func (nm *NetworkManager) refreshNonValidatorInboundIndex() { // connectNonValidators attempts to connect to new outbound nonValidator remote nodes. It is called periodically by the // nonValidator connector. func (nm *NetworkManager) connectNonValidators() { + // Connect to addresses passed via the --connect-ips flag. These addresses are persistent in the sense that if we + // disconnect from one, we will try to reconnect to the same one. 
+ for _, connectIp := range nm.connectIps { + if _, ok := nm.persistentIpToRemoteNodeIdsMap.Get(connectIp); ok { + continue + } + + glog.Infof("NetworkManager.initiatePersistentConnections: Connecting to connectIp: %v", connectIp) + id, err := nm.CreateNonValidatorPersistentOutboundConnection(connectIp) + if err != nil { + glog.Errorf("NetworkManager.initiatePersistentConnections: Problem connecting "+ + "to connectIp %v: %v", connectIp, err) + continue + } + + nm.persistentIpToRemoteNodeIdsMap.Set(connectIp, id) + } + // If the NetworkManager is configured with a list of connectIps, then we don't need to connect to any // non-validators using the address manager. We will only connect to the connectIps, and potentially validators. if len(nm.connectIps) != 0 { From f47d255e24027357142ebc0efba65a45632cae2a Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 12 Apr 2024 13:37:08 -0400 Subject: [PATCH 631/762] Fix IsInbound and IsOutbound Checks on RemoteNode (#1211) * Fix IsInboud and IsOutbound Checks on RemoteNode * Fix compilation errors --- lib/network_manager.go | 33 ++++++++++++++++++++----------- lib/remote_node.go | 45 +++++++++++++++++++++++++++++++++++------- 2 files changed, 60 insertions(+), 18 deletions(-) diff --git a/lib/network_manager.go b/lib/network_manager.go index 78bfb41a8..7f6a52adb 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -865,7 +865,7 @@ func (nm *NetworkManager) CreateValidatorConnection(ipStr string, publicKey *bls return fmt.Errorf("NetworkManager.CreateValidatorConnection: RemoteNode already exists for public key: %v", publicKey) } - remoteNode := nm.newRemoteNode(publicKey, false) + remoteNode := nm.newRemoteNode(publicKey, true, false) if err := remoteNode.DialOutboundConnection(netAddr); err != nil { return errors.Wrapf(err, "NetworkManager.CreateValidatorConnection: Problem calling DialPersistentOutboundConnection "+ "for addr: (%s:%v)", netAddr.Addr.String(), 
netAddr.Port) @@ -885,7 +885,7 @@ func (nm *NetworkManager) CreateNonValidatorPersistentOutboundConnection(ipStr s return 0, fmt.Errorf("NetworkManager.CreateNonValidatorPersistentOutboundConnection: netAddr is nil") } - remoteNode := nm.newRemoteNode(nil, true) + remoteNode := nm.newRemoteNode(nil, true, true) if err := remoteNode.DialPersistentOutboundConnection(netAddr); err != nil { return 0, errors.Wrapf(err, "NetworkManager.CreateNonValidatorPersistentOutboundConnection: Problem calling DialPersistentOutboundConnection "+ "for addr: (%s:%v)", netAddr.Addr.String(), netAddr.Port) @@ -908,7 +908,7 @@ func (nm *NetworkManager) createNonValidatorOutboundConnection(netAddr *wire.Net return fmt.Errorf("NetworkManager.CreateNonValidatorOutboundConnection: netAddr is nil") } - remoteNode := nm.newRemoteNode(nil, false) + remoteNode := nm.newRemoteNode(nil, true, false) if err := remoteNode.DialOutboundConnection(netAddr); err != nil { return errors.Wrapf(err, "NetworkManager.CreateNonValidatorOutboundConnection: Problem calling DialOutboundConnection "+ "for addr: (%s:%v)", netAddr.Addr.String(), netAddr.Port) @@ -918,10 +918,9 @@ func (nm *NetworkManager) createNonValidatorOutboundConnection(netAddr *wire.Net return nil } -func (nm *NetworkManager) AttachInboundConnection(conn net.Conn, - na *wire.NetAddressV2) (*RemoteNode, error) { +func (nm *NetworkManager) AttachInboundConnection(conn net.Conn, na *wire.NetAddressV2) (*RemoteNode, error) { - remoteNode := nm.newRemoteNode(nil, false) + remoteNode := nm.newRemoteNode(nil, false, false) if err := remoteNode.AttachInboundConnection(conn, na); err != nil { return remoteNode, errors.Wrapf(err, "NetworkManager.AttachInboundConnection: Problem calling AttachInboundConnection "+ "for addr: (%s)", conn.RemoteAddr().String()) @@ -932,8 +931,9 @@ func (nm *NetworkManager) AttachInboundConnection(conn net.Conn, return remoteNode, nil } -func (nm *NetworkManager) AttachOutboundConnection(conn net.Conn, na *wire.NetAddressV2, 
- remoteNodeId uint64, isPersistent bool) (*RemoteNode, error) { +func (nm *NetworkManager) AttachOutboundConnection( + conn net.Conn, na *wire.NetAddressV2, remoteNodeId uint64, isPersistent bool, +) (*RemoteNode, error) { id := NewRemoteNodeId(remoteNodeId) remoteNode := nm.GetRemoteNodeById(id) @@ -963,12 +963,23 @@ func (nm *NetworkManager) DisconnectAll() { } } -func (nm *NetworkManager) newRemoteNode(validatorPublicKey *bls.PublicKey, isPersistent bool) *RemoteNode { +func (nm *NetworkManager) newRemoteNode(validatorPublicKey *bls.PublicKey, isOutbound bool, isPersistent bool) *RemoteNode { id := atomic.AddUint64(&nm.remoteNodeNextId, 1) remoteNodeId := NewRemoteNodeId(id) latestBlockHeight := uint64(nm.bc.BlockTip().Height) - return NewRemoteNode(remoteNodeId, validatorPublicKey, isPersistent, nm.srv, nm.cmgr, nm.keystore, - nm.params, nm.minTxFeeRateNanosPerKB, latestBlockHeight, nm.nodeServices) + return NewRemoteNode( + remoteNodeId, + validatorPublicKey, + isPersistent, + isOutbound, + nm.srv, + nm.cmgr, + nm.keystore, + nm.params, + nm.minTxFeeRateNanosPerKB, + latestBlockHeight, + nm.nodeServices, + ) } func (nm *NetworkManager) ProcessCompletedHandshake(remoteNode *RemoteNode) { diff --git a/lib/remote_node.go b/lib/remote_node.go index 086abb41f..a90214b9b 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -80,6 +80,7 @@ type RemoteNode struct { // attemptId of the OutboundConnectionAttempt, and the subsequent id of the outbound peer. For inbound connections, // the id will be the same as the inbound peer's id. id RemoteNodeId + // validatorPublicKey is the BLS public key of the validator node. This is only set for validator nodes. For // non-validator nodes, this will be nil. For outbound validators nodes, the validatorPublicKey will be set when // the RemoteNode is instantiated. 
And for inbound validator nodes, the validatorPublicKey will be set when the @@ -91,6 +92,10 @@ type RemoteNode struct { // non-persistent RemoteNode is terminated after the first failed dial, while a persistent RemoteNode will keep // trying to dial the peer indefinitely until the connection is established, or the node stops. isPersistent bool + // isOutbound identifies whether the RemoteNode is an outbound or inbound connection. This property is stored + // here in addition to being stored in the peer object because the RemoteNode object can be initialized before + // the peer object is initialized. + isOutbound bool connectionStatus RemoteNodeStatus @@ -151,13 +156,24 @@ func NewHandshakeMetadata() *HandshakeMetadata { return &HandshakeMetadata{} } -func NewRemoteNode(id RemoteNodeId, validatorPublicKey *bls.PublicKey, isPersistent bool, srv *Server, - cmgr *ConnectionManager, keystore *BLSKeystore, params *DeSoParams, minTxFeeRateNanosPerKB uint64, - latestBlockHeight uint64, nodeServices ServiceFlag) *RemoteNode { +func NewRemoteNode( + id RemoteNodeId, + validatorPublicKey *bls.PublicKey, + isPersistent bool, + isOutbound bool, + srv *Server, + cmgr *ConnectionManager, + keystore *BLSKeystore, + params *DeSoParams, + minTxFeeRateNanosPerKB uint64, + latestBlockHeight uint64, + nodeServices ServiceFlag, +) *RemoteNode { return &RemoteNode{ id: id, validatorPublicKey: validatorPublicKey, isPersistent: isPersistent, + isOutbound: isOutbound, connectionStatus: RemoteNodeStatus_NotConnected, handshakeMetadata: NewHandshakeMetadata(), srv: srv, @@ -236,11 +252,11 @@ func (rn *RemoteNode) GetNetAddress() *wire.NetAddressV2 { } func (rn *RemoteNode) IsInbound() bool { - return rn.peer != nil && !rn.peer.IsOutbound() + return !rn.isOutbound } func (rn *RemoteNode) IsOutbound() bool { - return rn.peer != nil && rn.peer.IsOutbound() + return rn.isOutbound } func (rn *RemoteNode) IsPersistent() bool { @@ -283,7 +299,7 @@ func (rn *RemoteNode) IsValidator() bool { } func 
(rn *RemoteNode) IsExpectedValidator() bool { - return rn.GetValidatorPublicKey() != nil + return rn.validatorPublicKey != nil } func (rn *RemoteNode) hasValidatorServiceFlag() bool { @@ -323,6 +339,11 @@ func (rn *RemoteNode) AttachInboundConnection(conn net.Conn, na *wire.NetAddress rn.mtx.Lock() defer rn.mtx.Unlock() + // It should not be possible to attach an inbound connection to an outbound RemoteNode. + if rn.isOutbound { + return fmt.Errorf("RemoteNode.AttachInboundConnection: RemoteNode is not an inbound connection") + } + // At this point, the RemoteNode must be in the NotConnected state. If the RemoteNode already progressed to // another state, we return an error. if !rn.IsNotConnected() { @@ -342,12 +363,22 @@ func (rn *RemoteNode) AttachOutboundConnection(conn net.Conn, na *wire.NetAddres rn.mtx.Lock() defer rn.mtx.Unlock() + // It should not be possible to attach an outbound connection to an inbound RemoteNode. + if !rn.isOutbound { + return fmt.Errorf("RemoteNode.AttachOutboundConnection: RemoteNode is not an outbound connection") + } + + // It should not be possible to attach an outbound persistent connection to a non-persistent RemoteNode. 
+ if rn.isPersistent != isPersistent { + return fmt.Errorf("RemoteNode.AttachOutboundConnection: RemoteNode is not persistent") + } + if rn.connectionStatus != RemoteNodeStatus_Attempted { return fmt.Errorf("RemoteNode.AttachOutboundConnection: RemoteNode is not in the Attempted state") } id := rn.GetId().ToUint64() - rn.peer = rn.cmgr.ConnectPeer(id, conn, na, true, isPersistent) + rn.peer = rn.cmgr.ConnectPeer(id, conn, na, true, rn.isPersistent) versionTimeExpected := time.Now().Add(rn.params.VersionNegotiationTimeout) rn.versionTimeExpected = &versionTimeExpected rn.setStatusConnected() From 91ed9823450c077f8e681b4f777d06ede3710814 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 12 Apr 2024 14:29:31 -0400 Subject: [PATCH 632/762] add LatestView to checkpoint block info, refresh when starting fast hot stuff event loop, and use this value to override tip's view (#1213) --- consensus/event_loop.go | 8 +++- consensus/event_loop_test.go | 60 ++++++++++++++++++++++++++--- consensus/integration_test_types.go | 2 + consensus/mocking.go | 2 +- consensus/types.go | 2 +- lib/blockchain.go | 22 ++++++++--- lib/pos_consensus.go | 17 +++++++- lib/server.go | 11 ++++++ 8 files changed, 109 insertions(+), 15 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 50a1d927d..8560e6d97 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -45,6 +45,7 @@ func (fc *fastHotStuffEventLoop) Init( genesisQC QuorumCertificate, tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList, + currentView uint64, ) error { // Grab the event loop's lock fc.lock.Lock() @@ -71,8 +72,11 @@ func (fc *fastHotStuffEventLoop) Init( return errors.Wrap(err, "FastHotStuffEventLoop.Init: ") } - // We track the current view here so we know which view to time out on later on. 
- fc.currentView = tip.Block.GetView() + 1 + // The currentView must be higher than the tip block's current view + if currentView < tip.Block.GetView()+1 { + return errors.New("FastHotStuffEventLoop.Init: currentView is lower than the tip block's view") + } + fc.currentView = currentView // Reset QC construction status for the current view fc.hasCrankTimerRunForCurrentView = false diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index df0057129..87e86f752 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -27,6 +27,7 @@ func TestInit(t *testing.T) { genesisBlock.GetQC(), // genesisQC BlockWithValidatorList{genesisBlock, createDummyValidatorList()}, // tip []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetView()+1, ) require.Error(t, err) } @@ -39,6 +40,7 @@ func TestInit(t *testing.T) { genesisBlock.GetQC(), // genesisQC BlockWithValidatorList{genesisBlock, createDummyValidatorList()}, // tip []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetView()+1, ) require.Error(t, err) } @@ -52,6 +54,7 @@ func TestInit(t *testing.T) { fakeGenesisBlock.GetQC(), // genesisQC BlockWithValidatorList{genesisBlock, createDummyValidatorList()}, // tip []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetView()+1, ) require.Error(t, err) } @@ -64,6 +67,7 @@ func TestInit(t *testing.T) { genesisBlock.GetQC(), // genesisQC BlockWithValidatorList{nil, createDummyValidatorList()}, // tip []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetView()+1, ) require.Error(t, err) } @@ -76,6 +80,7 @@ func TestInit(t *testing.T) { genesisBlock.GetQC(), // genesisQC BlockWithValidatorList{genesisBlock, nil}, // tip []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetView()+1, ) require.Error(t, 
err) } @@ -88,6 +93,7 @@ func TestInit(t *testing.T) { genesisBlock.GetQC(), // genesisQC BlockWithValidatorList{genesisBlock, createDummyValidatorList()}, // tip []BlockWithValidatorList{{nil, createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetView()+1, ) require.Error(t, err) } @@ -100,6 +106,7 @@ func TestInit(t *testing.T) { genesisBlock.GetQC(), // genesisQC BlockWithValidatorList{genesisBlock, createDummyValidatorList()}, // tip []BlockWithValidatorList{{genesisBlock, nil}}, // safeBlocks + genesisBlock.GetView()+1, ) require.Error(t, err) } @@ -112,6 +119,7 @@ func TestInit(t *testing.T) { genesisBlock.GetQC(), // genesisQC BlockWithValidatorList{genesisBlock, createDummyValidatorList()}, // tip []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetView()+1, ) require.NoError(t, err) @@ -149,7 +157,14 @@ func TestProcessTipBlock(t *testing.T) { { genesisBlock := createDummyBlock(2) tipBlock := BlockWithValidatorList{genesisBlock, createDummyValidatorList()} - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, genesisBlock.GetQC(), tipBlock, []BlockWithValidatorList{tipBlock}) + err := fc.Init( + oneHourInNanoSecs, + oneHourInNanoSecs, + genesisBlock.GetQC(), + tipBlock, + []BlockWithValidatorList{tipBlock}, + tipBlock.Block.GetView()+1, + ) require.NoError(t, err) } @@ -291,7 +306,14 @@ func TestAdvanceViewOnTimeout(t *testing.T) { // BlockHeight = 1, Current View = 3 genesisBlock := createDummyBlock(2) tipBlock := BlockWithValidatorList{genesisBlock, createDummyValidatorList()} - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, genesisBlock.GetQC(), tipBlock, []BlockWithValidatorList{tipBlock}) + err := fc.Init( + oneHourInNanoSecs, + oneHourInNanoSecs, + genesisBlock.GetQC(), + tipBlock, + []BlockWithValidatorList{tipBlock}, + tipBlock.Block.GetView()+1, + ) require.NoError(t, err) } @@ -383,7 +405,14 @@ func TestProcessValidatorVote(t *testing.T) { // BlockHeight = 1, Current View = 3 
genesisBlock := createDummyBlock(2) tipBlock := BlockWithValidatorList{genesisBlock, createDummyValidatorList()} - err := fc.Init(oneHourInNanoSecs, oneHourInNanoSecs, genesisBlock.GetQC(), tipBlock, []BlockWithValidatorList{tipBlock}) + err := fc.Init( + oneHourInNanoSecs, + oneHourInNanoSecs, + genesisBlock.GetQC(), + tipBlock, + []BlockWithValidatorList{tipBlock}, + tipBlock.Block.GetView()+1, + ) require.NoError(t, err) } @@ -482,6 +511,7 @@ func TestProcessValidatorTimeout(t *testing.T) { genesisBlock.GetQC(), tipBlock, []BlockWithValidatorList{tipBlock, {genesisBlock, validatorList}}, + tipBlock.Block.GetView()+1, ) require.NoError(t, err) } @@ -596,6 +626,7 @@ func TestTimeoutScheduledTaskExecuted(t *testing.T) { dummyBlock.GetQC(), // genesisQC BlockWithValidatorList{dummyBlock, createDummyValidatorList()}, // tip []BlockWithValidatorList{{dummyBlock, createDummyValidatorList()}}, // safeBlocks + dummyBlock.GetView()+1, ) require.NoError(t, err) @@ -639,7 +670,14 @@ func TestResetEventLoopSignal(t *testing.T) { // Init the event loop genesisBlock := createDummyBlock(2) tipBlock := BlockWithValidatorList{genesisBlock, createDummyValidatorList()} - err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, genesisBlock.GetQC(), tipBlock, []BlockWithValidatorList{tipBlock}) + err := fc.Init( + oneHourInNanoSecs, + 2*oneHourInNanoSecs, + genesisBlock.GetQC(), + tipBlock, + []BlockWithValidatorList{tipBlock}, + tipBlock.Block.GetView()+1, + ) require.NoError(t, err) // Start the event loop @@ -709,6 +747,7 @@ func TestVoteQCConstructionSignal(t *testing.T) { block.GetQC(), // genesisQC BlockWithValidatorList{block, validatorList}, // tip []BlockWithValidatorList{{block, validatorList}}, // safeBlocks + block.GetView()+1, ) require.NoError(t, err) @@ -747,6 +786,7 @@ func TestVoteQCConstructionSignal(t *testing.T) { block.GetQC(), // genesisQC BlockWithValidatorList{block, validatorList}, // tip []BlockWithValidatorList{{block, validatorList}}, // safeBlocks + 
block.GetView()+1, ) require.NoError(t, err) @@ -801,6 +841,7 @@ func TestVoteQCConstructionSignal(t *testing.T) { block.GetQC(), // genesisQC BlockWithValidatorList{block, validatorList}, // tip []BlockWithValidatorList{{block, validatorList}}, // safeBlocks + block.GetView()+1, ) require.NoError(t, err) @@ -893,6 +934,7 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { {block1, validatorList}, {block2, validatorList}, }, + block2.GetView()+1, ) require.NoError(t, err) @@ -937,6 +979,7 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { {block1, validatorList}, {block2, validatorList}, }, + block2.GetView()+1, ) require.NoError(t, err) @@ -1016,7 +1059,14 @@ func TestFastHotStuffEventLoopStartStop(t *testing.T) { // Init the event loop genesisBlock := createDummyBlock(2) tipBlock := BlockWithValidatorList{genesisBlock, createDummyValidatorList()} - err := fc.Init(oneHourInNanoSecs, 2*oneHourInNanoSecs, genesisBlock.GetQC(), tipBlock, []BlockWithValidatorList{tipBlock}) + err := fc.Init( + oneHourInNanoSecs, + 2*oneHourInNanoSecs, + genesisBlock.GetQC(), + tipBlock, + []BlockWithValidatorList{tipBlock}, + tipBlock.Block.GetView()+1, + ) require.NoError(t, err) // Start the event loop diff --git a/consensus/integration_test_types.go b/consensus/integration_test_types.go index c3cf8ca79..088253ce8 100644 --- a/consensus/integration_test_types.go +++ b/consensus/integration_test_types.go @@ -67,6 +67,7 @@ func (node *validatorNode) Init( []BlockWithValidatorList{ {genesisBlock, node.getValidators()}, }, + genesisBlock.GetView()+1, ) } @@ -87,6 +88,7 @@ func (node *validatorNode) Resync(genesisBlock *block, tipBlock *block, safeBloc collections.Transform(safeBlocks, func(bb *block) BlockWithValidatorList { return BlockWithValidatorList{bb, node.getValidators()} }), + genesisBlock.GetView()+1, ) } diff --git a/consensus/mocking.go b/consensus/mocking.go index 96b3e438e..4dfa027da 100644 --- a/consensus/mocking.go +++ b/consensus/mocking.go @@ -20,7 +20,7 @@ 
type MockFastHotStuffEventLoop struct { func (fc *MockFastHotStuffEventLoop) GetEvents() chan *FastHotStuffEvent { return fc.OnGetEvents() } -func (fc *MockFastHotStuffEventLoop) Init(crankTimerInterval time.Duration, timeoutBaseDuration time.Duration, genesisQC QuorumCertificate, tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { +func (fc *MockFastHotStuffEventLoop) Init(crankTimerInterval time.Duration, timeoutBaseDuration time.Duration, genesisQC QuorumCertificate, tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList, _ uint64) error { return fc.OnInit(crankTimerInterval, timeoutBaseDuration, genesisQC, tip, safeBlocks) } diff --git a/consensus/types.go b/consensus/types.go index a84fba44b..252809f82 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -67,7 +67,7 @@ type BlockHashValue = [32]byte type FastHotStuffEventLoop interface { GetEvents() chan *FastHotStuffEvent - Init(time.Duration, time.Duration, QuorumCertificate, BlockWithValidatorList, []BlockWithValidatorList) error + Init(time.Duration, time.Duration, QuorumCertificate, BlockWithValidatorList, []BlockWithValidatorList, uint64) error GetCurrentView() uint64 AdvanceViewOnTimeout() (uint64, error) ProcessTipBlock(BlockWithValidatorList, []BlockWithValidatorList, time.Duration, time.Duration) error diff --git a/lib/blockchain.go b/lib/blockchain.go index e1c80ce83..08d481f3c 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -458,16 +458,21 @@ type OrphanBlock struct { } type CheckpointBlockInfo struct { - Height uint64 - Hash *BlockHash - HashHex string + Height uint64 + Hash *BlockHash + HashHex string + LatestView uint64 } func (checkpointBlockInfo *CheckpointBlockInfo) String() string { if checkpointBlockInfo == nil { return "" } - return fmt.Sprintf("< Height: %d, Hash: %v >", checkpointBlockInfo.Height, checkpointBlockInfo.HashHex) + return fmt.Sprintf( + "< Height: %d, Hash: %v, Latest View: %v >", + checkpointBlockInfo.Height, + 
checkpointBlockInfo.HashHex, + checkpointBlockInfo.LatestView) } type CheckpointBlockInfoAndError struct { @@ -568,8 +573,11 @@ func (bc *Blockchain) updateCheckpointBlockInfo() { checkpointBlockInfos[ii] = <-ch } - // Find the checkpoint block info with the highest height. + // Find the checkpoint block info with the highest height and find the highest view reported + // from the checkpoint syncing providers. We'll combine these two pieces of information to + // form the final checkpoint block info. var highestHeightCheckpointBlockInfo *CheckpointBlockInfo + highestView := uint64(0) for _, checkpointBlockInfo := range checkpointBlockInfos { if checkpointBlockInfo.Error != nil { glog.Errorf("updateCheckpointBlockInfo: Error getting checkpoint block info: %v", checkpointBlockInfo.Error) @@ -579,6 +587,9 @@ func (bc *Blockchain) updateCheckpointBlockInfo() { checkpointBlockInfo.CheckpointBlockInfo.Height > highestHeightCheckpointBlockInfo.Height { highestHeightCheckpointBlockInfo = checkpointBlockInfo.CheckpointBlockInfo } + if highestView < checkpointBlockInfo.CheckpointBlockInfo.LatestView { + highestView = checkpointBlockInfo.CheckpointBlockInfo.LatestView + } } if highestHeightCheckpointBlockInfo == nil { glog.Errorf("updateCheckpointBlockInfo: No valid checkpoint block info found.") @@ -587,6 +598,7 @@ func (bc *Blockchain) updateCheckpointBlockInfo() { glog.V(2).Infof("updateCheckpointBlockInfo: Setting checkpoint block info to: %v", highestHeightCheckpointBlockInfo) bc.checkpointBlockInfoLock.Lock() bc.checkpointBlockInfo = highestHeightCheckpointBlockInfo + bc.checkpointBlockInfo.LatestView = highestView bc.checkpointBlockInfoLock.Unlock() } diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index 4ee24b2b3..b0431c850 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -109,9 +109,24 @@ func (fc *FastHotStuffConsensus) Start() error { time.Duration(currentSnapshotGlobalParams.BlockProductionIntervalMillisecondsPoS) timeoutBaseDuration := 
time.Millisecond * time.Duration(currentSnapshotGlobalParams.TimeoutIntervalMillisecondsPoS) + // Refresh the checkpoint block info, so we can get tha latest view. + fc.blockchain.updateCheckpointBlockInfo() + checkpointBlockInfo := fc.blockchain.GetCheckpointBlockInfo() + currentView := tipBlock.Header.GetView() + 1 + if checkpointBlockInfo != nil && checkpointBlockInfo.LatestView > currentView { + currentView = checkpointBlockInfo.LatestView + } + // Initialize the event loop. This should never fail. If it does, we return the error to the caller. // The caller handle the error and decide when to retry. - err = fc.fastHotStuffEventLoop.Init(blockProductionInterval, timeoutBaseDuration, genesisQC, tipBlockWithValidators[0], safeBlocksWithValidators) + err = fc.fastHotStuffEventLoop.Init( + blockProductionInterval, + timeoutBaseDuration, + genesisQC, + tipBlockWithValidators[0], + safeBlocksWithValidators, + currentView, + ) if err != nil { return errors.Errorf("FastHotStuffConsensus.Start: Error initializing FastHotStuffEventLoop: %v", err) } diff --git a/lib/server.go b/lib/server.go index f279e48d0..131c90563 100644 --- a/lib/server.go +++ b/lib/server.go @@ -3309,3 +3309,14 @@ func (progress *SyncProgress) PrintLoop() { func (srv *Server) GetNetworkManagerConnections() []*RemoteNode { return srv.networkManager.GetAllRemoteNodes().GetAll() } + +func (srv *Server) GetLatestView() uint64 { + if srv.fastHotStuffConsensus == nil || !srv.fastHotStuffConsensus.IsRunning() { + return 0 + } + if srv.fastHotStuffConsensus.fastHotStuffEventLoop == nil || + !srv.fastHotStuffConsensus.fastHotStuffEventLoop.IsRunning() { + return 0 + } + return srv.fastHotStuffConsensus.fastHotStuffEventLoop.GetCurrentView() +} From 50ae431abd800f08fc8f591b3de42ae065d8ceb0 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 12 Apr 2024 16:01:35 -0400 Subject: [PATCH 633/762] Remove Mempool Refresh On Block Connect and Disconnect (#1214) * Remove 
Mempool Refresh On Block Connect and Disconnect * Add TODOs --- lib/pos_mempool.go | 81 ++++------------------------------------- lib/pos_mempool_test.go | 2 +- 2 files changed, 9 insertions(+), 74 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index e2814887a..f5c4f5178 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -2,13 +2,14 @@ package lib import ( "fmt" - "github.com/decred/dcrd/lru" "path/filepath" "strings" "sync" "sync/atomic" "time" + "github.com/decred/dcrd/lru" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" @@ -457,10 +458,6 @@ func (mp *PosMempool) OnBlockConnected(block *MsgDeSoBlock) { } } - if err := mp.refreshNoLock(); err != nil { - glog.Errorf("PosMempool.OnBlockConnected: Problem refreshing mempool: %v", err) - } - // Add the block to the fee estimator. This is a best effort operation. If we fail to add the block // to the fee estimator, we log an error and continue. if err := mp.feeEstimator.AddBlock(block); err != nil { @@ -508,8 +505,6 @@ func (mp *PosMempool) OnBlockDisconnected(block *MsgDeSoBlock) { } } - mp.refreshNoLock() - // This is a best effort operation. If we fail to prune the mempool, we log an error and continue. if err := mp.pruneNoLock(); err != nil { glog.Errorf("PosMempool.AddTransaction: Problem pruning mempool: %v", err) @@ -1000,69 +995,6 @@ func (mp *PosMempool) validateTransactions() error { return nil } -// refreshNoLock can be used to evict stale transactions from the mempool. However, it is a bit expensive and should be used -// sparingly. Upon being called, refreshNoLock will create an in-memory temp PosMempool and populate it with transactions from -// the main mempool. The temp mempool will have the most up-to-date readOnlyLatestBlockView, Height, and globalParams. Any -// transaction that fails to add to the temp mempool will be removed from the main mempool. 
-func (mp *PosMempool) refreshNoLock() error { - // Create the temporary in-memory mempool with the most up-to-date readOnlyLatestBlockView, Height, and globalParams. - tempPool := NewPosMempool() - err := tempPool.Init( - mp.params, - mp.globalParams, - mp.readOnlyLatestBlockView, - mp.latestBlockHeight, - "", - true, - mp.maxMempoolPosSizeBytes, - mp.mempoolBackupIntervalMillis, - mp.feeEstimator.numMempoolBlocks, - mp.feeEstimator.cachedBlocks, - mp.feeEstimator.numPastBlocks, - mp.maxValidationViewConnects, - mp.transactionValidationRefreshIntervalMillis, - ) - if err != nil { - return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem initializing temp pool") - } - if err := tempPool.Start(); err != nil { - return errors.Wrapf(err, "PosMempool.refreshNoLock: Problem starting temp pool") - } - defer tempPool.Stop() - - // Add all transactions from the main mempool to the temp mempool. Skip signature verification. - var txnsToRemove []*MempoolTx - txns := mp.getTransactionsNoLock() - for _, txn := range txns { - mtxn := NewMempoolTransaction(txn.Tx, txn.Added, txn.IsValidated()) - err := tempPool.AddTransaction(mtxn) - if err == nil { - continue - } - - // If we've encountered an error while adding the transaction to the temp mempool, we add it to our txnsToRemove list. - txnsToRemove = append(txnsToRemove, txn) - } - - // Now remove all transactions from the txnsToRemove list from the main mempool. - for _, txn := range txnsToRemove { - if err := mp.removeTransactionNoLock(txn, true); err != nil { - glog.Errorf("PosMempool.refreshNoLock: Problem removing transaction with hash (%v): %v", txn.Hash, err) - } - } - - // Log the hashes for transactions that were removed. 
- if len(txnsToRemove) > 0 { - var removedTxnHashes []string - for _, txn := range txnsToRemove { - removedTxnHashes = append(removedTxnHashes, txn.Hash.String()) - } - glog.V(1).Infof("PosMempool.refreshNoLock: Transactions with the following hashes were removed: %v", - strings.Join(removedTxnHashes, ",")) - } - return nil -} - // pruneNoLock removes transactions from the mempool until the mempool size is below the maximum allowed size. The transactions // are removed in lowest to highest Fee-Time priority, i.e. opposite way that transactions are ordered in // GetTransactions(). @@ -1113,9 +1045,12 @@ func (mp *PosMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { } mp.globalParams = globalParams - if err := mp.refreshNoLock(); err != nil { - glog.Errorf("PosMempool.UpdateGlobalParams: Problem refreshing mempool: %v", err) - } + + // TODO: Trim the mempool size + + // TODO: Update the fee bucketing parameters + + // TODO: Update the fee estimator } // Implementation of the Mempool interface diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 1d7b38ed8..a850d3316 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -226,7 +226,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { newGlobalParams := _testGetDefaultGlobalParams() newGlobalParams.MinimumNetworkFeeNanosPerKB = 20000 mempool.UpdateGlobalParams(newGlobalParams) - require.Equal(0, len(mempool.GetTransactions())) + require.Equal(100, len(mempool.GetTransactions())) mempool.Stop() require.False(mempool.IsRunning()) From 5d7f9436035c460ad5c9853e909028d5b2268b4e Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 12 Apr 2024 16:32:45 -0400 Subject: [PATCH 634/762] Add IsPoSCommitBlockEvent to block event (#1215) --- lib/blockchain.go | 6 ++++++ lib/db_utils.go | 5 +++++ lib/event_manager.go | 11 +++++++++++ lib/pos_blockchain.go | 2 +- 4 files changed, 23 insertions(+), 1 deletion(-) diff --git a/lib/blockchain.go 
b/lib/blockchain.go index 08d481f3c..3551ee41e 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -2629,6 +2629,11 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures UtxoView: bc.blockView, UtxoOps: utxoOpsForBlock, }) + bc.eventManager.blockCommitted(&BlockEvent{ + Block: desoBlock, + UtxoView: bc.blockView, + UtxoOps: utxoOpsForBlock, + }) } bc.blockView = nil @@ -2903,6 +2908,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // FIXME: We need to add the UtxoOps here to handle reorgs properly in Rosetta // For now it's fine because reorgs are virtually impossible. bc.eventManager.blockConnected(&BlockEvent{Block: blockToAttach}) + bc.eventManager.blockCommitted(&BlockEvent{Block: blockToAttach}) } } } diff --git a/lib/db_utils.go b/lib/db_utils.go index 698727011..98bcf6666 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -5403,6 +5403,11 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, UtxoView: utxoView, UtxoOps: utxoOpsForBlock, }) + eventManager.blockCommitted(&BlockEvent{ + Block: genesisBlock, + UtxoView: utxoView, + UtxoOps: utxoOpsForBlock, + }) } // Flush all the data in the view. 
err := utxoView.FlushToDb(0) diff --git a/lib/event_manager.go b/lib/event_manager.go index 55c4b7bd1..4584b0101 100644 --- a/lib/event_manager.go +++ b/lib/event_manager.go @@ -56,6 +56,7 @@ type EventManager struct { stateSyncerFlushedHandlers []StateSyncerFlushedEventFunc blockConnectedHandlers []BlockEventFunc blockDisconnectedHandlers []BlockEventFunc + blockCommittedHandlers []BlockEventFunc blockAcceptedHandlers []BlockEventFunc snapshotCompletedHandlers []SnapshotCompletedEventFunc isMempoolManager bool @@ -95,6 +96,16 @@ func (em *EventManager) transactionConnected(event *TransactionEvent) { } } +func (em *EventManager) OnBlockCommitted(handler BlockEventFunc) { + em.blockCommittedHandlers = append(em.blockCommittedHandlers, handler) +} + +func (em *EventManager) blockCommitted(event *BlockEvent) { + for _, handler := range em.blockCommittedHandlers { + handler(event) + } +} + func (em *EventManager) OnBlockConnected(handler BlockEventFunc) { em.blockConnectedHandlers = append(em.blockConnectedHandlers, handler) } diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 5ae5683b3..154268389 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1726,7 +1726,7 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool } if bc.eventManager != nil { - bc.eventManager.blockConnected(&BlockEvent{ + bc.eventManager.blockCommitted(&BlockEvent{ Block: block, UtxoView: utxoView, UtxoOps: utxoOps, From a1acdd30bef8835032242f4585579509b8ca1304 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 12 Apr 2024 16:37:41 -0400 Subject: [PATCH 635/762] Check txn register for duplicate txn earlier in add transaction (#1216) --- lib/pos_mempool.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index f5c4f5178..0d0aad067 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -524,6 +524,11 @@ func (mp *PosMempool) AddTransaction(mtxn 
*MempoolTransaction) error { return fmt.Errorf("PosMempool.AddTransaction: Cannot add a nil transaction") } + // If the transaction is already in the transaction register, then we return an error. + if mp.txnRegister.GetTransaction(mtxn.GetTxn().Hash()) != nil { + return errors.New("PosMempool.AddTransaction: Transaction already in mempool") + } + // Acquire the mempool lock for all operations related to adding the transaction // TODO: Do we need to wrap all of our validation logic in a write-lock? We should revisit // this later and try to pull as much as we can out of the critical section here. The reason From d4f02bb8fb91ae345bd31a546a84560da6e16be4 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 12 Apr 2024 16:45:07 -0400 Subject: [PATCH 636/762] Remove Gossipped Txn Error Logging (#1217) * Remove Gosspped Txn Error Logging * Change log level --- lib/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/server.go b/lib/server.go index 131c90563..9abcf05ce 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2544,7 +2544,7 @@ func (srv *Server) _processTransactions(pp *Peer, transactions []*MsgDeSoTxn) [] // verifying signatures. newlyAcceptedTxns, err := srv.ProcessSingleTxnWithChainLock(pp, txn) if err != nil { - glog.Errorf(fmt.Sprintf("Server._handleTransactionBundle: Rejected "+ + glog.V(4).Info(fmt.Sprintf("Server._handleTransactionBundle: Rejected "+ "transaction %v from peer %v from mempool: %v", txn, pp, err)) // A peer should know better than to send us a transaction that's below // our min feerate, which they see when we send them a version message. 
From a24ec7eab05ae047bd5326618c49caeac3ab0845 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 12 Apr 2024 17:40:37 -0400 Subject: [PATCH 637/762] Use alpine:latest instead of edge (#1219) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index ebaeefd76..c43024ee1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -28,7 +28,7 @@ COPY main.go . RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core main.go # create tiny image -FROM alpine:edge +FROM alpine:latest RUN apk add --update vips-dev From 192060a2d94250ea0a5d4a916369b6f4acbd46b0 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Sat, 13 Apr 2024 19:16:15 -0400 Subject: [PATCH 638/762] Create Helper ConnectTransactionIntoNewUtxoView Function For Fail Safe Txn Connects (#1221) --- lib/block_view.go | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 23552e902..5a4418561 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4,14 +4,15 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/btcsuite/btcd/btcec/v2" - "github.com/dgraph-io/badger/v4" "math" "math/big" "reflect" "strings" "time" + "github.com/btcsuite/btcd/btcec/v2" + "github.com/dgraph-io/badger/v4" + "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections/bitset" @@ -4154,6 +4155,38 @@ func (bav *UtxoView) _connectSingleTxn( return utxoOpsForTxn, totalInput, totalOutput, fees, nil } +// ConnectTransactionIntoNewUtxoView is a fail safe way to connect a transaction to a view. It connects the transaction +// to a copy of the view and returns the new view, the UtxoOperations, total inputs, total outputs, fees, and an error if +// one occurred. The function does not modify the original view. 
The function is useful when you want to connect a +// transaction to a view without modifying the original view through unintended side effects. +func (bav *UtxoView) ConnectTransactionIntoNewUtxoView( + txn *MsgDeSoTxn, + txHash *BlockHash, + blockHeight uint32, + blockTimestampNanoSecs int64, + verifySignatures bool, + ignoreUtxos bool, +) ( + _finalUtxoView *UtxoView, // The new view with the transaction connected. + _utxoOps []*UtxoOperation, + _totalInput uint64, + _totalOutput uint64, + _fees uint64, + _err error, +) { + // Copy the view so we can try connecting the transaction without modifying the original view. + copiedView := bav.CopyUtxoView() + + // Connect the transaction to the copied view. + utxoOpsForTxn, totalInput, totalOutput, fees, err := copiedView.ConnectTransaction( + txn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) + if err != nil { + return nil, nil, 0, 0, 0, errors.Wrapf(err, "TryConnectTransaction: Problem connecting txn on copy view") + } + + return copiedView, utxoOpsForTxn, totalInput, totalOutput, fees, nil +} + // ConnectTransactionsFailSafe connects a list of transactions to the view and returns the combined UtxoOperations, // total inputs, total outputs, fees, and success flags. If the ignoreFailing flag is set, the function will not return // an error when transactions fail to connect. 
Instead, a failed transaction will be skipped over, and the success flag From 5c5a4e6ff9900490efe00d623ef7e377c6fe3912 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Sat, 13 Apr 2024 19:24:49 -0400 Subject: [PATCH 639/762] Simplify Mempool Transaction Validation Routine (#1222) * Simplify Mempool Transaction Validation Routine * Remove TODO comment --- lib/block_view.go | 93 ----------------------------- lib/pos_mempool.go | 126 +++++++++++++++++----------------------- lib/pos_mempool_test.go | 7 ++- 3 files changed, 59 insertions(+), 167 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 5a4418561..3dced2df2 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4187,99 +4187,6 @@ func (bav *UtxoView) ConnectTransactionIntoNewUtxoView( return copiedView, utxoOpsForTxn, totalInput, totalOutput, fees, nil } -// ConnectTransactionsFailSafe connects a list of transactions to the view and returns the combined UtxoOperations, -// total inputs, total outputs, fees, and success flags. If the ignoreFailing flag is set, the function will not return -// an error when transactions fail to connect. Instead, a failed transaction will be skipped over, and the success flag -// at the corresponding index will be set to false. If ignoreFailing is set to false, the function will return an error -// if any of the transactions fail to connect. The other parameters have the same behavior as in ConnectTransaction. 
-func (bav *UtxoView) ConnectTransactionsFailSafe( - txns []*MsgDeSoTxn, txHashes []*BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, - verifySignatures bool, ignoreUtxos bool, ignoreFailing bool) ( - _combinedUtxoOps [][]*UtxoOperation, _totalInputs []uint64, _totalOutputs []uint64, - _fees []uint64, _errorsFound []error, _err error) { - - return bav._connectTransactionsFailSafe(txns, txHashes, blockHeight, blockTimestampNanoSecs, verifySignatures, - ignoreUtxos, ignoreFailing, 0) -} - -// ConnectTransactionsFailSafeWithLimit works similarly to ConnectTransactionsFailSafe, but it limits the number of -// transactions that can be connected to the view. If the transactionConnectLimit is set to 0, all transactions will be -// connected. If the transactionConnectLimit is set to a positive number, the function will return after successfully -// connecting the specified number of transactions. Failing transactions do not count towards the transactionConnectLimit. -func (bav *UtxoView) ConnectTransactionsFailSafeWithLimit( - txns []*MsgDeSoTxn, txHashes []*BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, - verifySignatures bool, ignoreUtxos bool, ignoreFailing bool, transactionConnectLimit uint64) ( - _combinedUtxoOps [][]*UtxoOperation, _totalInputs []uint64, _totalOutputs []uint64, - _fees []uint64, _errorsFound []error, _err error) { - - return bav._connectTransactionsFailSafe(txns, txHashes, blockHeight, blockTimestampNanoSecs, verifySignatures, - ignoreUtxos, ignoreFailing, transactionConnectLimit) -} - -func (bav *UtxoView) _connectTransactionsFailSafe( - txns []*MsgDeSoTxn, txHashes []*BlockHash, blockHeight uint32, blockTimestampNanoSecs int64, - verifySignatures bool, ignoreUtxos bool, ignoreFailing bool, transactionConnectLimit uint64) ( - _combinedUtxoOps [][]*UtxoOperation, _totalInputs []uint64, _totalOutputs []uint64, - _fees []uint64, _errorsFound []error, _err error) { - - var combinedUtxoOps [][]*UtxoOperation - var totalInputs 
[]uint64 - var totalOutputs []uint64 - var fees []uint64 - var errorsFound []error - var totalConnectedTxns uint64 - - updateValues := func(utxoOps []*UtxoOperation, totalInput uint64, totalOutput uint64, fee uint64, errFound error) { - combinedUtxoOps = append(combinedUtxoOps, utxoOps) - totalInputs = append(totalInputs, totalInput) - totalOutputs = append(totalOutputs, totalOutput) - fees = append(fees, fee) - errorsFound = append(errorsFound, errFound) - } - - // Connect the transactions in the order they are given. - for ii, txn := range txns { - // Create a copy of the view to connect the transactions to in the event we have a failing txn. - copiedView := bav.CopyUtxoView() - - // Connect the transaction to the copied view. - utxoOpsForTxn, totalInput, totalOutput, fee, err := copiedView.ConnectTransaction( - txn, txHashes[ii], blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) - if err != nil && ignoreFailing { - // If ignoreFailing was set, we mark the transaction as failing and continue. - glog.V(2).Infof("_connectTransactionsFailSafe: Ignoring failing txn %d: %v", ii, err) - updateValues(nil, 0, 0, 0, err) - continue - } else if err != nil { - return nil, nil, nil, nil, nil, - errors.Wrapf(err, "_connectTransactionsFailSafe: Problem connecting txn %d on copy view", ii) - } - - // At this point, we know the transaction will connect successfully, so we connect it to the main view. - utxoOpsForTxn, totalInput, totalOutput, fee, err = bav.ConnectTransaction( - txn, txHashes[ii], blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) - if err != nil { - return nil, nil, nil, nil, nil, - errors.Wrapf(err, "_connectTransactionsFailSafe: Problem connecting txn %d", ii) - } - updateValues(utxoOpsForTxn, totalInput, totalOutput, fee, nil) - - // If the transactionConnectLimit was set to 0, we will try to connect all the provided transactions. 
- if transactionConnectLimit == 0 { - continue - } - - // Otherwise, if transactionConnectLimit is non-zero, we'll keep track of how many transactions we've - // successfully connected so far. - totalConnectedTxns++ - if totalConnectedTxns >= transactionConnectLimit { - break - } - } - - return combinedUtxoOps, totalInputs, totalOutputs, fees, errorsFound, nil -} - func (bav *UtxoView) ValidateTransactionNonce(txn *MsgDeSoTxn, blockHeight uint64) error { if txn == nil || txn.TxnNonce == nil { return fmt.Errorf("ValidateTransactionNonce: Nonce or txn is nil for public key %v", diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 0d0aad067..9bd5eddde 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -3,7 +3,6 @@ package lib import ( "fmt" "path/filepath" - "strings" "sync" "sync/atomic" "time" @@ -639,23 +638,6 @@ func (mp *PosMempool) checkTransactionSanity(txn *MsgDeSoTxn, expectInnerAtomicT return nil } -// updateTransactionValidatedStatus updates the validated status of a mempool transaction with the provided txnHash. -func (mp *PosMempool) updateTransactionValidatedStatus(txnHash *BlockHash, validated bool) { - mp.Lock() - defer mp.Unlock() - - if !mp.IsRunning() || txnHash == nil { - return - } - - txn := mp.txnRegister.GetTransaction(txnHash) - if txn == nil { - return - } - - txn.SetValidated(validated) -} - func (mp *PosMempool) checkNonceTracker(txn *MempoolTx, userPk *PublicKey) (*MempoolTx, error) { // Check the nonceTracker to see if this transaction is meant to replace an existing one. @@ -813,6 +795,13 @@ func (mp *PosMempool) RemoveTransaction(txnHash *BlockHash) error { return mp.removeTransactionNoLock(txn, true) } +func (mp *PosMempool) removeTransaction(txn *MempoolTx, persistToDb bool) error { + mp.Lock() + defer mp.Unlock() + + return mp.removeTransactionNoLock(txn, persistToDb) +} + func (mp *PosMempool) removeTransactionNoLock(txn *MempoolTx, persistToDb bool) error { // First, sanity check our reserved balance. 
userPk := NewPublicKey(txn.Tx.PublicKey) @@ -921,13 +910,17 @@ func (mp *PosMempool) validateTransactions() error { if !mp.IsRunning() { return nil } + // We hold a read-lock on the mempool to get the transactions and the latest block view. mp.RLock() - // We copy the reference to the readOnlyLatestBlockView. Since the utxoView is immutable, we don't need to copy the - // entire view while we hold the lock. - // We hold a read-lock on the mempool to get the transactions and the latest block view. + + // It's fine to create a copy of the pointer to the readOnlyLatestBlockView. Since the + // utxoView is immutable, we don't need to copy the entire view while we hold the lock. validationView := mp.validateTransactionsReadOnlyLatestBlockView mempoolTxns := mp.getTransactionsNoLock() + nextBlockHeight := mp.latestBlockHeight + 1 + nextBlockTimestamp := time.Now().UnixNano() + mp.RUnlock() // If the validation view is nil, there's nothing to do so we return early. @@ -935,68 +928,53 @@ func (mp *PosMempool) validateTransactions() error { return nil } - // Convert the mempool transactions to the MsgDeSoTxn format, which we can use for connecting to the validation view. - var txns []*MsgDeSoTxn - var txHashes []*BlockHash - for _, txn := range mempoolTxns { - txns = append(txns, txn.Tx) - txHashes = append(txHashes, txn.Hash) - } - // Copy the validation view to avoid modifying the readOnlyLatestBlockView. - copyValidationView := validationView.CopyUtxoView() - // Connect the transactions to the validation view. We use the latest block height + 1 as the block height to connect - // the transactions. This is because the mempool contains transactions that we use for producing the next block. 
- _, _, _, _, errorsFound, err := copyValidationView.ConnectTransactionsFailSafeWithLimit( - txns, txHashes, uint32(mp.latestBlockHeight)+1, - time.Now().UnixNano(), true, false, true, mp.maxValidationViewConnects) - if err != nil { - return errors.Wrapf(err, "PosMempool.validateTransactions: Problem connecting transactions") - } - - // We iterate through the successFlags and update the validated status of the transactions in the mempool. - var txnsToRemove []*MempoolTx - for ii, errFound := range errorsFound { - if ii >= len(mempoolTxns) { + // Iterate through all the transactions in the mempool and connect them to copies of the validation view. + for ii, txn := range mempoolTxns { + // Break out if we've attempted to connect the maximum number of txns to the view + if uint64(ii) >= mp.maxValidationViewConnects { break } - // If the transaction successfully connected to the validation view, we update the validated status of the - // transaction in the mempool. If the transaction failed to connect to the validation view, we add it to the - // txnsToRemove list. Note that we don't need to hold a lock while updating the validated status of the - // transactions in the mempool, since the updateTransactionValidatedStatus already holds the lock. - if errFound == nil { - mp.updateTransactionValidatedStatus(mempoolTxns[ii].Hash, true) - } else { - txnsToRemove = append(txnsToRemove, mempoolTxns[ii]) - // Add an error for the txn to our cache so we can return it to the user if they - // ask for it later. - mp.recentRejectedTxnCache.Add(*mempoolTxns[ii].Hash, errFound) - } - } - // Now remove all transactions from the txnsToRemove list from the main mempool. - mp.Lock() - for _, txn := range txnsToRemove { - if err := mp.removeTransactionNoLock(txn, true); err != nil { - glog.Errorf("PosMempool.validateTransactions: Problem removing transaction with hash (%v): %v", txn.Hash, err) + // Connect the transaction to a copy of the validation view. 
We can skip signatures on the transaction + // connect if the transaction has already previously been validated and been found to have a valid + // signature. This optimizes the connect by not repeating signature verification on a transaction + // more than once. + resultingUtxoView, _, _, _, _, err := validationView.ConnectTransactionIntoNewUtxoView( + txn.Tx, txn.Hash, uint32(nextBlockHeight), nextBlockTimestamp, !txn.IsValidated(), false, + ) + + // If the txn fails to connect, then we set its validated status to false and remove it from the + // mempool. We also mark it as having been rejected so that it can't get re-submitted to the mempool. + if err != nil { + // Try to remove the transaction with a lock. + mp.removeTransaction(txn, true) + + // Mark the txn as invalid and add an error to the cache so we can return it to the user if they + // try to resubmit it. + txn.SetValidated(false) + mp.recentRejectedTxnCache.Add(*mempoolTxns[ii].Hash, err) + + continue } + + // The txn successfully connected. We set its validated status to true. + txn.SetValidated(true) + + // We do a simple pointer update on the validation view here because the txn has already been + // connected to the resulting UtxoView. This allows us to avoid performing a second + // UtxoView.ConnectTransaction operation. + validationView = resultingUtxoView } - mp.Unlock() - // We also update the augmentedLatestBlockView with the view after the transactions have been connected. + + // Update the augmentedLatestBlockView with the latest validationView after the transactions + // have been connected. mp.augmentedReadOnlyLatestBlockViewMutex.Lock() - mp.augmentedReadOnlyLatestBlockView = copyValidationView + mp.augmentedReadOnlyLatestBlockView = validationView mp.augmentedReadOnlyLatestBlockViewMutex.Unlock() + // Increment the augmentedLatestBlockViewSequenceNumber. atomic.AddInt64(&mp.augmentedLatestBlockViewSequenceNumber, 1) - // Log the hashes for transactions that were removed. 
- if len(txnsToRemove) > 0 { - var removedTxnHashes []string - for _, txn := range txnsToRemove { - removedTxnHashes = append(removedTxnHashes, txn.Hash.String()) - } - glog.V(1).Infof("PosMempool.validateTransactions: Transactions with the following hashes were removed: %v", - strings.Join(removedTxnHashes, ",")) - } return nil } @@ -1071,9 +1049,11 @@ func (mp *PosMempool) GetAugmentedUniversalView() (*UtxoView, error) { newView := readOnlyViewPointer.CopyUtxoView() return newView, nil } + func (mp *PosMempool) GetAugmentedUtxoViewForPublicKey(pk []byte, optionalTx *MsgDeSoTxn) (*UtxoView, error) { return mp.GetAugmentedUniversalView() } + func (mp *PosMempool) BlockUntilReadOnlyViewRegenerated() { oldSeqNum := atomic.LoadInt64(&mp.augmentedLatestBlockViewSequenceNumber) newSeqNum := oldSeqNum diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index a850d3316..448c56906 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -380,12 +380,14 @@ func TestPosMempoolTransactionValidation(t *testing.T) { passingTxns = append(passingTxns, txn) _wrappedPosMempoolAddTransaction(t, mempool, txn) } + for ii := 0; ii < 10; ii++ { // Make sure the transaction fails the signature verification. txn := _generateTestTxnWithOutputs(t, rand, feeMin, feeMax, m0PubBytes, m1Priv, 100, 25, output) failingTxns = append(failingTxns, txn) _wrappedPosMempoolAddTransaction(t, mempool, txn) } + // Wait for the validation routine to finish. time.Sleep(20 * time.Millisecond) totalValidatedTxns := 0 @@ -394,8 +396,10 @@ func TestPosMempoolTransactionValidation(t *testing.T) { totalValidatedTxns++ } } + // Make sure that the number of validated transactions is equal to the maxValidationViewConnects. - require.Equal(t, 5, totalValidatedTxns) + require.Equal(t, 4, totalValidatedTxns) + // Now make sure that failing transactions were either removed, or remained unvalidated. 
for _, txn := range failingTxns { fetchedTxn := mempool.GetTransaction(txn.Hash()) @@ -403,6 +407,7 @@ func TestPosMempoolTransactionValidation(t *testing.T) { require.False(t, fetchedTxn.IsValidated()) } } + mempool.Stop() } From 9a99f4a240ba446f22eaf4709c119116f61209de Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 15 Apr 2024 19:51:32 -0400 Subject: [PATCH 640/762] Use TxSizeBytes in estimateFeeRateNanosPerKBGivenTransactionRegister (#1225) --- lib/pos_fee_estimator.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index 2462428e3..7900f2543 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -622,11 +622,7 @@ func (posFeeEstimator *PoSFeeEstimator) estimateFeeRateNanosPerKBGivenTransactio if !ok { break } - txnBytes, err := tx.Tx.ToBytes(false) - if err != nil { - return 0, errors.Wrap(err, "estimateFeeRateNanosPerKBGivenTransactionRegister: Problem serializing txn") - } - totalTxnsSize += uint64(len(txnBytes)) + totalTxnsSize += tx.TxSizeBytes txns = append(txns, tx) // TODO: I think we want to include the txn that puts us over the limit, but // we can just move this check up a few lines if that's wrong. 
From 729ff8376da15f302236df93cebce02967840bc0 Mon Sep 17 00:00:00 2001 From: Jon Pollock <135658176+poolcoke@users.noreply.github.com> Date: Mon, 15 Apr 2024 20:57:05 -0700 Subject: [PATCH 641/762] Fix PoW mempool and fees for Atomics (#1220) --- lib/block_producer.go | 68 ++++++++++++++++++++++++++++------- lib/block_view.go | 58 +++++++++++++++++++++--------- lib/block_view_atomic_txns.go | 21 +++++++++++ lib/legacy_mempool.go | 46 +++++++++++++++++++----- lib/pos_block_producer.go | 28 ++++++++++++--- 5 files changed, 180 insertions(+), 41 deletions(-) diff --git a/lib/block_producer.go b/lib/block_producer.go index 62bbdda98..5e70d22a8 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -305,12 +305,36 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) includeFeesInBlockReward := true if blockRet.Header.Height >= uint64(desoBlockProducer.params.ForkHeights.BlockRewardPatchBlockHeight) { - // Parse the transactor's public key to compare with the block reward output public key. - transactorPublicKey, err := btcec.ParsePubKey(txnInBlock.PublicKey) - if err != nil { - return nil, nil, nil, errors.Wrapf(err, "DeSoBlockProducer._getBlockTemplate: problem parsing transactor public key: ") + if txnInBlock.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { + // Parse the transactor's public key to compare with the block reward output public key. + transactorPublicKey, err := btcec.ParsePubKey(txnInBlock.PublicKey) + if err != nil { + return nil, nil, nil, + errors.Wrapf(err, + "DeSoBlockProducer._getBlockTemplate: problem parsing transactor public key: ") + } + includeFeesInBlockReward = !transactorPublicKey.IsEqual(blockRewardOutputPublicKey) + } else { + // In the case of atomic transaction wrappers, we must parse and process each inner transaction + // independently. We let includeFeesInBlockRewards remain true but decrement feeNanos whenever + // transactor public key equals block reward output public key. 
In effect, we ignore + // fees in atomic transactions where the transactor is equivalent to the block producer. + txnMeta, ok := txnInBlock.TxnMeta.(*AtomicTxnsWrapperMetadata) + if !ok { + return nil, nil, nil, + errors.Wrapf(err, + "DeSoBlockProducer._getBlockTemplate: "+ + "problem casting txn metadata to AtomicTxnsWrapperMetadata: ") + } + feeNanos, err = filterOutBlockRewardRecipientFees( + txnMeta.Txns, blockRewardOutputPublicKey) + if err != nil { + return nil, nil, nil, + errors.Wrapf(err, + "DeSoBlockProducer._getBlockTemplate: "+ + "problem filtering out block reward recipient fees: ") + } } - includeFeesInBlockReward = !transactorPublicKey.IsEqual(blockRewardOutputPublicKey) } // If the transactor is not the block reward output (or we're before the BlockRewardPatchBlockHeight), @@ -321,7 +345,8 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) if includeFeesInBlockReward { // Check for overflow if totalFeeNanos > math.MaxUint64-feeNanos { - return nil, nil, nil, fmt.Errorf("DeSoBlockProducer._getBlockTemplate: Total fee overflowed uint64") + return nil, nil, nil, + fmt.Errorf("DeSoBlockProducer._getBlockTemplate: Total fee overflowed uint64") } // Add the fee to the block reward output as we go. 
Note this has some risk of // increasing the size of the block by one byte, but it seems like this is an @@ -452,17 +477,36 @@ func RecomputeBlockRewardWithBlockRewardOutputPublicKey( // and sum fees to calculate the block reward totalFees := uint64(0) for _, txn := range block.Txns[1:] { - transactorPublicKey, err := btcec.ParsePubKey(txn.PublicKey) - if err != nil { - glog.Errorf("DeSoMiner._startThread: Error parsing transactor public key: %v", err) - continue - } - if !transactorPublicKey.IsEqual(blockRewardOutputPublicKey) { + if txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { + transactorPublicKey, err := btcec.ParsePubKey(txn.PublicKey) + if err != nil { + glog.Errorf("DeSoMiner._startThread: Error parsing transactor public key: %v", err) + continue + } + if transactorPublicKey.IsEqual(blockRewardOutputPublicKey) { + continue + } totalFees, err = SafeUint64().Add(totalFees, txn.TxnFeeNanos) if err != nil { glog.Errorf("DeSoMiner._startThread: Error adding txn fee: %v", err) continue } + } else { + txnMeta, ok := txn.TxnMeta.(*AtomicTxnsWrapperMetadata) + if !ok { + glog.Errorf("DeSoMiner._startThread: Error casting txn metadata to AtomicTxnsWrapperMetadata") + continue + } + nonBlockRewardRecipientFees, err := filterOutBlockRewardRecipientFees(txnMeta.Txns, blockRewardOutputPublicKey) + if err != nil { + glog.Errorf("DeSoMiner._startThread: Error filtering out block reward recipient fees: %v", err) + continue + } + totalFees, err = SafeUint64().Add(totalFees, nonBlockRewardRecipientFees) + if err != nil { + glog.Errorf("DeSoMiner._startThread: Error adding txn fee: %v", err) + continue + } } } block.Txns[0].TxOutputs[0].AmountNanos = CalcBlockRewardNanos(uint32(block.Header.Height), params) + totalFees diff --git a/lib/block_view.go b/lib/block_view.go index 3dced2df2..9217a3637 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4343,7 +4343,6 @@ func (bav *UtxoView) ConnectBlock( if err != nil { return nil, errors.Wrapf(err, "ConnectBlock: 
error connecting txn #%d", txIndex) } - _, utilityFee = computeBMF(currentFees) // After the block reward patch block height, we only include fees from transactions // where the transactor is not the block reward output public key. This prevents @@ -4351,7 +4350,8 @@ func (bav *UtxoView) ConnectBlock( // included in blocks for free. includeFeesInBlockReward := true if blockHeight >= uint64(bav.Params.ForkHeights.BlockRewardPatchBlockHeight) && - txn.TxnMeta.GetTxnType() != TxnTypeBlockReward { + txn.TxnMeta.GetTxnType() != TxnTypeBlockReward && + txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { transactorPubKey, err := btcec.ParsePubKey(txn.PublicKey) if err != nil { return nil, fmt.Errorf("ConnectBlock: Problem parsing transactor public key: %v", err) @@ -4360,21 +4360,47 @@ func (bav *UtxoView) ConnectBlock( } if includeFeesInBlockReward { - // Add the fees from this txn to the total fees. If any overflow occurs - // mark the block as invalid and return a rule error. Note that block reward - // txns should count as having zero fees. - if totalFees > (math.MaxUint64 - currentFees) { - return nil, RuleErrorTxnOutputWithInvalidAmount - } - totalFees += currentFees + if txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { + // Compute the BMF given the current fees paid in the block. + _, utilityFee = computeBMF(currentFees) + + // Add the fees from this txn to the total fees. If any overflow occurs + // mark the block as invalid and return a rule error. Note that block reward + // txns should count as having zero fees. + if totalFees > (math.MaxUint64 - currentFees) { + return nil, RuleErrorTxnOutputWithInvalidAmount + } + totalFees += currentFees - // For PoS, the maximum block reward is based on the maximum utility fee. - // Add the utility fees to the max utility fees. If any overflow - // occurs mark the block as invalid and return a rule error. 
- maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee) - if err != nil { - return nil, errors.Wrapf(RuleErrorPoSBlockRewardWithInvalidAmount, - "ConnectBlock: error computing maxUtilityFee: %v", err) + // For PoS, the maximum block reward is based on the maximum utility fee. + // Add the utility fees to the max utility fees. If any overflow + // occurs mark the block as invalid and return a rule error. + maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee) + if err != nil { + return nil, errors.Wrapf(RuleErrorPoSBlockRewardWithInvalidAmount, + "ConnectBlock: error computing maxUtilityFee: %v", err) + } + } else { + txnMeta, ok := txn.TxnMeta.(*AtomicTxnsWrapperMetadata) + if !ok { + return nil, fmt.Errorf("ConnectBlock: AtomicTxnsWrapperMetadata type assertion failed") + } + nonBlockRewardRecipientFees, err := filterOutBlockRewardRecipientFees( + txnMeta.Txns, blockRewardOutputPublicKey) + if err != nil { + return nil, errors.Wrapf(err, "ConnectBlock: error filtering out block reward recipient fees") + } + totalFees, err = SafeUint64().Add(totalFees, nonBlockRewardRecipientFees) + if err != nil { + return nil, errors.Wrap( + err, "ConnectBlock: error adding non-block-reward recipient fees from atomic transaction") + } + _, utilityFee = computeBMF(nonBlockRewardRecipientFees) + maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee) + if err != nil { + return nil, errors.Wrap(err, + "ConnectBlock: error computing maxUtilityFee: %v") + } } } diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index 8f0055161..bc15c5cf5 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -3,6 +3,7 @@ package lib import ( "bytes" "fmt" + "github.com/btcsuite/btcd/btcec/v2" "github.com/pkg/errors" "io" ) @@ -443,6 +444,26 @@ func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsWrapperMetadata) error { return nil } +func filterOutBlockRewardRecipientFees(txns []*MsgDeSoTxn, publicRewardPublicKey 
*btcec.PublicKey) (uint64, error) { + var nonBlockRewardRecipientFees uint64 + for _, txn := range txns { + // If the transaction is performed by any public key other than block reward recipient transaction, + // add the fees to the total. + transactorPublicKey, err := btcec.ParsePubKey(txn.PublicKey) + if err != nil { + return 0, errors.Wrap(err, "filterBlockRewardRecipientFees: failed to parse public key") + } + if transactorPublicKey.IsEqual(publicRewardPublicKey) { + continue + } + nonBlockRewardRecipientFees, err = SafeUint64().Add(nonBlockRewardRecipientFees, txn.TxnFeeNanos) + if err != nil { + return 0, errors.Wrap(err, "filterBlockRewardRecipientFees: failed to add fees") + } + } + return nonBlockRewardRecipientFees, nil +} + func (bav *UtxoView) _disconnectAtomicTxnsWrapper( operationType OperationType, currentTxn *MsgDeSoTxn, diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 2b78f9f03..f12b8966c 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -1008,6 +1008,32 @@ func (mp *DeSoMempool) rebuildBackupView() { mp.backupUniversalUtxoView = mp.universalUtxoView.CopyUtxoView() } +// tryAcceptSingleTransactionNonce is similar to _connectSingleTxn in that +// it's meant to be used with non-atomic transactions. Here, we check that +// given a mempool and a block height a nonce is logically consistent with the +// current state of the chain. Possible errors are no nonce being set or +// the transaction nonce being expired. +// +// NOTE: Because atomic transaction wrappers do not have a functional nonce, +// this function should not be used for them. 
+func (mp *DeSoMempool) tryAcceptSingleTransactionNonce( + tx *MsgDeSoTxn, + blockHeight uint64, +) error { + if tx.TxnNonce == nil { + return TxErrorNoNonceAfterBalanceModelBlockHeight + } + if tx.TxnNonce.ExpirationBlockHeight < blockHeight { + return TxErrorNonceExpired + } + if mp.universalUtxoView.GetCurrentGlobalParamsEntry().MaxNonceExpirationBlockHeightOffset != 0 && + tx.TxnNonce.ExpirationBlockHeight > + blockHeight+mp.universalUtxoView.GetCurrentGlobalParamsEntry().MaxNonceExpirationBlockHeightOffset { + return TxErrorNonceExpirationBlockHeightOffsetExceeded + } + return nil +} + // See TryAcceptTransaction. The write lock must be held when calling this function. // // TODO: Allow replacing a transaction with a higher fee. @@ -1021,16 +1047,18 @@ func (mp *DeSoMempool) tryAcceptTransaction( return nil, nil, TxErrorIndividualBlockReward } - if blockHeight >= uint64(mp.bc.params.ForkHeights.BalanceModelBlockHeight) { - if tx.TxnNonce == nil { - return nil, nil, TxErrorNoNonceAfterBalanceModelBlockHeight - } - if tx.TxnNonce.ExpirationBlockHeight < blockHeight { - return nil, nil, TxErrorNonceExpired + // Try accepting the nonce. 
+ if blockHeight >= uint64(mp.bc.params.ForkHeights.BalanceModelBlockHeight) && + tx.TxnMeta.GetTxnType() == TxnTypeAtomicTxnsWrapper { + for _, innerTxn := range tx.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns { + if err := mp.tryAcceptSingleTransactionNonce(innerTxn, blockHeight); err != nil { + return nil, nil, err + } } - if mp.universalUtxoView.GetCurrentGlobalParamsEntry().MaxNonceExpirationBlockHeightOffset != 0 && - tx.TxnNonce.ExpirationBlockHeight > blockHeight+mp.universalUtxoView.GetCurrentGlobalParamsEntry().MaxNonceExpirationBlockHeightOffset { - return nil, nil, TxErrorNonceExpirationBlockHeightOffsetExceeded + } else if blockHeight >= uint64(mp.bc.params.ForkHeights.BalanceModelBlockHeight) && + tx.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { + if err := mp.tryAcceptSingleTransactionNonce(tx, blockHeight); err != nil { + return nil, nil, err } } diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index a88247fd5..348c8c440 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -1,6 +1,7 @@ package lib import ( + "github.com/btcsuite/btcd/btcec/v2" "math" "time" @@ -332,11 +333,30 @@ func (pbp *PosBlockProducer) getBlockTransactions( blocksTxns = append(blocksTxns, txn.GetTxn()) currentBlockSize += uint64(len(txnBytes)) - // If the transactor is the block producer, then they won't receive the utility fee. - if blockProducerPublicKey.Equal(*NewPublicKey(txn.PublicKey)) { - continue + if txn.GetTxn().TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { + // If the transactor is the block producer, then they won't receive the utility fee. 
+ if blockProducerPublicKey.Equal(*NewPublicKey(txn.PublicKey)) { + continue + } + } else { + txnMeta, ok := txn.GetTxn().TxnMeta.(*AtomicTxnsWrapperMetadata) + if !ok { + return nil, 0, + errors.New("Error casting txn meta to AtomicSwapMetadata") + } + blockProducerPublicKeyBtcec, err := btcec.ParsePubKey(blockProducerPublicKey.ToBytes()) + if err != nil { + return nil, 0, + errors.Wrapf(err, "Error parsing block producer public key: ") + } + // Set fees to the sum of fees paid by public keys other than the block producer. + fees, err = filterOutBlockRewardRecipientFees( + txnMeta.Txns, blockProducerPublicKeyBtcec) + if err != nil { + return nil, 0, + errors.Wrapf(err, "error filtering out block reward recipient fees") + } } - // Compute BMF for the transaction. _, utilityFee := computeBMF(fees) maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee) From eb64c96f46bcca8d50d01943b80ffca8cf298473 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 16 Apr 2024 10:42:08 -0400 Subject: [PATCH 642/762] No error from estimateFeeRateGivenTxnRegister (#1226) --- lib/legacy_mempool.go | 8 ++++---- lib/pos_fee_estimator.go | 33 ++++++++++++--------------------- lib/pos_fee_estimator_test.go | 30 ++++++++++-------------------- lib/pos_mempool.go | 4 ++-- 4 files changed, 28 insertions(+), 47 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index f12b8966c..a03cd7d1a 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2482,7 +2482,7 @@ func EstimateMaxTxnFeeV1(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) uint64 { func (mp *DeSoMempool) EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64, _ uint64, _ uint64, _ uint64, _ uint64, _ uint64) (uint64, error) { - feeRate, _ := mp.EstimateFeeRate(minFeeRateNanosPerKB, 0, 0, 0, 0, 0) + feeRate := mp.EstimateFeeRate(minFeeRateNanosPerKB, 0, 0, 0, 0, 0) return EstimateMaxTxnFeeV1(txn, feeRate), nil } @@ -2492,11 +2492,11 @@ func (mp 
*DeSoMempool) EstimateFeeRate( _ uint64, _ uint64, _ uint64, - _ uint64) (uint64, error) { + _ uint64) uint64 { if minFeeRateNanosPerKB < mp.readOnlyUtxoView.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB { - return mp.readOnlyUtxoView.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB, nil + return mp.readOnlyUtxoView.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB } - return minFeeRateNanosPerKB, nil + return minFeeRateNanosPerKB } func convertMempoolTxsToSummaryStats(mempoolTxs []*MempoolTx) map[string]*SummaryStats { diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index 7900f2543..aed8409e6 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -289,36 +289,30 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( pastBlocksCongestionFactorBasisPoints uint64, pastBlocksPriorityPercentileBasisPoints uint64, maxBlockSize uint64, -) (uint64, error) { +) uint64 { posFeeEstimator.rwLock.RLock() defer posFeeEstimator.rwLock.RUnlock() - pastBlockFeeRate, err := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + pastBlockFeeRate := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.pastBlocksTransactionRegister, pastBlocksCongestionFactorBasisPoints, pastBlocksPriorityPercentileBasisPoints, posFeeEstimator.numPastBlocks, maxBlockSize, ) - if err != nil { - return 0, errors.Wrap(err, "EstimateFeeRateNanosPerKB: Problem computing past block fee rate") - } - mempoolFeeRate, err := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + mempoolFeeRate := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.mempoolTransactionRegister, mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, posFeeEstimator.numMempoolBlocks, maxBlockSize, ) - if err != nil { - return 0, errors.Wrap(err, "EstimateFeeRateNanosPerKB: Problem computing mempool fee rate") - } if minFeeRateNanosPerKB > 
pastBlockFeeRate && minFeeRateNanosPerKB > mempoolFeeRate { - return minFeeRateNanosPerKB, nil + return minFeeRateNanosPerKB } if pastBlockFeeRate < mempoolFeeRate { - return mempoolFeeRate, nil + return mempoolFeeRate } - return pastBlockFeeRate, nil + return pastBlockFeeRate } // EstimateFee estimates the fee in nanos for the provided transaction by taking the @@ -513,16 +507,13 @@ func (posFeeEstimator *PoSFeeEstimator) estimateTxnFeeGivenTransactionRegister( numBlocks uint64, maxBlockSize uint64, ) (uint64, error) { - feeRateNanosPerKB, err := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + feeRateNanosPerKB := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( txnRegister, congestionFactorBasisPoints, priorityPercentileBasisPoints, numBlocks, maxBlockSize, ) - if err != nil { - return 0, errors.Wrap(err, "estimateTxnFeeGivenTransactionRegister: Problem computing fee rate") - } txnFee, err := computeFeeGivenTxnAndFeeRate(txn, feeRateNanosPerKB) if err != nil { return 0, errors.Wrap(err, "estimateTxnFeeGivenTransactionRegister: Problem computing txn fee") @@ -610,7 +601,7 @@ func (posFeeEstimator *PoSFeeEstimator) estimateFeeRateNanosPerKBGivenTransactio priorityPercentileBasisPoints uint64, numBlocks uint64, maxBlockSize uint64, -) (uint64, error) { +) uint64 { txnRegister.RLock() defer txnRegister.RUnlock() it := txnRegister.GetFeeTimeIterator() @@ -633,7 +624,7 @@ func (posFeeEstimator *PoSFeeEstimator) estimateFeeRateNanosPerKBGivenTransactio globalMinFeeRate, _ := txnRegister.minimumNetworkFeeNanosPerKB.Uint64() if len(txns) == 0 { // If there are no txns in the transaction register, we simply return the minimum network fee. 
- return globalMinFeeRate, nil + return globalMinFeeRate } bucketMinFee, bucketMaxFee := getPriorityFeeBucketFromTxns( @@ -643,7 +634,7 @@ func (posFeeEstimator *PoSFeeEstimator) estimateFeeRateNanosPerKBGivenTransactio txnRegister.feeBucketGrowthRateBasisPoints) // If the bucketMinFee is less than or equal to the global min fee rate, we return the global min fee rate. if bucketMinFee <= globalMinFeeRate { - return globalMinFeeRate, nil + return globalMinFeeRate } // Compute the congestion threshold. If our congestion factor is 100% (or 10,000 bps), @@ -657,10 +648,10 @@ func (posFeeEstimator *PoSFeeEstimator) estimateFeeRateNanosPerKBGivenTransactio bucketExponent := computeFeeTimeBucketExponentFromFeeNanosPerKB( bucketMinFee, txnRegister.minimumNetworkFeeNanosPerKB, txnRegister.feeBucketGrowthRateBasisPoints) return computeFeeTimeBucketMinFromExponent( - bucketExponent-1, txnRegister.minimumNetworkFeeNanosPerKB, txnRegister.feeBucketGrowthRateBasisPoints), nil + bucketExponent-1, txnRegister.minimumNetworkFeeNanosPerKB, txnRegister.feeBucketGrowthRateBasisPoints) } // Otherwise, we return one bucket higher than Priority fee - return bucketMaxFee + 1, nil + return bucketMaxFee + 1 } // getPriorityFeeBucketFromTxns computes the priority fee bucket for the given transactions using the diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index 12c37799f..25f37f3bf 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -45,14 +45,12 @@ func TestFeeEstimator(t *testing.T) { }}, 1, mempool.globalParams) require.NoError(t, err) // When there's nothing in the mempool, we return the global minimum fee rate. 
- baseFeeRate, err := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + baseFeeRate := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.mempoolTransactionRegister, 10000, 10000, 1, 1000) - require.NoError(t, err) require.Equal(t, globalParams.MinimumNetworkFeeNanosPerKB, baseFeeRate) // When there's nothing in the past blocks, we return the global minimum fee rate. - baseFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + baseFeeRate = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.pastBlocksTransactionRegister, 10000, 10000, 1, 1000) - require.NoError(t, err) require.Equal(t, globalParams.MinimumNetworkFeeNanosPerKB, baseFeeRate) // Make a dummy transaction, so we can check the fee rate. txn := _generateTestTxnWithFeeRate(t, randSource, feeMin, m0PubBytes, m0Priv, 100, 25) @@ -137,10 +135,9 @@ func TestFeeEstimator(t *testing.T) { if maxBlockSizeMempool > maxBlockSizePastBlocks { maxBlockSizeHybrid = maxBlockSizeMempool } - estimatedMempoolFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + estimatedMempoolFeeRate = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.mempoolTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, maxBlockSizeMempool) - require.NoError(t, err) require.Equal(t, nextFeeBucketMin, estimatedMempoolFeeRate) estimatedMempoolFee, err = posFeeEstimator.mempoolFeeEstimate(txn, congestionFactor, priorityPercentileBasisPoints, maxBlockSizeMempool) @@ -148,10 +145,9 @@ func TestFeeEstimator(t *testing.T) { validateTxnFee(t, txn, estimatedMempoolFee, estimatedMempoolFeeRate) // Let's do the same for past blocks estimator - estimatedPastBlocksFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + estimatedPastBlocksFeeRate = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( 
posFeeEstimator.pastBlocksTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, maxBlockSizePastBlocks) - require.NoError(t, err) require.Equal(t, nextFeeBucketMin, estimatedPastBlocksFeeRate) estimatedPastBlocksFee, err = posFeeEstimator.pastBlocksFeeEstimate(txn, congestionFactor, priorityPercentileBasisPoints, maxBlockSizePastBlocks) @@ -184,10 +180,9 @@ func TestFeeEstimator(t *testing.T) { if maxBlockSizeMempool > maxBlockSizePastBlocks { maxBlockSizeHybrid = maxBlockSizeMempool } - estimatedMempoolFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + estimatedMempoolFeeRate = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.mempoolTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, maxBlockSizeMempool) - require.NoError(t, err) require.Equal(t, nextFeeBucketMin, estimatedMempoolFeeRate) estimatedMempoolFee, err = posFeeEstimator.mempoolFeeEstimate(txn, congestionFactor, priorityPercentileBasisPoints, maxBlockSizeMempool) @@ -195,10 +190,9 @@ func TestFeeEstimator(t *testing.T) { validateTxnFee(t, txn, estimatedMempoolFee, estimatedMempoolFeeRate) // Let's do the same for past blocks estimator - estimatedPastBlocksFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + estimatedPastBlocksFeeRate = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.pastBlocksTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, maxBlockSizePastBlocks) - require.NoError(t, err) require.Equal(t, nextFeeBucketMin, estimatedPastBlocksFeeRate) estimatedPastBlocksFee, err = posFeeEstimator.pastBlocksFeeEstimate(txn, congestionFactor, priorityPercentileBasisPoints, maxBlockSizePastBlocks) @@ -231,10 +225,9 @@ func TestFeeEstimator(t *testing.T) { if maxBlockSizeMempool > maxBlockSizePastBlocks { maxBlockSizeHybrid = maxBlockSizeMempool } - estimatedMempoolFeeRate, err = 
posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + estimatedMempoolFeeRate = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.mempoolTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, maxBlockSizeMempool) - require.NoError(t, err) require.Equal(t, nextFeeBucketMin, estimatedMempoolFeeRate) estimatedMempoolFee, err = posFeeEstimator.mempoolFeeEstimate(txn, congestionFactor, priorityPercentileBasisPoints, maxBlockSizeMempool) @@ -242,10 +235,9 @@ func TestFeeEstimator(t *testing.T) { validateTxnFee(t, txn, estimatedMempoolFee, estimatedMempoolFeeRate) // Let's do the same for past blocks estimator - estimatedPastBlocksFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + estimatedPastBlocksFeeRate = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.pastBlocksTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, maxBlockSizePastBlocks) - require.NoError(t, err) require.Equal(t, nextFeeBucketMin, estimatedPastBlocksFeeRate) estimatedPastBlocksFee, err = posFeeEstimator.pastBlocksFeeEstimate(txn, congestionFactor, priorityPercentileBasisPoints, maxBlockSizePastBlocks) @@ -278,10 +270,9 @@ func TestFeeEstimator(t *testing.T) { if maxBlockSizeMempool > maxBlockSizePastBlocks { maxBlockSizeHybrid = maxBlockSizeMempool } - estimatedMempoolFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + estimatedMempoolFeeRate = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.mempoolTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, maxBlockSizeMempool) - require.NoError(t, err) require.Equal(t, minFeeBucketMin, estimatedMempoolFeeRate) estimatedMempoolFee, err = posFeeEstimator.mempoolFeeEstimate(txn, congestionFactor, priorityPercentileBasisPoints, maxBlockSizeMempool) @@ -289,10 +280,9 @@ func TestFeeEstimator(t *testing.T) { validateTxnFee(t, 
txn, estimatedMempoolFee, estimatedMempoolFeeRate) // Let's do the same for past blocks estimator - estimatedPastBlocksFeeRate, err = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( + estimatedPastBlocksFeeRate = posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.pastBlocksTransactionRegister, congestionFactor, priorityPercentileBasisPoints, 1, maxBlockSizePastBlocks) - require.NoError(t, err) require.Equal(t, minFeeBucketMin, estimatedPastBlocksFeeRate) estimatedPastBlocksFee, err = posFeeEstimator.pastBlocksFeeEstimate(txn, congestionFactor, priorityPercentileBasisPoints, maxBlockSizePastBlocks) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 9bd5eddde..fc63e5734 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -60,7 +60,7 @@ type Mempool interface { pastBlocksCongestionFactorBasisPoints uint64, pastBlocksPriorityPercentileBasisPoints uint64, maxBlockSize uint64, - ) (uint64, error) + ) uint64 } // GetAugmentedUniversalViewWithAdditionalTransactions is meant as a helper function @@ -1165,7 +1165,7 @@ func (mp *PosMempool) EstimateFeeRate( mempoolPriorityPercentileBasisPoints uint64, pastBlocksCongestionFactorBasisPoints uint64, pastBlocksPriorityPercentileBasisPoints uint64, - maxBlockSize uint64) (uint64, error) { + maxBlockSize uint64) uint64 { return mp.feeEstimator.EstimateFeeRateNanosPerKB( minFeeRateNanosPerKB, mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, pastBlocksCongestionFactorBasisPoints, pastBlocksPriorityPercentileBasisPoints, maxBlockSize) From 45a0350976fb3fb5b27a4a3807a1261a579a6986 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 16 Apr 2024 10:43:33 -0400 Subject: [PATCH 643/762] Add BigIntFromUint64 helper (#1227) --- lib/block_view.go | 20 +++++++++----------- lib/block_view_dao_coin_limit_order.go | 6 +++--- lib/deso_math.go | 9 +++++++++ lib/network.go | 2 +- lib/network_test.go | 2 +- 5 files 
changed, 23 insertions(+), 16 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 9217a3637..df58d0654 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4042,7 +4042,7 @@ func (bav *UtxoView) _connectSingleTxn( if txn.TxnMeta.GetTxnType() != TxnTypeBlockReward && txn.TxnMeta.GetTxnType() != TxnTypeBitcoinExchange && blockHeight >= bav.Params.ForkHeights.BalanceModelBlockHeight { - balanceDelta, _, err := bav._compareBalancesToSnapshot(balanceSnapshot) + balanceDelta, err := bav._compareBalancesToSnapshot(balanceSnapshot) if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, "ConnectTransaction: error comparing current balances to snapshot") } @@ -4053,8 +4053,8 @@ func (bav *UtxoView) _connectSingleTxn( if creatorProfile == nil || creatorProfile.IsDeleted() { return nil, 0, 0, 0, fmt.Errorf("ConnectTransaction: Profile for CreatorCoin being sold does not exist") } - desoLockedDelta = big.NewInt(0).Sub(big.NewInt(0).SetUint64(creatorProfile.CreatorCoinEntry.DeSoLockedNanos), - big.NewInt(0).SetUint64(creatorCoinSnapshot.DeSoLockedNanos)) + desoLockedDelta = big.NewInt(0).Sub(BigIntFromUint64(creatorProfile.CreatorCoinEntry.DeSoLockedNanos), + BigIntFromUint64(creatorCoinSnapshot.DeSoLockedNanos)) } if txn.TxnMeta.GetTxnType() == TxnTypeAcceptNFTBid || txn.TxnMeta.GetTxnType() == TxnTypeNFTBid { @@ -4066,8 +4066,8 @@ func (bav *UtxoView) _connectSingleTxn( } desoLockedDelta = desoLockedDelta.Sub(desoLockedDelta, big.NewInt(0).Sub( - big.NewInt(0).SetUint64(creatorProfile.CreatorCoinEntry.DeSoLockedNanos), - big.NewInt(0).SetUint64(coinEntry.DeSoLockedNanos)), + BigIntFromUint64(creatorProfile.CreatorCoinEntry.DeSoLockedNanos), + BigIntFromUint64(coinEntry.DeSoLockedNanos)), ) } } @@ -4247,26 +4247,24 @@ func computeBMF(fee uint64) (_burnFee uint64, _utilityFee uint64) { } func (bav *UtxoView) _compareBalancesToSnapshot(balanceSnapshot map[PublicKey]uint64) ( - *big.Int, map[PublicKey]*big.Int, error) { + *big.Int, error) { runningTotal := 
big.NewInt(0) - balanceDeltasMap := make(map[PublicKey]*big.Int) for publicKey, balance := range bav.PublicKeyToDeSoBalanceNanos { snapshotBalance, exists := balanceSnapshot[publicKey] if !exists { // Get it from the DB dbBalance, err := bav.GetDbAdapter().GetDeSoBalanceForPublicKey(publicKey.ToBytes()) if err != nil { - return nil, nil, err + return nil, err } snapshotBalance = dbBalance balanceSnapshot[publicKey] = snapshotBalance } // New - Old - delta := big.NewInt(0).Sub(big.NewInt(0).SetUint64(balance), big.NewInt(0).SetUint64(snapshotBalance)) - balanceDeltasMap[publicKey] = delta + delta := big.NewInt(0).Sub(BigIntFromUint64(balance), BigIntFromUint64(snapshotBalance)) runningTotal = big.NewInt(0).Add(runningTotal, delta) } - return runningTotal, balanceDeltasMap, nil + return runningTotal, nil } func (bav *UtxoView) ConnectBlock( diff --git a/lib/block_view_dao_coin_limit_order.go b/lib/block_view_dao_coin_limit_order.go index 93f09696c..19b924f2c 100644 --- a/lib/block_view_dao_coin_limit_order.go +++ b/lib/block_view_dao_coin_limit_order.go @@ -814,7 +814,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( pubKey := bav.GetPublicKeyForPKID(&userPKID) desoSurplus := desoAllowedToSpendByPublicKey[*NewPublicKey(pubKey)] newDESOSurplus := big.NewInt(0).Add( - delta, big.NewInt(0).SetUint64(desoSurplus)) + delta, BigIntFromUint64(desoSurplus)) // If the current delta is for the transactor, we need // to deduct the fees specified in the metadata from the output @@ -823,7 +823,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( if blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight && transactorPKIDEntry.PKID.Eq(&userPKID) { - newDESOSurplus = big.NewInt(0).Sub(newDESOSurplus, big.NewInt(0).SetUint64(txMeta.FeeNanos)) + newDESOSurplus = big.NewInt(0).Sub(newDESOSurplus, BigIntFromUint64(txMeta.FeeNanos)) } if blockHeight >= bav.Params.ForkHeights.BalanceModelBlockHeight { @@ -1975,7 +1975,7 @@ func ScaleFloatFormatStringToUint256(floatStr string, 
scaleFactor *uint256.Int) } newWholePart := big.NewInt(0).Mul(wholePart, scaleFactor.ToBig()) newDecimalPart := big.NewInt(0).Mul(decimalPart, big.NewInt(0).Exp( - big.NewInt(0).SetUint64(10), big.NewInt(0).SetUint64(uint64(decimalExponent)), nil)) + big.NewInt(10), big.NewInt(int64(decimalExponent)), nil)) sumBig := big.NewInt(0).Add(newWholePart, newDecimalPart) ret, overflow := uint256.FromBig(sumBig) diff --git a/lib/deso_math.go b/lib/deso_math.go index 55655dd3f..f522ef0b0 100644 --- a/lib/deso_math.go +++ b/lib/deso_math.go @@ -55,6 +55,15 @@ func IntAdd(a *big.Int, b *big.Int) *big.Int { return big.NewInt(0).Add(a, b) } +// BigIntFromUint64 returns a big.Int from a uint64. It is 20x faster to +// simply cast a uint64 to a big.Int than to use big.NewInt(0).SetUint64(). +func BigIntFromUint64(x uint64) *big.Int { + if x < math.MaxInt64 { + return big.NewInt(int64(x)) + } + return big.NewInt(0).SetUint64(x) +} + func Sub(a *big.Float, b *big.Float) *big.Float { // TODO(performance): This code currently calls NewFloat() too often. It // does this in order to make the code easier to read but if it ever becomes diff --git a/lib/network.go b/lib/network.go index 4ddcb9eb2..d0993c06f 100644 --- a/lib/network.go +++ b/lib/network.go @@ -6073,7 +6073,7 @@ func (tsl *TransactionSpendingLimit) ToMetamaskString(params *DeSoParams) string // GlobalDESOLimit if tsl.GlobalDESOLimit > 0 { str += _indt(indentationCounter) + "Total $DESO Limit: " + FormatScaledUint256AsDecimalString( - big.NewInt(0).SetUint64(tsl.GlobalDESOLimit), big.NewInt(int64(NanosPerUnit))) + " $DESO\n" + BigIntFromUint64(tsl.GlobalDESOLimit), big.NewInt(int64(NanosPerUnit))) + " $DESO\n" } // Sort an array of strings and add them to the spending limit string str. 
This will come in handy below, diff --git a/lib/network_test.go b/lib/network_test.go index 878bf0c6d..7928bb7d5 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -1654,7 +1654,7 @@ func TestSpendingLimitMetamaskString(t *testing.T) { encoding := spendingLimit.ToMetamaskString(params) if tsl.GlobalDESOLimit > 0 { if !strings.Contains(encoding, FormatScaledUint256AsDecimalString( - big.NewInt(0).SetUint64(tsl.GlobalDESOLimit), big.NewInt(int64(NanosPerUnit)))) { + BigIntFromUint64(tsl.GlobalDESOLimit), big.NewInt(int64(NanosPerUnit)))) { return false } } From 9a9c7f3ebc7dc434c6a9b1a431f057700ba4d82c Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 16 Apr 2024 10:45:41 -0400 Subject: [PATCH 644/762] Use Sign instead of comparing big ints to 0 (#1228) --- lib/block_view_dao_coin_limit_order.go | 15 +++++++-------- lib/block_view_nft.go | 2 +- lib/block_view_stake.go | 2 +- lib/deso_math.go | 2 +- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/lib/block_view_dao_coin_limit_order.go b/lib/block_view_dao_coin_limit_order.go index 19b924f2c..6c65159cb 100644 --- a/lib/block_view_dao_coin_limit_order.go +++ b/lib/block_view_dao_coin_limit_order.go @@ -7,7 +7,6 @@ import ( "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" - "math" "math/big" "sort" "strings" @@ -20,7 +19,7 @@ func adjustBalance( retBig := big.NewInt(0).Add(balanceBig, delta) // If we're below zero, just return zero. The caller should generally // prevent this from happening. - if retBig.Cmp(big.NewInt(0)) < 0 { + if retBig.Sign() < 0 { return nil, fmt.Errorf("adjustBalance: Went below zero. This should never happen.") } if retBig.Cmp(MaxUint256.ToBig()) > 0 { @@ -186,7 +185,7 @@ func (bav *UtxoView) _sanityCheckLimitOrderMoneyPrinting( // we did not print money. for creatorPKID, deltaBalanceBaseUnits := range finalDeltasMap { // If delta is > 0, throw an error. 
- if deltaBalanceBaseUnits.Cmp(big.NewInt(0)) > 0 { + if deltaBalanceBaseUnits.Sign() > 0 { return fmt.Errorf( "_connectDAOCoinLimitOrder: printing %v new coin base units for creatorPKID %v", deltaBalanceBaseUnits, creatorPKID) @@ -775,7 +774,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( } } for creatorPKIDIter, balanceDelta := range balanceDeltaSanityCheckMap { - if balanceDelta.Cmp(big.NewInt(0)) != 0 { + if balanceDelta.Sign() != 0 { return 0, 0, nil, errors.Wrapf( RuleErrorDAOCoinLimitOrderBalanceDeltasNonZero, "_connectDAOCoinLimitOrder: Balance for PKID %v is %v", creatorPKIDIter, balanceDelta.String(), @@ -827,7 +826,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( } if blockHeight >= bav.Params.ForkHeights.BalanceModelBlockHeight { - cmpVal := newDESOSurplus.Cmp(big.NewInt(0)) + cmpVal := newDESOSurplus.Sign() if cmpVal == 0 { continue } @@ -874,10 +873,10 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( // Note that if we ever go negative then that's an error because // we already maxed out the DESO we're allowed to spend before // entering this loop. 
- if newDESOSurplus.Cmp(big.NewInt(0)) < 0 { + if newDESOSurplus.Sign() < 0 { return 0, 0, nil, RuleErrorDAOCoinLimitOrderOverspendingDESO } - if newDESOSurplus.Cmp(big.NewInt(0).SetUint64(math.MaxUint64)) > 0 { + if !newDESOSurplus.IsUint64() { return 0, 0, nil, RuleErrorDAOCoinLimitOrderOverflowsDESO } @@ -924,7 +923,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( } newBalance := big.NewInt(0).Add(newBalanceEntry.BalanceNanos.ToBig(), delta) - if newBalance.Cmp(big.NewInt(0)) < 0 { + if newBalance.Sign() < 0 { return 0, 0, nil, RuleErrorDAOCoinLimitOrderOverspendingDAOCoin } if newBalance.Cmp(MaxUint256.ToBig()) > 0 { diff --git a/lib/block_view_nft.go b/lib/block_view_nft.go index 0892c2a26..fbeaefd26 100644 --- a/lib/block_view_nft.go +++ b/lib/block_view_nft.go @@ -1665,7 +1665,7 @@ func (bav *UtxoView) _helpConnectNFTSold(args HelpConnectNFTSoldStruct) ( totalDiff := big.NewInt(0).Add(sellerPlusBidderDiff, creatorPlusCoinDiff) totalDiff = totalDiff.Add(totalDiff, totalAdditionalRoyaltiesDiff) - if totalDiff.Cmp(big.NewInt(0)) > 0 { + if totalDiff.Sign() > 0 { return 0, 0, nil, fmt.Errorf( "_helpConnectNFTSold: Sum of participant diffs is >0 (%d, %d, %d, %d, %d, %d)", sellerDiff, bidderDiff, creatorDiff, coinDiff, additionalDESORoyaltiesDiff.Int64(), diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 0055aa88d..12c0be2b3 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1617,7 +1617,7 @@ func (bav *UtxoView) _connectUnstake( } // 2. Create a currentStakeEntry, if updated StakeAmountNanos > 0. 
var currentStakeEntry *StakeEntry - if stakeAmountNanos.Cmp(uint256.NewInt(0)) > 0 { + if stakeAmountNanos.Sign() > 0 { currentStakeEntry = prevStakeEntry.Copy() currentStakeEntry.StakeAmountNanos = stakeAmountNanos.Clone() } diff --git a/lib/deso_math.go b/lib/deso_math.go index f522ef0b0..7d593cdf6 100644 --- a/lib/deso_math.go +++ b/lib/deso_math.go @@ -200,7 +200,7 @@ func BigFloatPow(z *big.Float, w *big.Float) *big.Float { if z.Sign() < 0 { panic("Pow: negative base") } - if z.Cmp(bigZero) == 0 { + if z.Sign() == 0 { return bigZero } From 7f8465733ffa2b297eae13de972563dfe5236b12 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 16 Apr 2024 12:49:50 -0400 Subject: [PATCH 645/762] Add TransactionRegister Functions to Rebucket With New Global Params (#1230) --- lib/pos_mempool.go | 2 +- lib/pos_transaction_register.go | 26 +++++++++++ lib/pos_transaction_register_test.go | 64 +++++++++++++++++++++++++++- 3 files changed, 90 insertions(+), 2 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index fc63e5734..b5410eb26 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -1029,7 +1029,7 @@ func (mp *PosMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { mp.globalParams = globalParams - // TODO: Trim the mempool size + // Trim the mempool size to the new maximum size. 
// TODO: Update the fee bucketing parameters diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index eaa313d58..989d77d54 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -64,6 +64,32 @@ func (tr *TransactionRegister) Init(globalParams *GlobalParamsEntry) { tr.feeBucketGrowthRateBasisPoints = bucketMultiplier } +func (tr *TransactionRegister) HasGlobalParamChange(globalParams *GlobalParamsEntry) bool { + tr.Lock() + defer tr.Unlock() + + minNetworkFee, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() + + return minNetworkFee.Cmp(tr.minimumNetworkFeeNanosPerKB) != 0 || bucketMultiplier.Cmp(tr.feeBucketGrowthRateBasisPoints) != 0 +} + +func (tr *TransactionRegister) CopyWithNewGlobalParams(globalParams *GlobalParamsEntry) (*TransactionRegister, error) { + tr.RLock() + defer tr.RUnlock() + + newRegister := NewTransactionRegister() + newRegister.Init(globalParams) + + // Re-bucket all transactions in the new register. + for _, txn := range tr.GetFeeTimeTransactions() { + if err := newRegister.addTransactionNoLock(txn); err != nil { + return nil, errors.Wrapf(err, "TransactionRegister.CopyWithNewGlobalParams: Error adding transaction to new register") + } + } + + return newRegister, nil +} + // feeTimeBucketComparator is a comparator function for FeeTimeBucket objects. It is used to order FeeTimeBucket objects // in the TransactionRegister's feeTimeBucketSet based on fee ranges (higher fee ranges are ordered first). 
func feeTimeBucketComparator(a, b interface{}) int { diff --git a/lib/pos_transaction_register_test.go b/lib/pos_transaction_register_test.go index 33a3651ae..0a197c83d 100644 --- a/lib/pos_transaction_register_test.go +++ b/lib/pos_transaction_register_test.go @@ -2,13 +2,14 @@ package lib import ( "bytes" - ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "math" "math/rand" "sort" "testing" "time" + ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" + "github.com/btcsuite/btcd/btcec/v2" "github.com/stretchr/testify/require" ) @@ -557,3 +558,64 @@ func TestComputeFeeBucketWithFee(t *testing.T) { require.True(verifyFeeBucket(n, ii)) } } + +func TestHasGlobalParamChange(t *testing.T) { + // Create the transaction register. + globalParams := _testGetDefaultGlobalParams() + tr := NewTransactionRegister() + tr.Init(globalParams) + + // Test with no changes. + unchangedGlobalParams := _testGetDefaultGlobalParams() + require.False(t, tr.HasGlobalParamChange(unchangedGlobalParams)) + + // Test with MinimumNetworkFeeNanosPerKB change. + globalParamsWithNewFeeRate := _testGetDefaultGlobalParams() + globalParamsWithNewFeeRate.MinimumNetworkFeeNanosPerKB = 2000 + require.True(t, tr.HasGlobalParamChange(globalParamsWithNewFeeRate)) + + // Test with FeeBucketGrowthRateBasisPoints change. + globalParamsWithNewGrowthRate := _testGetDefaultGlobalParams() + globalParamsWithNewGrowthRate.FeeBucketGrowthRateBasisPoints = 2000 + require.True(t, tr.HasGlobalParamChange(globalParamsWithNewGrowthRate)) +} + +func TestCopyWithNewGlobalParams(t *testing.T) { + // Create the transaction register. 
+ globalParams := _testGetDefaultGlobalParams() + tr := NewTransactionRegister() + tr.Init(globalParams) + + seed := int64(44) + testCases := 1000 + exponentRange := 100 + timestampRange := uint64(10000) + + rand := rand.New(rand.NewSource(seed)) + randomExponent := uint32(rand.Intn(exponentRange)) + baseRate, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() + feeMin, feeMax := computeFeeTimeBucketRangeFromExponent(randomExponent, baseRate, bucketMultiplier) + + // Create the txns + txnPool := _testGetRandomMempoolTxns(rand, feeMin, feeMax, 1000, timestampRange, testCases) + + // Add the txns to the transaction register. + for _, txn := range txnPool { + err := tr.AddTransaction(txn) + require.Nil(t, err) + } + + // Copy the transaction register with new global params. + newGlobalParams := _testGetDefaultGlobalParams() + newGlobalParams.MinimumNetworkFeeNanosPerKB = 500 + + newTr, err := tr.CopyWithNewGlobalParams(newGlobalParams) + require.Nil(t, err) + require.NotNil(t, newTr) + + originalTxns := tr.GetFeeTimeTransactions() + rebucketedTxns := newTr.GetFeeTimeTransactions() + + // Make sure the number of txns is the same. 
+ require.Equal(t, len(originalTxns), len(rebucketedTxns)) +} From 396e594762b5156d0c150d935c8f0e4328b01ec6 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 16 Apr 2024 13:04:46 -0400 Subject: [PATCH 646/762] Rebucket Mempool Transactions When Global Params Change (#1231) * Rebucket Mempool Transactions When Global Params Change * Fix naming --- lib/pos_fee_estimator.go | 6 ++++++ lib/pos_mempool.go | 32 +++++++++++++++++++++++++++++++- 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index aed8409e6..aaaa2cf2d 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -97,6 +97,12 @@ func (posFeeEstimator *PoSFeeEstimator) Init( return nil } +func (posFeeEstimator *PoSFeeEstimator) SetMempoolTransactionRegister(mempoolTransactionRegister *TransactionRegister) { + posFeeEstimator.rwLock.Lock() + defer posFeeEstimator.rwLock.Unlock() + posFeeEstimator.mempoolTransactionRegister = mempoolTransactionRegister +} + // AddBlock adds a block to the PoSFeeEstimator. This will add all the transactions from the block // to the pastBlocksTransactionRegister and cache the block. 
If there are now more blocks cached // than the numPastBlocks param provided to Init, the oldest block will be removed from the cache diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index b5410eb26..1514e8907 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -999,6 +999,28 @@ func (mp *PosMempool) pruneNoLock() error { return nil } +func (mp *PosMempool) rebucketTransactionRegisterNoLock() error { + // Check if the global params haven't changed in a way that requires rebucketing the + // transaction register + if mp.txnRegister.HasGlobalParamChange(mp.globalParams) { + return nil + } + + // Rebucket the transaction register + newTxnRegister, err := mp.txnRegister.CopyWithNewGlobalParams(mp.globalParams) + if err != nil { + return errors.Wrapf(err, "PosMempool.rebucketTransactionRegisterNoLock: Problem rebucketing transaction register") + } + + // Swap the transaction register with the new transaction register + mp.txnRegister = newTxnRegister + + // Update the fee estimator's transaction register + mp.feeEstimator.SetMempoolTransactionRegister(mp.txnRegister) + + return nil +} + // UpdateLatestBlock updates the latest block view and latest block node in the mempool. func (mp *PosMempool) UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) { mp.Lock() @@ -1030,8 +1052,16 @@ func (mp *PosMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { mp.globalParams = globalParams // Trim the mempool size to the new maximum size. 
+ if err := mp.pruneNoLock(); err != nil { + glog.Errorf("PosMempool.UpdateGlobalParams: Problem pruning mempool: %v", err) + return + } - // TODO: Update the fee bucketing parameters + // Update the fee bucketing in the transaction register + if err := mp.rebucketTransactionRegisterNoLock(); err != nil { + glog.Errorf("PosMempool.UpdateGlobalParams: Problem rebucketing transaction register: %v", err) + return + } // TODO: Update the fee estimator } From b58a9e23b35284bc39150943cad1c037274f84b4 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 16 Apr 2024 13:32:52 -0400 Subject: [PATCH 647/762] Update Fee Estimator When Global Params Change (#1232) --- lib/pos_fee_estimator.go | 101 ++++++++++++++------------- lib/pos_fee_estimator_test.go | 8 ++- lib/pos_mempool.go | 17 ++--- lib/pos_transaction_register_test.go | 2 + 4 files changed, 65 insertions(+), 63 deletions(-) diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index aaaa2cf2d..8ce80752c 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -11,27 +11,27 @@ import ( type PoSFeeEstimator struct { // globalParams is the latest GlobalParams used by the PoSFeeEstimator. The fee estimation uses - // this whenever the cached blocks are refreshed + // this whenever the cached blocks are refreshed. The global params contain two relevant params: + // + // 1. NumMempoolBlocks is a parameter to manage how many blocks in the future we're willing to wait + // to have our block included in the chain. This will most likely be set to 1, meaning that we + // want to estimate a fee such that the transaction will be included in the next block. + // + // 2. NumPastBlocks is a parameter to manage how many blocks in the past we're willing to look at + // to estimate fees. 
This will most likely be set to 60, meaning that we want to estimate a fee + // such that it would have been included in the past 60 blocks (assuming 1 block per second, this + // means the past minute of blocks). This parameter also controls how many blocks we store in + // cachedBlocks. When AddBlock is called and we have more than numPastBlocks, we remove the + // oldest block from cachedBlocks and pastBlocksTransactionRegister. globalParams *GlobalParamsEntry // mempoolTransactionRegister is a pointer to the mempool's transaction register. The fee estimator // uses this to estimate fees based on congestion in the mempool. mempoolTransactionRegister *TransactionRegister - // numMempoolBlocks is a parameter to manage how many blocks in the future we're willing to wait - // to have our block included in the chain. This will most likely be set to 1, meaning that we - // want to estimate a fee such that the transaction will be included in the next block. - numMempoolBlocks uint64 // pastBlocksTransactionRegister is an internal transaction register to the fee estimator that // is used to estimate fees based on congestion in the past blocks. The caller is responsible // for calling AddBlock to add blocks to this transaction register whenever a new block is added // to the best chain. pastBlocksTransactionRegister *TransactionRegister - // numPastBlocks is a parameter to manage how many blocks in the past we're willing to look at - // to estimate fees. This will most likely be set to 60, meaning that we want to estimate a fee - // such that it would have been included in the past 60 blocks (assuming 1 block per second, this - // means the past minute of blocks). This parameter also controls how many blocks we store in - // cachedBlocks. When AddBlock is called and we have more than numPastBlocks, we remove the - // oldest block from cachedBlocks and pastBlocksTransactionRegister. 
- numPastBlocks uint64 // cachedBlocks is a cache of the past blocks that we use to estimate fees. This is used to // avoid having to recompute the fee buckets for all the past blocks every time we want to // estimate a fee. @@ -50,9 +50,7 @@ func NewPoSFeeEstimator() *PoSFeeEstimator { // and cache the initial past blocks. func (posFeeEstimator *PoSFeeEstimator) Init( mempoolTransactionRegister *TransactionRegister, - numMempoolBlocks uint64, pastBlocks []*MsgDeSoBlock, - numPastBlocks uint64, globalParams *GlobalParamsEntry, ) error { posFeeEstimator.globalParams = globalParams.Copy() @@ -64,17 +62,13 @@ func (posFeeEstimator *PoSFeeEstimator) Init( if mempoolTransactionRegister == nil { return errors.New("PoSFeeEstimator.Init: mempoolTransactionRegister cannot be nil") } - if numMempoolBlocks == 0 { + if globalParams.MempoolFeeEstimatorNumMempoolBlocks == 0 { return errors.New("PoSFeeEstimator.Init: numMempoolBlocks cannot be zero") } - if numPastBlocks == 0 { + if globalParams.MempoolFeeEstimatorNumPastBlocks == 0 { return errors.New("PoSFeeEstimator.Init: numPastBlocks cannot be zero") } - // Store the num blocks params used to estimate fees. - posFeeEstimator.numMempoolBlocks = numMempoolBlocks - posFeeEstimator.numPastBlocks = numPastBlocks - // Store the mempool's transaction register. 
posFeeEstimator.mempoolTransactionRegister = mempoolTransactionRegister @@ -124,19 +118,27 @@ func (posFeeEstimator *PoSFeeEstimator) addBlockNoLock(block *MsgDeSoBlock) erro newPastBlocks := append(posFeeEstimator.cachedBlocks, block) newPastBlocks = posFeeEstimator.cleanUpPastBlocks(newPastBlocks) + if err := posFeeEstimator.updatePastBlocksTransactionRegister(newPastBlocks); err != nil { + return errors.Wrap(err, "PoSFeeEstimator.addBlockNoLock: error updating pastBlocksTransactionRegister") + } + + return nil +} + +func (posFeeEstimator *PoSFeeEstimator) updatePastBlocksTransactionRegister(pastBlocks []*MsgDeSoBlock) error { // Create a clean transaction register to add the blocks' transactions. newTransactionRegister := NewTransactionRegister() newTransactionRegister.Init(posFeeEstimator.globalParams.Copy()) // Add all transactions from the block to the pastBlocksTransactionRegister. - for _, pastBlock := range newPastBlocks { + for _, pastBlock := range pastBlocks { if err := addBlockToTransactionRegister(newTransactionRegister, pastBlock); err != nil { - return errors.Wrap(err, "PoSFeeEstimator.addBlockNoLock: error adding block to pastBlocksTransactionRegister") + return errors.Wrap(err, "error adding block to pastBlocksTransactionRegister") } } // Update the cached blocks and pastBlocksTransactionRegister. - posFeeEstimator.cachedBlocks = newPastBlocks + posFeeEstimator.cachedBlocks = pastBlocks posFeeEstimator.pastBlocksTransactionRegister = newTransactionRegister return nil @@ -192,21 +194,10 @@ func (posFeeEstimator *PoSFeeEstimator) removeBlockNoLock(block *MsgDeSoBlock) e return !blockHash.IsEqual(cachedBlockHash) }) - // Create a clean transaction register to add the blocks' transactions. - newTransactionRegister := NewTransactionRegister() - newTransactionRegister.Init(posFeeEstimator.globalParams.Copy()) - - // Add all transactions from the past blocks to the transaction register. 
- for _, pastBlock := range newPastBlocks { - if err := addBlockToTransactionRegister(newTransactionRegister, pastBlock); err != nil { - return errors.Wrap(err, "PoSFeeEstimator.removeBlockNoLock: error adding block to transaction register") - } + if err := posFeeEstimator.updatePastBlocksTransactionRegister(newPastBlocks); err != nil { + return errors.Wrap(err, "PoSFeeEstimator.removeBlockNoLock: error updating pastBlocksTransactionRegister") } - // Update the cached blocks and pastBlocksTransactionRegister. - posFeeEstimator.cachedBlocks = newPastBlocks - posFeeEstimator.pastBlocksTransactionRegister = newTransactionRegister - return nil } @@ -217,17 +208,27 @@ func (posFeeEstimator *PoSFeeEstimator) UpdateGlobalParams(globalParams *GlobalP posFeeEstimator.rwLock.Lock() defer posFeeEstimator.rwLock.Unlock() - // Create a temporary transaction register to test the new global params. - tempTransactionRegister := NewTransactionRegister() - tempTransactionRegister.Init(globalParams.Copy()) + // Check if the fee bucketing or the num past blocks params have changed have changed. If so, we need to + // rebucket the past blocks transaction register + hasPastBlocksParamChange := posFeeEstimator.pastBlocksTransactionRegister.HasGlobalParamChange(globalParams) || + posFeeEstimator.globalParams.MempoolFeeEstimatorNumPastBlocks != globalParams.MempoolFeeEstimatorNumPastBlocks - for _, block := range posFeeEstimator.cachedBlocks { - if err := addBlockToTransactionRegister(tempTransactionRegister, block); err != nil { - return errors.Wrap(err, "PosFeeEstimator.UpdateGlobalParams: error adding block to tempTransactionRegister") - } + // Now that we've check if the global params have changed, we can update the global params. + posFeeEstimator.globalParams = globalParams + + // If the global params haven't changed in a way to require a reconstruction of the past blocks transaction + // register, we can simply update the global params and return. 
+ if !hasPastBlocksParamChange { + return nil + } + + // At this point, we know we need to reconstruct the past blocks transaction register. + pastBlocks := posFeeEstimator.pruneBlocksToMaxNumPastBlocks(posFeeEstimator.cachedBlocks) + + if err := posFeeEstimator.updatePastBlocksTransactionRegister(pastBlocks); err != nil { + return errors.Wrap(err, "PoSFeeEstimator.UpdateGlobalParams: error updating pastBlocksTransactionRegister") } - posFeeEstimator.globalParams = globalParams return nil } @@ -276,13 +277,13 @@ func (posFeeEstimator *PoSFeeEstimator) sortBlocksByBlockHeight(blocks []*MsgDeS // pruneBlocksToMaxNumPastBlocks reduces the number of blocks to the numPastBlocks param func (posFeeEstimator *PoSFeeEstimator) pruneBlocksToMaxNumPastBlocks(blocks []*MsgDeSoBlock) []*MsgDeSoBlock { numCachedBlocks := uint64(len(blocks)) - if numCachedBlocks <= posFeeEstimator.numPastBlocks { + if numCachedBlocks <= posFeeEstimator.globalParams.MempoolFeeEstimatorNumPastBlocks { return blocks } // Prune the blocks with the lowest block heights. We do this by removing the // first len(blocks) - numPastBlocks blocks from the blocks slice. 
- return blocks[numCachedBlocks-posFeeEstimator.numPastBlocks:] + return blocks[numCachedBlocks-posFeeEstimator.globalParams.MempoolFeeEstimatorNumPastBlocks:] } // EstimateFeeRateNanosPerKB estimates the fee rate in nanos per KB for the current mempool @@ -302,14 +303,14 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( posFeeEstimator.pastBlocksTransactionRegister, pastBlocksCongestionFactorBasisPoints, pastBlocksPriorityPercentileBasisPoints, - posFeeEstimator.numPastBlocks, + posFeeEstimator.globalParams.MempoolFeeEstimatorNumPastBlocks, maxBlockSize, ) mempoolFeeRate := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.mempoolTransactionRegister, mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, - posFeeEstimator.numMempoolBlocks, + posFeeEstimator.globalParams.MempoolFeeEstimatorNumMempoolBlocks, maxBlockSize, ) if minFeeRateNanosPerKB > pastBlockFeeRate && minFeeRateNanosPerKB > mempoolFeeRate { @@ -375,7 +376,7 @@ func (posFeeEstimator *PoSFeeEstimator) pastBlocksFeeEstimate( posFeeEstimator.pastBlocksTransactionRegister, congestionFactorBasisPoints, priorityPercentileBasisPoints, - posFeeEstimator.numPastBlocks, + posFeeEstimator.globalParams.MempoolFeeEstimatorNumPastBlocks, maxBlockSize, ) if err != nil { @@ -397,7 +398,7 @@ func (posFeeEstimator *PoSFeeEstimator) mempoolFeeEstimate( posFeeEstimator.mempoolTransactionRegister, congestionFactorBasisPoints, priorityPercentileBasisPoints, - posFeeEstimator.numMempoolBlocks, + posFeeEstimator.globalParams.MempoolFeeEstimatorNumMempoolBlocks, maxBlockSize, ) if err != nil { diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index 25f37f3bf..4746fc05e 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -40,9 +40,11 @@ func TestFeeEstimator(t *testing.T) { // Construct a FeeEstimator with no transactions in it. We should get the minimum fee bucket. 
// We make some dummy block to get around validations. posFeeEstimator := &PoSFeeEstimator{} - err = posFeeEstimator.Init(mempool.txnRegister, 1, []*MsgDeSoBlock{{ - Header: &MsgDeSoHeader{Height: 10}, - }}, 1, mempool.globalParams) + err = posFeeEstimator.Init( + mempool.txnRegister, + []*MsgDeSoBlock{{Header: &MsgDeSoHeader{Height: 10}}}, + mempool.globalParams, + ) require.NoError(t, err) // When there's nothing in the mempool, we return the global minimum fee rate. baseFeeRate := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 1514e8907..601d02cd6 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -314,14 +314,7 @@ func (mp *PosMempool) Init( mp.recentBlockTxnCache = lru.NewKVCache(100000) // cache 100K latest txns from blocks. mp.recentRejectedTxnCache = lru.NewKVCache(100000) // cache 100K rejected txns. - // TODO: parameterize num blocks. Also, how to pass in blocks. - err = mp.feeEstimator.Init( - mp.txnRegister, - feeEstimatorNumMempoolBlocks, - feeEstimatorPastBlocks, - feeEstimatorNumPastBlocks, - mp.globalParams, - ) + err = mp.feeEstimator.Init(mp.txnRegister, feeEstimatorPastBlocks, mp.globalParams) if err != nil { return errors.Wrapf(err, "PosMempool.Start: Problem initializing fee estimator") } @@ -1002,7 +995,7 @@ func (mp *PosMempool) pruneNoLock() error { func (mp *PosMempool) rebucketTransactionRegisterNoLock() error { // Check if the global params haven't changed in a way that requires rebucketing the // transaction register - if mp.txnRegister.HasGlobalParamChange(mp.globalParams) { + if !mp.txnRegister.HasGlobalParamChange(mp.globalParams) { return nil } @@ -1063,7 +1056,11 @@ func (mp *PosMempool) UpdateGlobalParams(globalParams *GlobalParamsEntry) { return } - // TODO: Update the fee estimator + // Update the fee estimator's global params + if err := mp.feeEstimator.UpdateGlobalParams(mp.globalParams); err != nil { + 
glog.Errorf("PosMempool.UpdateGlobalParams: Problem updating fee estimator global params: %v", err) + return + } } // Implementation of the Mempool interface diff --git a/lib/pos_transaction_register_test.go b/lib/pos_transaction_register_test.go index 0a197c83d..baa4c2fbe 100644 --- a/lib/pos_transaction_register_test.go +++ b/lib/pos_transaction_register_test.go @@ -371,6 +371,8 @@ func _testGetDefaultGlobalParams() *GlobalParamsEntry { globalParams := InitialGlobalParamsEntry globalParams.MinimumNetworkFeeNanosPerKB = 1000 globalParams.FeeBucketGrowthRateBasisPoints = 1000 + globalParams.MempoolFeeEstimatorNumMempoolBlocks = 1 + globalParams.MempoolFeeEstimatorNumPastBlocks = 1 return &globalParams } From e9de21821713ea342b2769a6c676750e2c37853f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 16 Apr 2024 14:17:40 -0400 Subject: [PATCH 648/762] Fix Mempool UpdateGlobalParams Bugs (#1233) --- lib/pos_mempool.go | 7 +++++++ lib/pos_mempool_test.go | 5 ++++- lib/pos_transaction_register.go | 4 +--- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 601d02cd6..5930d0d02 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -1005,6 +1005,13 @@ func (mp *PosMempool) rebucketTransactionRegisterNoLock() error { return errors.Wrapf(err, "PosMempool.rebucketTransactionRegisterNoLock: Problem rebucketing transaction register") } + for _, txn := range mp.txnRegister.GetFeeTimeTransactions() { + txnInNewRegister := newTxnRegister.GetTransaction(txn.Hash) + if txnInNewRegister == nil { + mp.removeTransactionNoLock(txn, true) + } + } + // Swap the transaction register with the new transaction register mp.txnRegister = newTxnRegister diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 448c56906..93344a65c 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -226,7 +226,10 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { 
newGlobalParams := _testGetDefaultGlobalParams() newGlobalParams.MinimumNetworkFeeNanosPerKB = 20000 mempool.UpdateGlobalParams(newGlobalParams) - require.Equal(100, len(mempool.GetTransactions())) + require.Equal(0, len(mempool.GetTransactions())) + require.Equal(mempool.txnRegister.minimumNetworkFeeNanosPerKB.String(), "20000") + require.Equal(mempool.feeEstimator.globalParams.MinimumNetworkFeeNanosPerKB, uint64(20000)) + _checkPosMempoolIntegrity(t, mempool) mempool.Stop() require.False(mempool.IsRunning()) diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index 989d77d54..3560d155c 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -82,9 +82,7 @@ func (tr *TransactionRegister) CopyWithNewGlobalParams(globalParams *GlobalParam // Re-bucket all transactions in the new register. for _, txn := range tr.GetFeeTimeTransactions() { - if err := newRegister.addTransactionNoLock(txn); err != nil { - return nil, errors.Wrapf(err, "TransactionRegister.CopyWithNewGlobalParams: Error adding transaction to new register") - } + newRegister.addTransactionNoLock(txn) } return newRegister, nil From 3e6a225cd521285d1d0afb9980d3cf5b98957cff Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 17 Apr 2024 10:21:33 -0400 Subject: [PATCH 649/762] Fix inner utxo ops error in ComputeTransactionMetadata (#1234) --- lib/legacy_mempool.go | 44 +++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index a03cd7d1a..dbadae617 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2038,7 +2038,24 @@ func ComputeTransactionMetadata( realTxMeta := txn.TxnMeta.(*AtomicTxnsWrapperMetadata) txnMeta.AtomicTxnsWrapperTxindexMetadata = &AtomicTxnsWrapperTxindexMetadata{} txnMeta.AtomicTxnsWrapperTxindexMetadata.InnerTxnsTransactionMetadata = []*TransactionMetadata{} - for _, innerTxn := 
range realTxMeta.Txns { + // Find the utxo op for the atomic txn wrapper + var atomicWrapperUtxoOp *UtxoOperation + for _, utxoOp := range utxoOps { + if utxoOp.Type == OperationTypeAtomicTxnsWrapper { + atomicWrapperUtxoOp = utxoOp + } + } + // This should never happen. + if atomicWrapperUtxoOp == nil { + glog.V(2).Info("UpdateTxIndex: Could not find utxo op for atomic txn wrapper") + return txnMeta + } + innerUtxoOps := atomicWrapperUtxoOp.AtomicTxnsInnerUtxoOps + if len(innerUtxoOps) != len(realTxMeta.Txns) { + glog.V(2).Infof("UpdateTxIndex: Number of inner utxo ops does not match number of inner txns") + return txnMeta + } + for ii, innerTxn := range realTxMeta.Txns { // Compute the transaction metadata for each inner transaction. innerTxnsTxnMetadata := ComputeTransactionMetadata( innerTxn, @@ -2046,11 +2063,11 @@ func ComputeTransactionMetadata( blockHash, totalNanosPurchasedBefore, usdCentsPerBitcoinBefore, - totalInput, - totalOutput, - fees, + totalInput, // TODO: this value is incorrect for inner atomic transactions + totalOutput, // TODO: this value is incorrect for inner atomic transactions + innerTxn.TxnFeeNanos, txnIndexInBlock, - utxoOps, + innerUtxoOps[ii], blockHeight, ) txnMeta.AtomicTxnsWrapperTxindexMetadata.InnerTxnsTransactionMetadata = append( @@ -2077,23 +2094,6 @@ func ComputeTransactionMetadata( }) } } - // Check if the transactor is an affected public key. If not, add them. - // We skip this for atomic transactions as their transactor is the ZeroPublicKey. 
- if txnMeta.TransactorPublicKeyBase58Check != "" && txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { - transactorPublicKeyFound := false - for _, affectedPublicKey := range txnMeta.AffectedPublicKeys { - if affectedPublicKey.PublicKeyBase58Check == txnMeta.TransactorPublicKeyBase58Check { - transactorPublicKeyFound = true - break - } - } - if !transactorPublicKeyFound { - txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, &AffectedPublicKey{ - PublicKeyBase58Check: txnMeta.TransactorPublicKeyBase58Check, - Metadata: "TransactorPublicKeyBase58Check", - }) - } - } return txnMeta } From 7a087209dd756d64d19502274b1efc870c6d20c7 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 17 Apr 2024 10:23:37 -0400 Subject: [PATCH 650/762] Add affected public keys for coin lockups (#1235) --- lib/legacy_mempool.go | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index dbadae617..9eae0fd63 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2034,6 +2034,42 @@ func ComputeTransactionMetadata( txindexMetadata, affectedPublicKeys := utxoView.CreateUnjailValidatorTxindexMetadata(utxoOps[len(utxoOps)-1], txn) txnMeta.UnjailValidatorTxindexMetadata = txindexMetadata txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, affectedPublicKeys...) 
+ case TxnTypeCoinLockup: + realTxMeta := txn.TxnMeta.(*CoinLockupMetadata) + profilePublicKey := realTxMeta.ProfilePublicKey.ToBytes() + recipientPublicKey := realTxMeta.RecipientPublicKey.ToBytes() + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, &AffectedPublicKey{ + PublicKeyBase58Check: PkToString(profilePublicKey, utxoView.Params), + Metadata: "CoinLockupProfilePublicKeyBase58Check", + }) + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, &AffectedPublicKey{ + PublicKeyBase58Check: PkToString(recipientPublicKey, utxoView.Params), + Metadata: "CoinLockupRecipientPublicKeyBase58Check", + }) + case TxnTypeUpdateCoinLockupParams: + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, &AffectedPublicKey{ + PublicKeyBase58Check: PkToString(txn.PublicKey, utxoView.Params), + Metadata: "UpdateCoinLockupParamsPublicKeyBase58Check", + }) + case TxnTypeCoinLockupTransfer: + realTxMeta := txn.TxnMeta.(*CoinLockupTransferMetadata) + profilePublicKey := realTxMeta.ProfilePublicKey.ToBytes() + recipientPublicKey := realTxMeta.RecipientPublicKey.ToBytes() + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, &AffectedPublicKey{ + PublicKeyBase58Check: PkToString(profilePublicKey, utxoView.Params), + Metadata: "CoinLockupTransferProfilePublicKeyBase58Check", + }) + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, &AffectedPublicKey{ + PublicKeyBase58Check: PkToString(recipientPublicKey, utxoView.Params), + Metadata: "CoinLockupTransferRecipientPublicKeyBase58Check", + }) + case TxnTypeCoinUnlock: + realTxMeta := txn.TxnMeta.(*CoinUnlockMetadata) + profilePublicKey := realTxMeta.ProfilePublicKey.ToBytes() + txnMeta.AffectedPublicKeys = append(txnMeta.AffectedPublicKeys, &AffectedPublicKey{ + PublicKeyBase58Check: PkToString(profilePublicKey, utxoView.Params), + Metadata: "CoinUnlockProfilePublicKeyBase58Check", + }) case TxnTypeAtomicTxnsWrapper: realTxMeta := txn.TxnMeta.(*AtomicTxnsWrapperMetadata) 
txnMeta.AtomicTxnsWrapperTxindexMetadata = &AtomicTxnsWrapperTxindexMetadata{} From 34c383aa43af9218dc74f09b0e9c49cb35491ebc Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 17 Apr 2024 10:25:43 -0400 Subject: [PATCH 651/762] Add map of staker PKID to public key for state consumer consumption (#1237) --- lib/block_view_validator.go | 10 +++++++ lib/state_change_metadata.go | 56 ++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 238ecb255..54404a33a 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1524,12 +1524,22 @@ func (bav *UtxoView) _connectUnregisterAsValidator( ) } + // Compute map of staker PKIDs to public key base58check for state change entry. + stakerPKIDToPublicKeyBase58CheckMap := make(map[PKID]string) + for _, stakerPKID := range prevStakeEntries { + stakerPKIDToPublicKeyBase58CheckMap[*stakerPKID.StakerPKID] = PkToString( + bav.GetPublicKeyForPKID(stakerPKID.StakerPKID), bav.Params) + } + // Create a UTXO operation. 
utxoOpForTxn := &UtxoOperation{ Type: OperationTypeUnregisterAsValidator, PrevValidatorEntry: prevValidatorEntry, PrevStakeEntries: prevStakeEntries, PrevLockedStakeEntries: prevLockedStakeEntries, + StateChangeMetadata: &UnregisterAsValidatorStateChangeMetadata{ + StakerPKIDToPublicKeyBase58CheckMap: stakerPKIDToPublicKeyBase58CheckMap, + }, } if err = bav.SanityCheckUnregisterAsValidatorTxn(transactorPKIDEntry.PKID, utxoOpForTxn, totalUnstakedAmountNanos); err != nil { return 0, 0, nil, errors.Wrapf(err, "_connectUnregisterAsValidator: ") diff --git a/lib/state_change_metadata.go b/lib/state_change_metadata.go index 73f6cec10..f320d1091 100644 --- a/lib/state_change_metadata.go +++ b/lib/state_change_metadata.go @@ -2,6 +2,7 @@ package lib import ( "bytes" + "github.com/deso-protocol/core/collections" "github.com/pkg/errors" ) @@ -23,6 +24,7 @@ const ( EncoderTypeDeletePostAssociationStateChangeMetadata EncoderType = 2000014 EncoderTypeStakeRewardStateChangeMetadata EncoderType = 2000015 EncoderTypeUnjailValidatorStateChangeMetadata EncoderType = 2000016 + EncoderTypeUnregisterAsValidatorStateChangeMetadata EncoderType = 2000017 ) func GetStateChangeMetadataFromOpType(opType OperationType) DeSoEncoder { @@ -61,6 +63,8 @@ func GetStateChangeMetadataFromOpType(opType OperationType) DeSoEncoder { return &StakeRewardStateChangeMetadata{} case OperationTypeUnjailValidator: return &UnjailValidatorStateChangeMetadata{} + case OperationTypeUnregisterAsValidator: + return &UnregisterAsValidatorStateChangeMetadata{} default: return nil } @@ -636,3 +640,55 @@ func (metadata *UnjailValidatorStateChangeMetadata) GetVersionByte(blockHeight u func (metadata *UnjailValidatorStateChangeMetadata) GetEncoderType() EncoderType { return EncoderTypeUnjailValidatorStateChangeMetadata } + +type UnregisterAsValidatorStateChangeMetadata struct { + StakerPKIDToPublicKeyBase58CheckMap map[PKID]string +} + +func (metadata *UnregisterAsValidatorStateChangeMetadata) 
RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { + var data []byte + // Sort the map by key so that the encoding is deterministic. + keys := make([]PKID, 0) + for key := range metadata.StakerPKIDToPublicKeyBase58CheckMap { + keys = append(keys, key) + } + sortedKeys := collections.SortStable(keys, func(ii, jj PKID) bool { + return bytes.Compare(ii.ToBytes(), jj.ToBytes()) < 0 + }) + data = append(data, UintToBuf(uint64(len(sortedKeys)))...) + for _, key := range sortedKeys { + data = append(data, key.ToBytes()...) + data = append(data, EncodeByteArray([]byte(metadata.StakerPKIDToPublicKeyBase58CheckMap[key]))...) + } + return data +} + +func (metadata *UnregisterAsValidatorStateChangeMetadata) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { + var err error + numEntries, err := ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "UnregisterAsValidatorStateChangeMetadata.Decode: Problem reading numEntries") + } + metadata.StakerPKIDToPublicKeyBase58CheckMap = make(map[PKID]string) + for ii := uint64(0); ii < numEntries; ii++ { + pkid := PKID{} + if err := pkid.FromBytes(rr); err != nil { + return errors.Wrapf(err, "UnregisterAsValidatorStateChangeMetadata.Decode: Problem reading PKID") + } + publicKeyBase58Check, err := DecodeByteArray(rr) + if err != nil { + return errors.Wrapf(err, "UnregisterAsValidatorStateChangeMetadata.Decode: Problem reading PublicKeyBase58Check") + } + metadata.StakerPKIDToPublicKeyBase58CheckMap[pkid] = string(publicKeyBase58Check) + + } + return nil +} + +func (metadata *UnregisterAsValidatorStateChangeMetadata) GetVersionByte(blockHeight uint64) byte { + return 0 +} + +func (metadata *UnregisterAsValidatorStateChangeMetadata) GetEncoderType() EncoderType { + return EncoderTypeUnregisterAsValidatorStateChangeMetadata +} From f6d7f25b8132230120074243617132ce2c7c4a9f Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 17 Apr 2024 11:51:04 
-0400 Subject: [PATCH 652/762] Fix tests that rely on waiting for txn validation (#1239) Co-authored-by: iamsofonias --- lib/pos_mempool.go | 8 ++++---- lib/pos_mempool_test.go | 12 ++++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 5930d0d02..ec26a2b28 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -939,13 +939,13 @@ func (mp *PosMempool) validateTransactions() error { // If the txn fails to connect, then we set its validated status to false and remove it from the // mempool. We also mark it as having been rejected so that it can't get re-submitted to the mempool. if err != nil { - // Try to remove the transaction with a lock. - mp.removeTransaction(txn, true) - // Mark the txn as invalid and add an error to the cache so we can return it to the user if they // try to resubmit it. txn.SetValidated(false) - mp.recentRejectedTxnCache.Add(*mempoolTxns[ii].Hash, err) + mp.recentRejectedTxnCache.Add(*txn.Hash, err) + + // Try to remove the transaction with a lock. + mp.removeTransaction(txn, true) continue } diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 93344a65c..3e52217ae 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -346,7 +346,7 @@ func TestPosMempoolTransactionValidation(t *testing.T) { mempool := NewPosMempool() require.NoError(t, mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 5, 10, + nil, 1, 100, 10, )) require.NoError(t, mempool.Start()) require.True(t, mempool.IsRunning()) @@ -362,8 +362,11 @@ func TestPosMempoolTransactionValidation(t *testing.T) { txn2 := _generateTestTxnWithOutputs(t, rand, feeMin, feeMax, m0PubBytes, m1Priv, 100, 25, output) _wrappedPosMempoolAddTransaction(t, mempool, txn1) _wrappedPosMempoolAddTransaction(t, mempool, txn2) + // Wait for the validation routine to finish. 
- time.Sleep(20 * time.Millisecond) + mempool.BlockUntilReadOnlyViewRegenerated() + mempool.BlockUntilReadOnlyViewRegenerated() + require.Equal(t, true, mempool.GetTransaction(txn1.Hash()).IsValidated()) require.Nil(t, mempool.GetTransaction(txn2.Hash())) require.NoError(t, mempool.RemoveTransaction(txn1.Hash())) @@ -392,7 +395,8 @@ func TestPosMempoolTransactionValidation(t *testing.T) { } // Wait for the validation routine to finish. - time.Sleep(20 * time.Millisecond) + _ = mempool.WaitForTxnValidation(failingTxns[len(failingTxns)-1].Hash()) + totalValidatedTxns := 0 for _, txn := range passingTxns { if mempool.GetTransaction(txn.Hash()).IsValidated() { @@ -401,7 +405,7 @@ func TestPosMempoolTransactionValidation(t *testing.T) { } // Make sure that the number of validated transactions is equal to the maxValidationViewConnects. - require.Equal(t, 4, totalValidatedTxns) + require.Equal(t, 10, totalValidatedTxns) // Now make sure that failing transactions were either removed, or remained unvalidated. 
for _, txn := range failingTxns { From 71d25d86cbcc270b4cf31f1439ab64c55f48edd8 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 17 Apr 2024 17:21:00 -0400 Subject: [PATCH 653/762] Mempool Code Cleanup (#1241) * Mempool code cleanup * Remove .DS_Store --- lib/legacy_mempool.go | 5 --- lib/pos_fee_estimator.go | 30 +++++++++++++++++- lib/pos_mempool.go | 55 +-------------------------------- lib/pos_mempool_test.go | 10 ++---- lib/pos_transaction_register.go | 4 +-- lib/server.go | 2 ++ 6 files changed, 37 insertions(+), 69 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 9eae0fd63..a9511c08a 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -265,11 +265,6 @@ func (mp *DeSoMempool) GetTransactions() []*MempoolTransaction { ) } -func (mp *DeSoMempool) GetIterator() MempoolIterator { - //TODO implement me - panic("implement me") -} - func (mp *DeSoMempool) UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) { //TODO implement me panic("implement me") diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index 8ce80752c..e3175a67e 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -94,6 +94,7 @@ func (posFeeEstimator *PoSFeeEstimator) Init( func (posFeeEstimator *PoSFeeEstimator) SetMempoolTransactionRegister(mempoolTransactionRegister *TransactionRegister) { posFeeEstimator.rwLock.Lock() defer posFeeEstimator.rwLock.Unlock() + posFeeEstimator.mempoolTransactionRegister = mempoolTransactionRegister } @@ -299,6 +300,7 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( ) uint64 { posFeeEstimator.rwLock.RLock() defer posFeeEstimator.rwLock.RUnlock() + pastBlockFeeRate := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.pastBlocksTransactionRegister, pastBlocksCongestionFactorBasisPoints, @@ -306,6 +308,7 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( 
posFeeEstimator.globalParams.MempoolFeeEstimatorNumPastBlocks, maxBlockSize, ) + mempoolFeeRate := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.mempoolTransactionRegister, mempoolCongestionFactorBasisPoints, @@ -313,12 +316,15 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( posFeeEstimator.globalParams.MempoolFeeEstimatorNumMempoolBlocks, maxBlockSize, ) + if minFeeRateNanosPerKB > pastBlockFeeRate && minFeeRateNanosPerKB > mempoolFeeRate { return minFeeRateNanosPerKB } + if pastBlockFeeRate < mempoolFeeRate { return mempoolFeeRate } + return pastBlockFeeRate } @@ -335,6 +341,7 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFee( ) (uint64, error) { posFeeEstimator.rwLock.RLock() defer posFeeEstimator.rwLock.RUnlock() + mempoolFeeEstimate, err := posFeeEstimator.mempoolFeeEstimate( txn, mempoolCongestionFactorBasisPoints, @@ -344,6 +351,7 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFee( if err != nil { return 0, errors.Wrap(err, "PoSFeeEstimator.EstimateFee: Problem computing mempool fee estimate") } + pastBlocksFeeEstimate, err := posFeeEstimator.pastBlocksFeeEstimate( txn, pastBlocksCongestionFactorBasisPoints, @@ -353,13 +361,16 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFee( if err != nil { return 0, errors.Wrap(err, "PoSFeeEstimator.EstimateFee: Problem computing past blocks fee estimate") } + minFeeRateEstimate, err := computeFeeGivenTxnAndFeeRate(txn, minFeeRateNanosPerKB) if minFeeRateEstimate > mempoolFeeEstimate && minFeeRateEstimate > pastBlocksFeeEstimate { return minFeeRateEstimate, nil } + if mempoolFeeEstimate < pastBlocksFeeEstimate { return pastBlocksFeeEstimate, nil } + return mempoolFeeEstimate, nil } @@ -382,6 +393,7 @@ func (posFeeEstimator *PoSFeeEstimator) pastBlocksFeeEstimate( if err != nil { return 0, errors.Wrap(err, "pastBlocksFeeEstimate: Problem computing txn fee") } + return txnFee, nil } @@ -404,6 +416,7 @@ func (posFeeEstimator *PoSFeeEstimator) 
mempoolFeeEstimate( if err != nil { return 0, errors.Wrap(err, "mempoolFeeEstimate: Problem computing txn fee") } + return txnFee, nil } @@ -415,6 +428,7 @@ func computeFeeGivenTxnAndFeeRate(txn *MsgDeSoTxn, feeRateNanosPerKB uint64) (ui if err != nil { return 0, errors.Wrap(err, "computeFeeGivenTxnAndFeeRate: Problem copying txn") } + // Set the nonce to the maximum value if it's not already set. if txnClone.TxnNonce == nil { txnClone.TxnNonce = &DeSoNonce{ @@ -422,6 +436,7 @@ func computeFeeGivenTxnAndFeeRate(txn *MsgDeSoTxn, feeRateNanosPerKB uint64) (ui PartialID: math.MaxUint64, } } + // Account for the possible size of the nonce. If we're dealing with an atomic txn, // the nonce values are intentionally set to zero so this is a special case. if txnClone.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { @@ -439,6 +454,7 @@ func computeFeeGivenTxnAndFeeRate(txn *MsgDeSoTxn, feeRateNanosPerKB uint64) (ui if err != nil { return 0, errors.Wrap(err, "computeFeeGivenTxnAndFeeRate: Problem computing fee rate recursively") } + return txnFeeNanos, nil } @@ -474,6 +490,7 @@ func computeFeeRecursive(txn *MsgDeSoTxn, feeRateNanosPerKB uint64) (uint64, err if err != nil { return 0, errors.Wrap(err, "computeFeeRecursive: Problem serializing txn") } + // We need to add a buffer for the signature. The maximum DER signature length is 74 bytes. 
const MaxDERSigLen = 74 var txnBytesLen uint64 @@ -499,6 +516,7 @@ func computeFeeRecursive(txn *MsgDeSoTxn, feeRateNanosPerKB uint64) (uint64, err UpdateTxnFee(txn, txnFeeNanos) return computeFeeRecursive(txn, feeRateNanosPerKB) } + return txnFeeNanos, nil } @@ -521,10 +539,12 @@ func (posFeeEstimator *PoSFeeEstimator) estimateTxnFeeGivenTransactionRegister( numBlocks, maxBlockSize, ) + txnFee, err := computeFeeGivenTxnAndFeeRate(txn, feeRateNanosPerKB) if err != nil { return 0, errors.Wrap(err, "estimateTxnFeeGivenTransactionRegister: Problem computing txn fee") } + return txnFee, nil } @@ -611,10 +631,13 @@ func (posFeeEstimator *PoSFeeEstimator) estimateFeeRateNanosPerKBGivenTransactio ) uint64 { txnRegister.RLock() defer txnRegister.RUnlock() + it := txnRegister.GetFeeTimeIterator() maxSizeOfNumBlocks := maxBlockSize * numBlocks totalTxnsSize := uint64(0) + var txns []*MempoolTx + for it.Next() { tx, ok := it.Value() if !ok { @@ -638,7 +661,8 @@ func (posFeeEstimator *PoSFeeEstimator) estimateFeeRateNanosPerKBGivenTransactio txns, priorityPercentileBasisPoints, txnRegister.minimumNetworkFeeNanosPerKB, - txnRegister.feeBucketGrowthRateBasisPoints) + txnRegister.feeBucketGrowthRateBasisPoints, + ) // If the bucketMinFee is less than or equal to the global min fee rate, we return the global min fee rate. 
if bucketMinFee <= globalMinFeeRate { return globalMinFeeRate @@ -657,6 +681,7 @@ func (posFeeEstimator *PoSFeeEstimator) estimateFeeRateNanosPerKBGivenTransactio return computeFeeTimeBucketMinFromExponent( bucketExponent-1, txnRegister.minimumNetworkFeeNanosPerKB, txnRegister.feeBucketGrowthRateBasisPoints) } + // Otherwise, we return one bucket higher than Priority fee return bucketMaxFee + 1 } @@ -674,15 +699,18 @@ func getPriorityFeeBucketFromTxns( ) (uint64, uint64) { percentilePosition := uint64( len(feeTimeOrderedTxns)) - ((priorityPercentileBasisPoints * uint64(len(feeTimeOrderedTxns))) / MaxBasisPoints) + // The percentile position should never be greater than the length of feeTimeOrderedTxns, but may be equal to // it if priorityPercentileBasisPoints is 0. In this case, we simply return the last txn's fee bucket range. if percentilePosition >= uint64(len(feeTimeOrderedTxns)) { percentilePosition = uint64(len(feeTimeOrderedTxns)) - 1 } + bucketMin, bucketMax := computeFeeTimeBucketRangeFromFeeNanosPerKB( feeTimeOrderedTxns[percentilePosition].FeePerKB, minimumNetworkFeeNanosPerKB, feeBucketGrowthRateBasisPoints, ) + return bucketMin, bucketMax } diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index ec26a2b28..db68d288c 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -30,7 +30,6 @@ type Mempool interface { RemoveTransaction(txnHash *BlockHash) error GetTransaction(txnHash *BlockHash) *MempoolTransaction GetTransactions() []*MempoolTransaction - GetIterator() MempoolIterator UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) UpdateGlobalParams(globalParams *GlobalParamsEntry) @@ -116,12 +115,6 @@ func GetAugmentedUniversalViewWithAdditionalTransactions( return newView, nil } -type MempoolIterator interface { - Next() bool - Value() (*MempoolTransaction, bool) - Initialized() bool -} - // MempoolTransaction is a simple wrapper around MsgDeSoTxn that adds a timestamp field. 
type MempoolTransaction struct { *MsgDeSoTxn @@ -241,31 +234,6 @@ type PosMempool struct { recentRejectedTxnCache lru.KVCache } -// PosMempoolIterator is a wrapper around FeeTimeIterator, modified to return MsgDeSoTxn instead of MempoolTx. -type PosMempoolIterator struct { - it *FeeTimeIterator -} - -func (it *PosMempoolIterator) Next() bool { - return it.it.Next() -} - -func (it *PosMempoolIterator) Value() (*MempoolTransaction, bool) { - txn, ok := it.it.Value() - if txn == nil || txn.Tx == nil { - return nil, ok - } - return NewMempoolTransaction(txn.Tx, txn.Added, txn.IsValidated()), ok -} - -func (it *PosMempoolIterator) Initialized() bool { - return it.it.Initialized() -} - -func NewPosMempoolIterator(it *FeeTimeIterator) *PosMempoolIterator { - return &PosMempoolIterator{it: it} -} - func NewPosMempool() *PosMempool { return &PosMempool{ status: PosMempoolStatusNotInitialized, @@ -874,27 +842,6 @@ func (mp *PosMempool) getTransactionsNoLock() []*MempoolTx { return mp.txnRegister.GetFeeTimeTransactions() } -// GetIterator returns an iterator for the mempool transactions. The iterator can be used to peek transactions in the -// mempool ordered by the Fee-Time algorithm. Transactions can be fetched with the following pattern: -// -// for it.Next() { -// if txn, ok := it.Value(); ok { -// // Do something with txn. -// } -// } -// -// Note that the iteration pattern is not thread-safe. Another lock should be used to ensure thread-safety. -func (mp *PosMempool) GetIterator() MempoolIterator { - mp.RLock() - defer mp.RUnlock() - - if !mp.IsRunning() { - return nil - } - - return NewPosMempoolIterator(mp.txnRegister.GetFeeTimeIterator()) -} - // validateTransactions updates the validated status of transactions in the mempool. The function connects the Fee-Time ordered // mempool transactions to the readOnlyLatestBlockView, creating a cumulative validationView. 
Transactions that fail to // connect to the validationView are removed from the mempool, as they would have also failed to connect during @@ -1016,7 +963,7 @@ func (mp *PosMempool) rebucketTransactionRegisterNoLock() error { mp.txnRegister = newTxnRegister // Update the fee estimator's transaction register - mp.feeEstimator.SetMempoolTransactionRegister(mp.txnRegister) + mp.feeEstimator.SetMempoolTransactionRegister(newTxnRegister) return nil } diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 3e52217ae..1e49ffe58 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -164,13 +164,9 @@ func TestPosMempoolPrune(t *testing.T) { require.Equal(3, len(newPool.GetTransactions())) // Iterate through the transactions. - it := newPool.GetIterator() - index := 0 - for it.Next() { - tx, ok := it.Value() - require.True(ok) - require.True(bytes.Equal(tx.Hash().ToBytes(), fetchedTxns[index].Hash().ToBytes())) - index++ + newPoolTxns := newPool.GetTransactions() + for ii, tx := range newPoolTxns { + require.True(bytes.Equal(tx.Hash().ToBytes(), fetchedTxns[ii].Hash().ToBytes())) } require.Equal(len(newPool.GetTransactions()), len(newPool.nonceTracker.nonceMap)) require.NoError(newPool.validateTransactions()) diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index 3560d155c..1815bcd44 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -65,8 +65,8 @@ func (tr *TransactionRegister) Init(globalParams *GlobalParamsEntry) { } func (tr *TransactionRegister) HasGlobalParamChange(globalParams *GlobalParamsEntry) bool { - tr.Lock() - defer tr.Unlock() + tr.RLock() + defer tr.RUnlock() minNetworkFee, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() diff --git a/lib/server.go b/lib/server.go index 9abcf05ce..22f812332 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2143,9 +2143,11 @@ func (srv *Server) _updatePosMempoolAfterTipChange() error { if err != nil { 
return err } + currentGlobalParams := currentUtxoView.GetCurrentGlobalParamsEntry() srv.posMempool.UpdateLatestBlock(currentUtxoView, uint64(currentBlockHeight)) srv.posMempool.UpdateGlobalParams(currentGlobalParams) + return nil } From 7700cc9cdf50793920189cc7752bba9c5886e288 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 17 Apr 2024 17:29:46 -0400 Subject: [PATCH 654/762] Fix Mempool TransactionRegister Deadlock (#1242) --- lib/pos_transaction_register.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index 1815bcd44..25b63f263 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -274,12 +274,10 @@ func (tr *TransactionRegister) Reset() { tr.totalTxnsSizeBytes = 0 } -// GetFeeTimeIterator returns an iterator over the transactions in the register. The iterator goes through all transactions -// as ordered by Fee-Time. +// GetFeeTimeIterator returns an iterator over the transactions in the register. The iterator goes through all +// transactions as ordered by Fee-Time. The caller must hold a read lock on the transaction register before +// calling this function and while iterating over the transactions. 
func (tr *TransactionRegister) GetFeeTimeIterator() *FeeTimeIterator { - tr.RLock() - defer tr.RUnlock() - return &FeeTimeIterator{ bucketIterator: tr.feeTimeBucketSet.Iterator(), mempoolTxIterator: nil, From 1dcb6f83d7bd7a8d8040d3dbad12ca93ecdbf7d0 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 17 Apr 2024 17:47:51 -0400 Subject: [PATCH 655/762] Minor Bug Fixes in Mempool and Fee Estimator (#1243) --- lib/pos_fee_estimator.go | 4 ++++ lib/pos_transaction_register.go | 3 +++ 2 files changed, 7 insertions(+) diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index e3175a67e..3a89d220d 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -363,6 +363,10 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFee( } minFeeRateEstimate, err := computeFeeGivenTxnAndFeeRate(txn, minFeeRateNanosPerKB) + if err != nil { + return 0, errors.Wrap(err, "PoSFeeEstimator.EstimateFee: Problem computing min fee rate estimate") + } + if minFeeRateEstimate > mempoolFeeEstimate && minFeeRateEstimate > pastBlocksFeeEstimate { return minFeeRateEstimate, nil } diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index 25b63f263..e351a1869 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -69,6 +69,9 @@ func (tr *TransactionRegister) HasGlobalParamChange(globalParams *GlobalParamsEn defer tr.RUnlock() minNetworkFee, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() + if !_isValidMinimumFeeAndMultiplier(minNetworkFee, bucketMultiplier) { + minNetworkFee, bucketMultiplier = _getFallbackSafeMinimumFeeAndMultiplier() + } return minNetworkFee.Cmp(tr.minimumNetworkFeeNanosPerKB) != 0 || bucketMultiplier.Cmp(tr.feeBucketGrowthRateBasisPoints) != 0 } From 32c097ad3316d675eb1ccae7f87e49fa78883a5b Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 18 Apr 2024 15:20:38 -0400 Subject: 
[PATCH 656/762] Refactor CopyUtxoView to prevent DB lookups (#1229) --- lib/block_view.go | 50 ++++++++++++++++++++++++++++++---- lib/pos_block_producer.go | 1 - lib/pos_block_producer_test.go | 4 --- 3 files changed, 45 insertions(+), 10 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index df58d0654..129d13052 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -326,7 +326,13 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { } func (bav *UtxoView) CopyUtxoView() *UtxoView { - newView := NewUtxoView(bav.Handle, bav.Params, bav.Postgres, bav.Snapshot, bav.EventManager) + newView := initNewUtxoView(bav.Handle, bav.Params, bav.Postgres, bav.Snapshot, bav.EventManager) + + // Handle items loaded from DB with _ResetViewMappingsAfterFlush + newView.NumUtxoEntries = bav.NumUtxoEntries + newView.NanosPurchased = bav.NanosPurchased + newView.USDCentsPerBitcoin = bav.USDCentsPerBitcoin + newView.GlobalParamsEntry = bav.GlobalParamsEntry.Copy() // Copy the UtxoEntry data // Note that using _setUtxoMappings is dangerous because the Pos within @@ -356,6 +362,17 @@ func (bav *UtxoView) CopyUtxoView() *UtxoView { newGlobalParamsEntry := *bav.GlobalParamsEntry newView.GlobalParamsEntry = &newGlobalParamsEntry + // Copy the forbidden public keys map + newView.ForbiddenPubKeyToForbiddenPubKeyEntry = make( + map[PkMapKey]*ForbiddenPubKeyEntry, len(bav.ForbiddenPubKeyToForbiddenPubKeyEntry)) + for pkMapKey, forbiddenPubKeyEntry := range bav.ForbiddenPubKeyToForbiddenPubKeyEntry { + if forbiddenPubKeyEntry == nil { + continue + } + newForbiddenPubKeyEntry := *forbiddenPubKeyEntry + newView.ForbiddenPubKeyToForbiddenPubKeyEntry[pkMapKey] = &newForbiddenPubKeyEntry + } + // Copy the post data newView.PostHashToPostEntry = make(map[BlockHash]*PostEntry, len(bav.PostHashToPostEntry)) for postHash, postEntry := range bav.PostHashToPostEntry { @@ -630,37 +647,50 @@ func (bav *UtxoView) CopyUtxoView() *UtxoView { } // Copy the SnapshotGlobalParamEntries + 
newView.SnapshotGlobalParamEntries = make(map[uint64]*GlobalParamsEntry, len(bav.SnapshotGlobalParamEntries)) for epochNumber, globalParamsEntry := range bav.SnapshotGlobalParamEntries { newView.SnapshotGlobalParamEntries[epochNumber] = globalParamsEntry.Copy() } // Copy the SnapshotValidatorSet + newView.SnapshotValidatorSet = make(map[SnapshotValidatorSetMapKey]*ValidatorEntry, len(bav.SnapshotValidatorSet)) for mapKey, validatorEntry := range bav.SnapshotValidatorSet { newView.SnapshotValidatorSet[mapKey] = validatorEntry.Copy() } + newView.HasFullSnapshotValidatorSetByEpoch = make(map[uint64]bool, len(bav.HasFullSnapshotValidatorSetByEpoch)) for mapKey, hasFullSnapshotValidatorSet := range bav.HasFullSnapshotValidatorSetByEpoch { newView.HasFullSnapshotValidatorSetByEpoch[mapKey] = hasFullSnapshotValidatorSet } + newView.SnapshotValidatorBLSPublicKeyPKIDPairEntries = make( + map[SnapshotValidatorBLSPublicKeyMapKey]*BLSPublicKeyPKIDPairEntry, + len(bav.SnapshotValidatorBLSPublicKeyPKIDPairEntries), + ) for mapKey, blsPublicKeyPKIDPairEntry := range bav.SnapshotValidatorBLSPublicKeyPKIDPairEntries { newView.SnapshotValidatorBLSPublicKeyPKIDPairEntries[mapKey] = blsPublicKeyPKIDPairEntry.Copy() } // Copy the SnapshotValidatorSetTotalStakeAmountNanos + newView.SnapshotValidatorSetTotalStakeAmountNanos = make(map[uint64]*uint256.Int, + len(bav.SnapshotValidatorSetTotalStakeAmountNanos), + ) for epochNumber, totalStakeAmountNanos := range bav.SnapshotValidatorSetTotalStakeAmountNanos { newView.SnapshotValidatorSetTotalStakeAmountNanos[epochNumber] = totalStakeAmountNanos.Clone() } // Copy the SnapshotLeaderSchedule + newView.SnapshotLeaderSchedule = make(map[SnapshotLeaderScheduleMapKey]*PKID, len(bav.SnapshotLeaderSchedule)) for mapKey, validatorPKID := range bav.SnapshotLeaderSchedule { newView.SnapshotLeaderSchedule[mapKey] = validatorPKID.NewPKID() } + newView.HasFullSnapshotLeaderScheduleByEpoch = make(map[uint64]bool, 
len(bav.HasFullSnapshotLeaderScheduleByEpoch)) for mapKey, hasFullSnapshotLeaderSchedule := range bav.HasFullSnapshotLeaderScheduleByEpoch { newView.HasFullSnapshotLeaderScheduleByEpoch[mapKey] = hasFullSnapshotLeaderSchedule } // Copy the SnapshotStakesToReward + newView.SnapshotStakesToReward = make(map[SnapshotStakeMapKey]*StakeEntry, len(bav.SnapshotStakesToReward)) for mapKey, snapshotStakeToReward := range bav.SnapshotStakesToReward { newView.SnapshotStakesToReward[mapKey] = snapshotStakeToReward.Copy() } @@ -709,15 +739,14 @@ func NewUtxoViewWithSnapshotCache( return utxoView } -func NewUtxoView( +func initNewUtxoView( _handle *badger.DB, _params *DeSoParams, _postgres *Postgres, _snapshot *Snapshot, _eventManager *EventManager, ) *UtxoView { - - view := UtxoView{ + return &UtxoView{ Handle: _handle, Params: _params, // Note that the TipHash does not get reset as part of @@ -732,6 +761,17 @@ func NewUtxoView( EventManager: _eventManager, // Set everything else in _ResetViewMappings() } +} + +func NewUtxoView( + _handle *badger.DB, + _params *DeSoParams, + _postgres *Postgres, + _snapshot *Snapshot, + _eventManager *EventManager, +) *UtxoView { + + view := initNewUtxoView(_handle, _params, _postgres, _snapshot, _eventManager) // Note that the TipHash does not get reset as part of // _ResetViewMappingsAfterFlush because it is not something that is affected by a @@ -752,7 +792,7 @@ func NewUtxoView( // but we can use it here to initialize the mappings. view._ResetViewMappingsAfterFlush() - return &view + return view } func (bav *UtxoView) _deleteUtxoMappings(utxoEntry *UtxoEntry) error { diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 348c8c440..fecd242fd 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -297,7 +297,6 @@ func (pbp *PosBlockProducer) getBlockTransactions( ) { // Get Fee-Time ordered transactions from the mempool feeTimeTxns := pbp.mp.GetTransactions() - // Try to connect transactions one by one. 
blocksTxns := []*MsgDeSoTxn{} maxUtilityFee := uint64(0) diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index 08661fad2..a3430fc23 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -261,10 +261,8 @@ func TestGetBlockTransactions(t *testing.T) { // In this case, some transactions should not make it into the block, despite being valid. The transactions // that are rejected should have the lowest Fee-Time priority. - latestBlockViewCopy := latestBlockView.CopyUtxoView() txns, maxUtilityFee, err := pbp.getBlockTransactions(NewPublicKey(m1PubBytes), latestBlockView, 3, 0, 1000, 1000) require.NoError(err) - require.Equal(latestBlockViewCopy, latestBlockView) require.Equal(true, len(passingTxns) > len(txns)) totalUtilityFee := uint64(0) for _, txn := range txns { @@ -304,10 +302,8 @@ func _testProduceBlockNoSizeLimit(t *testing.T, mp *PosMempool, pbp *PosBlockPro totalTxns := numPassing + numFailing + numInvalid require.Equal(totalTxns, len(mp.GetTransactions())) - latestBlockViewCopy := latestBlockView.CopyUtxoView() txns, maxUtilityFee, err := pbp.getBlockTransactions(pbp.proposerPublicKey, latestBlockView, blockHeight, 0, math.MaxUint64, math.MaxUint64) require.NoError(err) - require.Equal(latestBlockViewCopy, latestBlockView) require.Equal(totalAcceptedTxns, len(txns)) return txns, maxUtilityFee From 7bf4626e801b61a8bb667928343c13ae5363c87c Mon Sep 17 00:00:00 2001 From: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> Date: Thu, 18 Apr 2024 13:14:30 -0700 Subject: [PATCH 657/762] Relax check transaction signature checking (#1244) This check causes CreateDAOCoinLimitOrder to fail during transaction construction because it calls simulateSubmitTransaction, which calls getDAOCoinLimitOrderSimulatedExecutionResult, which then hits this. Easiest to just remove it. The signatures are always checked later anyway. 
--- lib/blockchain.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 3551ee41e..36a08bf88 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1865,13 +1865,6 @@ func CheckTransactionSanity(txn *MsgDeSoTxn, blockHeight uint32, params *DeSoPar existingInputs[*txin] = true } - // Make sure the transaction has a signature. - if txn.TxnMeta.GetTxnType() != TxnTypeBitcoinExchange && - txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper && - txn.Signature.Sign == nil { - return RuleErrorTransactionHasNoSignature - } - return nil } From a7a46448e9fd06ecf6e354284d14ff5a9bfe9c00 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 18 Apr 2024 16:16:28 -0400 Subject: [PATCH 658/762] Move txn signature check to ValidateDeSoTxnSanityBalanceModel (#1245) --- lib/validation_deso_txn.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/lib/validation_deso_txn.go b/lib/validation_deso_txn.go index 15f2091b0..1df6cc4df 100644 --- a/lib/validation_deso_txn.go +++ b/lib/validation_deso_txn.go @@ -9,13 +9,25 @@ import ( // ValidateDeSoTxnSanityBalanceModel performs a variety of sanity checks to ensure transaction is correctly formatted // under the balance model. The test checks pretty much everything, except validating the transaction's signature or // that the transaction is valid given a BlockView. -func ValidateDeSoTxnSanityBalanceModel(txn *MsgDeSoTxn, blockHeight uint64, - params *DeSoParams, globalParams *GlobalParamsEntry) error { +func ValidateDeSoTxnSanityBalanceModel( + txn *MsgDeSoTxn, + blockHeight uint64, + params *DeSoParams, + globalParams *GlobalParamsEntry, +) error { if txn == nil || params == nil || globalParams == nil { return fmt.Errorf("ValidateDeSoTxnSanityBalanceModel: Transaction, params, and globalParams cannot be nil") } + // Make sure the transaction has a signature. 
+ if txn.TxnMeta.GetTxnType() != TxnTypeBitcoinExchange && + txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper && + txn.Signature.Sign == nil { + return errors.Wrap( + RuleErrorTransactionHasNoSignature, "ValidateDeSoTxnSanityBalanceModel: Transaction has no signature") + } + // Validate encoding if err := ValidateDeSoTxnEncoding(txn, blockHeight, globalParams, params); err != nil { return errors.Wrapf(err, "ValidateDeSoTxnSanityBalanceModel: ") From d1d56d5450fd6b7018738ae28485fa349ec60df5 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 18 Apr 2024 17:13:44 -0400 Subject: [PATCH 659/762] Create SafeUtxoView Construct For Side Effect Free Connects (#1246) --- lib/block_view_utils.go | 72 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 lib/block_view_utils.go diff --git a/lib/block_view_utils.go b/lib/block_view_utils.go new file mode 100644 index 000000000..a03850867 --- /dev/null +++ b/lib/block_view_utils.go @@ -0,0 +1,72 @@ +package lib + +import "github.com/pkg/errors" + +// SafeUtxoView is a wrapper around a UtxoView that provides a safe way to connect transactions +// into a UtxoView without side effects when the connect fails. +type SafeUtxoView struct { + primaryView *UtxoView + backupView *UtxoView +} + +// NewSafeUtxoView create a new instance of a SafeUtxoView using the input UtxoView as a template. +// The input UtxoView should never get mutated and only copies of it are used internally. +func NewSafeUtxoView(utxoView *UtxoView) *SafeUtxoView { + return &SafeUtxoView{ + primaryView: utxoView.CopyUtxoView(), + backupView: utxoView.CopyUtxoView(), + } +} + +// ConnectTransaction is a safe way to connect a transaction to a view: +// - If the transaction successfully connects, then the view is updated, and the result of the connect is +// returned. In the success case, this operation is O(1). 
+// - If the transaction fails to connect, then the view is left unchanged and an error is returned. +// In the failure case, this operation is O(N) where N is the number of entries in the view. +// +// The primary view is the view that is used to connect transactions. If a transaction fails to connect +// to the primary view, then the secondary view is used to restore the primary view. +func (safeUtxoView *SafeUtxoView) ConnectTransaction( + txn *MsgDeSoTxn, + txHash *BlockHash, + blockHeight uint32, + blockTimestampNanoSecs int64, + verifySignatures bool, + ignoreUtxos bool, +) ( + _utxoOps []*UtxoOperation, + _totalInput uint64, + _totalOutput uint64, + _fees uint64, + _err error, +) { + // Connect the transaction to the primary view. + utxoOpsForTxn, totalInput, totalOutput, fees, err := safeUtxoView.primaryView.ConnectTransaction( + txn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos, + ) + + // If the transaction failed to connect, then restore the primary view and return the error. + if err != nil { + safeUtxoView.primaryView = safeUtxoView.backupView.CopyUtxoView() + return nil, 0, 0, 0, errors.Wrapf(err, "TryConnectTransaction: Problem connecting txn on copy view") + } + + // Connect the transaction to the backup view. + _, _, _, _, err = safeUtxoView.backupView.ConnectTransaction( + txn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos, + ) + + // If the transaction failed to connect to the backup view, then restore the backup view from + // the primary view and swallow the error. This should never happen. + if err != nil { + safeUtxoView.backupView = safeUtxoView.primaryView.CopyUtxoView() + } + + // Return the result from connecting the transaction to the primary view. + return utxoOpsForTxn, totalInput, totalOutput, fees, nil +} + +// GetUtxoView returns a copy of the primary view that is safe to be used and mutated by the caller. 
+func (safeUtxoView *SafeUtxoView) GetUtxoView() *UtxoView { + return safeUtxoView.primaryView.CopyUtxoView() +} From d8c36beadcaac789f3252ac95fed19f7883a4a5e Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 18 Apr 2024 17:15:51 -0400 Subject: [PATCH 660/762] Use SafeUtxoView in The PoS Mempool Txn Validation (#1247) --- lib/pos_mempool.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index db68d288c..e510e9527 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -868,6 +868,9 @@ func (mp *PosMempool) validateTransactions() error { return nil } + // Create a SafeUtxoView instance to connect the transactions into. + safeUtxoView := NewSafeUtxoView(validationView) + // Iterate through all the transactions in the mempool and connect them to copies of the validation view. for ii, txn := range mempoolTxns { // Break out if we've attempted to connect the maximum number of txns to the view @@ -875,11 +878,11 @@ func (mp *PosMempool) validateTransactions() error { break } - // Connect the transaction to a copy of the validation view. We can skip signatures on the transaction + // Connect the transaction into the SafeUtxoView. We can skip signatures on the transaction // connect if the transaction has already previously been validated and been found to have a valid // signature. This optimizes the connect by not repeating signature verification on a transaction // more than once. - resultingUtxoView, _, _, _, _, err := validationView.ConnectTransactionIntoNewUtxoView( + _, _, _, _, err := safeUtxoView.ConnectTransaction( txn.Tx, txn.Hash, uint32(nextBlockHeight), nextBlockTimestamp, !txn.IsValidated(), false, ) @@ -899,13 +902,11 @@ func (mp *PosMempool) validateTransactions() error { // The txn successfully connected. We set its validated status to true. 
txn.SetValidated(true) - - // We do a simple pointer update on the validation view here because the txn has already been - // connected to the resulting UtxoView. This allows us to avoid performing a second - // UtxoView.ConnectTransaction operation. - validationView = resultingUtxoView } + // Get the final UtxoView from the SafeUtxoView. + validationView = safeUtxoView.GetUtxoView() + // Update the augmentedLatestBlockView with the latest validationView after the transactions // have been connected. mp.augmentedReadOnlyLatestBlockViewMutex.Lock() From 69139c46de35712743ff9c09b70792da7da6c7e6 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 18 Apr 2024 17:18:47 -0400 Subject: [PATCH 661/762] Use SafeUtxoView in the Pos Block Producer Block Construction (#1248) --- lib/pos_block_producer.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index fecd242fd..e6e75337e 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -1,10 +1,11 @@ package lib import ( - "github.com/btcsuite/btcd/btcec/v2" "math" "time" + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/wire" "github.com/deso-protocol/core/bls" @@ -301,7 +302,10 @@ func (pbp *PosBlockProducer) getBlockTransactions( blocksTxns := []*MsgDeSoTxn{} maxUtilityFee := uint64(0) currentBlockSize := uint64(0) - blockUtxoView := latestBlockView.CopyUtxoView() + + // Create an instance of SafeUtxoView to connect transactions to. + safeUtxoView := NewSafeUtxoView(latestBlockView) + for _, txn := range feeTimeTxns { // If we've exceeded the soft max block size, we exit. We want to allow at least one txn that moves the // cumulative block size past the soft max, but don't want to add more txns beyond that. 
@@ -319,16 +323,16 @@ func (pbp *PosBlockProducer) getBlockTransactions( continue } - blockUtxoViewCopy := blockUtxoView.CopyUtxoView() - _, _, _, fees, err := blockUtxoViewCopy._connectTransaction( - txn.GetTxn(), txn.Hash(), uint32(newBlockHeight), newBlockTimestampNanoSecs, - true, false) + // Connect the transaction to the SafeUtxoView to test if it connects. + _, _, _, fees, err := safeUtxoView.ConnectTransaction( + txn.GetTxn(), txn.Hash(), uint32(newBlockHeight), newBlockTimestampNanoSecs, true, false, + ) - // Check if the transaction connected. + // If the transaction fails to connect, then we skip it. if err != nil { continue } - blockUtxoView = blockUtxoViewCopy + blocksTxns = append(blocksTxns, txn.GetTxn()) currentBlockSize += uint64(len(txnBytes)) From ee6c14754d86f5805bc81c229ba84758aec86450 Mon Sep 17 00:00:00 2001 From: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> Date: Fri, 19 Apr 2024 06:38:23 -0700 Subject: [PATCH 662/762] fix (#1250) --- lib/network.go | 67 +++++++++++++++++++++++++++++++++++++++++++++ lib/network_test.go | 42 ++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+) diff --git a/lib/network.go b/lib/network.go index d0993c06f..3dc9ff253 100644 --- a/lib/network.go +++ b/lib/network.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "github.com/decred/dcrd/dcrec/secp256k1/v4" ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "io" "math" @@ -3504,6 +3505,72 @@ func SignRecoverable(bb []byte, privateKey *btcec.PrivateKey) (*DeSoSignature, e }, nil } +// We define this struct to support JSON encoding of DeSoSignature. We didn't need this +// initially, but then at some point we upgraded our signature from btcec.Signature to +// ecdsa2.Signature, and the latter keeps its R and S values *private* rather than public, which +// then breaks the ability to naively JSON-serialize the signature of a txn. 
To workaround this +// we define this legacy struct that makes R and S *public*, we copy the ecdsa2.Signature +// into it, and then JSON serialize it. To deserialize, we do the reverse: read the bytes into +// the legacy struct and then convert them into the ecdsa2.Signature. +type legacySigStruct struct { + R [32]byte + S [32]byte +} + +// See comment on legacySigStruct for why we need this +func (sig DeSoSignature) MarshalJSON() ([]byte, error) { + var legacySig *legacySigStruct + if sig.Sign != nil { + r := sig.Sign.R() + rr := &r + + s := sig.Sign.S() + ss := &s + + legacySig = &legacySigStruct{ + R: rr.Bytes(), + S: ss.Bytes(), + } + } + + return json.Marshal(struct { + Sign *legacySigStruct + RecoveryId byte + IsRecoverable bool + }{ + Sign: legacySig, + RecoveryId: sig.RecoveryId, + IsRecoverable: sig.IsRecoverable, + }) +} + +// See comment on legacySigStruct for why we need this +func (sig *DeSoSignature) UnmarshalJSON(data []byte) error { + aux := struct { + Sign *legacySigStruct + RecoveryId byte + IsRecoverable bool + }{} + + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.Sign != nil { + r := secp256k1.ModNScalar{} + rr := aux.Sign.R + r.SetBytes(&rr) + s := secp256k1.ModNScalar{} + ss := aux.Sign.S + s.SetBytes(&ss) + + sig.Sign = ecdsa2.NewSignature(&r, &s) + } + sig.RecoveryId = aux.RecoveryId + sig.IsRecoverable = aux.IsRecoverable + + return nil +} + // DeSoNonce is a nonce that can be used to prevent replay attacks. It is used in the DeSo protocol // to prevent replay attacks when a user is trying to create a transaction. The nonce comprises // two uint64s: the expiration block height and the partial ID. 
The expiration block height is the diff --git a/lib/network_test.go b/lib/network_test.go index 7928bb7d5..af1ca2e92 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -3,6 +3,7 @@ package lib import ( "bytes" "encoding/hex" + "encoding/json" ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "math/big" "math/rand" @@ -1800,3 +1801,44 @@ func TestDeSoSignature_SerializeCompact(t *testing.T) { } } } + +func TestTxnJsonEncodeDecode(t *testing.T) { + priv, err := btcec.NewPrivateKey() + require.NoError(t, err) + + // We want to test with v0 and v1 txns. + for _, includeV1Txns := range []bool{true, false} { + // The case without a signature can fail with a nil pointer so we + // want to make sure we test it explicitly. + for _, withSignature := range []bool{true, false} { + for _, txn := range expectedTransactions(includeV1Txns) { + // Copy the txn + txnCopy, err := txn.Copy() + require.NoError(t, err) + + // Sign the txn + txBytes, err := txn.ToBytes(true /*preSignature*/) + require.NoError(t, err) + txHash := Sha256DoubleHash(txBytes)[:] + desoSignature, err := SignRecoverable(txHash, priv) + require.NoError(t, err) + if withSignature { + txnCopy.Signature = *desoSignature + } + + // Serialize the txn using json encoding + txnJson, err := json.Marshal(txnCopy) + require.NoError(t, err) + + // Deserialize the txn using json decoding + txn2 := MsgDeSoTxn{} + err = json.Unmarshal(txnJson, &txn2) + require.NoError(t, err) + + // Make sure it's the same + require.Equal(t, txnCopy, &txn2) + } + } + + } +} From 9a56c60775447a949b020345b7573370cbc331e2 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 19 Apr 2024 14:47:31 -0400 Subject: [PATCH 663/762] Transfer changes from z/update-state-change-entry-encoder to pos (#1194) --- lib/blockchain.go | 26 +- lib/db_utils.go | 4 +- lib/pos_blockchain.go | 6 +- lib/snapshot.go | 1 + lib/state_change_syncer.go | 409 +++++++++++++++++++++++--------- 
lib/state_change_syncer_test.go | 1 + 6 files changed, 318 insertions(+), 129 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 36a08bf88..00490864e 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -2551,17 +2551,15 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures } bc.timer.End("Blockchain.ProcessBlock: Transactions Db snapshot & operations") if innerErr = bc.blockView.FlushToDbWithTxn(txn, blockHeight); innerErr != nil { + // If we're in the middle of a sync, we should notify the event manager that we failed to sync the block. + if bc.eventManager != nil && !bc.eventManager.isMempoolManager { + bc.eventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ + FlushId: uuid.Nil, + Succeeded: false, + }) + } return errors.Wrapf(innerErr, "ProcessBlock: Problem writing utxo view to db on simple add to tip") } - // Immediately after the utxo view is flushed to badger, emit a state syncer flushed event, so that - // state syncer maintains a consistent view of the blockchain. - // Note: We ignore the mempool manager here, as that process handles state syncer flush events itself. - if bc.eventManager != nil && !bc.eventManager.isMempoolManager { - bc.eventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ - FlushId: uuid.Nil, - Succeeded: innerErr == nil, - }) - } bc.timer.End("Blockchain.ProcessBlock: Transactions Db utxo flush") bc.timer.Start("Blockchain.ProcessBlock: Transactions Db snapshot & operations") @@ -2920,6 +2918,16 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // Signal the server that we've accepted this block in some way. if bc.eventManager != nil { bc.eventManager.blockAccepted(&BlockEvent{Block: desoBlock}) + // Immediately after the utxo view is flushed to badger, emit a state syncer flushed event, so that + // state syncer maintains a consistent view of the blockchain. 
+ // Note: We ignore the mempool manager here, as that process handles state syncer flush events itself. + if !bc.eventManager.isMempoolManager { + fmt.Printf("Emitting state syncer flushed event for synced block\n") + bc.eventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ + FlushId: uuid.Nil, + Succeeded: true, + }) + } } bc.timer.Print("Blockchain.ProcessBlock: Initial") diff --git a/lib/db_utils.go b/lib/db_utils.go index 98bcf6666..64ad6366d 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -464,7 +464,7 @@ type DBPrefixes struct { // The Minor/Major distinction is used to deterministically map the two accessGroupIds of message's sender/recipient // into a single pair based on the lexicographical ordering of the two accessGroupIds. This is done to ensure that // both sides of the conversation have the same key for the same conversation, and we can store just a single message. - PrefixDmMessagesIndex []byte `prefix_id:"[75]" is_state:"true"` + PrefixDmMessagesIndex []byte `prefix_id:"[75]" is_state:"true" core_state:"true"` // PrefixDmThreadIndex is modified by the NewMessage transaction and is used to store a DmThreadEntry // for each existing dm thread. It answers the question: "Give me all the threads for a particular user." 
@@ -1149,6 +1149,7 @@ func DBSetWithTxn(txn *badger.Txn, snap *Snapshot, key []byte, value []byte, eve KeyBytes: key, EncoderBytes: value, AncestralRecordBytes: ancestralValue, + IsReverted: false, }, FlushId: uuid.Nil, IsMempoolTxn: eventManager.isMempoolManager, @@ -1251,6 +1252,7 @@ func DBDeleteWithTxn(txn *badger.Txn, snap *Snapshot, key []byte, eventManager * KeyBytes: key, EncoderBytes: nil, AncestralRecordBytes: ancestralValue, + IsReverted: false, }, FlushId: uuid.Nil, IsMempoolTxn: eventManager.isMempoolManager, diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 154268389..7be62f35a 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1725,6 +1725,9 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool return errors.Wrapf(err, "commitBlockPoS: Problem putting block in db: ") } + if bc.snapshot != nil { + bc.snapshot.FinishProcessBlock(blockNode) + } if bc.eventManager != nil { bc.eventManager.blockCommitted(&BlockEvent{ Block: block, @@ -1741,9 +1744,6 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool }) } } - if bc.snapshot != nil { - bc.snapshot.FinishProcessBlock(blockNode) - } currentEpochNumber, err := utxoView.GetCurrentEpochNumber() if err != nil { return errors.Wrapf(err, "commitBlockPoS: Problem getting current epoch number") diff --git a/lib/snapshot.go b/lib/snapshot.go index 787a8f347..3ddeb7244 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -1175,6 +1175,7 @@ func (snap *Snapshot) SetSnapshotChunk(mainDb *badger.DB, mainDbMutex *deadlock. 
OperationType: DbOperationTypeInsert, KeyBytes: dbEntry.Key, EncoderBytes: dbEntry.Value, + IsReverted: false, }, FlushId: dbFlushId, }) diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 00efd3137..62017e7f0 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -60,18 +60,26 @@ type StateChangeEntry struct { BlockHeight uint64 // The block associated with this state change event. Only applicable to utxo operations. Block *MsgDeSoBlock + // For mempool state changes, whether this operation has been booted from the mempool and should be reverted + // from the state change record. + IsReverted bool } // RawEncodeWithoutMetadata constructs the bytes to represent a StateChangeEntry. // The format is: -// [operation type (varint)][encoder type (varint)][key length (varint)][key bytes] +// [operation type (varint)][is reverted bool][encoder type (varint)][key length (varint)][key bytes] // [encoder length (varint)][encoder bytes][is mempool (1 byte)][utxo ops length (varint)][utxo ops bytes] func (stateChangeEntry *StateChangeEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { // Get byte length of keyBytes (will be nil for mempool transactions) var data []byte + // OperationType data = append(data, UintToBuf(uint64(stateChangeEntry.OperationType))...) + // IsReverted + data = append(data, BoolToByte(stateChangeEntry.IsReverted)) + // EncoderType data = append(data, UintToBuf(uint64(stateChangeEntry.EncoderType))...) + // KeyBytes data = append(data, EncodeByteArray(stateChangeEntry.KeyBytes)...) // The encoder can either be represented in raw bytes or as an encoder. If it's represented in bytes, we use that. @@ -115,8 +123,14 @@ func (stateChangeEntry *StateChangeEntry) RawEncodeWithoutMetadata(blockHeight u // Encode the block height. data = append(data, UintToBuf(blockHeight)...) - // Encode the transaction. - data = append(data, EncodeToBytes(blockHeight, stateChangeEntry.Block)...) 
+ // Encode the block, only for utxo operations. + if stateChangeEntry.EncoderType == EncoderTypeUtxoOperation { + data = append(data, EncodeToBytes(blockHeight, stateChangeEntry.Block)...) + } else { + // If the encoder type is not a utxo operation, encode a nil value. + // We do this to simplify the decode logic and avoid an encoder migration. + data = append(data, EncodeToBytes(blockHeight, nil)...) + } return data } @@ -129,6 +143,13 @@ func (stateChangeEntry *StateChangeEntry) RawDecodeWithoutMetadata(blockHeight u } stateChangeEntry.OperationType = StateSyncerOperationType(operationType) + // Decode IsReverted + isReverted, err := ReadBoolByte(rr) + if err != nil { + return errors.Wrapf(err, "StateChangeEntry.RawDecodeWithoutMetadata: error decoding is reverted") + } + stateChangeEntry.IsReverted = isReverted + // Decode EncoderType encoderType, err := ReadUvarint(rr) if err != nil { @@ -181,7 +202,6 @@ func (stateChangeEntry *StateChangeEntry) RawDecodeWithoutMetadata(blockHeight u } else if err != nil { return errors.Wrapf(err, "StateChangeEntry.RawDecodeWithoutMetadata: error decoding block") } - return nil } @@ -221,16 +241,18 @@ type StateChangeSyncer struct { // we write the correct entries to the state change file. // During blocksync, all flushes are synchronous, so we don't need to worry about this. As such, those flushes // are given the uuid.Nil ID. - UnflushedBytes map[uuid.UUID]UnflushedStateSyncerBytes + UnflushedCommittedBytes map[uuid.UUID]UnflushedStateSyncerBytes + UnflushedMempoolBytes map[uuid.UUID]UnflushedStateSyncerBytes // This map is used to keep track of all the key and value pairs that state syncer is tracking (and therefore // don't need to be re-emitted to the state change file). // The key is the stringifyed key of the entry, plus the operation type. // The value is the badger entry that was flushed to the db. 
- MempoolKeyValueMap map[string][]byte + MempoolSyncedKeyValueMap map[string]*StateChangeEntry + + MempoolNewlyFlushedTxns map[string]*StateChangeEntry // This map tracks the keys that were flushed to the mempool in a single flush. - // Every time a flush occurs, this map is cleared, as opposed to the MempoolKeyValueMap, which is only cleared - // when a new block is processed. + // Every time a flush occurs, this map is cleared, as opposed to the MempoolSyncedKeyValueMap, which is only cleared // This is used to determine if there are any tracked mempool transactions that have been ejected from the current // mempool state. // When this occurs, the mempool is reset, and all tracked mempool transactions are re-emitted to the state change file. @@ -238,6 +260,11 @@ type StateChangeSyncer struct { // clear out any mempool entries that were ejected from the mempool. MempoolFlushKeySet map[string]bool + // This cache stores the transactions and their associated utxo ops that are currently in the mempool. + // This allows us to reduce the number of connect transaction calls when syncing the mempool + MempoolCachedTxns map[string][]*StateChangeEntry + + MempoolCachedUtxoView *UtxoView // Tracks the flush IDs of the last block sync flush and the last mempool flush. // These are not used during hypersync, as many flushes are being processed asynchronously. 
BlockSyncFlushId uuid.UUID @@ -320,9 +347,12 @@ func NewStateChangeSyncer(stateChangeDir string, nodeSyncType NodeSyncType, memp StateChangeMempoolFile: stateChangeMempoolFile, StateChangeMempoolIndexFile: stateChangeMempoolIndexFile, StateChangeMempoolFileSize: uint64(stateChangeMempoolFileInfo.Size()), - UnflushedBytes: make(map[uuid.UUID]UnflushedStateSyncerBytes), - MempoolKeyValueMap: make(map[string][]byte), + UnflushedCommittedBytes: make(map[uuid.UUID]UnflushedStateSyncerBytes), + UnflushedMempoolBytes: make(map[uuid.UUID]UnflushedStateSyncerBytes), + MempoolSyncedKeyValueMap: make(map[string]*StateChangeEntry), + MempoolNewlyFlushedTxns: make(map[string]*StateChangeEntry), MempoolFlushKeySet: make(map[string]bool), + MempoolCachedTxns: make(map[string][]*StateChangeEntry), StateSyncerMutex: &sync.Mutex{}, SyncType: nodeSyncType, BlocksyncCompleteEntriesFlushed: blocksyncCompleteEntriesFlushed, @@ -375,41 +405,13 @@ func (stateChangeSyncer *StateChangeSyncer) _handleStateSyncerOperation(event *S } if event.IsMempoolTxn { - // Create a mempool flush ID if one doesn't already exist. - if event.FlushId == uuid.Nil && stateChangeSyncer.MempoolFlushId == uuid.Nil { - stateChangeSyncer.MempoolFlushId = uuid.New() - } + // Set the flushId to the mempool flush ID. + //flushId = stateChangeSyncer.BlockSyncFlushI // If the event flush ID is nil, then we need to use the global mempool flush ID. if flushId == uuid.Nil { flushId = stateChangeSyncer.MempoolFlushId } - - // The current state of the tracked mempool is stored in the MempoolKeyValueMap. If this entry is already in there - // then we don't need to re-write it to the state change file. - // Create key for op + key map - txKey := createMempoolTxKey(stateChangeEntry.OperationType, stateChangeEntry.KeyBytes) - - // Track the key in the MempoolFlushKeySet. 
- if stateChangeEntry.OperationType == DbOperationTypeDelete { - delete(stateChangeSyncer.MempoolFlushKeySet, txKey) - } else { - stateChangeSyncer.MempoolFlushKeySet[txKey] = true - } - - // Check to see if the key is in the map, and if the value is the same as the value in the event. - if valueBytes, ok := stateChangeSyncer.MempoolKeyValueMap[txKey]; ok && bytes.Equal(valueBytes, event.StateChangeEntry.EncoderBytes) { - // If the key is in the map, and the entry bytes are the same as those that are already tracked by state syncer, - // then we don't need to write the state change entry to the state change file - it's already being tracked. - return - } - - // Track the key and value if this is a new entry to the mempool. - if stateChangeEntry.OperationType == DbOperationTypeDelete { - delete(stateChangeSyncer.MempoolKeyValueMap, txKey) - } else { - stateChangeSyncer.MempoolKeyValueMap[txKey] = event.StateChangeEntry.EncoderBytes - } } else { // If the flush ID is nil, then we need to use the global block sync flush ID. if flushId == uuid.Nil { @@ -449,13 +451,44 @@ func (stateChangeSyncer *StateChangeSyncer) _handleStateSyncerOperation(event *S // Set the flush ID. stateChangeEntry.FlushId = flushId + if event.IsMempoolTxn { + // The current state of the tracked mempool is stored in the MempoolSyncedKeyValueMap. If this entry is already in there + // then we don't need to re-write it to the state change file. + // Create key for op + key map + txKey := createMempoolTxKey(stateChangeEntry.KeyBytes) + + // Track the key in the MempoolFlushKeySet. + stateChangeSyncer.MempoolFlushKeySet[txKey] = true + + // Check to see if the key is in the map, and if the value is the same as the value in the event. 
+ if cachedSCE, ok := stateChangeSyncer.MempoolSyncedKeyValueMap[txKey]; ok && bytes.Equal(cachedSCE.EncoderBytes, event.StateChangeEntry.EncoderBytes) && cachedSCE.OperationType == event.StateChangeEntry.OperationType { + // If the key is in the map, and the entry bytes are the same as those that are already tracked by state syncer, + // then we don't need to write the state change entry to the state change file - it's already being tracked. + return + } else if ok { + // If the key is in the map, and the entry bytes are different, then we need to track the new entry. + // Skip if the entry is already being tracked as a new flush. + if _, newFlushExists := stateChangeSyncer.MempoolNewlyFlushedTxns[txKey]; !newFlushExists { + // If the key is in the map, and the entry bytes are different, then we need to track the new entry. + stateChangeSyncer.MempoolNewlyFlushedTxns[txKey] = cachedSCE + } + } else { + // If the key is not in the map, then we need to track the new entry. + stateChangeSyncer.MempoolNewlyFlushedTxns[txKey] = nil + } + + // Track the key and value if this is a new entry to the mempool, or if the encoder bytes or operation type + // changed since it was last synced. + stateChangeSyncer.MempoolSyncedKeyValueMap[txKey] = event.StateChangeEntry + } + // Encode the state change entry. We encode as a byte array, so the consumer can buffer just the bytes needed // to decode this entry when reading from file. entryBytes := EncodeToBytes(stateChangeSyncer.BlockHeight, stateChangeEntry, false) writeBytes := EncodeByteArray(entryBytes) // Add the StateChangeEntry bytes to the queue of bytes to be written to the state change file upon Badger db flush. - stateChangeSyncer.addTransactionToQueue(stateChangeEntry.FlushId, writeBytes) + stateChangeSyncer.addTransactionToQueue(stateChangeEntry.FlushId, writeBytes, event.IsMempoolTxn) } // _handleStateSyncerFlush is called when a Badger db flush takes place. 
It calls a helper function that takes the bytes that @@ -464,11 +497,17 @@ func (stateChangeSyncer *StateChangeSyncer) _handleStateSyncerFlush(event *State stateChangeSyncer.StateSyncerMutex.Lock() defer stateChangeSyncer.StateSyncerMutex.Unlock() + glog.V(2).Infof("Handling state syncer flush: %+v", event) + if event.IsMempoolFlush { // If this is a mempool flush, make sure a block hasn't mined since the mempool entries were added to queue. // If not, reset the mempool maps and file, and start from scratch. The consumer will revert the mempool transactions // it currently has and sync from scratch. - if stateChangeSyncer.BlockSyncFlushId != event.BlockSyncFlushId { + if (stateChangeSyncer.BlockSyncFlushId != event.BlockSyncFlushId && event.BlockSyncFlushId != uuid.Nil) || + stateChangeSyncer.BlockSyncFlushId != event.FlushId { + glog.V(2).Infof( + "The flush ID has changed, bailing now. Event: %v, Event block sync: %v, Global block sync: %v\n", + event.FlushId, event.BlockSyncFlushId, stateChangeSyncer.BlockSyncFlushId) stateChangeSyncer.ResetMempool() return } @@ -477,15 +516,55 @@ func (stateChangeSyncer *StateChangeSyncer) _handleStateSyncerFlush(event *State // This would mean that an entry was ejected from the mempool. // When this happens, we need to reset the mempool and start from scratch, so that the consumer can revert the // mempool transactions it currently has and sync the mempool from scratch. - for key := range stateChangeSyncer.MempoolKeyValueMap { - // If any of the keys that the mempool is currently tracking weren't included in the flush, the state syncer - // mempool is bad and needs to be reset. 
- if _, ok := stateChangeSyncer.MempoolFlushKeySet[key]; !ok { - stateChangeSyncer.ResetMempool() - return + // + // Example: + // + // Flush: + // Key: a + // Key: b + // Key: d + + // Synced: + // Key: a + // Key: b + // Key: c <- Revert this one + // Key: d + + if event.Succeeded { + for key, cachedSCE := range stateChangeSyncer.MempoolSyncedKeyValueMap { + // If any of the keys that the mempool is currently tracking weren't included in the flush, that entry + // needs to be reverted from the mempool. + if _, ok := stateChangeSyncer.MempoolFlushKeySet[key]; !ok { + // Confirm that the block sync ID hasn't shifted. If it has, bail now. + if cachedSCE.FlushId != stateChangeSyncer.BlockSyncFlushId { + glog.V(2).Infof("The flush ID has changed, inside key/value check, bailing now.\n") + stateChangeSyncer.ResetMempool() + return + } + + cachedSCE.IsReverted = true + + // Create a revert state change entry and add it to the queue. This will signal the state change + // consumer to revert the synced entry. + entryBytes := EncodeToBytes(stateChangeSyncer.BlockHeight, cachedSCE, false) + writeBytes := EncodeByteArray(entryBytes) + + glog.V(2).Infof("Reverting entry %d\n", cachedSCE.EncoderType) + + // Add the StateChangeEntry bytes to the queue of bytes to be written to the state change file upon Badger db flush. + stateChangeSyncer.addTransactionToQueue(cachedSCE.FlushId, writeBytes, true) + + // Remove this entry from the synced map + delete(stateChangeSyncer.MempoolSyncedKeyValueMap, key) + } } } + + // Reset the mempool flush set. 
stateChangeSyncer.MempoolFlushKeySet = make(map[string]bool) + } else { + glog.V(2).Infof("Here is the flush ID: %v\n", event.FlushId) + glog.V(2).Infof("Here is the block sync flush ID: %v\n", event.BlockSyncFlushId) } err := stateChangeSyncer.FlushTransactionsToFile(event) @@ -496,15 +575,19 @@ func (stateChangeSyncer *StateChangeSyncer) _handleStateSyncerFlush(event *State if !event.IsMempoolFlush { // After flushing blocksync transactions to file, reset the block sync flush ID, and reset the mempool. stateChangeSyncer.BlockSyncFlushId = uuid.New() + glog.V(2).Infof("Setting a new blocksync flush ID: %v\n", stateChangeSyncer.BlockSyncFlushId) stateChangeSyncer.ResetMempool() } } func (stateChangeSyncer *StateChangeSyncer) ResetMempool() { - stateChangeSyncer.MempoolKeyValueMap = make(map[string][]byte) + glog.V(2).Info("Resetting mempool.\n") + stateChangeSyncer.MempoolSyncedKeyValueMap = make(map[string]*StateChangeEntry) + stateChangeSyncer.MempoolNewlyFlushedTxns = make(map[string]*StateChangeEntry) stateChangeSyncer.MempoolFlushKeySet = make(map[string]bool) - delete(stateChangeSyncer.UnflushedBytes, stateChangeSyncer.MempoolFlushId) + delete(stateChangeSyncer.UnflushedMempoolBytes, stateChangeSyncer.MempoolFlushId) stateChangeSyncer.MempoolFlushId = uuid.Nil + stateChangeSyncer.MempoolCachedTxns = make(map[string][]*StateChangeEntry) // Truncate the mempool files. stateChangeSyncer.StateChangeMempoolFile.Truncate(0) stateChangeSyncer.StateChangeMempoolIndexFile.Truncate(0) @@ -512,15 +595,23 @@ func (stateChangeSyncer *StateChangeSyncer) ResetMempool() { } // Add a transaction to the queue of transactions to be flushed to disk upon badger db flush. 
-func (stateChangeSyncer *StateChangeSyncer) addTransactionToQueue(flushId uuid.UUID, writeBytes []byte) { - unflushedBytes, exists := stateChangeSyncer.UnflushedBytes[flushId] +func (stateChangeSyncer *StateChangeSyncer) addTransactionToQueue(flushId uuid.UUID, writeBytes []byte, isMempool bool) { + + var unflushedBytes UnflushedStateSyncerBytes + var exists bool + + if isMempool { + unflushedBytes, exists = stateChangeSyncer.UnflushedMempoolBytes[flushId] + } else { + unflushedBytes, exists = stateChangeSyncer.UnflushedCommittedBytes[flushId] + } + if !exists { unflushedBytes = UnflushedStateSyncerBytes{ StateChangeBytes: []byte{}, StateChangeOperationIndexes: []uint64{}, } } - // Get the byte index of where this transaction occurs in the unflushed bytes, and add it to the list of // indexes that should be written to the index file. dbOperationIndex := uint64(len(unflushedBytes.StateChangeBytes)) @@ -528,12 +619,18 @@ func (stateChangeSyncer *StateChangeSyncer) addTransactionToQueue(flushId uuid.U unflushedBytes.StateChangeBytes = append(unflushedBytes.StateChangeBytes, writeBytes...) - stateChangeSyncer.UnflushedBytes[flushId] = unflushedBytes + if isMempool { + stateChangeSyncer.UnflushedMempoolBytes[flushId] = unflushedBytes + } else { + stateChangeSyncer.UnflushedCommittedBytes[flushId] = unflushedBytes + } } // FlushTransactionsToFile writes the bytes that have been cached on the StateChangeSyncer to the state change file. func (stateChangeSyncer *StateChangeSyncer) FlushTransactionsToFile(event *StateSyncerFlushedEvent) error { flushId := event.FlushId + + glog.V(2).Infof("Flushing to file: %+v", event) // Get the relevant global flush ID from the state change syncer if the flush ID is nil. if event.FlushId == uuid.Nil { if event.IsMempoolFlush { @@ -559,12 +656,34 @@ func (stateChangeSyncer *StateChangeSyncer) FlushTransactionsToFile(event *State // If the flush failed, delete the unflushed bytes and associated metadata. 
// Also delete any unconnected mempool txns from our cache. if !event.Succeeded { - delete(stateChangeSyncer.UnflushedBytes, flushId) + glog.V(2).Infof("Deleting unflushed bytes for id: %s", flushId) + if event.IsMempoolFlush { + delete(stateChangeSyncer.UnflushedMempoolBytes, flushId) + // Loop through the unflushed mempool transactions and delete them from the cache. + for key, sce := range stateChangeSyncer.MempoolNewlyFlushedTxns { + if sce != nil { + stateChangeSyncer.MempoolSyncedKeyValueMap[key] = sce + } else { + delete(stateChangeSyncer.MempoolSyncedKeyValueMap, key) + delete(stateChangeSyncer.MempoolFlushKeySet, key) + } + } + } else { + delete(stateChangeSyncer.UnflushedCommittedBytes, flushId) + } + return nil } - unflushedBytes, exists := stateChangeSyncer.UnflushedBytes[flushId] + var unflushedBytes UnflushedStateSyncerBytes + var exists bool + if event.IsMempoolFlush { + unflushedBytes, exists = stateChangeSyncer.UnflushedMempoolBytes[flushId] + } else { + unflushedBytes, exists = stateChangeSyncer.UnflushedCommittedBytes[flushId] + } if !exists { + glog.V(2).Infof("Unflushed bytes for flush ID doesn't exist: %s", flushId.String()) return nil } @@ -617,26 +736,39 @@ func (stateChangeSyncer *StateChangeSyncer) FlushTransactionsToFile(event *State } // Update unflushed bytes map to remove the flushed bytes. 
- delete(stateChangeSyncer.UnflushedBytes, flushId) + if event.IsMempoolFlush { + delete(stateChangeSyncer.UnflushedMempoolBytes, flushId) + stateChangeSyncer.MempoolNewlyFlushedTxns = make(map[string]*StateChangeEntry) + } else { + delete(stateChangeSyncer.UnflushedCommittedBytes, flushId) + } + return nil } -func createMempoolTxKey(operationType StateSyncerOperationType, keyBytes []byte) string { - return fmt.Sprintf("%v%v", operationType, string(keyBytes)) +func createMempoolTxKey(keyBytes []byte) string { + return fmt.Sprintf("%v", string(keyBytes)) } // SyncMempoolToStateSyncer flushes all mempool transactions to the db, capturing those state changes // in the mempool state change file. It also loops through all unconnected transactions and their associated // utxo ops and adds them to the mempool state change file. func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Server) (bool, error) { + startTime := time.Now() originalCommittedFlushId := stateChangeSyncer.BlockSyncFlushId + if originalCommittedFlushId == uuid.Nil { + return false, nil + } + if !server.GetMempool().IsRunning() { return true, nil } blockHeight := uint64(server.blockchain.bestChain[len(server.blockchain.bestChain)-1].Height) + stateChangeSyncer.MempoolFlushId = originalCommittedFlushId + stateChangeSyncer.BlockHeight = blockHeight mempoolUtxoView, err := server.GetMempool().GetAugmentedUniversalView() @@ -669,13 +801,21 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser // more than once in the mempool transactions. 
txn := server.blockchain.db.NewTransaction(true) defer txn.Discard() - + glog.V(2).Infof("Time since mempool sync start: %v", time.Since(startTime)) + startTime = time.Now() err = mempoolUtxoView.FlushToDbWithTxn(txn, uint64(server.blockchain.bestChain[len(server.blockchain.bestChain)-1].Height)) - - mempoolTxUtxoView := NewUtxoView(server.blockchain.db, server.blockchain.params, server.blockchain.postgres, nil, &mempoolEventManager) if err != nil { - return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer: CreateMempoolTxUtxoView: ") + mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ + FlushId: originalCommittedFlushId, + Succeeded: false, + IsMempoolFlush: true, + }) + return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer: FlushToDbWithTxn: ") } + glog.V(2).Infof("Time since db flush: %v", time.Since(startTime)) + mempoolTxUtxoView := NewUtxoView(server.blockchain.db, server.blockchain.params, server.blockchain.postgres, nil, &mempoolEventManager) + glog.V(2).Infof("Time since utxo view: %v", time.Since(startTime)) + startTime = time.Now() // Loop through all the transactions in the mempool and connect them and their utxo ops to the mempool view. 
mempoolTxns := server.GetMempool().GetOrderedTransactions() @@ -684,13 +824,22 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser uncommittedBlocks, err := server.blockchain.GetUncommittedBlocks(mempoolUtxoView.TipHash) if err != nil { mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ - FlushId: uuid.Nil, + FlushId: originalCommittedFlushId, + Succeeded: false, + IsMempoolFlush: true, + }) + glog.V(2).Infof("After the mempool flush: %+v", &StateSyncerFlushedEvent{ + FlushId: originalCommittedFlushId, Succeeded: false, IsMempoolFlush: true, }) return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer: ") } + glog.V(2).Infof("Time since getting transactions: %v", time.Since(startTime)) + startTime = time.Now() + glog.V(2).Infof("Mempool synced len after flush: %d", len(stateChangeSyncer.MempoolSyncedKeyValueMap)) + // TODO: Have Z look at if we need to do some caching in the uncommitted blocks logic. // First connect the uncommitted blocks to the mempool view. for _, uncommittedBlock := range uncommittedBlocks { utxoViewAndOpsAtBlockHash, err := server.blockchain.getUtxoViewAndUtxoOpsAtBlockHash(*uncommittedBlock.Hash) @@ -722,78 +871,103 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser mempoolTxUtxoView.EventManager = &mempoolEventManager } - currentTimestamp := time.Now().UnixNano() + //Check to see if every txn hash in our cached txns is in the first n txns in the mempool. + //N represents the length of our cached txn map. for ii, mempoolTx := range mempoolTxns { - if server.params.IsPoSBlockHeight(blockHeight) && uint64(ii) > stateChangeSyncer.MempoolTxnSyncLimit { + if _, ok := stateChangeSyncer.MempoolCachedTxns[mempoolTx.Hash.String()]; !ok { + // If any of the transaction hashes in the first n transactions don't line up with our cache map, the mempool + // has changed since the last cache, and we need to reset it. 
+ stateChangeSyncer.MempoolCachedTxns = make(map[string][]*StateChangeEntry) + stateChangeSyncer.MempoolCachedUtxoView = nil + fmt.Printf("Txn not in cache, resetting\n") break } - var utxoOpsForTxn []*UtxoOperation - if server.params.IsPoSBlockHeight(blockHeight + 1) { - // We need to create a copy of the view in the event that the transaction fails to - // connect. If it fails to connect, we need to reset the view to its original state. - // and try to connect it as a failing transaction. If that fails as well, we just continue - // and the mempoolTxUtxoView is unmodified. - var copiedView *UtxoView - copiedView = mempoolTxUtxoView.CopyUtxoView() - utxoOpsForTxn, _, _, _, err = copiedView.ConnectTransaction( - mempoolTx.Tx, mempoolTx.Hash, uint32(blockHeight+1), - currentTimestamp, false, false /*ignoreUtxos*/) - // If the transaction successfully connected, we update mempoolTxUtxoView to the copied view. - if err == nil { - mempoolTxUtxoView = copiedView - } else { - glog.V(2).Infof( - "StateChangeSyncer.SyncMempoolToStateSyncer failed connecting mempool tx with (hash= %v): (err=%v)", - mempoolTx.Hash, - err, - ) - // If the txn fails to connect, then we should not emit any state changes for it. - continue + + // Once we're past the number of cached txns, we have confirmed that nothing in our cache is out of date and can break. + if ii >= len(stateChangeSyncer.MempoolCachedTxns)-1 { + if stateChangeSyncer.MempoolCachedUtxoView != nil { + // If we know that all our transactions are good, set the state of the utxo view to the cached one, and exit. + mempoolUtxoView = stateChangeSyncer.MempoolCachedUtxoView } + fmt.Printf("All txns match, continueing: %v\n", ii) + break + } + } + + currentTimestamp := time.Now().UnixNano() + for _, mempoolTx := range mempoolTxns { + var txnStateChangeEntry *StateChangeEntry + var utxoOpStateChangeEntry *StateChangeEntry + // Check if the transaction is already in the cache. If so, skip it. 
+ txHash := mempoolTx.Hash.String() + if stateChangeEntries, ok := stateChangeSyncer.MempoolCachedTxns[txHash]; ok { + txnStateChangeEntry = stateChangeEntries[0] + utxoOpStateChangeEntry = stateChangeEntries[1] } else { - // For PoW block heights, we can just connect the transaction to the mempool view. - utxoOpsForTxn, _, _, _, err = mempoolTxUtxoView.ConnectTransaction( - mempoolTx.Tx, mempoolTx.Hash, uint32(blockHeight+1), - currentTimestamp, false, false /*ignoreUtxos*/) + utxoOpsForTxn, _, _, _, err := mempoolTxUtxoView.ConnectTransaction( + mempoolTx.Tx, mempoolTx.Hash, uint32(blockHeight+1), currentTimestamp, false, false /*ignoreUtxos*/) if err != nil { + //fmt.Printf("Right before the mempool flush error: %v\n", err) + //continue + mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ + FlushId: originalCommittedFlushId, + Succeeded: false, + IsMempoolFlush: true, + }) + stateChangeSyncer.MempoolCachedTxns = make(map[string][]*StateChangeEntry) + stateChangeSyncer.MempoolCachedUtxoView = nil return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer ConnectTransaction: ") } - } - - // Emit transaction state change. - mempoolUtxoView.EventManager.stateSyncerOperation(&StateSyncerOperationEvent{ - StateChangeEntry: &StateChangeEntry{ + txnStateChangeEntry = &StateChangeEntry{ OperationType: DbOperationTypeUpsert, KeyBytes: TxnHashToTxnKey(mempoolTx.Hash), EncoderBytes: EncodeToBytes(blockHeight, mempoolTx.Tx, false), - }, - FlushId: uuid.Nil, - IsMempoolTxn: true, - }) + IsReverted: false, + } - // Capture the utxo ops for the transaction in a UTXOOp bundle. - utxoOpBundle := &UtxoOperationBundle{ - UtxoOpBundle: [][]*UtxoOperation{}, - } + // Capture the utxo ops for the transaction in a UTXOOp bundle. 
+ utxoOpBundle := &UtxoOperationBundle{ + UtxoOpBundle: [][]*UtxoOperation{}, + } - utxoOpBundle.UtxoOpBundle = append(utxoOpBundle.UtxoOpBundle, utxoOpsForTxn) + utxoOpBundle.UtxoOpBundle = append(utxoOpBundle.UtxoOpBundle, utxoOpsForTxn) - // Emit UTXOOp bundle event - mempoolUtxoView.EventManager.stateSyncerOperation(&StateSyncerOperationEvent{ - StateChangeEntry: &StateChangeEntry{ + utxoOpStateChangeEntry = &StateChangeEntry{ OperationType: DbOperationTypeUpsert, KeyBytes: _DbKeyForTxnUtxoOps(mempoolTx.Hash), EncoderBytes: EncodeToBytes(blockHeight, utxoOpBundle, false), - }, - FlushId: uuid.Nil, - IsMempoolTxn: true, + IsReverted: false, + } + + // Add both state change entries to the mempool sync map. + stateChangeSyncer.MempoolCachedTxns[txHash] = []*StateChangeEntry{txnStateChangeEntry, utxoOpStateChangeEntry} + } + + // Emit transaction state change. + mempoolUtxoView.EventManager.stateSyncerOperation(&StateSyncerOperationEvent{ + StateChangeEntry: txnStateChangeEntry, + FlushId: originalCommittedFlushId, + IsMempoolTxn: true, + }) + + // Emit UTXOOp bundle event + mempoolUtxoView.EventManager.stateSyncerOperation(&StateSyncerOperationEvent{ + StateChangeEntry: utxoOpStateChangeEntry, + FlushId: originalCommittedFlushId, + IsMempoolTxn: true, }) } + // Update the cached utxo view to represent the new cached state. + stateChangeSyncer.MempoolCachedUtxoView = mempoolTxUtxoView.CopyUtxoView() + glog.V(2).Infof("Time to connect all %d txns: %v", len(mempoolTxns), time.Since(startTime)) + startTime = time.Now() + glog.V(2).Infof("Mempool flushed len: %d", len(stateChangeSyncer.MempoolFlushKeySet)) + glog.V(2).Infof("Mempool synced len after all: %d", len(stateChangeSyncer.MempoolSyncedKeyValueMap)) // Before flushing the mempool to the state change file, check if a block has mined. If so, abort the flush. 
- if err != nil || originalCommittedFlushId != stateChangeSyncer.BlockSyncFlushId { + if originalCommittedFlushId != stateChangeSyncer.BlockSyncFlushId { mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ - FlushId: uuid.Nil, + FlushId: originalCommittedFlushId, Succeeded: false, IsMempoolFlush: true, }) @@ -801,11 +975,12 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser } mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ - FlushId: uuid.Nil, + FlushId: originalCommittedFlushId, Succeeded: true, IsMempoolFlush: true, BlockSyncFlushId: originalCommittedFlushId, }) + glog.V(2).Infof("Time to flush: %v", time.Since(startTime)) return false, nil } @@ -815,9 +990,10 @@ func (stateChangeSyncer *StateChangeSyncer) StartMempoolSyncRoutine(server *Serv // Wait for mempool to be initialized. for server.GetMempool() == nil || server.blockchain.chainState() != SyncStateFullyCurrent { time.Sleep(15000 * time.Millisecond) + glog.V(2).Infof("Mempool: %v", server.mempool) + glog.V(2).Infof("Chain state: %v", server.blockchain.chainState()) } if !stateChangeSyncer.BlocksyncCompleteEntriesFlushed && stateChangeSyncer.SyncType == NodeSyncTypeBlockSync { - fmt.Printf("Flushing to file") err := stateChangeSyncer.FlushAllEntriesToFile(server) if err != nil { fmt.Printf("StateChangeSyncer.StartMempoolSyncRoutine: Error flushing all entries to file: %v", err) @@ -886,6 +1062,7 @@ func (stateChangeSyncer *StateChangeSyncer) FlushAllEntriesToFile(server *Server OperationType: DbOperationTypeInsert, KeyBytes: dbEntry.Key, EncoderBytes: dbEntry.Value, + IsReverted: false, } // If this prefix is the prefix for UTXO Ops, fetch the transaction for each UTXO Op and attach it to the UTXO Op. 
diff --git a/lib/state_change_syncer_test.go b/lib/state_change_syncer_test.go index 083c851bd..a77559c4c 100644 --- a/lib/state_change_syncer_test.go +++ b/lib/state_change_syncer_test.go @@ -44,6 +44,7 @@ func TestStateChangeEntryEncoder(t *testing.T) { KeyBytes: []byte{1, 2, 3}, Encoder: postEntry, EncoderType: postEntry.GetEncoderType(), + IsReverted: false, } stateChangeEntryBytes := EncodeToBytes(0, stateChangeEntry) From c85f2c27b2143fc3d854e49f8b656b2273b8d4e9 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 22 Apr 2024 14:18:28 -0400 Subject: [PATCH 664/762] Mempool Minor Naming and Testing Cleanup (#1253) * Mempool Minor Naming and Testing Cleanup * Fix build --- lib/legacy_mempool.go | 2 +- lib/pos_transaction_register_test.go | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 652f9536e..a9511c08a 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -16,7 +16,7 @@ import ( "sync/atomic" "time" - "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcd/btcutil" "github.com/gernest/mention" "github.com/dgraph-io/badger/v4" diff --git a/lib/pos_transaction_register_test.go b/lib/pos_transaction_register_test.go index baa4c2fbe..1fe85d311 100644 --- a/lib/pos_transaction_register_test.go +++ b/lib/pos_transaction_register_test.go @@ -147,7 +147,7 @@ func TestTransactionRegisterPrune(t *testing.T) { txnRegister.Init(globalParams) totalSize := uint64(0) for _, tx := range txnPool { - require.Nil(txnRegister.AddTransaction(tx)) + require.NoError(txnRegister.AddTransaction(tx)) totalSize += tx.TxSizeBytes } @@ -229,7 +229,7 @@ func TestTransactionRegisterPrune(t *testing.T) { // Re-add all transactions totalSize = 0 for _, tx := range txnPool { - require.Nil(txnRegister.AddTransaction(tx)) + require.NoError(txnRegister.AddTransaction(tx)) totalSize += tx.TxSizeBytes } require.Equal(totalSize, 
txnRegister.totalTxnsSizeBytes) @@ -244,7 +244,7 @@ func TestTransactionRegisterPrune(t *testing.T) { // Re-add all transactions again totalSize = 0 for _, tx := range txnPool { - require.Nil(txnRegister.AddTransaction(tx)) + require.NoError(txnRegister.AddTransaction(tx)) totalSize += tx.TxSizeBytes } @@ -270,7 +270,7 @@ func TestTransactionRegisterWithRemoves(t *testing.T) { txnRegister.Init(globalParams) _testBucketStandardRemoveTest(t, txnPool, globalParams, false, func(tx *MempoolTx) { - require.Nil(txnRegister.AddTransaction(tx)) + require.NoError(txnRegister.AddTransaction(tx)) }, func(tx *MempoolTx) { txnRegister.RemoveTransaction(tx) @@ -299,7 +299,7 @@ func TestTransactionRegisterBasic(t *testing.T) { txnRegister.Init(globalParams) _testBucketStandardAddTest(t, txnPool, globalParams, false, func(tx *MempoolTx) { - require.Nil(txnRegister.AddTransaction(tx)) + require.NoError(txnRegister.AddTransaction(tx)) }, func() []*MempoolTx { return txnRegister.GetFeeTimeTransactions() From c0787db5f870545fbbde87f825c330c9a8fcb869 Mon Sep 17 00:00:00 2001 From: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> Date: Mon, 22 Apr 2024 11:41:12 -0700 Subject: [PATCH 665/762] Fix PoS Fee Estimation Bugs (#1252) Co-authored-by: iamsofonias --- lib/block_view.go | 3 +- lib/block_view_types.go | 25 +++++++---- lib/blockchain.go | 43 +++++++++++++++---- lib/constants.go | 74 +++++++++++++++++++++++++++++++-- lib/pos_fee_estimator.go | 22 ++++++---- lib/pos_mempool.go | 16 ++++--- lib/pos_transaction_register.go | 62 ++++++++++++++++++--------- 7 files changed, 191 insertions(+), 54 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 25e78f106..8f3c57098 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3421,8 +3421,7 @@ func (bav *UtxoView) _connectUpdateGlobalParams( // Validate that the minimum fee bucket size is greater than the minimum allowed. 
mergedGlobalParams := MergeGlobalParamEntryDefaults(&newGlobalParamsEntry, bav.Params) - minFeeRateNanosPerKB, feeBucketMultiplier := mergedGlobalParams. - ComputeFeeTimeBucketMinimumFeeAndMultiplier() + minFeeRateNanosPerKB, feeBucketMultiplier := mergedGlobalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() nextFeeBucketMin := computeFeeTimeBucketMinFromExponent(1, minFeeRateNanosPerKB, feeBucketMultiplier) if nextFeeBucketMin < mergedGlobalParams.MinimumNetworkFeeNanosPerKB+MinFeeBucketSize { return 0, 0, nil, RuleErrorFeeBucketSizeTooSmall diff --git a/lib/block_view_types.go b/lib/block_view_types.go index d132a7bfe..ebb676dde 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -4438,15 +4438,26 @@ func (gp *GlobalParamsEntry) GetEncoderType() EncoderType { return EncoderTypeGlobalParamsEntry } -// ComputeFeeTimeBucketMinimumFeeAndMultiplier takes the MinimumNetworkFeeNanosPerKB and FeeBucketGrowthRateBasisPoints for -// the GlobalParamsEntry, and returns them as big.Floats. +// ComputeFeeTimeBucketMinimumFeeAndMultiplier takes the MinimumNetworkFeeNanosPerKB and FeeBucketGrowthRateBasisPoints, +// scales the growth rate into a multiplier, and returns the result as big.Floats. func (gp *GlobalParamsEntry) ComputeFeeTimeBucketMinimumFeeAndMultiplier() ( - _minimumRate *big.Float, _bucketMultiplier *big.Float) { - + _minimumRate *big.Float, + _bucketMultiplier *big.Float, +) { + minimumNetworkFeeNanosPerKB, growthRateBasisPoints := gp.GetFeeTimeBucketMinimumFeeAndGrowthRateBasisPoints() + return minimumNetworkFeeNanosPerKB, ComputeMultiplierFromGrowthRateBasisPoints(growthRateBasisPoints) +} + +// GetFeeTimeBucketMinimumFeeAndGrowthRateBasisPoints returns the the MinimumNetworkFeeNanosPerKB and +// FeeBucketGrowthRateBasisPoints params as returns them as big.Floats. 
+func (gp *GlobalParamsEntry) GetFeeTimeBucketMinimumFeeAndGrowthRateBasisPoints() ( + _minimumRate *big.Float, + _bucketMultiplier *big.Float, +) { minimumNetworkFeeNanosPerKB := NewFloat().SetUint64(gp.MinimumNetworkFeeNanosPerKB) - feeBucketMultiplier := NewFloat().SetUint64(10000 + gp.FeeBucketGrowthRateBasisPoints) - feeBucketMultiplier.Quo(feeBucketMultiplier, NewFloat().SetUint64(10000)) - return minimumNetworkFeeNanosPerKB, feeBucketMultiplier + growthRateBasisPoints := NewFloat().SetUint64(gp.FeeBucketGrowthRateBasisPoints) + + return minimumNetworkFeeNanosPerKB, growthRateBasisPoints } // This struct holds info on a readers interactions (e.g. likes) with a post. diff --git a/lib/blockchain.go b/lib/blockchain.go index b19cdf554..01772668b 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5010,9 +5010,15 @@ func (bc *Blockchain) CreateMaxSpend( if bc.params.IsPoSBlockHeight(uint64(bc.BlockTip().Height)) { maxBlockSizeBytes = utxoView.GetSoftMaxBlockSizeBytesPoS() } - // TODO: replace MaxBasisPoints with variables configured by flags. - feeAmountNanos, err = mempool.EstimateFee(txn, minFeeRateNanosPerKB, - MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, maxBlockSizeBytes) + feeAmountNanos, err = mempool.EstimateFee( + txn, + minFeeRateNanosPerKB, + // TODO: Make these flags or GlobalParams + bc.params.MempoolCongestionFactorBasisPoints, + bc.params.MempoolPriorityPercentileBasisPoints, + bc.params.PastBlocksCongestionFactorBasisPoints, + bc.params.PastBlocksPriorityPercentileBasisPoints, + maxBlockSizeBytes) if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, "CreateMaxSpend: Problem estimating fee: ") } @@ -5147,9 +5153,15 @@ func (bc *Blockchain) AddInputsAndChangeToTransactionWithSubsidy( if bc.params.IsPoSBlockHeight(uint64(bc.BlockTip().Height)) { maxBlockSizeBytes = utxoView.GetSoftMaxBlockSizeBytesPoS() } - // TODO: replace MaxBasisPoints with variables configured by flags. 
- newTxFee, err := mempool.EstimateFee(txArg, minFeeRateNanosPerKB, MaxBasisPoints, - MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, maxBlockSizeBytes) + newTxFee, err := mempool.EstimateFee( + txArg, + minFeeRateNanosPerKB, + // TODO: Make these flags or GlobalParams + bc.params.MempoolCongestionFactorBasisPoints, + bc.params.MempoolPriorityPercentileBasisPoints, + bc.params.PastBlocksCongestionFactorBasisPoints, + bc.params.PastBlocksPriorityPercentileBasisPoints, + maxBlockSizeBytes) UpdateTxnFee(txArg, newTxFee) if err != nil { return 0, 0, 0, 0, errors.Wrapf(err, @@ -5863,7 +5875,15 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( txn.ExtraData[NextAtomicTxnPreHash] = dummyAtomicHashBytes txn.ExtraData[PreviousAtomicTxnPreHash] = dummyAtomicHashBytes newFeeEstimate, err := mempool.EstimateFee( - txn, 0, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, maxBlockSizeBytes) + txn, + // TODO: Allow the caller to specify minFeeRateNanosPerKB + 0, + // TODO: Make these flags or GlobalParams + bc.params.MempoolCongestionFactorBasisPoints, + bc.params.MempoolPriorityPercentileBasisPoints, + bc.params.PastBlocksCongestionFactorBasisPoints, + bc.params.PastBlocksPriorityPercentileBasisPoints, + maxBlockSizeBytes) if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to recompute fee estimate") } @@ -5949,7 +5969,14 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( // Use EstimateFee to set the fee INCLUDING the wrapper. Note that this fee should generally be a bit // higher than the totalFee computed above because the atomic wrapper adds overhead. 
newFeeEstimate, err := mempool.EstimateFee( - atomicTxn, 0, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, MaxBasisPoints, maxBlockSizeBytes) + atomicTxn, + 0, + // TODO: Make these flags or GlobalParams + bc.params.MempoolCongestionFactorBasisPoints, + bc.params.MempoolPriorityPercentileBasisPoints, + bc.params.PastBlocksCongestionFactorBasisPoints, + bc.params.PastBlocksPriorityPercentileBasisPoints, + maxBlockSizeBytes) if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to compute "+ "fee on full txn") diff --git a/lib/constants.go b/lib/constants.go index 373d8122e..b6db7de56 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -785,12 +785,13 @@ type DeSoParams struct { // DefaultMempoolFeeEstimatorNumMempoolBlocks is the default value for // GlobalParamsEntry.MempoolFeeEstimatorNumMempoolBlocks. See the comment in GlobalParamsEntry - // for a description of its usage. + // for a description of its usage. Also see the comment on the setting in DeSoMainnetParams DefaultMempoolFeeEstimatorNumMempoolBlocks uint64 // DefaultMempoolFeeEstimatorNumPastBlocks is the default value for // GlobalParamsEntry.MempoolFeeEstimatorNumPastBlocks. See the comment in GlobalParamsEntry - // for a description of its usage. + // for a description of its usage. Also see the comment on the DeSoMainnetParams value of + // this setting. DefaultMempoolFeeEstimatorNumPastBlocks uint64 // DefaultMaxBlockSizeBytesPoS is the default value for GlobalParamsEntry.MaxBlockSizeBytesPoS. 
@@ -822,6 +823,12 @@ type DeSoParams struct { ForkHeights ForkHeights + // See comment on the DeSoMainnetParams settings of these values + MempoolCongestionFactorBasisPoints uint64 + MempoolPriorityPercentileBasisPoints uint64 + PastBlocksCongestionFactorBasisPoints uint64 + PastBlocksPriorityPercentileBasisPoints uint64 + EncoderMigrationHeights *EncoderMigrationHeights EncoderMigrationHeightsList []*MigrationHeight } @@ -1288,10 +1295,39 @@ var DeSoMainnetParams = DeSoParams{ // The maximum size of the mempool in bytes. DefaultMempoolMaxSizeBytes: 3 * 1024 * 1024 * 1024, // 3GB - // The number of future blocks to consider when estimating the mempool fee. + // The number of future blocks to consider when estimating the mempool fee. Setting this + // value to 1 means we will start to increase fees if the mempool has 1 block's worth of + // txns in it, and decrease them if it has less. Note that a setting of 1 is somewhat + // aggresive, but it's good because it ensures that the typical fee estimate we give will + // be highly likely to get one's transaction included in the next block. + // + // Note that if you are *blasting* txns at the mempool, then having this value set to 1 may + // cause the fee estimator to report a higher and higher fee as you're constructing and + // submitting txns (assuming you are sending txns faster than they are going into blocks). + // This can cause txns that you submit later to have higher fees, which will cause them to + // sort to the *front* of the mempool, potentially causing dependency issues for you. If you + // absolutely need txns to run in a specific order, you have several options: + // + // 1. Query the fee estimator for the fee you should use for your txns *before* you construct + // them, and then construct your txns by explicitly specifying that fee. 
As long as you use + // the same fee for all of your txns, and as long as you submit them directly to the current leader, + // you should be guaranteed to have them go into the next blocks in order. This is because the + // mempool uses a smart fee bucketing approach, whereby txns that pay similar fees are ordered + // by time (within a fee bucket). Alternatively, you can just set a fee above the minimum + // manually, which will get your txn included in the blocks eventually. At the time of this + // writing, a fee of 1,000 nanos per kb was well above what was needed to get into the next + // block but still quite cheap (1/10,000th of a cent). + // + // 2. Use an atomic txn to submit all of your txns at once. This will ensure that they either + // all go through or all fail together. + // + // 3. Slow down your txn submission to ensure that txns are going into blocks before + // their dependencies are submitted. DefaultMempoolFeeEstimatorNumMempoolBlocks: 1, - // The number of past blocks to consider when estimating the mempool fee. + // The number of past blocks to consider when estimating the mempool fee. This is + // means that we will increase or decrease fees based on the past minute's worth of + // blocks dynamically. DefaultMempoolFeeEstimatorNumPastBlocks: 50, // The maximum size of blocks for PoS. @@ -1315,6 +1351,30 @@ var DeSoMainnetParams = DeSoParams{ // DisableNetworkManagerRoutines is a testing flag that disables the network manager routines. DisableNetworkManagerRoutines: false, + // The congestion factor determines when we will start to increase or decrease fees. + // We set the congestion factor to 90% for past blocks and mempool. This makes it so that we will + // start to increase fees when the past N blocks (DefaultMempoolFeeEstimatorNumPastBlocks) are + // 90% full on average or the mempool has 90% of 1 block's worth of txns in it (actually 90% of + // DefaultMempoolFeeEstimatorNumMempoolBlocks). 
This is good because it ensures that the typical + // fee estimate we give will be highly likely to get one's transaction included in the next block + // or, at worst, a block within about a minute (for N=50). + // + // Using the 90th percentile allows the fee market to be aggressive, but it's better than using + // 100% because that can have some rounding issues. For example, if you use 100% and blocks are + // 99% full, the fee market won't adapt. So it's better to have a little slack. + MempoolCongestionFactorBasisPoints: uint64(9000), + PastBlocksCongestionFactorBasisPoints: uint64(9000), + // The priority percentile determines what benchmark we use to increase the fee we're paying. For + // past blocks, we set a percentile of 90%, which means we'll take the fee paid by the 90th percentile + // txn in the past N blocks and increase it by one fee bucket. This works nicely with N=50 blocks + // because the 90th percentile will be within 5 blocks if you sorted all txns by their fees. For the + // mempool, we set a percentile of 10%, which means we use the fee paid by the 10th percentile txn in + // the highest 1 block's worth of txns in the mempool. We use a lower percentile here because the mempool + // has a much tighter window of a single block, and so by outbidding *anybody* in that block, you're + // already highly likely to get in. + MempoolPriorityPercentileBasisPoints: uint64(1000), + PastBlocksPriorityPercentileBasisPoints: uint64(9000), + ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&MainnetForkHeights), @@ -1623,6 +1683,12 @@ var DeSoTestnetParams = DeSoParams{ // DisableNetworkManagerRoutines is a testing flag that disables the network manager routines. 
DisableNetworkManagerRoutines: false, + // See comment on DeSoMainnetParams + MempoolCongestionFactorBasisPoints: uint64(9000), + PastBlocksCongestionFactorBasisPoints: uint64(9000), + MempoolPriorityPercentileBasisPoints: uint64(1000), + PastBlocksPriorityPercentileBasisPoints: uint64(9000), + ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), EncoderMigrationHeightsList: GetEncoderMigrationHeightsList(&TestnetForkHeights), diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index 3a89d220d..e1d2b3b27 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -667,26 +667,32 @@ func (posFeeEstimator *PoSFeeEstimator) estimateFeeRateNanosPerKBGivenTransactio txnRegister.minimumNetworkFeeNanosPerKB, txnRegister.feeBucketGrowthRateBasisPoints, ) - // If the bucketMinFee is less than or equal to the global min fee rate, we return the global min fee rate. - if bucketMinFee <= globalMinFeeRate { - return globalMinFeeRate - } // Compute the congestion threshold. If our congestion factor is 100% (or 10,000 bps), // then congestion threshold is simply max block size * numPastBlocks - // TODO: I don't know if I like this name really. congestionThreshold := (congestionFactorBasisPoints * maxSizeOfNumBlocks) / MaxBasisPoints // If the total size of the txns in the transaction register is less than the computed congestion threshold, // we return one bucket lower than the Priority fee. if totalTxnsSize <= congestionThreshold { + // When we're below the congestion threshold, we want to suggest one fee bucket *lower* + // than the Priority fee we got in the previous step. This mechanism allows fees to drop + // dynamically during times of low congestion. + if bucketMinFee <= globalMinFeeRate { + // If the Priority fee we got from the previous step is <= the global min, then we *can't* suggest + // a lower fee, so just return the global min. 
+ return globalMinFeeRate + } // Return one bucket lower than Priority fee + feeBucketMultiplier := ComputeMultiplierFromGrowthRateBasisPoints(txnRegister.feeBucketGrowthRateBasisPoints) bucketExponent := computeFeeTimeBucketExponentFromFeeNanosPerKB( - bucketMinFee, txnRegister.minimumNetworkFeeNanosPerKB, txnRegister.feeBucketGrowthRateBasisPoints) + bucketMinFee, txnRegister.minimumNetworkFeeNanosPerKB, feeBucketMultiplier) return computeFeeTimeBucketMinFromExponent( - bucketExponent-1, txnRegister.minimumNetworkFeeNanosPerKB, txnRegister.feeBucketGrowthRateBasisPoints) + bucketExponent-1, txnRegister.minimumNetworkFeeNanosPerKB, feeBucketMultiplier) } - // Otherwise, we return one bucket higher than Priority fee + // Otherwise, if we're above the congestion threshold, we return one bucket higher than + // the Priority fee. This mechanism allows fees to rise dynamically during times of high + // congestion. return bucketMaxFee + 1 } diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index e510e9527..040d5c6ba 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -259,6 +259,9 @@ func (mp *PosMempool) Init( maxValidationViewConnects uint64, transactionValidationRefreshIntervalMillis uint64, ) error { + mp.Lock() + defer mp.Unlock() + if mp.status != PosMempoolStatusNotInitialized { return errors.New("PosMempool.Init: PosMempool already initialized") } @@ -282,6 +285,12 @@ func (mp *PosMempool) Init( mp.recentBlockTxnCache = lru.NewKVCache(100000) // cache 100K latest txns from blocks. mp.recentRejectedTxnCache = lru.NewKVCache(100000) // cache 100K rejected txns. + // Recreate and initialize the transaction register and the nonce tracker. 
+ mp.txnRegister = NewTransactionRegister() + mp.txnRegister.Init(mp.globalParams) + mp.nonceTracker = NewNonceTracker() + + // Initialize the fee estimator err = mp.feeEstimator.Init(mp.txnRegister, feeEstimatorPastBlocks, mp.globalParams) if err != nil { return errors.Wrapf(err, "PosMempool.Start: Problem initializing fee estimator") @@ -298,11 +307,6 @@ func (mp *PosMempool) Start() error { return errors.New("PosMempool.Start: PosMempool not initialized") } - // Create the transaction register, the ledger, and the nonce tracker, - mp.txnRegister = NewTransactionRegister() - mp.txnRegister.Init(mp.globalParams) - mp.nonceTracker = NewNonceTracker() - // Setup the database and create the persister if !mp.inMemoryOnly { mempoolDirectory := filepath.Join(mp.dir, "mempool") @@ -321,11 +325,13 @@ func (mp *PosMempool) Start() error { return errors.Wrapf(err, "PosMempool.Start: Problem loading persisted transactions") } } + mp.startGroup.Add(1) mp.exitGroup.Add(1) mp.startTransactionValidationRoutine() mp.startGroup.Wait() mp.status = PosMempoolStatusRunning + return nil } diff --git a/lib/pos_transaction_register.go b/lib/pos_transaction_register.go index e351a1869..b0b3b59ea 100644 --- a/lib/pos_transaction_register.go +++ b/lib/pos_transaction_register.go @@ -42,7 +42,7 @@ type TransactionRegister struct { func NewTransactionRegister() *TransactionRegister { feeTimeBucketSet := treeset.NewWith(feeTimeBucketComparator) - minimumNetworkFeeNanosPerKB, feeBucketMultiplier := _getFallbackSafeMinimumFeeAndMultiplier() + minimumNetworkFeeNanosPerKB, feeBucketGrowthRateBasisPoints := _getFallbackSafeMinimumFeeAndGrowthRateBasisPoints() return &TransactionRegister{ feeTimeBucketSet: feeTimeBucketSet, feeTimeBucketsByMinFeeMap: make(map[uint64]*FeeTimeBucket), @@ -50,30 +50,36 @@ func NewTransactionRegister() *TransactionRegister { totalTxnsSizeBytes: 0, // Set default values for the uninitialized fields. 
This is safe because any transactions // added to the register will be re-bucketed once the params are updated. - minimumNetworkFeeNanosPerKB: minimumNetworkFeeNanosPerKB, // Default to 100 nanos per KB - feeBucketGrowthRateBasisPoints: feeBucketMultiplier, // Default to 10% + minimumNetworkFeeNanosPerKB: minimumNetworkFeeNanosPerKB, // Default to 100 nanos per KB + feeBucketGrowthRateBasisPoints: feeBucketGrowthRateBasisPoints, // Default to 10% } } func (tr *TransactionRegister) Init(globalParams *GlobalParamsEntry) { - minNetworkFee, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() - if !_isValidMinimumFeeAndMultiplier(minNetworkFee, bucketMultiplier) { - minNetworkFee, bucketMultiplier = _getFallbackSafeMinimumFeeAndMultiplier() + tr.Lock() + defer tr.Unlock() + + minNetworkFee, growthRateBasisPoints := globalParams.GetFeeTimeBucketMinimumFeeAndGrowthRateBasisPoints() + + if !_isValidMinimumFeeAndGrowthRate(minNetworkFee, growthRateBasisPoints) { + minNetworkFee, growthRateBasisPoints = _getFallbackSafeMinimumFeeAndGrowthRateBasisPoints() } + tr.minimumNetworkFeeNanosPerKB = minNetworkFee - tr.feeBucketGrowthRateBasisPoints = bucketMultiplier + tr.feeBucketGrowthRateBasisPoints = growthRateBasisPoints } func (tr *TransactionRegister) HasGlobalParamChange(globalParams *GlobalParamsEntry) bool { tr.RLock() defer tr.RUnlock() - minNetworkFee, bucketMultiplier := globalParams.ComputeFeeTimeBucketMinimumFeeAndMultiplier() - if !_isValidMinimumFeeAndMultiplier(minNetworkFee, bucketMultiplier) { - minNetworkFee, bucketMultiplier = _getFallbackSafeMinimumFeeAndMultiplier() + minNetworkFee, growthRateBasisPoints := globalParams.GetFeeTimeBucketMinimumFeeAndGrowthRateBasisPoints() + + if !_isValidMinimumFeeAndGrowthRate(minNetworkFee, growthRateBasisPoints) { + minNetworkFee, growthRateBasisPoints = _getFallbackSafeMinimumFeeAndGrowthRateBasisPoints() } - return minNetworkFee.Cmp(tr.minimumNetworkFeeNanosPerKB) != 0 || 
bucketMultiplier.Cmp(tr.feeBucketGrowthRateBasisPoints) != 0 + return minNetworkFee.Cmp(tr.minimumNetworkFeeNanosPerKB) != 0 || growthRateBasisPoints.Cmp(tr.feeBucketGrowthRateBasisPoints) != 0 } func (tr *TransactionRegister) CopyWithNewGlobalParams(globalParams *GlobalParamsEntry) (*TransactionRegister, error) { @@ -627,11 +633,22 @@ func (tb *FeeTimeBucket) Clear() { // Fee-Time Bucket Math //============================================ +func ComputeMultiplierFromGrowthRateBasisPoints(growthRateBasisPoints *big.Float) *big.Float { + return NewFloat().Quo( + NewFloat().Add( + NewFloat().SetUint64(10000), + growthRateBasisPoints, + ), + NewFloat().SetUint64(10000), + ) +} + // computeFeeTimeBucketRangeFromFeeNanosPerKB takes a fee rate, minimumNetworkFeeNanosPerKB, and feeBucketMultiplier, // and returns the [minFeeNanosPerKB, maxFeeNanosPerKB] of the fee range. func computeFeeTimeBucketRangeFromFeeNanosPerKB(feeNanosPerKB uint64, minimumNetworkFeeNanosPerKB *big.Float, - feeBucketMultiplier *big.Float) (uint64, uint64) { + feeBucketGrowthRateBasisPoints *big.Float) (uint64, uint64) { + feeBucketMultiplier := ComputeMultiplierFromGrowthRateBasisPoints(feeBucketGrowthRateBasisPoints) bucketExponent := computeFeeTimeBucketExponentFromFeeNanosPerKB(feeNanosPerKB, minimumNetworkFeeNanosPerKB, feeBucketMultiplier) return computeFeeTimeBucketRangeFromExponent(bucketExponent, minimumNetworkFeeNanosPerKB, feeBucketMultiplier) } @@ -642,7 +659,12 @@ func computeFeeTimeBucketRangeFromExponent(exponent uint32, minimumNetworkFeeNan _minFeeNanosPerKB uint64, _maxFeeNanosPerKB uint64) { minFeeNanosPerKB := computeFeeTimeBucketMinFromExponent(exponent, minimumNetworkFeeNanosPerKB, feeBucketMultiplier) - maxFeeNanosPerKB := computeFeeTimeBucketMinFromExponent(exponent+1, minimumNetworkFeeNanosPerKB, feeBucketMultiplier) - 1 + maxFeeNanosPerKB := computeFeeTimeBucketMinFromExponent(exponent+1, minimumNetworkFeeNanosPerKB, feeBucketMultiplier) + if maxFeeNanosPerKB != 
minFeeNanosPerKB { + // These two should generally never be equal, and if they are it likely means the fee bucket growth + // rate is too small and the fee bucketing won't work right. But we guard against it just in case. + maxFeeNanosPerKB-- + } return minFeeNanosPerKB, maxFeeNanosPerKB } @@ -707,20 +729,20 @@ func computeFeeTimeBucketExponentFromFeeNanosPerKB(feeNanosPerKB uint64, minimum return feeTimeBucketExponent } -func _isValidMinimumFeeAndMultiplier(minimumNetworkFeeNanosPerKB *big.Float, feeBucketMultiplier *big.Float) bool { - if minimumNetworkFeeNanosPerKB == nil || feeBucketMultiplier == nil { +func _isValidMinimumFeeAndGrowthRate(minimumNetworkFeeNanosPerKB *big.Float, feeBucketGrowthRateBasisPoints *big.Float) bool { + if minimumNetworkFeeNanosPerKB == nil || feeBucketGrowthRateBasisPoints == nil { return false } - if minimumNetworkFeeNanosPerKB.Sign() <= 0 || feeBucketMultiplier.Sign() <= 0 { + if minimumNetworkFeeNanosPerKB.Sign() <= 0 || feeBucketGrowthRateBasisPoints.Sign() <= 0 { return false } return true } -func _getFallbackSafeMinimumFeeAndMultiplier() (*big.Float, *big.Float) { - minimumNetworkFeeNanosPerKB := big.NewFloat(100) // Default to 100 nanos per KB - feeBucketMultiplier := big.NewFloat(1000) // Default to 10% - return minimumNetworkFeeNanosPerKB, feeBucketMultiplier +func _getFallbackSafeMinimumFeeAndGrowthRateBasisPoints() (*big.Float, *big.Float) { + minimumNetworkFeeNanosPerKB := big.NewFloat(100) // Default to 100 nanos per KB + feeBucketGrowthRateBasisPoints := big.NewFloat(1000) // Default to 10% + return minimumNetworkFeeNanosPerKB, feeBucketGrowthRateBasisPoints } From 1d49eeb2ce16c2a7775e12999a7e3a8e349ebbc5 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 22 Apr 2024 14:56:55 -0400 Subject: [PATCH 666/762] Allow specifying min fee rate in atomic txn construction (#1257) --- lib/block_view_atomic_txns_test.go | 56 +++++++++++++++++++++++++----- lib/blockchain.go | 4 +-- 2 
files changed, 50 insertions(+), 10 deletions(-) diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index f4daee10b..8ab7afc63 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -28,7 +28,12 @@ func TestAtomicTxnsWrapperTxIndexMetadataEncoder(t *testing.T) { atomicTxns, signerPrivKeysBase58 := _generateUnsignedDependentAtomicTransactions(testMeta, int(100)) // Construct an atomic transaction. - atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper( + atomicTxns, + nil, + testMeta.mempool, + testMeta.feeRateNanosPerKb, + ) require.NoError(t, err) // Sign the internal atomic transactions, making the atomic transaction valid. @@ -94,7 +99,12 @@ func TestAtomicTxnsWrapperAtomicity(t *testing.T) { // Wrap the transactions in an atomic wrapper. // NOTE: This must be done before signing to ensure the extra data is present. - atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper( + atomicTxns, + nil, + testMeta.mempool, + 0, + ) require.NoError(t, err) // Sign all but the final transaction, sign the last one incorrectly. @@ -176,7 +186,12 @@ func TestAtomicTxnsSignatureFailure(t *testing.T) { nextIndex := (ii + 1) % len(atomicTxns) _signTxn(t, txn, signerPrivKeysBase58[nextIndex]) } - atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper( + atomicTxns, + nil, + testMeta.mempool, + 0, + ) require.NoError(t, err) // Try to connect them atomically. 
@@ -198,7 +213,12 @@ func TestConnectAtomicTxnsWrapperRuleErrors(t *testing.T) { // (This should fail -- RuleErrorTxnTooBig) numTxnsToGenerate := testMeta.params.MaxBlockSizeBytesPoW / 200 atomicTxns, _ := _generateSignedDependentAtomicTransactions(testMeta, int(numTxnsToGenerate)) - atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper( + atomicTxns, + nil, + testMeta.mempool, + testMeta.feeRateNanosPerKb, + ) require.NoError(t, err) _, err = _atomicTransactionsWrapperWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, atomicTxnsWrapper, 0) @@ -221,7 +241,12 @@ func TestConnectAtomicTxnsWrapperRuleErrors(t *testing.T) { // Try and cause overflow in the atomic transactions wrapper fee verification. // (This should fail -- RuleErrorOverflowDetectedInFeeRateCalculation) atomicTxns, _ = _generateSignedDependentAtomicTransactions(testMeta, int(100)) - atomicTxnsWrapper, _, err = testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) + atomicTxnsWrapper, _, err = testMeta.chain.CreateAtomicTxnsWrapper( + atomicTxns, + nil, + testMeta.mempool, + testMeta.feeRateNanosPerKb, + ) require.NoError(t, err) atomicTxnsWrapper.TxnFeeNanos = math.MaxUint64 _, err = _atomicTransactionsWrapperWithConnectTimestamp( @@ -247,7 +272,12 @@ func TestVerifyAtomicTxnsWrapperRuleErrors(t *testing.T) { atomicTxns, _ := _generateSignedDependentAtomicTransactions(testMeta, 100) // Bundle the transactions together in a (valid) wrapper. - atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper( + atomicTxns, + nil, + testMeta.mempool, + testMeta.feeRateNanosPerKb, + ) require.NoError(t, err) // Try to use a public key other than the zero public key in the wrapper. 
@@ -331,7 +361,12 @@ func TestVerifyAtomicTxnsChain(t *testing.T) { atomicTxns, _ := _generateSignedDependentAtomicTransactions(testMeta, 100) // Bundle the transactions together in a (valid) wrapper. - atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns, nil, testMeta.mempool) + atomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper( + atomicTxns, + nil, + testMeta.mempool, + testMeta.feeRateNanosPerKb, + ) require.NoError(t, err) // Try to remove all the inner transactions. @@ -348,7 +383,12 @@ func TestVerifyAtomicTxnsChain(t *testing.T) { // (This should fail -- RuleErrorAtomicTxnsHasAtomicTxnsInnerTxn) atomicTxnsWrapperDuplicate, err = atomicTxnsWrapper.Copy() require.NoError(t, err) - innerAtomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper(atomicTxns[:100], nil, testMeta.mempool) + innerAtomicTxnsWrapper, _, err := testMeta.chain.CreateAtomicTxnsWrapper( + atomicTxns[:100], + nil, + testMeta.mempool, + testMeta.feeRateNanosPerKb, + ) require.NoError(t, err) atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns = atomicTxnsWrapperDuplicate.TxnMeta.(*AtomicTxnsWrapperMetadata).Txns[100:] diff --git a/lib/blockchain.go b/lib/blockchain.go index 01772668b..02d4ebf3d 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5831,6 +5831,7 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( unsignedTransactions []*MsgDeSoTxn, extraData map[string][]byte, mempool Mempool, + minFeeRateNanosPerKB uint64, ) ( _txn *MsgDeSoTxn, _fees uint64, @@ -5876,8 +5877,7 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( txn.ExtraData[PreviousAtomicTxnPreHash] = dummyAtomicHashBytes newFeeEstimate, err := mempool.EstimateFee( txn, - // TODO: Allow the caller to specify minFeeRateNanosPerKB - 0, + minFeeRateNanosPerKB, // TODO: Make these flags or GlobalParams bc.params.MempoolCongestionFactorBasisPoints, bc.params.MempoolPriorityPercentileBasisPoints, From 8045c908c726bd0c9e3b3a57fbc77cae526e1b9b Mon Sep 17 
00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 22 Apr 2024 22:40:00 -0400 Subject: [PATCH 667/762] Add Default Prefix to New Fee Estimation Constant Names (#1256) --- lib/blockchain.go | 32 ++++++++++++++++---------------- lib/constants.go | 36 +++++++++++++++++++++--------------- 2 files changed, 37 insertions(+), 31 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 02d4ebf3d..504b1f1bc 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5014,10 +5014,10 @@ func (bc *Blockchain) CreateMaxSpend( txn, minFeeRateNanosPerKB, // TODO: Make these flags or GlobalParams - bc.params.MempoolCongestionFactorBasisPoints, - bc.params.MempoolPriorityPercentileBasisPoints, - bc.params.PastBlocksCongestionFactorBasisPoints, - bc.params.PastBlocksPriorityPercentileBasisPoints, + bc.params.DefaultMempoolCongestionFactorBasisPoints, + bc.params.DefaultMempoolPriorityPercentileBasisPoints, + bc.params.DefaultMempoolPastBlocksCongestionFactorBasisPoints, + bc.params.DefaultMempoolPastBlocksPriorityPercentileBasisPoints, maxBlockSizeBytes) if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, "CreateMaxSpend: Problem estimating fee: ") @@ -5157,10 +5157,10 @@ func (bc *Blockchain) AddInputsAndChangeToTransactionWithSubsidy( txArg, minFeeRateNanosPerKB, // TODO: Make these flags or GlobalParams - bc.params.MempoolCongestionFactorBasisPoints, - bc.params.MempoolPriorityPercentileBasisPoints, - bc.params.PastBlocksCongestionFactorBasisPoints, - bc.params.PastBlocksPriorityPercentileBasisPoints, + bc.params.DefaultMempoolCongestionFactorBasisPoints, + bc.params.DefaultMempoolPriorityPercentileBasisPoints, + bc.params.DefaultMempoolPastBlocksCongestionFactorBasisPoints, + bc.params.DefaultMempoolPastBlocksPriorityPercentileBasisPoints, maxBlockSizeBytes) UpdateTxnFee(txArg, newTxFee) if err != nil { @@ -5879,10 +5879,10 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( txn, minFeeRateNanosPerKB, // TODO: Make these flags 
or GlobalParams - bc.params.MempoolCongestionFactorBasisPoints, - bc.params.MempoolPriorityPercentileBasisPoints, - bc.params.PastBlocksCongestionFactorBasisPoints, - bc.params.PastBlocksPriorityPercentileBasisPoints, + bc.params.DefaultMempoolCongestionFactorBasisPoints, + bc.params.DefaultMempoolPriorityPercentileBasisPoints, + bc.params.DefaultMempoolPastBlocksCongestionFactorBasisPoints, + bc.params.DefaultMempoolPastBlocksPriorityPercentileBasisPoints, maxBlockSizeBytes) if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to recompute fee estimate") @@ -5972,10 +5972,10 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( atomicTxn, 0, // TODO: Make these flags or GlobalParams - bc.params.MempoolCongestionFactorBasisPoints, - bc.params.MempoolPriorityPercentileBasisPoints, - bc.params.PastBlocksCongestionFactorBasisPoints, - bc.params.PastBlocksPriorityPercentileBasisPoints, + bc.params.DefaultMempoolCongestionFactorBasisPoints, + bc.params.DefaultMempoolPriorityPercentileBasisPoints, + bc.params.DefaultMempoolPastBlocksCongestionFactorBasisPoints, + bc.params.DefaultMempoolPastBlocksPriorityPercentileBasisPoints, maxBlockSizeBytes) if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to compute "+ diff --git a/lib/constants.go b/lib/constants.go index b6db7de56..eced4d227 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -794,6 +794,18 @@ type DeSoParams struct { // this setting. DefaultMempoolFeeEstimatorNumPastBlocks uint64 + // DefaultMempoolCongestionFactorBasisPoints and DefaultMempoolPastBlocksCongestionFactorBasisPoints are the default values + // for GlobalParams.MempoolCongestionFactorBasisPoints and GlobalParams.DefaultMempoolPastBlocksCongestionFactorBasisPoints. + /// See comments in DeSoMainnetParams for a description of their usage. 
+ DefaultMempoolCongestionFactorBasisPoints uint64 + DefaultMempoolPastBlocksCongestionFactorBasisPoints uint64 + + // DefaultMempoolPriorityPercentileBasisPoints and DefaultMempoolPastBlocksPriorityPercentileBasisPoints are the default values + // for GlobalParams.DefaultMempoolPriorityPercentileBasisPoints and GlobalParams.DefaultMempoolPastBlocksPriorityPercentileBasisPoints. + // See comments in DeSoMainnetParams for a description of their usage. + DefaultMempoolPriorityPercentileBasisPoints uint64 + DefaultMempoolPastBlocksPriorityPercentileBasisPoints uint64 + // DefaultMaxBlockSizeBytesPoS is the default value for GlobalParamsEntry.MaxBlockSizeBytesPoS. // This is the initial value for the maximum block size in bytes that we allow for PoS blocks. DefaultMaxBlockSizeBytesPoS uint64 @@ -823,12 +835,6 @@ type DeSoParams struct { ForkHeights ForkHeights - // See comment on the DeSoMainnetParams settings of these values - MempoolCongestionFactorBasisPoints uint64 - MempoolPriorityPercentileBasisPoints uint64 - PastBlocksCongestionFactorBasisPoints uint64 - PastBlocksPriorityPercentileBasisPoints uint64 - EncoderMigrationHeights *EncoderMigrationHeights EncoderMigrationHeightsList []*MigrationHeight } @@ -1353,7 +1359,7 @@ var DeSoMainnetParams = DeSoParams{ // The congestion factor determines when we will start to increase or decrease fees. // We set the congestion factor to 90% for past blocks and mempool. This makes it so that we will - // start to increase fees when the past N blocks (DefaultMempoolFeeEstimatorNumPastBlocks) are + // start to increase fees when the past N blocks (DefaultMempoolPastBlocksCongestionFactorBasisPoints) are // 90% full on average or the mempool has 90% of 1 block's worth of txns in it (actually 90% of // DefaultMempoolFeeEstimatorNumMempoolBlocks). 
This is good because it ensures that the typical // fee estimate we give will be highly likely to get one's transaction included in the next block @@ -1362,8 +1368,8 @@ var DeSoMainnetParams = DeSoParams{ // Using the 90th percentile allows the fee market to be aggressive, but it's better than using // 100% because that can have some rounding issues. For example, if you use 100% and blocks are // 99% full, the fee market won't adapt. So it's better to have a little slack. - MempoolCongestionFactorBasisPoints: uint64(9000), - PastBlocksCongestionFactorBasisPoints: uint64(9000), + DefaultMempoolCongestionFactorBasisPoints: uint64(9000), + DefaultMempoolPastBlocksCongestionFactorBasisPoints: uint64(9000), // The priority percentile determines what benchmark we use to increase the fee we're paying. For // past blocks, we set a percentile of 90%, which means we'll take the fee paid by the 90th percentile // txn in the past N blocks and increase it by one fee bucket. This works nicely with N=50 blocks @@ -1372,8 +1378,8 @@ var DeSoMainnetParams = DeSoParams{ // the highest 1 block's worth of txns in the mempool. We use a lower percentile here because the mempool // has a much tighter window of a single block, and so by outbidding *anybody* in that block, you're // already highly likely to get in. 
- MempoolPriorityPercentileBasisPoints: uint64(1000), - PastBlocksPriorityPercentileBasisPoints: uint64(9000), + DefaultMempoolPriorityPercentileBasisPoints: uint64(1000), + DefaultMempoolPastBlocksPriorityPercentileBasisPoints: uint64(9000), ForkHeights: MainnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&MainnetForkHeights), @@ -1684,10 +1690,10 @@ var DeSoTestnetParams = DeSoParams{ DisableNetworkManagerRoutines: false, // See comment on DeSoMainnetParams - MempoolCongestionFactorBasisPoints: uint64(9000), - PastBlocksCongestionFactorBasisPoints: uint64(9000), - MempoolPriorityPercentileBasisPoints: uint64(1000), - PastBlocksPriorityPercentileBasisPoints: uint64(9000), + DefaultMempoolCongestionFactorBasisPoints: uint64(9000), + DefaultMempoolPastBlocksCongestionFactorBasisPoints: uint64(9000), + DefaultMempoolPriorityPercentileBasisPoints: uint64(1000), + DefaultMempoolPastBlocksPriorityPercentileBasisPoints: uint64(9000), ForkHeights: TestnetForkHeights, EncoderMigrationHeights: GetEncoderMigrationHeights(&TestnetForkHeights), From 617825f0bbaae71d0e4d1aa04ec80a7c33df8012 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 22 Apr 2024 22:41:50 -0400 Subject: [PATCH 668/762] Create Global Params For Fee Estimator Congestion and Priority Params (#1258) Create Global Params For Fee Estimator Congestion and Priority Params Implement encode and decode Rename --- lib/block_view.go | 68 +++++++++++++++++++++++++++++++++++++++++ lib/block_view_types.go | 36 ++++++++++++++++++++++ lib/constants.go | 4 +++ 3 files changed, 108 insertions(+) diff --git a/lib/block_view.go b/lib/block_view.go index 8f3c57098..e38f72dee 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -3491,6 +3491,74 @@ func (bav *UtxoView) _connectUpdateGlobalParams( } newGlobalParamsEntry.MempoolFeeEstimatorNumPastBlocks = val } + if len(extraData[MempoolCongestionFactorBasisPointsKey]) > 0 { + val, bytesRead := Uvarint( + 
extraData[MempoolCongestionFactorBasisPointsKey], + ) + if val > MaxBasisPoints { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: MempoolCongestionFactorBasisPoints must be <= %d", + MaxBasisPoints, + ) + } + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode MempoolCongestionFactorBasisPoints as uint64", + ) + } + newGlobalParamsEntry.MempoolCongestionFactorBasisPoints = val + } + if len(extraData[MempoolPastBlocksCongestionFactorBasisPointsKey]) > 0 { + val, bytesRead := Uvarint( + extraData[MempoolPastBlocksCongestionFactorBasisPointsKey], + ) + if val > MaxBasisPoints { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: MempoolPastBlocksCongestionFactorBasisPoints must be <= %d", + MaxBasisPoints, + ) + } + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode MempoolPastBlocksCongestionFactorBasisPoints as uint64", + ) + } + newGlobalParamsEntry.MempoolPastBlocksCongestionFactorBasisPoints = val + } + if len(extraData[MempoolPriorityPercentileBasisPointsKey]) > 0 { + val, bytesRead := Uvarint( + extraData[MempoolPriorityPercentileBasisPointsKey], + ) + if val > MaxBasisPoints { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: MempoolPriorityPercentileBasisPoints must be <= %d", + MaxBasisPoints, + ) + } + if bytesRead <= 0 { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode MempoolPriorityPercentileBasisPoints as uint64", + ) + } + newGlobalParamsEntry.MempoolPriorityPercentileBasisPoints = val + } + if len(extraData[MempoolPastBlocksPriorityPercentileBasisPointsKey]) > 0 { + val, bytesRead := Uvarint( + extraData[MempoolPastBlocksPriorityPercentileBasisPointsKey], + ) + if val > MaxBasisPoints { + return 0, 0, nil, fmt.Errorf( + "_connectUpdateGlobalParams: MempoolPastBlocksPriorityPercentileBasisPoints must be <= %d", + MaxBasisPoints, + ) + } + if bytesRead <= 0 { + return 0, 0, nil, 
fmt.Errorf( + "_connectUpdateGlobalParams: unable to decode MempoolPastBlocksPriorityPercentileBasisPoints as uint64", + ) + } + newGlobalParamsEntry.MempoolPastBlocksPriorityPercentileBasisPoints = val + } if len(extraData[MaxBlockSizeBytesPoSKey]) > 0 { val, bytesRead := Uvarint( extraData[MaxBlockSizeBytesPoSKey], diff --git a/lib/block_view_types.go b/lib/block_view_types.go index ebb676dde..9bc0d5fae 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -4236,6 +4236,18 @@ type GlobalParamsEntry struct { // the fee for a new txn. MempoolFeeEstimatorNumPastBlocks uint64 + // DefaultMempoolCongestionFactorBasisPoints and DefaultMempoolPastBlocksCongestionFactorBasisPoints are the default values + // for GlobalParams.MempoolCongestionFactorBasisPoints and GlobalParams.DefaultMempoolPastBlocksCongestionFactorBasisPoints. + /// See comments in GlobalParamsEntry for a description of their usage. + MempoolCongestionFactorBasisPoints uint64 + MempoolPastBlocksCongestionFactorBasisPoints uint64 + + // MempoolPriorityPercentileBasisPoints and MempoolPastBlocksPriorityPercentileBasisPoints are the default values + // for GlobalParams.DefaultMempoolPriorityPercentileBasisPoints and GlobalParams.DefaultMempoolPastBlocksPriorityPercentileBasisPoints. + // See comments in GlobalParamsEntry for a description of their usage. + MempoolPriorityPercentileBasisPoints uint64 + MempoolPastBlocksPriorityPercentileBasisPoints uint64 + // MaxBlockSizeBytesPoS is the maximum size of a block in bytes. 
MaxBlockSizeBytesPoS uint64 @@ -4275,6 +4287,10 @@ func (gp *GlobalParamsEntry) Copy() *GlobalParamsEntry { MempoolMaxSizeBytes: gp.MempoolMaxSizeBytes, MempoolFeeEstimatorNumMempoolBlocks: gp.MempoolFeeEstimatorNumMempoolBlocks, MempoolFeeEstimatorNumPastBlocks: gp.MempoolFeeEstimatorNumPastBlocks, + MempoolCongestionFactorBasisPoints: gp.MempoolCongestionFactorBasisPoints, + MempoolPastBlocksCongestionFactorBasisPoints: gp.MempoolPastBlocksCongestionFactorBasisPoints, + MempoolPriorityPercentileBasisPoints: gp.MempoolPriorityPercentileBasisPoints, + MempoolPastBlocksPriorityPercentileBasisPoints: gp.MempoolPastBlocksPriorityPercentileBasisPoints, MaxBlockSizeBytesPoS: gp.MaxBlockSizeBytesPoS, SoftMaxBlockSizeBytesPoS: gp.SoftMaxBlockSizeBytesPoS, MaxTxnSizeBytesPoS: gp.MaxTxnSizeBytesPoS, @@ -4309,6 +4325,10 @@ func (gp *GlobalParamsEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMe data = append(data, UintToBuf(gp.MempoolMaxSizeBytes)...) data = append(data, UintToBuf(gp.MempoolFeeEstimatorNumMempoolBlocks)...) data = append(data, UintToBuf(gp.MempoolFeeEstimatorNumPastBlocks)...) + data = append(data, UintToBuf(gp.MempoolCongestionFactorBasisPoints)...) + data = append(data, UintToBuf(gp.MempoolPastBlocksCongestionFactorBasisPoints)...) + data = append(data, UintToBuf(gp.MempoolPriorityPercentileBasisPoints)...) + data = append(data, UintToBuf(gp.MempoolPastBlocksPriorityPercentileBasisPoints)...) data = append(data, UintToBuf(gp.MaxBlockSizeBytesPoS)...) data = append(data, UintToBuf(gp.SoftMaxBlockSizeBytesPoS)...) data = append(data, UintToBuf(gp.MaxTxnSizeBytesPoS)...) 
@@ -4406,6 +4426,22 @@ func (gp *GlobalParamsEntry) RawDecodeWithoutMetadata(blockHeight uint64, rr *by if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MempoolFeeEstimatorNumPastBlocks") } + gp.MempoolCongestionFactorBasisPoints, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MempoolCongestionFactorBasisPoints") + } + gp.MempoolPastBlocksCongestionFactorBasisPoints, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MempoolPastBlocksCongestionFactorBasisPoints") + } + gp.MempoolPriorityPercentileBasisPoints, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MempoolPriorityPercentileBasisPoints") + } + gp.MempoolPastBlocksPriorityPercentileBasisPoints, err = ReadUvarint(rr) + if err != nil { + return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MempoolPastBlocksPriorityPercentileBasisPoints") + } gp.MaxBlockSizeBytesPoS, err = ReadUvarint(rr) if err != nil { return errors.Wrapf(err, "GlobalParamsEntry.Decode: Problem reading MaxBlockSizeBytesPoS") diff --git a/lib/constants.go b/lib/constants.go index eced4d227..66960a993 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1753,6 +1753,10 @@ const ( MempoolMaxSizeBytesKey = "MempoolMaxSizeBytes" MempoolFeeEstimatorNumMempoolBlocksKey = "MempoolFeeEstimatorNumMempoolBlocks" MempoolFeeEstimatorNumPastBlocksKey = "MempoolFeeEstimatorNumPastBlocks" + MempoolCongestionFactorBasisPointsKey = "MempoolCongestionFactorBasisPoints" + MempoolPastBlocksCongestionFactorBasisPointsKey = "MempoolPastBlocksCongestionFactorBasisPoints" + MempoolPriorityPercentileBasisPointsKey = "MempoolPriorityPercentileBasisPoints" + MempoolPastBlocksPriorityPercentileBasisPointsKey = "MempoolPastBlocksPriorityPercentileBasisPoints" MaxBlockSizeBytesPoSKey = "MaxBlockSizeBytesPoS" SoftMaxBlockSizeBytesPoSKey = 
"SoftMaxBlockSizeBytesPoS" MaxTxnSizeBytesPoSKey = "MaxTxnSizeBytesPoS" From 99dd7cea0e8feaba79a1b20a81d0974086f95281 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 22 Apr 2024 22:43:38 -0400 Subject: [PATCH 669/762] Use Global Params For Fee Estimator Congestion and Priority Params (#1259) Use Global Params For Fee Estimator Congestion and Priority Params Revert minFeeRate usage in inner atomic txn transaction --- lib/blockchain.go | 40 +++------------------- lib/legacy_mempool.go | 13 ++------ lib/pos_fee_estimator.go | 23 ++++++------- lib/pos_fee_estimator_test.go | 50 ++++++++++++++++++++-------- lib/pos_mempool.go | 45 ++++--------------------- lib/pos_transaction_register_test.go | 6 ++++ 6 files changed, 67 insertions(+), 110 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 504b1f1bc..7bab25320 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5010,15 +5010,7 @@ func (bc *Blockchain) CreateMaxSpend( if bc.params.IsPoSBlockHeight(uint64(bc.BlockTip().Height)) { maxBlockSizeBytes = utxoView.GetSoftMaxBlockSizeBytesPoS() } - feeAmountNanos, err = mempool.EstimateFee( - txn, - minFeeRateNanosPerKB, - // TODO: Make these flags or GlobalParams - bc.params.DefaultMempoolCongestionFactorBasisPoints, - bc.params.DefaultMempoolPriorityPercentileBasisPoints, - bc.params.DefaultMempoolPastBlocksCongestionFactorBasisPoints, - bc.params.DefaultMempoolPastBlocksPriorityPercentileBasisPoints, - maxBlockSizeBytes) + feeAmountNanos, err = mempool.EstimateFee(txn, minFeeRateNanosPerKB, maxBlockSizeBytes) if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, "CreateMaxSpend: Problem estimating fee: ") } @@ -5153,15 +5145,7 @@ func (bc *Blockchain) AddInputsAndChangeToTransactionWithSubsidy( if bc.params.IsPoSBlockHeight(uint64(bc.BlockTip().Height)) { maxBlockSizeBytes = utxoView.GetSoftMaxBlockSizeBytesPoS() } - newTxFee, err := mempool.EstimateFee( - txArg, - minFeeRateNanosPerKB, - // TODO: 
Make these flags or GlobalParams - bc.params.DefaultMempoolCongestionFactorBasisPoints, - bc.params.DefaultMempoolPriorityPercentileBasisPoints, - bc.params.DefaultMempoolPastBlocksCongestionFactorBasisPoints, - bc.params.DefaultMempoolPastBlocksPriorityPercentileBasisPoints, - maxBlockSizeBytes) + newTxFee, err := mempool.EstimateFee(txArg, minFeeRateNanosPerKB, maxBlockSizeBytes) UpdateTxnFee(txArg, newTxFee) if err != nil { return 0, 0, 0, 0, errors.Wrapf(err, @@ -5875,15 +5859,7 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( } txn.ExtraData[NextAtomicTxnPreHash] = dummyAtomicHashBytes txn.ExtraData[PreviousAtomicTxnPreHash] = dummyAtomicHashBytes - newFeeEstimate, err := mempool.EstimateFee( - txn, - minFeeRateNanosPerKB, - // TODO: Make these flags or GlobalParams - bc.params.DefaultMempoolCongestionFactorBasisPoints, - bc.params.DefaultMempoolPriorityPercentileBasisPoints, - bc.params.DefaultMempoolPastBlocksCongestionFactorBasisPoints, - bc.params.DefaultMempoolPastBlocksPriorityPercentileBasisPoints, - maxBlockSizeBytes) + newFeeEstimate, err := mempool.EstimateFee(txn, minFeeRateNanosPerKB, maxBlockSizeBytes) if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to recompute fee estimate") } @@ -5968,15 +5944,7 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( // Use EstimateFee to set the fee INCLUDING the wrapper. Note that this fee should generally be a bit // higher than the totalFee computed above because the atomic wrapper adds overhead. 
- newFeeEstimate, err := mempool.EstimateFee( - atomicTxn, - 0, - // TODO: Make these flags or GlobalParams - bc.params.DefaultMempoolCongestionFactorBasisPoints, - bc.params.DefaultMempoolPriorityPercentileBasisPoints, - bc.params.DefaultMempoolPastBlocksCongestionFactorBasisPoints, - bc.params.DefaultMempoolPastBlocksPriorityPercentileBasisPoints, - maxBlockSizeBytes) + newFeeEstimate, err := mempool.EstimateFee(atomicTxn, 0, maxBlockSizeBytes) if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to compute "+ "fee on full txn") diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index a9511c08a..8014e5c9f 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2511,19 +2511,12 @@ func EstimateMaxTxnFeeV1(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) uint64 { return feeAmountNanos } -func (mp *DeSoMempool) EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64, - _ uint64, _ uint64, _ uint64, _ uint64, _ uint64) (uint64, error) { - feeRate := mp.EstimateFeeRate(minFeeRateNanosPerKB, 0, 0, 0, 0, 0) +func (mp *DeSoMempool) EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64, _ uint64) (uint64, error) { + feeRate := mp.EstimateFeeRate(minFeeRateNanosPerKB, 0) return EstimateMaxTxnFeeV1(txn, feeRate), nil } -func (mp *DeSoMempool) EstimateFeeRate( - minFeeRateNanosPerKB uint64, - _ uint64, - _ uint64, - _ uint64, - _ uint64, - _ uint64) uint64 { +func (mp *DeSoMempool) EstimateFeeRate(minFeeRateNanosPerKB uint64, _ uint64) uint64 { if minFeeRateNanosPerKB < mp.readOnlyUtxoView.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB { return mp.readOnlyUtxoView.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB } diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index e1d2b3b27..aa3b13ab6 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -290,17 +290,15 @@ func (posFeeEstimator *PoSFeeEstimator) pruneBlocksToMaxNumPastBlocks(blocks []* // EstimateFeeRateNanosPerKB 
estimates the fee rate in nanos per KB for the current mempool // and past blocks using the congestionFactorBasisPoints, priorityPercentileBasisPoints, and // maxBlockSize params. -func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( - minFeeRateNanosPerKB uint64, - mempoolCongestionFactorBasisPoints uint64, - mempoolPriorityPercentileBasisPoints uint64, - pastBlocksCongestionFactorBasisPoints uint64, - pastBlocksPriorityPercentileBasisPoints uint64, - maxBlockSize uint64, -) uint64 { +func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB(minFeeRateNanosPerKB uint64, maxBlockSize uint64) uint64 { posFeeEstimator.rwLock.RLock() defer posFeeEstimator.rwLock.RUnlock() + mempoolCongestionFactorBasisPoints := posFeeEstimator.globalParams.MempoolCongestionFactorBasisPoints + mempoolPriorityPercentileBasisPoints := posFeeEstimator.globalParams.MempoolPriorityPercentileBasisPoints + pastBlocksCongestionFactorBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksCongestionFactorBasisPoints + pastBlocksPriorityPercentileBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksPriorityPercentileBasisPoints + pastBlockFeeRate := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.pastBlocksTransactionRegister, pastBlocksCongestionFactorBasisPoints, @@ -333,15 +331,16 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB( func (posFeeEstimator *PoSFeeEstimator) EstimateFee( txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64, - mempoolCongestionFactorBasisPoints uint64, - mempoolPriorityPercentileBasisPoints uint64, - pastBlocksCongestionFactorBasisPoints uint64, - pastBlocksPriorityPercentileBasisPoints uint64, maxBlockSize uint64, ) (uint64, error) { posFeeEstimator.rwLock.RLock() defer posFeeEstimator.rwLock.RUnlock() + mempoolCongestionFactorBasisPoints := posFeeEstimator.globalParams.MempoolCongestionFactorBasisPoints + mempoolPriorityPercentileBasisPoints := 
posFeeEstimator.globalParams.MempoolPriorityPercentileBasisPoints + pastBlocksCongestionFactorBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksCongestionFactorBasisPoints + pastBlocksPriorityPercentileBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksPriorityPercentileBasisPoints + mempoolFeeEstimate, err := posFeeEstimator.mempoolFeeEstimate( txn, mempoolCongestionFactorBasisPoints, diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index 4746fc05e..9a8b59909 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -63,7 +63,7 @@ func TestFeeEstimator(t *testing.T) { require.NoError(t, err) validateTxnFee(t, txn, computedFee, baseFeeRate) // Hybrid estimator will also return the base fee rate * number of bytes. - computedFee, err = posFeeEstimator.EstimateFee(txn, 0, 10000, 10000, 1000, 10000, 1000) + computedFee, err = posFeeEstimator.EstimateFee(txn, 0, 1000) require.NoError(t, err) validateTxnFee(t, txn, computedFee, baseFeeRate) @@ -161,10 +161,16 @@ func TestFeeEstimator(t *testing.T) { require.Equal(t, estimatedMempoolFee, estimatedPastBlocksFee) require.Equal(t, estimatedMempoolFeeRate, estimatedPastBlocksFeeRate) + // Update the global params + globalParams := _testGetDefaultGlobalParams() + globalParams.MempoolCongestionFactorBasisPoints = congestionFactor + globalParams.MempoolPastBlocksCongestionFactorBasisPoints = congestionFactor + globalParams.MempoolPriorityPercentileBasisPoints = priorityPercentileBasisPoints + globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = priorityPercentileBasisPoints + posFeeEstimator.UpdateGlobalParams(globalParams) + // And the hybrid estimator is just the max, but for completeness, we check it. 
- estimatedHybridFee, err = posFeeEstimator.EstimateFee( - txn, 0, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, - maxBlockSizeHybrid) + estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0, maxBlockSizeHybrid) require.NoError(t, err) require.Equal(t, estimatedMempoolFee, estimatedHybridFee) require.Equal(t, estimatedPastBlocksFee, estimatedHybridFee) @@ -206,10 +212,16 @@ func TestFeeEstimator(t *testing.T) { require.Equal(t, estimatedMempoolFee, estimatedPastBlocksFee) require.Equal(t, estimatedMempoolFeeRate, estimatedPastBlocksFeeRate) + // Update the global params + globalParams := _testGetDefaultGlobalParams() + globalParams.MempoolCongestionFactorBasisPoints = congestionFactor + globalParams.MempoolPastBlocksCongestionFactorBasisPoints = congestionFactor + globalParams.MempoolPriorityPercentileBasisPoints = priorityPercentileBasisPoints + globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = priorityPercentileBasisPoints + posFeeEstimator.UpdateGlobalParams(globalParams) + // And the hybrid estimator is just the max, but for completeness, we check it. 
- estimatedHybridFee, err = posFeeEstimator.EstimateFee( - txn, 0, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, - maxBlockSizeHybrid) + estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0, maxBlockSizeHybrid) require.NoError(t, err) require.Equal(t, estimatedMempoolFee, estimatedHybridFee) require.Equal(t, estimatedPastBlocksFee, estimatedHybridFee) @@ -251,10 +263,16 @@ func TestFeeEstimator(t *testing.T) { require.Equal(t, estimatedMempoolFee, estimatedPastBlocksFee) require.Equal(t, estimatedMempoolFeeRate, estimatedPastBlocksFeeRate) + // Update the global params + globalParams := _testGetDefaultGlobalParams() + globalParams.MempoolCongestionFactorBasisPoints = congestionFactor + globalParams.MempoolPastBlocksCongestionFactorBasisPoints = congestionFactor + globalParams.MempoolPriorityPercentileBasisPoints = priorityPercentileBasisPoints + globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = priorityPercentileBasisPoints + posFeeEstimator.UpdateGlobalParams(globalParams) + // And the hybrid estimator is just the max, but for completeness, we check it. 
- estimatedHybridFee, err = posFeeEstimator.EstimateFee( - txn, 0, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, - maxBlockSizeHybrid) + estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0, maxBlockSizeHybrid) require.NoError(t, err) require.Equal(t, estimatedMempoolFee, estimatedHybridFee) require.Equal(t, estimatedPastBlocksFee, estimatedHybridFee) @@ -296,10 +314,16 @@ func TestFeeEstimator(t *testing.T) { require.Equal(t, estimatedMempoolFee, estimatedPastBlocksFee) require.Equal(t, estimatedMempoolFeeRate, estimatedPastBlocksFeeRate) + // Update the global params + globalParams := _testGetDefaultGlobalParams() + globalParams.MempoolCongestionFactorBasisPoints = congestionFactor + globalParams.MempoolPastBlocksCongestionFactorBasisPoints = congestionFactor + globalParams.MempoolPriorityPercentileBasisPoints = priorityPercentileBasisPoints + globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = priorityPercentileBasisPoints + posFeeEstimator.UpdateGlobalParams(globalParams) + // And the hybrid estimator is just the max, but for completeness, we check it. 
- estimatedHybridFee, err = posFeeEstimator.EstimateFee( - txn, 0, congestionFactor, priorityPercentileBasisPoints, congestionFactor, priorityPercentileBasisPoints, - maxBlockSizeHybrid) + estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0, maxBlockSizeHybrid) require.NoError(t, err) require.Equal(t, estimatedMempoolFee, estimatedHybridFee) require.Equal(t, estimatedPastBlocksFee, estimatedHybridFee) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 040d5c6ba..02bd3dc0b 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -43,23 +43,8 @@ type Mempool interface { GetMempoolTipBlockHeight() uint64 GetMempoolTx(txHash *BlockHash) *MempoolTx GetMempoolSummaryStats() map[string]*SummaryStats - EstimateFee( - txn *MsgDeSoTxn, - minFeeRateNanosPerKB uint64, - mempoolCongestionFactorBasisPoints uint64, - mempoolPriorityPercentileBasisPoints uint64, - pastBlocksCongestionFactorBasisPoints uint64, - pastBlocksPriorityPercentileBasisPoints uint64, - maxBlockSize uint64, - ) (uint64, error) - EstimateFeeRate( - minFeeRateNanosPerKB uint64, - mempoolCongestionFactorBasisPoints uint64, - mempoolPriorityPercentileBasisPoints uint64, - pastBlocksCongestionFactorBasisPoints uint64, - pastBlocksPriorityPercentileBasisPoints uint64, - maxBlockSize uint64, - ) uint64 + EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64, maxBlockSize uint64) (uint64, error) + EstimateFeeRate(minFeeRateNanosPerKB uint64, maxBlockSize uint64) uint64 } // GetAugmentedUniversalViewWithAdditionalTransactions is meant as a helper function @@ -1133,28 +1118,10 @@ func (mp *PosMempool) GetMempoolSummaryStats() map[string]*SummaryStats { return convertMempoolTxsToSummaryStats(mp.txnRegister.GetFeeTimeTransactions()) } -func (mp *PosMempool) EstimateFee( - txn *MsgDeSoTxn, - minFeeRateNanosPerKB uint64, - mempoolCongestionFactorBasisPoints uint64, - mempoolPriorityPercentileBasisPoints uint64, - pastBlocksCongestionFactorBasisPoints uint64, - 
pastBlocksPriorityPercentileBasisPoints uint64, - maxBlockSize uint64, -) (uint64, error) { - return mp.feeEstimator.EstimateFee( - txn, minFeeRateNanosPerKB, mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, - pastBlocksCongestionFactorBasisPoints, pastBlocksPriorityPercentileBasisPoints, maxBlockSize) +func (mp *PosMempool) EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64, maxBlockSize uint64) (uint64, error) { + return mp.feeEstimator.EstimateFee(txn, minFeeRateNanosPerKB, maxBlockSize) } -func (mp *PosMempool) EstimateFeeRate( - minFeeRateNanosPerKB uint64, - mempoolCongestionFactorBasisPoints uint64, - mempoolPriorityPercentileBasisPoints uint64, - pastBlocksCongestionFactorBasisPoints uint64, - pastBlocksPriorityPercentileBasisPoints uint64, - maxBlockSize uint64) uint64 { - return mp.feeEstimator.EstimateFeeRateNanosPerKB( - minFeeRateNanosPerKB, mempoolCongestionFactorBasisPoints, mempoolPriorityPercentileBasisPoints, - pastBlocksCongestionFactorBasisPoints, pastBlocksPriorityPercentileBasisPoints, maxBlockSize) +func (mp *PosMempool) EstimateFeeRate(minFeeRateNanosPerKB uint64, maxBlockSize uint64) uint64 { + return mp.feeEstimator.EstimateFeeRateNanosPerKB(minFeeRateNanosPerKB, maxBlockSize) } diff --git a/lib/pos_transaction_register_test.go b/lib/pos_transaction_register_test.go index 1fe85d311..f155ad118 100644 --- a/lib/pos_transaction_register_test.go +++ b/lib/pos_transaction_register_test.go @@ -374,6 +374,12 @@ func _testGetDefaultGlobalParams() *GlobalParamsEntry { globalParams.MempoolFeeEstimatorNumMempoolBlocks = 1 globalParams.MempoolFeeEstimatorNumPastBlocks = 1 + globalParams.MempoolCongestionFactorBasisPoints = 10000 + globalParams.MempoolPastBlocksCongestionFactorBasisPoints = 10000 + + globalParams.MempoolPriorityPercentileBasisPoints = 10000 + globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = 10000 + return &globalParams } From da73e345bc578e9bc98bd3f21316b5d85b5385b5 Mon Sep 17 00:00:00 2001 
From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 22 Apr 2024 22:45:49 -0400 Subject: [PATCH 670/762] Read SoftMaxBlockSizeBytesPoS From Internal Fee Estimator GlobalParams (#1260) Read SoftMaxBlockSizeBytesPoS From Internal Fee Estimator GlobalParams Fix merge Use correct param --- lib/blockchain.go | 24 ++++-------------------- lib/legacy_mempool.go | 6 +++--- lib/pos_fee_estimator.go | 12 ++++++------ lib/pos_fee_estimator_test.go | 14 +++++++++----- lib/pos_mempool.go | 12 ++++++------ lib/pos_transaction_register_test.go | 2 ++ 6 files changed, 30 insertions(+), 40 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 7bab25320..eda35c213 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5006,11 +5006,7 @@ func (bc *Blockchain) CreateMaxSpend( for feeAmountNanos == 0 || feeAmountNanos != prevFeeAmountNanos { prevFeeAmountNanos = feeAmountNanos if !isInterfaceValueNil(mempool) { - maxBlockSizeBytes := bc.params.MaxBlockSizeBytesPoW - if bc.params.IsPoSBlockHeight(uint64(bc.BlockTip().Height)) { - maxBlockSizeBytes = utxoView.GetSoftMaxBlockSizeBytesPoS() - } - feeAmountNanos, err = mempool.EstimateFee(txn, minFeeRateNanosPerKB, maxBlockSizeBytes) + feeAmountNanos, err = mempool.EstimateFee(txn, minFeeRateNanosPerKB) if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, "CreateMaxSpend: Problem estimating fee: ") } @@ -5141,11 +5137,7 @@ func (bc *Blockchain) AddInputsAndChangeToTransactionWithSubsidy( if txArg.TxnMeta.GetTxnType() != TxnTypeBlockReward { if !isInterfaceValueNil(mempool) { - maxBlockSizeBytes := bc.params.MaxBlockSizeBytesPoW - if bc.params.IsPoSBlockHeight(uint64(bc.BlockTip().Height)) { - maxBlockSizeBytes = utxoView.GetSoftMaxBlockSizeBytesPoS() - } - newTxFee, err := mempool.EstimateFee(txArg, minFeeRateNanosPerKB, maxBlockSizeBytes) + newTxFee, err := mempool.EstimateFee(txArg, minFeeRateNanosPerKB) UpdateTxnFee(txArg, newTxFee) if err != nil { return 0, 0, 0, 0, errors.Wrapf(err, @@ 
-5842,14 +5834,6 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( } chainedUnsignedTransactions[0].ExtraData[AtomicTxnsChainLength] = UintToBuf(uint64(len(unsignedTransactions))) - utxoView, err := bc.GetUncommittedTipView() - if err != nil { - return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to get uncommitted tip view") - } - maxBlockSizeBytes := bc.params.MaxBlockSizeBytesPoW - if bc.params.IsPoSBlockHeight(uint64(bc.BlockTip().Height)) { - maxBlockSizeBytes = utxoView.GetSoftMaxBlockSizeBytesPoS() - } // First iterate over the transactions, giving them a dummy value for the atomic hash and update the fee nanos. // If the newly computed fee nanos is less than the original fee nanos, we do not update the fees. dummyAtomicHashBytes := RandomBytes(32) @@ -5859,7 +5843,7 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( } txn.ExtraData[NextAtomicTxnPreHash] = dummyAtomicHashBytes txn.ExtraData[PreviousAtomicTxnPreHash] = dummyAtomicHashBytes - newFeeEstimate, err := mempool.EstimateFee(txn, minFeeRateNanosPerKB, maxBlockSizeBytes) + newFeeEstimate, err := mempool.EstimateFee(txn, minFeeRateNanosPerKB) if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to recompute fee estimate") } @@ -5944,7 +5928,7 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( // Use EstimateFee to set the fee INCLUDING the wrapper. Note that this fee should generally be a bit // higher than the totalFee computed above because the atomic wrapper adds overhead. 
- newFeeEstimate, err := mempool.EstimateFee(atomicTxn, 0, maxBlockSizeBytes) + newFeeEstimate, err := mempool.EstimateFee(atomicTxn, 0) if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to compute "+ "fee on full txn") diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 8014e5c9f..f60edd9a1 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2511,12 +2511,12 @@ func EstimateMaxTxnFeeV1(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) uint64 { return feeAmountNanos } -func (mp *DeSoMempool) EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64, _ uint64) (uint64, error) { - feeRate := mp.EstimateFeeRate(minFeeRateNanosPerKB, 0) +func (mp *DeSoMempool) EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) (uint64, error) { + feeRate := mp.EstimateFeeRate(minFeeRateNanosPerKB) return EstimateMaxTxnFeeV1(txn, feeRate), nil } -func (mp *DeSoMempool) EstimateFeeRate(minFeeRateNanosPerKB uint64, _ uint64) uint64 { +func (mp *DeSoMempool) EstimateFeeRate(minFeeRateNanosPerKB uint64) uint64 { if minFeeRateNanosPerKB < mp.readOnlyUtxoView.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB { return mp.readOnlyUtxoView.GetCurrentGlobalParamsEntry().MinimumNetworkFeeNanosPerKB } diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index aa3b13ab6..90b4c98ef 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -290,7 +290,7 @@ func (posFeeEstimator *PoSFeeEstimator) pruneBlocksToMaxNumPastBlocks(blocks []* // EstimateFeeRateNanosPerKB estimates the fee rate in nanos per KB for the current mempool // and past blocks using the congestionFactorBasisPoints, priorityPercentileBasisPoints, and // maxBlockSize params. 
-func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB(minFeeRateNanosPerKB uint64, maxBlockSize uint64) uint64 { +func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB(minFeeRateNanosPerKB uint64) uint64 { posFeeEstimator.rwLock.RLock() defer posFeeEstimator.rwLock.RUnlock() @@ -299,6 +299,8 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB(minFeeRateNano pastBlocksCongestionFactorBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksCongestionFactorBasisPoints pastBlocksPriorityPercentileBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksPriorityPercentileBasisPoints + maxBlockSize := posFeeEstimator.globalParams.SoftMaxBlockSizeBytesPoS + pastBlockFeeRate := posFeeEstimator.estimateFeeRateNanosPerKBGivenTransactionRegister( posFeeEstimator.pastBlocksTransactionRegister, pastBlocksCongestionFactorBasisPoints, @@ -328,11 +330,7 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB(minFeeRateNano // EstimateFee estimates the fee in nanos for the provided transaction by taking the // max of the mempoolFeeEstimate and pastBlocksFeeEstimate. 
-func (posFeeEstimator *PoSFeeEstimator) EstimateFee( - txn *MsgDeSoTxn, - minFeeRateNanosPerKB uint64, - maxBlockSize uint64, -) (uint64, error) { +func (posFeeEstimator *PoSFeeEstimator) EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) (uint64, error) { posFeeEstimator.rwLock.RLock() defer posFeeEstimator.rwLock.RUnlock() @@ -341,6 +339,8 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFee( pastBlocksCongestionFactorBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksCongestionFactorBasisPoints pastBlocksPriorityPercentileBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksPriorityPercentileBasisPoints + maxBlockSize := posFeeEstimator.globalParams.SoftMaxBlockSizeBytesPoS + mempoolFeeEstimate, err := posFeeEstimator.mempoolFeeEstimate( txn, mempoolCongestionFactorBasisPoints, diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index 9a8b59909..b3596e4ae 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -63,7 +63,7 @@ func TestFeeEstimator(t *testing.T) { require.NoError(t, err) validateTxnFee(t, txn, computedFee, baseFeeRate) // Hybrid estimator will also return the base fee rate * number of bytes. - computedFee, err = posFeeEstimator.EstimateFee(txn, 0, 1000) + computedFee, err = posFeeEstimator.EstimateFee(txn, 0) require.NoError(t, err) validateTxnFee(t, txn, computedFee, baseFeeRate) @@ -167,10 +167,11 @@ func TestFeeEstimator(t *testing.T) { globalParams.MempoolPastBlocksCongestionFactorBasisPoints = congestionFactor globalParams.MempoolPriorityPercentileBasisPoints = priorityPercentileBasisPoints globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = priorityPercentileBasisPoints + globalParams.SoftMaxBlockSizeBytesPoS = maxBlockSizeHybrid posFeeEstimator.UpdateGlobalParams(globalParams) // And the hybrid estimator is just the max, but for completeness, we check it. 
- estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0, maxBlockSizeHybrid) + estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0) require.NoError(t, err) require.Equal(t, estimatedMempoolFee, estimatedHybridFee) require.Equal(t, estimatedPastBlocksFee, estimatedHybridFee) @@ -218,10 +219,11 @@ func TestFeeEstimator(t *testing.T) { globalParams.MempoolPastBlocksCongestionFactorBasisPoints = congestionFactor globalParams.MempoolPriorityPercentileBasisPoints = priorityPercentileBasisPoints globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = priorityPercentileBasisPoints + globalParams.SoftMaxBlockSizeBytesPoS = maxBlockSizeHybrid posFeeEstimator.UpdateGlobalParams(globalParams) // And the hybrid estimator is just the max, but for completeness, we check it. - estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0, maxBlockSizeHybrid) + estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0) require.NoError(t, err) require.Equal(t, estimatedMempoolFee, estimatedHybridFee) require.Equal(t, estimatedPastBlocksFee, estimatedHybridFee) @@ -269,10 +271,11 @@ func TestFeeEstimator(t *testing.T) { globalParams.MempoolPastBlocksCongestionFactorBasisPoints = congestionFactor globalParams.MempoolPriorityPercentileBasisPoints = priorityPercentileBasisPoints globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = priorityPercentileBasisPoints + globalParams.SoftMaxBlockSizeBytesPoS = maxBlockSizeHybrid posFeeEstimator.UpdateGlobalParams(globalParams) // And the hybrid estimator is just the max, but for completeness, we check it. 
- estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0, maxBlockSizeHybrid) + estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0) require.NoError(t, err) require.Equal(t, estimatedMempoolFee, estimatedHybridFee) require.Equal(t, estimatedPastBlocksFee, estimatedHybridFee) @@ -320,10 +323,11 @@ func TestFeeEstimator(t *testing.T) { globalParams.MempoolPastBlocksCongestionFactorBasisPoints = congestionFactor globalParams.MempoolPriorityPercentileBasisPoints = priorityPercentileBasisPoints globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = priorityPercentileBasisPoints + globalParams.SoftMaxBlockSizeBytesPoS = maxBlockSizeHybrid posFeeEstimator.UpdateGlobalParams(globalParams) // And the hybrid estimator is just the max, but for completeness, we check it. - estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0, maxBlockSizeHybrid) + estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0) require.NoError(t, err) require.Equal(t, estimatedMempoolFee, estimatedHybridFee) require.Equal(t, estimatedPastBlocksFee, estimatedHybridFee) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 02bd3dc0b..58360181c 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -43,8 +43,8 @@ type Mempool interface { GetMempoolTipBlockHeight() uint64 GetMempoolTx(txHash *BlockHash) *MempoolTx GetMempoolSummaryStats() map[string]*SummaryStats - EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64, maxBlockSize uint64) (uint64, error) - EstimateFeeRate(minFeeRateNanosPerKB uint64, maxBlockSize uint64) uint64 + EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) (uint64, error) + EstimateFeeRate(minFeeRateNanosPerKB uint64) uint64 } // GetAugmentedUniversalViewWithAdditionalTransactions is meant as a helper function @@ -1118,10 +1118,10 @@ func (mp *PosMempool) GetMempoolSummaryStats() map[string]*SummaryStats { return convertMempoolTxsToSummaryStats(mp.txnRegister.GetFeeTimeTransactions()) } -func (mp *PosMempool) EstimateFee(txn 
*MsgDeSoTxn, minFeeRateNanosPerKB uint64, maxBlockSize uint64) (uint64, error) { - return mp.feeEstimator.EstimateFee(txn, minFeeRateNanosPerKB, maxBlockSize) +func (mp *PosMempool) EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) (uint64, error) { + return mp.feeEstimator.EstimateFee(txn, minFeeRateNanosPerKB) } -func (mp *PosMempool) EstimateFeeRate(minFeeRateNanosPerKB uint64, maxBlockSize uint64) uint64 { - return mp.feeEstimator.EstimateFeeRateNanosPerKB(minFeeRateNanosPerKB, maxBlockSize) +func (mp *PosMempool) EstimateFeeRate(minFeeRateNanosPerKB uint64) uint64 { + return mp.feeEstimator.EstimateFeeRateNanosPerKB(minFeeRateNanosPerKB) } diff --git a/lib/pos_transaction_register_test.go b/lib/pos_transaction_register_test.go index f155ad118..f05f9a7fd 100644 --- a/lib/pos_transaction_register_test.go +++ b/lib/pos_transaction_register_test.go @@ -380,6 +380,8 @@ func _testGetDefaultGlobalParams() *GlobalParamsEntry { globalParams.MempoolPriorityPercentileBasisPoints = 10000 globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = 10000 + globalParams.SoftMaxBlockSizeBytesPoS = 1000 + return &globalParams } From afd3a2190585fa098e6fa609993959ba47c80b77 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 22 Apr 2024 23:01:57 -0400 Subject: [PATCH 671/762] Set min fee rate on create atomic txns wrapper and fix comment (#1261) --- lib/blockchain.go | 12 +++++++++--- lib/constants.go | 2 +- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index eda35c213..ee3cf1fc3 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -5928,7 +5928,7 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( // Use EstimateFee to set the fee INCLUDING the wrapper. Note that this fee should generally be a bit // higher than the totalFee computed above because the atomic wrapper adds overhead. 
- newFeeEstimate, err := mempool.EstimateFee(atomicTxn, 0) + newFeeEstimate, err := mempool.EstimateFee(atomicTxn, minFeeRateNanosPerKB) if err != nil { return nil, 0, errors.Wrapf(err, "CreateAtomicTxnsWrapper: failed to compute "+ "fee on full txn") @@ -5937,7 +5937,12 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( // that we computed by summing all the fees on the inner txns, which is computed // by previousFeeEstimate. if newFeeEstimate <= previousFeeEstimate { - return atomicTxn, newFeeEstimate, nil + // We explicitly set the fee on the atomic txn to the fee we computed + // before so that it matches the sum of the fees on the inner txns + // in the event that the fee we computed is less than the sum of the + // fees on the inner txns. + atomicTxn.TxnFeeNanos = previousFeeEstimate + return atomicTxn, previousFeeEstimate, nil } // If the fees we currently have set in all of our txns come up short, then // add the extra we need to the first txn. After we do this, we also need to @@ -5946,6 +5951,7 @@ func (bc *Blockchain) CreateAtomicTxnsWrapper( feeDelta := newFeeEstimate - previousFeeEstimate UpdateTxnFee( chainedUnsignedTransactions[0], - chainedUnsignedTransactions[0].TxnFeeNanos+feeDelta) + chainedUnsignedTransactions[0].TxnFeeNanos+feeDelta, + ) } } diff --git a/lib/constants.go b/lib/constants.go index 66960a993..b79a70885 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1359,7 +1359,7 @@ var DeSoMainnetParams = DeSoParams{ // The congestion factor determines when we will start to increase or decrease fees. // We set the congestion factor to 90% for past blocks and mempool. 
This makes it so that we will - // start to increase fees when the past N blocks (DefaultMempoolPastBlocksCongestionFactorBasisPoints) are + // start to increase fees when the past N blocks (DefaultMempoolFeeEstimatorNumPastBlocks) are // 90% full on average or the mempool has 90% of 1 block's worth of txns in it (actually 90% of // DefaultMempoolFeeEstimatorNumMempoolBlocks). This is good because it ensures that the typical // fee estimate we give will be highly likely to get one's transaction included in the next block From 8893e58a6472b460887ca410892fb4e442f8ce9e Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 22 Apr 2024 23:59:37 -0400 Subject: [PATCH 672/762] Fix nil arg to hash to big int, stop old block producer after cutover (#1262) --- lib/block_producer.go | 9 +++++++++ lib/blockchain.go | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/lib/block_producer.go b/lib/block_producer.go index fd48e3840..fc09daf9b 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -652,6 +652,15 @@ func (desoBlockProducer *DeSoBlockProducer) Start() { return } + // Stop the block producer if we're past the pos cutover or if the tip block is the last pow block. 
+ blockHeight := uint64(desoBlockProducer.chain.blockTip().Height) + if blockHeight >= desoBlockProducer.params.GetFinalPoWBlockHeight() { + desoBlockProducer.Stop() + glog.V(1).Infof("DeSoBlockProducer.Start() Stopping block producer because we're past the PoS cutover" + + " or the last PoW block.") + return + } + secondsLeft := float64(desoBlockProducer.minBlockUpdateIntervalSeconds) - time.Since(lastBlockUpdate).Seconds() glog.V(1).Infof("DeSoBlockProducer.Start(): timings for next run: %v %v %v", float64(desoBlockProducer.minBlockUpdateIntervalSeconds), time.Since(lastBlockUpdate).Seconds(), secondsLeft) diff --git a/lib/blockchain.go b/lib/blockchain.go index ee3cf1fc3..69b70aaf7 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -436,6 +436,10 @@ func CalcNextDifficultyTarget( clippedTimeDiffSecs = maxRetargetTimeSecs } + if lastNode.DifficultyTarget == nil { + return nil, fmt.Errorf("CalcNextDifficultyTarget: Difficulty target for last node is nil") + } + numerator := new(big.Int).Mul( HashToBigint(lastNode.DifficultyTarget), big.NewInt(clippedTimeDiffSecs)) From 67bbacfb84bd4014e059206fb973b971e4ca659f Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 23 Apr 2024 09:40:00 -0400 Subject: [PATCH 673/762] Set PoS Testnet Fork Heights (DRAFT) (#1254) --- lib/constants.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/constants.go b/lib/constants.go index b79a70885..363458262 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1464,14 +1464,14 @@ var TestnetForkHeights = ForkHeights{ // Tues May 23 2023 @ 9am PT BlockRewardPatchBlockHeight: uint32(729753), - // FIXME: set to real block height when ready - ProofOfStake1StateSetupBlockHeight: uint32(math.MaxUint32), + // Wed May 1 2024 @ 12pm PT + ProofOfStake1StateSetupBlockHeight: uint32(1113866), - // FIXME: set to real block height when ready - ProofOfStake2ConsensusCutoverBlockHeight: uint32(math.MaxUint32), + // 
Mon May 6 2024 @ 12pm PT + ProofOfStake2ConsensusCutoverBlockHeight: uint32(1119482), - // FIXME: set to real block height when ready - LockupsBlockHeight: uint32(math.MaxUint32), + // Wed May 1 2024 @ 12pm PT + LockupsBlockHeight: uint32(1113866), // Be sure to update EncoderMigrationHeights as well via // GetEncoderMigrationHeights if you're modifying schema. From c000c7d33a63111229cdac80e602ae7f0a8d1863 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 23 Apr 2024 13:22:16 -0400 Subject: [PATCH 674/762] Send Requested HeaderBundle Even When Syncing (#1263) --- lib/server.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/lib/server.go b/lib/server.go index c1deeaa81..0a72e3d73 100644 --- a/lib/server.go +++ b/lib/server.go @@ -713,14 +713,6 @@ func (srv *Server) _handleGetHeaders(pp *Peer, msg *MsgDeSoGetHeaders) { glog.V(1).Infof("Server._handleGetHeadersMessage: called with locator: (%v), "+ "stopHash: (%v) from Peer %v", msg.BlockLocator, msg.StopHash, pp) - // Ignore GetHeaders requests we're still syncing. 
- if srv.blockchain.isSyncing() { - chainState := srv.blockchain.chainState() - glog.V(1).Infof("Server._handleGetHeadersMessage: Ignoring GetHeaders from Peer %v"+ - "because node is syncing with ChainState (%v)", pp, chainState) - return - } - // Find the most recent known block in the best block chain based // on the block locator and fetch all of the headers after it until either // MaxHeadersPerMsg have been fetched or the provided stop From 70cca57047f8f38f6eec5b7fbc88a7584850334e Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 23 Apr 2024 15:01:45 -0400 Subject: [PATCH 675/762] Bump transaction validation interval in TestGetBlockTransactions (#1265) --- lib/pos_block_producer_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index a3430fc23..c4cf3c3e6 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -202,7 +202,7 @@ func TestGetBlockTransactions(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + nil, 1, 10000, 100000, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -277,7 +277,7 @@ func TestGetBlockTransactions(t *testing.T) { testMempool := NewPosMempool() testMempool.Init( params, globalParams, latestBlockView, 2, "", true, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + nil, 1, 10000, 100000, ) require.NoError(testMempool.Start()) defer testMempool.Stop() From 87f4299ccc566f33d1990042f36d19828a43e67c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 23 Apr 2024 15:01:59 -0400 Subject: [PATCH 676/762] Remove Sync Peer Disconnect When Header Tip Is Not Current After Syncing Headers (#1264) --- lib/server.go | 13 ------------- 1 file 
changed, 13 deletions(-) diff --git a/lib/server.go b/lib/server.go index 0a72e3d73..3ce1f07f6 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1117,19 +1117,6 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { maxHeadersPerMsg = MaxHeadersPerMsgPos } if uint32(len(msg.Headers)) < maxHeadersPerMsg || srv.blockchain.isTipMaxed(srv.blockchain.headerTip()) { - // If we have exhausted the peer's headers but our header chain still isn't - // current it means the peer we chose isn't current either. So disconnect - // from her and try to sync with someone else. - if srv.blockchain.chainState() == SyncStateSyncingHeaders { - glog.V(1).Infof("Server._handleHeaderBundle: Disconnecting from peer %v because "+ - "we have exhausted their headers but our tip is still only "+ - "at time=%v height=%d", pp, - time.Unix(int64(srv.blockchain.headerTip().Header.GetTstampSecs()), 0), - srv.blockchain.headerTip().Header.Height) - pp.Disconnect() - return - } - // If we get here it means that we've just finished syncing headers and we will proceed to // syncing state either through hyper sync or block sync. First let's check if the peer // supports hypersync and if our block tip is old enough so that it makes sense to sync state. From 9d024d0456048a264d90054357299a7004db95ec Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 23 Apr 2024 15:53:38 -0400 Subject: [PATCH 677/762] Start FastHotStuffConsensus If Sync Peers are Not Eligible To Sync From (#1266) --- lib/server.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/lib/server.go b/lib/server.go index 3ce1f07f6..84591bab3 100644 --- a/lib/server.go +++ b/lib/server.go @@ -3021,8 +3021,8 @@ func (srv *Server) getFastHotStuffConsensusEventChannel() chan *consensus.FastHo } func (srv *Server) resetFastHotStuffConsensusTransitionCheckTime() { - // Check once every 30 seconds if the FastHotStuffConsensus is ready to start. 
- srv.fastHotStuffConsensusTransitionCheckTime = time.Now().Add(30 * time.Second) + // Check once every 60 seconds if the FastHotStuffConsensus is ready to start. + srv.fastHotStuffConsensusTransitionCheckTime = time.Now().Add(60 * time.Second) } func (srv *Server) getFastHotStuffTransitionCheckTime() <-chan time.Time { @@ -3077,13 +3077,6 @@ func (srv *Server) tryTransitionToFastHotStuffConsensus() { return } - // If we have at least one sync peer configured but are not connected to any sync peers, then it - // means that we are still in the process of connecting to a sync peer. We can exit early and wait - // for the network manager to connect to a sync peer. - if len(srv.networkManager.connectIps) != 0 && srv.SyncPeer == nil { - return - } - // If we have a sync peer and have not reached the sync peer's starting block height, then // we should sync all remaining blocks from the sync peer before transitioning to the // FastHotStuffConsensus. From d125bc71cd38ec1c1690a979f9a2c43dd9223005 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 24 Apr 2024 17:54:02 -0400 Subject: [PATCH 678/762] Fix MergeGlobalParamEntryDefaults For Fee Estimator (#1267) --- lib/pos_fee_estimator.go | 6 ++++-- lib/pos_snapshot_entries.go | 12 ++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index 90b4c98ef..1d31bded2 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -295,8 +295,9 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFeeRateNanosPerKB(minFeeRateNano defer posFeeEstimator.rwLock.RUnlock() mempoolCongestionFactorBasisPoints := posFeeEstimator.globalParams.MempoolCongestionFactorBasisPoints - mempoolPriorityPercentileBasisPoints := posFeeEstimator.globalParams.MempoolPriorityPercentileBasisPoints pastBlocksCongestionFactorBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksCongestionFactorBasisPoints + + 
mempoolPriorityPercentileBasisPoints := posFeeEstimator.globalParams.MempoolPriorityPercentileBasisPoints pastBlocksPriorityPercentileBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksPriorityPercentileBasisPoints maxBlockSize := posFeeEstimator.globalParams.SoftMaxBlockSizeBytesPoS @@ -335,8 +336,9 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFee(txn *MsgDeSoTxn, minFeeRateN defer posFeeEstimator.rwLock.RUnlock() mempoolCongestionFactorBasisPoints := posFeeEstimator.globalParams.MempoolCongestionFactorBasisPoints - mempoolPriorityPercentileBasisPoints := posFeeEstimator.globalParams.MempoolPriorityPercentileBasisPoints pastBlocksCongestionFactorBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksCongestionFactorBasisPoints + + mempoolPriorityPercentileBasisPoints := posFeeEstimator.globalParams.MempoolPriorityPercentileBasisPoints pastBlocksPriorityPercentileBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksPriorityPercentileBasisPoints maxBlockSize := posFeeEstimator.globalParams.SoftMaxBlockSizeBytesPoS diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index e6052f736..f49093a08 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -157,6 +157,18 @@ func MergeGlobalParamEntryDefaults(globalParamsEntry *GlobalParamsEntry, params if globalParamsEntryCopy.MempoolFeeEstimatorNumPastBlocks == 0 { globalParamsEntryCopy.MempoolFeeEstimatorNumPastBlocks = params.DefaultMempoolFeeEstimatorNumPastBlocks } + if globalParamsEntryCopy.MempoolCongestionFactorBasisPoints == 0 { + globalParamsEntryCopy.MempoolCongestionFactorBasisPoints = params.DefaultMempoolCongestionFactorBasisPoints + } + if globalParamsEntryCopy.MempoolPastBlocksCongestionFactorBasisPoints == 0 { + globalParamsEntryCopy.MempoolPastBlocksCongestionFactorBasisPoints = params.DefaultMempoolPastBlocksCongestionFactorBasisPoints + } + if globalParamsEntryCopy.MempoolPriorityPercentileBasisPoints == 0 { + 
globalParamsEntryCopy.MempoolPriorityPercentileBasisPoints = params.DefaultMempoolPriorityPercentileBasisPoints + } + if globalParamsEntryCopy.MempoolPastBlocksPriorityPercentileBasisPoints == 0 { + globalParamsEntryCopy.MempoolPastBlocksPriorityPercentileBasisPoints = params.DefaultMempoolPastBlocksPriorityPercentileBasisPoints + } if globalParamsEntryCopy.MaxBlockSizeBytesPoS == 0 { globalParamsEntryCopy.MaxBlockSizeBytesPoS = params.DefaultMaxBlockSizeBytesPoS } From 98cbb3570c387448a3175a0b43e9150728c53aa2 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 25 Apr 2024 11:50:20 -0400 Subject: [PATCH 679/762] Fix SyncPeer Refresh When Current Sync Peer Disconnects (#1268) --- lib/server.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/server.go b/lib/server.go index 84591bab3..758e28523 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1924,10 +1924,11 @@ func (srv *Server) _handleDisconnectedPeerMessage(pp *Peer) { srv._cleanupDonePeerState(pp) - // Attempt to find a new peer to sync from if the quitting peer is the - // sync peer and if our blockchain isn't current. - if srv.SyncPeer == pp && srv.blockchain.isSyncing() { - + // Attempt to find a new peer to sync from if the quitting peer is the sync peer. + // We need to refresh the sync peer regardless of whether we're syncing or not. + // In the event that we fall behind, this allows us to switch to a peer allows us + // to continue syncing. 
+ if srv.SyncPeer != nil && srv.SyncPeer.ID == pp.ID { srv.SyncPeer = nil srv._startSync() } From 1ec1efe3cfaba0bc78733e2b30a0d2fac8138290 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 25 Apr 2024 17:21:18 -0400 Subject: [PATCH 680/762] Create New Validator Domain Parsing and Validation (#1269) --- lib/block_view_validator.go | 28 ++++++++++++++- lib/block_view_validator_test.go | 62 ++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+), 1 deletion(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 54404a33a..05c309a9c 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -4,11 +4,14 @@ import ( "bytes" "crypto/sha256" "fmt" - "github.com/deso-protocol/core/consensus" "io" "math" + "net" "net/url" "sort" + "strconv" + + "github.com/deso-protocol/core/consensus" "github.com/deso-protocol/core/bls" "github.com/dgraph-io/badger/v4" @@ -2477,6 +2480,29 @@ func SumValidatorEntriesTotalStakeAmountNanos(validatorEntries []*ValidatorEntry return totalStakeAmountNanos } +// ParseValidatorDomain expects a domain string in the format "host:port" and returns the host and port. +// If the domain is not in the expected format, it returns an error. +func ParseValidatorDomain(domain string) (_host string, _port uint64, _err error) { + // Split the domain into host and port. + hostStr, portStr, err := net.SplitHostPort(domain) + if err != nil { + return "", 0, errors.Wrapf(err, "error splitting host and port %s", domain) + } + + // The host should not be empty. 
+ if hostStr == "" { + return "", 0, errors.Errorf("invalid host %s", hostStr) + } + + // The port should parse into an unsigned integer + port, err := strconv.ParseUint(portStr, 10, 0) + if err != nil { + return "", 0, errors.Wrapf(err, "invalid port %s", portStr) + } + + return hostStr, port, nil +} + // // BLS UTILS // diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 979766d53..314ed402b 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -1951,6 +1951,68 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { } } +func TestParseValidatorDomain(t *testing.T) { + // Sad path. Test invalid domain with an http scheme + { + domain := "https://example.com" + _, _, err := ParseValidatorDomain(domain) + require.Error(t, err) + } + + // Sad path. Test invalid domain with an http scheme and port number + { + domain := "https://example.com:8080" + _, _, err := ParseValidatorDomain(domain) + require.Error(t, err) + } + + // Sad path. Test invalid domain with a path but no scheme + { + domain := "example.com/path" + _, _, err := ParseValidatorDomain(domain) + require.Error(t, err) + } + + // Sad path. Test invalid domain with a path, a port, but no scheme + { + domain := "example.com:18000/path" + _, _, err := ParseValidatorDomain(domain) + require.Error(t, err) + } + + // Sad path. Test invalid domain with no scheme, no path, and no port + { + domain := "example.com" + _, _, err := ParseValidatorDomain(domain) + require.Error(t, err) + } + + // Sad path. Test invalid IP address with a port + { + domain := "127.0.0.1" + _, _, err := ParseValidatorDomain(domain) + require.Error(t, err) + } + + // Happy path. Test valid domain with a port, but no scheme and no path + { + domain := "example.com:18000" + host, port, err := ParseValidatorDomain(domain) + require.NoError(t, err) + require.Equal(t, host, "example.com") + require.Equal(t, port, uint64(18000)) + } + + // Happy path. 
Test valid IP address with a port + { + domain := "127.0.0.1:18000" + host, port, err := ParseValidatorDomain(domain) + require.NoError(t, err) + require.Equal(t, host, "127.0.0.1") + require.Equal(t, port, uint64(18000)) + } +} + func _submitUnjailValidatorTxn( testMeta *TestMeta, transactorPublicKeyBase58Check string, From b6f96a971e250ef4e167f3b8cfe654ce3e2d18ef Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 25 Apr 2024 17:23:30 -0400 Subject: [PATCH 681/762] Tighten RegisterAsValidator Domain Validation (#1270) Tighten RegisterAsValidator Domain Validation Update regtest registration --- lib/block_view_stake_test.go | 16 ++++----- lib/block_view_validator.go | 3 +- lib/block_view_validator_test.go | 54 ++++++++++++++--------------- lib/pos_epoch_complete_hook_test.go | 2 +- lib/pos_leader_schedule_test.go | 2 +- lib/pos_server_regtest.go | 4 +-- 6 files changed, 40 insertions(+), 41 deletions(-) diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index d36e0762a..a931e343d 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -101,7 +101,7 @@ func _testStaking(t *testing.T, flushToDB bool) { votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -112,7 +112,7 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.NotNil(t, validatorEntry) require.Len(t, validatorEntry.Domains, 1) - require.Equal(t, validatorEntry.Domains[0], []byte("https://example.com")) + require.Equal(t, validatorEntry.Domains[0], []byte("example.com:18000")) require.True(t, validatorEntry.TotalStakeAmountNanos.IsZero()) } // @@ -945,7 +945,7 @@ func TestStakingWithDerivedKey(t 
*testing.T) { // m0 registers as a validator. votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example1.com")}, + Domains: [][]byte{[]byte("example1.com:18000")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -956,7 +956,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // m1 registers as a validator. votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m1PkBytes) registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example2.com")}, + Domains: [][]byte{[]byte("example2.com:18000")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -1672,8 +1672,8 @@ func _testGetTopStakesByStakeAmount(t *testing.T, flushToDB bool) { { // m0 and m1 register as validators. - constructAndSubmitRegisterValidatorTxn(m0Pub, m0Priv, m0PkBytes, "https://example.com") - constructAndSubmitRegisterValidatorTxn(m1Pub, m1Priv, m1PkBytes, "https://example2.com") + constructAndSubmitRegisterValidatorTxn(m0Pub, m0Priv, m0PkBytes, "example.com:18000") + constructAndSubmitRegisterValidatorTxn(m1Pub, m1Priv, m1PkBytes, "example2.com:18000") } // Cache the validator set for easy access. @@ -1937,7 +1937,7 @@ func TestStakeLockupEpochDuration(t *testing.T) { // m0 registers as a validator. votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m1.com")}, + Domains: [][]byte{[]byte("m1.com:18000")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -2124,7 +2124,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 registers as a validator. 
votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.example.com")}, + Domains: [][]byte{[]byte("m0.example.com:18000")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 05c309a9c..cf08cb623 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -7,7 +7,6 @@ import ( "io" "math" "net" - "net/url" "sort" "strconv" @@ -1803,7 +1802,7 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata( } var domainStrings []string for _, domain := range metadata.Domains { - _, err := url.ParseRequestURI(string(domain)) + _, _, err := ParseValidatorDomain(string(domain)) if err != nil { return fmt.Errorf("UtxoView.IsValidRegisterAsValidatorMetadata: %s: %v", RuleErrorValidatorInvalidDomain, string(domain)) } diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 314ed402b..1733a6a60 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -91,7 +91,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000")}, DisableDelegatedStake: false, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, @@ -108,7 +108,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // RuleErrorValidatorInvalidCommissionBasisPoints votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000")}, DisableDelegatedStake: 
true, DelegatedStakeCommissionBasisPoints: MaxDelegatedStakeCommissionBasisPoints + 1, VotingPublicKey: votingPublicKey, @@ -132,7 +132,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // RuleErrorValidatorTooManyDomains var domains [][]byte for ii := 0; ii <= MaxValidatorNumDomains+1; ii++ { - domains = append(domains, []byte(fmt.Sprintf("https://example.com/%d", ii))) + domains = append(domains, []byte(fmt.Sprintf("example.com:18000/%d", ii))) } registerMetadata = &RegisterAsValidatorMetadata{ Domains: domains, @@ -155,7 +155,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { { // RuleErrorValidatorDuplicateDomains registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com"), []byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000"), []byte("example.com:18000")}, DisableDelegatedStake: false, } _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) @@ -165,7 +165,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { { // RuleErrorValidatorMissingVotingPublicKey registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000")}, } _, err = _submitRegisterAsValidatorTxn( testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB, @@ -177,7 +177,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // RuleErrorValidatorMissingVotingAuthorization votingPublicKey, _ := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000")}, VotingPublicKey: votingPublicKey, } _, err = _submitRegisterAsValidatorTxn( @@ -190,7 +190,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // RuleErrorValidatorInvalidVotingAuthorization: invalid TransactorPkBytes votingPublicKey, 
votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m1PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -205,7 +205,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { votingPublicKey, _ := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) _, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -219,7 +219,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // Happy path: register a validator votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000")}, DisableDelegatedStake: false, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, @@ -235,7 +235,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.ValidatorPKID, m0PKID) require.Len(t, validatorEntry.Domains, 1) - require.Equal(t, string(validatorEntry.Domains[0]), "https://example.com") + require.Equal(t, string(validatorEntry.Domains[0]), "example.com:18000") require.False(t, validatorEntry.DisableDelegatedStake) require.Equal(t, string(validatorEntry.ExtraData["TestKey"]), "TestValue1") @@ -256,7 +256,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { // Happy path: update a validator votingPrivateKey, votingPublicKey, votingAuthorization := _generateVotingPrivateKeyPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata = 
&RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example1.com"), []byte("https://example2.com")}, + Domains: [][]byte{[]byte("example1.com:18000"), []byte("example2.com:18000")}, DisableDelegatedStake: false, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, @@ -273,8 +273,8 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Equal(t, validatorEntry.ValidatorPKID, m0PKID) require.Len(t, validatorEntry.Domains, 2) - require.Equal(t, string(validatorEntry.Domains[0]), "https://example1.com") - require.Equal(t, string(validatorEntry.Domains[1]), "https://example2.com") + require.Equal(t, string(validatorEntry.Domains[0]), "example1.com:18000") + require.Equal(t, string(validatorEntry.Domains[1]), "example2.com:18000") require.False(t, validatorEntry.DisableDelegatedStake) require.Equal(t, string(validatorEntry.ExtraData["TestKey"]), "TestValue2") @@ -291,7 +291,7 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { votingAuthorization, err = prevBLSPrivateKey.Sign(CreateValidatorVotingAuthorizationPayload(m1PkBytes)) require.NoError(t, err) registerMetadata = &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000")}, DisableDelegatedStake: false, VotingPublicKey: prevBLSPublicKey, VotingAuthorization: votingAuthorization, @@ -615,7 +615,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { // Perform a RegisterAsValidator txn. No error expected. 
registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -630,7 +630,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { require.NoError(t, err) require.NotNil(t, validatorEntry) require.Len(t, validatorEntry.Domains, 1) - require.Equal(t, validatorEntry.Domains[0], []byte("https://example.com")) + require.Equal(t, validatorEntry.Domains[0], []byte("example.com:18000")) // Perform a second RegisterAsValidator txn. Error expected. err = _submitValidatorTxnWithDerivedKey( @@ -679,7 +679,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { // Perform a RegisterAsValidator txn. Error expected. registerAsValidatorMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -772,7 +772,7 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { // m0 registers as a validator. votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, + Domains: [][]byte{[]byte("m0.com:1800")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -788,7 +788,7 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { // m1 registers as a validator. 
votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m1PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m1.com")}, + Domains: [][]byte{[]byte("m1.com:1800")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -804,7 +804,7 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { // m2 registers as a validator. votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m2PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m2.com")}, + Domains: [][]byte{[]byte("m2.com:18000")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -1203,7 +1203,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 registers as a validator with DisableDelegatedStake = FALSE. votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, + Domains: [][]byte{[]byte("m0.com:1800")}, DisableDelegatedStake: false, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, @@ -1224,7 +1224,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 updates DisableDelegatedStake = TRUE. votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, + Domains: [][]byte{[]byte("m0.com:18000")}, DisableDelegatedStake: true, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, @@ -1269,7 +1269,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 updates DisableDelegatedStake = FALSE. 
votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, + Domains: [][]byte{[]byte("m0.com:18000")}, DisableDelegatedStake: false, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, @@ -1301,7 +1301,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 tries to update DisableDelegateStake = TRUE. Errors. votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, + Domains: [][]byte{[]byte("m0.com:18000")}, DisableDelegatedStake: true, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, @@ -1394,7 +1394,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { // m0 registers as a validator. votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://m0.com")}, + Domains: [][]byte{[]byte("m0.com:18000")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -1577,7 +1577,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // m0 registers as a validator. votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -1868,7 +1868,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { // sender registers as a validator. 
votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, senderPkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte("https://example.com")}, + Domains: [][]byte{[]byte("example.com:18000")}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 42b56012c..8312638a7 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -890,7 +890,7 @@ func _registerValidatorAndStake( testMeta.pubKeyToBLSKeyMap[publicKey] = votingPrivateKey testMeta.blsPubKeyToBLSKeyMap[votingPublicKey.ToString()] = votingPrivateKey registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, + Domains: [][]byte{[]byte(fmt.Sprintf("%s.com:18000", publicKey))}, VotingPublicKey: votingPublicKey, DelegatedStakeCommissionBasisPoints: commissionBasisPoints, VotingAuthorization: votingAuthorization, diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index f0dae661d..95684a4a6 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -72,7 +72,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { // Validator registers. 
votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, pkBytes) registerMetadata := &RegisterAsValidatorMetadata{ - Domains: [][]byte{[]byte(fmt.Sprintf("https://%s.com", publicKey))}, + Domains: [][]byte{[]byte(fmt.Sprintf("%s.com:18000", publicKey))}, VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } diff --git a/lib/pos_server_regtest.go b/lib/pos_server_regtest.go index a7e441ab8..93be1d628 100644 --- a/lib/pos_server_regtest.go +++ b/lib/pos_server_regtest.go @@ -26,9 +26,9 @@ func (srv *Server) submitRegtestValidatorRegistrationTxns(block *MsgDeSoBlock) { var domain string if len(srv.GetConnectionManager().listeners) == 0 { - domain = "http://localhost:18000" + domain = "localhost:18000" } - domain = "http://" + srv.GetConnectionManager().listeners[0].Addr().String() + domain = srv.GetConnectionManager().listeners[0].Addr().String() txnMeta := RegisterAsValidatorMetadata{ Domains: [][]byte{[]byte(domain)}, From f348398054ab17078158551446a70c673f0ead5e Mon Sep 17 00:00:00 2001 From: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> Date: Fri, 26 Apr 2024 07:43:45 -0700 Subject: [PATCH 682/762] fix iota bug (#1271) --- lib/blockchain.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 69b70aaf7..98ed63ee9 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -71,18 +71,18 @@ const ( // don't store orphan headers and therefore any header that we do // have in our node index will be known definitively to be valid or // invalid one way or the other. - StatusHeaderValidated = 1 << 0 - StatusHeaderValidateFailed = 1 << 1 + StatusHeaderValidated = 1 << 1 + StatusHeaderValidateFailed = 1 << 2 - StatusBlockProcessed = 1 << 2 // Process means that the block is not an orphan and has been processed. This helps prevent us from reprocessing a block that we've already attempted to validate and add to the block index. 
- StatusBlockStored = 1 << 3 // Stored means that the block has been added to the block index and stored in the DB. - StatusBlockValidated = 1 << 4 // Validated means that the block has passed validations and is eligible to be part of the best chain. - StatusBlockValidateFailed = 1 << 5 // Validate Failed means that the block did not pass validations and will never be part of the best chain. + StatusBlockProcessed = 1 << 3 // Process means that the block is not an orphan and has been processed. This helps prevent us from reprocessing a block that we've already attempted to validate and add to the block index. + StatusBlockStored = 1 << 4 // Stored means that the block has been added to the block index and stored in the DB. + StatusBlockValidated = 1 << 5 // Validated means that the block has passed validations and is eligible to be part of the best chain. + StatusBlockValidateFailed = 1 << 6 // Validate Failed means that the block did not pass validations and will never be part of the best chain. - StatusBitcoinHeaderValidated = 1 << 6 // Deprecated - StatusBitcoinHeaderValidateFailed = 1 << 7 // Deprecated + StatusBitcoinHeaderValidated = 1 << 7 // Deprecated + StatusBitcoinHeaderValidateFailed = 1 << 8 // Deprecated - StatusBlockCommitted = 1 << 8 // Committed means that the block has been committed to the blockchain according to the Fast HotStuff commit rule. Only set on blocks after the cutover for PoS + StatusBlockCommitted = 1 << 9 // Committed means that the block has been committed to the blockchain according to the Fast HotStuff commit rule. Only set on blocks after the cutover for PoS ) // IsHeaderValidated returns true if a BlockNode has passed all the block header integrity checks. 
From 4c68b17db0a9d601475bd9ab99e95588ef5d9eb5 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 29 Apr 2024 13:05:30 -0400 Subject: [PATCH 683/762] Add snapshot validators to core state (#1272) --- lib/db_utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/db_utils.go b/lib/db_utils.go index 64ad6366d..735fa597b 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -550,7 +550,7 @@ type DBPrefixes struct { // PrefixSnapshotValidatorSetByPKID: Retrieve a ValidatorEntry from a snapshot validator set by // . // Prefix, , -> *ValidatorEntry - PrefixSnapshotValidatorSetByPKID []byte `prefix_id:"[88]" is_state:"true"` + PrefixSnapshotValidatorSetByPKID []byte `prefix_id:"[88]" is_state:"true" core_state:"true"` // PrefixSnapshotValidatorSetByStakeAmount: Retrieve stake-ordered ValidatorEntries from a snapshot validator set // by SnapshotAtEpochNumber. From 27f62697a5a6e388142371447174dd6ac43b9760 Mon Sep 17 00:00:00 2001 From: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> Date: Mon, 29 Apr 2024 11:08:37 -0700 Subject: [PATCH 684/762] Add buying/selling pkid to GetAllDAOCoinLimitOrders with tests (#1275) * Add buying/selling pkid to GetAllDAOCoinLimitOrders with tests * Prohibit setting one of buying/selling pkid --- lib/block_view.go | 2 +- lib/block_view_dao_coin_limit_order.go | 28 ++++++-- lib/block_view_dao_coin_limit_order_test.go | 76 +++++++++++++++------ lib/block_view_types.go | 4 ++ lib/db_adapter.go | 13 +++- lib/db_utils.go | 24 ++++++- 6 files changed, 116 insertions(+), 31 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index e38f72dee..654221a30 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -2504,7 +2504,7 @@ func (bav *UtxoView) _checkAndUpdateDerivedKeySpendingLimit( var buyingCoinPublicKey []byte var sellingCoinPublicKey []byte if txnMeta.CancelOrderID != nil { - orderEntry, err := bav._getDAOCoinLimitOrderEntry(txnMeta.CancelOrderID) 
+ orderEntry, err := bav.GetDAOCoinLimitOrderEntry(txnMeta.CancelOrderID) if err != nil || orderEntry == nil { return utxoOpsForTxn, errors.Wrapf( RuleErrorDerivedKeyInvalidDAOCoinLimitOrderOrderID, diff --git a/lib/block_view_dao_coin_limit_order.go b/lib/block_view_dao_coin_limit_order.go index 6c65159cb..aa520caff 100644 --- a/lib/block_view_dao_coin_limit_order.go +++ b/lib/block_view_dao_coin_limit_order.go @@ -287,7 +287,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( } // Search for an existing order by OrderID. - existingTransactorOrder, err := bav._getDAOCoinLimitOrderEntry(txMeta.CancelOrderID) + existingTransactorOrder, err := bav.GetDAOCoinLimitOrderEntry(txMeta.CancelOrderID) if err != nil { return 0, 0, nil, err } @@ -1490,10 +1490,10 @@ func _calculateDAOCoinsTransferredInLimitOrderMatch( // ## API Getter Functions // ########################### -func (bav *UtxoView) _getDAOCoinLimitOrderEntry(orderID *BlockHash) (*DAOCoinLimitOrderEntry, error) { +func (bav *UtxoView) GetDAOCoinLimitOrderEntry(orderID *BlockHash) (*DAOCoinLimitOrderEntry, error) { // This function shouldn't be called with nil. if orderID == nil { - return nil, errors.Errorf("_getDAOCoinLimitOrderEntry: Called with nil orderID; this should never happen") + return nil, errors.Errorf("GetDAOCoinLimitOrderEntry: Called with nil orderID; this should never happen") } // First check if we have the order entry in the UTXO view. @@ -1583,10 +1583,18 @@ func (bav *UtxoView) GetAllDAOCoinLimitOrdersForThisDAOCoinPair( return outputEntries, nil } -func (bav *UtxoView) GetAllDAOCoinLimitOrdersForThisTransactor(transactorPKID *PKID) ([]*DAOCoinLimitOrderEntry, error) { +func (bav *UtxoView) GetAllDAOCoinLimitOrdersForThisTransactor( + transactorPKID *PKID, + buyingCoinPkid *PKID, + sellingCoinPkid *PKID, +) ( + []*DAOCoinLimitOrderEntry, + error, +) { // This function is used by the API to construct all open orders for the input transactor. 
if transactorPKID == nil { - return nil, errors.Errorf("GetAllDAOCoinLimitOrdersForThisTransactor: Called with nil transactor PKID; this should never happen") + return nil, errors.Errorf("GetAllDAOCoinLimitOrdersForThisTransactor: Called with nil " + + "transactor PKID; this should never happen") } outputEntries := []*DAOCoinLimitOrderEntry{} @@ -1594,7 +1602,8 @@ func (bav *UtxoView) GetAllDAOCoinLimitOrdersForThisTransactor(transactorPKID *P // Iterate over matching database orders and add them to the // UTXO view if they are not already there. This dedups orders // from the database + orders from the UTXO view as well. - dbOrderEntries, err := bav.GetDbAdapter().GetAllDAOCoinLimitOrdersForThisTransactor(transactorPKID) + dbOrderEntries, err := bav.GetDbAdapter().GetAllDAOCoinLimitOrdersForThisTransactor( + transactorPKID, buyingCoinPkid, sellingCoinPkid) if err != nil { return nil, err } @@ -1609,9 +1618,16 @@ func (bav *UtxoView) GetAllDAOCoinLimitOrdersForThisTransactor(transactorPKID *P // Get matching orders from the UTXO view. // + TransactorPKID should match. + // + buying/selling pkids should match if they're specified // + orderEntry is not deleted. for _, orderEntry := range bav.DAOCoinLimitOrderMapKeyToDAOCoinLimitOrderEntry { if !orderEntry.isDeleted && transactorPKID.Eq(orderEntry.TransactorPKID) { + if buyingCoinPkid != nil && !buyingCoinPkid.Eq(orderEntry.BuyingDAOCoinCreatorPKID) { + continue + } + if sellingCoinPkid != nil && !sellingCoinPkid.Eq(orderEntry.SellingDAOCoinCreatorPKID) { + continue + } outputEntries = append(outputEntries, orderEntry) } } diff --git a/lib/block_view_dao_coin_limit_order_test.go b/lib/block_view_dao_coin_limit_order_test.go index 1f74a5c1d..80fcd6fe4 100644 --- a/lib/block_view_dao_coin_limit_order_test.go +++ b/lib/block_view_dao_coin_limit_order_test.go @@ -962,19 +962,55 @@ func TestDAOCoinLimitOrder(t *testing.T) { { // Test database query. // Confirm 1 existing limit order, and it's from m0. 
- orderEntries, err := dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID) + orderEntries, err := dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 1) require.True(orderEntries[0].Eq(metadataM0.ToEntry(m0PKID.PKID, savedHeight, toPKID))) // Test UTXO view query. // Confirm 1 existing limit order, and it's from m0. - orderEntries, err = utxoView.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID) + orderEntries, err = utxoView.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 1) require.True(orderEntries[0].Eq(metadataM0.ToEntry(m0PKID.PKID, savedHeight, toPKID))) } + // Test GetAllDAOCoinLimitOrdersForThisTransactor() with buying/selling pkid specified + { + buyingPkid := NewPKID(m0PkBytes) + sellingPkid := NewPKID(ZeroPublicKey[:]) + // Test database query. + // Confirm 1 existing limit order, and it's from m0. + orderEntries, err := dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID, buyingPkid, sellingPkid) + require.NoError(err) + require.Equal(len(orderEntries), 1) + require.True(orderEntries[0].Eq(metadataM0.ToEntry(m0PKID.PKID, savedHeight, toPKID))) + + // Test UTXO view query. + // Confirm 1 existing limit order, and it's from m0. + orderEntries, err = utxoView.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID, buyingPkid, sellingPkid) + require.NoError(err) + require.Equal(len(orderEntries), 1) + require.True(orderEntries[0].Eq(metadataM0.ToEntry(m0PKID.PKID, savedHeight, toPKID))) + } + + // Test GetAllDAOCoinLimitOrdersForThisTransactor() with WRONG buying/selling pkid specified + { + buyingPkid := NewPKID(m0PkBytes) + sellingPkid := NewPKID(ZeroPublicKey[:]) + // Test database query. + // Confirm 1 existing limit order, and it's from m0. 
+ orderEntries, err := dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID, sellingPkid, buyingPkid) + require.NoError(err) + require.Equal(len(orderEntries), 0) + + // Test UTXO view query. + // Confirm 1 existing limit order, and it's from m0. + orderEntries, err = utxoView.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID, sellingPkid, buyingPkid) + require.NoError(err) + require.Equal(len(orderEntries), 0) + } + // Construct metadata for a m1 limit order: // * Buying: $DESO // * Selling: DAO coins @@ -1367,7 +1403,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(len(orderEntries), 2) // m0 cancels their order. - orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 1) cancelMetadataM0 := DAOCoinLimitOrderMetadata{CancelOrderID: orderEntries[0].OrderID} @@ -1511,7 +1547,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { metadataM1, ) - m2Orders, err := dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m2PKID.PKID) + m2Orders, err := dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m2PKID.PKID, nil, nil) require.NoError(err) require.Len(m2Orders, 0) } @@ -1708,7 +1744,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // Selling: $DESO // Price: 0.1 $DESO / DAO coin // Quantity: 100 DAO coin units - orderEntries, err = utxoView.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID) + orderEntries, err = utxoView.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 1) exchangeRate, err = CalculateScaledExchangeRate(0.1) @@ -1827,12 +1863,12 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(len(orderEntries), 3) // m0 has 3 open orders. 
- orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 3) // No open orders for m1. - orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID, nil, nil) require.NoError(err) require.Empty(orderEntries) @@ -1878,7 +1914,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(len(orderEntries), 2) // m0 has 2 remaining open orders. - orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 2) @@ -1926,7 +1962,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.True(orderEntries[1].Eq(metadataM1.ToEntry(m1PKID.PKID, savedHeight, toPKID))) // m0 has 1 remaining open orders. - orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 1) } @@ -2352,12 +2388,12 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(desoBalanceM0Before, desoBalanceM0After) // m1 cancels the above txn. 
- orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 1) metadataM1 = DAOCoinLimitOrderMetadata{CancelOrderID: orderEntries[0].OrderID} _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m1Pub, m1Priv, metadataM1) - orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID, nil, nil) require.NoError(err) require.Empty(orderEntries) } @@ -3394,18 +3430,18 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(len(orderEntries), 2) // Confirm 1 order belonging to m0. - orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 1) // Confirm 1 order belonging to m1. - orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 1) // Confirm 0 orders belonging to m3. m3PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m3PkBytes) - orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m3PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m3PKID.PKID, nil, nil) require.NoError(err) require.Empty(orderEntries) @@ -3419,10 +3455,10 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.True(m3PKID.PKID.Eq(originalM0PKID)) // Validate m0's 1 existing order was transferred to m3. 
- orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m0PKID.PKID, nil, nil) require.NoError(err) require.Empty(orderEntries) - orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m3PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m3PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 1) @@ -3446,17 +3482,17 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Contains(err.Error(), RuleErrorDAOCoinLimitOrderMatchingOwnOrder) // Validate m3 can cancel their open order. - orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m3PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m3PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 1) metadataM3 = DAOCoinLimitOrderMetadata{CancelOrderID: orderEntries[0].OrderID} _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m3Pub, m3Priv, metadataM3) - orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m3PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m3PKID.PKID, nil, nil) require.NoError(err) require.Empty(orderEntries) // Validate m1's orders for m3 DAO coins still persist. 
- orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 1) require.True(orderEntries[0].SellingDAOCoinCreatorPKID.Eq(m3PKID.PKID)) @@ -3476,7 +3512,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m1Pub, m1Priv, metadataM1) - orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID) + orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisTransactor(m1PKID.PKID, nil, nil) require.NoError(err) require.Equal(len(orderEntries), 2) } diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 9bc0d5fae..68a3fbeee 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -5747,6 +5747,10 @@ type DAOCoinLimitOrderEntry struct { isDeleted bool } +func (order *DAOCoinLimitOrderEntry) IsDeleted() bool { + return order.isDeleted +} + type DAOCoinLimitOrderOperationType uint8 const ( diff --git a/lib/db_adapter.go b/lib/db_adapter.go index b92ff601d..8cff5d757 100644 --- a/lib/db_adapter.go +++ b/lib/db_adapter.go @@ -253,13 +253,22 @@ func (adapter *DbAdapter) GetAllDAOCoinLimitOrdersForThisDAOCoinPair(buyingDAOCo return DBGetAllDAOCoinLimitOrdersForThisDAOCoinPair(adapter.badgerDb, buyingDAOCoinCreatorPKID, sellingDAOCoinCreatorPKID) } -func (adapter *DbAdapter) GetAllDAOCoinLimitOrdersForThisTransactor(transactorPKID *PKID) ([]*DAOCoinLimitOrderEntry, error) { +func (adapter *DbAdapter) GetAllDAOCoinLimitOrdersForThisTransactor( + transactorPKID *PKID, + buyingCoinPkid *PKID, + sellingCoinPkid *PKID, +) ( + []*DAOCoinLimitOrderEntry, + error, +) { + // Temporarily use badger to support DAO Coin limit order DB operations //if adapter.postgresDb != nil { // return adapter.postgresDb.GetAllDAOCoinLimitOrdersForThisTransactor(transactorPKID) //} - return 
DBGetAllDAOCoinLimitOrdersForThisTransactor(adapter.badgerDb, transactorPKID) + return DBGetAllDAOCoinLimitOrdersForThisTransactor( + adapter.badgerDb, transactorPKID, buyingCoinPkid, sellingCoinPkid) } func (adapter *DbAdapter) GetMatchingDAOCoinLimitOrders(inputOrder *DAOCoinLimitOrderEntry, lastSeenOrder *DAOCoinLimitOrderEntry, orderEntriesInView map[DAOCoinLimitOrderMapKey]bool) ([]*DAOCoinLimitOrderEntry, error) { diff --git a/lib/db_utils.go b/lib/db_utils.go index 735fa597b..5acc94602 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -9822,10 +9822,30 @@ func DBGetAllDAOCoinLimitOrdersForThisDAOCoinPair( return _DBGetAllDAOCoinLimitOrdersByPrefix(handle, key) } -func DBGetAllDAOCoinLimitOrdersForThisTransactor(handle *badger.DB, transactorPKID *PKID) ([]*DAOCoinLimitOrderEntry, error) { - // Get all DAO coin limit orders for this transactor. +func DBGetAllDAOCoinLimitOrdersForThisTransactor( + handle *badger.DB, + transactorPKID *PKID, + buyingCoinPKID *PKID, + sellingCoinPKID *PKID, +) ( + []*DAOCoinLimitOrderEntry, + error, +) { + if buyingCoinPKID != nil && sellingCoinPKID == nil || + buyingCoinPKID == nil && sellingCoinPKID != nil { + + return nil, errors.New("GetAllDAOCoinLimitOrdersForThisTransactor: Must specify " + + "NONE or BOTH buying and selling coin PKIDs") + } + + // Get all DAO coin limit orders for this transactor. Potentially filter by the + // buying/selling coin pkids if provided key := append([]byte{}, Prefixes.PrefixDAOCoinLimitOrderByTransactorPKID...) key = append(key, transactorPKID[:]...) + if buyingCoinPKID != nil && sellingCoinPKID != nil { + key = append(key, buyingCoinPKID[:]...) + key = append(key, sellingCoinPKID[:]...) 
+ } return _DBGetAllDAOCoinLimitOrdersByPrefix(handle, key) } From b67523c60e94502594fe3d4c47f6dfa854679e4a Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 29 Apr 2024 16:05:21 -0400 Subject: [PATCH 685/762] Ignore PoS Mempool Txn Addition When Running PoW (#1277) * Ignore PoS Mempool Txn Addition When Running PoW * Rename PoS directory --- lib/pos_mempool.go | 2 +- lib/server.go | 52 ++++++++++++++++++++++++++++------------------ 2 files changed, 33 insertions(+), 21 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 58360181c..efbd3bcfe 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -294,7 +294,7 @@ func (mp *PosMempool) Start() error { // Setup the database and create the persister if !mp.inMemoryOnly { - mempoolDirectory := filepath.Join(mp.dir, "mempool") + mempoolDirectory := filepath.Join(mp.dir, "pos_mempool") opts := DefaultBadgerOptions(mempoolDirectory) db, err := badger.Open(opts) if err != nil { diff --git a/lib/server.go b/lib/server.go index 758e28523..9819dd360 100644 --- a/lib/server.go +++ b/lib/server.go @@ -289,15 +289,17 @@ func (srv *Server) VerifyAndBroadcastTransaction(txn *MsgDeSoTxn) error { tipHeight := srv.blockchain.BlockTip().Height srv.blockchain.ChainLock.RUnlock() - // Only add the txn to the PoW mempool if we are below the PoS cutover height. - if srv.params.IsPoWBlockHeight(uint64(tipHeight)) { + // Only add the txn to the PoW mempool if we are below the PoS cutover height. The + // final block height of the PoW chain is the cut-off point for PoW transactions. + if uint64(tipHeight) < srv.params.GetFinalPoWBlockHeight() { err := srv.blockchain.ValidateTransaction( txn, // blockHeight is set to the next block since that's where this // transaction will be mined at the earliest. 
tipHeight+1, true, - srv.mempool) + srv.mempool, + ) if err != nil { return fmt.Errorf("VerifyAndBroadcastTransaction: Problem validating txn: %v", err) } @@ -2009,8 +2011,7 @@ func (srv *Server) _addNewTxn( // We will error in two cases: // - the chainState is not need blocks state // - the chainState is need blocks state but the chain is not on PoS. - if chainState != SyncStateNeedBlocksss || - !srv.blockchain.params.IsPoSBlockHeight(tipHeight) { + if chainState != SyncStateNeedBlocksss || !srv.blockchain.params.IsPoSBlockHeight(tipHeight) { err := fmt.Errorf("Server._addNewTxnAndRelay: Cannot process txn "+ "from peer %v while syncing: %v %v", pp, srv.blockchain.chainState(), txn.Hash()) glog.Error(err) @@ -2033,12 +2034,9 @@ func (srv *Server) _addNewTxn( // Only attempt to add the transaction to the PoW mempool if we're on the // PoW protocol. If we're on the PoW protocol, then we use the PoW mempool's, - // txn validity checks to signal whether the txn has been added or not. The PoW - // mempool has stricter txn validity checks than the PoS mempool, so this works - // out conveniently, as it allows us to always add a txn to the PoS mempool. - if srv.params.IsPoWBlockHeight(tipHeight) { - _, err := srv.mempool.ProcessTransaction( - txn, true /*allowUnconnectedTxn*/, rateLimit, peerID, verifySignatures) + // txn validity checks to signal whether the txn has been added or not. + if uint64(tipHeight) < srv.params.GetFinalPoWBlockHeight() { + _, err := srv.mempool.ProcessTransaction(txn, true, rateLimit, peerID, verifySignatures) if err != nil { return nil, errors.Wrapf(err, "Server._addNewTxn: Problem adding transaction to mempool: ") } @@ -2046,11 +2044,18 @@ func (srv *Server) _addNewTxn( glog.V(1).Infof("Server._addNewTxn: newly accepted txn: %v, Peer: %v", txn, pp) } - // Always add the txn to the PoS mempool. This should always succeed if the txn - // addition into the PoW mempool succeeded above. + // Always add the txn to the PoS mempool. 
This will usually succeed if the txn + // addition into the PoW mempool succeeded above. However, we only return an error + // here if the block height is at or above the final PoW block height. In the event + // of an edge case where txns in the mempool are reordered, it is possible for the + // txn addition into the PoW mempool to succeed, while the addition into the PoS + // mempool fails. This error handling catches that and gives the user the correct + // feedback on the txn addition's success. mempoolTxn := NewMempoolTransaction(txn, time.Now(), false) if err := srv.posMempool.AddTransaction(mempoolTxn); err != nil { - return nil, errors.Wrapf(err, "Server._addNewTxn: problem adding txn to pos mempool") + if uint64(tipHeight) >= srv.params.GetFinalPoWBlockHeight() { + return nil, errors.Wrapf(err, "Server._addNewTxn: problem adding txn to pos mempool") + } } return []*MsgDeSoTxn{txn}, nil @@ -2484,7 +2489,7 @@ func (srv *Server) ProcessSingleTxnWithChainLock(pp *Peer, txn *MsgDeSoTxn) ([]* // mempool has stricter txn validity checks than the PoS mempool, so this works // out conveniently, as it allows us to always add a txn to the PoS mempool. tipHeight := uint64(srv.blockchain.blockTip().Height) - if srv.params.IsPoWBlockHeight(tipHeight) { + if uint64(tipHeight) < srv.params.GetFinalPoWBlockHeight() { _, err := srv.mempool.ProcessTransaction( txn, true, /*allowUnconnectedTxn*/ @@ -2500,11 +2505,18 @@ func (srv *Server) ProcessSingleTxnWithChainLock(pp *Peer, txn *MsgDeSoTxn) ([]* } } - // Regardless of the consensus protocol we're running (PoW or PoS), we use the PoS mempool's to house all - // mempool txns. If a txn can't make it into the PoS mempool, which uses a looser unspent balance check for - // the the transactor, then it must be invalid. 
- if err := srv.posMempool.AddTransaction(NewMempoolTransaction(txn, time.Now(), false)); err != nil { - return nil, errors.Wrapf(err, "Server.ProcessSingleTxnWithChainLock: Problem adding transaction to PoS mempool: ") + // Always add the txn to the PoS mempool. This will usually succeed if the txn + // addition into the PoW mempool succeeded above. However, we only return an error + // here if the block height is at or above the final PoW block height. In the event + // of an edge case where txns in the mempool are reordered, it is possible for the + // txn addition into the PoW mempool to succeed, while the addition into the PoS + // mempool fails. This error handling catches that and gives the user the correct + // feedback on the txn addition's success. + mempoolTxn := NewMempoolTransaction(txn, time.Now(), false) + if err := srv.posMempool.AddTransaction(mempoolTxn); err != nil { + if uint64(tipHeight) >= srv.params.GetFinalPoWBlockHeight() { + return nil, errors.Wrapf(err, "Server._addNewTxn: problem adding txn to pos mempool") + } } // Happy path, the txn was successfully added to the PoS (and optionally PoW) mempool. From d8197f14d615163f2961fbafaa113302a872a840 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 30 Apr 2024 11:21:22 -0400 Subject: [PATCH 686/762] Remove Redundant ValidateTransaction During Txn Submission (#1278) * Remove Redundant ValidateTransaction During Txn Submission * Fix typos --- lib/server.go | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/lib/server.go b/lib/server.go index 9819dd360..373159855 100644 --- a/lib/server.go +++ b/lib/server.go @@ -284,27 +284,9 @@ func (srv *Server) BroadcastTransaction(txn *MsgDeSoTxn) ([]*MsgDeSoTxn, error) } func (srv *Server) VerifyAndBroadcastTransaction(txn *MsgDeSoTxn) error { - // Grab the block tip and use it as the height for validation. 
- srv.blockchain.ChainLock.RLock() - tipHeight := srv.blockchain.BlockTip().Height - srv.blockchain.ChainLock.RUnlock() - - // Only add the txn to the PoW mempool if we are below the PoS cutover height. The - // final block height of the PoW chain is the cut-off point for PoW transactions. - if uint64(tipHeight) < srv.params.GetFinalPoWBlockHeight() { - err := srv.blockchain.ValidateTransaction( - txn, - // blockHeight is set to the next block since that's where this - // transaction will be mined at the earliest. - tipHeight+1, - true, - srv.mempool, - ) - if err != nil { - return fmt.Errorf("VerifyAndBroadcastTransaction: Problem validating txn: %v", err) - } - } - + // The BroadcastTransaction call validates the transaction internally according to the + // mempool txn addition rules. If the transaction is valid, it will broadcast the txn to + // peers. Otherwise, it returns an error. if _, err := srv.BroadcastTransaction(txn); err != nil { return fmt.Errorf("VerifyAndBroadcastTransaction: Problem broadcasting txn: %v", err) } From e49f44ee9221e09b849fee4e1de0001f24c49d58 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 1 May 2024 10:09:18 -0400 Subject: [PATCH 687/762] Reduce logging in state syncer logic (#1273) --- lib/blockchain.go | 2 +- lib/state_change_syncer.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 98ed63ee9..019fffe67 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -2925,7 +2925,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // state syncer maintains a consistent view of the blockchain. // Note: We ignore the mempool manager here, as that process handles state syncer flush events itself. 
if !bc.eventManager.isMempoolManager { - fmt.Printf("Emitting state syncer flushed event for synced block\n") + glog.V(3).Info("Emitting state syncer flushed event for synced block\n") bc.eventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ FlushId: uuid.Nil, Succeeded: true, diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 62017e7f0..4331460bf 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -879,7 +879,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser // has changed since the last cache, and we need to reset it. stateChangeSyncer.MempoolCachedTxns = make(map[string][]*StateChangeEntry) stateChangeSyncer.MempoolCachedUtxoView = nil - fmt.Printf("Txn not in cache, resetting\n") + glog.V(2).Info("Txn not in cache, resetting\n") break } @@ -889,7 +889,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser // If we know that all our transactions are good, set the state of the utxo view to the cached one, and exit. mempoolUtxoView = stateChangeSyncer.MempoolCachedUtxoView } - fmt.Printf("All txns match, continueing: %v\n", ii) + glog.V(2).Infof("All txns match, continuing: %v\n", ii) break } } @@ -996,7 +996,7 @@ func (stateChangeSyncer *StateChangeSyncer) StartMempoolSyncRoutine(server *Serv if !stateChangeSyncer.BlocksyncCompleteEntriesFlushed && stateChangeSyncer.SyncType == NodeSyncTypeBlockSync { err := stateChangeSyncer.FlushAllEntriesToFile(server) if err != nil { - fmt.Printf("StateChangeSyncer.StartMempoolSyncRoutine: Error flushing all entries to file: %v", err) + glog.Errorf("StateChangeSyncer.StartMempoolSyncRoutine: Error flushing all entries to file: %v", err) } } mempoolClosed := !server.GetMempool().IsRunning() @@ -1046,7 +1046,7 @@ func (stateChangeSyncer *StateChangeSyncer) FlushAllEntriesToFile(server *Server // Loop through all the batches of entries for the prefix until we get a non-full chunk. 
for chunkFull { - fmt.Printf("Processing chunk for prefix: %+v\n", prefix) + glog.V(2).Infof("Processing chunk for prefix: %+v\n", prefix) // Create a flush ID for this chunk. dbFlushId := uuid.New() // Fetch the batch from main DB records with a batch size of about snap.BatchSize. From b2ad7419a9c1db57a846d8f62f759d8083fee6cd Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 1 May 2024 15:30:10 -0400 Subject: [PATCH 688/762] Disable Regtest Validator Auto-Registration When Not on Regtest (#1280) --- cmd/node.go | 1 + lib/server.go | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/node.go b/cmd/node.go index af8675756..29943e397 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -253,6 +253,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { shouldRestart := false node.Server, err, shouldRestart = lib.NewServer( node.Params, + node.Config.Regtest, node.Listeners, desoAddrMgr, node.Config.ConnectIPs, diff --git a/lib/server.go b/lib/server.go index 373159855..177804969 100644 --- a/lib/server.go +++ b/lib/server.go @@ -361,6 +361,7 @@ func ValidateHyperSyncFlags(isHypersync bool, syncType NodeSyncType) { // TODO: Refactor all these arguments into a config object or something. func NewServer( _params *DeSoParams, + _isRegtest bool, _listeners []net.Listener, _desoAddrMgr *addrmgr.AddrManager, _connectIps []string, @@ -639,7 +640,7 @@ func NewServer( // On testnet, if the node is configured to be a PoW block producer, and it is configured // to be also a PoS validator, then we attach block mined listeners to the miner to kick // off the PoS consensus once the miner is done. 
- if _params.NetworkType == NetworkType_TESTNET && _miner != nil && _blockProducer != nil { + if _isRegtest && _params.NetworkType == NetworkType_TESTNET && _miner != nil && _blockProducer != nil { _miner.AddBlockMinedListener(srv.submitRegtestValidatorRegistrationTxns) } } From 6edd64ffcd1caef6a1b1eda3d8134b2c6f94617d Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 1 May 2024 16:04:27 -0400 Subject: [PATCH 689/762] Use ProtocolVersion2 For Both Mainnet and Testnet (#1281) * Define ProtocolVersion2 for Both Mainnet and Testnet * Disable override for regtest --- lib/constants.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/constants.go b/lib/constants.go index 363458262..e1a444ebc 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -900,9 +900,6 @@ func (params *DeSoParams) EnableRegtest() { // Clear the seeds params.DNSSeeds = []string{} - // Set the protocol version - params.ProtocolVersion = ProtocolVersion2 - // Mine blocks incredibly quickly params.TimeBetweenBlocks = 2 * time.Second params.TimeBetweenDifficultyRetargets = 6 * time.Second @@ -1080,7 +1077,7 @@ var MainnetForkHeights = ForkHeights{ // DeSoMainnetParams defines the DeSo parameters for the mainnet. var DeSoMainnetParams = DeSoParams{ NetworkType: NetworkType_MAINNET, - ProtocolVersion: ProtocolVersion1, + ProtocolVersion: ProtocolVersion2, MinProtocolVersion: 1, UserAgent: "Architect", DNSSeeds: []string{ @@ -1480,7 +1477,7 @@ var TestnetForkHeights = ForkHeights{ // DeSoTestnetParams defines the DeSo parameters for the testnet. 
var DeSoTestnetParams = DeSoParams{ NetworkType: NetworkType_TESTNET, - ProtocolVersion: ProtocolVersion0, + ProtocolVersion: ProtocolVersion2, MinProtocolVersion: 0, UserAgent: "Architect", DNSSeeds: []string{ From e39ac5a0b4561191c02891b4fdbdb39f52ffbf15 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 2 May 2024 16:03:51 -0400 Subject: [PATCH 690/762] Fix GetSnapshotBlockHeightPeriod (#1282) --- lib/server.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/server.go b/lib/server.go index 177804969..69bbf7c8f 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1122,6 +1122,10 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // PoW default value. After the fork height, it's expected to be the value defined in the params. snapshotBlockHeightPeriod := srv.params.GetSnapshotBlockHeightPeriod(bestHeaderHeight, srv.snapshot.SnapshotBlockHeightPeriod) expectedSnapshotHeight := bestHeaderHeight - (bestHeaderHeight % snapshotBlockHeightPeriod) + posSetupForkHeight := uint64(srv.params.ForkHeights.ProofOfStake1StateSetupBlockHeight) + if expectedSnapshotHeight < posSetupForkHeight { + expectedSnapshotHeight = posSetupForkHeight - (posSetupForkHeight % srv.params.DefaultPoWSnapshotBlockHeightPeriod) + } srv.blockchain.snapshot.Migrations.CleanupMigrations(expectedSnapshotHeight) if len(srv.HyperSyncProgress.PrefixProgress) != 0 { From 9ea61c595489b451e82a63d6cb5b7ef53fe76e6b Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 2 May 2024 16:37:44 -0400 Subject: [PATCH 691/762] Encode block for utxo operation bundle as well as utxo operation (#1274) --- lib/state_change_syncer.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 4331460bf..2844b74f5 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -124,7 +124,8 @@ func (stateChangeEntry 
*StateChangeEntry) RawEncodeWithoutMetadata(blockHeight u data = append(data, UintToBuf(blockHeight)...) // Encode the block, only for utxo operations. - if stateChangeEntry.EncoderType == EncoderTypeUtxoOperation { + if stateChangeEntry.EncoderType == EncoderTypeUtxoOperation || + stateChangeEntry.EncoderType == EncoderTypeUtxoOperationBundle { data = append(data, EncodeToBytes(blockHeight, stateChangeEntry.Block)...) } else { // If the encoder type is not a utxo operation, encode a nil value. From a608cc30a678662fcd281d03fc53210d3ff71f69 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 6 May 2024 20:56:01 -0400 Subject: [PATCH 692/762] Fix mempool global params issues (#1284) --- cmd/config.go | 6 ----- cmd/node.go | 3 --- cmd/run.go | 6 ----- integration_testing/tools.go | 3 --- lib/pos_block_producer_test.go | 20 ++++++----------- lib/pos_blockchain_test.go | 5 ++--- lib/pos_fee_estimator_test.go | 5 ++--- lib/pos_mempool.go | 10 ++------- lib/pos_mempool_test.go | 40 ++++++++++++++-------------------- lib/server.go | 6 ----- 10 files changed, 29 insertions(+), 75 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index dadcefb67..c9bc27de2 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -52,9 +52,6 @@ type Config struct { // Mempool MempoolBackupIntervalMillis uint64 - MaxMempoolPosSizeBytes uint64 - MempoolFeeEstimatorNumMempoolBlocks uint64 - MempoolFeeEstimatorNumPastBlocks uint64 MempoolMaxValidationViewConnects uint64 TransactionValidationRefreshIntervalMillis uint64 @@ -132,9 +129,6 @@ func LoadConfig() *Config { // Mempool config.MempoolBackupIntervalMillis = viper.GetUint64("mempool-backup-time-millis") - config.MaxMempoolPosSizeBytes = viper.GetUint64("max-mempool-pos-size-bytes") - config.MempoolFeeEstimatorNumMempoolBlocks = viper.GetUint64("mempool-fee-estimator-num-mempool-blocks") - config.MempoolFeeEstimatorNumPastBlocks = viper.GetUint64("mempool-fee-estimator-num-past-blocks") 
config.MempoolMaxValidationViewConnects = viper.GetUint64("mempool-max-validation-view-connects") config.TransactionValidationRefreshIntervalMillis = viper.GetUint64("transaction-validation-refresh-interval-millis") diff --git a/cmd/node.go b/cmd/node.go index 29943e397..43453ca01 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -292,10 +292,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { node.Config.StateChangeDir, node.Config.HypersyncMaxQueueSize, blsKeystore, - node.Config.MaxMempoolPosSizeBytes, node.Config.MempoolBackupIntervalMillis, - node.Config.MempoolFeeEstimatorNumMempoolBlocks, - node.Config.MempoolFeeEstimatorNumPastBlocks, node.Config.MempoolMaxValidationViewConnects, node.Config.TransactionValidationRefreshIntervalMillis, node.Config.StateSyncerMempoolTxnSyncLimit, diff --git a/cmd/run.go b/cmd/run.go index 659901824..fed3e4c6d 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -98,12 +98,6 @@ func SetupRunFlags(cmd *cobra.Command) { cmd.PersistentFlags().Uint64("mempool-backup-time-millis", 30000, "The frequency in milliseconds with which the mempool will persist its state to disk. "+ "The default value is 30 seconds, or 30,000 milliseconds.") - cmd.PersistentFlags().Uint64("max-mempool-pos-size-bytes", 3000000000, - "The maximum size of the PoS mempool in bytes. 
The default value is 3GB.") - cmd.PersistentFlags().Uint64("mempool-fee-estimator-num-mempool-blocks", 1, - "The number of future blocks to break the PoS mempool into when estimating txn fee for the next block.") - cmd.PersistentFlags().Uint64("mempool-fee-estimator-num-past-blocks", 50, - "The number of past blocks to use when estimating txn fee for the next block from the PoS mempool.") cmd.PersistentFlags().Uint64("mempool-max-validation-view-connects", 10000, "The maximum number of connects that the PoS mempool transaction validation routine will perform.") cmd.PersistentFlags().Uint64("transaction-validation-refresh-interval-millis", 10, diff --git a/integration_testing/tools.go b/integration_testing/tools.go index 6657f9656..220bf383a 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -95,9 +95,6 @@ func _generateConfig(t *testing.T, config *cmd.Config, port uint32, dataDir stri config.MaxSyncBlockHeight = MaxSyncBlockHeight config.SyncType = lib.NodeSyncTypeBlockSync config.MempoolBackupIntervalMillis = 30000 - config.MaxMempoolPosSizeBytes = 3000000000 - config.MempoolFeeEstimatorNumMempoolBlocks = 1 - config.MempoolFeeEstimatorNumPastBlocks = 50 config.MempoolMaxValidationViewConnects = 10000 config.TransactionValidationRefreshIntervalMillis = 10 diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index c4cf3c3e6..d94d16f6c 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -22,7 +22,6 @@ func TestCreateBlockTemplate(t *testing.T) { m0PubBytes, _, _ := Base58CheckDecode(m0Pub) // Set the frequency of mempool's database backup. 
- maxMempoolPosSizeBytes := uint64(3000000000) mempoolBackupIntervalMillis := uint64(30000) params, db := _posTestBlockchainSetupWithBalances(t, 200000, 200000) @@ -32,8 +31,7 @@ func TestCreateBlockTemplate(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + params, globalParams, latestBlockView, 2, dir, false, mempoolBackupIntervalMillis, nil, 10000, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -99,7 +97,6 @@ func TestCreateBlockWithoutHeader(t *testing.T) { params, db := _posTestBlockchainSetupWithBalances(t, 200000, 200000) params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight = 1 params.DefaultMaxBlockSizeBytesPoS = params.MaxBlockSizeBytesPoW - maxMempoolPosSizeBytes := uint64(3000000000) mempoolBackupIntervalMillis := uint64(30000) latestBlockView := NewUtxoView(db, params, nil, nil, nil) @@ -107,8 +104,7 @@ func TestCreateBlockWithoutHeader(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + params, globalParams, latestBlockView, 2, dir, false, mempoolBackupIntervalMillis, nil, 10000, 100, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -188,7 +184,7 @@ func TestGetBlockTransactions(t *testing.T) { globalParams := _testGetDefaultGlobalParams() feeMin := globalParams.MinimumNetworkFeeNanosPerKB feeMax := uint64(2000) - maxMempoolPosSizeBytes := uint64(3000000000) + globalParams.MempoolMaxSizeBytes = uint64(3000000000) mempoolBackupIntervalMillis := uint64(30000) params, db := _posTestBlockchainSetupWithBalances(t, 200000, m1InitialBalance) @@ -201,8 +197,7 @@ func TestGetBlockTransactions(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, 
false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100000, + params, globalParams, latestBlockView, 2, dir, false, mempoolBackupIntervalMillis, nil, 10000, 100000, )) require.NoError(mempool.Start()) defer mempool.Stop() @@ -275,10 +270,9 @@ func TestGetBlockTransactions(t *testing.T) { // Simulated Transaction Timestamp and afterward, mempool will be queried for the transactions. The transactions should // be returned in the same order as the transaction from getBlockTransactions. testMempool := NewPosMempool() - testMempool.Init( - params, globalParams, latestBlockView, 2, "", true, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100000, - ) + require.NoError(testMempool.Init( + params, globalParams, latestBlockView, 2, "", true, mempoolBackupIntervalMillis, nil, 10000, 100000, + )) require.NoError(testMempool.Start()) defer testMempool.Stop() currentTime := time.Now() diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 38b90ad73..9013d9573 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2841,12 +2841,11 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { miner.Stop() latestBlockView := NewUtxoView(db, params, nil, nil, nil) - maxMempoolPosSizeBytes := uint64(1024 * 1024 * 1000) mempoolBackupIntervalMillis := uint64(30000) mempool := NewPosMempool() require.NoError(t, mempool.Init( - params, _testGetDefaultGlobalParams(), latestBlockView, 11, _dbDirSetup(t), false, maxMempoolPosSizeBytes, - mempoolBackupIntervalMillis, 1, nil, 1, 10000, 100, + params, _testGetDefaultGlobalParams(), latestBlockView, 11, _dbDirSetup(t), false, + mempoolBackupIntervalMillis, nil, 10000, 100, )) require.NoError(t, mempool.Start()) require.True(t, mempool.IsRunning()) diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index b3596e4ae..657b083d9 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -12,7 +12,7 @@ 
func TestFeeEstimator(t *testing.T) { randSource := rand.New(rand.NewSource(2373)) globalParams := _testGetDefaultGlobalParams() - maxMempoolPosSizeBytes := uint64(1e9) + globalParams.MempoolMaxSizeBytes = uint64(1e9) mempoolBackupIntervalMillis := uint64(30000) params, db := _posTestBlockchainSetup(t) @@ -24,8 +24,7 @@ func TestFeeEstimator(t *testing.T) { mempool := NewPosMempool() err := mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 10000, 100, + params, globalParams, latestBlockView, 2, dir, false, mempoolBackupIntervalMillis, nil, 10000, 100, ) require.NoError(t, err) require.NoError(t, mempool.Start()) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index efbd3bcfe..16ad11a4b 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -187,8 +187,6 @@ type PosMempool struct { // latestBlockNode is used to infer the latest block height. The latestBlockNode should be updated whenever a new // block is added to the blockchain via UpdateLatestBlock. latestBlockHeight uint64 - // maxMempoolPosSizeBytes is the maximum aggregate number of bytes of transactions included in the PoS mempool. - maxMempoolPosSizeBytes uint64 // mempoolBackupIntervalMillis is the frequency with which pos mempool persists transactions to storage. 
mempoolBackupIntervalMillis uint64 @@ -236,11 +234,8 @@ func (mp *PosMempool) Init( latestBlockHeight uint64, dir string, inMemoryOnly bool, - maxMempoolPosSizeBytes uint64, mempoolBackupIntervalMillis uint64, - feeEstimatorNumMempoolBlocks uint64, feeEstimatorPastBlocks []*MsgDeSoBlock, - feeEstimatorNumPastBlocks uint64, maxValidationViewConnects uint64, transactionValidationRefreshIntervalMillis uint64, ) error { @@ -263,7 +258,6 @@ func (mp *PosMempool) Init( mp.latestBlockHeight = latestBlockHeight mp.dir = dir mp.inMemoryOnly = inMemoryOnly - mp.maxMempoolPosSizeBytes = maxMempoolPosSizeBytes mp.mempoolBackupIntervalMillis = mempoolBackupIntervalMillis mp.maxValidationViewConnects = maxValidationViewConnects mp.transactionValidationRefreshIntervalMillis = transactionValidationRefreshIntervalMillis @@ -914,11 +908,11 @@ func (mp *PosMempool) validateTransactions() error { // are removed in lowest to highest Fee-Time priority, i.e. opposite way that transactions are ordered in // GetTransactions(). 
func (mp *PosMempool) pruneNoLock() error { - if mp.txnRegister.Size() < mp.maxMempoolPosSizeBytes { + if mp.txnRegister.Size() < mp.globalParams.MempoolMaxSizeBytes { return nil } - prunedTxns, err := mp.txnRegister.PruneToSize(mp.maxMempoolPosSizeBytes) + prunedTxns, err := mp.txnRegister.PruneToSize(mp.globalParams.MempoolMaxSizeBytes) if err != nil { return errors.Wrapf(err, "PosMempool.pruneNoLock: Problem pruning mempool") } diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 1e49ffe58..3771e1269 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -18,13 +18,11 @@ func TestPosMempoolStart(t *testing.T) { params := DeSoTestnetParams globalParams := _testGetDefaultGlobalParams() dir := _dbDirSetup(t) - maxMempoolPosSizeBytes := uint64(3000000000) mempoolBackupIntervalMillis := uint64(30000) mempool := NewPosMempool() require.NoError(mempool.Init( - ¶ms, globalParams, nil, 0, dir, false, maxMempoolPosSizeBytes, - mempoolBackupIntervalMillis, 1, nil, 1, 1000, 100, + ¶ms, globalParams, nil, 0, dir, false, mempoolBackupIntervalMillis, nil, 1000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -41,7 +39,7 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { globalParams := _testGetDefaultGlobalParams() feeMin := globalParams.MinimumNetworkFeeNanosPerKB feeMax := uint64(10000) - maxMempoolPosSizeBytes := uint64(3000000000) + globalParams.MempoolMaxSizeBytes = uint64(3000000000) mempoolBackupIntervalMillis := uint64(30000) params, db := _posTestBlockchainSetup(t) @@ -52,8 +50,7 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 1000, 100, + params, globalParams, latestBlockView, 2, dir, false, mempoolBackupIntervalMillis, nil, 1000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ 
-71,8 +68,9 @@ func TestPosMempoolRestartWithTransactions(t *testing.T) { require.False(mempool.IsRunning()) newPool := NewPosMempool() - require.NoError(newPool.Init(params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, - mempoolBackupIntervalMillis, 1, nil, 1, 1000, 100)) + require.NoError(newPool.Init( + params, globalParams, latestBlockView, 2, dir, false, mempoolBackupIntervalMillis, nil, 1000, 100), + ) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) newPoolTxns := newPool.GetTransactions() @@ -94,7 +92,7 @@ func TestPosMempoolPrune(t *testing.T) { globalParams := _testGetDefaultGlobalParams() feeMin := globalParams.MinimumNetworkFeeNanosPerKB feeMax := uint64(2000) - maxMempoolPosSizeBytes := uint64(500) + globalParams.MempoolMaxSizeBytes = uint64(500) mempoolBackupIntervalMillis := uint64(30000) params, db := _posTestBlockchainSetup(t) @@ -106,8 +104,7 @@ func TestPosMempoolPrune(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 1000, 100, + params, globalParams, latestBlockView, 2, dir, false, mempoolBackupIntervalMillis, nil, 1000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -143,8 +140,7 @@ func TestPosMempoolPrune(t *testing.T) { newPool := NewPosMempool() require.NoError(newPool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 1000, 100, + params, globalParams, latestBlockView, 2, dir, false, mempoolBackupIntervalMillis, nil, 1000, 100, )) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) @@ -187,7 +183,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { globalParams := _testGetDefaultGlobalParams() feeMin := globalParams.MinimumNetworkFeeNanosPerKB feeMax := uint64(2000) - maxMempoolPosSizeBytes := uint64(3000000000) + 
globalParams.MempoolMaxSizeBytes = uint64(3000000000) mempoolBackupIntervalMillis := uint64(30000) params, db := _posTestBlockchainSetup(t) @@ -199,8 +195,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 1000, 100, + params, globalParams, latestBlockView, 2, dir, false, mempoolBackupIntervalMillis, nil, 1000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -231,8 +226,7 @@ func TestPosMempoolUpdateGlobalParams(t *testing.T) { newPool := NewPosMempool() require.NoError(newPool.Init( - params, newGlobalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 1000, 100, + params, newGlobalParams, latestBlockView, 2, dir, false, mempoolBackupIntervalMillis, nil, 1000, 100, )) require.NoError(newPool.Start()) require.True(newPool.IsRunning()) @@ -251,7 +245,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { globalParams := _testGetDefaultGlobalParams() feeMin := globalParams.MinimumNetworkFeeNanosPerKB feeMax := uint64(2000) - maxMempoolPosSizeBytes := uint64(3000000000) + globalParams.MempoolMaxSizeBytes = uint64(3000000000) mempoolBackupIntervalMillis := uint64(30000) params, db := _posTestBlockchainSetup(t) @@ -263,8 +257,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { mempool := NewPosMempool() require.NoError(mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 1000, 100, + params, globalParams, latestBlockView, 2, dir, false, mempoolBackupIntervalMillis, nil, 1000, 100, )) require.NoError(mempool.Start()) require.True(mempool.IsRunning()) @@ -330,7 +323,7 @@ func TestPosMempoolTransactionValidation(t *testing.T) { globalParams := _testGetDefaultGlobalParams() feeMin := 
globalParams.MinimumNetworkFeeNanosPerKB feeMax := uint64(2000) - maxMempoolPosSizeBytes := uint64(3000000000) + globalParams.MempoolMaxSizeBytes = uint64(3000000000) mempoolBackupIntervalMillis := uint64(30000) params, db := _posTestBlockchainSetup(t) @@ -341,8 +334,7 @@ func TestPosMempoolTransactionValidation(t *testing.T) { mempool := NewPosMempool() require.NoError(t, mempool.Init( - params, globalParams, latestBlockView, 2, dir, false, maxMempoolPosSizeBytes, mempoolBackupIntervalMillis, 1, - nil, 1, 100, 10, + params, globalParams, latestBlockView, 2, dir, false, mempoolBackupIntervalMillis, nil, 100, 10, )) require.NoError(t, mempool.Start()) require.True(t, mempool.IsRunning()) diff --git a/lib/server.go b/lib/server.go index 69bbf7c8f..1c74e6fae 100644 --- a/lib/server.go +++ b/lib/server.go @@ -400,10 +400,7 @@ func NewServer( _stateChangeDir string, _hypersyncMaxQueueSize uint32, _blsKeystore *BLSKeystore, - _maxMempoolPosSizeBytes uint64, _mempoolBackupIntervalMillis uint64, - _mempoolFeeEstimatorNumMempoolBlocks uint64, - _mempoolFeeEstimatorNumPastBlocks uint64, _mempoolMaxValidationViewConnects uint64, _transactionValidationRefreshIntervalMillis uint64, _stateSyncerMempoolTxnSyncLimit uint64, @@ -560,11 +557,8 @@ func NewServer( uint64(_chain.blockTip().Height), _mempoolDumpDir, _mempoolDumpDir == "", // If no mempool dump dir is set, then the mempool will be in memory only - _maxMempoolPosSizeBytes, _mempoolBackupIntervalMillis, - _mempoolFeeEstimatorNumMempoolBlocks, []*MsgDeSoBlock{latestBlock}, - _mempoolFeeEstimatorNumPastBlocks, _mempoolMaxValidationViewConnects, _transactionValidationRefreshIntervalMillis, ) From aea3663da043e5a6107df3969940ec7289af12de Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 7 May 2024 09:37:33 -0400 Subject: [PATCH 693/762] Compute growth multiplier once for all staking rewards (#1286) --- lib/pos_staking_rewards.go | 23 +++++++++++++++-------- 1 file changed, 15 
insertions(+), 8 deletions(-) diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index 5ff9e2599..6672f69e6 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -45,6 +45,10 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64 // As long as the scaled interest rate is > 0, the converted float is guaranteed to be non-zero as well. apy := convertAPYBasisPointsToFloat(apyBasisPoints) + // Compute the growth multiplier for the staking rewards. The growth multiplier is computed as: + // e ^ (apy * elapsedTime / 1 year) + growthMultiplier := computeGrowthMultiplier(apy, elapsedFractionOfYear) + // We reward all snapshotted stakes from the current snapshot validator set. This is an O(n) operation // that loops through all of the snapshotted stakes and rewards them one by one. snapshotStakesToReward, err := bav.GetAllSnapshotStakesToReward() @@ -69,7 +73,7 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64 // Compute the staker's portion of the staking reward, and the validator's commission. stakerRewardNanos, validatorCommissionNanos, err := bav.computeStakerRewardAndValidatorCommission( - snapshotStakeEntry, elapsedFractionOfYear, apy, + snapshotStakeEntry, growthMultiplier, ) if err != nil { return nil, errors.Wrapf( @@ -111,8 +115,7 @@ func (bav *UtxoView) DistributeStakingRewardsToSnapshotStakes(blockHeight uint64 func (bav *UtxoView) computeStakerRewardAndValidatorCommission( snapshotStakeEntry *StakeEntry, - elapsedFractionOfYear *big.Float, - apy *big.Float, + growthMultiplier *big.Float, ) ( _stakerRewardNanos uint64, _validatorCommissionNanos uint64, @@ -122,7 +125,7 @@ func (bav *UtxoView) computeStakerRewardAndValidatorCommission( // so we can do the remainder of the math using integer operations. This is the only operation where // we need float math. 
stakerRewardNanos := convertBigFloatToBigInt( - computeStakingReward(snapshotStakeEntry.StakeAmountNanos, elapsedFractionOfYear, apy), + computeStakingReward(snapshotStakeEntry.StakeAmountNanos, growthMultiplier), ) // If the reward is 0, then there's nothing to be done. In practice, the reward should never be < 0 @@ -293,14 +296,18 @@ func computeFractionOfYearAsFloat(nanoSecs int64) *big.Float { return NewFloat().Quo(nanoSecsAsFloat, _nanoSecsPerYearAsFloat) } +func computeGrowthMultiplier(apy *big.Float, elapsedTimeFractionOfYear *big.Float) *big.Float { + growthExponent := NewFloat().Mul(apy, elapsedTimeFractionOfYear) // apy * elapsedTime / 1 year + return BigFloatExp(growthExponent) // e ^ (apy * elapsedTime / 1 year) +} + // computeStakingReward uses float math to compute the compound interest on the stake amounts based on the -// elapsed time since the last staking reward distribution and the APY. +// elapsed time since the last staking reward distribution and the APY. The growthMultiplier is computed as: +// e ^ (apy * elapsedTime / 1 year) // // It produces the result for: stakeAmount * [e ^ (apy * elapsedTime / 1 year) - 1] -func computeStakingReward(stakeAmountNanos *uint256.Int, elapsedFractionOfYear *big.Float, apy *big.Float) *big.Float { +func computeStakingReward(stakeAmountNanos *uint256.Int, growthMultiplier *big.Float) *big.Float { stakeAmountFloat := NewFloat().SetInt(stakeAmountNanos.ToBig()) - growthExponent := NewFloat().Mul(elapsedFractionOfYear, apy) // apy * elapsedTime / 1 year - growthMultiplier := BigFloatExp(growthExponent) // e ^ (apy * elapsedTime / 1 year) finalStakeAmountNanos := NewFloat().Mul(stakeAmountFloat, growthMultiplier) // stakeAmount * [e ^ (apy * elapsedTime / 1 year)] rewardAmountNanos := NewFloat().Sub(finalStakeAmountNanos, stakeAmountFloat) // stakeAmount * [e ^ (apy * elapsedTime / 1 year) - 1] if rewardAmountNanos.Sign() < 0 { From 1fa1d8a37a6eaac3beedc323c717dd5dbd614ffc Mon Sep 17 00:00:00 2001 From: Lazy Nina 
<81658138+lazynina@users.noreply.github.com> Date: Tue, 7 May 2024 09:39:33 -0400 Subject: [PATCH 694/762] LN PoS Review (#1283) --- bls/signature.go | 4 +- bls/signature_test.go | 81 +++++++++++++++ cmd/config.go | 6 +- cmd/run.go | 3 +- collections/bitset/bitset_test.go | 4 + collections/concurrent_map_test.go | 21 +++- collections/map_test.go | 30 ++++++ collections/slice_test.go | 86 ++++++++++++++++ consensus/event_loop_test.go | 158 ++++++++++++++++++++++++++++- test.Dockerfile | 2 +- 10 files changed, 382 insertions(+), 13 deletions(-) create mode 100644 collections/map_test.go diff --git a/bls/signature.go b/bls/signature.go index 143eca3f7..ed6351885 100644 --- a/bls/signature.go +++ b/bls/signature.go @@ -187,7 +187,9 @@ type PublicKey struct { } func (publicKey *PublicKey) loadFlowPublicKey() error { - if publicKey.flowPublicKey == nil && len(publicKey.flowPublicKeyBytes) > 0 { + if publicKey != nil && + publicKey.flowPublicKey == nil && + len(publicKey.flowPublicKeyBytes) > 0 { var err error publicKey.flowPublicKey, err = flowCrypto.DecodePublicKey(signingAlgorithm, publicKey.flowPublicKeyBytes) return err diff --git a/bls/signature_test.go b/bls/signature_test.go index acfd012f5..036692f62 100644 --- a/bls/signature_test.go +++ b/bls/signature_test.go @@ -4,6 +4,7 @@ import ( "bytes" "crypto/rand" "encoding/json" + "strings" "testing" "github.com/stretchr/testify/require" @@ -17,6 +18,10 @@ func TestVerifyingBLSSignatures(t *testing.T) { blsPrivateKey2 := _generateRandomBLSPrivateKey(t) blsPublicKey2 := blsPrivateKey2.PublicKey() + malformedBlsPublicKey := &PublicKey{ + flowPublicKeyBytes: _generateRandomBytes(t, 12), + } + // Test bls.PrivateKey.Sign() and bls.PublicKey.Verify(). // 1. PrivateKey1 signs a random payload. 
randomPayload1 := _generateRandomBytes(t, 256) @@ -43,6 +48,13 @@ func TestVerifyingBLSSignatures(t *testing.T) { isVerified, err = blsPublicKey2.Verify(blsSignature2, randomPayload2) require.NoError(t, err) require.True(t, isVerified) + // 7. Malformed public key fails to verify with an error. + isVerified, err = malformedBlsPublicKey.Verify(blsSignature2, randomPayload2) + require.Error(t, err) + + // Aggregating empty list of signatures fails. + _, err = AggregateSignatures([]*Signature{}) + require.Error(t, err) // Test AggregateSignatures() and VerifyAggregateSignatureSinglePayload(). // 1. PrivateKey1 signs a random payload. @@ -76,6 +88,12 @@ func TestVerifyingBLSSignatures(t *testing.T) { require.NoError(t, err) require.False(t, isVerified) + // VerifyAggregateSignatureSinglePayload fails if a public key is malformed. + isVerified, err = VerifyAggregateSignatureSinglePayload( + []*PublicKey{blsPublicKey1, malformedBlsPublicKey}, aggregateSignature, randomPayload3, + ) + require.Error(t, err) + // Test AggregateSignatures() and VerifyMultiPayloadAggregateSignature() on different payloads. // 1. PrivateKey1 signs a random payload. randomPayload4 := _generateRandomBytes(t, 256) @@ -112,12 +130,29 @@ func TestVerifyingBLSSignatures(t *testing.T) { ) require.NoError(t, err) require.False(t, isVerified) + // 8. Verify the AggregateSignature doesn't work if the number of public keys doesn't match the number of payloads. + isVerified, err = VerifyAggregateSignatureMultiplePayloads( + []*PublicKey{blsPublicKey1}, aggregateSignature, [][]byte{randomPayload4, randomPayload5}, + ) + require.Error(t, err) + + // 9. Verify the AggregateSignature doesn't work if a public key is malformed. + isVerified, err = VerifyAggregateSignatureMultiplePayloads( + []*PublicKey{blsPublicKey1, malformedBlsPublicKey}, aggregateSignature, [][]byte{randomPayload4, randomPayload5}, + ) + require.Error(t, err) // Test bls.PrivateKey.Eq(). 
require.True(t, blsPrivateKey1.Eq(blsPrivateKey1)) require.True(t, blsPrivateKey2.Eq(blsPrivateKey2)) require.False(t, blsPrivateKey1.Eq(blsPrivateKey2)) + // Test bls.PrivateKey.FromSeed + seed := _generateRandomBytes(t, 64) + testBlsPrivateKey, err := (&PrivateKey{}).FromSeed(seed) + require.NoError(t, err) + require.NotNil(t, testBlsPrivateKey) + // Test bls.PrivateKey.ToString() and bls.PrivateKey.FromString(). blsPrivateKeyString := blsPrivateKey1.ToString() copyBLSPrivateKey1, err := (&PrivateKey{}).FromString(blsPrivateKeyString) @@ -141,6 +176,13 @@ func TestVerifyingBLSSignatures(t *testing.T) { require.NoError(t, err) require.True(t, blsPublicKey1.Eq(copyBLSPublicKey1)) + // Test bls.PublicKey.ToAbbreviatedString + abbrString := blsPublicKey1.ToAbbreviatedString() + require.True(t, strings.HasPrefix(abbrString, blsPublicKeyString[:5])) + require.True(t, strings.HasSuffix(abbrString, blsPublicKeyString[len(blsPublicKeyString)-5:])) + _, err = (&PublicKey{}).FromString(abbrString) + require.Error(t, err) + // Test bls.Signature.Eq(). require.True(t, blsSignature1.Eq(blsSignature1)) require.True(t, blsSignature2.Eq(blsSignature2)) @@ -158,6 +200,13 @@ func TestVerifyingBLSSignatures(t *testing.T) { require.NoError(t, err) require.True(t, blsSignature1.Eq(copyBLSSignature)) + // Test bls.Signature.ToAbbreviatedString() + abbrString = blsSignature1.ToAbbreviatedString() + require.True(t, strings.HasPrefix(abbrString, blsSignatureString[:5])) + require.True(t, strings.HasSuffix(abbrString, blsSignatureString[len(blsSignatureString)-5:])) + _, err = (&Signature{}).FromString(abbrString) + require.Error(t, err) + // Test bls.PublicKey.Copy(). blsPublicKey1Copy := blsPublicKey1.Copy() require.True(t, blsPublicKey1.Eq(blsPublicKey1Copy)) @@ -185,6 +234,9 @@ func TestVerifyingBLSSignatures(t *testing.T) { blsPrivateKey, err := (&PrivateKey{}).FromString("") require.NoError(t, err) require.Nil(t, blsPrivateKey) + // FromString malfored formed. 
+ blsPrivateKey, err = (&PrivateKey{}).FromString("malformed") + require.Error(t, err) // Eq() require.False(t, (&PrivateKey{}).Eq(nil)) require.False(t, (&PrivateKey{}).Eq(&PrivateKey{})) @@ -193,6 +245,11 @@ func TestVerifyingBLSSignatures(t *testing.T) { require.False(t, _generateRandomBLSPrivateKey(t).Eq(&PrivateKey{})) require.False(t, _generateRandomBLSPrivateKey(t).Eq(_generateRandomBLSPrivateKey(t))) + var nilPrivateKey *PrivateKey + privKey, err := nilPrivateKey.FromSeed(nil) + require.NoError(t, err) + require.Nil(t, privKey) + // Test nil bls.PublicKey edge cases. // Verify() _, err = (&PublicKey{}).Verify(blsSignature1, randomPayload1) @@ -209,6 +266,8 @@ func TestVerifyingBLSSignatures(t *testing.T) { require.Nil(t, blsPublicKey) // ToString() require.Equal(t, (&PublicKey{}).ToString(), "") + // ToAbbreviatedString() + require.Equal(t, (&PublicKey{}).ToAbbreviatedString(), "") // FromString() blsPublicKey, err = (&PublicKey{}).FromString("") require.NoError(t, err) @@ -222,6 +281,19 @@ func TestVerifyingBLSSignatures(t *testing.T) { require.False(t, _generateRandomBLSPrivateKey(t).PublicKey().Eq(_generateRandomBLSPrivateKey(t).PublicKey())) // Copy() require.Nil(t, (&PublicKey{}).Copy().flowPublicKey) + var nilPublicKey *PublicKey + require.Nil(t, nilPublicKey.Copy()) + // IsEmpty + require.True(t, (&PublicKey{}).IsEmpty()) + + // Test SerializedPublicKey + serializedPublicKey := blsPublicKey1.Serialize() + require.NotNil(t, serializedPublicKey) + require.True(t, len(serializedPublicKey) > 0) + // DeserializePublicKey + deserializedPublicKey, err := serializedPublicKey.Deserialize() + require.NoError(t, err) + require.True(t, blsPublicKey1.Eq(deserializedPublicKey)) // Test nil bls.Signature edge cases. 
// ToBytes() @@ -247,6 +319,15 @@ func TestVerifyingBLSSignatures(t *testing.T) { require.False(t, blsSignature1.Eq(&Signature{})) // Copy() require.Nil(t, (&Signature{}).Copy().flowSignature) + var nilSignature *Signature + require.Nil(t, nilSignature.Copy()) + // FromString with malformed signature. + blsSignature, err = (&Signature{}).FromString("malformed") + require.Error(t, err) + // ToAbbreviatedString + require.Equal(t, (&Signature{}).ToAbbreviatedString(), "") + // IsEmpty + require.True(t, (&Signature{}).IsEmpty()) } func TestJsonMarshalingBLSKeys(t *testing.T) { diff --git a/cmd/config.go b/cmd/config.go index c9bc27de2..ced1afb18 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -187,10 +187,10 @@ func LoadConfig() *Config { if _, err := url.ParseRequestURI(provider); err != nil { glog.Fatalf("Invalid checkpoint syncing provider URL: %v", provider) } - // TODO: do we want to make a request to the checkpoint syncing provider to ensure it's valid? } - // TODO: add default provider here based on network. However, if someone wants to sync w/o checkpoint - // syncing, they should be able to do so. How do we support this? another flag I guess. + // TODO: If someone wants to sync w/o checkpoint syncing, they should be able to do so. + // How do we support this? another flag I guess or they just provide an invalid value + // to the checkpoint-syncing-providers flag. 
if len(config.CheckpointSyncingProviders) == 0 && !config.Regtest { if testnet { config.CheckpointSyncingProviders = []string{lib.DefaultTestnetCheckpointProvider} diff --git a/cmd/run.go b/cmd/run.go index fed3e4c6d..e003ac8dc 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -64,7 +64,7 @@ func SetupRunFlags(cmd *cobra.Command) { "Max sync block height") // Hyper Sync cmd.PersistentFlags().Bool("hypersync", true, "Use hyper sync protocol for faster block syncing") - cmd.PersistentFlags().Bool("force-checksum", true, "When true, the node will panic if the "+ + cmd.PersistentFlags().Bool("force-checksum", false, "When true, the node will panic if the "+ "local state checksum differs from the network checksum reported by its peers.") // Snapshot cmd.PersistentFlags().Uint64("snapshot-block-height-period", 600000, "Set the snapshot epoch period. Snapshots are taken at block heights divisible by the period.") @@ -229,7 +229,6 @@ func SetupRunFlags(cmd *cobra.Command) { "process in the mempool tx state syncer at a time.") // PoS Checkpoint Syncing - // TODO: a default value is a little tricky here since we don't know what network we're using. cmd.PersistentFlags().StringSlice("checkpoint-syncing-providers", []string{}, fmt.Sprintf("A comma-separated list of URLs that "+ "supports the committed tip block info endpoint to be used for checkpoint syncing. 
"+ "If unset, the field will default to %v on mainnet and %v on testnet", diff --git a/collections/bitset/bitset_test.go b/collections/bitset/bitset_test.go index ff24b2014..e7bbca6c8 100644 --- a/collections/bitset/bitset_test.go +++ b/collections/bitset/bitset_test.go @@ -1,6 +1,7 @@ package bitset import ( + "bytes" "testing" "github.com/stretchr/testify/require" @@ -102,6 +103,9 @@ func TestEquality(t *testing.T) { var bitset2 *Bitset require.False(t, bitset1.Eq(bitset2)) + + require.True(t, bytes.Equal(bitset1.ToBytes(), []byte{})) + require.Zero(t, (&Bitset{}).FromBytes(nil).Size()) } // Test one nil and one non-nil bitset diff --git a/collections/concurrent_map_test.go b/collections/concurrent_map_test.go index aac89b2fb..ef8734c05 100644 --- a/collections/concurrent_map_test.go +++ b/collections/concurrent_map_test.go @@ -29,21 +29,32 @@ func TestConcurrentMap(t *testing.T) { delete(control, key) } + // test remove not exists + m.Remove("not exists") + for key, val := range control { if mVal, ok := m.Get(key); !ok || mVal != val { t.Errorf("Expected %d, got %d", val, m.m[key]) } } - // test copy - copy := m.ToMap() + // test Clone + mClone := m.Clone() + for key, val := range control { + if mVal, ok := mClone.Get(key); !ok || mVal != val { + t.Errorf("Expected %d, got %d", val, m.m[key]) + } + } + + // test toMap + mapCopy := m.ToMap() for key, val := range control { - if mVal, ok := copy[key]; !ok || mVal != val { + if mVal, ok := mapCopy[key]; !ok || mVal != val { t.Errorf("Expected %d, got %d", val, m.m[key]) } } - if len(copy) != len(control) { - t.Errorf("Expected %d, got %d", len(control), len(copy)) + if len(mapCopy) != len(control) { + t.Errorf("Expected %d, got %d", len(control), len(mapCopy)) } // test get all diff --git a/collections/map_test.go b/collections/map_test.go new file mode 100644 index 000000000..60e7d3718 --- /dev/null +++ b/collections/map_test.go @@ -0,0 +1,30 @@ +package collections + +import ( + "fmt" + 
"github.com/stretchr/testify/require" + "testing" +) + +func TestMap(t *testing.T) { + control := make(map[string]int) + + for ii := 0; ii < 100; ii++ { + key := fmt.Sprintf("%v", ii) + control[key] = ii + } + + // Test Contains + for ii := 0; ii < 100; ii++ { + require.True(t, MapContains(control, fmt.Sprintf("%v", ii))) + } + + // Make sure Contains doesn't return true for a key that doesn't exist + require.False(t, MapContains(control, "not exists")) + + // Test MapValues + values := MapValues(control) + for ii := 0; ii < 100; ii++ { + require.Contains(t, values, ii) + } +} diff --git a/collections/slice_test.go b/collections/slice_test.go index 480b57aef..e2d129577 100644 --- a/collections/slice_test.go +++ b/collections/slice_test.go @@ -185,3 +185,89 @@ func TestFilter(t *testing.T) { require.Equal(t, []int{1, 2, 3, 4, 5}, result) } } + +func TestContains(t *testing.T) { + slice := []int{-1, -2, -3, -4, -5} + require.False(t, Contains(slice, 0)) + require.True(t, Contains(slice, -1)) +} + +func TestTransform(t *testing.T) { + // Transform all values to their square + transform := func(val int) int { + return val * val + } + + slice := []int{1, 2, 3, 4, 5} + result := Transform(slice, transform) + require.Equal(t, []int{1, 4, 9, 16, 25}, result) +} + +func TestRandomElement(t *testing.T) { + // Test sad path where slice is empty + slice := []int{} + _, err := RandomElement(slice) + require.Error(t, err) + + // Test happy path where slice has elements. 
+ slice = []int{1, 2, 3, 4, 5} + result, err := RandomElement(slice) + require.NoError(t, err) + require.True(t, Contains(slice, result)) +} + +func TestSortStable(t *testing.T) { + // Test sorting a slice of integers + { + slice := []int{1, 5, 4, 3, 2, 1} + sorted := SortStable(slice, func(i, j int) bool { + return i < j + }) + // Make sure sorted is a new slice and slice is not modified + require.Equal(t, []int{1, 1, 2, 3, 4, 5}, sorted) + require.NotEqual(t, slice, sorted) + } + + // Test sorting a struct by a field + { + type testStruct struct { + Value int + Key string + } + slice := []testStruct{ + {Value: 1, Key: "a"}, + {Value: 5, Key: "b"}, + {Value: 4, Key: "c"}, + {Value: 3, Key: "d"}, + {Value: 1, Key: "e"}, + } + sorted := SortStable(slice, func(i, j testStruct) bool { + return i.Value < j.Value + }) + // Make sure sorted is a new slice and slice is not modified + require.Equal(t, []testStruct{ + {Value: 1, Key: "a"}, + {Value: 1, Key: "e"}, + {Value: 3, Key: "d"}, + {Value: 4, Key: "c"}, + {Value: 5, Key: "b"}, + }, sorted) + require.NotEqual(t, slice, sorted) + } +} + +func TestReverse(t *testing.T) { + // Test reversing a slice of integers + { + slice := []int{1, 2, 3, 4, 5} + reversed := Reverse(slice) + require.Equal(t, []int{5, 4, 3, 2, 1}, reversed) + } + + // Test reversing a slice of strings + { + slice := []string{"a", "b", "c", "d", "e"} + reversed := Reverse(slice) + require.Equal(t, []string{"e", "d", "c", "b", "a"}, reversed) + } +} diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 87e86f752..f025047c5 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -1,6 +1,7 @@ package consensus import ( + "strings" "testing" "time" @@ -16,7 +17,11 @@ func TestInit(t *testing.T) { { fc := NewFastHotStuffEventLoop() require.Equal(t, eventLoopStatusNotInitialized, fc.status) - require.NotPanics(t, fc.Stop) // Calling Stop() on an uninitialized instance should be a no-op + require.NotPanics(t, 
fc.Stop) // Calling Stop() on an uninitialized instance should be a no-op + require.NotPanics(t, fc.Start) // Calling Start() on an uninitialized instance should be a no-op + require.False(t, fc.IsInitialized()) + require.False(t, fc.IsRunning()) + require.Equal(t, fc.ToString(), "FastHotStuffEventLoop is not running") } // Test Init() function with invalid block construction interval @@ -145,9 +150,38 @@ func TestInit(t *testing.T) { require.Equal(t, fc.safeBlocks[0].block.GetHeight(), uint64(2)) require.Equal(t, len(fc.safeBlocks[0].validatorList), 2) require.Equal(t, len(fc.safeBlocks[0].validatorLookup), 2) + require.True(t, fc.IsInitialized()) + require.False(t, fc.IsRunning()) + + // Test init on already running event loop. Should error. + fc.Start() + require.True(t, fc.IsInitialized()) + require.True(t, fc.IsRunning()) + require.True(t, strings.HasPrefix(fc.ToString(), "Printing FastHotStuffEventLoop state: ")) + err = fc.Init(100, 101, + genesisBlock.GetQC(), // genesisQC + BlockWithValidatorList{genesisBlock, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetView()+1, + ) + require.Error(t, err) } } +func TestGetEventsAndCurrentView(t *testing.T) { + fc := NewFastHotStuffEventLoop() + genesisBlock := createDummyBlock(2) + err := fc.Init(100, 101, + genesisBlock.GetQC(), // genesisQC + BlockWithValidatorList{genesisBlock, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{genesisBlock, createDummyValidatorList()}}, // safeBlocks + genesisBlock.GetView()+1, + ) + require.NoError(t, err) + require.Len(t, fc.GetEvents(), 0) + require.Equal(t, fc.GetCurrentView(), genesisBlock.GetView()+1) +} + func TestProcessTipBlock(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) @@ -178,6 +212,28 @@ func TestProcessTipBlock(t *testing.T) { // Start the event loop fc.Start() + // Sad path: crankTimerDuration <= 0 + { + err := fc.ProcessTipBlock( + 
BlockWithValidatorList{nil, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks + 0, + oneHourInNanoSecs, + ) + require.Error(t, err) + } + + // Sad path: timeoutTimerDuration <= 0 + { + err := fc.ProcessTipBlock( + BlockWithValidatorList{nil, createDummyValidatorList()}, // tip + []BlockWithValidatorList{{createDummyBlock(2), createDummyValidatorList()}}, // safeBlocks + oneHourInNanoSecs, + 0, + ) + require.Error(t, err) + } + // Test ProcessTipBlock() function with malformed tip block { err := fc.ProcessTipBlock( @@ -296,6 +352,74 @@ func TestProcessTipBlock(t *testing.T) { fc.Stop() } +func TestUpdateSafeBlocks(t *testing.T) { + oneHourInNanoSecs := time.Duration(3600000000000) + + fc := NewFastHotStuffEventLoop() + genesisBlock := createDummyBlock(2) + tipBlock := BlockWithValidatorList{genesisBlock, createDummyValidatorList()} + // Initialize the event loop + { + err := fc.Init( + oneHourInNanoSecs, + oneHourInNanoSecs, + genesisBlock.GetQC(), + tipBlock, + []BlockWithValidatorList{tipBlock}, + tipBlock.Block.GetView()+1, + ) + require.NoError(t, err) + } + + // Test UpdateSafeBlocks() function when event loop is not running + { + tipBlock := BlockWithValidatorList{createDummyBlock(2), createDummyValidatorList()} + err := fc.UpdateSafeBlocks([]BlockWithValidatorList{tipBlock}) + require.Error(t, err) + } + + // Start the event loop + fc.Start() + + // Test UpdateSafeBlocks() function with malformed tip block + { + err := fc.UpdateSafeBlocks( + []BlockWithValidatorList{{nil, createDummyValidatorList()}}, + ) + require.Error(t, err) + } + + // Test UpdateSafeBlocks() function with no blocks. + { + err := fc.UpdateSafeBlocks([]BlockWithValidatorList{}) + require.Error(t, err) + } + + // Test UpdateSafeBlocks() function with a block with a view less than the genesis view. 
+ { + err := fc.UpdateSafeBlocks( + []BlockWithValidatorList{{createDummyBlock(0), createDummyValidatorList()}}, + ) + require.Error(t, err) + } + + // Test UpdateSafeBlocks() function without tip block. + { + err := fc.UpdateSafeBlocks( + []BlockWithValidatorList{{createDummyBlock(3), createDummyValidatorList()}}, + ) + require.Error(t, err) + } + + // Test UpdateSafeBlocks() function with tip block. + { + err := fc.UpdateSafeBlocks( + []BlockWithValidatorList{tipBlock, {createDummyBlock(3), createDummyValidatorList()}}, + ) + require.NoError(t, err) + } +} + func TestAdvanceViewOnTimeout(t *testing.T) { oneHourInNanoSecs := time.Duration(3600000000000) @@ -416,6 +540,12 @@ func TestProcessValidatorVote(t *testing.T) { require.NoError(t, err) } + // Test ProcessValidatorVote when event loop is not running + { + err := fc.ProcessValidatorVote(createDummyVoteMessage(4)) + require.Error(t, err) + } + // Start the event loop fc.Start() @@ -477,6 +607,14 @@ func TestProcessValidatorVote(t *testing.T) { require.Contains(t, err.Error(), "has already timed out for view") } + // Test vote on non-tip block hash. + { + vote := createDummyVoteMessage(4) + vote.blockHash = createDummyBlockHash() + err := fc.ProcessValidatorVote(vote) + require.Error(t, err) + } + // Test happy path { vote := createDummyVoteMessage(4) @@ -516,6 +654,12 @@ func TestProcessValidatorTimeout(t *testing.T) { require.NoError(t, err) } + // Test ProcessValidatorTimeout when event loop is not running + { + err := fc.ProcessValidatorTimeout(createDummyTimeoutMessage(4)) + require.Error(t, err) + } + // Start the event loop fc.Start() @@ -604,6 +748,13 @@ func TestProcessValidatorTimeout(t *testing.T) { require.Contains(t, err.Error(), "Invalid high QC") } + // Test view is not equal to current view - 1. 
+ { + timeout := createTimeoutMessageWithPrivateKeyAndHighQC(3, validatorPrivateKey1, fc.tip.block.(*block).GetQC()) + err := fc.ProcessValidatorTimeout(timeout) + require.Error(t, err) + } + // Test happy path { timeout := createTimeoutMessageWithPrivateKeyAndHighQC(4, validatorPrivateKey1, fc.tip.block.(*block).GetQC()) @@ -764,6 +915,11 @@ func TestVoteQCConstructionSignal(t *testing.T) { vote.publicKey.ToString(): &vote, } + // Test onCrankTimerTaskExecuted on non-running event loop. + fc.onCrankTimerTaskExecuted(fc.currentView) + // Make sure it doesn't set hasCrankTimerRunForCurrentView + require.False(t, fc.hasCrankTimerRunForCurrentView) + // Start the event loop fc.Start() diff --git a/test.Dockerfile b/test.Dockerfile index d34b2d179..f962f2377 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -28,4 +28,4 @@ COPY main.go . # build backend RUN GOOS=linux go build -mod=mod -a -installsuffix cgo -o bin/core main.go -ENTRYPOINT ["go", "test", "-v", "-failfast", "-p", "1", "github.com/deso-protocol/core/bls", "github.com/deso-protocol/core/collections", "github.com/deso-protocol/core/consensus", "github.com/deso-protocol/core/lib"] +ENTRYPOINT ["go", "test", "-v", "-failfast", "-p", "1", "github.com/deso-protocol/core/bls", "github.com/deso-protocol/core/collections", "github.com/deso-protocol/core/collections/bitset", "github.com/deso-protocol/core/consensus", "github.com/deso-protocol/core/lib"] From c4d48b7552563c7640ab17ffaf9ae14a95811442 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 7 May 2024 09:41:45 -0400 Subject: [PATCH 695/762] Replace usage of uint256.NewInt(0).SetUint64(xx) with uint256.NewInt(xx) (#1285) --- consensus/event_loop_test.go | 8 +++---- consensus/integration_test.go | 22 +++++++++---------- consensus/utils.go | 10 ++++----- consensus/utils_test.go | 40 +++++++++++++++++------------------ 4 files changed, 40 insertions(+), 40 deletions(-) diff --git a/consensus/event_loop_test.go 
b/consensus/event_loop_test.go index f025047c5..9353fa4c9 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -878,11 +878,11 @@ func TestVoteQCConstructionSignal(t *testing.T) { validatorList := []Validator{ &validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(70), + stakeAmount: uint256.NewInt(70), }, &validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(30), + stakeAmount: uint256.NewInt(30), }, } @@ -1064,11 +1064,11 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { validatorList := []Validator{ &validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(70), + stakeAmount: uint256.NewInt(70), }, &validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(30), + stakeAmount: uint256.NewInt(30), }, } diff --git a/consensus/integration_test.go b/consensus/integration_test.go index 2f34a9069..1e399ec59 100644 --- a/consensus/integration_test.go +++ b/consensus/integration_test.go @@ -16,10 +16,10 @@ import ( func TestNetworkWithOfflineValidators(t *testing.T) { // Create 4 nodes with equal stake. The network has super-majority of stake online // as long as 3 out of 4 nodes are online. 
- node1 := newValidatorNode(uint256.NewInt(0).SetUint64(50), true) // block proposer - node2 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator - node3 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator - node4 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator + node1 := newValidatorNode(uint256.NewInt(50), true) // block proposer + node2 := newValidatorNode(uint256.NewInt(50), false) // validator + node3 := newValidatorNode(uint256.NewInt(50), false) // validator + node4 := newValidatorNode(uint256.NewInt(50), false) // validator allNodes := []*validatorNode{node1, node2, node3, node4} @@ -104,10 +104,10 @@ func TestNetworkWithOfflineValidators(t *testing.T) { func TestNetworkWithOfflineBlockProposer(t *testing.T) { // Create 4 nodes with equal stake. The network has super-majority of stake online // as long as 3 out of 4 nodes are online. - node1 := newValidatorNode(uint256.NewInt(0).SetUint64(50), true) // block proposer - node2 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator - node3 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator - node4 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator + node1 := newValidatorNode(uint256.NewInt(50), true) // block proposer + node2 := newValidatorNode(uint256.NewInt(50), false) // validator + node3 := newValidatorNode(uint256.NewInt(50), false) // validator + node4 := newValidatorNode(uint256.NewInt(50), false) // validator allNodes := []*validatorNode{node1, node2, node3, node4} @@ -181,9 +181,9 @@ func TestNetworkWithOfflineBlockProposer(t *testing.T) { func TestNetworkRecoveryAfterCatastrophicFailure(t *testing.T) { // Create 3 nodes with equal stake. Node 3 has a super-majority of the the stake // and needs to stay online for the network to remain live. 
- node1 := newValidatorNode(uint256.NewInt(0).SetUint64(10), true) // block proposer - node2 := newValidatorNode(uint256.NewInt(0).SetUint64(10), false) // validator - node3 := newValidatorNode(uint256.NewInt(0).SetUint64(80), false) // validator + node1 := newValidatorNode(uint256.NewInt(10), true) // block proposer + node2 := newValidatorNode(uint256.NewInt(10), false) // validator + node3 := newValidatorNode(uint256.NewInt(80), false) // validator allNodes := []*validatorNode{node1, node2, node3} diff --git a/consensus/utils.go b/consensus/utils.go index 6f0903591..8328c1774 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -396,11 +396,11 @@ func isSuperMajorityStake(stake *uint256.Int, totalStake *uint256.Int) bool { } // Compute 3Cq - honestStakeComponent := uint256.NewInt(0).Mul(stake, uint256.NewInt(0).SetUint64(3)) + honestStakeComponent := uint256.NewInt(0).Mul(stake, uint256.NewInt(3)) // Compute 2N + 1 - totalStakeComponent := uint256.NewInt(0).Mul(totalStake, uint256.NewInt(0).SetUint64(2)) - totalStakeComponent = uint256.NewInt(0).Add(totalStakeComponent, uint256.NewInt(0).SetUint64(1)) + totalStakeComponent := uint256.NewInt(0).Mul(totalStake, uint256.NewInt(2)) + totalStakeComponent = uint256.NewInt(0).Add(totalStakeComponent, uint256.NewInt(1)) // Check if 3Cq >= 2N + 1 return honestStakeComponent.Cmp(totalStakeComponent) >= 0 @@ -454,11 +454,11 @@ func createValidatorListForPrivateKeys(pk1 *bls.PrivateKey, pk2 *bls.PrivateKey) validators := []*validator{ { publicKey: pk1.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(100), + stakeAmount: uint256.NewInt(100), }, { publicKey: pk2.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(50), + stakeAmount: uint256.NewInt(50), }, } // Cast the slice of concrete structs []*validators to a slice of interfaces []Validator diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 25c2290f6..5c4266773 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -27,17 
+27,17 @@ func TestIsValidSuperMajorityQuorumCertificate(t *testing.T) { validator1 := validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(3), + stakeAmount: uint256.NewInt(3), } validator2 := validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(2), + stakeAmount: uint256.NewInt(2), } validator3 := validator{ publicKey: validatorPrivateKey3.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(1), + stakeAmount: uint256.NewInt(1), } validators := []Validator{&validator1, &validator2, &validator3} @@ -110,17 +110,17 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { validator1 := validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(3), + stakeAmount: uint256.NewInt(3), } validator2 := validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(1), + stakeAmount: uint256.NewInt(1), } validator3 := validator{ publicKey: validatorPrivateKey3.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(1), + stakeAmount: uint256.NewInt(1), } validators := []Validator{&validator1, &validator2, &validator3} @@ -357,7 +357,7 @@ func TestIsProperlyFormedValidatorSet(t *testing.T) { // Test nil public key { - validator := validator{publicKey: nil, stakeAmount: uint256.NewInt(0).SetUint64(1)} + validator := validator{publicKey: nil, stakeAmount: uint256.NewInt(1)} require.False(t, isProperlyFormedValidatorSet([]Validator{&validator})) } @@ -375,7 +375,7 @@ func TestIsProperlyFormedValidatorSet(t *testing.T) { // Test valid validator { - validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt(0).SetUint64(1)} + validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt(1)} require.True(t, isProperlyFormedValidatorSet([]Validator{&validator})) } } @@ -481,47 +481,47 @@ func TestIsSuperMajorityStake(t *testing.T) { // 
Test stake amount greater than total stake { - require.False(t, isSuperMajorityStake(uint256.NewInt(0).SetUint64(2), uint256.NewInt(0).SetUint64(1))) + require.False(t, isSuperMajorityStake(uint256.NewInt(2), uint256.NewInt(1))) } // Test stake amount much less than super majority { - stake := uint256.NewInt(0).SetUint64(1) - totalStake := uint256.NewInt(0).SetUint64(1000) + stake := uint256.NewInt(1) + totalStake := uint256.NewInt(1000) require.False(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount less than super majority { - stake := uint256.NewInt(0).SetUint64(666) - totalStake := uint256.NewInt(0).SetUint64(1000) + stake := uint256.NewInt(666) + totalStake := uint256.NewInt(1000) require.False(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount equal to super majority { - stake := uint256.NewInt(0).SetUint64(667) - totalStake := uint256.NewInt(0).SetUint64(1000) + stake := uint256.NewInt(667) + totalStake := uint256.NewInt(1000) require.True(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount greater than super majority { - stake := uint256.NewInt(0).SetUint64(668) - totalStake := uint256.NewInt(0).SetUint64(1000) + stake := uint256.NewInt(668) + totalStake := uint256.NewInt(1000) require.True(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount much greater than super majority { - stake := uint256.NewInt(0).SetUint64(999) - totalStake := uint256.NewInt(0).SetUint64(1000) + stake := uint256.NewInt(999) + totalStake := uint256.NewInt(1000) require.True(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount equal to total stake { - totalStake := uint256.NewInt(0).SetUint64(1000) + totalStake := uint256.NewInt(1000) require.True(t, isSuperMajorityStake(totalStake, totalStake)) } } From 79bbb2978672f1f90030a80048a071be5ed7f903 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 7 May 2024 12:31:32 -0400 Subject: [PATCH 696/762] Use real payload 
for pos validator handshake (#1287) --- lib/bls_keystore.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/bls_keystore.go b/lib/bls_keystore.go index 106348e4f..bcdf4b621 100644 --- a/lib/bls_keystore.go +++ b/lib/bls_keystore.go @@ -125,10 +125,14 @@ func (signer *BLSSigner) SignRandomSeedHash(randomSeedHash *RandomSeedHash) (*bl return SignRandomSeedHash(signer.privateKey, randomSeedHash) } +func getPoSValidatorHandshakePayload(nonceSent uint64, nonceReceived uint64, tstampMicro uint64) []byte { + payload := append(UintToBuf(nonceSent), UintToBuf(nonceReceived)...) + payload = append(payload, UintToBuf(tstampMicro)...) + return payload +} + func (signer *BLSSigner) SignPoSValidatorHandshake(nonceSent uint64, nonceReceived uint64, tstampMicro uint64) (*bls.Signature, error) { - // FIXME - payload := []byte{} - return signer.privateKey.Sign(payload[:]) + return signer.privateKey.Sign(getPoSValidatorHandshakePayload(nonceSent, nonceReceived, tstampMicro)) } ////////////////////////////////////////////////////////// @@ -156,7 +160,6 @@ func BLSVerifyPoSValidatorHandshake( signature *bls.Signature, publicKey *bls.PublicKey, ) (bool, error) { - // FIXME - payload := []byte{} + payload := getPoSValidatorHandshakePayload(nonceSent, nonceReceived, tstampMicro) return _blsVerify(payload[:], signature, publicKey) } From 810a0f9d2e0fd6b9cb7102b640697bf18b2b323a Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 7 May 2024 12:42:31 -0400 Subject: [PATCH 697/762] Use MaxTxnSizeBytesPoS to check max size of txn in ValidateDeSoTxnEncoding (#1288) --- lib/validation_deso_txn.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/validation_deso_txn.go b/lib/validation_deso_txn.go index 1df6cc4df..601f85226 100644 --- a/lib/validation_deso_txn.go +++ b/lib/validation_deso_txn.go @@ -85,14 +85,14 @@ func ValidateDeSoTxnEncoding( } // TODO: Do we want a separate parameter 
for transaction size? Should it be a part of GlobalDeSoParams? - maxBlockSizeBytes := params.MaxBlockSizeBytesPoW + maxTxnSizeBytes := params.MaxBlockSizeBytesPoW / 2 if params.IsPoSBlockHeight(blockHeight) { - maxBlockSizeBytes = MergeGlobalParamEntryDefaults(globalParams, params).MaxBlockSizeBytesPoS + maxTxnSizeBytes = MergeGlobalParamEntryDefaults(globalParams, params).MaxTxnSizeBytesPoS } // Validate transaction size - if uint64(len(txnBytes)) > maxBlockSizeBytes/2 { + if uint64(len(txnBytes)) > maxTxnSizeBytes { return errors.Wrapf(RuleErrorTxnTooBig, "ValidateDeSoTxnEncoding: Transaction size %d is greater than "+ - "MaxBlockSizeBytesPoW/2 %d", len(txnBytes), maxBlockSizeBytes/2) + "max txn size allowed %d", len(txnBytes), maxTxnSizeBytes) } return nil } From b34b7faae6071826bd48a3385bb60c0b14b7f08f Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 8 May 2024 14:32:30 -0400 Subject: [PATCH 698/762] Simplify EstimateFee (#1289) --- .gitignore | 2 ++ lib/pos_fee_estimator.go | 42 ++++------------------------------------ 2 files changed, 6 insertions(+), 38 deletions(-) diff --git a/.gitignore b/.gitignore index 608a45416..065ec7bef 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,5 @@ core local_scripts/* lib/mem.log + +cover.out \ No newline at end of file diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index 1d31bded2..6bb306577 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -335,48 +335,14 @@ func (posFeeEstimator *PoSFeeEstimator) EstimateFee(txn *MsgDeSoTxn, minFeeRateN posFeeEstimator.rwLock.RLock() defer posFeeEstimator.rwLock.RUnlock() - mempoolCongestionFactorBasisPoints := posFeeEstimator.globalParams.MempoolCongestionFactorBasisPoints - pastBlocksCongestionFactorBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksCongestionFactorBasisPoints - - mempoolPriorityPercentileBasisPoints := posFeeEstimator.globalParams.MempoolPriorityPercentileBasisPoints 
- pastBlocksPriorityPercentileBasisPoints := posFeeEstimator.globalParams.MempoolPastBlocksPriorityPercentileBasisPoints - - maxBlockSize := posFeeEstimator.globalParams.SoftMaxBlockSizeBytesPoS + feeRateEstimate := posFeeEstimator.EstimateFeeRateNanosPerKB(minFeeRateNanosPerKB) - mempoolFeeEstimate, err := posFeeEstimator.mempoolFeeEstimate( - txn, - mempoolCongestionFactorBasisPoints, - mempoolPriorityPercentileBasisPoints, - maxBlockSize, - ) + feeEstimate, err := computeFeeGivenTxnAndFeeRate(txn, feeRateEstimate) if err != nil { - return 0, errors.Wrap(err, "PoSFeeEstimator.EstimateFee: Problem computing mempool fee estimate") - } - - pastBlocksFeeEstimate, err := posFeeEstimator.pastBlocksFeeEstimate( - txn, - pastBlocksCongestionFactorBasisPoints, - pastBlocksPriorityPercentileBasisPoints, - maxBlockSize, - ) - if err != nil { - return 0, errors.Wrap(err, "PoSFeeEstimator.EstimateFee: Problem computing past blocks fee estimate") - } - - minFeeRateEstimate, err := computeFeeGivenTxnAndFeeRate(txn, minFeeRateNanosPerKB) - if err != nil { - return 0, errors.Wrap(err, "PoSFeeEstimator.EstimateFee: Problem computing min fee rate estimate") - } - - if minFeeRateEstimate > mempoolFeeEstimate && minFeeRateEstimate > pastBlocksFeeEstimate { - return minFeeRateEstimate, nil - } - - if mempoolFeeEstimate < pastBlocksFeeEstimate { - return pastBlocksFeeEstimate, nil + return 0, errors.Wrap(err, "PoSFeeEstimator.EstimateFee: Problem computing fee") } - return mempoolFeeEstimate, nil + return feeEstimate, nil } // pastBlocksFeeEstimate estimates the fee in nanos for the provided transaction using the From c76c888c75d8e0470f65c282bca335a2b866b606 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 8 May 2024 14:34:58 -0400 Subject: [PATCH 699/762] enhanced cached blocks in pos fee estimator (#1290) --- .gitignore | 2 +- lib/pos_fee_estimator.go | 115 +++++++++++++++++++++------------- lib/pos_fee_estimator_test.go | 16 ++--- 
lib/snapshot.go | 23 +++---- 4 files changed, 92 insertions(+), 64 deletions(-) diff --git a/.gitignore b/.gitignore index 065ec7bef..43d06fa60 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,4 @@ core local_scripts/* lib/mem.log -cover.out \ No newline at end of file +cover.out diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index 6bb306577..ced226b92 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -34,12 +34,18 @@ type PoSFeeEstimator struct { pastBlocksTransactionRegister *TransactionRegister // cachedBlocks is a cache of the past blocks that we use to estimate fees. This is used to // avoid having to recompute the fee buckets for all the past blocks every time we want to - // estimate a fee. - cachedBlocks []*MsgDeSoBlock + // estimate a fee. We only keep the most recent numPastBlocks blocks in this cache. + cachedBlocks []*CachedBlock // rwLock is a read-write lock that protects the PoSFeeEstimator from concurrent access. rwLock *sync.RWMutex } +type CachedBlock struct { + Block *MsgDeSoBlock + Hash BlockHash + Txns []*MempoolTx +} + func NewPoSFeeEstimator() *PoSFeeEstimator { return &PoSFeeEstimator{} } @@ -76,8 +82,17 @@ func (posFeeEstimator *PoSFeeEstimator) Init( posFeeEstimator.pastBlocksTransactionRegister = NewTransactionRegister() posFeeEstimator.pastBlocksTransactionRegister.Init(globalParams.Copy()) + pastCachedBlocks := make([]*CachedBlock, len(pastBlocks)) + for ii, block := range pastBlocks { + cachedBlock, err := blockToCachedBlock(block) + if err != nil { + return errors.Wrap(err, "PoSFeeEstimator.Init: error converting block to CachedBlock") + } + pastCachedBlocks[ii] = cachedBlock + } + // Sort the past blocks by height just to be safe. - sortedPastBlocks := posFeeEstimator.cleanUpPastBlocks(pastBlocks) + sortedPastBlocks := posFeeEstimator.cleanUpPastBlocks(pastCachedBlocks) // Add all the txns from the past blocks to the new pastBlocksTransactionRegister. 
for _, block := range sortedPastBlocks { @@ -106,17 +121,49 @@ func (posFeeEstimator *PoSFeeEstimator) AddBlock(block *MsgDeSoBlock) error { posFeeEstimator.rwLock.Lock() defer posFeeEstimator.rwLock.Unlock() - if err := posFeeEstimator.addBlockNoLock(block); err != nil { + cachedBlock, err := blockToCachedBlock(block) + if err != nil { + return errors.Wrap(err, "PoSFeeEstimator.AddBlock: error converting block to CachedBlock") + } + + if err := posFeeEstimator.addBlockNoLock(cachedBlock); err != nil { return errors.Wrap(err, "PoSFeeEstimator.AddBlock: error adding block to PoSFeeEstimator") } return nil } +func blockToCachedBlock(block *MsgDeSoBlock) (*CachedBlock, error) { + // Convert txns to MempoolTx. + txns := make([]*MempoolTx, len(block.Txns)) + for ii, txn := range block.Txns { + mtxn, err := NewMempoolTx(txn, NanoSecondsToTime(block.Header.TstampNanoSecs), block.Header.Height) + if err != nil { + return nil, errors.Wrap(err, "blockToCachedBlock: error creating MempoolTx") + } + txns[ii] = mtxn + } + // Get the block hash + blockHash, err := block.Hash() + if err != nil { + return nil, errors.Wrap(err, "blockToCachedBlock: error computing blockHash") + } + if blockHash == nil { + return nil, errors.New("blockToCachedBlock: blockHash is nil") + } + + return &CachedBlock{ + Block: block, + Hash: *blockHash, + Txns: txns, + }, nil +} + // addBlockNoLock is the same as AddBlock but assumes the caller has already acquired the rwLock. -func (posFeeEstimator *PoSFeeEstimator) addBlockNoLock(block *MsgDeSoBlock) error { +func (posFeeEstimator *PoSFeeEstimator) addBlockNoLock(cachedBlock *CachedBlock) error { + // Create a new slice to house the new past blocks and add the new block to it. 
- newPastBlocks := append(posFeeEstimator.cachedBlocks, block) + newPastBlocks := append(posFeeEstimator.cachedBlocks, cachedBlock) newPastBlocks = posFeeEstimator.cleanUpPastBlocks(newPastBlocks) if err := posFeeEstimator.updatePastBlocksTransactionRegister(newPastBlocks); err != nil { @@ -126,7 +173,7 @@ func (posFeeEstimator *PoSFeeEstimator) addBlockNoLock(block *MsgDeSoBlock) erro return nil } -func (posFeeEstimator *PoSFeeEstimator) updatePastBlocksTransactionRegister(pastBlocks []*MsgDeSoBlock) error { +func (posFeeEstimator *PoSFeeEstimator) updatePastBlocksTransactionRegister(pastBlocks []*CachedBlock) error { // Create a clean transaction register to add the blocks' transactions. newTransactionRegister := NewTransactionRegister() newTransactionRegister.Init(posFeeEstimator.globalParams.Copy()) @@ -147,17 +194,13 @@ func (posFeeEstimator *PoSFeeEstimator) updatePastBlocksTransactionRegister(past // addBlockToTransactionRegister adds all the transactions from the block to the given transaction register. // Should only be called when the rwLock over a TransactionRegister is held. -func addBlockToTransactionRegister(txnRegister *TransactionRegister, block *MsgDeSoBlock) error { +func addBlockToTransactionRegister(txnRegister *TransactionRegister, block *CachedBlock) error { for _, txn := range block.Txns { // We explicitly exclude block reward transactions as they do not have fees. 
- if txn.TxnMeta.GetTxnType() == TxnTypeBlockReward { + if txn.Tx.TxnMeta.GetTxnType() == TxnTypeBlockReward { continue } - mtxn, err := NewMempoolTx(txn, NanoSecondsToTime(block.Header.TstampNanoSecs), block.Header.Height) - if err != nil { - return errors.Wrap(err, "PoSFeeEstimator.addBlockToTransactionRegister: error creating MempoolTx") - } - if err = txnRegister.AddTransaction(mtxn); err != nil { + if err := txnRegister.AddTransaction(txn); err != nil { return errors.Wrap(err, "PoSFeeEstimator.addBlockToTransactionRegister: error adding txn to pastBlocksTransactionRegister") } @@ -187,12 +230,8 @@ func (posFeeEstimator *PoSFeeEstimator) removeBlockNoLock(block *MsgDeSoBlock) e } // Remove the block from the cached blocks, maintaining the relative ordering of all other blocks. - newPastBlocks := collections.Filter(posFeeEstimator.cachedBlocks, func(cachedBlock *MsgDeSoBlock) bool { - cachedBlockHash, err := cachedBlock.Hash() - if err != nil { - return false - } - return !blockHash.IsEqual(cachedBlockHash) + newPastBlocks := collections.Filter(posFeeEstimator.cachedBlocks, func(cachedBlock *CachedBlock) bool { + return !blockHash.IsEqual(&cachedBlock.Hash) }) if err := posFeeEstimator.updatePastBlocksTransactionRegister(newPastBlocks); err != nil { @@ -234,7 +273,7 @@ func (posFeeEstimator *PoSFeeEstimator) UpdateGlobalParams(globalParams *GlobalP } // cleanUpPastBlocks cleans up the input blocks slice, deduping, sorting, and pruning the blocks by height. 
-func (posFeeEstimator *PoSFeeEstimator) cleanUpPastBlocks(blocks []*MsgDeSoBlock) []*MsgDeSoBlock { +func (posFeeEstimator *PoSFeeEstimator) cleanUpPastBlocks(blocks []*CachedBlock) []*CachedBlock { dedupedBlocks := posFeeEstimator.dedupeBlocksByBlockHeight(blocks) sortedBlocks := posFeeEstimator.sortBlocksByBlockHeight(dedupedBlocks) return posFeeEstimator.pruneBlocksToMaxNumPastBlocks(sortedBlocks) @@ -242,41 +281,33 @@ func (posFeeEstimator *PoSFeeEstimator) cleanUpPastBlocks(blocks []*MsgDeSoBlock // dedupeBlocksByBlockHeight deduplicates the blocks by block height. If multiple blocks have the same // height, it keeps the one with the highest view. -func (posFeeEstimator *PoSFeeEstimator) dedupeBlocksByBlockHeight(blocks []*MsgDeSoBlock) []*MsgDeSoBlock { - blocksByBlockHeight := make(map[uint64]*MsgDeSoBlock) +func (posFeeEstimator *PoSFeeEstimator) dedupeBlocksByBlockHeight(blocks []*CachedBlock) []*CachedBlock { + blocksByBlockHeight := make(map[uint64]*CachedBlock) for _, block := range blocks { - existingBlock, hasExistingBlock := blocksByBlockHeight[block.Header.Height] - if !hasExistingBlock || existingBlock.Header.GetView() < block.Header.GetView() { - blocksByBlockHeight[block.Header.Height] = block + existingBlock, hasExistingBlock := blocksByBlockHeight[block.Block.Header.Height] + if !hasExistingBlock || existingBlock.Block.Header.GetView() < block.Block.Header.GetView() { + blocksByBlockHeight[block.Block.Header.Height] = block } } return collections.MapValues(blocksByBlockHeight) } // sortBlocksByBlockHeightAndTstamp sorts the blocks by height. 
-func (posFeeEstimator *PoSFeeEstimator) sortBlocksByBlockHeight(blocks []*MsgDeSoBlock) []*MsgDeSoBlock { +func (posFeeEstimator *PoSFeeEstimator) sortBlocksByBlockHeight(blocks []*CachedBlock) []*CachedBlock { return collections.SortStable(blocks, - func(ii, jj *MsgDeSoBlock) bool { - if ii.Header.Height != jj.Header.Height { - return ii.Header.Height < jj.Header.Height - } - if ii.Header.GetView() != jj.Header.GetView() { - return ii.Header.GetView() < jj.Header.GetView() - } - iiHash, err := ii.Hash() - if iiHash == nil || err != nil { - return false + func(ii, jj *CachedBlock) bool { + if ii.Block.Header.Height != jj.Block.Header.Height { + return ii.Block.Header.Height < jj.Block.Header.Height } - jjHash, err := jj.Hash() - if jjHash == nil || err != nil { - return true + if ii.Block.Header.GetView() != jj.Block.Header.GetView() { + return ii.Block.Header.GetView() < jj.Block.Header.GetView() } - return iiHash.String() < jjHash.String() + return ii.Hash.String() < jj.Hash.String() }) } // pruneBlocksToMaxNumPastBlocks reduces the number of blocks to the numPastBlocks param -func (posFeeEstimator *PoSFeeEstimator) pruneBlocksToMaxNumPastBlocks(blocks []*MsgDeSoBlock) []*MsgDeSoBlock { +func (posFeeEstimator *PoSFeeEstimator) pruneBlocksToMaxNumPastBlocks(blocks []*CachedBlock) []*CachedBlock { numCachedBlocks := uint64(len(blocks)) if numCachedBlocks <= posFeeEstimator.globalParams.MempoolFeeEstimatorNumPastBlocks { return blocks diff --git a/lib/pos_fee_estimator_test.go b/lib/pos_fee_estimator_test.go index 657b083d9..ffb2eb908 100644 --- a/lib/pos_fee_estimator_test.go +++ b/lib/pos_fee_estimator_test.go @@ -161,13 +161,13 @@ func TestFeeEstimator(t *testing.T) { require.Equal(t, estimatedMempoolFeeRate, estimatedPastBlocksFeeRate) // Update the global params - globalParams := _testGetDefaultGlobalParams() + globalParams = _testGetDefaultGlobalParams() globalParams.MempoolCongestionFactorBasisPoints = congestionFactor 
globalParams.MempoolPastBlocksCongestionFactorBasisPoints = congestionFactor globalParams.MempoolPriorityPercentileBasisPoints = priorityPercentileBasisPoints globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = priorityPercentileBasisPoints globalParams.SoftMaxBlockSizeBytesPoS = maxBlockSizeHybrid - posFeeEstimator.UpdateGlobalParams(globalParams) + require.NoError(t, posFeeEstimator.UpdateGlobalParams(globalParams)) // And the hybrid estimator is just the max, but for completeness, we check it. estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0) @@ -213,13 +213,13 @@ func TestFeeEstimator(t *testing.T) { require.Equal(t, estimatedMempoolFeeRate, estimatedPastBlocksFeeRate) // Update the global params - globalParams := _testGetDefaultGlobalParams() + globalParams = _testGetDefaultGlobalParams() globalParams.MempoolCongestionFactorBasisPoints = congestionFactor globalParams.MempoolPastBlocksCongestionFactorBasisPoints = congestionFactor globalParams.MempoolPriorityPercentileBasisPoints = priorityPercentileBasisPoints globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = priorityPercentileBasisPoints globalParams.SoftMaxBlockSizeBytesPoS = maxBlockSizeHybrid - posFeeEstimator.UpdateGlobalParams(globalParams) + require.NoError(t, posFeeEstimator.UpdateGlobalParams(globalParams)) // And the hybrid estimator is just the max, but for completeness, we check it. 
estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0) @@ -265,13 +265,13 @@ func TestFeeEstimator(t *testing.T) { require.Equal(t, estimatedMempoolFeeRate, estimatedPastBlocksFeeRate) // Update the global params - globalParams := _testGetDefaultGlobalParams() + globalParams = _testGetDefaultGlobalParams() globalParams.MempoolCongestionFactorBasisPoints = congestionFactor globalParams.MempoolPastBlocksCongestionFactorBasisPoints = congestionFactor globalParams.MempoolPriorityPercentileBasisPoints = priorityPercentileBasisPoints globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = priorityPercentileBasisPoints globalParams.SoftMaxBlockSizeBytesPoS = maxBlockSizeHybrid - posFeeEstimator.UpdateGlobalParams(globalParams) + require.NoError(t, posFeeEstimator.UpdateGlobalParams(globalParams)) // And the hybrid estimator is just the max, but for completeness, we check it. estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0) @@ -317,13 +317,13 @@ func TestFeeEstimator(t *testing.T) { require.Equal(t, estimatedMempoolFeeRate, estimatedPastBlocksFeeRate) // Update the global params - globalParams := _testGetDefaultGlobalParams() + globalParams = _testGetDefaultGlobalParams() globalParams.MempoolCongestionFactorBasisPoints = congestionFactor globalParams.MempoolPastBlocksCongestionFactorBasisPoints = congestionFactor globalParams.MempoolPriorityPercentileBasisPoints = priorityPercentileBasisPoints globalParams.MempoolPastBlocksPriorityPercentileBasisPoints = priorityPercentileBasisPoints globalParams.SoftMaxBlockSizeBytesPoS = maxBlockSizeHybrid - posFeeEstimator.UpdateGlobalParams(globalParams) + require.NoError(t, posFeeEstimator.UpdateGlobalParams(globalParams)) // And the hybrid estimator is just the max, but for completeness, we check it. 
estimatedHybridFee, err = posFeeEstimator.EstimateFee(txn, 0) diff --git a/lib/snapshot.go b/lib/snapshot.go index 3ddeb7244..a9399a25a 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -453,9 +453,6 @@ func (snap *Snapshot) Run() { for { operation := snap.OperationChannel.DequeueOperationStateless() switch operation.operationType { - case SnapshotOperationFlush: - glog.V(2).Infof("Snapshot.Run: Flushing ancestral records with counter") - snap.FlushAncestralRecords() case SnapshotOperationProcessBlock: glog.V(2).Infof("Snapshot.Run: Getting into the process block with height (%v)", @@ -497,6 +494,9 @@ func (snap *Snapshot) Run() { snap.OperationChannel.FinishOperation() snap.updateWaitGroup.Done() return + default: + glog.Errorf("Snapshot.Run: Unknown operation type (%v)", operation.operationType) + return } snap.OperationChannel.FinishOperation() } @@ -540,9 +540,7 @@ func (snap *Snapshot) StartAncestralRecordsFlush(shouldIncrement bool) { } glog.V(2).Infof("Snapshot.StartAncestralRecordsFlush: Sending counter (%v) to the CounterChannel", snap.AncestralFlushCounter) // We send the flush counter to the counter to indicate that a flush should take place. - snap.OperationChannel.EnqueueOperation(&SnapshotOperation{ - operationType: SnapshotOperationFlush, - }) + snap.FlushAncestralRecords() } func (snap *Snapshot) PersistChecksumAndMigration() error { @@ -1781,19 +1779,18 @@ type SnapshotOperationType uint8 const ( // SnapshotOperationFlush operation enqueues a flush to the ancestral records. - SnapshotOperationFlush SnapshotOperationType = iota // SnapshotOperationProcessBlock operation signals that a new block has been added to the blockchain. - SnapshotOperationProcessBlock + SnapshotOperationProcessBlock = 1 // SnapshotOperationProcessChunk operation is enqueued when we receive a snapshot chunk during syncing. 
- SnapshotOperationProcessChunk + SnapshotOperationProcessChunk = 2 // SnapshotOperationChecksumAdd operation is enqueued when we want to add bytes to the state checksum. - SnapshotOperationChecksumAdd + SnapshotOperationChecksumAdd = 3 // SnapshotOperationChecksumRemove operation is enqueued when we want to remove bytes to the state checksum. - SnapshotOperationChecksumRemove + SnapshotOperationChecksumRemove = 4 // SnapshotOperationChecksumPrint is called when we want to print the state checksum. - SnapshotOperationChecksumPrint + SnapshotOperationChecksumPrint = 5 // SnapshotOperationExit is used to quit the snapshot loop - SnapshotOperationExit + SnapshotOperationExit = 6 ) // SnapshotOperation is passed in the snapshot's OperationChannel. From 07fa590627ddd0daac3b1b77f62d1fa51772201f Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 8 May 2024 14:37:44 -0400 Subject: [PATCH 700/762] Revert snapshot.go changes (#1294) --- lib/snapshot.go | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/lib/snapshot.go b/lib/snapshot.go index a9399a25a..3ddeb7244 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -453,6 +453,9 @@ func (snap *Snapshot) Run() { for { operation := snap.OperationChannel.DequeueOperationStateless() switch operation.operationType { + case SnapshotOperationFlush: + glog.V(2).Infof("Snapshot.Run: Flushing ancestral records with counter") + snap.FlushAncestralRecords() case SnapshotOperationProcessBlock: glog.V(2).Infof("Snapshot.Run: Getting into the process block with height (%v)", @@ -494,9 +497,6 @@ func (snap *Snapshot) Run() { snap.OperationChannel.FinishOperation() snap.updateWaitGroup.Done() return - default: - glog.Errorf("Snapshot.Run: Unknown operation type (%v)", operation.operationType) - return } snap.OperationChannel.FinishOperation() } @@ -540,7 +540,9 @@ func (snap *Snapshot) StartAncestralRecordsFlush(shouldIncrement bool) { } 
glog.V(2).Infof("Snapshot.StartAncestralRecordsFlush: Sending counter (%v) to the CounterChannel", snap.AncestralFlushCounter) // We send the flush counter to the counter to indicate that a flush should take place. - snap.FlushAncestralRecords() + snap.OperationChannel.EnqueueOperation(&SnapshotOperation{ + operationType: SnapshotOperationFlush, + }) } func (snap *Snapshot) PersistChecksumAndMigration() error { @@ -1779,18 +1781,19 @@ type SnapshotOperationType uint8 const ( // SnapshotOperationFlush operation enqueues a flush to the ancestral records. + SnapshotOperationFlush SnapshotOperationType = iota // SnapshotOperationProcessBlock operation signals that a new block has been added to the blockchain. - SnapshotOperationProcessBlock = 1 + SnapshotOperationProcessBlock // SnapshotOperationProcessChunk operation is enqueued when we receive a snapshot chunk during syncing. - SnapshotOperationProcessChunk = 2 + SnapshotOperationProcessChunk // SnapshotOperationChecksumAdd operation is enqueued when we want to add bytes to the state checksum. - SnapshotOperationChecksumAdd = 3 + SnapshotOperationChecksumAdd // SnapshotOperationChecksumRemove operation is enqueued when we want to remove bytes to the state checksum. - SnapshotOperationChecksumRemove = 4 + SnapshotOperationChecksumRemove // SnapshotOperationChecksumPrint is called when we want to print the state checksum. - SnapshotOperationChecksumPrint = 5 + SnapshotOperationChecksumPrint // SnapshotOperationExit is used to quit the snapshot loop - SnapshotOperationExit = 6 + SnapshotOperationExit ) // SnapshotOperation is passed in the snapshot's OperationChannel. 
From d83d595858183cf29f4c9fbf1fc420616da8adc6 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 8 May 2024 14:41:03 -0400 Subject: [PATCH 701/762] Reemove deprecated flags from run.go (#1291) --- cmd/run.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cmd/run.go b/cmd/run.go index e003ac8dc..bbfba9430 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -88,11 +88,6 @@ func SetupRunFlags(cmd *cobra.Command) { cmd.PersistentFlags().String("pos-validator-seed", "", "A BIP39 seed phrase or seed hex used to generate the "+ "private key of the Proof of Stake validator. Setting this flag automatically makes the node run as a Proof "+ "of Stake Validator.") - cmd.PersistentFlags().Uint64("pos-block-production-interval-milliseconds", 1500, "The interval in milliseconds "+ - "between blocks produced by the Proof of Stake block proposer. The default value is 1.5 seconds, or 1500 milliseconds.") - cmd.PersistentFlags().Uint64("pos-timeout-base-duration-milliseconds", 30000, "The base duration in milliseconds "+ - "that the Proof of Stake block proposer waits before timing out and producing a block. 
The default value is "+ - "30 seconds, or 30000 milliseconds.") // Mempool cmd.PersistentFlags().Uint64("mempool-backup-time-millis", 30000, From c663ffdcb406c508d8244d17b969a1874132b50c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 8 May 2024 19:40:43 -0400 Subject: [PATCH 702/762] Fix PosFeeEstimator Crash on Node Startup (#1295) --- lib/pos_fee_estimator.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index ced226b92..34212a9f6 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -135,19 +135,29 @@ func (posFeeEstimator *PoSFeeEstimator) AddBlock(block *MsgDeSoBlock) error { func blockToCachedBlock(block *MsgDeSoBlock) (*CachedBlock, error) { // Convert txns to MempoolTx. - txns := make([]*MempoolTx, len(block.Txns)) - for ii, txn := range block.Txns { + txns := []*MempoolTx{} + + // Add all non-nil and non-block reward txns to the txns slice. This filters out malformed txns + // txns and the genesis block block reward txn. 
+ for _, txn := range block.Txns { + if txn == nil || txn.TxnMeta.GetTxnType() == TxnTypeBlockReward { + continue + } + mtxn, err := NewMempoolTx(txn, NanoSecondsToTime(block.Header.TstampNanoSecs), block.Header.Height) if err != nil { return nil, errors.Wrap(err, "blockToCachedBlock: error creating MempoolTx") } - txns[ii] = mtxn + + txns = append(txns, mtxn) } + // Get the block hash blockHash, err := block.Hash() if err != nil { return nil, errors.Wrap(err, "blockToCachedBlock: error computing blockHash") } + if blockHash == nil { return nil, errors.New("blockToCachedBlock: blockHash is nil") } @@ -200,6 +210,7 @@ func addBlockToTransactionRegister(txnRegister *TransactionRegister, block *Cach if txn.Tx.TxnMeta.GetTxnType() == TxnTypeBlockReward { continue } + if err := txnRegister.AddTransaction(txn); err != nil { return errors.Wrap(err, "PoSFeeEstimator.addBlockToTransactionRegister: error adding txn to pastBlocksTransactionRegister") From a5a71914c952f39bcd37acd16f23a1f8ad788610 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 8 May 2024 19:46:05 -0400 Subject: [PATCH 703/762] Remove PoS Mempool Assignment in dirtyHackUpdateDbOpts (#1296) --- lib/server.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lib/server.go b/lib/server.go index 1c74e6fae..b3dd2e5bd 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1705,13 +1705,6 @@ func (srv *Server) dirtyHackUpdateDbOpts(opts badger.Options) { srv.mempool.bc.db = srv.blockchain.db srv.mempool.backupUniversalUtxoView.Handle = srv.blockchain.db srv.mempool.universalUtxoView.Handle = srv.blockchain.db - srv.posMempool.db = srv.blockchain.db - if srv.posMempool.readOnlyLatestBlockView != nil { - srv.posMempool.readOnlyLatestBlockView.Handle = srv.blockchain.db - } - if srv.posMempool.augmentedReadOnlyLatestBlockView != nil { - srv.posMempool.augmentedReadOnlyLatestBlockView.Handle = srv.blockchain.db - } // Save the new options to the DB so that we know what 
to use if the node restarts. isPerformanceOptions := DbOptsArePerformance(&opts) From 1cf8f8581e7cc4029b27352018104a61f398c3ad Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 9 May 2024 11:20:14 -0400 Subject: [PATCH 704/762] Revert PosMempool UtoView DB replacements in dirtyHackUpdateDbOpts (#1298) * Revert PosMempool UtoView DB replacements in dirtyHackUpdateDbOpts * Update validateTransactionsReadOnlyLatestBlockView --- lib/server.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/server.go b/lib/server.go index b3dd2e5bd..c18fdb857 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1706,6 +1706,16 @@ func (srv *Server) dirtyHackUpdateDbOpts(opts badger.Options) { srv.mempool.backupUniversalUtxoView.Handle = srv.blockchain.db srv.mempool.universalUtxoView.Handle = srv.blockchain.db + if srv.posMempool.readOnlyLatestBlockView != nil { + srv.posMempool.readOnlyLatestBlockView.Handle = srv.blockchain.db + } + if srv.posMempool.validateTransactionsReadOnlyLatestBlockView != nil { + srv.posMempool.validateTransactionsReadOnlyLatestBlockView.Handle = srv.blockchain.db + } + if srv.posMempool.augmentedReadOnlyLatestBlockView != nil { + srv.posMempool.augmentedReadOnlyLatestBlockView.Handle = srv.blockchain.db + } + // Save the new options to the DB so that we know what to use if the node restarts. 
isPerformanceOptions := DbOptsArePerformance(&opts) err = SaveBoolToFile(GetDbPerformanceOptionsFilePath(filepath.Dir(opts.ValueDir)), isPerformanceOptions) From 1e93743fb035c17ffad69bb91d97448cd6668960 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 10 May 2024 13:23:12 -0400 Subject: [PATCH 705/762] Fix ProcessHeaderPoS Crash On Node Restart While Syncing (#1301) --- lib/pos_blockchain.go | 32 +++++++++++++------------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 7be62f35a..9fdf90ffc 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1331,6 +1331,8 @@ func (bc *Blockchain) getOrCreateBlockNodeFromBlockIndex(block *MsgDeSoBlock) (* return newBlockNode, nil } +// storeBlockInBlockIndex upserts the blocks into the in-memory block index & badger and updates its status to +// StatusBlockStored. It also writes the block to the block index in badger func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader) (*BlockNode, error) { blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}) if err != nil { @@ -1347,12 +1349,6 @@ func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader) (* ) } blockNode.Status |= StatusHeaderValidated - - // TODO: this seems to be slowing down the sync process. - // If the DB update fails, then we should return an error. - if err = bc.upsertBlockNodeToDB(blockNode); err != nil { - return nil, errors.Wrapf(err, "storeValidatedHeaderInBlockIndex: Problem upserting block node to DB") - } return blockNode, nil } @@ -1363,6 +1359,8 @@ func (bc *Blockchain) storeValidateFailedHeaderInBlockIndexWithWrapperError(head return wrapperError } +// storeValidateFailedHeaderInBlockIndex stores the header in the block index only and sets its status to +// StatusHeaderValidateFailed. It does not write the header to the DB. 
func (bc *Blockchain) storeValidateFailedHeaderInBlockIndex(header *MsgDeSoHeader) (*BlockNode, error) { blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}) if err != nil { @@ -1378,15 +1376,11 @@ func (bc *Blockchain) storeValidateFailedHeaderInBlockIndex(header *MsgDeSoHeade "storeValidatedHeaderInBlockIndex: can't set block node to header validate failed after it's already been set to validated", ) } - blockNode.Status |= StatusHeaderValidated - // If the DB update fails, then we should return an error. - if err = bc.upsertBlockNodeToDB(blockNode); err != nil { - return nil, errors.Wrapf(err, "storeValidateFailedHeaderInBlockIndex: Problem upserting block node to DB") - } + blockNode.Status |= StatusHeaderValidateFailed return blockNode, nil } -// storeBlockInBlockIndex upserts the blocks into the in-memory block index and updates its status to +// storeBlockInBlockIndex upserts the blocks into the in-memory block index & badger and updates its status to // StatusBlockStored. It also writes the block to the block index in badger // by calling upsertBlockAndBlockNodeToDB. func (bc *Blockchain) storeBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { @@ -1406,10 +1400,10 @@ func (bc *Blockchain) storeBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, e return blockNode, nil } -// storeValidatedBlockInBlockIndex upserts the blocks into the in-memory block index and updates its status to -// StatusBlockValidated. If it does not have the status StatusBlockStored already, we add that as we will -// store the block in the DB after updating its status. It also writes the block to the block index in badger -// by calling upsertBlockAndBlockNodeToDB. +// storeValidatedBlockInBlockIndex upserts the blocks into the in-memory block index & badger and updates its +// status to StatusBlockValidated. 
If it does not have the status StatusBlockStored already, we add that as we +// will store the block in the DB after updating its status. It also writes the block to the block index in +// badger by calling upsertBlockAndBlockNodeToDB. func (bc *Blockchain) storeValidatedBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block) if err != nil { @@ -1436,9 +1430,9 @@ func (bc *Blockchain) storeValidatedBlockInBlockIndex(block *MsgDeSoBlock) (*Blo return blockNode, nil } -// storeValidateFailedBlockInBlockIndex upserts the blocks into the in-memory block index and updates its status to -// StatusBlockValidateFailed. If it does not have the status StatusBlockStored already, we add that as we will -// store the block in the DB after updating its status. It also writes the block to the block index in badger +// storeValidateFailedBlockInBlockIndex upserts the blocks into the in-memory block index & badger and updates its +// status to StatusBlockValidateFailed. If it does not have the status StatusBlockStored already, we add that as we +// will store the block in the DB after updating its status. It also writes the block to the block index in badger // by calling upsertBlockAndBlockNodeToDB. 
func (bc *Blockchain) storeValidateFailedBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block) From 7ac528c901558315a8a5d528785687be9da78e57 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 10 May 2024 13:33:49 -0400 Subject: [PATCH 706/762] Clear Out The blocksToSend Map When Sending MsgTypeBlockBundle (#1302) --- lib/peer.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/peer.go b/lib/peer.go index e28d69e8c..631a87c91 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -967,6 +967,17 @@ out: pp.blocksToSendMtx.Unlock() } + // If we're sending a block bundle, remove all the blocks from our blocksToSend + // to allow the peer to request more blocks after receiving this bundle. + if msg.GetMsgType() == MsgTypeBlockBundle { + pp.blocksToSendMtx.Lock() + for _, block := range msg.(*MsgDeSoBlockBundle).Blocks { + hash, _ := block.Hash() + delete(pp.blocksToSend, *hash) + } + pp.blocksToSendMtx.Unlock() + } + // Before we send an addr message to the peer, filter out the addresses // the peer is already aware of. 
if msg.GetMsgType() == MsgTypeAddr { From 2b58c526276bdb74a77231848bc342740a855665 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 10 May 2024 19:42:04 -0400 Subject: [PATCH 707/762] hypersync pos fixes (#1297) --- cmd/node.go | 1 - cmd/run.go | 2 +- integration_testing/tools.go | 3 +- lib/block_view.go | 16 +- lib/block_view_flush.go | 3 +- lib/block_view_test.go | 1 - lib/blockchain.go | 17 +- lib/blockchain_test.go | 5 +- lib/db_utils.go | 140 ++++--- lib/db_utils_test.go | 76 ---- lib/peer.go | 20 +- lib/pos_blockchain.go | 2 +- lib/pos_epoch_complete_hook.go | 13 +- lib/server.go | 34 +- lib/snapshot.go | 690 +++++++++++++++++---------------- 15 files changed, 491 insertions(+), 532 deletions(-) diff --git a/cmd/node.go b/cmd/node.go index 43453ca01..fdca7939f 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -380,7 +380,6 @@ func (node *Node) Stop() { if snap != nil { glog.Infof(lib.CLog(lib.Yellow, "Node.Stop: Stopping snapshot...")) snap.Stop() - node.closeDb(snap.SnapshotDb, "snapshot") glog.Infof(lib.CLog(lib.Yellow, "Node.Stop: Snapshot successfully stopped.")) } diff --git a/cmd/run.go b/cmd/run.go index bbfba9430..8c19bfa12 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -67,7 +67,7 @@ func SetupRunFlags(cmd *cobra.Command) { cmd.PersistentFlags().Bool("force-checksum", false, "When true, the node will panic if the "+ "local state checksum differs from the network checksum reported by its peers.") // Snapshot - cmd.PersistentFlags().Uint64("snapshot-block-height-period", 600000, "Set the snapshot epoch period. Snapshots are taken at block heights divisible by the period.") + cmd.PersistentFlags().Uint64("snapshot-block-height-period", lib.DefaultSnapshotEpochPeriodPoS, "Set the snapshot epoch period. 
Snapshots are taken at block heights divisible by the period.") // Archival mode cmd.PersistentFlags().Bool("archival-mode", true, "Download all historical blocks after finishing hypersync.") // Disable encoder migrations diff --git a/integration_testing/tools.go b/integration_testing/tools.go index 220bf383a..ff50d3bf9 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -7,6 +7,7 @@ import ( "os" "reflect" "sort" + "sync" "testing" "time" @@ -442,7 +443,7 @@ func computeNodeStateChecksum(t *testing.T, node *cmd.Node, blockHeight uint64) key := item.Key() err := item.Value(func(value []byte) error { return carrierChecksum.AddOrRemoveBytesWithMigrations(key, value, blockHeight, - nil, true) + nil, &sync.RWMutex{}, true) }) if err != nil { return err diff --git a/lib/block_view.go b/lib/block_view.go index 654221a30..390ac3e7a 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4568,17 +4568,11 @@ func (bav *UtxoView) ConnectBlock( // after connecting all transactions in the block. These operations // are always the last utxo operation in a given block. var blockLevelUtxoOps []*UtxoOperation - if blockHeight >= uint64(bav.Params.ForkHeights.BalanceModelBlockHeight) && - !bav.Params.IsPoSBlockHeight(blockHeight) { - prevNonces := bav.GetTransactorNonceEntriesToDeleteAtBlockHeight(blockHeight) - blockLevelUtxoOps = append(blockLevelUtxoOps, &UtxoOperation{ - Type: OperationTypeDeleteExpiredNonces, - PrevNonceEntries: prevNonces, - }) - for _, prevNonceEntry := range prevNonces { - bav.DeleteTransactorNonceEntry(prevNonceEntry) - } - } + + // TODO: To prevent the state from bloating, we should delete nonces periodically. + // We used to do that here but it was causing badger seeks to be slow due to a bug + // in badger whereby deleting keys slows down seeks. Eventually, we should go back + // to deleting txn nonces if we fix that badger bug or find a workaround. 
// If we're past the PoS cutover, we need to track which validators were active. if blockHeight >= uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 99401d8da..53b07ab31 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -44,10 +44,9 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error // This function prepares the data structures in the snapshot. if bav.Snapshot != nil { bav.Snapshot.PrepareAncestralRecordsFlush() - // When we finish flushing to the main DB, we'll also flush to ancestral records. // This happens concurrently, which is why we have the 2-phase prepare-flush happening for snapshot. - defer bav.Snapshot.StartAncestralRecordsFlush(true) + defer bav.Snapshot.FlushAncestralRecordsWithTxn(txn) } return bav.FlushToDBWithoutAncestralRecordsFlushWithTxn(txn, blockHeight) diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 2b16849b1..3df861ece 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -215,7 +215,6 @@ func (tm *transactionTestMeta) Quit() { if tm.chain.snapshot != nil { tm.chain.snapshot.Stop() - require.NoError(tm.chain.snapshot.SnapshotDb.Close()) } if tm.chain.db != nil { diff --git a/lib/blockchain.go b/lib/blockchain.go index 019fffe67..b13420bdf 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -1550,7 +1550,10 @@ func (bc *Blockchain) isHyperSyncCondition() bool { blockTip := bc.blockTip() headerTip := bc.headerTip() - snapshotBlockHeightPeriod := bc.params.GetSnapshotBlockHeightPeriod(uint64(headerTip.Height), bc.Snapshot().GetSnapshotBlockHeightPeriod()) + snapshotBlockHeightPeriod := bc.params.GetSnapshotBlockHeightPeriod( + uint64(headerTip.Height), + bc.Snapshot().GetSnapshotBlockHeightPeriod(), + ) if uint64(headerTip.Height-blockTip.Height) >= snapshotBlockHeightPeriod { return true } @@ -2415,7 +2418,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock 
*MsgDeSoBlock, verifySignatures err = bc.db.Update(func(txn *badger.Txn) error { if bc.snapshot != nil { bc.snapshot.PrepareAncestralRecordsFlush() - defer bc.snapshot.StartAncestralRecordsFlush(true) + defer bc.snapshot.FlushAncestralRecordsWithTxn(txn) glog.V(2).Infof("ProcessBlock: Preparing snapshot flush") } // Store the new block in the db under the @@ -2425,14 +2428,20 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // set in PutBlockWithTxn. Block rewards are part of the state, and they should be identical to the ones // we've fetched during Hypersync. Is there an edge-case where for some reason they're not identical? Or // somehow ancestral records get corrupted? - if err := PutBlockWithTxn(txn, bc.snapshot, desoBlock, bc.eventManager); err != nil { + if innerErr := PutBlockWithTxn(txn, bc.snapshot, desoBlock, bc.eventManager); innerErr != nil { return errors.Wrapf(err, "ProcessBlock: Problem calling PutBlock") } // Store the new block's node in our node index in the db under the // -> // index. 
- if err := PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, nodeToValidate, false /*bitcoinNodes*/, bc.eventManager); err != nil { + if innerErr := PutHeightHashToNodeInfoWithTxn( + txn, + bc.snapshot, + nodeToValidate, + false, /*bitcoinNodes*/ + bc.eventManager, + ); innerErr != nil { return errors.Wrapf(err, "ProcessBlock: Problem calling PutHeightHashToNodeInfo before validation") } diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index 67b63d9f8..13da5dd31 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -225,7 +225,7 @@ func NewLowDifficultyBlockchainWithParamsAndDb(t *testing.T, params *DeSoParams, var embpg *embeddedpostgres.EmbeddedPostgres var err error - db, dbDir := GetTestBadgerDb() + db, _ := GetTestBadgerDb() if usePostgres { if len(os.Getenv("POSTGRES_URI")) > 0 { glog.Infof("NewLowDifficultyBlockchainWithParamsAndDb: Using Postgres DB from provided POSTGRES_URI") @@ -250,7 +250,7 @@ func NewLowDifficultyBlockchainWithParamsAndDb(t *testing.T, params *DeSoParams, // key have some DeSo var snap *Snapshot if !usePostgres { - snap, err, _ = NewSnapshot(db, dbDir, SnapshotBlockHeightPeriod, false, false, &testParams, false, HypersyncDefaultMaxQueueSize, nil) + snap, err, _ = NewSnapshot(db, SnapshotBlockHeightPeriod, false, false, &testParams, false, HypersyncDefaultMaxQueueSize, nil) if err != nil { log.Fatal(err) } @@ -266,7 +266,6 @@ func NewLowDifficultyBlockchainWithParamsAndDb(t *testing.T, params *DeSoParams, resetTestDeSoEncoder(t) if snap != nil { snap.Stop() - CleanUpBadger(snap.SnapshotDb) } if embpg != nil { err = embpg.Stop() diff --git a/lib/db_utils.go b/lib/db_utils.go index 5acc94602..c598ecbcc 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -597,7 +597,14 @@ type DBPrefixes struct { // Prefix, , -> *BLSPublicKeyPKIDPairEntry PrefixSnapshotValidatorBLSPublicKeyPKIDPairEntry []byte `prefix_id:"[96]" is_state:"true" core_state:"true"` - // NEXT_TAG: 97 + // PrefixHypersyncSnapshotDBPrefix is used to 
store all the prefixes that are used in the hypersync snapshot logic. + // This migrates the old snapshot DB logic into the same badger instance and adds a single byte before the old + // prefix. To get the new prefix, you'll use getMainDbPrefix and then supply one of the prefixes specified at + // the top of snapshot.go. Only the hypersync snapshot logic will use this prefix and should write data here. + // When reading and writing data to this prefixes, please acquire the snapshotDbMutex in the snapshot. + PrefixHypersyncSnapshotDBPrefix []byte `prefix_id:"[97]"` + + // NEXT_TAG: 98 } // DecodeStateKey decodes a state key into a DeSoEncoder type. This is useful for encoders which don't have a stored @@ -1106,7 +1113,7 @@ func DBSetWithTxn(txn *badger.Txn, snap *Snapshot, key []byte, value []byte, eve ancestralValue, getError = DBGetWithTxn(txn, snap, key) // If there is some error with the DB read, other than non-existent key, we return. - if getError != nil && getError != badger.ErrKeyNotFound { + if getError != nil && !errors.Is(getError, badger.ErrKeyNotFound) { return errors.Wrapf(getError, "DBSetWithTxn: problem reading record "+ "from DB with key: %v", key) } @@ -1124,7 +1131,8 @@ func DBSetWithTxn(txn *badger.Txn, snap *Snapshot, key []byte, value []byte, eve keyString := hex.EncodeToString(key) // Update ancestral record structures depending on the existing DB record. - if err := snap.PrepareAncestralRecord(keyString, ancestralValue, getError != badger.ErrKeyNotFound); err != nil { + if err = snap.PrepareAncestralRecord( + keyString, ancestralValue, !errors.Is(getError, badger.ErrKeyNotFound)); err != nil { return errors.Wrapf(err, "DBSetWithTxn: Problem preparing ancestral record") } // Now save the newest record to cache. @@ -1184,14 +1192,12 @@ func DBGetWithTxn(txn *badger.Txn, snap *Snapshot, key []byte) ([]byte, error) { return nil, err } - // If a flush takes place, we don't update cache. It will be updated in DBSetWithTxn. 
+ // TODO: Do we want to update the database cache when performing GETs? I think it would be + // safer to ONLY update the cache when performing SETs. This way, we can avoid the possibility + // of the cache getting out of sync with the database when a badger view transaction is started + // before a badger update transaction begins. if isState { - // Hold the snapshot memory lock just to be e - snap.Status.MemoryLock.Lock() - defer snap.Status.MemoryLock.Unlock() - if !snap.Status.IsFlushingWithoutLock() { - snap.DatabaseCache.Add(keyString, itemData) - } + snap.DatabaseCache.Add(keyString, itemData) } return itemData, nil } @@ -1265,42 +1271,50 @@ func DBDeleteWithTxn(txn *badger.Txn, snap *Snapshot, key []byte, eventManager * // and beginning with the provided startKey. The chunk will have a total size of at least targetBytes. // If the startKey is a valid key in the db, it will be the first entry in the returned dbEntries. // If we have exhausted all entries for a prefix then _isChunkFull will be set as false, and true otherwise, -// when there are more entries in the db at the prefix. +// when there are more entries in the db at the prefix. This function calls DBIteratePrefixKeysWithTxn +// with a new transaction. func DBIteratePrefixKeys(db *badger.DB, prefix []byte, startKey []byte, targetBytes uint32) ( + _dbEntries []*DBEntry, _isChunkFull bool, _err error) { + var dbEntries []*DBEntry + var isChunkFull bool + err := db.View(func(txn *badger.Txn) error { + var innerErr error + dbEntries, isChunkFull, innerErr = DBIteratePrefixKeysWithTxn(txn, prefix, startKey, targetBytes) + return innerErr + }) + return dbEntries, isChunkFull, err +} + +// DBIteratePrefixKeysWithTxn performs the same operation as DBIteratePrefixKeys but with a provided transaction. 
+func DBIteratePrefixKeysWithTxn(txn *badger.Txn, prefix []byte, startKey []byte, targetBytes uint32) ( _dbEntries []*DBEntry, _isChunkFull bool, _err error) { var dbEntries []*DBEntry var totalBytes int var isChunkFull bool - err := db.View(func(txn *badger.Txn) error { - opts := badger.DefaultIteratorOptions + opts := badger.DefaultIteratorOptions - // Iterate over the prefix as long as there are valid keys in the DB. - it := txn.NewIterator(opts) - defer it.Close() - for it.Seek(startKey); it.ValidForPrefix(prefix) && !isChunkFull; it.Next() { - item := it.Item() - key := item.Key() - // Add the key, value pair to our dbEntries list. - err := item.Value(func(value []byte) error { - dbEntries = append(dbEntries, KeyValueToDBEntry(key, value)) - // If total amount of bytes in the dbEntries exceeds the target bytes size, we set the chunk as full. - totalBytes += len(key) + len(value) - if totalBytes > int(targetBytes) && len(dbEntries) > 1 { - isChunkFull = true - } - return nil - }) - if err != nil { - return err + // Iterate over the prefix as long as there are valid keys in the DB. + it := txn.NewIterator(opts) + defer it.Close() + for it.Seek(startKey); it.ValidForPrefix(prefix) && !isChunkFull; it.Next() { + item := it.Item() + key := item.Key() + // Add the key, value pair to our dbEntries list. + err := item.Value(func(value []byte) error { + dbEntries = append(dbEntries, KeyValueToDBEntry(key, value)) + // If total amount of bytes in the dbEntries exceeds the target bytes size, we set the chunk as full. + totalBytes += len(key) + len(value) + if totalBytes > int(targetBytes) && len(dbEntries) > 1 { + isChunkFull = true } + return nil + }) + if err != nil { + // Return false for _isChunkFull to indicate that we shouldn't query this prefix again because + // something is wrong. + return nil, false, err } - return nil - }) - if err != nil { - // Return false for _isChunkFull to indicate that we shouldn't query this prefix again because - // something is wrong. 
- return nil, false, err } return dbEntries, isChunkFull, nil } @@ -5306,30 +5320,31 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, // Set the best hash to the genesis block in the db since its the only node // we're currently aware of. Set it for both the header chain and the block // chain. - if snap != nil { - snap.PrepareAncestralRecordsFlush() - } - - if err := PutBestHash(handle, snap, blockHash, ChainTypeDeSoBlock, eventManager); err != nil { - return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting genesis block hash into db for block chain") - } - // Add the genesis block to the (hash -> block) index. - if err := PutBlock(handle, snap, genesisBlock, eventManager); err != nil { - return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting genesis block into db") - } - // Add the genesis block to the (height, hash -> node info) index in the db. - if err := PutHeightHashToNodeInfo(handle, snap, genesisNode, false /*bitcoinNodes*/, eventManager); err != nil { - return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting (height, hash -> node) in db") - } - if err := DbPutNanosPurchased(handle, snap, params.DeSoNanosPurchasedAtGenesis, eventManager); err != nil { - return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting genesis block hash into db for block chain") - } - if err := DbPutGlobalParamsEntry(handle, snap, 0, InitialGlobalParamsEntry, eventManager); err != nil { - return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting GlobalParamsEntry into db for block chain") - } - - if snap != nil { - snap.StartAncestralRecordsFlush(true) + if err := handle.Update(func(txn *badger.Txn) error { + if snap != nil { + snap.PrepareAncestralRecordsFlush() + defer snap.FlushAncestralRecordsWithTxn(txn) + } + if err := PutBestHashWithTxn(txn, snap, blockHash, ChainTypeDeSoBlock, eventManager); err != nil { + return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting genesis block hash into db for 
block chain") + } + // Add the genesis block to the (hash -> block) index. + if err := PutBlockWithTxn(txn, snap, genesisBlock, eventManager); err != nil { + return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting genesis block into db") + } + // Add the genesis block to the (height, hash -> node info) index in the db. + if err := PutHeightHashToNodeInfoWithTxn(txn, snap, genesisNode, false /*bitcoinNodes*/, eventManager); err != nil { + return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting (height, hash -> node) in db") + } + if err := DbPutNanosPurchasedWithTxn(txn, snap, params.DeSoNanosPurchasedAtGenesis, eventManager); err != nil { + return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting genesis block hash into db for block chain") + } + if err := DbPutGlobalParamsEntryWithTxn(txn, snap, 0, InitialGlobalParamsEntry, eventManager); err != nil { + return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting GlobalParamsEntry into db for block chain") + } + return nil + }); err != nil { + return err } // We apply seed transactions here. This step is useful for setting @@ -5412,8 +5427,7 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, }) } // Flush all the data in the view. 
- err := utxoView.FlushToDb(0) - if err != nil { + if err := utxoView.FlushToDb(0); err != nil { return fmt.Errorf( "InitDbWithDeSoGenesisBlock: Error flushing seed txns to DB: %v", err) } diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index cc9713de2..1dd32fb9a 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -606,82 +606,6 @@ func TestFollows(t *testing.T) { } } -func TestDeleteExpiredTransactorNonceEntries(t *testing.T) { - setBalanceModelBlockHeights(t) - - assert := assert.New(t) - require := require.New(t) - _ = assert - _ = require - - chain, params, db := NewLowDifficultyBlockchain(t) - mempool, miner := NewTestMiner(t, chain, params, true /*isSender*/) - - testMeta := &TestMeta{ - t: t, - chain: chain, - params: params, - mempool: mempool, - miner: miner, - db: db, - } - - _, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, mempool) - require.NoError(err) - _, err = miner.MineAndProcessSingleBlock(0 /*threadIndex*/, mempool) - require.NoError(err) - - _registerOrTransferWithTestMeta(testMeta, "m0", senderPkString, m0Pub, senderPrivString, 7000) - _registerOrTransferWithTestMeta(testMeta, "", senderPkString, paramUpdaterPub, senderPrivString, 1000) - - params.ExtraRegtestParamUpdaterKeys[MakePkMapKey(paramUpdaterPkBytes)] = true - // Param Updater sets the max nonce expiration block buffer to 1. - { - _updateGlobalParamsEntryWithMaxNonceExpirationBlockHeightOffsetAndTestMeta( - testMeta, - 10, - paramUpdaterPub, - paramUpdaterPriv, - -1, - -1, - -1, - -1, - -1, - 1, - ) - // There should be three nonces in the db. - nonceEntries := DbGetAllTransactorNonceEntries(testMeta.db) - require.Equal(3, len(nonceEntries)) - globalParamsEntry := DbGetGlobalParamsEntry(db, chain.snapshot) - require.Equal(globalParamsEntry.MaxNonceExpirationBlockHeightOffset, uint64(1)) - } - - // Now new txns should have a single block expiration buffer. 
- // We'll have m0 sends 10 nanos to m1 and m2 - { - // m0 sends 10 nanos to m1 - _registerOrTransferWithTestMeta(testMeta, "m1", m0Pub, m1Pub, m0Priv, 10) - // m0 sends 10 nanos to m2 - _registerOrTransferWithTestMeta(testMeta, "m2", m0Pub, m2Pub, m0Priv, 10) - // There should be 5 nonce entries in the db. - nonceEntries := DbGetAllTransactorNonceEntries(testMeta.db) - require.Equal(5, len(nonceEntries)) - } - - // Mine two blocks. This should delete the nonce entries for m0's txns. - _, err = miner.MineAndProcessSingleBlock(0 /*threadIndex*/, mempool) - require.NoError(err) - _, err = miner.MineAndProcessSingleBlock(0 /*threadIndex*/, mempool) - require.NoError(err) - - // There should be 3 nonce entries in the db after mining these blocks - { - nonceEntries := DbGetAllTransactorNonceEntries(testMeta.db) - require.Equal(3, len(nonceEntries)) - } - -} - func TestEncodeUint16(t *testing.T) { for _, num := range []uint16{0, 5819, math.MaxUint16} { // Encode to bytes. diff --git a/lib/peer.go b/lib/peer.go index 631a87c91..dc767df75 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -501,7 +501,12 @@ func (pp *Peer) HandleGetSnapshot(msg *MsgDeSoGetSnapshot) { // to the main DB or the ancestral records DB, and we don't want to slow down any of these updates. // Because of that, we will detect whenever concurrent access takes place with the concurrencyFault // variable. If concurrency is detected, we will re-queue the GetSnapshot message. - var concurrencyFault bool + + // 05/09/2024: Leaving the above comment around for posterity. The concurrencyFault variable is no longer + // used because we performant all snapshot operations in a synchronous manner that blocks until the operation + // is completed, so badger is guaranteed to have the snapshot db prefix populated with the most + // up-to-date data based on the most recently committed block. 
For more information and rationale on the change + // to make the snapshot operations synchronous, read the long comment in snapshot.go. var err error snapshotDataMsg := &MsgDeSoSnapshotData{ @@ -509,8 +514,8 @@ func (pp *Peer) HandleGetSnapshot(msg *MsgDeSoGetSnapshot) { SnapshotMetadata: pp.srv.snapshot.CurrentEpochSnapshotMetadata, } if isStateKey(msg.GetPrefix()) { - snapshotDataMsg.SnapshotChunk, snapshotDataMsg.SnapshotChunkFull, concurrencyFault, err = - pp.srv.snapshot.GetSnapshotChunk(pp.srv.blockchain.db, msg.GetPrefix(), msg.SnapshotStartKey) + snapshotDataMsg.SnapshotChunk, snapshotDataMsg.SnapshotChunkFull, err = + pp.srv.snapshot.GetSnapshotChunk(msg.GetPrefix(), msg.SnapshotStartKey) } else { // If the received prefix is not a state key, then it is likely that the peer has newer code. // A peer would be requesting state data for the newly added state prefix, though this node @@ -525,15 +530,6 @@ func (pp *Peer) HandleGetSnapshot(msg *MsgDeSoGetSnapshot) { "snapshot chunk for peer (%v), error (%v)", pp, err) return } - // When concurrencyFault occurs, we will wait a bit and then enqueue the message again. 
- if concurrencyFault { - glog.Errorf("Peer.HandleGetSnapshot: concurrency fault occurred so we enqueue the msg again to peer (%v)", pp) - go func() { - time.Sleep(GetSnapshotTimeout) - pp.AddDeSoMessage(msg, true) - }() - return - } pp.AddDeSoMessage(snapshotDataMsg, false) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 9fdf90ffc..6e8fb1218 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1684,7 +1684,7 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool err = bc.db.Update(func(txn *badger.Txn) error { if bc.snapshot != nil { bc.snapshot.PrepareAncestralRecordsFlush() - defer bc.snapshot.StartAncestralRecordsFlush(true) + defer bc.snapshot.FlushAncestralRecordsWithTxn(txn) glog.V(2).Infof("commitBlockPoS: Preparing snapshot flush") } diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 8e60ddab3..370d9f218 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -118,15 +118,10 @@ func (bav *UtxoView) runEpochCompleteStateTransition(blockHeight uint64, blockTi return nil, errors.Wrapf(err, "runEpochCompleteStateTransition: problem rewarding snapshot stakes: ") } - // Delete expired nonces - prevNonces := bav.GetTransactorNonceEntriesToDeleteAtBlockHeight(blockHeight) - utxoOperations = append(utxoOperations, &UtxoOperation{ - Type: OperationTypeDeleteExpiredNonces, - PrevNonceEntries: prevNonces, - }) - for _, prevNonceEntry := range prevNonces { - bav.DeleteTransactorNonceEntry(prevNonceEntry) - } + // TODO: To prevent the state from bloating, we should delete nonces periodically. + // We used to do that here but it was causing badger seeks to be slow due to a bug + // in badger whereby deleting keys slows down seeks. Eventually, we should go back + // to deleting txn nonces if we fix that badger bug or find a workaround. 
return utxoOperations, nil } diff --git a/lib/server.go b/lib/server.go index c18fdb857..cb043aac7 100644 --- a/lib/server.go +++ b/lib/server.go @@ -56,6 +56,7 @@ type ServerReply struct { type Server struct { cmgr *ConnectionManager blockchain *Blockchain + datadir string snapshot *Snapshot forceChecksum bool mempool *DeSoMempool @@ -428,8 +429,8 @@ func NewServer( shouldRestart := false archivalMode := false if _hyperSync { - _snapshot, err, shouldRestart = NewSnapshot(_db, _dataDir, _snapshotBlockHeightPeriod, - false, false, _params, _disableEncoderMigrations, _hypersyncMaxQueueSize, eventManager) + _snapshot, err, shouldRestart = NewSnapshot(_db, _snapshotBlockHeightPeriod, false, false, _params, + _disableEncoderMigrations, _hypersyncMaxQueueSize, eventManager) if err != nil { panic(err) } @@ -452,6 +453,7 @@ func NewServer( AddrMgr: _desoAddrMgr, params: _params, connectIps: _connectIps, + datadir: _dataDir, } if stateChangeSyncer != nil { @@ -1114,7 +1116,10 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { bestHeaderHeight := uint64(srv.blockchain.headerTip().Height) // The peer's snapshot block height period before the first PoS fork height is expected to be the // PoW default value. After the fork height, it's expected to be the value defined in the params. - snapshotBlockHeightPeriod := srv.params.GetSnapshotBlockHeightPeriod(bestHeaderHeight, srv.snapshot.SnapshotBlockHeightPeriod) + snapshotBlockHeightPeriod := srv.params.GetSnapshotBlockHeightPeriod( + bestHeaderHeight, + srv.snapshot.GetSnapshotBlockHeightPeriod(), + ) expectedSnapshotHeight := bestHeaderHeight - (bestHeaderHeight % snapshotBlockHeightPeriod) posSetupForkHeight := uint64(srv.params.ForkHeights.ProofOfStake1StateSetupBlockHeight) if expectedSnapshotHeight < posSetupForkHeight { @@ -1169,12 +1174,12 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // Initialize the snapshot checksum so that it's reset. 
It got modified during chain initialization // when processing seed transaction from the genesis block. So we need to clear it. srv.snapshot.Checksum.ResetChecksum() - if err := srv.snapshot.Checksum.SaveChecksum(); err != nil { + if err = srv.snapshot.Checksum.SaveChecksum(); err != nil { glog.Errorf("Server._handleHeaderBundle: Problem saving snapshot to database, error (%v)", err) } // Reset the migrations along with the main checksum. srv.snapshot.Migrations.ResetChecksums() - if err := srv.snapshot.Migrations.SaveMigrations(); err != nil { + if err = srv.snapshot.Migrations.SaveMigrations(); err != nil { glog.Errorf("Server._handleHeaderBundle: Problem saving migration checksums to database, error (%v)", err) } @@ -1574,7 +1579,7 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { // and re-opening it with the new options. // This is necessary because the blocksync process syncs indexes with records that are too large for the default // badger options. The large records overflow the default setting value log size and cause the DB to crash. - dbDir := GetBadgerDbPath(srv.snapshot.mainDbDirectory) + dbDir := GetBadgerDbPath(srv.datadir) opts := PerformanceBadgerOptions(dbDir) opts.ValueDir = dbDir srv.dirtyHackUpdateDbOpts(opts) @@ -1630,8 +1635,8 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { // Update the snapshot epoch metadata in the snapshot DB. 
for ii := 0; ii < MetadataRetryCount; ii++ { srv.snapshot.SnapshotDbMutex.Lock() - err = srv.snapshot.SnapshotDb.Update(func(txn *badger.Txn) error { - return txn.Set(_prefixLastEpochMetadata, srv.snapshot.CurrentEpochSnapshotMetadata.ToBytes()) + err = srv.snapshot.mainDb.Update(func(txn *badger.Txn) error { + return txn.Set(getMainDbPrefix(_prefixLastEpochMetadata), srv.snapshot.CurrentEpochSnapshotMetadata.ToBytes()) }) srv.snapshot.SnapshotDbMutex.Unlock() if err != nil { @@ -1691,6 +1696,8 @@ func (srv *Server) dirtyHackUpdateDbOpts(opts badger.Options) { defer srv.posMempool.Unlock() srv.posMempool.augmentedReadOnlyLatestBlockViewMutex.Lock() defer srv.posMempool.augmentedReadOnlyLatestBlockViewMutex.Unlock() + srv.snapshot.SnapshotDbMutex.Lock() + defer srv.snapshot.SnapshotDbMutex.Unlock() // Make sure that a server process doesn't try to access the DB while we're closing and re-opening it. srv.DbMutex.Lock() defer srv.DbMutex.Unlock() @@ -1702,6 +1709,11 @@ func (srv *Server) dirtyHackUpdateDbOpts(opts badger.Options) { } srv.blockchain.db = db srv.snapshot.mainDb = srv.blockchain.db + srv.snapshot.CurrentEpochSnapshotMetadata.mainDb = srv.blockchain.db + srv.snapshot.Status.mainDb = srv.blockchain.db + srv.snapshot.Checksum.mainDb = srv.blockchain.db + srv.snapshot.Migrations.mainDb = srv.blockchain.db + srv.snapshot.OperationChannel.mainDb = srv.blockchain.db srv.mempool.bc.db = srv.blockchain.db srv.mempool.backupUniversalUtxoView.Handle = srv.blockchain.db srv.mempool.universalUtxoView.Handle = srv.blockchain.db @@ -1715,7 +1727,6 @@ func (srv *Server) dirtyHackUpdateDbOpts(opts badger.Options) { if srv.posMempool.augmentedReadOnlyLatestBlockView != nil { srv.posMempool.augmentedReadOnlyLatestBlockView.Handle = srv.blockchain.db } - // Save the new options to the DB so that we know what to use if the node restarts. 
isPerformanceOptions := DbOptsArePerformance(&opts) err = SaveBoolToFile(GetDbPerformanceOptionsFilePath(filepath.Dir(opts.ValueDir)), isPerformanceOptions) @@ -2417,6 +2428,9 @@ func (srv *Server) _handleBlockBundle(pp *Peer, bundle *MsgDeSoBlockBundle) { // gracefully fail. srv._handleBlock(pp, blk, ii == len(bundle.Blocks)-1 /*isLastBlock*/) numLogBlocks := 1000 + if srv.params.IsPoWBlockHeight(blk.Header.Height) { + numLogBlocks = 25 + } if ii%numLogBlocks == 0 { glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlockBundle: Processed block ( %v / %v ) = ( %v / %v ) from Peer %v", bundle.Blocks[ii].Header.Height, @@ -2428,7 +2442,7 @@ func (srv *Server) _handleBlockBundle(pp *Peer, bundle *MsgDeSoBlockBundle) { // Reset the blockProcessingStartTime so that each 1k blocks is timed individually blockProcessingStartTime = time.Now() if ii != 0 { - fmt.Printf("We are processing %v blocks per second\n", float64(1000)/(float64(elapsed)/1e9)) + fmt.Printf("We are processing %v blocks per second\n", float64(numLogBlocks)/(float64(elapsed)/1e9)) } } } diff --git a/lib/snapshot.go b/lib/snapshot.go index 3ddeb7244..518e6c19d 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -6,7 +6,6 @@ import ( "encoding/hex" "fmt" "math" - "path/filepath" "reflect" "runtime" "sort" @@ -25,6 +24,7 @@ import ( "golang.org/x/sync/semaphore" ) +// Note: The prefixes defined below are actually prefixed by the PrefixHypersyncSnapshotDBPrefix in the main db now. var ( // Prefix to store ancestral records. Ancestral records represent historical values of main db entries that were // modified during a snapshot epoch. For instance if we modified some record -> in @@ -55,16 +55,26 @@ var ( _prefixMigrationStatus = []byte{5} ) +// getMainDbPrefix is a helper function that is used to get the main db prefix for a given snapshot db prefix. +// When the snapshot db was moved to the same badger instance as the main db, we made a single prefix to +// house all the snapshot db prefixes.
+func getMainDbPrefix(prefix []byte) []byte { + return append(Prefixes.PrefixHypersyncSnapshotDBPrefix, prefix...) +} + const ( // Default value for limiting the number of items stored in the OperationChannel. Because the snapshot chunks are // 100MB each, this limits the number of operations stored at one time to 2GB HypersyncDefaultMaxQueueSize = 20 + // Default value for snapshot epoch period for PoS + DefaultSnapshotEpochPeriodPoS = 600000 ) // ------------------------------------------------------------------------------------- // Snapshot // ------------------------------------------------------------------------------------- +// Snapshot // Before explaining what a snapshot is and how to create one, it is important to understand // some context on how snapshots came about, and therefore why all this code is even needed // in the first place. @@ -249,13 +259,50 @@ const ( // of the snapshot database by scanning a section of the main db as well as relevant ancestral // records, to combine them into a chunk representing the database at a past snapshot heights. // +// 5/9/2024 Update: All of the above worked very well with the chain running Proof-of-Work (PoW). +// However, the move to Proof-of-Stake has introduced a new challenge. In PoW, the expected time +// between blocks was 5 minutes. With the move to PoS, the expected time between blocks is 1 second. +// This required a move to a synchronous snapshot approach. More details are provided below after +// the explanation of the issues with the asynchronous approach. +// When trying to request a snapshot from a peer that is on PoS, the old approach would prevent +// the peer from computing the snapshot chunk from the DB and would return a concurrency fault for +// one of two reasons. +// 1. Before iterating over the prefixes in the main and snapshot DBs, the snapshot would check if +// there was a flush in process. If there was, it would return a concurrency fault. 
Hitting this +// is quite likely with Proof-of-Stake because blocks are being committed roughly once per second +// and the asynchronous snapshot logic would happen in the background. +// 2. After iterating over the prefixes in the main and snapshot DBs and computing the DBEntry +// objects, the snapshot would check if the main and snapshot db semaphores were the same +// as when the prefix iteration happened. If they were not, it would return a concurrency fault. +// This was required since the iteration was happening in two separate badger instances. Once again +// this was likely to happen because blocks are being committed so quickly. +// +// To circumvent these issues, we moved to a synchronous snapshot approach. The "snapshot DB" referenced +// below is the same prefixes defined earlier in this file, but are in a single prefix designated the +// snapshot DB. +// In order to do make this synchronous approach work, the following major changes were made to the +// snapshot logic: +// 1. The snapshot no longer uses a separate badger instance for the snapshot DB. Instead, it uses +// the same badger instance as the main DB. This allows the snapshot to use the same transaction +// to read and write to the main and snapshot DBs. This is important because it allows the snapshot +// to read the main DB and "snapshot DB" in the same transaction, which prevents the concurrency faults, +// and by writing to the main. +// 2. Flushing ancestral records and processing block logic is now synchronous and blocking, meaning that +// the main db and the "snapshot db" are always in sync. +// 3. When fetching a snapshot, we iterate over both the main db prefix and "snapshot db" prefixes in a +// single badger View transaction. This allows us to ensure that the DBEntry objects we create are +// reflecting a consistent state of the main and snapshot DBs. +// 4. 
Some additional locks were added to ensure proper concurrent access and many of the asynchronous +// operations and attributes were removed. +// // Summarizing, Snapshot serves three main purposes: // - maintaining ancestral records // - managing the state checksum // - serving snapshot chunks to syncing nodes. type Snapshot struct { - // SnapshotDb is used to store snapshot-related records. - SnapshotDb *badger.DB + // We keep this mutex around even though we got rid of the snapshot DB. This lock + // was used to keep concurrent operations in line, so it is still used to keep + // reads and writes to the snapshot DB in order. SnapshotDbMutex *sync.Mutex // AncestralMemory stores information about the ancestral records that should be flushed into the db. // We use a concurrency-safe deque which allows us to push objects to the end of the AncestralMemory @@ -271,8 +318,8 @@ type Snapshot struct { // AncestralFlushCounter is used to offset ancestral records flush to occur only after x blocks. AncestralFlushCounter uint64 - // SnapshotBlockHeightPeriod is the constant height offset between individual snapshot epochs. - SnapshotBlockHeightPeriod uint64 + // snapshotBlockHeightPeriod is the constant height offset between individual snapshot epochs. + snapshotBlockHeightPeriod uint64 // OperationChannel is used to enqueue actions to the main snapshot Run loop. It is used to // schedule actions such as ancestral records updates, checksum computation, snapshot operations. @@ -290,7 +337,7 @@ type Snapshot struct { Migrations *EncoderMigration // CurrentEpochSnapshotMetadata is the information about the currently stored complete snapshot, which - // reflects the state of the blockchain at the largest height divisible by SnapshotBlockHeightPeriod. + // reflects the state of the blockchain at the largest height divisible by snapshotBlockHeightPeriod. // The metadata includes the block height and its block hash of when the snapshot was taken, and the // state checksum. 
CurrentEpochSnapshotMetadata *SnapshotEpochMetadata @@ -305,9 +352,7 @@ type Snapshot struct { // mainDb is the main database that contains consensus state. mainDb *badger.DB - // mainDbDirectory is the directory where the main db is stored. - mainDbDirectory string - params *DeSoParams + params *DeSoParams isTxIndex bool disableChecksum bool @@ -324,22 +369,20 @@ type Snapshot struct { } // NewSnapshot creates a new snapshot instance. -func NewSnapshot(mainDb *badger.DB, mainDbDirectory string, snapshotBlockHeightPeriod uint64, isTxIndex bool, - disableChecksum bool, params *DeSoParams, disableMigrations bool, hypersyncMaxQueueSize uint32, eventManager *EventManager) (_snap *Snapshot, _err error, _shouldRestart bool) { - - // Initialize the ancestral records database - snapshotDirectory := filepath.Join(GetBadgerDbPath(mainDbDirectory), "snapshot") - snapshotOpts := DefaultBadgerOptions(snapshotDirectory) - snapshotOpts.ValueDir = GetBadgerDbPath(snapshotDirectory) - snapshotDb, err := badger.Open(snapshotOpts) - if err != nil { - return nil, errors.Wrapf(err, "NewSnapshot: Problem creating SnapshotDb"), true - } - glog.Infof("Snapshot BadgerDB Dir: %v", snapshotOpts.Dir) - glog.Infof("Snapshot BadgerDB ValueDir: %v", snapshotOpts.ValueDir) - if snapshotBlockHeightPeriod == 0 { - snapshotBlockHeightPeriod = params.DefaultPoWSnapshotBlockHeightPeriod - } +func NewSnapshot( + mainDb *badger.DB, + snapshotBlockHeightPeriod uint64, + isTxIndex bool, + disableChecksum bool, + params *DeSoParams, + disableMigrations bool, + hypersyncMaxQueueSize uint32, + eventManager *EventManager, +) ( + _snap *Snapshot, + _err error, + _shouldRestart bool, +) { var snapshotDbMutex sync.Mutex // If the max queue size is unset, use the default. @@ -349,32 +392,38 @@ func NewSnapshot(mainDb *badger.DB, mainDbDirectory string, snapshotBlockHeightP // Retrieve and initialize the checksum. 
checksum := &StateChecksum{} - if err := checksum.Initialize(snapshotDb, &snapshotDbMutex); err != nil { + if err := checksum.Initialize(mainDb, &snapshotDbMutex); err != nil { return nil, errors.Wrapf(err, "NewSnapshot: Problem reading Checksum"), true } // Retrieve the snapshot epoch metadata from the snapshot db. metadata := &SnapshotEpochMetadata{} - if err := metadata.Initialize(snapshotDb, &snapshotDbMutex); err != nil { + if err := metadata.Initialize(mainDb, &snapshotDbMutex); err != nil { return nil, errors.Wrapf(err, "NewSnapshot: Problem reading SnapshotEpochMetadata"), true } operationChannel := &SnapshotOperationChannel{} - // Initialize the SnapshotOperationChannel. We don't set any of the handlers yet because we don't have a snapshot instance yet. - if err := operationChannel.Initialize(snapshotDb, &snapshotDbMutex, nil, nil); err != nil { + // Initialize the SnapshotOperationChannel. We don't set any of the handlers yet because we don't have a + // snapshot instance yet. + if err := operationChannel.Initialize(mainDb, &snapshotDbMutex, nil, nil); err != nil { return nil, errors.Wrapf(err, "NewSnapshot: Problem reading SnapshotOperationChannel"), true } // Retrieve and initialize the snapshot status. status := &SnapshotStatus{} - if err := status.Initialize(snapshotDb, &snapshotDbMutex); err != nil { + if err := status.Initialize(mainDb, &snapshotDbMutex); err != nil { return nil, errors.Wrapf(err, "NewSnapshot: Problem reading SnapshotStatus"), true } // Retrieve and initialize snapshot migrations. 
migrations := &EncoderMigration{} if err := migrations.Initialize( - mainDb, snapshotDb, &snapshotDbMutex, status.CurrentBlockHeight, params, disableMigrations); err != nil { + mainDb, + &snapshotDbMutex, + status.CurrentBlockHeight, + params, + disableMigrations, + ); err != nil { return nil, errors.Wrapf(err, "NewSnapshot: Problem reading EncoderMigration"), true } @@ -394,10 +443,8 @@ func NewSnapshot(mainDb *badger.DB, mainDbDirectory string, snapshotBlockHeightP // Either way, it means our snapshot was compromised and we need to recompute it as described // in the previous bullet. shouldRestart := false - if operationChannel.StateSemaphore > 0 || status.IsFlushing() { + if operationChannel.StateSemaphore > 0 { operationChannel.StateSemaphore = 0 - status.MainDBSemaphore = 0 - status.AncestralDBSemaphore = 0 glog.Errorf(CLog(Red, fmt.Sprintf("NewSnapshot: Node didn't shut down properly last time. Entering a "+ "recovery mode. The node will roll back to last snapshot epoch block height (%v) and hash (%v), then restart.", metadata.SnapshotBlockHeight, metadata.CurrentEpochBlockHash))) @@ -415,14 +462,22 @@ func NewSnapshot(mainDb *badger.DB, mainDbDirectory string, snapshotBlockHeightP timer := &Timer{} timer.Initialize() + if snapshotBlockHeightPeriod == 0 { + snapshotBlockHeightPeriod = DefaultSnapshotEpochPeriodPoS + } + + if snapshotBlockHeightPeriod != DefaultSnapshotEpochPeriodPoS { + glog.Warning("NewSnapshot: snapshotBlockHeightPeriod is not set to the default value. " + + "This may lead to unexpected behavior.") + } + // Set the snapshot. 
snap := &Snapshot{ - mainDbDirectory: mainDbDirectory, - SnapshotDb: snapshotDb, + mainDb: mainDb, SnapshotDbMutex: &snapshotDbMutex, DatabaseCache: lru.NewKVCache(DatabaseCacheSize), AncestralFlushCounter: uint64(0), - SnapshotBlockHeightPeriod: snapshotBlockHeightPeriod, + snapshotBlockHeightPeriod: snapshotBlockHeightPeriod, OperationChannel: operationChannel, operationQueueSemaphore: make(chan struct{}, hypersyncMaxQueueSize), Checksum: checksum, @@ -454,13 +509,16 @@ func (snap *Snapshot) Run() { operation := snap.OperationChannel.DequeueOperationStateless() switch operation.operationType { case SnapshotOperationFlush: - glog.V(2).Infof("Snapshot.Run: Flushing ancestral records with counter") - snap.FlushAncestralRecords() + // When the snapshot logic was changed to be synchronous and blocking, + // there was no longer a need for the SnapshotOperationFlush operation. + // This is handled by the FlushAncestralRecordsWithTxn function now. + glog.Error("Snapshot.Run: SnapshotOperationFlush is deprecated") case SnapshotOperationProcessBlock: - glog.V(2).Infof("Snapshot.Run: Getting into the process block with height (%v)", - operation.blockNode.Height) - snap.SnapshotProcessBlock(operation.blockNode) + // When the snapshot logic was changed to be synchronous and blocking, + // there was no longer a need for the SnapshotOperationProcessBlock operation. + // This is handled by the FinishProcessBlock function now. 
+ glog.Errorf("Snapshot.Run: SnapshotOperationProcessBlock is deprecated") case SnapshotOperationProcessChunk: glog.V(1).Infof("Snapshot.Run: Number of operations in the operation channel (%v)", @@ -472,13 +530,15 @@ func (snap *Snapshot) Run() { case SnapshotOperationChecksumAdd: if err := snap.Checksum.AddOrRemoveBytesWithMigrations(operation.checksumKey, operation.checksumValue, - snap.Status.CurrentBlockHeight, snap.Migrations.migrationChecksums, true); err != nil { + snap.Status.CurrentBlockHeight, snap.Migrations.migrationChecksums, + snap.Migrations.migrationChecksumLock, true); err != nil { glog.Errorf("Snapshot.Run: Problem adding checksum bytes operation (%v)", operation) } case SnapshotOperationChecksumRemove: if err := snap.Checksum.AddOrRemoveBytesWithMigrations(operation.checksumKey, operation.checksumValue, - snap.Status.CurrentBlockHeight, snap.Migrations.migrationChecksums, false); err != nil { + snap.Status.CurrentBlockHeight, snap.Migrations.migrationChecksums, + snap.Migrations.migrationChecksumLock, false); err != nil { glog.Errorf("Snapshot.Run: Problem removing checksum bytes operation (%v)", operation) } @@ -526,26 +586,16 @@ func (snap *Snapshot) Stop() { // // See comment at the top of this file to understand how to use this function to generate // ancestral records needed to support hypersync. -func (snap *Snapshot) StartAncestralRecordsFlush(shouldIncrement bool) { - // If snapshot is broken then there's nothing to do. - glog.V(2).Infof("Snapshot.StartAncestralRecordsFlush: Initiated the flush, shouldIncrement: (%v)", shouldIncrement) - - // Signal that the main db update has finished by incrementing the main semaphore. - // Also signal that the ancestral db write started by increasing the ancestral semaphore. 
- if shouldIncrement { - snap.Status.MemoryLock.Lock() - snap.Status.IncrementMainDbSemaphoreMemoryLockRequired() - snap.Status.IncrementAncestralDBSemaphoreMemoryLockRequired() - snap.Status.MemoryLock.Unlock() - } - glog.V(2).Infof("Snapshot.StartAncestralRecordsFlush: Sending counter (%v) to the CounterChannel", snap.AncestralFlushCounter) - // We send the flush counter to the counter to indicate that a flush should take place. - snap.OperationChannel.EnqueueOperation(&SnapshotOperation{ - operationType: SnapshotOperationFlush, - }) -} +//func (snap *Snapshot) StartAncestralRecordsFlush() { +// glog.V(2).Infof("Snapshot.StartAncestralRecordsFlush: Flushing Ancestral Records") +// snap.FlushAncestralRecords() +//} func (snap *Snapshot) PersistChecksumAndMigration() error { + // If we haven't hit the first snapshot block height yet, don't bother. + if !snap.shouldPerformSnapshotOperations() { + return nil + } if err := snap.Checksum.SaveChecksum(); err != nil { return errors.Wrapf(err, "PersistChecksumAndMigration: Problem saving checksum") } @@ -572,7 +622,7 @@ func (snap *Snapshot) FinishProcessBlock(blockNode *BlockNode) { // If the block height is divisible by the snapshot block height period, we update the snapshot metadata. // For PoW blocks, until the first PoS fork height, we use the default snapshot block height period of 1000. // For blocks after the first PoS fork height, we use the snapshot block height period defined in the params. 
- snapshotBlockHeightPeriod := snap.params.GetSnapshotBlockHeightPeriod(uint64(blockNode.Height), snap.SnapshotBlockHeightPeriod) + snapshotBlockHeightPeriod := snap.params.GetSnapshotBlockHeightPeriod(uint64(blockNode.Height), snap.GetSnapshotBlockHeightPeriod()) if uint64(blockNode.Height)%snapshotBlockHeightPeriod == 0 && uint64(blockNode.Height) > snap.CurrentEpochSnapshotMetadata.SnapshotBlockHeight { @@ -581,10 +631,7 @@ func (snap *Snapshot) FinishProcessBlock(blockNode *BlockNode) { snap.CurrentEpochSnapshotMetadata.CurrentEpochBlockHash = blockNode.Hash } - snap.OperationChannel.EnqueueOperation(&SnapshotOperation{ - operationType: SnapshotOperationProcessBlock, - blockNode: blockNode, - }) + snap.snapshotProcessBlockNoLock(blockNode) } func (snap *Snapshot) ProcessSnapshotChunk(mainDb *badger.DB, mainDbMutex *deadlock.RWMutex, @@ -617,6 +664,10 @@ func (snap *Snapshot) RemoveChecksumBytes(key []byte, value []byte) { // WaitForAllOperationsToFinish will busy-wait for the snapshot channel to process all // current operations. Spinlocks are undesired but it's the easiest solution in this case, func (snap *Snapshot) WaitForAllOperationsToFinish() { + // If we haven't hit the first snapshot block height yet, don't bother. + if !snap.shouldPerformSnapshotOperations() { + return + } // Define some helper variables so that the node prints nice logs. initialLen := int(snap.OperationChannel.GetStatus()) printMap := make(map[int]bool) @@ -663,18 +714,10 @@ func (snap *Snapshot) WaitForAllOperationsToFinish() { // See comment at the top of this file to understand how to use this function to generate // ancestral records needed to support hypersync. func (snap *Snapshot) PrepareAncestralRecordsFlush() { - // Signal that the main db update has started by holding the MemoryLock and incrementing the MainDBSemaphore. - snap.Status.MemoryLock.Lock() - // If at this point we're flushing to the main DB, i.e. 
the MainDBSemaphore is odd, then it means we're nesting - // calls to PrepareAncestralRecordsFlush() - if snap.Status.IsFlushingToMainDBMemoryLockRequired() { - glog.Fatalf("Nested calls to PrepareAncestralRecordsFlush() " + - "detected. Make sure you call StartAncestralRecordsFlush before " + - "calling PrepareAncestralRecordsFlush() again") - } - snap.Status.IncrementMainDbSemaphoreMemoryLockRequired() - snap.Status.MemoryLock.Unlock() - + // If we haven't hit the first snapshot block height yet, don't bother. + if !snap.shouldPerformSnapshotOperations() { + return + } // Add an entry to the ancestral memory. snap.AncestralFlushCounter += 1 index := snap.AncestralFlushCounter @@ -689,6 +732,10 @@ func (snap *Snapshot) PrepareAncestralRecordsFlush() { // See comment at the top of this file to understand how to use this function to generate // ancestral records needed to support hypersync. func (snap *Snapshot) PrepareAncestralRecord(key string, value []byte, existed bool) error { + // If we haven't hit the first snapshot block height yet, don't bother. + if !snap.shouldPerformSnapshotOperations() { + return nil + } // If the record was not found, we add it to the NonExistingRecordsMap, otherwise to ExistingRecordsMap. index := snap.AncestralFlushCounter @@ -717,10 +764,15 @@ func (snap *Snapshot) PrepareAncestralRecord(key string, value []byte, existed b return nil } -// FlushAncestralRecords updates the ancestral records after a UtxoView flush. -// This function should be called in a go-routine after all UtxoView flushes. -func (snap *Snapshot) FlushAncestralRecords() { - glog.V(2).Infof("Snapshot.StartAncestralRecordsFlush: Initiated the flush") +// FlushAncestralRecordsWithTxn updates the ancestral records after a UtxoView flush. +// This function should be called in the same badger transaction at the end of UtxoView +// flushes. 
+func (snap *Snapshot) FlushAncestralRecordsWithTxn(txn *badger.Txn) { + // If we haven't hit the first snapshot block height yet, don't bother. + if !snap.shouldPerformSnapshotOperations() { + return + } + glog.V(2).Infof("Snapshot.FlushAncestralRecords: Initiated the flush") // Make sure we've finished all checksum computation before we proceed with the flush. // Since this gets called after all snapshot operations are enqueued after the main db @@ -728,9 +780,10 @@ func (snap *Snapshot) FlushAncestralRecords() { // snapshot worker threads running so we want to wait until they're done. err := snap.Checksum.Wait() if err != nil { - glog.Errorf("Snapshot.StartAncestralRecordsFlush: Error while waiting "+ + glog.Errorf("Snapshot.FlushAncestralRecords: Error while waiting "+ "for checksum: (%v)", err) - snap.StartAncestralRecordsFlush(false) + // TODO: do we need this still? + snap.FlushAncestralRecordsWithTxn(txn) return } @@ -739,15 +792,9 @@ func (snap *Snapshot) FlushAncestralRecords() { blockHeight := oldestAncestralCache.blockHeight if blockHeight != snap.CurrentEpochSnapshotMetadata.SnapshotBlockHeight { - glog.Infof("Snapshot.StartAncestralRecordsFlush: AncestralMemory blockHeight (%v) doesn't match current "+ + glog.Infof("Snapshot.FlushAncestralRecords: AncestralMemory blockHeight (%v) doesn't match current "+ "metadata blockHeight (%v), number of operations in operationChannel (%v)", blockHeight, snap.CurrentEpochSnapshotMetadata.SnapshotBlockHeight, len(snap.OperationChannel.OperationChannel)) - // Signal that the ancestral db write has finished by incrementing the semaphore. - snap.Status.MemoryLock.Lock() - snap.Status.IncrementAncestralDBSemaphoreMemoryLockRequired() - snap.Status.MemoryLock.Unlock() - - snap.AncestralMemory.Shift() return } // First sort the keys so that we write to BadgerDB in order. 
@@ -756,96 +803,103 @@ func (snap *Snapshot) FlushAncestralRecords() { recordsKeyList = append(recordsKeyList, kk) } sort.Strings(recordsKeyList) - glog.V(2).Infof("Snapshot.StartAncestralRecordsFlush: Finished sorting map keys") + glog.V(2).Infof("Snapshot.FlushAncestralRecords: Finished sorting map keys") // We launch a new read-write transaction to set the records. snap.SnapshotDbMutex.Lock() - err = snap.SnapshotDb.Update(func(txn *badger.Txn) error { - // This update is called after a change to the main db records and so the current checksum reflects the state of - // the main db. In case we restart the node, we want to be able to retrieve the most recent checksum and resume - // from it when adding new records. Therefore, we save the current checksum bytes in the db. - currentChecksum, err := snap.Checksum.ToBytes() - if err != nil { - return errors.Wrapf(err, "Snapshot.StartAncestralRecordsFlush: Problem getting checksum bytes") - } - err = txn.Set(_prefixSnapshotChecksum, currentChecksum) - if err != nil { - return errors.Wrapf(err, "Snapshot.StartAncestralRecordsFlush: Problem flushing checksum bytes") - } - // Iterate through all now-sorted keys. - glog.V(2).Infof("Snapshot.StartAncestralRecordsFlush: Adding (%v) new records", len(recordsKeyList)) - glog.V(2).Infof("Snapshot.StartAncestralRecordsFlush: Adding (%v) ancestral records", len(oldestAncestralCache.AncestralRecordsMap)) - for _, key := range recordsKeyList { - // We store keys as strings because they're easier to store and sort this way. - keyBytes, err := hex.DecodeString(key) - if err != nil { - return errors.Wrapf(err, "Snapshot.StartAncestralRecordsFlush: Problem "+ - "decoding copyMapKeyList key: %v", key) - } - - // We check whether this record is already present in ancestral records, - // if so then there's nothing to do. 
What we want is err == badger.ErrKeyNotFound - _, err = snap.GetAncestralRecordsKeyWithTxn(txn, keyBytes, blockHeight) - if err != badger.ErrKeyNotFound { - if err != nil { - // In this case, we hit a real error with Badger, so we should return. - return errors.Wrapf(err, "Snapshot.StartAncestralRecordsFlush: Problem "+ - "reading exsiting record in the DB at key: %v", key) - } else { - // In this case, there was no error, which means the key already exists. - // No need to set it in that case. - continue - } - } - - // If we get here, it means that no record existed in ancestral records at key, - // so we set it here. - value, exists := oldestAncestralCache.AncestralRecordsMap[key] - if !exists { - return fmt.Errorf("Snapshot.StartAncestralRecordsFlush: Error, key is not " + - "in AncestralRecordsMap. This should never happen") - } - err = snap.DBSetAncestralRecordWithTxn(txn, blockHeight, keyBytes, value) - if err != nil { - return errors.Wrapf(err, "Snapshot.StartAncestralRecordsFlush: Problem "+ - "flushing a record from copyAncestralMap at key %v:", key) - } - } - return nil - }) + err = snap.flushAncestralRecordsHelper(txn, recordsKeyList, oldestAncestralCache, blockHeight) snap.SnapshotDbMutex.Unlock() if err != nil { // If any error occurred, then we should redo this memory write. During the restart, we will re-write all // entries. If the error happened during a partial write, e.g. we didn't write all records in recordsKeyList, // we'll redo them in the next write of this ancestralCache. The only scenario where that wouldn't happen // is if the node stopped suddenly. We can detect that via comparing semaphore counters on boot. - glog.Errorf("Snapshot.StartAncestralRecordsFlush: Problem flushing snapshot, error %v", err) - snap.StartAncestralRecordsFlush(false) + glog.Errorf("Snapshot.FlushAncestralRecords: Problem flushing snapshot, error %v", err) + // TODO: @diamondhands - do we need this still? 
Before this would queue up another operation + // but just calling it recursively seems like a bad idea. + snap.FlushAncestralRecordsWithTxn(txn) return } - // Signal that the ancestral db write has finished by incrementing the semaphore. - snap.Status.MemoryLock.Lock() - snap.Status.IncrementAncestralDBSemaphoreMemoryLockRequired() - snap.Status.MemoryLock.Unlock() - snap.AncestralMemory.Shift() } +func (snap *Snapshot) flushAncestralRecordsHelper( + txn *badger.Txn, + recordsKeyList []string, + oldestAncestralCache *AncestralCache, + blockHeight uint64) error { + // This update is called after a change to the main db records and so the current checksum reflects the state of + // the main db. In case we restart the node, we want to be able to retrieve the most recent checksum and resume + // from it when adding new records. Therefore, we save the current checksum bytes in the db. + currentChecksum, err := snap.Checksum.ToBytes() + if err != nil { + return errors.Wrapf(err, "Snapshot.FlushAncestralRecords: Problem getting checksum bytes") + } + err = txn.Set(getMainDbPrefix(_prefixSnapshotChecksum), currentChecksum) + if err != nil { + return errors.Wrapf(err, "Snapshot.FlushAncestralRecords: Problem flushing checksum bytes") + } + // Iterate through all now-sorted keys. + glog.V(2).Infof("Snapshot.FlushAncestralRecords: Adding (%v) new records", len(recordsKeyList)) + glog.V(2).Infof("Snapshot.FlushAncestralRecords: Adding (%v) ancestral records", len(oldestAncestralCache.AncestralRecordsMap)) + for _, key := range recordsKeyList { + var keyBytes []byte + // We store keys as strings because they're easier to store and sort this way. + keyBytes, err = hex.DecodeString(key) + if err != nil { + return errors.Wrapf(err, "Snapshot.FlushAncestralRecords: Problem "+ + "decoding copyMapKeyList key: %v", key) + } + + // We check whether this record is already present in ancestral records, + // if so then there's nothing to do. 
What we want is err == badger.ErrKeyNotFound + _, err = snap.GetAncestralRecordsKeyWithTxn(txn, keyBytes, blockHeight) + if err == nil { + // In this case, there was no error, which means the key already exists. + // No need to set it in that case. + continue + } + if !errors.Is(err, badger.ErrKeyNotFound) { + // In this case, we hit a real error with Badger, so we should return. + return errors.Wrapf(err, "Snapshot.FlushAncestralRecords: Problem "+ + "reading exsiting record in the DB at key: %v", key) + } + + // If we get here, it means that no record existed in ancestral records at key, + // so we set it here. + value, exists := oldestAncestralCache.AncestralRecordsMap[key] + if !exists { + return fmt.Errorf("Snapshot.FlushAncestralRecords: Error, key is not " + + "in AncestralRecordsMap. This should never happen") + } + err = snap.DBSetAncestralRecordWithTxn(txn, blockHeight, keyBytes, value) + if err != nil { + return errors.Wrapf(err, "Snapshot.FlushAncestralRecords: Problem "+ + "flushing a record from copyAncestralMap at key %v:", key) + } + } + return nil +} + // DeleteAncestralRecords is used to delete ancestral records for the provided height. func (snap *Snapshot) DeleteAncestralRecords(height uint64) error { + // If we haven't hit the first snapshot block height yet, don't bother. + if !snap.shouldPerformSnapshotOperations() { + return nil + } glog.V(2).Infof("Snapshot.DeleteAncestralRecords: Deleting snapshotDb for height (%v)", height) snap.timer.Start("Snapshot.DeleteAncestralRecords") var prefix []byte - prefix = append(prefix, _prefixAncestralRecord...) + prefix = append(prefix, getMainDbPrefix(_prefixAncestralRecord)...) prefix = append(prefix, EncodeUint64(height)...) 
snap.SnapshotDbMutex.Lock() defer snap.SnapshotDbMutex.Unlock() var keys [][]byte - err := snap.SnapshotDb.View(func(txn *badger.Txn) error { + err := snap.mainDb.Update(func(txn *badger.Txn) error { opts := badger.DefaultIteratorOptions opts.AllVersions = false opts.PrefetchValues = false @@ -857,12 +911,6 @@ func (snap *Snapshot) DeleteAncestralRecords(height uint64) error { key := it.Item().KeyCopy(nil) keys = append(keys, key) } - return nil - }) - if err != nil { - return errors.Wrapf(err, "DeleteAncestralRecords: Problem iterating through the height") - } - err = snap.SnapshotDb.Update(func(txn *badger.Txn) error { for _, key := range keys { err := txn.Delete(key) if err != nil { @@ -886,7 +934,7 @@ func (snap *Snapshot) GetAncestralRecordsKey(key []byte, blockHeight uint64) []b var prefix []byte // Append the ancestral records prefix. - prefix = append(prefix, _prefixAncestralRecord...) + prefix = append(prefix, getMainDbPrefix(_prefixAncestralRecord)...) // Append block height, which is the current snapshot identifier. prefix = append(prefix, EncodeUint64(blockHeight)...) @@ -905,7 +953,7 @@ func (snap *Snapshot) GetAncestralRecordsKeyWithTxn(txn *badger.Txn, key []byte, } func (snap *Snapshot) GetSnapshotBlockHeightPeriod() uint64 { - return snap.SnapshotBlockHeightPeriod + return snap.snapshotBlockHeightPeriod } // DBSetAncestralRecordWithTxn sets a record corresponding to our ExistingRecordsMap. @@ -926,14 +974,14 @@ func (snap *Snapshot) DBSetAncestralRecordWithTxn( // AncestralRecordToDBEntry is used to translate the pairs into // the actual pairs. Ancestral records have the format: // -// -> +// -> // // So we need to trim the first 9 bytes off of the ancestral_key to get the actual key. // And we need to trim the last 1 byte off of the ancestral_value to get the actual value. func (snap *Snapshot) AncestralRecordToDBEntry(ancestralEntry *DBEntry) *DBEntry { var dbKey, dbVal []byte // Trim the prefix and the block height from the ancestral record key. 
- dbKey = ancestralEntry.Key[9:] + dbKey = ancestralEntry.Key[10:] // Trim the existence_byte from the ancestral record value. if len(ancestralEntry.Value) > 0 { @@ -945,19 +993,24 @@ func (snap *Snapshot) AncestralRecordToDBEntry(ancestralEntry *DBEntry) *DBEntry } } -// CheckAnceststralRecordExistenceByte checks the existence_byte in the ancestral record value. -func (snap *Snapshot) CheckAnceststralRecordExistenceByte(value []byte) bool { +// CheckAncestralRecordExistenceByte checks the existence_byte in the ancestral record value. +func (snap *Snapshot) CheckAncestralRecordExistenceByte(value []byte) bool { if len(value) > 0 { return value[len(value)-1] == 1 } return false } -// SnapshotProcessBlock updates the snapshot information after a block has been added. -func (snap *Snapshot) SnapshotProcessBlock(blockNode *BlockNode) { +// snapshotProcessBlock updates the snapshot information after a block has been added. The +// snapshot's CurrentEpochSnapshotMetadata's updateMutex must be held before calling this. +func (snap *Snapshot) snapshotProcessBlockNoLock(blockNode *BlockNode) { height := uint64(blockNode.Height) if height > snap.Status.CurrentBlockHeight { snap.Status.CurrentBlockHeight = height + // If we haven't hit the first snapshot block height yet, don't bother. + if !snap.shouldPerformSnapshotOperations() { + return + } // Check if we've reached a migration blockheight and so should upgrade the checksum. 
if migrationChecksum := snap.Migrations.GetMigrationChecksumAtBlockheight(height); migrationChecksum != nil { for ii := 0; ii < MetadataRetryCount; ii++ { @@ -967,7 +1020,7 @@ func (snap *Snapshot) SnapshotProcessBlock(blockNode *BlockNode) { "bytes: Error (%v)", err) continue } - if err := snap.Checksum.FromBytes(checksumBytes); err != nil { + if err = snap.Checksum.FromBytes(checksumBytes); err != nil { glog.Errorf("Snapshot.SnapshotProcessBlock: Problem calling FromBytes on checksum "+ "migration: Error (%v)", err) continue @@ -979,8 +1032,6 @@ func (snap *Snapshot) SnapshotProcessBlock(blockNode *BlockNode) { } } - snap.CurrentEpochSnapshotMetadata.updateMutex.Lock() - defer snap.CurrentEpochSnapshotMetadata.updateMutex.Unlock() if height == snap.CurrentEpochSnapshotMetadata.SnapshotBlockHeight { var err error // Delete the previous blockHeight, it is not useful anymore. @@ -998,8 +1049,8 @@ func (snap *Snapshot) SnapshotProcessBlock(blockNode *BlockNode) { continue } snap.SnapshotDbMutex.Lock() - err = snap.SnapshotDb.Update(func(txn *badger.Txn) error { - return txn.Set(_prefixLastEpochMetadata, snap.CurrentEpochSnapshotMetadata.ToBytes()) + err = snap.mainDb.Update(func(txn *badger.Txn) error { + return txn.Set(getMainDbPrefix(_prefixLastEpochMetadata), snap.CurrentEpochSnapshotMetadata.ToBytes()) }) snap.SnapshotDbMutex.Unlock() if err != nil { @@ -1040,30 +1091,33 @@ func (snap *Snapshot) String() string { // GetSnapshotChunk fetches a batch of records from the nodes DB that match the provided prefix and // have a key at least equal to the startKey lexicographically. The function will also fetch ancestral // records and combine them with the DB records so that the batch reflects an ancestral block. 
-func (snap *Snapshot) GetSnapshotChunk(mainDb *badger.DB, prefix []byte, startKey []byte) ( - _snapshotEntriesBatch []*DBEntry, _snapshotEntriesFilled bool, _concurrencyFault bool, _err error) { - - // Check if we're flushing to the main db or to the ancestral records. If a flush is currently - // taking place, we will return a concurrencyFault error because the records are getting modified. - mainDBSemaphoreBefore, ancestralDBSemaphoreBefore := snap.Status.GetSemaphores() - if snap.Status.IsFlushing() { - return nil, false, true, nil - } +func (snap *Snapshot) GetSnapshotChunk(prefix []byte, startKey []byte) ( + _snapshotEntriesBatch []*DBEntry, _snapshotEntriesFilled bool, _err error) { // This the list of fetched DB entries. var snapshotEntriesBatch []*DBEntry blockHeight := snap.CurrentEpochSnapshotMetadata.SnapshotBlockHeight - // Fetch the batch from main DB records with a batch size of about snap.BatchSize. - mainDbBatchEntries, mainDbFilled, err := DBIteratePrefixKeys(mainDb, prefix, startKey, SnapshotBatchSize) - if err != nil { - return nil, false, false, errors.Wrapf(err, "Snapshot.GetSnapshotChunk: Problem fetching main Db records: ") - } - // Fetch the batch from the ancestral DB records with a batch size of about snap.BatchSize. - ancestralDbBatchEntries, ancestralDbFilled, err := DBIteratePrefixKeys(snap.SnapshotDb, - snap.GetAncestralRecordsKey(prefix, blockHeight), snap.GetAncestralRecordsKey(startKey, blockHeight), SnapshotBatchSize) + var mainDbBatchEntries, ancestralDbBatchEntries []*DBEntry + var mainDbFilled, ancestralDbFilled bool + // We use a read transaction to fetch the records so that we have a consistent view of the DB. + err := snap.mainDb.View(func(txn *badger.Txn) error { + var innerErr error + // Fetch the batch from main DB records with a batch size of about snap.BatchSize. 
+ mainDbBatchEntries, mainDbFilled, innerErr = DBIteratePrefixKeys(snap.mainDb, prefix, startKey, SnapshotBatchSize) + if innerErr != nil { + return errors.Wrapf(innerErr, "Snapshot.GetSnapshotChunk: Problem fetching main Db records: ") + } + // Fetch the batch from the ancestral DB records with a batch size of about snap.BatchSize. + ancestralDbBatchEntries, ancestralDbFilled, innerErr = DBIteratePrefixKeys(snap.mainDb, + snap.GetAncestralRecordsKey(prefix, blockHeight), snap.GetAncestralRecordsKey(startKey, blockHeight), SnapshotBatchSize) + if innerErr != nil { + return errors.Wrapf(innerErr, "Snapshot.GetSnapshotChunk: Problem fetching main Db records: ") + } + return nil + }) if err != nil { - return nil, false, false, errors.Wrapf(err, "Snapshot.GetSnapshotChunk: Problem fetching main Db records: ") + return nil, false, err } // To combine the main DB entries and the ancestral records DB entries, we iterate through the ancestral records and @@ -1081,9 +1135,10 @@ func (snap *Snapshot) GetSnapshotChunk(mainDb *badger.DB, prefix []byte, startKe dbEntry := snap.AncestralRecordToDBEntry(ancestralEntry) for jj := indexChunk; jj < len(mainDbBatchEntries); { - if bytes.Compare(mainDbBatchEntries[jj].Key, dbEntry.Key) == -1 { + byteCompare := bytes.Compare(mainDbBatchEntries[jj].Key, dbEntry.Key) + if byteCompare == -1 { snapshotEntriesBatch = append(snapshotEntriesBatch, mainDbBatchEntries[jj]) - } else if bytes.Compare(mainDbBatchEntries[jj].Key, dbEntry.Key) == 1 { + } else if byteCompare == 1 { break } // if keys are equal we just skip @@ -1097,7 +1152,7 @@ func (snap *Snapshot) GetSnapshotChunk(mainDb *badger.DB, prefix []byte, startKe if mainDbFilled && indexChunk == len(mainDbBatchEntries) { break } - if snap.CheckAnceststralRecordExistenceByte(ancestralEntry.Value) { + if snap.CheckAncestralRecordExistenceByte(ancestralEntry.Value) { snapshotEntriesBatch = append(snapshotEntriesBatch, dbEntry) } } @@ -1118,25 +1173,15 @@ func (snap *Snapshot) 
GetSnapshotChunk(mainDb *badger.DB, prefix []byte, startKe // no record from the main DB was added. lastAncestralEntry := ancestralDbBatchEntries[len(ancestralDbBatchEntries)-1] dbEntry := snap.AncestralRecordToDBEntry(lastAncestralEntry) - return snap.GetSnapshotChunk(mainDb, prefix, dbEntry.Key) + return snap.GetSnapshotChunk(prefix, dbEntry.Key) } else { snapshotEntriesBatch = append(snapshotEntriesBatch, EmptyDBEntry()) - return snapshotEntriesBatch, false, false, nil + return snapshotEntriesBatch, false, nil } } - // Check if the semaphores have changed as we were fetching the snapshot chunk. It could happen - // that a flush was taking place right when we were reading records from the database. To detect - // such edge-case, we compare the current semaphore counters with the ones we've copied when - // we started retrieving the database chunk. - mainDBSemaphoreAfter, ancestralDBSemaphoreAfter := snap.Status.GetSemaphores() - if ancestralDBSemaphoreBefore != ancestralDBSemaphoreAfter || - mainDBSemaphoreBefore != mainDBSemaphoreAfter { - return nil, false, true, nil - } - // If either of the chunks is full, we should return true. - return snapshotEntriesBatch, mainDbFilled || ancestralDbFilled, false, nil + return snapshotEntriesBatch, mainDbFilled || ancestralDbFilled, nil } // SetSnapshotChunk is called to put the snapshot chunk that we've got from a peer in the database. @@ -1205,7 +1250,7 @@ func (snap *Snapshot) SetSnapshotChunk(mainDb *badger.DB, mainDbMutex *deadlock. 
//snap.timer.Start("SetSnapshotChunk.Checksum") for _, dbEntry := range chunk { if localErr := snap.Checksum.AddOrRemoveBytesWithMigrations(dbEntry.Key, dbEntry.Value, blockHeight, - snap.Migrations.migrationChecksums, true); localErr != nil { + snap.Migrations.migrationChecksums, snap.Migrations.migrationChecksumLock, true); localErr != nil { glog.Errorf("Snapshot.SetSnapshotChunk: Problem adding checksum") err = localErr return @@ -1262,6 +1307,12 @@ func (snap *Snapshot) FreeOperationQueueSemaphore() { } } +// TODO: @diamondhands, this is a performance enhancements, but also makes me concerned +// that we may be missing some operations. We should investigate this further. +func (snap *Snapshot) shouldPerformSnapshotOperations() bool { + return snap.Status.CurrentBlockHeight >= snap.CurrentEpochSnapshotMetadata.FirstSnapshotBlockHeight +} + // ------------------------------------------------------------------------------------- // StateChecksum // ------------------------------------------------------------------------------------- @@ -1337,12 +1388,12 @@ type StateChecksum struct { // maxWorkers is the maximum number of workers we can have in the worker pool. maxWorkers int64 - snapshotDb *badger.DB + mainDb *badger.DB snapshotDbMutex *sync.Mutex } // Initialize starts the state checksum by initializing it to the identity element. -func (sc *StateChecksum) Initialize(snapshotDb *badger.DB, snapshotDbMutex *sync.Mutex) error { +func (sc *StateChecksum) Initialize(mainDb *badger.DB, snapshotDbMutex *sync.Mutex) error { // Set the elliptic curve group to Ristretto255 and initialize checksum as identity. 
sc.curve = group.Ristretto255 sc.checksum = sc.curve.Identity() @@ -1359,19 +1410,20 @@ func (sc *StateChecksum) Initialize(snapshotDb *badger.DB, snapshotDbMutex *sync sc.semaphore = semaphore.NewWeighted(sc.maxWorkers) sc.ctx = context.Background() - sc.snapshotDb = snapshotDb + sc.mainDb = mainDb sc.snapshotDbMutex = snapshotDbMutex - if snapshotDb == nil || snapshotDbMutex == nil { + if mainDb == nil || snapshotDbMutex == nil { sc.snapshotDbMutex = &sync.Mutex{} return nil } + sc.snapshotDbMutex.Lock() defer sc.snapshotDbMutex.Unlock() // Get snapshot checksum from the db. - err := sc.snapshotDb.View(func(txn *badger.Txn) error { - item, err := txn.Get(_prefixSnapshotChecksum) + err := sc.mainDb.View(func(txn *badger.Txn) error { + item, err := txn.Get(getMainDbPrefix(_prefixSnapshotChecksum)) if err != nil { return err } @@ -1382,7 +1434,7 @@ func (sc *StateChecksum) Initialize(snapshotDb *badger.DB, snapshotDbMutex *sync // If we get here, it means we've saved a checksum in the db, so we will set it to the checksum. return sc.FromBytes(value) }) - if err != nil && err != badger.ErrKeyNotFound { + if err != nil && !errors.Is(err, badger.ErrKeyNotFound) { return errors.Wrapf(err, "StateChecksum.Initialize: Problem reading checksum from the db") } return nil @@ -1392,12 +1444,12 @@ func (sc *StateChecksum) SaveChecksum() error { sc.snapshotDbMutex.Lock() defer sc.snapshotDbMutex.Unlock() - return sc.snapshotDb.Update(func(txn *badger.Txn) error { + return sc.mainDb.Update(func(txn *badger.Txn) error { checksumBytes, err := sc.ToBytes() if err != nil { return errors.Wrapf(err, "StateChecksum.SaveChecksum: Problem getting checksum bytes") } - return txn.Set(_prefixSnapshotChecksum, checksumBytes) + return txn.Set(getMainDbPrefix(_prefixSnapshotChecksum), checksumBytes) }) } @@ -1483,7 +1535,7 @@ func (sc *StateChecksum) RemoveBytes(bytes []byte) error { // called in the context of the snapshot's epoch so that everything happens in sync with the main thread. 
// The parameter addBytes determines if we want to add or remove bytes from the checksums. func (sc *StateChecksum) AddOrRemoveBytesWithMigrations(keyInput []byte, valueInput []byte, blockHeight uint64, - encoderMigrationChecksums []*EncoderMigrationChecksum, addBytes bool) error { + encoderMigrationChecksums []*EncoderMigrationChecksum, checksumLock *sync.RWMutex, addBytes bool) error { key, err := SafeMakeSliceWithLength[byte](uint64(len(keyInput))) if err != nil { return err @@ -1512,6 +1564,8 @@ func (sc *StateChecksum) AddOrRemoveBytesWithMigrations(keyInput []byte, valueIn // We add the current key, value encoding and encodings for all migrations. encodings = append(encodings, EncodeKeyAndValueForChecksum(key, value, blockHeight)) + checksumLock.Lock() + defer checksumLock.Unlock() for _, migration := range encoderMigrationChecksums { added := false migrationEncoding := EncodeKeyAndValueForChecksum(key, value, migration.BlockHeight) @@ -1635,31 +1689,31 @@ type SnapshotEpochMetadata struct { // CurrentEpochBlockHash is the hash of the first block of the current epoch. It's used to identify the snapshot. 
CurrentEpochBlockHash *BlockHash - updateMutex sync.Mutex + updateMutex sync.RWMutex - snapshotDb *badger.DB + mainDb *badger.DB snapshotDbMutex *sync.Mutex } -func (metadata *SnapshotEpochMetadata) Initialize(snapshotDb *badger.DB, snapshotDbMutex *sync.Mutex) error { +func (metadata *SnapshotEpochMetadata) Initialize(mainDb *badger.DB, snapshotDbMutex *sync.Mutex) error { metadata.SnapshotBlockHeight = uint64(0) metadata.FirstSnapshotBlockHeight = uint64(0) metadata.CurrentEpochChecksumBytes = []byte{} metadata.CurrentEpochBlockHash = NewBlockHash([]byte{}) - metadata.snapshotDb = snapshotDb + metadata.mainDb = mainDb metadata.snapshotDbMutex = snapshotDbMutex - if snapshotDb == nil || snapshotDbMutex == nil { + if mainDb == nil || snapshotDbMutex == nil { metadata.snapshotDbMutex = &sync.Mutex{} return nil } metadata.snapshotDbMutex.Lock() defer metadata.snapshotDbMutex.Unlock() - err := snapshotDb.View(func(txn *badger.Txn) error { + err := mainDb.View(func(txn *badger.Txn) error { // Now get the last epoch metadata. - item, err := txn.Get(_prefixLastEpochMetadata) + item, err := txn.Get(getMainDbPrefix(_prefixLastEpochMetadata)) if err != nil { return err } @@ -1672,7 +1726,7 @@ func (metadata *SnapshotEpochMetadata) Initialize(snapshotDb *badger.DB, snapsho }) // If we're starting the hyper sync node for the first time, then there will be no snapshot saved // and we'll get ErrKeyNotFound error. That's why we don't error when it happens. - if err != nil && err != badger.ErrKeyNotFound { + if err != nil && !errors.Is(err, badger.ErrKeyNotFound) { return errors.Wrapf(err, "Snapshot.NewSnapshot: Problem retrieving snapshot information from db") } return nil @@ -1781,9 +1835,9 @@ type SnapshotOperationType uint8 const ( // SnapshotOperationFlush operation enqueues a flush to the ancestral records. 
- SnapshotOperationFlush SnapshotOperationType = iota + SnapshotOperationFlush SnapshotOperationType = iota // deprecated // SnapshotOperationProcessBlock operation signals that a new block has been added to the blockchain. - SnapshotOperationProcessBlock + SnapshotOperationProcessBlock // deprecated // SnapshotOperationProcessChunk operation is enqueued when we receive a snapshot chunk during syncing. SnapshotOperationProcessChunk // SnapshotOperationChecksumAdd operation is enqueued when we want to add bytes to the state checksum. @@ -1839,32 +1893,36 @@ type SnapshotOperationChannel struct { StateSemaphore int32 StateSemaphoreLock sync.Mutex - snapshotDb *badger.DB + mainDb *badger.DB snapshotDbMutex *sync.Mutex startOperationHandler func(op *SnapshotOperation) error finishAllOperationsHandler func() error } -func (opChan *SnapshotOperationChannel) Initialize(snapshotDb *badger.DB, snapshotDbMutex *sync.Mutex, - startOperationHandler func(op *SnapshotOperation) error, finishAllOperationsHandler func() error) error { +func (opChan *SnapshotOperationChannel) Initialize( + mainDb *badger.DB, + snapshotDbMutex *sync.Mutex, + startOperationHandler func(op *SnapshotOperation) error, + finishAllOperationsHandler func() error, +) error { opChan.OperationChannel = make(chan *SnapshotOperation, 100000) opChan.StateSemaphore = 0 - opChan.snapshotDb = snapshotDb + opChan.mainDb = mainDb opChan.snapshotDbMutex = snapshotDbMutex opChan.startOperationHandler = startOperationHandler opChan.finishAllOperationsHandler = finishAllOperationsHandler - if snapshotDb == nil || snapshotDbMutex == nil { + if mainDb == nil || snapshotDbMutex == nil { opChan.snapshotDbMutex = &sync.Mutex{} return nil } opChan.snapshotDbMutex.Lock() defer opChan.snapshotDbMutex.Unlock() - err := snapshotDb.View(func(txn *badger.Txn) error { - item, err := txn.Get(_prefixOperationChannelStatus) + err := mainDb.View(func(txn *badger.Txn) error { + item, err := 
txn.Get(getMainDbPrefix(_prefixOperationChannelStatus)) if err != nil { return err } @@ -1880,7 +1938,7 @@ func (opChan *SnapshotOperationChannel) Initialize(snapshotDb *badger.DB, snapsh opChan.StateSemaphore = int32(stateSemaphore) return nil }) - if err != nil && err != badger.ErrKeyNotFound { + if err != nil && !errors.Is(err, badger.ErrKeyNotFound) { return errors.Wrapf(err, "SnapshotOperationChannel.Initialize: Problem reading StateSemaphore from db") } @@ -1899,8 +1957,8 @@ func (opChan *SnapshotOperationChannel) SaveOperationChannel() error { opChan.snapshotDbMutex.Lock() defer opChan.snapshotDbMutex.Unlock() - return opChan.snapshotDb.Update(func(txn *badger.Txn) error { - return txn.Set(_prefixOperationChannelStatus, UintToBuf(uint64(opChan.StateSemaphore))) + return opChan.mainDb.Update(func(txn *badger.Txn) error { + return txn.Set(getMainDbPrefix(_prefixOperationChannelStatus), UintToBuf(uint64(opChan.StateSemaphore))) }) } @@ -1958,36 +2016,23 @@ func (opChan *SnapshotOperationChannel) GetStatus() int32 { // ------------------------------------------------------------------------------------- type SnapshotStatus struct { - // MainDBSemaphore and AncestralDBSemaphore are atomically accessed counter semaphores that will be - // used to control race conditions between main db and ancestral records. They basically manage the concurrency - // between writes to the main and ancestral dbs. - // - // See the comment at the top of snapshot.go for detailed information on how these semaphores work. - MainDBSemaphore uint64 - AncestralDBSemaphore uint64 // CurrentBlockHeight is the blockheight of the blockchain tip. CurrentBlockHeight uint64 - // MemoryLock is held whenever we modify the MainDBSemaphore or AncestralDBSemaphore. - MemoryLock sync.Mutex - // SnapshotStatus is called concurrently by the Server and Snapshot threads. And badger cannot handle - // concurrent writes to the database. 
To make sure this concurrency doesn't affect general performance, - // we use a custom badger.DB to save SnapshotStatus. - snapshotDb *badger.DB + // A handle to the mainDb of the blockchain. + mainDb *badger.DB - // snapshotDbMutex is held whenever we modify snapshotDb. + // snapshotDbMutex is held whenever we modify snapshotDb prefix of the mainDb. snapshotDbMutex *sync.Mutex } -func (status *SnapshotStatus) Initialize(snapshotDb *badger.DB, snapshotDbMutex *sync.Mutex) error { - status.MainDBSemaphore = uint64(0) - status.AncestralDBSemaphore = uint64(0) +func (status *SnapshotStatus) Initialize(mainDb *badger.DB, snapshotDbMutex *sync.Mutex) error { - status.snapshotDb = snapshotDb + status.mainDb = mainDb status.snapshotDbMutex = snapshotDbMutex - if snapshotDb == nil || snapshotDbMutex == nil { + if mainDb == nil || snapshotDbMutex == nil { status.snapshotDbMutex = &sync.Mutex{} return nil } @@ -2000,8 +2045,8 @@ func (status *SnapshotStatus) Initialize(snapshotDb *badger.DB, snapshotDbMutex func (status *SnapshotStatus) ToBytes() []byte { var data []byte - data = append(data, UintToBuf(status.MainDBSemaphore)...) - data = append(data, UintToBuf(status.AncestralDBSemaphore)...) + data = append(data, UintToBuf(0)...) // Formerly status.MainDBSemaphore + data = append(data, UintToBuf(0)...) // Formerly status.AncestralDBSemaphore) data = append(data, UintToBuf(status.CurrentBlockHeight)...) 
return data @@ -2009,12 +2054,12 @@ func (status *SnapshotStatus) ToBytes() []byte { func (status *SnapshotStatus) FromBytes(rr *bytes.Reader) error { var err error - status.MainDBSemaphore, err = ReadUvarint(rr) + _, err = ReadUvarint(rr) // Formerly status.MainDBSemaphore if err != nil { return errors.Wrapf(err, "SnapshotStatus: Problem reading MainDBSemaphore") } - status.AncestralDBSemaphore, err = ReadUvarint(rr) + _, err = ReadUvarint(rr) // Formerly status.AncestralDBSemaphore if err != nil { return errors.Wrapf(err, "SnapshotStatus: Problem reading AncestralDBSemaphore") } @@ -2029,9 +2074,8 @@ func (status *SnapshotStatus) FromBytes(rr *bytes.Reader) error { func (status *SnapshotStatus) SaveStatus() { status.snapshotDbMutex.Lock() defer status.snapshotDbMutex.Unlock() - - err := status.snapshotDb.Update(func(txn *badger.Txn) error { - return txn.Set(_prefixSnapshotStatus, status.ToBytes()) + err := status.mainDb.Update(func(txn *badger.Txn) error { + return txn.Set(getMainDbPrefix(_prefixSnapshotStatus), status.ToBytes()) }) if err != nil { glog.Fatalf("SnapshotStatus.SaveStatus: problem writing snapshot status error (%v)", err) @@ -2041,9 +2085,8 @@ func (status *SnapshotStatus) SaveStatus() { func (status *SnapshotStatus) ReadStatus() error { status.snapshotDbMutex.Lock() defer status.snapshotDbMutex.Unlock() - - err := status.snapshotDb.View(func(txn *badger.Txn) error { - item, err := txn.Get(_prefixSnapshotStatus) + err := status.mainDb.View(func(txn *badger.Txn) error { + item, err := txn.Get(getMainDbPrefix(_prefixSnapshotStatus)) if err != nil { return err } @@ -2054,59 +2097,12 @@ func (status *SnapshotStatus) ReadStatus() error { rr := bytes.NewReader(statusBytes) return status.FromBytes(rr) }) - if err != nil && err != badger.ErrKeyNotFound { + if err != nil && !errors.Is(err, badger.ErrKeyNotFound) { return errors.Wrapf(err, "SnapshotStatus.ReadStatus: Problem reading status from db") } return nil } -// 
IncrementMainDbSemaphoreMemoryLockRequired increments the MainDBSemaphore by one, it should be called with MemoryLock. -func (status *SnapshotStatus) IncrementMainDbSemaphoreMemoryLockRequired() { - status.MainDBSemaphore++ - status.SaveStatus() -} - -// IncrementAncestralDBSemaphoreMemoryLockRequired increments the AncestralDBSemaphore by one, it should be called with MemoryLock. -func (status *SnapshotStatus) IncrementAncestralDBSemaphoreMemoryLockRequired() { - status.AncestralDBSemaphore++ - status.SaveStatus() -} - -// IsFlushingToMainDBMemoryLockRequired checks if a flush to MainDB takes place. This should be called with MemoryLock. -func (status *SnapshotStatus) IsFlushingToMainDBMemoryLockRequired() bool { - return status.MainDBSemaphore%2 == 1 -} - -// IsFlushingToAncestralMemoryLockRequired checks if a flush to AncestralDB takes place. This should be called with MemoryLock. -func (status *SnapshotStatus) IsFlushingToAncestralMemoryLockRequired() bool { - return status.AncestralDBSemaphore%2 == 1 -} - -// IsFlushing checks whether a main DB flush or ancestral record flush is taking place. -func (status *SnapshotStatus) IsFlushing() bool { - // We retrieve the ancestral record and main db semaphores. - status.MemoryLock.Lock() - defer status.MemoryLock.Unlock() - - return status.IsFlushingWithoutLock() -} - -func (status *SnapshotStatus) IsFlushingWithoutLock() bool { - // Flush is taking place if the semaphores have different counters or if they are odd. - // We increment each semaphore whenever we start the flush and when we end it so they are always - // even when the DB is not being updated. - return status.MainDBSemaphore != status.AncestralDBSemaphore || - (status.MainDBSemaphore|status.AncestralDBSemaphore)%2 == 1 -} - -// GetSemaphores retrieves main and ancestral db semaphores. 
-func (status *SnapshotStatus) GetSemaphores() (_mainDbSemaphore uint64, _ancestralDBSemaphore uint64) { - status.MemoryLock.Lock() - defer status.MemoryLock.Unlock() - - return status.MainDBSemaphore, status.AncestralDBSemaphore -} - // ------------------------------------------------------------------------------------- // EncoderMigrationChecksum, EncoderMigration // ------------------------------------------------------------------------------------- @@ -2120,26 +2116,31 @@ type EncoderMigrationChecksum struct { } type EncoderMigration struct { - migrationChecksums []*EncoderMigrationChecksum - completed bool - currentBlockHeight uint64 + migrationChecksumLock *sync.RWMutex + migrationChecksums []*EncoderMigrationChecksum + completed bool + currentBlockHeight uint64 mainDb *badger.DB - snapshotDb *badger.DB snapshotDbMutex *sync.Mutex params *DeSoParams } -func (migration *EncoderMigration) Initialize(mainDb *badger.DB, snapshotDb *badger.DB, - snapshotDbMutex *sync.Mutex, blockHeight uint64, params *DeSoParams, disabled bool) error { +func (migration *EncoderMigration) Initialize( + mainDb *badger.DB, + snapshotDbMutex *sync.Mutex, + blockHeight uint64, + params *DeSoParams, + disabled bool, +) error { migration.mainDb = mainDb - migration.snapshotDb = snapshotDb migration.snapshotDbMutex = snapshotDbMutex migration.currentBlockHeight = blockHeight migration.params = params + migration.migrationChecksumLock = &sync.RWMutex{} - if snapshotDb == nil || snapshotDbMutex == nil { + if mainDb == nil || snapshotDbMutex == nil { migration.snapshotDbMutex = &sync.Mutex{} return nil } @@ -2153,8 +2154,8 @@ func (migration *EncoderMigration) Initialize(mainDb *badger.DB, snapshotDb *bad } // Retrieve all migrations from the snapshot Db. 
- err := migration.snapshotDb.View(func(txn *badger.Txn) error { - item, err := txn.Get(_prefixMigrationStatus) + err := migration.mainDb.View(func(txn *badger.Txn) error { + item, err := txn.Get(getMainDbPrefix(_prefixMigrationStatus)) if err != nil { return err } @@ -2174,7 +2175,9 @@ func (migration *EncoderMigration) Initialize(mainDb *badger.DB, snapshotDb *bad migrationChecksum := &EncoderMigrationChecksum{} // Initialize an empty checksum struct. We use it to parse checksum bytes. migrationChecksum.Checksum = &StateChecksum{} - migrationChecksum.Checksum.Initialize(nil, nil) + if err = migrationChecksum.Checksum.Initialize(nil, nil); err != nil { + return err + } checksumBytes, err := DecodeByteArray(rr) if err != nil { @@ -2212,7 +2215,7 @@ func (migration *EncoderMigration) Initialize(mainDb *badger.DB, snapshotDb *bad migration.migrationChecksums = migrationChecksums return nil }) - if err != nil && err != badger.ErrKeyNotFound { + if err != nil && !errors.Is(err, badger.ErrKeyNotFound) { return errors.Wrapf(err, "EncoderMigrationChecksum.Initialize: Problem reading migration from db") } @@ -2248,12 +2251,16 @@ func (migration *EncoderMigration) Initialize(mainDb *badger.DB, snapshotDb *bad return nil } -func (migration *EncoderMigration) InitializeSingleHeight(mainDb *badger.DB, snapshotDb *badger.DB, - snapshotDbMutex *sync.Mutex, blockHeight uint64, params *DeSoParams) { +// TODO: This function is unused and can be removed. 
+func (migration *EncoderMigration) InitializeSingleHeight( + mainDb *badger.DB, + snapshotDbMutex *sync.Mutex, + blockHeight uint64, + params *DeSoParams, +) { migration.currentBlockHeight = blockHeight migration.mainDb = mainDb - migration.snapshotDb = snapshotDb migration.snapshotDbMutex = snapshotDbMutex migration.params = params @@ -2268,9 +2275,7 @@ func (migration *EncoderMigration) InitializeSingleHeight(mainDb *badger.DB, sna } func (migration *EncoderMigration) SaveMigrations() error { - migration.snapshotDbMutex.Lock() - defer migration.snapshotDbMutex.Unlock() - + migration.migrationChecksumLock.RLock() var data []byte data = append(data, UintToBuf(uint64(len(migration.migrationChecksums)))...) for ii := range migration.migrationChecksums { @@ -2285,14 +2290,19 @@ func (migration *EncoderMigration) SaveMigrations() error { data = append(data, BoolToByte(migration.migrationChecksums[ii].Completed)) } data = append(data, BoolToByte(migration.completed)) + migration.migrationChecksumLock.RUnlock() - return migration.snapshotDb.Update(func(txn *badger.Txn) error { - return txn.Set(_prefixMigrationStatus, data) + migration.snapshotDbMutex.Lock() + defer migration.snapshotDbMutex.Unlock() + return migration.mainDb.Update(func(txn *badger.Txn) error { + return txn.Set(getMainDbPrefix(_prefixMigrationStatus), data) }) } func (migration *EncoderMigration) StartMigrations() error { + migration.migrationChecksumLock.Lock() + defer migration.migrationChecksumLock.Unlock() var outstandingChecksums []*EncoderMigrationChecksum // Look for any outstanding encoder migrations. 
These migrations are going to be set to not completed and their checksums @@ -2381,7 +2391,7 @@ func (migration *EncoderMigration) StartMigrations() error { key := item.Key() err := item.Value(func(value []byte) error { return carrierChecksum.AddOrRemoveBytesWithMigrations(key, value, migration.currentBlockHeight, - outstandingChecksums, true) + outstandingChecksums, migration.migrationChecksumLock, true) }) if err != nil { return err @@ -2410,6 +2420,8 @@ func (migration *EncoderMigration) StartMigrations() error { } func (migration *EncoderMigration) GetMigrationChecksumAtBlockheight(blockHeight uint64) *StateChecksum { + migration.migrationChecksumLock.RLock() + defer migration.migrationChecksumLock.RUnlock() for _, migrationChecksum := range migration.migrationChecksums { if migrationChecksum.BlockHeight == blockHeight { return migrationChecksum.Checksum @@ -2419,6 +2431,8 @@ func (migration *EncoderMigration) GetMigrationChecksumAtBlockheight(blockHeight } func (migration *EncoderMigration) CleanupMigrations(blockHeight uint64) { + migration.migrationChecksumLock.Lock() + defer migration.migrationChecksumLock.Unlock() for jj := 0; jj < len(migration.migrationChecksums); jj++ { if migration.migrationChecksums[jj].BlockHeight <= blockHeight { migration.migrationChecksums = append(migration.migrationChecksums[:jj], @@ -2429,6 +2443,8 @@ func (migration *EncoderMigration) CleanupMigrations(blockHeight uint64) { } func (migration *EncoderMigration) ResetChecksums() { + migration.migrationChecksumLock.Lock() + defer migration.migrationChecksumLock.Unlock() for _, migrationChecksum := range migration.migrationChecksums { migrationChecksum.Checksum.ResetChecksum() migrationChecksum.Completed = false From c0f9319d09805961e272a6a3fadaa0b1c474f2a8 Mon Sep 17 00:00:00 2001 From: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> Date: Fri, 10 May 2024 16:43:37 -0700 Subject: [PATCH 708/762] Review of hypersync-enhancements (#1300) 
https://docs.google.com/document/d/1i9Az5nrhL2zhrQMNLS1lzuW1zbvfNp2Xa6cfe6WkX-k/edit --- lib/block_view.go | 2 +- lib/block_view_flush.go | 18 ++++++++++++++---- lib/blockchain.go | 11 +++++++++-- lib/db_utils.go | 28 ++++++++++++++++++++++------ lib/pos_blockchain.go | 8 +++++++- lib/server.go | 38 +++++++++++++++++++++++++------------- lib/snapshot.go | 30 ++++++++++++------------------ 7 files changed, 90 insertions(+), 45 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 390ac3e7a..5b95300b5 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4930,7 +4930,7 @@ func (bav *UtxoView) GetTransactorNonceEntry(nonce *DeSoNonce, pkid *PKID) (*Tra return nonceEntry, nil } var err error - nonceEntry, err = DbGetTransactorNonceEntry(bav.Handle, nonce, pkid) + nonceEntry, err = DbGetTransactorNonceEntry(bav.Handle, bav.Snapshot, nonce, pkid) if err != nil { return nil, err } diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 53b07ab31..4bd55d255 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -44,12 +44,22 @@ func (bav *UtxoView) FlushToDbWithTxn(txn *badger.Txn, blockHeight uint64) error // This function prepares the data structures in the snapshot. if bav.Snapshot != nil { bav.Snapshot.PrepareAncestralRecordsFlush() - // When we finish flushing to the main DB, we'll also flush to ancestral records. - // This happens concurrently, which is why we have the 2-phase prepare-flush happening for snapshot. - defer bav.Snapshot.FlushAncestralRecordsWithTxn(txn) } - return bav.FlushToDBWithoutAncestralRecordsFlushWithTxn(txn, blockHeight) + err := bav.FlushToDBWithoutAncestralRecordsFlushWithTxn(txn, blockHeight) + if err != nil { + return err + } + // We can exit early if we're not using a snapshot. + if bav.Snapshot == nil { + return nil + } + // Flush the ancestral records to the DB. 
+ err = bav.Snapshot.FlushAncestralRecordsWithTxn(txn) + if err != nil { + return err + } + return nil } // FlushToDBWithoutAncestralRecordsFlushWithTxn flushes the UtxoView to the DB without diff --git a/lib/blockchain.go b/lib/blockchain.go index b13420bdf..d01a306c3 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -2418,7 +2418,6 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures err = bc.db.Update(func(txn *badger.Txn) error { if bc.snapshot != nil { bc.snapshot.PrepareAncestralRecordsFlush() - defer bc.snapshot.FlushAncestralRecordsWithTxn(txn) glog.V(2).Infof("ProcessBlock: Preparing snapshot flush") } // Store the new block in the db under the @@ -2444,7 +2443,15 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures ); innerErr != nil { return errors.Wrapf(err, "ProcessBlock: Problem calling PutHeightHashToNodeInfo before validation") } - + // We can exit early if we're not using a snapshot. + if bc.snapshot == nil { + return nil + } + glog.V(2).Infof("ProcessBlock: Flushing ancestral records") + innerErr := bc.snapshot.FlushAncestralRecordsWithTxn(txn) + if innerErr != nil { + return innerErr + } return nil }) } diff --git a/lib/db_utils.go b/lib/db_utils.go index c598ecbcc..33eb73dad 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -5323,7 +5323,6 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, if err := handle.Update(func(txn *badger.Txn) error { if snap != nil { snap.PrepareAncestralRecordsFlush() - defer snap.FlushAncestralRecordsWithTxn(txn) } if err := PutBestHashWithTxn(txn, snap, blockHash, ChainTypeDeSoBlock, eventManager); err != nil { return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting genesis block hash into db for block chain") @@ -5342,6 +5341,13 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, if err := DbPutGlobalParamsEntryWithTxn(txn, snap, 0, InitialGlobalParamsEntry, eventManager); err != nil { 
return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting GlobalParamsEntry into db for block chain") } + // We can exit early if we're not using a snapshot. + if snap == nil { + return nil + } + if err := snap.FlushAncestralRecordsWithTxn(txn); err != nil { + return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem flushing ancestral records") + } return nil }); err != nil { return err @@ -11771,10 +11777,15 @@ func _dbPrefixForNonceEntryIndexWithBlockHeight(blockHeight uint64) []byte { return append(prefixCopy, EncodeUint64(blockHeight)...) } -func DbGetTransactorNonceEntryWithTxn(txn *badger.Txn, nonce *DeSoNonce, pkid *PKID) (*TransactorNonceEntry, error) { +func DbGetTransactorNonceEntryWithTxn( + txn *badger.Txn, + snap *Snapshot, + nonce *DeSoNonce, + pkid *PKID, +) (*TransactorNonceEntry, error) { key := _dbKeyForTransactorNonceEntry(nonce, pkid) - _, err := txn.Get(key) - if err == badger.ErrKeyNotFound { + _, err := DBGetWithTxn(txn, snap, key) + if errors.Is(err, badger.ErrKeyNotFound) { return nil, nil } if err != nil { @@ -11786,11 +11797,16 @@ func DbGetTransactorNonceEntryWithTxn(txn *badger.Txn, nonce *DeSoNonce, pkid *P }, nil } -func DbGetTransactorNonceEntry(db *badger.DB, nonce *DeSoNonce, pkid *PKID) (*TransactorNonceEntry, error) { +func DbGetTransactorNonceEntry( + db *badger.DB, + snap *Snapshot, + nonce *DeSoNonce, + pkid *PKID, +) (*TransactorNonceEntry, error) { var ret *TransactorNonceEntry dbErr := db.View(func(txn *badger.Txn) error { var err error - ret, err = DbGetTransactorNonceEntryWithTxn(txn, nonce, pkid) + ret, err = DbGetTransactorNonceEntryWithTxn(txn, snap, nonce, pkid) return errors.Wrap(err, "DbGetTransactorNonceEntry: ") }) if dbErr != nil { diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 6e8fb1218..1705e57b3 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1684,7 +1684,6 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool err = 
bc.db.Update(func(txn *badger.Txn) error { if bc.snapshot != nil { bc.snapshot.PrepareAncestralRecordsFlush() - defer bc.snapshot.FlushAncestralRecordsWithTxn(txn) glog.V(2).Infof("commitBlockPoS: Preparing snapshot flush") } @@ -1713,6 +1712,13 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool txn, uint64(blockNode.Height)); innerErr != nil { return errors.Wrapf(innerErr, "commitBlockPoS: Problem flushing UtxoView to db") } + // We can exit early if we're not using a snapshot. + if bc.snapshot == nil { + return nil + } + if innerErr := bc.snapshot.FlushAncestralRecordsWithTxn(txn); innerErr != nil { + return errors.Wrapf(innerErr, "commitBlockPoS: Problem flushing ancestral records") + } return nil }) if err != nil { diff --git a/lib/server.go b/lib/server.go index cb043aac7..b31c7f66c 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1107,24 +1107,27 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { srv.blockchain.syncingState = true } + // Regardless of whether we are going to hypersync from the peer, we want to compute the + // expectedSnapshotHeight. + bestHeaderHeight := uint64(srv.blockchain.headerTip().Height) + // The peer's snapshot block height period before the first PoS fork height is expected to be the + // PoW default value. After the fork height, it's expected to be the value defined in the params. 
+ snapshotBlockHeightPeriod := srv.params.GetSnapshotBlockHeightPeriod( + bestHeaderHeight, + srv.snapshot.GetSnapshotBlockHeightPeriod(), + ) + expectedSnapshotHeight := bestHeaderHeight - (bestHeaderHeight % snapshotBlockHeightPeriod) + posSetupForkHeight := uint64(srv.params.ForkHeights.ProofOfStake1StateSetupBlockHeight) + if expectedSnapshotHeight < posSetupForkHeight { + expectedSnapshotHeight = posSetupForkHeight - (posSetupForkHeight % srv.params.DefaultPoWSnapshotBlockHeightPeriod) + } + if srv.blockchain.chainState() == SyncStateSyncingSnapshot { glog.V(1).Infof("Server._handleHeaderBundle: *Syncing* state starting at "+ "height %v from peer %v", srv.blockchain.headerTip().Header.Height, pp) // If node is a hyper sync node and we haven't finished syncing state yet, we will kick off state sync. if srv.cmgr.HyperSync { - bestHeaderHeight := uint64(srv.blockchain.headerTip().Height) - // The peer's snapshot block height period before the first PoS fork height is expected to be the - // PoW default value. After the fork height, it's expected to be the value defined in the params. - snapshotBlockHeightPeriod := srv.params.GetSnapshotBlockHeightPeriod( - bestHeaderHeight, - srv.snapshot.GetSnapshotBlockHeightPeriod(), - ) - expectedSnapshotHeight := bestHeaderHeight - (bestHeaderHeight % snapshotBlockHeightPeriod) - posSetupForkHeight := uint64(srv.params.ForkHeights.ProofOfStake1StateSetupBlockHeight) - if expectedSnapshotHeight < posSetupForkHeight { - expectedSnapshotHeight = posSetupForkHeight - (posSetupForkHeight % srv.params.DefaultPoWSnapshotBlockHeightPeriod) - } srv.blockchain.snapshot.Migrations.CleanupMigrations(expectedSnapshotHeight) if len(srv.HyperSyncProgress.PrefixProgress) != 0 { @@ -1208,6 +1211,15 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // If we have exhausted the peer's headers but our blocks aren't current, // send a GetBlocks message to the peer for as many blocks as we can get. 
if srv.blockchain.chainState() == SyncStateSyncingBlocks { + // Regardless of whether we're hypersyncing, we need to ensure that the + // FirstSnapshotBlockHeight is set correctly. This ensures that we won't do unnecessary + // hypersync computations until we absolutely have to. + if srv.snapshot != nil && + srv.snapshot.CurrentEpochSnapshotMetadata != nil && + srv.snapshot.CurrentEpochSnapshotMetadata.FirstSnapshotBlockHeight == 0 { + srv.snapshot.CurrentEpochSnapshotMetadata.FirstSnapshotBlockHeight = expectedSnapshotHeight + } + // A maxHeight of -1 tells GetBlocks to fetch as many blocks as we can // from this peer without worrying about how many blocks the peer actually // has. We can do that in this case since this usually happens during sync @@ -2429,7 +2441,7 @@ func (srv *Server) _handleBlockBundle(pp *Peer, bundle *MsgDeSoBlockBundle) { srv._handleBlock(pp, blk, ii == len(bundle.Blocks)-1 /*isLastBlock*/) numLogBlocks := 1000 if srv.params.IsPoWBlockHeight(blk.Header.Height) { - numLogBlocks = 25 + numLogBlocks = 100 } if ii%numLogBlocks == 0 { glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlockBundle: Processed block ( %v / %v ) = ( %v / %v ) from Peer %v", diff --git a/lib/snapshot.go b/lib/snapshot.go index 518e6c19d..ab5658225 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -767,10 +767,10 @@ func (snap *Snapshot) PrepareAncestralRecord(key string, value []byte, existed b // FlushAncestralRecordsWithTxn updates the ancestral records after a UtxoView flush. // This function should be called in the same badger transaction at the end of UtxoView // flushes. -func (snap *Snapshot) FlushAncestralRecordsWithTxn(txn *badger.Txn) { +func (snap *Snapshot) FlushAncestralRecordsWithTxn(txn *badger.Txn) error { // If we haven't hit the first snapshot block height yet, don't bother. 
if !snap.shouldPerformSnapshotOperations() { - return + return nil } glog.V(2).Infof("Snapshot.FlushAncestralRecords: Initiated the flush") @@ -780,11 +780,9 @@ func (snap *Snapshot) FlushAncestralRecordsWithTxn(txn *badger.Txn) { // snapshot worker threads running so we want to wait until they're done. err := snap.Checksum.Wait() if err != nil { - glog.Errorf("Snapshot.FlushAncestralRecords: Error while waiting "+ - "for checksum: (%v)", err) - // TODO: do we need this still? - snap.FlushAncestralRecordsWithTxn(txn) - return + errMsg := fmt.Sprintf("Snapshot.FlushAncestralRecords: Error while waiting for checksum: (%v)", err) + glog.Errorf(errMsg) + return errors.New(errMsg) } // Pull items off of the deque for writing. We say "last" as in oldest, i.e. the first element of AncestralMemory. @@ -792,10 +790,11 @@ func (snap *Snapshot) FlushAncestralRecordsWithTxn(txn *badger.Txn) { blockHeight := oldestAncestralCache.blockHeight if blockHeight != snap.CurrentEpochSnapshotMetadata.SnapshotBlockHeight { - glog.Infof("Snapshot.FlushAncestralRecords: AncestralMemory blockHeight (%v) doesn't match current "+ + errMsg := fmt.Sprintf("Snapshot.FlushAncestralRecords: AncestralMemory blockHeight (%v) doesn't match current "+ "metadata blockHeight (%v), number of operations in operationChannel (%v)", blockHeight, snap.CurrentEpochSnapshotMetadata.SnapshotBlockHeight, len(snap.OperationChannel.OperationChannel)) - return + glog.Errorf(errMsg) + return errors.New(errMsg) } // First sort the keys so that we write to BadgerDB in order. recordsKeyList := make([]string, 0, len(oldestAncestralCache.AncestralRecordsMap)) @@ -810,18 +809,13 @@ func (snap *Snapshot) FlushAncestralRecordsWithTxn(txn *badger.Txn) { err = snap.flushAncestralRecordsHelper(txn, recordsKeyList, oldestAncestralCache, blockHeight) snap.SnapshotDbMutex.Unlock() if err != nil { - // If any error occurred, then we should redo this memory write. During the restart, we will re-write all - // entries. 
If the error happened during a partial write, e.g. we didn't write all records in recordsKeyList, - // we'll redo them in the next write of this ancestralCache. The only scenario where that wouldn't happen - // is if the node stopped suddenly. We can detect that via comparing semaphore counters on boot. - glog.Errorf("Snapshot.FlushAncestralRecords: Problem flushing snapshot, error %v", err) - // TODO: @diamondhands - do we need this still? Before this would queue up another operation - // but just calling it recursively seems like a bad idea. - snap.FlushAncestralRecordsWithTxn(txn) - return + errMsg := fmt.Sprintf("Snapshot.FlushAncestralRecords: Problem flushing snapshot, error %v", err) + glog.Error(errMsg) + return errors.New(errMsg) } snap.AncestralMemory.Shift() + return nil } func (snap *Snapshot) flushAncestralRecordsHelper( From 1b57fe0809955d6f55acdb4b599a190924d8464f Mon Sep 17 00:00:00 2001 From: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> Date: Sat, 11 May 2024 09:43:38 -0700 Subject: [PATCH 709/762] Fix checksum computation at the snapshot height (#1305) * Fixing checksum computation * Update lib/server.go Co-authored-by: Lazy Nina <81658138+lazynina@users.noreply.github.com> --------- Co-authored-by: Lazy Nina <81658138+lazynina@users.noreply.github.com> --- lib/server.go | 15 ++++++++++----- lib/snapshot.go | 21 ++++++++++++++++----- 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/lib/server.go b/lib/server.go index b31c7f66c..43221146d 100644 --- a/lib/server.go +++ b/lib/server.go @@ -824,7 +824,9 @@ func (srv *Server) GetBlocksToStore(pp *Peer) { if blockNode.Status&StatusBlockStored == 0 { maxBlocksInFlight := MaxBlocksInFlight if pp.Params.ProtocolVersion >= ProtocolVersion2 && - srv.params.IsPoSBlockHeight(uint64(blockNode.Height)) { + (srv.params.IsPoSBlockHeight(uint64(blockNode.Height)) || + srv.params.NetworkType == NetworkType_TESTNET) { + maxBlocksInFlight = MaxBlocksInFlightPoS } numBlocksToFetch := 
maxBlocksInFlight - len(pp.requestedBlocks) @@ -883,7 +885,8 @@ func (srv *Server) GetBlocks(pp *Peer, maxHeight int) { // each flight. maxBlocksInFlight := MaxBlocksInFlight if pp.Params.ProtocolVersion >= ProtocolVersion2 && - srv.params.IsPoSBlockHeight(uint64(srv.blockchain.blockTip().Height)) { + (srv.params.IsPoSBlockHeight(uint64(srv.blockchain.blockTip().Height)) || + srv.params.NetworkType == NetworkType_TESTNET) { maxBlocksInFlight = MaxBlocksInFlightPoS } numBlocksToFetch := maxBlocksInFlight - len(pp.requestedBlocks) @@ -2439,10 +2442,12 @@ func (srv *Server) _handleBlockBundle(pp *Peer, bundle *MsgDeSoBlockBundle) { // though as we'll just connect all the blocks after the failed one and those blocks will also // gracefully fail. srv._handleBlock(pp, blk, ii == len(bundle.Blocks)-1 /*isLastBlock*/) - numLogBlocks := 1000 - if srv.params.IsPoWBlockHeight(blk.Header.Height) { - numLogBlocks = 100 + numLogBlocks := 100 + if srv.params.IsPoSBlockHeight(blk.Header.Height) || + srv.params.NetworkType == NetworkType_TESTNET { + numLogBlocks = 1000 } + if ii%numLogBlocks == 0 { glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlockBundle: Processed block ( %v / %v ) = ( %v / %v ) from Peer %v", bundle.Blocks[ii].Header.Height, diff --git a/lib/snapshot.go b/lib/snapshot.go index ab5658225..9622b9735 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -547,7 +547,7 @@ func (snap *Snapshot) Run() { if err != nil { glog.Errorf("Snapshot.Run: Problem getting checksum bytes (%v)", err) } - glog.V(2).Infof("Snapshot.Run: PrintText (%s) Current checksum (%v)", operation.printText, stateChecksum) + glog.V(0).Infof("Snapshot.Run: PrintText (%s) Current checksum (%v)", operation.printText, stateChecksum) case SnapshotOperationExit: glog.V(2).Infof("Snapshot.Run: Exiting the operation loop") @@ -664,10 +664,6 @@ func (snap *Snapshot) RemoveChecksumBytes(key []byte, value []byte) { // WaitForAllOperationsToFinish will busy-wait for the snapshot channel to process all // 
current operations. Spinlocks are undesired but it's the easiest solution in this case, func (snap *Snapshot) WaitForAllOperationsToFinish() { - // If we haven't hit the first snapshot block height yet, don't bother. - if !snap.shouldPerformSnapshotOperations() { - return - } // Define some helper variables so that the node prints nice logs. initialLen := int(snap.OperationChannel.GetStatus()) printMap := make(map[int]bool) @@ -1036,12 +1032,27 @@ func (snap *Snapshot) snapshotProcessBlockNoLock(blockNode *BlockNode) { // Update the snapshot epoch metadata in the snapshot DB. for ii := 0; ii < MetadataRetryCount; ii++ { + // We have to wait for the checksum threads to finish or else we'll set a stale checksum for + // our snapshot height. + glog.V(1).Infof("Snapshot.SnapshotProcessBlock: Waiting for checksum to finish updating...") + if localErr := snap.Checksum.Wait(); localErr != nil { + glog.Errorf("Snapshot.SetSnapshotChunk: Problem waiting for the checksum: %v", localErr) + time.Sleep(1 * time.Second) + continue + } + // For good measure, let's also wait for the operation queue to empty. This will ensure + // that any pending checksum operations are processed before we proceed. + glog.V(1).Infof("Snapshot.SnapshotProcessBlock: Waiting for all operations to " + + "clear so we can update snapshot checksum...") + snap.WaitForAllOperationsToFinish() snap.CurrentEpochSnapshotMetadata.CurrentEpochChecksumBytes, err = snap.Checksum.ToBytes() if err != nil { glog.Errorf("Snapshot.SnapshotProcessBlock: Problem getting checksum bytes: Error (%v)", err) time.Sleep(1 * time.Second) continue } + glog.V(1).Infof("Set snapshot checksum at snapshot height %v to %v. 
Delete this...\n", + height, snap.CurrentEpochSnapshotMetadata.CurrentEpochChecksumBytes) snap.SnapshotDbMutex.Lock() err = snap.mainDb.Update(func(txn *badger.Txn) error { return txn.Set(getMainDbPrefix(_prefixLastEpochMetadata), snap.CurrentEpochSnapshotMetadata.ToBytes()) From 5cb4b5bc1925d84a274f166812e6ccf6a99e9f34 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 13 May 2024 16:25:25 -0400 Subject: [PATCH 710/762] Fix encoder migration deadlock (#1307) --- cmd/config.go | 7 +++---- lib/snapshot.go | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/config.go b/cmd/config.go index ced1afb18..e51477887 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -1,13 +1,12 @@ package cmd import ( - "net/url" - "os" - "path/filepath" - "github.com/deso-protocol/core/lib" "github.com/golang/glog" "github.com/spf13/viper" + "net/url" + "os" + "path/filepath" ) type Config struct { diff --git a/lib/snapshot.go b/lib/snapshot.go index 9622b9735..1321b5c68 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -2307,7 +2307,6 @@ func (migration *EncoderMigration) SaveMigrations() error { func (migration *EncoderMigration) StartMigrations() error { migration.migrationChecksumLock.Lock() - defer migration.migrationChecksumLock.Unlock() var outstandingChecksums []*EncoderMigrationChecksum // Look for any outstanding encoder migrations. These migrations are going to be set to not completed and their checksums @@ -2323,6 +2322,7 @@ func (migration *EncoderMigration) StartMigrations() error { if len(outstandingChecksums) == 0 { return nil } + migration.migrationChecksumLock.Unlock() // If we get to this point, it means there are some new migrations that we need to process. glog.Infof(CLog(Yellow, fmt.Sprintf("EncoderMigration: Found %v outstanding migrations. 
Proceeding to scan through the "+ From 4a6305748c30118fdc0846b14846e3ddb4b8aede Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 14 May 2024 14:42:01 -0400 Subject: [PATCH 711/762] Release lock before return (#1309) --- lib/snapshot.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/snapshot.go b/lib/snapshot.go index 1321b5c68..1a703a6a4 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -2319,10 +2319,11 @@ func (migration *EncoderMigration) StartMigrations() error { migrationChecksum.Checksum.ResetChecksum() outstandingChecksums = append(outstandingChecksums, migrationChecksum) } + migration.migrationChecksumLock.Unlock() + if len(outstandingChecksums) == 0 { return nil } - migration.migrationChecksumLock.Unlock() // If we get to this point, it means there are some new migrations that we need to process. glog.Infof(CLog(Yellow, fmt.Sprintf("EncoderMigration: Found %v outstanding migrations. Proceeding to scan through the "+ From ff154b6bcc4a44d6333284d29c73e69cfe6a936b Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 14 May 2024 15:00:57 -0400 Subject: [PATCH 712/762] Unlock in error case (#1310) --- lib/snapshot.go | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/snapshot.go b/lib/snapshot.go index 1a703a6a4..b5d7c294d 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -2286,6 +2286,7 @@ func (migration *EncoderMigration) SaveMigrations() error { for ii := range migration.migrationChecksums { checksumBytes, err := migration.migrationChecksums[ii].Checksum.ToBytes() if err != nil { + migration.migrationChecksumLock.RUnlock() return errors.Wrapf(err, "EncoderMigration.SaveMigrations: Problem getting migration checksum "+ "bytes, ii = (%v)", ii) } From 28495109bc2c62940b211714a69b8e18dc06e416 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 14 May 2024 17:33:36 -0400 Subject: [PATCH 
713/762] Beef Up Validator Registration Validations (#1311) Beef Up Validator Registration Validations Fix error check --- lib/block_view_validator.go | 8 ++++++++ lib/block_view_validator_test.go | 23 ++++++++++++++++++++++- lib/pos_constants.go | 8 ++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index cf08cb623..b245012ff 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1821,6 +1821,13 @@ func (bav *UtxoView) IsValidRegisterAsValidatorMetadata( if metadata.VotingPublicKey == nil { return errors.Wrapf(RuleErrorValidatorMissingVotingPublicKey, "UtxoView.IsValidRegisterAsValidatorMetadata: ") } + cutoverValidator, err := BuildProofOfStakeCutoverValidatorBLSPublicKey() + if err != nil { + return errors.Wrapf(err, "UtxoView.IsValidRegisterAsValidatorMetadata: error building cutover validator for validation: ") + } + if metadata.VotingPublicKey.Eq(cutoverValidator) { + return errors.Wrapf(RuleErrorValidatorInvalidVotingPublicKey, "UtxoView.IsValidRegisterAsValidatorMetadata: ") + } // Validate VotingAuthorization. 
if metadata.VotingAuthorization == nil { @@ -2610,6 +2617,7 @@ const RuleErrorValidatorInvalidCommissionBasisPoints RuleError = "RuleErrorValid const RuleErrorValidatorNotFound RuleError = "RuleErrorValidatorNotFound" const RuleErrorValidatorBLSPublicKeyPKIDPairEntryNotFound RuleError = "RuleErrorValidatorBLSPublicKeyPKIDPairEntryNotFound" const RuleErrorValidatorMissingVotingPublicKey RuleError = "RuleErrorValidatorMissingVotingPublicKey" +const RuleErrorValidatorInvalidVotingPublicKey RuleError = "RuleErrorValidatorInvalidVotingPublicKey" const RuleErrorValidatorMissingVotingAuthorization RuleError = "RuleErrorValidatorMissingVotingAuthorization" const RuleErrorValidatorInvalidVotingAuthorization RuleError = "RuleErrorValidatorInvalidVotingAuthorization" const RuleErrorValidatorDisablingExistingDelegatedStakers RuleError = "RuleErrorValidatorDisablingExistingDelegatedStakers" diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 1733a6a60..9fcc6027c 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -186,6 +186,22 @@ func _testValidatorRegistration(t *testing.T, flushToDB bool) { require.Error(t, err) require.Contains(t, err.Error(), RuleErrorValidatorMissingVotingAuthorization) } + { + // RuleErrorValidatorInvalidVotingPublicKey + votingPrivateKey, err := BuildProofOfStakeCutoverValidatorBLSPrivateKey() + require.NoError(t, err) + votingPublicKey := votingPrivateKey.PublicKey() + votingAuthorization := _generateVotingAuthorization(t, votingPrivateKey, m0PkBytes) + registerMetadata = &RegisterAsValidatorMetadata{ + Domains: [][]byte{[]byte("example.com:18000")}, + DisableDelegatedStake: false, + VotingPublicKey: votingPublicKey, + VotingAuthorization: votingAuthorization, + } + _, err = _submitRegisterAsValidatorTxn(testMeta, m0Pub, m0Priv, registerMetadata, nil, flushToDB) + require.Error(t, err) + require.Contains(t, err.Error(), RuleErrorValidatorInvalidVotingPublicKey) + } { // 
RuleErrorValidatorInvalidVotingAuthorization: invalid TransactorPkBytes votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m1PkBytes) @@ -2069,10 +2085,15 @@ func _generateVotingPublicKeyAndAuthorization(t *testing.T, transactorPkBytes [] blsPrivateKey, err := bls.NewPrivateKey() require.NoError(t, err) votingPublicKey := blsPrivateKey.PublicKey() + votingAuthorization := _generateVotingAuthorization(t, blsPrivateKey, transactorPkBytes) + return votingPublicKey, votingAuthorization +} + +func _generateVotingAuthorization(t *testing.T, blsPrivateKey *bls.PrivateKey, transactorPkBytes []byte) *bls.Signature { votingAuthorizationPayload := CreateValidatorVotingAuthorizationPayload(transactorPkBytes) votingAuthorization, err := blsPrivateKey.Sign(votingAuthorizationPayload) require.NoError(t, err) - return votingPublicKey, votingAuthorization + return votingAuthorization } func _generateVotingPrivateKeyPublicKeyAndAuthorization(t *testing.T, transactorPkBytes []byte) (*bls.PrivateKey, *bls.PublicKey, *bls.Signature) { diff --git a/lib/pos_constants.go b/lib/pos_constants.go index d17631062..19cf429f4 100644 --- a/lib/pos_constants.go +++ b/lib/pos_constants.go @@ -23,6 +23,14 @@ func BuildProofOfStakeCutoverValidatorBLSPrivateKey() (*bls.PrivateKey, error) { return (&bls.PrivateKey{}).FromString(proofOfStakeCutoverValidatorBLSPrivateKeyHex) } +func BuildProofOfStakeCutoverValidatorBLSPublicKey() (*bls.PublicKey, error) { + privateKey, err := BuildProofOfStakeCutoverValidatorBLSPrivateKey() + if err != nil { + return nil, errors.Wrapf(err, "BuildProofOfStakeCutoverValidatorBLSPublicKey: Problem parsing BLS private key") + } + return privateKey.PublicKey(), nil +} + func BuildProofOfStakeCutoverValidator() (consensus.Validator, error) { // Parse the BLS private key blsPrivateKey, err := BuildProofOfStakeCutoverValidatorBLSPrivateKey() From bd1067d690e0257a763bc4b6766f3126af4c2a10 Mon Sep 17 00:00:00 2001 From: tholonious 
<99746187+tholonious@users.noreply.github.com> Date: Tue, 14 May 2024 17:35:51 -0400 Subject: [PATCH 714/762] Rename FastHotStuffEventLoop Recevier to fe (#1312) --- consensus/event_loop.go | 368 ++++++++++++++++++++-------------------- consensus/types.go | 8 +- 2 files changed, 186 insertions(+), 190 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 8560e6d97..b78327037 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -39,7 +39,7 @@ func NewFastHotStuffEventLoop() *fastHotStuffEventLoop { // // Given the above, This function updates the tip internally, stores the safe blocks, and re-initializes // all internal data structures that are used to track incoming votes and timeout messages for QC construction. -func (fc *fastHotStuffEventLoop) Init( +func (fe *fastHotStuffEventLoop) Init( crankTimerInterval time.Duration, timeoutBaseDuration time.Duration, genesisQC QuorumCertificate, @@ -48,11 +48,11 @@ func (fc *fastHotStuffEventLoop) Init( currentView uint64, ) error { // Grab the event loop's lock - fc.lock.Lock() - defer fc.lock.Unlock() + fe.lock.Lock() + defer fe.lock.Unlock() // Ensure the event loop is not already running - if fc.status == eventLoopStatusRunning { + if fe.status == eventLoopStatusRunning { return errors.New("FastHotStuffEventLoop.Init: event loop is already running") } @@ -65,10 +65,10 @@ func (fc *fastHotStuffEventLoop) Init( } // Store the genesis QC - fc.genesisQC = genesisQC + fe.genesisQC = genesisQC // Validate the safe blocks and validator lists, and store them - if err := fc.storeBlocks(tip, safeBlocks); err != nil { + if err := fe.storeBlocks(tip, safeBlocks); err != nil { return errors.Wrap(err, "FastHotStuffEventLoop.Init: ") } @@ -76,66 +76,66 @@ func (fc *fastHotStuffEventLoop) Init( if currentView < tip.Block.GetView()+1 { return errors.New("FastHotStuffEventLoop.Init: currentView is lower than the tip block's view") } - fc.currentView = currentView + fe.currentView = 
currentView // Reset QC construction status for the current view - fc.hasCrankTimerRunForCurrentView = false - fc.hasConstructedQCInCurrentView = false + fe.hasCrankTimerRunForCurrentView = false + fe.hasConstructedQCInCurrentView = false // Reset all internal data structures for votes and timeouts - fc.votesSeenByBlockHash = make(map[BlockHashValue]map[string]VoteMessage) - fc.timeoutsSeenByView = make(map[uint64]map[string]TimeoutMessage) + fe.votesSeenByBlockHash = make(map[BlockHashValue]map[string]VoteMessage) + fe.timeoutsSeenByView = make(map[uint64]map[string]TimeoutMessage) // Set the crank timer interval and timeout base duration - fc.crankTimerInterval = crankTimerInterval - fc.timeoutBaseDuration = timeoutBaseDuration + fe.crankTimerInterval = crankTimerInterval + fe.timeoutBaseDuration = timeoutBaseDuration // Update the event loop's status - fc.status = eventLoopStatusInitialized + fe.status = eventLoopStatusInitialized return nil } // GetEvents returns the event loop's external channel for signaling. We need a getter function // to ensure that this struct implements the FastHotStuffEventLoop interface type. -func (fc *fastHotStuffEventLoop) GetEvents() chan *FastHotStuffEvent { - return fc.Events +func (fe *fastHotStuffEventLoop) GetEvents() chan *FastHotStuffEvent { + return fe.Events } // GetCurrentView is a simple getter that returns the event loop's current view. It does not need // to be thread-safe. The caller is expected to use it in a thread-safe manner, at a time when // the view is guaranteed to not change. -func (fc *fastHotStuffEventLoop) GetCurrentView() uint64 { - return fc.currentView +func (fe *fastHotStuffEventLoop) GetCurrentView() uint64 { + return fe.currentView } // AdvanceViewOnTimeout is called when the tip has not changed but the event loop has timed out. This // function advances the view and resets the crank timer and timeout scheduled tasks. 
-func (fc *fastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { +func (fe *fastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { // Grab the event loop's lock - fc.lock.Lock() - defer fc.lock.Unlock() + fe.lock.Lock() + defer fe.lock.Unlock() // Ensure the event loop is running. This guarantees that the chain tip and validator list // have already been set. - if fc.status != eventLoopStatusRunning { + if fe.status != eventLoopStatusRunning { return 0, errors.New("FastHotStuffEventLoop.AdvanceViewOnTimeout: Event loop is not running") } // Advance the view - fc.currentView++ + fe.currentView++ // Reset QC construction status for the current view - fc.hasCrankTimerRunForCurrentView = false - fc.hasConstructedQCInCurrentView = false + fe.hasCrankTimerRunForCurrentView = false + fe.hasConstructedQCInCurrentView = false // Evict all stale votes and timeouts - fc.evictStaleVotesAndTimeouts() + fe.evictStaleVotesAndTimeouts() // Schedule the next crank timer and timeout scheduled tasks - fc.resetScheduledTasks() + fe.resetScheduledTasks() - return fc.currentView, nil + return fe.currentView, nil } // ProcessTipBlock must only be called when the server has accepted a new block, connected it @@ -149,23 +149,23 @@ func (fc *fastHotStuffEventLoop) AdvanceViewOnTimeout() (uint64, error) { // all ancestors of the uncommitted tip that are safe to extend from, and all blocks from forks // that are safe to extend from. This function does not validate the collection of blocks. It // expects the server to know and decide what blocks are safe to extend from. 
-func (fc *fastHotStuffEventLoop) ProcessTipBlock( +func (fe *fastHotStuffEventLoop) ProcessTipBlock( tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList, crankTimerDuration time.Duration, timeoutTimerDuration time.Duration, ) error { // Grab the event loop's lock - fc.lock.Lock() - defer fc.lock.Unlock() + fe.lock.Lock() + defer fe.lock.Unlock() // Ensure the event loop is running - if fc.status != eventLoopStatusRunning { + if fe.status != eventLoopStatusRunning { return errors.New("FastHotStuffEventLoop.ProcessTipBlock: Event loop is not running") } // Validate the safe blocks and validator lists, and store them - if err := fc.storeBlocks(tip, safeBlocks); err != nil { + if err := fe.storeBlocks(tip, safeBlocks); err != nil { return errors.Wrap(err, "FastHotStuffEventLoop.ProcessTipBlock: ") } @@ -178,28 +178,28 @@ func (fc *fastHotStuffEventLoop) ProcessTipBlock( } // We track the current view here so we know which view to time out on later on. - fc.currentView = fc.tip.block.GetView() + 1 + fe.currentView = fe.tip.block.GetView() + 1 // Reset QC construction status for the current view - fc.hasCrankTimerRunForCurrentView = false - fc.hasConstructedQCInCurrentView = false - fc.crankTimerInterval = crankTimerDuration - fc.timeoutBaseDuration = timeoutTimerDuration + fe.hasCrankTimerRunForCurrentView = false + fe.hasConstructedQCInCurrentView = false + fe.crankTimerInterval = crankTimerDuration + fe.timeoutBaseDuration = timeoutTimerDuration // Evict all stale votes and timeouts - fc.evictStaleVotesAndTimeouts() + fe.evictStaleVotesAndTimeouts() // Signal the server that we can vote for the block. The server will decide whether to construct and // broadcast the vote. 
- fc.emitEvent(&FastHotStuffEvent{ + fe.emitEvent(&FastHotStuffEvent{ EventType: FastHotStuffEventTypeVote, - TipBlockHash: fc.tip.block.GetBlockHash(), - TipBlockHeight: fc.tip.block.GetHeight(), - View: fc.tip.block.GetView(), + TipBlockHash: fe.tip.block.GetBlockHash(), + TipBlockHeight: fe.tip.block.GetHeight(), + View: fe.tip.block.GetView(), }) // Schedule the next crank timer and timeout scheduled tasks - fc.resetScheduledTasks() + fe.resetScheduledTasks() return nil } @@ -214,24 +214,24 @@ func (fc *fastHotStuffEventLoop) ProcessTipBlock( // all ancestors of the uncommitted tip that are safe to extend from, and all blocks from forks // that are safe to extend from. This function does not validate the collection of blocks. It // expects the server to know and decide what blocks are safe to extend from. -func (fc *fastHotStuffEventLoop) UpdateSafeBlocks(safeBlocks []BlockWithValidatorList) error { +func (fe *fastHotStuffEventLoop) UpdateSafeBlocks(safeBlocks []BlockWithValidatorList) error { // Grab the event loop's lock - fc.lock.Lock() - defer fc.lock.Unlock() + fe.lock.Lock() + defer fe.lock.Unlock() // Ensure the event loop is running - if fc.status != eventLoopStatusRunning { + if fe.status != eventLoopStatusRunning { return errors.New("FastHotStuffEventLoop.UpdateSafeBlocks: Event loop is not running") } // Fetch the current tip block tipBlock := BlockWithValidatorList{ - Block: fc.tip.block, - ValidatorList: fc.tip.validatorList, + Block: fe.tip.block, + ValidatorList: fe.tip.validatorList, } // Validate the safe blocks and validator lists, and store them - if err := fc.storeBlocks(tipBlock, safeBlocks); err != nil { + if err := fe.storeBlocks(tipBlock, safeBlocks); err != nil { return errors.Wrap(err, "FastHotStuffEventLoop.UpdateSafeBlocks: ") } @@ -241,7 +241,7 @@ func (fc *fastHotStuffEventLoop) UpdateSafeBlocks(safeBlocks []BlockWithValidato // storeBlocks is a helper function that validates the provided blocks, validator lists, and stores them. 
// It must be called while holding the event loop's lock. -func (fc *fastHotStuffEventLoop) storeBlocks(tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { +func (fe *fastHotStuffEventLoop) storeBlocks(tip BlockWithValidatorList, safeBlocks []BlockWithValidatorList) error { // Do a basic integrity check on the tip block and validator list if !isProperlyFormedBlockWithValidatorList(tip) { return errors.New("Invalid tip block or validator list") @@ -256,12 +256,12 @@ func (fc *fastHotStuffEventLoop) storeBlocks(tip BlockWithValidatorList, safeBlo } // Sanity check: the tip block and safe blocks must not have lower views than the genesis QC's view. - if tip.Block.GetView() < fc.genesisQC.GetView() { + if tip.Block.GetView() < fe.genesisQC.GetView() { return errors.New("Tip block view must be greater than or equal to the genesis QC view") } for _, block := range safeBlocks { - if block.Block.GetView() < fc.genesisQC.GetView() { + if block.Block.GetView() < fe.genesisQC.GetView() { return errors.New("Safe block view must be greater than or equal to the genesis QC view") } } @@ -276,14 +276,14 @@ func (fc *fastHotStuffEventLoop) storeBlocks(tip BlockWithValidatorList, safeBlo } // Store the tip block and validator list - fc.tip = blockWithValidatorLookup{ + fe.tip = blockWithValidatorLookup{ block: tip.Block, validatorList: tip.ValidatorList, validatorLookup: collections.ToMap(tip.ValidatorList, validatorToPublicKeyString), } // Store the blocks and validator lists - fc.safeBlocks = collections.Transform(safeBlocks, func(block BlockWithValidatorList) blockWithValidatorLookup { + fe.safeBlocks = collections.Transform(safeBlocks, func(block BlockWithValidatorList) blockWithValidatorLookup { return blockWithValidatorLookup{ block: block.Block, validatorList: block.ValidatorList, @@ -305,14 +305,14 @@ func (fc *fastHotStuffEventLoop) storeBlocks(tip BlockWithValidatorList, safeBlo // // Reference implementation: // 
https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L756 -func (fc *fastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { +func (fe *fastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { // Grab the event loop's lock - fc.lock.Lock() - defer fc.lock.Unlock() + fe.lock.Lock() + defer fe.lock.Unlock() // Ensure the event loop is running. This guarantees that the chain tip and validator list // have already been set. - if fc.status != eventLoopStatusRunning { + if fe.status != eventLoopStatusRunning { return errors.New("FastHotStuffEventLoop.ProcessValidatorVote: Event loop is not running") } @@ -322,13 +322,13 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { } // Check if the vote is stale - if isStaleView(fc.currentView, vote.GetView()) { + if isStaleView(fe.currentView, vote.GetView()) { return errors.Errorf("FastHotStuffEventLoop.ProcessValidatorVote: Vote has a stale view %d", vote.GetView()) } // Check if the public key has already voted for this view. The protocol does not allow // a validator to vote for more than one block in a given view. - if fc.hasVotedForView(vote.GetPublicKey(), vote.GetView()) { + if fe.hasVotedForView(vote.GetPublicKey(), vote.GetView()) { return errors.Errorf( "FastHotStuffEventLoop.ProcessValidatorVote: validator %s has already voted for view %d", vote.GetPublicKey().ToString(), @@ -338,7 +338,7 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { // Check if the public key has already timed out for this view. The protocol does not allow // for a validator to vote for a block in a view that it has already timed out for. 
- if fc.hasTimedOutForView(vote.GetPublicKey(), vote.GetView()) { + if fe.hasTimedOutForView(vote.GetPublicKey(), vote.GetView()) { return errors.Errorf( "FastHotStuffEventLoop.ProcessValidatorVote: validator %s has already timed out for view %d", vote.GetPublicKey().ToString(), @@ -355,25 +355,25 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { } // Cache the vote in case we need it for later - fc.storeVote(voteSignaturePayload, vote) + fe.storeVote(voteSignaturePayload, vote) // Check if the crank timer has elapsed or the event loop has constructed a QC in the current view. // If so, then there's nothing more to do. - if !fc.hasCrankTimerRunForCurrentView || fc.hasConstructedQCInCurrentView { + if !fe.hasCrankTimerRunForCurrentView || fe.hasConstructedQCInCurrentView { return nil } // Check if the vote is for the chain tip. If not, then there's nothing more to do. - if !IsEqualBlockHash(vote.GetBlockHash(), fc.tip.block.GetBlockHash()) { + if !IsEqualBlockHash(vote.GetBlockHash(), fe.tip.block.GetBlockHash()) { return nil } // Check if we have a super-majority vote for the chain tip. - if voteQCEvent := fc.tryConstructVoteQCInCurrentView(); voteQCEvent != nil { + if voteQCEvent := fe.tryConstructVoteQCInCurrentView(); voteQCEvent != nil { // Signal the server that we can construct a QC for the chain tip, and mark that we have // constructed a QC for the current view. 
- fc.hasConstructedQCInCurrentView = true - fc.emitEvent(voteQCEvent) + fe.hasConstructedQCInCurrentView = true + fe.emitEvent(voteQCEvent) } return nil @@ -390,14 +390,14 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorVote(vote VoteMessage) error { // // Reference implementation: // https://github.com/deso-protocol/hotstuff_pseudocode/blob/6409b51c3a9a953b383e90619076887e9cebf38d/fast_hotstuff_bls.go#L958 -func (fc *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) error { +func (fe *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) error { // Grab the event loop's lock - fc.lock.Lock() - defer fc.lock.Unlock() + fe.lock.Lock() + defer fe.lock.Unlock() // Ensure the event loop is running. This guarantees that the chain tip and validator list // have already been set. - if fc.status != eventLoopStatusRunning { + if fe.status != eventLoopStatusRunning { return errors.New("FastHotStuffEventLoop.ProcessValidatorTimeout: Event loop is not running") } @@ -407,13 +407,13 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) } // Check if the timeout is stale - if isStaleView(fc.currentView, timeout.GetView()) { + if isStaleView(fe.currentView, timeout.GetView()) { return errors.Errorf("FastHotStuffEventLoop.ProcessValidatorTimeout: Timeout has a stale view %d", timeout.GetView()) } // Check if the public key has already voted for this view. The protocol does not allow // a validator to time out for a view it has already voted on. - if fc.hasVotedForView(timeout.GetPublicKey(), timeout.GetView()) { + if fe.hasVotedForView(timeout.GetPublicKey(), timeout.GetView()) { return errors.Errorf( "FastHotStuffEventLoop.ProcessValidatorTimeout: validator %s has already voted for view %d", timeout.GetPublicKey().ToString(), @@ -423,7 +423,7 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) // Check if the public key has already timed out for this view. 
The protocol does not allow // for a validator to time out more than once for the same view. - if fc.hasTimedOutForView(timeout.GetPublicKey(), timeout.GetView()) { + if fe.hasTimedOutForView(timeout.GetPublicKey(), timeout.GetView()) { return errors.Errorf( "FastHotStuffEventLoop.ProcessValidatorTimeout: validator %s has already timed out for view %d", timeout.GetPublicKey().ToString(), @@ -437,7 +437,7 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) // This can happen if the timeout's creator is malicious, or if our node is far enough behind the // blockchain to not have seen the high QC before other nodes have timed out. In either case, the // simple and safe option is to reject the timeout and move on. - isSafeBlock, _, validatorList, validatorLookup := fc.fetchSafeBlockInfo(timeout.GetHighQC().GetBlockHash()) + isSafeBlock, _, validatorList, validatorLookup := fe.fetchSafeBlockInfo(timeout.GetHighQC().GetBlockHash()) if !isSafeBlock { return errors.Errorf( "FastHotStuffEventLoop.ProcessValidatorTimeout: Timeout from public key %s has an unknown high QC with view %d", @@ -469,7 +469,7 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) // Verify the high QC in the timeout message. The highQC is valid if it exactly matches the genesis QC or it is a // valid QC signed by a super-majority of validators for a safe block. 
- if !IsEqualQC(timeout.GetHighQC(), fc.genesisQC) && !IsValidSuperMajorityQuorumCertificate(timeout.GetHighQC(), validatorList) { + if !IsEqualQC(timeout.GetHighQC(), fe.genesisQC) && !IsValidSuperMajorityQuorumCertificate(timeout.GetHighQC(), validatorList) { return errors.Errorf( "FastHotStuffEventLoop.ProcessValidatorTimeout: Invalid high QC received in timeout message from validator %s for view %d", timeout.GetPublicKey().ToString(), @@ -478,99 +478,99 @@ func (fc *fastHotStuffEventLoop) ProcessValidatorTimeout(timeout TimeoutMessage) } // Cache the timeout message in case we need it for later - fc.storeTimeout(timeout) + fe.storeTimeout(timeout) // Check if the crank timer has elapsed or the event loop has constructed a QC in the current view. // If so, then there's nothing more to do. - if !fc.hasCrankTimerRunForCurrentView || fc.hasConstructedQCInCurrentView { + if !fe.hasCrankTimerRunForCurrentView || fe.hasConstructedQCInCurrentView { return nil } // Check if the timeout is not for the previous view. If not, then there's nothing more to do. - if timeout.GetView() != fc.currentView-1 { + if timeout.GetView() != fe.currentView-1 { return nil } // Check if we have a super-majority of stake has timed out of the previous view. If so, we signal // the server that we can construct a timeoutQC in the current view. - if timeoutQCEvent := fc.tryConstructTimeoutQCInCurrentView(); timeoutQCEvent != nil { + if timeoutQCEvent := fe.tryConstructTimeoutQCInCurrentView(); timeoutQCEvent != nil { // Signal the server that we can construct a timeout QC for the current view, and mark // that we have constructed a QC for the current view. - fc.hasConstructedQCInCurrentView = true - fc.emitEvent(timeoutQCEvent) + fe.hasConstructedQCInCurrentView = true + fe.emitEvent(timeoutQCEvent) } return nil } // Sets the initial times for the crank timer and timeouts and starts scheduled tasks. 
-func (fc *fastHotStuffEventLoop) Start() { - fc.lock.Lock() - defer fc.lock.Unlock() +func (fe *fastHotStuffEventLoop) Start() { + fe.lock.Lock() + defer fe.lock.Unlock() // Check if the event loop is either running or uninitialized. // If it's running or uninitialized, then there's nothing to do here. - if fc.status != eventLoopStatusInitialized { + if fe.status != eventLoopStatusInitialized { return } // Update the event loop's status to mark it as running. - fc.status = eventLoopStatusRunning + fe.status = eventLoopStatusRunning // Set the initial crank timer and timeout scheduled tasks - fc.resetScheduledTasks() + fe.resetScheduledTasks() } -func (fc *fastHotStuffEventLoop) Stop() { - fc.lock.Lock() - defer fc.lock.Unlock() +func (fe *fastHotStuffEventLoop) Stop() { + fe.lock.Lock() + defer fe.lock.Unlock() // Check if the event loop is no longer running. If it's not running // we can simply return here. - if fc.status != eventLoopStatusRunning { + if fe.status != eventLoopStatusRunning { return } // Cancel the crank timer and timeout scheduled tasks, if any. - fc.crankTimerTask.Cancel() - fc.nextTimeoutTask.Cancel() + fe.crankTimerTask.Cancel() + fe.nextTimeoutTask.Cancel() // Update the event loop's status so it is no longer marked as running. 
- fc.status = eventLoopStatusInitialized + fe.status = eventLoopStatusInitialized } -func (fc *fastHotStuffEventLoop) IsInitialized() bool { - fc.lock.RLock() - defer fc.lock.RUnlock() +func (fe *fastHotStuffEventLoop) IsInitialized() bool { + fe.lock.RLock() + defer fe.lock.RUnlock() - return fc.status != eventLoopStatusNotInitialized + return fe.status != eventLoopStatusNotInitialized } -func (fc *fastHotStuffEventLoop) IsRunning() bool { - fc.lock.RLock() - defer fc.lock.RUnlock() +func (fe *fastHotStuffEventLoop) IsRunning() bool { + fe.lock.RLock() + defer fe.lock.RUnlock() - return fc.status == eventLoopStatusRunning + return fe.status == eventLoopStatusRunning } -func (fc *fastHotStuffEventLoop) ToString() string { - fc.lock.RLock() - defer fc.lock.RUnlock() +func (fe *fastHotStuffEventLoop) ToString() string { + fe.lock.RLock() + defer fe.lock.RUnlock() - if fc.status != eventLoopStatusRunning { + if fe.status != eventLoopStatusRunning { return "FastHotStuffEventLoop is not running" } // Get the Tip Block - tipBlock := fc.tip.block + tipBlock := fe.tip.block // Get the votes for the tip tipBlockVotePayload := GetVoteSignaturePayload(tipBlock.GetView(), tipBlock.GetBlockHash()) - votesForTip := fc.votesSeenByBlockHash[tipBlockVotePayload] + votesForTip := fe.votesSeenByBlockHash[tipBlockVotePayload] // Get the timeouts for the current and previous view - timeoutsForCurrentView := fc.timeoutsSeenByView[fc.currentView] - timeoutsForPreviousView := fc.timeoutsSeenByView[fc.currentView-1] + timeoutsForCurrentView := fe.timeoutsSeenByView[fe.currentView] + timeoutsForPreviousView := fe.timeoutsSeenByView[fe.currentView-1] return fmt.Sprintf( "Printing FastHotStuffEventLoop state: "+ @@ -578,14 +578,14 @@ func (fc *fastHotStuffEventLoop) ToString() string { "\n Tip Height: %d, Tip Hash: %v, Tip View: %d, Num Safe Blocks: %d"+ "\n Crank Duration: %v, Timeout Interval: %v"+ "\n Votes For Tip: %d, Timeouts For Current View: %d, Timeouts For Prev View: %d", - 
fc.status, - fc.currentView, + fe.status, + fe.currentView, tipBlock.GetHeight(), tipBlock.GetBlockHash(), tipBlock.GetView(), - len(fc.safeBlocks), - fc.crankTimerTask.GetDuration(), - fc.nextTimeoutTask.GetDuration(), + len(fe.safeBlocks), + fe.crankTimerTask.GetDuration(), + fe.nextTimeoutTask.GetDuration(), len(votesForTip), len(timeoutsForCurrentView), len(timeoutsForPreviousView), @@ -594,71 +594,71 @@ func (fc *fastHotStuffEventLoop) ToString() string { // resetScheduledTasks recomputes the nextBlockConstructionTimeStamp and nextTimeoutTimeStamp // values, and reschedules the crank timer and timeout tasks. -func (fc *fastHotStuffEventLoop) resetScheduledTasks() { +func (fe *fastHotStuffEventLoop) resetScheduledTasks() { // Compute the next timeout ETA. We use exponential back-off for timeouts when there are // multiple consecutive timeouts. We use the difference between the current view and the // chain tip's view to determine this. The current view can only drift from the chain tip's // view as a result of timeouts. This guarantees that the number of consecutive timeouts is // always: max(currentView - tip.block.GetView() - 1, 0). - timeoutDuration := fc.timeoutBaseDuration + timeoutDuration := fe.timeoutBaseDuration - // Check if we have timed out at for the last n views. If so, we apply exponential + // Check if we have timed out for the last n views. If so, we apply exponential // back-off to the timeout base duration. - if fc.tip.block.GetView() < fc.currentView-1 { + if fe.tip.block.GetView() < fe.currentView-1 { // Note, there is no risk of underflow here because the following is guaranteed: // currentView > tip.block.GetView() + 1. 
- numTimeouts := fc.currentView - fc.tip.block.GetView() - 1 + numTimeouts := fe.currentView - fe.tip.block.GetView() - 1 // Compute the exponential back-off: nextTimeoutDuration * 2^numTimeouts - timeoutDuration = fc.timeoutBaseDuration * time.Duration(powerOfTwo(numTimeouts, maxConsecutiveTimeouts)) + timeoutDuration = fe.timeoutBaseDuration * time.Duration(powerOfTwo(numTimeouts, maxConsecutiveTimeouts)) } // Schedule the next crank timer task. This will run with currentView param. - fc.crankTimerTask.Schedule(fc.crankTimerInterval, fc.currentView, fc.onCrankTimerTaskExecuted) + fe.crankTimerTask.Schedule(fe.crankTimerInterval, fe.currentView, fe.onCrankTimerTaskExecuted) // Schedule the next timeout task. This will run with currentView param. - fc.nextTimeoutTask.Schedule(timeoutDuration, fc.currentView, fc.onTimeoutScheduledTaskExecuted) + fe.nextTimeoutTask.Schedule(timeoutDuration, fe.currentView, fe.onTimeoutScheduledTaskExecuted) } // When this function is triggered, it means that we have reached the crank timer // time ETA for blockConstructionView. If we have a QC or timeout QC for the view, then we // signal the server. -func (fc *fastHotStuffEventLoop) onCrankTimerTaskExecuted(blockConstructionView uint64) { - fc.lock.Lock() - defer fc.lock.Unlock() +func (fe *fastHotStuffEventLoop) onCrankTimerTaskExecuted(blockConstructionView uint64) { + fe.lock.Lock() + defer fe.lock.Unlock() // Check if the event loop is running. If it's not running, then there's nothing // to do here. - if fc.status != eventLoopStatusRunning { + if fe.status != eventLoopStatusRunning { return } // Check for race conditions where the view advanced at the exact moment this task began // or we have already signaled for this view. If so, then there's nothing to do here. 
- if fc.currentView != blockConstructionView { + if fe.currentView != blockConstructionView { return } // Mark that the crank timer has elapsed - fc.hasCrankTimerRunForCurrentView = true + fe.hasCrankTimerRunForCurrentView = true // Check if the conditions are met to construct a QC from votes for the chain tip. If so, // we send a signal to the server and cancel the crank timer task. The server will // reschedule the task when it advances the view. - if voteQCEvent := fc.tryConstructVoteQCInCurrentView(); voteQCEvent != nil { + if voteQCEvent := fe.tryConstructVoteQCInCurrentView(); voteQCEvent != nil { // Signal the server that we can construct a QC for the chain tip - fc.hasConstructedQCInCurrentView = true - fc.emitEvent(voteQCEvent) + fe.hasConstructedQCInCurrentView = true + fe.emitEvent(voteQCEvent) return } // Check if we have enough timeouts to build an aggregate QC for the previous view. If so, // we send a signal to the server and cancel all scheduled tasks. - if timeoutQCEvent := fc.tryConstructTimeoutQCInCurrentView(); timeoutQCEvent != nil { + if timeoutQCEvent := fe.tryConstructTimeoutQCInCurrentView(); timeoutQCEvent != nil { // Signal the server that we can construct a timeout QC for the current view - fc.hasConstructedQCInCurrentView = true - fc.emitEvent(timeoutQCEvent) + fe.hasConstructedQCInCurrentView = true + fe.emitEvent(timeoutQCEvent) return } } @@ -669,22 +669,22 @@ func (fc *fastHotStuffEventLoop) onCrankTimerTaskExecuted(blockConstructionView // the signers list and aggregate signature that can be used to construct the QC. // // This function must be called while holding the event loop's lock. -func (fc *fastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuffEvent { +func (fe *fastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuffEvent { // If currentView != tipBlock.View + 1, then we have timed out at some point, and can no longer // construct a block with a QC of votes for the tip block. 
- tipBlock := fc.tip.block - if fc.currentView != tipBlock.GetView()+1 { + tipBlock := fe.tip.block + if fe.currentView != tipBlock.GetView()+1 { return nil } // Fetch the validator list at the tip. - validatorList := fc.tip.validatorList + validatorList := fe.tip.validatorList // Compute the chain tip's signature payload. voteSignaturePayload := GetVoteSignaturePayload(tipBlock.GetView(), tipBlock.GetBlockHash()) // Fetch the validator votes for the tip block. - votesByValidator := fc.votesSeenByBlockHash[voteSignaturePayload] + votesByValidator := fe.votesSeenByBlockHash[voteSignaturePayload] // Compute the total stake and total stake with votes totalStake := uint256.NewInt(0) @@ -733,12 +733,12 @@ func (fc *fastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff // Happy path. Construct the QC and return as an event to signal to the server. return &FastHotStuffEvent{ EventType: FastHotStuffEventTypeConstructVoteQC, // The event type - View: fc.currentView, // The current view in which we can construct a block - TipBlockHash: fc.tip.block.GetBlockHash(), // Block hash for the tip, which we are extending from - TipBlockHeight: fc.tip.block.GetHeight(), // Block height for the tip, which we are extending from + View: fe.currentView, // The current view in which we can construct a block + TipBlockHash: fe.tip.block.GetBlockHash(), // Block hash for the tip, which we are extending from + TipBlockHeight: fe.tip.block.GetHeight(), // Block height for the tip, which we are extending from QC: &quorumCertificate{ - blockHash: fc.tip.block.GetBlockHash(), // Block hash for the tip, which we are extending from - view: fc.tip.block.GetView(), // The view from the tip block. This is always fc.currentView - 1 + blockHash: fe.tip.block.GetBlockHash(), // Block hash for the tip, which we are extending from + view: fe.tip.block.GetView(), // The view from the tip block. 
This is always fe.currentView - 1 aggregatedSignature: &aggregatedSignature{ signersList: signersList, // The signers list who voted on the tip block signature: aggregateSignature, // Aggregated signature from votes on the tip block @@ -755,12 +755,12 @@ func (fc *fastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff // to construct the timeout QC. // // This function must be called while holding the consensus instance's lock. -func (fc *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotStuffEvent { +func (fe *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotStuffEvent { // Fetch all timeouts for the previous view. All timeout messages for a view are aggregated and // proposed in the next view. So if we want to propose a timeout QC in the current view, we need // to aggregate timeouts from the previous one. - timeoutsByValidator := fc.timeoutsSeenByView[fc.currentView-1] + timeoutsByValidator := fe.timeoutsSeenByView[fe.currentView-1] // Tracks the highQC from validators as we go along. var validatorsHighQC QuorumCertificate @@ -772,7 +772,7 @@ func (fc *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt // happen, but may be possible in the event we receive a timeout message at the same time the block // becomes unsafe to extend from (ex: it's part of an stale reorg). We check for the edge case here to // be 100% safe. - isSafeBlock, _, _, validatorLookup := fc.fetchSafeBlockInfo(timeout.GetHighQC().GetBlockHash()) + isSafeBlock, _, _, validatorLookup := fe.fetchSafeBlockInfo(timeout.GetHighQC().GetBlockHash()) if !isSafeBlock { continue } @@ -795,7 +795,7 @@ func (fc *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt // Fetch the validator list for the block height of the high QC. This lookup is guaranteed to succeed // because it succeeded above. 
- ok, safeBlock, validatorList, _ := fc.fetchSafeBlockInfo(validatorsHighQC.GetBlockHash()) + ok, safeBlock, validatorList, _ := fe.fetchSafeBlockInfo(validatorsHighQC.GetBlockHash()) if !ok { return nil } @@ -854,12 +854,12 @@ func (fc *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt // Happy path return &FastHotStuffEvent{ EventType: FastHotStuffEventTypeConstructTimeoutQC, // The event type - View: fc.currentView, // The view that the timeout QC is proposed in + View: fe.currentView, // The view that the timeout QC is proposed in TipBlockHash: validatorsHighQC.GetBlockHash(), // The block hash that we extend from TipBlockHeight: safeBlock.GetHeight(), // The block height that we extend from QC: validatorsHighQC, // The high QC aggregated from the timeout messages AggregateQC: &aggregateQuorumCertificate{ - view: fc.currentView - 1, // The timed out view is always the previous view + view: fe.currentView - 1, // The timed out view is always the previous view highQC: validatorsHighQC, // The high QC aggregated from the timeout messages highQCViews: highQCViews, // The high view for each validator who timed out aggregatedSignature: &aggregatedSignature{ @@ -873,33 +873,33 @@ func (fc *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt // When this function is triggered, it means that we have reached out the timeout ETA for the // timedOutView. In the event of a timeout, we signal the server that we are ready to time out // and cancel the timeout task. -func (fc *fastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uint64) { - fc.lock.Lock() - defer fc.lock.Unlock() +func (fe *fastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uint64) { + fe.lock.Lock() + defer fe.lock.Unlock() // Check if the event loop is running. If it's not running, then there's nothing // to do here. 
- if fc.status != eventLoopStatusRunning { + if fe.status != eventLoopStatusRunning { return } // Check if the timed out view is stale. If it's stale, then there's nothing to do here. // The view may be stale in the race condition where the view advanced at the exact moment // this task began to execute and wait for the event loop's lock at the top of this function. - if fc.currentView != timedOutView { + if fe.currentView != timedOutView { return } // Signal the server that we are ready to time out - fc.emitEvent(&FastHotStuffEvent{ + fe.emitEvent(&FastHotStuffEvent{ EventType: FastHotStuffEventTypeTimeout, // The timeout event type View: timedOutView, // The view we timed out - TipBlockHash: fc.tip.block.GetBlockHash(), // The last block we saw - TipBlockHeight: fc.tip.block.GetHeight(), // The last block we saw + TipBlockHash: fe.tip.block.GetBlockHash(), // The last block we saw + TipBlockHeight: fe.tip.block.GetHeight(), // The last block we saw }) // Cancel the timeout task. The server will reschedule it when it advances the view. - fc.nextTimeoutTask.Cancel() + fe.nextTimeoutTask.Cancel() } // Evict all locally stored votes and timeout messages with stale views. We can safely use the current @@ -913,40 +913,40 @@ func (fc *fastHotStuffEventLoop) onTimeoutScheduledTaskExecuted(timedOutView uin // // Any votes or timeouts with a view that's less than currentView - 1 are stale because they cannot // be used in the next block or any future blocks. -func (fc *fastHotStuffEventLoop) evictStaleVotesAndTimeouts() { +func (fe *fastHotStuffEventLoop) evictStaleVotesAndTimeouts() { // Evict stale vote messages - for blockHash, voters := range fc.votesSeenByBlockHash { + for blockHash, voters := range fe.votesSeenByBlockHash { for _, vote := range voters { - if isStaleView(fc.currentView, vote.GetView()) { + if isStaleView(fe.currentView, vote.GetView()) { // Each block is proposed at a known view, and has an immutable block hash. 
Votes are signed on the // tuple (blockhash, view). So, if any vote message for the blockhash has a view that satisfies this // condition, then it's guaranteed that all votes for the same block hash have satisfy this condition. // We can safely evict all votes for this block hash. - delete(fc.votesSeenByBlockHash, blockHash) + delete(fe.votesSeenByBlockHash, blockHash) break } } } // Evict stale timeout messages - for view := range fc.timeoutsSeenByView { - if isStaleView(fc.currentView, view) { - delete(fc.timeoutsSeenByView, view) + for view := range fe.timeoutsSeenByView { + if isStaleView(fe.currentView, view) { + delete(fe.timeoutsSeenByView, view) } } } -func (fc *fastHotStuffEventLoop) storeVote(signaturePayload [32]byte, vote VoteMessage) { - votesForBlockHash, ok := fc.votesSeenByBlockHash[signaturePayload] +func (fe *fastHotStuffEventLoop) storeVote(signaturePayload [32]byte, vote VoteMessage) { + votesForBlockHash, ok := fe.votesSeenByBlockHash[signaturePayload] if !ok { votesForBlockHash = make(map[string]VoteMessage) - fc.votesSeenByBlockHash[signaturePayload] = votesForBlockHash + fe.votesSeenByBlockHash[signaturePayload] = votesForBlockHash } votesForBlockHash[vote.GetPublicKey().ToString()] = vote } -func (fc *fastHotStuffEventLoop) hasVotedForView(publicKey *bls.PublicKey, view uint64) bool { +func (fe *fastHotStuffEventLoop) hasVotedForView(publicKey *bls.PublicKey, view uint64) bool { // This is an O(n) operation that scales with the number of block hashes that we have stored // votes for. In practice, n will be very small because we evict stale votes, and server.go // will be smart about not processing votes for views we won't be the block proposer for. 
@@ -958,7 +958,7 @@ func (fc *fastHotStuffEventLoop) hasVotedForView(publicKey *bls.PublicKey, view publicKeyString := publicKey.ToString() // Search for the public key's votes across all existing block hashes - for _, votesForBlock := range fc.votesSeenByBlockHash { + for _, votesForBlock := range fe.votesSeenByBlockHash { vote, ok := votesForBlock[publicKeyString] if ok && vote.GetView() == view { return true @@ -968,18 +968,18 @@ func (fc *fastHotStuffEventLoop) hasVotedForView(publicKey *bls.PublicKey, view return false } -func (fc *fastHotStuffEventLoop) storeTimeout(timeout TimeoutMessage) { - timeoutsForView, ok := fc.timeoutsSeenByView[timeout.GetView()] +func (fe *fastHotStuffEventLoop) storeTimeout(timeout TimeoutMessage) { + timeoutsForView, ok := fe.timeoutsSeenByView[timeout.GetView()] if !ok { timeoutsForView = make(map[string]TimeoutMessage) - fc.timeoutsSeenByView[timeout.GetView()] = timeoutsForView + fe.timeoutsSeenByView[timeout.GetView()] = timeoutsForView } timeoutsForView[timeout.GetPublicKey().ToString()] = timeout } -func (fc *fastHotStuffEventLoop) hasTimedOutForView(publicKey *bls.PublicKey, view uint64) bool { - timeoutsForView, ok := fc.timeoutsSeenByView[view] +func (fe *fastHotStuffEventLoop) hasTimedOutForView(publicKey *bls.PublicKey, view uint64) bool { + timeoutsForView, ok := fe.timeoutsSeenByView[view] if !ok { return false } @@ -990,7 +990,7 @@ func (fc *fastHotStuffEventLoop) hasTimedOutForView(publicKey *bls.PublicKey, vi return ok } -func (fc *fastHotStuffEventLoop) fetchSafeBlockInfo(blockHash BlockHash) ( +func (fe *fastHotStuffEventLoop) fetchSafeBlockInfo(blockHash BlockHash) ( _isSafeBlock bool, _safeBlock Block, _validatorList []Validator, @@ -1000,7 +1000,7 @@ func (fc *fastHotStuffEventLoop) fetchSafeBlockInfo(blockHash BlockHash) ( // number of uncommitted blocks in the blockchain. During steady stake, it will have a size of 3 blocks // (one committed, two uncommitted). 
In the worse case, where the network has an unlucky series of
 	// timeout -> block -> timeout -> block,... it can still be expected to have < 10 blocks.
-	for _, block := range fc.safeBlocks {
+	for _, block := range fe.safeBlocks {
 		if IsEqualBlockHash(block.block.GetBlockHash(), blockHash) {
 			return true, block.block, block.validatorList, block.validatorLookup
 		}
@@ -1014,8 +1014,8 @@ func (fc *fastHotStuffEventLoop) fetchSafeBlockInfo(blockHash BlockHash) (
 // deadlock when a thread holding the event loop's lock is blocked from emitting an event because another
 // thread that needs to read an emitted event is blocked from doing so because it needs to first operate
 // on the event loop.
-func (fc *fastHotStuffEventLoop) emitEvent(event *FastHotStuffEvent) {
-	go func() { fc.Events <- event }()
+func (fe *fastHotStuffEventLoop) emitEvent(event *FastHotStuffEvent) {
+	go func() { fe.Events <- event }()
 }
 
 func isStaleView(currentView uint64, testView uint64) bool {
diff --git a/consensus/types.go b/consensus/types.go
index 252809f82..2c9130f26 100644
--- a/consensus/types.go
+++ b/consensus/types.go
@@ -148,12 +148,8 @@ type BlockWithValidatorList struct {
 	ValidatorList []Validator
 }
 
-// We want a large buffer for the signal channels to ensure threads don't block when trying to push new
-// signals.
-//
-// TODO: is a size of 10000 enough? If we want to bullet-proof this, we could back it by a slice as a
-// secondary buffer. That seems unnecessary since every channel will only have signals pushed by a single
-// producer thread.
+// Any large number is sufficient to hold the backlog of signals to be sent to the server. In practice there will
+// be 0 - 2 signals at most in this buffer at any given time.
const signalChannelBufferSize = 10000
 
 // An instance of FastHotStuffEventLoop is a self-contained module that represents a single node running

From 01f973e2d627f2860bc5500067e44e258a9de720 Mon Sep 17 00:00:00 2001
From: tholonious <99746187+tholonious@users.noreply.github.com>
Date: Tue, 14 May 2024 17:38:03 -0400
Subject: [PATCH 715/762] Delete Redundant Signature Verification in QC Construction Functions (#1315)

---
 consensus/event_loop.go | 13 -------------
 1 file changed, 13 deletions(-)

diff --git a/consensus/event_loop.go b/consensus/event_loop.go
index b78327037..cacc8c80c 100644
--- a/consensus/event_loop.go
+++ b/consensus/event_loop.go
@@ -705,11 +705,6 @@ func (fe *fastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff
 			continue
 		}
 
-		// Verify the vote signature
-		if !isValidSignatureSinglePublicKey(vote.GetPublicKey(), vote.GetSignature(), voteSignaturePayload[:]) {
-			continue
-		}
-
 		// Track the vote's signature, stake, and place in the validator list
 		totalVotingStake = uint256.NewInt(0).Add(totalVotingStake, validator.GetStakeAmount())
 		signersList.Set(ii, true)
@@ -825,14 +820,6 @@ func (fe *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt
 			continue
 		}
 
-		// Compute the signature payload that the validator should have signed
-		signaturePayload := GetTimeoutSignaturePayload(timeout.GetView(), timeout.GetHighQC().GetView())
-
-		// Verify the timeout signature
-		if !isValidSignatureSinglePublicKey(timeout.GetPublicKey(), timeout.GetSignature(), signaturePayload[:]) {
-			continue
-		}
-
 		// Track the signatures, timed out stake, and high QC views for the validator
 		totalTimedOutStake = uint256.NewInt(0).Add(totalTimedOutStake, validator.GetStakeAmount())
 		signersList.Set(ii, true)

From c307426fb9057591e5169a7a6db2f6c47fae3960 Mon Sep 17 00:00:00 2001
From: tholonious <99746187+tholonious@users.noreply.github.com>
Date: Tue, 14 May 2024 17:40:31 -0400
Subject: [PATCH 716/762] Delete Unused RandomSeedHash Functions (#1316)

---
lib/constants.go | 12 ++++++------ lib/pos_random_seed.go | 17 ----------------- 2 files changed, 6 insertions(+), 23 deletions(-) diff --git a/lib/constants.go b/lib/constants.go index e1a444ebc..7a1877d81 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -796,7 +796,7 @@ type DeSoParams struct { // DefaultMempoolCongestionFactorBasisPoints and DefaultMempoolPastBlocksCongestionFactorBasisPoints are the default values // for GlobalParams.MempoolCongestionFactorBasisPoints and GlobalParams.DefaultMempoolPastBlocksCongestionFactorBasisPoints. - /// See comments in DeSoMainnetParams for a description of their usage. + // See comments in DeSoMainnetParams for a description of their usage. DefaultMempoolCongestionFactorBasisPoints uint64 DefaultMempoolPastBlocksCongestionFactorBasisPoints uint64 @@ -1334,19 +1334,19 @@ var DeSoMainnetParams = DeSoParams{ DefaultMempoolFeeEstimatorNumPastBlocks: 50, // The maximum size of blocks for PoS. - DefaultMaxBlockSizeBytesPoS: 32000, // 32KB TODO: verify this is a sane value. + DefaultMaxBlockSizeBytesPoS: 32000, // The soft maximum size of blocks for PoS. - DefaultSoftMaxBlockSizeBytesPoS: 16000, // 16KB TODO: verify this is a sane value. + DefaultSoftMaxBlockSizeBytesPoS: 16000, // The maximum size for a single txn in PoS. - DefaultMaxTxnSizeBytesPoS: 25000, // 25KB TODO: verify this is a sane value. + DefaultMaxTxnSizeBytesPoS: 25000, // The interval between producing blocks. - DefaultBlockProductionIntervalMillisecondsPoS: 1500, // 1.5s TODO: verify this is a sane value. + DefaultBlockProductionIntervalMillisecondsPoS: 1500, // The interval between timing out a view. - DefaultTimeoutIntervalMillisecondsPoS: 30000, // 30s TODO: verify this is a sane value. + DefaultTimeoutIntervalMillisecondsPoS: 30000, // The peer handshake certificate timeout. 
HandshakeTimeoutMicroSeconds: uint64(900000000), diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index acc97f5b2..fe318ee69 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -2,7 +2,6 @@ package lib import ( "bytes" - "io" "golang.org/x/crypto/sha3" @@ -45,22 +44,6 @@ func (randomSeedHash *RandomSeedHash) FromBytes(randomSeedHashBytes []byte) (*Ra return randomSeedHash, nil } -func EncodeRandomSeedHash(randomSeedHash *RandomSeedHash) []byte { - return EncodeByteArray(randomSeedHash.ToBytes()) -} - -func DecodeRandomSeedHash(rr io.Reader) (*RandomSeedHash, error) { - randomSeedHashBytes, err := DecodeByteArray(rr) - if err != nil { - return nil, errors.Wrapf(err, "DecodeRandomSeedHash: problem reading RandomSeedHash from bytes: ") - } - return (&RandomSeedHash{}).FromBytes(randomSeedHashBytes) -} - -func (randomSeedHash *RandomSeedHash) isEmpty() bool { - return randomSeedHash == nil || randomSeedHash.Eq(&RandomSeedHash{}) -} - func GenerateNextRandomSeedSignature(currentRandomSeedHash *RandomSeedHash, signerPrivateKey *bls.PrivateKey) (*bls.Signature, error) { // This function generates a RandomSeedSignature by signing the CurrentRandomSeedHash // with the provided bls.PrivateKey. 
This signature is deterministic: given the same From 4eeeb1f090f703fdcaa68cd6470c3cc8143aa13e Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 14 May 2024 18:41:12 -0400 Subject: [PATCH 717/762] Clean up FastHotStuffEvent Validations (#1317) --- consensus/utils.go | 28 ++++++++++++---------------- lib/pos_consensus.go | 8 ++++---- 2 files changed, 16 insertions(+), 20 deletions(-) diff --git a/consensus/utils.go b/consensus/utils.go index 8328c1774..966adfcd3 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -14,41 +14,37 @@ import ( ) func IsProperlyFormedConstructVoteQCEvent(event *FastHotStuffEvent) bool { - return event != nil && // Event non-nil + return isProperlyFormedGenericEvent(event) && event.EventType == FastHotStuffEventTypeConstructVoteQC && // Event type is QC construction - event.View > 0 && // The view the block was proposed in is non-zero - event.TipBlockHeight > 0 && // Tip block height is non-zero - !isInterfaceNil(event.TipBlockHash) && // Tip block hash is non-nil !isInterfaceNil(event.QC) // The high QC is non-nil } func IsProperlyFormedConstructTimeoutQCEvent(event *FastHotStuffEvent) bool { - return event != nil && // Event non-nil + return isProperlyFormedGenericEvent(event) && event.EventType == FastHotStuffEventTypeConstructTimeoutQC && // Event type is timeout QC construction - event.View > 0 && // The view the block was proposed in is non-zero - event.TipBlockHeight > 0 && // Tip block height is non-zero - !isInterfaceNil(event.TipBlockHash) && // Tip block hash is non-nil isProperlyFormedAggregateQC(event.AggregateQC) // The high QC is properly formed } func IsProperlyFormedVoteEvent(event *FastHotStuffEvent) bool { - return event != nil && // Event non-nil + return isProperlyFormedGenericEvent(event) && event.EventType == FastHotStuffEventTypeVote && // Event type is vote - event.View > 0 && // The view the tip block was proposed in is non-zero - event.TipBlockHeight > 0 && 
// Tip block height voted on is non-zero
-		!isInterfaceNil(event.TipBlockHash) && // Tip block hash voted on is non-nil
 		isInterfaceNil(event.QC) // The high QC is nil
 }
 
 func IsProperlyFormedTimeoutEvent(event *FastHotStuffEvent) bool {
-	return event != nil && // Event non-nil
+	return isProperlyFormedGenericEvent(event) &&
 		event.EventType == FastHotStuffEventTypeTimeout && // Event type is timeout
-		event.View > 0 && // The view that was timed out is non-zero
-		event.TipBlockHeight > 0 && // Tip block height is non-zero
-		!isInterfaceNil(event.TipBlockHash) && // Tip block hash is non-nil
 		isInterfaceNil(event.QC) // The high QC is nil. The receiver will determine their own high QC.
 }
 
+// isProperlyFormedGenericEvent validates the common fields that all event types must populate.
+func isProperlyFormedGenericEvent(event *FastHotStuffEvent) bool {
+	return event != nil && // Event non-nil
+		event.View > 0 && // The view the block was proposed in is non-zero
+		event.TipBlockHeight > 0 && // Tip block height is non-zero
+		!isInterfaceNil(event.TipBlockHash) // Tip block hash is non-nil
+}
+
 // Given a QC and a sorted validator list, this function returns true if the QC contains a valid
 // super-majority of signatures from the validator list for the QC's (View, BlockHash) pair.
 func IsValidSuperMajorityQuorumCertificate(qc QuorumCertificate, validators []Validator) bool {
diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go
index b0431c850..b98109d4d 100644
--- a/lib/pos_consensus.go
+++ b/lib/pos_consensus.go
@@ -661,7 +661,7 @@ func (fc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([
 		return nil, errors.Errorf("error fetching validator lists for safe blocks: %v", err)
 	}
 
-	// If the block was processed successfully but was not applied as the new tip, we need up date the safe
+	// If the block was processed successfully but was not applied as the new tip, we need to update the safe
This is because the new block may be safe to extend even though // it did not result in a new tip. if !successfullyAppliedNewTip { @@ -698,7 +698,7 @@ func (fc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ return nil, errors.Errorf("Error fetching UtxoView for tip block: %v", err) } utxoView := utxoViewAndUtxoOps.UtxoView - snapshotGlobalParams, err := utxoView.GetCurrentSnapshotGlobalParamsEntry() + globalParams, err := utxoView.GetCurrentSnapshotGlobalParamsEntry() if err != nil { return nil, errors.Errorf("Error fetching snapshot global params: %v", err) } @@ -706,8 +706,8 @@ func (fc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ if err = fc.fastHotStuffEventLoop.ProcessTipBlock( tipBlockWithValidators[0], safeBlocksWithValidators, - time.Millisecond*time.Duration(snapshotGlobalParams.BlockProductionIntervalMillisecondsPoS), - time.Millisecond*time.Duration(snapshotGlobalParams.TimeoutIntervalMillisecondsPoS), + time.Millisecond*time.Duration(globalParams.BlockProductionIntervalMillisecondsPoS), + time.Millisecond*time.Duration(globalParams.TimeoutIntervalMillisecondsPoS), ); err != nil { return nil, errors.Errorf("Error processing tip block locally: %v", err) } From 2056d5760c889e3ca8f9043505ea7c2a9e0ef31e Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 14 May 2024 18:42:37 -0400 Subject: [PATCH 718/762] Fix Comments and PosMempoolStatus Enum (#1319) --- lib/pos_mempool.go | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 16ad11a4b..2bb0ccd25 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -17,9 +17,9 @@ import ( type PosMempoolStatus int const ( - PosMempoolStatusNotInitialized PosMempoolStatus = iota - PosMempoolStatusInitialized - PosMempoolStatusRunning + PosMempoolStatusNotInitialized PosMempoolStatus = 0 + PosMempoolStatusInitialized PosMempoolStatus = 1 + 
PosMempoolStatusRunning PosMempoolStatus = 2 ) type Mempool interface { @@ -166,8 +166,7 @@ type PosMempool struct { // mempool transaction with a new transaction having the same nonce but higher fee. nonceTracker *NonceTracker - // readOnlyLatestBlockView is used to check if a transaction is valid before being added to the mempool. The readOnlyLatestBlockView - // checks if the transaction has a valid signature and if the transaction's sender has enough funds to cover the fee. + // readOnlyLatestBlockView is used to check if a transaction has a valid nonce before being added to the mempool. // The readOnlyLatestBlockView should be updated whenever a new block is added to the blockchain via UpdateLatestBlock. // PosMempool only needs read-access to the block view. It isn't necessary to copy the block view before passing it // to the mempool. @@ -462,8 +461,7 @@ func (mp *PosMempool) OnBlockDisconnected(block *MsgDeSoBlock) { } // AddTransaction validates a MsgDeSoTxn transaction and adds it to the mempool if it is valid. -// If the mempool overflows as a result of adding the transaction, the mempool is pruned. The -// transaction signature verification can be skipped if verifySignature is passed as true. +// If the mempool overflows as a result of adding the transaction, the mempool is pruned. func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction) error { if mtxn == nil || mtxn.GetTxn() == nil { return fmt.Errorf("PosMempool.AddTransaction: Cannot add a nil transaction") @@ -485,8 +483,7 @@ func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction) error { defer mp.Unlock() // First, validate that the transaction is properly formatted according to BalanceModel. We acquire a read lock on - // the mempool. This allows multiple goroutines to safely perform transaction validation concurrently. In particular, - // transaction signature verification can be parallelized. + // the mempool. 
This allows multiple goroutines to safely perform transaction validation concurrently. if err := mp.checkTransactionSanity(mtxn.GetTxn(), false); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem verifying transaction") } From 771ebfaf671e9b627a2d17595e0db18522e0a759 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 14 May 2024 18:44:52 -0400 Subject: [PATCH 719/762] Deprecate MempoolTransaction Type (#1320) Deprecate MempoolTransaction Type Fix broken test --- lib/legacy_mempool.go | 15 ++++------ lib/peer.go | 6 ++-- lib/pos_block_producer.go | 12 ++++---- lib/pos_block_producer_test.go | 8 ++--- lib/pos_mempool.go | 54 ++++++++-------------------------- lib/pos_mempool_test.go | 52 ++++++++++++++++---------------- lib/server.go | 8 ++--- 7 files changed, 59 insertions(+), 96 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index f60edd9a1..61bfdbd7b 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -22,7 +22,6 @@ import ( "github.com/dgraph-io/badger/v4" "github.com/btcsuite/btcd/btcec/v2" - "github.com/deso-protocol/core/collections" "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" "github.com/pkg/errors" @@ -241,7 +240,7 @@ func (mp *DeSoMempool) IsRunning() bool { return !mp.stopped } -func (mp *DeSoMempool) AddTransaction(txn *MempoolTransaction) error { +func (mp *DeSoMempool) AddTransaction(txn *MsgDeSoTxn, txnTimestamp time.Time) error { return errors.New("Not implemented") } @@ -249,20 +248,16 @@ func (mp *DeSoMempool) RemoveTransaction(txnHash *BlockHash) error { return errors.New("Not implemented") } -func (mp *DeSoMempool) GetTransaction(txnHash *BlockHash) *MempoolTransaction { +func (mp *DeSoMempool) GetTransaction(txnHash *BlockHash) *MempoolTx { mempoolTx, exists := mp.readOnlyUniversalTransactionMap[*txnHash] if !exists { return nil } - return NewMempoolTransaction(mempoolTx.Tx, mempoolTx.Added, true) + return mempoolTx 
} -func (mp *DeSoMempool) GetTransactions() []*MempoolTransaction { - return collections.Transform( - mp.GetOrderedTransactions(), func(mempoolTx *MempoolTx) *MempoolTransaction { - return NewMempoolTransaction(mempoolTx.Tx, mempoolTx.Added, true) - }, - ) +func (mp *DeSoMempool) GetTransactions() []*MempoolTx { + return mp.GetOrderedTransactions() } func (mp *DeSoMempool) UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) { diff --git a/lib/peer.go b/lib/peer.go index dc767df75..4583af34e 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -172,7 +172,7 @@ func (pp *Peer) HandleGetTransactionsMsg(getTxnMsg *MsgDeSoGetTransactions) { "MsgDeSoGetTransactions message with %v txns from peer %v", len(getTxnMsg.HashList), pp) - mempoolTxs := []*MempoolTransaction{} + mempoolTxs := []*MempoolTx{} // We fetch the requested txns from either the PoW mempool or the PoS mempool // whichever one is used for the consensus protocol at the current block height. @@ -191,13 +191,13 @@ func (pp *Peer) HandleGetTransactionsMsg(getTxnMsg *MsgDeSoGetTransactions) { // Doing this helps the Peer when they go to add the transactions by reducing // unconnectedTxns and transactions being rejected due to missing dependencies. sort.Slice(mempoolTxs, func(ii, jj int) bool { - return mempoolTxs[ii].TimestampUnixMicro.Before(mempoolTxs[jj].TimestampUnixMicro) + return mempoolTxs[ii].GetTimestamp() < mempoolTxs[jj].GetTimestamp() }) // Create a list of the fetched transactions to a response. 
txnList := []*MsgDeSoTxn{} for _, mempoolTx := range mempoolTxs { - txnList = append(txnList, mempoolTx.MsgDeSoTxn) + txnList = append(txnList, mempoolTx.Tx) } // At this point the txnList should have all of the transactions that diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index e6e75337e..705899a5b 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -312,7 +312,7 @@ func (pbp *PosBlockProducer) getBlockTransactions( if currentBlockSize > softMaxBlockSizeBytes { break } - txnBytes, err := txn.ToBytes(false) + txnBytes, err := txn.Tx.ToBytes(false) if err != nil { return nil, 0, errors.Wrapf(err, "Error getting transaction size: ") } @@ -325,7 +325,7 @@ func (pbp *PosBlockProducer) getBlockTransactions( // Connect the transaction to the SafeUtxoView to test if it connects. _, _, _, fees, err := safeUtxoView.ConnectTransaction( - txn.GetTxn(), txn.Hash(), uint32(newBlockHeight), newBlockTimestampNanoSecs, true, false, + txn.Tx, txn.Hash, uint32(newBlockHeight), newBlockTimestampNanoSecs, true, false, ) // If the transaction fails to connect, then we skip it. @@ -333,16 +333,16 @@ func (pbp *PosBlockProducer) getBlockTransactions( continue } - blocksTxns = append(blocksTxns, txn.GetTxn()) + blocksTxns = append(blocksTxns, txn.Tx) currentBlockSize += uint64(len(txnBytes)) - if txn.GetTxn().TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { + if txn.Tx.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { // If the transactor is the block producer, then they won't receive the utility fee. 
- if blockProducerPublicKey.Equal(*NewPublicKey(txn.PublicKey)) { + if blockProducerPublicKey.Equal(*NewPublicKey(txn.Tx.PublicKey)) { continue } } else { - txnMeta, ok := txn.GetTxn().TxnMeta.(*AtomicTxnsWrapperMetadata) + txnMeta, ok := txn.Tx.TxnMeta.(*AtomicTxnsWrapperMetadata) if !ok { return nil, 0, errors.New("Error casting txn meta to AtomicSwapMetadata") diff --git a/lib/pos_block_producer_test.go b/lib/pos_block_producer_test.go index d94d16f6c..0ba37c019 100644 --- a/lib/pos_block_producer_test.go +++ b/lib/pos_block_producer_test.go @@ -1,12 +1,13 @@ package lib import ( - "github.com/deso-protocol/core/collections/bitset" "math" "math/rand" "testing" "time" + "github.com/deso-protocol/core/collections/bitset" + "github.com/deso-protocol/core/bls" "github.com/stretchr/testify/require" ) @@ -278,13 +279,12 @@ func TestGetBlockTransactions(t *testing.T) { currentTime := time.Now() for ii, txn := range txns { // Use the Simulated Transaction Timestamp. - mtxn := NewMempoolTransaction(txn, currentTime.Add(time.Duration(ii)*time.Microsecond), false) - require.NoError(testMempool.AddTransaction(mtxn)) + require.NoError(testMempool.AddTransaction(txn, currentTime.Add(time.Duration(ii)*time.Microsecond))) } newTxns := testMempool.GetTransactions() require.Equal(len(txns), len(newTxns)) for ii := 0; ii < len(txns); ii++ { - require.Equal(txns[ii], newTxns[ii].GetTxn()) + require.Equal(txns[ii], newTxns[ii].Tx) } } diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 2bb0ccd25..b867e5214 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -26,10 +26,10 @@ type Mempool interface { Start() error Stop() IsRunning() bool - AddTransaction(txn *MempoolTransaction) error + AddTransaction(txn *MsgDeSoTxn, txnTimestamp time.Time) error RemoveTransaction(txnHash *BlockHash) error - GetTransaction(txnHash *BlockHash) *MempoolTransaction - GetTransactions() []*MempoolTransaction + GetTransaction(txnHash *BlockHash) *MempoolTx + GetTransactions() []*MempoolTx 
UpdateLatestBlock(blockView *UtxoView, blockHeight uint64) UpdateGlobalParams(globalParams *GlobalParamsEntry) @@ -100,33 +100,6 @@ func GetAugmentedUniversalViewWithAdditionalTransactions( return newView, nil } -// MempoolTransaction is a simple wrapper around MsgDeSoTxn that adds a timestamp field. -type MempoolTransaction struct { - *MsgDeSoTxn - TimestampUnixMicro time.Time - Validated bool -} - -func NewMempoolTransaction(txn *MsgDeSoTxn, timestamp time.Time, validated bool) *MempoolTransaction { - return &MempoolTransaction{ - MsgDeSoTxn: txn, - TimestampUnixMicro: timestamp, - Validated: validated, - } -} - -func (mtxn *MempoolTransaction) GetTxn() *MsgDeSoTxn { - return mtxn.MsgDeSoTxn -} - -func (mtxn *MempoolTransaction) GetTimestamp() time.Time { - return mtxn.TimestampUnixMicro -} - -func (mtxn *MempoolTransaction) IsValidated() bool { - return mtxn.Validated -} - // PosMempool is used by the node to keep track of uncommitted transactions. The main responsibilities of the PosMempool // include addition/removal of transactions, back up of transaction to database, and retrieval of transactions ordered // by Fee-Time algorithm. More on the Fee-Time algorithm can be found in the documentation of TransactionRegister. @@ -462,13 +435,13 @@ func (mp *PosMempool) OnBlockDisconnected(block *MsgDeSoBlock) { // AddTransaction validates a MsgDeSoTxn transaction and adds it to the mempool if it is valid. // If the mempool overflows as a result of adding the transaction, the mempool is pruned. -func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction) error { - if mtxn == nil || mtxn.GetTxn() == nil { +func (mp *PosMempool) AddTransaction(txn *MsgDeSoTxn, txnTimestamp time.Time) error { + if txn == nil { return fmt.Errorf("PosMempool.AddTransaction: Cannot add a nil transaction") } // If the transaction is already in the transaction register, then we return an error. 
- if mp.txnRegister.GetTransaction(mtxn.GetTxn().Hash()) != nil { + if mp.txnRegister.GetTransaction(txn.Hash()) != nil { return errors.New("PosMempool.AddTransaction: Transaction already in mempool") } @@ -484,7 +457,7 @@ func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction) error { // First, validate that the transaction is properly formatted according to BalanceModel. We acquire a read lock on // the mempool. This allows multiple goroutines to safely perform transaction validation concurrently. - if err := mp.checkTransactionSanity(mtxn.GetTxn(), false); err != nil { + if err := mp.checkTransactionSanity(txn, false); err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem verifying transaction") } @@ -494,7 +467,7 @@ func (mp *PosMempool) AddTransaction(mtxn *MempoolTransaction) error { } // Construct the MempoolTx from the MsgDeSoTxn. - mempoolTx, err := NewMempoolTx(mtxn.GetTxn(), mtxn.GetTimestamp(), mp.latestBlockHeight) + mempoolTx, err := NewMempoolTx(txn, txnTimestamp, mp.latestBlockHeight) if err != nil { return errors.Wrapf(err, "PosMempool.AddTransaction: Problem constructing MempoolTx") } @@ -782,7 +755,7 @@ func (mp *PosMempool) removeTransactionNoLock(txn *MempoolTx, persistToDb bool) } // GetTransaction returns the transaction with the given hash if it exists in the mempool. This function is thread-safe. -func (mp *PosMempool) GetTransaction(txnHash *BlockHash) *MempoolTransaction { +func (mp *PosMempool) GetTransaction(txnHash *BlockHash) *MempoolTx { mp.RLock() defer mp.RUnlock() @@ -795,11 +768,11 @@ func (mp *PosMempool) GetTransaction(txnHash *BlockHash) *MempoolTransaction { return nil } - return NewMempoolTransaction(txn.Tx, txn.Added, txn.IsValidated()) + return txn } // GetTransactions returns all transactions in the mempool ordered by the Fee-Time algorithm. This function is thread-safe. 
-func (mp *PosMempool) GetTransactions() []*MempoolTransaction { +func (mp *PosMempool) GetTransactions() []*MempoolTx { mp.RLock() defer mp.RUnlock() @@ -807,15 +780,14 @@ func (mp *PosMempool) GetTransactions() []*MempoolTransaction { return nil } - var mempoolTxns []*MempoolTransaction + var mempoolTxns []*MempoolTx poolTxns := mp.getTransactionsNoLock() for _, txn := range poolTxns { if txn == nil || txn.Tx == nil { continue } - mtxn := NewMempoolTransaction(txn.Tx, txn.Added, txn.IsValidated()) - mempoolTxns = append(mempoolTxns, mtxn) + mempoolTxns = append(mempoolTxns, txn) } return mempoolTxns } diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 3771e1269..8dced4464 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -124,15 +124,15 @@ func TestPosMempoolPrune(t *testing.T) { fetchedTxns := mempool.GetTransactions() require.Equal(3, len(fetchedTxns)) - require.Equal(uint64(1974), fetchedTxns[0].TxnFeeNanos) - require.Equal(uint64(1931), fetchedTxns[1].TxnFeeNanos) - require.Equal(uint64(1776), fetchedTxns[2].TxnFeeNanos) - require.Equal(uint64(1974), mempool.GetTransaction(fetchedTxns[0].Hash()).TxnFeeNanos) - require.Equal(uint64(1931), mempool.GetTransaction(fetchedTxns[1].Hash()).TxnFeeNanos) - require.Equal(uint64(1776), mempool.GetTransaction(fetchedTxns[2].Hash()).TxnFeeNanos) + require.Equal(uint64(1974), fetchedTxns[0].Tx.TxnFeeNanos) + require.Equal(uint64(1931), fetchedTxns[1].Tx.TxnFeeNanos) + require.Equal(uint64(1776), fetchedTxns[2].Tx.TxnFeeNanos) + require.Equal(uint64(1974), mempool.GetTransaction(fetchedTxns[0].Hash).Tx.TxnFeeNanos) + require.Equal(uint64(1931), mempool.GetTransaction(fetchedTxns[1].Hash).Tx.TxnFeeNanos) + require.Equal(uint64(1776), mempool.GetTransaction(fetchedTxns[2].Hash).Tx.TxnFeeNanos) // Remove one transaction. 
- _wrappedPosMempoolRemoveTransaction(t, mempool, fetchedTxns[0].Hash()) + _wrappedPosMempoolRemoveTransaction(t, mempool, fetchedTxns[0].Hash) require.NoError(mempool.validateTransactions()) require.Equal(2, len(mempool.GetTransactions())) mempool.Stop() @@ -147,29 +147,29 @@ func TestPosMempoolPrune(t *testing.T) { require.Equal(2, len(newPool.GetTransactions())) // Remove the other transactions. - _wrappedPosMempoolRemoveTransaction(t, newPool, fetchedTxns[1].Hash()) - _wrappedPosMempoolRemoveTransaction(t, newPool, fetchedTxns[2].Hash()) + _wrappedPosMempoolRemoveTransaction(t, newPool, fetchedTxns[1].Hash) + _wrappedPosMempoolRemoveTransaction(t, newPool, fetchedTxns[2].Hash) // Remove the same transaction twice - _wrappedPosMempoolRemoveTransaction(t, newPool, fetchedTxns[1].Hash()) + _wrappedPosMempoolRemoveTransaction(t, newPool, fetchedTxns[1].Hash) require.Equal(0, len(newPool.GetTransactions())) // Add the transactions back. for _, txn := range fetchedTxns { - _wrappedPosMempoolAddTransaction(t, newPool, txn.GetTxn()) + _wrappedPosMempoolAddTransaction(t, newPool, txn.Tx) } require.Equal(3, len(newPool.GetTransactions())) // Iterate through the transactions. 
newPoolTxns := newPool.GetTransactions() for ii, tx := range newPoolTxns { - require.True(bytes.Equal(tx.Hash().ToBytes(), fetchedTxns[ii].Hash().ToBytes())) + require.True(bytes.Equal(tx.Hash.ToBytes(), fetchedTxns[ii].Hash.ToBytes())) } require.Equal(len(newPool.GetTransactions()), len(newPool.nonceTracker.nonceMap)) require.NoError(newPool.validateTransactions()) newTxns := newPool.GetTransactions() require.Equal(3, len(newTxns)) for _, txn := range newTxns { - _wrappedPosMempoolRemoveTransaction(t, newPool, txn.Hash()) + _wrappedPosMempoolRemoveTransaction(t, newPool, txn.Hash) } newPool.Stop() require.False(newPool.IsRunning()) @@ -276,7 +276,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { _signTxn(t, txn1New, m0Priv) _wrappedPosMempoolAddTransaction(t, mempool, txn1New) require.Equal(1, len(mempool.GetTransactions())) - require.Equal(txn1New.TxnNonce, mempool.GetTransactions()[0].TxnNonce) + require.Equal(txn1New.TxnNonce, mempool.GetTransactions()[0].Tx.TxnNonce) // Now generate a transaction coming from m1 txn2 := _generateTestTxn(t, rand, feeMin, feeMax, m1PubBytes, m1Priv, 100, 25) @@ -289,8 +289,7 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { *txn2Low.TxnNonce = *txn2.TxnNonce _signTxn(t, txn2Low, m1Priv) added2Low := time.Now() - mtxn2Low := NewMempoolTransaction(txn2Low, added2Low, false) - err := mempool.AddTransaction(mtxn2Low) + err := mempool.AddTransaction(txn2Low, added2Low) require.Contains(err.Error(), MempoolFailedReplaceByHigherFee) // Now generate a proper new transaction for m1, with same nonce, and higher fee. @@ -303,11 +302,11 @@ func TestPosMempoolReplaceWithHigherFee(t *testing.T) { // Verify that only the correct transactions are present in the mempool. Notice that on this seed, txn2 is positioned // as first in the mempool's GetTransactions. 
- require.NotEqual(txn2, mempool.GetTransactions()[0].GetTxn()) - require.NotEqual(txn2Low, mempool.GetTransactions()[0].GetTxn()) - require.Equal(txn2New, mempool.GetTransactions()[0].GetTxn()) - require.NotEqual(txn1, mempool.GetTransactions()[1].GetTxn()) - require.Equal(txn1New, mempool.GetTransactions()[1].GetTxn()) + require.NotEqual(txn2, mempool.GetTransactions()[0].Tx) + require.NotEqual(txn2Low, mempool.GetTransactions()[0].Tx) + require.Equal(txn2New, mempool.GetTransactions()[0].Tx) + require.NotEqual(txn1, mempool.GetTransactions()[1].Tx) + require.Equal(txn1New, mempool.GetTransactions()[1].Tx) require.Equal(len(mempool.GetTransactions()), len(mempool.nonceTracker.nonceMap)) require.NoError(mempool.validateTransactions()) @@ -480,8 +479,7 @@ func _generateTestTxnWithOutputs(t *testing.T, rand *rand.Rand, feeMin uint64, f func _wrappedPosMempoolAddTransaction(t *testing.T, mp *PosMempool, txn *MsgDeSoTxn) { added := time.Now() - mtxn := NewMempoolTransaction(txn, added, false) - require.NoError(t, mp.AddTransaction(mtxn)) + require.NoError(t, mp.AddTransaction(txn, added)) require.Equal(t, true, _checkPosMempoolIntegrity(t, mp)) } @@ -503,16 +501,16 @@ func _checkPosMempoolIntegrity(t *testing.T, mp *PosMempool) bool { balances := make(map[PublicKey]uint64) txns := mp.GetTransactions() for _, txn := range txns { - if txn.TxnNonce == nil { + if txn.Tx.TxnNonce == nil { t.Errorf("PosMempool transaction has nil nonce") return false } - pk := NewPublicKey(txn.PublicKey) - if txnNt := mp.nonceTracker.GetTxnByPublicKeyNonce(*pk, *txn.TxnNonce); !assert.Equal(t, txn.GetTxn(), txnNt.Tx) { + pk := NewPublicKey(txn.Tx.PublicKey) + if txnNt := mp.nonceTracker.GetTxnByPublicKeyNonce(*pk, *txn.Tx.TxnNonce); !assert.Equal(t, txn.Tx, txnNt.Tx) { t.Errorf("PosMempool nonceTracker and transactions are out of sync") return false } - balances[*pk] += txn.TxnFeeNanos + balances[*pk] += txn.Tx.TxnFeeNanos } return true } diff --git a/lib/server.go b/lib/server.go index 
43221146d..a1928513e 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1979,7 +1979,7 @@ func (srv *Server) _relayTransactions() { invVect := &InvVect{ Type: InvTypeTx, - Hash: *newTxn.Hash(), + Hash: *newTxn.Hash, } // If the peer has this txn already then skip it. @@ -2061,8 +2061,7 @@ func (srv *Server) _addNewTxn( // txn addition into the PoW mempool to succeed, while the addition into the PoS // mempool fails. This error handling catches that and gives the user the correct // feedback on the txn addition's success. - mempoolTxn := NewMempoolTransaction(txn, time.Now(), false) - if err := srv.posMempool.AddTransaction(mempoolTxn); err != nil { + if err := srv.posMempool.AddTransaction(txn, time.Now()); err != nil { if uint64(tipHeight) >= srv.params.GetFinalPoWBlockHeight() { return nil, errors.Wrapf(err, "Server._addNewTxn: problem adding txn to pos mempool") } @@ -2527,8 +2526,7 @@ func (srv *Server) ProcessSingleTxnWithChainLock(pp *Peer, txn *MsgDeSoTxn) ([]* // txn addition into the PoW mempool to succeed, while the addition into the PoS // mempool fails. This error handling catches that and gives the user the correct // feedback on the txn addition's success. 
- mempoolTxn := NewMempoolTransaction(txn, time.Now(), false) - if err := srv.posMempool.AddTransaction(mempoolTxn); err != nil { + if err := srv.posMempool.AddTransaction(txn, time.Now()); err != nil { if uint64(tipHeight) >= srv.params.GetFinalPoWBlockHeight() { return nil, errors.Wrapf(err, "Server._addNewTxn: problem adding txn to pos mempool") } From a3fa9fe780e09e1c1e62fde2ad9b9e575744bc9e Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 14 May 2024 18:48:08 -0400 Subject: [PATCH 720/762] Fix HasHighS func (#1313) --- lib/block_view_test.go | 20 ++++++++++++++++++++ lib/network.go | 6 +++--- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 3df861ece..88e07154e 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" + ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "math" _ "net/http/pprof" "reflect" @@ -1957,6 +1958,25 @@ func TestBasicTransfer(t *testing.T) { utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) require.Error(err) require.Contains(err.Error(), RuleErrorInvalidTransactionSignature) + + utxoView = NewUtxoView(db, params, postgres, chain.snapshot, chain.eventManager) + + // Sign the transaction with the sender's key, but then make S have + // a high value. + _signTxn(t, txn, senderPrivString) + r := txn.Signature.Sign.R() + s := txn.Signature.Sign.S() + txn.Signature.SetSignature(ecdsa2.NewSignature(&r, s.Negate())) + + txHash = txn.Hash() + _, _, _, _, err = + utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) + if blockHeight < params.ForkHeights.BalanceModelBlockHeight { + require.NoError(err) + } else { + require.Error(err) + require.Contains(err.Error(), RuleErrorTxnSigHasHighS) + } } // A block reward with a bad signature should fail. 
diff --git a/lib/network.go b/lib/network.go index 3dc9ff253..1c6e6bb27 100644 --- a/lib/network.go +++ b/lib/network.go @@ -3354,9 +3354,9 @@ func (desoSign *DeSoSignature) HasHighS() bool { } // We reject high-S signatures as they lead to inconsistent public key recovery // https://github.com/indutny/elliptic/blob/master/lib/elliptic/ec/index.js#L147 - // TODO: this should be removed once we have a proper fix for this. - //return desoSign.Sign.S.Cmp(big.NewInt(0).Rsh(secp256k1.Params().N, 1)) != -1 - return false + s := desoSign.Sign.S() + sBytes := (&s).Bytes() + return big.NewInt(0).SetBytes(sBytes[:]).Cmp(big.NewInt(0).Rsh(secp256k1.Params().N, 1)) != -1 } // ToBytes encodes the signature in accordance to the DeSo-DER ECDSA format. From fde3aa10639e7b6beb1fc847ea6b5193bcbd8259 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 14 May 2024 18:50:44 -0400 Subject: [PATCH 721/762] Remove snapshot validator set total stake amount nanos (#1318) --- lib/block_view.go | 16 ---- lib/block_view_flush.go | 3 - lib/block_view_validator.go | 8 -- lib/db_utils.go | 13 ++-- lib/pos_epoch_complete_hook.go | 5 -- lib/pos_epoch_complete_hook_test.go | 10 --- lib/pos_snapshot_entries.go | 115 ---------------------------- 7 files changed, 6 insertions(+), 164 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 5b95300b5..9faf178ec 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -165,11 +165,6 @@ type UtxoView struct { // of every validator that makes up the validator set at the given SnapshotAtEpochNumber. SnapshotValidatorBLSPublicKeyPKIDPairEntries map[SnapshotValidatorBLSPublicKeyMapKey]*BLSPublicKeyPKIDPairEntry - // SnapshotValidatorSetTotalStakeAmountNanos is a map of SnapshotAtEpochNumber to the sum TotalStakeAmountNanos - // for the validator set of for an epoch. - // It contains the snapshot value of the GlobalActiveStakeAmountNanos at the given SnapshotAtEpochNumber. 
- SnapshotValidatorSetTotalStakeAmountNanos map[uint64]*uint256.Int - // SnapshotLeaderSchedule is a map of to a ValidatorPKID. // It contains the PKID of the validator at the given index in the leader schedule // generated at the given SnapshotAtEpochNumber. @@ -314,9 +309,6 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { // SnapshotValidatorBLSPublicKeyPKIDPairEntries bav.SnapshotValidatorBLSPublicKeyPKIDPairEntries = make(map[SnapshotValidatorBLSPublicKeyMapKey]*BLSPublicKeyPKIDPairEntry) - // SnapshotValidatorSetTotalStakeAmountNanos - bav.SnapshotValidatorSetTotalStakeAmountNanos = make(map[uint64]*uint256.Int) - // SnapshotLeaderSchedule bav.SnapshotLeaderSchedule = make(map[SnapshotLeaderScheduleMapKey]*PKID) bav.HasFullSnapshotLeaderScheduleByEpoch = make(map[uint64]bool) @@ -671,14 +663,6 @@ func (bav *UtxoView) CopyUtxoView() *UtxoView { newView.SnapshotValidatorBLSPublicKeyPKIDPairEntries[mapKey] = blsPublicKeyPKIDPairEntry.Copy() } - // Copy the SnapshotValidatorSetTotalStakeAmountNanos - newView.SnapshotValidatorSetTotalStakeAmountNanos = make(map[uint64]*uint256.Int, - len(bav.SnapshotValidatorSetTotalStakeAmountNanos), - ) - for epochNumber, totalStakeAmountNanos := range bav.SnapshotValidatorSetTotalStakeAmountNanos { - newView.SnapshotValidatorSetTotalStakeAmountNanos[epochNumber] = totalStakeAmountNanos.Clone() - } - // Copy the SnapshotLeaderSchedule newView.SnapshotLeaderSchedule = make(map[SnapshotLeaderScheduleMapKey]*PKID, len(bav.SnapshotLeaderSchedule)) for mapKey, validatorPKID := range bav.SnapshotLeaderSchedule { diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 4bd55d255..52a210636 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -192,9 +192,6 @@ func (bav *UtxoView) FlushToDBWithoutAncestralRecordsFlushWithTxn(txn *badger.Tx if err := bav._flushSnapshotValidatorSetToDbWithTxn(txn, blockHeight); err != nil { return err } - if err := 
bav._flushSnapshotValidatorSetTotalStakeAmountNanosToDbWithTxn(txn, blockHeight); err != nil { - return err - } if err := bav._flushSnapshotLeaderScheduleToDbWithTxn(txn, blockHeight); err != nil { return err } diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index b245012ff..eb8b40090 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -2478,14 +2478,6 @@ func (bav *UtxoView) CreateUnjailValidatorTxindexMetadata( return &UnjailValidatorTxindexMetadata{}, affectedPublicKeys } -func SumValidatorEntriesTotalStakeAmountNanos(validatorEntries []*ValidatorEntry) *uint256.Int { - totalStakeAmountNanos := uint256.NewInt(0) - for _, validatorEntry := range validatorEntries { - totalStakeAmountNanos.Add(totalStakeAmountNanos, validatorEntry.TotalStakeAmountNanos) - } - return totalStakeAmountNanos -} - // ParseValidatorDomain expects a domain string in the format "host:port" and returns the host and port. // If the domain is not in the expected format, it returns an error. func ParseValidatorDomain(domain string) (_host string, _port uint64, _err error) { diff --git a/lib/db_utils.go b/lib/db_utils.go index 33eb73dad..4bad2b0dd 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -558,10 +558,9 @@ type DBPrefixes struct { // Note: we parse the ValidatorPKID from the key and the value is nil to save space. PrefixSnapshotValidatorSetByStakeAmount []byte `prefix_id:"[89]" is_state:"true"` - // PrefixSnapshotValidatorSetTotalStakeAmountNanos: Retrieve a snapshot of the validator set's total amount of - // staked DESO by SnapshotAtEpochNumber. - // Prefix, -> *uint256.Int - PrefixSnapshotValidatorSetTotalStakeAmountNanos []byte `prefix_id:"[90]" is_state:"true"` + // Prefix 90 is deprecated. It was previously used for the PrefixSnapshotValidatorSetTotalStakeAmountNanos + // prefix. 
The data stored in this prefix was never used in consensus and just lead to unnecessary data + // in the DB, slowing down hypersync and encoder migrations. // PrefixSnapshotLeaderSchedule: Retrieve a ValidatorPKID by . // Prefix, , -> ValidatorPKID @@ -880,9 +879,9 @@ func StatePrefixToDeSoEncoder(prefix []byte) (_isEncoder bool, _encoder DeSoEnco } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetByStakeAmount) { // prefix_id:"[89]" return false, nil - } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotValidatorSetTotalStakeAmountNanos) { - // prefix_id:"[90]" - return false, nil + // prefix_id:"[90]" is deprecated. It was previously used for the PrefixSnapshotValidatorSetTotalStakeAmountNanos + // prefix. The data stored in this prefix was never used in consensus and just lead to unnecessary data + // in the DB, slowing down hypersync and encoder migrations. } else if bytes.Equal(prefix, Prefixes.PrefixSnapshotLeaderSchedule) { // prefix_id:"[91]" return true, &PKID{} diff --git a/lib/pos_epoch_complete_hook.go b/lib/pos_epoch_complete_hook.go index 370d9f218..ef9b17750 100644 --- a/lib/pos_epoch_complete_hook.go +++ b/lib/pos_epoch_complete_hook.go @@ -319,11 +319,6 @@ func (bav *UtxoView) generateAndSnapshotValidatorSet(epochNumber uint64, validat for _, validatorEntry := range validatorSet { bav._setSnapshotValidatorSetEntry(validatorEntry, epochNumber) } - - // Snapshot the current validator set's total stake. Note, the validator set is already filtered to the top n - // active validators for the epoch. The total stake is the sum of all of the active validators' stakes. 
- validatorSetTotalStakeAmountNanos := SumValidatorEntriesTotalStakeAmountNanos(validatorSet) - bav._setSnapshotValidatorSetTotalStakeAmountNanos(validatorSetTotalStakeAmountNanos, epochNumber) } func (bav *UtxoView) generateAndSnapshotLeaderSchedule(epochNumber uint64, validatorSet []*ValidatorEntry) error { diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index 8312638a7..bd19427bd 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -289,11 +289,6 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(700)) require.Equal(t, validatorEntries[6].TotalStakeAmountNanos, uint256.NewInt(100)) - // Test SnapshotValidatorSetTotalStakeAmountNanos is populated. - snapshotValidatorSetTotalStakeAmountNanos, err := _newUtxoView(testMeta).GetSnapshotValidatorSetTotalStakeAmountNanos() - require.NoError(t, err) - require.Equal(t, snapshotValidatorSetTotalStakeAmountNanos, uint256.NewInt(2800)) - // Test SnapshotLeaderSchedule is populated. for index := range validatorPKIDs { snapshotLeaderScheduleValidator, err := _newUtxoView(testMeta).GetSnapshotLeaderScheduleValidator(uint16(index)) @@ -948,11 +943,6 @@ func _assertEmptyValidatorSnapshots(testMeta *TestMeta) { require.NoError(testMeta.t, err) require.Empty(testMeta.t, validatorEntries) - // Test SnapshotValidatorSetTotalStakeAmountNanos is zero. - snapshotValidatorSetTotalStakeAmountNanos, err := _newUtxoView(testMeta).GetSnapshotValidatorSetTotalStakeAmountNanos() - require.NoError(testMeta.t, err) - require.True(testMeta.t, snapshotValidatorSetTotalStakeAmountNanos.IsZero()) - // Test SnapshotLeaderSchedule is nil. 
for index := range validatorEntries { snapshotLeaderScheduleValidator, err := _newUtxoView(testMeta).GetSnapshotLeaderScheduleValidator(uint16(index)) diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index f49093a08..d2d941c2a 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -10,7 +10,6 @@ import ( "github.com/deso-protocol/core/collections" "github.com/dgraph-io/badger/v4" "github.com/golang/glog" - "github.com/holiman/uint256" "github.com/pkg/errors" ) @@ -897,120 +896,6 @@ func (bav *UtxoView) _flushSnapshotValidatorBLSPublicKeyPKIDPairEntryToDbWithTxn return nil } -// -// SnapshotValidatorSetTotalStakeAmountNanos -// - -func (bav *UtxoView) GetSnapshotValidatorSetTotalStakeAmountNanos() (*uint256.Int, error) { - // Calculate the SnapshotEpochNumber. - snapshotAtEpochNumber, err := bav.GetCurrentSnapshotEpochNumber() - if err != nil { - return nil, errors.Wrapf(err, "GetSnapshotValidatorSetTotalStakeAmountNanos: problem calculating SnapshotEpochNumber: ") - } - // Check the UtxoView first. - if globalActiveStakeAmountNanos, exists := bav.SnapshotValidatorSetTotalStakeAmountNanos[snapshotAtEpochNumber]; exists { - return globalActiveStakeAmountNanos.Clone(), nil - } - // If we don't have it in the UtxoView, check the db. - globalActiveStakeAmountNanos, err := DBGetSnapshotValidatorSetTotalStakeAmountNanos(bav.Handle, bav.Snapshot, snapshotAtEpochNumber) - if err != nil { - return nil, errors.Wrapf( - err, - "GetSnapshotValidatorSetTotalStakeAmountNanos: problem retrieving SnapshotValidatorSetTotalStakeAmountNanos from db: ", - ) - } - if globalActiveStakeAmountNanos == nil { - globalActiveStakeAmountNanos = uint256.NewInt(0) - } - // Cache the result in the UtxoView. 
- bav._setSnapshotValidatorSetTotalStakeAmountNanos(globalActiveStakeAmountNanos, snapshotAtEpochNumber) - return globalActiveStakeAmountNanos, nil -} - -func (bav *UtxoView) _setSnapshotValidatorSetTotalStakeAmountNanos(globalActiveStakeAmountNanos *uint256.Int, snapshotAtEpochNumber uint64) { - if globalActiveStakeAmountNanos == nil { - glog.Errorf("_setSnapshotValidatorSetTotalStakeAmountNanos: called with nil entry, this should never happen") - return - } - bav.SnapshotValidatorSetTotalStakeAmountNanos[snapshotAtEpochNumber] = globalActiveStakeAmountNanos.Clone() -} - -func (bav *UtxoView) _flushSnapshotValidatorSetTotalStakeAmountNanosToDbWithTxn(txn *badger.Txn, blockHeight uint64) error { - for snapshotAtEpochNumber, globalActiveStakeAmountNanos := range bav.SnapshotValidatorSetTotalStakeAmountNanos { - if globalActiveStakeAmountNanos == nil { - return fmt.Errorf( - "_flushSnapshotValidatorSetTotalStakeAmountNanosToDbWithTxn: found nil entry for EpochNumber %d, this should never happen", - snapshotAtEpochNumber, - ) - } - if err := DBPutSnapshotValidatorSetTotalStakeAmountNanosWithTxn( - txn, bav.Snapshot, globalActiveStakeAmountNanos, snapshotAtEpochNumber, blockHeight, bav.EventManager, - ); err != nil { - return errors.Wrapf( - err, - "_flushSnapshotValidatorSetTotalStakeAmountNanosToDbWithTxn: problem setting SnapshotValidatorSetTotalStake for EpochNumber %d: ", - snapshotAtEpochNumber, - ) - } - } - return nil -} - -func DBKeyForSnapshotValidatorSetTotalStakeAmountNanos(snapshotAtEpochNumber uint64) []byte { - key := append([]byte{}, Prefixes.PrefixSnapshotValidatorSetTotalStakeAmountNanos...) - key = append(key, EncodeUint64(snapshotAtEpochNumber)...) 
- return key -} - -func DBGetSnapshotValidatorSetTotalStakeAmountNanos(handle *badger.DB, snap *Snapshot, snapshotAtEpochNumber uint64) (*uint256.Int, error) { - var ret *uint256.Int - err := handle.View(func(txn *badger.Txn) error { - var innerErr error - ret, innerErr = DBGetSnapshotValidatorSetTotalStakeAmountNanosWithTxn(txn, snap, snapshotAtEpochNumber) - return innerErr - }) - return ret, err -} - -func DBGetSnapshotValidatorSetTotalStakeAmountNanosWithTxn(txn *badger.Txn, snap *Snapshot, snapshotAtEpochNumber uint64) (*uint256.Int, error) { - // Retrieve from db. - key := DBKeyForSnapshotValidatorSetTotalStakeAmountNanos(snapshotAtEpochNumber) - globalActiveStakeAmountNanosBytes, err := DBGetWithTxn(txn, snap, key) - if err != nil { - // We don't want to error if the key isn't found. Instead, return nil. - if err == badger.ErrKeyNotFound { - return nil, nil - } - return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetTotalStakeAmountNanosWithTxn: problem retrieving value") - } - - // Decode from bytes. - var globalActiveStakeAmountNanos *uint256.Int - rr := bytes.NewReader(globalActiveStakeAmountNanosBytes) - globalActiveStakeAmountNanos, err = VariableDecodeUint256(rr) - if err != nil { - return nil, errors.Wrapf(err, "DBGetSnapshotValidatorSetTotalStakeAmountNanosWithTxn: problem decoding value") - } - return globalActiveStakeAmountNanos, nil -} - -func DBPutSnapshotValidatorSetTotalStakeAmountNanosWithTxn( - txn *badger.Txn, - snap *Snapshot, - globalActiveStakeAmountNanos *uint256.Int, - snapshotAtEpochNumber uint64, - blockHeight uint64, - eventManager *EventManager, -) error { - if globalActiveStakeAmountNanos == nil { - // This should never happen but is a sanity check. 
- glog.Errorf("DBPutSnapshotValidatorSetTotalStakeAmountNanosWithTxn: called with nil GlobalActiveStake, this should never happen") - return nil - } - key := DBKeyForSnapshotValidatorSetTotalStakeAmountNanos(snapshotAtEpochNumber) - return DBSetWithTxn(txn, snap, key, VariableEncodeUint256(globalActiveStakeAmountNanos), eventManager) -} - // // SnapshotStakeToReward // From c73633c8a94b69a2b0a45f08908a39ddfd1e0a6d Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 15 May 2024 16:39:01 -0400 Subject: [PATCH 722/762] State syncer pos enhancements (#1322) --- lib/legacy_mempool.go | 1 + lib/state_change_syncer.go | 15 ++++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 61bfdbd7b..0ee18f4e5 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -873,6 +873,7 @@ func (mp *DeSoMempool) addTransaction( Height: height, Fee: fee, FeePerKB: fee * 1000 / serializedLen, + validated: true, // index will be set by the heap code. } diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 2844b74f5..55eb0d25f 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -816,10 +816,6 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser glog.V(2).Infof("Time since db flush: %v", time.Since(startTime)) mempoolTxUtxoView := NewUtxoView(server.blockchain.db, server.blockchain.params, server.blockchain.postgres, nil, &mempoolEventManager) glog.V(2).Infof("Time since utxo view: %v", time.Since(startTime)) - startTime = time.Now() - - // Loop through all the transactions in the mempool and connect them and their utxo ops to the mempool view. - mempoolTxns := server.GetMempool().GetOrderedTransactions() // Get the uncommitted blocks from the chain. 
uncommittedBlocks, err := server.blockchain.GetUncommittedBlocks(mempoolUtxoView.TipHash) @@ -836,9 +832,6 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser }) return false, errors.Wrapf(err, "StateChangeSyncer.SyncMempoolToStateSyncer: ") } - glog.V(2).Infof("Time since getting transactions: %v", time.Since(startTime)) - startTime = time.Now() - glog.V(2).Infof("Mempool synced len after flush: %d", len(stateChangeSyncer.MempoolSyncedKeyValueMap)) // TODO: Have Z look at if we need to do some caching in the uncommitted blocks logic. // First connect the uncommitted blocks to the mempool view. @@ -872,6 +865,11 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser mempoolTxUtxoView.EventManager = &mempoolEventManager } + // Loop through all the transactions in the mempool and connect them and their utxo ops to the mempool view. + mempoolTxns := server.GetMempool().GetOrderedTransactions() + startTime = time.Now() + glog.V(2).Infof("Mempool synced len after flush: %d", len(stateChangeSyncer.MempoolSyncedKeyValueMap)) + //Check to see if every txn hash in our cached txns is in the first n txns in the mempool. //N represents the length of our cached txn map. 
for ii, mempoolTx := range mempoolTxns { @@ -905,6 +903,9 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser txnStateChangeEntry = stateChangeEntries[0] utxoOpStateChangeEntry = stateChangeEntries[1] } else { + if !mempoolTx.validated { + continue + } utxoOpsForTxn, _, _, _, err := mempoolTxUtxoView.ConnectTransaction( mempoolTx.Tx, mempoolTx.Hash, uint32(blockHeight+1), currentTimestamp, false, false /*ignoreUtxos*/) if err != nil { From 63615f910e02f30fffd393eeef1b257298d86117 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 15 May 2024 18:34:21 -0400 Subject: [PATCH 723/762] More Code Cleanup and Comment Fixes (#1321) --- consensus/event_loop.go | 2 +- consensus/types.go | 3 ++ consensus/utils.go | 8 ++--- lib/block_view.go | 74 ++++++++++++----------------------------- lib/blockchain.go | 4 +-- lib/pos_blockchain.go | 10 +++--- 6 files changed, 36 insertions(+), 65 deletions(-) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index cacc8c80c..686f579fc 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -699,7 +699,7 @@ func (fe *fastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff for ii, validator := range validatorList { totalStake = uint256.NewInt(0).Add(totalStake, validator.GetStakeAmount()) - // Skip the validator if it hasn't voted for the the block + // Skip the validator if it hasn't voted for the block vote, hasVoted := votesByValidator[validator.GetPublicKey().ToString()] if !hasVoted { continue diff --git a/consensus/types.go b/consensus/types.go index 2c9130f26..587b45601 100644 --- a/consensus/types.go +++ b/consensus/types.go @@ -138,6 +138,9 @@ type Block interface { GetBlockHash() BlockHash GetHeight() uint64 GetView() uint64 + // The QC field is intentionally excluded from the Block interface to minimize the number of assumptions + // and validation the event loop has to make on incoming blocks. 
This is especially important for the + // PoW -> PoS cutover blocks that do not have QCs. } type BlockWithValidatorList struct { diff --git a/consensus/utils.go b/consensus/utils.go index 966adfcd3..41d14f50f 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -154,8 +154,8 @@ func isSuperMajorityStakeSignersList(signersList *bitset.Bitset, validators []Va return true, validatorPublicKeysInQC } -// When voting on a block, validators sign the payload sha3-256(View, BlockHash) with their BLS -// private key. This hash guarantees that the view and block hash fields in a VoteMessage +// When voting on a block, validators sign the payload sha3-256(ViewOpCode, View, BlockHash) with +// their BLS private key. This hash guarantees that the view and block hash fields in a VoteMessage // have not been tampered with, while maintaining all existing guarantees that the validator // has voted for a given block. // @@ -173,8 +173,8 @@ func GetVoteSignaturePayload(view uint64, blockHash BlockHash) [32]byte { return sha3.Sum256(payload) } -// When timing out for a view, validators sign the payload sha3-256(View, HighQCView) with their BLS -// private key. This hash guarantees that the view and high QC view fields in a TimeoutMessage +// When timing out for a view, validators sign the payload sha3-256(TimeoutOpCode, View, HighQCView) with +// their BLS private key. This hash guarantees that the view and high QC view fields in a TimeoutMessage // have not been tampered with. func GetTimeoutSignaturePayload(view uint64, highQCView uint64) [32]byte { viewBytes := make([]byte, 8) diff --git a/lib/block_view.go b/lib/block_view.go index 9faf178ec..6e9f3f2ca 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -169,7 +169,7 @@ type UtxoView struct { // It contains the PKID of the validator at the given index in the leader schedule // generated at the given SnapshotAtEpochNumber. 
SnapshotLeaderSchedule map[SnapshotLeaderScheduleMapKey]*PKID - // HasFullSnapshotLeaderScheduleByEpoch is a map of SnapshotAtEpochNumber to a boolean. If the leader schedule + // HasFullSnapshotLeaderScheduleByEpoch is a map of SnapshotAtEpochNumber to a boolean. If the leader entire schedule // for a given epoch has been loaded from the DB, the value is true. HasFullSnapshotLeaderScheduleByEpoch map[uint64]bool @@ -3479,15 +3479,15 @@ func (bav *UtxoView) _connectUpdateGlobalParams( val, bytesRead := Uvarint( extraData[MempoolCongestionFactorBasisPointsKey], ) - if val > MaxBasisPoints { + if bytesRead <= 0 { return 0, 0, nil, fmt.Errorf( - "_connectUpdateGlobalParams: MempoolCongestionFactorBasisPoints must be <= %d", - MaxBasisPoints, + "_connectUpdateGlobalParams: unable to decode MempoolCongestionFactorBasisPoints as uint64", ) } - if bytesRead <= 0 { + if val > MaxBasisPoints { return 0, 0, nil, fmt.Errorf( - "_connectUpdateGlobalParams: unable to decode MempoolCongestionFactorBasisPoints as uint64", + "_connectUpdateGlobalParams: MempoolCongestionFactorBasisPoints must be <= %d", + MaxBasisPoints, ) } newGlobalParamsEntry.MempoolCongestionFactorBasisPoints = val @@ -3496,15 +3496,15 @@ func (bav *UtxoView) _connectUpdateGlobalParams( val, bytesRead := Uvarint( extraData[MempoolPastBlocksCongestionFactorBasisPointsKey], ) - if val > MaxBasisPoints { + if bytesRead <= 0 { return 0, 0, nil, fmt.Errorf( - "_connectUpdateGlobalParams: MempoolPastBlocksCongestionFactorBasisPoints must be <= %d", - MaxBasisPoints, + "_connectUpdateGlobalParams: unable to decode MempoolPastBlocksCongestionFactorBasisPoints as uint64", ) } - if bytesRead <= 0 { + if val > MaxBasisPoints { return 0, 0, nil, fmt.Errorf( - "_connectUpdateGlobalParams: unable to decode MempoolPastBlocksCongestionFactorBasisPoints as uint64", + "_connectUpdateGlobalParams: MempoolPastBlocksCongestionFactorBasisPoints must be <= %d", + MaxBasisPoints, ) } 
newGlobalParamsEntry.MempoolPastBlocksCongestionFactorBasisPoints = val @@ -3513,15 +3513,15 @@ func (bav *UtxoView) _connectUpdateGlobalParams( val, bytesRead := Uvarint( extraData[MempoolPriorityPercentileBasisPointsKey], ) - if val > MaxBasisPoints { + if bytesRead <= 0 { return 0, 0, nil, fmt.Errorf( - "_connectUpdateGlobalParams: MempoolPriorityPercentileBasisPoints must be <= %d", - MaxBasisPoints, + "_connectUpdateGlobalParams: unable to decode MempoolPriorityPercentileBasisPoints as uint64", ) } - if bytesRead <= 0 { + if val > MaxBasisPoints { return 0, 0, nil, fmt.Errorf( - "_connectUpdateGlobalParams: unable to decode MempoolPriorityPercentileBasisPoints as uint64", + "_connectUpdateGlobalParams: MempoolPriorityPercentileBasisPoints must be <= %d", + MaxBasisPoints, ) } newGlobalParamsEntry.MempoolPriorityPercentileBasisPoints = val @@ -3530,15 +3530,15 @@ func (bav *UtxoView) _connectUpdateGlobalParams( val, bytesRead := Uvarint( extraData[MempoolPastBlocksPriorityPercentileBasisPointsKey], ) - if val > MaxBasisPoints { + if bytesRead <= 0 { return 0, 0, nil, fmt.Errorf( - "_connectUpdateGlobalParams: MempoolPastBlocksPriorityPercentileBasisPoints must be <= %d", - MaxBasisPoints, + "_connectUpdateGlobalParams: unable to decode MempoolPastBlocksPriorityPercentileBasisPoints as uint64", ) } - if bytesRead <= 0 { + if val > MaxBasisPoints { return 0, 0, nil, fmt.Errorf( - "_connectUpdateGlobalParams: unable to decode MempoolPastBlocksPriorityPercentileBasisPoints as uint64", + "_connectUpdateGlobalParams: MempoolPastBlocksPriorityPercentileBasisPoints must be <= %d", + MaxBasisPoints, ) } newGlobalParamsEntry.MempoolPastBlocksPriorityPercentileBasisPoints = val @@ -4246,38 +4246,6 @@ func (bav *UtxoView) _connectSingleTxn( return utxoOpsForTxn, totalInput, totalOutput, fees, nil } -// ConnectTransactionIntoNewUtxoView is a fail safe way to connect a transaction to a view. 
It connects the transaction -// to a copy of the view and returns the new view, the UtxoOperations, total inputs, total outputs, fees, and an error if -// one occurred. The function does not modify the original view. The function is useful when you want to connect a -// transaction to a view without modifying the original view through unintended side effects. -func (bav *UtxoView) ConnectTransactionIntoNewUtxoView( - txn *MsgDeSoTxn, - txHash *BlockHash, - blockHeight uint32, - blockTimestampNanoSecs int64, - verifySignatures bool, - ignoreUtxos bool, -) ( - _finalUtxoView *UtxoView, // The new view with the transaction connected. - _utxoOps []*UtxoOperation, - _totalInput uint64, - _totalOutput uint64, - _fees uint64, - _err error, -) { - // Copy the view so we can try connecting the transaction without modifying the original view. - copiedView := bav.CopyUtxoView() - - // Connect the transaction to the copied view. - utxoOpsForTxn, totalInput, totalOutput, fees, err := copiedView.ConnectTransaction( - txn, txHash, blockHeight, blockTimestampNanoSecs, verifySignatures, ignoreUtxos) - if err != nil { - return nil, nil, 0, 0, 0, errors.Wrapf(err, "TryConnectTransaction: Problem connecting txn on copy view") - } - - return copiedView, utxoOpsForTxn, totalInput, totalOutput, fees, nil -} - func (bav *UtxoView) ValidateTransactionNonce(txn *MsgDeSoTxn, blockHeight uint64) error { if txn == nil || txn.TxnNonce == nil { return fmt.Errorf("ValidateTransactionNonce: Nonce or txn is nil for public key %v", diff --git a/lib/blockchain.go b/lib/blockchain.go index d01a306c3..45eae0877 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -672,7 +672,7 @@ func (bc *Blockchain) CopyBlockIndexes() (_blockIndexByHash map[BlockHash]*Block return newBlockIndexByHash, newBlockIndexByHeight } -func (bc *Blockchain) ConstructBlockIndexByHeight() map[uint64]map[BlockHash]*BlockNode { +func (bc *Blockchain) constructBlockIndexByHeight() map[uint64]map[BlockHash]*BlockNode { 
newBlockIndex := make(map[uint64]map[BlockHash]*BlockNode) for _, blockNode := range bc.blockIndexByHash { blockHeight := uint64(blockNode.Height) @@ -792,7 +792,7 @@ func (bc *Blockchain) _initChain() error { if err != nil { return errors.Wrapf(err, "_initChain: Problem reading block index from db") } - bc.blockIndexByHeight = bc.ConstructBlockIndexByHeight() + bc.blockIndexByHeight = bc.constructBlockIndexByHeight() // At this point the blockIndexByHash should contain a full node tree with all // nodes pointing to valid parent nodes. diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 1705e57b3..a98741758 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -1229,8 +1229,8 @@ func (bav *UtxoView) hasValidBlockProposerPoS(block *MsgDeSoBlock) (_isValidBloc // of the validator set has voted (or timed out). It special cases the first block after the PoS cutover // by overriding the validator set used to validate the high QC in the first block after the PoS cutover. func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validatorSet []*ValidatorEntry) error { - voteQCValidators := toConsensusValidators(validatorSet) - aggregateQCValidators := voteQCValidators + highQCValidators := toConsensusValidators(validatorSet) + aggregateQCValidators := highQCValidators voteQC := block.Header.ValidatorsVoteQC timeoutAggregateQC := block.Header.ValidatorsTimeoutAggregateQC @@ -1250,20 +1250,20 @@ func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validator if err != nil { return errors.Wrapf(err, "isValidPoSQuorumCertificate: Problem building PoS cutover validator") } - voteQCValidators = []consensus.Validator{posCutoverValidator} + highQCValidators = []consensus.Validator{posCutoverValidator} } } // Validate the timeout aggregate QC. 
if !timeoutAggregateQC.isEmpty() { - if !consensus.IsValidSuperMajorityAggregateQuorumCertificate(timeoutAggregateQC, aggregateQCValidators, voteQCValidators) { + if !consensus.IsValidSuperMajorityAggregateQuorumCertificate(timeoutAggregateQC, aggregateQCValidators, highQCValidators) { return RuleErrorInvalidTimeoutQC } return nil } // Validate the vote QC. - if !consensus.IsValidSuperMajorityQuorumCertificate(voteQC, voteQCValidators) { + if !consensus.IsValidSuperMajorityQuorumCertificate(voteQC, highQCValidators) { return RuleErrorInvalidVoteQC } From 15b6757fb1ac4400632657aaebce71e5981aeb7d Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 15 May 2024 19:18:18 -0400 Subject: [PATCH 724/762] Request Mempool INV Message From Peer When In Steady State (#1324) --- lib/server.go | 49 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/lib/server.go b/lib/server.go index a1928513e..a08c18ba2 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1085,7 +1085,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // its initial state is fully current we'll always bootstrap our mempools with a // mempool request. The alternative is that our state is not fully current // when we boot up, and we cover this second case in the _handleBlock function. - srv._maybeRequestSync(pp) + srv._tryRequestMempoolFromPeer(pp) // At this point we should have processed all the headers. 
Now we will // make a decision on whether to request more headers from this peer based @@ -1828,6 +1828,7 @@ func (srv *Server) HandleAcceptedPeer(rn *RemoteNode) { if rn == nil || rn.GetPeer() == nil { return } + pp := rn.GetPeer() pp.SetServiceFlag(rn.GetServiceFlag()) pp.SetLatestBlockHeight(rn.GetLatestBlockHeight()) @@ -1839,13 +1840,14 @@ func (srv *Server) HandleAcceptedPeer(rn *RemoteNode) { "syncPeerIsNil=(%v), IsSyncing=(%v), ChainState=(%v)", pp, isSyncCandidate, (srv.SyncPeer == nil), isSyncing, chainState) - // Request a sync if we're ready - srv._maybeRequestSync(pp) + // Request a mempool sync if we're ready + srv._tryRequestMempoolFromPeer(pp) // Start syncing by choosing the best candidate. if isSyncCandidate && srv.SyncPeer == nil { srv._startSync() } + if !isSyncCandidate { glog.Infof("Peer is not sync candidate: %v (isOutbound: %v)", pp, pp.isOutbound) } @@ -2147,20 +2149,35 @@ func (srv *Server) _updatePosMempoolAfterTipChange() error { return nil } -func (srv *Server) _maybeRequestSync(pp *Peer) { - // Send the mempool message if DeSo and Bitcoin are fully current - if srv.blockchain.chainState() == SyncStateFullyCurrent { - // If peer is not nil and we haven't set a max sync blockheight, we will - if pp != nil && srv.blockchain.MaxSyncBlockHeight == 0 { - glog.V(1).Infof("Server._maybeRequestSync: Sending mempool message: %v", pp) - pp.AddDeSoMessage(&MsgDeSoMempool{}, false) - } else { - glog.V(1).Infof("Server._maybeRequestSync: NOT sending mempool message because peer is nil: %v", pp) - } +// _tryRequestMempoolFromPeer checks if the blockchain is current or in the steady state. If so, +// it sends a MsgDeSoMempool to request the peer's mempool. After this point, the peer will send +// us inv messages for transactions that we don't have in our mempool. +func (srv *Server) _tryRequestMempoolFromPeer(pp *Peer) { + // If the peer is nil, then there's nothing to do. 
+ if pp == nil { + glog.V(1).Infof("Server._tryRequestMempoolFromPeer: NOT sending mempool message because peer is nil: %v", pp) + return + } + + // If the node was only configured to sync to a certain block height, then there's nothing to do. + if srv.blockchain.MaxSyncBlockHeight != 0 { + return + } + + // We are OK to request the peer's mempool as long as the chain is current or we are running the + // FastHotStuffConsensus in the steady state. + isChainCurrent := srv.blockchain.chainState() == SyncStateFullyCurrent + isRunningFastHotStuffConsensus := srv.fastHotStuffConsensus != nil && srv.fastHotStuffConsensus.IsRunning() + + if isChainCurrent || isRunningFastHotStuffConsensus { + glog.V(1).Infof("Server._tryRequestMempoolFromPeer: Sending mempool message: %v", pp) + pp.AddDeSoMessage(&MsgDeSoMempool{}, false) } else { - glog.V(1).Infof("Server._maybeRequestSync: NOT sending mempool message because not current: %v, %v", + glog.V(1).Infof( + "Server._tryRequestMempoolFromPeer: NOT sending mempool message. The node is still syncing: %v, %v", srv.blockchain.chainState(), - pp) + pp, + ) } } @@ -2401,7 +2418,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { // If we get here, it means we're in SyncStateFullyCurrent, which is great. // In this case we shoot a MEMPOOL message over to the peer to bootstrap the mempool. - srv._maybeRequestSync(pp) + srv._tryRequestMempoolFromPeer(pp) // Exit early if the chain isn't SyncStateFullyCurrent. 
if srv.blockchain.chainState() != SyncStateFullyCurrent { From d9673127df4a89442ca0066a56f7b70f92cf1e29 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 15 May 2024 19:20:32 -0400 Subject: [PATCH 725/762] Request Mempool Only Once From Peer (#1325) --- lib/peer.go | 10 +++++++++- lib/server.go | 14 +++++++++++--- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/lib/peer.go b/lib/peer.go index 4583af34e..c2c24ba9c 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -108,7 +108,10 @@ type Peer struct { // Whether the peer is ready to receive INV messages. For a peer that // still needs a mempool download, this is false. - canReceiveInvMessagess bool + canReceiveInvMessages bool + // Whether we have sent a MEMPOOL message to the peer to request INV messages. + // This makes sure that we only ever send one MEMPOOL message to the peer. + hasReceivedMempoolMessage bool // We process GetTransaction requests in a separate loop. This allows us // to ensure that the responses are ordered. @@ -954,6 +957,11 @@ out: } } + // If we're sending a MEMPOOL message, then record that we have requested the peer's mempool. + if msg.GetMsgType() == MsgTypeMempool { + pp.hasReceivedMempoolMessage = true + } + // If we're sending a block, remove it from our blocksToSend map to allow // the peer to request more blocks after receiving this one. 
if msg.GetMsgType() == MsgTypeBlock { diff --git a/lib/server.go b/lib/server.go index a08c18ba2..8221a6943 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1966,9 +1966,9 @@ func (srv *Server) _relayTransactions() { txnList := mempool.GetTransactions() for _, pp := range allPeers { - if !pp.canReceiveInvMessagess { + if !pp.canReceiveInvMessages { glog.V(1).Infof("Skipping invs for peer %v because not ready "+ - "yet: %v", pp, pp.canReceiveInvMessagess) + "yet: %v", pp, pp.canReceiveInvMessages) continue } // For each peer construct an inventory message that excludes transactions @@ -2159,6 +2159,14 @@ func (srv *Server) _tryRequestMempoolFromPeer(pp *Peer) { return } + // If we have already requested the mempool from the peer, then there's nothing to do. + if pp.hasReceivedMempoolMessage { + glog.V(1).Infof( + "Server._tryRequestMempoolFromPeer: NOT sending mempool message because we have already sent one: %v", pp, + ) + return + } + // If the node was only configured to sync to a certain block height, then there's nothing to do. 
if srv.blockchain.MaxSyncBlockHeight != 0 { return @@ -2630,7 +2638,7 @@ func (srv *Server) _handleTransactionBundleV2(pp *Peer, msg *MsgDeSoTransactionB func (srv *Server) _handleMempool(pp *Peer, msg *MsgDeSoMempool) { glog.V(1).Infof("Server._handleMempool: Received Mempool message from Peer %v", pp) - pp.canReceiveInvMessagess = true + pp.canReceiveInvMessages = true } func (srv *Server) StartStatsdReporter() { From e70a7761ad5a9ba1975512c034308d3da649f0e5 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 15 May 2024 19:22:51 -0400 Subject: [PATCH 726/762] Fix Nil Pointer Reference For Snapshot Object During Blocksync (#1326) Fix Nil Pointer Reference For Snapshot Object During Blocksync Fix naming --- lib/server.go | 50 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 20 deletions(-) diff --git a/lib/server.go b/lib/server.go index 8221a6943..95545f393 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1104,26 +1104,16 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // If we get here it means that we've just finished syncing headers and we will proceed to // syncing state either through hyper sync or block sync. First let's check if the peer // supports hypersync and if our block tip is old enough so that it makes sense to sync state. + if NodeCanHypersyncState(srv.cmgr.SyncType) && srv.blockchain.isHyperSyncCondition() { // If hypersync conditions are satisfied, we will be syncing state. This assignment results // in srv.blockchain.chainState() to be equal to SyncStateSyncingSnapshot srv.blockchain.syncingState = true } - // Regardless of whether we are going to hypersync from the peer, we want to compute the - // expectedSnapshotHeight. - bestHeaderHeight := uint64(srv.blockchain.headerTip().Height) - // The peer's snapshot block height period before the first PoS fork height is expected to be the - // PoW default value. 
After the fork height, it's expected to be the value defined in the params. - snapshotBlockHeightPeriod := srv.params.GetSnapshotBlockHeightPeriod( - bestHeaderHeight, - srv.snapshot.GetSnapshotBlockHeightPeriod(), - ) - expectedSnapshotHeight := bestHeaderHeight - (bestHeaderHeight % snapshotBlockHeightPeriod) - posSetupForkHeight := uint64(srv.params.ForkHeights.ProofOfStake1StateSetupBlockHeight) - if expectedSnapshotHeight < posSetupForkHeight { - expectedSnapshotHeight = posSetupForkHeight - (posSetupForkHeight % srv.params.DefaultPoWSnapshotBlockHeightPeriod) - } + // Fetch the header tip height once before we do anything in case we need it to compute the expected + // snapshot height. + currentHeaderTipHeight := uint64(srv.blockchain.headerTip().Height) if srv.blockchain.chainState() == SyncStateSyncingSnapshot { glog.V(1).Infof("Server._handleHeaderBundle: *Syncing* state starting at "+ @@ -1131,6 +1121,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // If node is a hyper sync node and we haven't finished syncing state yet, we will kick off state sync. if srv.cmgr.HyperSync { + expectedSnapshotHeight := srv.computeExpectedSnapshotHeight(currentHeaderTipHeight) srv.blockchain.snapshot.Migrations.CleanupMigrations(expectedSnapshotHeight) if len(srv.HyperSyncProgress.PrefixProgress) != 0 { @@ -1214,12 +1205,12 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // If we have exhausted the peer's headers but our blocks aren't current, // send a GetBlocks message to the peer for as many blocks as we can get. if srv.blockchain.chainState() == SyncStateSyncingBlocks { - // Regardless of whether we're hypersyncing, we need to ensure that the - // FirstSnapshotBlockHeight is set correctly. This ensures that we won't do unnecessary - // hypersync computations until we absolutely have to. 
- if srv.snapshot != nil && - srv.snapshot.CurrentEpochSnapshotMetadata != nil && - srv.snapshot.CurrentEpochSnapshotMetadata.FirstSnapshotBlockHeight == 0 { + // Regardless of whether we're hypersyncing, we need to ensure that the FirstSnapshotBlockHeight + // is set correctly. This ensures that we won't do unnecessary hypersync computations until we + // absolutely have to. + hasSnapshotMetadata := srv.snapshot != nil && srv.snapshot.CurrentEpochSnapshotMetadata != nil + if hasSnapshotMetadata && srv.snapshot.CurrentEpochSnapshotMetadata.FirstSnapshotBlockHeight == 0 { + expectedSnapshotHeight := srv.computeExpectedSnapshotHeight(currentHeaderTipHeight) srv.snapshot.CurrentEpochSnapshotMetadata.FirstSnapshotBlockHeight = expectedSnapshotHeight } @@ -1323,6 +1314,25 @@ func (srv *Server) _handleGetSnapshot(pp *Peer, msg *MsgDeSoGetSnapshot) { pp.AddDeSoMessage(msg, true /*inbound*/) } +// computeExpectedSnapshotHeight computes the highest expected Hypersync snapshot height based on +// a header tip's height. The returned value is a block height < headerTipHeight that represents the +// highest block height that we expect the network to have produced a snapshot for. +func (srv *Server) computeExpectedSnapshotHeight(headerTipHeight uint64) uint64 { + // The peer's snapshot block height period before the first PoS fork height is expected to be the + // PoW default value. After the fork height, it's expected to be the value defined in the params. 
+ snapshotBlockHeightPeriod := srv.params.GetSnapshotBlockHeightPeriod( + headerTipHeight, + srv.snapshot.GetSnapshotBlockHeightPeriod(), + ) + expectedSnapshotHeight := headerTipHeight - (headerTipHeight % snapshotBlockHeightPeriod) + posSetupForkHeight := uint64(srv.params.ForkHeights.ProofOfStake1StateSetupBlockHeight) + if expectedSnapshotHeight < posSetupForkHeight { + expectedSnapshotHeight = posSetupForkHeight - (posSetupForkHeight % srv.params.DefaultPoWSnapshotBlockHeightPeriod) + } + + return expectedSnapshotHeight +} + // _handleSnapshot gets called when we receive a SnapshotData message from a peer. The message contains // a snapshot chunk, which is a sorted list of pairs representing a section of the database // at current snapshot epoch. We will set these entries in our node's database as well as update the checksum. From 9156ed119550a319a10b91e56de9b85c0661a32c Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 15 May 2024 20:27:18 -0400 Subject: [PATCH 727/762] Update Ancestor Lineage Helper Function To Only Supported Stored Blocks (#1327) --- lib/blockchain.go | 2 +- lib/pos_blockchain.go | 38 ++++++++++++++++++++++++++------------ lib/pos_blockchain_test.go | 29 +++++++++++++++++++---------- 3 files changed, 46 insertions(+), 23 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 45eae0877..6af1b95d8 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -868,7 +868,7 @@ func (bc *Blockchain) _applyUncommittedBlocksToBestChain() error { ////////////////////////// Update the bestChain in-memory data structures ////////////////////////// // Fetch the lineage of blocks from the committed tip through the uncommitted tip. 
- lineageFromCommittedTip, err := bc.getLineageFromCommittedTip(uncommittedTipBlockNode.Header) + lineageFromCommittedTip, _, err := bc.getStoredLineageFromCommittedTip(uncommittedTipBlockNode.Header) if err != nil { return errors.Wrapf(err, "_applyUncommittedBlocksToBestChain: ") } diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index a98741758..1068c6651 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -250,7 +250,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // is an orphan, then we store it after performing basic validations. // If the block extends from any committed block other than the committed tip, // then we throw it away. - lineageFromCommittedTip, err := bc.getLineageFromCommittedTip(block.Header) + lineageFromCommittedTip, missingBlockHashes, err := bc.getStoredLineageFromCommittedTip(block.Header) if errors.Is(err, RuleErrorDoesNotExtendCommittedTip) || errors.Is(err, RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock) || errors.Is(err, RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight) || @@ -265,7 +265,6 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // on our best chain. Try to process the orphan by running basic validations. // If it passes basic integrity checks, we'll store it with the hope that we // will eventually get a parent that connects to our best chain. - missingBlockHashes := []*BlockHash{block.Header.PrevBlockHash} return false, true, missingBlockHashes, bc.processOrphanBlockPoS(block) } @@ -1270,13 +1269,19 @@ func (bc *Blockchain) isValidPoSQuorumCertificate(block *MsgDeSoBlock, validator return nil } -// getLineageFromCommittedTip returns the ancestors of the block provided up to, but not +// getStoredLineageFromCommittedTip returns the ancestors of the block provided up to, but not // including the committed tip. The first block in the returned slice is the first uncommitted -// ancestor. 
-func (bc *Blockchain) getLineageFromCommittedTip(header *MsgDeSoHeader) ([]*BlockNode, error) { +// ancestor. if a valid lineage is returned, it means that we have all of the blocks in the +// lineage stored and that we are able to build the state of the chain up to the parent of the +// given header. +func (bc *Blockchain) getStoredLineageFromCommittedTip(header *MsgDeSoHeader) ( + _lineageFromCommittedTip []*BlockNode, + _missingBlockHashes []*BlockHash, + _err error, +) { highestCommittedBlock, idx := bc.GetCommittedTip() if idx == -1 || highestCommittedBlock == nil { - return nil, errors.New("getLineageFromCommittedTip: No committed blocks found") + return nil, nil, errors.New("getStoredLineageFromCommittedTip: No committed blocks found") } currentHash := header.PrevBlockHash.NewBlockHash() ancestors := []*BlockNode{} @@ -1285,29 +1290,38 @@ func (bc *Blockchain) getLineageFromCommittedTip(header *MsgDeSoHeader) ([]*Bloc for { currentBlock, exists := bc.blockIndexByHash[*currentHash] if !exists { - return nil, RuleErrorMissingAncestorBlock + return nil, []*BlockHash{currentHash}, RuleErrorMissingAncestorBlock } if currentBlock.Hash.IsEqual(highestCommittedBlock.Hash) { break } if currentBlock.IsCommitted() { - return nil, RuleErrorDoesNotExtendCommittedTip + return nil, nil, RuleErrorDoesNotExtendCommittedTip } if currentBlock.IsValidateFailed() { - return nil, RuleErrorAncestorBlockValidationFailed + return nil, nil, RuleErrorAncestorBlockValidationFailed } if uint64(currentBlock.Header.Height)+1 != prevHeight { - return nil, RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight + return nil, nil, RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight } if currentBlock.Header.GetView() >= prevView { - return nil, RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock + return nil, nil, RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock } + + // If the current block is not marked as ValidateFailed but is also not Stored, it + // means we have 
never seen the block before. We have it in the block index because + // we previously saw its header. We need to request the block again from a peer and + // consider it to be missing. + if !currentBlock.IsStored() { + return nil, []*BlockHash{currentHash}, RuleErrorMissingAncestorBlock + } + ancestors = append(ancestors, currentBlock) currentHash = currentBlock.Header.PrevBlockHash prevHeight = currentBlock.Header.Height prevView = currentBlock.Header.GetView() } - return collections.Reverse(ancestors), nil + return collections.Reverse(ancestors), nil, nil } // getOrCreateBlockNodeFromBlockIndex returns the block node from the block index if it exists. diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 9013d9573..881930100 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -822,15 +822,18 @@ func TestGetLineageFromCommittedTip(t *testing.T) { }, } // If parent is committed tip, we'll have 0 ancestors. - ancestors, err := bc.getLineageFromCommittedTip(block.Header) + ancestors, missingBlockHashes, err := bc.getStoredLineageFromCommittedTip(block.Header) require.NoError(t, err) require.Len(t, ancestors, 0) + require.Len(t, missingBlockHashes, 0) // If parent block is not in block index, we should get an error block.Header.PrevBlockHash = NewBlockHash(RandomBytes(32)) - ancestors, err = bc.getLineageFromCommittedTip(block.Header) + ancestors, missingBlockHashes, err = bc.getStoredLineageFromCommittedTip(block.Header) require.Error(t, err) require.Equal(t, err, RuleErrorMissingAncestorBlock) + require.Len(t, missingBlockHashes, 1) + require.True(t, missingBlockHashes[0].IsEqual(block.Header.PrevBlockHash)) require.Nil(t, ancestors) // If this block extends from a committed block that is not the tip, we should get an error. 
@@ -845,38 +848,44 @@ func TestGetLineageFromCommittedTip(t *testing.T) { }, StatusBlockStored|StatusBlockValidated|StatusBlockCommitted) bc.bestChain = append(bc.bestChain, block2) bc.blockIndexByHash[*hash2] = block2 - ancestors, err = bc.getLineageFromCommittedTip(block.Header) + ancestors, missingBlockHashes, err = bc.getStoredLineageFromCommittedTip(block.Header) require.Error(t, err) require.Equal(t, err, RuleErrorDoesNotExtendCommittedTip) + require.Len(t, missingBlockHashes, 0) // update block to be uncommitted block2.Status = StatusBlockStored | StatusBlockValidated // set new block's parent as block 2. block.Header.PrevBlockHash = hash2 - ancestors, err = bc.getLineageFromCommittedTip(block.Header) + ancestors, missingBlockHashes, err = bc.getStoredLineageFromCommittedTip(block.Header) require.NoError(t, err) require.Len(t, ancestors, 1) + require.Len(t, missingBlockHashes, 0) // Testing error cases // Set block 2 to be ValidateFailed block2.Status = StatusBlockStored | StatusBlockValidateFailed - ancestors, err = bc.getLineageFromCommittedTip(block.Header) + ancestors, missingBlockHashes, err = bc.getStoredLineageFromCommittedTip(block.Header) require.Error(t, err) require.Equal(t, err, RuleErrorAncestorBlockValidationFailed) + require.Len(t, missingBlockHashes, 0) // Revert block 2 status. 
block2.Status = StatusBlockStored | StatusBlockValidated // Set block's height to be <= block2's height block.Header.Height = 2 - ancestors, err = bc.getLineageFromCommittedTip(block.Header) + ancestors, missingBlockHashes, err = bc.getStoredLineageFromCommittedTip(block.Header) require.Error(t, err) require.Equal(t, err, RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight) + require.Len(t, missingBlockHashes, 0) + // Revert block 2's height and set block's view to be <= block2's view block.Header.Height = 3 block.Header.ProposedInView = 2 - ancestors, err = bc.getLineageFromCommittedTip(block.Header) + ancestors, missingBlockHashes, err = bc.getStoredLineageFromCommittedTip(block.Header) require.Error(t, err) require.Equal(t, err, RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock) + require.Len(t, missingBlockHashes, 0) } // TestIsValidPoSQuorumCertificate tests that isValidPoSQuorumCertificate works as expected. @@ -1325,7 +1334,7 @@ func TestTryApplyNewTip(t *testing.T) { newBlockHash, err := newBlock.Hash() require.NoError(t, err) - ancestors, err := bc.getLineageFromCommittedTip(newBlock.Header) + ancestors, _, err := bc.getStoredLineageFromCommittedTip(newBlock.Header) require.NoError(t, err) checkBestChainForHash := func(hash *BlockHash) bool { return collections.Any(bc.bestChain, func(bn *BlockNode) bool { @@ -1403,7 +1412,7 @@ func TestTryApplyNewTip(t *testing.T) { newBlockNode.Header.Height = 7 newBlockNode.Height = 7 require.NoError(t, err) - ancestors, err = bc.getLineageFromCommittedTip(newBlock.Header) + ancestors, _, err = bc.getStoredLineageFromCommittedTip(newBlock.Header) require.NoError(t, err) // Try to apply newBlock as tip. 
@@ -1462,7 +1471,7 @@ func TestTryApplyNewTip(t *testing.T) { newBlockNode.Header.Height = 5 newBlockNode.Height = 5 require.NoError(t, err) - ancestors, err = bc.getLineageFromCommittedTip(newBlock.Header) + ancestors, _, err = bc.getStoredLineageFromCommittedTip(newBlock.Header) require.NoError(t, err) appliedNewTip, connectedBlockHashes, disconnectedBlockHashes, err = bc.tryApplyNewTip(newBlockNode, 6, ancestors) require.True(t, appliedNewTip) From 78e0a5f329dec5385d8da2f29f3a8b02a9f3990e Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 15 May 2024 20:29:30 -0400 Subject: [PATCH 728/762] Add Helper Function To Heal Orphan BlockNode Parent Pointers (#1328) --- lib/pos_blockchain.go | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 1068c6651..589b34f96 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -72,6 +72,10 @@ func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, verifySignatures b return false, false, errors.Wrapf(err, "processHeaderPoS: Problem validating and indexing header: ") } + // Now that we know we have a valid header, we check the block index for any orphan children of it + // and heal the parent pointers for all of them. + bc.healPointersForOrphanChildren(blockNode) + // Exit early if the header is an orphan. if isOrphan { return false, true, nil @@ -85,7 +89,7 @@ } // The header is not an orphan and has a higher view than the current tip. We reorg the header chain - // and apply the incoming header is the new tip. + // and apply the incoming header as the new tip. 
_, blocksToDetach, blocksToAttach := GetReorgBlocks(currentTip, blockNode) bc.bestHeaderChain, bc.bestHeaderChainMap = updateBestChainInMemory( bc.bestHeaderChain, @@ -98,6 +102,37 @@ func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, verifySignatures b return true, false, nil } +// healPointersForOrphanChildren fixes an inconsistency in the block index that may have +// occurred as a result of a node restart. In cases where we have an orphan node that we store in the +// DB, then on restart, that node's parent will not be in the block index. When processing the parent +// later on, we not only need to store the parent in the block index but also need to update the +// pointer from the orphan block's BlockNode to the parent. We do that dynamically here as we +// process headers. +func (bc *Blockchain) healPointersForOrphanChildren(blockNode *BlockNode) { + // Fetch all potential children of this blockNode from the block index. + blockNodesAtNextHeight, exists := bc.blockIndexByHeight[blockNode.Header.Height+1] + if !exists { + // No children of this blockNode exist in the block index. Exit early. + return + } + + // Iterate through all block nodes at the next block height and update their parent pointers. + for _, blockNodeAtNextHeight := range blockNodesAtNextHeight { + // Check if it's a child of the parent block node. + if !blockNodeAtNextHeight.Header.PrevBlockHash.IsEqual(blockNode.Hash) { + continue + } + + // Check if it has its parent pointer set. If it does, then we exit early. + if blockNodeAtNextHeight.Parent != nil { + continue + } + + // If the parent block node is not set, then we set it to the parent block node. 
+ blockNodeAtNextHeight.Parent = blockNode + } +} + func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHash *BlockHash, verifySignatures bool) ( _headerBlockNode *BlockNode, _isOrphan bool, _err error, ) { From d1b99cb76216df0d9969a5a15b3e5937423a61e9 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 15 May 2024 20:31:33 -0400 Subject: [PATCH 729/762] Request Missing Ancestors of Orphan Blocks From Peer (#1329) --- lib/pos_consensus.go | 19 +++--- lib/server.go | 141 ++++++++++++++++++++++++++++++------------- 2 files changed, 107 insertions(+), 53 deletions(-) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index b98109d4d..f6853c10e 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -570,7 +570,7 @@ func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoVa return nil } -func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) (_isOprhan bool, _err error) { +func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) (missingBlockHashes []*BlockHash, _err error) { glog.V(2).Infof("FastHotStuffConsensus.HandleBlock: Received block: \n%s", msg.String()) glog.V(2).Infof("FastHotStuffConsensus.HandleBlock: %s", fc.fastHotStuffEventLoop.ToString()) @@ -580,7 +580,7 @@ func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) (_isOp defer fc.lock.Unlock() if !fc.fastHotStuffEventLoop.IsRunning() { - return false, errors.Errorf("FastHotStuffConsensus.HandleBlock: FastHotStuffEventLoop is not running") + return nil, errors.Errorf("FastHotStuffConsensus.HandleBlock: FastHotStuffEventLoop is not running") } // Hold the blockchain's write lock so that the chain cannot be mutated underneath us. @@ -596,11 +596,12 @@ func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) (_isOp if err != nil { // If we get an error here, it means something went wrong with the block processing algorithm. 
// Nothing we can do to recover here. - return false, errors.Errorf("FastHotStuffConsensus.HandleBlock: Error processing block as new tip: %v", err) + return nil, errors.Errorf("FastHotStuffConsensus.HandleBlock: Error processing block as new tip: %v", err) } // If there are missing block hashes, then we need to fetch the missing blocks from the network - // and retry processing the block as a new tip. We'll request the blocks from the same peer. + // and retry processing the block as a new tip. We'll return the missing block hashes so that + // the server can request them from the same peer in a standardized manner. // // If we need to optimize this in the future, we can additionally send the block hash of our // current committed tip. The peer can then send us all of the blocks that are missing starting @@ -608,17 +609,11 @@ func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) (_isOp // // See https://github.com/deso-protocol/core/pull/875#discussion_r1460183510 for more details. if len(missingBlockHashes) > 0 { - remoteNode := fc.networkManager.GetRemoteNodeFromPeer(pp) - if remoteNode == nil { - glog.Errorf("FastHotStuffConsensus.HandleBlock: RemoteNode not found for peer: %v", pp) - } else { - sendMessageToRemoteNodeAsync(remoteNode, &MsgDeSoGetBlocks{HashList: missingBlockHashes}) - } - return true, nil + return missingBlockHashes, nil } // Happy path. The block was processed successfully and applied as the new tip. Nothing left to do. - return false, nil + return nil, nil } // tryProcessBlockAsNewTip tries to apply a new tip block to both the Blockchain and FastHotStuffEventLoop data diff --git a/lib/server.go b/lib/server.go index 95545f393..e0cdb4b8e 100644 --- a/lib/server.go +++ b/lib/server.go @@ -879,20 +879,11 @@ func (srv *Server) GetBlocksToStore(pp *Peer) { // GetBlocks computes what blocks we need to fetch and asks for them from the // corresponding peer. It is typically called after we have exited // SyncStateSyncingHeaders. 
-func (srv *Server) GetBlocks(pp *Peer, maxHeight int) { - // Fetch as many blocks as we can from this peer. - // If our peer is on PoS then we can safely request a lot more blocks from them in - // each flight. - maxBlocksInFlight := MaxBlocksInFlight - if pp.Params.ProtocolVersion >= ProtocolVersion2 && - (srv.params.IsPoSBlockHeight(uint64(srv.blockchain.blockTip().Height)) || - srv.params.NetworkType == NetworkType_TESTNET) { - maxBlocksInFlight = MaxBlocksInFlightPoS - } - numBlocksToFetch := maxBlocksInFlight - len(pp.requestedBlocks) - +func (srv *Server) RequestBlocksUpToHeight(pp *Peer, maxHeight int) { + numBlocksToFetch := srv.getMaxBlocksInFlight(pp) - len(pp.requestedBlocks) blockNodesToFetch := srv.blockchain.GetBlockNodesToFetch( - numBlocksToFetch, maxHeight, pp.requestedBlocks) + numBlocksToFetch, maxHeight, pp.requestedBlocks, + ) if len(blockNodesToFetch) == 0 { // This can happen if, for example, we're already requesting the maximum // number of blocks we can. Just return in this case. @@ -903,18 +894,55 @@ func (srv *Server) GetBlocks(pp *Peer, maxHeight int) { hashList := []*BlockHash{} for _, node := range blockNodesToFetch { hashList = append(hashList, node.Hash) - pp.requestedBlocks[*node.Hash] = true } - pp.AddDeSoMessage(&MsgDeSoGetBlocks{ - HashList: hashList, - }, false) + + pp.AddDeSoMessage(&MsgDeSoGetBlocks{HashList: hashList}, false) glog.V(1).Infof("GetBlocks: Downloading %d blocks from header %v to header %v from peer %v", len(blockNodesToFetch), blockNodesToFetch[0].Header, blockNodesToFetch[len(blockNodesToFetch)-1].Header, - pp) + pp, + ) +} + +// RequestBlocksByHash requests the exact blocks specified by the block hashes from the peer. +func (srv *Server) RequestBlocksByHash(pp *Peer, blockHashes []*BlockHash) { + numBlocksToFetch := srv.getMaxBlocksInFlight(pp) - len(pp.requestedBlocks) + if numBlocksToFetch <= 0 { + return + } + + // We will only request the blocks that we haven't already requested. 
+ blocksToRequest := []*BlockHash{} + for _, blockHash := range blockHashes { + if pp.requestedBlocks[*blockHash] { + continue + } + blocksToRequest = append(blocksToRequest, blockHash) + pp.requestedBlocks[*blockHash] = true + } + + if len(blocksToRequest) == 0 { + return + } + + pp.AddDeSoMessage(&MsgDeSoGetBlocks{HashList: blocksToRequest}, false) + + glog.V(1).Infof("GetBlockByHash: Downloading %d blocks from peer %v", len(blocksToRequest), pp) +} + +func (srv *Server) getMaxBlocksInFlight(pp *Peer) int { + // Fetch as many blocks as we can from this peer. If our peer is on PoS + // then we can safely request a lot more blocks from them in each flight. + maxBlocksInFlight := MaxBlocksInFlight + if pp.Params.ProtocolVersion >= ProtocolVersion2 && + (srv.params.IsPoSBlockHeight(uint64(srv.blockchain.blockTip().Height)) || + srv.params.NetworkType == NetworkType_TESTNET) { + maxBlocksInFlight = MaxBlocksInFlightPoS + } + return maxBlocksInFlight } // shouldVerifySignatures determines if we should verify signatures for headers or not. 
@@ -1224,7 +1252,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { blockTip.Header.Height+1, msg.TipHeight, pp) maxHeight := -1 srv.blockchain.updateCheckpointBlockInfo() - srv.GetBlocks(pp, maxHeight) + srv.RequestBlocksUpToHeight(pp, maxHeight) return } @@ -1255,7 +1283,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { glog.V(1).Infof("Server._handleHeaderBundle: *Downloading* blocks starting at "+ "block tip %v out of %d from peer %v", blockTip.Header, msg.TipHeight, pp) - srv.GetBlocks(pp, int(msg.TipHeight)) + srv.RequestBlocksUpToHeight(pp, int(msg.TipHeight)) return } @@ -1691,7 +1719,7 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { } headerTip := srv.blockchain.headerTip() - srv.GetBlocks(pp, int(headerTip.Height)) + srv.RequestBlocksUpToHeight(pp, int(headerTip.Height)) } // dirtyHackUpdateDbOpts closes the current badger DB instance and re-opens it with the provided options. @@ -2171,7 +2199,7 @@ func (srv *Server) _tryRequestMempoolFromPeer(pp *Peer) { // If we have already requested the mempool from the peer, then there's nothing to do. if pp.hasReceivedMempoolMessage { - glog.V(1).Infof( + glog.V(2).Infof( "Server._tryRequestMempoolFromPeer: NOT sending mempool message because we have already sent one: %v", pp, ) return @@ -2311,26 +2339,37 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { return } - // Only verify signatures for recent blocks. var isOrphan bool + var blockHashesToRequest []*BlockHash + + // Process the block using the FastHotStuffConsensus or through the blockchain directly. If we're in the + // PoS steady state, we pass the block to the FastHotStuffConsensus to handle the block. If we're still + // syncing, then we pass the block to the blockchain to handle the block with signature verification on or off. 
if srv.fastHotStuffConsensus != nil && srv.fastHotStuffConsensus.IsRunning() { // If the FastHotStuffConsensus has been initialized, then we pass the block to the new consensus // which will validate the block, try to apply it, and handle the orphan case by requesting missing // parents. - isOrphan, err = srv.fastHotStuffConsensus.HandleBlock(pp, blk) + glog.V(1).Infof(CLog(Cyan, fmt.Sprintf( + "Server._handleBlock: Processing block %v with FastHotStuffConsensus with SyncState=%v for peer %v", + blk, srv.blockchain.chainState(), pp, + ))) + blockHashesToRequest, err = srv.fastHotStuffConsensus.HandleBlock(pp, blk) + isOrphan = len(blockHashesToRequest) > 0 } else if !verifySignatures { - glog.V(1).Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlock: Processing block %v WITHOUT "+ - "signature checking because SyncState=%v for peer %v", - blk, srv.blockchain.chainState(), pp))) - _, isOrphan, _, err = srv.blockchain.ProcessBlock(blk, false) + glog.V(1).Infof(CLog(Cyan, fmt.Sprintf( + "Server._handleBlock: Processing block %v WITHOUT signature checking because SyncState=%v for peer %v", + blk, srv.blockchain.chainState(), pp, + ))) + _, isOrphan, blockHashesToRequest, err = srv.blockchain.ProcessBlock(blk, false) } else { // TODO: Signature checking slows things down because it acquires the ChainLock. // The optimal solution is to check signatures in a way that doesn't acquire the // ChainLock, which is what Bitcoin Core does. 
- glog.V(1).Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlock: Processing block %v WITH "+ - "signature checking because SyncState=%v for peer %v", - blk, srv.blockchain.chainState(), pp))) - _, isOrphan, _, err = srv.blockchain.ProcessBlock(blk, true) + glog.V(1).Infof(CLog(Cyan, fmt.Sprintf( + "Server._handleBlock: Processing block %v WITH signature checking because SyncState=%v for peer %v", + blk, srv.blockchain.chainState(), pp, + ))) + _, isOrphan, blockHashesToRequest, err = srv.blockchain.ProcessBlock(blk, true) } // If we hit an error then abort mission entirely. We should generally never @@ -2355,14 +2394,6 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { } } - if isOrphan { - // It's possible to receive an orphan block if we're connected directly to the - // block producer, and they are broadcasting blocks in the steady state. We log - // a warning in this case and move on. - glog.Warningf("ERROR: Received orphan block with hash %v height %v.", blockHash, blk.Header.Height) - return - } - srv.timer.End("Server._handleBlock: Process Block") srv.timer.Print("Server._handleBlock: General") @@ -2375,6 +2406,34 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { return } + if isOrphan { + // It's possible to receive an orphan block from the peer for a variety of reasons. If we + // see an orphan block, we do one of two things: + // 1. With the PoS protocol where it is possible to receive an orphan from the block producer + // for any number of reasons, the ProcessBlockPoS returns a non-empty blockHashesToRequest list + // for us to request from the peer. + // 2. With the PoW protocol where we do not expect to ever receive an orphan block due to how + // we request header first before requesting blocks, we disconnect from the peer. + + glog.Warningf("ERROR: Received orphan block with hash %v height %v.", blockHash, blk.Header.Height) + + // Request the missing blocks from the peer if needed. 
+ if len(blockHashesToRequest) > 0 { + glog.Warningf( + "Server._handleBlock: Orphan block %v at height %d. Requesting missing ancestors from peer: %v", + blockHash, + blk.Header.Height, + pp, + ) + srv.RequestBlocksByHash(pp, blockHashesToRequest) + } else { + // If we don't have any blocks to request, then we disconnect from the peer. + srv._logAndDisconnectPeer(pp, blk, "Received orphan block") + } + + return + } + // We shouldn't be receiving blocks while syncing headers, but we can end up here // if it took longer than MaxTipAge to sync blocks to this point. We'll revert to // syncing headers and then resume syncing blocks once we're current again. @@ -2409,7 +2468,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { // peer, which is OK because we can assume the peer has all of them when // we're syncing. maxHeight := -1 - srv.GetBlocks(pp, maxHeight) + srv.RequestBlocksUpToHeight(pp, maxHeight) return } From aa03565dbf5742872956a268e4259291d797c8ef Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 15 May 2024 20:54:56 -0400 Subject: [PATCH 730/762] Request Missing HighQC Block From Timeout Message (#1330) * Request Missing HighQC Block From Timeout Message * Delete unused function --- lib/pos_consensus.go | 24 ++++++------------------ lib/server.go | 8 +++++++- 2 files changed, 13 insertions(+), 19 deletions(-) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index f6853c10e..be284952f 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -536,7 +536,7 @@ func (fc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo // HandleValidatorTimeout is called when we receive a validator timeout message from a peer. This function // processes the timeout locally in the FastHotStuffEventLoop. 
-func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeout) error { +func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeout) ([]*BlockHash, error) { glog.V(2).Infof("FastHotStuffConsensus.HandleValidatorTimeout: %s", msg.ToString()) glog.V(2).Infof("FastHotStuffConsensus.HandleValidatorTimeout: %s", fc.fastHotStuffEventLoop.ToString()) @@ -546,16 +546,15 @@ func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoVa defer fc.lock.Unlock() if !fc.fastHotStuffEventLoop.IsRunning() { - return errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: FastHotStuffEventLoop is not running") + return nil, errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: FastHotStuffEventLoop is not running") } // If we don't have the highQC's block on hand, then we need to request it from the peer. We do // that first before storing the timeout message locally in the FastHotStuffEventLoop. This // prevents spamming of timeout messages by peers. if !fc.blockchain.HasBlockInBlockIndex(msg.HighQC.BlockHash) { - fc.trySendMessageToPeer(pp, &MsgDeSoGetBlocks{HashList: []*BlockHash{msg.HighQC.BlockHash}}) - glog.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Requesting missing highQC's block: %v", msg.HighQC.BlockHash) - return errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Missing highQC's block: %v", msg.HighQC.BlockHash) + err := errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Missing highQC's block: %v", msg.HighQC.BlockHash) + return []*BlockHash{msg.HighQC.BlockHash}, err } // Process the timeout message locally in the FastHotStuffEventLoop @@ -563,11 +562,11 @@ func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoVa // If we can't process the timeout locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. 
glog.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout msg: %v", err) - return errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout msg: ") + return nil, errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout msg: ") } // Happy path - return nil + return nil, nil } func (fc *FastHotStuffConsensus) HandleBlock(pp *Peer, msg *MsgDeSoBlock) (missingBlockHashes []*BlockHash, _err error) { @@ -938,17 +937,6 @@ func (fc *FastHotStuffConsensus) updateActiveValidatorConnections() error { return nil } -func (fc *FastHotStuffConsensus) trySendMessageToPeer(pp *Peer, msg DeSoMessage) { - remoteNode := fc.networkManager.GetRemoteNodeFromPeer(pp) - if remoteNode == nil { - glog.Errorf("FastHotStuffConsensus.trySendMessageToPeer: RemoteNode not found for peer: %v", pp) - return - } - - // Send the message to the peer - remoteNode.SendMessage(msg) -} - // Finds the epoch entry for the block and returns the epoch number. func getEpochEntryForBlockHeight(blockHeight uint64, epochEntries []*EpochEntry) (*EpochEntry, error) { for _, epochEntry := range epochEntries { diff --git a/lib/server.go b/lib/server.go index e0cdb4b8e..f74fd9476 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2952,9 +2952,15 @@ func (srv *Server) _handleValidatorTimeout(pp *Peer, msg *MsgDeSoValidatorTimeou return } - if err := srv.fastHotStuffConsensus.HandleValidatorTimeout(pp, msg); err != nil { + missingBlockHashes, err := srv.fastHotStuffConsensus.HandleValidatorTimeout(pp, msg) + if err != nil { glog.Errorf("Server._handleValidatorTimeout: Error handling timeout message from peer: %v", err) } + + // If we have missing blocks to request, then we send a GetBlocks message to the peer. + if len(missingBlockHashes) > 0 { + srv.RequestBlocksByHash(pp, missingBlockHashes) + } } // _startConsensusEventLoop contains the top-level event loop to run both the PoW and PoS consensus. 
It is From 7a37bd63b4618dc8142b79619d39c4ff140c569f Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 15 May 2024 20:59:56 -0400 Subject: [PATCH 731/762] Simplify IsValidStakeLimitKey (#1292) --- lib/block_view_derived_key.go | 6 ++--- lib/block_view_stake.go | 20 +--------------- lib/block_view_stake_test.go | 43 ----------------------------------- 3 files changed, 4 insertions(+), 65 deletions(-) diff --git a/lib/block_view_derived_key.go b/lib/block_view_derived_key.go index 08b721a92..801f1dece 100644 --- a/lib/block_view_derived_key.go +++ b/lib/block_view_derived_key.go @@ -330,7 +330,7 @@ func (bav *UtxoView) _connectAuthorizeDerivedKey( } // StakeLimitMap for stakeLimitKey, stakingLimit := range transactionSpendingLimit.StakeLimitMap { - if err = bav.IsValidStakeLimitKey(txn.PublicKey, stakeLimitKey); err != nil { + if err = bav.IsValidStakeLimitKey(txn.PublicKey); err != nil { return 0, 0, nil, err } if stakingLimit.IsZero() { @@ -341,7 +341,7 @@ func (bav *UtxoView) _connectAuthorizeDerivedKey( } // UnstakeLimitMap for unstakeLimitKey, unstakingLimit := range transactionSpendingLimit.UnstakeLimitMap { - if err = bav.IsValidStakeLimitKey(txn.PublicKey, unstakeLimitKey); err != nil { + if err = bav.IsValidStakeLimitKey(txn.PublicKey); err != nil { return 0, 0, nil, err } if unstakingLimit.IsZero() { @@ -352,7 +352,7 @@ func (bav *UtxoView) _connectAuthorizeDerivedKey( } // UnlockStakeLimitMap for unlockStakeLimitKey, transactionCount := range transactionSpendingLimit.UnlockStakeLimitMap { - if err = bav.IsValidStakeLimitKey(txn.PublicKey, unlockStakeLimitKey); err != nil { + if err = bav.IsValidStakeLimitKey(txn.PublicKey); err != nil { return 0, 0, nil, err } if transactionCount == 0 { diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 12c0be2b3..c33a55d83 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -3147,31 +3147,13 @@ func (bav *UtxoView) 
_checkUnlockStakeTxnSpendingLimitAndUpdateDerivedKey( return derivedKeyEntry, errors.Wrapf(RuleErrorUnlockStakeTransactionSpendingLimitNotFound, "UtxoView._checkUnlockStakeTxnSpendingLimitAndUpdateDerivedKey: ") } -func (bav *UtxoView) IsValidStakeLimitKey(transactorPublicKeyBytes []byte, stakeLimitKey StakeLimitKey) error { +func (bav *UtxoView) IsValidStakeLimitKey(transactorPublicKeyBytes []byte) error { // Convert TransactorPublicKeyBytes to TransactorPKID. transactorPKIDEntry := bav.GetPKIDForPublicKey(transactorPublicKeyBytes) if transactorPKIDEntry == nil || transactorPKIDEntry.isDeleted { return errors.Wrapf(RuleErrorTransactionSpendingLimitInvalidStaker, "UtxoView.IsValidStakeLimitKey: ") } - // Verify ValidatorEntry. - if stakeLimitKey.ValidatorPKID.IsZeroPKID() { - // The ZeroPKID is a special case that indicates that the spending limit - // applies to any validator. In this case, we don't need to check that the - // validator exists, as there is no validator registered for the ZeroPKID. 
- return nil - } - validatorEntry, err := bav.GetValidatorByPKID(&stakeLimitKey.ValidatorPKID) - if err != nil { - return errors.Wrapf(err, "IsValidStakeLimitKey: ") - } - if validatorEntry == nil || validatorEntry.isDeleted { - return errors.Wrapf(RuleErrorTransactionSpendingLimitInvalidValidator, "UtxoView.IsValidStakeLimitKey: ") - } - if !transactorPKIDEntry.PKID.Eq(&stakeLimitKey.ValidatorPKID) && validatorEntry.DisableDelegatedStake { - return errors.Wrapf(RuleErrorTransactionSpendingLimitValidatorDisabledDelegatedStake, "UtxoView.IsValidStakeLimitKey: ") - } - return nil } diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index a931e343d..b50772fbe 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -809,7 +809,6 @@ func TestStakingWithDerivedKey(t *testing.T) { m0PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m0PkBytes).PKID m1PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m1PkBytes).PKID - m2PKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, m2PkBytes).PKID senderPkBytes, _, err := Base58CheckDecode(senderPkString) require.NoError(t, err) @@ -963,48 +962,6 @@ func TestStakingWithDerivedKey(t *testing.T) { _, err = _submitRegisterAsValidatorTxn(testMeta, m1Pub, m1Priv, registerAsValidatorMetadata, nil, true) require.NoError(t, err) } - { - // RuleErrorTransactionSpendingLimitInvalidValidator - // sender tries to create a DerivedKey to stake with m2. Validator doesn't exist. Errors. 
- stakeLimitKey := MakeStakeLimitKey(m2PKID) - txnSpendingLimit := &TransactionSpendingLimit{ - GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit - TransactionCountLimitMap: map[TxnType]uint64{ - TxnTypeAuthorizeDerivedKey: 1, - }, - StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt(100)}, - } - derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) - require.Error(t, err) - } - { - // RuleErrorTransactionSpendingLimitInvalidValidator - // sender tries to create a DerivedKey to unstake from m2. Validator doesn't exist. Errors. - stakeLimitKey := MakeStakeLimitKey(m2PKID) - txnSpendingLimit := &TransactionSpendingLimit{ - GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit - TransactionCountLimitMap: map[TxnType]uint64{ - TxnTypeAuthorizeDerivedKey: 1, - }, - UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt(100)}, - } - derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) - require.Error(t, err) - } - { - // RuleErrorTransactionSpendingLimitInvalidValidator - // sender tries to create a DerivedKey to stake with m2. Validator doesn't exist. Errors. - stakeLimitKey := MakeStakeLimitKey(m2PKID) - txnSpendingLimit := &TransactionSpendingLimit{ - GlobalDESOLimit: NanosPerUnit, // 1 $DESO spending limit - TransactionCountLimitMap: map[TxnType]uint64{ - TxnTypeAuthorizeDerivedKey: 1, - }, - UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey: 100}, - } - derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) - require.Error(t, err) - } { // sender stakes with m0 using a DerivedKey. 
From 0c29c043834b353d6ce2e50a2546ae082ee08dd1 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 15 May 2024 21:24:36 -0400 Subject: [PATCH 732/762] Straggler Items in PoS Feature Review (#1331) * Straggler Items in PoS Feature Review * Copy forbiddenPubKeyEntry * Add unit tests for FastHotStuffEvent Validation * Address uint256 comments * Fix broken test * Address Nina's final comments --- consensus/utils.go | 2 +- consensus/utils_test.go | 151 +++++++++++++++++++++++++ lib/block_view.go | 3 +- lib/block_view_balance_entry.go | 5 +- lib/block_view_dao_coin_limit_order.go | 11 +- lib/block_view_types.go | 9 ++ lib/pos_fee_estimator.go | 2 - 7 files changed, 171 insertions(+), 12 deletions(-) diff --git a/consensus/utils.go b/consensus/utils.go index 41d14f50f..ca20c91d9 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -16,7 +16,7 @@ import ( func IsProperlyFormedConstructVoteQCEvent(event *FastHotStuffEvent) bool { return isProperlyFormedGenericEvent(event) && event.EventType == FastHotStuffEventTypeConstructVoteQC && // Event type is QC construction - !isInterfaceNil(event.QC) // The high QC is non-nil + isProperlyFormedQC(event.QC) // The high QC is properly formed } func IsProperlyFormedConstructTimeoutQCEvent(event *FastHotStuffEvent) bool { diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 5c4266773..1b0429c80 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -9,6 +9,157 @@ import ( "github.com/stretchr/testify/require" ) +func TestIsProperlyFormedConstructVoteQCEvent(t *testing.T) { + testIsProperlyFormedGenericEvent(t) + + // Test wrong event type + { + event := FastHotStuffEvent{ + View: 1, TipBlockHeight: 1, TipBlockHash: createDummyBlockHash(), EventType: FastHotStuffEventTypeTimeout, + } + require.False(t, IsProperlyFormedConstructVoteQCEvent(&event)) + } + + // Test nil QC + { + event := FastHotStuffEvent{ + View: 1, TipBlockHeight: 1, TipBlockHash: 
createDummyBlockHash(), + EventType: FastHotStuffEventTypeConstructVoteQC, QC: nil, + } + require.False(t, IsProperlyFormedConstructVoteQCEvent(&event)) + } + + // Test properly formed vote QC event + { + event := FastHotStuffEvent{ + View: 1, TipBlockHeight: 1, TipBlockHash: createDummyBlockHash(), + EventType: FastHotStuffEventTypeConstructVoteQC, QC: createDummyQC(3, createDummyBlockHash()), + } + require.True(t, IsProperlyFormedConstructVoteQCEvent(&event)) + } +} + +func TestIsProperlyFormedConstructTimeoutQCEvent(t *testing.T) { + testIsProperlyFormedGenericEvent(t) + + // Test wrong event type + { + event := FastHotStuffEvent{ + View: 1, TipBlockHeight: 1, TipBlockHash: createDummyBlockHash(), EventType: FastHotStuffEventTypeTimeout, + } + require.False(t, IsProperlyFormedConstructTimeoutQCEvent(&event)) + } + + // Test nil AggregateQC + { + event := FastHotStuffEvent{ + View: 1, TipBlockHeight: 1, TipBlockHash: createDummyBlockHash(), + EventType: FastHotStuffEventTypeConstructTimeoutQC, AggregateQC: nil, + } + require.False(t, IsProperlyFormedConstructTimeoutQCEvent(&event)) + } + + // Test properly formed timeout QC event + { + event := FastHotStuffEvent{ + View: 1, TipBlockHeight: 1, TipBlockHash: createDummyBlockHash(), + EventType: FastHotStuffEventTypeConstructTimeoutQC, AggregateQC: createDummyAggQc(3, 2), + } + require.True(t, IsProperlyFormedConstructTimeoutQCEvent(&event)) + } +} + +func TestIsProperlyFormedVoteEvent(t *testing.T) { + testIsProperlyFormedGenericEvent(t) + + // Test wrong event type + { + event := FastHotStuffEvent{ + View: 1, TipBlockHeight: 1, TipBlockHash: createDummyBlockHash(), EventType: FastHotStuffEventTypeTimeout, + } + require.False(t, IsProperlyFormedVoteEvent(&event)) + } + + // Test non-nil QC + { + event := FastHotStuffEvent{ + View: 1, TipBlockHeight: 1, TipBlockHash: createDummyBlockHash(), + EventType: FastHotStuffEventTypeVote, QC: createDummyQC(2, createDummyBlockHash()), + } + require.False(t, 
IsProperlyFormedVoteEvent(&event)) + } + + // Test properly formed vote event + { + event := FastHotStuffEvent{ + View: 1, TipBlockHeight: 1, TipBlockHash: createDummyBlockHash(), + EventType: FastHotStuffEventTypeVote, QC: nil, + } + require.True(t, IsProperlyFormedVoteEvent(&event)) + } +} + +func TestIsProperlyFormedTimeoutEvent(t *testing.T) { + testIsProperlyFormedGenericEvent(t) + + // Test wrong event type + { + event := FastHotStuffEvent{ + View: 1, TipBlockHeight: 1, TipBlockHash: createDummyBlockHash(), EventType: FastHotStuffEventTypeVote, + } + require.False(t, IsProperlyFormedTimeoutEvent(&event)) + } + + // Test non-nil QC + { + event := FastHotStuffEvent{ + View: 1, TipBlockHeight: 1, TipBlockHash: createDummyBlockHash(), + EventType: FastHotStuffEventTypeTimeout, QC: createDummyQC(2, createDummyBlockHash()), + } + require.False(t, IsProperlyFormedTimeoutEvent(&event)) + } + + // Test properly formed timeout event + { + event := FastHotStuffEvent{ + View: 1, TipBlockHeight: 1, TipBlockHash: createDummyBlockHash(), + EventType: FastHotStuffEventTypeTimeout, QC: nil, + } + require.True(t, IsProperlyFormedTimeoutEvent(&event)) + } +} + +func testIsProperlyFormedGenericEvent(t *testing.T) { + // Test nil event + { + require.False(t, isProperlyFormedGenericEvent(nil)) + } + + // Test 0 view + { + event := FastHotStuffEvent{View: 0} + require.False(t, isProperlyFormedGenericEvent(&event)) + } + + // Test 0 tip height + { + event := FastHotStuffEvent{View: 1, TipBlockHeight: 0} + require.False(t, isProperlyFormedGenericEvent(&event)) + } + + // Test nil tip hash + { + event := FastHotStuffEvent{View: 1, TipBlockHeight: 1, TipBlockHash: nil} + require.False(t, isProperlyFormedGenericEvent(&event)) + } + + // Test properly formed generic event + { + event := FastHotStuffEvent{View: 1, TipBlockHeight: 1, TipBlockHash: createDummyBlockHash()} + require.True(t, isProperlyFormedGenericEvent(&event)) + } +} + func TestIsValidSuperMajorityQuorumCertificate(t 
*testing.T) { // Test malformed QC { diff --git a/lib/block_view.go b/lib/block_view.go index 6e9f3f2ca..421b6d626 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -361,8 +361,7 @@ func (bav *UtxoView) CopyUtxoView() *UtxoView { if forbiddenPubKeyEntry == nil { continue } - newForbiddenPubKeyEntry := *forbiddenPubKeyEntry - newView.ForbiddenPubKeyToForbiddenPubKeyEntry[pkMapKey] = &newForbiddenPubKeyEntry + newView.ForbiddenPubKeyToForbiddenPubKeyEntry[pkMapKey] = forbiddenPubKeyEntry.Copy() } // Copy the post data diff --git a/lib/block_view_balance_entry.go b/lib/block_view_balance_entry.go index 84f6391e4..1d797e29e 100644 --- a/lib/block_view_balance_entry.go +++ b/lib/block_view_balance_entry.go @@ -2,11 +2,12 @@ package lib import ( "fmt" + "reflect" + "github.com/btcsuite/btcd/btcec/v2" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" - "reflect" ) func (bav *UtxoView) _getBalanceEntryForHODLerPKIDAndCreatorPKID( @@ -433,7 +434,7 @@ func (bav *UtxoView) HelpConnectCoinTransfer( bav._deleteBalanceEntryMappings(receiverBalanceEntry, receiverPublicKey, profilePublicKey, isDAOCoin) bav._setBalanceEntryMappings(receiverBalanceEntry, isDAOCoin) - if senderBalanceEntry.BalanceNanos.Gt(uint256.NewInt(0)) { + if senderBalanceEntry.BalanceNanos.Sign() > 0 { bav._setBalanceEntryMappings(senderBalanceEntry, isDAOCoin) } diff --git a/lib/block_view_dao_coin_limit_order.go b/lib/block_view_dao_coin_limit_order.go index aa520caff..db9c036a9 100644 --- a/lib/block_view_dao_coin_limit_order.go +++ b/lib/block_view_dao_coin_limit_order.go @@ -3,13 +3,14 @@ package lib import ( "bytes" "fmt" + "math/big" + "sort" + "strings" + "github.com/davecgh/go-spew/spew" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" - "math/big" - "sort" - "strings" ) func adjustBalance( @@ -1784,7 +1785,7 @@ func (bav *UtxoView) IsValidDAOCoinLimitOrder(order *DAOCoinLimitOrderEntry) err if err != nil { return err } - if 
baseUnitsToBuy.Eq(uint256.NewInt(0)) { + if baseUnitsToBuy.IsZero() { return errors.Wrapf(RuleErrorDAOCoinLimitOrderTotalCostIsLessThanOneNano, "baseUnitsToBuy: ") } // If buying $DESO, validate that qty to buy is less than the max uint64. @@ -1797,7 +1798,7 @@ func (bav *UtxoView) IsValidDAOCoinLimitOrder(order *DAOCoinLimitOrderEntry) err if err != nil { return err } - if baseUnitsToSell.Eq(uint256.NewInt(0)) { + if baseUnitsToSell.IsZero() { return errors.Wrapf(RuleErrorDAOCoinLimitOrderTotalCostIsLessThanOneNano, "baseUnitsToSell: ") } diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 68a3fbeee..79dde9604 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -3342,6 +3342,15 @@ type ForbiddenPubKeyEntry struct { isDeleted bool } +func (entry *ForbiddenPubKeyEntry) Copy() *ForbiddenPubKeyEntry { + pubKeyCopy := make([]byte, len(entry.PubKey)) + copy(pubKeyCopy, entry.PubKey) + return &ForbiddenPubKeyEntry{ + PubKey: pubKeyCopy, + isDeleted: entry.isDeleted, + } +} + func (entry *ForbiddenPubKeyEntry) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte data = append(data, EncodeByteArray(entry.PubKey)...) diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index 34212a9f6..a0c54ff09 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -658,8 +658,6 @@ func (posFeeEstimator *PoSFeeEstimator) estimateFeeRateNanosPerKBGivenTransactio } totalTxnsSize += tx.TxSizeBytes txns = append(txns, tx) - // TODO: I think we want to include the txn that puts us over the limit, but - // we can just move this check up a few lines if that's wrong. 
if totalTxnsSize > maxSizeOfNumBlocks { break } From 38cba454c3231257c5cbf6db54fe98df5c3a9e8b Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 15 May 2024 21:25:32 -0400 Subject: [PATCH 733/762] initNewUtxoView doesn't fetch tip hash, copy view sets it and new utxo view handles the db fetch (#1332) --- lib/block_view.go | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 421b6d626..d475d986e 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -320,6 +320,7 @@ func (bav *UtxoView) _ResetViewMappingsAfterFlush() { func (bav *UtxoView) CopyUtxoView() *UtxoView { newView := initNewUtxoView(bav.Handle, bav.Params, bav.Postgres, bav.Snapshot, bav.EventManager) + newView.TipHash = bav.TipHash.NewBlockHash() // Handle items loaded from DB with _ResetViewMappingsAfterFlush newView.NumUtxoEntries = bav.NumUtxoEntries newView.NanosPurchased = bav.NanosPurchased @@ -730,15 +731,8 @@ func initNewUtxoView( _eventManager *EventManager, ) *UtxoView { return &UtxoView{ - Handle: _handle, - Params: _params, - // Note that the TipHash does not get reset as part of - // _ResetViewMappingsAfterFlush because it is not something that is affected by a - // flush operation. Moreover, its value is consistent with the view regardless of - // whether the view is flushed or not. Additionally, the utxo view does not concern - // itself with the header chain (see comment on GetBestHash for more info on that). 
- TipHash: DbGetBestHash(_handle, _snapshot, ChainTypeDeSoBlock /* don't get the header chain */), - + Handle: _handle, + Params: _params, Postgres: _postgres, Snapshot: _snapshot, EventManager: _eventManager, From 604777d48a5bbbaaddd8db9da3dd4b3b2b1977e0 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 16 May 2024 11:21:43 -0400 Subject: [PATCH 734/762] Add Comment to Explain UVarint Encoding Validation (#1333) --- lib/varint.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/varint.go b/lib/varint.go index a58a0f175..f5935505f 100644 --- a/lib/varint.go +++ b/lib/varint.go @@ -119,6 +119,9 @@ func ReadUvarint(r io.Reader) (uint64, error) { return x, err } b := buf[0] + // The Uvarint encoding should cap out at 8 bytes. If it extends beyond that, + // then the encoded value is too large to fit into a uint64. The encoded value + // is not valid. if i == 9 && b > 1 { return x, overflow } From a66a59758faaf53c1dd326af954370f0407d9468 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Fri, 17 May 2024 12:04:35 -0400 Subject: [PATCH 735/762] Set NegotiatedVersion on Peer Object (#1334) * Set NegotiatedVersion on Peer Object * Fix naming --- lib/peer.go | 12 +++++++++++- lib/remote_node.go | 5 +++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/lib/peer.go b/lib/peer.go index c2c24ba9c..86082a3aa 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -38,6 +38,9 @@ type DeSoMessageMeta struct { // Any communication with other nodes happens via this object, which maintains a // queue of messages to send to the other node. type Peer struct { + // TODO: Remove this and merge it with the RemoteNode.HandshakeMetaData type + NegotiatedProtocolVersion ProtocolVersionType + // These stats should be accessed atomically. 
bytesReceived uint64 bytesSent uint64 @@ -401,7 +404,7 @@ func (pp *Peer) HandleGetBlocks(msg *MsgDeSoGetBlocks) { // Before Version2 we would send each block in a single message, which was quite // slow. Now when we receive a GetBlocks message we will send the blocks in large // batches, which is much faster. - if pp.Params.ProtocolVersion == ProtocolVersion2 { + if pp.NegotiatedProtocolVersion == ProtocolVersion2 { allBlocks := MsgDeSoBlockBundle{} for _, hashToSend := range msg.HashList { blockToSend := pp.srv.blockchain.GetBlock(hashToSend) @@ -931,6 +934,13 @@ func (pp *Peer) SetServiceFlag(sf ServiceFlag) { pp.serviceFlags = sf } +func (pp *Peer) SetNegotiatedProtocolVersion(negotiatedProtocolVersion ProtocolVersionType) { + pp.PeerInfoMtx.Lock() + defer pp.PeerInfoMtx.Unlock() + + pp.NegotiatedProtocolVersion = negotiatedProtocolVersion +} + func (pp *Peer) outHandler() { pp.startGroup.Done() glog.V(1).Infof("Peer.outHandler: Starting outHandler for Peer %v", pp) diff --git a/lib/remote_node.go b/lib/remote_node.go index a90214b9b..bbfb37f95 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -546,7 +546,12 @@ func (rn *RemoteNode) HandleVersionMessage(verMsg *MsgDeSoVersion, responseNonce negotiatedVersion = NewProtocolVersionType(verMsg.Version) } + // TODO: everywhere we update the negotiated protocol version on the handshake metadata, we also update + // the value in the peer object. The two should be merged to reduce duplication of data in the future. vMeta.negotiatedProtocolVersion = negotiatedVersion + if rn.peer != nil { + rn.peer.SetNegotiatedProtocolVersion(negotiatedVersion) + } // Record the services the peer is advertising. 
vMeta.serviceFlag = verMsg.Services From 82844345ba653c4db9daf8c96e6e692fdf9f3466 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 17 May 2024 17:27:36 -0400 Subject: [PATCH 736/762] Fix computeExpectedSnapshotHeight (#1335) --- lib/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/server.go b/lib/server.go index f74fd9476..8848c31de 100644 --- a/lib/server.go +++ b/lib/server.go @@ -1354,7 +1354,7 @@ func (srv *Server) computeExpectedSnapshotHeight(headerTipHeight uint64) uint64 ) expectedSnapshotHeight := headerTipHeight - (headerTipHeight % snapshotBlockHeightPeriod) posSetupForkHeight := uint64(srv.params.ForkHeights.ProofOfStake1StateSetupBlockHeight) - if expectedSnapshotHeight < posSetupForkHeight { + if headerTipHeight > posSetupForkHeight && expectedSnapshotHeight < posSetupForkHeight { expectedSnapshotHeight = posSetupForkHeight - (posSetupForkHeight % srv.params.DefaultPoWSnapshotBlockHeightPeriod) } From f837a3d8776da4ca1ef04eaf8435a24a07db46cb Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 17 May 2024 17:29:50 -0400 Subject: [PATCH 737/762] Revert removal of BlockRewardPatchBlockHeight in constants.go (#1336) --- lib/constants.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/constants.go b/lib/constants.go index 7a1877d81..24352e558 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1061,6 +1061,9 @@ var MainnetForkHeights = ForkHeights{ // Mon Apr 24 2023 @ 9am PST BalanceModelBlockHeight: uint32(226839), + // Tues May 23 2023 @ 9am PST + BlockRewardPatchBlockHeight: uint32(235134), + // FIXME: set to real block height when ready ProofOfStake1StateSetupBlockHeight: uint32(math.MaxUint32), From be50d07bf81b53fbc40d691b2a44216cf97b2cea Mon Sep 17 00:00:00 2001 From: superzordon Date: Wed, 22 May 2024 14:07:59 -0400 Subject: [PATCH 738/762] Updates to state change syncer --- lib/state_change_syncer.go | 11 ++++ 
lib/txn_reconciliation_test.go | 92 ++++++++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+) create mode 100644 lib/txn_reconciliation_test.go diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 55eb0d25f..6fa54a2b7 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -197,6 +197,11 @@ func (stateChangeEntry *StateChangeEntry) RawDecodeWithoutMetadata(blockHeight u } stateChangeEntry.BlockHeight = entryBlockHeight + // Don't decode the block if the encoder type is not a utxo operation. + if stateChangeEntry.EncoderType != EncoderTypeUtxoOperation && stateChangeEntry.EncoderType != EncoderTypeUtxoOperationBundle { + return nil + } + block := &MsgDeSoBlock{} if exist, err := DecodeFromBytes(block, rr); exist && err == nil { stateChangeEntry.Block = block @@ -509,6 +514,7 @@ func (stateChangeSyncer *StateChangeSyncer) _handleStateSyncerFlush(event *State glog.V(2).Infof( "The flush ID has changed, bailing now. Event: %v, Event block sync: %v, Global block sync: %v\n", event.FlushId, event.BlockSyncFlushId, stateChangeSyncer.BlockSyncFlushId) + fmt.Printf("Handle flush reset.\n") stateChangeSyncer.ResetMempool() return } @@ -539,6 +545,7 @@ func (stateChangeSyncer *StateChangeSyncer) _handleStateSyncerFlush(event *State // Confirm that the block sync ID hasn't shifted. If it has, bail now. if cachedSCE.FlushId != stateChangeSyncer.BlockSyncFlushId { glog.V(2).Infof("The flush ID has changed, inside key/value check, bailing now.\n") + fmt.Printf("Flush ID Changed. Bailing now.\n") stateChangeSyncer.ResetMempool() return } @@ -577,11 +584,13 @@ func (stateChangeSyncer *StateChangeSyncer) _handleStateSyncerFlush(event *State // After flushing blocksync transactions to file, reset the block sync flush ID, and reset the mempool. 
stateChangeSyncer.BlockSyncFlushId = uuid.New() glog.V(2).Infof("Setting a new blocksync flush ID: %v\n", stateChangeSyncer.BlockSyncFlushId) + fmt.Printf("Setting a new blocksync flush ID: %v\n", stateChangeSyncer.BlockSyncFlushId) stateChangeSyncer.ResetMempool() } } func (stateChangeSyncer *StateChangeSyncer) ResetMempool() { + fmt.Printf("Resetting mempool.\n") glog.V(2).Info("Resetting mempool.\n") stateChangeSyncer.MempoolSyncedKeyValueMap = make(map[string]*StateChangeEntry) stateChangeSyncer.MempoolNewlyFlushedTxns = make(map[string]*StateChangeEntry) @@ -697,6 +706,8 @@ func (stateChangeSyncer *StateChangeSyncer) FlushTransactionsToFile(event *State return fmt.Errorf("Error flushing to %s state change file: FlushId %v has nil bytes\n", stateChangeType, flushId) } + fmt.Printf("Writing bytes to %v file: %+v\n", event.IsMempoolFlush, unflushedBytes) + // Write the encoded StateChangeEntry bytes to the state changer file. _, err := flushFile.Write(unflushedBytes.StateChangeBytes) diff --git a/lib/txn_reconciliation_test.go b/lib/txn_reconciliation_test.go new file mode 100644 index 000000000..b88010fed --- /dev/null +++ b/lib/txn_reconciliation_test.go @@ -0,0 +1,92 @@ +package lib + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "testing" +) + +type Transaction struct { + TransactionIDBase58Check string `json:"TransactionIDBase58Check"` + TransactionHashHex string `json:"TransactionHashHex"` + // Add other fields as necessary +} + +type TransactionFile struct { + Error string `json:"Error"` + Transactions []Transaction `json:"Transactions"` +} + +func loadTransactions(filePath string) (*TransactionFile, error) { + file, err := os.Open(filePath) + if err != nil { + return nil, err + } + defer file.Close() + + bytes, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + + var transactions TransactionFile + if err := json.Unmarshal(bytes, &transactions); err != nil { + return nil, err + } + + return &transactions, nil +} + +func 
findUniqueTransactions(transactions1, transactions2 []Transaction) []Transaction { + transactionMap := make(map[string]Transaction) + for _, tx := range transactions2 { + transactionMap[tx.TransactionHashHex] = tx + } + + fmt.Printf("Test: %+v\n", transactionMap["a2bfab455c31f8ed418c9aa66edb40749c308cb29f232ff6a3882340520d422f"]) + + uniqueTransactions := []Transaction{} + for _, tx := range transactions1 { + if _, found := transactionMap[tx.TransactionHashHex]; !found { + uniqueTransactions = append(uniqueTransactions, tx) + } + } + + return uniqueTransactions +} + +func TestReconcileTransactions(t *testing.T) { + blueTxnJsonPath := "/Users/zordon/Library/Application Support/JetBrains/GoLand2022.2/scratches/blue_transactions.json" + greenTxnJsonPath := "/Users/zordon/Library/Application Support/JetBrains/GoLand2022.2/scratches/green_transactions.json" + + blueTxns, err := loadTransactions(blueTxnJsonPath) + if err != nil { + fmt.Println("Error loading transactions from blue file:", err) + return + } + + greenTxns, err := loadTransactions(greenTxnJsonPath) + if err != nil { + fmt.Println("Error loading transactions from green file:", err) + return + } + + fmt.Println("Loaded", len(blueTxns.Transactions), "transactions from blue file") + + blueUniqueTxns := findUniqueTransactions(blueTxns.Transactions, greenTxns.Transactions) + greenUniqueTxns := findUniqueTransactions(greenTxns.Transactions, blueTxns.Transactions) + + fmt.Println("Transactions unique to blue:") + for _, tx := range blueUniqueTxns { + txJson, _ := json.MarshalIndent(tx, "", " ") + fmt.Println(string(txJson)) + } + + fmt.Println("Transactions unique to green:") + for _, tx := range greenUniqueTxns { + txJson, _ := json.MarshalIndent(tx, "", " ") + fmt.Println(string(txJson)) + } +} From 8dfff90cdb8d0241d20511e8b736f3b23d55e599 Mon Sep 17 00:00:00 2001 From: superzordon Date: Fri, 24 May 2024 17:54:44 -0400 Subject: [PATCH 739/762] Small bugfixes to pos state syncer --- lib/state_change_syncer.go | 8 
--- lib/txn_reconciliation_test.go | 92 ---------------------------------- 2 files changed, 100 deletions(-) delete mode 100644 lib/txn_reconciliation_test.go diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 6fa54a2b7..2ac5ad164 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -514,7 +514,6 @@ func (stateChangeSyncer *StateChangeSyncer) _handleStateSyncerFlush(event *State glog.V(2).Infof( "The flush ID has changed, bailing now. Event: %v, Event block sync: %v, Global block sync: %v\n", event.FlushId, event.BlockSyncFlushId, stateChangeSyncer.BlockSyncFlushId) - fmt.Printf("Handle flush reset.\n") stateChangeSyncer.ResetMempool() return } @@ -545,7 +544,6 @@ func (stateChangeSyncer *StateChangeSyncer) _handleStateSyncerFlush(event *State // Confirm that the block sync ID hasn't shifted. If it has, bail now. if cachedSCE.FlushId != stateChangeSyncer.BlockSyncFlushId { glog.V(2).Infof("The flush ID has changed, inside key/value check, bailing now.\n") - fmt.Printf("Flush ID Changed. Bailing now.\n") stateChangeSyncer.ResetMempool() return } @@ -584,13 +582,11 @@ func (stateChangeSyncer *StateChangeSyncer) _handleStateSyncerFlush(event *State // After flushing blocksync transactions to file, reset the block sync flush ID, and reset the mempool. 
stateChangeSyncer.BlockSyncFlushId = uuid.New() glog.V(2).Infof("Setting a new blocksync flush ID: %v\n", stateChangeSyncer.BlockSyncFlushId) - fmt.Printf("Setting a new blocksync flush ID: %v\n", stateChangeSyncer.BlockSyncFlushId) stateChangeSyncer.ResetMempool() } } func (stateChangeSyncer *StateChangeSyncer) ResetMempool() { - fmt.Printf("Resetting mempool.\n") glog.V(2).Info("Resetting mempool.\n") stateChangeSyncer.MempoolSyncedKeyValueMap = make(map[string]*StateChangeEntry) stateChangeSyncer.MempoolNewlyFlushedTxns = make(map[string]*StateChangeEntry) @@ -706,8 +702,6 @@ func (stateChangeSyncer *StateChangeSyncer) FlushTransactionsToFile(event *State return fmt.Errorf("Error flushing to %s state change file: FlushId %v has nil bytes\n", stateChangeType, flushId) } - fmt.Printf("Writing bytes to %v file: %+v\n", event.IsMempoolFlush, unflushedBytes) - // Write the encoded StateChangeEntry bytes to the state changer file. _, err := flushFile.Write(unflushedBytes.StateChangeBytes) @@ -920,8 +914,6 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser utxoOpsForTxn, _, _, _, err := mempoolTxUtxoView.ConnectTransaction( mempoolTx.Tx, mempoolTx.Hash, uint32(blockHeight+1), currentTimestamp, false, false /*ignoreUtxos*/) if err != nil { - //fmt.Printf("Right before the mempool flush error: %v\n", err) - //continue mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ FlushId: originalCommittedFlushId, Succeeded: false, diff --git a/lib/txn_reconciliation_test.go b/lib/txn_reconciliation_test.go deleted file mode 100644 index b88010fed..000000000 --- a/lib/txn_reconciliation_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package lib - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "testing" -) - -type Transaction struct { - TransactionIDBase58Check string `json:"TransactionIDBase58Check"` - TransactionHashHex string `json:"TransactionHashHex"` - // Add other fields as necessary -} - -type TransactionFile struct 
{ - Error string `json:"Error"` - Transactions []Transaction `json:"Transactions"` -} - -func loadTransactions(filePath string) (*TransactionFile, error) { - file, err := os.Open(filePath) - if err != nil { - return nil, err - } - defer file.Close() - - bytes, err := ioutil.ReadAll(file) - if err != nil { - return nil, err - } - - var transactions TransactionFile - if err := json.Unmarshal(bytes, &transactions); err != nil { - return nil, err - } - - return &transactions, nil -} - -func findUniqueTransactions(transactions1, transactions2 []Transaction) []Transaction { - transactionMap := make(map[string]Transaction) - for _, tx := range transactions2 { - transactionMap[tx.TransactionHashHex] = tx - } - - fmt.Printf("Test: %+v\n", transactionMap["a2bfab455c31f8ed418c9aa66edb40749c308cb29f232ff6a3882340520d422f"]) - - uniqueTransactions := []Transaction{} - for _, tx := range transactions1 { - if _, found := transactionMap[tx.TransactionHashHex]; !found { - uniqueTransactions = append(uniqueTransactions, tx) - } - } - - return uniqueTransactions -} - -func TestReconcileTransactions(t *testing.T) { - blueTxnJsonPath := "/Users/zordon/Library/Application Support/JetBrains/GoLand2022.2/scratches/blue_transactions.json" - greenTxnJsonPath := "/Users/zordon/Library/Application Support/JetBrains/GoLand2022.2/scratches/green_transactions.json" - - blueTxns, err := loadTransactions(blueTxnJsonPath) - if err != nil { - fmt.Println("Error loading transactions from blue file:", err) - return - } - - greenTxns, err := loadTransactions(greenTxnJsonPath) - if err != nil { - fmt.Println("Error loading transactions from green file:", err) - return - } - - fmt.Println("Loaded", len(blueTxns.Transactions), "transactions from blue file") - - blueUniqueTxns := findUniqueTransactions(blueTxns.Transactions, greenTxns.Transactions) - greenUniqueTxns := findUniqueTransactions(greenTxns.Transactions, blueTxns.Transactions) - - fmt.Println("Transactions unique to blue:") - for _, tx := range 
blueUniqueTxns { - txJson, _ := json.MarshalIndent(tx, "", " ") - fmt.Println(string(txJson)) - } - - fmt.Println("Transactions unique to green:") - for _, tx := range greenUniqueTxns { - txJson, _ := json.MarshalIndent(tx, "", " ") - fmt.Println(string(txJson)) - } -} From ae0a5d05cf7ad22d900397d7cd37161c2bfcde83 Mon Sep 17 00:00:00 2001 From: superzordon Date: Wed, 29 May 2024 15:04:20 -0400 Subject: [PATCH 740/762] Update state change metadata encoder --- lib/state_change_syncer.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 2ac5ad164..2e4cce5f6 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -127,10 +127,6 @@ func (stateChangeEntry *StateChangeEntry) RawEncodeWithoutMetadata(blockHeight u if stateChangeEntry.EncoderType == EncoderTypeUtxoOperation || stateChangeEntry.EncoderType == EncoderTypeUtxoOperationBundle { data = append(data, EncodeToBytes(blockHeight, stateChangeEntry.Block)...) - } else { - // If the encoder type is not a utxo operation, encode a nil value. - // We do this to simplify the decode logic and avoid an encoder migration. - data = append(data, EncodeToBytes(blockHeight, nil)...) } return data From 0cd0119a976af368db720b187c4cade4be9ca88f Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Wed, 29 May 2024 17:06:19 -0400 Subject: [PATCH 741/762] Fix peer version to use NegotiatedProtocolVersion (#1345) --- lib/server.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/server.go b/lib/server.go index 8848c31de..30bd0551a 100644 --- a/lib/server.go +++ b/lib/server.go @@ -707,7 +707,7 @@ func (srv *Server) _handleGetHeaders(pp *Peer, msg *MsgDeSoGetHeaders) { // provided locator are known. This does mean the client will start // over with the genesis block if unknown block locators are provided. 
maxHeadersPerMsg := MaxHeadersPerMsg - if pp.Params.ProtocolVersion >= ProtocolVersion2 { + if pp.NegotiatedProtocolVersion >= ProtocolVersion2 { maxHeadersPerMsg = MaxHeadersPerMsgPos } headers := srv.blockchain.LocateBestBlockChainHeaders(msg.BlockLocator, msg.StopHash, maxHeadersPerMsg) @@ -823,7 +823,7 @@ func (srv *Server) GetBlocksToStore(pp *Peer) { // We find the first block that's not stored and get ready to download blocks starting from this block onwards. if blockNode.Status&StatusBlockStored == 0 { maxBlocksInFlight := MaxBlocksInFlight - if pp.Params.ProtocolVersion >= ProtocolVersion2 && + if pp.NegotiatedProtocolVersion >= ProtocolVersion2 && (srv.params.IsPoSBlockHeight(uint64(blockNode.Height)) || srv.params.NetworkType == NetworkType_TESTNET) { @@ -937,7 +937,7 @@ func (srv *Server) getMaxBlocksInFlight(pp *Peer) int { // Fetch as many blocks as we can from this peer. If our peer is on PoS // then we can safely request a lot more blocks from them in each flight. maxBlocksInFlight := MaxBlocksInFlight - if pp.Params.ProtocolVersion >= ProtocolVersion2 && + if pp.NegotiatedProtocolVersion >= ProtocolVersion2 && (srv.params.IsPoSBlockHeight(uint64(srv.blockchain.blockTip().Height)) || srv.params.NetworkType == NetworkType_TESTNET) { maxBlocksInFlight = MaxBlocksInFlightPoS @@ -1125,7 +1125,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // likely we have not hit the tip of our peer's chain, and so requesting more // headers from the peer would likely be useful. 
maxHeadersPerMsg := MaxHeadersPerMsg - if pp.Params.ProtocolVersion >= ProtocolVersion2 { + if pp.NegotiatedProtocolVersion >= ProtocolVersion2 { maxHeadersPerMsg = MaxHeadersPerMsgPos } if uint32(len(msg.Headers)) < maxHeadersPerMsg || srv.blockchain.isTipMaxed(srv.blockchain.headerTip()) { From 3dac3c5157e8127601a7b253d10e3c4ac78fcb36 Mon Sep 17 00:00:00 2001 From: superzordon Date: Thu, 30 May 2024 19:23:07 -0400 Subject: [PATCH 742/762] Handle unhandled errors --- lib/state_change_syncer.go | 6 ++++++ lib/state_change_syncer_test.go | 1 + 2 files changed, 7 insertions(+) diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 2e4cce5f6..06053de6e 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -185,6 +185,9 @@ func (stateChangeEntry *StateChangeEntry) RawDecodeWithoutMetadata(blockHeight u return errors.Wrapf(err, "StateChangeEntry.RawDecodeWithoutMetadata: error decoding flush UUID") } stateChangeEntry.FlushId, err = uuid.FromBytes(flushIdBytes) + if err != nil { + return errors.Wrapf(err, "StateChangeEntry.RawDecodeWithoutMetadata: error decoding flush UUID") + } // Decode the block height. 
entryBlockHeight, err := ReadUvarint(rr) @@ -322,6 +325,9 @@ func NewStateChangeSyncer(stateChangeDir string, nodeSyncType NodeSyncType, memp glog.Fatalf("Error opening stateChangeIndexFile: %v", err) } stateChangeMempoolFile, err := openOrCreateLogFile(stateChangeMempoolFilePath) + if err != nil { + glog.Fatalf("Error opening stateChangeMempoolFile: %v", err) + } stateChangeFileInfo, err := stateChangeFile.Stat() if err != nil { glog.Fatalf("Error getting stateChangeFileInfo: %v", err) diff --git a/lib/state_change_syncer_test.go b/lib/state_change_syncer_test.go index a77559c4c..6d6f56ab7 100644 --- a/lib/state_change_syncer_test.go +++ b/lib/state_change_syncer_test.go @@ -28,6 +28,7 @@ func TestStateChangeEntryEncoder(t *testing.T) { } bodyBytes, err := json.Marshal(postBody) + require.NoError(t, err) currentTimeNanos := time.Now() From 2279b244dd0a2f111b636fb4dc6b74a8739106f5 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Fri, 31 May 2024 10:48:59 -0400 Subject: [PATCH 743/762] Downgrade uint256 to 1.2.3 (#1341) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 52f1bb91c..476df804d 100644 --- a/go.mod +++ b/go.mod @@ -18,13 +18,13 @@ require ( github.com/deso-protocol/go-merkle-tree v1.0.0 github.com/dgraph-io/badger/v4 v4.2.0 github.com/emirpasic/gods v1.18.1 - github.com/ethereum/go-ethereum v1.13.14 + github.com/ethereum/go-ethereum v1.13.5 github.com/fatih/color v1.16.0 github.com/gernest/mention v2.0.0+incompatible github.com/go-pg/pg/v10 v10.12.0 github.com/golang/glog v1.2.1 github.com/google/uuid v1.6.0 - github.com/holiman/uint256 v1.2.4 + github.com/holiman/uint256 v1.2.3 github.com/mitchellh/go-homedir v1.1.0 github.com/oleiade/lane v1.0.1 github.com/onflow/crypto v0.25.1 diff --git a/go.sum b/go.sum index 5686bbc48..bad071c84 100644 --- a/go.sum +++ b/go.sum @@ -122,8 +122,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.13.14 h1:EwiY3FZP94derMCIam1iW4HFVrSgIcpsu0HwTQtm6CQ= -github.com/ethereum/go-ethereum v1.13.14/go.mod h1:TN8ZiHrdJwSe8Cb6x+p0hs5CxhJZPbqB7hHkaUXcmIU= +github.com/ethereum/go-ethereum v1.13.5 h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF3VbaFk= +github.com/ethereum/go-ethereum v1.13.5/go.mod h1:yMTu38GSuyxaYzQMViqNmQ1s3cE84abZexQmTgenWk0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= @@ -215,8 +215,8 @@ github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06A github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= -github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= -github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o= +github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.4.0 
h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= From 199ccf121b14d7d21d57c79d7b55dbea5c148d09 Mon Sep 17 00:00:00 2001 From: diamondhands0 <81935176+diamondhands0@users.noreply.github.com> Date: Mon, 3 Jun 2024 09:46:27 -0700 Subject: [PATCH 744/762] remove dirty hack (#1337) --- cmd/node.go | 55 +------------------------------------ lib/server.go | 76 --------------------------------------------------- 2 files changed, 1 insertion(+), 130 deletions(-) diff --git a/cmd/node.go b/cmd/node.go index fdca7939f..e3a085ef2 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -141,60 +141,7 @@ func (node *Node) Start(exitChannels ...*chan struct{}) { // Setup chain database dbDir := lib.GetBadgerDbPath(node.Config.DataDirectory) - var opts badger.Options - - // If we're in hypersync mode, we use the default badger options. Otherwise, we use performance options. - // This is because hypersync mode is very I/O intensive, so we want to use the default options to reduce - // the amount of memory consumed by the database. - // Blocksync requires performance options because certain indexes tracked by blocksync have extremely large - // records (e.g. PrefixBlockHashToUtxoOperations). These large records will overflow the default badger mem table - // size. - // - // FIXME: We should rewrite the code so that PrefixBlockHashToUtxoOperations is either removed or written - // to badger in such a way as to not require the use of PerformanceBadgerOptions. See the comment on - // dirtyHackUpdateDbOpts. - - // Check to see if this node has already been initialized with performance or default options. - // If so, we should continue to use those options. - // If not and the db directory exists, we will use PerformanceOptions as the default. This is because - // prior to the use of default options for hypersync, all nodes were initialized with performance options. - // So all nodes that are upgrading will want to continue using performance options. 
Only nodes that are - // hypersyncing from scratch can use default options. - // If not, this means we have a clean data directory and it should be based on the sync type. - // The reason we do this check is because once a badger database is initialized with performance options, - // re-opening it with non-performance options results in a memory error panic. In order to prevent this transition - // from default -> performance -> default settings, we save the db options to a file. This takes the form of a - // boolean which indicates whether the db was initialized with performance options or not. Upon restart, if the - // file exists, we use the same options. If the file does not exist, we use the options based on the sync type. - performanceOptions, err := lib.DbInitializedWithPerformanceOptions(node.Config.DataDirectory) - - // We hardcode performanceOptions to true if we're not using a hypersync sync-type. This helps - // nodes recover that were running an older version that wrote the incorrect boolean to the file. - if node.Config.SyncType != lib.NodeSyncTypeHyperSync && - node.Config.SyncType != lib.NodeSyncTypeHyperSyncArchival { - performanceOptions = true - } - // If the db options haven't yet been saved, we should base the options on the existence of the - // data directory and the sync type. - if os.IsNotExist(err) { - // Check if the db directory exists. - _, err = os.Stat(dbDir) - isHypersync := node.Config.SyncType == lib.NodeSyncTypeHyperSync || - node.Config.SyncType == lib.NodeSyncTypeHyperSyncArchival - performanceOptions = !os.IsNotExist(err) || !isHypersync - // Save the db options for future runs. - lib.SaveBoolToFile(lib.GetDbPerformanceOptionsFilePath(node.Config.DataDirectory), performanceOptions) - } else if err != nil { - // If we get an error other than "file does not exist", we should panic. 
- panic(err) - } - - if performanceOptions { - opts = lib.PerformanceBadgerOptions(dbDir) - } else { - opts = lib.DefaultBadgerOptions(dbDir) - } - + opts := lib.PerformanceBadgerOptions(dbDir) opts.ValueDir = dbDir node.ChainDB, err = badger.Open(opts) if err != nil { diff --git a/lib/server.go b/lib/server.go index 30bd0551a..4cc329c60 100644 --- a/lib/server.go +++ b/lib/server.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "fmt" "net" - "path/filepath" "reflect" "runtime" "strings" @@ -1628,15 +1627,6 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { } } - // Reset the badger DB options to the performance options. This is done by closing the current DB instance - // and re-opening it with the new options. - // This is necessary because the blocksync process syncs indexes with records that are too large for the default - // badger options. The large records overflow the default setting value log size and cause the DB to crash. - dbDir := GetBadgerDbPath(srv.datadir) - opts := PerformanceBadgerOptions(dbDir) - opts.ValueDir = dbDir - srv.dirtyHackUpdateDbOpts(opts) - // After syncing state from a snapshot, we will sync remaining blocks. To do so, we will // start downloading blocks from the snapshot height up to the blockchain tip. Since we // already synced all the state corresponding to the sub-blockchain ending at the snapshot @@ -1722,72 +1712,6 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { srv.RequestBlocksUpToHeight(pp, int(headerTip.Height)) } -// dirtyHackUpdateDbOpts closes the current badger DB instance and re-opens it with the provided options. -// -// FIXME: This is a dirty hack that we did in order to decrease memory usage. The reason why we needed it is -// as follows: -// - When we run a node with --hypersync or --hypersync-archival, using PerformanceOptions the whole way -// through causes it to use too much memory. 
-// - The problem is that if we use DefaultOptions, then the block sync after HyperSync is complete will fail -// because it writes really big entries in a single transaction to the PrefixBlockHashToUtxoOperations -// index. -// - So, in order to keep memory usage reasonable, we need to use DefaultOptions during the HyperSync portion -// and then *switch over* to PerformanceOptions once the HyperSync is complete. That is what this function -// is used for. -// - Running a node with --blocksync requires that we use PerformanceOptions the whole way through, but we -// are moving away from syncing nodes that way, so we don't need to worry too much about that case right now. -// -// The long-term solution is to break the writing of the PrefixBlockHashToUtxoOperations index into chunks, -// or to remove it entirely. We don't want to do that work right now, but we want to reduce the memory usage -// for the "common" case, which is why we're doing this dirty hack for now. -func (srv *Server) dirtyHackUpdateDbOpts(opts badger.Options) { - // Make sure that a mempool process doesn't try to access the DB while we're closing and re-opening it. - srv.mempool.mtx.Lock() - defer srv.mempool.mtx.Unlock() - // Make sure that the pos mempool process doesn't try to access the DB while we're closing and re-opening it. - srv.posMempool.Lock() - defer srv.posMempool.Unlock() - srv.posMempool.augmentedReadOnlyLatestBlockViewMutex.Lock() - defer srv.posMempool.augmentedReadOnlyLatestBlockViewMutex.Unlock() - srv.snapshot.SnapshotDbMutex.Lock() - defer srv.snapshot.SnapshotDbMutex.Unlock() - // Make sure that a server process doesn't try to access the DB while we're closing and re-opening it. - srv.DbMutex.Lock() - defer srv.DbMutex.Unlock() - srv.blockchain.db.Close() - db, err := badger.Open(opts) - if err != nil { - // If we can't open the DB with the new options, we need to exit the process. 
- glog.Fatalf("Server._handleSnapshot: Problem switching badger db to performance opts, error: (%v)", err) - } - srv.blockchain.db = db - srv.snapshot.mainDb = srv.blockchain.db - srv.snapshot.CurrentEpochSnapshotMetadata.mainDb = srv.blockchain.db - srv.snapshot.Status.mainDb = srv.blockchain.db - srv.snapshot.Checksum.mainDb = srv.blockchain.db - srv.snapshot.Migrations.mainDb = srv.blockchain.db - srv.snapshot.OperationChannel.mainDb = srv.blockchain.db - srv.mempool.bc.db = srv.blockchain.db - srv.mempool.backupUniversalUtxoView.Handle = srv.blockchain.db - srv.mempool.universalUtxoView.Handle = srv.blockchain.db - - if srv.posMempool.readOnlyLatestBlockView != nil { - srv.posMempool.readOnlyLatestBlockView.Handle = srv.blockchain.db - } - if srv.posMempool.validateTransactionsReadOnlyLatestBlockView != nil { - srv.posMempool.validateTransactionsReadOnlyLatestBlockView.Handle = srv.blockchain.db - } - if srv.posMempool.augmentedReadOnlyLatestBlockView != nil { - srv.posMempool.augmentedReadOnlyLatestBlockView.Handle = srv.blockchain.db - } - // Save the new options to the DB so that we know what to use if the node restarts. - isPerformanceOptions := DbOptsArePerformance(&opts) - err = SaveBoolToFile(GetDbPerformanceOptionsFilePath(filepath.Dir(opts.ValueDir)), isPerformanceOptions) - if err != nil { - glog.Errorf("Server._handleSnapshot: Problem saving performance options to file, error: (%v)", err) - } -} - func (srv *Server) _startSync() { // Return now if we're already syncing. 
if srv.SyncPeer != nil { From 0f27eef94d6a2b7108ad471157897be0933f8318 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 3 Jun 2024 14:13:48 -0400 Subject: [PATCH 745/762] Exclude PoW Txns from PoS Mempool (#1346) --- lib/pos_fee_estimator.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index a0c54ff09..37a033656 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -140,7 +140,19 @@ func blockToCachedBlock(block *MsgDeSoBlock) (*CachedBlock, error) { // Add all non-nil and non-block reward txns to the txns slice. This filters out malformed txns // txns and the genesis block block reward txn. for _, txn := range block.Txns { - if txn == nil || txn.TxnMeta.GetTxnType() == TxnTypeBlockReward { + if txn == nil { + continue + } + + // Always skip block reward txns because they are not counted as part of the block size during + // fee estimation. + if txn.TxnMeta.GetTxnType() == TxnTypeBlockReward { + continue + } + + // Catch-all check for any txns with no fees or txns with utxos. Only balance model txns are + // supported in PoS. + if txn.TxnFeeNanos == 0 || len(txn.TxInputs) > 0 { continue } From ff119b9e9cc89b18505ac5962d124cd0345fefc1 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 3 Jun 2024 14:26:32 -0400 Subject: [PATCH 746/762] Use TxnVersion Comparison to Filter Utxo Model Txns From PoS Fee Etimator (#1347) --- lib/pos_fee_estimator.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/pos_fee_estimator.go b/lib/pos_fee_estimator.go index 37a033656..39c198e75 100644 --- a/lib/pos_fee_estimator.go +++ b/lib/pos_fee_estimator.go @@ -150,9 +150,9 @@ func blockToCachedBlock(block *MsgDeSoBlock) (*CachedBlock, error) { continue } - // Catch-all check for any txns with no fees or txns with utxos. 
Only balance model txns are - // supported in PoS. - if txn.TxnFeeNanos == 0 || len(txn.TxInputs) > 0 { + // Catch-all check for any txns with no fees or utxo model txns. PoS only supports balance + // model txns, which have version 1 and above. + if txn.TxnFeeNanos == 0 || txn.TxnVersion == DeSoTxnVersion0 { continue } From d2b530a66addcb195ff7cf0cd0c2d1e669218a5d Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Tue, 4 Jun 2024 16:24:13 -0400 Subject: [PATCH 747/762] V0 logging level for processing blocks (#1358) --- lib/server.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/server.go b/lib/server.go index 4cc329c60..5bca91b4d 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2273,14 +2273,14 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { // If the FastHotStuffConsensus has been initialized, then we pass the block to the new consensus // which will validate the block, try to apply it, and handle the orphan case by requesting missing // parents. - glog.V(1).Infof(CLog(Cyan, fmt.Sprintf( + glog.V(0).Infof(CLog(Cyan, fmt.Sprintf( "Server._handleBlock: Processing block %v with FastHotStuffConsensus with SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp, ))) blockHashesToRequest, err = srv.fastHotStuffConsensus.HandleBlock(pp, blk) isOrphan = len(blockHashesToRequest) > 0 } else if !verifySignatures { - glog.V(1).Infof(CLog(Cyan, fmt.Sprintf( + glog.V(0).Infof(CLog(Cyan, fmt.Sprintf( "Server._handleBlock: Processing block %v WITHOUT signature checking because SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp, ))) @@ -2289,7 +2289,7 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { // TODO: Signature checking slows things down because it acquires the ChainLock. // The optimal solution is to check signatures in a way that doesn't acquire the // ChainLock, which is what Bitcoin Core does. 
- glog.V(1).Infof(CLog(Cyan, fmt.Sprintf( + glog.V(0).Infof(CLog(Cyan, fmt.Sprintf( "Server._handleBlock: Processing block %v WITH signature checking because SyncState=%v for peer %v", blk, srv.blockchain.chainState(), pp, ))) From 6a71496d0b439d6f3c5b7c31fa125f05067bcced Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Tue, 4 Jun 2024 17:12:27 -0400 Subject: [PATCH 748/762] Revert "Replace usage of uint256.NewInt(0).SetUint64(xx) with uint256.NewInt(xx) (#1285)" (#1351) This reverts commit c4d48b7552563c7640ab17ffaf9ae14a95811442. --- consensus/event_loop_test.go | 8 +++---- consensus/integration_test.go | 22 +++++++++---------- consensus/utils.go | 10 ++++----- consensus/utils_test.go | 40 +++++++++++++++++------------------ 4 files changed, 40 insertions(+), 40 deletions(-) diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index 9353fa4c9..f025047c5 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -878,11 +878,11 @@ func TestVoteQCConstructionSignal(t *testing.T) { validatorList := []Validator{ &validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt(70), + stakeAmount: uint256.NewInt(0).SetUint64(70), }, &validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt(30), + stakeAmount: uint256.NewInt(0).SetUint64(30), }, } @@ -1064,11 +1064,11 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { validatorList := []Validator{ &validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt(70), + stakeAmount: uint256.NewInt(0).SetUint64(70), }, &validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt(30), + stakeAmount: uint256.NewInt(0).SetUint64(30), }, } diff --git a/consensus/integration_test.go b/consensus/integration_test.go index 1e399ec59..2f34a9069 100644 --- a/consensus/integration_test.go +++ b/consensus/integration_test.go @@ -16,10 +16,10 @@ 
import ( func TestNetworkWithOfflineValidators(t *testing.T) { // Create 4 nodes with equal stake. The network has super-majority of stake online // as long as 3 out of 4 nodes are online. - node1 := newValidatorNode(uint256.NewInt(50), true) // block proposer - node2 := newValidatorNode(uint256.NewInt(50), false) // validator - node3 := newValidatorNode(uint256.NewInt(50), false) // validator - node4 := newValidatorNode(uint256.NewInt(50), false) // validator + node1 := newValidatorNode(uint256.NewInt(0).SetUint64(50), true) // block proposer + node2 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator + node3 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator + node4 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator allNodes := []*validatorNode{node1, node2, node3, node4} @@ -104,10 +104,10 @@ func TestNetworkWithOfflineValidators(t *testing.T) { func TestNetworkWithOfflineBlockProposer(t *testing.T) { // Create 4 nodes with equal stake. The network has super-majority of stake online // as long as 3 out of 4 nodes are online. - node1 := newValidatorNode(uint256.NewInt(50), true) // block proposer - node2 := newValidatorNode(uint256.NewInt(50), false) // validator - node3 := newValidatorNode(uint256.NewInt(50), false) // validator - node4 := newValidatorNode(uint256.NewInt(50), false) // validator + node1 := newValidatorNode(uint256.NewInt(0).SetUint64(50), true) // block proposer + node2 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator + node3 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator + node4 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator allNodes := []*validatorNode{node1, node2, node3, node4} @@ -181,9 +181,9 @@ func TestNetworkWithOfflineBlockProposer(t *testing.T) { func TestNetworkRecoveryAfterCatastrophicFailure(t *testing.T) { // Create 3 nodes with equal stake. 
Node 3 has a super-majority of the the stake // and needs to stay online for the network to remain live. - node1 := newValidatorNode(uint256.NewInt(10), true) // block proposer - node2 := newValidatorNode(uint256.NewInt(10), false) // validator - node3 := newValidatorNode(uint256.NewInt(80), false) // validator + node1 := newValidatorNode(uint256.NewInt(0).SetUint64(10), true) // block proposer + node2 := newValidatorNode(uint256.NewInt(0).SetUint64(10), false) // validator + node3 := newValidatorNode(uint256.NewInt(0).SetUint64(80), false) // validator allNodes := []*validatorNode{node1, node2, node3} diff --git a/consensus/utils.go b/consensus/utils.go index ca20c91d9..9e7d1c59f 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -392,11 +392,11 @@ func isSuperMajorityStake(stake *uint256.Int, totalStake *uint256.Int) bool { } // Compute 3Cq - honestStakeComponent := uint256.NewInt(0).Mul(stake, uint256.NewInt(3)) + honestStakeComponent := uint256.NewInt(0).Mul(stake, uint256.NewInt(0).SetUint64(3)) // Compute 2N + 1 - totalStakeComponent := uint256.NewInt(0).Mul(totalStake, uint256.NewInt(2)) - totalStakeComponent = uint256.NewInt(0).Add(totalStakeComponent, uint256.NewInt(1)) + totalStakeComponent := uint256.NewInt(0).Mul(totalStake, uint256.NewInt(0).SetUint64(2)) + totalStakeComponent = uint256.NewInt(0).Add(totalStakeComponent, uint256.NewInt(0).SetUint64(1)) // Check if 3Cq >= 2N + 1 return honestStakeComponent.Cmp(totalStakeComponent) >= 0 @@ -450,11 +450,11 @@ func createValidatorListForPrivateKeys(pk1 *bls.PrivateKey, pk2 *bls.PrivateKey) validators := []*validator{ { publicKey: pk1.PublicKey(), - stakeAmount: uint256.NewInt(100), + stakeAmount: uint256.NewInt(0).SetUint64(100), }, { publicKey: pk2.PublicKey(), - stakeAmount: uint256.NewInt(50), + stakeAmount: uint256.NewInt(0).SetUint64(50), }, } // Cast the slice of concrete structs []*validators to a slice of interfaces []Validator diff --git a/consensus/utils_test.go b/consensus/utils_test.go 
index 1b0429c80..0185308f5 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -178,17 +178,17 @@ func TestIsValidSuperMajorityQuorumCertificate(t *testing.T) { validator1 := validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt(3), + stakeAmount: uint256.NewInt(0).SetUint64(3), } validator2 := validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt(2), + stakeAmount: uint256.NewInt(0).SetUint64(2), } validator3 := validator{ publicKey: validatorPrivateKey3.PublicKey(), - stakeAmount: uint256.NewInt(1), + stakeAmount: uint256.NewInt(0).SetUint64(1), } validators := []Validator{&validator1, &validator2, &validator3} @@ -261,17 +261,17 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { validator1 := validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt(3), + stakeAmount: uint256.NewInt(0).SetUint64(3), } validator2 := validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt(1), + stakeAmount: uint256.NewInt(0).SetUint64(1), } validator3 := validator{ publicKey: validatorPrivateKey3.PublicKey(), - stakeAmount: uint256.NewInt(1), + stakeAmount: uint256.NewInt(0).SetUint64(1), } validators := []Validator{&validator1, &validator2, &validator3} @@ -508,7 +508,7 @@ func TestIsProperlyFormedValidatorSet(t *testing.T) { // Test nil public key { - validator := validator{publicKey: nil, stakeAmount: uint256.NewInt(1)} + validator := validator{publicKey: nil, stakeAmount: uint256.NewInt(0).SetUint64(1)} require.False(t, isProperlyFormedValidatorSet([]Validator{&validator})) } @@ -526,7 +526,7 @@ func TestIsProperlyFormedValidatorSet(t *testing.T) { // Test valid validator { - validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt(1)} + validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt(0).SetUint64(1)} require.True(t, 
isProperlyFormedValidatorSet([]Validator{&validator})) } } @@ -632,47 +632,47 @@ func TestIsSuperMajorityStake(t *testing.T) { // Test stake amount greater than total stake { - require.False(t, isSuperMajorityStake(uint256.NewInt(2), uint256.NewInt(1))) + require.False(t, isSuperMajorityStake(uint256.NewInt(0).SetUint64(2), uint256.NewInt(0).SetUint64(1))) } // Test stake amount much less than super majority { - stake := uint256.NewInt(1) - totalStake := uint256.NewInt(1000) + stake := uint256.NewInt(0).SetUint64(1) + totalStake := uint256.NewInt(0).SetUint64(1000) require.False(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount less than super majority { - stake := uint256.NewInt(666) - totalStake := uint256.NewInt(1000) + stake := uint256.NewInt(0).SetUint64(666) + totalStake := uint256.NewInt(0).SetUint64(1000) require.False(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount equal to super majority { - stake := uint256.NewInt(667) - totalStake := uint256.NewInt(1000) + stake := uint256.NewInt(0).SetUint64(667) + totalStake := uint256.NewInt(0).SetUint64(1000) require.True(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount greater than super majority { - stake := uint256.NewInt(668) - totalStake := uint256.NewInt(1000) + stake := uint256.NewInt(0).SetUint64(668) + totalStake := uint256.NewInt(0).SetUint64(1000) require.True(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount much greater than super majority { - stake := uint256.NewInt(999) - totalStake := uint256.NewInt(1000) + stake := uint256.NewInt(0).SetUint64(999) + totalStake := uint256.NewInt(0).SetUint64(1000) require.True(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount equal to total stake { - totalStake := uint256.NewInt(1000) + totalStake := uint256.NewInt(0).SetUint64(1000) require.True(t, isSuperMajorityStake(totalStake, totalStake)) } } From 5a8c92ff724e466bd83384dc1e86a0c4c6d481f3 Mon Sep 17 00:00:00 2001 From: 
tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 5 Jun 2024 17:24:01 -0400 Subject: [PATCH 749/762] Revert "Upgrade all deps (#1163)" (#1352) This reverts commit 528f5092a6557bbbccd6a23247cfd5ea79675158. --- cmd/node.go | 6 +- consensus/event_loop.go | 16 +- consensus/event_loop_test.go | 8 +- consensus/integration_test.go | 22 +- consensus/utils.go | 14 +- consensus/utils_test.go | 44 +- go.mod | 189 ++--- go.sum | 801 ++++++++++++-------- integration_testing/connection_bridge.go | 2 +- lib/base58.go | 2 +- lib/bitcoin_burner.go | 4 +- lib/block_producer.go | 21 +- lib/block_view.go | 20 +- lib/block_view_access_group_members_test.go | 28 +- lib/block_view_access_group_test.go | 10 +- lib/block_view_association_test.go | 4 +- lib/block_view_atomic_txns.go | 7 +- lib/block_view_atomic_txns_test.go | 5 +- lib/block_view_balance_entry.go | 22 +- lib/block_view_bitcoin.go | 4 +- lib/block_view_bitcoin_test.go | 4 +- lib/block_view_creator_coin.go | 26 +- lib/block_view_dao_coin.go | 38 +- lib/block_view_dao_coin_limit_order.go | 28 +- lib/block_view_dao_coin_limit_order_test.go | 406 +++++----- lib/block_view_dao_coin_test.go | 115 +-- lib/block_view_derived_key.go | 8 +- lib/block_view_derived_key_test.go | 97 +-- lib/block_view_flush.go | 2 +- lib/block_view_follow.go | 4 +- lib/block_view_like.go | 2 +- lib/block_view_lockups.go | 54 +- lib/block_view_lockups_test.go | 431 +++++------ lib/block_view_message.go | 4 +- lib/block_view_message_test.go | 24 +- lib/block_view_new_message_test.go | 22 +- lib/block_view_nft.go | 6 +- lib/block_view_post.go | 2 +- lib/block_view_profile.go | 27 +- lib/block_view_stake.go | 26 +- lib/block_view_stake_test.go | 186 ++--- lib/block_view_test.go | 59 +- lib/block_view_types.go | 10 +- lib/block_view_types_test.go | 4 +- lib/block_view_validator.go | 6 +- lib/block_view_validator_test.go | 108 +-- lib/blockchain.go | 6 +- lib/blockchain_test.go | 10 +- lib/connection_manager.go | 38 +- lib/constants.go | 3 - 
lib/db_utils.go | 8 +- lib/db_utils_test.go | 14 +- lib/deso_math.go | 14 +- lib/deso_math_test.go | 24 +- lib/errors.go | 4 +- lib/legacy_mempool.go | 7 +- lib/load_test.go | 4 +- lib/miner.go | 4 +- lib/network.go | 121 +-- lib/network_connection.go | 10 +- lib/network_connection_test.go | 6 +- lib/network_manager.go | 20 +- lib/network_test.go | 68 +- lib/peer.go | 8 +- lib/pos_block_producer.go | 4 +- lib/pos_blockchain_test.go | 14 +- lib/pos_constants.go | 2 +- lib/pos_epoch_complete_hook_test.go | 66 +- lib/pos_leader_schedule.go | 6 +- lib/pos_leader_schedule_test.go | 2 +- lib/pos_random_seed.go | 2 +- lib/pos_random_seed_test.go | 4 +- lib/pos_server_regtest.go | 2 +- lib/pos_staking_rewards.go | 4 +- lib/pos_transaction_register_test.go | 8 +- lib/postgres.go | 14 +- lib/remote_node.go | 10 +- lib/server.go | 13 +- lib/types.go | 4 +- lib/utils.go | 14 +- 80 files changed, 1736 insertions(+), 1730 deletions(-) diff --git a/cmd/node.go b/cmd/node.go index e3a085ef2..ee3dfbc50 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -515,20 +515,20 @@ func addIPsForHost(desoAddrMgr *addrmgr.AddrManager, host string, params *lib.De glog.V(1).Infof("_addSeedAddrs: Adding seed IPs from seed %s: %v\n", host, ipAddrs) // Convert addresses to NetAddress'es. - netAddrs, err := lib.SafeMakeSliceWithLength[*wire.NetAddressV2](uint64(len(ipAddrs))) + netAddrs, err := lib.SafeMakeSliceWithLength[*wire.NetAddress](uint64(len(ipAddrs))) if err != nil { glog.V(2).Infof("_addSeedAddrs: Problem creating netAddrs slice with length %d", len(ipAddrs)) return } for ii, ip := range ipAddrs { - netAddrs[ii] = wire.NetAddressV2FromBytes( + netAddrs[ii] = wire.NewNetAddressTimestamp( // We initialize addresses with a // randomly selected "last seen time" between 3 // and 7 days ago similar to what bitcoind does. 
time.Now().Add(-1*time.Second*time.Duration(lib.SecondsIn3Days+ lib.RandInt32(lib.SecondsIn4Days))), 0, - ip[:], + ip, params.DefaultSocketPort) } glog.V(1).Infof("_addSeedAddrs: Computed the following wire.NetAddress'es: %s", spew.Sdump(netAddrs)) diff --git a/consensus/event_loop.go b/consensus/event_loop.go index 686f579fc..22db4df92 100644 --- a/consensus/event_loop.go +++ b/consensus/event_loop.go @@ -687,8 +687,8 @@ func (fe *fastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff votesByValidator := fe.votesSeenByBlockHash[voteSignaturePayload] // Compute the total stake and total stake with votes - totalStake := uint256.NewInt(0) - totalVotingStake := uint256.NewInt(0) + totalStake := uint256.NewInt() + totalVotingStake := uint256.NewInt() // Track the signatures and signers list for the chain tip signersList := bitset.NewBitset() @@ -697,7 +697,7 @@ func (fe *fastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff // Iterate through the entire validator list and check if each one has voted for the tip block. Track // all voters and their stakes. 
for ii, validator := range validatorList { - totalStake = uint256.NewInt(0).Add(totalStake, validator.GetStakeAmount()) + totalStake = uint256.NewInt().Add(totalStake, validator.GetStakeAmount()) // Skip the validator if it hasn't voted for the block vote, hasVoted := votesByValidator[validator.GetPublicKey().ToString()] @@ -706,7 +706,7 @@ func (fe *fastHotStuffEventLoop) tryConstructVoteQCInCurrentView() *FastHotStuff } // Track the vote's signature, stake, and place in the validator list - totalVotingStake = uint256.NewInt(0).Add(totalVotingStake, validator.GetStakeAmount()) + totalVotingStake = uint256.NewInt().Add(totalVotingStake, validator.GetStakeAmount()) signersList.Set(ii, true) signatures = append(signatures, vote.GetSignature()) } @@ -796,8 +796,8 @@ func (fe *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt } // Compute the total stake and total stake with timeouts - totalStake := uint256.NewInt(0) - totalTimedOutStake := uint256.NewInt(0) + totalStake := uint256.NewInt() + totalTimedOutStake := uint256.NewInt() // Track the high QC view for each validator highQCViews := make([]uint64, len(validatorList)) @@ -812,7 +812,7 @@ func (fe *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt // for the signersList bitset. In practice, the validator list is expected to be <= 1000 in size, so // this loop will be fast. 
for ii, validator := range validatorList { - totalStake = uint256.NewInt(0).Add(totalStake, validator.GetStakeAmount()) + totalStake = uint256.NewInt().Add(totalStake, validator.GetStakeAmount()) // Skip the validator if it hasn't timed out for the previous view timeout, hasTimedOut := timeoutsByValidator[validator.GetPublicKey().ToString()] @@ -821,7 +821,7 @@ func (fe *fastHotStuffEventLoop) tryConstructTimeoutQCInCurrentView() *FastHotSt } // Track the signatures, timed out stake, and high QC views for the validator - totalTimedOutStake = uint256.NewInt(0).Add(totalTimedOutStake, validator.GetStakeAmount()) + totalTimedOutStake = uint256.NewInt().Add(totalTimedOutStake, validator.GetStakeAmount()) signersList.Set(ii, true) signatures = append(signatures, timeout.GetSignature()) highQCViews[ii] = timeout.GetHighQC().GetView() diff --git a/consensus/event_loop_test.go b/consensus/event_loop_test.go index f025047c5..2e5ec5adc 100644 --- a/consensus/event_loop_test.go +++ b/consensus/event_loop_test.go @@ -878,11 +878,11 @@ func TestVoteQCConstructionSignal(t *testing.T) { validatorList := []Validator{ &validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(70), + stakeAmount: uint256.NewInt().SetUint64(70), }, &validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(30), + stakeAmount: uint256.NewInt().SetUint64(30), }, } @@ -1064,11 +1064,11 @@ func TestTimeoutQCConstructionSignal(t *testing.T) { validatorList := []Validator{ &validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(70), + stakeAmount: uint256.NewInt().SetUint64(70), }, &validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(30), + stakeAmount: uint256.NewInt().SetUint64(30), }, } diff --git a/consensus/integration_test.go b/consensus/integration_test.go index 2f34a9069..d7d4e941b 100644 --- a/consensus/integration_test.go +++ 
b/consensus/integration_test.go @@ -16,10 +16,10 @@ import ( func TestNetworkWithOfflineValidators(t *testing.T) { // Create 4 nodes with equal stake. The network has super-majority of stake online // as long as 3 out of 4 nodes are online. - node1 := newValidatorNode(uint256.NewInt(0).SetUint64(50), true) // block proposer - node2 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator - node3 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator - node4 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator + node1 := newValidatorNode(uint256.NewInt().SetUint64(50), true) // block proposer + node2 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator + node3 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator + node4 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator allNodes := []*validatorNode{node1, node2, node3, node4} @@ -104,10 +104,10 @@ func TestNetworkWithOfflineValidators(t *testing.T) { func TestNetworkWithOfflineBlockProposer(t *testing.T) { // Create 4 nodes with equal stake. The network has super-majority of stake online // as long as 3 out of 4 nodes are online. 
- node1 := newValidatorNode(uint256.NewInt(0).SetUint64(50), true) // block proposer - node2 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator - node3 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator - node4 := newValidatorNode(uint256.NewInt(0).SetUint64(50), false) // validator + node1 := newValidatorNode(uint256.NewInt().SetUint64(50), true) // block proposer + node2 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator + node3 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator + node4 := newValidatorNode(uint256.NewInt().SetUint64(50), false) // validator allNodes := []*validatorNode{node1, node2, node3, node4} @@ -181,9 +181,9 @@ func TestNetworkWithOfflineBlockProposer(t *testing.T) { func TestNetworkRecoveryAfterCatastrophicFailure(t *testing.T) { // Create 3 nodes with equal stake. Node 3 has a super-majority of the the stake // and needs to stay online for the network to remain live. - node1 := newValidatorNode(uint256.NewInt(0).SetUint64(10), true) // block proposer - node2 := newValidatorNode(uint256.NewInt(0).SetUint64(10), false) // validator - node3 := newValidatorNode(uint256.NewInt(0).SetUint64(80), false) // validator + node1 := newValidatorNode(uint256.NewInt().SetUint64(10), true) // block proposer + node2 := newValidatorNode(uint256.NewInt().SetUint64(10), false) // validator + node3 := newValidatorNode(uint256.NewInt().SetUint64(80), false) // validator allNodes := []*validatorNode{node1, node2, node3} diff --git a/consensus/utils.go b/consensus/utils.go index 9e7d1c59f..bd17ef325 100644 --- a/consensus/utils.go +++ b/consensus/utils.go @@ -132,8 +132,8 @@ func IsValidSuperMajorityAggregateQuorumCertificate(aggQC AggregateQuorumCertifi func isSuperMajorityStakeSignersList(signersList *bitset.Bitset, validators []Validator) (bool, []*bls.PublicKey) { // Compute the total stake in the QC and the total stake in the network - stakeInQC := uint256.NewInt(0) - 
totalStake := uint256.NewInt(0) + stakeInQC := uint256.NewInt() + totalStake := uint256.NewInt() // Fetch the validators in the QC validatorPublicKeysInQC := []*bls.PublicKey{} @@ -392,11 +392,11 @@ func isSuperMajorityStake(stake *uint256.Int, totalStake *uint256.Int) bool { } // Compute 3Cq - honestStakeComponent := uint256.NewInt(0).Mul(stake, uint256.NewInt(0).SetUint64(3)) + honestStakeComponent := uint256.NewInt().Mul(stake, uint256.NewInt().SetUint64(3)) // Compute 2N + 1 - totalStakeComponent := uint256.NewInt(0).Mul(totalStake, uint256.NewInt(0).SetUint64(2)) - totalStakeComponent = uint256.NewInt(0).Add(totalStakeComponent, uint256.NewInt(0).SetUint64(1)) + totalStakeComponent := uint256.NewInt().Mul(totalStake, uint256.NewInt().SetUint64(2)) + totalStakeComponent = uint256.NewInt().Add(totalStakeComponent, uint256.NewInt().SetUint64(1)) // Check if 3Cq >= 2N + 1 return honestStakeComponent.Cmp(totalStakeComponent) >= 0 @@ -450,11 +450,11 @@ func createValidatorListForPrivateKeys(pk1 *bls.PrivateKey, pk2 *bls.PrivateKey) validators := []*validator{ { publicKey: pk1.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(100), + stakeAmount: uint256.NewInt().SetUint64(100), }, { publicKey: pk2.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(50), + stakeAmount: uint256.NewInt().SetUint64(50), }, } // Cast the slice of concrete structs []*validators to a slice of interfaces []Validator diff --git a/consensus/utils_test.go b/consensus/utils_test.go index 0185308f5..917650039 100644 --- a/consensus/utils_test.go +++ b/consensus/utils_test.go @@ -178,17 +178,17 @@ func TestIsValidSuperMajorityQuorumCertificate(t *testing.T) { validator1 := validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(3), + stakeAmount: uint256.NewInt().SetUint64(3), } validator2 := validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(2), + stakeAmount: uint256.NewInt().SetUint64(2), } 
validator3 := validator{ publicKey: validatorPrivateKey3.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(1), + stakeAmount: uint256.NewInt().SetUint64(1), } validators := []Validator{&validator1, &validator2, &validator3} @@ -261,17 +261,17 @@ func TestIsValidSuperMajorityAggregateQuorumCertificate(t *testing.T) { validator1 := validator{ publicKey: validatorPrivateKey1.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(3), + stakeAmount: uint256.NewInt().SetUint64(3), } validator2 := validator{ publicKey: validatorPrivateKey2.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(1), + stakeAmount: uint256.NewInt().SetUint64(1), } validator3 := validator{ publicKey: validatorPrivateKey3.PublicKey(), - stakeAmount: uint256.NewInt(0).SetUint64(1), + stakeAmount: uint256.NewInt().SetUint64(1), } validators := []Validator{&validator1, &validator2, &validator3} @@ -508,7 +508,7 @@ func TestIsProperlyFormedValidatorSet(t *testing.T) { // Test nil public key { - validator := validator{publicKey: nil, stakeAmount: uint256.NewInt(0).SetUint64(1)} + validator := validator{publicKey: nil, stakeAmount: uint256.NewInt().SetUint64(1)} require.False(t, isProperlyFormedValidatorSet([]Validator{&validator})) } @@ -520,13 +520,13 @@ func TestIsProperlyFormedValidatorSet(t *testing.T) { // Test zero stake amount { - validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt(0)} + validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt()} require.False(t, isProperlyFormedValidatorSet([]Validator{&validator})) } // Test valid validator { - validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt(0).SetUint64(1)} + validator := validator{publicKey: createDummyBLSPublicKey(), stakeAmount: uint256.NewInt().SetUint64(1)} require.True(t, isProperlyFormedValidatorSet([]Validator{&validator})) } } @@ -627,52 +627,52 @@ func TestIsSuperMajorityStake(t *testing.T) { // Test zero values 
{ - require.False(t, isSuperMajorityStake(uint256.NewInt(0), uint256.NewInt(0))) + require.False(t, isSuperMajorityStake(uint256.NewInt(), uint256.NewInt())) } // Test stake amount greater than total stake { - require.False(t, isSuperMajorityStake(uint256.NewInt(0).SetUint64(2), uint256.NewInt(0).SetUint64(1))) + require.False(t, isSuperMajorityStake(uint256.NewInt().SetUint64(2), uint256.NewInt().SetUint64(1))) } // Test stake amount much less than super majority { - stake := uint256.NewInt(0).SetUint64(1) - totalStake := uint256.NewInt(0).SetUint64(1000) + stake := uint256.NewInt().SetUint64(1) + totalStake := uint256.NewInt().SetUint64(1000) require.False(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount less than super majority { - stake := uint256.NewInt(0).SetUint64(666) - totalStake := uint256.NewInt(0).SetUint64(1000) + stake := uint256.NewInt().SetUint64(666) + totalStake := uint256.NewInt().SetUint64(1000) require.False(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount equal to super majority { - stake := uint256.NewInt(0).SetUint64(667) - totalStake := uint256.NewInt(0).SetUint64(1000) + stake := uint256.NewInt().SetUint64(667) + totalStake := uint256.NewInt().SetUint64(1000) require.True(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount greater than super majority { - stake := uint256.NewInt(0).SetUint64(668) - totalStake := uint256.NewInt(0).SetUint64(1000) + stake := uint256.NewInt().SetUint64(668) + totalStake := uint256.NewInt().SetUint64(1000) require.True(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount much greater than super majority { - stake := uint256.NewInt(0).SetUint64(999) - totalStake := uint256.NewInt(0).SetUint64(1000) + stake := uint256.NewInt().SetUint64(999) + totalStake := uint256.NewInt().SetUint64(1000) require.True(t, isSuperMajorityStake(stake, totalStake)) } // Test stake amount equal to total stake { - totalStake := uint256.NewInt(0).SetUint64(1000) + 
totalStake := uint256.NewInt().SetUint64(1000) require.True(t, isSuperMajorityStake(totalStake, totalStake)) } } diff --git a/go.mod b/go.mod index 476df804d..d61858fed 100644 --- a/go.mod +++ b/go.mod @@ -3,151 +3,114 @@ module github.com/deso-protocol/core go 1.22 require ( - github.com/DataDog/datadog-go v4.8.3+incompatible + github.com/DataDog/datadog-go v4.5.0+incompatible github.com/brianvoe/gofakeit v3.18.0+incompatible - github.com/btcsuite/btcd v0.24.0 - github.com/btcsuite/btcd/btcec/v2 v2.3.3 - github.com/btcsuite/btcd/btcutil v1.1.5 - github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 + github.com/btcsuite/btcd v0.21.0-beta + github.com/btcsuite/btcd/btcec/v2 v2.2.1 + github.com/btcsuite/btcutil v1.0.2 github.com/bxcodec/faker v2.0.1+incompatible - github.com/cloudflare/circl v1.3.7 - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 - github.com/decred/dcrd/lru v1.1.2 + github.com/cloudflare/circl v1.1.0 + github.com/davecgh/go-spew v1.1.1 + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 + github.com/decred/dcrd/lru v1.1.1 github.com/deso-protocol/go-deadlock v1.0.0 github.com/deso-protocol/go-merkle-tree v1.0.0 github.com/dgraph-io/badger/v4 v4.2.0 github.com/emirpasic/gods v1.18.1 - github.com/ethereum/go-ethereum v1.13.5 - github.com/fatih/color v1.16.0 + github.com/ethereum/go-ethereum v1.9.25 + github.com/fatih/color v1.13.0 github.com/gernest/mention v2.0.0+incompatible - github.com/go-pg/pg/v10 v10.12.0 - github.com/golang/glog v1.2.1 - github.com/google/uuid v1.6.0 - github.com/holiman/uint256 v1.2.3 + github.com/go-pg/pg/v10 v10.10.0 + github.com/golang/glog v1.0.0 + github.com/google/uuid v1.2.0 + github.com/holiman/uint256 v1.1.1 github.com/mitchellh/go-homedir v1.1.0 github.com/oleiade/lane v1.0.1 - github.com/onflow/crypto v0.25.1 + github.com/onflow/crypto v0.25.0 github.com/pkg/errors v0.9.1 - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 + 
github.com/pmezard/go-difflib v1.0.0 github.com/robinjoseph08/go-pg-migrations/v3 v3.0.0 github.com/shibukawa/configdir v0.0.0-20170330084843-e180dbdc8da0 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.1.3 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.18.2 - github.com/stretchr/testify v1.9.0 - github.com/tyler-smith/go-bip39 v1.1.0 - github.com/unrolled/secure v1.14.0 - golang.org/x/crypto v0.22.0 - golang.org/x/sync v0.7.0 - gopkg.in/DataDog/dd-trace-go.v1 v1.62.0 + github.com/spf13/viper v1.7.1 + github.com/stretchr/testify v1.8.0 + github.com/tyler-smith/go-bip39 v1.0.2 + github.com/unrolled/secure v1.0.8 + golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 + gopkg.in/DataDog/dd-trace-go.v1 v1.29.0 ) require ( - github.com/fergusstrange/embedded-postgres v1.26.0 - github.com/lib/pq v1.10.9 // indirect + github.com/fergusstrange/embedded-postgres v1.19.0 + github.com/lib/pq v1.10.4 // indirect github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect ) require ( - github.com/AlecAivazis/survey/v2 v2.3.7 // indirect - github.com/DataDog/appsec-internal-go v1.5.0 // indirect - github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 // indirect - github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1 // indirect - github.com/DataDog/datadog-go/v5 v5.5.0 // indirect - github.com/DataDog/go-libddwaf/v2 v2.4.2 // indirect - github.com/DataDog/go-sqllexer v0.0.11 // indirect - github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect - github.com/DataDog/gostackparse v0.7.0 // indirect - github.com/DataDog/sketches-go v1.4.4 // indirect - github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.2.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/andygrunwald/go-jira v1.16.0 // indirect + github.com/Microsoft/go-winio v0.4.16 // indirect github.com/btcsuite/btclog 
v0.0.0-20170628155309-84c8d2346e9f // indirect - github.com/bwesterb/go-ristretto v1.2.3 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/coreos/go-semver v0.3.1 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect - github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect + github.com/bwesterb/go-ristretto v1.2.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/ebitengine/purego v0.7.1 // indirect - github.com/fatih/structs v1.1.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/git-chglog/git-chglog v0.15.4 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/fsnotify/fsnotify v1.4.9 // indirect + github.com/git-chglog/git-chglog v0.0.0-20200414013904-db796966b373 // indirect github.com/go-pg/zerochecker v0.2.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect - github.com/google/flatbuffers v24.3.25+incompatible // indirect - github.com/google/go-querystring v1.1.0 // indirect - github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/hcl v1.0.1-vault-5 // indirect - github.com/huandu/xstrings v1.4.0 // indirect - github.com/imdario/mergo v0.3.16 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.3 // indirect + github.com/google/flatbuffers v2.0.0+incompatible // indirect + github.com/google/pprof 
v0.0.0-20210226084205-cbba55b83ad5 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/imdario/mergo v0.3.8 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.17.8 // indirect - github.com/kyokomi/emoji/v2 v2.2.12 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/goveralls v0.0.12 // indirect - github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/nxadm/tail v1.4.11 // indirect - github.com/onsi/ginkgo v1.16.5 // indirect - github.com/onsi/gomega v1.32.0 // indirect - github.com/outcaste-io/ristretto v0.2.3 // indirect - github.com/pelletier/go-toml/v2 v2.2.0 // indirect - github.com/petermattis/goid v0.0.0-20240327183114-c42a807a84ba // indirect - github.com/philhofer/fwd v1.1.2 // indirect - github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect - github.com/shopspring/decimal v1.3.1 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/tinylib/msgp v1.1.9 // indirect + github.com/klauspost/compress v1.12.3 // indirect + github.com/kr/text v0.2.0 // indirect + 
github.com/magiconair/properties v1.8.1 // indirect + github.com/mattn/go-colorable v0.1.9 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/goveralls v0.0.6 // indirect + github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/onsi/ginkgo v1.15.0 // indirect + github.com/onsi/gomega v1.10.5 // indirect + github.com/pelletier/go-toml v1.7.0 // indirect + github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect + github.com/philhofer/fwd v1.1.1 // indirect + github.com/russross/blackfriday/v2 v2.0.1 // indirect + github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect + github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/cast v1.3.0 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/tinylib/msgp v1.1.2 // indirect github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect - github.com/trivago/tgo v1.0.7 // indirect github.com/tsuyoshiwada/go-gitcmd v0.0.0-20180205145712-5f1f5f9475df // indirect - github.com/urfave/cli/v2 v2.27.1 // indirect + github.com/urfave/cli v1.22.1 // indirect github.com/vmihailenco/bufpool v0.1.11 // indirect - github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect + github.com/vmihailenco/msgpack/v5 v5.3.1 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect - go.opencensus.io v0.24.0 // indirect - go.uber.org/atomic v1.11.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.24.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/term v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.5.0 // 
indirect - golang.org/x/tools v0.20.0 // indirect - golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect + go.opencensus.io v0.23.0 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect + golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect + golang.org/x/tools v0.1.12 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + gonum.org/v1/gonum v0.6.1 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/AlecAivazis/survey.v1 v1.8.7 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/ini.v1 v1.51.0 // indirect + gopkg.in/kyokomi/emoji.v1 v1.5.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - mellium.im/sasl v0.3.1 // indirect + mellium.im/sasl v0.2.1 // indirect ) diff --git a/go.sum b/go.sum index bad071c84..acd6529a2 100644 --- a/go.sum +++ b/go.sum @@ -1,106 +1,117 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= -github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= 
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/appsec-internal-go v1.5.0 h1:8kS5zSx5T49uZ8dZTdT19QVAvC/B8ByyZdhQKYQWHno= -github.com/DataDog/appsec-internal-go v1.5.0/go.mod h1:pEp8gjfNLtEOmz+iZqC8bXhu0h4k7NUsW/qiQb34k1U= -github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 h1:/oxF4p/4XUGNpNw2TE7vDu/pJV3elEAZ+jES0/MWtiI= -github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1/go.mod h1:AVPQWekk3h9AOC7+plBlNB68Sy6UIGFoMMVUDeSoNoI= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1 h1:mmkGuCHBFuDBpuwNMcqtY1x1I2fCaPH2Br4xPAAjbkM= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1/go.mod h1:JhAilx32dkIgoDkFXquCTfaWDsAOfe+vfBaxbiZoPI0= -github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= -github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= -github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-libddwaf/v2 v2.4.2 h1:ilquGKUmN9/Ty0sIxiEyznVRxP3hKfmH15Y1SMq5gjA= -github.com/DataDog/go-libddwaf/v2 v2.4.2/go.mod h1:gsCdoijYQfj8ce/T2bEDNPZFIYnmHluAgVDpuQOWMZE= -github.com/DataDog/go-sqllexer v0.0.11 h1:OfPBjmayreblOXreszbrOTICNZ3qWrA6Bg4sypvxpbw= -github.com/DataDog/go-sqllexer v0.0.11/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= -github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= -github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= -github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4= -github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM= -github.com/DataDog/sketches-go v1.4.4 h1:dF52vzXRFSPOj2IjXSWLvXq3jubL4CI69kwYjJ1w5Z8= -github.com/DataDog/sketches-go v1.4.4/go.mod h1:XR0ns2RtEEF09mDKXiKZiQg+nfZStrq1ZuL1eezeZe0= 
-github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= -github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v4.4.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go v4.5.0+incompatible h1:MyyuIz5LVAI3Im+0F/tfo64ETyH4sNVynZ29yOiHm50= +github.com/DataDog/datadog-go v4.5.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= -github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= -github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= 
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/andygrunwald/go-jira v1.16.0 h1:PU7C7Fkk5L96JvPc6vDVIrd99vdPnYudHu4ju2c2ikQ= -github.com/andygrunwald/go-jira v1.16.0/go.mod h1:UQH4IBVxIYWbgagc0LF/k9FRs9xjIiQ8hIcC6HfLwFU= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod 
h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/brianvoe/gofakeit v3.18.0+incompatible h1:wDOmHc9DLG4nRjUVVaxA+CEglKOW72Y5+4WNxUIkjM8= github.com/brianvoe/gofakeit v3.18.0+incompatible/go.mod h1:kfwdRA90vvNhPutZWfH7WPaDzUjz+CZFqG+rPkOjGOc= +github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= -github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= -github.com/btcsuite/btcd v0.24.0 h1:gL3uHE/IaFj6fcZSu03SvqPMSx7s/dPzfpG/atRwWdo= -github.com/btcsuite/btcd v0.24.0/go.mod h1:K4IDc1593s8jKXIF7yS7yCTSxrknB9z0STzc2j6XgE4= -github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= -github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= -github.com/btcsuite/btcd/btcec/v2 v2.3.3 h1:6+iXlDKE8RMtKsvK0gshlXIuPbyWM/h84Ensb7o3sC0= -github.com/btcsuite/btcd/btcec/v2 v2.3.3/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= -github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= -github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= -github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8= -github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= -github.com/btcsuite/btcd/chaincfg/chainhash 
v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= +github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= +github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E= +github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0 h1:Tvd0BfvqX9o823q1j2UZ/epQo09eJh6dTcRp79ilIN4= github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0 h1:ZxaA6lo2EpxGddsA8JwWOcxlzRybb444sgmeJQMJGQE= github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bwesterb/go-ristretto v1.2.3 h1:1w53tCkGhCQ5djbat3+MH0BAQ5Kfgbt56UZQ/JMzngw= -github.com/bwesterb/go-ristretto 
v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/bwesterb/go-ristretto v1.2.0 h1:xxWOVbN5m8NNKiSDZXE1jtZvZnC6JSJ9cYFADiZcWtw= +github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA= github.com/bxcodec/faker v2.0.1+incompatible/go.mod h1:BNzfpVdTwnFJ6GtfYTcQu6l6rHShT+veBxNCnjCx5XM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= -github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY= +github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= +github.com/cloudflare/cloudflare-go 
v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= -github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= -github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= -github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/decred/dcrd/lru v1.1.2 h1:KdCzlkxppuoIDGEvCGah1fZRicrDH36IipvlB1ROkFY= -github.com/decred/dcrd/lru v1.1.2/go.mod h1:gEdCVgXs1/YoBvFWt7Scgknbhwik3FgVSzlnCcXL2N8= +github.com/decred/dcrd/lru v1.1.1 h1:kWFDaW0OWx6AD6Ki342c+JPmHbiVdE6rK81pT3fuo/Y= +github.com/decred/dcrd/lru v1.1.1/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/deso-protocol/go-deadlock v1.0.0 h1:mw0pHy/19zgC+JFBStuQt1+1Ehv5OKA5NxXqecnL5ic= github.com/deso-protocol/go-deadlock v1.0.0/go.mod h1:K0Wd2OV2x7ck7SMYDraWerpKjFKUeBqaFcwz21tmkb8= github.com/deso-protocol/go-merkle-tree v1.0.0 
h1:9zkI5dQsITYy77s4kbTGPQmZnhQ+LsH/kRdL5l/Yzvg= @@ -109,59 +120,69 @@ github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8Bzu github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/ebitengine/purego v0.7.1 h1:6/55d26lG3o9VCZX8lping+bZcmShseiqlh2bnUDiPA= -github.com/ebitengine/purego v0.7.1/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ= +github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= +github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/emirpasic/gods v1.18.1 
h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.13.5 h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF3VbaFk= -github.com/ethereum/go-ethereum v1.13.5/go.mod h1:yMTu38GSuyxaYzQMViqNmQ1s3cE84abZexQmTgenWk0= +github.com/ethereum/go-ethereum v1.9.25 h1:mMiw/zOOtCLdGLWfcekua0qPrJTe7FVIiHJ4IKNTfR0= +github.com/ethereum/go-ethereum v1.9.25/go.mod h1:vMkFiYLHI4tgPw4k2j4MHKoovchFE8plZ0M9VMk4/oM= +github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fergusstrange/embedded-postgres v1.26.0 h1:mTgUBNST+6zro0TkIb9Fuo9Qg8mSU0ILus9jZKmFmJg= -github.com/fergusstrange/embedded-postgres v1.26.0/go.mod h1:t/MLs0h9ukYM6FSt99R7InCHs1nW0ordoVCcnzmpTYw= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod 
h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fergusstrange/embedded-postgres v1.19.0 h1:NqDufJHeA03U7biULlPHZ0pZ10/mDOMKPILEpT50Fyk= +github.com/fergusstrange/embedded-postgres v1.19.0/go.mod h1:0B+3bPsMvcNgR9nN+bdM2x9YaNYDnf3ksUqYp1OAub0= +github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/gernest/mention v2.0.0+incompatible h1:pTXnujBC6tqlw5awDkLojq92TXbt0F+4+8FBlQC+di8= github.com/gernest/mention v2.0.0+incompatible/go.mod h1:/z3Hb+4gaPF+vL8og/lj6Au5j8hh5EfU7/EknmDUuO4= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/git-chglog/git-chglog v0.0.0-20200414013904-db796966b373 h1:MHrlpWOOFhCfY1L9iCIUy5cv5HgDtempICenzJt+7ws= github.com/git-chglog/git-chglog v0.0.0-20200414013904-db796966b373/go.mod h1:Dcsy1kii/xFyNad5JqY/d0GO5mu91sungp5xotbm3Yk= -github.com/git-chglog/git-chglog v0.15.4 h1:BwPDj7AghQTfpXO+UxG4mZM5MUTe9wfDuenF3jpyNf0= -github.com/git-chglog/git-chglog v0.15.4/go.mod h1:BmWdTpqBVzPjKNrBTZGcQCrQV9zq6gFKurhWNnJbYDA= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-kit/kit 
v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-pg/pg/v10 v10.5.0/go.mod h1:BfgPoQnD2wXNd986RYEHzikqv9iE875PrFaZ9vXvtNM= -github.com/go-pg/pg/v10 v10.12.0 h1:rBmfDDHTN7FQW0OemYmcn5UuBy6wkYWgh/Oqt1OBEB8= -github.com/go-pg/pg/v10 v10.12.0/go.mod h1:USA08CdIasAn0F6wC1nBf5nQhMHewVQodWoH89RPXaI= +github.com/go-pg/pg/v10 v10.10.0 h1:xc5zWYQ/55XI8pk5NkK+ixXqbJh1vnOun3VODPmbYfY= +github.com/go-pg/pg/v10 v10.10.0/go.mod h1:EmoJGYErc+stNN/1Jf+o4csXuprjxcRztBnn6cHe38E= github.com/go-pg/zerochecker v0.2.0 h1:pp7f72c3DobMWOb2ErtZsnrPaSvHd2W4o9//8HtF4mU= github.com/go-pg/zerochecker v0.2.0/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= 
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -175,13 +196,16 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI= -github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= +github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -189,179 +213,242 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= -github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo= -github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20210125172800-10e9aeb4a998/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof 
v0.0.0-20210226084205-cbba55b83ad5 h1:zIaiqGYDQwa4HVx5wGRTXbx38Pqxjemn4BP98wpzpXo= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= 
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= -github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= 
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ= github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= -github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= -github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= -github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o= -github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= +github.com/holiman/uint256 v1.1.1 h1:4JywC80b+/hSfljFlEBLHrrh+CIONLDz9NuFl0af4Mw= +github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= -github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= +github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kyokomi/emoji/v2 v2.2.12 h1:sSVA5nH9ebR3Zji1o31wu3yOwD1zKXQA2z0zUyeit60= -github.com/kyokomi/emoji/v2 v2.2.12/go.mod h1:JUcn42DTdsXJo1SWanHh4HKDEyPaR5CqkmoirZZP9qE= -github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= -github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= +github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.0.9/go.mod 
h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/goveralls v0.0.6 
h1:cr8Y0VMo/MnEZBjxNN/vh6G90SZ7IMb6lms1dzMoO+Y= github.com/mattn/goveralls v0.0.6/go.mod h1:h8b4ow6FxSPMQHF6o2ve3qsclnffZjYTNEKmLesRwqw= -github.com/mattn/goveralls v0.0.12 h1:PEEeF0k1SsTjOBQ8FOmrOAoCu4ytuMaWCnWe94zxbCg= -github.com/mattn/goveralls v0.0.12/go.mod h1:44ImGEUfmqH8bBtaMrYKsM65LXfNLWmwaxFGjZwgMSQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= -github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= 
-github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oleiade/lane v1.0.1 h1:hXofkn7GEOubzTwNpeL9MaNy8WxolCYb9cInAIeqShU= github.com/oleiade/lane v1.0.1/go.mod 
h1:IyTkraa4maLfjq/GmHR+Dxb4kCMtEGeb+qmhlrQ5Mk4= -github.com/onflow/crypto v0.25.1 h1:0txy2PKPMM873JbpxQNbJmuOJtD56bfs48RQfm0ts5A= -github.com/onflow/crypto v0.25.1/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onflow/crypto v0.25.0 h1:BeWbLsh3ZD13Ej+Uky6kg1PL1ZIVBDVX+2MVBNwqddg= +github.com/onflow/crypto v0.25.0/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= +github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= -github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= 
-github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0= -github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= -github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo= -github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= +github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/petermattis/goid v0.0.0-20240327183114-c42a807a84ba h1:3jPgmsFGBID1wFfU2AbYocNcN4wqU68UaHSdMjiw/7U= -github.com/petermattis/goid v0.0.0-20240327183114-c42a807a84ba/go.mod 
h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= -github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= -github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= +github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= +github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA= -github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod 
h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/robinjoseph08/go-pg-migrations/v3 v3.0.0 h1:0/H63lDsoNYVn5YmP6VLDEnnKkoVYiHx7udTWCK4BUI= github.com/robinjoseph08/go-pg-migrations/v3 v3.0.0/go.mod h1:nOkSFfwwDUBFnDDQqMRC2p4PDE7GZb/KSVqILVB3bmw= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= -github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= -github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shibukawa/configdir v0.0.0-20170330084843-e180dbdc8da0 h1:Xuk8ma/ibJ1fOy4Ee11vHhUFHQNpHhrBneOCNHVXS5w= github.com/shibukawa/configdir v0.0.0-20170330084843-e180dbdc8da0/go.mod h1:7AwjWCpdPhkSmNAgUv5C7EJ4AbmjEB3r047r3DXWu3Y= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sourcegraph/conc v0.3.0 
h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero 
v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= +github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= +github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -370,210 +457,269 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/tinylib/msgp v1.1.9 
h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU= -github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= +github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= +github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo= github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs= -github.com/trivago/tgo v1.0.7 h1:uaWH/XIy9aWYWpjm2CU3RpcqZXmX2ysQ9/Go+d9gyrM= -github.com/trivago/tgo v1.0.7/go.mod h1:w4dpD+3tzNIIiIfkWWa85w5/B77tlvdZckQ+6PkFnhc= github.com/tsuyoshiwada/go-gitcmd v0.0.0-20180205145712-5f1f5f9475df h1:Y2l28Jr3vOEeYtxfVbMtVfOdAwuUqWaP9fvNKiBVeXY= github.com/tsuyoshiwada/go-gitcmd v0.0.0-20180205145712-5f1f5f9475df/go.mod h1:pnyouUty/nBr/zm3GYwTIt+qFTLWbdjeLjZmJdzJOu8= -github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= -github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= -github.com/unrolled/secure v1.14.0 h1:u9vJTU/pR4Bny0ntLUMxdfLtmIRGvQf2sEFuA0TG9AE= -github.com/unrolled/secure v1.14.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= +github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/tyler-smith/go-bip39 v1.0.2 h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8= +github.com/tyler-smith/go-bip39 v1.0.2/go.mod 
h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/unrolled/secure v1.0.8 h1:JaMvKbe4CRt8oyxVXn+xY+6jlqd7pyJNSVkmsBxxQsM= +github.com/unrolled/secure v1.0.8/go.mod h1:fO+mEan+FLB0CdEnHf6Q4ZZVNqG+5fuLFnP8p0BXDPI= +github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= -github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/vmihailenco/bufpool v0.1.11 h1:gOq2WmBrq0i2yW5QJ16ykccQ4wH9UyEsgLm6czKAd94= github.com/vmihailenco/bufpool v0.1.11/go.mod h1:AFf/MOy3l2CFTKbxwt0mp2MwnqjNEs5H/UxrkA5jxTQ= github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/msgpack/v5 v5.0.0-beta.1/go.mod h1:xlngVLeyQ/Qi05oQxhQ+oTuqa03RjMwMfk/7/TCs+QI= github.com/vmihailenco/msgpack/v5 v5.0.0-beta.8/go.mod h1:HVxBVPUK/+fZMonk4bi1islLa8V3cfnBug0+4dykPzo= -github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= -github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= +github.com/vmihailenco/msgpack/v5 v5.3.1 h1:0i85a4dsZh8mC//wmyyTEzidDLPQfQAxZIOLtafGbFY= +github.com/vmihailenco/msgpack/v5 v5.3.1/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 
v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= -github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= -github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.3.0 
h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 h1:ESSUROHIBHg7USnszlcdmjBEwdMj9VUvU+OPk4yl2mc= -golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod 
h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mobile v0.0.0-20200801112145-973feb4309de/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod 
v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180606202747-9527bec2660b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201026173827-119d4633e4d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= -golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= -golang.org/x/tools v0.20.0/go.mod 
h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.1 h1:/LSrTrgZtpbXyAR6+0e152SROCkJJSh7goYWVmdPFGc= +gonum.org/v1/gonum v0.6.1/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= 
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= 
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -588,27 +734,36 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/AlecAivazis/survey.v1 v1.8.7 h1:oBJqtgsyBLg9K5FK9twNUbcPnbCPoh+R9a+7nag3qJM= gopkg.in/AlecAivazis/survey.v1 v1.8.7/go.mod h1:iBNOmqKz/NUbZx3bA+4hAGLRC7fSK7tgtVDT4tB22XA= -gopkg.in/DataDog/dd-trace-go.v1 v1.62.0 h1:jeZxE4ZlfAc+R0zO5TEmJBwOLet3NThsOfYJeSQg1x0= -gopkg.in/DataDog/dd-trace-go.v1 v1.62.0/go.mod h1:YTvYkk3PTsfw0OWrRFxV/IQ5Gy4nZ5TRvxTAP3JcIzs= +gopkg.in/DataDog/dd-trace-go.v1 v1.29.0 h1:3C1EEjgFTPqrnS2SXuSqkBbZGacIOPJ7ScGJk4nrP9s= +gopkg.in/DataDog/dd-trace-go.v1 v1.29.0/go.mod h1:FLwUDeuH0z5hkvgvd04/M3MHQN4AF5pQDnedeWRWvok= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/kyokomi/emoji.v1 v1.5.1 h1:beetH5mWDMzFznJ+Qzd5KVHp79YKhVUMcdO8LpRLeGw= gopkg.in/kyokomi/emoji.v1 v1.5.1/go.mod h1:N9AZ6hi1jHOPn34PsbpufQZUcKftSD7WgS2pgpmH4Lg= +gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= +gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -616,12 +771,14 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/gotraceui v0.2.0 h1:dmNsfQ9Vl3GwbiVD7Z8d/osC6WtGGrasyrC2suc4ZIQ= -honnef.co/go/gotraceui v0.2.0/go.mod h1:qHo4/W75cA3bX0QQoSvDjbJa4R8mAyyFjbWAj63XElc= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +mellium.im/sasl v0.2.1 h1:nspKSRg7/SyO0cRGY71OkfHab8tf9kCts6a6oTDut0w= mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ= -mellium.im/sasl v0.3.1 h1:wE0LW6g7U83vhvxjC1IY8DnXM+EU095yeo8XClvCdfo= -mellium.im/sasl v0.3.1/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw= pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod 
h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/integration_testing/connection_bridge.go b/integration_testing/connection_bridge.go index 9bfd98678..764cfe61e 100644 --- a/integration_testing/connection_bridge.go +++ b/integration_testing/connection_bridge.go @@ -103,7 +103,7 @@ func (bridge *ConnectionBridge) createInboundConnection(node *cmd.Node) *lib.Pee panic(err) } - ip, _, err := net.ParseCIDR(netAddress.Addr.String()) + ip, _, err := net.ParseCIDR(netAddress.IP.String()) if err != nil { panic(err) } diff --git a/lib/base58.go b/lib/base58.go index 2cfc583d2..c02743312 100644 --- a/lib/base58.go +++ b/lib/base58.go @@ -3,7 +3,7 @@ package lib import ( "crypto/sha256" "fmt" - "github.com/btcsuite/btcd/btcutil/base58" + "github.com/btcsuite/btcutil/base58" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/bitcoin_burner.go b/lib/bitcoin_burner.go index e77428674..20865be74 100644 --- a/lib/bitcoin_burner.go +++ b/lib/bitcoin_burner.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/btcsuite/btcd/btcutil" "github.com/davecgh/go-spew/spew" "io/ioutil" "math" @@ -16,10 +15,11 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" ) // bitcoin_burner.go finds the Bitcoin UTXOs associated with a Bitcoin diff --git a/lib/block_producer.go b/lib/block_producer.go index fc09daf9b..c54f6771e 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -3,7 +3,6 @@ package lib import ( "encoding/hex" "fmt" - "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "math" "strings" "sync" @@ -15,7 +14,7 @@ import ( "github.com/deso-protocol/go-deadlock" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/golang/glog" "github.com/pkg/errors" ) @@ -90,7 +89,7 @@ 
func NewDeSoBlockProducer( if err != nil { return nil, fmt.Errorf("NewDeSoBlockProducer: Error decoding hex seed: %+v", err) } - privKey, _ = btcec.PrivKeyFromBytes(privKeyBytes) + privKey, _ = btcec.PrivKeyFromBytes(btcec.S256(), privKeyBytes) } else { seedBytes, err := bip39.NewSeedWithErrorChecking(blockProducerSeed, "") if err != nil { @@ -145,7 +144,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) lastNode := desoBlockProducer.chain.blockTip() // Compute the public key to contribute the reward to. - rewardPk, err := btcec.ParsePubKey(publicKey) + rewardPk, err := btcec.ParsePubKey(publicKey, btcec.S256()) if err != nil { return nil, nil, nil, errors.Wrapf(err, "DeSoBlockProducer._getBlockTemplate: ") } @@ -287,7 +286,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) desoBlockProducer.postgres, desoBlockProducer.chain.snapshot, nil) // Parse the public key that should be used for the block reward. - blockRewardOutputPublicKey, err := btcec.ParsePubKey(blockRewardOutput.PublicKey) + blockRewardOutputPublicKey, err := btcec.ParsePubKey(blockRewardOutput.PublicKey, btcec.S256()) if err != nil { return nil, nil, nil, errors.Wrapf(err, "DeSoBlockProducer._getBlockTemplate: problem parsing block reward output public key: ") } @@ -307,7 +306,7 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) if blockRet.Header.Height >= uint64(desoBlockProducer.params.ForkHeights.BlockRewardPatchBlockHeight) { if txnInBlock.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { // Parse the transactor's public key to compare with the block reward output public key. 
- transactorPublicKey, err := btcec.ParsePubKey(txnInBlock.PublicKey) + transactorPublicKey, err := btcec.ParsePubKey(txnInBlock.PublicKey, btcec.S256()) if err != nil { return nil, nil, nil, errors.Wrapf(err, @@ -467,7 +466,7 @@ func RecomputeBlockRewardWithBlockRewardOutputPublicKey( blockRewardOutputPublicKeyBytes []byte, params *DeSoParams, ) (*MsgDeSoBlock, error) { - blockRewardOutputPublicKey, err := btcec.ParsePubKey(blockRewardOutputPublicKeyBytes) + blockRewardOutputPublicKey, err := btcec.ParsePubKey(blockRewardOutputPublicKeyBytes, btcec.S256()) if err != nil { return nil, errors.Wrap( fmt.Errorf("RecomputeBlockRewardWithBlockRewardOutpubPublicKey: Problem parsing block reward output public key: %v", err), "") @@ -478,7 +477,7 @@ func RecomputeBlockRewardWithBlockRewardOutputPublicKey( totalFees := uint64(0) for _, txn := range block.Txns[1:] { if txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { - transactorPublicKey, err := btcec.ParsePubKey(txn.PublicKey) + transactorPublicKey, err := btcec.ParsePubKey(txn.PublicKey, btcec.S256()) if err != nil { glog.Errorf("DeSoMiner._startThread: Error parsing transactor public key: %v", err) continue @@ -619,7 +618,11 @@ func (desoBlockProducer *DeSoBlockProducer) SignBlock(blockFound *MsgDeSoBlock) fmt.Errorf("Error computing block hash from header submitted: %v", err), "") } - signature := ecdsa.Sign(desoBlockProducer.blockProducerPrivateKey, blockHash[:]) + signature, err := desoBlockProducer.blockProducerPrivateKey.Sign(blockHash[:]) + if err != nil { + return errors.Wrap( + fmt.Errorf("Error signing block: %v", err), "") + } // If we get here, we now have a valid signature for the block. // Embed the signature into the block. 
diff --git a/lib/block_view.go b/lib/block_view.go index d475d986e..1af470167 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -10,17 +10,15 @@ import ( "strings" "time" - "github.com/btcsuite/btcd/btcec/v2" - "github.com/dgraph-io/badger/v4" - + "github.com/btcsuite/btcd/wire" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections/bitset" - - "github.com/btcsuite/btcd/wire" "github.com/holiman/uint256" "github.com/davecgh/go-spew/spew" + "github.com/btcsuite/btcd/btcec" + "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/pkg/errors" ) @@ -1856,7 +1854,7 @@ func (bav *UtxoView) _verifySignature(txn *MsgDeSoTxn, blockHeight uint32) (_der } // If we got a derived key then try parsing it. if isDerived { - derivedPk, err = btcec.ParsePubKey(derivedPkBytes) + derivedPk, err = btcec.ParsePubKey(derivedPkBytes, btcec.S256()) if err != nil { return nil, fmt.Errorf("%v %v", RuleErrorDerivedKeyInvalidExtraData, RuleErrorDerivedKeyInvalidRecoveryId) } @@ -1864,7 +1862,7 @@ func (bav *UtxoView) _verifySignature(txn *MsgDeSoTxn, blockHeight uint32) (_der // Get the owner public key and attempt turning it into *btcec.PublicKey. 
ownerPkBytes := txn.PublicKey - ownerPk, err := btcec.ParsePubKey(ownerPkBytes) + ownerPk, err := btcec.ParsePubKey(ownerPkBytes, btcec.S256()) if err != nil { return nil, errors.Wrapf(err, "_verifySignature: Problem parsing owner public key: ") } @@ -4167,7 +4165,7 @@ func (bav *UtxoView) _connectSingleTxn( "ConnectTransaction: TxnTypeUnlockStake must correspond to OperationTypeUnlockStake", ) } - totalLockedAmountNanos := uint256.NewInt(0) + totalLockedAmountNanos := uint256.NewInt() for _, prevLockedStakeEntry := range utxoOp.PrevLockedStakeEntries { totalLockedAmountNanos, err = SafeUint256().Add( totalLockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos, @@ -4194,7 +4192,7 @@ func (bav *UtxoView) _connectSingleTxn( "ConnectTransaction: TxnTypeCoinUnlock must correspond to OperationTypeCoinUnlock", ) } - totalLockedDESOAmountNanos := uint256.NewInt(0) + totalLockedDESOAmountNanos := uint256.NewInt() for _, prevLockedBalanceEntry := range utxoOp.PrevLockedBalanceEntries { totalLockedDESOAmountNanos, err = SafeUint256().Add( totalLockedDESOAmountNanos, &prevLockedBalanceEntry.BalanceBaseUnits) @@ -4365,7 +4363,7 @@ func (bav *UtxoView) ConnectBlock( } var err error blockRewardOutputPublicKey, err = - btcec.ParsePubKey(desoBlock.Txns[0].TxOutputs[0].PublicKey) + btcec.ParsePubKey(desoBlock.Txns[0].TxOutputs[0].PublicKey, btcec.S256()) if err != nil { return nil, fmt.Errorf("ConnectBlock: Problem parsing block reward public key: %v", err) } @@ -4405,7 +4403,7 @@ func (bav *UtxoView) ConnectBlock( if blockHeight >= uint64(bav.Params.ForkHeights.BlockRewardPatchBlockHeight) && txn.TxnMeta.GetTxnType() != TxnTypeBlockReward && txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { - transactorPubKey, err := btcec.ParsePubKey(txn.PublicKey) + transactorPubKey, err := btcec.ParsePubKey(txn.PublicKey, btcec.S256()) if err != nil { return nil, fmt.Errorf("ConnectBlock: Problem parsing transactor public key: %v", err) } diff --git 
a/lib/block_view_access_group_members_test.go b/lib/block_view_access_group_members_test.go index ec8799230..040bcb5b2 100644 --- a/lib/block_view_access_group_members_test.go +++ b/lib/block_view_access_group_members_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/golang/glog" "github.com/pkg/errors" "github.com/stretchr/testify/require" @@ -67,7 +67,7 @@ func TestAccessGroupMembersAdd(t *testing.T) { randomMemberPublicKeys := []*PublicKey{} randomMemberGroupKeys := []*GroupKeyName{} for ii := 0; ii < randomMemberCounter; ii++ { - privateKey, err := btcec.NewPrivateKey() + privateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) privateKeyBase58Check := Base58CheckEncode( privateKey.Serialize(), true, &DeSoTestnetParams) @@ -113,16 +113,16 @@ func TestAccessGroupMembersAdd(t *testing.T) { initChainCallback: initChainCallback, } - groupPriv1, err := btcec.NewPrivateKey() + groupPriv1, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk1 := groupPriv1.PubKey().SerializeCompressed() - groupPriv2, err := btcec.NewPrivateKey() + groupPriv2, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk2 := groupPriv2.PubKey().SerializeCompressed() _ = groupPk2 - groupPriv3, err := btcec.NewPrivateKey() + groupPriv3, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk3 := groupPriv3.PubKey().SerializeCompressed() _ = groupPk3 @@ -410,7 +410,7 @@ func TestAccessGroupMembersRemove(t *testing.T) { randomMemberPublicKeys1 := []*PublicKey{} randomMemberPublicKeys2 := []*PublicKey{} for ii := 0; ii < randomMemberCounter1; ii++ { - privateKey, err := btcec.NewPrivateKey() + privateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) privateKeyBase58Check := Base58CheckEncode( privateKey.Serialize(), true, &DeSoTestnetParams) @@ -420,7 +420,7 @@ func TestAccessGroupMembersRemove(t *testing.T) { randomMemberPublicKeys1 = 
append(randomMemberPublicKeys1, publicKey) } for ii := 0; ii < randomMemberCounter2; ii++ { - privateKey, err := btcec.NewPrivateKey() + privateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) privateKeyBase58Check := Base58CheckEncode( privateKey.Serialize(), true, &DeSoTestnetParams) @@ -459,17 +459,17 @@ func TestAccessGroupMembersRemove(t *testing.T) { initChainCallback: initChainCallback, } - groupPriv1, err := btcec.NewPrivateKey() + groupPriv1, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk1 := groupPriv1.PubKey().SerializeCompressed() _ = groupPk1 - groupPriv2, err := btcec.NewPrivateKey() + groupPriv2, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk2 := groupPriv2.PubKey().SerializeCompressed() _ = groupPk2 - groupPriv3, err := btcec.NewPrivateKey() + groupPriv3, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk3 := groupPriv3.PubKey().SerializeCompressed() _ = groupPk3 @@ -903,17 +903,17 @@ func TestAccessGroupMembersUpdate(t *testing.T) { initChainCallback: initChainCallback, } - groupPriv1, err := btcec.NewPrivateKey() + groupPriv1, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk1 := groupPriv1.PubKey().SerializeCompressed() _ = groupPk1 - groupPriv2, err := btcec.NewPrivateKey() + groupPriv2, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk2 := groupPriv2.PubKey().SerializeCompressed() _ = groupPk2 - groupPriv3, err := btcec.NewPrivateKey() + groupPriv3, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk3 := groupPriv3.PubKey().SerializeCompressed() _ = groupPk3 @@ -1641,7 +1641,7 @@ func TestAccessGroupMembersTxnWithDerivedKey(t *testing.T) { require.NoError(t, err) senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) require.NoError(t, err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) // Helper funcs 
_submitAuthorizeDerivedKeyTxn := func( diff --git a/lib/block_view_access_group_test.go b/lib/block_view_access_group_test.go index f6a1ffaa4..b5ac3641e 100644 --- a/lib/block_view_access_group_test.go +++ b/lib/block_view_access_group_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/pkg/errors" "github.com/stretchr/testify/require" ) @@ -71,16 +71,16 @@ func TestAccessGroup(t *testing.T) { initChainCallback: initChainCallback, } - groupPriv1, err := btcec.NewPrivateKey() + groupPriv1, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk1 := groupPriv1.PubKey().SerializeCompressed() - groupPriv2, err := btcec.NewPrivateKey() + groupPriv2, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk2 := groupPriv2.PubKey().SerializeCompressed() _ = groupPk2 - groupPriv3, err := btcec.NewPrivateKey() + groupPriv3, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk3 := groupPriv3.PubKey().SerializeCompressed() _ = groupPk3 @@ -522,7 +522,7 @@ func TestAccessGroupTxnWithDerivedKey(t *testing.T) { require.NoError(t, err) senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) require.NoError(t, err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) // Helper funcs _submitAuthorizeDerivedKeyTxn := func(accessGroupLimitKey AccessGroupLimitKey, count int) string { diff --git a/lib/block_view_association_test.go b/lib/block_view_association_test.go index a1e278edb..2cc28d3c9 100644 --- a/lib/block_view_association_test.go +++ b/lib/block_view_association_test.go @@ -7,7 +7,7 @@ import ( "sort" "testing" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/stretchr/testify/require" ) @@ -2255,7 +2255,7 @@ func _testAssociationsWithDerivedKey(t *testing.T) { require.NoError(t, err) senderPrivBytes, _, err := 
Base58CheckDecode(senderPrivString) require.NoError(t, err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) // Helper funcs _submitAuthorizeDerivedKeyTxn := func(txnType TxnType, associationLimitKey AssociationLimitKey, count int) (string, error) { diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index bc15c5cf5..278f256cb 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -3,9 +3,10 @@ package lib import ( "bytes" "fmt" - "github.com/btcsuite/btcd/btcec/v2" - "github.com/pkg/errors" "io" + + "github.com/btcsuite/btcd/btcec" + "github.com/pkg/errors" ) // @@ -449,7 +450,7 @@ func filterOutBlockRewardRecipientFees(txns []*MsgDeSoTxn, publicRewardPublicKey for _, txn := range txns { // If the transaction is performed by any public key other than block reward recipient transaction, // add the fees to the total. - transactorPublicKey, err := btcec.ParsePubKey(txn.PublicKey) + transactorPublicKey, err := btcec.ParsePubKey(txn.PublicKey, btcec.S256()) if err != nil { return 0, errors.Wrap(err, "filterBlockRewardRecipientFees: failed to parse public key") } diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index 8ab7afc63..6f50bff4f 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -2,8 +2,7 @@ package lib import ( "bytes" - - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v4" "github.com/pkg/errors" "github.com/stretchr/testify/assert" @@ -568,7 +567,7 @@ func _generateUnsignedDependentAtomicTransactions( // Generate the atomic transactions. for ii := 0; ii < numberOfTransactions-1; ii++ { // Generate a new public/private key pair. 
- privKey_ii, err := btcec.NewPrivateKey() + privKey_ii, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(testMeta.t, err) pubKey_ii := privKey_ii.PubKey() receiverPrivateKeysBase58 = append(receiverPrivateKeysBase58, Base58CheckEncode( diff --git a/lib/block_view_balance_entry.go b/lib/block_view_balance_entry.go index 1d797e29e..7031d634b 100644 --- a/lib/block_view_balance_entry.go +++ b/lib/block_view_balance_entry.go @@ -4,7 +4,7 @@ import ( "fmt" "reflect" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" @@ -210,7 +210,7 @@ func (bav *UtxoView) GetBalanceEntry(holderPkid *PKID, creatorPkid *PKID, isDAOC return &BalanceEntry{ CreatorPKID: creatorPkid, HODLerPKID: holderPkid, - BalanceNanos: *uint256.NewInt(0), + BalanceNanos: *uint256.NewInt(), } } return balanceEntry @@ -276,7 +276,7 @@ func (bav *UtxoView) HelpConnectCoinTransfer( txMeta := txn.TxnMeta.(*CreatorCoinTransferMetadataa) receiverPublicKey = txMeta.ReceiverPublicKey profilePublicKey = txMeta.ProfilePublicKey - coinToTransferNanos = uint256.NewInt(txMeta.CreatorCoinToTransferNanos) + coinToTransferNanos = uint256.NewInt().SetUint64(txMeta.CreatorCoinToTransferNanos) } // Connect basic txn to get the total input and the total output without @@ -298,7 +298,7 @@ func (bav *UtxoView) HelpConnectCoinTransfer( if len(receiverPublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, RuleErrorCoinTransferInvalidReceiverPubKeySize } - if _, err = btcec.ParsePubKey(receiverPublicKey); err != nil { + if _, err = btcec.ParsePubKey(receiverPublicKey, btcec.S256()); err != nil { return 0, 0, nil, errors.Wrap( RuleErrorCoinTransferInvalidReceiverPubKey, err.Error()) } @@ -313,7 +313,7 @@ func (bav *UtxoView) HelpConnectCoinTransfer( if len(profilePublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, RuleErrorCoinTransferInvalidProfilePubKeySize } - if _, err = 
btcec.ParsePubKey(profilePublicKey); err != nil { + if _, err = btcec.ParsePubKey(profilePublicKey, btcec.S256()); err != nil { return 0, 0, nil, errors.Wrap( RuleErrorCoinTransferInvalidProfilePubKey, err.Error()) } @@ -394,7 +394,7 @@ func (bav *UtxoView) HelpConnectCoinTransfer( receiverBalanceEntry = &BalanceEntry{ HODLerPKID: receiverPKID.PKID, CreatorPKID: creatorPKID.PKID, - BalanceNanos: *uint256.NewInt(0), + BalanceNanos: *uint256.NewInt(), } } @@ -402,10 +402,10 @@ func (bav *UtxoView) HelpConnectCoinTransfer( prevSenderBalanceEntry := *senderBalanceEntry // Subtract the number of coins being given from the sender and add them to the receiver. - senderBalanceEntry.BalanceNanos = *uint256.NewInt(0).Sub( + senderBalanceEntry.BalanceNanos = *uint256.NewInt().Sub( &senderBalanceEntry.BalanceNanos, coinToTransferNanos) - receiverBalanceEntry.BalanceNanos = *uint256.NewInt(0).Add( + receiverBalanceEntry.BalanceNanos = *uint256.NewInt().Add( &receiverBalanceEntry.BalanceNanos, coinToTransferNanos) @@ -419,10 +419,10 @@ func (bav *UtxoView) HelpConnectCoinTransfer( // // CreatorCoins can't exceed a uint64 if senderBalanceEntry.BalanceNanos.Uint64() < bav.Params.CreatorCoinAutoSellThresholdNanos { - receiverBalanceEntry.BalanceNanos = *uint256.NewInt(0).Add( + receiverBalanceEntry.BalanceNanos = *uint256.NewInt().Add( &receiverBalanceEntry.BalanceNanos, &senderBalanceEntry.BalanceNanos) - senderBalanceEntry.BalanceNanos = *uint256.NewInt(0) + senderBalanceEntry.BalanceNanos = *uint256.NewInt() senderBalanceEntry.HasPurchased = false } } @@ -434,7 +434,7 @@ func (bav *UtxoView) HelpConnectCoinTransfer( bav._deleteBalanceEntryMappings(receiverBalanceEntry, receiverPublicKey, profilePublicKey, isDAOCoin) bav._setBalanceEntryMappings(receiverBalanceEntry, isDAOCoin) - if senderBalanceEntry.BalanceNanos.Sign() > 0 { + if senderBalanceEntry.BalanceNanos.Gt(uint256.NewInt()) { bav._setBalanceEntryMappings(senderBalanceEntry, isDAOCoin) } diff --git 
a/lib/block_view_bitcoin.go b/lib/block_view_bitcoin.go index 96e68aeab..e4ea9667e 100644 --- a/lib/block_view_bitcoin.go +++ b/lib/block_view_bitcoin.go @@ -2,11 +2,11 @@ package lib import ( "fmt" - "github.com/btcsuite/btcd/btcec/v2" - "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" "github.com/pkg/errors" "math" "math/big" diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index 99352cf84..4c669a5eb 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -11,7 +11,7 @@ import ( "strings" "testing" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" merkletree "github.com/deso-protocol/go-merkle-tree" "github.com/dgraph-io/badger/v4" @@ -163,7 +163,7 @@ func _privStringToKeys(t *testing.T, privString string) (*btcec.PrivateKey, *btc result, _, err := Base58CheckDecodePrefix(privString, 1) require.NoError(err) result = result[:len(result)-1] - return btcec.PrivKeyFromBytes(result) + return btcec.PrivKeyFromBytes(btcec.S256(), result) } func _updateUSDCentsPerBitcoinExchangeRate(t *testing.T, chain *Blockchain, db *badger.DB, diff --git a/lib/block_view_creator_coin.go b/lib/block_view_creator_coin.go index f1405ebb7..d5a9de4e9 100644 --- a/lib/block_view_creator_coin.go +++ b/lib/block_view_creator_coin.go @@ -2,7 +2,7 @@ package lib import ( "fmt" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/holiman/uint256" "github.com/pkg/errors" "math" @@ -293,7 +293,7 @@ func (bav *UtxoView) _disconnectCreatorCoin( creatorBalanceEntry = &BalanceEntry{ HODLerPKID: creatorPKID, CreatorPKID: creatorPKID, - BalanceNanos: *uint256.NewInt(0), + BalanceNanos: *uint256.NewInt(), } } @@ -789,7 +789,7 @@ func (bav *UtxoView) HelpConnectCreatorCoinBuy( 
existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos, creatorCoinToMintNanos) } // Setting the value in this way is guaranteed to not mess up the prevCoinEntry - existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos = *uint256.NewInt( + existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().SetUint64( existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos.Uint64() + creatorCoinToMintNanos) // Calculate the *Creator Coin nanos* to give as a founder reward. @@ -877,7 +877,7 @@ func (bav *UtxoView) HelpConnectCreatorCoinBuy( HODLerPKID: hodlerPKID, // The creator is the owner of the profile that corresponds to the coin. CreatorPKID: creatorPKID, - BalanceNanos: *uint256.NewInt(0), + BalanceNanos: *uint256.NewInt(), } } @@ -908,7 +908,7 @@ func (bav *UtxoView) HelpConnectCreatorCoinBuy( creatorBalanceEntry = &BalanceEntry{ HODLerPKID: hodlerPKID, CreatorPKID: creatorPKID, - BalanceNanos: *uint256.NewInt(0), + BalanceNanos: *uint256.NewInt(), } } } @@ -954,8 +954,8 @@ func (bav *UtxoView) HelpConnectCreatorCoinBuy( } // Finally increment the buyerBalanceEntry.BalanceNanos to reflect // the purchased coinsBuyerGetsNanos. If coinsBuyerGetsNanos is greater than 0, we set HasPurchased to true. 
- buyerBalanceEntry.BalanceNanos = *uint256.NewInt(0).Add( - &buyerBalanceEntry.BalanceNanos, uint256.NewInt(coinsBuyerGetsNanos)) + buyerBalanceEntry.BalanceNanos = *uint256.NewInt().Add( + &buyerBalanceEntry.BalanceNanos, uint256.NewInt().SetUint64(coinsBuyerGetsNanos)) buyerBalanceEntry.HasPurchased = true // If the creator is buying their own coin, this will just be modifying @@ -990,9 +990,9 @@ func (bav *UtxoView) HelpConnectCreatorCoinBuy( bav._setProfileEntryMappings(existingProfileEntry) } // CreatorCoin balances can't exceed uint64 - creatorBalanceEntry.BalanceNanos = *uint256.NewInt(0).Add( + creatorBalanceEntry.BalanceNanos = *uint256.NewInt().Add( &creatorBalanceEntry.BalanceNanos, - uint256.NewInt(creatorCoinFounderRewardNanos)) + uint256.NewInt().SetUint64(creatorCoinFounderRewardNanos)) // At this point the balances for the buyer and the creator should be correct // so set the mappings in the view. @@ -1251,7 +1251,7 @@ func (bav *UtxoView) HelpConnectCreatorCoinSell( "is selling %v exceeds CreatorCoin nanos in circulation %v", creatorCoinToSellNanos, existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos) } - existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos = *uint256.NewInt( + existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().SetUint64( existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos.Uint64() - creatorCoinToSellNanos) // Check if this is a complete sell of the seller's remaining creator coins @@ -1266,7 +1266,7 @@ func (bav *UtxoView) HelpConnectCreatorCoinSell( // It's okay to modify these values because they are saved in the PrevCoinEntry. if existingProfileEntry.CreatorCoinEntry.NumberOfHolders == 0 { existingProfileEntry.CreatorCoinEntry.DeSoLockedNanos = 0 - existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos = *uint256.NewInt(0) + existingProfileEntry.CreatorCoinEntry.CoinsInCirculationNanos = *uint256.NewInt() } // Save the seller's balance before we modify it. 
We don't need to save the @@ -1281,9 +1281,9 @@ func (bav *UtxoView) HelpConnectCreatorCoinSell( // below CreatorCoinAutoSellThresholdNanos. // // CreatorCoin balances can't exceed uint64 - sellerBalanceEntry.BalanceNanos = *uint256.NewInt(0).Sub( + sellerBalanceEntry.BalanceNanos = *uint256.NewInt().Sub( &sellerBalanceEntry.BalanceNanos, - uint256.NewInt(creatorCoinToSellNanos)) + uint256.NewInt().SetUint64(creatorCoinToSellNanos)) // If the seller's balance will be zero after this transaction, set HasPurchased to false // diff --git a/lib/block_view_dao_coin.go b/lib/block_view_dao_coin.go index 36ef321c5..9e0e7930c 100644 --- a/lib/block_view_dao_coin.go +++ b/lib/block_view_dao_coin.go @@ -2,7 +2,7 @@ package lib import ( "fmt" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/holiman/uint256" "github.com/pkg/errors" "reflect" @@ -68,7 +68,7 @@ func (bav *UtxoView) _disconnectDAOCoin( transactorBalanceEntry = &BalanceEntry{ CreatorPKID: creatorPKID, HODLerPKID: hodlerPKID, - BalanceNanos: *uint256.NewInt(0), + BalanceNanos: *uint256.NewInt(), } } @@ -84,7 +84,7 @@ func (bav *UtxoView) _disconnectDAOCoin( return fmt.Errorf("_disconnectDAOCoin: Must mint more than zero coins; this should never happen") } // Coins minted + prev coin entry's coins in circulation matches new coin entry's coins in circulation - CoinsInCirculationPlusCoinsToMintNanos := uint256.NewInt(0).Add( + CoinsInCirculationPlusCoinsToMintNanos := uint256.NewInt().Add( &operationData.PrevCoinEntry.CoinsInCirculationNanos, &txMeta.CoinsToMintNanos) if !existingProfileEntry.DAOCoinEntry.CoinsInCirculationNanos.Eq( @@ -97,7 +97,7 @@ func (bav *UtxoView) _disconnectDAOCoin( } // Check that creator's current balance is equal to previous balance plus coins to mint. 
Note: the creator is // the transactor in this case - PrevBalanceNanosPlusCoinsToMintNanos := uint256.NewInt(0).Add( + PrevBalanceNanosPlusCoinsToMintNanos := uint256.NewInt().Add( &operationData.PrevCreatorBalanceEntry.BalanceNanos, &txMeta.CoinsToMintNanos) if !transactorBalanceEntry.BalanceNanos.Eq(PrevBalanceNanosPlusCoinsToMintNanos) { @@ -118,7 +118,7 @@ func (bav *UtxoView) _disconnectDAOCoin( return fmt.Errorf("_disconnctDAOCoin: Must burn more than zero coins; this should never happen") } // prev coin entry's coins in circulation minus coins burned matches new coin entry's coins in circulation - PrevCoinsInCirculationMinusCoinsToBurnNanos := uint256.NewInt(0).Sub( + PrevCoinsInCirculationMinusCoinsToBurnNanos := uint256.NewInt().Sub( &operationData.PrevCoinEntry.CoinsInCirculationNanos, &txMeta.CoinsToBurnNanos) if !existingProfileEntry.DAOCoinEntry.CoinsInCirculationNanos.Eq( @@ -133,7 +133,7 @@ func (bav *UtxoView) _disconnectDAOCoin( // prev balance entry - coins burned matches the new balance entry's balance // Check that transactor's current balance is equal to previous balance minus coins to mint. Note: the creator is // the transactor in this case - PrevBalanceNanosMinusCoinsToBurnNanos := uint256.NewInt(0).Sub( + PrevBalanceNanosMinusCoinsToBurnNanos := uint256.NewInt().Sub( &operationData.PrevTransactorBalanceEntry.BalanceNanos, &txMeta.CoinsToBurnNanos) if !transactorBalanceEntry.BalanceNanos.Eq(PrevBalanceNanosMinusCoinsToBurnNanos) { @@ -227,7 +227,7 @@ func (bav *UtxoView) _disconnectDAOCoinTransfer( PkToStringBoth(currentTxn.PublicKey), PkToStringBoth(txMeta.ProfilePublicKey)) } senderPrevBalanceNanos := operationData.PrevSenderBalanceEntry.BalanceNanos - senderCurrBalanceNanos := *uint256.NewInt(0) + senderCurrBalanceNanos := *uint256.NewInt() // Since the sender may have given away their whole balance, their BalanceEntry can be nil. 
if senderBalanceEntry != nil && !senderBalanceEntry.isDeleted { // This assignment is OK because we never modify values in-place @@ -245,7 +245,7 @@ func (bav *UtxoView) _disconnectDAOCoinTransfer( PkToStringBoth(currentTxn.PublicKey), PkToStringBoth(txMeta.ProfilePublicKey)) } receiverCurrBalanceNanos := receiverBalanceEntry.BalanceNanos - receiverPrevBalanceNanos := *uint256.NewInt(0) + receiverPrevBalanceNanos := *uint256.NewInt() if operationData.PrevReceiverBalanceEntry != nil { // This assignment is OK because we never modify values in-place receiverPrevBalanceNanos = operationData.PrevReceiverBalanceEntry.BalanceNanos @@ -266,8 +266,8 @@ func (bav *UtxoView) _disconnectDAOCoinTransfer( } // Sanity check the sender's increase equals the receiver's decrease after disconnect. - senderBalanceIncrease := uint256.NewInt(0).Sub(&senderPrevBalanceNanos, &senderCurrBalanceNanos) - receiverBalanceDecrease := uint256.NewInt(0).Sub(&receiverCurrBalanceNanos, &receiverPrevBalanceNanos) + senderBalanceIncrease := uint256.NewInt().Sub(&senderPrevBalanceNanos, &senderCurrBalanceNanos) + receiverBalanceDecrease := uint256.NewInt().Sub(&receiverCurrBalanceNanos, &receiverPrevBalanceNanos) if !senderBalanceIncrease.Eq(receiverBalanceDecrease) { return fmt.Errorf("_disconnectDAOCoinTransfer: Sender's balance increase "+ "of %d will not equal the receiver's balance decrease of %v after disconnect.", @@ -331,7 +331,7 @@ func (bav *UtxoView) HelpConnectDAOCoinInitialization(txn *MsgDeSoTxn, txHash *B return 0, 0, nil, nil, RuleErrorDAOCoinInvalidPubKeySize } - if _, err = btcec.ParsePubKey(txMeta.ProfilePublicKey); err != nil { + if _, err = btcec.ParsePubKey(txMeta.ProfilePublicKey, btcec.S256()); err != nil { return 0, 0, nil, nil, errors.Wrap(RuleErrorDAOCoinInvalidPubKey, err.Error()) } @@ -381,14 +381,14 @@ func (bav *UtxoView) HelpConnectDAOCoinMint( // // if CoinsInCirculationNanos > MaxUint256 - CoinsToMintNanos if 
creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos.Gt( - uint256.NewInt(0).Sub(MaxUint256, &txMeta.CoinsToMintNanos)) { + uint256.NewInt().Sub(MaxUint256, &txMeta.CoinsToMintNanos)) { return 0, 0, nil, errors.Wrapf( RuleErrorOverflowWhileMintingDAOCoins, fmt.Sprintf( "_connectDAOCoin: Overflow while summing CoinsInCirculationNanos and CoinsToMinNanos: %v, %v", creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos, txMeta.CoinsToMintNanos)) } // CoinsInCirculationNanos = CoinsInCirculationNanos + CoinsToMintNanos - creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt(0).Add( + creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().Add( &creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos, &txMeta.CoinsToMintNanos) // Increase Balance entry for owner @@ -398,7 +398,7 @@ func (bav *UtxoView) HelpConnectDAOCoinMint( profileOwnerBalanceEntry = &BalanceEntry{ HODLerPKID: hodlerPKID, CreatorPKID: creatorPKID, - BalanceNanos: *uint256.NewInt(0), + BalanceNanos: *uint256.NewInt(), } } @@ -407,14 +407,14 @@ func (bav *UtxoView) HelpConnectDAOCoinMint( // Check for overflow of the uint256 // if profileOwnerBalanceEntry.BalanceNanos > MaxUint256-txMeta.CoinsToMintNanos - if profileOwnerBalanceEntry.BalanceNanos.Gt(uint256.NewInt(0).Sub( + if profileOwnerBalanceEntry.BalanceNanos.Gt(uint256.NewInt().Sub( MaxUint256, &txMeta.CoinsToMintNanos)) { return 0, 0, nil, fmt.Errorf( "_connectDAOCoin: Overflow while summing profileOwnerBalanceEntry.BalanceNanos and CoinsToMintNanos: %v, %v", profileOwnerBalanceEntry.BalanceNanos, txMeta.CoinsToMintNanos) } - profileOwnerBalanceEntry.BalanceNanos = *uint256.NewInt(0).Add( + profileOwnerBalanceEntry.BalanceNanos = *uint256.NewInt().Add( &profileOwnerBalanceEntry.BalanceNanos, &txMeta.CoinsToMintNanos) bav._setDAOCoinBalanceEntryMappings(profileOwnerBalanceEntry) @@ -496,13 +496,13 @@ func (bav *UtxoView) HelpConnectDAOCoinBurn( // Now we're safe to burn the coins // Reduce the total 
number of coins in circulation prevCoinEntry := creatorProfileEntry.DAOCoinEntry - creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt(0).Sub( + creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().Sub( &creatorProfileEntry.DAOCoinEntry.CoinsInCirculationNanos, &daoCoinToBurn) // Burn them from the burner's balance entry prevTransactorBalanceEntry := *burnerBalanceEntry - burnerBalanceEntry.BalanceNanos = *uint256.NewInt(0).Sub( + burnerBalanceEntry.BalanceNanos = *uint256.NewInt().Sub( &burnerBalanceEntry.BalanceNanos, &daoCoinToBurn) @@ -517,7 +517,7 @@ func (bav *UtxoView) HelpConnectDAOCoinBurn( // Set the new BalanceEntry in our mappings for the burner and set the // ProfileEntry mappings as well since everything is up to date. - if burnerBalanceEntry.BalanceNanos.Gt(uint256.NewInt(0)) { + if burnerBalanceEntry.BalanceNanos.Gt(uint256.NewInt()) { bav._setDAOCoinBalanceEntryMappings(burnerBalanceEntry) } bav._setProfileEntryMappings(creatorProfileEntry) diff --git a/lib/block_view_dao_coin_limit_order.go b/lib/block_view_dao_coin_limit_order.go index db9c036a9..5379be9f1 100644 --- a/lib/block_view_dao_coin_limit_order.go +++ b/lib/block_view_dao_coin_limit_order.go @@ -53,7 +53,7 @@ func (bav *UtxoView) getAdjustedDAOCoinBalanceForUserInBaseUnits( return nil, err } return adjustBalance( - uint256.NewInt(transactorDESOBalanceNanos), delta) + uint256.NewInt().SetUint64(transactorDESOBalanceNanos), delta) } // If we get here, we know we're dealing with a DAO coin now. 
@@ -62,7 +62,7 @@ func (bav *UtxoView) getAdjustedDAOCoinBalanceForUserInBaseUnits( // If the balance entry doesn't exist or is deleted then return zero if transactorBalanceEntry == nil || transactorBalanceEntry.isDeleted { - return adjustBalance(uint256.NewInt(0), delta) + return adjustBalance(uint256.NewInt(), delta) } // Make a copy and return just to be safe @@ -128,7 +128,7 @@ func (bav *UtxoView) balanceChange( oldBalanceEntry = &BalanceEntry{ HODLerPKID: userPKID, CreatorPKID: daoCoinPKID, - BalanceNanos: *uint256.NewInt(0), + BalanceNanos: *uint256.NewInt(), } } } @@ -483,7 +483,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( } if updatedTransactorOrderQuantityToFill.IsZero() { // Transactor's order was fully filled. - transactorOrder.QuantityToFillInBaseUnits = uint256.NewInt(0) + transactorOrder.QuantityToFillInBaseUnits = uint256.NewInt() orderFilled = true transactorOrderFilledOrder.IsFulfilled = true } else { @@ -916,7 +916,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( newBalanceEntry = &BalanceEntry{ HODLerPKID: &userPKID, CreatorPKID: &daoCoinPKID, - BalanceNanos: *uint256.NewInt(0), + BalanceNanos: *uint256.NewInt(), } } else { // Otherwise, we create a copy of the previous balance entry before updating. @@ -1252,7 +1252,7 @@ func _calculateDAOCoinsTransferredInLimitOrderMatch( transactorQuantityToFillInBaseUnits.Eq(matchingOrderQuantityToBuy) { // The matching order fully fills the transactor's order, so there won't be anything // left to fill after this order is matched. - updatedTransactorQuantityToFillInBaseUnits := uint256.NewInt(0) + updatedTransactorQuantityToFillInBaseUnits := uint256.NewInt() // The transactor quantity specifies the amount of coin they want to sell // and their order is fully filled. We use the matching order's exchange @@ -1296,7 +1296,7 @@ func _calculateDAOCoinsTransferredInLimitOrderMatch( // than the other way around. 
// There is nothing left in the matching order - updatedMatchingQuantityToFillInBaseUnits := uint256.NewInt(0) + updatedMatchingQuantityToFillInBaseUnits := uint256.NewInt() // We calculate what is left over for the transactor's order. Note that matchingOrderQuantityToBuy // can't overflow because we checked it earlier. @@ -1335,7 +1335,7 @@ func _calculateDAOCoinsTransferredInLimitOrderMatch( if transactorQuantityToFillInBaseUnits.Lt(matchingOrderQuantityToSell) || transactorQuantityToFillInBaseUnits.Eq(matchingOrderQuantityToSell) { // The matching order fulfills the transactor's order. - updatedTransactorQuantityToFillInBaseUnits := uint256.NewInt(0) + updatedTransactorQuantityToFillInBaseUnits := uint256.NewInt() // The transactor quantity specifies the amount of coin they want to buy // and their order is fully fulfilled. @@ -1370,7 +1370,7 @@ func _calculateDAOCoinsTransferredInLimitOrderMatch( // in terms of quantity. // The matching order has no quantity left after this match. - updatedMatchingQuantityToFillInBaseUnits := uint256.NewInt(0) + updatedMatchingQuantityToFillInBaseUnits := uint256.NewInt() // We calculate what is left over for the transactor's order. updatedTransactorQuantityToFillInBaseUnits, err := SafeUint256().Sub( @@ -1400,7 +1400,7 @@ func _calculateDAOCoinsTransferredInLimitOrderMatch( if transactorQuantityToFillInBaseUnits.Lt(matchingOrder.QuantityToFillInBaseUnits) || transactorQuantityToFillInBaseUnits.Eq(matchingOrder.QuantityToFillInBaseUnits) { // The matching order will fully fill the transactor's order. - updatedTransactorQuantityToFillInBaseUnits := uint256.NewInt(0) + updatedTransactorQuantityToFillInBaseUnits := uint256.NewInt() // We calculate what is left for the matching order. updatedMatchingQuantityToFillInBaseUnits, err := SafeUint256().Sub( @@ -1445,7 +1445,7 @@ func _calculateDAOCoinsTransferredInLimitOrderMatch( } // If we get here, the transactor's order fully covers the matching order. 
- updatedMatchingQuantityToFillInBaseUnits := uint256.NewInt(0) + updatedMatchingQuantityToFillInBaseUnits := uint256.NewInt() // We calculate what is left for the transactor. updatedTransactorQuantityToFillInBaseUnits, err := SafeUint256().Sub( @@ -1785,7 +1785,7 @@ func (bav *UtxoView) IsValidDAOCoinLimitOrder(order *DAOCoinLimitOrderEntry) err if err != nil { return err } - if baseUnitsToBuy.IsZero() { + if baseUnitsToBuy.Eq(uint256.NewInt()) { return errors.Wrapf(RuleErrorDAOCoinLimitOrderTotalCostIsLessThanOneNano, "baseUnitsToBuy: ") } // If buying $DESO, validate that qty to buy is less than the max uint64. @@ -1798,7 +1798,7 @@ func (bav *UtxoView) IsValidDAOCoinLimitOrder(order *DAOCoinLimitOrderEntry) err if err != nil { return err } - if baseUnitsToSell.IsZero() { + if baseUnitsToSell.Eq(uint256.NewInt()) { return errors.Wrapf(RuleErrorDAOCoinLimitOrderTotalCostIsLessThanOneNano, "baseUnitsToSell: ") } @@ -2013,7 +2013,7 @@ func (bav *UtxoView) GetDESONanosToFillOrder(transactorOrder *DAOCoinLimitOrderE // and add that as an additional fee when adding inputs and outputs. var lastSeenOrder *DAOCoinLimitOrderEntry - desoNanosToFulfillOrders := uint256.NewInt(0) + desoNanosToFulfillOrders := uint256.NewInt() transactorQuantityToFill := transactorOrder.QuantityToFillInBaseUnits.Clone() for transactorQuantityToFill.GtUint64(0) { diff --git a/lib/block_view_dao_coin_limit_order_test.go b/lib/block_view_dao_coin_limit_order_test.go index 80fcd6fe4..dbdab90da 100644 --- a/lib/block_view_dao_coin_limit_order_test.go +++ b/lib/block_view_dao_coin_limit_order_test.go @@ -151,9 +151,9 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { // Store how many $DESO and DAO coin units will be transferred. 
bb, _ := big.NewInt(0).SetString("100000000", 10) - daoCoinQuantityChange := uint256.NewInt(0) + daoCoinQuantityChange := uint256.NewInt() daoCoinQuantityChange.SetFromBig(bb) - desoQuantityChange := uint256.NewInt(1000) + desoQuantityChange := uint256.NewInt().SetUint64(1000) // Mint DAO coins for m0. { @@ -167,7 +167,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { //daoCoinTransferMetadata := DAOCoinTransferMetadata{ // ProfilePublicKey: m0PkBytes, - // DAOCoinToTransferNanos: *uint256.NewInt(3000), + // DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(3000), // ReceiverPublicKey: m1PkBytes, //} // @@ -269,7 +269,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Store how many $DESO and DAO coin units will be transferred. bb, _ = big.NewInt(0).SetString("100", 10) - daoCoinQuantity := uint256.NewInt(0) + daoCoinQuantity := uint256.NewInt() daoCoinQuantity.SetFromBig(bb) { // Construct an ask from m0. See above for description @@ -299,7 +299,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Construct a bid from m1. See above for description val, _ := uint256.FromBig(big.NewInt(1)) - newQuantity := uint256.NewInt(0).Sub(daoCoinQuantity, val) + newQuantity := uint256.NewInt().Sub(daoCoinQuantity, val) exchangeRate, err := CalculateScaledExchangeRateFromString("1.0") require.NoError(err) metadataM1 := DAOCoinLimitOrderMetadata{ @@ -327,7 +327,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Store how many $DESO and DAO coin units will be transferred. bb, _ = big.NewInt(0).SetString("100", 10) - daoCoinQuantity := uint256.NewInt(0) + daoCoinQuantity := uint256.NewInt() daoCoinQuantity.SetFromBig(bb) { // Construct an ask from m0. See above for description @@ -357,7 +357,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Construct a bid from m1. 
See above for description val, _ := uint256.FromBig(big.NewInt(1)) - newQuantity := uint256.NewInt(0).Sub(daoCoinQuantity, val) + newQuantity := uint256.NewInt().Sub(daoCoinQuantity, val) exchangeRate, err := CalculateScaledExchangeRateFromString("1.2") require.NoError(err) metadataM1 := DAOCoinLimitOrderMetadata{ @@ -383,7 +383,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Store how many $DESO and DAO coin units will be transferred. bb, _ = big.NewInt(0).SetString("100", 10) - daoCoinQuantity := uint256.NewInt(0) + daoCoinQuantity := uint256.NewInt() daoCoinQuantity.SetFromBig(bb) { // Construct an ask from m0. See above for description @@ -413,7 +413,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Construct a bid from m1. See above for description val, _ := uint256.FromBig(big.NewInt(1)) - newQuantity := uint256.NewInt(0).Sub(daoCoinQuantity, val) + newQuantity := uint256.NewInt().Sub(daoCoinQuantity, val) exchangeRate, err := CalculateScaledExchangeRateFromString("1.0") require.NoError(err) metadataM1 := DAOCoinLimitOrderMetadata{ @@ -440,7 +440,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Store how many $DESO and DAO coin units will be transferred. bb, _ = big.NewInt(0).SetString("100", 10) - daoCoinQuantity := uint256.NewInt(0) + daoCoinQuantity := uint256.NewInt() daoCoinQuantity.SetFromBig(bb) { // Construct an ask from m0. See above for description @@ -470,7 +470,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Construct a bid from m1. 
See above for description val, _ := uint256.FromBig(big.NewInt(1)) - newQuantity := uint256.NewInt(0).Sub(daoCoinQuantity, val) + newQuantity := uint256.NewInt().Sub(daoCoinQuantity, val) exchangeRate, err := CalculateScaledExchangeRateFromString("1.000001") require.NoError(err) metadataM1 := DAOCoinLimitOrderMetadata{ @@ -501,7 +501,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Store how many $DESO and DAO coin units will be transferred. bb, _ = big.NewInt(0).SetString("100", 10) - daoCoinQuantity := uint256.NewInt(0) + daoCoinQuantity := uint256.NewInt() daoCoinQuantity.SetFromBig(bb) { // Construct an ask from m0. See above for description @@ -531,7 +531,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { { // Construct a bid from m1. See above for description val, _ := uint256.FromBig(big.NewInt(1)) - newQuantity := uint256.NewInt(0).Sub(daoCoinQuantity, val) + newQuantity := uint256.NewInt().Sub(daoCoinQuantity, val) exchangeRate, err := CalculateScaledExchangeRateFromString("0.9") require.NoError(err) metadataM1 := DAOCoinLimitOrderMetadata{ @@ -566,7 +566,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { ScaledExchangeRateCoinsToSellPerCoinToBuy: MaxUint256, // Buy one nano of DESO with the DAO coin. This should work as long as one DAO coin // base unit costs less than one full DESO (which is our current balance). - QuantityToFillInBaseUnits: uint256.NewInt(1), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeImmediateOrCancel, BlockHeight: math.MaxUint32, @@ -589,7 +589,7 @@ func TestZeroCostOrderEdgeCaseDAOCoinLimitOrder(t *testing.T) { ScaledExchangeRateCoinsToSellPerCoinToBuy: MaxUint256, // Buy one nano of DESO with the DAO coin. This should work as long as one DAO coin // base unit costs less than one full DESO (which is our current balance). 
- QuantityToFillInBaseUnits: uint256.NewInt(1), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeImmediateOrCancel, BlockHeight: math.MaxUint32, @@ -731,8 +731,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { // ----------------------- // Store how many $DESO and DAO coin units will be transferred. - daoCoinQuantityChange := uint256.NewInt(100) - desoQuantityChange := uint256.NewInt(10) + daoCoinQuantityChange := uint256.NewInt().SetUint64(100) + desoQuantityChange := uint256.NewInt().SetUint64(10) // Construct metadata for a m0 limit order: // * Buying: DAO coin @@ -821,7 +821,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // RuleErrorDAOCoinLimitOrderInvalidExchangeRate: zero { originalValue := metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy - metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy = uint256.NewInt(0) + metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy = uint256.NewInt() _, _, _, err = _doDAOCoinLimitOrderTxn( t, chain, db, params, feeRateNanosPerKb, m0Pub, m0Priv, metadataM0) @@ -834,7 +834,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // RuleErrorDAOCoinLimitOrderInvalidQuantity: zero { originalValue := metadataM0.QuantityToFillInBaseUnits - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(0) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt() _, _, _, err = _doDAOCoinLimitOrderTxn( t, chain, db, params, feeRateNanosPerKb, m0Pub, m0Priv, metadataM0) @@ -869,7 +869,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // 100 * .009 = .9, which should truncate to 0 coins to sell metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRateFromString(".009") require.NoError(err) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(100) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(100) // Perform txn. 
_, _, _, err = _doDAOCoinLimitOrderTxn( @@ -886,8 +886,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { { originalPrice := metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy originalQuantity := metadataM0.QuantityToFillInBaseUnits - metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy = uint256.NewInt(1) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(1) + metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy = uint256.NewInt().SetUint64(1) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(1) // Perform txn. _, _, _, err = _doDAOCoinLimitOrderTxn( @@ -906,7 +906,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { originalQuantity := metadataM0.QuantityToFillInBaseUnits metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(1.0) require.NoError(err) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(math.MaxUint64) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(math.MaxUint64) _, _, _, err = _doDAOCoinLimitOrderTxn( t, chain, db, params, feeRateNanosPerKb, m0Pub, m0Priv, metadataM0) @@ -1041,14 +1041,14 @@ func TestDAOCoinLimitOrder(t *testing.T) { daoCoinMintMetadata := DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(1e4), + CoinsToMintNanos: *uint256.NewInt().SetUint64(1e4), } _daoCoinTxnWithTestMeta(testMeta, feeRateNanosPerKb, m0Pub, m0Priv, daoCoinMintMetadata) daoCoinTransferMetadata := DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(3000), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(3000), ReceiverPublicKey: m1PkBytes, } @@ -1104,7 +1104,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { updatedM0DESOBalance) require.Equal( - *uint256.NewInt(0).Add(&originalM0DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), + *uint256.NewInt().Add(&originalM0DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), updatedM0DAOCoinBalance.BalanceNanos) // m1's order is 
fulfilled buying $DESO so: @@ -1116,7 +1116,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { int64(updatedM1DESOBalance)) require.Equal( - *uint256.NewInt(0).Sub(&originalM1DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), + *uint256.NewInt().Sub(&originalM1DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), updatedM1DAOCoinBalance.BalanceNanos) } @@ -1139,13 +1139,13 @@ func TestDAOCoinLimitOrder(t *testing.T) { // m1 submits order buying 20 $DESO @ 11 DAO coin / $DESO. metadataM1.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(11.0) require.NoError(err) - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(20) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(20) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m1Pub, m1Priv, metadataM1) // m1 submits order buying 5 $DESO nanos @ 12 DAO coin / $DESO. metadataM1.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(12.0) require.NoError(err) - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(5) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(5) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m1Pub, m1Priv, metadataM1) // Confirm 2 existing limit orders. @@ -1173,7 +1173,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // Construct metadata for m0's order buying 240 DAO coin nanos @ 1/8 $DESO / DAO coin. metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(0.125) // 1.0 / 8.0 = 0.125 require.NoError(err) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(240) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(240) // Confirm matching limit orders exist. 
orderEntries, err = dbAdapter.GetMatchingDAOCoinLimitOrders( @@ -1194,7 +1194,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(len(orderEntries), 1) metadataM1.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(11.0) require.NoError(err) - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(10) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(10) require.True(orderEntries[0].Eq(metadataM1.ToEntry(m1PKID.PKID, savedHeight, toPKID))) // Calculate updated $DESO balances. @@ -1206,8 +1206,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { updatedM1DAOCoinBalance := dbAdapter.GetBalanceEntry(m1PKID.PKID, m0PKID.PKID, true) // Calculate changes in $DESO and DAO coins. - daoCoinQuantityChange = uint256.NewInt(240) - desoQuantityChange := uint256.NewInt(20) + daoCoinQuantityChange = uint256.NewInt().SetUint64(240) + desoQuantityChange := uint256.NewInt().SetUint64(20) // m0's order to buy DAO coins is fulfilled so: // * His $DESO balance decreases and @@ -1218,7 +1218,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { int64(updatedM0DESOBalance)) require.Equal( - *uint256.NewInt(0).Add(&originalM0DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), + *uint256.NewInt().Add(&originalM0DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), updatedM0DAOCoinBalance.BalanceNanos) // m1's orders to buy $DESO are fulfilled so: @@ -1228,7 +1228,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { originalM1DESOBalance+desoQuantityChange.Uint64(), updatedM1DESOBalance) require.Equal( - *uint256.NewInt(0).Sub(&originalM1DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), + *uint256.NewInt().Sub(&originalM1DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), updatedM1DAOCoinBalance.BalanceNanos) } @@ -1245,7 +1245,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // m1 tries to cancel non-existent order. 
cancelMetadataM1 := DAOCoinLimitOrderMetadata{ - CancelOrderID: NewBlockHash(uint256.NewInt(1).Bytes()), + CancelOrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), } _, _, _, err = _doDAOCoinLimitOrderTxn( @@ -1284,7 +1284,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // m0 submits order buying 100 DAO coin units @ 10 $DESO / DAO coin. metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(10.0) require.NoError(err) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(100) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(100) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m0Pub, m0Priv, metadataM0) // Confirm order is stored. @@ -1304,7 +1304,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // m1 submits order selling 50 DAO coin units @ 5 $DESO / DAO coin. metadataM1.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(0.2) require.NoError(err) - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(250) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(250) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m1Pub, m1Priv, metadataM1) // m0's order is partially fulfilled with 75 coins remaining. m1's order is fully @@ -1315,7 +1315,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrders() require.NoError(err) require.Equal(len(orderEntries), 1) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(75) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(75) require.True(orderEntries[0].Eq(metadataM0.ToEntry(m0PKID.PKID, savedHeight, toPKID))) // Calculate updated $DESO balances. @@ -1327,8 +1327,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { updatedM1DAOCoinBalance := dbAdapter.GetBalanceEntry(m1PKID.PKID, m0PKID.PKID, true) // Calculate changes in $DESO and DAO coins. 
- daoCoinQuantityChange = uint256.NewInt(25) - desoQuantityChange := uint256.NewInt(250) + daoCoinQuantityChange = uint256.NewInt().SetUint64(25) + desoQuantityChange := uint256.NewInt().SetUint64(250) // m0's order buying DAO coins is partially fulfilled so: // * His $DESO balance decreases and @@ -1338,7 +1338,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { updatedM0DESOBalance) require.Equal( - *uint256.NewInt(0).Add(&originalM0DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), + *uint256.NewInt().Add(&originalM0DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), updatedM0DAOCoinBalance.BalanceNanos) // m1's order selling DAO coins is fulfilled so: @@ -1350,7 +1350,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { int64(updatedM1DESOBalance)) require.Equal( - *uint256.NewInt(0).Sub(&originalM1DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), + *uint256.NewInt().Sub(&originalM1DAOCoinBalance.BalanceNanos, daoCoinQuantityChange), updatedM1DAOCoinBalance.BalanceNanos) // m0 cancels the remainder of his order. @@ -1373,7 +1373,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // m0 submits order buying 100 DAO coins @ 0.1 $DESO / DAO coin. metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(0.1) require.NoError(err) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(100) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(100) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m0Pub, m0Priv, metadataM0) // Confirm 1 existing limit order from m0. 
@@ -1460,7 +1460,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, ReceiverPublicKey: m2PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(m0BalanceEntry.BalanceNanos.Uint64() - 1), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(m0BalanceEntry.BalanceNanos.Uint64() - 1), }, ) @@ -1514,7 +1514,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(7339), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(7339), }, ) metadataM0.CancelOrderID = nil @@ -1573,7 +1573,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { daoCoinMintMetadata := DAOCoinMetadata{ ProfilePublicKey: m1PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(1e5), + CoinsToMintNanos: *uint256.NewInt().SetUint64(1e5), } // 29 @@ -1582,7 +1582,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // Transfer 10K nanos to M2 daoCoinTransferMetadata := DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(1e4), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1e4), ReceiverPublicKey: m2PkBytes, } @@ -1599,7 +1599,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(10), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(10), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -1620,7 +1620,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(5), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(5), 
OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -1654,7 +1654,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(300), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(300), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -1675,7 +1675,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { orders, err = dbAdapter.GetAllDAOCoinLimitOrdersForThisDAOCoinPair(m1PKID.PKID, &ZeroPKID) require.NoError(err) require.Len(orders, 1) - require.True(orders[0].QuantityToFillInBaseUnits.Eq(uint256.NewInt(240))) + require.True(orders[0].QuantityToFillInBaseUnits.Eq(uint256.NewInt().SetUint64(240))) // Get balance entries for all users. m0BalanceEntryAfter := dbAdapter.GetBalanceEntry(m0PKID.PKID, m1PKID.PKID, true) @@ -1684,13 +1684,13 @@ func TestDAOCoinLimitOrder(t *testing.T) { m2Decrease, err := SafeUint256().Sub(&m2BalanceEntryBefore.BalanceNanos, &m2BalanceEntryAfter.BalanceNanos) require.NoError(err) - require.True(m2Decrease.Eq(uint256.NewInt(10))) + require.True(m2Decrease.Eq(uint256.NewInt().SetUint64(10))) m1Decrease, err := SafeUint256().Sub(&m1BalanceEntryBefore.BalanceNanos, &m1BalanceEntryAfter.BalanceNanos) require.NoError(err) - require.True(m1Decrease.Eq(uint256.NewInt(50))) + require.True(m1Decrease.Eq(uint256.NewInt().SetUint64(50))) - require.True(m0BalanceEntryAfter.BalanceNanos.Eq(uint256.NewInt(60))) + require.True(m0BalanceEntryAfter.BalanceNanos.Eq(uint256.NewInt().SetUint64(60))) m0DESOBalanceAfter := _getBalance(t, chain, mempool, m0Pub) m1DESOBalanceAfter := _getBalance(t, chain, mempool, m1Pub) @@ -1761,12 +1761,12 @@ func TestDAOCoinLimitOrder(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.9) require.NoError(err) 
queryEntry := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m1PKID.PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m1PKID.PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), } orderEntries, err = utxoView.GetNextLimitOrdersToFill(queryEntry, nil, savedHeight) @@ -1781,7 +1781,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(1.0) require.NoError(err) require.Equal(orderEntries[0].ScaledExchangeRateCoinsToSellPerCoinToBuy, exchangeRate) - require.Equal(orderEntries[0].QuantityToFillInBaseUnits, uint256.NewInt(240)) + require.Equal(orderEntries[0].QuantityToFillInBaseUnits, uint256.NewInt().SetUint64(240)) // m0 submits another order slightly better than previous. // Transactor: m0 @@ -1793,7 +1793,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { metadataM0.SellingDAOCoinCreatorPublicKey = &ZeroPublicKey metadataM0.ScaledExchangeRateCoinsToSellPerCoinToBuy, err = CalculateScaledExchangeRate(1.05) require.NoError(err) - metadataM0.QuantityToFillInBaseUnits = uint256.NewInt(110) + metadataM0.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(110) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m0Pub, m0Priv, metadataM0) orderEntries, err = utxoView._getAllDAOCoinLimitOrders() require.NoError(err) @@ -1813,23 +1813,23 @@ func TestDAOCoinLimitOrder(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(1.05) require.NoError(err) require.Equal(orderEntries[0].ScaledExchangeRateCoinsToSellPerCoinToBuy, exchangeRate) - require.Equal(orderEntries[0].QuantityToFillInBaseUnits, uint256.NewInt(110)) + require.Equal(orderEntries[0].QuantityToFillInBaseUnits, uint256.NewInt().SetUint64(110)) // Test get matching DAO coin limit 
orders. // Query with identical order as before but higher quantity. // Should match both of m0's orders with better listed first. - queryEntry.QuantityToFillInBaseUnits = uint256.NewInt(150) + queryEntry.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(150) orderEntries, err = utxoView.GetNextLimitOrdersToFill(queryEntry, nil, savedHeight) require.NoError(err) require.Equal(len(orderEntries), 2) exchangeRate, err = CalculateScaledExchangeRate(1.05) require.NoError(err) require.Equal(orderEntries[0].ScaledExchangeRateCoinsToSellPerCoinToBuy, exchangeRate) - require.Equal(orderEntries[0].QuantityToFillInBaseUnits, uint256.NewInt(110)) + require.Equal(orderEntries[0].QuantityToFillInBaseUnits, uint256.NewInt().SetUint64(110)) exchangeRate, err = CalculateScaledExchangeRate(1.0) require.NoError(err) require.Equal(orderEntries[1].ScaledExchangeRateCoinsToSellPerCoinToBuy, exchangeRate) - require.Equal(orderEntries[1].QuantityToFillInBaseUnits, uint256.NewInt(240)) + require.Equal(orderEntries[1].QuantityToFillInBaseUnits, uint256.NewInt().SetUint64(240)) } { @@ -1850,7 +1850,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), OperationType: DAOCoinLimitOrderOperationTypeBID, CancelOrderID: orderEntries[1].OrderID, } @@ -1888,7 +1888,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(160), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(160), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -1935,7 +1935,7 @@ func 
TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(250), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(250), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -1958,7 +1958,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrders() require.NoError(err) require.Equal(len(orderEntries), 2) - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(60) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(60) require.True(orderEntries[1].Eq(metadataM1.ToEntry(m1PKID.PKID, savedHeight, toPKID))) // m0 has 1 remaining open orders. @@ -1987,7 +1987,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(200), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(200), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2021,7 +2021,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2042,8 +2042,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { m1DAOCoinBalanceM0After := dbAdapter.GetBalanceEntry(m1PKID.PKID, m0PKID.PKID, true).BalanceNanos m1DAOCoinBalanceM1After := dbAdapter.GetBalanceEntry(m1PKID.PKID, m1PKID.PKID, 
true).BalanceNanos - daoCoinM0UnitsTransferred := uint256.NewInt(100) - daoCoinM1UnitsTransferred := uint256.NewInt(200) + daoCoinM0UnitsTransferred := uint256.NewInt().SetUint64(100) + daoCoinM1UnitsTransferred := uint256.NewInt().SetUint64(200) m0DAOCoinM0Decrease, err := SafeUint256().Sub(&m0DAOCoinBalanceM0Before, &m0DAOCoinBalanceM0After) require.NoError(err) @@ -2086,7 +2086,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(50), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2108,7 +2108,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(110), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(110), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2141,21 +2141,21 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(m0DESONanosIncrease, uint64(45)) m0DAOCoinUnitsDecrease, err := SafeUint256().Sub(&m0DAOCoinBalanceUnitsBefore, &m0DAOCoinBalanceUnitsAfter) require.NoError(err) - require.Equal(m0DAOCoinUnitsDecrease, uint256.NewInt(50)) + require.Equal(m0DAOCoinUnitsDecrease, uint256.NewInt().SetUint64(50)) // m1's accounting m1DESONanosIncrease := m1DESOBalanceNanosAfter - m1DESOBalanceNanosBefore require.Equal(m1DESONanosIncrease, uint64(50)) m1DAOCoinUnitsDecrease, err := SafeUint256().Sub(&m1DAOCoinBalanceUnitsBefore, &m1DAOCoinBalanceUnitsAfter) require.NoError(err) - require.Equal(m1DAOCoinUnitsDecrease, uint256.NewInt(60)) + require.Equal(m1DAOCoinUnitsDecrease, 
uint256.NewInt().SetUint64(60)) // m2's accounting m2DESONanosDecrease := m2DESOBalanceNanosBefore - m2DESOBalanceNanosAfter require.Equal(m2DESONanosDecrease, uint64(95)+_feeNanos()) m2DAOCoinUnitsIncrease, err := SafeUint256().Sub(&m2DAOCoinBalanceUnitsAfter, &m2DAOCoinBalanceUnitsBefore) require.NoError(err) - require.Equal(m2DAOCoinUnitsIncrease, uint256.NewInt(110)) + require.Equal(m2DAOCoinUnitsIncrease, uint256.NewInt().SetUint64(110)) } { @@ -2175,7 +2175,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(300), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(300), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2196,7 +2196,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(600), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(600), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2224,7 +2224,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(900), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(900), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2250,21 +2250,21 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(m0DESONanosDecrease, uint64(30)) m0DAOCoinUnitsIncrease, err := SafeUint256().Sub(&m0DAOCoinBalanceUnitsAfter, 
&m0DAOCoinBalanceUnitsBefore) require.NoError(err) - require.Equal(m0DAOCoinUnitsIncrease, uint256.NewInt(300)) + require.Equal(m0DAOCoinUnitsIncrease, uint256.NewInt().SetUint64(300)) // m1's accounting m1DESONanosDecrease := m1DESOBalanceNanosBefore - m1DESOBalanceNanosAfter require.Equal(m1DESONanosDecrease, uint64(120)) m1DAOCoinUnitsIncrease, err := SafeUint256().Sub(&m1DAOCoinBalanceUnitsAfter, &m1DAOCoinBalanceUnitsBefore) require.NoError(err) - require.Equal(m1DAOCoinUnitsIncrease, uint256.NewInt(600)) + require.Equal(m1DAOCoinUnitsIncrease, uint256.NewInt().SetUint64(600)) // m2's accounting m2DESONanosIncrease := m2DESOBalanceNanosAfter - m2DESOBalanceNanosBefore require.Equal(m2DESONanosIncrease, uint64(150)-_feeNanos()) m2DAOCoinUnitsDecrease, err := SafeUint256().Sub(&m2DAOCoinBalanceUnitsBefore, &m2DAOCoinBalanceUnitsAfter) require.NoError(err) - require.Equal(m2DAOCoinUnitsDecrease, uint256.NewInt(900)) + require.Equal(m2DAOCoinUnitsDecrease, uint256.NewInt().SetUint64(900)) } { @@ -2284,7 +2284,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(89), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(89), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2353,7 +2353,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(10), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(10), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2415,7 +2415,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: 
NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(50), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2433,7 +2433,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(50), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2666,7 +2666,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(50), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2684,7 +2684,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(50), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2768,7 +2768,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: 
uint256.NewInt().SetUint64(100), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2789,8 +2789,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { metadataM1 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, - ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(0), - QuantityToFillInBaseUnits: uint256.NewInt(200), + ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(200), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: 99, } @@ -2823,7 +2823,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(originalM1BalanceM1Coins, updatedM1BalanceM1Coins) // m1 submits a FillOrKill order buying 100 m1 DAO coins that is filled. - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(100) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(100) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m1Pub, m1Priv, metadataM1) orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrders() require.NoError(err) @@ -2857,7 +2857,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2878,8 +2878,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { metadataM1 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), - ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(0), - QuantityToFillInBaseUnits: uint256.NewInt(200), + ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(), + 
QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(200), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeFillOrKill, } @@ -2905,7 +2905,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { require.Equal(originalM1BalanceM1Coins, updatedM1BalanceM1Coins) // m1 submits a FillOrKill order selling 100 m1 DAO coins that is filled. - metadataM1.QuantityToFillInBaseUnits = uint256.NewInt(100) + metadataM1.QuantityToFillInBaseUnits = uint256.NewInt().SetUint64(100) _doDAOCoinLimitOrderTxnWithTestMeta(testMeta, feeRateNanosPerKb, m1Pub, m1Priv, metadataM1) orderEntries, err = dbAdapter.GetAllDAOCoinLimitOrders() require.NoError(err) @@ -2939,7 +2939,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -2961,8 +2961,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { metadataM1 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, - ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(0), - QuantityToFillInBaseUnits: uint256.NewInt(200), + ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(200), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeImmediateOrCancel, } @@ -3000,7 +3000,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: 
uint256.NewInt().SetUint64(100), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3022,8 +3022,8 @@ func TestDAOCoinLimitOrder(t *testing.T) { metadataM1 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), - ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(0), - QuantityToFillInBaseUnits: uint256.NewInt(200), + ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(200), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeImmediateOrCancel, } @@ -3085,14 +3085,14 @@ func TestDAOCoinLimitOrder(t *testing.T) { // Confirm that m0 cannot afford to fulfill m1's order. m1RequestedDESONanos, err := m1OrderEntry.BaseUnitsToBuyUint256() require.NoError(err) - require.True(m1RequestedDESONanos.Gt(uint256.NewInt(originalM0DESOBalance))) + require.True(m1RequestedDESONanos.Gt(uint256.NewInt().SetUint64(originalM0DESOBalance))) // m0 submits a FillOrKill order trying to fulfill m1's order. // m0 does not have sufficient $DESO. 
metadataM0 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, - ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(0), + ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(), QuantityToFillInBaseUnits: metadataM0.QuantityToFillInBaseUnits, OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeFillOrKill, @@ -3187,7 +3187,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { metadataM0 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), - ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(0), + ScaledExchangeRateCoinsToSellPerCoinToBuy: uint256.NewInt(), QuantityToFillInBaseUnits: metadataM0.QuantityToFillInBaseUnits, OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeFillOrKill, @@ -3245,7 +3245,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3271,7 +3271,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(50), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeFillOrKill, } @@ -3354,7 +3354,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { // m0 submits an order selling m1 DAO coin units for $DESO. Order is stored. 
exchangeRate, err := CalculateScaledExchangeRate(1.0) require.NoError(err) - quantityToFill := uint256.NewInt(60) + quantityToFill := uint256.NewInt().SetUint64(60) metadataM0 = DAOCoinLimitOrderMetadata{ BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, @@ -3415,7 +3415,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m0PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3470,7 +3470,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: NewPublicKey(m3PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(350), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(350), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3505,7 +3505,7 @@ func TestDAOCoinLimitOrder(t *testing.T) { BuyingDAOCoinCreatorPublicKey: &ZeroPublicKey, SellingDAOCoinCreatorPublicKey: NewPublicKey(m3PkBytes), ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3531,12 +3531,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, 
SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(1000), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3545,12 +3545,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.1) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(1000), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3562,10 +3562,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(100)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(1000)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(1000)) // m1 = transactor, m0 = matching 
order updatedTransactorQuantityToFillInBaseUnits, @@ -3574,10 +3574,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(1000)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(100)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(1000)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) } // Scenario 2: one BID, one ASK, matching orders w/ mismatched prices @@ -3586,12 +3586,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(1000), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3600,12 +3600,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.2) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: 
NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(500), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(500), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3618,10 +3618,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(500)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(500)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(2500)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(500)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(500)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(2500)) // m1 = transactor, m0 = matching order // m1 sells 500 DAO coin base units @ 10 $DESO / DAO coin. 
@@ -3631,10 +3631,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(500)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(5000)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(500)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(500)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(5000)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(500)) } // Scenario 3: m0 and m1 both submit BIDs that should match @@ -3643,12 +3643,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3657,12 +3657,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.1) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used 
+ OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(1000), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3675,10 +3675,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(100)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(1000)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(1000)) // m1 = transactor, m0 = matching order // m1 buys 1000 $DESO @ 0.1 DAO coin / $DESO. 
@@ -3688,10 +3688,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(1000)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(100)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(1000)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) } // Scenario 4: m0 and m1 both submit BIDs that match, m1 gets a better price than expected @@ -3700,12 +3700,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3714,12 +3714,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.2) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: 
NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(250), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(250), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3732,10 +3732,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(50)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(50)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(250)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(50)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(50)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(250)) // m1 = transactor, m0 = matching order // m1 buys 250 $DESO @ 0.1 DAO coins / $DESO. 
@@ -3745,10 +3745,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(75)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(250)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(25)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(75)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(250)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(25)) } // Scenario 5: m0 and m1 both submit ASKs that should match @@ -3757,12 +3757,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(1000), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3771,12 +3771,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.1) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + 
OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3789,10 +3789,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(100)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(1000)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(1000)) // m1 = transactor, m0 = matching order // m1 sells 100 DAO coin base units @ 0.1 DAO coin / $DESO. 
@@ -3802,10 +3802,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(1000)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(100)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(1000)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) } // Scenario 6: m0 and m1 both submit ASKs that match, m1 gets a better price than expected @@ -3814,12 +3814,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(1000), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3828,12 +3828,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.2) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: 
NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(50), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(50), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3846,10 +3846,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(750)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(50)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(250)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(750)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(50)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(250)) // m1 = transactor, m0 = matching order // m1 sells 50 DAO coin units for 0.1 DAO coin / $DESO. 
@@ -3859,10 +3859,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(500)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(500)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(50)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(500)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(500)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(50)) } // Scenario 7: @@ -3873,12 +3873,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err := CalculateScaledExchangeRate(10.0) require.NoError(err) m0Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m0PKID, BuyingDAOCoinCreatorPKID: &ZeroPKID, SellingDAOCoinCreatorPKID: m0PKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(1000), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(1000), OperationType: DAOCoinLimitOrderOperationTypeASK, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3887,12 +3887,12 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { exchangeRate, err = CalculateScaledExchangeRate(0.2) require.NoError(err) m1Order := &DAOCoinLimitOrderEntry{ - OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), // Not used + OrderID: 
NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), // Not used TransactorPKID: m1PKID, BuyingDAOCoinCreatorPKID: m0PKID, SellingDAOCoinCreatorPKID: &ZeroPKID, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(500), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(500), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3905,10 +3905,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err := _calculateDAOCoinsTransferredInLimitOrderMatch(m1Order, m0Order.OperationType, m0Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(500)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(100)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(500)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(500)) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(100)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(500)) // m1 = transactor, m0 = matching order // m1 buys 500 DAO coin units @ 10 DAO coin / $DESO. 
@@ -3918,10 +3918,10 @@ func TestCalculateDAOCoinsTransferredInLimitOrderMatch(t *testing.T) { transactorSellingCoinBaseUnitsTransferred, err = _calculateDAOCoinsTransferredInLimitOrderMatch(m0Order, m1Order.OperationType, m1Order.QuantityToFillInBaseUnits) require.NoError(err) - require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt(0)) - require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt(500)) - require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt(500)) - require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt(50)) + require.Equal(updatedTransactorQuantityToFillInBaseUnits, uint256.NewInt()) + require.Equal(updatedMatchingQuantityToFillInBaseUnits, uint256.NewInt().SetUint64(500)) + require.Equal(transactorBuyingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(500)) + require.Equal(transactorSellingCoinBaseUnitsTransferred, uint256.NewInt().SetUint64(50)) } } @@ -3935,7 +3935,7 @@ func TestComputeBaseUnitsToBuyUint256(t *testing.T) { if !worked { panic(fmt.Sprintf("Failed to convert sell quantity %v into bigint", quantityToSellStr)) } - selLValUint256 := uint256.NewInt(0) + selLValUint256 := uint256.NewInt() overflow := selLValUint256.SetFromBig(sellValBig) if overflow { panic(fmt.Sprintf("Failed to convert sell quantity %v into uint256 because of overflow", quantityToSellStr)) @@ -3947,7 +3947,7 @@ func TestComputeBaseUnitsToBuyUint256(t *testing.T) { if !worked { panic(fmt.Sprintf("Failed to convert buy quantity %v into bigint", quantityToBuyStr)) } - buyValUint256 := uint256.NewInt(0) + buyValUint256 := uint256.NewInt() overflow = buyValUint256.SetFromBig(buyValBig) if overflow { panic(fmt.Sprintf("Failed to convert buy quantity %v into uint256 because of overflow", quantityToBuyStr)) @@ -3958,10 +3958,10 @@ func TestComputeBaseUnitsToBuyUint256(t *testing.T) { assertEqual := func(exchangeRateFloat float64, quantityToSellInt int, quantityToBuyInt int) { exchangeRate, err := 
CalculateScaledExchangeRate(exchangeRateFloat) require.NoError(err) - quantityToSell := uint256.NewInt(uint64(quantityToSellInt)) + quantityToSell := uint256.NewInt().SetUint64(uint64(quantityToSellInt)) quantityToBuy, err := ComputeBaseUnitsToBuyUint256(exchangeRate, quantityToSell) require.NoError(err) - require.Equal(quantityToBuy, uint256.NewInt(uint64(quantityToBuyInt))) + require.Equal(quantityToBuy, uint256.NewInt().SetUint64(uint64(quantityToBuyInt))) // We also call assertEqualStr when this function is used assertEqualStr( @@ -4032,7 +4032,7 @@ func TestComputeBaseUnitsToBuyUint256(t *testing.T) { if !worked { panic(fmt.Sprintf("Failed to convert sell quantity %v into bigint", quantityToSellStr)) } - selLValUint256 := uint256.NewInt(0) + selLValUint256 := uint256.NewInt() overflow := selLValUint256.SetFromBig(sellValBig) if overflow { panic(fmt.Sprintf("Failed to convert sell quantity %v into uint256 because of overflow", quantityToSellStr)) @@ -4243,7 +4243,7 @@ func (txnData *DAOCoinLimitOrderMetadata) ToEntry( // one here for testing purposes as OrderID can't be nil. // Note: the OrderID is skipped when we compare if two // order entries are equal in these tests for this reason. 
- OrderID: NewBlockHash(uint256.NewInt(1).Bytes()), + OrderID: NewBlockHash(uint256.NewInt().SetUint64(1).Bytes()), TransactorPKID: transactorPKID, BuyingDAOCoinCreatorPKID: toPKID(txnData.BuyingDAOCoinCreatorPublicKey), SellingDAOCoinCreatorPKID: toPKID(txnData.SellingDAOCoinCreatorPublicKey), @@ -4338,7 +4338,7 @@ func TestFlushingDAOCoinLimitOrders(t *testing.T) { BuyingDAOCoinCreatorPublicKey: buying.PublicKey, SellingDAOCoinCreatorPublicKey: selling.PublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(quantity), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(quantity), OperationType: operationType, FillType: fillType, } @@ -4422,7 +4422,7 @@ func TestFlushingDAOCoinLimitOrders(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, feeRateNanosPerKb, m0.Pub, m0.Priv, DAOCoinMetadata{ ProfilePublicKey: m0.PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(1e12), + CoinsToMintNanos: *uint256.NewInt().SetUint64(1e12), }) } { diff --git a/lib/block_view_dao_coin_test.go b/lib/block_view_dao_coin_test.go index caa148f4e..e59c05acf 100644 --- a/lib/block_view_dao_coin_test.go +++ b/lib/block_view_dao_coin_test.go @@ -1,13 +1,14 @@ package lib import ( - "github.com/btcsuite/btcd/btcec/v2" + "reflect" + "testing" + + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v4" "github.com/holiman/uint256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "reflect" - "testing" ) func _daoCoinTxn(t *testing.T, chain *Blockchain, db *badger.DB, @@ -230,7 +231,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(100), + CoinsToMintNanos: *uint256.NewInt().SetUint64(100), }) require.Error(err) @@ -263,7 +264,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = 
_daoCoinTxn(t, chain, db, params, 10, m1Pub, m1Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(100), + CoinsToMintNanos: *uint256.NewInt().SetUint64(100), }) require.Error(err) require.Contains(err.Error(), RuleErrorOnlyProfileOwnerCanMintDAOCoin) @@ -284,7 +285,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(0), + CoinsToMintNanos: *uint256.NewInt().SetUint64(0), }) require.Error(err) require.Contains(err.Error(), RuleErrorDAOCoinMustMintNonZeroDAOCoin) @@ -299,7 +300,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(oneMCoins), + CoinsToMintNanos: *uint256.NewInt().SetUint64(oneMCoins), }, ) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -321,7 +322,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt(hundredKCoins), + CoinsToBurnNanos: *uint256.NewInt().SetUint64(hundredKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -339,7 +340,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m1Pub, m1Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt(100), + CoinsToBurnNanos: *uint256.NewInt().SetUint64(100), }) require.Error(err) require.Contains(err.Error(), RuleErrorDAOCoinBurnInsufficientCoins) @@ -355,7 +356,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m0Pub, m0Priv, 
DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, ReceiverPublicKey: m1PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(tenKCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(tenKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -384,7 +385,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt(tenKCoins), + CoinsToBurnNanos: *uint256.NewInt().SetUint64(tenKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -399,7 +400,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(tenKCoins), + CoinsToMintNanos: *uint256.NewInt().SetUint64(tenKCoins), }) daoBalanceEntry = DBGetBalanceEntryForHODLerAndCreatorPKIDs( db, chain.snapshot, m0PKID.PKID, m0PKID.PKID, true) @@ -423,7 +424,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, 10, m1Pub, m1Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt(oneKCoins), + CoinsToBurnNanos: *uint256.NewInt().SetUint64(oneKCoins), }) m1DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -454,7 +455,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(100), + CoinsToMintNanos: *uint256.NewInt().SetUint64(100), }) require.Error(err) require.Contains(err.Error(), RuleErrorDAOCoinCannotMintIfMintingIsDisabled) @@ -474,7 +475,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m0Pub, m0Priv, 
DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(oneMCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneMCoins), ReceiverPublicKey: m2PkBytes, }) require.Error(err) @@ -485,7 +486,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(oneKCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneKCoins), ReceiverPublicKey: m0PkBytes, }) require.Error(err) @@ -496,7 +497,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m2Pub, m2Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(1), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1), ReceiverPublicKey: m0PkBytes, }) require.Error(err) @@ -507,7 +508,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m2PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(oneMCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneMCoins), ReceiverPublicKey: m2PkBytes, }) require.Error(err) @@ -518,7 +519,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(oneMCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneMCoins), ReceiverPublicKey: m2PkBytes[:10], }) require.Error(err) @@ -529,7 +530,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes[:10], - DAOCoinToTransferNanos: *uint256.NewInt(oneMCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneMCoins), ReceiverPublicKey: 
m2PkBytes, }) require.Error(err) @@ -541,7 +542,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m0Pub, m0Priv, DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt(oneMCoins), + CoinsToBurnNanos: *uint256.NewInt().SetUint64(oneMCoins), }) require.Error(err) require.Contains(err.Error(), RuleErrorDAOCoinBurnInsufficientCoins) @@ -558,7 +559,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(tenKCoins - oneKCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(tenKCoins - oneKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -568,7 +569,7 @@ func TestDAOCoinBasic(t *testing.T) { m1DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( db, chain.snapshot, m1PKID.PKID, m0PKID.PKID, true) // M1's balance entry is deleted because they have nothing - require.Equal(true, m1DAOBalanceEntry.BalanceNanos.Eq(uint256.NewInt(0))) + require.Equal(true, m1DAOBalanceEntry.BalanceNanos.Eq(uint256.NewInt())) profileEntry := DBGetProfileEntryForPKID(db, chain.snapshot, m0PKID.PKID) require.Equal(profileEntry.DAOCoinEntry.CoinsInCirculationNanos.Uint64(), oneMCoins-hundredKCoins-oneKCoins) @@ -586,7 +587,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, ReceiverPublicKey: m2PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(tenKCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(tenKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -636,7 +637,7 @@ func TestDAOCoinBasic(t *testing.T) { // M0 shouldn't own any M3 DAO Coin m0DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs(db, chain.snapshot, 
m0PKID.PKID, m3PKID.PKID, true) - require.Equal(true, m0DAOBalanceEntry.BalanceNanos.Eq(uint256.NewInt(0))) + require.Equal(true, m0DAOBalanceEntry.BalanceNanos.Eq(uint256.NewInt())) // M3's DAO Balance entry should be what M0's was prior to the swap m3DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs(db, chain.snapshot, m3PKID.PKID, m3PKID.PKID, true) @@ -674,7 +675,7 @@ func TestDAOCoinBasic(t *testing.T) { { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m2Pub, m2Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(1), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1), ReceiverPublicKey: m1PkBytes, }) require.Error(err) @@ -692,7 +693,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m2Pub, m2Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m3PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(oneKCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -721,7 +722,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m3Pub, m3Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m1PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(oneKCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneKCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -755,7 +756,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m4PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(100), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(100), }) require.Error(err) @@ -775,7 +776,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: 
m3PkBytes, ReceiverPublicKey: m2PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(hundredCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(hundredCoins), }) m1DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -807,7 +808,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m3Pub, m3Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m4PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(hundredCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(hundredCoins), }) m3DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -839,7 +840,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m4PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(hundredCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(hundredCoins), }) m1DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -861,7 +862,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTransferTxn(t, chain, db, params, 10, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(100), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(100), }) require.Error(err) @@ -909,7 +910,7 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(hundredCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(hundredCoins), }) m1DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( @@ -942,12 +943,12 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m3Pub, m3Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m3PkBytes, ReceiverPublicKey: m2PkBytes, - 
DAOCoinToTransferNanos: *uint256.NewInt(oneMCoins - hundredKCoins - tenKCoins - oneKCoins - hundredCoins), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(oneMCoins - hundredKCoins - tenKCoins - oneKCoins - hundredCoins), }) daoBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( db, chain.snapshot, m3PKID.PKID, m3PKID.PKID, true) - require.Equal(true, daoBalanceEntry.BalanceNanos.Eq(uint256.NewInt(0))) + require.Equal(true, daoBalanceEntry.BalanceNanos.Eq(uint256.NewInt())) m2DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( db, chain.snapshot, m2PKID.PKID, m3PKID.PKID, true) @@ -974,12 +975,12 @@ func TestDAOCoinBasic(t *testing.T) { _daoCoinTxnWithTestMeta(testMeta, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m3PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt(oneMCoins - hundredKCoins - oneKCoins - oneKCoins), + CoinsToBurnNanos: *uint256.NewInt().SetUint64(oneMCoins - hundredKCoins - oneKCoins - oneKCoins), }) m2DAOBalanceEntry := DBGetBalanceEntryForHODLerAndCreatorPKIDs( db, chain.snapshot, m2PKID.PKID, m3PKID.PKID, true) - require.Equal(true, m2DAOBalanceEntry.BalanceNanos.Eq(uint256.NewInt(0))) + require.Equal(true, m2DAOBalanceEntry.BalanceNanos.Eq(uint256.NewInt())) profileEntry := DBGetProfileEntryForPKID(db, chain.snapshot, m3PKID.PKID) require.Equal(profileEntry.DAOCoinEntry.CoinsInCirculationNanos.Uint64(), oneKCoins) @@ -1011,7 +1012,7 @@ func TestDAOCoinBasic(t *testing.T) { // - nobody has any // M2 DAO cap table after: // - M2: max-1k - maxMinus1k := uint256.NewInt(0).Sub(MaxUint256, uint256.NewInt(1000)) + maxMinus1k := uint256.NewInt().Sub(MaxUint256, uint256.NewInt().SetUint64(1000)) { _daoCoinTxnWithTestMeta(testMeta, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, @@ -1033,7 +1034,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: 
DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(1001), + CoinsToMintNanos: *uint256.NewInt().SetUint64(1001), }) require.Error(err) require.Contains(err.Error(), RuleErrorOverflowWhileMintingDAOCoins) @@ -1045,10 +1046,10 @@ func TestDAOCoinBasic(t *testing.T) { // M2 DAO cap table after: // - M1: (max-1k) / 2 // - M2: (max-1k) / 2 + 1 - maxMinus1kDiv2 := uint256.NewInt(0).Div(maxMinus1k, uint256.NewInt(2)) - maxMinus1kDiv2PlusOne := uint256.NewInt(0).Add( + maxMinus1kDiv2 := uint256.NewInt().Div(maxMinus1k, uint256.NewInt().SetUint64(2)) + maxMinus1kDiv2PlusOne := uint256.NewInt().Add( maxMinus1kDiv2, - uint256.NewInt(1)) + uint256.NewInt().SetUint64(1)) { _daoCoinTransferTxnWithTestMeta(testMeta, 10, m2Pub, m2Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m2PkBytes, @@ -1078,7 +1079,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(1001), + CoinsToMintNanos: *uint256.NewInt().SetUint64(1001), }) require.Error(err) require.Contains(err.Error(), RuleErrorOverflowWhileMintingDAOCoins) @@ -1091,13 +1092,13 @@ func TestDAOCoinBasic(t *testing.T) { // M2 DAO cap table after: // - M1: (max-1k)/2 // - M2: (max-1k)/2 + 1k + 1 - maxMinus1kDiv2Plus1kPlusOne := uint256.NewInt(0).Add( - maxMinus1kDiv2, uint256.NewInt(1001)) + maxMinus1kDiv2Plus1kPlusOne := uint256.NewInt().Add( + maxMinus1kDiv2, uint256.NewInt().SetUint64(1001)) { _daoCoinTxnWithTestMeta(testMeta, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(1000), + CoinsToMintNanos: *uint256.NewInt().SetUint64(1000), }) { @@ -1124,16 +1125,16 @@ func TestDAOCoinBasic(t *testing.T) { // M2 DAO cap table after: // - M1: (max-1k)/2 - 2k // - M2: (max-1k)/2 + 1k + 1 - maxMinus1kDiv2Minus2k := uint256.NewInt(0).Sub( - maxMinus1kDiv2, 
uint256.NewInt(2000)) - maxMinus1kDiv2Minus1kPlus1 := uint256.NewInt(0).Add( + maxMinus1kDiv2Minus2k := uint256.NewInt().Sub( + maxMinus1kDiv2, uint256.NewInt().SetUint64(2000)) + maxMinus1kDiv2Minus1kPlus1 := uint256.NewInt().Add( maxMinus1kDiv2Minus2k, maxMinus1kDiv2Plus1kPlusOne) { _daoCoinTxnWithTestMeta(testMeta, 10, m1Pub, m1Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt(2000), + CoinsToBurnNanos: *uint256.NewInt().SetUint64(2000), }) { @@ -1160,15 +1161,15 @@ func TestDAOCoinBasic(t *testing.T) { // M2 DAO cap table after: // - M1: (max-1k)/2 - 2k // - M2: (max-1k)/2 + 2k + 1 - maxMinus1kDiv2Plus2k := uint256.NewInt(0).Add( - maxMinus1kDiv2, uint256.NewInt(2000)) - maxMinus1kDiv2Plus2kPlus1 := uint256.NewInt(0).Add( - maxMinus1kDiv2Plus2k, uint256.NewInt(1)) + maxMinus1kDiv2Plus2k := uint256.NewInt().Add( + maxMinus1kDiv2, uint256.NewInt().SetUint64(2000)) + maxMinus1kDiv2Plus2kPlus1 := uint256.NewInt().Add( + maxMinus1kDiv2Plus2k, uint256.NewInt().SetUint64(1)) { _daoCoinTxnWithTestMeta(testMeta, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(1000), + CoinsToMintNanos: *uint256.NewInt().SetUint64(1000), }) { @@ -1193,7 +1194,7 @@ func TestDAOCoinBasic(t *testing.T) { _, _, _, err = _daoCoinTxn(t, chain, db, params, 10, m2Pub, m2Priv, DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(1001), + CoinsToMintNanos: *uint256.NewInt().SetUint64(1001), }) require.Error(err) require.Contains(err.Error(), RuleErrorOverflowWhileMintingDAOCoins) diff --git a/lib/block_view_derived_key.go b/lib/block_view_derived_key.go index 801f1dece..b64edf9f7 100644 --- a/lib/block_view_derived_key.go +++ b/lib/block_view_derived_key.go @@ -3,7 +3,7 @@ package lib import ( "bytes" "fmt" - "github.com/btcsuite/btcd/btcec/v2" + 
"github.com/btcsuite/btcd/btcec" "github.com/holiman/uint256" "github.com/pkg/errors" "reflect" @@ -129,7 +129,7 @@ func (bav *UtxoView) _connectAuthorizeDerivedKey( if len(ownerPublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, RuleErrorAuthorizeDerivedKeyInvalidOwnerPublicKey } - if _, err := btcec.ParsePubKey(ownerPublicKey); err != nil { + if _, err := btcec.ParsePubKey(ownerPublicKey, btcec.S256()); err != nil { return 0, 0, nil, errors.Wrap( RuleErrorAuthorizeDerivedKeyInvalidOwnerPublicKey, err.Error()) } @@ -489,7 +489,7 @@ func (bav *UtxoView) _disconnectAuthorizeDerivedKey( if len(currentTxn.PublicKey) != btcec.PubKeyBytesLenCompressed { return fmt.Errorf("_disconnectAuthorizeDerivedKey invalid public key: %v", currentTxn.PublicKey) } - _, err := btcec.ParsePubKey(currentTxn.PublicKey) + _, err := btcec.ParsePubKey(currentTxn.PublicKey, btcec.S256()) if err != nil { return fmt.Errorf("_disconnectAuthorizeDerivedKey invalid public key: %v", err) } @@ -500,7 +500,7 @@ func (bav *UtxoView) _disconnectAuthorizeDerivedKey( if len(txMeta.DerivedPublicKey) != btcec.PubKeyBytesLenCompressed { return fmt.Errorf("_disconnectAuthorizeDerivedKey invalid derived key: %v", txMeta.DerivedPublicKey) } - _, err = btcec.ParsePubKey(txMeta.DerivedPublicKey) + _, err = btcec.ParsePubKey(txMeta.DerivedPublicKey, btcec.S256()) if err != nil { return fmt.Errorf("_disconnectAuthorizeDerivedKey invalid derived key: %v", err) } diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index 17821e7f3..6024bc898 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -10,8 +10,7 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/btcec/v2" - "github.com/btcsuite/btcd/btcec/v2/ecdsa" + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v4" "github.com/holiman/uint256" "github.com/stretchr/testify/assert" @@ -544,7 +543,7 @@ func _doTxnWithBlockHeight( // ====== Access Group Fork ====== // We 
will only have utxoop expectation if spending limit contains CC, DAO, or NFT spending limits. transactorPrivBytes, _, err := Base58CheckDecode(TransactorPrivKeyBase58Check) - _, transactorPub := btcec.PrivKeyFromBytes(transactorPrivBytes) + _, transactorPub := btcec.PrivKeyFromBytes(btcec.S256(), transactorPrivBytes) transactorPubBytes := transactorPub.SerializeCompressed() require.NoError(err) if !utxoView.GetDerivedKeyMappingForOwner(txn.PublicKey, transactorPubBytes).TransactionSpendingLimitTracker.IsUnlimited { @@ -649,14 +648,15 @@ func _getAuthorizeDerivedKeyMetadata( require := require.New(t) // Generate a random derived key pair - derivedPrivateKey, err := btcec.NewPrivateKey() + derivedPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err, "_getAuthorizeDerivedKeyMetadata: Error generating a derived key pair") derivedPublicKey := derivedPrivateKey.PubKey().SerializeCompressed() // Create access signature expirationBlockByte := EncodeUint64(expirationBlock) accessBytes := append(derivedPublicKey, expirationBlockByte[:]...) 
- accessSignature := ecdsa.Sign(ownerPrivateKey, Sha256DoubleHash(accessBytes)[:]) + accessSignature, err := ownerPrivateKey.Sign(Sha256DoubleHash(accessBytes)[:]) + require.NoError(err, "_getAuthorizeDerivedKeyMetadata: Error creating access signature") // Determine operation type var operationType AuthorizeDerivedKeyOperationType @@ -684,7 +684,7 @@ func _getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit( require := require.New(t) // Generate a random derived key pair - derivedPrivateKey, err := btcec.NewPrivateKey() + derivedPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err, "_getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit: Error generating a derived key pair") derivedPublicKey := derivedPrivateKey.PubKey().SerializeCompressed() @@ -712,8 +712,9 @@ func _getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit( accessBytes = AssembleAccessBytesWithMetamaskStrings(derivedPublicKey, expirationBlock, transactionSpendingLimit, &DeSoTestnetParams) } - signature := ecdsa.Sign(ownerPrivateKey, Sha256DoubleHash(accessBytes)[:]) + signature, err := ownerPrivateKey.Sign(Sha256DoubleHash(accessBytes)[:]) accessSignature := signature.Serialize() + require.NoError(err, "_getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit: Error creating access signature") return &AuthorizeDerivedKeyMetadata{ derivedPublicKey, @@ -743,7 +744,8 @@ func _getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimitAndDerivedPrivat require.NoError(err, "_getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit: Error in transaction spending limit to bytes") accessBytes = append(accessBytes, transactionSpendingLimitBytes[:]...) 
- accessSignature := ecdsa.Sign(ownerPrivateKey, Sha256DoubleHash(accessBytes)[:]) + accessSignature, err := ownerPrivateKey.Sign(Sha256DoubleHash(accessBytes)[:]) + require.NoError(err, "_getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit: Error creating access signature") // Determine operation type var operationType AuthorizeDerivedKeyOperationType @@ -773,7 +775,10 @@ func _getAccessSignature( return nil, err } accessBytes = append(accessBytes, transactionSpendingLimitBytes...) - accessSignature := ecdsa.Sign(ownerPrivateKey, Sha256DoubleHash(accessBytes)[:]) + accessSignature, err := ownerPrivateKey.Sign(Sha256DoubleHash(accessBytes)[:]) + if err != nil { + return nil, err + } return accessSignature.Serialize(), nil } @@ -917,7 +922,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { require.NoError(err) // Get AuthorizeDerivedKey txn metadata with expiration at block 6 - senderPriv, _ := btcec.PrivKeyFromBytes(senderPrivBytes) + senderPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) var transactionSpendingLimit *TransactionSpendingLimit authTxnMeta, derivedPriv := _getAuthorizeDerivedKeyMetadata(t, senderPriv, 6, false) derivedPrivBase58Check := Base58CheckEncode(derivedPriv.Serialize(), true, params) @@ -944,7 +949,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // This must fail because the txn has to be signed either by owner or derived key. { utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - randomPrivateKey, err := btcec.NewPrivateKey() + randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) _, _, _, err = _doAuthorizeTxn( @@ -970,11 +975,11 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // an invalid private key. This must fail. 
{ utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - randomPrivateKey, err := btcec.NewPrivateKey() + randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) expirationBlockByte := UintToBuf(authTxnMeta.ExpirationBlock) accessBytes := append(authTxnMeta.DerivedPublicKey, expirationBlockByte[:]...) - accessSignatureRandom := ecdsa.Sign(randomPrivateKey, Sha256DoubleHash(accessBytes)[:]) + accessSignatureRandom, err := randomPrivateKey.Sign(Sha256DoubleHash(accessBytes)[:]) require.NoError(err) _, _, _, err = _doAuthorizeTxn( testMeta, @@ -1076,7 +1081,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Should fail. Well... theoretically, it could pass in a distant future. { // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey() + randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) @@ -1148,7 +1153,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Should fail. { // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey() + randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) @@ -1200,7 +1205,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Should fail. 
{ // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey() + randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, @@ -1294,7 +1299,7 @@ func TestAuthorizeDerivedKeyBasic(t *testing.T) { // Should fail. { // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey() + randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) @@ -1730,7 +1735,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { require.NoError(err) // Get AuthorizeDerivedKey txn metadata with expiration at block 6 - senderPriv, _ := btcec.PrivKeyFromBytes(senderPrivBytes) + senderPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) transactionCountLimitMap := make(map[TxnType]uint64) transactionCountLimitMap[TxnTypeAuthorizeDerivedKey] = 1 transactionCountLimitMap[TxnTypeBasicTransfer] = 1 @@ -1766,7 +1771,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // This must fail because the txn has to be signed either by owner or derived key. { utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - randomPrivateKey, err := btcec.NewPrivateKey() + randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) _, _, _, err = _doAuthorizeTxn( @@ -1792,11 +1797,11 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // an invalid private key. This must fail. 
{ utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) - randomPrivateKey, err := btcec.NewPrivateKey() + randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) expirationBlockByte := UintToBuf(authTxnMeta.ExpirationBlock) accessBytes := append(authTxnMeta.DerivedPublicKey, expirationBlockByte[:]...) - accessSignatureRandom := ecdsa.Sign(randomPrivateKey, Sha256DoubleHash(accessBytes)[:]) + accessSignatureRandom, err := randomPrivateKey.Sign(Sha256DoubleHash(accessBytes)[:]) require.NoError(err) _, _, _, err = _doAuthorizeTxn( testMeta, @@ -1897,7 +1902,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Should fail. Well... theoretically, it could pass in a distant future. { // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey() + randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) @@ -1969,7 +1974,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Should fail. { // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey() + randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) @@ -2021,7 +2026,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Should fail. 
{ // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey() + randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) _, _, err = _derivedKeyBasicTransfer(t, db, chain, params, senderPkBytes, recipientPkBytes, @@ -2147,7 +2152,7 @@ func TestAuthorizeDerivedKeyBasicWithTransactionLimits(t *testing.T) { // Should fail. { // Generate a random key pair - randomPrivateKey, err := btcec.NewPrivateKey() + randomPrivateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) randomPrivBase58Check := Base58CheckEncode(randomPrivateKey.Serialize(), true, params) utxoView := NewUtxoView(db, params, chain.postgres, chain.snapshot, chain.eventManager) @@ -2667,7 +2672,7 @@ REPEAT: utxoView, err := mempool.GetAugmentedUniversalView() require.NoError(err) m1PrivKeyBytes, _, err := Base58CheckDecode(m1Priv) - m1PrivateKey, _ := btcec.PrivKeyFromBytes(m1PrivKeyBytes) + m1PrivateKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), m1PrivKeyBytes) m1PKID := utxoView.GetPKIDForPublicKey(m1PkBytes).PKID transactionSpendingLimit := &TransactionSpendingLimit{ GlobalDESOLimit: 100, @@ -2721,8 +2726,8 @@ REPEAT: &DAOCoinMetadata{ ProfilePublicKey: m1PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(100 * NanosPerUnit), - CoinsToBurnNanos: *uint256.NewInt(0), + CoinsToMintNanos: *uint256.NewInt().SetUint64(100 * NanosPerUnit), + CoinsToBurnNanos: *uint256.NewInt(), }, nil, blockHeight+1, @@ -2739,8 +2744,8 @@ REPEAT: &DAOCoinMetadata{ ProfilePublicKey: m1PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(100 * NanosPerUnit), - CoinsToBurnNanos: *uint256.NewInt(0), + CoinsToMintNanos: *uint256.NewInt().SetUint64(100 * NanosPerUnit), + CoinsToBurnNanos: *uint256.NewInt(), }, nil, blockHeight+1, @@ -2762,7 +2767,7 @@ REPEAT: &DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, 
ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(10 * NanosPerUnit), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(10 * NanosPerUnit), }, nil, blockHeight+1, @@ -2779,7 +2784,7 @@ REPEAT: &DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(10 * NanosPerUnit), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(10 * NanosPerUnit), }, nil, blockHeight+1, @@ -2793,7 +2798,7 @@ REPEAT: utxoView, err := mempool.GetAugmentedUniversalView() require.NoError(err) derivedPrivBytes, _, err := Base58CheckDecode(derivedPrivBase58Check) - _, derivedPub := btcec.PrivKeyFromBytes(derivedPrivBytes) + _, derivedPub := btcec.PrivKeyFromBytes(btcec.S256(), derivedPrivBytes) derivedPubBytes := derivedPub.SerializeCompressed() require.NoError(err) @@ -2854,7 +2859,7 @@ REPEAT: &DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, ReceiverPublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(10 * NanosPerUnit), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(10 * NanosPerUnit), }, nil, blockHeight+1, @@ -2879,8 +2884,8 @@ REPEAT: &DAOCoinMetadata{ ProfilePublicKey: m1PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(100 * NanosPerUnit), - CoinsToBurnNanos: *uint256.NewInt(0), + CoinsToMintNanos: *uint256.NewInt().SetUint64(100 * NanosPerUnit), + CoinsToBurnNanos: *uint256.NewInt(), }, nil, blockHeight+1, @@ -3011,7 +3016,7 @@ REPEAT: &DAOCoinMetadata{ ProfilePublicKey: m1PkBytes, OperationType: DAOCoinOperationTypeBurn, - CoinsToBurnNanos: *uint256.NewInt(10 * NanosPerUnit), + CoinsToBurnNanos: *uint256.NewInt().SetUint64(10 * NanosPerUnit), }, nil, blockHeight+1, @@ -3027,7 +3032,7 @@ REPEAT: } m0PrivKeyBytes, _, err := Base58CheckDecode(m0Priv) - m0PrivateKey, _ := btcec.PrivKeyFromBytes(m0PrivKeyBytes) + m0PrivateKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), m0PrivKeyBytes) blockHeight, err = GetBlockTipHeight(db, false) 
require.NoError(err) m0AuthTxnMeta, derived0Priv := _getAuthorizeDerivedKeyMetadataWithTransactionSpendingLimit( @@ -3621,7 +3626,7 @@ REPEAT: BuyingDAOCoinCreatorPublicKey: NewPublicKey(m1PkBytes), SellingDAOCoinCreatorPublicKey: &ZeroPublicKey, ScaledExchangeRateCoinsToSellPerCoinToBuy: exchangeRate, - QuantityToFillInBaseUnits: uint256.NewInt(100), + QuantityToFillInBaseUnits: uint256.NewInt().SetUint64(100), OperationType: DAOCoinLimitOrderOperationTypeBID, FillType: DAOCoinLimitOrderFillTypeGoodTillCancelled, } @@ -3897,11 +3902,11 @@ func TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t *testing // // For each spending limit, we will submit a bunch of txns to make sure the limit works properly. // We will also try updating a spending limit. - groupPriv1, err := btcec.NewPrivateKey() + groupPriv1, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk1 := groupPriv1.PubKey().SerializeCompressed() _ = groupPk1 - derivedPriv1, err := btcec.NewPrivateKey() + derivedPriv1, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupKeyName1 := NewGroupKeyName([]byte("group 1")) groupKeyName2 := NewGroupKeyName([]byte("group 2")) @@ -3920,7 +3925,7 @@ func TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t *testing "test vector for m0PublicKey before the block height authorizing access group transaction", m0Priv, m0PubBytes, derivedPriv1, tv1SpendingLimit, []byte{}, false, AuthorizeDerivedKeyOperationValid, 100, nil, nil) - derivedPriv2, err := btcec.NewPrivateKey() + derivedPriv2, err := btcec.NewPrivateKey(btcec.S256()) tv2SpendingLimit := TransactionSpendingLimit{ GlobalDESOLimit: 10, AccessGroupMemberMap: map[AccessGroupMemberLimitKey]uint64{ @@ -3935,7 +3940,7 @@ func TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t *testing "test vector for m0PublicKey before the block height authorizing access group member transaction", m0Priv, m0PubBytes, derivedPriv2, tv2SpendingLimit, []byte{}, 
false, AuthorizeDerivedKeyOperationValid, 100, nil, nil) - derivedPriv3, err := btcec.NewPrivateKey() + derivedPriv3, err := btcec.NewPrivateKey(btcec.S256()) tv3SpendingLimit := TransactionSpendingLimit{ GlobalDESOLimit: 10, AccessGroupMap: tv1SpendingLimit.AccessGroupMap, @@ -3945,7 +3950,7 @@ func TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t *testing "test vector for m0PublicKey before the block height authorizing access group and access group member transactions", m0Priv, m0PubBytes, derivedPriv3, tv3SpendingLimit, []byte{}, false, AuthorizeDerivedKeyOperationValid, 100, nil, nil) - derivedPriv4, err := btcec.NewPrivateKey() + derivedPriv4, err := btcec.NewPrivateKey(btcec.S256()) tv4SpendingLimit := TransactionSpendingLimit{ GlobalDESOLimit: 10, TransactionCountLimitMap: map[TxnType]uint64{ @@ -3957,7 +3962,7 @@ func TestAuthorizeDerivedKeyWithTransactionSpendingLimitsAccessGroups(t *testing derivedPriv4, tv4SpendingLimit, []byte{}, false, AuthorizeDerivedKeyOperationValid, 100, nil, nil) - derivedPriv4p5, err := btcec.NewPrivateKey() + derivedPriv4p5, err := btcec.NewPrivateKey(btcec.S256()) tv4p5SpendingLimit := TransactionSpendingLimit{ IsUnlimited: true, } @@ -4091,7 +4096,7 @@ func _getDerivedKeyMetadata(t *testing.T, ownerPrivateKeyString string, derivedP ownerPriv, _, err := Base58CheckDecode(ownerPrivateKeyString) require.NoError(err) - ownerPrivKey, _ := btcec.PrivKeyFromBytes(ownerPriv) + ownerPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), ownerPriv) spendingLimit := limit accessSignature, err := _getAccessSignature( derivedPublicKey, expirationBlock, &spendingLimit, ownerPrivKey, blockHeight) diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 52a210636..d1e7cde6f 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -4,7 +4,7 @@ import ( "fmt" "reflect" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v4" "github.com/golang/glog" 
"github.com/pkg/errors" diff --git a/lib/block_view_follow.go b/lib/block_view_follow.go index 6c78bcadc..55a8b2b96 100644 --- a/lib/block_view_follow.go +++ b/lib/block_view_follow.go @@ -2,7 +2,7 @@ package lib import ( "fmt" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/golang/glog" "github.com/pkg/errors" "reflect" @@ -207,7 +207,7 @@ func (bav *UtxoView) _connectFollow( } // TODO: This check feels unnecessary and is expensive - //_, err := btcec.ParsePubKey(txMeta.FollowedPublicKey) + //_, err := btcec.ParsePubKey(txMeta.FollowedPublicKey, btcec.S256()) //if err != nil { // return 0, 0, nil, errors.Wrapf( // RuleErrorFollowParsePubKeyError, "_connectFollow: Parse error: %v", err) diff --git a/lib/block_view_like.go b/lib/block_view_like.go index 3af7983aa..c3bb9355c 100644 --- a/lib/block_view_like.go +++ b/lib/block_view_like.go @@ -2,7 +2,7 @@ package lib import ( "fmt" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/golang/glog" "github.com/pkg/errors" "reflect" diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index c5d0000a9..85fab796f 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -6,7 +6,7 @@ import ( "reflect" "sort" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/holiman/uint256" @@ -187,7 +187,7 @@ func (lockedBalanceEntry *LockedBalanceEntry) RawDecodeWithoutMetadata(blockHeig if err != nil { return errors.Wrap(err, "LockedBalanceEntry.Decode: Problem reading BalanceBaseUnits") } - lockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt(0).SetBytes(balanceBaseUnitsBytes) + lockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt().SetBytes(balanceBaseUnitsBytes) return nil } @@ -1178,14 +1178,14 @@ func (bav *UtxoView) _connectCoinLockup( // Spend the transactor's DAO coin balance. 
transactorBalanceEntry.BalanceNanos = - *uint256.NewInt(0).Sub(&transactorBalanceEntry.BalanceNanos, txMeta.LockupAmountBaseUnits) + *uint256.NewInt().Sub(&transactorBalanceEntry.BalanceNanos, txMeta.LockupAmountBaseUnits) bav._setDAOCoinBalanceEntryMappings(transactorBalanceEntry) // Create a copy of the associated CoinEntry in the event we must roll back the transaction. prevCoinEntry := profileEntry.DAOCoinEntry.Copy() // Update CoinsInCirculation and NumberOfHolders associated with the DAO coin balance. - profileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt(0).Sub( + profileEntry.DAOCoinEntry.CoinsInCirculationNanos = *uint256.NewInt().Sub( &profileEntry.DAOCoinEntry.CoinsInCirculationNanos, txMeta.LockupAmountBaseUnits) if transactorBalanceEntry.BalanceNanos.IsZero() && !prevTransactorBalanceEntry.BalanceNanos.IsZero() { @@ -1216,7 +1216,7 @@ func (bav *UtxoView) _connectCoinLockup( // If this is an unvested lockup, compute any accrued yield. // In the vested lockup case, the yield earned is always zero. - yieldFromTxn := uint256.NewInt(0) + yieldFromTxn := uint256.NewInt() if profileEnablesYield && txMeta.UnlockTimestampNanoSecs == txMeta.VestingEndTimestampNanoSecs { // Compute the lockup duration in nanoseconds. lockupDurationNanoSeconds := txMeta.UnlockTimestampNanoSecs - blockTimestampNanoSecs @@ -1251,8 +1251,8 @@ func (bav *UtxoView) _connectCoinLockup( } // Convert variables to a consistent uint256 representation. This is to use them in SafeUint256 math. - txnYieldBasisPoints256 := uint256.NewInt(txnYieldBasisPoints) - txnYieldEarningDurationNanoSecs256 := uint256.NewInt(uint64(txnYieldEarningDurationNanoSecs)) + txnYieldBasisPoints256 := uint256.NewInt().SetUint64(txnYieldBasisPoints) + txnYieldEarningDurationNanoSecs256 := uint256.NewInt().SetUint64(uint64(txnYieldEarningDurationNanoSecs)) // Compute the yield associated with this operation, checking to ensure there's no overflow. 
yieldFromTxn, err = @@ -1300,7 +1300,7 @@ func (bav *UtxoView) _connectCoinLockup( ProfilePKID: profilePKID, UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: txMeta.VestingEndTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(0), + BalanceBaseUnits: *uint256.NewInt(), } } @@ -1772,7 +1772,7 @@ func SplitVestedLockedBalanceEntry( remainingLockedBalanceEntry.BalanceBaseUnits = *remainingValue // Sanity check the split does not print money. - if uint256.NewInt(0).Add( + if uint256.NewInt().Add( &splitLockedBalanceEntry.BalanceBaseUnits, &remainingLockedBalanceEntry.BalanceBaseUnits). Gt(&lockedBalanceEntry.BalanceBaseUnits) { return nil, nil, @@ -1803,12 +1803,12 @@ func CalculateLockupValueOverElapsedDuration( } // Convert the elapsedDuration to an uint256 - numerator := uint256.NewInt(uint64(elapsedDuration)) + numerator := uint256.NewInt().SetUint64(uint64(elapsedDuration)) // Compute the time that passes over the duration of the locked balance entry denominator, err := SafeUint256().Sub( - uint256.NewInt(uint64(lockedBalanceEntry.VestingEndTimestampNanoSecs)), - uint256.NewInt(uint64(lockedBalanceEntry.UnlockTimestampNanoSecs))) + uint256.NewInt().SetUint64(uint64(lockedBalanceEntry.VestingEndTimestampNanoSecs)), + uint256.NewInt().SetUint64(uint64(lockedBalanceEntry.UnlockTimestampNanoSecs))) if err != nil { return nil, errors.Wrap(err, "CalculateLockupSplitValue: "+ "(lockedBalanceEntry.UnlockTimestamp - lockedBalanceEntry.VestingEndTimestamp) underflow") @@ -1853,13 +1853,13 @@ func CalculateLockupYield( // The SafeUint256 Library uses division to ensure there's no overflow. This leads to possible // unnecessary false overflows in the event the duration or the yield is 0. Hence, we do a separate check here. 
if apyYieldBasisPoints.IsZero() || durationNanoSecs.IsZero() { - return uint256.NewInt(0), nil + return uint256.NewInt(), nil } // Compute the denominators from the nanosecond to year conversion and the basis point computation. denominators, err := SafeUint256().Mul( - uint256.NewInt(NanoSecsPerYear), - uint256.NewInt(10000)) + uint256.NewInt().SetUint64(NanoSecsPerYear), + uint256.NewInt().SetUint64(10000)) if err != nil { return nil, errors.Wrap(RuleErrorCoinLockupCoinYieldOverflow, "CalculateLockupYield (nanoSecsPerYear * 10000)") @@ -1930,7 +1930,7 @@ func (bav *UtxoView) _disconnectCoinLockup( ProfilePKID: operationData.PrevLockedBalanceEntry.ProfilePKID, UnlockTimestampNanoSecs: operationData.PrevLockedBalanceEntry.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: operationData.PrevLockedBalanceEntry.VestingEndTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(0), + BalanceBaseUnits: *uint256.NewInt(), } } if lockedBalanceEntry.BalanceBaseUnits.Lt(&operationData.PrevLockedBalanceEntry.BalanceBaseUnits) { @@ -2308,7 +2308,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( ProfilePKID: profilePKID, UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(0), + BalanceBaseUnits: *uint256.NewInt(), } } prevSenderLockedBalanceEntry := senderLockedBalanceEntry.Copy() @@ -2320,7 +2320,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( } // Debit the sender's balance entry. - senderLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt(0).Sub( + senderLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt().Sub( &senderLockedBalanceEntry.BalanceBaseUnits, txMeta.LockedCoinsToTransferBaseUnits) // Fetch the recipient's balance entry. 
@@ -2340,7 +2340,7 @@ func (bav *UtxoView) _connectCoinLockupTransfer( ProfilePKID: profilePKID, UnlockTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: txMeta.UnlockTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(0), + BalanceBaseUnits: *uint256.NewInt(), } } prevReceiverLockedBalanceEntry := receiverLockedBalanceEntry.Copy() @@ -2467,7 +2467,7 @@ func (bav *UtxoView) _disconnectCoinLockupTransfer( ProfilePKID: operationData.PrevSenderLockedBalanceEntry.ProfilePKID, UnlockTimestampNanoSecs: operationData.PrevSenderLockedBalanceEntry.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: operationData.PrevSenderLockedBalanceEntry.VestingEndTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(0), + BalanceBaseUnits: *uint256.NewInt(), } } receiverLockedBalanceEntry, err := @@ -2485,7 +2485,7 @@ func (bav *UtxoView) _disconnectCoinLockupTransfer( ProfilePKID: operationData.PrevReceiverLockedBalanceEntry.ProfilePKID, UnlockTimestampNanoSecs: operationData.PrevReceiverLockedBalanceEntry.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: operationData.PrevReceiverLockedBalanceEntry.VestingEndTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(0), + BalanceBaseUnits: *uint256.NewInt(), } } @@ -2596,7 +2596,7 @@ func (bav *UtxoView) _connectCoinUnlock( } // Create an unlockedBalance uint256 to track what will be given back to the user. - unlockedBalance := uint256.NewInt(0) + unlockedBalance := uint256.NewInt() // Unlock all unvested unlockable locked balance entries. var prevLockedBalanceEntries []*LockedBalanceEntry @@ -2612,7 +2612,7 @@ func (bav *UtxoView) _connectCoinUnlock( prevLockedBalanceEntries = append(prevLockedBalanceEntries, unlockableLockedBalanceEntry.Copy()) // Update the LockedBalanceEntry and delete the record. 
- unlockableLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt(0) + unlockableLockedBalanceEntry.BalanceBaseUnits = *uint256.NewInt() bav._deleteLockedBalanceEntry(unlockableLockedBalanceEntry) } @@ -2721,7 +2721,7 @@ func CalculateVestedEarnings( ) { // Check if this lockup should not be unlocked right now. if blockTimestampNanoSecs <= lockedBalanceEntry.UnlockTimestampNanoSecs { - return uint256.NewInt(0), nil + return uint256.NewInt(), nil } // Check if this lockup should be fully unlocked. @@ -2734,14 +2734,14 @@ func CalculateVestedEarnings( lockedBalanceEntry, blockTimestampNanoSecs-lockedBalanceEntry.UnlockTimestampNanoSecs) if err != nil { - return uint256.NewInt(0), + return uint256.NewInt(), errors.Wrap(err, "CalculateVestedEarnings failed to compute vestedEarnings") } // Sanity check that vestedEarnings < BalanceBaseUnits if vestedEarnings.Gt(&lockedBalanceEntry.BalanceBaseUnits) || vestedEarnings.Eq(&lockedBalanceEntry.BalanceBaseUnits) { - return uint256.NewInt(0), + return uint256.NewInt(), errors.New("ComputeVestedEarnings: " + "vested earnings >= outstanding balance; this shouldn't be possible") } @@ -2804,7 +2804,7 @@ func (bav *UtxoView) _disconnectCoinUnlock( ProfilePKID: prevLockedBalanceEntry.ProfilePKID, UnlockTimestampNanoSecs: prevLockedBalanceEntry.UnlockTimestampNanoSecs, VestingEndTimestampNanoSecs: prevLockedBalanceEntry.VestingEndTimestampNanoSecs, - BalanceBaseUnits: *uint256.NewInt(0), + BalanceBaseUnits: *uint256.NewInt(), } } if prevLockedBalanceEntry.BalanceBaseUnits.Lt(&lockedBalanceEntry.BalanceBaseUnits) { diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 33c52bc9f..4c4cf5255 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -2,9 +2,10 @@ package lib import ( "testing" + "time" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v4" "github.com/holiman/uint256" "github.com/pkg/errors" @@ -71,7 +72,7 @@ func 
TestCoinLockupsForkHeight(t *testing.T) { m0Pub, 1000, 1000, - uint256.NewInt(100), + uint256.NewInt().SetUint64(100), 0) _, _, _, err2 := _updateCoinLockupParams( t, chain, db, params, @@ -92,7 +93,7 @@ func TestCoinLockupsForkHeight(t *testing.T) { NewPublicKey(m3PkBytes), NewPublicKey(m0PkBytes), 1000, - uint256.NewInt(1)) + uint256.NewInt().SetUint64(1)) _, _, _, err4 := _coinUnlockWithConnectTimestamp( t, chain, db, params, feeRateNanosPerKb, @@ -117,33 +118,33 @@ func TestCalculateLockupYield(t *testing.T) { // Ensure that a lockup with zero duration has zero yield. yield, err = CalculateLockupYield( MaxUint256, - uint256.NewInt(0), - uint256.NewInt(1)) + uint256.NewInt(), + uint256.NewInt().SetUint64(1)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt(0)) + require.Equal(t, *yield, *uint256.NewInt()) // Ensure that a lockup with zero apyYieldBasisPoints has zero yield. yield, err = CalculateLockupYield( MaxUint256, - uint256.NewInt(1), - uint256.NewInt(0)) + uint256.NewInt().SetUint64(1), + uint256.NewInt()) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt(0)) + require.Equal(t, *yield, *uint256.NewInt()) // Ensure that when principal is MaxUint256 and the apy yield is 2bp, // the operation fails due to lack of precision. _, err = CalculateLockupYield( MaxUint256, - uint256.NewInt(2), - uint256.NewInt(1)) + uint256.NewInt().SetUint64(2), + uint256.NewInt().SetUint64(1)) require.Contains(t, err.Error(), RuleErrorCoinLockupCoinYieldOverflow) // Ensure that when principal is MaxUint256 and the duration is 2ns, // the operation fails due to lack of precision. 
_, err = CalculateLockupYield( MaxUint256, - uint256.NewInt(1), - uint256.NewInt(2)) + uint256.NewInt().SetUint64(1), + uint256.NewInt().SetUint64(2)) require.Contains(t, err.Error(), RuleErrorCoinLockupCoinYieldOverflow) // Ensure that the CalculateLockupYield operation acts as a floor of @@ -161,11 +162,11 @@ func TestCalculateLockupYield(t *testing.T) { // In theory, this should return a yield of 1 without any overflow in the operation. // We test this below: yield, err = CalculateLockupYield( - uint256.NewInt(365*24*10000), - uint256.NewInt(60*60), - uint256.NewInt(1e9)) + uint256.NewInt().SetUint64(365*24*10000), + uint256.NewInt().SetUint64(60*60), + uint256.NewInt().SetUint64(1e9)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt(1)) + require.Equal(t, *yield, *uint256.NewInt().SetUint64(1)) // Knowing this, we can now check to ensure the edges of the CalculateLockupYield // operation are behaving correctly and never minting more coins than expected. @@ -173,21 +174,21 @@ func TestCalculateLockupYield(t *testing.T) { // To test this, we set duration = 1e9 - 1. // (This decreases only the largest factor, leading to the smallest decrease possible in the numerator) yield, err = CalculateLockupYield( - uint256.NewInt(365*24*10000), - uint256.NewInt(60*60), - uint256.NewInt(1e9-1)) + uint256.NewInt().SetUint64(365*24*10000), + uint256.NewInt().SetUint64(60*60), + uint256.NewInt().SetUint64(1e9-1)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt(0)) + require.Equal(t, *yield, *uint256.NewInt().SetUint64(0)) // If we only slightly increase the numerator, we should expect to see the yield remain the same. 
// To test this, we set duration = 1e9 + 1 // (This increases only the largest factor, leading to the smallest increase possible in the numerator) yield, err = CalculateLockupYield( - uint256.NewInt(365*24*10000), - uint256.NewInt(60*60), - uint256.NewInt(1e9+1)) + uint256.NewInt().SetUint64(365*24*10000), + uint256.NewInt().SetUint64(60*60), + uint256.NewInt().SetUint64(1e9+1)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt(1)) + require.Equal(t, *yield, *uint256.NewInt().SetUint64(1)) // We should only see an increase to the output yield if the numerator is scaled by a constant. // To do this, we can iterate through various constants and see if the output yield matches. @@ -195,19 +196,19 @@ func TestCalculateLockupYield(t *testing.T) { // We also ensure that slight deviations do not alter the output. for ii := uint64(0); ii < 100000; ii++ { yield, err = CalculateLockupYield( - uint256.NewInt(ii*365*24*10000), - uint256.NewInt(60*60), - uint256.NewInt(1e9)) + uint256.NewInt().SetUint64(ii*365*24*10000), + uint256.NewInt().SetUint64(60*60), + uint256.NewInt().SetUint64(1e9)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt(ii)) + require.Equal(t, *yield, *uint256.NewInt().SetUint64(ii)) // Slight increase to the numerator. Ensure we don't create more yield than expected. yield, err = CalculateLockupYield( - uint256.NewInt(ii*365*24*10000), - uint256.NewInt(60*60), - uint256.NewInt(1e9+1)) + uint256.NewInt().SetUint64(ii*365*24*10000), + uint256.NewInt().SetUint64(60*60), + uint256.NewInt().SetUint64(1e9+1)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt(ii)) + require.Equal(t, *yield, *uint256.NewInt().SetUint64(ii)) // Slight decrease to the numerator. Ensure we create strictly less yield. 
expectedValue := ii - 1 @@ -215,11 +216,11 @@ func TestCalculateLockupYield(t *testing.T) { expectedValue = 0 } yield, err = CalculateLockupYield( - uint256.NewInt(ii*365*24*10000), - uint256.NewInt(60*60), - uint256.NewInt(1e9-1)) + uint256.NewInt().SetUint64(ii*365*24*10000), + uint256.NewInt().SetUint64(60*60), + uint256.NewInt().SetUint64(1e9-1)) require.NoError(t, err) - require.Equal(t, *yield, *uint256.NewInt(expectedValue)) + require.Equal(t, *yield, *uint256.NewInt().SetUint64(expectedValue)) } } @@ -236,7 +237,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 0, 0, uint256.NewInt(0), 0) + 0, 0, uint256.NewInt(), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupOfAmountZero) } @@ -246,7 +247,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m2Pub, m0Pub, - 0, 0, uint256.NewInt(1), 0) + 0, 0, uint256.NewInt().SetUint64(1), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupOnNonExistentProfile) } @@ -256,7 +257,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), m0Pub, - 0, 0, uint256.NewInt(1), 0) + 0, 0, uint256.NewInt().SetUint64(1), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupCannotLockupZeroKey) } @@ -266,7 +267,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 0, 0, uint256.NewInt(1), 0) + 0, 0, uint256.NewInt().SetUint64(1), 0) require.Contains(t, err.Error(), 
RuleErrorCoinLockupInvalidLockupDuration) } @@ -276,7 +277,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 0, 0, uint256.NewInt(1), 1) + 0, 0, uint256.NewInt().SetUint64(1), 1) require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidLockupDuration) } @@ -286,7 +287,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 900, uint256.NewInt(1), 950) + 1000, 900, uint256.NewInt().SetUint64(1), 950) require.Contains(t, err.Error(), RuleErrorCoinLockupInvalidVestingEndTimestamp) } @@ -296,7 +297,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, Base58CheckEncode(ZeroPublicKey.ToBytes(), false, testMeta.params), - 1000, 1000, uint256.NewInt(1), 950) + 1000, 1000, uint256.NewInt().SetUint64(1), 950) require.Contains(t, err.Error(), RuleErrorCoinLockupZeroPublicKeyAsRecipient) } @@ -306,7 +307,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1, 1, uint256.NewInt(1e10), 0) + 1, 1, uint256.NewInt().SetUint64(1e10), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupInsufficientCoins) } @@ -325,12 +326,12 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { ) _daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(1000), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), ReceiverPublicKey: m1PkBytes, }) 
_daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m0PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(1000), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), ReceiverPublicKey: m2PkBytes, }) @@ -340,7 +341,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m0Pub, m0Pub, - 1000, 1000, uint256.NewInt(1000), 0) + 1000, 1000, uint256.NewInt().SetUint64(1000), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToProfileOwner) } @@ -363,7 +364,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m0Pub, m3Pub, - 1000, 1000, uint256.NewInt(1000), 0) + 1000, 1000, uint256.NewInt().SetUint64(1000), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToDAOMembers) } @@ -373,7 +374,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m0Pub, m2Pub, - 1000, 1000, uint256.NewInt(1000), 0) + 1000, 1000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -399,7 +400,7 @@ func TestCoinLockupTxnRuleErrors(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 1000, uint256.NewInt(1000), 0) + 1000, 1000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } } @@ -547,7 +548,7 @@ func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { NewPublicKey(m3PkBytes), NewPublicKey(m0PkBytes), 0, - uint256.NewInt(0)) + uint256.NewInt()) require.Contains(t, err.Error(), 
RuleErrorCoinLockupTransferOfAmountZero) } @@ -631,7 +632,7 @@ func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { _coinLockupWithTestMetaAndConnectTimestamp( testMeta, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1, 1, uint256.NewInt(1e6), 0) + 1, 1, uint256.NewInt().SetUint64(1e6), 0) // Send 1000 locked M0 coins to M2. _coinLockupTransferWithTestMeta( @@ -642,7 +643,7 @@ func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { NewPublicKey(m2PkBytes), NewPublicKey(m0PkBytes), 1, - uint256.NewInt(1e6), + uint256.NewInt().SetUint64(1e6), ) // Attempt to have M2 send locked M0 coins to M3. @@ -655,7 +656,7 @@ func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { NewPublicKey(m3PkBytes), NewPublicKey(m0PkBytes), 1, - uint256.NewInt(1)) + uint256.NewInt().SetUint64(1)) require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToProfileOwner) } @@ -685,7 +686,7 @@ func TestCoinLockupTransferTxnRuleErrors(t *testing.T) { NewPublicKey(m3PkBytes), NewPublicKey(m0PkBytes), 1, - uint256.NewInt(1)) + uint256.NewInt().SetUint64(1)) require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToDAOMembers) } @@ -794,7 +795,7 @@ func TestLockupBasedOverflowsOnProfiles(t *testing.T) { // Ensure CoinsInCirculationNanos and NumberOfHolders are now zero utxoView := NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) profileEntry := utxoView.GetProfileEntryForPublicKey(m2PkBytes) - require.Equal(t, *uint256.NewInt(0), profileEntry.DAOCoinEntry.CoinsInCirculationNanos) + require.Equal(t, *uint256.NewInt(), profileEntry.DAOCoinEntry.CoinsInCirculationNanos) require.Equal(t, uint64(0), profileEntry.DAOCoinEntry.NumberOfHolders) } @@ -820,7 +821,7 @@ func TestLockupBasedOverflowsOnProfiles(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m2Pub, m2Priv, m2Pub, m2Pub, - 1000, 1000, uint256.NewInt(1), 0) + 
1000, 1000, uint256.NewInt().SetUint64(1), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupYieldCausesOverflowInLockedBalanceEntry) } @@ -964,7 +965,7 @@ func TestLockupBasedOverflowsOnProfiles(t *testing.T) { DAOCoinMetadata{ ProfilePublicKey: m2PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(1), + CoinsToMintNanos: *uint256.NewInt().SetUint64(1), CoinsToBurnNanos: uint256.Int{}, TransferRestrictionStatus: 0, }) @@ -1050,7 +1051,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { m1Pub, m1Priv, m1Pub, m1Pub, 365*24*60*60*1e9+365*12*60*60*1e9, 365*24*60*60*1e9+365*12*60*60*1e9, - uint256.NewInt(10000), + uint256.NewInt().SetUint64(10000), 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 10000 base units. @@ -1063,15 +1064,15 @@ func TestLockupStandardProfileFlows(t *testing.T) { 365*24*60*60*1e9+365*12*60*60*1e9, 365*24*60*60*1e9+365*12*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt(10000), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt().SetUint64(10000), lockedBalanceEntry.BalanceBaseUnits) // Check to ensure that the BalanceEntry has decreased by exactly 10000. newBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( m1PkBytes, m1PkBytes, true) require.True(t, originalBalanceEntry.BalanceNanos.Gt(&newBalanceEntry.BalanceNanos)) require.Equal(t, - *uint256.NewInt(0).Sub(&originalBalanceEntry.BalanceNanos, &newBalanceEntry.BalanceNanos), - *uint256.NewInt(10000)) + *uint256.NewInt().Sub(&originalBalanceEntry.BalanceNanos, &newBalanceEntry.BalanceNanos), + *uint256.NewInt().SetUint64(10000)) } // Have m1 lockup 10000 m1 DAO tokens for one year. 
@@ -1093,7 +1094,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { m1Pub, m1Priv, m1Pub, m1Pub, 2*365*24*60*60*1e9, 2*365*24*60*60*1e9, - uint256.NewInt(10000), + uint256.NewInt().SetUint64(10000), 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 10500 base units. @@ -1106,15 +1107,15 @@ func TestLockupStandardProfileFlows(t *testing.T) { 2*365*24*60*60*1e9, 2*365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt(10500), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt().SetUint64(10500), lockedBalanceEntry.BalanceBaseUnits) // Check to ensure that the BalanceEntry has decreased by exactly 10000. newBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( m1PkBytes, m1PkBytes, true) require.True(t, originalBalanceEntry.BalanceNanos.Gt(&newBalanceEntry.BalanceNanos)) require.Equal(t, - *uint256.NewInt(0).Sub(&originalBalanceEntry.BalanceNanos, &newBalanceEntry.BalanceNanos), - *uint256.NewInt(10000)) + *uint256.NewInt().Sub(&originalBalanceEntry.BalanceNanos, &newBalanceEntry.BalanceNanos), + *uint256.NewInt().SetUint64(10000)) } // Have m1 lockup 10000 m1 DAO tokens for one and a half year. @@ -1127,7 +1128,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { m1Pub, m1Priv, m1Pub, m1Pub, 2*365*24*60*60*1e9+365*12*60*60*1e9, 2*365*24*60*60*1e9+365*12*60*60*1e9, - uint256.NewInt(10000), + uint256.NewInt().SetUint64(10000), 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 10500 base units. @@ -1142,7 +1143,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { 2*365*24*60*60*1e9+365*12*60*60*1e9, 2*365*24*60*60*1e9+365*12*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt(10500), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt().SetUint64(10500), lockedBalanceEntry.BalanceBaseUnits) } // Have m1 lockup 10000 m1 DAO tokens for two years. 
@@ -1154,7 +1155,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { m1Pub, m1Priv, m1Pub, m1Pub, 3*365*24*60*60*1e9, 3*365*24*60*60*1e9, - uint256.NewInt(10000), + uint256.NewInt().SetUint64(10000), 365*24*60*60*1e9) // Check to ensure the resulting locked balance entry has 12000 base units. @@ -1169,7 +1170,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { 3*365*24*60*60*1e9, 3*365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt(12000), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt().SetUint64(12000), lockedBalanceEntry.BalanceBaseUnits) } // Have m1 distribute 1 year locked tokens. @@ -1182,7 +1183,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { NewPublicKey(m2PkBytes), NewPublicKey(m1PkBytes), 2*365*24*60*60*1e9, - uint256.NewInt(500), + uint256.NewInt().SetUint64(500), ) _coinLockupTransferWithTestMeta( testMeta, @@ -1192,7 +1193,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { NewPublicKey(m3PkBytes), NewPublicKey(m1PkBytes), 2*365*24*60*60*1e9, - uint256.NewInt(500), + uint256.NewInt().SetUint64(500), ) _coinLockupTransferWithTestMeta( testMeta, @@ -1202,7 +1203,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { NewPublicKey(m4PkBytes), NewPublicKey(m1PkBytes), 2*365*24*60*60*1e9, - uint256.NewInt(500), + uint256.NewInt().SetUint64(500), ) // Check to ensure the resulting locked balance entry for m1 has 9000 base units. @@ -1217,7 +1218,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { 2*365*24*60*60*1e9, 2*365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt(9000), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt().SetUint64(9000), lockedBalanceEntry.BalanceBaseUnits) } // Check to make sure locked tokens are not liquid. 
@@ -1229,7 +1230,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { NewPublicKey(m3PkBytes), NewPublicKey(m1PkBytes), 2*365*24*60*60*1e9, - uint256.NewInt(500), + uint256.NewInt().SetUint64(500), ) require.Contains(t, err.Error(), RuleErrorCoinLockupTransferRestrictedToProfileOwner) } @@ -1259,7 +1260,7 @@ func TestLockupStandardProfileFlows(t *testing.T) { newBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey( m2PkBytes, m1PkBytes, true) require.True(t, newBalanceEntry.BalanceNanos.Gt(&originalBalanceEntry.BalanceNanos)) - require.Equal(t, *uint256.NewInt(500), *uint256.NewInt(0).Sub( + require.Equal(t, *uint256.NewInt().SetUint64(500), *uint256.NewInt().Sub( &newBalanceEntry.BalanceNanos, &originalBalanceEntry.BalanceNanos)) } } @@ -1283,7 +1284,7 @@ func TestLockupWithDerivedKey(t *testing.T) { senderPrivBytes, _, err := Base58CheckDecode(m0Priv) require.NoError(t, err) - m0PrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) + m0PrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) // Setup helper functions for creating m0 derived keys newUtxoView := func() *UtxoView { @@ -1493,7 +1494,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt(1000), + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1506,7 +1507,7 @@ func TestLockupWithDerivedKey(t *testing.T) { m1Pub, m1Priv, m1Pub, m1Pub, 365*24*60*60*1e9, 365*24*60*60*1e9, - uint256.NewInt(1000), + uint256.NewInt().SetUint64(1000), 0) _coinLockupTransferWithTestMeta( testMeta, @@ -1516,7 +1517,7 @@ func TestLockupWithDerivedKey(t *testing.T) { NewPublicKey(m0PkBytes), NewPublicKey(m1PkBytes), 365*24*60*60*1e9, - uint256.NewInt(1000), + 
uint256.NewInt().SetUint64(1000), ) coinUnlockMetadata := &CoinUnlockMetadata{ProfilePublicKey: NewPublicKey(m1PkBytes)} _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( @@ -1528,7 +1529,7 @@ func TestLockupWithDerivedKey(t *testing.T) { // (Correct profile + correct operation) _daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(1000), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), ReceiverPublicKey: m0PkBytes, }) coinLockupMetadata = &CoinLockupMetadata{ @@ -1536,7 +1537,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m1PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt(1000), + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1546,7 +1547,7 @@ func TestLockupWithDerivedKey(t *testing.T) { // Ensure the operation cannot be performed again as the transaction limit was set to 1. 
_daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(1000), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), ReceiverPublicKey: m0PkBytes, }) coinLockupMetadata = &CoinLockupMetadata{ @@ -1554,7 +1555,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m1PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt(1000), + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1582,7 +1583,7 @@ func TestLockupWithDerivedKey(t *testing.T) { // Have m1 transfer 1000 unlocked m1 coins to m0 _daoCoinTransferTxnWithTestMeta(testMeta, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, DAOCoinTransferMetadata{ ProfilePublicKey: m1PkBytes, - DAOCoinToTransferNanos: *uint256.NewInt(1000), + DAOCoinToTransferNanos: *uint256.NewInt().SetUint64(1000), ReceiverPublicKey: m0PkBytes, }) @@ -1593,7 +1594,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m1PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt(1000), + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1607,7 +1608,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt(1000), + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = 
_submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1629,7 +1630,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt(1000), + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 365*24*60*60*1e9+1, @@ -1664,7 +1665,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt(1000), + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1677,7 +1678,7 @@ func TestLockupWithDerivedKey(t *testing.T) { m1Pub, m1Priv, m1Pub, m1Pub, 365*24*60*60*1e9, 365*24*60*60*1e9, - uint256.NewInt(1000), + uint256.NewInt().SetUint64(1000), 0) _coinLockupTransferWithTestMeta( testMeta, @@ -1687,7 +1688,7 @@ func TestLockupWithDerivedKey(t *testing.T) { NewPublicKey(m0PkBytes), NewPublicKey(m1PkBytes), 365*24*60*60*1e9, - uint256.NewInt(1000), + uint256.NewInt().SetUint64(1000), ) // Have m0 unlock the 1,000 locked m1 tokens. 
@@ -1704,7 +1705,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt(1000), + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 365*24*60*60*1e9+1, @@ -1787,7 +1788,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt(1000), + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1802,7 +1803,7 @@ func TestLockupWithDerivedKey(t *testing.T) { 365*24*60*60*1e9, 365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt(1000), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt().SetUint64(1000), lockedBalanceEntry.BalanceBaseUnits) require.Equal(t, int64(365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) // Perform the second lockup operation of 1000 m0 coins at 2yrs @@ -1811,7 +1812,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 2 * 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 2 * 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt(1000), + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 0, @@ -1826,7 +1827,7 @@ func TestLockupWithDerivedKey(t *testing.T) { 2*365*24*60*60*1e9, 2*365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt(1000), 
lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt().SetUint64(1000), lockedBalanceEntry.BalanceBaseUnits) require.Equal(t, int64(2*365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) // Perform the first transfer operation to m1 of 500 locked m0 coins @ 1yr @@ -1834,7 +1835,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m1PkBytes), ProfilePublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 365 * 24 * 60 * 60 * 1e9, - LockedCoinsToTransferBaseUnits: uint256.NewInt(500), + LockedCoinsToTransferBaseUnits: uint256.NewInt().SetUint64(500), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupTransferMetadata}, 0, @@ -1849,7 +1850,7 @@ func TestLockupWithDerivedKey(t *testing.T) { 365*24*60*60*1e9, 365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt(500), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits) require.Equal(t, int64(365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) // Perform the second transfer operation to m1 of 500 locked m0 coins @ 2yrs @@ -1857,7 +1858,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m1PkBytes), ProfilePublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 2 * 365 * 24 * 60 * 60 * 1e9, - LockedCoinsToTransferBaseUnits: uint256.NewInt(500), + LockedCoinsToTransferBaseUnits: uint256.NewInt().SetUint64(500), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupTransferMetadata}, 0, @@ -1872,7 +1873,7 @@ func TestLockupWithDerivedKey(t *testing.T) { 2*365*24*60*60*1e9, 2*365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt(500), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt().SetUint64(500), lockedBalanceEntry.BalanceBaseUnits) require.Equal(t, 
int64(2*365*24*60*60*1e9), lockedBalanceEntry.UnlockTimestampNanoSecs) // Perform the first unlock operation of 500 m1 tokens @ 1yr @@ -1889,8 +1890,8 @@ func TestLockupWithDerivedKey(t *testing.T) { testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) balanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) require.True(t, balanceEntry.BalanceNanos.Gt(&startingBalance)) - require.Equal(t, *uint256.NewInt(500), - *uint256.NewInt(0).Sub(&balanceEntry.BalanceNanos, &startingBalance)) + require.Equal(t, *uint256.NewInt().SetUint64(500), + *uint256.NewInt().Sub(&balanceEntry.BalanceNanos, &startingBalance)) lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -1911,8 +1912,8 @@ func TestLockupWithDerivedKey(t *testing.T) { testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) balanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) require.True(t, balanceEntry.BalanceNanos.Gt(&startingBalance)) - require.Equal(t, *uint256.NewInt(1000), - *uint256.NewInt(0).Sub(&balanceEntry.BalanceNanos, &startingBalance)) + require.Equal(t, *uint256.NewInt().SetUint64(1000), + *uint256.NewInt().Sub(&balanceEntry.BalanceNanos, &startingBalance)) lockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -1928,7 +1929,7 @@ func TestLockupWithDerivedKey(t *testing.T) { RecipientPublicKey: NewPublicKey(m0PkBytes), UnlockTimestampNanoSecs: 3 * 365 * 24 * 60 * 60 * 1e9, VestingEndTimestampNanoSecs: 3 * 365 * 24 * 60 * 60 * 1e9, - LockupAmountBaseUnits: uint256.NewInt(1000), + LockupAmountBaseUnits: uint256.NewInt().SetUint64(1000), } _, err = _submitLockupTxnWithDerivedKeyAndTimestamp( m0PkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: coinLockupMetadata}, 
2*365*24*60*60*1e9+2, @@ -1955,7 +1956,7 @@ func TestLockupDisconnects(t *testing.T) { m0Pub, m0Priv, m0Pub, m0Pub, 2*365*24*60*60*1e9, 2*365*24*60*60*1e9, - uint256.NewInt(1000), + uint256.NewInt().SetUint64(1000), 365*24*60*60*1e9) require.NoError(t, err) utxoOps2, txn2, _, err := _coinLockupWithConnectTimestamp( @@ -1963,7 +1964,7 @@ func TestLockupDisconnects(t *testing.T) { m0Pub, m0Priv, m0Pub, m0Pub, 2*365*24*60*60*1e9, 2*365*24*60*60*1e9, - uint256.NewInt(1000), + uint256.NewInt().SetUint64(1000), 365*24*60*60*1e9) require.NoError(t, err) txHash := txn2.Hash() @@ -1983,9 +1984,9 @@ func TestLockupDisconnects(t *testing.T) { 2*365*24*60*60*1e9, 2*365*24*60*60*1e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt(1000), lockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt().SetUint64(1000), lockedBalanceEntry.BalanceBaseUnits) balanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) - require.Equal(t, *uint256.NewInt(999000), balanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt().SetUint64(999000), balanceEntry.BalanceNanos) err = utxoView.DisconnectTransaction(txn1, txn1.Hash(), utxoOps1, blockHeight) require.NoError(t, utxoView.FlushToDb(uint64(blockHeight))) require.NoError(t, err) @@ -1999,7 +2000,7 @@ func TestLockupDisconnects(t *testing.T) { 2*365*24*60*60*1e9) require.True(t, lockedBalanceEntry == nil) balanceEntry, _, _ = utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) - require.Equal(t, *uint256.NewInt(1000000), balanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt().SetUint64(1000000), balanceEntry.BalanceNanos) // // Test Update Coin Lockup Params for Profiles @@ -2221,7 +2222,7 @@ func TestLockupDisconnects(t *testing.T) { m4be, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m4PkBytes, m4PkBytes) require.NoError(t, err) require.Equal(t, *MaxUint256, m4LockedBalanceEntry.BalanceBaseUnits) - 
require.Equal(t, *uint256.NewInt(0), m4be.BalanceNanos) + require.Equal(t, *uint256.NewInt(), m4be.BalanceNanos) utxoOps, txn, _, err = _coinUnlockWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, @@ -2263,7 +2264,7 @@ func TestLockupDisconnects(t *testing.T) { 1000) require.NoError(t, err) m4be, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m4PkBytes, m4PkBytes) - require.Equal(t, *uint256.NewInt(0), m4be.BalanceNanos) + require.Equal(t, *uint256.NewInt(), m4be.BalanceNanos) require.Equal(t, *MaxUint256, m4LockedBalanceEntry.BalanceBaseUnits) } @@ -2290,8 +2291,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.True(t, m0LeftYieldCurvePoint == nil) m0BalanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) m3BalanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) - require.Equal(t, *uint256.NewInt(1000000), m0BalanceEntry.BalanceNanos) - require.Equal(t, *uint256.NewInt(0), m3BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt().SetUint64(1000000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -2322,12 +2323,12 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { _signTxn(t, updateTxn, m0Priv) lockupTxn, _, _, _, err := testMeta.chain.CreateCoinLockupTxn( m0PkBytes, m0PkBytes, m0PkBytes, tipTimestamp+2e9, tipTimestamp+2e9, - uint256.NewInt(1000), nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + uint256.NewInt().SetUint64(1000), nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) _signTxn(t, lockupTxn, m0Priv) transferTxn, _, _, _, err := testMeta.chain.CreateCoinLockupTransferTxn( m0PkBytes, m3PkBytes, m0PkBytes, tipTimestamp+2e9, - uint256.NewInt(1000), 
nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) + uint256.NewInt().SetUint64(1000), nil, testMeta.feeRateNanosPerKb, nil, []*DeSoOutput{}) require.NoError(t, err) _signTxn(t, transferTxn, m0Priv) @@ -2369,8 +2370,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.Equal(t, uint64(1000), m0LeftYieldCurvePoint.LockupYieldAPYBasisPoints) m0BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) - require.Equal(t, *uint256.NewInt(999000), m0BalanceEntry.BalanceNanos) - require.Equal(t, *uint256.NewInt(0), m3BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt().SetUint64(999000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -2386,7 +2387,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { tipTimestamp+2e9, tipTimestamp+2e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt(1000), m3LockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt().SetUint64(1000), m3LockedBalanceEntry.BalanceBaseUnits) // // Construct a subsequent second block and test unlock. 
@@ -2424,8 +2425,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) m0BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) - require.Equal(t, *uint256.NewInt(999000), m0BalanceEntry.BalanceNanos) - require.Equal(t, *uint256.NewInt(1000), m3BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt().SetUint64(999000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt().SetUint64(1000), m3BalanceEntry.BalanceNanos) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -2477,8 +2478,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.Equal(t, uint64(1000), m0LeftYieldCurvePoint.LockupYieldAPYBasisPoints) m0BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) - require.Equal(t, *uint256.NewInt(999000), m0BalanceEntry.BalanceNanos) - require.Equal(t, *uint256.NewInt(0), m3BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt().SetUint64(999000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -2494,7 +2495,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { tipTimestamp+2e9, tipTimestamp+2e9) require.NoError(t, err) - require.Equal(t, *uint256.NewInt(1000), m3LockedBalanceEntry.BalanceBaseUnits) + require.Equal(t, *uint256.NewInt().SetUint64(1000), m3LockedBalanceEntry.BalanceBaseUnits) // // Disconnect the first block and 
ensure state is reverted. @@ -2529,8 +2530,8 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.True(t, m0LeftYieldCurvePoint == nil) m0BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) m3BalanceEntry, _, _ = utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m3PkBytes, m0PkBytes) - require.Equal(t, *uint256.NewInt(1000000), m0BalanceEntry.BalanceNanos) - require.Equal(t, *uint256.NewInt(0), m3BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt().SetUint64(1000000), m0BalanceEntry.BalanceNanos) + require.Equal(t, *uint256.NewInt(), m3BalanceEntry.BalanceNanos) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForHODLerPKIDProfilePKIDUnlockTimestampNanoSecsVestingEndTimestampNanoSecs( m0PKID, @@ -2561,7 +2562,7 @@ func TestCoinLockupIndirectRecipients(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m3Pub, - 1000, 1000, uint256.NewInt(1000), 0) + 1000, 1000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -2581,7 +2582,7 @@ func TestCoinLockupIndirectRecipients(t *testing.T) { }) require.NoError(t, err) require.True(t, m3LockedBalanceEntry != nil) - require.True(t, m3LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1000))) + require.True(t, m3LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) // Check the m0 LockedBalanceEntry as non-existent m0LockedBalanceEntry, err := utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ @@ -2598,7 +2599,7 @@ func TestCoinLockupIndirectRecipients(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m3Pub, - 1050, 1100, uint256.NewInt(1000), 0) + 1050, 1100, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -2611,7 
+2612,7 @@ func TestCoinLockupIndirectRecipients(t *testing.T) { }) require.NoError(t, err) require.True(t, m3LockedBalanceEntry != nil) - require.True(t, m3LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1000))) + require.True(t, m3LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) // Check the m0 LockedBalanceEntry as non-existent m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ @@ -2636,7 +2637,7 @@ func TestSimpleVestedLockup(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt(1000), 0) + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -2679,14 +2680,14 @@ func TestSimpleVestedLockup(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) // Get the updated m0 balance entry base units and ensure it's been credited 500 base units. 
utxoView = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) updatedBalanceEntry, _, _ := utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) - require.True(t, uint256.NewInt(500).Eq( - uint256.NewInt(0).Sub( + require.True(t, uint256.NewInt().SetUint64(500).Eq( + uint256.NewInt().Sub( &updatedBalanceEntry.BalanceNanos, &originalBalanceEntry.BalanceNanos))) originalBalanceEntry = updatedBalanceEntry @@ -2723,14 +2724,14 @@ func TestSimpleVestedLockup(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(250))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) // Get the updated m0 balance entry base units and ensure it's been credited 250 base units. utxoView = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) updatedBalanceEntry, _, _ = utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) - require.True(t, uint256.NewInt(250).Eq( - uint256.NewInt(0).Sub( + require.True(t, uint256.NewInt().SetUint64(250).Eq( + uint256.NewInt().Sub( &updatedBalanceEntry.BalanceNanos, &originalBalanceEntry.BalanceNanos))) originalBalanceEntry = updatedBalanceEntry @@ -2772,14 +2773,14 @@ func TestSimpleVestedLockup(t *testing.T) { utxoView = NewUtxoView(testMeta.db, testMeta.params, testMeta.chain.postgres, testMeta.chain.snapshot, nil) updatedBalanceEntry, _, _ = utxoView.GetBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes, true) - require.True(t, uint256.NewInt(250).Eq( - uint256.NewInt(0).Sub( + require.True(t, uint256.NewInt().SetUint64(250).Eq( + uint256.NewInt().Sub( &updatedBalanceEntry.BalanceNanos, &originalBalanceEntry.BalanceNanos))) originalBalanceEntry = updatedBalanceEntry // Check that we're back to where we started (1e6 base units) - require.True(t, 
uint256.NewInt(1e6).Eq(&updatedBalanceEntry.BalanceNanos)) + require.True(t, uint256.NewInt().SetUint64(1e6).Eq(&updatedBalanceEntry.BalanceNanos)) } func TestNoOverlapVestedLockupConsolidation(t *testing.T) { @@ -2794,7 +2795,7 @@ func TestNoOverlapVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt(1000), 0) + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -2803,7 +2804,7 @@ func TestNoOverlapVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 3000, 4000, uint256.NewInt(1000), 0) + 3000, 4000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -2822,7 +2823,7 @@ func TestNoOverlapVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1000))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -2831,7 +2832,7 @@ func TestNoOverlapVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1000))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) } func TestPerfectOverlapVestedLockupConsolidation(t *testing.T) { @@ -2846,7 +2847,7 @@ func TestPerfectOverlapVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, 
testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt(1000), 0) + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -2855,7 +2856,7 @@ func TestPerfectOverlapVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt(1000), 0) + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -2874,7 +2875,7 @@ func TestPerfectOverlapVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(2000))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(2000))) } func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { @@ -2896,7 +2897,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt(1000), 0) + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -2905,7 +2906,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1500, 2000, uint256.NewInt(1000), 0) + 1500, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -2924,7 +2925,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) m0LockedBalanceEntry, err 
= utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -2933,7 +2934,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) // Now we test the opposite vested lockup consolidation type: // existing lockup: ------------------- @@ -2947,7 +2948,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1500, 2000, uint256.NewInt(1000), 0) + 1500, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -2956,7 +2957,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1000, 2000, uint256.NewInt(1000), 0) + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -2971,7 +2972,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, @@ -2980,7 +2981,7 @@ func TestLeftOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1500))) + 
require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) } func TestRightOverhangVestedLockupConsolidation(t *testing.T) { @@ -3002,7 +3003,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt(1000), 0) + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3011,7 +3012,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 1499, uint256.NewInt(1000), 0) + 1000, 1499, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3030,7 +3031,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1499))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1499))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3039,7 +3040,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(501))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(501))) // Now we test the opposite vested lockup consolidation type: // existing lockup: ------------------- @@ -3053,7 +3054,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, 
testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1000, 1499, uint256.NewInt(1000), 0) + 1000, 1499, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3062,7 +3063,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1000, 2000, uint256.NewInt(1000), 0) + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3077,7 +3078,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1500))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, @@ -3086,7 +3087,7 @@ func TestRightOverhangVestedLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) } func TestExternalThreeWayLockupConsolidation(t *testing.T) { @@ -3107,7 +3108,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1250, 1750, uint256.NewInt(1000), 0) + 1250, 1750, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3116,7 +3117,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, 
- 1000, 1500, uint256.NewInt(1000), 0) + 1000, 1500, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3135,7 +3136,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3144,7 +3145,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3153,7 +3154,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1000))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1000))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3174,7 +3175,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1000, 1500, uint256.NewInt(1000), 0) + 1000, 1500, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3183,7 +3184,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, 
err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1250, 1750, uint256.NewInt(1000), 0) + 1250, 1750, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3198,7 +3199,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, @@ -3207,7 +3208,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1002))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1002))) m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, @@ -3216,7 +3217,7 @@ func TestExternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(498))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(498))) } func TestInternalThreeWayLockupConsolidation(t *testing.T) { @@ -3237,7 +3238,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt(1000), 0) + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3246,7 +3247,7 @@ func 
TestInternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1250, 1750, uint256.NewInt(1000), 0) + 1250, 1750, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3265,7 +3266,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(250))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3274,7 +3275,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1500))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3283,7 +3284,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(250))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) // Now we test the opposite vested lockup consolidation type: // existing lockup: ------------------ @@ -3296,7 +3297,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1250, 1750, uint256.NewInt(1000), 0) + 1250, 
1750, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3305,7 +3306,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m1Pub, m1Priv, m1Pub, m1Pub, - 1000, 2000, uint256.NewInt(1000), 0) + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3320,7 +3321,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(250))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(250))) m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, @@ -3329,7 +3330,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1501))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1501))) m1LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m1PKID, ProfilePKID: *m1PKID, @@ -3338,7 +3339,7 @@ func TestInternalThreeWayLockupConsolidation(t *testing.T) { }) require.NoError(t, err) require.True(t, m1LockedBalanceEntry != nil) - require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(249))) + require.True(t, m1LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(249))) } func TestSimpleJointExistingVestedLockups(t *testing.T) { @@ -3359,7 +3360,7 @@ func TestSimpleJointExistingVestedLockups(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, 
m0Pub, - 1000, 1500, uint256.NewInt(1000), 0) + 1000, 1500, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3368,7 +3369,7 @@ func TestSimpleJointExistingVestedLockups(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1501, 2000, uint256.NewInt(1000), 0) + 1501, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3377,7 +3378,7 @@ func TestSimpleJointExistingVestedLockups(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt(1000), 0) + 1000, 2000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3396,7 +3397,7 @@ func TestSimpleJointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1501))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1501))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3405,7 +3406,7 @@ func TestSimpleJointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(1499))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(1499))) } func TestSimpleDisjointExistingVestedLockups(t *testing.T) { @@ -3426,7 +3427,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1000, 2000, uint256.NewInt(1000), 0) + 1000, 2000, 
uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3435,7 +3436,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 3000, 4000, uint256.NewInt(1000), 0) + 3000, 4000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3444,7 +3445,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - 1500, 3500, uint256.NewInt(1000), 0) + 1500, 3500, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3463,7 +3464,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3472,7 +3473,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(750))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(750))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3481,7 +3482,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(499))) + require.True(t, 
m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(499))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3490,7 +3491,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(751))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(751))) m0LockedBalanceEntry, err = utxoView.GetLockedBalanceEntryForLockedBalanceEntryKey(LockedBalanceEntryKey{ HODLerPKID: *m0PKID, ProfilePKID: *m0PKID, @@ -3499,7 +3500,7 @@ func TestSimpleDisjointExistingVestedLockups(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(500))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(500))) } func TestVestingIntersectionLimit(t *testing.T) { @@ -3520,7 +3521,7 @@ func TestVestingIntersectionLimit(t *testing.T) { _, _, _, err := _coinLockupWithConnectTimestamp( t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, - int64(ii*1000)+1, int64(ii*1000)+1000, uint256.NewInt(1000), 0) + int64(ii*1000)+1, int64(ii*1000)+1000, uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } } @@ -3536,7 +3537,7 @@ func TestVestingIntersectionLimit(t *testing.T) { DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(1e9), + CoinsToMintNanos: *uint256.NewInt().SetUint64(1e9), CoinsToBurnNanos: uint256.Int{}, TransferRestrictionStatus: 0, }) @@ -3549,7 +3550,7 @@ func TestVestingIntersectionLimit(t *testing.T) { t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, 1, 
int64((maxIntersections-1)*1000)+1000, - uint256.NewInt(uint64(maxIntersections)*1000), 0) + uint256.NewInt().SetUint64(uint64(maxIntersections)*1000), 0) require.NoError(t, err) } @@ -3567,7 +3568,7 @@ func TestVestingIntersectionLimit(t *testing.T) { }) require.NoError(t, err) require.True(t, m0LockedBalanceEntry != nil) - require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt(2000))) + require.True(t, m0LockedBalanceEntry.BalanceBaseUnits.Eq(uint256.NewInt().SetUint64(2000))) } // Now add another vested lockup, pushing us over the limit. @@ -3577,7 +3578,7 @@ func TestVestingIntersectionLimit(t *testing.T) { m0Pub, m0Priv, m0Pub, m0Pub, int64(maxIntersections*1000)+1, int64(maxIntersections*1000)+1000, - uint256.NewInt(1000), 0) + uint256.NewInt().SetUint64(1000), 0) require.NoError(t, err) } @@ -3588,7 +3589,7 @@ func TestVestingIntersectionLimit(t *testing.T) { t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, 1, int64((maxIntersections)*1000)+1000, - uint256.NewInt(uint64(maxIntersections)*1000), 0) + uint256.NewInt().SetUint64(uint64(maxIntersections)*1000), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupViolatesVestingIntersectionLimit) } @@ -3600,7 +3601,7 @@ func TestVestingIntersectionLimit(t *testing.T) { t, testMeta.chain, testMeta.db, testMeta.params, testMeta.feeRateNanosPerKb, m0Pub, m0Priv, m0Pub, m0Pub, 1000, int64((maxIntersections)*1000)+1, - uint256.NewInt(uint64(maxIntersections)*1000), 0) + uint256.NewInt().SetUint64(uint64(maxIntersections)*1000), 0) require.Contains(t, err.Error(), RuleErrorCoinLockupViolatesVestingIntersectionLimit) } @@ -3624,8 +3625,8 @@ func TestVestingIntersectionLimit(t *testing.T) { finalBalanceEntry, _, _ := utxoView.GetDAOCoinBalanceEntryForHODLerPubKeyAndCreatorPubKey(m0PkBytes, m0PkBytes) require.True(t, finalBalanceEntry != nil) require.True(t, - uint256.NewInt(0).Sub(&finalBalanceEntry.BalanceNanos, 
&startingBalanceEntry.BalanceNanos).Eq( - uint256.NewInt(uint64(maxIntersections)*2000+1000))) + uint256.NewInt().Sub(&finalBalanceEntry.BalanceNanos, &startingBalanceEntry.BalanceNanos).Eq( + uint256.NewInt().SetUint64(uint64(maxIntersections)*2000+1000))) // Now just to be extra sure, check to make sure there's no more unlockable locked balance entries. unvestedUnlockable, vestedUnlockable, err := @@ -3666,7 +3667,7 @@ func TestRealWorldLockupsUseCase(t *testing.T) { DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(1e7), + CoinsToMintNanos: *uint256.NewInt().SetUint64(1e7), CoinsToBurnNanos: uint256.Int{}, TransferRestrictionStatus: 0, }) @@ -3684,7 +3685,7 @@ func TestRealWorldLockupsUseCase(t *testing.T) { startTime := time.Date(2024, time.January, 1, 14, 0, 0, 0, time.UTC) // We iterate for 10 years. - totalLocked := uint256.NewInt(0) + totalLocked := uint256.NewInt() for ii := 0; ii < 365*5; ii++ { // Check if it's time for a deposit. if ii%7 != 0 { @@ -3713,14 +3714,14 @@ func TestRealWorldLockupsUseCase(t *testing.T) { m0Pub, m0Priv, m0Pub, m0Pub, nextLockupStartTime.UnixNano(), nextLockupEndTime.UnixNano()-1, - uint256.NewInt(1000), + uint256.NewInt().SetUint64(1000), blockConnectTime.UnixNano()) require.NoError(t, err) } // Add to total locked. - totalLocked = uint256.NewInt(0).Add( - totalLocked, uint256.NewInt(1000)) + totalLocked = uint256.NewInt().Add( + totalLocked, uint256.NewInt().SetUint64(1000)) } // Verify the locked balance entries in the db. @@ -3732,10 +3733,10 @@ func TestRealWorldLockupsUseCase(t *testing.T) { require.NoError(t, err) // Verify the lockedBalanceEntries locked the correct amount and that the entries are consecutive. - totalLockedFound := uint256.NewInt(0) + totalLockedFound := uint256.NewInt() for ii, lockedBalanceEntry := range lockedBalanceEntries { // Add to the balance found. 
- totalLockedFound = uint256.NewInt(0).Add( + totalLockedFound = uint256.NewInt().Add( totalLockedFound, &lockedBalanceEntry.BalanceBaseUnits) // Check if we're consecutive. @@ -3820,7 +3821,7 @@ func _setUpProfilesAndMintM0M1DAOCoins(testMeta *TestMeta) { DAOCoinMetadata{ ProfilePublicKey: m0PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(1e6), + CoinsToMintNanos: *uint256.NewInt().SetUint64(1e6), CoinsToBurnNanos: uint256.Int{}, TransferRestrictionStatus: 0, }) @@ -3836,7 +3837,7 @@ func _setUpProfilesAndMintM0M1DAOCoins(testMeta *TestMeta) { DAOCoinMetadata{ ProfilePublicKey: m1PkBytes, OperationType: DAOCoinOperationTypeMint, - CoinsToMintNanos: *uint256.NewInt(1e9), + CoinsToMintNanos: *uint256.NewInt().SetUint64(1e9), CoinsToBurnNanos: uint256.Int{}, TransferRestrictionStatus: 0, }) diff --git a/lib/block_view_message.go b/lib/block_view_message.go index 3e55a3702..1b64f481e 100644 --- a/lib/block_view_message.go +++ b/lib/block_view_message.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/golang/glog" "github.com/pkg/errors" "math" @@ -814,7 +814,7 @@ func (bav *UtxoView) _connectMessagingGroup( var messagingPublicKey *PublicKey if reflect.DeepEqual(txMeta.MessagingPublicKey, GetS256BasePointCompressed()) { messagingGroupKey = NewMessagingGroupKey(NewPublicKey(GetS256BasePointCompressed()), txMeta.MessagingGroupKeyName) - _, keyPublic := btcec.PrivKeyFromBytes(Sha256DoubleHash(txMeta.MessagingGroupKeyName)[:]) + _, keyPublic := btcec.PrivKeyFromBytes(btcec.S256(), Sha256DoubleHash(txMeta.MessagingGroupKeyName)[:]) messagingPublicKey = NewPublicKey(keyPublic.SerializeCompressed()) } else { messagingGroupKey = NewMessagingGroupKey(NewPublicKey(txn.PublicKey), txMeta.MessagingGroupKeyName) diff --git a/lib/block_view_message_test.go b/lib/block_view_message_test.go index 63dee2e69..7e6fcc5f5 100644 --- 
a/lib/block_view_message_test.go +++ b/lib/block_view_message_test.go @@ -9,8 +9,7 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/btcec/v2" - "github.com/btcsuite/btcd/btcec/v2/ecdsa" + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -20,19 +19,20 @@ func TestBasePointSignature(t *testing.T) { require := require.New(t) // Retrieve the base point bytes and parse them to a public key. basePointBytes := GetS256BasePointCompressed() - basePoint, err := btcec.ParsePubKey(basePointBytes) + basePoint, err := btcec.ParsePubKey(basePointBytes, btcec.S256()) require.NoError(err) // Verify that k = 1 is the correct private key for the secp256k1 base point priveKeyBytes := []byte{1} - priveKey, publicKey := btcec.PrivKeyFromBytes(priveKeyBytes) + priveKey, publicKey := btcec.PrivKeyFromBytes(btcec.S256(), priveKeyBytes) require.Equal(basePointBytes, publicKey.SerializeCompressed()) require.Equal(basePoint.SerializeCompressed(), publicKey.SerializeCompressed()) // Now test signing messages with the private key of the base point k = 1. message := []byte("Test message") messageHash := Sha256DoubleHash(message) - messageSignature := ecdsa.Sign(priveKey, messageHash[:]) + messageSignature, err := priveKey.Sign(messageHash[:]) + require.NoError(err) // Now make sure the base point passes signature verification. 
require.Equal(true, messageSignature.Verify(messageHash[:], basePoint)) @@ -332,7 +332,7 @@ func TestPrivateMessage(t *testing.T) { require.Equal(messageEntry.RecipientPublicKey[:], _strToPk(t, m1Pub)) require.Equal(messageEntry.TstampNanos, tstamp1) require.Equal(messageEntry.isDeleted, false) - priv, _ := btcec.PrivKeyFromBytes(_strToPk(t, m1Priv)) + priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), _strToPk(t, m1Priv)) decryptedBytes, err := DecryptBytesWithPrivateKey(messageEntry.EncryptedText, priv.ToECDSA()) require.NoError(err) require.Equal(message1, string(decryptedBytes)) @@ -618,13 +618,13 @@ func TestPrivateMessage(t *testing.T) { func _generateMessagingKey(senderPub []byte, senderPriv []byte, keyName []byte) ( priv *btcec.PrivateKey, sign []byte, messagingKeyEntry *MessagingGroupEntry) { - senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPriv) + senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPriv) - priv, _ = btcec.NewPrivateKey() + priv, _ = btcec.NewPrivateKey(btcec.S256()) pub := priv.PubKey().SerializeCompressed() payload := append(pub, keyName...) - signature := ecdsa.Sign(senderPrivKey, Sha256DoubleHash(payload)[:]) + signature, _ := senderPrivKey.Sign(Sha256DoubleHash(payload)[:]) return priv, signature.Serialize(), _initMessagingKey(senderPub, pub, keyName) } @@ -1461,7 +1461,7 @@ func TestMessagingKeys(t *testing.T) { nil) // The DB entry should have the messaging public key derived deterministically from the group key name. // Compute the public key and compare it with the DB entry. 
- _, groupPkBytes := btcec.PrivKeyFromBytes(Sha256DoubleHash(groupKeyName)[:]) + _, groupPkBytes := btcec.PrivKeyFromBytes(btcec.S256(), Sha256DoubleHash(groupKeyName)[:]) groupPk := NewPublicKey(groupPkBytes.SerializeCompressed()) expectedEntry := &MessagingGroupEntry{} rr := bytes.NewReader(EncodeToBytes(0, entry)) @@ -2260,7 +2260,7 @@ func TestGroupMessages(t *testing.T) { // Define helper functions for encryption/decryption so that we can do some real crypto. encrypt := func(plain, recipient []byte) []byte { - recipientPk, err := btcec.ParsePubKey(recipient) + recipientPk, err := btcec.ParsePubKey(recipient, btcec.S256()) if err != nil { return nil } @@ -2272,7 +2272,7 @@ func TestGroupMessages(t *testing.T) { return encryptedMessageBytes } decrypt := func(cipher, recipientPrivKey []byte) []byte { - recipientPriv, _ := btcec.PrivKeyFromBytes(recipientPrivKey) + recipientPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), recipientPrivKey) plain, err := DecryptBytesWithPrivateKey(cipher, recipientPriv.ToECDSA()) if err != nil { fmt.Println(err) diff --git a/lib/block_view_new_message_test.go b/lib/block_view_new_message_test.go index 4a0a45475..8a8bf2e61 100644 --- a/lib/block_view_new_message_test.go +++ b/lib/block_view_new_message_test.go @@ -3,7 +3,7 @@ package lib import ( "bytes" "fmt" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/pkg/errors" "github.com/stretchr/testify/require" "math" @@ -91,17 +91,17 @@ func TestNewMessage(t *testing.T) { initChainCallback: initChainCallback, } - groupPriv1, err := btcec.NewPrivateKey() + groupPriv1, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk1 := groupPriv1.PubKey().SerializeCompressed() _ = groupPk1 - groupPriv2, err := btcec.NewPrivateKey() + groupPriv2, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk2 := groupPriv2.PubKey().SerializeCompressed() _ = groupPk2 - groupPriv3, err := btcec.NewPrivateKey() + groupPriv3, err := 
btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupPk3 := groupPriv3.PubKey().SerializeCompressed() _ = groupPk3 @@ -376,13 +376,13 @@ func TestNewMessage(t *testing.T) { // (m0, defaultKey) -> // (m1, defaultKey) -> // (m3, defaultKey) -> - m0DefaultKeyPriv, err := btcec.NewPrivateKey() + m0DefaultKeyPriv, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) m0DefaultKeyPk := NewPublicKey(m0DefaultKeyPriv.PubKey().SerializeCompressed()) - m1DefaultKeyPriv, err := btcec.NewPrivateKey() + m1DefaultKeyPriv, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) m1DefaultKeyPk := NewPublicKey(m1DefaultKeyPriv.PubKey().SerializeCompressed()) - m3DefaultKeyPriv, err := btcec.NewPrivateKey() + m3DefaultKeyPriv, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) m3DefaultKeyPk := NewPublicKey(m3DefaultKeyPriv.PubKey().SerializeCompressed()) @@ -406,7 +406,7 @@ func TestNewMessage(t *testing.T) { // (m3, groupName3) -> // (m3, defaultKey) // (m0, defaultKey) - groupName3SharedPriv, err := btcec.NewPrivateKey() + groupName3SharedPriv, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) groupName3SharedPk := NewPublicKey(groupName3SharedPriv.PubKey().SerializeCompressed()) groupName3SharedPk_EncryptedTo_m0DefaultPk := _encryptBytes(groupName3SharedPriv.Serialize(), *m0DefaultKeyPk) @@ -1155,7 +1155,7 @@ func _verifyGroupMessageEntriesDecryption(t *testing.T, utxoView *UtxoView, grou require.NoError(err) require.NotNil(memberAccessGroupEntry) require.Equal(false, memberAccessGroupEntry.isDeleted) - _, memberAccessGroupPublicKeyFromPriv := btcec.PrivKeyFromBytes(memberAccessGroupPrivateKey) + _, memberAccessGroupPublicKeyFromPriv := btcec.PrivKeyFromBytes(btcec.S256(), memberAccessGroupPrivateKey) require.Equal(true, bytes.Equal(memberAccessGroupEntry.AccessGroupPublicKey.ToBytes(), memberAccessGroupPublicKeyFromPriv.SerializeCompressed())) // Decrypt the EncryptedKey present in the memberEntry to get the message 
encryption/decryption key. decryptionKey := _decryptBytes(memberEntry.EncryptedKey, memberAccessGroupPrivateKey) @@ -1179,7 +1179,7 @@ func _verifyEqualMessageEntries(t *testing.T, messageEntryA *NewMessageEntry, me } func _encryptBytes(plainText []byte, publicKey PublicKey) []byte { - pk, err := btcec.ParsePubKey(publicKey.ToBytes()) + pk, err := btcec.ParsePubKey(publicKey.ToBytes(), btcec.S256()) if err != nil { return nil } @@ -1192,7 +1192,7 @@ func _encryptBytes(plainText []byte, publicKey PublicKey) []byte { } func _decryptBytes(cipherText []byte, privateKey []byte) []byte { - recipientPriv, _ := btcec.PrivKeyFromBytes(privateKey) + recipientPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), privateKey) plain, err := DecryptBytesWithPrivateKey(cipherText, recipientPriv.ToECDSA()) if err != nil { fmt.Println(err) diff --git a/lib/block_view_nft.go b/lib/block_view_nft.go index fbeaefd26..3875af09b 100644 --- a/lib/block_view_nft.go +++ b/lib/block_view_nft.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/golang/glog" "github.com/pkg/errors" "math" @@ -506,7 +506,7 @@ func (bav *UtxoView) extractAdditionalRoyaltyMap( pkBytess := pkBytesIter // Validate the public key - if _, err = btcec.ParsePubKey(pkBytess[:]); err != nil { + if _, err = btcec.ParsePubKey(pkBytess[:], btcec.S256()); err != nil { return nil, 0, errors.Wrapf( RuleErrorAdditionalRoyaltyPubKeyMustBeValid, "Error parsing public key: %v, %v", PkToStringBoth(pkBytess[:]), err) @@ -1286,7 +1286,7 @@ func (bav *UtxoView) _helpConnectNFTSold(args HelpConnectNFTSoldStruct) ( return 0, nil, fmt.Errorf( "_helpConnectNFTSold: invalid public key found for pkid in additional DESO royalty map") } - if _, err = btcec.ParsePubKey(pkBytes); err != nil { + if _, err = btcec.ParsePubKey(pkBytes, btcec.S256()); err != nil { return 0, nil, errors.Wrapf(err, "Unable to parse public key") } diff --git a/lib/block_view_post.go 
b/lib/block_view_post.go index e199e459c..2ea329a0c 100644 --- a/lib/block_view_post.go +++ b/lib/block_view_post.go @@ -11,7 +11,7 @@ import ( "sort" "strings" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" "github.com/dgraph-io/badger/v4" "github.com/gernest/mention" diff --git a/lib/block_view_profile.go b/lib/block_view_profile.go index 36176ceff..35400c350 100644 --- a/lib/block_view_profile.go +++ b/lib/block_view_profile.go @@ -3,9 +3,8 @@ package lib import ( "encoding/hex" "fmt" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" - ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "github.com/ethereum/go-ethereum/crypto" "github.com/golang/glog" "github.com/holiman/uint256" @@ -452,10 +451,10 @@ func (bav *UtxoView) setProfileMappings(profile *PGProfile) (*ProfileEntry, *PKI var err error daoCoinsInCirculationNanos, err = uint256.FromHex(profile.DAOCoinCoinsInCirculationNanos) if err != nil { - daoCoinsInCirculationNanos = uint256.NewInt(0) + daoCoinsInCirculationNanos = uint256.NewInt() } } else { - daoCoinsInCirculationNanos = uint256.NewInt(0) + daoCoinsInCirculationNanos = uint256.NewInt() } profileEntry = &ProfileEntry{ PublicKey: profile.PublicKey.ToBytes(), @@ -466,7 +465,7 @@ func (bav *UtxoView) setProfileMappings(profile *PGProfile) (*ProfileEntry, *PKI CreatorBasisPoints: profile.CreatorBasisPoints, DeSoLockedNanos: profile.DeSoLockedNanos, NumberOfHolders: profile.NumberOfHolders, - CoinsInCirculationNanos: *uint256.NewInt(profile.CoinsInCirculationNanos), + CoinsInCirculationNanos: *uint256.NewInt().SetUint64(profile.CoinsInCirculationNanos), CoinWatermarkNanos: profile.CoinWatermarkNanos, MintingDisabled: profile.MintingDisabled, }, @@ -577,7 +576,7 @@ func (bav *UtxoView) _connectUpdateProfile( if len(txMeta.ProfilePublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, errors.Wrapf(RuleErrorProfilePublicKeySize, 
"_connectUpdateProfile: %#v", txMeta.ProfilePublicKey) } - _, err := btcec.ParsePubKey(txMeta.ProfilePublicKey) + _, err := btcec.ParsePubKey(txMeta.ProfilePublicKey, btcec.S256()) if err != nil { return 0, 0, nil, errors.Wrapf(RuleErrorProfileBadPublicKey, "_connectUpdateProfile: %v", err) } @@ -826,7 +825,7 @@ func (bav *UtxoView) _connectSwapIdentity( if len(fromPublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, RuleErrorFromPublicKeyIsRequired } - if _, err := btcec.ParsePubKey(fromPublicKey); err != nil { + if _, err := btcec.ParsePubKey(fromPublicKey, btcec.S256()); err != nil { return 0, 0, nil, errors.Wrap(RuleErrorInvalidFromPublicKey, err.Error()) } @@ -835,7 +834,7 @@ func (bav *UtxoView) _connectSwapIdentity( if len(toPublicKey) != btcec.PubKeyBytesLenCompressed { return 0, 0, nil, RuleErrorToPublicKeyIsRequired } - if _, err := btcec.ParsePubKey(toPublicKey); err != nil { + if _, err := btcec.ParsePubKey(toPublicKey, btcec.S256()); err != nil { return 0, 0, nil, errors.Wrap(RuleErrorInvalidToPublicKey, err.Error()) } @@ -969,13 +968,13 @@ func _verifyDeSoSignature(signer, data, signature []byte) error { bytes := Sha256DoubleHash(data) // Convert signature to *btcec.Signature. - sign, err := ecdsa2.ParseDERSignature(signature) + sign, err := btcec.ParseDERSignature(signature, btcec.S256()) if err != nil { return errors.Wrapf(err, "_verifyBytesSignature: Problem parsing access signature: ") } // Verify signature. - ownerPk, _ := btcec.ParsePubKey(signer) + ownerPk, _ := btcec.ParsePubKey(signer, btcec.S256()) if !sign.Verify(bytes[:], ownerPk) { return fmt.Errorf("_verifyBytesSignature: Invalid signature") } @@ -995,17 +994,17 @@ func TextAndHash(data []byte) ([]byte, string) { func VerifyEthPersonalSignature(signer, data, signature []byte) error { // Ethereum likes uncompressed public keys while we use compressed keys a lot. Make sure we have uncompressed pk bytes. 
var uncompressedSigner []byte - pubKey, err := btcec.ParsePubKey(signer) + pubKey, err := btcec.ParsePubKey(signer, btcec.S256()) if err != nil { return errors.Wrapf(err, "VerifyEthPersonalSignature: Problem parsing signer public key") } if len(signer) == btcec.PubKeyBytesLenCompressed { uncompressedSigner = pubKey.SerializeUncompressed() - } else if len(signer) == BtcecPubKeyBytesLenUncompressed { + } else if len(signer) == btcec.PubKeyBytesLenUncompressed { uncompressedSigner = signer } else { return fmt.Errorf("VerifyEthPersonalSignature: Public key has incorrect length. It should be either "+ - "(%v) for compressed key or (%v) for uncompressed key", btcec.PubKeyBytesLenCompressed, BtcecPubKeyBytesLenUncompressed) + "(%v) for compressed key or (%v) for uncompressed key", btcec.PubKeyBytesLenCompressed, btcec.PubKeyBytesLenUncompressed) } // Change the data bytes into Ethereum's personal_sign message standard. This will prepend the message prefix and hash @@ -1055,7 +1054,7 @@ func (bav *UtxoView) _disconnectUpdateProfile( if len(txMeta.ProfilePublicKey) != btcec.PubKeyBytesLenCompressed { return fmt.Errorf("_disconnectUpdateProfile: %#v", txMeta.ProfilePublicKey) } - _, err := btcec.ParsePubKey(txMeta.ProfilePublicKey) + _, err := btcec.ParsePubKey(txMeta.ProfilePublicKey, btcec.S256()) if err != nil { return fmt.Errorf("_disconnectUpdateProfile: %v", err) } diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index c33a55d83..c6b07e19e 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -1617,7 +1617,7 @@ func (bav *UtxoView) _connectUnstake( } // 2. Create a currentStakeEntry, if updated StakeAmountNanos > 0. 
var currentStakeEntry *StakeEntry - if stakeAmountNanos.Sign() > 0 { + if stakeAmountNanos.Cmp(uint256.NewInt()) > 0 { currentStakeEntry = prevStakeEntry.Copy() currentStakeEntry.StakeAmountNanos = stakeAmountNanos.Clone() } @@ -1894,7 +1894,7 @@ func (bav *UtxoView) _connectUnlockStake( } // Calculate the TotalUnlockedAmountNanos and delete the PrevLockedStakeEntries. - totalUnlockedAmountNanos := uint256.NewInt(0) + totalUnlockedAmountNanos := uint256.NewInt() for _, prevLockedStakeEntry := range prevLockedStakeEntries { totalUnlockedAmountNanos, err = SafeUint256().Add( totalUnlockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos, @@ -1982,7 +1982,7 @@ func (bav *UtxoView) _disconnectUnlockStake( } // Calculate the TotalUnlockedAmountNanos. - totalUnlockedAmountNanos := uint256.NewInt(0) + totalUnlockedAmountNanos := uint256.NewInt() var err error for _, prevLockedStakeEntry := range operationData.PrevLockedStakeEntries { totalUnlockedAmountNanos, err = SafeUint256().Add( @@ -2048,7 +2048,7 @@ func (bav *UtxoView) IsValidStakeMetadata(transactorPkBytes []byte, metadata *St if err != nil { return errors.Wrapf(err, "UtxoView.IsValidStakeMetadata: ") } - if uint256.NewInt(transactorDeSoBalanceNanos).Cmp(metadata.StakeAmountNanos) < 0 { + if uint256.NewInt().SetUint64(transactorDeSoBalanceNanos).Cmp(metadata.StakeAmountNanos) < 0 { return errors.Wrapf(RuleErrorInvalidStakeInsufficientBalance, "UtxoView.IsValidStakeMetadata: ") } @@ -2187,7 +2187,7 @@ func (bav *UtxoView) IsCorrectValidatorTotalStakeAmountNanos(validatorEntry *Val } } // Calculate the total stake amount for the validator. - totalStakeAmountNanos := uint256.NewInt(0) + totalStakeAmountNanos := uint256.NewInt() for _, stakeEntry := range stakeEntryMap { // If an entry is deleted, we don't count it towards the total. if stakeEntry.isDeleted { @@ -2231,7 +2231,7 @@ func (bav *UtxoView) SanityCheckStakeTxn( } // Validate StakeEntry.StakeAmountNanos increase. 
- prevStakeEntry := &StakeEntry{StakeAmountNanos: uint256.NewInt(0)} + prevStakeEntry := &StakeEntry{StakeAmountNanos: uint256.NewInt()} if len(utxoOp.PrevStakeEntries) == 1 { prevStakeEntry = utxoOp.PrevStakeEntries[0] } @@ -2267,7 +2267,7 @@ func (bav *UtxoView) SanityCheckStakeTxn( if err != nil { return errors.Wrapf(err, "SanityCheckStakeTxn: error including fees in TransactorBalance decrease: ") } - if !uint256.NewInt(transactorBalanceNanosDecrease).Eq(amountNanos) { + if !uint256.NewInt().SetUint64(transactorBalanceNanosDecrease).Eq(amountNanos) { return errors.New("SanityCheckStakeTxn: TransactorBalance decrease does not match") } @@ -2319,7 +2319,7 @@ func (bav *UtxoView) SanityCheckUnstakeTxn(transactorPKID *PKID, utxoOp *UtxoOpe return errors.Wrapf(err, "SanityCheckUnstakeTxn: error retrieving StakeEntry: ") } if currentStakeEntry == nil { - currentStakeEntry = &StakeEntry{StakeAmountNanos: uint256.NewInt(0)} + currentStakeEntry = &StakeEntry{StakeAmountNanos: uint256.NewInt()} } stakeEntryStakeAmountNanosDecrease, err := SafeUint256().Sub( prevStakeEntry.StakeAmountNanos, currentStakeEntry.StakeAmountNanos, @@ -2332,7 +2332,7 @@ func (bav *UtxoView) SanityCheckUnstakeTxn(transactorPKID *PKID, utxoOp *UtxoOpe } // Validate LockedStakeEntry.LockedAmountNanos increase. 
- prevLockedStakeEntry := &LockedStakeEntry{LockedAmountNanos: uint256.NewInt(0)} + prevLockedStakeEntry := &LockedStakeEntry{LockedAmountNanos: uint256.NewInt()} if len(utxoOp.PrevLockedStakeEntries) == 1 { prevLockedStakeEntry = utxoOp.PrevLockedStakeEntries[0] } @@ -2383,7 +2383,7 @@ func (bav *UtxoView) SanityCheckUnlockStakeTxn( if utxoOp.PrevLockedStakeEntries == nil || len(utxoOp.PrevLockedStakeEntries) == 0 { return errors.New("SanityCheckUnlockStakeTxn: PrevLockedStakeEntries is empty") } - totalUnlockedAmountNanos := uint256.NewInt(0) + totalUnlockedAmountNanos := uint256.NewInt() var err error for _, prevLockedStakeEntry := range utxoOp.PrevLockedStakeEntries { totalUnlockedAmountNanos, err = SafeUint256().Add(totalUnlockedAmountNanos, prevLockedStakeEntry.LockedAmountNanos) @@ -2410,7 +2410,7 @@ func (bav *UtxoView) SanityCheckUnlockStakeTxn( if err != nil { return errors.Wrapf(err, "SanityCheckStakeTxn: error including fees in TransactorBalance decrease: ") } - if !uint256.NewInt(transactorBalanceNanosIncrease).Eq(amountNanos) { + if !uint256.NewInt().SetUint64(transactorBalanceNanosIncrease).Eq(amountNanos) { return errors.New("SanityCheckUnlockStakeTxn: TransactorBalance increase does not match") } @@ -2895,7 +2895,7 @@ func (bav *UtxoView) CreateUnlockStakeTxindexMetadata(utxoOp *UtxoOperation, txn validatorPublicKeyBase58Check := PkToString(metadata.ValidatorPublicKey.ToBytes(), bav.Params) // Calculate TotalUnlockedAmountNanos. 
- totalUnlockedAmountNanos := uint256.NewInt(0) + totalUnlockedAmountNanos := uint256.NewInt() var err error for _, prevLockedStakeEntry := range utxoOp.PrevLockedStakeEntries { totalUnlockedAmountNanos, err = SafeUint256().Add( @@ -2903,7 +2903,7 @@ func (bav *UtxoView) CreateUnlockStakeTxindexMetadata(utxoOp *UtxoOperation, txn ) if err != nil { glog.Errorf("CreateUnlockStakeTxindexMetadata: error calculating TotalUnlockedAmountNanos: %v", err) - totalUnlockedAmountNanos = uint256.NewInt(0) + totalUnlockedAmountNanos = uint256.NewInt() break } } diff --git a/lib/block_view_stake_test.go b/lib/block_view_stake_test.go index b50772fbe..30fafa3aa 100644 --- a/lib/block_view_stake_test.go +++ b/lib/block_view_stake_test.go @@ -6,7 +6,7 @@ import ( "math" "testing" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/holiman/uint256" "github.com/stretchr/testify/require" ) @@ -127,7 +127,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt(100), + StakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -144,7 +144,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m2PkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt(0), + StakeAmountNanos: uint256.NewInt(), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -157,7 +157,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: 99, - StakeAmountNanos: uint256.NewInt(1), + StakeAmountNanos: uint256.NewInt().SetUint64(1), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -183,7 
+183,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt(0), + StakeAmountNanos: uint256.NewInt(), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -209,7 +209,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt(math.MaxUint64), + StakeAmountNanos: uint256.NewInt().SetUint64(math.MaxUint64), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -223,7 +223,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt(100), + StakeAmountNanos: uint256.NewInt().SetUint64(100), } extraData := map[string][]byte{"TestKey": []byte("TestValue")} feeNanos, err := _submitStakeTxn( @@ -236,14 +236,14 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.NotNil(t, stakeEntry) require.Equal(t, stakeEntry.RewardMethod, StakingRewardMethodPayToBalance) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(100)) require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue")) // Verify ValidatorEntry.TotalStakeAmountNanos. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). 
m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) @@ -255,7 +255,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt(50), + StakeAmountNanos: uint256.NewInt().SetUint64(50), } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} feeNanos, err := _submitStakeTxn( @@ -268,14 +268,14 @@ func _testStaking(t *testing.T, flushToDB bool) { require.NoError(t, err) require.NotNil(t, stakeEntry) require.Equal(t, stakeEntry.RewardMethod, StakingRewardMethodPayToBalance) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(150)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(150)) require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue2")) // Verify ValidatorEntry.TotalStakeAmountNanos. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(150)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). 
m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) @@ -287,7 +287,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), RewardMethod: StakingRewardMethodRestake, - StakeAmountNanos: uint256.NewInt(0), + StakeAmountNanos: uint256.NewInt(), } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} feeNanos, err := _submitStakeTxn( @@ -299,7 +299,7 @@ func _testStaking(t *testing.T, flushToDB bool) { stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(150)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(150)) require.Equal(t, stakeEntry.ExtraData["TestKey"], []byte("TestValue2")) // Verify the StakeEntry.RewardMethod has changed to StakingRewardMethodRestake. @@ -309,7 +309,7 @@ func _testStaking(t *testing.T, flushToDB bool) { validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(150)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) // Verify m1's DESO balance decreases by StakeAmountNanos (net of fees). 
m1NewDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) @@ -326,7 +326,7 @@ func _testStaking(t *testing.T, flushToDB bool) { unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(40), + UnstakeAmountNanos: uint256.NewInt().SetUint64(40), } _, err = _submitUnstakeTxn( testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, @@ -342,7 +342,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidValidatorPKID unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m2PkBytes), - UnstakeAmountNanos: uint256.NewInt(40), + UnstakeAmountNanos: uint256.NewInt().SetUint64(40), } _, err = _submitUnstakeTxn( testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, @@ -354,7 +354,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidUnstakeNoStakeFound unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(40), + UnstakeAmountNanos: uint256.NewInt().SetUint64(40), } _, err = _submitUnstakeTxn( testMeta, m2Pub, m2Priv, unstakeMetadata, nil, flushToDB, @@ -378,7 +378,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // RuleErrorInvalidUnstakeAmountNanos unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(0), + UnstakeAmountNanos: uint256.NewInt(), } _, err = _submitUnstakeTxn( testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, @@ -403,7 +403,7 @@ func _testStaking(t *testing.T, flushToDB bool) { m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(40), + UnstakeAmountNanos: uint256.NewInt().SetUint64(40), } extraData := map[string][]byte{"TestKey": []byte("TestValue")} feeNanos, err := _submitUnstakeTxn( @@ -414,17 +414,17 @@ func _testStaking(t *testing.T, flushToDB bool) { // Verify 
StakeEntry.StakeAmountNanos. stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(110)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(110)) // Verify ValidatorEntry.TotalStakeAmountNanos. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(110)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(110)) // Verify LockedStakeEntry.UnstakeAmountNanos. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(40)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(40)) require.Equal(t, lockedStakeEntry.ExtraData["TestKey"], []byte("TestValue")) // Verify m1's balance stays the same (net of fees). @@ -436,7 +436,7 @@ func _testStaking(t *testing.T, flushToDB bool) { m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(30), + UnstakeAmountNanos: uint256.NewInt().SetUint64(30), } extraData := map[string][]byte{"TestKey": []byte("TestValue2")} feeNanos, err := _submitUnstakeTxn( @@ -447,17 +447,17 @@ func _testStaking(t *testing.T, flushToDB bool) { // Verify StakeEntry.StakeAmountNanos. stakeEntry, err := utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(80)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(80)) // Verify ValidatorEntry.TotalStakeAmountNanos. 
validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(80)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(80)) // Verify LockedStakeEntry.UnstakeAmountNanos. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(70)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(70)) require.Equal(t, lockedStakeEntry.ExtraData["TestKey"], []byte("TestValue2")) // Verify m1's balance stays the same (net of fees). @@ -469,7 +469,7 @@ func _testStaking(t *testing.T, flushToDB bool) { m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(80), + UnstakeAmountNanos: uint256.NewInt().SetUint64(80), } feeNanos, err := _submitUnstakeTxn( testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, @@ -484,12 +484,12 @@ func _testStaking(t *testing.T, flushToDB bool) { // Verify ValidatorEntry.TotalStakeAmountNanos. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(0)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt()) // Verify LockedStakeEntry.UnstakeAmountNanos. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(150)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(150)) require.Equal(t, lockedStakeEntry.ExtraData["TestKey"], []byte("TestValue2")) // Verify m1's balance stays the same (net of fees). 
@@ -566,7 +566,7 @@ func _testStaking(t *testing.T, flushToDB bool) { ) require.NoError(t, err) require.Equal(t, len(lockedStakeEntries), 1) - require.Equal(t, lockedStakeEntries[0].LockedAmountNanos, uint256.NewInt(150)) + require.Equal(t, lockedStakeEntries[0].LockedAmountNanos, uint256.NewInt().SetUint64(150)) m1OldDESOBalanceNanos := getDESOBalanceNanos(m1PkBytes) unlockStakeMetadata := &UnlockStakeMetadata{ @@ -587,7 +587,7 @@ func _testStaking(t *testing.T, flushToDB bool) { // Verify ValidatorEntry.TotalStakeAmountNanos. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(0)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt()) // Verify LockedStakeEntry.isDeleted. lockedStakeEntry, err := utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) @@ -814,7 +814,7 @@ func TestStakingWithDerivedKey(t *testing.T) { require.NoError(t, err) senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) require.NoError(t, err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID newUtxoView := func() *UtxoView { @@ -972,7 +972,7 @@ func TestStakingWithDerivedKey(t *testing.T) { TransactionCountLimitMap: map[TxnType]uint64{ TxnTypeAuthorizeDerivedKey: 1, }, - StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt(100)}, + StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(100)}, } derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) require.NoError(t, err) @@ -980,7 +980,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender tries to stake 100 $DESO nanos with m1 using the DerivedKey. Errors. 
stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt(100), + StakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, @@ -991,7 +991,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender tries to stake 200 $DESO nanos with m0 using the DerivedKey. Errors. stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(200), + StakeAmountNanos: uint256.NewInt().SetUint64(200), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, @@ -1003,7 +1003,7 @@ func TestStakingWithDerivedKey(t *testing.T) { senderOldDESOBalanceNanos := getDESOBalanceNanos(senderPkBytes) stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(100), + StakeAmountNanos: uint256.NewInt().SetUint64(100), } feeNanos, err := _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, @@ -1014,7 +1014,7 @@ func TestStakingWithDerivedKey(t *testing.T) { stakeEntry, err := newUtxoView().GetStakeEntry(m0PKID, senderPKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(100)) // Verify sender's DESO balance is reduced by StakeAmountNanos (net of fees). 
senderNewDESOBalanceNanos := getDESOBalanceNanos(senderPkBytes) @@ -1030,7 +1030,7 @@ func TestStakingWithDerivedKey(t *testing.T) { TransactionCountLimitMap: map[TxnType]uint64{ TxnTypeAuthorizeDerivedKey: 1, }, - UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt(50)}, + UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(50)}, } derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) require.NoError(t, err) @@ -1038,7 +1038,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender tries to unstake 50 $DESO nanos from m1 using the DerivedKey. Errors. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt(50), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1049,7 +1049,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes 50 $DESO nanos with m1. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt(50), + StakeAmountNanos: uint256.NewInt().SetUint64(50), } _, err = _submitStakeTxn( testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, @@ -1059,7 +1059,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender tries to unstake 50 $DESO nanos from m1 using the DerivedKey. Errors. unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt(50), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1070,7 +1070,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender tries to unstake 200 $DESO nanos from m0 using the DerivedKey. Errors. 
unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(200), + UnstakeAmountNanos: uint256.NewInt().SetUint64(200), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1081,7 +1081,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender tries to unstake 100 $DESO nanos from m0 using the DerivedKey. Errors. unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(100), + UnstakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1092,7 +1092,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes 50 $DESO nanos from m0 using the DerivedKey. Succeeds. unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(50), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1103,13 +1103,13 @@ func TestStakingWithDerivedKey(t *testing.T) { stakeEntry, err := newUtxoView().GetStakeEntry(m0PKID, senderPKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(50)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(50)) // LockedStakeEntry was created. lockedStakeEntry, err := newUtxoView().GetLockedStakeEntry(m0PKID, senderPKID, currentEpochNumber) require.NoError(t, err) require.NotNil(t, lockedStakeEntry) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(50)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(50)) } { // sender unlocks stake using a DerivedKey. 
@@ -1141,7 +1141,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes 50 $DESO nanos from m1. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt(50), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), } _, err = _submitUnstakeTxn( testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, @@ -1184,7 +1184,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes + unstakes 50 $DESO nanos with m0. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(50), + StakeAmountNanos: uint256.NewInt().SetUint64(50), } _, err = _submitStakeTxn( testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, @@ -1192,7 +1192,7 @@ func TestStakingWithDerivedKey(t *testing.T) { require.NoError(t, err) unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(50), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), } _, err = _submitUnstakeTxn( testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, @@ -1221,8 +1221,8 @@ func TestStakingWithDerivedKey(t *testing.T) { TransactionCountLimitMap: map[TxnType]uint64{ TxnTypeAuthorizeDerivedKey: 1, }, - StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt(50)}, - UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt(50)}, + StakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(50)}, + UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{stakeLimitKey: uint256.NewInt().SetUint64(50)}, UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey: 2}, } derivedKeyPriv, err = _submitAuthorizeDerivedKeyTxn(txnSpendingLimit) @@ -1231,7 +1231,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes with m0 using the DerivedKey. 
stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(25), + StakeAmountNanos: uint256.NewInt().SetUint64(25), } _, err = _submitStakeTxn( testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, @@ -1241,7 +1241,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes with m1 using the DerivedKey. stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt(25), + StakeAmountNanos: uint256.NewInt().SetUint64(25), } _, err = _submitStakeTxn( testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, @@ -1251,7 +1251,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes from m0 using the DerivedKey. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(25), + UnstakeAmountNanos: uint256.NewInt().SetUint64(25), } _, err = _submitUnstakeTxn( testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, @@ -1261,7 +1261,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes from m1 using the DerivedKey. unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt(25), + UnstakeAmountNanos: uint256.NewInt().SetUint64(25), } _, err = _submitUnstakeTxn( testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, @@ -1304,7 +1304,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes with m0 using the DerivedKey. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(25), + StakeAmountNanos: uint256.NewInt().SetUint64(25), } _, err = _submitStakeTxn( testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, @@ -1314,7 +1314,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes with m1 using the DerivedKey. 
stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt(25), + StakeAmountNanos: uint256.NewInt().SetUint64(25), } _, err = _submitStakeTxn( testMeta, senderPkString, senderPrivString, stakeMetadata, nil, true, @@ -1324,7 +1324,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes from m0 using the DerivedKey. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(25), + UnstakeAmountNanos: uint256.NewInt().SetUint64(25), } _, err = _submitUnstakeTxn( testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, @@ -1334,7 +1334,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes from m1 using the DerivedKey. unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt(25), + UnstakeAmountNanos: uint256.NewInt().SetUint64(25), } _, err = _submitUnstakeTxn( testMeta, senderPkString, senderPrivString, unstakeMetadata, nil, true, @@ -1377,12 +1377,12 @@ func TestStakingWithDerivedKey(t *testing.T) { TxnTypeAuthorizeDerivedKey: 1, }, StakeLimitMap: map[StakeLimitKey]*uint256.Int{ - scopedStakeLimitKey: uint256.NewInt(100), - globalStakeLimitKey: uint256.NewInt(200), + scopedStakeLimitKey: uint256.NewInt().SetUint64(100), + globalStakeLimitKey: uint256.NewInt().SetUint64(200), }, UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{ - scopedStakeLimitKey: uint256.NewInt(100), - globalStakeLimitKey: uint256.NewInt(200), + scopedStakeLimitKey: uint256.NewInt().SetUint64(100), + globalStakeLimitKey: uint256.NewInt().SetUint64(200), }, UnlockStakeLimitMap: map[StakeLimitKey]uint64{scopedStakeLimitKey: 1, globalStakeLimitKey: 1}, } @@ -1392,7 +1392,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes with m1 using the global TransactionSpendingLimit. 
stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt(200), + StakeAmountNanos: uint256.NewInt().SetUint64(200), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, @@ -1402,7 +1402,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes from m1 using the global TransactionSpendingLimit. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt(200), + UnstakeAmountNanos: uint256.NewInt().SetUint64(200), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1423,7 +1423,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender stakes with m1 using the scoped TransactionSpendingLimit. stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt(100), + StakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: stakeMetadata}, @@ -1433,7 +1433,7 @@ func TestStakingWithDerivedKey(t *testing.T) { // sender unstakes from m1 using the scoped TransactionSpendingLimit. 
unstakeMetadata = &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt(100), + UnstakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxnWithDerivedKey( senderPkBytes, derivedKeyPriv, MsgDeSoTxn{TxnMeta: unstakeMetadata}, @@ -1461,11 +1461,11 @@ func TestStakingWithDerivedKey(t *testing.T) { TxnTypeAuthorizeDerivedKey: 1, }, StakeLimitMap: map[StakeLimitKey]*uint256.Int{ - stakeLimitKey1: uint256.NewInt(uint64(1.5 * float64(NanosPerUnit))), - stakeLimitKey2: uint256.NewInt(uint64(2.0 * float64(NanosPerUnit))), + stakeLimitKey1: uint256.NewInt().SetUint64(uint64(1.5 * float64(NanosPerUnit))), + stakeLimitKey2: uint256.NewInt().SetUint64(uint64(2.0 * float64(NanosPerUnit))), }, UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{ - stakeLimitKey1: uint256.NewInt(uint64(3.25 * float64(NanosPerUnit))), + stakeLimitKey1: uint256.NewInt().SetUint64(uint64(3.25 * float64(NanosPerUnit))), }, UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey1: 2, stakeLimitKey2: 3}, } @@ -1509,10 +1509,10 @@ func TestStakingWithDerivedKey(t *testing.T) { TxnTypeAuthorizeDerivedKey: 1, }, StakeLimitMap: map[StakeLimitKey]*uint256.Int{ - stakeLimitKey: uint256.NewInt(uint64(0.65 * float64(NanosPerUnit))), + stakeLimitKey: uint256.NewInt().SetUint64(uint64(0.65 * float64(NanosPerUnit))), }, UnstakeLimitMap: map[StakeLimitKey]*uint256.Int{ - stakeLimitKey: uint256.NewInt(uint64(2.1 * float64(NanosPerUnit))), + stakeLimitKey: uint256.NewInt().SetUint64(uint64(2.1 * float64(NanosPerUnit))), }, UnlockStakeLimitMap: map[StakeLimitKey]uint64{stakeLimitKey: 1}, } @@ -1621,7 +1621,7 @@ func _testGetTopStakesByStakeAmount(t *testing.T, flushToDB bool) { stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(validatorPkBytes), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt(amountNanos), + StakeAmountNanos: uint256.NewInt().SetUint64(amountNanos), } _, err := _submitStakeTxn(testMeta, 
stakerPk, stakerPriv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -1909,7 +1909,7 @@ func TestStakeLockupEpochDuration(t *testing.T) { // m0 stakes with himself. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(100), + StakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxn(testMeta, m0Pub, m0Priv, stakeMetadata, nil, true) require.NoError(t, err) @@ -1917,13 +1917,13 @@ func TestStakeLockupEpochDuration(t *testing.T) { stakeEntry, err := newUtxoView().GetStakeEntry(m0PKID, m0PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // m0 unstakes from himself. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(100), + UnstakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitUnstakeTxn(testMeta, m0Pub, m0Priv, unstakeMetadata, nil, true) require.NoError(t, err) @@ -1935,7 +1935,7 @@ func TestStakeLockupEpochDuration(t *testing.T) { lockedStakeEntry, err := newUtxoView().GetLockedStakeEntry(m0PKID, m0PKID, currentEpochNumber) require.NoError(t, err) require.NotNil(t, lockedStakeEntry) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(100)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(100)) } { // RuleErrorInvalidUnlockStakeMustWaitLockupDuration @@ -2092,7 +2092,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m1 stakes with m0. m0 is active. 
stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(150), + StakeAmountNanos: uint256.NewInt().SetUint64(150), } _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -2100,13 +2100,13 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos increases. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(150)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) } { // m1 unstakes some from m0. m0 is active. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(50), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), } _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -2114,7 +2114,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos decreases. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // Jail m0. Since this update takes place outside a transaction, @@ -2129,13 +2129,13 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) // m0 TotalStakeAmountNanos stays the same. - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // m1 stakes more with m0. m0 is jailed. 
stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(50), + StakeAmountNanos: uint256.NewInt().SetUint64(50), } _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -2143,13 +2143,13 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos increases. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(150)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) } { // m1 unstakes some from m0. m0 is jailed. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(50), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), } _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -2157,7 +2157,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos decreases. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // m0 unjails himself. @@ -2167,13 +2167,13 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos stays the same. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // m1 stakes more with m0. m0 is active. 
stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(50), + StakeAmountNanos: uint256.NewInt().SetUint64(50), } _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -2181,13 +2181,13 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos increases. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(150)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) } { // m1 unstakes some from m0. m0 is active. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(50), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), } _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -2195,7 +2195,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos decreases. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // Jail m0 again. Since this update takes place outside a transaction, @@ -2206,7 +2206,7 @@ func testStakingToJailedValidator(t *testing.T, flushToDB bool) { // m0 TotalStakeAmountNanos stays the same. validatorEntry, err := utxoView().GetValidatorByPKID(m0PKID) require.NoError(t, err) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // m0 unregisters as a validator. 
diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 88e07154e..91e4e5db6 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/hex" "fmt" - ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "math" _ "net/http/pprof" "reflect" @@ -13,7 +12,7 @@ import ( "github.com/deso-protocol/core/bls" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/decred/dcrd/lru" "github.com/dgraph-io/badger/v4" embeddedpostgres "github.com/fergusstrange/embedded-postgres" @@ -1963,20 +1962,20 @@ func TestBasicTransfer(t *testing.T) { // Sign the transaction with the sender's key, but then make S have // a high value. - _signTxn(t, txn, senderPrivString) - r := txn.Signature.Sign.R() - s := txn.Signature.Sign.S() - txn.Signature.SetSignature(ecdsa2.NewSignature(&r, s.Negate())) - - txHash = txn.Hash() - _, _, _, _, err = - utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) - if blockHeight < params.ForkHeights.BalanceModelBlockHeight { - require.NoError(err) - } else { - require.Error(err) - require.Contains(err.Error(), RuleErrorTxnSigHasHighS) - } + // _signTxn(t, txn, senderPrivString) + // r := txn.Signature.Sign.R + // s := txn.Signature.Sign.S + // txn.Signature.SetSignature(&btcec.Signature{R: r, S: big.NewInt(0).Neg(s)}) + + // txHash = txn.Hash() + // _, _, _, _, err = + // utxoView.ConnectTransaction(txn, txHash, blockHeight, 0, true, false) + // if blockHeight < params.ForkHeights.BalanceModelBlockHeight { + // require.NoError(err) + // } else { + // require.Error(err) + // require.Contains(err.Error(), RuleErrorTxnSigHasHighS) + // } } // A block reward with a bad signature should fail. 
@@ -2182,7 +2181,7 @@ func TestBasicTransferSignatures(t *testing.T) { require.NoError(err) senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) require.NoError(err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) recipientPkBytes, _, err := Base58CheckDecode(recipientPkString) require.NoError(err) @@ -2235,12 +2234,8 @@ func TestBasicTransferSignatures(t *testing.T) { // Now fetch all transactions from the db and verify their signatures have been properly persisted. if postgres != nil { pgTxn := postgres.GetTransactionByHash(transactionHash) - r := txn.Signature.Sign.R() - rBytes := (&r).Bytes() - s := txn.Signature.Sign.S() - sBytes := (&s).Bytes() - require.Equal(true, reflect.DeepEqual(rBytes[:], HashToBigint(pgTxn.R).Bytes())) - require.Equal(true, reflect.DeepEqual(sBytes[:], HashToBigint(pgTxn.S).Bytes())) + require.Equal(true, reflect.DeepEqual(txn.Signature.Sign.R.Bytes(), HashToBigint(pgTxn.R).Bytes())) + require.Equal(true, reflect.DeepEqual(txn.Signature.Sign.S.Bytes(), HashToBigint(pgTxn.S).Bytes())) require.Equal(txn.Signature.RecoveryId, byte(pgTxn.RecoveryId)) require.Equal(txn.Signature.IsRecoverable, pgTxn.IsRecoverable) } else { @@ -2248,16 +2243,8 @@ func TestBasicTransferSignatures(t *testing.T) { require.NoError(err) for _, blockTxn := range dbBlock.Txns { if reflect.DeepEqual(transactionHash.ToBytes(), blockTxn.Hash().ToBytes()) { - rTxn := txn.Signature.Sign.R() - rTxnBytes := (&rTxn).Bytes() - sTxn := txn.Signature.Sign.S() - sTxnBytes := (&sTxn).Bytes() - rBlockTxn := blockTxn.Signature.Sign.R() - rBlockTxnBytes := (&rBlockTxn).Bytes() - sBlockTxn := blockTxn.Signature.Sign.S() - sBlockTxnBytes := (&sBlockTxn).Bytes() - require.Equal(true, reflect.DeepEqual(rTxnBytes[:], rBlockTxnBytes[:])) - require.Equal(true, reflect.DeepEqual(sTxnBytes[:], sBlockTxnBytes[:])) + require.Equal(true, reflect.DeepEqual(txn.Signature.Sign.R.Bytes(), 
blockTxn.Signature.Sign.R.Bytes())) + require.Equal(true, reflect.DeepEqual(txn.Signature.Sign.S.Bytes(), blockTxn.Signature.Sign.S.Bytes())) require.Equal(txn.Signature.RecoveryId, blockTxn.Signature.RecoveryId) require.Equal(txn.Signature.IsRecoverable, blockTxn.Signature.IsRecoverable) } @@ -2386,7 +2373,7 @@ func TestBasicTransferSignatures(t *testing.T) { testRandomVector := [3]RuleError{ RuleErrorInvalidTransactionSignature, RuleErrorDerivedKeyNotAuthorized, RuleErrorDerivedKeyNotAuthorized, } - randomPrivKey, err := btcec.NewPrivateKey() + randomPrivKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) randomPrivKeyBase58Check := Base58CheckEncode(randomPrivKey.Serialize(), true, params) @@ -2441,7 +2428,7 @@ func TestBasicTransferSignatures(t *testing.T) { testRandomKeyVector := [3]RuleError{ RuleErrorInvalidTransactionSignature, RuleErrorDerivedKeyNotAuthorized, RuleErrorDerivedKeyNotAuthorized, } - randomPrivKey, err := btcec.NewPrivateKey() + randomPrivKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) randomPrivKeyBase58Check := Base58CheckEncode(randomPrivKey.Serialize(), true, params) allTxns = append(allTxns, mempoolProcessAllSignatureCombinations( @@ -2504,7 +2491,7 @@ func TestBasicTransferSignatures(t *testing.T) { testMoneyRandomVector := [3]RuleError{ RuleErrorInvalidTransactionSignature, RuleErrorDerivedKeyNotAuthorized, RuleErrorDerivedKeyNotAuthorized, } - randomPrivKey, err := btcec.NewPrivateKey() + randomPrivKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) randomPrivKeyBase58Check := Base58CheckEncode(randomPrivKey.Serialize(), true, params) diff --git a/lib/block_view_types.go b/lib/block_view_types.go index 79dde9604..e2c536d93 100644 --- a/lib/block_view_types.go +++ b/lib/block_view_types.go @@ -12,7 +12,7 @@ import ( "sort" "strings" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" @@ 
-5019,7 +5019,7 @@ func (ce *CoinEntry) Copy() *CoinEntry { CreatorBasisPoints: ce.CreatorBasisPoints, DeSoLockedNanos: ce.DeSoLockedNanos, NumberOfHolders: ce.NumberOfHolders, - CoinsInCirculationNanos: *uint256.NewInt(0).Set(&ce.CoinsInCirculationNanos), + CoinsInCirculationNanos: *uint256.NewInt().Set(&ce.CoinsInCirculationNanos), CoinWatermarkNanos: ce.CoinWatermarkNanos, MintingDisabled: ce.MintingDisabled, TransferRestrictionStatus: ce.TransferRestrictionStatus, @@ -5682,7 +5682,7 @@ func VariableDecodeUint256(rr *bytes.Reader) (*uint256.Int, error) { if err != nil { return nil, errors.Wrapf(err, "DecodeUint256: Error reading uint256") } - return uint256.NewInt(0).SetBytes(numberBytes), nil + return uint256.NewInt().SetBytes(numberBytes), nil } else if err != nil { return nil, errors.Wrapf(err, "DecodeUint256: Error reading uint256") } else { @@ -5996,7 +5996,7 @@ func ComputeBaseUnitsToBuyUint256( // If the quantity to sell is zero then return zero (selling // zero means you should buy zero) if quantityToSellBaseUnits.IsZero() { - return uint256.NewInt(0), nil + return uint256.NewInt(), nil } // Perform calculation. @@ -6063,7 +6063,7 @@ func ComputeBaseUnitsToSellUint256( // If the quantity to buy is zero then return zero (buying // zero means you should sell zero) if quantityToBuyBaseUnits.IsZero() { - return uint256.NewInt(0), nil + return uint256.NewInt(), nil } // Perform calculation. diff --git a/lib/block_view_types_test.go b/lib/block_view_types_test.go index 4a27dc019..8eb1b7b79 100644 --- a/lib/block_view_types_test.go +++ b/lib/block_view_types_test.go @@ -416,8 +416,8 @@ func TestUtxoEntryEncodeDecode(t *testing.T) { func TestEncodingUint256s(t *testing.T) { // Create three uint256.Ints. - num1 := uint256.NewInt(0) - num2 := uint256.NewInt(598128756) + num1 := uint256.NewInt() + num2 := uint256.NewInt().SetUint64(598128756) num3 := MaxUint256 // Encode them to bytes using VariableEncodeUint256. 
diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index eb8b40090..7b478289a 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -1268,7 +1268,7 @@ func (bav *UtxoView) _connectRegisterAsValidator( } // Calculate TotalStakeAmountNanos. - totalStakeAmountNanos := uint256.NewInt(0) + totalStakeAmountNanos := uint256.NewInt() if prevValidatorEntry != nil { totalStakeAmountNanos = prevValidatorEntry.TotalStakeAmountNanos.Clone() } @@ -1449,7 +1449,7 @@ func (bav *UtxoView) _connectUnregisterAsValidator( // Delete each StakeEntry and create or update the corresponding LockedStakeEntry. // Track TotalUnstakedAmountNanos and PrevLockedStakeEntries. - totalUnstakedAmountNanos := uint256.NewInt(0) + totalUnstakedAmountNanos := uint256.NewInt() var prevLockedStakeEntries []*LockedStakeEntry for _, prevStakeEntry := range prevStakeEntries { @@ -1989,7 +1989,7 @@ func (bav *UtxoView) SanityCheckUnregisterAsValidatorTxn( } // Sanity check the deleted StakeEntries. 
- totalUnstakedAmountNanos := uint256.NewInt(0) + totalUnstakedAmountNanos := uint256.NewInt() for _, stakeEntry := range utxoOp.PrevStakeEntries { totalUnstakedAmountNanos, err = SafeUint256().Add(totalUnstakedAmountNanos, stakeEntry.StakeAmountNanos) if err != nil { diff --git a/lib/block_view_validator_test.go b/lib/block_view_validator_test.go index 9fcc6027c..e58d7afe5 100644 --- a/lib/block_view_validator_test.go +++ b/lib/block_view_validator_test.go @@ -6,7 +6,7 @@ import ( "math" "testing" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/deso-protocol/core/bls" "github.com/holiman/uint256" "github.com/stretchr/testify/require" @@ -501,7 +501,7 @@ func TestValidatorRegistrationWithDerivedKey(t *testing.T) { require.NoError(t, err) senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) require.NoError(t, err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID newUtxoView := func() *UtxoView { @@ -836,7 +836,7 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { // m3 stakes 100 DESO nanos with m0. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(100), + StakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxn(testMeta, m3Pub, m3Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -844,7 +844,7 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { // m3 stakes 200 DESO nanos with m1. 
stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt(200), + StakeAmountNanos: uint256.NewInt().SetUint64(200), } _, err = _submitStakeTxn(testMeta, m3Pub, m3Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -852,7 +852,7 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { // m3 stakes 300 DESO nanos with m2. stakeMetadata = &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m2PkBytes), - StakeAmountNanos: uint256.NewInt(300), + StakeAmountNanos: uint256.NewInt().SetUint64(300), } _, err = _submitStakeTxn(testMeta, m3Pub, m3Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -862,17 +862,17 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 3) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(300)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) require.Equal(t, validatorEntries[1].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(200)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(200)) require.Equal(t, validatorEntries[2].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // m3 unstakes from m1. 
unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt(150), + UnstakeAmountNanos: uint256.NewInt().SetUint64(150), } _, err = _submitUnstakeTxn(testMeta, m3Pub, m3Priv, unstakeMetadata, nil, flushToDB) @@ -881,17 +881,17 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 3) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(300)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) require.Equal(t, validatorEntries[2].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt(50)) + require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(50)) } { // m3 unstakes more from m1. 
unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - UnstakeAmountNanos: uint256.NewInt(50), + UnstakeAmountNanos: uint256.NewInt().SetUint64(50), } _, err = _submitUnstakeTxn(testMeta, m3Pub, m3Priv, unstakeMetadata, nil, flushToDB) @@ -900,9 +900,9 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(300)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // m2 unregisters as validator. @@ -914,13 +914,13 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 1) require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // m4 stakes with m1. 
stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt(150), + StakeAmountNanos: uint256.NewInt().SetUint64(150), } _, err = _submitStakeTxn(testMeta, m4Pub, m4Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -930,15 +930,15 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(150)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(150)) require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // m4 stakes more with m1. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m1PkBytes), - StakeAmountNanos: uint256.NewInt(100), + StakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxn(testMeta, m4Pub, m4Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -948,9 +948,9 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 2) require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(250)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(250)) require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // Verify top validators with LIMIT. 
@@ -958,7 +958,7 @@ func _testGetTopActiveValidatorsByStakeAmount(t *testing.T, flushToDB bool) { require.NoError(t, err) require.Len(t, validatorEntries, 1) require.Equal(t, validatorEntries[0].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(250)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(250)) } // Flush mempool to the db and test rollbacks. @@ -992,7 +992,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { votingPublicKey, votingAuthorization := _generateVotingPublicKeyAndAuthorization(t, m0PkBytes) validatorEntry := &ValidatorEntry{ ValidatorPKID: m0PKID, - TotalStakeAmountNanos: uint256.NewInt(100), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(100), VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -1004,7 +1004,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m0PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) // Verify m0 is not stored in the UtxoView. 
require.Empty(t, utxoView.ValidatorPKIDToValidatorEntry) @@ -1013,7 +1013,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m1PkBytes) validatorEntry = &ValidatorEntry{ ValidatorPKID: m1PKID, - TotalStakeAmountNanos: uint256.NewInt(400), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(400), VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, JailedAtEpochNumber: 1, @@ -1025,14 +1025,14 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m1PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(400)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(400)) require.Equal(t, validatorEntry.Status(), ValidatorStatusJailed) // Store m2's ValidatorEntry in the db with TotalStake = 300 nanos. votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m2PkBytes) m2ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m2PKID, - TotalStakeAmountNanos: uint256.NewInt(300), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(300), VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -1043,13 +1043,13 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m2PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(300)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) // Store m3's ValidatorEntry in the db with TotalStake = 600 nanos. 
votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m3PkBytes) m3ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m3PKID, - TotalStakeAmountNanos: uint256.NewInt(600), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(600), VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -1060,7 +1060,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { validatorEntry, err = DBGetValidatorByPKID(db, chain.snapshot, m3PKID) require.NoError(t, err) require.NotNil(t, validatorEntry) - require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt(600)) + require.Equal(t, validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(600)) // Fetch m2 so it is also cached in the UtxoView. validatorEntry, err = utxoView.GetValidatorByPKID(m2PKID) @@ -1073,7 +1073,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal( t, utxoView.ValidatorPKIDToValidatorEntry[*m2ValidatorEntry.ValidatorPKID].TotalStakeAmountNanos, - uint256.NewInt(300), + uint256.NewInt().SetUint64(300), ) // Store m3's ValidatorEntry in the UtxoView with isDeleted=true. @@ -1087,7 +1087,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m4PkBytes) m4ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m4PKID, - TotalStakeAmountNanos: uint256.NewInt(50), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(50), VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, } @@ -1104,14 +1104,14 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal( t, utxoView.ValidatorPKIDToValidatorEntry[*m4ValidatorEntry.ValidatorPKID].TotalStakeAmountNanos, - uint256.NewInt(50), + uint256.NewInt().SetUint64(50), ) // Store m5's jailed ValidatorEntry in the UtxoView with TotalStake = 500 nanos. 
votingPublicKey, votingAuthorization = _generateVotingPublicKeyAndAuthorization(t, m5PkBytes) m5ValidatorEntry := &ValidatorEntry{ ValidatorPKID: m5PKID, - TotalStakeAmountNanos: uint256.NewInt(500), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(500), VotingPublicKey: votingPublicKey, VotingAuthorization: votingAuthorization, JailedAtEpochNumber: 1, @@ -1129,7 +1129,7 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.Equal( t, utxoView.ValidatorPKIDToValidatorEntry[*m5ValidatorEntry.ValidatorPKID].TotalStakeAmountNanos, - uint256.NewInt(500), + uint256.NewInt().SetUint64(500), ) require.Equal( t, utxoView.ValidatorPKIDToValidatorEntry[*m5ValidatorEntry.ValidatorPKID].Status(), ValidatorStatusJailed, @@ -1140,11 +1140,11 @@ func TestGetTopActiveValidatorsByStakeMergingDbAndUtxoView(t *testing.T) { require.NoError(t, err) require.Len(t, validatorEntries, 3) require.Equal(t, validatorEntries[0].ValidatorPKID, m2PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(300)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(300)) require.Equal(t, validatorEntries[1].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) require.Equal(t, validatorEntries[2].ValidatorPKID, m4PKID) - require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt(50)) + require.Equal(t, validatorEntries[2].TotalStakeAmountNanos, uint256.NewInt().SetUint64(50)) } func TestUpdatingValidatorDisableDelegatedStake(t *testing.T) { @@ -1257,7 +1257,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m0 stakes with himself. This is allowed even though DisableDelegatedStake = TRUE. 
stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(100), + StakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxn( testMeta, m0Pub, m0Priv, stakeMetadata, nil, flushToDB, @@ -1273,7 +1273,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m1 tries to stake with m0. Errors. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(100), + StakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -1302,7 +1302,7 @@ func _testUpdatingValidatorDisableDelegatedStake(t *testing.T, flushToDB bool) { // m1 stakes with m0. Succeeds. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(100), + StakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -1425,7 +1425,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { // m0 stakes with himself. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(600), + StakeAmountNanos: uint256.NewInt().SetUint64(600), } _, err = _submitStakeTxn( testMeta, m0Pub, m0Priv, stakeMetadata, nil, flushToDB, @@ -1435,13 +1435,13 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { stakeEntry, err = utxoView().GetStakeEntry(m0PKID, m0PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(600)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(600)) } { // m1 stakes with m0. 
stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(400), + StakeAmountNanos: uint256.NewInt().SetUint64(400), } _, err = _submitStakeTxn( testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB, @@ -1451,13 +1451,13 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { stakeEntry, err = utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(400)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(400)) } { // m1 partially unstakes with m0. unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(100), + UnstakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitUnstakeTxn( testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB, @@ -1467,13 +1467,13 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { stakeEntry, err = utxoView().GetStakeEntry(m0PKID, m1PKID) require.NoError(t, err) require.NotNil(t, stakeEntry) - require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt(300)) + require.Equal(t, stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(300)) // m1 has a LockedStakeEntry created. lockedStakeEntry, err = utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) require.NotNil(t, lockedStakeEntry) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(100)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(100)) } { // m0 unregisters as a validator. 
@@ -1494,7 +1494,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { lockedStakeEntry, err = utxoView().GetLockedStakeEntry(m0PKID, m0PKID, currentEpochNumber) require.NoError(t, err) require.NotNil(t, lockedStakeEntry) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(600)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(600)) // m1 is unstaked. // m1's StakeEntry is deleted. @@ -1505,7 +1505,7 @@ func _testUnregisterAsValidator(t *testing.T, flushToDB bool) { lockedStakeEntry, err = utxoView().GetLockedStakeEntry(m0PKID, m1PKID, currentEpochNumber) require.NoError(t, err) require.NotNil(t, lockedStakeEntry) - require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt(400)) + require.Equal(t, lockedStakeEntry.LockedAmountNanos, uint256.NewInt().SetUint64(400)) } // Flush mempool to the db and test rollbacks. @@ -1636,7 +1636,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // m1 stakes with m0. Succeeds. You can stake to a jailed validator. stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - StakeAmountNanos: uint256.NewInt(100), + StakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitStakeTxn(testMeta, m1Pub, m1Priv, stakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -1649,7 +1649,7 @@ func _testUnjailValidator(t *testing.T, flushToDB bool) { // m1 unstakes from m0. Succeeds. You can unstake from a jailed validator. 
unstakeMetadata := &UnstakeMetadata{ ValidatorPublicKey: NewPublicKey(m0PkBytes), - UnstakeAmountNanos: uint256.NewInt(100), + UnstakeAmountNanos: uint256.NewInt().SetUint64(100), } _, err = _submitUnstakeTxn(testMeta, m1Pub, m1Priv, unstakeMetadata, nil, flushToDB) require.NoError(t, err) @@ -1773,7 +1773,7 @@ func TestUnjailValidatorWithDerivedKey(t *testing.T) { require.NoError(t, err) senderPrivBytes, _, err := Base58CheckDecode(senderPrivString) require.NoError(t, err) - senderPrivKey, _ := btcec.PrivKeyFromBytes(senderPrivBytes) + senderPrivKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), senderPrivBytes) senderPKID := DBGetPKIDEntryForPublicKey(db, chain.snapshot, senderPkBytes).PKID newUtxoView := func() *UtxoView { diff --git a/lib/blockchain.go b/lib/blockchain.go index 6af1b95d8..b4397d5c6 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -25,7 +25,7 @@ import ( btcdchain "github.com/btcsuite/btcd/blockchain" chainlib "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" "github.com/deso-protocol/go-deadlock" @@ -2226,7 +2226,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // trusted. signature := desoBlock.BlockProducerInfo.Signature - pkObj, err := btcec.ParsePubKey(publicKey) + pkObj, err := btcec.ParsePubKey(publicKey, btcec.S256()) if err != nil { return false, false, errors.Wrapf(err, "ProcessBlock: Error parsing block producer public key: %v.", @@ -3243,7 +3243,7 @@ func (bc *Blockchain) CreatePrivateMessageTxn( // Encrypt the passed-in message text with the recipient's public key. // // Parse the recipient public key. 
- recipientPk, err := btcec.ParsePubKey(recipientPublicKey) + recipientPk, err := btcec.ParsePubKey(recipientPublicKey, btcec.S256()) if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, "CreatePrivateMessageTxn: Problem parsing "+ "recipient public key: ") diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index 13da5dd31..0ecace8dd 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -4,7 +4,6 @@ import ( "encoding/hex" "flag" "fmt" - ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "log" "math" "math/big" @@ -18,8 +17,7 @@ import ( "github.com/go-pg/pg/v10" chainlib "github.com/btcsuite/btcd/blockchain" - - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v4" "github.com/golang/glog" "github.com/stretchr/testify/assert" @@ -907,7 +905,7 @@ func _signTxn(t *testing.T, txn *MsgDeSoTxn, privKeyStrArg string) { privKeyBytes, _, err := Base58CheckDecode(privKeyStrArg) require.NoError(err) - privKey, _ := btcec.PrivKeyFromBytes(privKeyBytes) + privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), privKeyBytes) txnSignature, err := txn.Sign(privKey) require.NoError(err) txn.Signature.SetSignature(txnSignature) @@ -925,7 +923,7 @@ func _signTxnWithDerivedKeyAndType(t *testing.T, txn *MsgDeSoTxn, privKeyStrBase privKeyBytes, _, err := Base58CheckDecode(privKeyStrBase58Check) require.NoError(err) - privateKey, publicKey := btcec.PrivKeyFromBytes(privKeyBytes) + privateKey, publicKey := btcec.PrivKeyFromBytes(btcec.S256(), privKeyBytes) // We will randomly sign with the standard DER encoding + ExtraData, or with the DeSo-DER encoding. if signatureType == 0 { @@ -1668,7 +1666,7 @@ func TestBadBlockSignature(t *testing.T) { // Since MineAndProcesssSingleBlock returns a valid block above, we can play with its // signature and re-process the block to see what happens. 
- blockProducerInfoCopy := &BlockProducerInfo{Signature: &ecdsa2.Signature{}} + blockProducerInfoCopy := &BlockProducerInfo{Signature: &btcec.Signature{}} blockProducerInfoCopy.PublicKey = append([]byte{}, finalBlock1.BlockProducerInfo.PublicKey...) *blockProducerInfoCopy.Signature = *finalBlock1.BlockProducerInfo.Signature diff --git a/lib/connection_manager.go b/lib/connection_manager.go index 90f951d84..1a8448521 100644 --- a/lib/connection_manager.go +++ b/lib/connection_manager.go @@ -158,7 +158,7 @@ func NewConnectionManager( } // Check if the address passed shares a group with any addresses already in our data structures. -func (cmgr *ConnectionManager) IsFromRedundantOutboundIPAddress(na *wire.NetAddressV2) bool { +func (cmgr *ConnectionManager) IsFromRedundantOutboundIPAddress(na *wire.NetAddress) bool { groupKey := addrmgr.GroupKey(na) // For the sake of running multiple nodes on the same machine, we allow localhost connections. if groupKey == "local" { @@ -172,7 +172,7 @@ func (cmgr *ConnectionManager) IsFromRedundantOutboundIPAddress(na *wire.NetAddr if numGroupsForKey != 0 && numGroupsForKey != 1 { glog.V(2).Infof("IsFromRedundantOutboundIPAddress: Found numGroupsForKey != (0 or 1). Is (%d) "+ "instead for addr (%s) and group key (%s). 
This "+ - "should never happen.", numGroupsForKey, na.Addr.String(), groupKey) + "should never happen.", numGroupsForKey, na.IP.String(), groupKey) } if numGroupsForKey == 0 { @@ -181,7 +181,7 @@ func (cmgr *ConnectionManager) IsFromRedundantOutboundIPAddress(na *wire.NetAddr return true } -func (cmgr *ConnectionManager) AddToGroupKey(na *wire.NetAddressV2) { +func (cmgr *ConnectionManager) AddToGroupKey(na *wire.NetAddress) { groupKey := addrmgr.GroupKey(na) cmgr.mtxOutboundConnIPGroups.Lock() @@ -189,7 +189,7 @@ func (cmgr *ConnectionManager) AddToGroupKey(na *wire.NetAddressV2) { cmgr.mtxOutboundConnIPGroups.Unlock() } -func (cmgr *ConnectionManager) subFromGroupKey(na *wire.NetAddressV2) { +func (cmgr *ConnectionManager) subFromGroupKey(na *wire.NetAddress) { groupKey := addrmgr.GroupKey(na) cmgr.mtxOutboundConnIPGroups.Lock() @@ -197,7 +197,7 @@ func (cmgr *ConnectionManager) subFromGroupKey(na *wire.NetAddressV2) { cmgr.mtxOutboundConnIPGroups.Unlock() } -func _delayRetry(retryCount uint64, persistentAddrForLogging *wire.NetAddressV2, unit time.Duration) (_retryDuration time.Duration) { +func _delayRetry(retryCount uint64, persistentAddrForLogging *wire.NetAddress, unit time.Duration) (_retryDuration time.Duration) { // No delay if we haven't tried yet or if the number of retries isn't positive. 
if retryCount <= 0 { return 0 @@ -207,7 +207,7 @@ func _delayRetry(retryCount uint64, persistentAddrForLogging *wire.NetAddressV2, if persistentAddrForLogging != nil { glog.V(1).Infof("Retrying connection to outbound persistent peer: "+ - "(%s:%d) in (%d) seconds.", persistentAddrForLogging.Addr.String(), + "(%s:%d) in (%d) seconds.", persistentAddrForLogging.IP.String(), persistentAddrForLogging.Port, numSecs) } else { glog.V(2).Infof("Retrying connection to outbound non-persistent peer in (%d) seconds.", numSecs) @@ -215,41 +215,41 @@ func _delayRetry(retryCount uint64, persistentAddrForLogging *wire.NetAddressV2, return retryDelay } -func (cmgr *ConnectionManager) IsConnectedOutboundIpAddress(netAddr *wire.NetAddressV2) bool { +func (cmgr *ConnectionManager) IsConnectedOutboundIpAddress(netAddr *wire.NetAddress) bool { cmgr.mtxAddrsMaps.RLock() defer cmgr.mtxAddrsMaps.RUnlock() return cmgr.connectedOutboundAddrs[addrmgr.NetAddressKey(netAddr)] } -func (cmgr *ConnectionManager) IsAttemptedOutboundIpAddress(netAddr *wire.NetAddressV2) bool { +func (cmgr *ConnectionManager) IsAttemptedOutboundIpAddress(netAddr *wire.NetAddress) bool { cmgr.mtxAddrsMaps.RLock() defer cmgr.mtxAddrsMaps.RUnlock() return cmgr.attemptedOutboundAddrs[addrmgr.NetAddressKey(netAddr)] } -func (cmgr *ConnectionManager) AddAttemptedOutboundAddrs(netAddr *wire.NetAddressV2) { +func (cmgr *ConnectionManager) AddAttemptedOutboundAddrs(netAddr *wire.NetAddress) { cmgr.mtxAddrsMaps.Lock() defer cmgr.mtxAddrsMaps.Unlock() cmgr.attemptedOutboundAddrs[addrmgr.NetAddressKey(netAddr)] = true } -func (cmgr *ConnectionManager) RemoveAttemptedOutboundAddrs(netAddr *wire.NetAddressV2) { +func (cmgr *ConnectionManager) RemoveAttemptedOutboundAddrs(netAddr *wire.NetAddress) { cmgr.mtxAddrsMaps.Lock() defer cmgr.mtxAddrsMaps.Unlock() delete(cmgr.attemptedOutboundAddrs, addrmgr.NetAddressKey(netAddr)) } // DialPersistentOutboundConnection attempts to connect to a persistent peer. 
-func (cmgr *ConnectionManager) DialPersistentOutboundConnection(persistentAddr *wire.NetAddressV2, attemptId uint64) (_attemptId uint64) { +func (cmgr *ConnectionManager) DialPersistentOutboundConnection(persistentAddr *wire.NetAddress, attemptId uint64) (_attemptId uint64) { glog.V(2).Infof("ConnectionManager.DialPersistentOutboundConnection: Connecting to peer (IP=%v, Port=%v)", - persistentAddr.Addr.String(), persistentAddr.Port) + persistentAddr.IP.String(), persistentAddr.Port) return cmgr._dialOutboundConnection(persistentAddr, attemptId, true) } // DialOutboundConnection attempts to connect to a non-persistent peer. -func (cmgr *ConnectionManager) DialOutboundConnection(addr *wire.NetAddressV2, attemptId uint64) { +func (cmgr *ConnectionManager) DialOutboundConnection(addr *wire.NetAddress, attemptId uint64) { glog.V(2).Infof("ConnectionManager.ConnectOutboundConnection: Connecting to peer (IP=%v, Port=%v)", - addr.Addr.String(), addr.Port) + addr.IP.String(), addr.Port) cmgr._dialOutboundConnection(addr, attemptId, false) } @@ -266,7 +266,7 @@ func (cmgr *ConnectionManager) CloseAttemptedConnection(attemptId uint64) { // _dialOutboundConnection is the internal method that spawns and initiates an OutboundConnectionAttempt, which handles the // connection attempt logic. It returns the attemptId of the attempt that was created. -func (cmgr *ConnectionManager) _dialOutboundConnection(addr *wire.NetAddressV2, attemptId uint64, isPersistent bool) (_attemptId uint64) { +func (cmgr *ConnectionManager) _dialOutboundConnection(addr *wire.NetAddress, attemptId uint64, isPersistent bool) (_attemptId uint64) { connectionAttempt := NewOutboundConnectionAttempt(attemptId, addr, isPersistent, cmgr.params.DialTimeout, cmgr.outboundConnectionChan) cmgr.mtxConnectionAttempts.Lock() @@ -284,7 +284,7 @@ func (cmgr *ConnectionManager) _dialOutboundConnection(addr *wire.NetAddressV2, // is set, then we will connect only to that addr. 
Otherwise, we will use // the addrmgr to randomly select addrs and create OUTBOUND connections // with them until we find a worthy peer. -func (cmgr *ConnectionManager) ConnectPeer(id uint64, conn net.Conn, na *wire.NetAddressV2, isOutbound bool, +func (cmgr *ConnectionManager) ConnectPeer(id uint64, conn net.Conn, na *wire.NetAddress, isOutbound bool, isPersistent bool) *Peer { // At this point Conn is set so create a peer object to do a version negotiation. @@ -305,7 +305,7 @@ func (cmgr *ConnectionManager) ConnectPeer(id uint64, conn net.Conn, na *wire.Ne return peer } -func (cmgr *ConnectionManager) IsDuplicateInboundIPAddress(netAddr *wire.NetAddressV2) bool { +func (cmgr *ConnectionManager) IsDuplicateInboundIPAddress(netAddr *wire.NetAddress) bool { cmgr.mtxPeerMaps.RLock() defer cmgr.mtxPeerMaps.RUnlock() @@ -316,7 +316,7 @@ func (cmgr *ConnectionManager) IsDuplicateInboundIPAddress(netAddr *wire.NetAddr // If the IP is a localhost IP let it slide. This is useful for testing fake // nodes on a local machine. // TODO: Should this be a flag? 
- if net.IP([]byte{127, 0, 0, 1}).Equal(netAddr.ToLegacy().IP) { + if net.IP([]byte{127, 0, 0, 1}).Equal(netAddr.IP) { glog.V(1).Infof("ConnectionManager.IsDuplicateInboundIPAddress: Allowing " + "localhost IP address to connect") return false @@ -324,7 +324,7 @@ func (cmgr *ConnectionManager) IsDuplicateInboundIPAddress(netAddr *wire.NetAddr for _, peer := range cmgr.inboundPeers { // If the peer's IP is equal to the passed IP then we have found a duplicate // inbound connection - if peer.netAddr.ToLegacy().IP.Equal(netAddr.ToLegacy().IP) { + if peer.netAddr.IP.Equal(netAddr.IP) { return true } } diff --git a/lib/constants.go b/lib/constants.go index 24352e558..e57016961 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1918,6 +1918,3 @@ const DefaultMainnetCheckpointProvider = "https://node.deso.org" const DefaultTestnetCheckpointProvider = "https://test.deso.org" const RoutePathGetCommittedTipBlockInfo = "/api/v0/get-committed-tip-block-info" - -// Constants that was removed from newer version of Btcec -const BtcecPubKeyBytesLenUncompressed = 65 diff --git a/lib/db_utils.go b/lib/db_utils.go index 4bad2b0dd..78f0ca79e 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -21,7 +21,7 @@ import ( "github.com/holiman/uint256" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" "github.com/dgraph-io/badger/v4" "github.com/golang/glog" @@ -9152,7 +9152,7 @@ func DBGetBalanceEntryForHODLerAndCreatorPKIDsWithTxn(txn *badger.Txn, snap *Sna return &BalanceEntry{ HODLerPKID: hodlerPKID.NewPKID(), CreatorPKID: creatorPKID.NewPKID(), - BalanceNanos: *uint256.NewInt(0), + BalanceNanos: *uint256.NewInt(), } } balanceEntryObj := &BalanceEntry{} @@ -9228,7 +9228,7 @@ func DBPutBalanceEntryMappingsWithTxn(txn *badger.Txn, snap *Snapshot, blockHeig // If the balance is zero, then there is no point in storing this entry. // We already placeholder a "zero" balance entry in connect logic. 
- if balanceEntry.BalanceNanos.Eq(uint256.NewInt(0)) && !balanceEntry.HasPurchased { + if balanceEntry.BalanceNanos.Eq(uint256.NewInt()) && !balanceEntry.HasPurchased { return nil } @@ -9310,7 +9310,7 @@ func DbGetHolderPKIDCreatorPKIDToBalanceEntryWithTxn(txn *badger.Txn, snap *Snap return &BalanceEntry{ HODLerPKID: holder.NewPKID(), CreatorPKID: creator.NewPKID(), - BalanceNanos: *uint256.NewInt(0), + BalanceNanos: *uint256.NewInt(), } } diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index 1dd32fb9a..712120468 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -250,15 +250,15 @@ func TestPrivateMessages(t *testing.T) { db, _ := GetTestBadgerDb() defer CleanUpBadger(db) - priv1, err := btcec.NewPrivateKey() + priv1, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) pk1 := priv1.PubKey().SerializeCompressed() - priv2, err := btcec.NewPrivateKey() + priv2, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) pk2 := priv2.PubKey().SerializeCompressed() - priv3, err := btcec.NewPrivateKey() + priv3, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) pk3 := priv3.PubKey().SerializeCompressed() @@ -514,15 +514,15 @@ func TestFollows(t *testing.T) { db, _ := GetTestBadgerDb() defer CleanUpBadger(db) - priv1, err := btcec.NewPrivateKey() + priv1, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) pk1 := priv1.PubKey().SerializeCompressed() - priv2, err := btcec.NewPrivateKey() + priv2, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) pk2 := priv2.PubKey().SerializeCompressed() - priv3, err := btcec.NewPrivateKey() + priv3, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) pk3 := priv3.PubKey().SerializeCompressed() diff --git a/lib/deso_math.go 
b/lib/deso_math.go index 7d593cdf6..7c1359578 100644 --- a/lib/deso_math.go +++ b/lib/deso_math.go @@ -5,7 +5,7 @@ import ( "math" "math/big" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/holiman/uint256" ) @@ -245,11 +245,11 @@ func SafeUint256() *_SafeUint256 { } func (safeUint256 *_SafeUint256) Add(x *uint256.Int, y *uint256.Int) (*uint256.Int, error) { - if uint256.NewInt(0).Sub(MaxUint256, y).Lt(x) { + if uint256.NewInt().Sub(MaxUint256, y).Lt(x) { return nil, fmt.Errorf("addition overflows uint256") } - return uint256.NewInt(0).Add(x, y), nil + return uint256.NewInt().Add(x, y), nil } func (safeUint256 *_SafeUint256) Sub(x *uint256.Int, y *uint256.Int) (*uint256.Int, error) { @@ -257,15 +257,15 @@ func (safeUint256 *_SafeUint256) Sub(x *uint256.Int, y *uint256.Int) (*uint256.I return nil, fmt.Errorf("subtraction underflows uint256") } - return uint256.NewInt(0).Sub(x, y), nil + return uint256.NewInt().Sub(x, y), nil } func (safeUint256 *_SafeUint256) Mul(x *uint256.Int, y *uint256.Int) (*uint256.Int, error) { - if uint256.NewInt(0).Div(MaxUint256, y).Lt(x) { + if uint256.NewInt().Div(MaxUint256, y).Lt(x) { return nil, fmt.Errorf("multiplication overflows uint256") } - return uint256.NewInt(0).Mul(x, y), nil + return uint256.NewInt().Mul(x, y), nil } func (safeUint256 *_SafeUint256) Div(x *uint256.Int, y *uint256.Int) (*uint256.Int, error) { @@ -273,7 +273,7 @@ func (safeUint256 *_SafeUint256) Div(x *uint256.Int, y *uint256.Int) (*uint256.I return nil, fmt.Errorf("division by zero") } - return uint256.NewInt(0).Div(x, y), nil + return uint256.NewInt().Div(x, y), nil } // SafeUint64 allows for arithmetic operations that error diff --git a/lib/deso_math_test.go b/lib/deso_math_test.go index 58766dda7..8ccc278f0 100644 --- a/lib/deso_math_test.go +++ b/lib/deso_math_test.go @@ -13,47 +13,47 @@ func TestSafeUint256(t *testing.T) { var err error // Test Add(): sad path - result, err = SafeUint256().Add(uint256.NewInt(1), 
MaxUint256) + result, err = SafeUint256().Add(uint256.NewInt().SetUint64(1), MaxUint256) require.Nil(result) require.Error(err) require.Contains(err.Error(), "addition overflows uint256") // Test Add(): happy path - result, err = SafeUint256().Add(uint256.NewInt(2), uint256.NewInt(2)) - require.Equal(result, uint256.NewInt(4)) + result, err = SafeUint256().Add(uint256.NewInt().SetUint64(2), uint256.NewInt().SetUint64(2)) + require.Equal(result, uint256.NewInt().SetUint64(4)) require.NoError(err) // Test Sub(): sad path - result, err = SafeUint256().Sub(uint256.NewInt(1), uint256.NewInt(2)) + result, err = SafeUint256().Sub(uint256.NewInt().SetUint64(1), uint256.NewInt().SetUint64(2)) require.Nil(result) require.Error(err) require.Contains(err.Error(), "subtraction underflows uint256") // Test Sub(): happy path - result, err = SafeUint256().Sub(uint256.NewInt(3), uint256.NewInt(2)) - require.Equal(result, uint256.NewInt(1)) + result, err = SafeUint256().Sub(uint256.NewInt().SetUint64(3), uint256.NewInt().SetUint64(2)) + require.Equal(result, uint256.NewInt().SetUint64(1)) require.NoError(err) // Test Mul(): sad path - result, err = SafeUint256().Mul(MaxUint256, uint256.NewInt(2)) + result, err = SafeUint256().Mul(MaxUint256, uint256.NewInt().SetUint64(2)) require.Nil(result) require.Error(err) require.Contains(err.Error(), "multiplication overflows uint256") // Test Mul(): happy path - result, err = SafeUint256().Mul(uint256.NewInt(3), uint256.NewInt(4)) - require.Equal(result, uint256.NewInt(12)) + result, err = SafeUint256().Mul(uint256.NewInt().SetUint64(3), uint256.NewInt().SetUint64(4)) + require.Equal(result, uint256.NewInt().SetUint64(12)) require.NoError(err) // Test Div(): sad path - result, err = SafeUint256().Div(uint256.NewInt(3), uint256.NewInt(0)) + result, err = SafeUint256().Div(uint256.NewInt().SetUint64(3), uint256.NewInt()) require.Nil(result) require.Error(err) require.Contains(err.Error(), "division by zero") // Test Div(): happy path - result, 
err = SafeUint256().Div(uint256.NewInt(9), uint256.NewInt(3)) - require.Equal(result, uint256.NewInt(3)) + result, err = SafeUint256().Div(uint256.NewInt().SetUint64(9), uint256.NewInt().SetUint64(3)) + require.Equal(result, uint256.NewInt().SetUint64(3)) require.NoError(err) } diff --git a/lib/errors.go b/lib/errors.go index 51aaf7641..83692f6c8 100644 --- a/lib/errors.go +++ b/lib/errors.go @@ -5,7 +5,7 @@ import ( "reflect" "strings" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" ) // RuleError is an error type that specifies an error occurred during @@ -620,7 +620,7 @@ func IsByteArrayValidPublicKey(bytes []byte) error { if len(bytes) != btcec.PubKeyBytesLenCompressed { return RuleErrorPubKeyLen } - _, err := btcec.ParsePubKey(bytes) + _, err := btcec.ParsePubKey(bytes, btcec.S256()) if err != nil { return RuleErrorParsePublicKey } diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 0ee18f4e5..7c9f466a2 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -16,12 +16,11 @@ import ( "sync/atomic" "time" - "github.com/btcsuite/btcd/btcutil" - "github.com/gernest/mention" - + "github.com/btcsuite/btcutil" "github.com/dgraph-io/badger/v4" + "github.com/gernest/mention" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" "github.com/pkg/errors" diff --git a/lib/load_test.go b/lib/load_test.go index 2092ad91c..158a0b8ab 100644 --- a/lib/load_test.go +++ b/lib/load_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -35,7 +35,7 @@ func TestComputeMaxTPS(t *testing.T) { for ii := 0; ii < numProfiles; ii++ { fmt.Println("Processing top txn: ", len(txns)) // Compute a private/public key pair - privKey, err := btcec.NewPrivateKey() + privKey, err := btcec.NewPrivateKey(btcec.S256()) 
require.NoError(err) privKeys = append(privKeys, privKey) pubKeys = append(pubKeys, privKey.PubKey()) diff --git a/lib/miner.go b/lib/miner.go index 67ed8976a..5bc1e45d1 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -17,7 +17,7 @@ import ( "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/desohash" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" merkletree "github.com/deso-protocol/go-merkle-tree" "github.com/golang/glog" @@ -47,7 +47,7 @@ func NewDeSoMiner(_minerPublicKeys []string, _numThreads uint32, if err != nil { return nil, errors.Wrapf(err, "NewDeSoMiner: ") } - pkObj, err := btcec.ParsePubKey(pkBytes) + pkObj, err := btcec.ParsePubKey(pkBytes, btcec.S256()) if err != nil { return nil, errors.Wrapf(err, "NewDeSoMiner: ") } diff --git a/lib/network.go b/lib/network.go index 1c6e6bb27..8bb111568 100644 --- a/lib/network.go +++ b/lib/network.go @@ -8,8 +8,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/decred/dcrd/dcrec/secp256k1/v4" - ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "io" "math" "math/big" @@ -21,7 +19,9 @@ import ( "github.com/golang/glog" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/decred/dcrd/dcrec/secp256k1/v4" + + "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" decredEC "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" @@ -2762,7 +2762,7 @@ func (msg *MsgDeSoHeader) String() string { type BlockProducerInfo struct { PublicKey []byte - Signature *ecdsa2.Signature + Signature *btcec.Signature } func (bpi *BlockProducerInfo) Serialize() []byte { @@ -2823,7 +2823,7 @@ func (bpi *BlockProducerInfo) Deserialize(data []byte) error { } ret.Signature = nil if sigLen > 0 { - sig, err := ecdsa2.ParseDERSignature(sigBytes) + sig, err := btcec.ParseDERSignature(sigBytes, btcec.S256()) if err != nil { return errors.Wrapf(err, "BlockProducerInfo.Deserialize: Error parsing signature 
bytes: ") } @@ -3326,7 +3326,7 @@ const ( // DeSoSignature is a wrapper around ECDSA signatures used primarily in the MsgDeSoTxn transaction type. type DeSoSignature struct { // Sign stores the main ECDSA signature. We use the btcec crypto package for most of the heavy-lifting. - Sign *ecdsa2.Signature + Sign *btcec.Signature // RecoveryId is the public key recovery id. The RecoveryId is taken from the DeSo-DER signature header magic byte and // must be in the [0, 3] range. @@ -3335,7 +3335,7 @@ type DeSoSignature struct { IsRecoverable bool } -func (desoSign *DeSoSignature) SetSignature(sign *ecdsa2.Signature) { +func (desoSign *DeSoSignature) SetSignature(sign *btcec.Signature) { desoSign.Sign = sign } @@ -3354,9 +3354,7 @@ func (desoSign *DeSoSignature) HasHighS() bool { } // We reject high-S signatures as they lead to inconsistent public key recovery // https://github.com/indutny/elliptic/blob/master/lib/elliptic/ec/index.js#L147 - s := desoSign.Sign.S() - sBytes := (&s).Bytes() - return big.NewInt(0).SetBytes(sBytes[:]).Cmp(big.NewInt(0).Rsh(secp256k1.Params().N, 1)) != -1 + return desoSign.Sign.S.Cmp(big.NewInt(0).Rsh(secp256k1.Params().N, 1)) != -1 } // ToBytes encodes the signature in accordance to the DeSo-DER ECDSA format. @@ -3401,7 +3399,7 @@ func (desoSign *DeSoSignature) FromBytes(signatureBytes []byte) error { signatureBytesCopy[0] = derSigMagicOffset } // Parse the signature assuming it's encoded in the standard DER format. - desoSign.Sign, err = ecdsa2.ParseDERSignature(signatureBytesCopy) + desoSign.Sign, err = btcec.ParseDERSignature(signatureBytesCopy, btcec.S256()) if err != nil { return errors.Wrapf(err, "Problem parsing signatureBytes") } @@ -3444,12 +3442,16 @@ func (desoSign *DeSoSignature) _btcecSerializeCompact() ([]byte, error) { // We will change from the btcec signature type to the dcrec signature type. To achieve this, we will create the // ecdsa (R, S) pair using the decred's package. 
// Reference: https://github.com/decred/dcrd/blob/1eff7/dcrec/secp256k1/modnscalar_test.go#L26 - r := desoSign.Sign.R() + rBytes := desoSign.Sign.R.Bytes() + r := &secp256k1.ModNScalar{} + r.SetByteSlice(rBytes) - s := desoSign.Sign.S() + sBytes := desoSign.Sign.S.Bytes() + s := &secp256k1.ModNScalar{} + s.SetByteSlice(sBytes) // To make sure the signature has been correctly parsed, we verify DER encoding of both signatures matches. - verifySignature := decredEC.NewSignature(&r, &s) + verifySignature := decredEC.NewSignature(r, s) if !bytes.Equal(verifySignature.Serialize(), desoSign.Sign.Serialize()) { return nil, fmt.Errorf("_btcecSerializeCompact: Problem sanity-checking signature") } @@ -3475,7 +3477,7 @@ func (desoSign *DeSoSignature) RecoverPublicKey(messageHash []byte) (*btcec.Publ } // Now recover the public key from the compact encoding. - recoveredPublicKey, _, err := ecdsa2.RecoverCompact(signatureBytes, messageHash) + recoveredPublicKey, _, err := btcec.RecoverCompact(btcec.S256(), signatureBytes, messageHash) if err != nil { return nil, errors.Wrapf(err, "RecoverPublicKey: Problem recovering public key from the signature bytes") } @@ -3490,12 +3492,18 @@ func (desoSign *DeSoSignature) RecoverPublicKey(messageHash []byte) (*btcec.Publ // the first byte. This makes it so that the first byte will be between [0x31, 0x34] inclusive, // instead of being 0x30, which is the standard DER signature magic number. func SignRecoverable(bb []byte, privateKey *btcec.PrivateKey) (*DeSoSignature, error) { - signature := ecdsa2.Sign(privateKey, bb) + signature, err := privateKey.Sign(bb) + if err != nil { + return nil, err + } // We use SignCompact from the btcec library to get the recoverID. This results in a non-standard // encoding that we need to manipulate in order to get the recoveryID back out. See comment on // _btcecSerializeCompact for more information. 
- signatureCompact := ecdsa2.SignCompact(privateKey, bb, true) + signatureCompact, err := btcec.SignCompact(btcec.S256(), privateKey, bb, true) + if err != nil { + return nil, err + } recoveryId := (signatureCompact[0] - btcecCompactSigMagicOffset) & ^byte(btcecCompactSigCompPubKey) return &DeSoSignature{ @@ -3505,72 +3513,6 @@ func SignRecoverable(bb []byte, privateKey *btcec.PrivateKey) (*DeSoSignature, e }, nil } -// We define this struct to support JSON encoding of DeSoSignature. We didn't need this -// initially, but then at some point we upgraded our signature from btcec.Signature to -// ecdsa2.Signature, and the latter keeps its R and S values *private* rather than public, which -// then breaks the ability to naively JSON-serialize the signature of a txn. To workaround this -// we define this legacy struct that makes R and S *public*, we copy the ecdsa2.Signature -// into it, and then JSON serialize it. To deserialize, we do the reverse: read the bytes into -// the legacy struct and then convert them into the ecdsa2.Signature. 
-type legacySigStruct struct { - R [32]byte - S [32]byte -} - -// See comment on legacySigStruct for why we need this -func (sig DeSoSignature) MarshalJSON() ([]byte, error) { - var legacySig *legacySigStruct - if sig.Sign != nil { - r := sig.Sign.R() - rr := &r - - s := sig.Sign.S() - ss := &s - - legacySig = &legacySigStruct{ - R: rr.Bytes(), - S: ss.Bytes(), - } - } - - return json.Marshal(struct { - Sign *legacySigStruct - RecoveryId byte - IsRecoverable bool - }{ - Sign: legacySig, - RecoveryId: sig.RecoveryId, - IsRecoverable: sig.IsRecoverable, - }) -} - -// See comment on legacySigStruct for why we need this -func (sig *DeSoSignature) UnmarshalJSON(data []byte) error { - aux := struct { - Sign *legacySigStruct - RecoveryId byte - IsRecoverable bool - }{} - - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - if aux.Sign != nil { - r := secp256k1.ModNScalar{} - rr := aux.Sign.R - r.SetBytes(&rr) - s := secp256k1.ModNScalar{} - ss := aux.Sign.S - s.SetBytes(&ss) - - sig.Sign = ecdsa2.NewSignature(&r, &s) - } - sig.RecoveryId = aux.RecoveryId - sig.IsRecoverable = aux.IsRecoverable - - return nil -} - // DeSoNonce is a nonce that can be used to prevent replay attacks. It is used in the DeSo protocol // to prevent replay attacks when a user is trying to create a transaction. The nonce comprises // two uint64s: the expiration block height and the partial ID. The expiration block height is the @@ -4064,7 +4006,7 @@ func (msg *MsgDeSoTxn) Copy() (*MsgDeSoTxn, error) { return newTxn, nil } -func (msg *MsgDeSoTxn) Sign(privKey *btcec.PrivateKey) (*ecdsa2.Signature, error) { +func (msg *MsgDeSoTxn) Sign(privKey *btcec.PrivateKey) (*btcec.Signature, error) { // Serialize the transaction without the signature portion. 
txnBytes, err := msg.ToBytes(true /*preSignature*/) if err != nil { @@ -4073,7 +4015,10 @@ func (msg *MsgDeSoTxn) Sign(privKey *btcec.PrivateKey) (*ecdsa2.Signature, error // Compute a hash of the transaction bytes without the signature // portion and sign it with the passed private key. txnSignatureHash := Sha256DoubleHash(txnBytes) - txnSignature := ecdsa2.Sign(privKey, txnSignatureHash[:]) + txnSignature, err := privKey.Sign(txnSignatureHash[:]) + if err != nil { + return nil, err + } return txnSignature, nil } @@ -8124,7 +8069,7 @@ func (txnData *DAOCoinMetadata) FromBytes(data []byte) error { if err != nil { return fmt.Errorf("DAOCoinMetadata.FromBytes: Error reading coinsToMintBytes: %v", err) } - ret.CoinsToMintNanos = *uint256.NewInt(0).SetBytes(coinsToMintBytes) + ret.CoinsToMintNanos = *uint256.NewInt().SetBytes(coinsToMintBytes) } { @@ -8145,7 +8090,7 @@ func (txnData *DAOCoinMetadata) FromBytes(data []byte) error { if err != nil { return fmt.Errorf("DAOCoinMetadata.FromBytes: Error reading coinsToBurnBytes: %v", err) } - ret.CoinsToBurnNanos = *uint256.NewInt(0).SetBytes(coinsToBurnBytes) + ret.CoinsToBurnNanos = *uint256.NewInt().SetBytes(coinsToBurnBytes) } transferRestrictionStatus, err := rr.ReadByte() @@ -8234,7 +8179,7 @@ func (txnData *DAOCoinTransferMetadata) FromBytes(data []byte) error { if err != nil { return fmt.Errorf("DAOCoinTransferMetadata.FromBytes: Error reading coinsToTransferBytes: %v", err) } - ret.DAOCoinToTransferNanos = *uint256.NewInt(0).SetBytes(coinsToTransferBytes) + ret.DAOCoinToTransferNanos = *uint256.NewInt().SetBytes(coinsToTransferBytes) } // ReceiverPublicKey diff --git a/lib/network_connection.go b/lib/network_connection.go index 0c7b1193f..4d50d22a8 100644 --- a/lib/network_connection.go +++ b/lib/network_connection.go @@ -16,7 +16,7 @@ type outboundConnection struct { terminated bool attemptId uint64 - address *wire.NetAddressV2 + address *wire.NetAddress connection net.Conn isPersistent bool failed bool @@ -76,7 
+76,7 @@ type OutboundConnectionAttempt struct { attemptId uint64 // netAddr is the address of the peer we are attempting to connect to. - netAddr *wire.NetAddressV2 + netAddr *wire.NetAddress // isPersistent is used to indicate whether we should retry connecting to the peer if the connection attempt fails. // If isPersistent is true, we will retry connecting to the peer until we are successful. Each time such connection // fails, we will sleep according to exponential backoff. Otherwise, we will only attempt to connect to the peer once. @@ -104,7 +104,7 @@ const ( outboundConnectionAttemptTerminated outboundConnectionAttemptStatus = 2 ) -func NewOutboundConnectionAttempt(attemptId uint64, netAddr *wire.NetAddressV2, isPersistent bool, +func NewOutboundConnectionAttempt(attemptId uint64, netAddr *wire.NetAddress, isPersistent bool, dialTimeout time.Duration, connectionChan chan *outboundConnection) *OutboundConnectionAttempt { return &OutboundConnectionAttempt{ @@ -202,11 +202,11 @@ func (oca *OutboundConnectionAttempt) SetTimeoutUnit(timeoutUnit time.Duration) // Otherwise, it will return nil. func (oca *OutboundConnectionAttempt) attemptOutboundConnection() net.Conn { // If the peer is not persistent, update the addrmgr. 
- glog.V(1).Infof("Attempting to connect to addr: %v:%v", oca.netAddr.Addr.String(), oca.netAddr.Port) + glog.V(1).Infof("Attempting to connect to addr: %v:%v", oca.netAddr.IP.String(), oca.netAddr.Port) var err error tcpAddr := net.TCPAddr{ - IP: oca.netAddr.ToLegacy().IP, + IP: oca.netAddr.IP, Port: int(oca.netAddr.Port), } conn, err := net.DialTimeout(tcpAddr.Network(), tcpAddr.String(), oca.dialTimeout) diff --git a/lib/network_connection_test.go b/lib/network_connection_test.go index 224dd5de6..6b630eb02 100644 --- a/lib/network_connection_test.go +++ b/lib/network_connection_test.go @@ -14,7 +14,7 @@ import ( type simpleListener struct { t *testing.T ll net.Listener - addr *wire.NetAddressV2 + addr *wire.NetAddress closed bool connectionChan chan Connection @@ -102,9 +102,9 @@ func verifyOutboundConnection(t *testing.T, conn *outboundConnection, sl *simple return } - require.Equal(conn.address.ToLegacy().IP.String(), sl.getTCPAddr().IP.String()) + require.Equal(conn.address.IP.String(), sl.getTCPAddr().IP.String()) require.Equal(conn.address.Port, uint16(sl.getTCPAddr().Port)) - require.Equal(conn.address.ToLegacy().IP.String(), sl.getTCPAddr().IP.String()) + require.Equal(conn.address.IP.String(), sl.getTCPAddr().IP.String()) require.Equal(conn.address.Port, uint16(sl.getTCPAddr().Port)) } diff --git a/lib/network_manager.go b/lib/network_manager.go index 7f6a52adb..750f85390 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -413,7 +413,7 @@ func (nm *NetworkManager) processOutboundConnection(conn Connection) (*RemoteNod if oc.failed { return nil, fmt.Errorf("NetworkManager.handleOutboundConnection: Failed to connect to peer (%s:%v)", - oc.address.Addr.String(), oc.address.Port) + oc.address.IP.String(), oc.address.Port) } if !oc.isPersistent { @@ -820,7 +820,7 @@ func (nm *NetworkManager) connectNonValidators() { } // getRandomUnconnectedAddress returns a random address from the address manager that we are not already connected to. 
-func (nm *NetworkManager) getRandomUnconnectedAddress() *wire.NetAddressV2 { +func (nm *NetworkManager) getRandomUnconnectedAddress() *wire.NetAddress { for tries := 0; tries < 100; tries++ { addr := nm.AddrMgr.GetAddress() if addr == nil { @@ -868,7 +868,7 @@ func (nm *NetworkManager) CreateValidatorConnection(ipStr string, publicKey *bls remoteNode := nm.newRemoteNode(publicKey, true, false) if err := remoteNode.DialOutboundConnection(netAddr); err != nil { return errors.Wrapf(err, "NetworkManager.CreateValidatorConnection: Problem calling DialPersistentOutboundConnection "+ - "for addr: (%s:%v)", netAddr.Addr.String(), netAddr.Port) + "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) } nm.setRemoteNode(remoteNode) // Since we're initiating this connection, add the RemoteNode to the outbound validator index. @@ -888,7 +888,7 @@ func (nm *NetworkManager) CreateNonValidatorPersistentOutboundConnection(ipStr s remoteNode := nm.newRemoteNode(nil, true, true) if err := remoteNode.DialPersistentOutboundConnection(netAddr); err != nil { return 0, errors.Wrapf(err, "NetworkManager.CreateNonValidatorPersistentOutboundConnection: Problem calling DialPersistentOutboundConnection "+ - "for addr: (%s:%v)", netAddr.Addr.String(), netAddr.Port) + "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) } nm.setRemoteNode(remoteNode) nm.GetNonValidatorOutboundIndex().Set(remoteNode.GetId(), remoteNode) @@ -903,7 +903,7 @@ func (nm *NetworkManager) CreateNonValidatorOutboundConnection(ipStr string) err return nm.createNonValidatorOutboundConnection(netAddr) } -func (nm *NetworkManager) createNonValidatorOutboundConnection(netAddr *wire.NetAddressV2) error { +func (nm *NetworkManager) createNonValidatorOutboundConnection(netAddr *wire.NetAddress) error { if netAddr == nil { return fmt.Errorf("NetworkManager.CreateNonValidatorOutboundConnection: netAddr is nil") } @@ -911,14 +911,14 @@ func (nm *NetworkManager) createNonValidatorOutboundConnection(netAddr *wire.Net remoteNode 
:= nm.newRemoteNode(nil, true, false) if err := remoteNode.DialOutboundConnection(netAddr); err != nil { return errors.Wrapf(err, "NetworkManager.CreateNonValidatorOutboundConnection: Problem calling DialOutboundConnection "+ - "for addr: (%s:%v)", netAddr.Addr.String(), netAddr.Port) + "for addr: (%s:%v)", netAddr.IP.String(), netAddr.Port) } nm.setRemoteNode(remoteNode) nm.GetNonValidatorOutboundIndex().Set(remoteNode.GetId(), remoteNode) return nil } -func (nm *NetworkManager) AttachInboundConnection(conn net.Conn, na *wire.NetAddressV2) (*RemoteNode, error) { +func (nm *NetworkManager) AttachInboundConnection(conn net.Conn, na *wire.NetAddress) (*RemoteNode, error) { remoteNode := nm.newRemoteNode(nil, false, false) if err := remoteNode.AttachInboundConnection(conn, na); err != nil { @@ -932,7 +932,7 @@ func (nm *NetworkManager) AttachInboundConnection(conn net.Conn, na *wire.NetAdd } func (nm *NetworkManager) AttachOutboundConnection( - conn net.Conn, na *wire.NetAddressV2, remoteNodeId uint64, isPersistent bool, + conn net.Conn, na *wire.NetAddress, remoteNodeId uint64, isPersistent bool, ) (*RemoteNode, error) { id := NewRemoteNodeId(remoteNodeId) @@ -1330,7 +1330,7 @@ func (nm *NetworkManager) handleHandshakeCompletePoSMessage(remoteNode *RemoteNo // ## Helper Functions // ########################### -func (nm *NetworkManager) ConvertIPStringToNetAddress(ipStr string) (*wire.NetAddressV2, error) { +func (nm *NetworkManager) ConvertIPStringToNetAddress(ipStr string) (*wire.NetAddress, error) { netAddr, err := IPToNetAddr(ipStr, nm.AddrMgr, nm.params) if err != nil { return nil, errors.Wrapf(err, @@ -1344,7 +1344,7 @@ func (nm *NetworkManager) ConvertIPStringToNetAddress(ipStr string) (*wire.NetAd return netAddr, nil } -func IPToNetAddr(ipStr string, addrMgr *addrmgr.AddrManager, params *DeSoParams) (*wire.NetAddressV2, error) { +func IPToNetAddr(ipStr string, addrMgr *addrmgr.AddrManager, params *DeSoParams) (*wire.NetAddress, error) { port := 
params.DefaultSocketPort host, portstr, err := net.SplitHostPort(ipStr) if err != nil { diff --git a/lib/network_test.go b/lib/network_test.go index af1ca2e92..b88ec0e18 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -3,8 +3,6 @@ package lib import ( "bytes" "encoding/hex" - "encoding/json" - ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" "math/big" "math/rand" "reflect" @@ -18,7 +16,7 @@ import ( "github.com/holiman/uint256" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" "github.com/bxcodec/faker" @@ -436,9 +434,10 @@ func createTestBlockVersion1(t *testing.T) *MsgDeSoBlock { newBlockV1 := *expectedBlock // Add a signature to the block V1 - priv, err := btcec.NewPrivateKey() + priv, err := btcec.NewPrivateKey(btcec.S256()) + require.NoError(err) + newBlockV1.BlockProducerInfo.Signature, err = priv.Sign([]byte{0x01, 0x02, 0x03}) require.NoError(err) - newBlockV1.BlockProducerInfo.Signature = ecdsa2.Sign(priv, []byte{0x01, 0x02, 0x03}) return &newBlockV1 } @@ -1317,7 +1316,7 @@ func TestDAOCoin(t *testing.T) { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x00, 0x01, 0x02} txMeta.OperationType = DAOCoinOperationTypeMint - txMeta.CoinsToMintNanos = *uint256.NewInt(100) + txMeta.CoinsToMintNanos = *uint256.NewInt().SetUint64(100) data, err := txMeta.ToBytes(false) require.NoError(err) @@ -1337,7 +1336,7 @@ func TestDAOCoin(t *testing.T) { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x00, 0x01, 0x02} txMeta.OperationType = DAOCoinOperationTypeBurn - txMeta.CoinsToBurnNanos = *uint256.NewInt(100) + txMeta.CoinsToBurnNanos = *uint256.NewInt().SetUint64(100) data, err := txMeta.ToBytes(false) require.NoError(err) @@ -1406,7 +1405,7 @@ func TestDAOCoinTransfer(t *testing.T) { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x00, 0x01, 0x02} - txMeta.DAOCoinToTransferNanos = 
*uint256.NewInt(100) + txMeta.DAOCoinToTransferNanos = *uint256.NewInt().SetUint64(100) data, err := txMeta.ToBytes(false) require.NoError(err) @@ -1424,9 +1423,10 @@ func TestMessagingKey(t *testing.T) { m0PrivBytes, _, err := Base58CheckDecode(m0Priv) require.NoError(err) - privKey, pubKey := btcec.PrivKeyFromBytes(m0PrivBytes) + privKey, pubKey := btcec.PrivKeyFromBytes(btcec.S256(), m0PrivBytes) hash := Sha256DoubleHash([]byte{0x00, 0x01}) - signature := ecdsa2.Sign(privKey, hash[:]) + signature, err := privKey.Sign(hash[:]) + require.NoError(err) encrypted, err := EncryptBytesWithPublicKey(hash[:], pubKey.ToECDSA()) require.NoError(err) @@ -1771,7 +1771,7 @@ func TestDeSoSignature_SerializeCompact(t *testing.T) { for ; numTestCases > 0; numTestCases-- { // Generate a random (private, public) keypair. - privateKey, err := btcec.NewPrivateKey() + privateKey, err := btcec.NewPrivateKey(btcec.S256()) require.NoError(err) publicKeyBytes := privateKey.PubKey().SerializeCompressed() @@ -1783,7 +1783,8 @@ func TestDeSoSignature_SerializeCompact(t *testing.T) { require.NoError(err) // Verify that the compact signature is equal to what we serialized. - signatureCompact := ecdsa2.SignCompact(privateKey, messageHash, true) + signatureCompact, err := btcec.SignCompact(btcec.S256(), privateKey, messageHash, true) + require.NoError(err) // Use the DeSoSignature.SerializeCompact encoding. signatureCompactCustom, err := desoSignature._btcecSerializeCompact() @@ -1792,7 +1793,7 @@ func TestDeSoSignature_SerializeCompact(t *testing.T) { require.Equal(true, reflect.DeepEqual(signatureCompact, signatureCompactCustom)) // Recover the public key from our custom encoding. - recoveredPublicKey, _, err := ecdsa2.RecoverCompact(signatureCompactCustom, messageHash) + recoveredPublicKey, _, err := btcec.RecoverCompact(btcec.S256(), signatureCompactCustom, messageHash) require.NoError(err) // Verify that the recovered public key matches the original public key. 
@@ -1801,44 +1802,3 @@ func TestDeSoSignature_SerializeCompact(t *testing.T) { } } } - -func TestTxnJsonEncodeDecode(t *testing.T) { - priv, err := btcec.NewPrivateKey() - require.NoError(t, err) - - // We want to test with v0 and v1 txns. - for _, includeV1Txns := range []bool{true, false} { - // The case without a signature can fail with a nil pointer so we - // want to make sure we test it explicitly. - for _, withSignature := range []bool{true, false} { - for _, txn := range expectedTransactions(includeV1Txns) { - // Copy the txn - txnCopy, err := txn.Copy() - require.NoError(t, err) - - // Sign the txn - txBytes, err := txn.ToBytes(true /*preSignature*/) - require.NoError(t, err) - txHash := Sha256DoubleHash(txBytes)[:] - desoSignature, err := SignRecoverable(txHash, priv) - require.NoError(t, err) - if withSignature { - txnCopy.Signature = *desoSignature - } - - // Serialize the txn using json encoding - txnJson, err := json.Marshal(txnCopy) - require.NoError(t, err) - - // Deserialize the txn using json decoding - txn2 := MsgDeSoTxn{} - err = json.Unmarshal(txnJson, &txn2) - require.NoError(t, err) - - // Make sure it's the same - require.Equal(t, txnCopy, &txn2) - } - } - - } -} diff --git a/lib/peer.go b/lib/peer.go index 86082a3aa..a8c865ed3 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -75,7 +75,7 @@ type Peer struct { serviceFlags ServiceFlag latestHeight uint64 addrStr string - netAddr *wire.NetAddressV2 + netAddr *wire.NetAddress minTxFeeRateNanosPerKB uint64 // Messages for which we are expecting a reply within a fixed // amount of time. This list is always sorted by ExpectedTime, @@ -627,7 +627,7 @@ func (pp *Peer) StartDeSoMessageProcessor() { } // NewPeer creates a new Peer object. 
-func NewPeer(_id uint64, _conn net.Conn, _isOutbound bool, _netAddr *wire.NetAddressV2, +func NewPeer(_id uint64, _conn net.Conn, _isOutbound bool, _netAddr *wire.NetAddress, _isPersistent bool, _stallTimeoutSeconds uint64, _minFeeRateNanosPerKB uint64, params *DeSoParams, @@ -801,12 +801,12 @@ func (pp *Peer) Address() string { return pp.addrStr } -func (pp *Peer) NetAddress() *wire.NetAddressV2 { +func (pp *Peer) NetAddress() *wire.NetAddress { return pp.netAddr } func (pp *Peer) IP() string { - return pp.netAddr.ToLegacy().IP.String() + return pp.netAddr.IP.String() } func (pp *Peer) Port() uint16 { diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go index 705899a5b..d0c8bad42 100644 --- a/lib/pos_block_producer.go +++ b/lib/pos_block_producer.go @@ -4,7 +4,7 @@ import ( "math" "time" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" @@ -347,7 +347,7 @@ func (pbp *PosBlockProducer) getBlockTransactions( return nil, 0, errors.New("Error casting txn meta to AtomicSwapMetadata") } - blockProducerPublicKeyBtcec, err := btcec.ParsePubKey(blockProducerPublicKey.ToBytes()) + blockProducerPublicKeyBtcec, err := btcec.ParsePubKey(blockProducerPublicKey.ToBytes(), btcec.S256()) if err != nil { return nil, 0, errors.Wrapf(err, "Error parsing block producer public key: ") diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 881930100..5746ac875 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -911,21 +911,21 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { validator1Entry := &ValidatorEntry{ ValidatorPKID: m1PKID, VotingPublicKey: m1VotingPrivateKey.PublicKey(), - TotalStakeAmountNanos: uint256.NewInt(3), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(3), } m2PKID := DBGetPKIDEntryForPublicKey(bc.db, nil, m2PkBytes).PKID m2VotingPrivateKey := _generateRandomBLSPrivateKey(t) validator2Entry := &ValidatorEntry{ ValidatorPKID: m2PKID, 
VotingPublicKey: m2VotingPrivateKey.PublicKey(), - TotalStakeAmountNanos: uint256.NewInt(2), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(2), } m3PKID := DBGetPKIDEntryForPublicKey(bc.db, nil, m3PkBytes).PKID m3VotingPrivateKey := _generateRandomBLSPrivateKey(t) validator3Entry := &ValidatorEntry{ ValidatorPKID: m3PKID, VotingPublicKey: m3VotingPrivateKey.PublicKey(), - TotalStakeAmountNanos: uint256.NewInt(1), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(1), } validatorSet := []*ValidatorEntry{validator1Entry, validator2Entry, validator3Entry} @@ -970,7 +970,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { // Malformed validators should fail { // Zero stake amount - validatorSet[0].TotalStakeAmountNanos = uint256.NewInt(0) + validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(0) err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidVoteQC) @@ -982,7 +982,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { require.Equal(t, err, RuleErrorInvalidVoteQC) // Reset stake amount - validatorSet[0].TotalStakeAmountNanos = uint256.NewInt(3) + validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(3) // Nil voting public key validatorSet[0].VotingPublicKey = nil err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) @@ -1175,7 +1175,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { { // Invalid validator set tests // Zero stake amount - validatorSet[0].TotalStakeAmountNanos = uint256.NewInt(0) + validatorSet[0].TotalStakeAmountNanos = uint256.NewInt().SetUint64(0) err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) require.Error(t, err) require.Equal(t, err, RuleErrorInvalidTimeoutQC) @@ -1187,7 +1187,7 @@ func TestIsValidPoSQuorumCertificate(t *testing.T) { require.Equal(t, err, RuleErrorInvalidTimeoutQC) // Reset stake amount - validatorSet[0].TotalStakeAmountNanos = uint256.NewInt(3) + validatorSet[0].TotalStakeAmountNanos = 
uint256.NewInt().SetUint64(3) // Nil voting public key validatorSet[0].VotingPublicKey = nil err = bc.isValidPoSQuorumCertificate(desoBlock, validatorSet) diff --git a/lib/pos_constants.go b/lib/pos_constants.go index 19cf429f4..da61d3cb7 100644 --- a/lib/pos_constants.go +++ b/lib/pos_constants.go @@ -39,7 +39,7 @@ func BuildProofOfStakeCutoverValidator() (consensus.Validator, error) { } validatorEntry := &ValidatorEntry{ VotingPublicKey: blsPrivateKey.PublicKey(), - TotalStakeAmountNanos: uint256.NewInt(1e9), + TotalStakeAmountNanos: uint256.NewInt().SetUint64(1e9), } return validatorEntry, nil } diff --git a/lib/pos_epoch_complete_hook_test.go b/lib/pos_epoch_complete_hook_test.go index bd19427bd..d04f0ce36 100644 --- a/lib/pos_epoch_complete_hook_test.go +++ b/lib/pos_epoch_complete_hook_test.go @@ -286,8 +286,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Len(t, validatorEntries, 7) require.Equal(t, validatorEntries[0].ValidatorPKID, m6PKID) require.Equal(t, validatorEntries[6].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(700)) - require.Equal(t, validatorEntries[6].TotalStakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(700)) + require.Equal(t, validatorEntries[6].TotalStakeAmountNanos, uint256.NewInt().SetUint64(100)) // Test SnapshotLeaderSchedule is populated. 
for index := range validatorPKIDs { @@ -303,8 +303,8 @@ func TestRunEpochCompleteHook(t *testing.T) { require.Len(t, stakeEntries, 7) require.Equal(t, stakeEntries[0].StakerPKID, m6PKID) require.Equal(t, stakeEntries[6].StakerPKID, m0PKID) - require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt(700)) - require.Equal(t, stakeEntries[6].StakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(700)) + require.Equal(t, stakeEntries[6].StakeAmountNanos, uint256.NewInt().SetUint64(100)) } { // Test snapshotting changing stake. @@ -339,7 +339,7 @@ func TestRunEpochCompleteHook(t *testing.T) { _sortStakeEntriesByStakeAmount(stakeEntries) require.Len(t, stakeEntries, 7) require.Equal(t, stakeEntries[1].StakerPKID, m5PKID) - require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt(600)) + require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(600)) // Run OnEpochCompleteHook(). incrViewNumber() @@ -356,7 +356,7 @@ func TestRunEpochCompleteHook(t *testing.T) { _sortStakeEntriesByStakeAmount(stakeEntries) require.Len(t, stakeEntries, 7) require.Equal(t, stakeEntries[0].StakerPKID, m5PKID) - require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt(800)) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(800)) } { // Test snapshotting changing GlobalParams. @@ -607,11 +607,11 @@ func TestStakingRewardDistribution(t *testing.T) { // Validator m0 has 500 nanos staked in total: 400 staked by itself and 100 delegated by m2. require.Equal(t, validatorEntries[0].ValidatorPKID, m0PKID) - require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt(500)) + require.Equal(t, validatorEntries[0].TotalStakeAmountNanos, uint256.NewInt().SetUint64(500)) // Validator m1 has 250 nanos staked in total: 200 staked by itself and 50 delegated by m3. 
require.Equal(t, validatorEntries[1].ValidatorPKID, m1PKID) - require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt(250)) + require.Equal(t, validatorEntries[1].TotalStakeAmountNanos, uint256.NewInt().SetUint64(250)) } { @@ -621,13 +621,13 @@ func TestStakingRewardDistribution(t *testing.T) { require.Len(t, stakeEntries, 4) require.Equal(t, stakeEntries[0].StakerPKID, m0PKID) - require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt(400)) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(400)) require.Equal(t, stakeEntries[1].StakerPKID, m1PKID) - require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt(200)) + require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) require.Equal(t, stakeEntries[2].StakerPKID, m2PKID) - require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(100)) require.Equal(t, stakeEntries[3].StakerPKID, m3PKID) - require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt(50)) + require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) } { @@ -648,13 +648,13 @@ func TestStakingRewardDistribution(t *testing.T) { require.NoError(t, err) require.Len(t, stakeEntries, 4) require.Equal(t, stakeEntries[0].StakerPKID, m0PKID) - require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt(400)) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(400)) require.Equal(t, stakeEntries[1].StakerPKID, m1PKID) - require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt(200)) + require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) require.Equal(t, stakeEntries[2].StakerPKID, m2PKID) - require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(100)) require.Equal(t, 
stakeEntries[3].StakerPKID, m3PKID) - require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt(50)) + require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) // Test that DESO wallet balances are unchanged. m0Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m0PkBytes) @@ -676,13 +676,13 @@ func TestStakingRewardDistribution(t *testing.T) { _sortStakeEntriesByStakeAmount(snapshotStakeEntries) require.Len(t, snapshotStakeEntries, 4) require.Equal(t, snapshotStakeEntries[0].StakerPKID, m0PKID) - require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt(400)) + require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(400)) require.Equal(t, snapshotStakeEntries[1].StakerPKID, m1PKID) - require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt(200)) + require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) require.Equal(t, snapshotStakeEntries[2].StakerPKID, m2PKID) - require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(100)) require.Equal(t, snapshotStakeEntries[3].StakerPKID, m3PKID) - require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt(50)) + require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) } { @@ -714,7 +714,7 @@ func TestStakingRewardDistribution(t *testing.T) { // Final stake amount: // - m0's final stake is: 400 + 42 + 2 = 444 nanos require.Equal(t, stakeEntries[0].StakerPKID, m0PKID) - require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt(444)) + require.Equal(t, stakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(444)) // Test that m0's DESO wallet balance is unchanged. 
m0Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m0PkBytes) @@ -740,7 +740,7 @@ func TestStakingRewardDistribution(t *testing.T) { // Test that m1's stake is unchanged. require.Equal(t, stakeEntries[1].StakerPKID, m1PKID) - require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt(200)) + require.Equal(t, stakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) // Test reward computation and restaking for m2: // - m2's original stake was 100 nanos @@ -754,7 +754,7 @@ func TestStakingRewardDistribution(t *testing.T) { // Final stake amount: // - m2's final stake is: 100 + 10 - 2 = 108 nanos require.Equal(t, stakeEntries[2].StakerPKID, m2PKID) - require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt(108)) + require.Equal(t, stakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(108)) // Test that m2's DESO wallet balance is unchanged. m2Balance, err := _newUtxoView(testMeta).GetDeSoBalanceNanosForPublicKey(m2PkBytes) @@ -779,7 +779,7 @@ func TestStakingRewardDistribution(t *testing.T) { // Test that m3's stake is unchanged. require.Equal(t, stakeEntries[3].StakerPKID, m3PKID) - require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt(50)) + require.Equal(t, stakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) // Make sure the validator's total stake amount is correct. 
// M0's total stake amount nanos should be the sum of M0 and M2's stakes (444 + 108) @@ -800,13 +800,13 @@ func TestStakingRewardDistribution(t *testing.T) { _sortStakeEntriesByStakeAmount(snapshotStakeEntries) require.Len(t, snapshotStakeEntries, 4) require.Equal(t, snapshotStakeEntries[0].StakerPKID, m0PKID) - require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt(400)) + require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(400)) require.Equal(t, snapshotStakeEntries[1].StakerPKID, m1PKID) - require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt(200)) + require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) require.Equal(t, snapshotStakeEntries[2].StakerPKID, m2PKID) - require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt(100)) + require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(100)) require.Equal(t, snapshotStakeEntries[3].StakerPKID, m3PKID) - require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt(50)) + require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) } { @@ -824,13 +824,13 @@ func TestStakingRewardDistribution(t *testing.T) { _sortStakeEntriesByStakeAmount(snapshotStakeEntries) require.Len(t, snapshotStakeEntries, 4) require.Equal(t, snapshotStakeEntries[0].StakerPKID, m0PKID) - require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt(444)) + require.Equal(t, snapshotStakeEntries[0].StakeAmountNanos, uint256.NewInt().SetUint64(444)) require.Equal(t, snapshotStakeEntries[1].StakerPKID, m1PKID) - require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt(200)) + require.Equal(t, snapshotStakeEntries[1].StakeAmountNanos, uint256.NewInt().SetUint64(200)) require.Equal(t, snapshotStakeEntries[2].StakerPKID, m2PKID) - require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt(108)) + 
require.Equal(t, snapshotStakeEntries[2].StakeAmountNanos, uint256.NewInt().SetUint64(108)) require.Equal(t, snapshotStakeEntries[3].StakerPKID, m3PKID) - require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt(50)) + require.Equal(t, snapshotStakeEntries[3].StakeAmountNanos, uint256.NewInt().SetUint64(50)) } } @@ -916,7 +916,7 @@ func _stakeToValidator( stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(validatorPkBytes), RewardMethod: rewardMethod, - StakeAmountNanos: uint256.NewInt(stakeAmountNanos), + StakeAmountNanos: uint256.NewInt().SetUint64(stakeAmountNanos), } _, err = _submitStakeTxn(testMeta, stakerPubKey, stakerPrivKey, stakeMetadata, nil, true) require.NoError(testMeta.t, err) diff --git a/lib/pos_leader_schedule.go b/lib/pos_leader_schedule.go index 246b206be..618e54bf5 100644 --- a/lib/pos_leader_schedule.go +++ b/lib/pos_leader_schedule.go @@ -30,7 +30,7 @@ func (bav *UtxoView) GenerateLeaderSchedule(validatorSet []*ValidatorEntry) ([]* } // Sum TotalStakeAmountNanos. - totalStakeAmountNanos := uint256.NewInt(0) + totalStakeAmountNanos := uint256.NewInt() for _, validatorEntry := range validatorEntries { totalStakeAmountNanos, err = SafeUint256().Add(totalStakeAmountNanos, validatorEntry.TotalStakeAmountNanos) if err != nil { @@ -66,10 +66,10 @@ func (bav *UtxoView) GenerateLeaderSchedule(validatorSet []*ValidatorEntry) ([]* } // Take RandomUint256 % TotalStakeAmountNanos. - randomUint256 := uint256.NewInt(0).Mod(currentRandomSeedHash.ToUint256(), totalStakeAmountNanos) + randomUint256 := uint256.NewInt().Mod(currentRandomSeedHash.ToUint256(), totalStakeAmountNanos) // Keep track of the stake seen so far in this loop. - sumStakeAmountNanos := uint256.NewInt(0) + sumStakeAmountNanos := uint256.NewInt() for _, validatorEntry := range validatorEntries { // Skip if ValidatorEntry has already been added to the leader schedule. 
diff --git a/lib/pos_leader_schedule_test.go b/lib/pos_leader_schedule_test.go index 95684a4a6..d0b12ab09 100644 --- a/lib/pos_leader_schedule_test.go +++ b/lib/pos_leader_schedule_test.go @@ -85,7 +85,7 @@ func TestGenerateLeaderSchedule(t *testing.T) { } stakeMetadata := &StakeMetadata{ ValidatorPublicKey: NewPublicKey(pkBytes), - StakeAmountNanos: uint256.NewInt(stakeAmountNanos), + StakeAmountNanos: uint256.NewInt().SetUint64(stakeAmountNanos), } _, err = _submitStakeTxn(testMeta, publicKey, privateKey, stakeMetadata, nil, true) require.NoError(t, err) diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index fe318ee69..b8430e29e 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -19,7 +19,7 @@ import ( type RandomSeedHash [32]byte func (randomSeedHash *RandomSeedHash) ToUint256() *uint256.Int { - return uint256.NewInt(0).SetBytes(randomSeedHash.ToBytes()) + return uint256.NewInt().SetBytes(randomSeedHash.ToBytes()) } func (randomSeedHash *RandomSeedHash) Copy() *RandomSeedHash { diff --git a/lib/pos_random_seed_test.go b/lib/pos_random_seed_test.go index 3dcd8aa5e..768b1462b 100644 --- a/lib/pos_random_seed_test.go +++ b/lib/pos_random_seed_test.go @@ -82,10 +82,10 @@ func TestCurrentRandomSeedHash(t *testing.T) { // Test RandomSeedHash.ToUint256(). Generates a valid uint256. // Idempotent: generates the same uint256 each time. 
- require.True(t, randomSeedHash1.ToUint256().Cmp(uint256.NewInt(0)) > 0) + require.True(t, randomSeedHash1.ToUint256().Cmp(uint256.NewInt()) > 0) require.True(t, randomSeedHash1.ToUint256().Cmp(MaxUint256) < 0) require.True(t, randomSeedHash1.ToUint256().Eq(randomSeedHash1.ToUint256())) - require.True(t, randomSeedHash2.ToUint256().Cmp(uint256.NewInt(0)) > 0) + require.True(t, randomSeedHash2.ToUint256().Cmp(uint256.NewInt()) > 0) require.True(t, randomSeedHash2.ToUint256().Cmp(MaxUint256) < 0) require.True(t, randomSeedHash2.ToUint256().Eq(randomSeedHash2.ToUint256())) require.False(t, randomSeedHash1.ToUint256().Eq(randomSeedHash2.ToUint256())) diff --git a/lib/pos_server_regtest.go b/lib/pos_server_regtest.go index 93be1d628..2e561d426 100644 --- a/lib/pos_server_regtest.go +++ b/lib/pos_server_regtest.go @@ -68,7 +68,7 @@ func (srv *Server) submitRegtestValidatorRegistrationTxns(block *MsgDeSoBlock) { stakeTxnMeta := StakeMetadata{ ValidatorPublicKey: NewPublicKey(transactorPubKey), RewardMethod: StakingRewardMethodPayToBalance, - StakeAmountNanos: uint256.NewInt(10 * 1e6), + StakeAmountNanos: uint256.NewInt().SetUint64(10 * 1e6), } stakeTxn, _, _, _, err := srv.blockProducer.chain.CreateStakeTxn( diff --git a/lib/pos_staking_rewards.go b/lib/pos_staking_rewards.go index 6672f69e6..775e2d213 100644 --- a/lib/pos_staking_rewards.go +++ b/lib/pos_staking_rewards.go @@ -225,9 +225,9 @@ func (bav *UtxoView) distributeStakingReward( IsValidatorCommission: isValidatorCommission, }, } - stakeEntry.StakeAmountNanos = uint256.NewInt(0).Add(stakeEntry.StakeAmountNanos, uint256.NewInt(rewardNanos)) + stakeEntry.StakeAmountNanos = uint256.NewInt().Add(stakeEntry.StakeAmountNanos, uint256.NewInt().SetUint64(rewardNanos)) bav._setStakeEntryMappings(stakeEntry) - validatorEntry.TotalStakeAmountNanos = uint256.NewInt(0).Add(validatorEntry.TotalStakeAmountNanos, uint256.NewInt(rewardNanos)) + validatorEntry.TotalStakeAmountNanos = 
uint256.NewInt().Add(validatorEntry.TotalStakeAmountNanos, uint256.NewInt().SetUint64(rewardNanos)) bav._setValidatorEntryMappings(validatorEntry) return utxoOperation, nil } diff --git a/lib/pos_transaction_register_test.go b/lib/pos_transaction_register_test.go index f05f9a7fd..cad0a44d0 100644 --- a/lib/pos_transaction_register_test.go +++ b/lib/pos_transaction_register_test.go @@ -8,9 +8,7 @@ import ( "testing" "time" - ecdsa2 "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" - - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/stretchr/testify/require" ) @@ -388,9 +386,9 @@ func _testGetDefaultGlobalParams() *GlobalParamsEntry { func _testGetRandomMempoolTxns(rand *rand.Rand, feeMin uint64, feeMax uint64, sizeMax uint64, timestampRange uint64, numTxns int) []*MempoolTx { txnPool := []*MempoolTx{} for ii := 0; ii < numTxns; ii++ { - randPriv, _ := btcec.NewPrivateKey() + randPriv, _ := btcec.NewPrivateKey(btcec.S256()) randMsg := RandomBytes(32) - randSig := ecdsa2.Sign(randPriv, randMsg) + randSig, _ := randPriv.Sign(randMsg) fee := rand.Uint64()%(feeMax-feeMin) + feeMin txnPool = append(txnPool, &MempoolTx{ diff --git a/lib/postgres.go b/lib/postgres.go index cc606705d..f3cae7833 100644 --- a/lib/postgres.go +++ b/lib/postgres.go @@ -720,7 +720,7 @@ func (balance *PGCreatorCoinBalance) NewBalanceEntry() *BalanceEntry { HODLerPKID: balance.HolderPKID, CreatorPKID: balance.CreatorPKID, // FIXME: This will break if the value exceeds uint256 - BalanceNanos: *uint256.NewInt(balance.BalanceNanos), + BalanceNanos: *uint256.NewInt().SetUint64(balance.BalanceNanos), HasPurchased: balance.HasPurchased, } } @@ -1093,14 +1093,14 @@ func (messageEntry *PGNewMessageGroupChatThreadEntry) ToAccessGroupId() AccessGr } func HexToUint256(input string) *uint256.Int { - output := uint256.NewInt(0) + output := uint256.NewInt() if input != "" { var err error output, err = uint256.FromHex(input) if err != nil { - output = uint256.NewInt(0) + output 
= uint256.NewInt() } } @@ -1439,12 +1439,8 @@ func (postgres *Postgres) InsertTransactionsTx(tx *pg.Tx, desoTxns []*MsgDeSoTxn } if txn.Signature.Sign != nil { - r := txn.Signature.Sign.R() - s := txn.Signature.Sign.S() - rBytes := (&r).Bytes() - sBytes := (&s).Bytes() - transaction.R = NewBlockHash(rBytes[:]) - transaction.S = NewBlockHash(sBytes[:]) + transaction.R = BigintToHash(txn.Signature.Sign.R) + transaction.S = BigintToHash(txn.Signature.Sign.S) transaction.RecoveryId = uint32(txn.Signature.RecoveryId) transaction.IsRecoverable = txn.Signature.IsRecoverable } diff --git a/lib/remote_node.go b/lib/remote_node.go index bbfb37f95..aaa295c6a 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -244,7 +244,7 @@ func (rn *RemoteNode) GetUserAgent() string { return rn.handshakeMetadata.userAgent } -func (rn *RemoteNode) GetNetAddress() *wire.NetAddressV2 { +func (rn *RemoteNode) GetNetAddress() *wire.NetAddress { if !rn.IsHandshakeCompleted() || rn.GetPeer() == nil { return nil } @@ -307,7 +307,7 @@ func (rn *RemoteNode) hasValidatorServiceFlag() bool { } // DialOutboundConnection dials an outbound connection to the provided netAddr. -func (rn *RemoteNode) DialOutboundConnection(netAddr *wire.NetAddressV2) error { +func (rn *RemoteNode) DialOutboundConnection(netAddr *wire.NetAddress) error { rn.mtx.Lock() defer rn.mtx.Unlock() @@ -321,7 +321,7 @@ func (rn *RemoteNode) DialOutboundConnection(netAddr *wire.NetAddressV2) error { } // DialPersistentOutboundConnection dials a persistent outbound connection to the provided netAddr. -func (rn *RemoteNode) DialPersistentOutboundConnection(netAddr *wire.NetAddressV2) error { +func (rn *RemoteNode) DialPersistentOutboundConnection(netAddr *wire.NetAddress) error { rn.mtx.Lock() defer rn.mtx.Unlock() @@ -335,7 +335,7 @@ func (rn *RemoteNode) DialPersistentOutboundConnection(netAddr *wire.NetAddressV } // AttachInboundConnection creates an inbound peer once a successful inbound connection has been established. 
-func (rn *RemoteNode) AttachInboundConnection(conn net.Conn, na *wire.NetAddressV2) error { +func (rn *RemoteNode) AttachInboundConnection(conn net.Conn, na *wire.NetAddress) error { rn.mtx.Lock() defer rn.mtx.Unlock() @@ -359,7 +359,7 @@ func (rn *RemoteNode) AttachInboundConnection(conn net.Conn, na *wire.NetAddress } // AttachOutboundConnection creates an outbound peer once a successful outbound connection has been established. -func (rn *RemoteNode) AttachOutboundConnection(conn net.Conn, na *wire.NetAddressV2, isPersistent bool) error { +func (rn *RemoteNode) AttachOutboundConnection(conn net.Conn, na *wire.NetAddress, isPersistent bool) error { rn.mtx.Lock() defer rn.mtx.Unlock() diff --git a/lib/server.go b/lib/server.go index 5bca91b4d..7c1265b99 100644 --- a/lib/server.go +++ b/lib/server.go @@ -2695,10 +2695,9 @@ func (srv *Server) _handleAddrMessage(pp *Peer, desoMsg DeSoMessage) { } // Add all the addresses we received to the addrmgr. - netAddrsReceived := []*wire.NetAddressV2{} + netAddrsReceived := []*wire.NetAddress{} for _, addr := range msg.AddrList { - addrAsNetAddr := wire.NetAddressV2FromBytes( - addr.Timestamp, (wire.ServiceFlag)(addr.Services), addr.IP[:], addr.Port) + addrAsNetAddr := wire.NewNetAddressIPPort(addr.IP, addr.Port, (wire.ServiceFlag)(addr.Services)) if !addrmgr.IsRoutable(addrAsNetAddr) { glog.V(1).Infof("Server._handleAddrMessage: Dropping address %v from peer %v because it is not routable", addr, pp) continue @@ -2715,7 +2714,7 @@ func (srv *Server) _handleAddrMessage(pp *Peer, desoMsg DeSoMessage) { "peer %v", len(msg.AddrList), pp) sourceAddr := &SingleAddr{ Timestamp: time.Now(), - IP: pp.netAddr.ToLegacy().IP, + IP: pp.netAddr.IP, Port: pp.netAddr.Port, Services: pp.serviceFlags, } @@ -2762,7 +2761,7 @@ func (srv *Server) _handleGetAddrMessage(pp *Peer, desoMsg DeSoMessage) { for _, netAddr := range netAddrsFound { singleAddr := &SingleAddr{ Timestamp: time.Now(), - IP: netAddr.ToLegacy().IP, + IP: netAddr.IP, Port: 
netAddr.Port, Services: (ServiceFlag)(netAddr.Services), } @@ -3015,12 +3014,12 @@ func (srv *Server) _startAddressRelayer() { bestAddress := srv.AddrMgr.GetBestLocalAddress(netAddr) if bestAddress != nil { glog.V(2).Infof("Server.startAddressRelayer: Relaying address %v to "+ - "RemoteNode (id= %v)", bestAddress.Addr.String(), rn.GetId()) + "RemoteNode (id= %v)", bestAddress.IP.String(), rn.GetId()) addrMsg := &MsgDeSoAddr{ AddrList: []*SingleAddr{ { Timestamp: time.Now(), - IP: bestAddress.ToLegacy().IP, + IP: bestAddress.IP, Port: bestAddress.Port, Services: (ServiceFlag)(bestAddress.Services), }, diff --git a/lib/types.go b/lib/types.go index 1c4649c1e..5395d360b 100644 --- a/lib/types.go +++ b/lib/types.go @@ -298,9 +298,9 @@ func FixedWidthDecodeUint256(rr *bytes.Reader) (*uint256.Int, error) { valBytes := make([]byte, 32, 32) _, err := io.ReadFull(rr, valBytes) if err != nil { - return uint256.NewInt(0), fmt.Errorf("ReadUint256: Error reading value bytes: %v", err) + return uint256.NewInt(), fmt.Errorf("ReadUint256: Error reading value bytes: %v", err) } - return uint256.NewInt(0).SetBytes(valBytes), nil + return uint256.NewInt().SetBytes(valBytes), nil } return nil, nil } diff --git a/lib/utils.go b/lib/utils.go index f938d9105..aed694865 100644 --- a/lib/utils.go +++ b/lib/utils.go @@ -5,15 +5,15 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/btcsuite/btcd/btcutil/hdkeychain" "math/big" "os" "reflect" "sort" "strings" - "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcutil/hdkeychain" "github.com/pkg/errors" "github.com/unrolled/secure" "golang.org/x/crypto/sha3" @@ -96,23 +96,23 @@ func ComputeKeysFromSeedWithNet(seedBytes []byte, index uint32, isTestnet bool) // m/44'/0'/0'/0/0 also maps to the first // address you'd get if you put the user's seed into most standard // Bitcoin wallets (Mycelium, Electrum, Ledger, iancoleman, etc...). 
- purpose, err := masterKey.Derive(hdkeychain.HardenedKeyStart + 44) + purpose, err := masterKey.Child(hdkeychain.HardenedKeyStart + 44) if err != nil { return nil, nil, "", fmt.Errorf("ComputeKeyFromSeed: Error encountered generating 'purpose' from seed (%v)", err) } - coinTypeKey, err := purpose.Derive(hdkeychain.HardenedKeyStart + 0) + coinTypeKey, err := purpose.Child(hdkeychain.HardenedKeyStart + 0) if err != nil { return nil, nil, "", fmt.Errorf("ComputeKeyFromSeed: Error encountered generating 'coinType' from seed (%v)", err) } - accountKey, err := coinTypeKey.Derive(hdkeychain.HardenedKeyStart + 0) + accountKey, err := coinTypeKey.Child(hdkeychain.HardenedKeyStart + 0) if err != nil { return nil, nil, "", fmt.Errorf("ComputeKeyFromSeed: Error encountered generating 'accountKey' from seed (%v)", err) } - changeKey, err := accountKey.Derive(0) + changeKey, err := accountKey.Child(0) if err != nil { return nil, nil, "", fmt.Errorf("ComputeKeyFromSeed: Error encountered generating 'changeKey' from seed (%v)", err) } - addressKey, err := changeKey.Derive(index) + addressKey, err := changeKey.Child(index) if err != nil { return nil, nil, "", fmt.Errorf("ComputeKeyFromSeed: Error encountered generating 'addressKey' from seed (%v)", err) } From c0d781f5f1d07778bb432624f3ae52c906efa979 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Wed, 5 Jun 2024 17:33:43 -0400 Subject: [PATCH 750/762] Revert "Upgrade go to 1.22 (#1190)" (#1353) This reverts commit fc0aaea51ed446f3b5dff8c55f4c039e86a2986e. 
--- .github/workflows/ci.yml | 4 ++-- Dockerfile | 2 +- go.mod | 2 +- test.Dockerfile | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 876eff8ec..afb4dc21c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,7 +15,7 @@ jobs: - name: Install Go uses: actions/setup-go@v2 with: - go-version: "1.22" + go-version: "1.20" - name: Checkout branch uses: actions/checkout@v3 @@ -52,7 +52,7 @@ jobs: - name: Install Go uses: actions/setup-go@v2 with: - go-version: "1.22" + go-version: "1.20" - name: Checkout branch uses: actions/checkout@v3 diff --git a/Dockerfile b/Dockerfile index c43024ee1..cf98b7feb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ FROM alpine:latest AS core RUN apk update && apk upgrade RUN apk add --update bash cmake g++ gcc git make vips vips-dev -COPY --from=golang:1.22-alpine /usr/local/go/ /usr/local/go/ +COPY --from=golang:1.20-alpine /usr/local/go/ /usr/local/go/ ENV PATH="/usr/local/go/bin:${PATH}" WORKDIR /deso/src/core diff --git a/go.mod b/go.mod index d61858fed..18fc48e99 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/deso-protocol/core -go 1.22 +go 1.20 require ( github.com/DataDog/datadog-go v4.5.0+incompatible diff --git a/test.Dockerfile b/test.Dockerfile index f962f2377..3eb8c7501 100644 --- a/test.Dockerfile +++ b/test.Dockerfile @@ -4,7 +4,7 @@ RUN apk update RUN apk upgrade RUN apk add --update bash cmake g++ gcc git make vips vips-dev -COPY --from=golang:1.22-alpine /usr/local/go/ /usr/local/go/ +COPY --from=golang:1.20-alpine /usr/local/go/ /usr/local/go/ ENV PATH="/usr/local/go/bin:${PATH}" WORKDIR /deso/src/core From ad79f13386616b07c8c1e80aafbf0aad451dc1db Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 6 Jun 2024 18:33:46 -0400 Subject: [PATCH 751/762] Revert "Use Sign instead of comparing big ints to 0 (#1228)" (#1361) This reverts commit 
9a9c7f3ebc7dc434c6a9b1a431f057700ba4d82c. --- lib/block_view_dao_coin_limit_order.go | 15 ++++++++------- lib/block_view_nft.go | 2 +- lib/deso_math.go | 2 +- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/lib/block_view_dao_coin_limit_order.go b/lib/block_view_dao_coin_limit_order.go index 5379be9f1..7fc35ce61 100644 --- a/lib/block_view_dao_coin_limit_order.go +++ b/lib/block_view_dao_coin_limit_order.go @@ -3,6 +3,7 @@ package lib import ( "bytes" "fmt" + "math" "math/big" "sort" "strings" @@ -20,7 +21,7 @@ func adjustBalance( retBig := big.NewInt(0).Add(balanceBig, delta) // If we're below zero, just return zero. The caller should generally // prevent this from happening. - if retBig.Sign() < 0 { + if retBig.Cmp(big.NewInt(0)) < 0 { return nil, fmt.Errorf("adjustBalance: Went below zero. This should never happen.") } if retBig.Cmp(MaxUint256.ToBig()) > 0 { @@ -186,7 +187,7 @@ func (bav *UtxoView) _sanityCheckLimitOrderMoneyPrinting( // we did not print money. for creatorPKID, deltaBalanceBaseUnits := range finalDeltasMap { // If delta is > 0, throw an error. 
- if deltaBalanceBaseUnits.Sign() > 0 { + if deltaBalanceBaseUnits.Cmp(big.NewInt(0)) > 0 { return fmt.Errorf( "_connectDAOCoinLimitOrder: printing %v new coin base units for creatorPKID %v", deltaBalanceBaseUnits, creatorPKID) @@ -775,7 +776,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( } } for creatorPKIDIter, balanceDelta := range balanceDeltaSanityCheckMap { - if balanceDelta.Sign() != 0 { + if balanceDelta.Cmp(big.NewInt(0)) != 0 { return 0, 0, nil, errors.Wrapf( RuleErrorDAOCoinLimitOrderBalanceDeltasNonZero, "_connectDAOCoinLimitOrder: Balance for PKID %v is %v", creatorPKIDIter, balanceDelta.String(), @@ -827,7 +828,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( } if blockHeight >= bav.Params.ForkHeights.BalanceModelBlockHeight { - cmpVal := newDESOSurplus.Sign() + cmpVal := newDESOSurplus.Cmp(big.NewInt(0)) if cmpVal == 0 { continue } @@ -874,10 +875,10 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( // Note that if we ever go negative then that's an error because // we already maxed out the DESO we're allowed to spend before // entering this loop. 
- if newDESOSurplus.Sign() < 0 { + if newDESOSurplus.Cmp(big.NewInt(0)) < 0 { return 0, 0, nil, RuleErrorDAOCoinLimitOrderOverspendingDESO } - if !newDESOSurplus.IsUint64() { + if newDESOSurplus.Cmp(big.NewInt(0).SetUint64(math.MaxUint64)) > 0 { return 0, 0, nil, RuleErrorDAOCoinLimitOrderOverflowsDESO } @@ -924,7 +925,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( } newBalance := big.NewInt(0).Add(newBalanceEntry.BalanceNanos.ToBig(), delta) - if newBalance.Sign() < 0 { + if newBalance.Cmp(big.NewInt(0)) < 0 { return 0, 0, nil, RuleErrorDAOCoinLimitOrderOverspendingDAOCoin } if newBalance.Cmp(MaxUint256.ToBig()) > 0 { diff --git a/lib/block_view_nft.go b/lib/block_view_nft.go index 3875af09b..fd6b1983d 100644 --- a/lib/block_view_nft.go +++ b/lib/block_view_nft.go @@ -1665,7 +1665,7 @@ func (bav *UtxoView) _helpConnectNFTSold(args HelpConnectNFTSoldStruct) ( totalDiff := big.NewInt(0).Add(sellerPlusBidderDiff, creatorPlusCoinDiff) totalDiff = totalDiff.Add(totalDiff, totalAdditionalRoyaltiesDiff) - if totalDiff.Sign() > 0 { + if totalDiff.Cmp(big.NewInt(0)) > 0 { return 0, 0, nil, fmt.Errorf( "_helpConnectNFTSold: Sum of participant diffs is >0 (%d, %d, %d, %d, %d, %d)", sellerDiff, bidderDiff, creatorDiff, coinDiff, additionalDESORoyaltiesDiff.Int64(), diff --git a/lib/deso_math.go b/lib/deso_math.go index 7c1359578..161681c8d 100644 --- a/lib/deso_math.go +++ b/lib/deso_math.go @@ -200,7 +200,7 @@ func BigFloatPow(z *big.Float, w *big.Float) *big.Float { if z.Sign() < 0 { panic("Pow: negative base") } - if z.Sign() == 0 { + if z.Cmp(bigZero) == 0 { return bigZero } From 3efe21902b6d05cecc4e0a21e37c8ba1a3a7b878 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 6 Jun 2024 18:35:37 -0400 Subject: [PATCH 752/762] Revert "Add BigIntFromUint64 helper (#1227)" (#1362) This reverts commit 45a0350976fb3fb5b27a4a3807a1261a579a6986. 
--- lib/block_view.go | 20 +++++++++++--------- lib/block_view_dao_coin_limit_order.go | 6 +++--- lib/deso_math.go | 9 --------- lib/network.go | 2 +- lib/network_test.go | 2 +- 5 files changed, 16 insertions(+), 23 deletions(-) diff --git a/lib/block_view.go b/lib/block_view.go index 1af470167..25b4275c3 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -4124,7 +4124,7 @@ func (bav *UtxoView) _connectSingleTxn( if txn.TxnMeta.GetTxnType() != TxnTypeBlockReward && txn.TxnMeta.GetTxnType() != TxnTypeBitcoinExchange && blockHeight >= bav.Params.ForkHeights.BalanceModelBlockHeight { - balanceDelta, err := bav._compareBalancesToSnapshot(balanceSnapshot) + balanceDelta, _, err := bav._compareBalancesToSnapshot(balanceSnapshot) if err != nil { return nil, 0, 0, 0, errors.Wrapf(err, "ConnectTransaction: error comparing current balances to snapshot") } @@ -4135,8 +4135,8 @@ func (bav *UtxoView) _connectSingleTxn( if creatorProfile == nil || creatorProfile.IsDeleted() { return nil, 0, 0, 0, fmt.Errorf("ConnectTransaction: Profile for CreatorCoin being sold does not exist") } - desoLockedDelta = big.NewInt(0).Sub(BigIntFromUint64(creatorProfile.CreatorCoinEntry.DeSoLockedNanos), - BigIntFromUint64(creatorCoinSnapshot.DeSoLockedNanos)) + desoLockedDelta = big.NewInt(0).Sub(big.NewInt(0).SetUint64(creatorProfile.CreatorCoinEntry.DeSoLockedNanos), + big.NewInt(0).SetUint64(creatorCoinSnapshot.DeSoLockedNanos)) } if txn.TxnMeta.GetTxnType() == TxnTypeAcceptNFTBid || txn.TxnMeta.GetTxnType() == TxnTypeNFTBid { @@ -4148,8 +4148,8 @@ func (bav *UtxoView) _connectSingleTxn( } desoLockedDelta = desoLockedDelta.Sub(desoLockedDelta, big.NewInt(0).Sub( - BigIntFromUint64(creatorProfile.CreatorCoinEntry.DeSoLockedNanos), - BigIntFromUint64(coinEntry.DeSoLockedNanos)), + big.NewInt(0).SetUint64(creatorProfile.CreatorCoinEntry.DeSoLockedNanos), + big.NewInt(0).SetUint64(coinEntry.DeSoLockedNanos)), ) } } @@ -4297,24 +4297,26 @@ func computeBMF(fee uint64) (_burnFee uint64, 
_utilityFee uint64) { } func (bav *UtxoView) _compareBalancesToSnapshot(balanceSnapshot map[PublicKey]uint64) ( - *big.Int, error) { + *big.Int, map[PublicKey]*big.Int, error) { runningTotal := big.NewInt(0) + balanceDeltasMap := make(map[PublicKey]*big.Int) for publicKey, balance := range bav.PublicKeyToDeSoBalanceNanos { snapshotBalance, exists := balanceSnapshot[publicKey] if !exists { // Get it from the DB dbBalance, err := bav.GetDbAdapter().GetDeSoBalanceForPublicKey(publicKey.ToBytes()) if err != nil { - return nil, err + return nil, nil, err } snapshotBalance = dbBalance balanceSnapshot[publicKey] = snapshotBalance } // New - Old - delta := big.NewInt(0).Sub(BigIntFromUint64(balance), BigIntFromUint64(snapshotBalance)) + delta := big.NewInt(0).Sub(big.NewInt(0).SetUint64(balance), big.NewInt(0).SetUint64(snapshotBalance)) + balanceDeltasMap[publicKey] = delta runningTotal = big.NewInt(0).Add(runningTotal, delta) } - return runningTotal, nil + return runningTotal, balanceDeltasMap, nil } func (bav *UtxoView) ConnectBlock( diff --git a/lib/block_view_dao_coin_limit_order.go b/lib/block_view_dao_coin_limit_order.go index 7fc35ce61..01fe250e2 100644 --- a/lib/block_view_dao_coin_limit_order.go +++ b/lib/block_view_dao_coin_limit_order.go @@ -815,7 +815,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( pubKey := bav.GetPublicKeyForPKID(&userPKID) desoSurplus := desoAllowedToSpendByPublicKey[*NewPublicKey(pubKey)] newDESOSurplus := big.NewInt(0).Add( - delta, BigIntFromUint64(desoSurplus)) + delta, big.NewInt(0).SetUint64(desoSurplus)) // If the current delta is for the transactor, we need // to deduct the fees specified in the metadata from the output @@ -824,7 +824,7 @@ func (bav *UtxoView) _connectDAOCoinLimitOrder( if blockHeight < bav.Params.ForkHeights.BalanceModelBlockHeight && transactorPKIDEntry.PKID.Eq(&userPKID) { - newDESOSurplus = big.NewInt(0).Sub(newDESOSurplus, BigIntFromUint64(txMeta.FeeNanos)) + newDESOSurplus = 
big.NewInt(0).Sub(newDESOSurplus, big.NewInt(0).SetUint64(txMeta.FeeNanos)) } if blockHeight >= bav.Params.ForkHeights.BalanceModelBlockHeight { @@ -1992,7 +1992,7 @@ func ScaleFloatFormatStringToUint256(floatStr string, scaleFactor *uint256.Int) } newWholePart := big.NewInt(0).Mul(wholePart, scaleFactor.ToBig()) newDecimalPart := big.NewInt(0).Mul(decimalPart, big.NewInt(0).Exp( - big.NewInt(10), big.NewInt(int64(decimalExponent)), nil)) + big.NewInt(0).SetUint64(10), big.NewInt(0).SetUint64(uint64(decimalExponent)), nil)) sumBig := big.NewInt(0).Add(newWholePart, newDecimalPart) ret, overflow := uint256.FromBig(sumBig) diff --git a/lib/deso_math.go b/lib/deso_math.go index 161681c8d..b9fe6bcd6 100644 --- a/lib/deso_math.go +++ b/lib/deso_math.go @@ -55,15 +55,6 @@ func IntAdd(a *big.Int, b *big.Int) *big.Int { return big.NewInt(0).Add(a, b) } -// BigIntFromUint64 returns a big.Int from a uint64. It is 20x faster to -// simply cast a uint64 to a big.Int than to use big.NewInt(0).SetUint64(). -func BigIntFromUint64(x uint64) *big.Int { - if x < math.MaxInt64 { - return big.NewInt(int64(x)) - } - return big.NewInt(0).SetUint64(x) -} - func Sub(a *big.Float, b *big.Float) *big.Float { // TODO(performance): This code currently calls NewFloat() too often. It // does this in order to make the code easier to read but if it ever becomes diff --git a/lib/network.go b/lib/network.go index 8bb111568..f433ab0a0 100644 --- a/lib/network.go +++ b/lib/network.go @@ -6085,7 +6085,7 @@ func (tsl *TransactionSpendingLimit) ToMetamaskString(params *DeSoParams) string // GlobalDESOLimit if tsl.GlobalDESOLimit > 0 { str += _indt(indentationCounter) + "Total $DESO Limit: " + FormatScaledUint256AsDecimalString( - BigIntFromUint64(tsl.GlobalDESOLimit), big.NewInt(int64(NanosPerUnit))) + " $DESO\n" + big.NewInt(0).SetUint64(tsl.GlobalDESOLimit), big.NewInt(int64(NanosPerUnit))) + " $DESO\n" } // Sort an array of strings and add them to the spending limit string str. 
This will come in handy below, diff --git a/lib/network_test.go b/lib/network_test.go index b88ec0e18..2598ab0f1 100644 --- a/lib/network_test.go +++ b/lib/network_test.go @@ -1655,7 +1655,7 @@ func TestSpendingLimitMetamaskString(t *testing.T) { encoding := spendingLimit.ToMetamaskString(params) if tsl.GlobalDESOLimit > 0 { if !strings.Contains(encoding, FormatScaledUint256AsDecimalString( - BigIntFromUint64(tsl.GlobalDESOLimit), big.NewInt(int64(NanosPerUnit)))) { + big.NewInt(0).SetUint64(tsl.GlobalDESOLimit), big.NewInt(int64(NanosPerUnit)))) { return false } } From 8a6e70ffdfb0b50d231a4e16cf79c4c73371176b Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 6 Jun 2024 18:46:41 -0400 Subject: [PATCH 753/762] Revert Badger version from v4 to v3 (#1363) --- cmd/node.go | 2 +- go.mod | 5 ++-- go.sum | 29 +++++++++++++++++---- integration_testing/tools.go | 2 +- lib/block_view.go | 2 +- lib/block_view_atomic_txns_test.go | 9 ++++--- lib/block_view_bitcoin_test.go | 2 +- lib/block_view_creator_coin_test.go | 2 +- lib/block_view_dao_coin_limit_order_test.go | 2 +- lib/block_view_dao_coin_test.go | 2 +- lib/block_view_derived_key_test.go | 2 +- lib/block_view_flush.go | 2 +- lib/block_view_follow_test.go | 2 +- lib/block_view_like_test.go | 2 +- lib/block_view_lockups.go | 2 +- lib/block_view_lockups_test.go | 2 +- lib/block_view_message_test.go | 2 +- lib/block_view_nft_test.go | 2 +- lib/block_view_post.go | 2 +- lib/block_view_post_test.go | 2 +- lib/block_view_profile_test.go | 2 +- lib/block_view_stake.go | 2 +- lib/block_view_test.go | 2 +- lib/block_view_validator.go | 2 +- lib/blockchain.go | 2 +- lib/blockchain_test.go | 2 +- lib/db_adapter.go | 2 +- lib/db_utils.go | 2 +- lib/db_utils_test.go | 2 +- lib/legacy_mempool.go | 2 +- lib/notifier.go | 2 +- lib/pos_blockchain.go | 2 +- lib/pos_epoch.go | 2 +- lib/pos_mempool.go | 2 +- lib/pos_mempool_persister.go | 2 +- lib/pos_mempool_persister_test.go | 2 +- 
lib/pos_mempool_test.go | 2 +- lib/pos_random_seed.go | 2 +- lib/pos_snapshot_cache.go | 2 +- lib/pos_snapshot_entries.go | 2 +- lib/postgres.go | 2 +- lib/server.go | 2 +- lib/snapshot.go | 2 +- lib/txindex.go | 2 +- 44 files changed, 73 insertions(+), 52 deletions(-) diff --git a/cmd/node.go b/cmd/node.go index ee3dfbc50..35893b6dc 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -18,7 +18,7 @@ import ( "github.com/deso-protocol/core/lib" "github.com/deso-protocol/core/migrate" "github.com/deso-protocol/go-deadlock" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/go-pg/pg/v10" "github.com/golang/glog" migrations "github.com/robinjoseph08/go-pg-migrations/v3" diff --git a/go.mod b/go.mod index 18fc48e99..31047fa1b 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,6 @@ require ( github.com/DataDog/datadog-go v4.5.0+incompatible github.com/brianvoe/gofakeit v3.18.0+incompatible github.com/btcsuite/btcd v0.21.0-beta - github.com/btcsuite/btcd/btcec/v2 v2.2.1 github.com/btcsuite/btcutil v1.0.2 github.com/bxcodec/faker v2.0.1+incompatible github.com/cloudflare/circl v1.1.0 @@ -15,7 +14,7 @@ require ( github.com/decred/dcrd/lru v1.1.1 github.com/deso-protocol/go-deadlock v1.0.0 github.com/deso-protocol/go-merkle-tree v1.0.0 - github.com/dgraph-io/badger/v4 v4.2.0 + github.com/dgraph-io/badger/v3 v3.2103.5 github.com/emirpasic/gods v1.18.1 github.com/ethereum/go-ethereum v1.9.25 github.com/fatih/color v1.13.0 @@ -50,8 +49,10 @@ require ( require ( github.com/Microsoft/go-winio v0.4.16 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect github.com/bwesterb/go-ristretto v1.2.0 // indirect + github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect diff --git a/go.sum b/go.sum index acd6529a2..3698c73f3 100644 --- a/go.sum +++ 
b/go.sum @@ -34,6 +34,7 @@ github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= @@ -44,6 +45,7 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -79,6 +81,7 @@ github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40 github.com/bxcodec/faker v2.0.1+incompatible/go.mod h1:BNzfpVdTwnFJ6GtfYTcQu6l6rHShT+veBxNCnjCx5XM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= @@ -92,10 +95,14 @@ github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtM github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod 
h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -116,8 +123,8 @@ github.com/deso-protocol/go-deadlock v1.0.0 h1:mw0pHy/19zgC+JFBStuQt1+1Ehv5OKA5N github.com/deso-protocol/go-deadlock v1.0.0/go.mod h1:K0Wd2OV2x7ck7SMYDraWerpKjFKUeBqaFcwz21tmkb8= github.com/deso-protocol/go-merkle-tree v1.0.0 h1:9zkI5dQsITYy77s4kbTGPQmZnhQ+LsH/kRdL5l/Yzvg= github.com/deso-protocol/go-merkle-tree v1.0.0/go.mod h1:V/vbg/maaNv6G7zf9VVs645nLFx/jsO2L/awFB/S/ZU= -github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= -github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= +github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= +github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -177,6 +184,7 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -204,6 +212,7 @@ github.com/golang/snappy v0.0.3 
h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -213,6 +222,7 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -310,6 +320,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= 
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -409,6 +420,7 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -421,17 +433,19 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= 
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= @@ -439,6 +453,7 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= @@ -472,6 +487,7 @@ github.com/tsuyoshiwada/go-gitcmd v0.0.0-20180205145712-5f1f5f9475df/go.mod h1:p github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/tyler-smith/go-bip39 v1.0.2 h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8= github.com/tyler-smith/go-bip39 v1.0.2/go.mod 
h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/unrolled/secure v1.0.8 h1:JaMvKbe4CRt8oyxVXn+xY+6jlqd7pyJNSVkmsBxxQsM= github.com/unrolled/secure v1.0.8/go.mod h1:fO+mEan+FLB0CdEnHf6Q4ZZVNqG+5fuLFnP8p0BXDPI= github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= @@ -494,12 +510,14 @@ github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+m github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY= @@ -512,6 +530,7 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -558,7 +577,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -612,6 +630,7 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -744,6 +763,7 @@ gopkg.in/DataDog/dd-trace-go.v1 v1.29.0/go.mod h1:FLwUDeuH0z5hkvgvd04/M3MHQN4AF5 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -779,6 +799,5 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt mellium.im/sasl v0.2.1 h1:nspKSRg7/SyO0cRGY71OkfHab8tf9kCts6a6oTDut0w= mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ= pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= -pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/integration_testing/tools.go b/integration_testing/tools.go index ff50d3bf9..3347c7096 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -14,7 +14,7 @@ import ( 
"github.com/btcsuite/btcd/wire" "github.com/deso-protocol/core/cmd" "github.com/deso-protocol/core/lib" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" "github.com/stretchr/testify/require" diff --git a/lib/block_view.go b/lib/block_view.go index 25b4275c3..2cd959419 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -18,7 +18,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index 6f50bff4f..5152a118a 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -2,14 +2,15 @@ package lib import ( "bytes" + "math" + "reflect" + "testing" + "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "math" - "reflect" - "testing" ) func TestAtomicTxnsWrapperTxIndexMetadataEncoder(t *testing.T) { diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index 4c669a5eb..dd1498a52 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -14,7 +14,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" merkletree "github.com/deso-protocol/go-merkle-tree" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_creator_coin_test.go b/lib/block_view_creator_coin_test.go index 7e4a3a05b..e25af8ac9 100644 --- a/lib/block_view_creator_coin_test.go +++ b/lib/block_view_creator_coin_test.go @@ -5,7 +5,7 @@ import ( "strconv" "testing" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_dao_coin_limit_order_test.go b/lib/block_view_dao_coin_limit_order_test.go index dbdab90da..2c8faa27d 100644 --- a/lib/block_view_dao_coin_limit_order_test.go +++ b/lib/block_view_dao_coin_limit_order_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/holiman/uint256" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_dao_coin_test.go b/lib/block_view_dao_coin_test.go index e59c05acf..86a7d5d7b 100644 --- a/lib/block_view_dao_coin_test.go +++ b/lib/block_view_dao_coin_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/holiman/uint256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index 6024bc898..f9540a6ea 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -11,7 +11,7 @@ import ( "time" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/holiman/uint256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index d1e7cde6f..484ad1b56 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -5,7 +5,7 @@ import ( "reflect" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/block_view_follow_test.go b/lib/block_view_follow_test.go index ef3823f63..fedb91a03 100644 --- a/lib/block_view_follow_test.go +++ b/lib/block_view_follow_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/dgraph-io/badger/v4" + 
"github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_like_test.go b/lib/block_view_like_test.go index a757c7f70..01b715475 100644 --- a/lib/block_view_like_test.go +++ b/lib/block_view_like_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index 85fab796f..316362ede 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -7,7 +7,7 @@ import ( "sort" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 4c4cf5255..6d25cb1fa 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/holiman/uint256" "github.com/pkg/errors" "github.com/stretchr/testify/assert" diff --git a/lib/block_view_message_test.go b/lib/block_view_message_test.go index 7e6fcc5f5..8e5fab0a7 100644 --- a/lib/block_view_message_test.go +++ b/lib/block_view_message_test.go @@ -10,7 +10,7 @@ import ( "time" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_nft_test.go b/lib/block_view_nft_test.go index 2f4cfd64c..ba00d8ece 100644 --- a/lib/block_view_nft_test.go +++ b/lib/block_view_nft_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" ) diff --git a/lib/block_view_post.go b/lib/block_view_post.go index 2ea329a0c..2aca533d6 100644 --- a/lib/block_view_post.go +++ b/lib/block_view_post.go @@ -13,7 +13,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/gernest/mention" "github.com/golang/glog" "github.com/pkg/errors" diff --git a/lib/block_view_post_test.go b/lib/block_view_post_test.go index 264df7d69..e84eaa1c9 100644 --- a/lib/block_view_post_test.go +++ b/lib/block_view_post_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/lib/block_view_profile_test.go b/lib/block_view_profile_test.go index c59e37cd5..01101d005 100644 --- a/lib/block_view_profile_test.go +++ b/lib/block_view_profile_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index c6b07e19e..df94f152d 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -5,7 +5,7 @@ import ( "fmt" "sort" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 91e4e5db6..b6c67b3ef 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -14,7 +14,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/decred/dcrd/lru" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" embeddedpostgres "github.com/fergusstrange/embedded-postgres" "github.com/golang/glog" "github.com/stretchr/testify/assert" diff --git 
a/lib/block_view_validator.go b/lib/block_view_validator.go index 7b478289a..be5b4693f 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -13,7 +13,7 @@ import ( "github.com/deso-protocol/core/consensus" "github.com/deso-protocol/core/bls" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" diff --git a/lib/blockchain.go b/lib/blockchain.go index b4397d5c6..2cfe1b0a5 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -30,7 +30,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/deso-protocol/go-deadlock" merkletree "github.com/deso-protocol/go-merkle-tree" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index 0ecace8dd..77929e34d 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -18,7 +18,7 @@ import ( chainlib "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/lib/db_adapter.go b/lib/db_adapter.go index 8cff5d757..4eea4d0da 100644 --- a/lib/db_adapter.go +++ b/lib/db_adapter.go @@ -4,7 +4,7 @@ import ( "bytes" "sort" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/db_utils.go b/lib/db_utils.go index 78f0ca79e..d0285f75d 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -23,7 +23,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index 712120468..24b46c51d 100644 --- 
a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -11,7 +11,7 @@ import ( "time" "github.com/btcsuite/btcd/btcec" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 7c9f466a2..f195293a9 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -17,7 +17,7 @@ import ( "time" "github.com/btcsuite/btcutil" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/gernest/mention" "github.com/btcsuite/btcd/btcec" diff --git a/lib/notifier.go b/lib/notifier.go index b6c88c37d..a42ffb3ab 100644 --- a/lib/notifier.go +++ b/lib/notifier.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/gernest/mention" "github.com/go-pg/pg/v10" diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 589b34f96..dc398bebe 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -9,7 +9,7 @@ import ( "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index ef4e3c78d..fed62d90e 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -3,7 +3,7 @@ package lib import ( "bytes" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index b867e5214..7e0ba023e 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -9,7 +9,7 @@ import ( "github.com/decred/dcrd/lru" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/pos_mempool_persister.go b/lib/pos_mempool_persister.go index ceeface31..05a1ef617 100644 --- 
a/lib/pos_mempool_persister.go +++ b/lib/pos_mempool_persister.go @@ -5,7 +5,7 @@ import ( "sync" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/pos_mempool_persister_test.go b/lib/pos_mempool_persister_test.go index 73887a01f..6cd1ef25d 100644 --- a/lib/pos_mempool_persister_test.go +++ b/lib/pos_mempool_persister_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/require" ) diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 8dced4464..d04883919 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index b8430e29e..8e26977aa 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -6,7 +6,7 @@ import ( "golang.org/x/crypto/sha3" "github.com/deso-protocol/core/bls" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/holiman/uint256" "github.com/pkg/errors" diff --git a/lib/pos_snapshot_cache.go b/lib/pos_snapshot_cache.go index f320e99b0..ec495633c 100644 --- a/lib/pos_snapshot_cache.go +++ b/lib/pos_snapshot_cache.go @@ -3,7 +3,7 @@ package lib import ( "sync" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/pkg/errors" ) diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index d2d941c2a..393f9aecc 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -8,7 +8,7 @@ import ( "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" 
"github.com/pkg/errors" ) diff --git a/lib/postgres.go b/lib/postgres.go index f3cae7833..fae62bc16 100644 --- a/lib/postgres.go +++ b/lib/postgres.go @@ -10,7 +10,7 @@ import ( "regexp" "strings" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/go-pg/pg/v10" "github.com/go-pg/pg/v10/orm" "github.com/golang/glog" diff --git a/lib/server.go b/lib/server.go index 7c1265b99..cc12055cf 100644 --- a/lib/server.go +++ b/lib/server.go @@ -23,7 +23,7 @@ import ( chainlib "github.com/btcsuite/btcd/blockchain" "github.com/davecgh/go-spew/spew" "github.com/deso-protocol/go-deadlock" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/snapshot.go b/lib/snapshot.go index b5d7c294d..1d2af495e 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -15,7 +15,7 @@ import ( "github.com/cloudflare/circl/group" "github.com/decred/dcrd/lru" "github.com/deso-protocol/go-deadlock" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/fatih/color" "github.com/golang/glog" "github.com/google/uuid" diff --git a/lib/txindex.go b/lib/txindex.go index 7a2ef725e..ffe1ac745 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" chainlib "github.com/btcsuite/btcd/blockchain" "github.com/deso-protocol/go-deadlock" From 3fe7c3093345b2bebf56c6759e5bcf56fe932d6f Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Thu, 6 Jun 2024 19:03:58 -0400 Subject: [PATCH 754/762] Revert errors.Is(...) 
Usage (#1364) --- lib/db_utils.go | 5 ++--- lib/snapshot.go | 12 ++++++------ 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/lib/db_utils.go b/lib/db_utils.go index d0285f75d..d566a363c 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -1112,7 +1112,7 @@ func DBSetWithTxn(txn *badger.Txn, snap *Snapshot, key []byte, value []byte, eve ancestralValue, getError = DBGetWithTxn(txn, snap, key) // If there is some error with the DB read, other than non-existent key, we return. - if getError != nil && !errors.Is(getError, badger.ErrKeyNotFound) { + if getError != nil && getError != badger.ErrKeyNotFound { return errors.Wrapf(getError, "DBSetWithTxn: problem reading record "+ "from DB with key: %v", key) } @@ -1130,8 +1130,7 @@ func DBSetWithTxn(txn *badger.Txn, snap *Snapshot, key []byte, value []byte, eve keyString := hex.EncodeToString(key) // Update ancestral record structures depending on the existing DB record. - if err = snap.PrepareAncestralRecord( - keyString, ancestralValue, !errors.Is(getError, badger.ErrKeyNotFound)); err != nil { + if err = snap.PrepareAncestralRecord(keyString, ancestralValue, getError != badger.ErrKeyNotFound); err != nil { return errors.Wrapf(err, "DBSetWithTxn: Problem preparing ancestral record") } // Now save the newest record to cache. diff --git a/lib/snapshot.go b/lib/snapshot.go index 1d2af495e..6961f7ffd 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -850,7 +850,7 @@ func (snap *Snapshot) flushAncestralRecordsHelper( // No need to set it in that case. continue } - if !errors.Is(err, badger.ErrKeyNotFound) { + if err != badger.ErrKeyNotFound { // In this case, we hit a real error with Badger, so we should return. 
return errors.Wrapf(err, "Snapshot.FlushAncestralRecords: Problem "+ "reading exsiting record in the DB at key: %v", key) @@ -1439,7 +1439,7 @@ func (sc *StateChecksum) Initialize(mainDb *badger.DB, snapshotDbMutex *sync.Mut // If we get here, it means we've saved a checksum in the db, so we will set it to the checksum. return sc.FromBytes(value) }) - if err != nil && !errors.Is(err, badger.ErrKeyNotFound) { + if err != nil && err != badger.ErrKeyNotFound { return errors.Wrapf(err, "StateChecksum.Initialize: Problem reading checksum from the db") } return nil @@ -1731,7 +1731,7 @@ func (metadata *SnapshotEpochMetadata) Initialize(mainDb *badger.DB, snapshotDbM }) // If we're starting the hyper sync node for the first time, then there will be no snapshot saved // and we'll get ErrKeyNotFound error. That's why we don't error when it happens. - if err != nil && !errors.Is(err, badger.ErrKeyNotFound) { + if err != nil && err != badger.ErrKeyNotFound { return errors.Wrapf(err, "Snapshot.NewSnapshot: Problem retrieving snapshot information from db") } return nil @@ -1943,7 +1943,7 @@ func (opChan *SnapshotOperationChannel) Initialize( opChan.StateSemaphore = int32(stateSemaphore) return nil }) - if err != nil && !errors.Is(err, badger.ErrKeyNotFound) { + if err != nil && err != badger.ErrKeyNotFound { return errors.Wrapf(err, "SnapshotOperationChannel.Initialize: Problem reading StateSemaphore from db") } @@ -2102,7 +2102,7 @@ func (status *SnapshotStatus) ReadStatus() error { rr := bytes.NewReader(statusBytes) return status.FromBytes(rr) }) - if err != nil && !errors.Is(err, badger.ErrKeyNotFound) { + if err != nil && err != badger.ErrKeyNotFound { return errors.Wrapf(err, "SnapshotStatus.ReadStatus: Problem reading status from db") } return nil @@ -2220,7 +2220,7 @@ func (migration *EncoderMigration) Initialize( migration.migrationChecksums = migrationChecksums return nil }) - if err != nil && !errors.Is(err, badger.ErrKeyNotFound) { + if err != nil && err != 
badger.ErrKeyNotFound { return errors.Wrapf(err, "EncoderMigrationChecksum.Initialize: Problem reading migration from db") } From 8df294319f1de1e71ebeaa419cf16d85715849e4 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 10 Jun 2024 11:31:51 -0400 Subject: [PATCH 755/762] Remove Redundant Error Check After NewUtxoView (#1365) --- lib/blockchain.go | 12 +----------- lib/txindex.go | 8 -------- 2 files changed, 1 insertion(+), 19 deletions(-) diff --git a/lib/blockchain.go b/lib/blockchain.go index 2cfe1b0a5..94cdd6994 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -2687,9 +2687,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // almost certainly be more efficient than doing a separate db call for each input // and output utxoView := NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return false, false, errors.Wrapf(err, "processblock: Problem initializing UtxoView in reorg") - } + // Verify that the utxo view is pointing to the current tip. 
if *utxoView.TipHash != *currentTip.Hash { return false, false, fmt.Errorf("ProcessBlock: Tip hash for utxo view (%v) is "+ @@ -4190,10 +4188,6 @@ func (bc *Blockchain) CreateNFTBidTxn( } } else { utxoView = NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, 0, 0, 0, errors.Wrapf(err, - "CreateNFTBidTxn: Problem creating new utxo view: ") - } } nftKey := MakeNFTKey(NFTPostHash, SerialNumber) @@ -5002,10 +4996,6 @@ func (bc *Blockchain) CreateMaxSpend( } } else { utxoView = NewUtxoView(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager) - if err != nil { - return nil, 0, 0, 0, errors.Wrapf(err, - "Blockchain.CreateMaxSpend: Problem getting UtxoView: ") - } } spendableBalance, err := utxoView.GetSpendableDeSoBalanceNanosForPublicKey( senderPkBytes, bc.BlockTip().Height) diff --git a/lib/txindex.go b/lib/txindex.go index ffe1ac745..2d23cf7e4 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -325,10 +325,6 @@ func (txi *TXIndex) Update() error { // Now that all the transactions have been deleted from our txindex, // it's safe to disconnect the block from our txindex chain. utxoView := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager) - if err != nil { - return fmt.Errorf( - "Update: Error initializing UtxoView: %v", err) - } utxoOps, err := GetUtxoOperationsForBlock( txi.TXIndexChain.DB(), nil, blockToDetach.Hash) if err != nil { @@ -409,10 +405,6 @@ func (txi *TXIndex) Update() error { // // Only set a BitcoinManager if we have one. This makes some tests pass. 
utxoView := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager) - if err != nil { - return fmt.Errorf( - "Update: Error initializing UtxoView: %v", err) - } if blockToAttach.Header.PrevBlockHash != nil { var utxoViewAndUtxoOps *BlockViewAndUtxoOps utxoViewAndUtxoOps, err = txi.TXIndexChain.getUtxoViewAndUtxoOpsAtBlockHash(*blockToAttach.Header.PrevBlockHash) From 1d271b45ca6fec01e8136c03f6d35847e37810c8 Mon Sep 17 00:00:00 2001 From: tholonious <99746187+tholonious@users.noreply.github.com> Date: Mon, 10 Jun 2024 14:44:37 -0400 Subject: [PATCH 756/762] Remove DatabaseCache Update in DBGetWithTxn (#1367) --- lib/db_utils.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lib/db_utils.go b/lib/db_utils.go index d566a363c..7688006fa 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -1190,13 +1190,6 @@ func DBGetWithTxn(txn *badger.Txn, snap *Snapshot, key []byte) ([]byte, error) { return nil, err } - // TODO: Do we want to update the database cache when performing GETs? I think it would be - // safer to ONLY update the cache when performing SETs. This way, we can avoid the possibility - // of the cache getting out of sync with the database when a badger view transaction is started - // before a badger update transaction begins. 
- if isState { - snap.DatabaseCache.Add(keyString, itemData) - } return itemData, nil } From f6280fee435abd45a3184a488a4435c65538d83f Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Thu, 13 Jun 2024 09:40:32 -0400 Subject: [PATCH 757/762] Save snapshot status (#1368) --- lib/snapshot.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/snapshot.go b/lib/snapshot.go index 6961f7ffd..7112b9847 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -997,6 +997,7 @@ func (snap *Snapshot) snapshotProcessBlockNoLock(blockNode *BlockNode) { height := uint64(blockNode.Height) if height > snap.Status.CurrentBlockHeight { snap.Status.CurrentBlockHeight = height + snap.Status.SaveStatus() // If we haven't hit the first snapshot block height yet, don't bother. if !snap.shouldPerformSnapshotOperations() { return @@ -1569,8 +1570,8 @@ func (sc *StateChecksum) AddOrRemoveBytesWithMigrations(keyInput []byte, valueIn // We add the current key, value encoding and encodings for all migrations. 
encodings = append(encodings, EncodeKeyAndValueForChecksum(key, value, blockHeight)) - checksumLock.Lock() - defer checksumLock.Unlock() + checksumLock.RLock() + defer checksumLock.RUnlock() for _, migration := range encoderMigrationChecksums { added := false migrationEncoding := EncodeKeyAndValueForChecksum(key, value, migration.BlockHeight) From a001099c9772273e7c87ec17540786bb8dee01f0 Mon Sep 17 00:00:00 2001 From: superzordon <88362450+superzordon@users.noreply.github.com> Date: Thu, 13 Jun 2024 12:42:40 -0400 Subject: [PATCH 758/762] Fix issue with incorrect field being passed via post state change metadata (#1370) --- lib/block_view_post.go | 2 +- lib/state_change_metadata.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/block_view_post.go b/lib/block_view_post.go index 2aca533d6..445d8241e 100644 --- a/lib/block_view_post.go +++ b/lib/block_view_post.go @@ -1123,7 +1123,7 @@ func (bav *UtxoView) _connectSubmitPost(txn *MsgDeSoTxn, txHash *BlockHash, bloc // Create metadata object to store in the db alongside the transaction. stateChangeMetadata := &SubmitPostStateChangeMetadata{ - PostEntry: newPostEntry, + ParentPostEntry: newParentPostEntry, ProfilesMentioned: profilesMentioned, RepostPostEntry: newRepostedPostEntry, } diff --git a/lib/state_change_metadata.go b/lib/state_change_metadata.go index f320d1091..db8eaed42 100644 --- a/lib/state_change_metadata.go +++ b/lib/state_change_metadata.go @@ -126,14 +126,14 @@ func (ccTransferSCM *CCTransferStateChangeMetadata) GetEncoderType() EncoderType } type SubmitPostStateChangeMetadata struct { - PostEntry *PostEntry + ParentPostEntry *PostEntry ProfilesMentioned []*ProfileEntry RepostPostEntry *PostEntry } func (submitPostSCM *SubmitPostStateChangeMetadata) RawEncodeWithoutMetadata(blockHeight uint64, skipMetadata ...bool) []byte { var data []byte - data = append(data, EncodeToBytes(blockHeight, submitPostSCM.PostEntry, skipMetadata...)...) 
+ data = append(data, EncodeToBytes(blockHeight, submitPostSCM.ParentPostEntry, skipMetadata...)...) data = append(data, EncodeToBytes(blockHeight, submitPostSCM.RepostPostEntry, skipMetadata...)...) data = append(data, EncodeDeSoEncoderSlice(submitPostSCM.ProfilesMentioned, blockHeight, skipMetadata...)...) @@ -144,7 +144,7 @@ func (submitPostSCM *SubmitPostStateChangeMetadata) RawEncodeWithoutMetadata(blo func (submitPostSCM *SubmitPostStateChangeMetadata) RawDecodeWithoutMetadata(blockHeight uint64, rr *bytes.Reader) error { var err error - if submitPostSCM.PostEntry, err = DecodeDeSoEncoder(&PostEntry{}, rr); err != nil { + if submitPostSCM.ParentPostEntry, err = DecodeDeSoEncoder(&PostEntry{}, rr); err != nil { return errors.Wrapf(err, "SubmitPostStateChangeMetadata.Decode: Problem reading PostEntry") } From 5e6fc541543a2191b05c69737569a88e73985898 Mon Sep 17 00:00:00 2001 From: diamondhands Date: Fri, 14 Jun 2024 16:15:27 -0700 Subject: [PATCH 759/762] Add key for trading fees --- lib/constants.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/constants.go b/lib/constants.go index e57016961..659c1d743 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1792,6 +1792,12 @@ const ( // the amount of royalties that should be added to pkid's creator coin upon sale of this NFT. CoinRoyaltiesMapKey = "CoinRoyaltiesMap" + // Key in transaction's extra data map. If present, the value represents a map of pkid to basis points. The + // key represents who to pay the fee to and the value indicates how many basis points to charge. All fees are + // charged as "taker fees" and are paid to the pkid in the map in the *quote currency* of the market (typically + // DESO, USDC, or FOCUS). 
+ TokenTradingFeesMapKey = "TokenTradingFeesMap" + // Used to distinguish v3 messages from previous iterations MessagesVersionString = "V" MessagesVersion1 = 1 From 17b0d5699c89fcc66a8b1a8d350c1170a8ac06bd Mon Sep 17 00:00:00 2001 From: diamondhands Date: Sun, 7 Jul 2024 17:05:07 -0700 Subject: [PATCH 760/762] Fix fee computation for legacy pos mempool --- lib/legacy_mempool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index f195293a9..023056fa1 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2508,7 +2508,7 @@ func EstimateMaxTxnFeeV1(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) uint64 { func (mp *DeSoMempool) EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) (uint64, error) { feeRate := mp.EstimateFeeRate(minFeeRateNanosPerKB) - return EstimateMaxTxnFeeV1(txn, feeRate), nil + return computeFeeRecursive(txn, feeRate) } func (mp *DeSoMempool) EstimateFeeRate(minFeeRateNanosPerKB uint64) uint64 { From 62609cee73f5797b0ace6b92931b6dc47a541499 Mon Sep 17 00:00:00 2001 From: diamondhands Date: Fri, 12 Jul 2024 13:23:42 -0700 Subject: [PATCH 761/762] change key name --- lib/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/constants.go b/lib/constants.go index 8b2645bbc..96e86bce8 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1795,7 +1795,7 @@ const ( // key represents who to pay the fee to and the value indicates how many basis points to charge. All fees are // charged as "taker fees" and are paid to the pkid in the map in the *quote currency* of the market (typically // DESO, USDC, or FOCUS). 
- TokenTradingFeesMapKey = "TokenTradingFeesMap" + TokenTradingFeesByPkidMapKey = "TokenTradingFeesByPkidMap" // Used to distinguish v3 messages from previous iterations MessagesVersionString = "V" From c677661ea4e40e60e5a61d80bd1a085528328e03 Mon Sep 17 00:00:00 2001 From: diamondhands Date: Fri, 12 Jul 2024 19:30:27 -0700 Subject: [PATCH 762/762] Revert a legacy mempool update --- lib/legacy_mempool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 023056fa1..f195293a9 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2508,7 +2508,7 @@ func EstimateMaxTxnFeeV1(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) uint64 { func (mp *DeSoMempool) EstimateFee(txn *MsgDeSoTxn, minFeeRateNanosPerKB uint64) (uint64, error) { feeRate := mp.EstimateFeeRate(minFeeRateNanosPerKB) - return computeFeeRecursive(txn, feeRate) + return EstimateMaxTxnFeeV1(txn, feeRate), nil } func (mp *DeSoMempool) EstimateFeeRate(minFeeRateNanosPerKB uint64) uint64 {